sessiond: Implement UST event notifier error counter
src/bin/lttng-sessiond/ust-app.c (lttng-tools.git)
1 /*
2 * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
3 * Copyright (C) 2016 Jérémie Galarneau <jeremie.galarneau@efficios.com>
4 *
5 * SPDX-License-Identifier: GPL-2.0-only
6 *
7 */
8
9 #define _LGPL_SOURCE
10 #include <errno.h>
11 #include <fcntl.h>
12 #include <inttypes.h>
13 #include <pthread.h>
14 #include <stdio.h>
15 #include <stdlib.h>
16 #include <string.h>
17 #include <sys/mman.h>
18 #include <sys/stat.h>
19 #include <sys/types.h>
20 #include <unistd.h>
21 #include <urcu/compiler.h>
22 #include <signal.h>
23
24 #include <common/bytecode/bytecode.h>
25 #include <common/compat/errno.h>
26 #include <common/common.h>
27 #include <common/hashtable/utils.h>
28 #include <lttng/event-rule/event-rule.h>
29 #include <lttng/event-rule/event-rule-internal.h>
30 #include <lttng/event-rule/tracepoint.h>
31 #include <lttng/condition/condition.h>
32 #include <lttng/condition/on-event-internal.h>
33 #include <lttng/condition/on-event.h>
34 #include <lttng/trigger/trigger-internal.h>
35 #include <common/sessiond-comm/sessiond-comm.h>
36
37 #include "buffer-registry.h"
38 #include "condition-internal.h"
39 #include "fd-limit.h"
40 #include "health-sessiond.h"
41 #include "ust-app.h"
42 #include "ust-consumer.h"
43 #include "lttng-ust-ctl.h"
44 #include "lttng-ust-error.h"
45 #include "utils.h"
46 #include "session.h"
47 #include "lttng-sessiond.h"
48 #include "notification-thread-commands.h"
49 #include "rotate.h"
50 #include "event.h"
51 #include "event-notifier-error-accounting.h"
52
53
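/*
 * Global registries of traceable applications, keyed respectively by PID,
 * by command socket and by notify socket.
 */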
54 struct lttng_ht *ust_app_ht;
55 struct lttng_ht *ust_app_ht_by_sock;
56 struct lttng_ht *ust_app_ht_by_notify_sock;
57
58 static
59 int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess);
60
61 /* Next available channel key. Access under next_channel_key_lock. */
62 static uint64_t _next_channel_key;
63 static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;
64
65 /* Next available session ID. Access under next_session_id_lock. */
66 static uint64_t _next_session_id;
67 static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;
68
69 /*
70 * Return the incremented value of next_channel_key.
71 */
72 static uint64_t get_next_channel_key(void)
73 {
74 uint64_t ret;
75
76 pthread_mutex_lock(&next_channel_key_lock);
77 ret = ++_next_channel_key;
78 pthread_mutex_unlock(&next_channel_key_lock);
79 return ret;
80 }
81
82 /*
83 * Return the atomically incremented value of next_session_id.
84 */
85 static uint64_t get_next_session_id(void)
86 {
87 uint64_t ret;
88
89 pthread_mutex_lock(&next_session_id_lock);
90 ret = ++_next_session_id;
91 pthread_mutex_unlock(&next_session_id_lock);
92 return ret;
93 }
94
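/*
 * Copy the attributes of a UST ABI channel into the ustctl consumer channel
 * attribute structure.
 */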
95 static void copy_channel_attr_to_ustctl(
96 struct ustctl_consumer_channel_attr *attr,
97 struct lttng_ust_abi_channel_attr *uattr)
98 {
99 /* Copy channel attributes since the layout is different. */
100 attr->subbuf_size = uattr->subbuf_size;
101 attr->num_subbuf = uattr->num_subbuf;
102 attr->overwrite = uattr->overwrite;
103 attr->switch_timer_interval = uattr->switch_timer_interval;
104 attr->read_timer_interval = uattr->read_timer_interval;
105 attr->output = uattr->output;
106 attr->blocking_timeout = uattr->u.s.blocking_timeout;
107 }
108
109 /*
110 * Match function for the hash table lookup.
111 *
112 * It matches an ust app event based on three attributes which are the event
113 * name, the filter bytecode and the loglevel.
114 */
115 static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
116 {
117 struct ust_app_event *event;
118 const struct ust_app_ht_key *key;
119 int ev_loglevel_value;
120
121 assert(node);
122 assert(_key);
123
124 event = caa_container_of(node, struct ust_app_event, node.node);
125 key = _key;
126 ev_loglevel_value = event->attr.loglevel;
127
128 /* Match the 4 elements of the key: name, filter, loglevel, exclusions */
129
130 /* Event name */
131 if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
132 goto no_match;
133 }
134
135 /* Event loglevel. */
136 if (ev_loglevel_value != key->loglevel_type) {
137 if (event->attr.loglevel_type == LTTNG_UST_ABI_LOGLEVEL_ALL
138 && key->loglevel_type == 0 &&
139 ev_loglevel_value == -1) {
140 /*
141 * Match is accepted. This is because on event creation, the
142 * loglevel is set to -1 if the event loglevel type is ALL so 0 and
143 * -1 are accepted for this loglevel type since 0 is the one set by
144 * the API when receiving an enable event.
145 */
146 } else {
147 goto no_match;
148 }
149 }
150
151 /* If only one of the two filters is set, there is no match. */
152 if ((key->filter && !event->filter) || (!key->filter && event->filter)) {
153 goto no_match;
154 }
155
156 if (key->filter && event->filter) {
157 /* Both filters exist; check the length followed by the bytecode. */
158 if (event->filter->len != key->filter->len ||
159 memcmp(event->filter->data, key->filter->data,
160 event->filter->len) != 0) {
161 goto no_match;
162 }
163 }
164
165 /* If only one of the two exclusions is set, there is no match. */
166 if ((key->exclusion && !event->exclusion) || (!key->exclusion && event->exclusion)) {
167 goto no_match;
168 }
169
170 if (key->exclusion && event->exclusion) {
171 /* Both exclusions exist; check the count followed by the names. */
172 if (event->exclusion->count != key->exclusion->count ||
173 memcmp(event->exclusion->names, key->exclusion->names,
174 event->exclusion->count * LTTNG_UST_ABI_SYM_NAME_LEN) != 0) {
175 goto no_match;
176 }
177 }
178
179
180 /* Match. */
181 return 1;
182
183 no_match:
184 return 0;
185 }
186
187 /*
188 * Unique add of an ust app event in the given ht. This uses the custom
189 * ht_match_ust_app_event match function and the event name as hash.
190 */
191 static void add_unique_ust_app_event(struct ust_app_channel *ua_chan,
192 struct ust_app_event *event)
193 {
194 struct cds_lfht_node *node_ptr;
195 struct ust_app_ht_key key;
196 struct lttng_ht *ht;
197
198 assert(ua_chan);
199 assert(ua_chan->events);
200 assert(event);
201
202 ht = ua_chan->events;
203 key.name = event->attr.name;
204 key.filter = event->filter;
205 key.loglevel_type = event->attr.loglevel;
206 key.exclusion = event->exclusion;
207
208 node_ptr = cds_lfht_add_unique(ht->ht,
209 ht->hash_fct(event->node.key, lttng_ht_seed),
210 ht_match_ust_app_event, &key, &event->node.node);
211 assert(node_ptr == &event->node.node);
212 }
213
214 /*
215 * Close the notify socket from the given RCU head object. This MUST be called
216 * through a call_rcu().
217 */
218 static void close_notify_sock_rcu(struct rcu_head *head)
219 {
220 int ret;
221 struct ust_app_notify_sock_obj *obj =
222 caa_container_of(head, struct ust_app_notify_sock_obj, head);
223
224 /* Must have a valid fd here. */
225 assert(obj->fd >= 0);
226
227 ret = close(obj->fd);
228 if (ret) {
229 ERR("close notify sock %d RCU", obj->fd);
230 }
231 lttng_fd_put(LTTNG_FD_APPS, 1);
232
233 free(obj);
234 }
235
236 /*
237 * Return the session registry according to the buffer type of the given
238 * session.
239 *
240 * A per-UID registry object MUST exist before calling this function; NULL is
241 * returned if it cannot be found. The RCU read-side lock must be acquired.
242 */
243 static struct ust_registry_session *get_session_registry(
244 struct ust_app_session *ua_sess)
245 {
246 struct ust_registry_session *registry = NULL;
247
248 assert(ua_sess);
249
250 switch (ua_sess->buffer_type) {
251 case LTTNG_BUFFER_PER_PID:
252 {
253 struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
254 if (!reg_pid) {
255 goto error;
256 }
257 registry = reg_pid->registry->reg.ust;
258 break;
259 }
260 case LTTNG_BUFFER_PER_UID:
261 {
262 struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
263 ua_sess->tracing_id, ua_sess->bits_per_long,
264 lttng_credentials_get_uid(&ua_sess->real_credentials));
265 if (!reg_uid) {
266 goto error;
267 }
268 registry = reg_uid->registry->reg.ust;
269 break;
270 }
271 default:
272 assert(0);
273 };
274
275 error:
276 return registry;
277 }
278
279 /*
280 * Delete ust context safely. RCU read lock must be held before calling
281 * this function.
282 */
283 static
284 void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx,
285 struct ust_app *app)
286 {
287 int ret;
288
289 assert(ua_ctx);
290
291 if (ua_ctx->obj) {
292 pthread_mutex_lock(&app->sock_lock);
293 ret = ustctl_release_object(sock, ua_ctx->obj);
294 pthread_mutex_unlock(&app->sock_lock);
295 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
296 ERR("UST app sock %d release ctx obj handle %d failed with ret %d",
297 sock, ua_ctx->obj->handle, ret);
298 }
299 free(ua_ctx->obj);
300 }
301 free(ua_ctx);
302 }
303
304 /*
305 * Delete ust app event safely. RCU read lock must be held before calling
306 * this function.
307 */
308 static
309 void delete_ust_app_event(int sock, struct ust_app_event *ua_event,
310 struct ust_app *app)
311 {
312 int ret;
313
314 assert(ua_event);
315
316 free(ua_event->filter);
317 if (ua_event->exclusion != NULL)
318 free(ua_event->exclusion);
319 if (ua_event->obj != NULL) {
320 pthread_mutex_lock(&app->sock_lock);
321 ret = ustctl_release_object(sock, ua_event->obj);
322 pthread_mutex_unlock(&app->sock_lock);
323 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
324 ERR("UST app sock %d release event obj failed with ret %d",
325 sock, ret);
326 }
327 free(ua_event->obj);
328 }
329 free(ua_event);
330 }
331
332 /*
333 * Delayed reclaim of a ust_app_event_notifier_rule object. This MUST be called
334 * through a call_rcu().
335 */
336 static
337 void free_ust_app_event_notifier_rule_rcu(struct rcu_head *head)
338 {
339 struct ust_app_event_notifier_rule *obj = caa_container_of(
340 head, struct ust_app_event_notifier_rule, rcu_head);
341
342 free(obj);
343 }
344
345 /*
346 * Delete ust app event notifier rule safely.
347 */
348 static void delete_ust_app_event_notifier_rule(int sock,
349 struct ust_app_event_notifier_rule *ua_event_notifier_rule,
350 struct ust_app *app)
351 {
352 int ret;
353
354 assert(ua_event_notifier_rule);
355
356 if (ua_event_notifier_rule->exclusion != NULL) {
357 free(ua_event_notifier_rule->exclusion);
358 }
359
360 if (ua_event_notifier_rule->obj != NULL) {
361 pthread_mutex_lock(&app->sock_lock);
362 ret = ustctl_release_object(sock, ua_event_notifier_rule->obj);
363 pthread_mutex_unlock(&app->sock_lock);
364 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
365 ERR("Failed to release event notifier object: app = '%s' (ppid %d), ret = %d",
366 app->name, (int) app->ppid, ret);
367 }
368
369 free(ua_event_notifier_rule->obj);
370 }
371
372 lttng_trigger_put(ua_event_notifier_rule->trigger);
373 call_rcu(&ua_event_notifier_rule->rcu_head,
374 free_ust_app_event_notifier_rule_rcu);
375 }
376
377 /*
378 * Release ust data object of the given stream.
379 *
380 * Return 0 on success or else a negative value.
381 */
382 static int release_ust_app_stream(int sock, struct ust_app_stream *stream,
383 struct ust_app *app)
384 {
385 int ret = 0;
386
387 assert(stream);
388
389 if (stream->obj) {
390 pthread_mutex_lock(&app->sock_lock);
391 ret = ustctl_release_object(sock, stream->obj);
392 pthread_mutex_unlock(&app->sock_lock);
393 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
394 ERR("UST app sock %d release stream obj failed with ret %d",
395 sock, ret);
396 }
397 lttng_fd_put(LTTNG_FD_APPS, 2);
398 free(stream->obj);
399 }
400
401 return ret;
402 }
403
404 /*
405 * Delete ust app stream safely. RCU read lock must be held before calling
406 * this function.
407 */
408 static
409 void delete_ust_app_stream(int sock, struct ust_app_stream *stream,
410 struct ust_app *app)
411 {
412 assert(stream);
413
414 (void) release_ust_app_stream(sock, stream, app);
415 free(stream);
416 }
417
418 /*
419 * We need to execute ht_destroy outside of RCU read-side critical
420 * section and outside of call_rcu thread, so we postpone its execution
421 * using ht_cleanup_push. This is simpler than changing the semantics of
422 * the many callers of delete_ust_app_session().
423 */
424 static
425 void delete_ust_app_channel_rcu(struct rcu_head *head)
426 {
427 struct ust_app_channel *ua_chan =
428 caa_container_of(head, struct ust_app_channel, rcu_head);
429
430 ht_cleanup_push(ua_chan->ctx);
431 ht_cleanup_push(ua_chan->events);
432 free(ua_chan);
433 }
434
435 /*
436 * Extract the lost packet or discarded events counter when the channel is
437 * being deleted and store the value in the parent channel so it can be
438 * accessed by 'lttng list' and at stop/destroy.
439 *
440 * The session list lock must be held by the caller.
441 */
442 static
443 void save_per_pid_lost_discarded_counters(struct ust_app_channel *ua_chan)
444 {
445 uint64_t discarded = 0, lost = 0;
446 struct ltt_session *session;
447 struct ltt_ust_channel *uchan;
448
449 if (ua_chan->attr.type != LTTNG_UST_ABI_CHAN_PER_CPU) {
450 return;
451 }
452
453 rcu_read_lock();
454 session = session_find_by_id(ua_chan->session->tracing_id);
455 if (!session || !session->ust_session) {
456 /*
457 * Not finding the session is not an error because there are
458 * multiple ways the channels can be torn down.
459 *
460 * 1) The session daemon can initiate the destruction of the
461 * ust app session after receiving a destroy command or
462 * during its shutdown/teardown.
463 * 2) The application, since we are in per-pid tracing, is
464 * unregistering and tearing down its ust app session.
465 *
466 * Both paths are protected by the session list lock which
467 * ensures that the accounting of lost packets and discarded
468 * events is done exactly once. The session is then unpublished
469 * from the session list, resulting in this condition.
470 */
471 goto end;
472 }
473
474 if (ua_chan->attr.overwrite) {
475 consumer_get_lost_packets(ua_chan->session->tracing_id,
476 ua_chan->key, session->ust_session->consumer,
477 &lost);
478 } else {
479 consumer_get_discarded_events(ua_chan->session->tracing_id,
480 ua_chan->key, session->ust_session->consumer,
481 &discarded);
482 }
483 uchan = trace_ust_find_channel_by_name(
484 session->ust_session->domain_global.channels,
485 ua_chan->name);
486 if (!uchan) {
487 ERR("Missing UST channel to store discarded counters");
488 goto end;
489 }
490
491 uchan->per_pid_closed_app_discarded += discarded;
492 uchan->per_pid_closed_app_lost += lost;
493
494 end:
495 rcu_read_unlock();
496 if (session) {
497 session_put(session);
498 }
499 }
500
501 /*
502 * Delete ust app channel safely. RCU read lock must be held before calling
503 * this function.
504 *
505 * The session list lock must be held by the caller.
506 */
507 static
508 void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
509 struct ust_app *app)
510 {
511 int ret;
512 struct lttng_ht_iter iter;
513 struct ust_app_event *ua_event;
514 struct ust_app_ctx *ua_ctx;
515 struct ust_app_stream *stream, *stmp;
516 struct ust_registry_session *registry;
517
518 assert(ua_chan);
519
520 DBG3("UST app deleting channel %s", ua_chan->name);
521
522 /* Wipe stream */
523 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
524 cds_list_del(&stream->list);
525 delete_ust_app_stream(sock, stream, app);
526 }
527
528 /* Wipe context */
529 cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
530 cds_list_del(&ua_ctx->list);
531 ret = lttng_ht_del(ua_chan->ctx, &iter);
532 assert(!ret);
533 delete_ust_app_ctx(sock, ua_ctx, app);
534 }
535
536 /* Wipe events */
537 cds_lfht_for_each_entry(ua_chan->events->ht, &iter.iter, ua_event,
538 node.node) {
539 ret = lttng_ht_del(ua_chan->events, &iter);
540 assert(!ret);
541 delete_ust_app_event(sock, ua_event, app);
542 }
543
544 if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
545 /* Wipe and free registry from session registry. */
546 registry = get_session_registry(ua_chan->session);
547 if (registry) {
548 ust_registry_channel_del_free(registry, ua_chan->key,
549 sock >= 0);
550 }
551 /*
552 * A negative socket can be used by the caller when
553 * cleaning-up a ua_chan in an error path. Skip the
554 * accounting in this case.
555 */
556 if (sock >= 0) {
557 save_per_pid_lost_discarded_counters(ua_chan);
558 }
559 }
560
561 if (ua_chan->obj != NULL) {
562 /* Remove channel from application UST object descriptor. */
563 iter.iter.node = &ua_chan->ust_objd_node.node;
564 ret = lttng_ht_del(app->ust_objd, &iter);
565 assert(!ret);
566 pthread_mutex_lock(&app->sock_lock);
567 ret = ustctl_release_object(sock, ua_chan->obj);
568 pthread_mutex_unlock(&app->sock_lock);
569 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
570 ERR("UST app sock %d release channel obj failed with ret %d",
571 sock, ret);
572 }
573 lttng_fd_put(LTTNG_FD_APPS, 1);
574 free(ua_chan->obj);
575 }
576 call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
577 }
578
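/*
 * Send the "register done" command to the application over its command
 * socket, with the socket lock held.
 */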
579 int ust_app_register_done(struct ust_app *app)
580 {
581 int ret;
582
583 pthread_mutex_lock(&app->sock_lock);
584 ret = ustctl_register_done(app->sock);
585 pthread_mutex_unlock(&app->sock_lock);
586 return ret;
587 }
588
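/*
 * Release a UST object on behalf of the application, taking the app's socket
 * lock. When no application is provided, an invalid socket (-1) is passed to
 * ustctl_release_object, which is assumed to only release local resources in
 * that case.
 */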
589 int ust_app_release_object(struct ust_app *app, struct lttng_ust_abi_object_data *data)
590 {
591 int ret, sock;
592
593 if (app) {
594 pthread_mutex_lock(&app->sock_lock);
595 sock = app->sock;
596 } else {
597 sock = -1;
598 }
599 ret = ustctl_release_object(sock, data);
600 if (app) {
601 pthread_mutex_unlock(&app->sock_lock);
602 }
603 return ret;
604 }
605
606 /*
607 * Push metadata to consumer socket.
608 *
609 * RCU read-side lock must be held to guarantee the existence of the socket.
610 * Must be called with the ust app session lock held.
611 * Must be called with the registry lock held.
612 *
613 * On success, return the length of metadata pushed or else a negative value.
614 * A -EPIPE return value means we could not send the metadata,
615 * but it can be caused by recoverable errors (e.g. the application has
616 * terminated concurrently).
617 */
618 ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
619 struct consumer_socket *socket, int send_zero_data)
620 {
621 int ret;
622 char *metadata_str = NULL;
623 size_t len, offset, new_metadata_len_sent;
624 ssize_t ret_val;
625 uint64_t metadata_key, metadata_version;
626
627 assert(registry);
628 assert(socket);
629
630 metadata_key = registry->metadata_key;
631
632 /*
633 * Means that no metadata was assigned to the session. This can
634 * happen if no start has been done previously.
635 */
636 if (!metadata_key) {
637 return 0;
638 }
639
640 offset = registry->metadata_len_sent;
641 len = registry->metadata_len - registry->metadata_len_sent;
642 new_metadata_len_sent = registry->metadata_len;
643 metadata_version = registry->metadata_version;
644 if (len == 0) {
645 DBG3("No metadata to push for metadata key %" PRIu64,
646 registry->metadata_key);
647 ret_val = len;
648 if (send_zero_data) {
649 DBG("No metadata to push");
650 goto push_data;
651 }
652 goto end;
653 }
654
655 /* Allocate only what we have to send. */
656 metadata_str = zmalloc(len);
657 if (!metadata_str) {
658 PERROR("zmalloc ust app metadata string");
659 ret_val = -ENOMEM;
660 goto error;
661 }
662 /* Copy what we haven't sent out. */
663 memcpy(metadata_str, registry->metadata + offset, len);
664
665 push_data:
666 pthread_mutex_unlock(&registry->lock);
667 /*
668 * We need to unlock the registry while we push metadata to
669 * break a circular dependency between the consumerd metadata
670 * lock and the sessiond registry lock. Indeed, pushing metadata
671 * to the consumerd waits until it has been pushed all the way to
672 * relayd, but doing so requires grabbing the metadata lock. If
673 * a concurrent metadata request is being performed by
674 * consumerd, this can try to grab the registry lock on the
675 * sessiond while holding the metadata lock on the consumer
676 * daemon. Those push and pull schemes are performed on two
677 * different bidirectional communication sockets.
678 */
679 ret = consumer_push_metadata(socket, metadata_key,
680 metadata_str, len, offset, metadata_version);
681 pthread_mutex_lock(&registry->lock);
682 if (ret < 0) {
683 /*
684 * There is an acceptable race here between the registry
685 * metadata key assignment and the creation on the
686 * consumer. The session daemon can concurrently push
687 * metadata for this registry while it is being created on the
688 * consumer, since the metadata key of the registry is
689 * assigned *before* it is set up; this avoids the consumer
690 * asking for metadata that could possibly not be found
691 * in the session daemon.
692 *
693 * The metadata will get pushed either by the session
694 * being stopped or the consumer requesting metadata if
695 * that race is triggered.
696 */
697 if (ret == -LTTCOMM_CONSUMERD_CHANNEL_FAIL) {
698 ret = 0;
699 } else {
700 ERR("Error pushing metadata to consumer");
701 }
702 ret_val = ret;
703 goto error_push;
704 } else {
705 /*
706 * Metadata may have been concurrently pushed, since
707 * we're not holding the registry lock while pushing to
708 * consumer. This is handled by the fact that we send
709 * the metadata content, size, and the offset at which
710 * that metadata belongs. This may arrive out of order
711 * on the consumer side, and the consumer is able to
712 * deal with overlapping fragments. The consumer
713 * supports overlapping fragments, which must be
714 * contiguous starting from offset 0. We keep the
715 * largest metadata_len_sent value of the concurrent
716 * send.
717 */
718 registry->metadata_len_sent =
719 max_t(size_t, registry->metadata_len_sent,
720 new_metadata_len_sent);
721 }
722 free(metadata_str);
723 return len;
724
725 end:
726 error:
727 if (ret_val) {
728 /*
729 * On error, flag the registry that the metadata is
730 * closed. We were unable to push anything and this
731 * means that either the consumer is not responding or
732 * the metadata cache has been destroyed on the
733 * consumer.
734 */
735 registry->metadata_closed = 1;
736 }
737 error_push:
738 free(metadata_str);
739 return ret_val;
740 }
741
742 /*
743 * For a given application and session, push metadata to consumer.
744 * The consumer socket used to send the metadata is retrieved from
745 * the given consumer output, based on the bitness recorded in the
746 * registry.
747 * RCU read-side lock must be held while calling this function,
748 * therefore ensuring the existence of the registry. It also ensures
749 * the existence of the socket throughout this function.
750 *
751 * Return 0 on success else a negative error.
752 * A -EPIPE return value means we could not send the metadata,
753 * but it can be caused by recoverable errors (e.g. the application has
754 * terminated concurrently).
755 */
756 static int push_metadata(struct ust_registry_session *registry,
757 struct consumer_output *consumer)
758 {
759 int ret_val;
760 ssize_t ret;
761 struct consumer_socket *socket;
762
763 assert(registry);
764 assert(consumer);
765
766 pthread_mutex_lock(&registry->lock);
767 if (registry->metadata_closed) {
768 ret_val = -EPIPE;
769 goto error;
770 }
771
772 /* Get consumer socket to use to push the metadata. */
773 socket = consumer_find_socket_by_bitness(registry->bits_per_long,
774 consumer);
775 if (!socket) {
776 ret_val = -1;
777 goto error;
778 }
779
780 ret = ust_app_push_metadata(registry, socket, 0);
781 if (ret < 0) {
782 ret_val = ret;
783 goto error;
784 }
785 pthread_mutex_unlock(&registry->lock);
786 return 0;
787
788 error:
789 pthread_mutex_unlock(&registry->lock);
790 return ret_val;
791 }
792
793 /*
794 * Send to the consumer a close metadata command for the given session. Once
795 * done, the metadata channel is deleted and the session metadata pointer is
796 * nullified. The session lock MUST be held unless the application is
797 * in the destroy path.
798 *
799 * Do not hold the registry lock while communicating with the consumerd, because
800 * doing so causes inter-process deadlocks between consumerd and sessiond with
801 * the metadata request notification.
802 *
803 * Return 0 on success else a negative value.
804 */
805 static int close_metadata(struct ust_registry_session *registry,
806 struct consumer_output *consumer)
807 {
808 int ret;
809 struct consumer_socket *socket;
810 uint64_t metadata_key;
811 bool registry_was_already_closed;
812
813 assert(registry);
814 assert(consumer);
815
816 rcu_read_lock();
817
818 pthread_mutex_lock(&registry->lock);
819 metadata_key = registry->metadata_key;
820 registry_was_already_closed = registry->metadata_closed;
821 if (metadata_key != 0) {
822 /*
823 * Mark the metadata as closed. Even on error this means that the
824 * consumer is not responding or not found, so either way a second
825 * close should NOT be emitted for this registry.
826 */
827 registry->metadata_closed = 1;
828 }
829 pthread_mutex_unlock(&registry->lock);
830
831 if (metadata_key == 0 || registry_was_already_closed) {
832 ret = 0;
833 goto end;
834 }
835
836 /* Get consumer socket to use to push the metadata. */
837 socket = consumer_find_socket_by_bitness(registry->bits_per_long,
838 consumer);
839 if (!socket) {
840 ret = -1;
841 goto end;
842 }
843
844 ret = consumer_close_metadata(socket, metadata_key);
845 if (ret < 0) {
846 goto end;
847 }
848
849 end:
850 rcu_read_unlock();
851 return ret;
852 }
853
854 /*
855 * We need to execute ht_destroy outside of RCU read-side critical
856 * section and outside of call_rcu thread, so we postpone its execution
857 * using ht_cleanup_push. This is simpler than changing the semantics of
858 * the many callers of delete_ust_app_session().
859 */
860 static
861 void delete_ust_app_session_rcu(struct rcu_head *head)
862 {
863 struct ust_app_session *ua_sess =
864 caa_container_of(head, struct ust_app_session, rcu_head);
865
866 ht_cleanup_push(ua_sess->channels);
867 free(ua_sess);
868 }
869
870 /*
871 * Delete ust app session safely. RCU read lock must be held before calling
872 * this function.
873 *
874 * The session list lock must be held by the caller.
875 */
876 static
877 void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
878 struct ust_app *app)
879 {
880 int ret;
881 struct lttng_ht_iter iter;
882 struct ust_app_channel *ua_chan;
883 struct ust_registry_session *registry;
884
885 assert(ua_sess);
886
887 pthread_mutex_lock(&ua_sess->lock);
888
889 assert(!ua_sess->deleted);
890 ua_sess->deleted = true;
891
892 registry = get_session_registry(ua_sess);
893 /* Registry can be null on error path during initialization. */
894 if (registry) {
895 /* Push metadata for application before freeing the application. */
896 (void) push_metadata(registry, ua_sess->consumer);
897
898 /*
899 * Don't ask to close metadata for global per-UID buffers. Close
900 * metadata only when the trace session is destroyed in this case. Also,
901 * the previous push metadata could have flagged the metadata registry as
902 * closed, so don't send a close command if it already is.
903 */
904 if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
905 /* And ask to close it for this session registry. */
906 (void) close_metadata(registry, ua_sess->consumer);
907 }
908 }
909
910 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
911 node.node) {
912 ret = lttng_ht_del(ua_sess->channels, &iter);
913 assert(!ret);
914 delete_ust_app_channel(sock, ua_chan, app);
915 }
916
917 /* In case of per PID, the registry is kept in the session. */
918 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
919 struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
920 if (reg_pid) {
921 /*
922 * Registry can be null on error path during
923 * initialization.
924 */
925 buffer_reg_pid_remove(reg_pid);
926 buffer_reg_pid_destroy(reg_pid);
927 }
928 }
929
930 if (ua_sess->handle != -1) {
931 pthread_mutex_lock(&app->sock_lock);
932 ret = ustctl_release_handle(sock, ua_sess->handle);
933 pthread_mutex_unlock(&app->sock_lock);
934 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
935 ERR("UST app sock %d release session handle failed with ret %d",
936 sock, ret);
937 }
938 /* Remove session from application UST object descriptor. */
939 iter.iter.node = &ua_sess->ust_objd_node.node;
940 ret = lttng_ht_del(app->ust_sessions_objd, &iter);
941 assert(!ret);
942 }
943
944 pthread_mutex_unlock(&ua_sess->lock);
945
946 consumer_output_put(ua_sess->consumer);
947
948 call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
949 }
950
951 /*
952 * Delete a traceable application structure from the global list. Never call
953 * this function outside of a call_rcu call.
954 *
955 * RCU read side lock should _NOT_ be held when calling this function.
956 */
957 static
958 void delete_ust_app(struct ust_app *app)
959 {
960 int ret, sock;
961 struct ust_app_session *ua_sess, *tmp_ua_sess;
962 struct lttng_ht_iter iter;
963 struct ust_app_event_notifier_rule *event_notifier_rule;
964 bool event_notifier_write_fd_is_open;
965
966 /*
967 * The session list lock must be held during this function to guarantee
968 * the existence of ua_sess.
969 */
970 session_lock_list();
971 /* Delete ust app sessions info */
972 sock = app->sock;
973 app->sock = -1;
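/*
 * Invalidate the command socket in the application structure right away;
 * the saved copy is closed at the end of this function, after the grace
 * period.
 */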
974
975 /* Wipe sessions */
976 cds_list_for_each_entry_safe(ua_sess, tmp_ua_sess, &app->teardown_head,
977 teardown_node) {
978 /* Free every object in the session and the session. */
979 rcu_read_lock();
980 delete_ust_app_session(sock, ua_sess, app);
981 rcu_read_unlock();
982 }
983
984 /* Remove the event notifier rules associated with this app. */
985 rcu_read_lock();
986 cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht,
987 &iter.iter, event_notifier_rule, node.node) {
988 ret = lttng_ht_del(app->token_to_event_notifier_rule_ht, &iter);
989 assert(!ret);
990
991 delete_ust_app_event_notifier_rule(
992 app->sock, event_notifier_rule, app);
993 }
994
995 rcu_read_unlock();
996
997 ht_cleanup_push(app->sessions);
998 ht_cleanup_push(app->ust_sessions_objd);
999 ht_cleanup_push(app->ust_objd);
1000 ht_cleanup_push(app->token_to_event_notifier_rule_ht);
1001
1002 /*
1003 * This could be NULL if the event notifier setup failed (e.g. the app
1004 * was killed or the tracer does not support this feature).
1005 */
1006 if (app->event_notifier_group.object) {
1007 enum lttng_error_code ret_code;
1008 enum event_notifier_error_accounting_status status;
1009
1010 const int event_notifier_read_fd = lttng_pipe_get_readfd(
1011 app->event_notifier_group.event_pipe);
1012
1013 ret_code = notification_thread_command_remove_tracer_event_source(
1014 notification_thread_handle,
1015 event_notifier_read_fd);
1016 if (ret_code != LTTNG_OK) {
1017 ERR("Failed to remove application tracer event source from notification thread");
1018 }
1019
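/*
 * Unregister the application from event notifier error accounting so that
 * its error counter resources can be released.
 */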
1020 status = event_notifier_error_accounting_unregister_app(app);
1021 if (status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
1022 ERR("Error unregistering app from event notifier error accounting");
1023 }
1024
1025 ustctl_release_object(sock, app->event_notifier_group.object);
1026 free(app->event_notifier_group.object);
1027 }
1028
1029 event_notifier_write_fd_is_open = lttng_pipe_is_write_open(
1030 app->event_notifier_group.event_pipe);
1031 lttng_pipe_destroy(app->event_notifier_group.event_pipe);
1032 /*
1033 * Release the file descriptors reserved for the event notifier pipe.
1034 * The app could be destroyed before the write end of the pipe could be
1035 * passed to the application (and closed). In that case, both file
1036 * descriptors must be released.
1037 */
1038 lttng_fd_put(LTTNG_FD_APPS, event_notifier_write_fd_is_open ? 2 : 1);
1039
1040 /*
1041 * Wait until we have deleted the application from the sock hash table
1042 * before closing this socket, otherwise an application could re-use the
1043 * socket ID and race with the teardown, using the same hash table entry.
1044 *
1045 * It's OK to leave the close in call_rcu. We want it to stay unique for
1046 * all RCU readers that could run concurrently with unregister app,
1047 * therefore we _need_ to only close that socket after a grace period. So
1048 * it should stay in this RCU callback.
1049 *
1050 * This close() is a very important step of the synchronization model so
1051 * every modification to this function must be carefully reviewed.
1052 */
1053 ret = close(sock);
1054 if (ret) {
1055 PERROR("close");
1056 }
1057 lttng_fd_put(LTTNG_FD_APPS, 1);
1058
1059 DBG2("UST app pid %d deleted", app->pid);
1060 free(app);
1061 session_unlock_list();
1062 }
1063
1064 /*
1065 * URCU intermediate call to delete an UST app.
1066 */
1067 static
1068 void delete_ust_app_rcu(struct rcu_head *head)
1069 {
1070 struct lttng_ht_node_ulong *node =
1071 caa_container_of(head, struct lttng_ht_node_ulong, head);
1072 struct ust_app *app =
1073 caa_container_of(node, struct ust_app, pid_n);
1074
1075 DBG3("Call RCU deleting app PID %d", app->pid);
1076 delete_ust_app(app);
1077 }
1078
1079 /*
1080 * Delete the session from the application ht and delete the data structure by
1081 * freeing every object inside and releasing them.
1082 *
1083 * The session list lock must be held by the caller.
1084 */
1085 static void destroy_app_session(struct ust_app *app,
1086 struct ust_app_session *ua_sess)
1087 {
1088 int ret;
1089 struct lttng_ht_iter iter;
1090
1091 assert(app);
1092 assert(ua_sess);
1093
1094 iter.iter.node = &ua_sess->node.node;
1095 ret = lttng_ht_del(app->sessions, &iter);
1096 if (ret) {
1097 /* Already scheduled for teardown. */
1098 goto end;
1099 }
1100
1101 /* Once deleted, free the data structure. */
1102 delete_ust_app_session(app->sock, ua_sess, app);
1103
1104 end:
1105 return;
1106 }
1107
1108 /*
1109 * Alloc new UST app session.
1110 */
1111 static
1112 struct ust_app_session *alloc_ust_app_session(void)
1113 {
1114 struct ust_app_session *ua_sess;
1115
1116 /* Init most of the default values by allocating and zeroing. */
1117 ua_sess = zmalloc(sizeof(struct ust_app_session));
1118 if (ua_sess == NULL) {
1119 PERROR("malloc");
1120 goto error_free;
1121 }
1122
1123 ua_sess->handle = -1;
1124 ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
1125 ua_sess->metadata_attr.type = LTTNG_UST_ABI_CHAN_METADATA;
1126 pthread_mutex_init(&ua_sess->lock, NULL);
1127
1128 return ua_sess;
1129
1130 error_free:
1131 return NULL;
1132 }
1133
1134 /*
1135 * Alloc new UST app channel.
1136 */
1137 static
1138 struct ust_app_channel *alloc_ust_app_channel(const char *name,
1139 struct ust_app_session *ua_sess,
1140 struct lttng_ust_abi_channel_attr *attr)
1141 {
1142 struct ust_app_channel *ua_chan;
1143
1144 /* Init most of the default values by allocating and zeroing. */
1145 ua_chan = zmalloc(sizeof(struct ust_app_channel));
1146 if (ua_chan == NULL) {
1147 PERROR("malloc");
1148 goto error;
1149 }
1150
1151 /* Setup channel name */
1152 strncpy(ua_chan->name, name, sizeof(ua_chan->name));
1153 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
1154
1155 ua_chan->enabled = 1;
1156 ua_chan->handle = -1;
1157 ua_chan->session = ua_sess;
1158 ua_chan->key = get_next_channel_key();
1159 ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
1160 ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
1161 lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);
1162
1163 CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
1164 CDS_INIT_LIST_HEAD(&ua_chan->ctx_list);
1165
1166 /* Copy attributes */
1167 if (attr) {
1168 /* Translate from lttng_ust_channel to ustctl_consumer_channel_attr. */
1169 ua_chan->attr.subbuf_size = attr->subbuf_size;
1170 ua_chan->attr.num_subbuf = attr->num_subbuf;
1171 ua_chan->attr.overwrite = attr->overwrite;
1172 ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
1173 ua_chan->attr.read_timer_interval = attr->read_timer_interval;
1174 ua_chan->attr.output = attr->output;
1175 ua_chan->attr.blocking_timeout = attr->u.s.blocking_timeout;
1176 }
1177 /* By default, the channel is a per cpu channel. */
1178 ua_chan->attr.type = LTTNG_UST_ABI_CHAN_PER_CPU;
1179
1180 DBG3("UST app channel %s allocated", ua_chan->name);
1181
1182 return ua_chan;
1183
1184 error:
1185 return NULL;
1186 }
1187
1188 /*
1189 * Allocate and initialize a UST app stream.
1190 *
1191 * Return newly allocated stream pointer or NULL on error.
1192 */
1193 struct ust_app_stream *ust_app_alloc_stream(void)
1194 {
1195 struct ust_app_stream *stream = NULL;
1196
1197 stream = zmalloc(sizeof(*stream));
1198 if (stream == NULL) {
1199 PERROR("zmalloc ust app stream");
1200 goto error;
1201 }
1202
1203 /* Zero could be a valid value for a handle, so initialize it to -1. */
1204 stream->handle = -1;
1205
1206 error:
1207 return stream;
1208 }
1209
1210 /*
1211 * Alloc new UST app event.
1212 */
1213 static
1214 struct ust_app_event *alloc_ust_app_event(char *name,
1215 struct lttng_ust_abi_event *attr)
1216 {
1217 struct ust_app_event *ua_event;
1218
1219 /* Init most of the default values by allocating and zeroing. */
1220 ua_event = zmalloc(sizeof(struct ust_app_event));
1221 if (ua_event == NULL) {
1222 PERROR("Failed to allocate ust_app_event structure");
1223 goto error;
1224 }
1225
1226 ua_event->enabled = 1;
1227 strncpy(ua_event->name, name, sizeof(ua_event->name));
1228 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
1229 lttng_ht_node_init_str(&ua_event->node, ua_event->name);
1230
1231 /* Copy attributes */
1232 if (attr) {
1233 memcpy(&ua_event->attr, attr, sizeof(ua_event->attr));
1234 }
1235
1236 DBG3("UST app event %s allocated", ua_event->name);
1237
1238 return ua_event;
1239
1240 error:
1241 return NULL;
1242 }
1243
1244 /*
1245 * Allocate a new UST app event notifier rule.
1246 */
1247 static struct ust_app_event_notifier_rule *alloc_ust_app_event_notifier_rule(
1248 struct lttng_trigger *trigger)
1249 {
1250 enum lttng_event_rule_generate_exclusions_status
1251 generate_exclusion_status;
1252 struct ust_app_event_notifier_rule *ua_event_notifier_rule;
1253 struct lttng_condition *condition = NULL;
1254 const struct lttng_event_rule *event_rule = NULL;
1255
1256 ua_event_notifier_rule = zmalloc(sizeof(struct ust_app_event_notifier_rule));
1257 if (ua_event_notifier_rule == NULL) {
1258 PERROR("Failed to allocate ust_app_event_notifier_rule structure");
1259 goto error;
1260 }
1261
1262 ua_event_notifier_rule->enabled = 1;
1263 ua_event_notifier_rule->token = lttng_trigger_get_tracer_token(trigger);
1264 lttng_ht_node_init_u64(&ua_event_notifier_rule->node,
1265 ua_event_notifier_rule->token);
1266
1267 condition = lttng_trigger_get_condition(trigger);
1268 assert(condition);
1269 assert(lttng_condition_get_type(condition) == LTTNG_CONDITION_TYPE_ON_EVENT);
1270
1271 assert(LTTNG_CONDITION_STATUS_OK == lttng_condition_on_event_get_rule(condition, &event_rule));
1272 assert(event_rule);
1273
1274 /* Acquire the event notifier's reference to the trigger. */
1275 lttng_trigger_get(trigger);
1276
1277 ua_event_notifier_rule->trigger = trigger;
1278 ua_event_notifier_rule->filter = lttng_event_rule_get_filter_bytecode(event_rule);
1279 generate_exclusion_status = lttng_event_rule_generate_exclusions(
1280 event_rule, &ua_event_notifier_rule->exclusion);
1281 switch (generate_exclusion_status) {
1282 case LTTNG_EVENT_RULE_GENERATE_EXCLUSIONS_STATUS_OK:
1283 case LTTNG_EVENT_RULE_GENERATE_EXCLUSIONS_STATUS_NONE:
1284 break;
1285 default:
1286 /* Error occurred. */
1287 ERR("Failed to generate exclusions from trigger while allocating an event notifier rule");
1288 goto error_put_trigger;
1289 }
1290
1291 DBG3("UST app event notifier rule allocated: token = %" PRIu64,
1292 ua_event_notifier_rule->token);
1293
1294 return ua_event_notifier_rule;
1295
1296 error_put_trigger:
1297 lttng_trigger_put(trigger);
1298 error:
1299 free(ua_event_notifier_rule);
1300 return NULL;
1301 }
1302
1303 /*
1304 * Alloc new UST app context.
1305 */
1306 static
1307 struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context_attr *uctx)
1308 {
1309 struct ust_app_ctx *ua_ctx;
1310
1311 ua_ctx = zmalloc(sizeof(struct ust_app_ctx));
1312 if (ua_ctx == NULL) {
1313 goto error;
1314 }
1315
1316 CDS_INIT_LIST_HEAD(&ua_ctx->list);
1317
1318 if (uctx) {
1319 memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
1320 if (uctx->ctx == LTTNG_UST_ABI_CONTEXT_APP_CONTEXT) {
1321 char *provider_name = NULL, *ctx_name = NULL;
1322
1323 provider_name = strdup(uctx->u.app_ctx.provider_name);
1324 ctx_name = strdup(uctx->u.app_ctx.ctx_name);
1325 if (!provider_name || !ctx_name) {
1326 free(provider_name);
1327 free(ctx_name);
1328 goto error;
1329 }
1330
1331 ua_ctx->ctx.u.app_ctx.provider_name = provider_name;
1332 ua_ctx->ctx.u.app_ctx.ctx_name = ctx_name;
1333 }
1334 }
1335
1336 DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);
1337 return ua_ctx;
1338 error:
1339 free(ua_ctx);
1340 return NULL;
1341 }
1342
1343 /*
1344 * Create a liblttng-ust filter bytecode from given bytecode.
1345 *
1346 * Return allocated filter or NULL on error.
1347 */
1348 static struct lttng_ust_abi_filter_bytecode *create_ust_filter_bytecode_from_bytecode(
1349 const struct lttng_bytecode *orig_f)
1350 {
1351 struct lttng_ust_abi_filter_bytecode *filter = NULL;
1352
1353 /* Copy filter bytecode. */
1354 filter = zmalloc(sizeof(*filter) + orig_f->len);
1355 if (!filter) {
1356 PERROR("Failed to allocate lttng_ust_filter_bytecode: bytecode len = %" PRIu32 " bytes", orig_f->len);
1357 goto error;
1358 }
1359
1360 assert(sizeof(struct lttng_bytecode) ==
1361 sizeof(struct lttng_ust_abi_filter_bytecode));
1362 memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
1363 error:
1364 return filter;
1365 }
1366
1367 /*
1368 * Create a liblttng-ust capture bytecode from given bytecode.
1369 *
1370 * Return allocated filter or NULL on error.
1371 */
1372 static struct lttng_ust_abi_capture_bytecode *
1373 create_ust_capture_bytecode_from_bytecode(const struct lttng_bytecode *orig_f)
1374 {
1375 struct lttng_ust_abi_capture_bytecode *capture = NULL;
1376
1377 /* Copy capture bytecode. */
1378 capture = zmalloc(sizeof(*capture) + orig_f->len);
1379 if (!capture) {
1380 PERROR("Failed to allocate lttng_ust_abi_capture_bytecode: bytecode len = %" PRIu32 " bytes", orig_f->len);
1381 goto error;
1382 }
1383
1384 assert(sizeof(struct lttng_bytecode) ==
1385 sizeof(struct lttng_ust_abi_capture_bytecode));
1386 memcpy(capture, orig_f, sizeof(*capture) + orig_f->len);
1387 error:
1388 return capture;
1389 }
1390
1391 /*
1392 * Find an ust_app using the sock and return it. RCU read side lock must be
1393 * held before calling this helper function.
1394 */
1395 struct ust_app *ust_app_find_by_sock(int sock)
1396 {
1397 struct lttng_ht_node_ulong *node;
1398 struct lttng_ht_iter iter;
1399
1400 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
1401 node = lttng_ht_iter_get_node_ulong(&iter);
1402 if (node == NULL) {
1403 DBG2("UST app find by sock %d not found", sock);
1404 goto error;
1405 }
1406
1407 return caa_container_of(node, struct ust_app, sock_n);
1408
1409 error:
1410 return NULL;
1411 }
1412
1413 /*
1414 * Find an ust_app using the notify sock and return it. RCU read side lock must
1415 * be held before calling this helper function.
1416 */
1417 static struct ust_app *find_app_by_notify_sock(int sock)
1418 {
1419 struct lttng_ht_node_ulong *node;
1420 struct lttng_ht_iter iter;
1421
1422 lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
1423 &iter);
1424 node = lttng_ht_iter_get_node_ulong(&iter);
1425 if (node == NULL) {
1426 DBG2("UST app find by notify sock %d not found", sock);
1427 goto error;
1428 }
1429
1430 return caa_container_of(node, struct ust_app, notify_sock_n);
1431
1432 error:
1433 return NULL;
1434 }
1435
1436 /*
1437 * Lookup for an ust app event based on event name, filter bytecode and the
1438 * event loglevel.
1439 *
1440 * Return an ust_app_event object or NULL on error.
1441 */
1442 static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
1443 const char *name, const struct lttng_bytecode *filter,
1444 int loglevel_value,
1445 const struct lttng_event_exclusion *exclusion)
1446 {
1447 struct lttng_ht_iter iter;
1448 struct lttng_ht_node_str *node;
1449 struct ust_app_event *event = NULL;
1450 struct ust_app_ht_key key;
1451
1452 assert(name);
1453 assert(ht);
1454
1455 /* Setup key for event lookup. */
1456 key.name = name;
1457 key.filter = filter;
1458 key.loglevel_type = loglevel_value;
1459 /* lttng_event_exclusion and lttng_ust_event_exclusion structures are similar */
1460 key.exclusion = exclusion;
1461
1462 /* Lookup using the event name as hash and a custom match fct. */
1463 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
1464 ht_match_ust_app_event, &key, &iter.iter);
1465 node = lttng_ht_iter_get_node_str(&iter);
1466 if (node == NULL) {
1467 goto end;
1468 }
1469
1470 event = caa_container_of(node, struct ust_app_event, node);
1471
1472 end:
1473 return event;
1474 }
1475
1476 /*
1477 * Look-up an event notifier rule based on its token id.
1478 *
1479 * Must be called with the RCU read lock held.
1480 * Return an ust_app_event_notifier_rule object or NULL on error.
1481 */
1482 static struct ust_app_event_notifier_rule *find_ust_app_event_notifier_rule(
1483 struct lttng_ht *ht, uint64_t token)
1484 {
1485 struct lttng_ht_iter iter;
1486 struct lttng_ht_node_u64 *node;
1487 struct ust_app_event_notifier_rule *event_notifier_rule = NULL;
1488
1489 assert(ht);
1490
1491 lttng_ht_lookup(ht, &token, &iter);
1492 node = lttng_ht_iter_get_node_u64(&iter);
1493 if (node == NULL) {
1494 DBG2("UST app event notifier rule token not found: token = %" PRIu64,
1495 token);
1496 goto end;
1497 }
1498
1499 event_notifier_rule = caa_container_of(
1500 node, struct ust_app_event_notifier_rule, node);
1501 end:
1502 return event_notifier_rule;
1503 }
1504
1505 /*
1506 * Create the channel context on the tracer.
1507 *
1508 * Called with UST app session lock held.
1509 */
1510 static
1511 int create_ust_channel_context(struct ust_app_channel *ua_chan,
1512 struct ust_app_ctx *ua_ctx, struct ust_app *app)
1513 {
1514 int ret;
1515
1516 health_code_update();
1517
1518 pthread_mutex_lock(&app->sock_lock);
1519 ret = ustctl_add_context(app->sock, &ua_ctx->ctx,
1520 ua_chan->obj, &ua_ctx->obj);
1521 pthread_mutex_unlock(&app->sock_lock);
1522 if (ret < 0) {
1523 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1524 ERR("UST app create channel context failed for app (pid: %d) "
1525 "with ret %d", app->pid, ret);
1526 } else {
1527 /*
1528 * This is normal behavior, an application can die during the
1529 * creation process. Don't report an error so the execution can
1530 * continue normally.
1531 */
1532 ret = 0;
1533 DBG3("UST app add context failed. Application is dead.");
1534 }
1535 goto error;
1536 }
1537
1538 ua_ctx->handle = ua_ctx->obj->handle;
1539
1540 DBG2("UST app context handle %d created successfully for channel %s",
1541 ua_ctx->handle, ua_chan->name);
1542
1543 error:
1544 health_code_update();
1545 return ret;
1546 }
1547
1548 /*
1549 * Set the filter on the tracer.
1550 */
1551 static int set_ust_object_filter(struct ust_app *app,
1552 const struct lttng_bytecode *bytecode,
1553 struct lttng_ust_abi_object_data *ust_object)
1554 {
1555 int ret;
1556 struct lttng_ust_abi_filter_bytecode *ust_bytecode = NULL;
1557
1558 health_code_update();
1559
1560 ust_bytecode = create_ust_filter_bytecode_from_bytecode(bytecode);
1561 if (!ust_bytecode) {
1562 ret = -LTTNG_ERR_NOMEM;
1563 goto error;
1564 }
1565 pthread_mutex_lock(&app->sock_lock);
1566 ret = ustctl_set_filter(app->sock, ust_bytecode,
1567 ust_object);
1568 pthread_mutex_unlock(&app->sock_lock);
1569 if (ret < 0) {
1570 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1571 ERR("UST app set object filter failed: object = %p of app pid = %d, ret = %d",
1572 ust_object, app->pid, ret);
1573 } else {
1574 /*
1575 * This is normal behavior, an application can die during the
1576 * creation process. Don't report an error so the execution can
1577 * continue normally.
1578 */
1579 ret = 0;
1580 DBG3("Failed to set UST app object filter. Application is dead.");
1581 }
1582 goto error;
1583 }
1584
1585 DBG2("UST filter successfully set: object = %p", ust_object);
1586
1587 error:
1588 health_code_update();
1589 free(ust_bytecode);
1590 return ret;
1591 }
1592
1593 /*
1594 * Set a capture bytecode for the passed object.
1595 * The sequence number enforces the ordering at runtime and on reception of
1596 * the captured payloads.
1597 */
1598 static int set_ust_capture(struct ust_app *app,
1599 const struct lttng_bytecode *bytecode,
1600 unsigned int capture_seqnum,
1601 struct lttng_ust_abi_object_data *ust_object)
1602 {
1603 int ret;
1604 struct lttng_ust_abi_capture_bytecode *ust_bytecode = NULL;
1605
1606 health_code_update();
1607
1608 ust_bytecode = create_ust_capture_bytecode_from_bytecode(bytecode);
1609 if (!ust_bytecode) {
1610 ret = -LTTNG_ERR_NOMEM;
1611 goto error;
1612 }
1613
1614 /*
1615 * Set the sequence number to ensure the capture of fields is ordered.
1616 */
1617 ust_bytecode->seqnum = capture_seqnum;
1618
1619 pthread_mutex_lock(&app->sock_lock);
1620 ret = ustctl_set_capture(app->sock, ust_bytecode,
1621 ust_object);
1622 pthread_mutex_unlock(&app->sock_lock);
1623 if (ret < 0) {
1624 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1625 ERR("UST app set object capture failed: object = %p of app pid = %d, ret = %d",
1626 ust_object, app->pid, ret);
1627 } else {
1628 /*
1629 * This is normal behavior, an application can die during the
1630 * creation process. Don't report an error so the execution can
1631 * continue normally.
1632 */
1633 ret = 0;
1634 DBG3("Failed to set UST app object capture. Application is dead.");
1635 }
1636
1637 goto error;
1638 }
1639
1640 DBG2("UST capture successfully set: object = %p", ust_object);
1641
1642 error:
1643 health_code_update();
1644 free(ust_bytecode);
1645 return ret;
1646 }
1647
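/*
 * Create a liblttng-ust event exclusion from the given exclusion list.
 *
 * Return the allocated exclusion or NULL on error.
 */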
1648 static
1649 struct lttng_ust_abi_event_exclusion *create_ust_exclusion_from_exclusion(
1650 const struct lttng_event_exclusion *exclusion)
1651 {
1652 struct lttng_ust_abi_event_exclusion *ust_exclusion = NULL;
1653 size_t exclusion_alloc_size = sizeof(struct lttng_ust_abi_event_exclusion) +
1654 LTTNG_UST_ABI_SYM_NAME_LEN * exclusion->count;
1655
1656 ust_exclusion = zmalloc(exclusion_alloc_size);
1657 if (!ust_exclusion) {
1658 PERROR("malloc");
1659 goto end;
1660 }
1661
1662 assert(sizeof(struct lttng_event_exclusion) ==
1663 sizeof(struct lttng_ust_abi_event_exclusion));
1664 memcpy(ust_exclusion, exclusion, exclusion_alloc_size);
1665 end:
1666 return ust_exclusion;
1667 }
1668
1669 /*
1670 * Set event exclusions on the tracer.
1671 */
1672 static int set_ust_object_exclusions(struct ust_app *app,
1673 const struct lttng_event_exclusion *exclusions,
1674 struct lttng_ust_abi_object_data *ust_object)
1675 {
1676 int ret;
1677 struct lttng_ust_abi_event_exclusion *ust_exclusions = NULL;
1678
1679 assert(exclusions && exclusions->count > 0);
1680
1681 health_code_update();
1682
1683 ust_exclusions = create_ust_exclusion_from_exclusion(
1684 exclusions);
1685 if (!ust_exclusions) {
1686 ret = -LTTNG_ERR_NOMEM;
1687 goto error;
1688 }
1689 pthread_mutex_lock(&app->sock_lock);
1690 ret = ustctl_set_exclusion(app->sock, ust_exclusions, ust_object);
1691 pthread_mutex_unlock(&app->sock_lock);
1692 if (ret < 0) {
1693 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1694 ERR("Failed to set UST app exclusions for object %p of app (pid: %d) "
1695 "with ret %d", ust_object, app->pid, ret);
1696 } else {
1697 /*
1698 * This is normal behavior, an application can die during the
1699 * creation process. Don't report an error so the execution can
1700 * continue normally.
1701 */
1702 ret = 0;
1703 DBG3("Failed to set UST app object exclusions. Application is dead.");
1704 }
1705 goto error;
1706 }
1707
1708 DBG2("UST exclusions set successfully for object %p", ust_object);
1709
1710 error:
1711 health_code_update();
1712 free(ust_exclusions);
1713 return ret;
1714 }
1715
1716 /*
1717 * Disable the specified object on the UST tracer for the UST session.
1718 */
1719 static int disable_ust_object(struct ust_app *app,
1720 struct lttng_ust_abi_object_data *object)
1721 {
1722 int ret;
1723
1724 health_code_update();
1725
1726 pthread_mutex_lock(&app->sock_lock);
1727 ret = ustctl_disable(app->sock, object);
1728 pthread_mutex_unlock(&app->sock_lock);
1729 if (ret < 0) {
1730 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1731 ERR("Failed to disable UST app object %p app (pid: %d) with ret %d",
1732 object, app->pid, ret);
1733 } else {
1734 /*
1735 * This is normal behavior, an application can die during the
1736 * creation process. Don't report an error so the execution can
1737 * continue normally.
1738 */
1739 ret = 0;
1740 DBG3("Failed to disable UST app object. Application is dead.");
1741 }
1742 goto error;
1743 }
1744
1745 DBG2("UST app object %p disabled successfully for app (pid: %d)",
1746 object, app->pid);
1747
1748 error:
1749 health_code_update();
1750 return ret;
1751 }
1752
1753 /*
1754 * Disable the specified channel on the UST tracer for the UST session.
1755 */
1756 static int disable_ust_channel(struct ust_app *app,
1757 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1758 {
1759 int ret;
1760
1761 health_code_update();
1762
1763 pthread_mutex_lock(&app->sock_lock);
1764 ret = ustctl_disable(app->sock, ua_chan->obj);
1765 pthread_mutex_unlock(&app->sock_lock);
1766 if (ret < 0) {
1767 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1768 ERR("UST app channel %s disable failed for app (pid: %d) "
1769 "and session handle %d with ret %d",
1770 ua_chan->name, app->pid, ua_sess->handle, ret);
1771 } else {
1772 /*
1773 * This is normal behavior, an application can die during the
1774 * creation process. Don't report an error so the execution can
1775 * continue normally.
1776 */
1777 ret = 0;
1778 DBG3("UST app disable channel failed. Application is dead.");
1779 }
1780 goto error;
1781 }
1782
1783 DBG2("UST app channel %s disabled successfully for app (pid: %d)",
1784 ua_chan->name, app->pid);
1785
1786 error:
1787 health_code_update();
1788 return ret;
1789 }
1790
1791 /*
1792 * Enable the specified channel on the UST tracer for the UST session.
1793 */
1794 static int enable_ust_channel(struct ust_app *app,
1795 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1796 {
1797 int ret;
1798
1799 health_code_update();
1800
1801 pthread_mutex_lock(&app->sock_lock);
1802 ret = ustctl_enable(app->sock, ua_chan->obj);
1803 pthread_mutex_unlock(&app->sock_lock);
1804 if (ret < 0) {
1805 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1806 ERR("UST app channel %s enable failed for app (pid: %d) "
1807 "and session handle %d with ret %d",
1808 ua_chan->name, app->pid, ua_sess->handle, ret);
1809 } else {
1810 /*
1811 * This is normal behavior, an application can die during the
1812 * creation process. Don't report an error so the execution can
1813 * continue normally.
1814 */
1815 ret = 0;
1816 DBG3("UST app enable channel failed. Application is dead.");
1817 }
1818 goto error;
1819 }
1820
1821 ua_chan->enabled = 1;
1822
1823 DBG2("UST app channel %s enabled successfully for app (pid: %d)",
1824 ua_chan->name, app->pid);
1825
1826 error:
1827 health_code_update();
1828 return ret;
1829 }
1830
1831 /*
1832 * Enable the specified object on the UST tracer for the UST session.
1833 */
1834 static int enable_ust_object(
1835 struct ust_app *app, struct lttng_ust_abi_object_data *ust_object)
1836 {
1837 int ret;
1838
1839 health_code_update();
1840
1841 pthread_mutex_lock(&app->sock_lock);
1842 ret = ustctl_enable(app->sock, ust_object);
1843 pthread_mutex_unlock(&app->sock_lock);
1844 if (ret < 0) {
1845 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1846 ERR("UST app enable failed for object %p app (pid: %d) with ret %d",
1847 ust_object, app->pid, ret);
1848 } else {
1849 /*
1850 * This is normal behavior, an application can die during the
1851 * creation process. Don't report an error so the execution can
1852 * continue normally.
1853 */
1854 ret = 0;
1855 DBG3("Failed to enable UST app object. Application is dead.");
1856 }
1857 goto error;
1858 }
1859
1860 DBG2("UST app object %p enabled successfully for app (pid: %d)",
1861 ust_object, app->pid);
1862
1863 error:
1864 health_code_update();
1865 return ret;
1866 }
1867
1868 /*
1869 * Send channel and stream buffers to the application.
1870 *
1871 * Return 0 on success. On error, a negative value is returned.
1872 */
1873 static int send_channel_pid_to_ust(struct ust_app *app,
1874 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1875 {
1876 int ret;
1877 struct ust_app_stream *stream, *stmp;
1878
1879 assert(app);
1880 assert(ua_sess);
1881 assert(ua_chan);
1882
1883 health_code_update();
1884
1885 DBG("UST app sending channel %s to UST app sock %d", ua_chan->name,
1886 app->sock);
1887
1888 /* Send channel to the application. */
1889 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
1890 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1891 ret = -ENOTCONN; /* Caused by app exiting. */
1892 goto error;
1893 } else if (ret < 0) {
1894 goto error;
1895 }
1896
1897 health_code_update();
1898
1899 /* Send all streams to application. */
1900 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
1901 ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
1902 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1903 ret = -ENOTCONN; /* Caused by app exiting. */
1904 goto error;
1905 } else if (ret < 0) {
1906 goto error;
1907 }
1908 /* We don't need the stream anymore once sent to the tracer. */
1909 cds_list_del(&stream->list);
1910 delete_ust_app_stream(-1, stream, app);
1911 }
1912 /* Flag the channel that it is sent to the application. */
1913 ua_chan->is_sent = 1;
1914
1915 error:
1916 health_code_update();
1917 return ret;
1918 }
1919
1920 /*
1921 * Create the specified event onto the UST tracer for a UST session.
1922 *
1923 * Should be called with session mutex held.
1924 */
1925 static
1926 int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
1927 struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
1928 {
1929 int ret = 0;
1930
1931 health_code_update();
1932
1933 /* Create UST event on tracer */
1934 pthread_mutex_lock(&app->sock_lock);
1935 ret = ustctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
1936 &ua_event->obj);
1937 pthread_mutex_unlock(&app->sock_lock);
1938 if (ret < 0) {
1939 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1941 ERR("Error ustctl create event %s for app pid: %d with ret %d",
1942 ua_event->attr.name, app->pid, ret);
1943 } else {
1944 /*
1945 * This is normal behavior, an application can die during the
1946 * creation process. Don't report an error so the execution can
1947 * continue normally.
1948 */
1949 ret = 0;
1950 DBG3("UST app create event failed. Application is dead.");
1951 }
1952 goto error;
1953 }
1954
1955 ua_event->handle = ua_event->obj->handle;
1956
1957 DBG2("UST app event %s created successfully for pid:%d object: %p",
1958 ua_event->attr.name, app->pid, ua_event->obj);
1959
1960 health_code_update();
1961
1962 /* Set filter if one is present. */
1963 if (ua_event->filter) {
1964 ret = set_ust_object_filter(app, ua_event->filter, ua_event->obj);
1965 if (ret < 0) {
1966 goto error;
1967 }
1968 }
1969
1970 /* Set exclusions for the event */
1971 if (ua_event->exclusion) {
1972 ret = set_ust_object_exclusions(app, ua_event->exclusion, ua_event->obj);
1973 if (ret < 0) {
1974 goto error;
1975 }
1976 }
1977
1978 /* Events are created disabled on the tracer; enable it here if requested. */
1979 if (ua_event->enabled) {
1980 /*
1981 * We now need to explicitly enable the event, since it
1982 * is now disabled at creation.
1983 */
1984 ret = enable_ust_object(app, ua_event->obj);
1985 if (ret < 0) {
1986 /*
1987 * If we hit an EPERM, something is wrong with our enable call. If
1988 * we get an EEXIST, there is a problem on the tracer side since we
1989 * just created it.
1990 */
1991 switch (ret) {
1992 case -LTTNG_UST_ERR_PERM:
1993 /* Code flow problem. */
1994 abort();
1995 case -LTTNG_UST_ERR_EXIST:
1996 /* It's OK for our use case. */
1997 ret = 0;
1998 break;
1999 default:
2000 break;
2001 }
2002 goto error;
2003 }
2004 }
2005
2006 error:
2007 health_code_update();
2008 return ret;
2009 }
2010
2011 static int init_ust_event_notifier_from_event_rule(
2012 const struct lttng_event_rule *rule,
2013 struct lttng_ust_abi_event_notifier *event_notifier)
2014 {
2015 enum lttng_event_rule_status status;
2016 enum lttng_ust_abi_loglevel_type ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL;
2017 int loglevel = -1, ret = 0;
2018 const char *pattern;
2019
2020 /* For now, only LTTNG_EVENT_RULE_TYPE_TRACEPOINT is supported. */
2021 assert(lttng_event_rule_get_type(rule) ==
2022 LTTNG_EVENT_RULE_TYPE_TRACEPOINT);
2023
2024 memset(event_notifier, 0, sizeof(*event_notifier));
2025
2026 if (lttng_event_rule_targets_agent_domain(rule)) {
2027 /*
2028 * Special event for agents.
2029 * The actual meat of the event is in the filter that will be
2030 * attached later on.
2031 * Set the default values for the agent event.
2032 */
2033 pattern = event_get_default_agent_ust_name(
2034 lttng_event_rule_get_domain_type(rule));
2035 loglevel = 0;
2036 ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL;
2037 } else {
2038 const struct lttng_log_level_rule *log_level_rule;
2039
2040 status = lttng_event_rule_tracepoint_get_pattern(rule, &pattern);
2041 if (status != LTTNG_EVENT_RULE_STATUS_OK) {
2042 /* At this point, this is a fatal error. */
2043 abort();
2044 }
2045
2046 status = lttng_event_rule_tracepoint_get_log_level_rule(
2047 rule, &log_level_rule);
2048 if (status == LTTNG_EVENT_RULE_STATUS_UNSET) {
2049 ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL;
2050 } else if (status == LTTNG_EVENT_RULE_STATUS_OK) {
2051 enum lttng_log_level_rule_status llr_status;
2052
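/*
* Map the log level rule type to the corresponding UST ABI log level
* type: an "exactly" rule becomes a single log level, an "at least as
* severe as" rule becomes a log level range.
*/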
2053 switch (lttng_log_level_rule_get_type(log_level_rule)) {
2054 case LTTNG_LOG_LEVEL_RULE_TYPE_EXACTLY:
2055 ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_SINGLE;
2056 llr_status = lttng_log_level_rule_exactly_get_level(
2057 log_level_rule, &loglevel);
2058 break;
2059 case LTTNG_LOG_LEVEL_RULE_TYPE_AT_LEAST_AS_SEVERE_AS:
2060 ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_RANGE;
2061 llr_status = lttng_log_level_rule_at_least_as_severe_as_get_level(
2062 log_level_rule, &loglevel);
2063 break;
2064 default:
2065 abort();
2066 }
2067
2068 assert(llr_status == LTTNG_LOG_LEVEL_RULE_STATUS_OK);
2069 } else {
2070 /* At this point this is a fatal error. */
2071 abort();
2072 }
2073 }
2074
2075 event_notifier->event.instrumentation = LTTNG_UST_ABI_TRACEPOINT;
2076 ret = lttng_strncpy(event_notifier->event.name, pattern,
2077 LTTNG_UST_ABI_SYM_NAME_LEN - 1);
2078 if (ret) {
2079 ERR("Failed to copy event rule pattern to notifier: pattern = '%s' ",
2080 pattern);
2081 goto end;
2082 }
2083
2084 event_notifier->event.loglevel_type = ust_loglevel_type;
2085 event_notifier->event.loglevel = loglevel;
2086 end:
2087 return ret;
2088 }
2089
2090 /*
2091 * Create the specified event notifier against the user space tracer of a
2092 * given application.
2093 */
2094 static int create_ust_event_notifier(struct ust_app *app,
2095 struct ust_app_event_notifier_rule *ua_event_notifier_rule)
2096 {
2097 int ret = 0;
2098 enum lttng_condition_status condition_status;
2099 const struct lttng_condition *condition = NULL;
2100 struct lttng_ust_abi_event_notifier event_notifier;
2101 const struct lttng_event_rule *event_rule = NULL;
2102 unsigned int capture_bytecode_count = 0, i;
2103 enum lttng_condition_status cond_status;
2104
2105 health_code_update();
2106 assert(app->event_notifier_group.object);
2107
2108 condition = lttng_trigger_get_const_condition(
2109 ua_event_notifier_rule->trigger);
2110 assert(condition);
2111 assert(lttng_condition_get_type(condition) == LTTNG_CONDITION_TYPE_ON_EVENT);
2112
2113 condition_status = lttng_condition_on_event_get_rule(
2114 condition, &event_rule);
2115 assert(condition_status == LTTNG_CONDITION_STATUS_OK);
2116
2117 assert(event_rule);
2118 assert(lttng_event_rule_get_type(event_rule) == LTTNG_EVENT_RULE_TYPE_TRACEPOINT);
2119
2120 init_ust_event_notifier_from_event_rule(event_rule, &event_notifier);
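/*
* The tracer token ties notifications emitted by this event notifier
* back to its trigger. The error counter index is the slot, within the
* event notifier group's error counter, used to account for
* notifications this notifier fails to emit (e.g. when discarded).
*/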
2121 event_notifier.event.token = ua_event_notifier_rule->token;
2122 event_notifier.error_counter_index = ua_event_notifier_rule->error_counter_index;
2123
2124 /* Create UST event notifier against the tracer. */
2125 pthread_mutex_lock(&app->sock_lock);
2126 ret = ustctl_create_event_notifier(app->sock, &event_notifier,
2127 app->event_notifier_group.object,
2128 &ua_event_notifier_rule->obj);
2129 pthread_mutex_unlock(&app->sock_lock);
2130 if (ret < 0) {
2131 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
2132 ERR("Error ustctl create event notifier: name = '%s', app = '%s' (ppid: %d), ret = %d",
2133 event_notifier.event.name, app->name,
2134 app->ppid, ret);
2135 } else {
2136 /*
2137 * This is normal behavior, an application can die
2138 * during the creation process. Don't report an error so
2139 * the execution can continue normally.
2140 */
2141 ret = 0;
2142 DBG3("UST app create event notifier failed (application is dead): app = '%s' (ppid = %d)",
2143 app->name, app->ppid);
2144 }
2145
2146 goto error;
2147 }
2148
2149 ua_event_notifier_rule->handle = ua_event_notifier_rule->obj->handle;
2150
2151 DBG2("UST app event notifier %s created successfully: app = '%s' (ppid: %d), object: %p",
2152 event_notifier.event.name, app->name, app->ppid,
2153 ua_event_notifier_rule->obj);
2154
2155 health_code_update();
2156
2157 /* Set filter if one is present. */
2158 if (ua_event_notifier_rule->filter) {
2159 ret = set_ust_object_filter(app, ua_event_notifier_rule->filter,
2160 ua_event_notifier_rule->obj);
2161 if (ret < 0) {
2162 goto error;
2163 }
2164 }
2165
2166 /* Set exclusions for the event. */
2167 if (ua_event_notifier_rule->exclusion) {
2168 ret = set_ust_object_exclusions(app,
2169 ua_event_notifier_rule->exclusion,
2170 ua_event_notifier_rule->obj);
2171 if (ret < 0) {
2172 goto error;
2173 }
2174 }
2175
2176 /* Set the capture bytecodes. */
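/*
* One bytecode per capture descriptor, attached in descriptor order so
* that captured fields can be matched back to their descriptor by index
* when a notification is received.
*/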
2177 cond_status = lttng_condition_on_event_get_capture_descriptor_count(
2178 condition, &capture_bytecode_count);
2179 assert(cond_status == LTTNG_CONDITION_STATUS_OK);
2180
2181 for (i = 0; i < capture_bytecode_count; i++) {
2182 const struct lttng_bytecode *capture_bytecode =
2183 lttng_condition_on_event_get_capture_bytecode_at_index(
2184 condition, i);
2185
2186 ret = set_ust_capture(app, capture_bytecode, i,
2187 ua_event_notifier_rule->obj);
2188 if (ret < 0) {
2189 goto error;
2190 }
2191 }
2192
2193 /*
2194 * We now need to explicitly enable the event, since it
2195 * is disabled at creation.
2196 */
2197 ret = enable_ust_object(app, ua_event_notifier_rule->obj);
2198 if (ret < 0) {
2199 /*
2200 * If we hit an EPERM, something is wrong with our enable call.
2201 * If we get an EEXIST, there is a problem on the tracer side
2202 * since we just created it.
2203 */
2204 switch (ret) {
2205 case -LTTNG_UST_ERR_PERM:
2206 /* Code flow problem. */
2207 abort();
2208 case -LTTNG_UST_ERR_EXIST:
2209 /* It's OK for our use case. */
2210 ret = 0;
2211 break;
2212 default:
2213 break;
2214 }
2215
2216 goto error;
2217 }
2218
2219 ua_event_notifier_rule->enabled = true;
2220
2221 error:
2222 health_code_update();
2223 return ret;
2224 }
2225
2226 /*
2227 * Copy data between an UST app event and a LTT event.
2228 */
2229 static void shadow_copy_event(struct ust_app_event *ua_event,
2230 struct ltt_ust_event *uevent)
2231 {
2232 size_t exclusion_alloc_size;
2233
2234 strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
2235 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
2236
2237 ua_event->enabled = uevent->enabled;
2238
2239 /* Copy event attributes */
2240 memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));
2241
2242 /* Copy filter bytecode */
2243 if (uevent->filter) {
2244 ua_event->filter = lttng_bytecode_copy(uevent->filter);
2245 /* Filter might be NULL here in case of ENOMEM. */
2246 }
2247
2248 /* Copy exclusion data */
2249 if (uevent->exclusion) {
2250 exclusion_alloc_size = sizeof(struct lttng_event_exclusion) +
2251 LTTNG_UST_ABI_SYM_NAME_LEN * uevent->exclusion->count;
2252 ua_event->exclusion = zmalloc(exclusion_alloc_size);
2253 if (ua_event->exclusion == NULL) {
2254 PERROR("malloc");
2255 } else {
2256 memcpy(ua_event->exclusion, uevent->exclusion,
2257 exclusion_alloc_size);
2258 }
2259 }
2260 }
2261
2262 /*
2263 * Copy data between an UST app channel and a LTT channel.
2264 */
2265 static void shadow_copy_channel(struct ust_app_channel *ua_chan,
2266 struct ltt_ust_channel *uchan)
2267 {
2268 DBG2("UST app shadow copy of channel %s started", ua_chan->name);
2269
2270 strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
2271 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
2272
2273 ua_chan->tracefile_size = uchan->tracefile_size;
2274 ua_chan->tracefile_count = uchan->tracefile_count;
2275
2276 /* Copy event attributes since the layout is different. */
2277 ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
2278 ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
2279 ua_chan->attr.overwrite = uchan->attr.overwrite;
2280 ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
2281 ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
2282 ua_chan->monitor_timer_interval = uchan->monitor_timer_interval;
2283 ua_chan->attr.output = uchan->attr.output;
2284 ua_chan->attr.blocking_timeout = uchan->attr.u.s.blocking_timeout;
2285
2286 /*
2287 * Note that the attribute channel type is not set since the channel on the
2288 * tracing registry side does not have this information.
2289 */
2290
2291 ua_chan->enabled = uchan->enabled;
2292 ua_chan->tracing_channel_id = uchan->id;
2293
2294 DBG3("UST app shadow copy of channel %s done", ua_chan->name);
2295 }
2296
2297 /*
2298 * Copy data between a UST app session and a regular LTT session.
2299 */
2300 static void shadow_copy_session(struct ust_app_session *ua_sess,
2301 struct ltt_ust_session *usess, struct ust_app *app)
2302 {
2303 struct tm *timeinfo;
2304 char datetime[16];
2305 int ret;
2306 char tmp_shm_path[PATH_MAX];
2307
2308 timeinfo = localtime(&app->registration_time);
2309 strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);
2310
2311 DBG2("Shadow copy of session handle %d", ua_sess->handle);
2312
2313 ua_sess->tracing_id = usess->id;
2314 ua_sess->id = get_next_session_id();
2315 LTTNG_OPTIONAL_SET(&ua_sess->real_credentials.uid, app->uid);
2316 LTTNG_OPTIONAL_SET(&ua_sess->real_credentials.gid, app->gid);
2317 LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.uid, usess->uid);
2318 LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.gid, usess->gid);
2319 ua_sess->buffer_type = usess->buffer_type;
2320 ua_sess->bits_per_long = app->bits_per_long;
2321
2322 /* Only one consumer object per session is possible. */
2323 consumer_output_get(usess->consumer);
2324 ua_sess->consumer = usess->consumer;
2325
2326 ua_sess->output_traces = usess->output_traces;
2327 ua_sess->live_timer_interval = usess->live_timer_interval;
2328 copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
2329 &usess->metadata_attr);
2330
2331 switch (ua_sess->buffer_type) {
2332 case LTTNG_BUFFER_PER_PID:
2333 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
2334 DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
2335 datetime);
2336 break;
2337 case LTTNG_BUFFER_PER_UID:
2338 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
2339 DEFAULT_UST_TRACE_UID_PATH,
2340 lttng_credentials_get_uid(&ua_sess->real_credentials),
2341 app->bits_per_long);
2342 break;
2343 default:
2344 assert(0);
2345 goto error;
2346 }
2347 if (ret < 0) {
2348 PERROR("snprintf UST shadow copy session");
2349 assert(0);
2350 goto error;
2351 }
2352
2353 strncpy(ua_sess->root_shm_path, usess->root_shm_path,
2354 sizeof(ua_sess->root_shm_path));
2355 ua_sess->root_shm_path[sizeof(ua_sess->root_shm_path) - 1] = '\0';
2356 strncpy(ua_sess->shm_path, usess->shm_path,
2357 sizeof(ua_sess->shm_path));
2358 ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
2359 if (ua_sess->shm_path[0]) {
2360 switch (ua_sess->buffer_type) {
2361 case LTTNG_BUFFER_PER_PID:
2362 ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
2363 "/" DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s",
2364 app->name, app->pid, datetime);
2365 break;
2366 case LTTNG_BUFFER_PER_UID:
2367 ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
2368 "/" DEFAULT_UST_TRACE_UID_PATH,
2369 app->uid, app->bits_per_long);
2370 break;
2371 default:
2372 assert(0);
2373 goto error;
2374 }
2375 if (ret < 0) {
2376 PERROR("snprintf UST shadow copy session");
2377 assert(0);
2378 goto error;
2379 }
2380 strncat(ua_sess->shm_path, tmp_shm_path,
2381 sizeof(ua_sess->shm_path) - strlen(ua_sess->shm_path) - 1);
2382 ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
2383 }
2384 return;
2385
2386 error:
2387 consumer_output_put(ua_sess->consumer);
2388 }
2389
2390 /*
2391 * Session lookup wrapper.
2392 */
2393 static
2394 void __lookup_session_by_app(const struct ltt_ust_session *usess,
2395 struct ust_app *app, struct lttng_ht_iter *iter)
2396 {
2397 /* Get right UST app session from app */
2398 lttng_ht_lookup(app->sessions, &usess->id, iter);
2399 }
2400
2401 /*
2402 * Return ust app session from the app session hashtable using the UST session
2403 * id.
2404 */
2405 static struct ust_app_session *lookup_session_by_app(
2406 const struct ltt_ust_session *usess, struct ust_app *app)
2407 {
2408 struct lttng_ht_iter iter;
2409 struct lttng_ht_node_u64 *node;
2410
2411 __lookup_session_by_app(usess, app, &iter);
2412 node = lttng_ht_iter_get_node_u64(&iter);
2413 if (node == NULL) {
2414 goto error;
2415 }
2416
2417 return caa_container_of(node, struct ust_app_session, node);
2418
2419 error:
2420 return NULL;
2421 }
2422
2423 /*
2424 * Setup buffer registry per PID for the given session and application. If none
2425 * is found, a new one is created, added to the global registry and
2426 * initialized. If regp is valid, it's set with the newly created object.
2427 *
2428 * Return 0 on success or else a negative value.
2429 */
2430 static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
2431 struct ust_app *app, struct buffer_reg_pid **regp)
2432 {
2433 int ret = 0;
2434 struct buffer_reg_pid *reg_pid;
2435
2436 assert(ua_sess);
2437 assert(app);
2438
2439 rcu_read_lock();
2440
2441 reg_pid = buffer_reg_pid_find(ua_sess->id);
2442 if (!reg_pid) {
2443 /*
2444 * This is the create channel path meaning that if there is NO
2445 * registry available, we have to create one for this session.
2446 */
2447 ret = buffer_reg_pid_create(ua_sess->id, &reg_pid,
2448 ua_sess->root_shm_path, ua_sess->shm_path);
2449 if (ret < 0) {
2450 goto error;
2451 }
2452 } else {
2453 goto end;
2454 }
2455
2456 /* Initialize registry. */
2457 ret = ust_registry_session_init(&reg_pid->registry->reg.ust, app,
2458 app->bits_per_long, app->uint8_t_alignment,
2459 app->uint16_t_alignment, app->uint32_t_alignment,
2460 app->uint64_t_alignment, app->long_alignment,
2461 app->byte_order, app->version.major, app->version.minor,
2462 reg_pid->root_shm_path, reg_pid->shm_path,
2463 lttng_credentials_get_uid(&ua_sess->effective_credentials),
2464 lttng_credentials_get_gid(&ua_sess->effective_credentials),
2465 ua_sess->tracing_id,
2466 app->uid);
2467 if (ret < 0) {
2468 /*
2469 * reg_pid->registry->reg.ust is NULL upon error, so we need to
2470 * destroy the buffer registry, because it is always expected
2471 * that if the buffer registry can be found, its ust registry is
2472 * non-NULL.
2473 */
2474 buffer_reg_pid_destroy(reg_pid);
2475 goto error;
2476 }
2477
2478 buffer_reg_pid_add(reg_pid);
2479
2480 DBG3("UST app buffer registry per PID created successfully");
2481
2482 end:
2483 if (regp) {
2484 *regp = reg_pid;
2485 }
2486 error:
2487 rcu_read_unlock();
2488 return ret;
2489 }
2490
2491 /*
2492 * Setup buffer registry per UID for the given session and application. If none
2493 * is found, a new one is created, added to the global registry and
2494 * initialized. If regp is valid, it's set with the newly created object.
2495 *
2496 * Return 0 on success or else a negative value.
2497 */
2498 static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
2499 struct ust_app_session *ua_sess,
2500 struct ust_app *app, struct buffer_reg_uid **regp)
2501 {
2502 int ret = 0;
2503 struct buffer_reg_uid *reg_uid;
2504
2505 assert(usess);
2506 assert(app);
2507
2508 rcu_read_lock();
2509
2510 reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
2511 if (!reg_uid) {
2512 /*
2513 * This is the create channel path meaning that if there is NO
2514 * registry available, we have to create one for this session.
2515 */
2516 ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
2517 LTTNG_DOMAIN_UST, &reg_uid,
2518 ua_sess->root_shm_path, ua_sess->shm_path);
2519 if (ret < 0) {
2520 goto error;
2521 }
2522 } else {
2523 goto end;
2524 }
2525
2526 /* Initialize registry. */
2527 ret = ust_registry_session_init(&reg_uid->registry->reg.ust, NULL,
2528 app->bits_per_long, app->uint8_t_alignment,
2529 app->uint16_t_alignment, app->uint32_t_alignment,
2530 app->uint64_t_alignment, app->long_alignment,
2531 app->byte_order, app->version.major,
2532 app->version.minor, reg_uid->root_shm_path,
2533 reg_uid->shm_path, usess->uid, usess->gid,
2534 ua_sess->tracing_id, app->uid);
2535 if (ret < 0) {
2536 /*
2537 * reg_uid->registry->reg.ust is NULL upon error, so we need to
2538 * destroy the buffer registry, because it is always expected
2539 * that if the buffer registry can be found, its ust registry is
2540 * non-NULL.
2541 */
2542 buffer_reg_uid_destroy(reg_uid, NULL);
2543 goto error;
2544 }
2545 /* Add node to teardown list of the session. */
2546 cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);
2547
2548 buffer_reg_uid_add(reg_uid);
2549
2550 DBG3("UST app buffer registry per UID created successfully");
2551 end:
2552 if (regp) {
2553 *regp = reg_uid;
2554 }
2555 error:
2556 rcu_read_unlock();
2557 return ret;
2558 }
2559
2560 /*
2561 * Create a session on the tracer side for the given app.
2562 *
2563 * On success, ua_sess_ptr is populated with the session pointer or else left
2564 * untouched. If the session was created, is_created is set to 1. On error,
2565 * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
2566 * be NULL.
2567 *
2568 * Returns 0 on success or else a negative code: -ENOMEM, or -ENOTCONN,
2569 * the default code when ustctl_create_session fails.
2570 */
2571 static int find_or_create_ust_app_session(struct ltt_ust_session *usess,
2572 struct ust_app *app, struct ust_app_session **ua_sess_ptr,
2573 int *is_created)
2574 {
2575 int ret, created = 0;
2576 struct ust_app_session *ua_sess;
2577
2578 assert(usess);
2579 assert(app);
2580 assert(ua_sess_ptr);
2581
2582 health_code_update();
2583
2584 ua_sess = lookup_session_by_app(usess, app);
2585 if (ua_sess == NULL) {
2586 DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
2587 app->pid, usess->id);
2588 ua_sess = alloc_ust_app_session();
2589 if (ua_sess == NULL) {
2590 /* Only malloc can fail, so something is really wrong. */
2591 ret = -ENOMEM;
2592 goto error;
2593 }
2594 shadow_copy_session(ua_sess, usess, app);
2595 created = 1;
2596 }
2597
2598 switch (usess->buffer_type) {
2599 case LTTNG_BUFFER_PER_PID:
2600 /* Init local registry. */
2601 ret = setup_buffer_reg_pid(ua_sess, app, NULL);
2602 if (ret < 0) {
2603 delete_ust_app_session(-1, ua_sess, app);
2604 goto error;
2605 }
2606 break;
2607 case LTTNG_BUFFER_PER_UID:
2608 /* Look for a global registry. If none exists, create one. */
2609 ret = setup_buffer_reg_uid(usess, ua_sess, app, NULL);
2610 if (ret < 0) {
2611 delete_ust_app_session(-1, ua_sess, app);
2612 goto error;
2613 }
2614 break;
2615 default:
2616 assert(0);
2617 ret = -EINVAL;
2618 goto error;
2619 }
2620
2621 health_code_update();
2622
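/* A handle of -1 means the session was not yet created on the tracer side. */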
2623 if (ua_sess->handle == -1) {
2624 pthread_mutex_lock(&app->sock_lock);
2625 ret = ustctl_create_session(app->sock);
2626 pthread_mutex_unlock(&app->sock_lock);
2627 if (ret < 0) {
2628 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
2629 ERR("Creating session for app pid %d with ret %d",
2630 app->pid, ret);
2631 } else {
2632 DBG("UST app creating session failed. Application is dead");
2633 /*
2634 * This is normal behavior, an application can die during the
2635 * creation process. Don't report an error so the execution can
2636 * continue normally. This will get flagged ENOTCONN and the
2637 * caller will handle it.
2638 */
2639 ret = 0;
2640 }
2641 delete_ust_app_session(-1, ua_sess, app);
2642 if (ret != -ENOMEM) {
2643 /*
2644 * The tracer is probably gone or hit an internal error, so behave
2645 * as if it will soon unregister or is no longer usable.
2646 */
2647 ret = -ENOTCONN;
2648 }
2649 goto error;
2650 }
2651
2652 ua_sess->handle = ret;
2653
2654 /* Add ust app session to app's HT */
2655 lttng_ht_node_init_u64(&ua_sess->node,
2656 ua_sess->tracing_id);
2657 lttng_ht_add_unique_u64(app->sessions, &ua_sess->node);
2658 lttng_ht_node_init_ulong(&ua_sess->ust_objd_node, ua_sess->handle);
2659 lttng_ht_add_unique_ulong(app->ust_sessions_objd,
2660 &ua_sess->ust_objd_node);
2661
2662 DBG2("UST app session created successfully with handle %d", ret);
2663 }
2664
2665 *ua_sess_ptr = ua_sess;
2666 if (is_created) {
2667 *is_created = created;
2668 }
2669
2670 /* Everything went well. */
2671 ret = 0;
2672
2673 error:
2674 health_code_update();
2675 return ret;
2676 }
2677
2678 /*
2679 * Match function for a hash table lookup of ust_app_ctx.
2680 *
2681 * It matches an ust app context based on the context type and, in the case
2682 * of perf counters, their name.
2683 */
2684 static int ht_match_ust_app_ctx(struct cds_lfht_node *node, const void *_key)
2685 {
2686 struct ust_app_ctx *ctx;
2687 const struct lttng_ust_context_attr *key;
2688
2689 assert(node);
2690 assert(_key);
2691
2692 ctx = caa_container_of(node, struct ust_app_ctx, node.node);
2693 key = _key;
2694
2695 /* Context type */
2696 if (ctx->ctx.ctx != key->ctx) {
2697 goto no_match;
2698 }
2699
2700 switch(key->ctx) {
2701 case LTTNG_UST_ABI_CONTEXT_PERF_THREAD_COUNTER:
2702 if (strncmp(key->u.perf_counter.name,
2703 ctx->ctx.u.perf_counter.name,
2704 sizeof(key->u.perf_counter.name))) {
2705 goto no_match;
2706 }
2707 break;
2708 case LTTNG_UST_ABI_CONTEXT_APP_CONTEXT:
2709 if (strcmp(key->u.app_ctx.provider_name,
2710 ctx->ctx.u.app_ctx.provider_name) ||
2711 strcmp(key->u.app_ctx.ctx_name,
2712 ctx->ctx.u.app_ctx.ctx_name)) {
2713 goto no_match;
2714 }
2715 break;
2716 default:
2717 break;
2718 }
2719
2720 /* Match. */
2721 return 1;
2722
2723 no_match:
2724 return 0;
2725 }
2726
2727 /*
2728 * Lookup for an ust app context from an lttng_ust_context.
2729 *
2730 * Must be called while holding RCU read side lock.
2731 * Return an ust_app_ctx object or NULL on error.
2732 */
2733 static
2734 struct ust_app_ctx *find_ust_app_context(struct lttng_ht *ht,
2735 struct lttng_ust_context_attr *uctx)
2736 {
2737 struct lttng_ht_iter iter;
2738 struct lttng_ht_node_ulong *node;
2739 struct ust_app_ctx *app_ctx = NULL;
2740
2741 assert(uctx);
2742 assert(ht);
2743
2744 /* Lookup using the lttng_ust_context_type and a custom match fct. */
2745 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) uctx->ctx, lttng_ht_seed),
2746 ht_match_ust_app_ctx, uctx, &iter.iter);
2747 node = lttng_ht_iter_get_node_ulong(&iter);
2748 if (!node) {
2749 goto end;
2750 }
2751
2752 app_ctx = caa_container_of(node, struct ust_app_ctx, node);
2753
2754 end:
2755 return app_ctx;
2756 }
2757
2758 /*
2759 * Create a context for the channel on the tracer.
2760 *
2761 * Called with UST app session lock held and a RCU read side lock.
2762 */
2763 static
2764 int create_ust_app_channel_context(struct ust_app_channel *ua_chan,
2765 struct lttng_ust_context_attr *uctx,
2766 struct ust_app *app)
2767 {
2768 int ret = 0;
2769 struct ust_app_ctx *ua_ctx;
2770
2771 DBG2("UST app adding context to channel %s", ua_chan->name);
2772
2773 ua_ctx = find_ust_app_context(ua_chan->ctx, uctx);
2774 if (ua_ctx) {
2775 ret = -EEXIST;
2776 goto error;
2777 }
2778
2779 ua_ctx = alloc_ust_app_ctx(uctx);
2780 if (ua_ctx == NULL) {
2781 /* malloc failed */
2782 ret = -ENOMEM;
2783 goto error;
2784 }
2785
2786 lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
2787 lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
2788 cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
2789
2790 ret = create_ust_channel_context(ua_chan, ua_ctx, app);
2791 if (ret < 0) {
2792 goto error;
2793 }
2794
2795 error:
2796 return ret;
2797 }
2798
2799 /*
2800 * Enable on the tracer side a ust app event for the session and channel.
2801 *
2802 * Called with UST app session lock held.
2803 */
2804 static
2805 int enable_ust_app_event(struct ust_app_session *ua_sess,
2806 struct ust_app_event *ua_event, struct ust_app *app)
2807 {
2808 int ret;
2809
2810 ret = enable_ust_object(app, ua_event->obj);
2811 if (ret < 0) {
2812 goto error;
2813 }
2814
2815 ua_event->enabled = 1;
2816
2817 error:
2818 return ret;
2819 }
2820
2821 /*
2822 * Disable on the tracer side a ust app event for the session and channel.
2823 */
2824 static int disable_ust_app_event(struct ust_app_session *ua_sess,
2825 struct ust_app_event *ua_event, struct ust_app *app)
2826 {
2827 int ret;
2828
2829 ret = disable_ust_object(app, ua_event->obj);
2830 if (ret < 0) {
2831 goto error;
2832 }
2833
2834 ua_event->enabled = 0;
2835
2836 error:
2837 return ret;
2838 }
2839
2840 /*
2841 * Lookup ust app channel for session and disable it on the tracer side.
2842 */
2843 static
2844 int disable_ust_app_channel(struct ust_app_session *ua_sess,
2845 struct ust_app_channel *ua_chan, struct ust_app *app)
2846 {
2847 int ret;
2848
2849 ret = disable_ust_channel(app, ua_sess, ua_chan);
2850 if (ret < 0) {
2851 goto error;
2852 }
2853
2854 ua_chan->enabled = 0;
2855
2856 error:
2857 return ret;
2858 }
2859
2860 /*
2861 * Lookup ust app channel for session and enable it on the tracer side. This
2862 * MUST be called with a RCU read side lock acquired.
2863 */
2864 static int enable_ust_app_channel(struct ust_app_session *ua_sess,
2865 struct ltt_ust_channel *uchan, struct ust_app *app)
2866 {
2867 int ret = 0;
2868 struct lttng_ht_iter iter;
2869 struct lttng_ht_node_str *ua_chan_node;
2870 struct ust_app_channel *ua_chan;
2871
2872 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
2873 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
2874 if (ua_chan_node == NULL) {
2875 DBG2("Unable to find channel %s in ust session id %" PRIu64,
2876 uchan->name, ua_sess->tracing_id);
2877 goto error;
2878 }
2879
2880 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
2881
2882 ret = enable_ust_channel(app, ua_sess, ua_chan);
2883 if (ret < 0) {
2884 goto error;
2885 }
2886
2887 error:
2888 return ret;
2889 }
2890
2891 /*
2892 * Ask the consumer to create a channel and get it if successful.
2893 *
2894 * Called with UST app session lock held.
2895 *
2896 * Return 0 on success or else a negative value.
2897 */
2898 static int do_consumer_create_channel(struct ltt_ust_session *usess,
2899 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
2900 int bitness, struct ust_registry_session *registry,
2901 uint64_t trace_archive_id)
2902 {
2903 int ret;
2904 unsigned int nb_fd = 0;
2905 struct consumer_socket *socket;
2906
2907 assert(usess);
2908 assert(ua_sess);
2909 assert(ua_chan);
2910 assert(registry);
2911
2912 rcu_read_lock();
2913 health_code_update();
2914
2915 /* Get the right consumer socket for the application. */
2916 socket = consumer_find_socket_by_bitness(bitness, usess->consumer);
2917 if (!socket) {
2918 ret = -EINVAL;
2919 goto error;
2920 }
2921
2922 health_code_update();
2923
2924 /* Need one fd for the channel. */
2925 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
2926 if (ret < 0) {
2927 ERR("Exhausted number of available FD upon create channel");
2928 goto error;
2929 }
2930
2931 /*
2932 * Ask consumer to create channel. The consumer will return the number of
2933 * streams we have to expect.
2934 */
2935 ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
2936 registry, usess->current_trace_chunk);
2937 if (ret < 0) {
2938 goto error_ask;
2939 }
2940
2941 /*
2942 * Compute the number of fd needed before receiving them. It must be 2 per
2943 * stream (2 being the default value here).
2944 */
2945 nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;
2946
2947 /* Reserve the amount of file descriptor we need. */
2948 ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
2949 if (ret < 0) {
2950 ERR("Exhausted number of available FD upon create channel");
2951 goto error_fd_get_stream;
2952 }
2953
2954 health_code_update();
2955
2956 /*
2957 * Now get the channel from the consumer. This call will populate the stream
2958 * list of that channel and set the ust objects.
2959 */
2960 if (usess->consumer->enabled) {
2961 ret = ust_consumer_get_channel(socket, ua_chan);
2962 if (ret < 0) {
2963 goto error_destroy;
2964 }
2965 }
2966
2967 rcu_read_unlock();
2968 return 0;
2969
2970 error_destroy:
2971 lttng_fd_put(LTTNG_FD_APPS, nb_fd);
2972 error_fd_get_stream:
2973 /*
2974 * Initiate a destroy channel on the consumer since we had an error
2975 * handling it on our side. The return value is of no importance since we
2976 * already have a ret value set by the previous error that we need to
2977 * return.
2978 */
2979 (void) ust_consumer_destroy_channel(socket, ua_chan);
2980 error_ask:
2981 lttng_fd_put(LTTNG_FD_APPS, 1);
2982 error:
2983 health_code_update();
2984 rcu_read_unlock();
2985 return ret;
2986 }
2987
2988 /*
2989 * Duplicate the ust data object of the ust app stream and save it in the
2990 * buffer registry stream.
2991 *
2992 * Return 0 on success or else a negative value.
2993 */
2994 static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
2995 struct ust_app_stream *stream)
2996 {
2997 int ret;
2998
2999 assert(reg_stream);
3000 assert(stream);
3001
3002 /* Reserve the amount of file descriptor we need. */
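/* Two per stream: one for the shm and one for the wakeup fd. */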
3003 ret = lttng_fd_get(LTTNG_FD_APPS, 2);
3004 if (ret < 0) {
3005 ERR("Exhausted number of available FD upon duplicate stream");
3006 goto error;
3007 }
3008
3009 /* Duplicate object for stream once the original is in the registry. */
3010 ret = ustctl_duplicate_ust_object_data(&stream->obj,
3011 reg_stream->obj.ust);
3012 if (ret < 0) {
3013 ERR("Duplicate stream obj from %p to %p failed with ret %d",
3014 reg_stream->obj.ust, stream->obj, ret);
3015 lttng_fd_put(LTTNG_FD_APPS, 2);
3016 goto error;
3017 }
3018 stream->handle = stream->obj->handle;
3019
3020 error:
3021 return ret;
3022 }
3023
3024 /*
3025 * Duplicate the ust data object of the ust app channel and save it in the
3026 * buffer registry channel.
3027 *
3028 * Return 0 on success or else a negative value.
3029 */
3030 static int duplicate_channel_object(struct buffer_reg_channel *buf_reg_chan,
3031 struct ust_app_channel *ua_chan)
3032 {
3033 int ret;
3034
3035 assert(buf_reg_chan);
3036 assert(ua_chan);
3037
3038 /* Need one fd for the channel. */
3039 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
3040 if (ret < 0) {
3041 ERR("Exhausted number of available FD upon duplicate channel");
3042 goto error_fd_get;
3043 }
3044
3045 /* Duplicate object for stream once the original is in the registry. */
3046 ret = ustctl_duplicate_ust_object_data(&ua_chan->obj, buf_reg_chan->obj.ust);
3047 if (ret < 0) {
3048 ERR("Duplicate channel obj from %p to %p failed with ret: %d",
3049 buf_reg_chan->obj.ust, ua_chan->obj, ret);
3050 goto error;
3051 }
3052 ua_chan->handle = ua_chan->obj->handle;
3053
3054 return 0;
3055
3056 error:
3057 lttng_fd_put(LTTNG_FD_APPS, 1);
3058 error_fd_get:
3059 return ret;
3060 }
3061
3062 /*
3063 * For a given channel buffer registry, setup all streams of the given ust
3064 * application channel.
3065 *
3066 * Return 0 on success or else a negative value.
3067 */
3068 static int setup_buffer_reg_streams(struct buffer_reg_channel *buf_reg_chan,
3069 struct ust_app_channel *ua_chan,
3070 struct ust_app *app)
3071 {
3072 int ret = 0;
3073 struct ust_app_stream *stream, *stmp;
3074
3075 assert(buf_reg_chan);
3076 assert(ua_chan);
3077
3078 DBG2("UST app setup buffer registry stream");
3079
3080 /* Send all streams to application. */
3081 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
3082 struct buffer_reg_stream *reg_stream;
3083
3084 ret = buffer_reg_stream_create(&reg_stream);
3085 if (ret < 0) {
3086 goto error;
3087 }
3088
3089 /*
3090 * Keep original pointer and nullify it in the stream so the delete
3091 * stream call does not release the object.
3092 */
3093 reg_stream->obj.ust = stream->obj;
3094 stream->obj = NULL;
3095 buffer_reg_stream_add(reg_stream, buf_reg_chan);
3096
3097 /* We don't need the streams anymore. */
3098 cds_list_del(&stream->list);
3099 delete_ust_app_stream(-1, stream, app);
3100 }
3101
3102 error:
3103 return ret;
3104 }
3105
3106 /*
3107 * Create a buffer registry channel for the given session registry and
3108 * application channel object. If regp pointer is valid, it's set with the
3109 * created object. Important, the created object is NOT added to the session
3110 * registry hash table.
3111 *
3112 * Return 0 on success else a negative value.
3113 */
3114 static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
3115 struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp)
3116 {
3117 int ret;
3118 struct buffer_reg_channel *buf_reg_chan = NULL;
3119
3120 assert(reg_sess);
3121 assert(ua_chan);
3122
3123 DBG2("UST app creating buffer registry channel for %s", ua_chan->name);
3124
3125 /* Create buffer registry channel. */
3126 ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &buf_reg_chan);
3127 if (ret < 0) {
3128 goto error_create;
3129 }
3130 assert(buf_reg_chan);
3131 buf_reg_chan->consumer_key = ua_chan->key;
3132 buf_reg_chan->subbuf_size = ua_chan->attr.subbuf_size;
3133 buf_reg_chan->num_subbuf = ua_chan->attr.num_subbuf;
3134
3135 /* Create and add a channel registry to session. */
3136 ret = ust_registry_channel_add(reg_sess->reg.ust,
3137 ua_chan->tracing_channel_id);
3138 if (ret < 0) {
3139 goto error;
3140 }
3141 buffer_reg_channel_add(reg_sess, buf_reg_chan);
3142
3143 if (regp) {
3144 *regp = buf_reg_chan;
3145 }
3146
3147 return 0;
3148
3149 error:
3150 /* Safe because the registry channel object was not added to any HT. */
3151 buffer_reg_channel_destroy(buf_reg_chan, LTTNG_DOMAIN_UST);
3152 error_create:
3153 return ret;
3154 }
3155
3156 /*
3157 * Setup buffer registry channel for the given session registry and application
3158 * channel object. If regp pointer is valid, it's set with the created object.
3159 *
3160 * Return 0 on success else a negative value.
3161 */
3162 static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
3163 struct ust_app_channel *ua_chan, struct buffer_reg_channel *buf_reg_chan,
3164 struct ust_app *app)
3165 {
3166 int ret;
3167
3168 assert(reg_sess);
3169 assert(buf_reg_chan);
3170 assert(ua_chan);
3171 assert(ua_chan->obj);
3172
3173 DBG2("UST app setup buffer registry channel for %s", ua_chan->name);
3174
3175 /* Setup all streams for the registry. */
3176 ret = setup_buffer_reg_streams(buf_reg_chan, ua_chan, app);
3177 if (ret < 0) {
3178 goto error;
3179 }
3180
3181 buf_reg_chan->obj.ust = ua_chan->obj;
3182 ua_chan->obj = NULL;
3183
3184 return 0;
3185
3186 error:
3187 buffer_reg_channel_remove(reg_sess, buf_reg_chan);
3188 buffer_reg_channel_destroy(buf_reg_chan, LTTNG_DOMAIN_UST);
3189 return ret;
3190 }
3191
3192 /*
3193 * Send buffer registry channel to the application.
3194 *
3195 * Return 0 on success else a negative value.
3196 */
3197 static int send_channel_uid_to_ust(struct buffer_reg_channel *buf_reg_chan,
3198 struct ust_app *app, struct ust_app_session *ua_sess,
3199 struct ust_app_channel *ua_chan)
3200 {
3201 int ret;
3202 struct buffer_reg_stream *reg_stream;
3203
3204 assert(buf_reg_chan);
3205 assert(app);
3206 assert(ua_sess);
3207 assert(ua_chan);
3208
3209 DBG("UST app sending buffer registry channel to ust sock %d", app->sock);
3210
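/*
* Per-UID buffers are shared between applications: hand this application
* duplicates of the registry's channel and stream objects rather than
* the originals.
*/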
3211 ret = duplicate_channel_object(buf_reg_chan, ua_chan);
3212 if (ret < 0) {
3213 goto error;
3214 }
3215
3216 /* Send channel to the application. */
3217 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
3218 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
3219 ret = -ENOTCONN; /* Caused by app exiting. */
3220 goto error;
3221 } else if (ret < 0) {
3222 goto error;
3223 }
3224
3225 health_code_update();
3226
3227 /* Send all streams to application. */
3228 pthread_mutex_lock(&buf_reg_chan->stream_list_lock);
3229 cds_list_for_each_entry(reg_stream, &buf_reg_chan->streams, lnode) {
3230 struct ust_app_stream stream;
3231
3232 ret = duplicate_stream_object(reg_stream, &stream);
3233 if (ret < 0) {
3234 goto error_stream_unlock;
3235 }
3236
3237 ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
3238 if (ret < 0) {
3239 (void) release_ust_app_stream(-1, &stream, app);
3240 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
3241 ret = -ENOTCONN; /* Caused by app exiting. */
3242 }
3243 goto error_stream_unlock;
3244 }
3245
3246 /*
3247 * The return value is not important here. This function will output an
3248 * error if needed.
3249 */
3250 (void) release_ust_app_stream(-1, &stream, app);
3251 }
3252 ua_chan->is_sent = 1;
3253
3254 error_stream_unlock:
3255 pthread_mutex_unlock(&buf_reg_chan->stream_list_lock);
3256 error:
3257 return ret;
3258 }
3259
3260 /*
3261 * Create and send to the application the created buffers with per UID buffers.
3262 *
3263 * This MUST be called with a RCU read side lock acquired.
3264 * The session list lock and the session's lock must be acquired.
3265 *
3266 * Return 0 on success else a negative value.
3267 */
3268 static int create_channel_per_uid(struct ust_app *app,
3269 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
3270 struct ust_app_channel *ua_chan)
3271 {
3272 int ret;
3273 struct buffer_reg_uid *reg_uid;
3274 struct buffer_reg_channel *buf_reg_chan;
3275 struct ltt_session *session = NULL;
3276 enum lttng_error_code notification_ret;
3277 struct ust_registry_channel *ust_reg_chan;
3278
3279 assert(app);
3280 assert(usess);
3281 assert(ua_sess);
3282 assert(ua_chan);
3283
3284 DBG("UST app creating channel %s with per UID buffers", ua_chan->name);
3285
3286 reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
3287 /*
3288 * The session creation handles the creation of this global registry
3289 * object. If none can be found, there is a code flow problem or a
3290 * teardown race.
3291 */
3292 assert(reg_uid);
3293
3294 buf_reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
3295 reg_uid);
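/*
* Buffers were already created for this UID/bitness by another
* application; just send them to this one.
*/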
3296 if (buf_reg_chan) {
3297 goto send_channel;
3298 }
3299
3300 /* Create the buffer registry channel object. */
3301 ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &buf_reg_chan);
3302 if (ret < 0) {
3303 ERR("Error creating the UST channel \"%s\" registry instance",
3304 ua_chan->name);
3305 goto error;
3306 }
3307
3308 session = session_find_by_id(ua_sess->tracing_id);
3309 assert(session);
3310 assert(pthread_mutex_trylock(&session->lock));
3311 assert(session_trylock_list());
3312
3313 /*
3314 * Create the buffers on the consumer side. This call populates the
3315 * ust app channel object with all streams and data object.
3316 */
3317 ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
3318 app->bits_per_long, reg_uid->registry->reg.ust,
3319 session->most_recent_chunk_id.value);
3320 if (ret < 0) {
3321 ERR("Error creating UST channel \"%s\" on the consumer daemon",
3322 ua_chan->name);
3323
3324 /*
3325 * Let's remove the previously created buffer registry channel so
3326 * it's not visible anymore in the session registry.
3327 */
3328 ust_registry_channel_del_free(reg_uid->registry->reg.ust,
3329 ua_chan->tracing_channel_id, false);
3330 buffer_reg_channel_remove(reg_uid->registry, buf_reg_chan);
3331 buffer_reg_channel_destroy(buf_reg_chan, LTTNG_DOMAIN_UST);
3332 goto error;
3333 }
3334
3335 /*
3336 * Setup the streams and add them to the session registry.
3337 */
3338 ret = setup_buffer_reg_channel(reg_uid->registry,
3339 ua_chan, buf_reg_chan, app);
3340 if (ret < 0) {
3341 ERR("Error setting up UST channel \"%s\"", ua_chan->name);
3342 goto error;
3343 }
3344
3345 /* Notify the notification subsystem of the channel's creation. */
3346 pthread_mutex_lock(&reg_uid->registry->reg.ust->lock);
3347 ust_reg_chan = ust_registry_channel_find(reg_uid->registry->reg.ust,
3348 ua_chan->tracing_channel_id);
3349 assert(ust_reg_chan);
3350 ust_reg_chan->consumer_key = ua_chan->key;
3351 ust_reg_chan = NULL;
3352 pthread_mutex_unlock(&reg_uid->registry->reg.ust->lock);
3353
3354 notification_ret = notification_thread_command_add_channel(
3355 notification_thread_handle, session->name,
3356 lttng_credentials_get_uid(&ua_sess->effective_credentials),
3357 lttng_credentials_get_gid(&ua_sess->effective_credentials),
3358 ua_chan->name,
3359 ua_chan->key, LTTNG_DOMAIN_UST,
3360 ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
3361 if (notification_ret != LTTNG_OK) {
3362 ret = - (int) notification_ret;
3363 ERR("Failed to add channel to notification thread");
3364 goto error;
3365 }
3366
3367 send_channel:
3368 /* Send buffers to the application. */
3369 ret = send_channel_uid_to_ust(buf_reg_chan, app, ua_sess, ua_chan);
3370 if (ret < 0) {
3371 if (ret != -ENOTCONN) {
3372 ERR("Error sending channel to application");
3373 }
3374 goto error;
3375 }
3376
3377 error:
3378 if (session) {
3379 session_put(session);
3380 }
3381 return ret;
3382 }
3383
3384 /*
3385 * Create and send to the application the created buffers with per PID buffers.
3386 *
3387 * Called with UST app session lock held.
3388 * The session list lock and the session's lock must be acquired.
3389 *
3390 * Return 0 on success else a negative value.
3391 */
3392 static int create_channel_per_pid(struct ust_app *app,
3393 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
3394 struct ust_app_channel *ua_chan)
3395 {
3396 int ret;
3397 struct ust_registry_session *registry;
3398 enum lttng_error_code cmd_ret;
3399 struct ltt_session *session = NULL;
3400 uint64_t chan_reg_key;
3401 struct ust_registry_channel *ust_reg_chan;
3402
3403 assert(app);
3404 assert(usess);
3405 assert(ua_sess);
3406 assert(ua_chan);
3407
3408 DBG("UST app creating channel %s with per PID buffers", ua_chan->name);
3409
3410 rcu_read_lock();
3411
3412 registry = get_session_registry(ua_sess);
3413 /* The UST app session lock is held, registry shall not be null. */
3414 assert(registry);
3415
3416 /* Create and add a new channel registry to session. */
3417 ret = ust_registry_channel_add(registry, ua_chan->key);
3418 if (ret < 0) {
3419 ERR("Error creating the UST channel \"%s\" registry instance",
3420 ua_chan->name);
3421 goto error;
3422 }
3423
3424 session = session_find_by_id(ua_sess->tracing_id);
3425 assert(session);
3426
3427 assert(pthread_mutex_trylock(&session->lock));
3428 assert(session_trylock_list());
3429
3430 /* Create and get channel on the consumer side. */
3431 ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
3432 app->bits_per_long, registry,
3433 session->most_recent_chunk_id.value);
3434 if (ret < 0) {
3435 ERR("Error creating UST channel \"%s\" on the consumer daemon",
3436 ua_chan->name);
3437 goto error_remove_from_registry;
3438 }
3439
3440 ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
3441 if (ret < 0) {
3442 if (ret != -ENOTCONN) {
3443 ERR("Error sending channel to application");
3444 }
3445 goto error_remove_from_registry;
3446 }
3447
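/*
* Record the channel's consumer key in the session registry and notify
* the notification subsystem of the channel's creation, as done for
* per-UID channels.
*/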
3448 chan_reg_key = ua_chan->key;
3449 pthread_mutex_lock(&registry->lock);
3450 ust_reg_chan = ust_registry_channel_find(registry, chan_reg_key);
3451 assert(ust_reg_chan);
3452 ust_reg_chan->consumer_key = ua_chan->key;
3453 pthread_mutex_unlock(&registry->lock);
3454
3455 cmd_ret = notification_thread_command_add_channel(
3456 notification_thread_handle, session->name,
3457 lttng_credentials_get_uid(&ua_sess->effective_credentials),
3458 lttng_credentials_get_gid(&ua_sess->effective_credentials),
3459 ua_chan->name,
3460 ua_chan->key, LTTNG_DOMAIN_UST,
3461 ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
3462 if (cmd_ret != LTTNG_OK) {
3463 ret = - (int) cmd_ret;
3464 ERR("Failed to add channel to notification thread");
3465 goto error_remove_from_registry;
3466 }
3467
3468 error_remove_from_registry:
3469 if (ret) {
3470 ust_registry_channel_del_free(registry, ua_chan->key, false);
3471 }
3472 error:
3473 rcu_read_unlock();
3474 if (session) {
3475 session_put(session);
3476 }
3477 return ret;
3478 }
3479
3480 /*
3481 * From an already allocated ust app channel, create the channel buffers if
3482 * needed and send them to the application. This MUST be called with a RCU read
3483 * side lock acquired.
3484 *
3485 * Called with UST app session lock held.
3486 *
3487 * Return 0 on success or else a negative value. Returns -ENOTCONN if
3488 * the application exited concurrently.
3489 */
3490 static int ust_app_channel_send(struct ust_app *app,
3491 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
3492 struct ust_app_channel *ua_chan)
3493 {
3494 int ret;
3495
3496 assert(app);
3497 assert(usess);
3498 assert(usess->active);
3499 assert(ua_sess);
3500 assert(ua_chan);
3501
3502 /* Handle buffer type before sending the channel to the application. */
3503 switch (usess->buffer_type) {
3504 case LTTNG_BUFFER_PER_UID:
3505 {
3506 ret = create_channel_per_uid(app, usess, ua_sess, ua_chan);
3507 if (ret < 0) {
3508 goto error;
3509 }
3510 break;
3511 }
3512 case LTTNG_BUFFER_PER_PID:
3513 {
3514 ret = create_channel_per_pid(app, usess, ua_sess, ua_chan);
3515 if (ret < 0) {
3516 goto error;
3517 }
3518 break;
3519 }
3520 default:
3521 assert(0);
3522 ret = -EINVAL;
3523 goto error;
3524 }
3525
3526 /* Initialize ust objd object using the received handle and add it. */
3527 lttng_ht_node_init_ulong(&ua_chan->ust_objd_node, ua_chan->handle);
3528 lttng_ht_add_unique_ulong(app->ust_objd, &ua_chan->ust_objd_node);
3529
3530 /* If channel is not enabled, disable it on the tracer */
3531 if (!ua_chan->enabled) {
3532 ret = disable_ust_channel(app, ua_sess, ua_chan);
3533 if (ret < 0) {
3534 goto error;
3535 }
3536 }
3537
3538 error:
3539 return ret;
3540 }
3541
3542 /*
3543 * Create UST app channel and return it through ua_chanp if not NULL.
3544 *
3545 * Called with UST app session lock and RCU read-side lock held.
3546 *
3547 * Return 0 on success or else a negative value.
3548 */
3549 static int ust_app_channel_allocate(struct ust_app_session *ua_sess,
3550 struct ltt_ust_channel *uchan,
3551 enum lttng_ust_abi_chan_type type, struct ltt_ust_session *usess,
3552 struct ust_app_channel **ua_chanp)
3553 {
3554 int ret = 0;
3555 struct lttng_ht_iter iter;
3556 struct lttng_ht_node_str *ua_chan_node;
3557 struct ust_app_channel *ua_chan;
3558
3559 /* Lookup channel in the ust app session */
3560 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
3561 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
3562 if (ua_chan_node != NULL) {
3563 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3564 goto end;
3565 }
3566
3567 ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
3568 if (ua_chan == NULL) {
3569 /* Only malloc can fail here */
3570 ret = -ENOMEM;
3571 goto error;
3572 }
3573 shadow_copy_channel(ua_chan, uchan);
3574
3575 /* Set channel type. */
3576 ua_chan->attr.type = type;
3577
3578 /* Only add the channel if successful on the tracer side. */
3579 lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
3580 end:
3581 if (ua_chanp) {
3582 *ua_chanp = ua_chan;
3583 }
3584
3585 /* Everything went well. */
3586 return 0;
3587
3588 error:
3589 return ret;
3590 }
3591
3592 /*
3593 * Create UST app event and create it on the tracer side.
3594 *
3595 * Must be called with the RCU read side lock held.
3596 * Called with ust app session mutex held.
3597 */
3598 static
3599 int create_ust_app_event(struct ust_app_session *ua_sess,
3600 struct ust_app_channel *ua_chan, struct ltt_ust_event *uevent,
3601 struct ust_app *app)
3602 {
3603 int ret = 0;
3604 struct ust_app_event *ua_event;
3605
3606 ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
3607 if (ua_event == NULL) {
3608 /* Only failure mode of alloc_ust_app_event(). */
3609 ret = -ENOMEM;
3610 goto end;
3611 }
3612 shadow_copy_event(ua_event, uevent);
3613
3614 /* Create it on the tracer side */
3615 ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
3616 if (ret < 0) {
3617 /*
3618 * Not found previously means that it does not exist on the
3619 * tracer. If the application reports that the event existed,
3620 * it means there is a bug in the sessiond or lttng-ust
3621 * (or corruption, etc.)
3622 */
3623 if (ret == -LTTNG_UST_ERR_EXIST) {
3624 ERR("Tracer for application reported that an event being created already existed: "
3625 "event_name = \"%s\", pid = %d, ppid = %d, uid = %d, gid = %d",
3626 uevent->attr.name,
3627 app->pid, app->ppid, app->uid,
3628 app->gid);
3629 }
3630 goto error;
3631 }
3632
3633 add_unique_ust_app_event(ua_chan, ua_event);
3634
3635 DBG2("UST app create event completed: app = '%s' (ppid: %d)",
3636 app->name, app->ppid);
3637
3638 end:
3639 return ret;
3640
3641 error:
3642 /* Valid. Calling here is already in a read side lock */
3643 delete_ust_app_event(-1, ua_event, app);
3644 return ret;
3645 }
3646
3647 /*
3648 * Create UST app event notifier rule and create it on the tracer side.
3649 *
3650 * Must be called with the RCU read side lock held.
3651 * Called with ust app session mutex held.
3652 */
3653 static
3654 int create_ust_app_event_notifier_rule(struct lttng_trigger *trigger,
3655 struct ust_app *app)
3656 {
3657 int ret = 0;
3658 struct ust_app_event_notifier_rule *ua_event_notifier_rule;
3659
3660 ua_event_notifier_rule = alloc_ust_app_event_notifier_rule(trigger);
3661 if (ua_event_notifier_rule == NULL) {
3662 ret = -ENOMEM;
3663 goto end;
3664 }
3665
3666 /* Create it on the tracer side. */
3667 ret = create_ust_event_notifier(app, ua_event_notifier_rule);
3668 if (ret < 0) {
3669 /*
3670 * Not found previously means that it does not exist on the
3671 * tracer. If the application reports that the event existed,
3672 * it means there is a bug in the sessiond or lttng-ust
3673 * (or corruption, etc.)
3674 */
3675 if (ret == -LTTNG_UST_ERR_EXIST) {
3676 ERR("Tracer for application reported that an event notifier being created already exists: "
3677 "token = \"%" PRIu64 "\", pid = %d, ppid = %d, uid = %d, gid = %d",
3678 lttng_trigger_get_tracer_token(trigger),
3679 app->pid, app->ppid, app->uid,
3680 app->gid);
3681 }
3682 goto error;
3683 }
3684
3685 lttng_ht_add_unique_u64(app->token_to_event_notifier_rule_ht,
3686 &ua_event_notifier_rule->node);
3687
3688 DBG2("UST app create token event rule completed: app = '%s' (ppid: %d), token = %" PRIu64,
3689 app->name, app->ppid, lttng_trigger_get_tracer_token(trigger));
3690
3691 goto end;
3692
3693 error:
3694 /* The RCU read side lock is already being held by the caller. */
3695 delete_ust_app_event_notifier_rule(-1, ua_event_notifier_rule, app);
3696 end:
3697 return ret;
3698 }
3699
3700 /*
3701 * Create UST metadata and open it on the tracer side.
3702 *
3703 * Called with UST app session lock held and RCU read side lock.
3704 */
3705 static int create_ust_app_metadata(struct ust_app_session *ua_sess,
3706 struct ust_app *app, struct consumer_output *consumer)
3707 {
3708 int ret = 0;
3709 struct ust_app_channel *metadata;
3710 struct consumer_socket *socket;
3711 struct ust_registry_session *registry;
3712 struct ltt_session *session = NULL;
3713
3714 assert(ua_sess);
3715 assert(app);
3716 assert(consumer);
3717
3718 registry = get_session_registry(ua_sess);
3719 /* The UST app session lock is held, registry shall not be null. */
3720 assert(registry);
3721
3722 pthread_mutex_lock(&registry->lock);
3723
3724 /* Metadata already exists for this registry or it was closed previously */
3725 if (registry->metadata_key || registry->metadata_closed) {
3726 ret = 0;
3727 goto error;
3728 }
3729
3730 /* Allocate UST metadata */
3731 metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, NULL);
3732 if (!metadata) {
3733 /* malloc() failed */
3734 ret = -ENOMEM;
3735 goto error;
3736 }
3737
3738 memcpy(&metadata->attr, &ua_sess->metadata_attr, sizeof(metadata->attr));
3739
3740 /* Need one fd for the channel. */
3741 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
3742 if (ret < 0) {
3743 ERR("Exhausted number of available FD upon create metadata");
3744 goto error;
3745 }
3746
3747 /* Get the right consumer socket for the application. */
3748 socket = consumer_find_socket_by_bitness(app->bits_per_long, consumer);
3749 if (!socket) {
3750 ret = -EINVAL;
3751 goto error_consumer;
3752 }
3753
3754 /*
3755 * Keep metadata key so we can identify it on the consumer side. Assign it
3756 * to the registry *before* we ask the consumer so we avoid the race where
3757 * the consumer requests the metadata before our ask_channel call has
3758 * returned.
3759 */
3760 registry->metadata_key = metadata->key;
3761
3762 session = session_find_by_id(ua_sess->tracing_id);
3763 assert(session);
3764
3765 assert(pthread_mutex_trylock(&session->lock));
3766 assert(session_trylock_list());
3767
3768 /*
3769 * Ask the consumer to create the metadata channel. The metadata object
3770 * will be created by the consumer and kept there. However, the stream is
3771 * never added or monitored until we do a first push metadata to the
3772 * consumer.
3773 */
3774 ret = ust_consumer_ask_channel(ua_sess, metadata, consumer, socket,
3775 registry, session->current_trace_chunk);
3776 if (ret < 0) {
3777 /* Nullify the metadata key so we don't try to close it later on. */
3778 registry->metadata_key = 0;
3779 goto error_consumer;
3780 }
3781
3782 /*
3783 * The setup command will make the metadata stream be sent to the relayd,
3784 * if applicable, and to the thread managing the metadata. This is important
3785 * because after this point, if an error occurs, the only way the stream
3786 * can be deleted is if it is monitored in the consumer.
3787 */
3788 ret = consumer_setup_metadata(socket, metadata->key);
3789 if (ret < 0) {
3790 /* Nullify the metadata key so we don't try to close it later on. */
3791 registry->metadata_key = 0;
3792 goto error_consumer;
3793 }
3794
3795 DBG2("UST metadata with key %" PRIu64 " created for app pid %d",
3796 metadata->key, app->pid);
3797
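/*
* Note that the success path falls through here as well: the local
* metadata channel object is only needed to send the channel
* configuration to the consumer, so the reserved FD and the local object
* are released in all cases.
*/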
3798 error_consumer:
3799 lttng_fd_put(LTTNG_FD_APPS, 1);
3800 delete_ust_app_channel(-1, metadata, app);
3801 error:
3802 pthread_mutex_unlock(&registry->lock);
3803 if (session) {
3804 session_put(session);
3805 }
3806 return ret;
3807 }
3808
3809 /*
3810 * Return ust app pointer or NULL if not found. RCU read side lock MUST be
3811 * acquired before calling this function.
3812 */
3813 struct ust_app *ust_app_find_by_pid(pid_t pid)
3814 {
3815 struct ust_app *app = NULL;
3816 struct lttng_ht_node_ulong *node;
3817 struct lttng_ht_iter iter;
3818
3819 lttng_ht_lookup(ust_app_ht, (void *)((unsigned long) pid), &iter);
3820 node = lttng_ht_iter_get_node_ulong(&iter);
3821 if (node == NULL) {
3822 DBG2("UST app no found with pid %d", pid);
3823 goto error;
3824 }
3825
3826 DBG2("Found UST app by pid %d", pid);
3827
3828 app = caa_container_of(node, struct ust_app, pid_n);
3829
3830 error:
3831 return app;
3832 }
3833
3834 /*
3835 * Allocate and init an UST app object using the registration information and
3836 * the command socket. This is called when the command socket connects to the
3837 * session daemon.
3838 *
3839 * The object is returned on success or else NULL.
3840 */
3841 struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
3842 {
3843 int ret;
3844 struct ust_app *lta = NULL;
3845 struct lttng_pipe *event_notifier_event_source_pipe = NULL;
3846
3847 assert(msg);
3848 assert(sock >= 0);
3849
3850 DBG3("UST app creating application for socket %d", sock);
3851
3852 if ((msg->bits_per_long == 64 &&
3853 (uatomic_read(&ust_consumerd64_fd) == -EINVAL))
3854 || (msg->bits_per_long == 32 &&
3855 (uatomic_read(&ust_consumerd32_fd) == -EINVAL))) {
3856 ERR("Registration failed: application \"%s\" (pid: %d) has "
3857 "%d-bit long, but no consumerd for this size is available.\n",
3858 msg->name, msg->pid, msg->bits_per_long);
3859 goto error;
3860 }
3861
3862 /*
3863 * Reserve the two file descriptors of the event source pipe. The write
3864 * end will be closed once it is passed to the application, at which
3865 * point a single 'put' will be performed.
3866 */
3867 ret = lttng_fd_get(LTTNG_FD_APPS, 2);
3868 if (ret) {
3869 ERR("Failed to reserve two file descriptors for the event source pipe while creating a new application instance: app = '%s' (ppid: %d)",
3870 msg->name, (int) msg->ppid);
3871 goto error;
3872 }
3873
3874 event_notifier_event_source_pipe = lttng_pipe_open(FD_CLOEXEC);
3875 if (!event_notifier_event_source_pipe) {
3876 PERROR("Failed to open application event source pipe: '%s' (ppid = %d)",
3877 msg->name, msg->ppid);
3878 goto error;
3879 }
3880
3881 lta = zmalloc(sizeof(struct ust_app));
3882 if (lta == NULL) {
3883 PERROR("malloc");
3884 goto error_free_pipe;
3885 }
3886
3887 lta->event_notifier_group.event_pipe = event_notifier_event_source_pipe;
3888
3889 lta->ppid = msg->ppid;
3890 lta->uid = msg->uid;
3891 lta->gid = msg->gid;
3892
3893 lta->bits_per_long = msg->bits_per_long;
3894 lta->uint8_t_alignment = msg->uint8_t_alignment;
3895 lta->uint16_t_alignment = msg->uint16_t_alignment;
3896 lta->uint32_t_alignment = msg->uint32_t_alignment;
3897 lta->uint64_t_alignment = msg->uint64_t_alignment;
3898 lta->long_alignment = msg->long_alignment;
3899 lta->byte_order = msg->byte_order;
3900
3901 lta->v_major = msg->major;
3902 lta->v_minor = msg->minor;
3903 lta->sessions = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
3904 lta->ust_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3905 lta->ust_sessions_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3906 lta->notify_sock = -1;
3907 lta->token_to_event_notifier_rule_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
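/*
* Maps trigger tracer tokens to this application's event notifier
* rules; populated by ust_app_synchronize_event_notifier_rules().
*/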
3908
3909 /* Copy name and make sure it's NULL terminated. */
3910 strncpy(lta->name, msg->name, sizeof(lta->name));
3911 lta->name[UST_APP_PROCNAME_LEN] = '\0';
3912
3913 /*
3914 * The application's compatibility is checked when the registration
3915 * information is received, before this function is called. So, at this
3916 * point, the application can work with this session daemon.
3917 */
3918 lta->compatible = 1;
3919
3920 lta->pid = msg->pid;
3921 lttng_ht_node_init_ulong(&lta->pid_n, (unsigned long) lta->pid);
3922 lta->sock = sock;
3923 pthread_mutex_init(&lta->sock_lock, NULL);
3924 lttng_ht_node_init_ulong(&lta->sock_n, (unsigned long) lta->sock);
3925
3926 CDS_INIT_LIST_HEAD(&lta->teardown_head);
3927 return lta;
3928
3929 error_free_pipe:
3930 lttng_pipe_destroy(event_notifier_event_source_pipe);
3931 lttng_fd_put(LTTNG_FD_APPS, 2);
3932 error:
3933 return NULL;
3934 }
3935
3936 /*
3937 * For a given application object, add it to every hash table.
3938 */
3939 void ust_app_add(struct ust_app *app)
3940 {
3941 assert(app);
3942 assert(app->notify_sock >= 0);
3943
3944 app->registration_time = time(NULL);
3945
3946 rcu_read_lock();
3947
3948 /*
3949 * On a re-registration, we want to kick out the previous registration of
3950 * that pid
3951 */
3952 lttng_ht_add_replace_ulong(ust_app_ht, &app->pid_n);
3953
3954 /*
3955 * The socket _should_ be unique until _we_ call close. So, an add_unique
3956 * is used for ust_app_ht_by_sock, which fails an assert if the entry is
3957 * already in the table.
3958 */
3959 lttng_ht_add_unique_ulong(ust_app_ht_by_sock, &app->sock_n);
3960
3961 /* Add application to the notify socket hash table. */
3962 lttng_ht_node_init_ulong(&app->notify_sock_n, app->notify_sock);
3963 lttng_ht_add_unique_ulong(ust_app_ht_by_notify_sock, &app->notify_sock_n);
3964
3965 DBG("App registered with pid:%d ppid:%d uid:%d gid:%d sock:%d name:%s "
3966 "notify_sock:%d (version %d.%d)", app->pid, app->ppid, app->uid,
3967 app->gid, app->sock, app->name, app->notify_sock, app->v_major,
3968 app->v_minor);
3969
3970 rcu_read_unlock();
3971 }
3972
3973 /*
3974 * Set the application version into the object.
3975 *
3976 * Return 0 on success, else a negative value that is either an errno code
3977 * or an LTTng-UST error code.
3978 */
3979 int ust_app_version(struct ust_app *app)
3980 {
3981 int ret;
3982
3983 assert(app);
3984
3985 pthread_mutex_lock(&app->sock_lock);
3986 ret = ustctl_tracer_version(app->sock, &app->version);
3987 pthread_mutex_unlock(&app->sock_lock);
3988 if (ret < 0) {
3989 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
3990 ERR("UST app %d version failed with ret %d", app->sock, ret);
3991 } else {
3992 DBG3("UST app %d version failed. Application is dead", app->sock);
3993 }
3994 }
3995
3996 return ret;
3997 }
3998
3999 /*
4000 * Setup the base event notifier group.
4001 *
4002 * Return 0 on success, else a negative value that is either an errno code
4003 * or an LTTng-UST error code.
4004 */
4005 int ust_app_setup_event_notifier_group(struct ust_app *app)
4006 {
4007 int ret;
4008 int event_pipe_write_fd;
4009 struct lttng_ust_abi_object_data *event_notifier_group = NULL;
4010 enum lttng_error_code lttng_ret;
4011 enum event_notifier_error_accounting_status event_notifier_error_accounting_status;
4012
4013 assert(app);
4014
4015 /* Get the write side of the pipe. */
4016 event_pipe_write_fd = lttng_pipe_get_writefd(
4017 app->event_notifier_group.event_pipe);
4018
4019 pthread_mutex_lock(&app->sock_lock);
4020 ret = ustctl_create_event_notifier_group(app->sock,
4021 event_pipe_write_fd, &event_notifier_group);
4022 pthread_mutex_unlock(&app->sock_lock);
4023 if (ret < 0) {
4024 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
4025 ERR("Failed to create application event notifier group: ret = %d, app socket fd = %d, event_pipe_write_fd = %d",
4026 ret, app->sock, event_pipe_write_fd);
4027 } else {
4028 DBG("Failed to create application event notifier group (application is dead): app socket fd = %d",
4029 app->sock);
4030 }
4031
4032 goto error;
4033 }
4034
4035 ret = lttng_pipe_write_close(app->event_notifier_group.event_pipe);
4036 if (ret) {
4037 ERR("Failed to close write end of the application's event source pipe: app = '%s' (ppid = %d)",
4038 app->name, app->ppid);
4039 goto error;
4040 }
4041
4042 /*
4043 * Release the file descriptor that was reserved for the write-end of
4044 * the pipe.
4045 */
4046 lttng_fd_put(LTTNG_FD_APPS, 1);
4047
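/*
* Hand the read end of the event notifier pipe to the notification
* thread; the write end was passed to the application above through
* ustctl_create_event_notifier_group().
*/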
4048 lttng_ret = notification_thread_command_add_tracer_event_source(
4049 notification_thread_handle,
4050 lttng_pipe_get_readfd(app->event_notifier_group.event_pipe),
4051 LTTNG_DOMAIN_UST);
4052 if (lttng_ret != LTTNG_OK) {
4053 ERR("Failed to add tracer event source to notification thread");
4054 ret = -1;
4055 goto error;
4056 }
4057
4058 /* Assign handle only when the complete setup is valid. */
4059 app->event_notifier_group.object = event_notifier_group;
4060
4061 event_notifier_error_accounting_status = event_notifier_error_accounting_register_app(app);
4062 if (event_notifier_error_accounting_status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
4063 ERR("Failed to setup event notifier error accounting for app");
4064 ret = -1;
4065 goto error;
4066 }
4067
4068 return ret;
4069
4070 error:
4071 ustctl_release_object(app->sock, app->event_notifier_group.object);
4072 free(app->event_notifier_group.object);
4073 return ret;
4074 }
4075
4076 /*
4077 * Unregister app by removing it from the global traceable app list and freeing
4078 * the data struct.
4079 *
4080 * The socket is already closed at this point, so there is no need to close it here.
4081 */
4082 void ust_app_unregister(int sock)
4083 {
4084 struct ust_app *lta;
4085 struct lttng_ht_node_ulong *node;
4086 struct lttng_ht_iter ust_app_sock_iter;
4087 struct lttng_ht_iter iter;
4088 struct ust_app_session *ua_sess;
4089 int ret;
4090
4091 rcu_read_lock();
4092
4093 /* Get the node reference for a call_rcu */
4094 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &ust_app_sock_iter);
4095 node = lttng_ht_iter_get_node_ulong(&ust_app_sock_iter);
4096 assert(node);
4097
4098 lta = caa_container_of(node, struct ust_app, sock_n);
4099 DBG("PID %d unregistering with sock %d", lta->pid, sock);
4100
4101 /*
4102 * For per-PID buffers, perform "push metadata" and flush all
4103 * application streams before removing app from hash tables,
4104 * ensuring proper behavior of data_pending check.
4105 * Remove sessions so they are not visible during deletion.
4106 */
4107 cds_lfht_for_each_entry(lta->sessions->ht, &iter.iter, ua_sess,
4108 node.node) {
4109 struct ust_registry_session *registry;
4110
4111 ret = lttng_ht_del(lta->sessions, &iter);
4112 if (ret) {
4113 /* The session was already removed and is scheduled for teardown. */
4114 continue;
4115 }
4116
4117 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
4118 (void) ust_app_flush_app_session(lta, ua_sess);
4119 }
4120
4121 /*
4122 * Add session to list for teardown. This is safe since at this point we
4123 * are the only one using this list.
4124 */
4125 pthread_mutex_lock(&ua_sess->lock);
4126
4127 if (ua_sess->deleted) {
4128 pthread_mutex_unlock(&ua_sess->lock);
4129 continue;
4130 }
4131
4132 /*
4133 * Normally, this is done in the delete session process which is
4134 * executed in the call rcu below. However, upon unregistration we can't
4135 * afford to wait for the grace period before pushing data or else the
4136 * data pending feature can race between the unregistration and stop
4137 * command where the data pending command is sent *before* the grace
4138 * period ended.
4139 *
4140 * The close metadata below nullifies the metadata pointer in the
4141 * session so the delete session will NOT push/close a second time.
4142 */
4143 registry = get_session_registry(ua_sess);
4144 if (registry) {
4145 /* Push metadata for application before freeing the application. */
4146 (void) push_metadata(registry, ua_sess->consumer);
4147
4148 /*
4149 * Don't ask to close metadata for global per UID buffers. In that case,
4150 * metadata is only closed when the trace session is destroyed. Also, the
4151 * previous push metadata could have flagged the metadata registry to
4152 * close, so don't send a close command if it is already closed.
4153 */
4154 if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
4155 /* And ask to close it for this session registry. */
4156 (void) close_metadata(registry, ua_sess->consumer);
4157 }
4158 }
4159 cds_list_add(&ua_sess->teardown_node, &lta->teardown_head);
4160
4161 pthread_mutex_unlock(&ua_sess->lock);
4162 }
4163
4164 /* Remove application from the socket hash table */
4165 ret = lttng_ht_del(ust_app_ht_by_sock, &ust_app_sock_iter);
4166 assert(!ret);
4167
4168 /*
4169 * Remove application from notify hash table. The thread handling the
4170 * notify socket could have deleted the node, so ignore any error:
4171 * either way it is valid. The close of that socket is handled by the
4172 * apps_notify_thread.
4173 */
4174 iter.iter.node = &lta->notify_sock_n.node;
4175 (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
4176
4177 /*
4178 * Ignore return value since the node might have been removed before by an
4179 * add replace during app registration because the PID can be reassigned by
4180 * the OS.
4181 */
4182 iter.iter.node = &lta->pid_n.node;
4183 ret = lttng_ht_del(ust_app_ht, &iter);
4184 if (ret) {
4185 DBG3("Unregister app by PID %d failed. This can happen on pid reuse",
4186 lta->pid);
4187 }
4188
4189 /* Free memory */
4190 call_rcu(&lta->pid_n.head, delete_ust_app_rcu);
4191
4192 rcu_read_unlock();
4193 return;
4194 }
4195
4196 /*
4197 * Fill events array with all events name of all registered apps.
4198 */
4199 int ust_app_list_events(struct lttng_event **events)
4200 {
4201 int ret, handle;
4202 size_t nbmem, count = 0;
4203 struct lttng_ht_iter iter;
4204 struct ust_app *app;
4205 struct lttng_event *tmp_event;
4206
4207 nbmem = UST_APP_EVENT_LIST_SIZE;
4208 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event));
4209 if (tmp_event == NULL) {
4210 PERROR("zmalloc ust app events");
4211 ret = -ENOMEM;
4212 goto error;
4213 }
4214
4215 rcu_read_lock();
4216
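/*
* For each registered and compatible application, open a tracepoint
* listing handle, append every reported tracepoint to the dynamically
* grown 'tmp_event' array (its capacity is doubled when full), then
* release the handle.
*/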
4217 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4218 struct lttng_ust_abi_tracepoint_iter uiter;
4219
4220 health_code_update();
4221
4222 if (!app->compatible) {
4223 /*
4224 * TODO: In time, we should notify the caller of this error by
4225 * telling them that this is a version error.
4226 */
4227 continue;
4228 }
4229 pthread_mutex_lock(&app->sock_lock);
4230 handle = ustctl_tracepoint_list(app->sock);
4231 if (handle < 0) {
4232 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
4233 ERR("UST app list events getting handle failed for app pid %d",
4234 app->pid);
4235 }
4236 pthread_mutex_unlock(&app->sock_lock);
4237 continue;
4238 }
4239
4240 while ((ret = ustctl_tracepoint_list_get(app->sock, handle,
4241 &uiter)) != -LTTNG_UST_ERR_NOENT) {
4242 /* Handle ustctl error. */
4243 if (ret < 0) {
4244 int release_ret;
4245
4246 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
4247 ERR("UST app tp list get failed for app %d with ret %d",
4248 app->sock, ret);
4249 } else {
4250 DBG3("UST app tp list get failed. Application is dead");
4251 /*
4252 * This is normal behavior, an application can die during the
4253 * creation process. Don't report an error so the execution can
4254 * continue normally.
4255 */
4256 break;
4257 }
4258 free(tmp_event);
4259 release_ret = ustctl_release_handle(app->sock, handle);
4260 if (release_ret < 0 &&
4261 release_ret != -LTTNG_UST_ERR_EXITING &&
4262 release_ret != -EPIPE) {
4263 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
4264 }
4265 pthread_mutex_unlock(&app->sock_lock);
4266 goto rcu_error;
4267 }
4268
4269 health_code_update();
4270 if (count >= nbmem) {
4271 /* In case the realloc fails, we free the memory */
4272 struct lttng_event *new_tmp_event;
4273 size_t new_nbmem;
4274
4275 new_nbmem = nbmem << 1;
4276 DBG2("Reallocating event list from %zu to %zu entries",
4277 nbmem, new_nbmem);
4278 new_tmp_event = realloc(tmp_event,
4279 new_nbmem * sizeof(struct lttng_event));
4280 if (new_tmp_event == NULL) {
4281 int release_ret;
4282
4283 PERROR("realloc ust app events");
4284 free(tmp_event);
4285 ret = -ENOMEM;
4286 release_ret = ustctl_release_handle(app->sock, handle);
4287 if (release_ret < 0 &&
4288 release_ret != -LTTNG_UST_ERR_EXITING &&
4289 release_ret != -EPIPE) {
4290 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
4291 }
4292 pthread_mutex_unlock(&app->sock_lock);
4293 goto rcu_error;
4294 }
4295 /* Zero the new memory */
4296 memset(new_tmp_event + nbmem, 0,
4297 (new_nbmem - nbmem) * sizeof(struct lttng_event));
4298 nbmem = new_nbmem;
4299 tmp_event = new_tmp_event;
4300 }
4301 memcpy(tmp_event[count].name, uiter.name, LTTNG_UST_ABI_SYM_NAME_LEN);
4302 tmp_event[count].loglevel = uiter.loglevel;
4303 tmp_event[count].type = (enum lttng_event_type) LTTNG_UST_ABI_TRACEPOINT;
4304 tmp_event[count].pid = app->pid;
4305 tmp_event[count].enabled = -1;
4306 count++;
4307 }
4308 ret = ustctl_release_handle(app->sock, handle);
4309 pthread_mutex_unlock(&app->sock_lock);
4310 if (ret < 0 && ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
4311 ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
4312 }
4313 }
4314
4315 ret = count;
4316 *events = tmp_event;
4317
4318 DBG2("UST app list events done (%zu events)", count);
4319
4320 rcu_error:
4321 rcu_read_unlock();
4322 error:
4323 health_code_update();
4324 return ret;
4325 }
4326
4327 /*
4328 * Fill events array with all events name of all registered apps.
4329 */
4330 int ust_app_list_event_fields(struct lttng_event_field **fields)
4331 {
4332 int ret, handle;
4333 size_t nbmem, count = 0;
4334 struct lttng_ht_iter iter;
4335 struct ust_app *app;
4336 struct lttng_event_field *tmp_event;
4337
4338 nbmem = UST_APP_EVENT_LIST_SIZE;
4339 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event_field));
4340 if (tmp_event == NULL) {
4341 PERROR("zmalloc ust app event fields");
4342 ret = -ENOMEM;
4343 goto error;
4344 }
4345
4346 rcu_read_lock();
4347
4348 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4349 struct lttng_ust_abi_field_iter uiter;
4350
4351 health_code_update();
4352
4353 if (!app->compatible) {
4354 /*
4355 * TODO: In time, we should notify the caller of this error by
4356 * telling them that this is a version error.
4357 */
4358 continue;
4359 }
4360 pthread_mutex_lock(&app->sock_lock);
4361 handle = ustctl_tracepoint_field_list(app->sock);
4362 if (handle < 0) {
4363 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
4364 ERR("UST app list field getting handle failed for app pid %d",
4365 app->pid);
4366 }
4367 pthread_mutex_unlock(&app->sock_lock);
4368 continue;
4369 }
4370
4371 while ((ret = ustctl_tracepoint_field_list_get(app->sock, handle,
4372 &uiter)) != -LTTNG_UST_ERR_NOENT) {
4373 /* Handle ustctl error. */
4374 if (ret < 0) {
4375 int release_ret;
4376
4377 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
4378 ERR("UST app tp list field failed for app %d with ret %d",
4379 app->sock, ret);
4380 } else {
4381 DBG3("UST app tp list field failed. Application is dead");
4382 /*
4383 * This is normal behavior, an application can die during the
4384 * creation process. Don't report an error so the execution can
4385 * continue normally; move on to the next app.
4386 */
4387 break;
4388 }
4389 free(tmp_event);
4390 release_ret = ustctl_release_handle(app->sock, handle);
4391 pthread_mutex_unlock(&app->sock_lock);
4392 if (release_ret < 0 &&
4393 release_ret != -LTTNG_UST_ERR_EXITING &&
4394 release_ret != -EPIPE) {
4395 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
4396 }
4397 goto rcu_error;
4398 }
4399
4400 health_code_update();
4401 if (count >= nbmem) {
4402 /* In case the realloc fails, we free the memory */
4403 struct lttng_event_field *new_tmp_event;
4404 size_t new_nbmem;
4405
4406 new_nbmem = nbmem << 1;
4407 DBG2("Reallocating event field list from %zu to %zu entries",
4408 nbmem, new_nbmem);
4409 new_tmp_event = realloc(tmp_event,
4410 new_nbmem * sizeof(struct lttng_event_field));
4411 if (new_tmp_event == NULL) {
4412 int release_ret;
4413
4414 PERROR("realloc ust app event fields");
4415 free(tmp_event);
4416 ret = -ENOMEM;
4417 release_ret = ustctl_release_handle(app->sock, handle);
4418 pthread_mutex_unlock(&app->sock_lock);
4419 if (release_ret &&
4420 release_ret != -LTTNG_UST_ERR_EXITING &&
4421 release_ret != -EPIPE) {
4422 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
4423 }
4424 goto rcu_error;
4425 }
4426 /* Zero the new memory */
4427 memset(new_tmp_event + nbmem, 0,
4428 (new_nbmem - nbmem) * sizeof(struct lttng_event_field));
4429 nbmem = new_nbmem;
4430 tmp_event = new_tmp_event;
4431 }
4432
4433 memcpy(tmp_event[count].field_name, uiter.field_name, LTTNG_UST_ABI_SYM_NAME_LEN);
4434 /* Mapping between these enums matches 1 to 1. */
4435 tmp_event[count].type = (enum lttng_event_field_type) uiter.type;
4436 tmp_event[count].nowrite = uiter.nowrite;
4437
4438 memcpy(tmp_event[count].event.name, uiter.event_name, LTTNG_UST_ABI_SYM_NAME_LEN);
4439 tmp_event[count].event.loglevel = uiter.loglevel;
4440 tmp_event[count].event.type = LTTNG_EVENT_TRACEPOINT;
4441 tmp_event[count].event.pid = app->pid;
4442 tmp_event[count].event.enabled = -1;
4443 count++;
4444 }
4445 ret = ustctl_release_handle(app->sock, handle);
4446 pthread_mutex_unlock(&app->sock_lock);
4447 if (ret < 0 &&
4448 ret != -LTTNG_UST_ERR_EXITING &&
4449 ret != -EPIPE) {
4450 ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
4451 }
4452 }
4453
4454 ret = count;
4455 *fields = tmp_event;
4456
4457 DBG2("UST app list event fields done (%zu events)", count);
4458
4459 rcu_error:
4460 rcu_read_unlock();
4461 error:
4462 health_code_update();
4463 return ret;
4464 }
4465
4466 /*
4467 * Free and clean all traceable apps of the global list.
4468 *
4469 * Should _NOT_ be called with RCU read-side lock held.
4470 */
4471 void ust_app_clean_list(void)
4472 {
4473 int ret;
4474 struct ust_app *app;
4475 struct lttng_ht_iter iter;
4476
4477 DBG2("UST app cleaning registered apps hash table");
4478
4479 rcu_read_lock();
4480
4481 /* Cleanup notify socket hash table */
4482 if (ust_app_ht_by_notify_sock) {
4483 cds_lfht_for_each_entry(ust_app_ht_by_notify_sock->ht, &iter.iter, app,
4484 notify_sock_n.node) {
4485 /*
4486 * Assert that all notifiers are gone as all triggers
4487 * are unregistered prior to this clean-up.
4488 */
4489 assert(lttng_ht_get_count(app->token_to_event_notifier_rule_ht) == 0);
4490
4491 ust_app_notify_sock_unregister(app->notify_sock);
4492 }
4493 }
4494
4495 if (ust_app_ht) {
4496 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4497 ret = lttng_ht_del(ust_app_ht, &iter);
4498 assert(!ret);
4499 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
4500 }
4501 }
4502
4503 /* Cleanup socket hash table */
4504 if (ust_app_ht_by_sock) {
4505 cds_lfht_for_each_entry(ust_app_ht_by_sock->ht, &iter.iter, app,
4506 sock_n.node) {
4507 ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
4508 assert(!ret);
4509 }
4510 }
4511
4512 rcu_read_unlock();
4513
4514 /* Destroy is done only when the ht is empty */
4515 if (ust_app_ht) {
4516 ht_cleanup_push(ust_app_ht);
4517 }
4518 if (ust_app_ht_by_sock) {
4519 ht_cleanup_push(ust_app_ht_by_sock);
4520 }
4521 if (ust_app_ht_by_notify_sock) {
4522 ht_cleanup_push(ust_app_ht_by_notify_sock);
4523 }
4524 }
4525
4526 /*
4527 * Init the UST app hash tables.
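*
* The three tables are keyed by PID (ust_app_ht), by command socket
* (ust_app_ht_by_sock) and by notify socket (ust_app_ht_by_notify_sock);
* see ust_app_add().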
4528 */
4529 int ust_app_ht_alloc(void)
4530 {
4531 ust_app_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
4532 if (!ust_app_ht) {
4533 return -1;
4534 }
4535 ust_app_ht_by_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
4536 if (!ust_app_ht_by_sock) {
4537 return -1;
4538 }
4539 ust_app_ht_by_notify_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
4540 if (!ust_app_ht_by_notify_sock) {
4541 return -1;
4542 }
4543 return 0;
4544 }
4545
4546 /*
4547 * For a specific UST session, disable the channel for all registered apps.
4548 */
4549 int ust_app_disable_channel_glb(struct ltt_ust_session *usess,
4550 struct ltt_ust_channel *uchan)
4551 {
4552 int ret = 0;
4553 struct lttng_ht_iter iter;
4554 struct lttng_ht_node_str *ua_chan_node;
4555 struct ust_app *app;
4556 struct ust_app_session *ua_sess;
4557 struct ust_app_channel *ua_chan;
4558
4559 assert(usess->active);
4560 DBG2("UST app disabling channel %s from global domain for session id %" PRIu64,
4561 uchan->name, usess->id);
4562
4563 rcu_read_lock();
4564
4565 /* For every registered application */
4566 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4567 struct lttng_ht_iter uiter;
4568 if (!app->compatible) {
4569 /*
4570 * TODO: In time, we should notify the caller of this error by
4571 * telling them that this is a version error.
4572 */
4573 continue;
4574 }
4575 ua_sess = lookup_session_by_app(usess, app);
4576 if (ua_sess == NULL) {
4577 continue;
4578 }
4579
4580 /* Get channel */
4581 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4582 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
4583 /* If the session is found for the app, the channel must be there */
4584 assert(ua_chan_node);
4585
4586 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4587 /* The channel must not be already disabled */
4588 assert(ua_chan->enabled == 1);
4589
4590 /* Disable channel onto application */
4591 ret = disable_ust_app_channel(ua_sess, ua_chan, app);
4592 if (ret < 0) {
4593 /* XXX: We might want to report this error at some point... */
4594 continue;
4595 }
4596 }
4597
4598 rcu_read_unlock();
4599 return ret;
4600 }
4601
4602 /*
4603 * For a specific UST session, enable the channel for all registered apps.
4604 */
4605 int ust_app_enable_channel_glb(struct ltt_ust_session *usess,
4606 struct ltt_ust_channel *uchan)
4607 {
4608 int ret = 0;
4609 struct lttng_ht_iter iter;
4610 struct ust_app *app;
4611 struct ust_app_session *ua_sess;
4612
4613 assert(usess->active);
4614 DBG2("UST app enabling channel %s to global domain for session id %" PRIu64,
4615 uchan->name, usess->id);
4616
4617 rcu_read_lock();
4618
4619 /* For every registered application */
4620 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4621 if (!app->compatible) {
4622 /*
4623 * TODO: In time, we should notify the caller of this error by
4624 * telling them that this is a version error.
4625 */
4626 continue;
4627 }
4628 ua_sess = lookup_session_by_app(usess, app);
4629 if (ua_sess == NULL) {
4630 continue;
4631 }
4632
4633 /* Enable channel onto application */
4634 ret = enable_ust_app_channel(ua_sess, uchan, app);
4635 if (ret < 0) {
4636 /* XXX: We might want to report this error at some point... */
4637 continue;
4638 }
4639 }
4640
4641 rcu_read_unlock();
4642 return ret;
4643 }
4644
4645 /*
4646 * Disable an event in a channel and for a specific session.
4647 */
4648 int ust_app_disable_event_glb(struct ltt_ust_session *usess,
4649 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
4650 {
4651 int ret = 0;
4652 struct lttng_ht_iter iter, uiter;
4653 struct lttng_ht_node_str *ua_chan_node;
4654 struct ust_app *app;
4655 struct ust_app_session *ua_sess;
4656 struct ust_app_channel *ua_chan;
4657 struct ust_app_event *ua_event;
4658
4659 assert(usess->active);
4660 DBG("UST app disabling event %s for all apps in channel "
4661 "%s for session id %" PRIu64,
4662 uevent->attr.name, uchan->name, usess->id);
4663
4664 rcu_read_lock();
4665
4666 /* For all registered applications */
4667 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4668 if (!app->compatible) {
4669 /*
4670 * TODO: In time, we should notify the caller of this error by
4671 * telling them that this is a version error.
4672 */
4673 continue;
4674 }
4675 ua_sess = lookup_session_by_app(usess, app);
4676 if (ua_sess == NULL) {
4677 /* Next app */
4678 continue;
4679 }
4680
4681 /* Lookup channel in the ust app session */
4682 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4683 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
4684 if (ua_chan_node == NULL) {
4685 DBG2("Channel %s not found in session id %" PRIu64 " for app pid %d."
4686 "Skipping", uchan->name, usess->id, app->pid);
4687 continue;
4688 }
4689 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4690
4691 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
4692 uevent->filter, uevent->attr.loglevel,
4693 uevent->exclusion);
4694 if (ua_event == NULL) {
4695 DBG2("Event %s not found in channel %s for app pid %d."
4696 "Skipping", uevent->attr.name, uchan->name, app->pid);
4697 continue;
4698 }
4699
4700 ret = disable_ust_app_event(ua_sess, ua_event, app);
4701 if (ret < 0) {
4702 /* XXX: Report error someday... */
4703 continue;
4704 }
4705 }
4706
4707 rcu_read_unlock();
4708 return ret;
4709 }
4710
4711 /* The ua_sess lock must be held by the caller. */
4712 static
4713 int ust_app_channel_create(struct ltt_ust_session *usess,
4714 struct ust_app_session *ua_sess,
4715 struct ltt_ust_channel *uchan, struct ust_app *app,
4716 struct ust_app_channel **_ua_chan)
4717 {
4718 int ret = 0;
4719 struct ust_app_channel *ua_chan = NULL;
4720
4721 assert(ua_sess);
4722 ASSERT_LOCKED(ua_sess->lock);
4723
4724 if (!strncmp(uchan->name, DEFAULT_METADATA_NAME,
4725 sizeof(uchan->name))) {
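/*
* The metadata channel is not created here; only its attributes are
* recorded in the session. The channel itself is created later by
* create_ust_app_metadata().
*/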
4726 copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
4727 &uchan->attr);
4728 ret = 0;
4729 } else {
4730 struct ltt_ust_context *uctx = NULL;
4731
4732 /*
4733 * Create channel onto application and synchronize its
4734 * configuration.
4735 */
4736 ret = ust_app_channel_allocate(ua_sess, uchan,
4737 LTTNG_UST_ABI_CHAN_PER_CPU, usess,
4738 &ua_chan);
4739 if (ret < 0) {
4740 goto error;
4741 }
4742
4743 ret = ust_app_channel_send(app, usess,
4744 ua_sess, ua_chan);
4745 if (ret) {
4746 goto error;
4747 }
4748
4749 /* Add contexts. */
4750 cds_list_for_each_entry(uctx, &uchan->ctx_list, list) {
4751 ret = create_ust_app_channel_context(ua_chan,
4752 &uctx->ctx, app);
4753 if (ret) {
4754 goto error;
4755 }
4756 }
4757 }
4758
4759 error:
4760 if (ret < 0) {
4761 switch (ret) {
4762 case -ENOTCONN:
4763 /*
4764 * The application's socket is not valid. Either a bad socket
4765 * or a timeout on it. We can't inform the caller that for a
4766 * specific app, the session failed, so let's continue here.
4767 */
4768 ret = 0; /* Not an error. */
4769 break;
4770 case -ENOMEM:
4771 default:
4772 break;
4773 }
4774 }
4775
4776 if (ret == 0 && _ua_chan) {
4777 /*
4778 * Only return the application's channel on success. Note
4779 * that the channel can still be part of the application's
4780 * channel hashtable on error.
4781 */
4782 *_ua_chan = ua_chan;
4783 }
4784 return ret;
4785 }
4786
4787 /*
4788 * Enable event for a specific session and channel on the tracer.
4789 */
4790 int ust_app_enable_event_glb(struct ltt_ust_session *usess,
4791 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
4792 {
4793 int ret = 0;
4794 struct lttng_ht_iter iter, uiter;
4795 struct lttng_ht_node_str *ua_chan_node;
4796 struct ust_app *app;
4797 struct ust_app_session *ua_sess;
4798 struct ust_app_channel *ua_chan;
4799 struct ust_app_event *ua_event;
4800
4801 assert(usess->active);
4802 DBG("UST app enabling event %s for all apps for session id %" PRIu64,
4803 uevent->attr.name, usess->id);
4804
4805 /*
4806 * NOTE: At this point, this function is called only if the session and
4807 * channel passed are already created for all apps and enabled on the
4808 * tracer as well.
4809 */
4810
4811 rcu_read_lock();
4812
4813 /* For all registered applications */
4814 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4815 if (!app->compatible) {
4816 /*
4817 * TODO: In time, we should notify the caller of this error by
4818 * telling them that this is a version error.
4819 */
4820 continue;
4821 }
4822 ua_sess = lookup_session_by_app(usess, app);
4823 if (!ua_sess) {
4824 /* The application has a problem or is probably dead. */
4825 continue;
4826 }
4827
4828 pthread_mutex_lock(&ua_sess->lock);
4829
4830 if (ua_sess->deleted) {
4831 pthread_mutex_unlock(&ua_sess->lock);
4832 continue;
4833 }
4834
4835 /* Lookup channel in the ust app session */
4836 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4837 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
4838 /*
4839 * It is possible that the channel cannot be found if
4840 * the channel/event creation occurs concurrently with
4841 * an application exit.
4842 */
4843 if (!ua_chan_node) {
4844 pthread_mutex_unlock(&ua_sess->lock);
4845 continue;
4846 }
4847
4848 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4849
4850 /* Get event node */
4851 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
4852 uevent->filter, uevent->attr.loglevel, uevent->exclusion);
4853 if (ua_event == NULL) {
4854 DBG3("UST app enable event %s not found for app PID %d."
4855 "Skipping app", uevent->attr.name, app->pid);
4856 goto next_app;
4857 }
4858
4859 ret = enable_ust_app_event(ua_sess, ua_event, app);
4860 if (ret < 0) {
4861 pthread_mutex_unlock(&ua_sess->lock);
4862 goto error;
4863 }
4864 next_app:
4865 pthread_mutex_unlock(&ua_sess->lock);
4866 }
4867
4868 error:
4869 rcu_read_unlock();
4870 return ret;
4871 }
4872
4873 /*
4874 * For a specific existing UST session and UST channel, creates the event for
4875 * all registered apps.
4876 */
4877 int ust_app_create_event_glb(struct ltt_ust_session *usess,
4878 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
4879 {
4880 int ret = 0;
4881 struct lttng_ht_iter iter, uiter;
4882 struct lttng_ht_node_str *ua_chan_node;
4883 struct ust_app *app;
4884 struct ust_app_session *ua_sess;
4885 struct ust_app_channel *ua_chan;
4886
4887 assert(usess->active);
4888 DBG("UST app creating event %s for all apps for session id %" PRIu64,
4889 uevent->attr.name, usess->id);
4890
4891 rcu_read_lock();
4892
4893 /* For all registered applications */
4894 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4895 if (!app->compatible) {
4896 /*
4897 * TODO: In time, we should notify the caller of this error by
4898 * telling them that this is a version error.
4899 */
4900 continue;
4901 }
4902 ua_sess = lookup_session_by_app(usess, app);
4903 if (!ua_sess) {
4904 /* The application has a problem or is probably dead. */
4905 continue;
4906 }
4907
4908 pthread_mutex_lock(&ua_sess->lock);
4909
4910 if (ua_sess->deleted) {
4911 pthread_mutex_unlock(&ua_sess->lock);
4912 continue;
4913 }
4914
4915 /* Lookup channel in the ust app session */
4916 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4917 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
4918 /* If the channel is not found, there is a code flow error */
4919 assert(ua_chan_node);
4920
4921 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4922
4923 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
4924 pthread_mutex_unlock(&ua_sess->lock);
4925 if (ret < 0) {
4926 if (ret != -LTTNG_UST_ERR_EXIST) {
4927 /* Possible value at this point: -ENOMEM. If so, we stop! */
4928 break;
4929 }
4930 DBG2("UST app event %s already exist on app PID %d",
4931 uevent->attr.name, app->pid);
4932 continue;
4933 }
4934 }
4935
4936 rcu_read_unlock();
4937 return ret;
4938 }
4939
4940 /*
4941 * Start tracing for a specific UST session and app.
4942 *
4943 * Called with UST app session lock held.
4944 *
4945 */
4946 static
4947 int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app)
4948 {
4949 int ret = 0;
4950 struct ust_app_session *ua_sess;
4951
4952 DBG("Starting tracing for ust app pid %d", app->pid);
4953
4954 rcu_read_lock();
4955
4956 if (!app->compatible) {
4957 goto end;
4958 }
4959
4960 ua_sess = lookup_session_by_app(usess, app);
4961 if (ua_sess == NULL) {
4962 /* The session is in teardown process. Ignore and continue. */
4963 goto end;
4964 }
4965
4966 pthread_mutex_lock(&ua_sess->lock);
4967
4968 if (ua_sess->deleted) {
4969 pthread_mutex_unlock(&ua_sess->lock);
4970 goto end;
4971 }
4972
4973 if (ua_sess->enabled) {
4974 pthread_mutex_unlock(&ua_sess->lock);
4975 goto end;
4976 }
4977
4978 /* Upon restart, skip the setup; it was already done. */
4979 if (ua_sess->started) {
4980 goto skip_setup;
4981 }
4982
4983 health_code_update();
4984
4985 skip_setup:
4986 /* This starts the UST tracing */
4987 pthread_mutex_lock(&app->sock_lock);
4988 ret = ustctl_start_session(app->sock, ua_sess->handle);
4989 pthread_mutex_unlock(&app->sock_lock);
4990 if (ret < 0) {
4991 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4992 ERR("Error starting tracing for app pid: %d (ret: %d)",
4993 app->pid, ret);
4994 } else {
4995 DBG("UST app start session failed. Application is dead.");
4996 /*
4997 * This is normal behavior, an application can die during the
4998 * creation process. Don't report an error so the execution can
4999 * continue normally.
5000 */
5001 pthread_mutex_unlock(&ua_sess->lock);
5002 goto end;
5003 }
5004 goto error_unlock;
5005 }
5006
5007 /* Indicate that the session has been started once */
5008 ua_sess->started = 1;
5009 ua_sess->enabled = 1;
5010
5011 pthread_mutex_unlock(&ua_sess->lock);
5012
5013 health_code_update();
5014
5015 /* Quiescent wait after starting trace */
5016 pthread_mutex_lock(&app->sock_lock);
5017 ret = ustctl_wait_quiescent(app->sock);
5018 pthread_mutex_unlock(&app->sock_lock);
5019 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5020 ERR("UST app wait quiescent failed for app pid %d ret %d",
5021 app->pid, ret);
5022 }
5023
5024 end:
5025 rcu_read_unlock();
5026 health_code_update();
5027 return 0;
5028
5029 error_unlock:
5030 pthread_mutex_unlock(&ua_sess->lock);
5031 rcu_read_unlock();
5032 health_code_update();
5033 return -1;
5034 }
5035
5036 /*
5037 * Stop tracing for a specific UST session and app.
5038 */
5039 static
5040 int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
5041 {
5042 int ret = 0;
5043 struct ust_app_session *ua_sess;
5044 struct ust_registry_session *registry;
5045
5046 DBG("Stopping tracing for ust app pid %d", app->pid);
5047
5048 rcu_read_lock();
5049
5050 if (!app->compatible) {
5051 goto end_no_session;
5052 }
5053
5054 ua_sess = lookup_session_by_app(usess, app);
5055 if (ua_sess == NULL) {
5056 goto end_no_session;
5057 }
5058
5059 pthread_mutex_lock(&ua_sess->lock);
5060
5061 if (ua_sess->deleted) {
5062 pthread_mutex_unlock(&ua_sess->lock);
5063 goto end_no_session;
5064 }
5065
5066 /*
5067 * If started = 0, it means that stop trace has been called for a session
5068 * that was never started. It's possible since we can have a failed start
5069 * from either the application manager thread or the command thread. Simply
5070 * indicate that this is a stop error.
5071 */
5072 if (!ua_sess->started) {
5073 goto error_rcu_unlock;
5074 }
5075
5076 health_code_update();
5077
5078 /* This inhibits UST tracing */
5079 pthread_mutex_lock(&app->sock_lock);
5080 ret = ustctl_stop_session(app->sock, ua_sess->handle);
5081 pthread_mutex_unlock(&app->sock_lock);
5082 if (ret < 0) {
5083 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5084 ERR("Error stopping tracing for app pid: %d (ret: %d)",
5085 app->pid, ret);
5086 } else {
5087 DBG("UST app stop session failed. Application is dead.");
5088 /*
5089 * This is normal behavior, an application can die during the
5090 * creation process. Don't report an error so the execution can
5091 * continue normally.
5092 */
5093 goto end_unlock;
5094 }
5095 goto error_rcu_unlock;
5096 }
5097
5098 health_code_update();
5099 ua_sess->enabled = 0;
5100
5101 /* Quiescent wait after stopping trace */
5102 pthread_mutex_lock(&app->sock_lock);
5103 ret = ustctl_wait_quiescent(app->sock);
5104 pthread_mutex_unlock(&app->sock_lock);
5105 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5106 ERR("UST app wait quiescent failed for app pid %d ret %d",
5107 app->pid, ret);
5108 }
5109
5110 health_code_update();
5111
5112 registry = get_session_registry(ua_sess);
5113
5114 /* The UST app session is held; the registry shall not be NULL. */
5115 assert(registry);
5116
5117 /* Push metadata for application before freeing the application. */
5118 (void) push_metadata(registry, ua_sess->consumer);
5119
5120 end_unlock:
5121 pthread_mutex_unlock(&ua_sess->lock);
5122 end_no_session:
5123 rcu_read_unlock();
5124 health_code_update();
5125 return 0;
5126
5127 error_rcu_unlock:
5128 pthread_mutex_unlock(&ua_sess->lock);
5129 rcu_read_unlock();
5130 health_code_update();
5131 return -1;
5132 }
5133
5134 static
5135 int ust_app_flush_app_session(struct ust_app *app,
5136 struct ust_app_session *ua_sess)
5137 {
5138 int ret, retval = 0;
5139 struct lttng_ht_iter iter;
5140 struct ust_app_channel *ua_chan;
5141 struct consumer_socket *socket;
5142
5143 DBG("Flushing app session buffers for ust app pid %d", app->pid);
5144
5145 rcu_read_lock();
5146
5147 if (!app->compatible) {
5148 goto end_not_compatible;
5149 }
5150
5151 pthread_mutex_lock(&ua_sess->lock);
5152
5153 if (ua_sess->deleted) {
5154 goto end_deleted;
5155 }
5156
5157 health_code_update();
5158
5159 /* Flushing buffers */
5160 socket = consumer_find_socket_by_bitness(app->bits_per_long,
5161 ua_sess->consumer);
5162
5163 /* Flush buffers and push metadata. */
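/*
* Only per-PID buffers are flushed here; per-UID buffers are flushed
* through the buffer registry in ust_app_flush_session().
*/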
5164 switch (ua_sess->buffer_type) {
5165 case LTTNG_BUFFER_PER_PID:
5166 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
5167 node.node) {
5168 health_code_update();
5169 ret = consumer_flush_channel(socket, ua_chan->key);
5170 if (ret) {
5171 ERR("Error flushing consumer channel");
5172 retval = -1;
5173 continue;
5174 }
5175 }
5176 break;
5177 case LTTNG_BUFFER_PER_UID:
5178 default:
5179 assert(0);
5180 break;
5181 }
5182
5183 health_code_update();
5184
5185 end_deleted:
5186 pthread_mutex_unlock(&ua_sess->lock);
5187
5188 end_not_compatible:
5189 rcu_read_unlock();
5190 health_code_update();
5191 return retval;
5192 }
5193
5194 /*
5195 * Flush buffers for all applications for a specific UST session.
5196 * Called with UST session lock held.
5197 */
5198 static
5199 int ust_app_flush_session(struct ltt_ust_session *usess)
5200
5201 {
5202 int ret = 0;
5203
5204 DBG("Flushing session buffers for all ust apps");
5205
5206 rcu_read_lock();
5207
5208 /* Flush buffers and push metadata. */
5209 switch (usess->buffer_type) {
5210 case LTTNG_BUFFER_PER_UID:
5211 {
5212 struct buffer_reg_uid *reg;
5213 struct lttng_ht_iter iter;
5214
5215 /* Flush all per UID buffers associated to that session. */
5216 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
5217 struct ust_registry_session *ust_session_reg;
5218 struct buffer_reg_channel *buf_reg_chan;
5219 struct consumer_socket *socket;
5220
5221 /* Get consumer socket to use to push the metadata. */
5222 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
5223 usess->consumer);
5224 if (!socket) {
5225 /* Ignore request if no consumer is found for the session. */
5226 continue;
5227 }
5228
5229 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
5230 buf_reg_chan, node.node) {
5231 /*
5232 * The following call will print error values so the return
5233 * code is of little importance because whatever happens, we
5234 * have to try them all.
5235 */
5236 (void) consumer_flush_channel(socket, buf_reg_chan->consumer_key);
5237 }
5238
5239 ust_session_reg = reg->registry->reg.ust;
5240 /* Push metadata. */
5241 (void) push_metadata(ust_session_reg, usess->consumer);
5242 }
5243 break;
5244 }
5245 case LTTNG_BUFFER_PER_PID:
5246 {
5247 struct ust_app_session *ua_sess;
5248 struct lttng_ht_iter iter;
5249 struct ust_app *app;
5250
5251 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5252 ua_sess = lookup_session_by_app(usess, app);
5253 if (ua_sess == NULL) {
5254 continue;
5255 }
5256 (void) ust_app_flush_app_session(app, ua_sess);
5257 }
5258 break;
5259 }
5260 default:
5261 ret = -1;
5262 assert(0);
5263 break;
5264 }
5265
5266 rcu_read_unlock();
5267 health_code_update();
5268 return ret;
5269 }
5270
5271 static
5272 int ust_app_clear_quiescent_app_session(struct ust_app *app,
5273 struct ust_app_session *ua_sess)
5274 {
5275 int ret = 0;
5276 struct lttng_ht_iter iter;
5277 struct ust_app_channel *ua_chan;
5278 struct consumer_socket *socket;
5279
5280 DBG("Clearing stream quiescent state for ust app pid %d", app->pid);
5281
5282 rcu_read_lock();
5283
5284 if (!app->compatible) {
5285 goto end_not_compatible;
5286 }
5287
5288 pthread_mutex_lock(&ua_sess->lock);
5289
5290 if (ua_sess->deleted) {
5291 goto end_unlock;
5292 }
5293
5294 health_code_update();
5295
5296 socket = consumer_find_socket_by_bitness(app->bits_per_long,
5297 ua_sess->consumer);
5298 if (!socket) {
5299 ERR("Failed to find consumer (%" PRIu32 ") socket",
5300 app->bits_per_long);
5301 ret = -1;
5302 goto end_unlock;
5303 }
5304
5305 /* Clear quiescent state. */
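/*
* As with flushing, only per-PID buffers are handled here; per-UID
* buffers are cleared through the buffer registry in
* ust_app_clear_quiescent_session().
*/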
5306 switch (ua_sess->buffer_type) {
5307 case LTTNG_BUFFER_PER_PID:
5308 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter,
5309 ua_chan, node.node) {
5310 health_code_update();
5311 ret = consumer_clear_quiescent_channel(socket,
5312 ua_chan->key);
5313 if (ret) {
5314 ERR("Error clearing quiescent state for consumer channel");
5315 ret = -1;
5316 continue;
5317 }
5318 }
5319 break;
5320 case LTTNG_BUFFER_PER_UID:
5321 default:
5322 assert(0);
5323 ret = -1;
5324 break;
5325 }
5326
5327 health_code_update();
5328
5329 end_unlock:
5330 pthread_mutex_unlock(&ua_sess->lock);
5331
5332 end_not_compatible:
5333 rcu_read_unlock();
5334 health_code_update();
5335 return ret;
5336 }
5337
5338 /*
5339 * Clear quiescent state in each stream for all applications for a
5340 * specific UST session.
5341 * Called with UST session lock held.
5342 */
5343 static
5344 int ust_app_clear_quiescent_session(struct ltt_ust_session *usess)
5345
5346 {
5347 int ret = 0;
5348
5349 DBG("Clearing stream quiescent state for all ust apps");
5350
5351 rcu_read_lock();
5352
5353 switch (usess->buffer_type) {
5354 case LTTNG_BUFFER_PER_UID:
5355 {
5356 struct lttng_ht_iter iter;
5357 struct buffer_reg_uid *reg;
5358
5359 /*
5360 * Clear quiescent for all per UID buffers associated to
5361 * that session.
5362 */
5363 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
5364 struct consumer_socket *socket;
5365 struct buffer_reg_channel *buf_reg_chan;
5366
5367 /* Get associated consumer socket. */
5368 socket = consumer_find_socket_by_bitness(
5369 reg->bits_per_long, usess->consumer);
5370 if (!socket) {
5371 /*
5372 * Ignore request if no consumer is found for
5373 * the session.
5374 */
5375 continue;
5376 }
5377
5378 cds_lfht_for_each_entry(reg->registry->channels->ht,
5379 &iter.iter, buf_reg_chan, node.node) {
5380 /*
5381 * The following call will print error values so
5382 * the return code is of little importance
5383 * because whatever happens, we have to try them
5384 * all.
5385 */
5386 (void) consumer_clear_quiescent_channel(socket,
5387 buf_reg_chan->consumer_key);
5388 }
5389 }
5390 break;
5391 }
5392 case LTTNG_BUFFER_PER_PID:
5393 {
5394 struct ust_app_session *ua_sess;
5395 struct lttng_ht_iter iter;
5396 struct ust_app *app;
5397
5398 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app,
5399 pid_n.node) {
5400 ua_sess = lookup_session_by_app(usess, app);
5401 if (ua_sess == NULL) {
5402 continue;
5403 }
5404 (void) ust_app_clear_quiescent_app_session(app,
5405 ua_sess);
5406 }
5407 break;
5408 }
5409 default:
5410 ret = -1;
5411 assert(0);
5412 break;
5413 }
5414
5415 rcu_read_unlock();
5416 health_code_update();
5417 return ret;
5418 }
5419
5420 /*
5421 * Destroy a specific UST session in apps.
5422 */
5423 static int destroy_trace(struct ltt_ust_session *usess, struct ust_app *app)
5424 {
5425 int ret;
5426 struct ust_app_session *ua_sess;
5427 struct lttng_ht_iter iter;
5428 struct lttng_ht_node_u64 *node;
5429
5430 DBG("Destroy tracing for ust app pid %d", app->pid);
5431
5432 rcu_read_lock();
5433
5434 if (!app->compatible) {
5435 goto end;
5436 }
5437
5438 __lookup_session_by_app(usess, app, &iter);
5439 node = lttng_ht_iter_get_node_u64(&iter);
5440 if (node == NULL) {
5441 /* Session is being deleted or is already deleted. */
5442 goto end;
5443 }
5444 ua_sess = caa_container_of(node, struct ust_app_session, node);
5445
5446 health_code_update();
5447 destroy_app_session(app, ua_sess);
5448
5449 health_code_update();
5450
5451 /* Quiescent wait after stopping trace */
5452 pthread_mutex_lock(&app->sock_lock);
5453 ret = ustctl_wait_quiescent(app->sock);
5454 pthread_mutex_unlock(&app->sock_lock);
5455 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5456 ERR("UST app wait quiescent failed for app pid %d ret %d",
5457 app->pid, ret);
5458 }
5459 end:
5460 rcu_read_unlock();
5461 health_code_update();
5462 return 0;
5463 }
5464
5465 /*
5466 * Start tracing for the UST session.
5467 */
5468 int ust_app_start_trace_all(struct ltt_ust_session *usess)
5469 {
5470 struct lttng_ht_iter iter;
5471 struct ust_app *app;
5472
5473 DBG("Starting all UST traces");
5474
5475 /*
5476 * Even though the start trace might fail, flag this session active so
5477 * other applications coming in are started by default.
5478 */
5479 usess->active = 1;
5480
5481 rcu_read_lock();
5482
5483 /*
5484 * In a start-stop-start use-case, we need to clear the quiescent state
5485 * of each channel set by the prior stop command, thus ensuring that a
5486 * following stop or destroy is sure to grab a timestamp_end near those
5487 * operations, even if the packet is empty.
5488 */
5489 (void) ust_app_clear_quiescent_session(usess);
5490
5491 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5492 ust_app_global_update(usess, app);
5493 }
5494
5495 rcu_read_unlock();
5496
5497 return 0;
5498 }
5499
5500 /*
5501 * Stop tracing for the UST session.
5502 * Called with UST session lock held.
5503 */
5504 int ust_app_stop_trace_all(struct ltt_ust_session *usess)
5505 {
5506 int ret = 0;
5507 struct lttng_ht_iter iter;
5508 struct ust_app *app;
5509
5510 DBG("Stopping all UST traces");
5511
5512 /*
5513 * Even though the stop trace might fail, flag this session inactive so
5514 * other applications coming in are not started by default.
5515 */
5516 usess->active = 0;
5517
5518 rcu_read_lock();
5519
5520 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5521 ret = ust_app_stop_trace(usess, app);
5522 if (ret < 0) {
5523 /* Continue to the next app even on error */
5524 continue;
5525 }
5526 }
5527
5528 (void) ust_app_flush_session(usess);
5529
5530 rcu_read_unlock();
5531
5532 return 0;
5533 }
5534
5535 /*
5536 * Destroy app UST session.
5537 */
5538 int ust_app_destroy_trace_all(struct ltt_ust_session *usess)
5539 {
5540 int ret = 0;
5541 struct lttng_ht_iter iter;
5542 struct ust_app *app;
5543
5544 DBG("Destroy all UST traces");
5545
5546 rcu_read_lock();
5547
5548 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5549 ret = destroy_trace(usess, app);
5550 if (ret < 0) {
5551 /* Continue to the next app even on error */
5552 continue;
5553 }
5554 }
5555
5556 rcu_read_unlock();
5557
5558 return 0;
5559 }
5560
5561 /* The ua_sess lock must be held by the caller. */
5562 static
5563 int find_or_create_ust_app_channel(
5564 struct ltt_ust_session *usess,
5565 struct ust_app_session *ua_sess,
5566 struct ust_app *app,
5567 struct ltt_ust_channel *uchan,
5568 struct ust_app_channel **ua_chan)
5569 {
5570 int ret = 0;
5571 struct lttng_ht_iter iter;
5572 struct lttng_ht_node_str *ua_chan_node;
5573
5574 lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &iter);
5575 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
5576 if (ua_chan_node) {
5577 *ua_chan = caa_container_of(ua_chan_node,
5578 struct ust_app_channel, node);
5579 goto end;
5580 }
5581
5582 ret = ust_app_channel_create(usess, ua_sess, uchan, app, ua_chan);
5583 if (ret) {
5584 goto end;
5585 }
5586 end:
5587 return ret;
5588 }
5589
5590 static
5591 int ust_app_channel_synchronize_event(struct ust_app_channel *ua_chan,
5592 struct ltt_ust_event *uevent, struct ust_app_session *ua_sess,
5593 struct ust_app *app)
5594 {
5595 int ret = 0;
5596 struct ust_app_event *ua_event = NULL;
5597
5598 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
5599 uevent->filter, uevent->attr.loglevel, uevent->exclusion);
5600 if (!ua_event) {
5601 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
5602 if (ret < 0) {
5603 goto end;
5604 }
5605 } else {
5606 if (ua_event->enabled != uevent->enabled) {
5607 ret = uevent->enabled ?
5608 enable_ust_app_event(ua_sess, ua_event, app) :
5609 disable_ust_app_event(ua_sess, ua_event, app);
5610 }
5611 }
5612
5613 end:
5614 return ret;
5615 }
5616
5617 /* Called with RCU read-side lock held. */
5618 static
5619 void ust_app_synchronize_event_notifier_rules(struct ust_app *app)
5620 {
5621 int ret = 0;
5622 enum lttng_error_code ret_code;
5623 enum lttng_trigger_status t_status;
5624 struct lttng_ht_iter app_trigger_iter;
5625 struct lttng_triggers *triggers = NULL;
5626 struct ust_app_event_notifier_rule *event_notifier_rule;
5627 unsigned int count, i;
5628
5629 /*
5630 * Currently, registering or unregistering a trigger with an
5631 * event rule condition causes a full synchronization of the event
5632 * notifiers.
5633 *
5634 * The first step attempts to add an event notifier for all registered
5635 * triggers that apply to the user space tracers. Then, the
5636 * application's event notifiers rules are all checked against the list
5637 * of registered triggers. Any event notifier that doesn't have a
5638 * matching trigger can be assumed to have been disabled.
5639 *
5640 * All of this is inefficient, but is put in place to get the feature
5641 * rolling as it is simpler at this moment. It will be optimized Soon™
5642 * to allow the state of enabled event notifiers to be synchronized in
5643 * a piece-wise way.
5644 */
5645
5646 /* Get all triggers using uid 0 (root) */
5647 ret_code = notification_thread_command_list_triggers(
5648 notification_thread_handle, 0, &triggers);
5649 if (ret_code != LTTNG_OK) {
5650 ret = -1;
5651 goto end;
5652 }
5653
5654 assert(triggers);
5655
5656 t_status = lttng_triggers_get_count(triggers, &count);
5657 if (t_status != LTTNG_TRIGGER_STATUS_OK) {
5658 ret = -1;
5659 goto end;
5660 }
5661
5662 for (i = 0; i < count; i++) {
5663 struct lttng_condition *condition;
5664 struct lttng_event_rule *event_rule;
5665 struct lttng_trigger *trigger;
5666 const struct ust_app_event_notifier_rule *looked_up_event_notifier_rule;
5667 enum lttng_condition_status condition_status;
5668 uint64_t token;
5669
5670 trigger = lttng_triggers_borrow_mutable_at_index(triggers, i);
5671 assert(trigger);
5672
5673 token = lttng_trigger_get_tracer_token(trigger);
5674 condition = lttng_trigger_get_condition(trigger);
5675
5676 if (lttng_condition_get_type(condition) != LTTNG_CONDITION_TYPE_ON_EVENT) {
5677 /* Does not apply */
5678 continue;
5679 }
5680
5681 condition_status = lttng_condition_on_event_borrow_rule_mutable(condition, &event_rule);
5682 assert(condition_status == LTTNG_CONDITION_STATUS_OK);
5683
5684 if (lttng_event_rule_get_domain_type(event_rule) == LTTNG_DOMAIN_KERNEL) {
5685 /* Skip kernel related triggers. */
5686 continue;
5687 }
5688
5689 /*
5690 * Find or create the associated token event rule. The caller
5691 * holds the RCU read lock, so this is safe to call without
5692 * explicitly acquiring it here.
5693 */
5694 looked_up_event_notifier_rule = find_ust_app_event_notifier_rule(
5695 app->token_to_event_notifier_rule_ht, token);
5696 if (!looked_up_event_notifier_rule) {
5697 ret = create_ust_app_event_notifier_rule(trigger, app);
5698 if (ret < 0) {
5699 goto end;
5700 }
5701 }
5702 }
5703
5704 rcu_read_lock();
5705 /* Remove all unknown event sources from the app. */
5706 cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht,
5707 &app_trigger_iter.iter, event_notifier_rule,
5708 node.node) {
5709 const uint64_t app_token = event_notifier_rule->token;
5710 bool found = false;
5711
5712 /*
5713 * Check if the app event trigger still exists on the
5714 * notification side.
5715 */
5716 for (i = 0; i < count; i++) {
5717 uint64_t notification_thread_token;
5718 const struct lttng_trigger *trigger =
5719 lttng_triggers_get_at_index(
5720 triggers, i);
5721
5722 assert(trigger);
5723
5724 notification_thread_token =
5725 lttng_trigger_get_tracer_token(trigger);
5726
5727 if (notification_thread_token == app_token) {
5728 found = true;
5729 break;
5730 }
5731 }
5732
5733 if (found) {
5734 /* Still valid. */
5735 continue;
5736 }
5737
5738 /*
5739 * This trigger was unregistered, disable it on the tracer's
5740 * side.
5741 */
5742 ret = lttng_ht_del(app->token_to_event_notifier_rule_ht,
5743 &app_trigger_iter);
5744 assert(ret == 0);
5745
5746 /* Callee logs errors. */
5747 (void) disable_ust_object(app, event_notifier_rule->obj);
5748
5749 delete_ust_app_event_notifier_rule(
5750 app->sock, event_notifier_rule, app);
5751 }
5752
5753 rcu_read_unlock();
5754
5755 end:
5756 lttng_triggers_destroy(triggers);
5757 return;
5758 }
5759
5760 /*
5761 * RCU read lock must be held by the caller.
5762 */
5763 static
5764 void ust_app_synchronize_all_channels(struct ltt_ust_session *usess,
5765 struct ust_app_session *ua_sess,
5766 struct ust_app *app)
5767 {
5768 int ret = 0;
5769 struct cds_lfht_iter uchan_iter;
5770 struct ltt_ust_channel *uchan;
5771
5772 assert(usess);
5773 assert(ua_sess);
5774 assert(app);
5775
5776 cds_lfht_for_each_entry(usess->domain_global.channels->ht, &uchan_iter,
5777 uchan, node.node) {
5778 struct ust_app_channel *ua_chan;
5779 struct cds_lfht_iter uevent_iter;
5780 struct ltt_ust_event *uevent;
5781
5782 /*
5783 * Search for a matching ust_app_channel. If none is found,
5784 * create it. Creating the channel will cause the ua_chan
5785 * structure to be allocated, the channel buffers to be
5786 * allocated (if necessary) and sent to the application, and
5787 * all enabled contexts will be added to the channel.
5788 */
5789 ret = find_or_create_ust_app_channel(usess, ua_sess,
5790 app, uchan, &ua_chan);
5791 if (ret) {
5792 /* Tracer is probably gone or ENOMEM. */
5793 goto end;
5794 }
5795
5796 if (!ua_chan) {
5797 /* ua_chan will be NULL for the metadata channel */
5798 continue;
5799 }
5800
5801 cds_lfht_for_each_entry(uchan->events->ht, &uevent_iter, uevent,
5802 node.node) {
5803 ret = ust_app_channel_synchronize_event(ua_chan,
5804 uevent, ua_sess, app);
5805 if (ret) {
5806 goto end;
5807 }
5808 }
5809
5810 if (ua_chan->enabled != uchan->enabled) {
5811 ret = uchan->enabled ?
5812 enable_ust_app_channel(ua_sess, uchan, app) :
5813 disable_ust_app_channel(ua_sess, ua_chan, app);
5814 if (ret) {
5815 goto end;
5816 }
5817 }
5818 }
5819 end:
5820 return;
5821 }
5822
5823 /*
5824 * The caller must ensure that the application is compatible and is tracked
5825 * by the process attribute trackers.
5826 */
5827 static
5828 void ust_app_synchronize(struct ltt_ust_session *usess,
5829 struct ust_app *app)
5830 {
5831 int ret = 0;
5832 struct ust_app_session *ua_sess = NULL;
5833
5834 /*
5835 * The application's configuration should only be synchronized for
5836 * active sessions.
5837 */
5838 assert(usess->active);
5839
5840 ret = find_or_create_ust_app_session(usess, app, &ua_sess, NULL);
5841 if (ret < 0) {
5842 /* Tracer is probably gone or ENOMEM. */
5843 goto error;
5844 }
5845 assert(ua_sess);
5846
5847 pthread_mutex_lock(&ua_sess->lock);
5848 if (ua_sess->deleted) {
5849 pthread_mutex_unlock(&ua_sess->lock);
5850 goto end;
5851 }
5852
5853 rcu_read_lock();
5854
5855 ust_app_synchronize_all_channels(usess, ua_sess, app);
5856
5857 /*
5858	 * Create the metadata for the application. This returns gracefully if
5859	 * metadata was already set for the session.
5860 *
5861 * The metadata channel must be created after the data channels as the
5862 * consumer daemon assumes this ordering. When interacting with a relay
5863 * daemon, the consumer will use this assumption to send the
5864 * "STREAMS_SENT" message to the relay daemon.
5865 */
5866 ret = create_ust_app_metadata(ua_sess, app, usess->consumer);
5867 if (ret < 0) {
5868 goto error_unlock;
5869 }
5870
5871 rcu_read_unlock();
5872
5873 end:
5874 pthread_mutex_unlock(&ua_sess->lock);
5875 /* Everything went well at this point. */
5876 return;
5877
5878 error_unlock:
5879 rcu_read_unlock();
5880 pthread_mutex_unlock(&ua_sess->lock);
5881 error:
5882 if (ua_sess) {
5883 destroy_app_session(app, ua_sess);
5884 }
5885 return;
5886 }
5887
5888 static
5889 void ust_app_global_destroy(struct ltt_ust_session *usess, struct ust_app *app)
5890 {
5891 struct ust_app_session *ua_sess;
5892
5893 ua_sess = lookup_session_by_app(usess, app);
5894 if (ua_sess == NULL) {
5895 return;
5896 }
5897 destroy_app_session(app, ua_sess);
5898 }
5899
5900 /*
5901	 * Add channels/events from the UST global domain to the registered app.
5902 *
5903 * Called with session lock held.
5904 * Called with RCU read-side lock held.
5905 */
5906 void ust_app_global_update(struct ltt_ust_session *usess, struct ust_app *app)
5907 {
5908 assert(usess);
5909 assert(usess->active);
5910
5911 DBG2("UST app global update for app sock %d for session id %" PRIu64,
5912 app->sock, usess->id);
5913
5914 if (!app->compatible) {
5915 return;
5916 }
5917 if (trace_ust_id_tracker_lookup(LTTNG_PROCESS_ATTR_VIRTUAL_PROCESS_ID,
5918 usess, app->pid) &&
5919 trace_ust_id_tracker_lookup(
5920 LTTNG_PROCESS_ATTR_VIRTUAL_USER_ID,
5921 usess, app->uid) &&
5922 trace_ust_id_tracker_lookup(
5923 LTTNG_PROCESS_ATTR_VIRTUAL_GROUP_ID,
5924 usess, app->gid)) {
5925 /*
5926 * Synchronize the application's internal tracing configuration
5927 * and start tracing.
5928 */
5929 ust_app_synchronize(usess, app);
5930 ust_app_start_trace(usess, app);
5931 } else {
5932 ust_app_global_destroy(usess, app);
5933 }
5934 }
5935
5936 /*
5937 * Add all event notifiers to an application.
5938 *
5939 * Called with session lock held.
5940 * Called with RCU read-side lock held.
5941 */
5942 void ust_app_global_update_event_notifier_rules(struct ust_app *app)
5943 {
5944 DBG2("UST application global event notifier rules update: app = '%s' (ppid: %d)",
5945 app->name, app->ppid);
5946
5947 if (!app->compatible) {
5948 return;
5949 }
5950
5951 if (app->event_notifier_group.object == NULL) {
5952 WARN("UST app global update of event notifiers for app skipped since communication handle is null: app = '%s' (ppid: %d)",
5953 app->name, app->ppid);
5954 return;
5955 }
5956
5957 ust_app_synchronize_event_notifier_rules(app);
5958 }
5959
5960 /*
5961 * Called with session lock held.
5962 */
5963 void ust_app_global_update_all(struct ltt_ust_session *usess)
5964 {
5965 struct lttng_ht_iter iter;
5966 struct ust_app *app;
5967
5968 rcu_read_lock();
5969 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5970 ust_app_global_update(usess, app);
5971 }
5972 rcu_read_unlock();
5973 }
5974
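/*
 * Synchronize the event notifier rules of all registered applications.
 */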
5975 void ust_app_global_update_all_event_notifier_rules(void)
5976 {
5977 struct lttng_ht_iter iter;
5978 struct ust_app *app;
5979
5980 rcu_read_lock();
5981 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5982 ust_app_global_update_event_notifier_rules(app);
5983 }
5984
5985 rcu_read_unlock();
5986 }
5987
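/*
 * Refresh the error count of a trigger's on-event condition from the
 * event notifier error accounting counters.
 */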
5988 void ust_app_update_event_notifier_error_count(struct lttng_trigger *trigger)
5989 {
5990 uint64_t error_count = 0;
5991 enum event_notifier_error_accounting_status status;
5992 struct lttng_condition *condition = lttng_trigger_get_condition(trigger);
5993
5994 status = event_notifier_error_accounting_get_count(trigger, &error_count);
5995 if (status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
5996 ERR("Error getting trigger error count.");
5997 }
5998
5999 lttng_condition_on_event_set_error_count(condition, error_count);
6000 }
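/*
 * Usage sketch (illustrative only; the real call sites live outside of
 * this file): a caller holding a list of triggers could refresh the
 * error count of each one before reporting them, using the same
 * iteration helpers as ust_app_synchronize_event_notifier_rules():
 *
 *	unsigned int i, count;
 *
 *	if (lttng_triggers_get_count(triggers, &count) == LTTNG_TRIGGER_STATUS_OK) {
 *		for (i = 0; i < count; i++) {
 *			struct lttng_trigger *trigger =
 *				lttng_triggers_borrow_mutable_at_index(triggers, i);
 *
 *			ust_app_update_event_notifier_error_count(trigger);
 *		}
 *	}
 */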
6001
6002 /*
6003 * Add context to a specific channel for global UST domain.
6004 */
6005 int ust_app_add_ctx_channel_glb(struct ltt_ust_session *usess,
6006 struct ltt_ust_channel *uchan, struct ltt_ust_context *uctx)
6007 {
6008 int ret = 0;
6009 struct lttng_ht_node_str *ua_chan_node;
6010 struct lttng_ht_iter iter, uiter;
6011 struct ust_app_channel *ua_chan = NULL;
6012 struct ust_app_session *ua_sess;
6013 struct ust_app *app;
6014
6015 assert(usess->active);
6016
6017 rcu_read_lock();
6018 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6019 if (!app->compatible) {
6020 /*
6021		 * TODO: In time, we should notify the caller of this error by
6022		 * telling them that this is a version error.
6023 */
6024 continue;
6025 }
6026 ua_sess = lookup_session_by_app(usess, app);
6027 if (ua_sess == NULL) {
6028 continue;
6029 }
6030
6031 pthread_mutex_lock(&ua_sess->lock);
6032
6033 if (ua_sess->deleted) {
6034 pthread_mutex_unlock(&ua_sess->lock);
6035 continue;
6036 }
6037
6038 /* Lookup channel in the ust app session */
6039 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
6040 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
6041 if (ua_chan_node == NULL) {
6042 goto next_app;
6043 }
6044 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel,
6045 node);
6046 ret = create_ust_app_channel_context(ua_chan, &uctx->ctx, app);
6047 if (ret < 0) {
6048 goto next_app;
6049 }
6050 next_app:
6051 pthread_mutex_unlock(&ua_sess->lock);
6052 }
6053
6054 rcu_read_unlock();
6055 return ret;
6056 }
6057
6058 /*
6059 * Receive registration and populate the given msg structure.
6060 *
6061 * On success return 0 else a negative value returned by the ustctl call.
6062 */
6063 int ust_app_recv_registration(int sock, struct ust_register_msg *msg)
6064 {
6065 int ret;
6066 uint32_t pid, ppid, uid, gid;
6067
6068 assert(msg);
6069
6070 ret = ustctl_recv_reg_msg(sock, &msg->type, &msg->major, &msg->minor,
6071 &pid, &ppid, &uid, &gid,
6072 &msg->bits_per_long,
6073 &msg->uint8_t_alignment,
6074 &msg->uint16_t_alignment,
6075 &msg->uint32_t_alignment,
6076 &msg->uint64_t_alignment,
6077 &msg->long_alignment,
6078 &msg->byte_order,
6079 msg->name);
6080 if (ret < 0) {
6081 switch (-ret) {
6082 case EPIPE:
6083 case ECONNRESET:
6084 case LTTNG_UST_ERR_EXITING:
6085 DBG3("UST app recv reg message failed. Application died");
6086 break;
6087 case LTTNG_UST_ERR_UNSUP_MAJOR:
6088 ERR("UST app recv reg unsupported version %d.%d. Supporting %d.%d",
6089 msg->major, msg->minor, LTTNG_UST_ABI_MAJOR_VERSION,
6090 LTTNG_UST_ABI_MINOR_VERSION);
6091 break;
6092 default:
6093 ERR("UST app recv reg message failed with ret %d", ret);
6094 break;
6095 }
6096 goto error;
6097 }
6098 msg->pid = (pid_t) pid;
6099 msg->ppid = (pid_t) ppid;
6100 msg->uid = (uid_t) uid;
6101 msg->gid = (gid_t) gid;
6102
6103 error:
6104 return ret;
6105 }
6106
6107 /*
6108 * Return a ust app session object using the application object and the
6109	 * session object descriptor as a key. If not found, NULL is returned.
6110	 * An RCU read-side lock MUST be acquired when calling this function.
6111 */
6112 static struct ust_app_session *find_session_by_objd(struct ust_app *app,
6113 int objd)
6114 {
6115 struct lttng_ht_node_ulong *node;
6116 struct lttng_ht_iter iter;
6117 struct ust_app_session *ua_sess = NULL;
6118
6119 assert(app);
6120
6121 lttng_ht_lookup(app->ust_sessions_objd, (void *)((unsigned long) objd), &iter);
6122 node = lttng_ht_iter_get_node_ulong(&iter);
6123 if (node == NULL) {
6124 DBG2("UST app session find by objd %d not found", objd);
6125 goto error;
6126 }
6127
6128 ua_sess = caa_container_of(node, struct ust_app_session, ust_objd_node);
6129
6130 error:
6131 return ua_sess;
6132 }
6133
6134 /*
6135 * Return a ust app channel object using the application object and the channel
6136	 * object descriptor as a key. If not found, NULL is returned. An RCU
6137	 * read-side lock MUST be acquired before calling this function.
6138 */
6139 static struct ust_app_channel *find_channel_by_objd(struct ust_app *app,
6140 int objd)
6141 {
6142 struct lttng_ht_node_ulong *node;
6143 struct lttng_ht_iter iter;
6144 struct ust_app_channel *ua_chan = NULL;
6145
6146 assert(app);
6147
6148 lttng_ht_lookup(app->ust_objd, (void *)((unsigned long) objd), &iter);
6149 node = lttng_ht_iter_get_node_ulong(&iter);
6150 if (node == NULL) {
6151 DBG2("UST app channel find by objd %d not found", objd);
6152 goto error;
6153 }
6154
6155 ua_chan = caa_container_of(node, struct ust_app_channel, ust_objd_node);
6156
6157 error:
6158 return ua_chan;
6159 }
6160
6161 /*
6162 * Reply to a register channel notification from an application on the notify
6163 * socket. The channel metadata is also created.
6164 *
6165 * The session UST registry lock is acquired in this function.
6166 *
6167 * On success 0 is returned else a negative value.
6168 */
6169 static int reply_ust_register_channel(int sock, int cobjd,
6170 size_t nr_fields, struct ustctl_field *fields)
6171 {
6172 int ret, ret_code = 0;
6173 uint32_t chan_id;
6174 uint64_t chan_reg_key;
6175 enum ustctl_channel_header type;
6176 struct ust_app *app;
6177 struct ust_app_channel *ua_chan;
6178 struct ust_app_session *ua_sess;
6179 struct ust_registry_session *registry;
6180 struct ust_registry_channel *ust_reg_chan;
6181
6182 rcu_read_lock();
6183
6184 /* Lookup application. If not found, there is a code flow error. */
6185 app = find_app_by_notify_sock(sock);
6186 if (!app) {
6187 DBG("Application socket %d is being torn down. Abort event notify",
6188 sock);
6189 ret = 0;
6190 goto error_rcu_unlock;
6191 }
6192
6193 /* Lookup channel by UST object descriptor. */
6194 ua_chan = find_channel_by_objd(app, cobjd);
6195 if (!ua_chan) {
6196 DBG("Application channel is being torn down. Abort event notify");
6197 ret = 0;
6198 goto error_rcu_unlock;
6199 }
6200
6201 assert(ua_chan->session);
6202 ua_sess = ua_chan->session;
6203
6204 /* Get right session registry depending on the session buffer type. */
6205 registry = get_session_registry(ua_sess);
6206 if (!registry) {
6207 DBG("Application session is being torn down. Abort event notify");
6208 ret = 0;
6209 goto error_rcu_unlock;
6210	}
6211
6212 /* Depending on the buffer type, a different channel key is used. */
6213 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
6214 chan_reg_key = ua_chan->tracing_channel_id;
6215 } else {
6216 chan_reg_key = ua_chan->key;
6217 }
6218
6219 pthread_mutex_lock(&registry->lock);
6220
6221 ust_reg_chan = ust_registry_channel_find(registry, chan_reg_key);
6222 assert(ust_reg_chan);
6223
6224 if (!ust_reg_chan->register_done) {
6225 /*
6226 * TODO: eventually use the registry event count for
6227 * this channel to better guess header type for per-pid
6228 * buffers.
6229 */
6230 type = USTCTL_CHANNEL_HEADER_LARGE;
6231 ust_reg_chan->nr_ctx_fields = nr_fields;
6232 ust_reg_chan->ctx_fields = fields;
6233 fields = NULL;
6234 ust_reg_chan->header_type = type;
6235 } else {
6236 /* Get current already assigned values. */
6237 type = ust_reg_chan->header_type;
6238 }
6239 /* Channel id is set during the object creation. */
6240 chan_id = ust_reg_chan->chan_id;
6241
6242 /* Append to metadata */
6243 if (!ust_reg_chan->metadata_dumped) {
6244 ret_code = ust_metadata_channel_statedump(registry, ust_reg_chan);
6245 if (ret_code) {
6246 ERR("Error appending channel metadata (errno = %d)", ret_code);
6247 goto reply;
6248 }
6249 }
6250
6251 reply:
6252 DBG3("UST app replying to register channel key %" PRIu64
6253 " with id %u, type: %d, ret: %d", chan_reg_key, chan_id, type,
6254 ret_code);
6255
6256 ret = ustctl_reply_register_channel(sock, chan_id, type, ret_code);
6257 if (ret < 0) {
6258 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
6259 ERR("UST app reply channel failed with ret %d", ret);
6260 } else {
6261 DBG3("UST app reply channel failed. Application died");
6262 }
6263 goto error;
6264 }
6265
6266 /* This channel registry registration is completed. */
6267 ust_reg_chan->register_done = 1;
6268
6269 error:
6270 pthread_mutex_unlock(&registry->lock);
6271 error_rcu_unlock:
6272 rcu_read_unlock();
6273 free(fields);
6274 return ret;
6275 }
6276
6277 /*
6278 * Add event to the UST channel registry. When the event is added to the
6279 * registry, the metadata is also created. Once done, this replies to the
6280 * application with the appropriate error code.
6281 *
6282 * The session UST registry lock is acquired in the function.
6283 *
6284	 * On success, 0 is returned; otherwise, a negative value.
6285 */
6286 static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
6287 char *sig, size_t nr_fields, struct ustctl_field *fields,
6288 int loglevel_value, char *model_emf_uri)
6289 {
6290 int ret, ret_code;
6291 uint32_t event_id = 0;
6292 uint64_t chan_reg_key;
6293 struct ust_app *app;
6294 struct ust_app_channel *ua_chan;
6295 struct ust_app_session *ua_sess;
6296 struct ust_registry_session *registry;
6297
6298 rcu_read_lock();
6299
6300 /* Lookup application. If not found, there is a code flow error. */
6301 app = find_app_by_notify_sock(sock);
6302 if (!app) {
6303 DBG("Application socket %d is being torn down. Abort event notify",
6304 sock);
6305 ret = 0;
6306 goto error_rcu_unlock;
6307 }
6308
6309 /* Lookup channel by UST object descriptor. */
6310 ua_chan = find_channel_by_objd(app, cobjd);
6311 if (!ua_chan) {
6312 DBG("Application channel is being torn down. Abort event notify");
6313 ret = 0;
6314 goto error_rcu_unlock;
6315 }
6316
6317 assert(ua_chan->session);
6318 ua_sess = ua_chan->session;
6319
6320 registry = get_session_registry(ua_sess);
6321 if (!registry) {
6322 DBG("Application session is being torn down. Abort event notify");
6323 ret = 0;
6324 goto error_rcu_unlock;
6325 }
6326
6327 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
6328 chan_reg_key = ua_chan->tracing_channel_id;
6329 } else {
6330 chan_reg_key = ua_chan->key;
6331 }
6332
6333 pthread_mutex_lock(&registry->lock);
6334
6335 /*
6336 * From this point on, this call acquires the ownership of the sig, fields
6337	 * and model_emf_uri, meaning that they are freed inside it if needed. These
6338	 * three variables MUST NOT be read or written after this.
6339 */
6340 ret_code = ust_registry_create_event(registry, chan_reg_key,
6341 sobjd, cobjd, name, sig, nr_fields, fields,
6342 loglevel_value, model_emf_uri, ua_sess->buffer_type,
6343 &event_id, app);
6344 sig = NULL;
6345 fields = NULL;
6346 model_emf_uri = NULL;
6347
6348 /*
6349	 * The return code is sent to the application via ustctl so that, in
6350	 * case of an error, it can be notified. It is important not to
6351	 * return a negative error, or else the application will get closed.
6352 */
6353 ret = ustctl_reply_register_event(sock, event_id, ret_code);
6354 if (ret < 0) {
6355 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
6356 ERR("UST app reply event failed with ret %d", ret);
6357 } else {
6358 DBG3("UST app reply event failed. Application died");
6359 }
6360 /*
6361		 * No need to wipe the created event since the application socket will
6362		 * be closed on error, cleaning up everything by itself.
6363 */
6364 goto error;
6365 }
6366
6367 DBG3("UST registry event %s with id %" PRId32 " added successfully",
6368 name, event_id);
6369
6370 error:
6371 pthread_mutex_unlock(&registry->lock);
6372 error_rcu_unlock:
6373 rcu_read_unlock();
6374 free(sig);
6375 free(fields);
6376 free(model_emf_uri);
6377 return ret;
6378 }
6379
6380 /*
6381 * Add enum to the UST session registry. Once done, this replies to the
6382 * application with the appropriate error code.
6383 *
6384 * The session UST registry lock is acquired within this function.
6385 *
6386	 * On success, 0 is returned; otherwise, a negative value.
6387 */
6388 static int add_enum_ust_registry(int sock, int sobjd, char *name,
6389 struct ustctl_enum_entry *entries, size_t nr_entries)
6390 {
6391 int ret = 0, ret_code;
6392 struct ust_app *app;
6393 struct ust_app_session *ua_sess;
6394 struct ust_registry_session *registry;
6395 uint64_t enum_id = -1ULL;
6396
6397 rcu_read_lock();
6398
6399 /* Lookup application. If not found, there is a code flow error. */
6400 app = find_app_by_notify_sock(sock);
6401 if (!app) {
6402		/* Do not return an error; this is not an error. */
6403 DBG("Application socket %d is being torn down. Aborting enum registration",
6404 sock);
6405 free(entries);
6406 goto error_rcu_unlock;
6407 }
6408
6409 /* Lookup session by UST object descriptor. */
6410 ua_sess = find_session_by_objd(app, sobjd);
6411 if (!ua_sess) {
6412		/* Do not return an error; this is not an error. */
6413 DBG("Application session is being torn down (session not found). Aborting enum registration.");
6414 free(entries);
6415 goto error_rcu_unlock;
6416 }
6417
6418 registry = get_session_registry(ua_sess);
6419 if (!registry) {
6420 DBG("Application session is being torn down (registry not found). Aborting enum registration.");
6421 free(entries);
6422 goto error_rcu_unlock;
6423 }
6424
6425 pthread_mutex_lock(&registry->lock);
6426
6427 /*
6428 * From this point on, the callee acquires the ownership of
6429	 * entries. The variable entries MUST NOT be read or written after
6430	 * this call.
6431 */
6432 ret_code = ust_registry_create_or_find_enum(registry, sobjd, name,
6433 entries, nr_entries, &enum_id);
6434 entries = NULL;
6435
6436 /*
6437	 * The return code is sent to the application via ustctl so that, in
6438	 * case of an error, it can be notified. It is important not to
6439	 * return a negative error, or else the application will get closed.
6440 */
6441 ret = ustctl_reply_register_enum(sock, enum_id, ret_code);
6442 if (ret < 0) {
6443 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
6444 ERR("UST app reply enum failed with ret %d", ret);
6445 } else {
6446 DBG3("UST app reply enum failed. Application died");
6447 }
6448 /*
6449		 * No need to wipe the created enum since the application socket will
6450		 * be closed on error, cleaning up everything by itself.
6451 */
6452 goto error;
6453 }
6454
6455 DBG3("UST registry enum %s added successfully or already found", name);
6456
6457 error:
6458 pthread_mutex_unlock(&registry->lock);
6459 error_rcu_unlock:
6460 rcu_read_unlock();
6461 return ret;
6462 }
6463
6464 /*
6465 * Handle application notification through the given notify socket.
6466 *
6467 * Return 0 on success or else a negative value.
6468 */
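/*
 * Three notification commands are handled here: event, channel and enum
 * registration. Each handler takes ownership of the dynamically
 * allocated payload it receives and frees it as needed.
 */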
6469 int ust_app_recv_notify(int sock)
6470 {
6471 int ret;
6472 enum ustctl_notify_cmd cmd;
6473
6474 DBG3("UST app receiving notify from sock %d", sock);
6475
6476 ret = ustctl_recv_notify(sock, &cmd);
6477 if (ret < 0) {
6478 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
6479 ERR("UST app recv notify failed with ret %d", ret);
6480 } else {
6481 DBG3("UST app recv notify failed. Application died");
6482 }
6483 goto error;
6484 }
6485
6486 switch (cmd) {
6487 case USTCTL_NOTIFY_CMD_EVENT:
6488 {
6489 int sobjd, cobjd, loglevel_value;
6490 char name[LTTNG_UST_ABI_SYM_NAME_LEN], *sig, *model_emf_uri;
6491 size_t nr_fields;
6492 struct ustctl_field *fields;
6493
6494 DBG2("UST app ustctl register event received");
6495
6496 ret = ustctl_recv_register_event(sock, &sobjd, &cobjd, name,
6497 &loglevel_value, &sig, &nr_fields, &fields,
6498 &model_emf_uri);
6499 if (ret < 0) {
6500 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
6501 ERR("UST app recv event failed with ret %d", ret);
6502 } else {
6503 DBG3("UST app recv event failed. Application died");
6504 }
6505 goto error;
6506 }
6507
6508 /*
6509		 * Add the event to the UST registry coming from the notify socket. This
6510		 * call will free, if needed, the sig, fields and model_emf_uri. This
6511		 * code path loses the ownership of these variables and transfers them
6512		 * to that function.
6513 */
6514 ret = add_event_ust_registry(sock, sobjd, cobjd, name, sig, nr_fields,
6515 fields, loglevel_value, model_emf_uri);
6516 if (ret < 0) {
6517 goto error;
6518 }
6519
6520 break;
6521 }
6522 case USTCTL_NOTIFY_CMD_CHANNEL:
6523 {
6524 int sobjd, cobjd;
6525 size_t nr_fields;
6526 struct ustctl_field *fields;
6527
6528 DBG2("UST app ustctl register channel received");
6529
6530 ret = ustctl_recv_register_channel(sock, &sobjd, &cobjd, &nr_fields,
6531 &fields);
6532 if (ret < 0) {
6533 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
6534 ERR("UST app recv channel failed with ret %d", ret);
6535 } else {
6536 DBG3("UST app recv channel failed. Application died");
6537 }
6538 goto error;
6539 }
6540
6541 /*
6542		 * The ownership of fields is transferred to this function call, meaning
6543		 * that it will be freed if needed. After this, it is invalid to access
6544		 * or clean up fields.
6545 */
6546 ret = reply_ust_register_channel(sock, cobjd, nr_fields,
6547 fields);
6548 if (ret < 0) {
6549 goto error;
6550 }
6551
6552 break;
6553 }
6554 case USTCTL_NOTIFY_CMD_ENUM:
6555 {
6556 int sobjd;
6557 char name[LTTNG_UST_ABI_SYM_NAME_LEN];
6558 size_t nr_entries;
6559 struct ustctl_enum_entry *entries;
6560
6561 DBG2("UST app ustctl register enum received");
6562
6563 ret = ustctl_recv_register_enum(sock, &sobjd, name,
6564 &entries, &nr_entries);
6565 if (ret < 0) {
6566 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
6567 ERR("UST app recv enum failed with ret %d", ret);
6568 } else {
6569 DBG3("UST app recv enum failed. Application died");
6570 }
6571 goto error;
6572 }
6573
6574 /* Callee assumes ownership of entries */
6575 ret = add_enum_ust_registry(sock, sobjd, name,
6576 entries, nr_entries);
6577 if (ret < 0) {
6578 goto error;
6579 }
6580
6581 break;
6582 }
6583 default:
6584 /* Should NEVER happen. */
6585 assert(0);
6586 }
6587
6588 error:
6589 return ret;
6590 }
6591
6592 /*
6593 * Once the notify socket hangs up, this is called. First, it tries to find the
6594 * corresponding application. On failure, the call_rcu to close the socket is
6595 * executed. If an application is found, it tries to delete it from the notify
6596	 * socket hash table. Whatever the result, it proceeds to the call_rcu.
6597 *
6598	 * Note that an object needs to be allocated here; on an ENOMEM failure,
6599	 * the call_rcu is not done but the rest of the cleanup is.
6600 */
6601 void ust_app_notify_sock_unregister(int sock)
6602 {
6603 int err_enomem = 0;
6604 struct lttng_ht_iter iter;
6605 struct ust_app *app;
6606 struct ust_app_notify_sock_obj *obj;
6607
6608 assert(sock >= 0);
6609
6610 rcu_read_lock();
6611
6612 obj = zmalloc(sizeof(*obj));
6613 if (!obj) {
6614 /*
6615		 * An ENOMEM here is unfortunate. If this strikes, we continue the
6616 * procedure but the call_rcu will not be called. In this case, we
6617 * accept the fd leak rather than possibly creating an unsynchronized
6618 * state between threads.
6619 *
6620 * TODO: The notify object should be created once the notify socket is
6621		 * registered and stored independently from the ust app object. The
6622 * tricky part is to synchronize the teardown of the application and
6623 * this notify object. Let's keep that in mind so we can avoid this
6624 * kind of shenanigans with ENOMEM in the teardown path.
6625 */
6626 err_enomem = 1;
6627 } else {
6628 obj->fd = sock;
6629 }
6630
6631 DBG("UST app notify socket unregister %d", sock);
6632
6633 /*
6634 * Lookup application by notify socket. If this fails, this means that the
6635 * hash table delete has already been done by the application
6636 * unregistration process so we can safely close the notify socket in a
6637 * call RCU.
6638 */
6639 app = find_app_by_notify_sock(sock);
6640 if (!app) {
6641 goto close_socket;
6642 }
6643
6644 iter.iter.node = &app->notify_sock_n.node;
6645
6646 /*
6647	 * Whether we fail or succeed here, in both cases we have to close the
6648	 * socket after a grace period, hence the call RCU below. If the
6649	 * deletion is successful, the application is no longer visible to
6650	 * other threads; if it fails, it means that it was already deleted
6651	 * from the hash table, so either way we just have to close the
6652	 * socket.
6653 */
6654 (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
6655
6656 close_socket:
6657 rcu_read_unlock();
6658
6659 /*
6660	 * Close the socket after a grace period to avoid having it reused
6661	 * before the application object is freed, creating a potential race
6662	 * between threads trying to add a unique entry to the global hash table.
6663 */
6664 if (!err_enomem) {
6665 call_rcu(&obj->head, close_notify_sock_rcu);
6666 }
6667 }
6668
6669 /*
6670 * Destroy a ust app data structure and free its memory.
6671 */
6672 void ust_app_destroy(struct ust_app *app)
6673 {
6674 if (!app) {
6675 return;
6676 }
6677
6678 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
6679 }
6680
6681 /*
6682 * Take a snapshot for a given UST session. The snapshot is sent to the given
6683 * output.
6684 *
6685 * Returns LTTNG_OK on success or a LTTNG_ERR error code.
6686 */
6687 enum lttng_error_code ust_app_snapshot_record(
6688 const struct ltt_ust_session *usess,
6689 const struct consumer_output *output, int wait,
6690 uint64_t nb_packets_per_stream)
6691 {
6692 int ret = 0;
6693 enum lttng_error_code status = LTTNG_OK;
6694 struct lttng_ht_iter iter;
6695 struct ust_app *app;
6696 char *trace_path = NULL;
6697
6698 assert(usess);
6699 assert(output);
6700
6701 rcu_read_lock();
6702
6703 switch (usess->buffer_type) {
6704 case LTTNG_BUFFER_PER_UID:
6705 {
6706 struct buffer_reg_uid *reg;
6707
6708 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
6709 struct buffer_reg_channel *buf_reg_chan;
6710 struct consumer_socket *socket;
6711 char pathname[PATH_MAX];
6712 size_t consumer_path_offset = 0;
6713
6714 if (!reg->registry->reg.ust->metadata_key) {
6715 /* Skip since no metadata is present */
6716 continue;
6717 }
6718
6719			/* Get consumer socket to use to push the metadata. */
6720 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
6721 usess->consumer);
6722 if (!socket) {
6723 status = LTTNG_ERR_INVALID;
6724 goto error;
6725 }
6726
6727 memset(pathname, 0, sizeof(pathname));
6728 ret = snprintf(pathname, sizeof(pathname),
6729 DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH,
6730 reg->uid, reg->bits_per_long);
6731 if (ret < 0) {
6732 PERROR("snprintf snapshot path");
6733 status = LTTNG_ERR_INVALID;
6734 goto error;
6735 }
6736			/* Free path allocated on previous iteration. */
6737 free(trace_path);
6738 trace_path = setup_channel_trace_path(usess->consumer, pathname,
6739 &consumer_path_offset);
6740 if (!trace_path) {
6741 status = LTTNG_ERR_INVALID;
6742 goto error;
6743 }
6744 /* Add the UST default trace dir to path. */
6745 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
6746 buf_reg_chan, node.node) {
6747 status = consumer_snapshot_channel(socket,
6748 buf_reg_chan->consumer_key,
6749 output, 0, usess->uid,
6750 usess->gid, &trace_path[consumer_path_offset], wait,
6751 nb_packets_per_stream);
6752 if (status != LTTNG_OK) {
6753 goto error;
6754 }
6755 }
6756 status = consumer_snapshot_channel(socket,
6757 reg->registry->reg.ust->metadata_key, output, 1,
6758 usess->uid, usess->gid, &trace_path[consumer_path_offset],
6759 wait, 0);
6760 if (status != LTTNG_OK) {
6761 goto error;
6762 }
6763 }
6764 break;
6765 }
6766 case LTTNG_BUFFER_PER_PID:
6767 {
6768 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6769 struct consumer_socket *socket;
6770 struct lttng_ht_iter chan_iter;
6771 struct ust_app_channel *ua_chan;
6772 struct ust_app_session *ua_sess;
6773 struct ust_registry_session *registry;
6774 char pathname[PATH_MAX];
6775 size_t consumer_path_offset = 0;
6776
6777 ua_sess = lookup_session_by_app(usess, app);
6778 if (!ua_sess) {
6779 /* Session not associated with this app. */
6780 continue;
6781 }
6782
6783 /* Get the right consumer socket for the application. */
6784 socket = consumer_find_socket_by_bitness(app->bits_per_long,
6785 output);
6786 if (!socket) {
6787 status = LTTNG_ERR_INVALID;
6788 goto error;
6789 }
6790
6791 /* Add the UST default trace dir to path. */
6792 memset(pathname, 0, sizeof(pathname));
6793 ret = snprintf(pathname, sizeof(pathname), DEFAULT_UST_TRACE_DIR "/%s",
6794 ua_sess->path);
6795 if (ret < 0) {
6796 status = LTTNG_ERR_INVALID;
6797 PERROR("snprintf snapshot path");
6798 goto error;
6799 }
6800			/* Free path allocated on previous iteration. */
6801 free(trace_path);
6802 trace_path = setup_channel_trace_path(usess->consumer, pathname,
6803 &consumer_path_offset);
6804 if (!trace_path) {
6805 status = LTTNG_ERR_INVALID;
6806 goto error;
6807 }
6808 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
6809 ua_chan, node.node) {
6810 status = consumer_snapshot_channel(socket,
6811 ua_chan->key, output, 0,
6812 lttng_credentials_get_uid(&ua_sess->effective_credentials),
6813 lttng_credentials_get_gid(&ua_sess->effective_credentials),
6814 &trace_path[consumer_path_offset], wait,
6815 nb_packets_per_stream);
6816 switch (status) {
6817 case LTTNG_OK:
6818 break;
6819 case LTTNG_ERR_CHAN_NOT_FOUND:
6820 continue;
6821 default:
6822 goto error;
6823 }
6824 }
6825
6826 registry = get_session_registry(ua_sess);
6827 if (!registry) {
6828 DBG("Application session is being torn down. Skip application.");
6829 continue;
6830 }
6831 status = consumer_snapshot_channel(socket,
6832 registry->metadata_key, output, 1,
6833 lttng_credentials_get_uid(&ua_sess->effective_credentials),
6834 lttng_credentials_get_gid(&ua_sess->effective_credentials),
6835 &trace_path[consumer_path_offset], wait, 0);
6836 switch (status) {
6837 case LTTNG_OK:
6838 break;
6839 case LTTNG_ERR_CHAN_NOT_FOUND:
6840 continue;
6841 default:
6842 goto error;
6843 }
6844 }
6845 break;
6846 }
6847 default:
6848 assert(0);
6849 break;
6850 }
6851
6852 error:
6853 free(trace_path);
6854 rcu_read_unlock();
6855 return status;
6856 }
6857
6858 /*
6859 * Return the size taken by one more packet per stream.
6860 */
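/*
 * For example (illustrative numbers only): a channel with 8 streams and
 * a sub-buffer size of 512 kiB contributes 8 * 512 kiB = 4 MiB for each
 * additional packet per stream, as long as cur_nr_packets has not yet
 * reached the channel's number of sub-buffers.
 */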
6861 uint64_t ust_app_get_size_one_more_packet_per_stream(
6862 const struct ltt_ust_session *usess, uint64_t cur_nr_packets)
6863 {
6864 uint64_t tot_size = 0;
6865 struct ust_app *app;
6866 struct lttng_ht_iter iter;
6867
6868 assert(usess);
6869
6870 switch (usess->buffer_type) {
6871 case LTTNG_BUFFER_PER_UID:
6872 {
6873 struct buffer_reg_uid *reg;
6874
6875 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
6876 struct buffer_reg_channel *buf_reg_chan;
6877
6878 rcu_read_lock();
6879 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
6880 buf_reg_chan, node.node) {
6881 if (cur_nr_packets >= buf_reg_chan->num_subbuf) {
6882 /*
6883				 * Don't take this channel into account if we
6884				 * already grabbed all of its packets.
6885 */
6886 continue;
6887 }
6888 tot_size += buf_reg_chan->subbuf_size * buf_reg_chan->stream_count;
6889 }
6890 rcu_read_unlock();
6891 }
6892 break;
6893 }
6894 case LTTNG_BUFFER_PER_PID:
6895 {
6896 rcu_read_lock();
6897 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6898 struct ust_app_channel *ua_chan;
6899 struct ust_app_session *ua_sess;
6900 struct lttng_ht_iter chan_iter;
6901
6902 ua_sess = lookup_session_by_app(usess, app);
6903 if (!ua_sess) {
6904 /* Session not associated with this app. */
6905 continue;
6906 }
6907
6908 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
6909 ua_chan, node.node) {
6910 if (cur_nr_packets >= ua_chan->attr.num_subbuf) {
6911 /*
6912				 * Don't take this channel into account if we
6913				 * already grabbed all of its packets.
6914 */
6915 continue;
6916 }
6917 tot_size += ua_chan->attr.subbuf_size * ua_chan->streams.count;
6918 }
6919 }
6920 rcu_read_unlock();
6921 break;
6922 }
6923 default:
6924 assert(0);
6925 break;
6926 }
6927
6928 return tot_size;
6929 }
6930
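/*
 * Fetch the runtime statistics of a per-UID channel: the lost packet
 * count for an overwrite-mode channel, the discarded event count
 * otherwise. A channel that is not found is not treated as an error.
 */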
6931 int ust_app_uid_get_channel_runtime_stats(uint64_t ust_session_id,
6932 struct cds_list_head *buffer_reg_uid_list,
6933 struct consumer_output *consumer, uint64_t uchan_id,
6934 int overwrite, uint64_t *discarded, uint64_t *lost)
6935 {
6936 int ret;
6937 uint64_t consumer_chan_key;
6938
6939 *discarded = 0;
6940 *lost = 0;
6941
6942 ret = buffer_reg_uid_consumer_channel_key(
6943 buffer_reg_uid_list, uchan_id, &consumer_chan_key);
6944 if (ret < 0) {
6945 /* Not found */
6946 ret = 0;
6947 goto end;
6948 }
6949
6950 if (overwrite) {
6951 ret = consumer_get_lost_packets(ust_session_id,
6952 consumer_chan_key, consumer, lost);
6953 } else {
6954 ret = consumer_get_discarded_events(ust_session_id,
6955 consumer_chan_key, consumer, discarded);
6956 }
6957
6958 end:
6959 return ret;
6960 }
6961
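/*
 * Per-PID counterpart of ust_app_uid_get_channel_runtime_stats(): sum
 * the lost packet or discarded event counters over every registered
 * application that has an instance of the requested channel.
 */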
6962 int ust_app_pid_get_channel_runtime_stats(struct ltt_ust_session *usess,
6963 struct ltt_ust_channel *uchan,
6964 struct consumer_output *consumer, int overwrite,
6965 uint64_t *discarded, uint64_t *lost)
6966 {
6967 int ret = 0;
6968 struct lttng_ht_iter iter;
6969 struct lttng_ht_node_str *ua_chan_node;
6970 struct ust_app *app;
6971 struct ust_app_session *ua_sess;
6972 struct ust_app_channel *ua_chan;
6973
6974 *discarded = 0;
6975 *lost = 0;
6976
6977 rcu_read_lock();
6978 /*
6979 * Iterate over every registered applications. Sum counters for
6980 * all applications containing requested session and channel.
6981 */
6982 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6983 struct lttng_ht_iter uiter;
6984
6985 ua_sess = lookup_session_by_app(usess, app);
6986 if (ua_sess == NULL) {
6987 continue;
6988 }
6989
6990 /* Get channel */
6991 lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
6992 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
6993 /* If the session is found for the app, the channel must be there */
6994 assert(ua_chan_node);
6995
6996 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
6997
6998 if (overwrite) {
6999 uint64_t _lost;
7000
7001 ret = consumer_get_lost_packets(usess->id, ua_chan->key,
7002 consumer, &_lost);
7003 if (ret < 0) {
7004 break;
7005 }
7006 (*lost) += _lost;
7007 } else {
7008 uint64_t _discarded;
7009
7010 ret = consumer_get_discarded_events(usess->id,
7011 ua_chan->key, consumer, &_discarded);
7012 if (ret < 0) {
7013 break;
7014 }
7015 (*discarded) += _discarded;
7016 }
7017 }
7018
7019 rcu_read_unlock();
7020 return ret;
7021 }
7022
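/*
 * Ask a single application to regenerate its statedump for the given
 * session. Errors are reported to the caller, which decides whether to
 * continue with the other applications.
 */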
7023 static
7024 int ust_app_regenerate_statedump(struct ltt_ust_session *usess,
7025 struct ust_app *app)
7026 {
7027 int ret = 0;
7028 struct ust_app_session *ua_sess;
7029
7030 DBG("Regenerating the metadata for ust app pid %d", app->pid);
7031
7032 rcu_read_lock();
7033
7034 ua_sess = lookup_session_by_app(usess, app);
7035 if (ua_sess == NULL) {
7036 /* The session is in teardown process. Ignore and continue. */
7037 goto end;
7038 }
7039
7040 pthread_mutex_lock(&ua_sess->lock);
7041
7042 if (ua_sess->deleted) {
7043 goto end_unlock;
7044 }
7045
7046 pthread_mutex_lock(&app->sock_lock);
7047 ret = ustctl_regenerate_statedump(app->sock, ua_sess->handle);
7048 pthread_mutex_unlock(&app->sock_lock);
7049
7050 end_unlock:
7051 pthread_mutex_unlock(&ua_sess->lock);
7052
7053 end:
7054 rcu_read_unlock();
7055 health_code_update();
7056 return ret;
7057 }
7058
7059 /*
7060 * Regenerate the statedump for each app in the session.
7061 */
7062 int ust_app_regenerate_statedump_all(struct ltt_ust_session *usess)
7063 {
7064 int ret = 0;
7065 struct lttng_ht_iter iter;
7066 struct ust_app *app;
7067
7068 DBG("Regenerating the metadata for all UST apps");
7069
7070 rcu_read_lock();
7071
7072 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
7073 if (!app->compatible) {
7074 continue;
7075 }
7076
7077 ret = ust_app_regenerate_statedump(usess, app);
7078 if (ret < 0) {
7079 /* Continue to the next app even on error */
7080 continue;
7081 }
7082 }
7083
7084 rcu_read_unlock();
7085
7086 return 0;
7087 }
7088
7089 /*
7090 * Rotate all the channels of a session.
7091 *
7092 * Return LTTNG_OK on success or else an LTTng error code.
7093 */
7094 enum lttng_error_code ust_app_rotate_session(struct ltt_session *session)
7095 {
7096 int ret;
7097 enum lttng_error_code cmd_ret = LTTNG_OK;
7098 struct lttng_ht_iter iter;
7099 struct ust_app *app;
7100 struct ltt_ust_session *usess = session->ust_session;
7101
7102 assert(usess);
7103
7104 rcu_read_lock();
7105
7106 switch (usess->buffer_type) {
7107 case LTTNG_BUFFER_PER_UID:
7108 {
7109 struct buffer_reg_uid *reg;
7110
7111 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
7112 struct buffer_reg_channel *buf_reg_chan;
7113 struct consumer_socket *socket;
7114
7115 if (!reg->registry->reg.ust->metadata_key) {
7116 /* Skip since no metadata is present */
7117 continue;
7118 }
7119
7120			/* Get consumer socket to use to push the metadata. */
7121 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
7122 usess->consumer);
7123 if (!socket) {
7124 cmd_ret = LTTNG_ERR_INVALID;
7125 goto error;
7126 }
7127
7128 /* Rotate the data channels. */
7129 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
7130 buf_reg_chan, node.node) {
7131 ret = consumer_rotate_channel(socket,
7132 buf_reg_chan->consumer_key,
7133 usess->uid, usess->gid,
7134 usess->consumer,
7135 /* is_metadata_channel */ false);
7136 if (ret < 0) {
7137 cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
7138 goto error;
7139 }
7140 }
7141
7142 (void) push_metadata(reg->registry->reg.ust, usess->consumer);
7143
7144 ret = consumer_rotate_channel(socket,
7145 reg->registry->reg.ust->metadata_key,
7146 usess->uid, usess->gid,
7147 usess->consumer,
7148 /* is_metadata_channel */ true);
7149 if (ret < 0) {
7150 cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
7151 goto error;
7152 }
7153 }
7154 break;
7155 }
7156 case LTTNG_BUFFER_PER_PID:
7157 {
7158 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
7159 struct consumer_socket *socket;
7160 struct lttng_ht_iter chan_iter;
7161 struct ust_app_channel *ua_chan;
7162 struct ust_app_session *ua_sess;
7163 struct ust_registry_session *registry;
7164
7165 ua_sess = lookup_session_by_app(usess, app);
7166 if (!ua_sess) {
7167 /* Session not associated with this app. */
7168 continue;
7169 }
7170
7171 /* Get the right consumer socket for the application. */
7172 socket = consumer_find_socket_by_bitness(app->bits_per_long,
7173 usess->consumer);
7174 if (!socket) {
7175 cmd_ret = LTTNG_ERR_INVALID;
7176 goto error;
7177 }
7178
7179 registry = get_session_registry(ua_sess);
7180 if (!registry) {
7181 DBG("Application session is being torn down. Skip application.");
7182 continue;
7183 }
7184
7185 /* Rotate the data channels. */
7186 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
7187 ua_chan, node.node) {
7188 ret = consumer_rotate_channel(socket,
7189 ua_chan->key,
7190 lttng_credentials_get_uid(&ua_sess->effective_credentials),
7191 lttng_credentials_get_gid(&ua_sess->effective_credentials),
7192 ua_sess->consumer,
7193 /* is_metadata_channel */ false);
7194 if (ret < 0) {
7195 /* Per-PID buffer and application going away. */
7196 if (ret == -LTTNG_ERR_CHAN_NOT_FOUND)
7197 continue;
7198 cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
7199 goto error;
7200 }
7201 }
7202
7203 /* Rotate the metadata channel. */
7204 (void) push_metadata(registry, usess->consumer);
7205 ret = consumer_rotate_channel(socket,
7206 registry->metadata_key,
7207 lttng_credentials_get_uid(&ua_sess->effective_credentials),
7208 lttng_credentials_get_gid(&ua_sess->effective_credentials),
7209 ua_sess->consumer,
7210 /* is_metadata_channel */ true);
7211 if (ret < 0) {
7212 /* Per-PID buffer and application going away. */
7213 if (ret == -LTTNG_ERR_CHAN_NOT_FOUND)
7214 continue;
7215 cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
7216 goto error;
7217 }
7218 }
7219 break;
7220 }
7221 default:
7222 assert(0);
7223 break;
7224 }
7225
7226 cmd_ret = LTTNG_OK;
7227
7228 error:
7229 rcu_read_unlock();
7230 return cmd_ret;
7231 }
7232
7233 enum lttng_error_code ust_app_create_channel_subdirectories(
7234 const struct ltt_ust_session *usess)
7235 {
7236 enum lttng_error_code ret = LTTNG_OK;
7237 struct lttng_ht_iter iter;
7238 enum lttng_trace_chunk_status chunk_status;
7239 char *pathname_index;
7240 int fmt_ret;
7241
7242 assert(usess->current_trace_chunk);
7243 rcu_read_lock();
7244
7245 switch (usess->buffer_type) {
7246 case LTTNG_BUFFER_PER_UID:
7247 {
7248 struct buffer_reg_uid *reg;
7249
7250 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
7251 fmt_ret = asprintf(&pathname_index,
7252 DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH "/" DEFAULT_INDEX_DIR,
7253 reg->uid, reg->bits_per_long);
7254 if (fmt_ret < 0) {
7255 ERR("Failed to format channel index directory");
7256 ret = LTTNG_ERR_CREATE_DIR_FAIL;
7257 goto error;
7258 }
7259
7260 /*
7261 * Create the index subdirectory which will take care
7262 * of implicitly creating the channel's path.
7263 */
7264 chunk_status = lttng_trace_chunk_create_subdirectory(
7265 usess->current_trace_chunk,
7266 pathname_index);
7267 free(pathname_index);
7268 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
7269 ret = LTTNG_ERR_CREATE_DIR_FAIL;
7270 goto error;
7271 }
7272 }
7273 break;
7274 }
7275 case LTTNG_BUFFER_PER_PID:
7276 {
7277 struct ust_app *app;
7278
7279 /*
7280 * Create the toplevel ust/ directory in case no apps are running.
7281 */
7282 chunk_status = lttng_trace_chunk_create_subdirectory(
7283 usess->current_trace_chunk,
7284 DEFAULT_UST_TRACE_DIR);
7285 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
7286 ret = LTTNG_ERR_CREATE_DIR_FAIL;
7287 goto error;
7288 }
7289
7290 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app,
7291 pid_n.node) {
7292 struct ust_app_session *ua_sess;
7293 struct ust_registry_session *registry;
7294
7295 ua_sess = lookup_session_by_app(usess, app);
7296 if (!ua_sess) {
7297 /* Session not associated with this app. */
7298 continue;
7299 }
7300
7301 registry = get_session_registry(ua_sess);
7302 if (!registry) {
7303 DBG("Application session is being torn down. Skip application.");
7304 continue;
7305 }
7306
7307 fmt_ret = asprintf(&pathname_index,
7308 DEFAULT_UST_TRACE_DIR "/%s/" DEFAULT_INDEX_DIR,
7309 ua_sess->path);
7310 if (fmt_ret < 0) {
7311 ERR("Failed to format channel index directory");
7312 ret = LTTNG_ERR_CREATE_DIR_FAIL;
7313 goto error;
7314 }
7315 /*
7316 * Create the index subdirectory which will take care
7317 * of implicitly creating the channel's path.
7318 */
7319 chunk_status = lttng_trace_chunk_create_subdirectory(
7320 usess->current_trace_chunk,
7321 pathname_index);
7322 free(pathname_index);
7323 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
7324 ret = LTTNG_ERR_CREATE_DIR_FAIL;
7325 goto error;
7326 }
7327 }
7328 break;
7329 }
7330 default:
7331 abort();
7332 }
7333
7334 ret = LTTNG_OK;
7335 error:
7336 rcu_read_unlock();
7337 return ret;
7338 }
7339
7340 /*
7341 * Clear all the channels of a session.
7342 *
7343 * Return LTTNG_OK on success or else an LTTng error code.
7344 */
7345 enum lttng_error_code ust_app_clear_session(struct ltt_session *session)
7346 {
7347 int ret;
7348 enum lttng_error_code cmd_ret = LTTNG_OK;
7349 struct lttng_ht_iter iter;
7350 struct ust_app *app;
7351 struct ltt_ust_session *usess = session->ust_session;
7352
7353 assert(usess);
7354
7355 rcu_read_lock();
7356
7357 if (usess->active) {
7358 ERR("Expecting inactive session %s (%" PRIu64 ")", session->name, session->id);
7359 cmd_ret = LTTNG_ERR_FATAL;
7360 goto end;
7361 }
7362
7363 switch (usess->buffer_type) {
7364 case LTTNG_BUFFER_PER_UID:
7365 {
7366 struct buffer_reg_uid *reg;
7367
7368 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
7369 struct buffer_reg_channel *buf_reg_chan;
7370 struct consumer_socket *socket;
7371
7372			/* Get consumer socket to use to push the metadata. */
7373 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
7374 usess->consumer);
7375 if (!socket) {
7376 cmd_ret = LTTNG_ERR_INVALID;
7377 goto error_socket;
7378 }
7379
7380 /* Clear the data channels. */
7381 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
7382 buf_reg_chan, node.node) {
7383 ret = consumer_clear_channel(socket,
7384 buf_reg_chan->consumer_key);
7385 if (ret < 0) {
7386 goto error;
7387 }
7388 }
7389
7390 (void) push_metadata(reg->registry->reg.ust, usess->consumer);
7391
7392 /*
7393 * Clear the metadata channel.
7394			 * The metadata channel is not cleared per se, but we still need to
7395			 * perform a rotation operation on it behind the scenes.
7396 */
7397 ret = consumer_clear_channel(socket,
7398 reg->registry->reg.ust->metadata_key);
7399 if (ret < 0) {
7400 goto error;
7401 }
7402 }
7403 break;
7404 }
7405 case LTTNG_BUFFER_PER_PID:
7406 {
7407 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
7408 struct consumer_socket *socket;
7409 struct lttng_ht_iter chan_iter;
7410 struct ust_app_channel *ua_chan;
7411 struct ust_app_session *ua_sess;
7412 struct ust_registry_session *registry;
7413
7414 ua_sess = lookup_session_by_app(usess, app);
7415 if (!ua_sess) {
7416 /* Session not associated with this app. */
7417 continue;
7418 }
7419
7420 /* Get the right consumer socket for the application. */
7421 socket = consumer_find_socket_by_bitness(app->bits_per_long,
7422 usess->consumer);
7423 if (!socket) {
7424 cmd_ret = LTTNG_ERR_INVALID;
7425 goto error_socket;
7426 }
7427
7428 registry = get_session_registry(ua_sess);
7429 if (!registry) {
7430 DBG("Application session is being torn down. Skip application.");
7431 continue;
7432 }
7433
7434 /* Clear the data channels. */
7435 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
7436 ua_chan, node.node) {
7437 ret = consumer_clear_channel(socket, ua_chan->key);
7438 if (ret < 0) {
7439 /* Per-PID buffer and application going away. */
7440 if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
7441 continue;
7442 }
7443 goto error;
7444 }
7445 }
7446
7447 (void) push_metadata(registry, usess->consumer);
7448
7449 /*
7450 * Clear the metadata channel.
7451			 * The metadata channel is not cleared per se, but we still need to
7452			 * perform a rotation operation on it behind the scenes.
7453 */
7454 ret = consumer_clear_channel(socket, registry->metadata_key);
7455 if (ret < 0) {
7456 /* Per-PID buffer and application going away. */
7457 if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
7458 continue;
7459 }
7460 goto error;
7461 }
7462 }
7463 break;
7464 }
7465 default:
7466 assert(0);
7467 break;
7468 }
7469
7470 cmd_ret = LTTNG_OK;
7471 goto end;
7472
7473 error:
7474 switch (-ret) {
7475 case LTTCOMM_CONSUMERD_RELAYD_CLEAR_DISALLOWED:
7476 cmd_ret = LTTNG_ERR_CLEAR_RELAY_DISALLOWED;
7477 break;
7478 default:
7479 cmd_ret = LTTNG_ERR_CLEAR_FAIL_CONSUMER;
7480 }
7481
7482 error_socket:
7483 end:
7484 rcu_read_unlock();
7485 return cmd_ret;
7486 }
7487
7488 /*
7489 * This function skips the metadata channel as the begin/end timestamps of a
7490 * metadata packet are useless.
7491 *
7492 * Moreover, opening a packet after a "clear" will cause problems for live
7493 * sessions as it will introduce padding that was not part of the first trace
7494 * chunk. The relay daemon expects the content of the metadata stream of
7495 * successive metadata trace chunks to be strict supersets of one another.
7496 *
7497 * For example, flushing a packet at the beginning of the metadata stream of
7498 * a trace chunk resulting from a "clear" session command will cause the
7499 * size of the metadata stream of the new trace chunk to not match the size of
7500 * the metadata stream of the original chunk. This will confuse the relay
7501 * daemon as the same "offset" in a metadata stream will no longer point
7502 * to the same content.
7503 */
7504 enum lttng_error_code ust_app_open_packets(struct ltt_session *session)
7505 {
7506 enum lttng_error_code ret = LTTNG_OK;
7507 struct lttng_ht_iter iter;
7508 struct ltt_ust_session *usess = session->ust_session;
7509
7510 assert(usess);
7511
7512 rcu_read_lock();
7513
7514 switch (usess->buffer_type) {
7515 case LTTNG_BUFFER_PER_UID:
7516 {
7517 struct buffer_reg_uid *reg;
7518
7519 cds_list_for_each_entry (
7520 reg, &usess->buffer_reg_uid_list, lnode) {
7521 struct buffer_reg_channel *buf_reg_chan;
7522 struct consumer_socket *socket;
7523
7524 socket = consumer_find_socket_by_bitness(
7525 reg->bits_per_long, usess->consumer);
7526 if (!socket) {
7527 ret = LTTNG_ERR_FATAL;
7528 goto error;
7529 }
7530
7531 cds_lfht_for_each_entry(reg->registry->channels->ht,
7532 &iter.iter, buf_reg_chan, node.node) {
7533 const int open_ret =
7534 consumer_open_channel_packets(
7535 socket,
7536 buf_reg_chan->consumer_key);
7537
7538 if (open_ret < 0) {
7539 ret = LTTNG_ERR_UNK;
7540 goto error;
7541 }
7542 }
7543 }
7544 break;
7545 }
7546 case LTTNG_BUFFER_PER_PID:
7547 {
7548 struct ust_app *app;
7549
7550 cds_lfht_for_each_entry (
7551 ust_app_ht->ht, &iter.iter, app, pid_n.node) {
7552 struct consumer_socket *socket;
7553 struct lttng_ht_iter chan_iter;
7554 struct ust_app_channel *ua_chan;
7555 struct ust_app_session *ua_sess;
7556 struct ust_registry_session *registry;
7557
7558 ua_sess = lookup_session_by_app(usess, app);
7559 if (!ua_sess) {
7560 /* Session not associated with this app. */
7561 continue;
7562 }
7563
7564 /* Get the right consumer socket for the application. */
7565 socket = consumer_find_socket_by_bitness(
7566 app->bits_per_long, usess->consumer);
7567 if (!socket) {
7568 ret = LTTNG_ERR_FATAL;
7569 goto error;
7570 }
7571
7572 registry = get_session_registry(ua_sess);
7573 if (!registry) {
7574 DBG("Application session is being torn down. Skip application.");
7575 continue;
7576 }
7577
7578 cds_lfht_for_each_entry(ua_sess->channels->ht,
7579 &chan_iter.iter, ua_chan, node.node) {
7580 const int open_ret =
7581 consumer_open_channel_packets(
7582 socket,
7583 ua_chan->key);
7584
7585 if (open_ret < 0) {
7586 /*
7587 * Per-PID buffer and application going
7588 * away.
7589 */
7590 if (open_ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
7591 continue;
7592 }
7593
7594 ret = LTTNG_ERR_UNK;
7595 goto error;
7596 }
7597 }
7598 }
7599 break;
7600 }
7601 default:
7602 abort();
7603 break;
7604 }
7605
7606 error:
7607 rcu_read_unlock();
7608 return ret;
7609 }