a9dff558cbb22940bd6bc75c2041c3e6952c858a
[lttng-tools.git] / src / bin / lttng-sessiond / ust-app.c
1 /*
2 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
3 * Copyright (C) 2016 - Jérémie Galarneau <jeremie.galarneau@efficios.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2 only,
7 * as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
17 */
18
19 #define _LGPL_SOURCE
20 #include <errno.h>
21 #include <inttypes.h>
22 #include <pthread.h>
23 #include <stdio.h>
24 #include <stdlib.h>
25 #include <string.h>
26 #include <sys/stat.h>
27 #include <sys/types.h>
28 #include <unistd.h>
29 #include <urcu/compiler.h>
30 #include <lttng/ust-error.h>
31 #include <signal.h>
32
33 #include <common/common.h>
34 #include <common/sessiond-comm/sessiond-comm.h>
35
36 #include "buffer-registry.h"
37 #include "fd-limit.h"
38 #include "health-sessiond.h"
39 #include "ust-app.h"
40 #include "ust-consumer.h"
41 #include "ust-ctl.h"
42 #include "utils.h"
43 #include "session.h"
44 #include "lttng-sessiond.h"
45 #include "notification-thread-commands.h"
46
47 static
48 int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess);
49
50 /* Next available channel key. Access under next_channel_key_lock. */
51 static uint64_t _next_channel_key;
52 static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;
53
54 /* Next available session ID. Access under next_session_id_lock. */
55 static uint64_t _next_session_id;
56 static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;
57
58 /*
59 * Return the incremented value of next_channel_key.
60 */
61 static uint64_t get_next_channel_key(void)
62 {
63 uint64_t ret;
64
65 pthread_mutex_lock(&next_channel_key_lock);
66 ret = ++_next_channel_key;
67 pthread_mutex_unlock(&next_channel_key_lock);
68 return ret;
69 }
70
71 /*
72 * Return the atomically incremented value of next_session_id.
73 */
74 static uint64_t get_next_session_id(void)
75 {
76 uint64_t ret;
77
78 pthread_mutex_lock(&next_session_id_lock);
79 ret = ++_next_session_id;
80 pthread_mutex_unlock(&next_session_id_lock);
81 return ret;
82 }
83
84 static void copy_channel_attr_to_ustctl(
85 struct ustctl_consumer_channel_attr *attr,
86 struct lttng_ust_channel_attr *uattr)
87 {
88 /* Copy event attributes since the layout is different. */
89 attr->subbuf_size = uattr->subbuf_size;
90 attr->num_subbuf = uattr->num_subbuf;
91 attr->overwrite = uattr->overwrite;
92 attr->switch_timer_interval = uattr->switch_timer_interval;
93 attr->read_timer_interval = uattr->read_timer_interval;
94 attr->output = uattr->output;
95 }
96
97 /*
98 * Match function for the hash table lookup.
99 *
100 * It matches an ust app event based on three attributes which are the event
101 * name, the filter bytecode and the loglevel.
102 */
103 static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
104 {
105 struct ust_app_event *event;
106 const struct ust_app_ht_key *key;
107 int ev_loglevel_value;
108
109 assert(node);
110 assert(_key);
111
112 event = caa_container_of(node, struct ust_app_event, node.node);
113 key = _key;
114 ev_loglevel_value = event->attr.loglevel;
115
116 /* Match the 4 elements of the key: name, filter, loglevel, exclusions */
117
118 /* Event name */
119 if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
120 goto no_match;
121 }
122
123 /* Event loglevel. */
124 if (ev_loglevel_value != key->loglevel_type) {
125 if (event->attr.loglevel_type == LTTNG_UST_LOGLEVEL_ALL
126 && key->loglevel_type == 0 &&
127 ev_loglevel_value == -1) {
128 /*
129 * Match is accepted. This is because on event creation, the
130 * loglevel is set to -1 if the event loglevel type is ALL so 0 and
131 * -1 are accepted for this loglevel type since 0 is the one set by
132 * the API when receiving an enable event.
133 */
134 } else {
135 goto no_match;
136 }
137 }
138
139 /* One of the filters is NULL, fail. */
140 if ((key->filter && !event->filter) || (!key->filter && event->filter)) {
141 goto no_match;
142 }
143
144 if (key->filter && event->filter) {
145 /* Both filters exists, check length followed by the bytecode. */
146 if (event->filter->len != key->filter->len ||
147 memcmp(event->filter->data, key->filter->data,
148 event->filter->len) != 0) {
149 goto no_match;
150 }
151 }
152
153 /* One of the exclusions is NULL, fail. */
154 if ((key->exclusion && !event->exclusion) || (!key->exclusion && event->exclusion)) {
155 goto no_match;
156 }
157
158 if (key->exclusion && event->exclusion) {
159 /* Both exclusions exists, check count followed by the names. */
160 if (event->exclusion->count != key->exclusion->count ||
161 memcmp(event->exclusion->names, key->exclusion->names,
162 event->exclusion->count * LTTNG_UST_SYM_NAME_LEN) != 0) {
163 goto no_match;
164 }
165 }
166
167
168 /* Match. */
169 return 1;
170
171 no_match:
172 return 0;
173 }
174
175 /*
176 * Unique add of an ust app event in the given ht. This uses the custom
177 * ht_match_ust_app_event match function and the event name as hash.
178 */
179 static void add_unique_ust_app_event(struct ust_app_channel *ua_chan,
180 struct ust_app_event *event)
181 {
182 struct cds_lfht_node *node_ptr;
183 struct ust_app_ht_key key;
184 struct lttng_ht *ht;
185
186 assert(ua_chan);
187 assert(ua_chan->events);
188 assert(event);
189
190 ht = ua_chan->events;
191 key.name = event->attr.name;
192 key.filter = event->filter;
193 key.loglevel_type = event->attr.loglevel;
194 key.exclusion = event->exclusion;
195
196 node_ptr = cds_lfht_add_unique(ht->ht,
197 ht->hash_fct(event->node.key, lttng_ht_seed),
198 ht_match_ust_app_event, &key, &event->node.node);
199 assert(node_ptr == &event->node.node);
200 }
201
202 /*
203 * Close the notify socket from the given RCU head object. This MUST be called
204 * through a call_rcu().
205 */
206 static void close_notify_sock_rcu(struct rcu_head *head)
207 {
208 int ret;
209 struct ust_app_notify_sock_obj *obj =
210 caa_container_of(head, struct ust_app_notify_sock_obj, head);
211
212 /* Must have a valid fd here. */
213 assert(obj->fd >= 0);
214
215 ret = close(obj->fd);
216 if (ret) {
217 ERR("close notify sock %d RCU", obj->fd);
218 }
219 lttng_fd_put(LTTNG_FD_APPS, 1);
220
221 free(obj);
222 }
223
224 /*
225 * Return the session registry according to the buffer type of the given
226 * session.
227 *
228 * A registry per UID object MUST exists before calling this function or else
229 * it assert() if not found. RCU read side lock must be acquired.
230 */
231 static struct ust_registry_session *get_session_registry(
232 struct ust_app_session *ua_sess)
233 {
234 struct ust_registry_session *registry = NULL;
235
236 assert(ua_sess);
237
238 switch (ua_sess->buffer_type) {
239 case LTTNG_BUFFER_PER_PID:
240 {
241 struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
242 if (!reg_pid) {
243 goto error;
244 }
245 registry = reg_pid->registry->reg.ust;
246 break;
247 }
248 case LTTNG_BUFFER_PER_UID:
249 {
250 struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
251 ua_sess->tracing_id, ua_sess->bits_per_long, ua_sess->uid);
252 if (!reg_uid) {
253 goto error;
254 }
255 registry = reg_uid->registry->reg.ust;
256 break;
257 }
258 default:
259 assert(0);
260 };
261
262 error:
263 return registry;
264 }
265
266 /*
267 * Delete ust context safely. RCU read lock must be held before calling
268 * this function.
269 */
270 static
271 void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx,
272 struct ust_app *app)
273 {
274 int ret;
275
276 assert(ua_ctx);
277
278 if (ua_ctx->obj) {
279 pthread_mutex_lock(&app->sock_lock);
280 ret = ustctl_release_object(sock, ua_ctx->obj);
281 pthread_mutex_unlock(&app->sock_lock);
282 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
283 ERR("UST app sock %d release ctx obj handle %d failed with ret %d",
284 sock, ua_ctx->obj->handle, ret);
285 }
286 free(ua_ctx->obj);
287 }
288 free(ua_ctx);
289 }
290
291 /*
292 * Delete ust app event safely. RCU read lock must be held before calling
293 * this function.
294 */
295 static
296 void delete_ust_app_event(int sock, struct ust_app_event *ua_event,
297 struct ust_app *app)
298 {
299 int ret;
300
301 assert(ua_event);
302
303 free(ua_event->filter);
304 if (ua_event->exclusion != NULL)
305 free(ua_event->exclusion);
306 if (ua_event->obj != NULL) {
307 pthread_mutex_lock(&app->sock_lock);
308 ret = ustctl_release_object(sock, ua_event->obj);
309 pthread_mutex_unlock(&app->sock_lock);
310 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
311 ERR("UST app sock %d release event obj failed with ret %d",
312 sock, ret);
313 }
314 free(ua_event->obj);
315 }
316 free(ua_event);
317 }
318
319 /*
320 * Release ust data object of the given stream.
321 *
322 * Return 0 on success or else a negative value.
323 */
324 static int release_ust_app_stream(int sock, struct ust_app_stream *stream,
325 struct ust_app *app)
326 {
327 int ret = 0;
328
329 assert(stream);
330
331 if (stream->obj) {
332 pthread_mutex_lock(&app->sock_lock);
333 ret = ustctl_release_object(sock, stream->obj);
334 pthread_mutex_unlock(&app->sock_lock);
335 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
336 ERR("UST app sock %d release stream obj failed with ret %d",
337 sock, ret);
338 }
339 lttng_fd_put(LTTNG_FD_APPS, 2);
340 free(stream->obj);
341 }
342
343 return ret;
344 }
345
346 /*
347 * Delete ust app stream safely. RCU read lock must be held before calling
348 * this function.
349 */
350 static
351 void delete_ust_app_stream(int sock, struct ust_app_stream *stream,
352 struct ust_app *app)
353 {
354 assert(stream);
355
356 (void) release_ust_app_stream(sock, stream, app);
357 free(stream);
358 }
359
360 /*
361 * We need to execute ht_destroy outside of RCU read-side critical
362 * section and outside of call_rcu thread, so we postpone its execution
363 * using ht_cleanup_push. It is simpler than to change the semantic of
364 * the many callers of delete_ust_app_session().
365 */
366 static
367 void delete_ust_app_channel_rcu(struct rcu_head *head)
368 {
369 struct ust_app_channel *ua_chan =
370 caa_container_of(head, struct ust_app_channel, rcu_head);
371
372 ht_cleanup_push(ua_chan->ctx);
373 ht_cleanup_push(ua_chan->events);
374 free(ua_chan);
375 }
376
377 /*
378 * Extract the lost packet or discarded events counter when the channel is
379 * being deleted and store the value in the parent channel so we can
380 * access it from lttng list and at stop/destroy.
381 *
382 * The session list lock must be held by the caller.
383 */
384 static
385 void save_per_pid_lost_discarded_counters(struct ust_app_channel *ua_chan)
386 {
387 uint64_t discarded = 0, lost = 0;
388 struct ltt_session *session;
389 struct ltt_ust_channel *uchan;
390
391 if (ua_chan->attr.type != LTTNG_UST_CHAN_PER_CPU) {
392 return;
393 }
394
395 rcu_read_lock();
396 session = session_find_by_id(ua_chan->session->tracing_id);
397 if (!session || !session->ust_session) {
398 /*
399 * Not finding the session is not an error because there are
400 * multiple ways the channels can be torn down.
401 *
402 * 1) The session daemon can initiate the destruction of the
403 * ust app session after receiving a destroy command or
404 * during its shutdown/teardown.
405 * 2) The application, since we are in per-pid tracing, is
406 * unregistering and tearing down its ust app session.
407 *
408 * Both paths are protected by the session list lock which
409 * ensures that the accounting of lost packets and discarded
410 * events is done exactly once. The session is then unpublished
411 * from the session list, resulting in this condition.
412 */
413 goto end;
414 }
415
416 if (ua_chan->attr.overwrite) {
417 consumer_get_lost_packets(ua_chan->session->tracing_id,
418 ua_chan->key, session->ust_session->consumer,
419 &lost);
420 } else {
421 consumer_get_discarded_events(ua_chan->session->tracing_id,
422 ua_chan->key, session->ust_session->consumer,
423 &discarded);
424 }
425 uchan = trace_ust_find_channel_by_name(
426 session->ust_session->domain_global.channels,
427 ua_chan->name);
428 if (!uchan) {
429 ERR("Missing UST channel to store discarded counters");
430 goto end;
431 }
432
433 uchan->per_pid_closed_app_discarded += discarded;
434 uchan->per_pid_closed_app_lost += lost;
435
436 end:
437 rcu_read_unlock();
438 }
439
440 /*
441 * Delete ust app channel safely. RCU read lock must be held before calling
442 * this function.
443 *
444 * The session list lock must be held by the caller.
445 */
446 static
447 void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
448 struct ust_app *app)
449 {
450 int ret;
451 struct lttng_ht_iter iter;
452 struct ust_app_event *ua_event;
453 struct ust_app_ctx *ua_ctx;
454 struct ust_app_stream *stream, *stmp;
455 struct ust_registry_session *registry;
456
457 assert(ua_chan);
458
459 DBG3("UST app deleting channel %s", ua_chan->name);
460
461 /* Wipe stream */
462 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
463 cds_list_del(&stream->list);
464 delete_ust_app_stream(sock, stream, app);
465 }
466
467 /* Wipe context */
468 cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
469 cds_list_del(&ua_ctx->list);
470 ret = lttng_ht_del(ua_chan->ctx, &iter);
471 assert(!ret);
472 delete_ust_app_ctx(sock, ua_ctx, app);
473 }
474
475 /* Wipe events */
476 cds_lfht_for_each_entry(ua_chan->events->ht, &iter.iter, ua_event,
477 node.node) {
478 ret = lttng_ht_del(ua_chan->events, &iter);
479 assert(!ret);
480 delete_ust_app_event(sock, ua_event, app);
481 }
482
483 if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
484 /* Wipe and free registry from session registry. */
485 registry = get_session_registry(ua_chan->session);
486 if (registry) {
487 ust_registry_channel_del_free(registry, ua_chan->key,
488 true);
489 }
490 save_per_pid_lost_discarded_counters(ua_chan);
491 }
492
493 if (ua_chan->obj != NULL) {
494 /* Remove channel from application UST object descriptor. */
495 iter.iter.node = &ua_chan->ust_objd_node.node;
496 ret = lttng_ht_del(app->ust_objd, &iter);
497 assert(!ret);
498 pthread_mutex_lock(&app->sock_lock);
499 ret = ustctl_release_object(sock, ua_chan->obj);
500 pthread_mutex_unlock(&app->sock_lock);
501 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
502 ERR("UST app sock %d release channel obj failed with ret %d",
503 sock, ret);
504 }
505 lttng_fd_put(LTTNG_FD_APPS, 1);
506 free(ua_chan->obj);
507 }
508 call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
509 }
510
511 int ust_app_register_done(struct ust_app *app)
512 {
513 int ret;
514
515 pthread_mutex_lock(&app->sock_lock);
516 ret = ustctl_register_done(app->sock);
517 pthread_mutex_unlock(&app->sock_lock);
518 return ret;
519 }
520
521 int ust_app_release_object(struct ust_app *app, struct lttng_ust_object_data *data)
522 {
523 int ret, sock;
524
525 if (app) {
526 pthread_mutex_lock(&app->sock_lock);
527 sock = app->sock;
528 } else {
529 sock = -1;
530 }
531 ret = ustctl_release_object(sock, data);
532 if (app) {
533 pthread_mutex_unlock(&app->sock_lock);
534 }
535 return ret;
536 }
537
538 /*
539 * Push metadata to consumer socket.
540 *
541 * RCU read-side lock must be held to guarantee existance of socket.
542 * Must be called with the ust app session lock held.
543 * Must be called with the registry lock held.
544 *
545 * On success, return the len of metadata pushed or else a negative value.
546 * Returning a -EPIPE return value means we could not send the metadata,
547 * but it can be caused by recoverable errors (e.g. the application has
548 * terminated concurrently).
549 */
550 ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
551 struct consumer_socket *socket, int send_zero_data)
552 {
553 int ret;
554 char *metadata_str = NULL;
555 size_t len, offset, new_metadata_len_sent;
556 ssize_t ret_val;
557 uint64_t metadata_key, metadata_version;
558
559 assert(registry);
560 assert(socket);
561
562 metadata_key = registry->metadata_key;
563
564 /*
565 * Means that no metadata was assigned to the session. This can
566 * happens if no start has been done previously.
567 */
568 if (!metadata_key) {
569 return 0;
570 }
571
572 offset = registry->metadata_len_sent;
573 len = registry->metadata_len - registry->metadata_len_sent;
574 new_metadata_len_sent = registry->metadata_len;
575 metadata_version = registry->metadata_version;
576 if (len == 0) {
577 DBG3("No metadata to push for metadata key %" PRIu64,
578 registry->metadata_key);
579 ret_val = len;
580 if (send_zero_data) {
581 DBG("No metadata to push");
582 goto push_data;
583 }
584 goto end;
585 }
586
587 /* Allocate only what we have to send. */
588 metadata_str = zmalloc(len);
589 if (!metadata_str) {
590 PERROR("zmalloc ust app metadata string");
591 ret_val = -ENOMEM;
592 goto error;
593 }
594 /* Copy what we haven't sent out. */
595 memcpy(metadata_str, registry->metadata + offset, len);
596
597 push_data:
598 pthread_mutex_unlock(&registry->lock);
599 /*
600 * We need to unlock the registry while we push metadata to
601 * break a circular dependency between the consumerd metadata
602 * lock and the sessiond registry lock. Indeed, pushing metadata
603 * to the consumerd awaits that it gets pushed all the way to
604 * relayd, but doing so requires grabbing the metadata lock. If
605 * a concurrent metadata request is being performed by
606 * consumerd, this can try to grab the registry lock on the
607 * sessiond while holding the metadata lock on the consumer
608 * daemon. Those push and pull schemes are performed on two
609 * different bidirectionnal communication sockets.
610 */
611 ret = consumer_push_metadata(socket, metadata_key,
612 metadata_str, len, offset, metadata_version);
613 pthread_mutex_lock(&registry->lock);
614 if (ret < 0) {
615 /*
616 * There is an acceptable race here between the registry
617 * metadata key assignment and the creation on the
618 * consumer. The session daemon can concurrently push
619 * metadata for this registry while being created on the
620 * consumer since the metadata key of the registry is
621 * assigned *before* it is setup to avoid the consumer
622 * to ask for metadata that could possibly be not found
623 * in the session daemon.
624 *
625 * The metadata will get pushed either by the session
626 * being stopped or the consumer requesting metadata if
627 * that race is triggered.
628 */
629 if (ret == -LTTCOMM_CONSUMERD_CHANNEL_FAIL) {
630 ret = 0;
631 } else {
632 ERR("Error pushing metadata to consumer");
633 }
634 ret_val = ret;
635 goto error_push;
636 } else {
637 /*
638 * Metadata may have been concurrently pushed, since
639 * we're not holding the registry lock while pushing to
640 * consumer. This is handled by the fact that we send
641 * the metadata content, size, and the offset at which
642 * that metadata belongs. This may arrive out of order
643 * on the consumer side, and the consumer is able to
644 * deal with overlapping fragments. The consumer
645 * supports overlapping fragments, which must be
646 * contiguous starting from offset 0. We keep the
647 * largest metadata_len_sent value of the concurrent
648 * send.
649 */
650 registry->metadata_len_sent =
651 max_t(size_t, registry->metadata_len_sent,
652 new_metadata_len_sent);
653 }
654 free(metadata_str);
655 return len;
656
657 end:
658 error:
659 if (ret_val) {
660 /*
661 * On error, flag the registry that the metadata is
662 * closed. We were unable to push anything and this
663 * means that either the consumer is not responding or
664 * the metadata cache has been destroyed on the
665 * consumer.
666 */
667 registry->metadata_closed = 1;
668 }
669 error_push:
670 free(metadata_str);
671 return ret_val;
672 }
673
674 /*
675 * For a given application and session, push metadata to consumer.
676 * Either sock or consumer is required : if sock is NULL, the default
677 * socket to send the metadata is retrieved from consumer, if sock
678 * is not NULL we use it to send the metadata.
679 * RCU read-side lock must be held while calling this function,
680 * therefore ensuring existance of registry. It also ensures existance
681 * of socket throughout this function.
682 *
683 * Return 0 on success else a negative error.
684 * Returning a -EPIPE return value means we could not send the metadata,
685 * but it can be caused by recoverable errors (e.g. the application has
686 * terminated concurrently).
687 */
688 static int push_metadata(struct ust_registry_session *registry,
689 struct consumer_output *consumer)
690 {
691 int ret_val;
692 ssize_t ret;
693 struct consumer_socket *socket;
694
695 assert(registry);
696 assert(consumer);
697
698 pthread_mutex_lock(&registry->lock);
699 if (registry->metadata_closed) {
700 ret_val = -EPIPE;
701 goto error;
702 }
703
704 /* Get consumer socket to use to push the metadata.*/
705 socket = consumer_find_socket_by_bitness(registry->bits_per_long,
706 consumer);
707 if (!socket) {
708 ret_val = -1;
709 goto error;
710 }
711
712 ret = ust_app_push_metadata(registry, socket, 0);
713 if (ret < 0) {
714 ret_val = ret;
715 goto error;
716 }
717 pthread_mutex_unlock(&registry->lock);
718 return 0;
719
720 error:
721 pthread_mutex_unlock(&registry->lock);
722 return ret_val;
723 }
724
725 /*
726 * Send to the consumer a close metadata command for the given session. Once
727 * done, the metadata channel is deleted and the session metadata pointer is
728 * nullified. The session lock MUST be held unless the application is
729 * in the destroy path.
730 *
731 * Return 0 on success else a negative value.
732 */
733 static int close_metadata(struct ust_registry_session *registry,
734 struct consumer_output *consumer)
735 {
736 int ret;
737 struct consumer_socket *socket;
738
739 assert(registry);
740 assert(consumer);
741
742 rcu_read_lock();
743
744 pthread_mutex_lock(&registry->lock);
745
746 if (!registry->metadata_key || registry->metadata_closed) {
747 ret = 0;
748 goto end;
749 }
750
751 /* Get consumer socket to use to push the metadata.*/
752 socket = consumer_find_socket_by_bitness(registry->bits_per_long,
753 consumer);
754 if (!socket) {
755 ret = -1;
756 goto error;
757 }
758
759 ret = consumer_close_metadata(socket, registry->metadata_key);
760 if (ret < 0) {
761 goto error;
762 }
763
764 error:
765 /*
766 * Metadata closed. Even on error this means that the consumer is not
767 * responding or not found so either way a second close should NOT be emit
768 * for this registry.
769 */
770 registry->metadata_closed = 1;
771 end:
772 pthread_mutex_unlock(&registry->lock);
773 rcu_read_unlock();
774 return ret;
775 }
776
777 /*
778 * We need to execute ht_destroy outside of RCU read-side critical
779 * section and outside of call_rcu thread, so we postpone its execution
780 * using ht_cleanup_push. It is simpler than to change the semantic of
781 * the many callers of delete_ust_app_session().
782 */
783 static
784 void delete_ust_app_session_rcu(struct rcu_head *head)
785 {
786 struct ust_app_session *ua_sess =
787 caa_container_of(head, struct ust_app_session, rcu_head);
788
789 ht_cleanup_push(ua_sess->channels);
790 free(ua_sess);
791 }
792
793 /*
794 * Delete ust app session safely. RCU read lock must be held before calling
795 * this function.
796 *
797 * The session list lock must be held by the caller.
798 */
799 static
800 void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
801 struct ust_app *app)
802 {
803 int ret;
804 struct lttng_ht_iter iter;
805 struct ust_app_channel *ua_chan;
806 struct ust_registry_session *registry;
807
808 assert(ua_sess);
809
810 pthread_mutex_lock(&ua_sess->lock);
811
812 assert(!ua_sess->deleted);
813 ua_sess->deleted = true;
814
815 registry = get_session_registry(ua_sess);
816 /* Registry can be null on error path during initialization. */
817 if (registry) {
818 /* Push metadata for application before freeing the application. */
819 (void) push_metadata(registry, ua_sess->consumer);
820
821 /*
822 * Don't ask to close metadata for global per UID buffers. Close
823 * metadata only on destroy trace session in this case. Also, the
824 * previous push metadata could have flag the metadata registry to
825 * close so don't send a close command if closed.
826 */
827 if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
828 /* And ask to close it for this session registry. */
829 (void) close_metadata(registry, ua_sess->consumer);
830 }
831 }
832
833 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
834 node.node) {
835 ret = lttng_ht_del(ua_sess->channels, &iter);
836 assert(!ret);
837 delete_ust_app_channel(sock, ua_chan, app);
838 }
839
840 /* In case of per PID, the registry is kept in the session. */
841 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
842 struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
843 if (reg_pid) {
844 /*
845 * Registry can be null on error path during
846 * initialization.
847 */
848 buffer_reg_pid_remove(reg_pid);
849 buffer_reg_pid_destroy(reg_pid);
850 }
851 }
852
853 if (ua_sess->handle != -1) {
854 pthread_mutex_lock(&app->sock_lock);
855 ret = ustctl_release_handle(sock, ua_sess->handle);
856 pthread_mutex_unlock(&app->sock_lock);
857 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
858 ERR("UST app sock %d release session handle failed with ret %d",
859 sock, ret);
860 }
861 /* Remove session from application UST object descriptor. */
862 iter.iter.node = &ua_sess->ust_objd_node.node;
863 ret = lttng_ht_del(app->ust_sessions_objd, &iter);
864 assert(!ret);
865 }
866
867 pthread_mutex_unlock(&ua_sess->lock);
868
869 consumer_output_put(ua_sess->consumer);
870
871 call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
872 }
873
874 /*
875 * Delete a traceable application structure from the global list. Never call
876 * this function outside of a call_rcu call.
877 *
878 * RCU read side lock should _NOT_ be held when calling this function.
879 */
880 static
881 void delete_ust_app(struct ust_app *app)
882 {
883 int ret, sock;
884 struct ust_app_session *ua_sess, *tmp_ua_sess;
885
886 /*
887 * The session list lock must be held during this function to guarantee
888 * the existence of ua_sess.
889 */
890 session_lock_list();
891 /* Delete ust app sessions info */
892 sock = app->sock;
893 app->sock = -1;
894
895 /* Wipe sessions */
896 cds_list_for_each_entry_safe(ua_sess, tmp_ua_sess, &app->teardown_head,
897 teardown_node) {
898 /* Free every object in the session and the session. */
899 rcu_read_lock();
900 delete_ust_app_session(sock, ua_sess, app);
901 rcu_read_unlock();
902 }
903
904 ht_cleanup_push(app->sessions);
905 ht_cleanup_push(app->ust_sessions_objd);
906 ht_cleanup_push(app->ust_objd);
907
908 /*
909 * Wait until we have deleted the application from the sock hash table
910 * before closing this socket, otherwise an application could re-use the
911 * socket ID and race with the teardown, using the same hash table entry.
912 *
913 * It's OK to leave the close in call_rcu. We want it to stay unique for
914 * all RCU readers that could run concurrently with unregister app,
915 * therefore we _need_ to only close that socket after a grace period. So
916 * it should stay in this RCU callback.
917 *
918 * This close() is a very important step of the synchronization model so
919 * every modification to this function must be carefully reviewed.
920 */
921 ret = close(sock);
922 if (ret) {
923 PERROR("close");
924 }
925 lttng_fd_put(LTTNG_FD_APPS, 1);
926
927 DBG2("UST app pid %d deleted", app->pid);
928 free(app);
929 session_unlock_list();
930 }
931
932 /*
933 * URCU intermediate call to delete an UST app.
934 */
935 static
936 void delete_ust_app_rcu(struct rcu_head *head)
937 {
938 struct lttng_ht_node_ulong *node =
939 caa_container_of(head, struct lttng_ht_node_ulong, head);
940 struct ust_app *app =
941 caa_container_of(node, struct ust_app, pid_n);
942
943 DBG3("Call RCU deleting app PID %d", app->pid);
944 delete_ust_app(app);
945 }
946
947 /*
948 * Delete the session from the application ht and delete the data structure by
949 * freeing every object inside and releasing them.
950 *
951 * The session list lock must be held by the caller.
952 */
953 static void destroy_app_session(struct ust_app *app,
954 struct ust_app_session *ua_sess)
955 {
956 int ret;
957 struct lttng_ht_iter iter;
958
959 assert(app);
960 assert(ua_sess);
961
962 iter.iter.node = &ua_sess->node.node;
963 ret = lttng_ht_del(app->sessions, &iter);
964 if (ret) {
965 /* Already scheduled for teardown. */
966 goto end;
967 }
968
969 /* Once deleted, free the data structure. */
970 delete_ust_app_session(app->sock, ua_sess, app);
971
972 end:
973 return;
974 }
975
976 /*
977 * Alloc new UST app session.
978 */
979 static
980 struct ust_app_session *alloc_ust_app_session(struct ust_app *app)
981 {
982 struct ust_app_session *ua_sess;
983
984 /* Init most of the default value by allocating and zeroing */
985 ua_sess = zmalloc(sizeof(struct ust_app_session));
986 if (ua_sess == NULL) {
987 PERROR("malloc");
988 goto error_free;
989 }
990
991 ua_sess->handle = -1;
992 ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
993 ua_sess->metadata_attr.type = LTTNG_UST_CHAN_METADATA;
994 pthread_mutex_init(&ua_sess->lock, NULL);
995
996 return ua_sess;
997
998 error_free:
999 return NULL;
1000 }
1001
1002 /*
1003 * Alloc new UST app channel.
1004 */
1005 static
1006 struct ust_app_channel *alloc_ust_app_channel(char *name,
1007 struct ust_app_session *ua_sess,
1008 struct lttng_ust_channel_attr *attr)
1009 {
1010 struct ust_app_channel *ua_chan;
1011
1012 /* Init most of the default value by allocating and zeroing */
1013 ua_chan = zmalloc(sizeof(struct ust_app_channel));
1014 if (ua_chan == NULL) {
1015 PERROR("malloc");
1016 goto error;
1017 }
1018
1019 /* Setup channel name */
1020 strncpy(ua_chan->name, name, sizeof(ua_chan->name));
1021 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
1022
1023 ua_chan->enabled = 1;
1024 ua_chan->handle = -1;
1025 ua_chan->session = ua_sess;
1026 ua_chan->key = get_next_channel_key();
1027 ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
1028 ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
1029 lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);
1030
1031 CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
1032 CDS_INIT_LIST_HEAD(&ua_chan->ctx_list);
1033
1034 /* Copy attributes */
1035 if (attr) {
1036 /* Translate from lttng_ust_channel to ustctl_consumer_channel_attr. */
1037 ua_chan->attr.subbuf_size = attr->subbuf_size;
1038 ua_chan->attr.num_subbuf = attr->num_subbuf;
1039 ua_chan->attr.overwrite = attr->overwrite;
1040 ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
1041 ua_chan->attr.read_timer_interval = attr->read_timer_interval;
1042 ua_chan->attr.output = attr->output;
1043 }
1044 /* By default, the channel is a per cpu channel. */
1045 ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
1046
1047 DBG3("UST app channel %s allocated", ua_chan->name);
1048
1049 return ua_chan;
1050
1051 error:
1052 return NULL;
1053 }
1054
1055 /*
1056 * Allocate and initialize a UST app stream.
1057 *
1058 * Return newly allocated stream pointer or NULL on error.
1059 */
1060 struct ust_app_stream *ust_app_alloc_stream(void)
1061 {
1062 struct ust_app_stream *stream = NULL;
1063
1064 stream = zmalloc(sizeof(*stream));
1065 if (stream == NULL) {
1066 PERROR("zmalloc ust app stream");
1067 goto error;
1068 }
1069
1070 /* Zero could be a valid value for a handle so flag it to -1. */
1071 stream->handle = -1;
1072
1073 error:
1074 return stream;
1075 }
1076
1077 /*
1078 * Alloc new UST app event.
1079 */
1080 static
1081 struct ust_app_event *alloc_ust_app_event(char *name,
1082 struct lttng_ust_event *attr)
1083 {
1084 struct ust_app_event *ua_event;
1085
1086 /* Init most of the default value by allocating and zeroing */
1087 ua_event = zmalloc(sizeof(struct ust_app_event));
1088 if (ua_event == NULL) {
1089 PERROR("malloc");
1090 goto error;
1091 }
1092
1093 ua_event->enabled = 1;
1094 strncpy(ua_event->name, name, sizeof(ua_event->name));
1095 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
1096 lttng_ht_node_init_str(&ua_event->node, ua_event->name);
1097
1098 /* Copy attributes */
1099 if (attr) {
1100 memcpy(&ua_event->attr, attr, sizeof(ua_event->attr));
1101 }
1102
1103 DBG3("UST app event %s allocated", ua_event->name);
1104
1105 return ua_event;
1106
1107 error:
1108 return NULL;
1109 }
1110
1111 /*
1112 * Alloc new UST app context.
1113 */
1114 static
1115 struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context_attr *uctx)
1116 {
1117 struct ust_app_ctx *ua_ctx;
1118
1119 ua_ctx = zmalloc(sizeof(struct ust_app_ctx));
1120 if (ua_ctx == NULL) {
1121 goto error;
1122 }
1123
1124 CDS_INIT_LIST_HEAD(&ua_ctx->list);
1125
1126 if (uctx) {
1127 memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
1128 if (uctx->ctx == LTTNG_UST_CONTEXT_APP_CONTEXT) {
1129 char *provider_name = NULL, *ctx_name = NULL;
1130
1131 provider_name = strdup(uctx->u.app_ctx.provider_name);
1132 ctx_name = strdup(uctx->u.app_ctx.ctx_name);
1133 if (!provider_name || !ctx_name) {
1134 free(provider_name);
1135 free(ctx_name);
1136 goto error;
1137 }
1138
1139 ua_ctx->ctx.u.app_ctx.provider_name = provider_name;
1140 ua_ctx->ctx.u.app_ctx.ctx_name = ctx_name;
1141 }
1142 }
1143
1144 DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);
1145 return ua_ctx;
1146 error:
1147 free(ua_ctx);
1148 return NULL;
1149 }
1150
1151 /*
1152 * Allocate a filter and copy the given original filter.
1153 *
1154 * Return allocated filter or NULL on error.
1155 */
1156 static struct lttng_filter_bytecode *copy_filter_bytecode(
1157 struct lttng_filter_bytecode *orig_f)
1158 {
1159 struct lttng_filter_bytecode *filter = NULL;
1160
1161 /* Copy filter bytecode */
1162 filter = zmalloc(sizeof(*filter) + orig_f->len);
1163 if (!filter) {
1164 PERROR("zmalloc alloc filter bytecode");
1165 goto error;
1166 }
1167
1168 memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
1169
1170 error:
1171 return filter;
1172 }
1173
1174 /*
1175 * Create a liblttng-ust filter bytecode from given bytecode.
1176 *
1177 * Return allocated filter or NULL on error.
1178 */
1179 static struct lttng_ust_filter_bytecode *create_ust_bytecode_from_bytecode(
1180 struct lttng_filter_bytecode *orig_f)
1181 {
1182 struct lttng_ust_filter_bytecode *filter = NULL;
1183
1184 /* Copy filter bytecode */
1185 filter = zmalloc(sizeof(*filter) + orig_f->len);
1186 if (!filter) {
1187 PERROR("zmalloc alloc ust filter bytecode");
1188 goto error;
1189 }
1190
1191 assert(sizeof(struct lttng_filter_bytecode) ==
1192 sizeof(struct lttng_ust_filter_bytecode));
1193 memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
1194 error:
1195 return filter;
1196 }
1197
1198 /*
1199 * Find an ust_app using the sock and return it. RCU read side lock must be
1200 * held before calling this helper function.
1201 */
1202 struct ust_app *ust_app_find_by_sock(int sock)
1203 {
1204 struct lttng_ht_node_ulong *node;
1205 struct lttng_ht_iter iter;
1206
1207 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
1208 node = lttng_ht_iter_get_node_ulong(&iter);
1209 if (node == NULL) {
1210 DBG2("UST app find by sock %d not found", sock);
1211 goto error;
1212 }
1213
1214 return caa_container_of(node, struct ust_app, sock_n);
1215
1216 error:
1217 return NULL;
1218 }
1219
1220 /*
1221 * Find an ust_app using the notify sock and return it. RCU read side lock must
1222 * be held before calling this helper function.
1223 */
1224 static struct ust_app *find_app_by_notify_sock(int sock)
1225 {
1226 struct lttng_ht_node_ulong *node;
1227 struct lttng_ht_iter iter;
1228
1229 lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
1230 &iter);
1231 node = lttng_ht_iter_get_node_ulong(&iter);
1232 if (node == NULL) {
1233 DBG2("UST app find by notify sock %d not found", sock);
1234 goto error;
1235 }
1236
1237 return caa_container_of(node, struct ust_app, notify_sock_n);
1238
1239 error:
1240 return NULL;
1241 }
1242
1243 /*
1244 * Lookup for an ust app event based on event name, filter bytecode and the
1245 * event loglevel.
1246 *
1247 * Return an ust_app_event object or NULL on error.
1248 */
1249 static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
1250 char *name, struct lttng_filter_bytecode *filter,
1251 int loglevel_value,
1252 const struct lttng_event_exclusion *exclusion)
1253 {
1254 struct lttng_ht_iter iter;
1255 struct lttng_ht_node_str *node;
1256 struct ust_app_event *event = NULL;
1257 struct ust_app_ht_key key;
1258
1259 assert(name);
1260 assert(ht);
1261
1262 /* Setup key for event lookup. */
1263 key.name = name;
1264 key.filter = filter;
1265 key.loglevel_type = loglevel_value;
1266 /* lttng_event_exclusion and lttng_ust_event_exclusion structures are similar */
1267 key.exclusion = exclusion;
1268
1269 /* Lookup using the event name as hash and a custom match fct. */
1270 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
1271 ht_match_ust_app_event, &key, &iter.iter);
1272 node = lttng_ht_iter_get_node_str(&iter);
1273 if (node == NULL) {
1274 goto end;
1275 }
1276
1277 event = caa_container_of(node, struct ust_app_event, node);
1278
1279 end:
1280 return event;
1281 }
1282
1283 /*
1284 * Create the channel context on the tracer.
1285 *
1286 * Called with UST app session lock held.
1287 */
1288 static
1289 int create_ust_channel_context(struct ust_app_channel *ua_chan,
1290 struct ust_app_ctx *ua_ctx, struct ust_app *app)
1291 {
1292 int ret;
1293
1294 health_code_update();
1295
1296 pthread_mutex_lock(&app->sock_lock);
1297 ret = ustctl_add_context(app->sock, &ua_ctx->ctx,
1298 ua_chan->obj, &ua_ctx->obj);
1299 pthread_mutex_unlock(&app->sock_lock);
1300 if (ret < 0) {
1301 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1302 ERR("UST app create channel context failed for app (pid: %d) "
1303 "with ret %d", app->pid, ret);
1304 } else {
1305 /*
1306 * This is normal behavior, an application can die during the
1307 * creation process. Don't report an error so the execution can
1308 * continue normally.
1309 */
1310 ret = 0;
1311 DBG3("UST app disable event failed. Application is dead.");
1312 }
1313 goto error;
1314 }
1315
1316 ua_ctx->handle = ua_ctx->obj->handle;
1317
1318 DBG2("UST app context handle %d created successfully for channel %s",
1319 ua_ctx->handle, ua_chan->name);
1320
1321 error:
1322 health_code_update();
1323 return ret;
1324 }
1325
1326 /*
1327 * Set the filter on the tracer.
1328 */
1329 static
1330 int set_ust_event_filter(struct ust_app_event *ua_event,
1331 struct ust_app *app)
1332 {
1333 int ret;
1334 struct lttng_ust_filter_bytecode *ust_bytecode = NULL;
1335
1336 health_code_update();
1337
1338 if (!ua_event->filter) {
1339 ret = 0;
1340 goto error;
1341 }
1342
1343 ust_bytecode = create_ust_bytecode_from_bytecode(ua_event->filter);
1344 if (!ust_bytecode) {
1345 ret = -LTTNG_ERR_NOMEM;
1346 goto error;
1347 }
1348 pthread_mutex_lock(&app->sock_lock);
1349 ret = ustctl_set_filter(app->sock, ust_bytecode,
1350 ua_event->obj);
1351 pthread_mutex_unlock(&app->sock_lock);
1352 if (ret < 0) {
1353 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1354 ERR("UST app event %s filter failed for app (pid: %d) "
1355 "with ret %d", ua_event->attr.name, app->pid, ret);
1356 } else {
1357 /*
1358 * This is normal behavior, an application can die during the
1359 * creation process. Don't report an error so the execution can
1360 * continue normally.
1361 */
1362 ret = 0;
1363 DBG3("UST app filter event failed. Application is dead.");
1364 }
1365 goto error;
1366 }
1367
1368 DBG2("UST filter set successfully for event %s", ua_event->name);
1369
1370 error:
1371 health_code_update();
1372 free(ust_bytecode);
1373 return ret;
1374 }
1375
1376 static
1377 struct lttng_ust_event_exclusion *create_ust_exclusion_from_exclusion(
1378 struct lttng_event_exclusion *exclusion)
1379 {
1380 struct lttng_ust_event_exclusion *ust_exclusion = NULL;
1381 size_t exclusion_alloc_size = sizeof(struct lttng_ust_event_exclusion) +
1382 LTTNG_UST_SYM_NAME_LEN * exclusion->count;
1383
1384 ust_exclusion = zmalloc(exclusion_alloc_size);
1385 if (!ust_exclusion) {
1386 PERROR("malloc");
1387 goto end;
1388 }
1389
1390 assert(sizeof(struct lttng_event_exclusion) ==
1391 sizeof(struct lttng_ust_event_exclusion));
1392 memcpy(ust_exclusion, exclusion, exclusion_alloc_size);
1393 end:
1394 return ust_exclusion;
1395 }
1396
1397 /*
1398 * Set event exclusions on the tracer.
1399 */
1400 static
1401 int set_ust_event_exclusion(struct ust_app_event *ua_event,
1402 struct ust_app *app)
1403 {
1404 int ret;
1405 struct lttng_ust_event_exclusion *ust_exclusion = NULL;
1406
1407 health_code_update();
1408
1409 if (!ua_event->exclusion || !ua_event->exclusion->count) {
1410 ret = 0;
1411 goto error;
1412 }
1413
1414 ust_exclusion = create_ust_exclusion_from_exclusion(
1415 ua_event->exclusion);
1416 if (!ust_exclusion) {
1417 ret = -LTTNG_ERR_NOMEM;
1418 goto error;
1419 }
1420 pthread_mutex_lock(&app->sock_lock);
1421 ret = ustctl_set_exclusion(app->sock, ust_exclusion, ua_event->obj);
1422 pthread_mutex_unlock(&app->sock_lock);
1423 if (ret < 0) {
1424 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1425 ERR("UST app event %s exclusions failed for app (pid: %d) "
1426 "with ret %d", ua_event->attr.name, app->pid, ret);
1427 } else {
1428 /*
1429 * This is normal behavior, an application can die during the
1430 * creation process. Don't report an error so the execution can
1431 * continue normally.
1432 */
1433 ret = 0;
1434 DBG3("UST app event exclusion failed. Application is dead.");
1435 }
1436 goto error;
1437 }
1438
1439 DBG2("UST exclusion set successfully for event %s", ua_event->name);
1440
1441 error:
1442 health_code_update();
1443 free(ust_exclusion);
1444 return ret;
1445 }
1446
1447 /*
1448 * Disable the specified event on to UST tracer for the UST session.
1449 */
1450 static int disable_ust_event(struct ust_app *app,
1451 struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
1452 {
1453 int ret;
1454
1455 health_code_update();
1456
1457 pthread_mutex_lock(&app->sock_lock);
1458 ret = ustctl_disable(app->sock, ua_event->obj);
1459 pthread_mutex_unlock(&app->sock_lock);
1460 if (ret < 0) {
1461 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1462 ERR("UST app event %s disable failed for app (pid: %d) "
1463 "and session handle %d with ret %d",
1464 ua_event->attr.name, app->pid, ua_sess->handle, ret);
1465 } else {
1466 /*
1467 * This is normal behavior, an application can die during the
1468 * creation process. Don't report an error so the execution can
1469 * continue normally.
1470 */
1471 ret = 0;
1472 DBG3("UST app disable event failed. Application is dead.");
1473 }
1474 goto error;
1475 }
1476
1477 DBG2("UST app event %s disabled successfully for app (pid: %d)",
1478 ua_event->attr.name, app->pid);
1479
1480 error:
1481 health_code_update();
1482 return ret;
1483 }
1484
1485 /*
1486 * Disable the specified channel on to UST tracer for the UST session.
1487 */
1488 static int disable_ust_channel(struct ust_app *app,
1489 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1490 {
1491 int ret;
1492
1493 health_code_update();
1494
1495 pthread_mutex_lock(&app->sock_lock);
1496 ret = ustctl_disable(app->sock, ua_chan->obj);
1497 pthread_mutex_unlock(&app->sock_lock);
1498 if (ret < 0) {
1499 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1500 ERR("UST app channel %s disable failed for app (pid: %d) "
1501 "and session handle %d with ret %d",
1502 ua_chan->name, app->pid, ua_sess->handle, ret);
1503 } else {
1504 /*
1505 * This is normal behavior, an application can die during the
1506 * creation process. Don't report an error so the execution can
1507 * continue normally.
1508 */
1509 ret = 0;
1510 DBG3("UST app disable channel failed. Application is dead.");
1511 }
1512 goto error;
1513 }
1514
1515 DBG2("UST app channel %s disabled successfully for app (pid: %d)",
1516 ua_chan->name, app->pid);
1517
1518 error:
1519 health_code_update();
1520 return ret;
1521 }
1522
1523 /*
1524 * Enable the specified channel on to UST tracer for the UST session.
1525 */
1526 static int enable_ust_channel(struct ust_app *app,
1527 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1528 {
1529 int ret;
1530
1531 health_code_update();
1532
1533 pthread_mutex_lock(&app->sock_lock);
1534 ret = ustctl_enable(app->sock, ua_chan->obj);
1535 pthread_mutex_unlock(&app->sock_lock);
1536 if (ret < 0) {
1537 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1538 ERR("UST app channel %s enable failed for app (pid: %d) "
1539 "and session handle %d with ret %d",
1540 ua_chan->name, app->pid, ua_sess->handle, ret);
1541 } else {
1542 /*
1543 * This is normal behavior, an application can die during the
1544 * creation process. Don't report an error so the execution can
1545 * continue normally.
1546 */
1547 ret = 0;
1548 DBG3("UST app enable channel failed. Application is dead.");
1549 }
1550 goto error;
1551 }
1552
1553 ua_chan->enabled = 1;
1554
1555 DBG2("UST app channel %s enabled successfully for app (pid: %d)",
1556 ua_chan->name, app->pid);
1557
1558 error:
1559 health_code_update();
1560 return ret;
1561 }
1562
1563 /*
1564 * Enable the specified event on to UST tracer for the UST session.
1565 */
1566 static int enable_ust_event(struct ust_app *app,
1567 struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
1568 {
1569 int ret;
1570
1571 health_code_update();
1572
1573 pthread_mutex_lock(&app->sock_lock);
1574 ret = ustctl_enable(app->sock, ua_event->obj);
1575 pthread_mutex_unlock(&app->sock_lock);
1576 if (ret < 0) {
1577 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1578 ERR("UST app event %s enable failed for app (pid: %d) "
1579 "and session handle %d with ret %d",
1580 ua_event->attr.name, app->pid, ua_sess->handle, ret);
1581 } else {
1582 /*
1583 * This is normal behavior, an application can die during the
1584 * creation process. Don't report an error so the execution can
1585 * continue normally.
1586 */
1587 ret = 0;
1588 DBG3("UST app enable event failed. Application is dead.");
1589 }
1590 goto error;
1591 }
1592
1593 DBG2("UST app event %s enabled successfully for app (pid: %d)",
1594 ua_event->attr.name, app->pid);
1595
1596 error:
1597 health_code_update();
1598 return ret;
1599 }
1600
/*
 * Send channel and stream buffer to application.
 *
 * The channel object is sent first, then every stream attached to it.
 * Each stream is deleted from the local list once handed to the tracer;
 * on full success the channel is flagged as sent (is_sent).
 *
 * Return 0 on success. On error, a negative value is returned; -ENOTCONN
 * specifically means the application exited during the transfer.
 */
static int send_channel_pid_to_ust(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
{
	int ret;
	struct ust_app_stream *stream, *stmp;

	assert(app);
	assert(ua_sess);
	assert(ua_chan);

	health_code_update();

	DBG("UST app sending channel %s to UST app sock %d", ua_chan->name,
			app->sock);

	/* Send channel to the application. */
	ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
	if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
		ret = -ENOTCONN;	/* Caused by app exiting. */
		goto error;
	} else if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Send all streams to application. */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			ret = -ENOTCONN;	/* Caused by app exiting. */
			goto error;
		} else if (ret < 0) {
			goto error;
		}
		/* We don't need the stream anymore once sent to the tracer. */
		cds_list_del(&stream->list);
		delete_ust_app_stream(-1, stream, app);
	}
	/* Flag the channel that it is sent to the application. */
	ua_chan->is_sent = 1;

error:
	health_code_update();
	return ret;
}
1652
1653 /*
1654 * Create the specified event onto the UST tracer for a UST session.
1655 *
1656 * Should be called with session mutex held.
1657 */
1658 static
1659 int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
1660 struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
1661 {
1662 int ret = 0;
1663
1664 health_code_update();
1665
1666 /* Create UST event on tracer */
1667 pthread_mutex_lock(&app->sock_lock);
1668 ret = ustctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
1669 &ua_event->obj);
1670 pthread_mutex_unlock(&app->sock_lock);
1671 if (ret < 0) {
1672 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1673 ERR("Error ustctl create event %s for app pid: %d with ret %d",
1674 ua_event->attr.name, app->pid, ret);
1675 } else {
1676 /*
1677 * This is normal behavior, an application can die during the
1678 * creation process. Don't report an error so the execution can
1679 * continue normally.
1680 */
1681 ret = 0;
1682 DBG3("UST app create event failed. Application is dead.");
1683 }
1684 goto error;
1685 }
1686
1687 ua_event->handle = ua_event->obj->handle;
1688
1689 DBG2("UST app event %s created successfully for pid:%d",
1690 ua_event->attr.name, app->pid);
1691
1692 health_code_update();
1693
1694 /* Set filter if one is present. */
1695 if (ua_event->filter) {
1696 ret = set_ust_event_filter(ua_event, app);
1697 if (ret < 0) {
1698 goto error;
1699 }
1700 }
1701
1702 /* Set exclusions for the event */
1703 if (ua_event->exclusion) {
1704 ret = set_ust_event_exclusion(ua_event, app);
1705 if (ret < 0) {
1706 goto error;
1707 }
1708 }
1709
1710 /* If event not enabled, disable it on the tracer */
1711 if (ua_event->enabled) {
1712 /*
1713 * We now need to explicitly enable the event, since it
1714 * is now disabled at creation.
1715 */
1716 ret = enable_ust_event(app, ua_sess, ua_event);
1717 if (ret < 0) {
1718 /*
1719 * If we hit an EPERM, something is wrong with our enable call. If
1720 * we get an EEXIST, there is a problem on the tracer side since we
1721 * just created it.
1722 */
1723 switch (ret) {
1724 case -LTTNG_UST_ERR_PERM:
1725 /* Code flow problem */
1726 assert(0);
1727 case -LTTNG_UST_ERR_EXIST:
1728 /* It's OK for our use case. */
1729 ret = 0;
1730 break;
1731 default:
1732 break;
1733 }
1734 goto error;
1735 }
1736 }
1737
1738 error:
1739 health_code_update();
1740 return ret;
1741 }
1742
/*
 * Copy data between an UST app event and a LTT event.
 *
 * Copies the name, enabled state, UST attributes, filter bytecode and
 * exclusion list from the tracing registry event into the per-application
 * event. Filter and exclusion copies are best-effort: on allocation
 * failure the corresponding field is left NULL and the copy continues.
 */
static void shadow_copy_event(struct ust_app_event *ua_event,
		struct ltt_ust_event *uevent)
{
	size_t exclusion_alloc_size;

	/* Name is always NULL terminated after the copy. */
	strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
	ua_event->name[sizeof(ua_event->name) - 1] = '\0';

	ua_event->enabled = uevent->enabled;

	/* Copy event attributes */
	memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));

	/* Copy filter bytecode */
	if (uevent->filter) {
		ua_event->filter = copy_filter_bytecode(uevent->filter);
		/* Filter might be NULL here in case of ENOMEM. */
	}

	/* Copy exclusion data (header plus trailing name array). */
	if (uevent->exclusion) {
		exclusion_alloc_size = sizeof(struct lttng_event_exclusion) +
				LTTNG_UST_SYM_NAME_LEN * uevent->exclusion->count;
		ua_event->exclusion = zmalloc(exclusion_alloc_size);
		if (ua_event->exclusion == NULL) {
			/* Only logged; the event is copied without exclusions. */
			PERROR("malloc");
		} else {
			memcpy(ua_event->exclusion, uevent->exclusion,
					exclusion_alloc_size);
		}
	}
}
1778
/*
 * Copy data between an UST app channel and a LTT channel.
 *
 * Copies name, tracefile settings, attributes (translated field by field
 * since the layouts differ), contexts and events from the tracing registry
 * channel into the per-application channel. Context and event copies are
 * best-effort: allocation failures skip the entry and continue.
 */
static void shadow_copy_channel(struct ust_app_channel *ua_chan,
		struct ltt_ust_channel *uchan)
{
	struct lttng_ht_iter iter;
	struct ltt_ust_event *uevent;
	struct ltt_ust_context *uctx;
	struct ust_app_event *ua_event;

	DBG2("UST app shadow copy of channel %s started", ua_chan->name);

	/* Name is always NULL terminated after the copy. */
	strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
	ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';

	ua_chan->tracefile_size = uchan->tracefile_size;
	ua_chan->tracefile_count = uchan->tracefile_count;

	/* Copy event attributes since the layout is different. */
	ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
	ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
	ua_chan->attr.overwrite = uchan->attr.overwrite;
	ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
	ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
	ua_chan->monitor_timer_interval = uchan->monitor_timer_interval;
	ua_chan->attr.output = uchan->attr.output;
	/*
	 * Note that the attribute channel type is not set since the channel on the
	 * tracing registry side does not have this information.
	 */

	ua_chan->enabled = uchan->enabled;
	ua_chan->tracing_channel_id = uchan->id;

	/* Copy every context; entries that fail to allocate are skipped. */
	cds_list_for_each_entry(uctx, &uchan->ctx_list, list) {
		struct ust_app_ctx *ua_ctx = alloc_ust_app_ctx(&uctx->ctx);

		if (ua_ctx == NULL) {
			continue;
		}
		/* Each context lives in both the hash table and the list. */
		lttng_ht_node_init_ulong(&ua_ctx->node,
				(unsigned long) ua_ctx->ctx.ctx);
		lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
		cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
	}

	/* Copy all events from ltt ust channel to ust app channel */
	cds_lfht_for_each_entry(uchan->events->ht, &iter.iter, uevent, node.node) {
		/* Only copy events the app channel does not already hold. */
		ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
				uevent->filter, uevent->attr.loglevel, uevent->exclusion);
		if (ua_event == NULL) {
			DBG2("UST event %s not found on shadow copy channel",
					uevent->attr.name);
			ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
			if (ua_event == NULL) {
				continue;
			}
			shadow_copy_event(ua_event, uevent);
			add_unique_ust_app_event(ua_chan, ua_event);
		}
	}

	DBG3("UST app shadow copy of channel %s done", ua_chan->name);
}
1844
/*
 * Copy data between a UST app session and a regular LTT session.
 *
 * Initializes the per-application session (ids, credentials, buffer
 * scheme, consumer output, trace and shm paths) from the tracing session,
 * then instantiates a shadow copy of every channel of the global UST
 * domain the app session does not already have.
 *
 * On error, the reference taken on the session's consumer output is
 * released.
 */
static void shadow_copy_session(struct ust_app_session *ua_sess,
		struct ltt_ust_session *usess, struct ust_app *app)
{
	struct lttng_ht_node_str *ua_chan_node;
	struct lttng_ht_iter iter;
	struct ltt_ust_channel *uchan;
	struct ust_app_channel *ua_chan;
	time_t rawtime;
	struct tm *timeinfo;
	char datetime[16];
	int ret;
	char tmp_shm_path[PATH_MAX];

	/* Get date and time for unique app path */
	time(&rawtime);
	timeinfo = localtime(&rawtime);
	strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);

	DBG2("Shadow copy of session handle %d", ua_sess->handle);

	ua_sess->tracing_id = usess->id;
	ua_sess->id = get_next_session_id();
	/* Real credentials come from the app; effective ones from the session. */
	ua_sess->uid = app->uid;
	ua_sess->gid = app->gid;
	ua_sess->euid = usess->uid;
	ua_sess->egid = usess->gid;
	ua_sess->buffer_type = usess->buffer_type;
	ua_sess->bits_per_long = app->bits_per_long;

	/* There is only one consumer object per session possible. */
	consumer_output_get(usess->consumer);
	ua_sess->consumer = usess->consumer;

	ua_sess->output_traces = usess->output_traces;
	ua_sess->live_timer_interval = usess->live_timer_interval;
	copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
			&usess->metadata_attr);

	/*
	 * Build the trace sub-path: per-PID paths embed the app name, pid and
	 * timestamp; per-UID paths embed the uid and app bitness.
	 */
	switch (ua_sess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
				DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
				datetime);
		break;
	case LTTNG_BUFFER_PER_UID:
		ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
				DEFAULT_UST_TRACE_UID_PATH, ua_sess->uid, app->bits_per_long);
		break;
	default:
		assert(0);
		goto error;
	}
	if (ret < 0) {
		PERROR("asprintf UST shadow copy session");
		assert(0);
		goto error;
	}

	/* Both shm paths are always NULL terminated after the copies. */
	strncpy(ua_sess->root_shm_path, usess->root_shm_path,
			sizeof(ua_sess->root_shm_path));
	ua_sess->root_shm_path[sizeof(ua_sess->root_shm_path) - 1] = '\0';
	strncpy(ua_sess->shm_path, usess->shm_path,
			sizeof(ua_sess->shm_path));
	ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
	if (ua_sess->shm_path[0]) {
		/* Append the same buffer-scheme sub-path to the shm path. */
		switch (ua_sess->buffer_type) {
		case LTTNG_BUFFER_PER_PID:
			ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
					DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s",
					app->name, app->pid, datetime);
			break;
		case LTTNG_BUFFER_PER_UID:
			ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
					DEFAULT_UST_TRACE_UID_PATH,
					app->uid, app->bits_per_long);
			break;
		default:
			assert(0);
			goto error;
		}
		if (ret < 0) {
			PERROR("sprintf UST shadow copy session");
			assert(0);
			goto error;
		}
		strncat(ua_sess->shm_path, tmp_shm_path,
				sizeof(ua_sess->shm_path) - strlen(ua_sess->shm_path) - 1);
		ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
	}

	/* Iterate over all channels in global domain. */
	cds_lfht_for_each_entry(usess->domain_global.channels->ht, &iter.iter,
			uchan, node.node) {
		struct lttng_ht_iter uiter;

		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		if (ua_chan_node != NULL) {
			/* Channel already exists in the app session. Continuing. */
			continue;
		}

		DBG2("Channel %s not found on shadow session copy, creating it",
				uchan->name);
		ua_chan = alloc_ust_app_channel(uchan->name, ua_sess,
				&uchan->attr);
		if (ua_chan == NULL) {
			/* malloc failed FIXME: Might want to do handle ENOMEM .. */
			continue;
		}
		shadow_copy_channel(ua_chan, uchan);
		/*
		 * The concept of metadata channel does not exist on the tracing
		 * registry side of the session daemon so this can only be a per CPU
		 * channel and not metadata.
		 */
		ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;

		lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
	}
	return;

error:
	consumer_output_put(ua_sess->consumer);
}
1973
/*
 * Lookup session wrapper.
 *
 * Fills *iter with the lookup result for the app session matching the
 * tracing session id in the application's session hash table.
 */
static
void __lookup_session_by_app(struct ltt_ust_session *usess,
		struct ust_app *app, struct lttng_ht_iter *iter)
{
	/* Get right UST app session from app */
	lttng_ht_lookup(app->sessions, &usess->id, iter);
}
1984
1985 /*
1986 * Return ust app session from the app session hashtable using the UST session
1987 * id.
1988 */
1989 static struct ust_app_session *lookup_session_by_app(
1990 struct ltt_ust_session *usess, struct ust_app *app)
1991 {
1992 struct lttng_ht_iter iter;
1993 struct lttng_ht_node_u64 *node;
1994
1995 __lookup_session_by_app(usess, app, &iter);
1996 node = lttng_ht_iter_get_node_u64(&iter);
1997 if (node == NULL) {
1998 goto error;
1999 }
2000
2001 return caa_container_of(node, struct ust_app_session, node);
2002
2003 error:
2004 return NULL;
2005 }
2006
/*
 * Setup buffer registry per PID for the given session and application. If none
 * is found, a new one is created, added to the global registry and
 * initialized. If regp is valid, it's set with the newly created object.
 *
 * Return 0 on success or else a negative value.
 */
static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
		struct ust_app *app, struct buffer_reg_pid **regp)
{
	int ret = 0;
	struct buffer_reg_pid *reg_pid;

	assert(ua_sess);
	assert(app);

	rcu_read_lock();

	reg_pid = buffer_reg_pid_find(ua_sess->id);
	if (!reg_pid) {
		/*
		 * This is the create channel path meaning that if there is NO
		 * registry available, we have to create one for this session.
		 */
		ret = buffer_reg_pid_create(ua_sess->id, &reg_pid,
				ua_sess->root_shm_path, ua_sess->shm_path);
		if (ret < 0) {
			goto error;
		}
	} else {
		/* A registry already exists; skip the init below. */
		goto end;
	}

	/* Initialize registry with the app's ABI and effective credentials. */
	ret = ust_registry_session_init(&reg_pid->registry->reg.ust, app,
			app->bits_per_long, app->uint8_t_alignment,
			app->uint16_t_alignment, app->uint32_t_alignment,
			app->uint64_t_alignment, app->long_alignment,
			app->byte_order, app->version.major,
			app->version.minor, reg_pid->root_shm_path,
			reg_pid->shm_path,
			ua_sess->euid, ua_sess->egid);
	if (ret < 0) {
		/*
		 * reg_pid->registry->reg.ust is NULL upon error, so we need to
		 * destroy the buffer registry, because it is always expected
		 * that if the buffer registry can be found, its ust registry is
		 * non-NULL.
		 */
		buffer_reg_pid_destroy(reg_pid);
		goto error;
	}

	/* Publish the new registry in the global per-PID registry. */
	buffer_reg_pid_add(reg_pid);

	DBG3("UST app buffer registry per PID created successfully");

end:
	if (regp) {
		*regp = reg_pid;
	}
error:
	rcu_read_unlock();
	return ret;
}
2072
/*
 * Setup buffer registry per UID for the given session and application. If none
 * is found, a new one is created, added to the global registry and
 * initialized. If regp is valid, it's set with the newly created object.
 *
 * Return 0 on success or else a negative value.
 */
static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
		struct ust_app_session *ua_sess,
		struct ust_app *app, struct buffer_reg_uid **regp)
{
	int ret = 0;
	struct buffer_reg_uid *reg_uid;

	assert(usess);
	assert(app);

	rcu_read_lock();

	/* Per-UID registries are keyed by session id, bitness and uid. */
	reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
	if (!reg_uid) {
		/*
		 * This is the create channel path meaning that if there is NO
		 * registry available, we have to create one for this session.
		 */
		ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
				LTTNG_DOMAIN_UST, &reg_uid,
				ua_sess->root_shm_path, ua_sess->shm_path);
		if (ret < 0) {
			goto error;
		}
	} else {
		/* A registry already exists; skip the init below. */
		goto end;
	}

	/*
	 * Initialize registry with the app's ABI. Unlike the per-PID case, the
	 * app pointer is NULL and the session's credentials are used since the
	 * registry is shared by all apps of this uid/bitness.
	 */
	ret = ust_registry_session_init(&reg_uid->registry->reg.ust, NULL,
			app->bits_per_long, app->uint8_t_alignment,
			app->uint16_t_alignment, app->uint32_t_alignment,
			app->uint64_t_alignment, app->long_alignment,
			app->byte_order, app->version.major,
			app->version.minor, reg_uid->root_shm_path,
			reg_uid->shm_path, usess->uid, usess->gid);
	if (ret < 0) {
		/*
		 * reg_uid->registry->reg.ust is NULL upon error, so we need to
		 * destroy the buffer registry, because it is always expected
		 * that if the buffer registry can be found, its ust registry is
		 * non-NULL.
		 */
		buffer_reg_uid_destroy(reg_uid, NULL);
		goto error;
	}
	/* Add node to teardown list of the session. */
	cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);

	buffer_reg_uid_add(reg_uid);

	DBG3("UST app buffer registry per UID created successfully");
end:
	if (regp) {
		*regp = reg_uid;
	}
error:
	rcu_read_unlock();
	return ret;
}
2140
2141 /*
2142 * Create a session on the tracer side for the given app.
2143 *
2144 * On success, ua_sess_ptr is populated with the session pointer or else left
2145 * untouched. If the session was created, is_created is set to 1. On error,
2146 * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
2147 * be NULL.
2148 *
2149 * Returns 0 on success or else a negative code which is either -ENOMEM or
2150 * -ENOTCONN which is the default code if the ustctl_create_session fails.
2151 */
static int create_ust_app_session(struct ltt_ust_session *usess,
		struct ust_app *app, struct ust_app_session **ua_sess_ptr,
		int *is_created)
{
	int ret, created = 0;
	struct ust_app_session *ua_sess;

	assert(usess);
	assert(app);
	assert(ua_sess_ptr);

	health_code_update();

	/* Reuse the app session if one already exists for this tracing session. */
	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
				app->pid, usess->id);
		ua_sess = alloc_ust_app_session(app);
		if (ua_sess == NULL) {
			/* Only malloc can failed so something is really wrong */
			ret = -ENOMEM;
			goto error;
		}
		shadow_copy_session(ua_sess, usess, app);
		created = 1;
	}

	/* The buffering scheme decides which registry backs this session. */
	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		/* Init local registry. */
		ret = setup_buffer_reg_pid(ua_sess, app, NULL);
		if (ret < 0) {
			delete_ust_app_session(-1, ua_sess, app);
			goto error;
		}
		break;
	case LTTNG_BUFFER_PER_UID:
		/* Look for a global registry. If none exists, create one. */
		ret = setup_buffer_reg_uid(usess, ua_sess, app, NULL);
		if (ret < 0) {
			delete_ust_app_session(-1, ua_sess, app);
			goto error;
		}
		break;
	default:
		assert(0);
		ret = -EINVAL;
		goto error;
	}

	health_code_update();

	/* A handle of -1 means the tracer-side session was never created. */
	if (ua_sess->handle == -1) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_create_session(app->sock);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("Creating session for app pid %d with ret %d",
						app->pid, ret);
			} else {
				DBG("UST app creating session failed. Application is dead");
				/*
				 * This is normal behavior, an application can die during the
				 * creation process. Don't report an error so the execution can
				 * continue normally. This will get flagged ENOTCONN and the
				 * caller will handle it.
				 */
				ret = 0;
			}
			delete_ust_app_session(-1, ua_sess, app);
			if (ret != -ENOMEM) {
				/*
				 * Tracer is probably gone or got an internal error so let's
				 * behave like it will soon unregister or not usable.
				 */
				ret = -ENOTCONN;
			}
			goto error;
		}

		/* ustctl_create_session returns the tracer-side session handle. */
		ua_sess->handle = ret;

		/* Add ust app session to app's HT */
		lttng_ht_node_init_u64(&ua_sess->node,
				ua_sess->tracing_id);
		lttng_ht_add_unique_u64(app->sessions, &ua_sess->node);
		lttng_ht_node_init_ulong(&ua_sess->ust_objd_node, ua_sess->handle);
		lttng_ht_add_unique_ulong(app->ust_sessions_objd,
				&ua_sess->ust_objd_node);

		DBG2("UST app session created successfully with handle %d", ret);
	}

	*ua_sess_ptr = ua_sess;
	if (is_created) {
		*is_created = created;
	}

	/* Everything went well. */
	ret = 0;

error:
	health_code_update();
	return ret;
}
2258
2259 /*
2260 * Match function for a hash table lookup of ust_app_ctx.
2261 *
2262 * It matches an ust app context based on the context type and, in the case
2263 * of perf counters, their name.
2264 */
2265 static int ht_match_ust_app_ctx(struct cds_lfht_node *node, const void *_key)
2266 {
2267 struct ust_app_ctx *ctx;
2268 const struct lttng_ust_context_attr *key;
2269
2270 assert(node);
2271 assert(_key);
2272
2273 ctx = caa_container_of(node, struct ust_app_ctx, node.node);
2274 key = _key;
2275
2276 /* Context type */
2277 if (ctx->ctx.ctx != key->ctx) {
2278 goto no_match;
2279 }
2280
2281 switch(key->ctx) {
2282 case LTTNG_UST_CONTEXT_PERF_THREAD_COUNTER:
2283 if (strncmp(key->u.perf_counter.name,
2284 ctx->ctx.u.perf_counter.name,
2285 sizeof(key->u.perf_counter.name))) {
2286 goto no_match;
2287 }
2288 break;
2289 case LTTNG_UST_CONTEXT_APP_CONTEXT:
2290 if (strcmp(key->u.app_ctx.provider_name,
2291 ctx->ctx.u.app_ctx.provider_name) ||
2292 strcmp(key->u.app_ctx.ctx_name,
2293 ctx->ctx.u.app_ctx.ctx_name)) {
2294 goto no_match;
2295 }
2296 break;
2297 default:
2298 break;
2299 }
2300
2301 /* Match. */
2302 return 1;
2303
2304 no_match:
2305 return 0;
2306 }
2307
2308 /*
2309 * Lookup for an ust app context from an lttng_ust_context.
2310 *
2311 * Must be called while holding RCU read side lock.
2312 * Return an ust_app_ctx object or NULL on error.
2313 */
2314 static
2315 struct ust_app_ctx *find_ust_app_context(struct lttng_ht *ht,
2316 struct lttng_ust_context_attr *uctx)
2317 {
2318 struct lttng_ht_iter iter;
2319 struct lttng_ht_node_ulong *node;
2320 struct ust_app_ctx *app_ctx = NULL;
2321
2322 assert(uctx);
2323 assert(ht);
2324
2325 /* Lookup using the lttng_ust_context_type and a custom match fct. */
2326 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) uctx->ctx, lttng_ht_seed),
2327 ht_match_ust_app_ctx, uctx, &iter.iter);
2328 node = lttng_ht_iter_get_node_ulong(&iter);
2329 if (!node) {
2330 goto end;
2331 }
2332
2333 app_ctx = caa_container_of(node, struct ust_app_ctx, node);
2334
2335 end:
2336 return app_ctx;
2337 }
2338
2339 /*
2340 * Create a context for the channel on the tracer.
2341 *
2342 * Called with UST app session lock held and a RCU read side lock.
2343 */
2344 static
2345 int create_ust_app_channel_context(struct ust_app_session *ua_sess,
2346 struct ust_app_channel *ua_chan,
2347 struct lttng_ust_context_attr *uctx,
2348 struct ust_app *app)
2349 {
2350 int ret = 0;
2351 struct ust_app_ctx *ua_ctx;
2352
2353 DBG2("UST app adding context to channel %s", ua_chan->name);
2354
2355 ua_ctx = find_ust_app_context(ua_chan->ctx, uctx);
2356 if (ua_ctx) {
2357 ret = -EEXIST;
2358 goto error;
2359 }
2360
2361 ua_ctx = alloc_ust_app_ctx(uctx);
2362 if (ua_ctx == NULL) {
2363 /* malloc failed */
2364 ret = -1;
2365 goto error;
2366 }
2367
2368 lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
2369 lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
2370 cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
2371
2372 ret = create_ust_channel_context(ua_chan, ua_ctx, app);
2373 if (ret < 0) {
2374 goto error;
2375 }
2376
2377 error:
2378 return ret;
2379 }
2380
2381 /*
2382 * Enable on the tracer side a ust app event for the session and channel.
2383 *
2384 * Called with UST app session lock held.
2385 */
2386 static
2387 int enable_ust_app_event(struct ust_app_session *ua_sess,
2388 struct ust_app_event *ua_event, struct ust_app *app)
2389 {
2390 int ret;
2391
2392 ret = enable_ust_event(app, ua_sess, ua_event);
2393 if (ret < 0) {
2394 goto error;
2395 }
2396
2397 ua_event->enabled = 1;
2398
2399 error:
2400 return ret;
2401 }
2402
2403 /*
2404 * Disable on the tracer side a ust app event for the session and channel.
2405 */
2406 static int disable_ust_app_event(struct ust_app_session *ua_sess,
2407 struct ust_app_event *ua_event, struct ust_app *app)
2408 {
2409 int ret;
2410
2411 ret = disable_ust_event(app, ua_sess, ua_event);
2412 if (ret < 0) {
2413 goto error;
2414 }
2415
2416 ua_event->enabled = 0;
2417
2418 error:
2419 return ret;
2420 }
2421
2422 /*
2423 * Lookup ust app channel for session and disable it on the tracer side.
2424 */
2425 static
2426 int disable_ust_app_channel(struct ust_app_session *ua_sess,
2427 struct ust_app_channel *ua_chan, struct ust_app *app)
2428 {
2429 int ret;
2430
2431 ret = disable_ust_channel(app, ua_sess, ua_chan);
2432 if (ret < 0) {
2433 goto error;
2434 }
2435
2436 ua_chan->enabled = 0;
2437
2438 error:
2439 return ret;
2440 }
2441
2442 /*
2443 * Lookup ust app channel for session and enable it on the tracer side. This
2444 * MUST be called with a RCU read side lock acquired.
2445 */
2446 static int enable_ust_app_channel(struct ust_app_session *ua_sess,
2447 struct ltt_ust_channel *uchan, struct ust_app *app)
2448 {
2449 int ret = 0;
2450 struct lttng_ht_iter iter;
2451 struct lttng_ht_node_str *ua_chan_node;
2452 struct ust_app_channel *ua_chan;
2453
2454 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
2455 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
2456 if (ua_chan_node == NULL) {
2457 DBG2("Unable to find channel %s in ust session id %" PRIu64,
2458 uchan->name, ua_sess->tracing_id);
2459 goto error;
2460 }
2461
2462 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
2463
2464 ret = enable_ust_channel(app, ua_sess, ua_chan);
2465 if (ret < 0) {
2466 goto error;
2467 }
2468
2469 error:
2470 return ret;
2471 }
2472
2473 /*
2474 * Ask the consumer to create a channel and get it if successful.
2475 *
2476 * Called with UST app session lock held.
2477 *
2478 * Return 0 on success or else a negative value.
2479 */
static int do_consumer_create_channel(struct ltt_ust_session *usess,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
		int bitness, struct ust_registry_session *registry)
{
	int ret;
	unsigned int nb_fd = 0;
	struct consumer_socket *socket;

	assert(usess);
	assert(ua_sess);
	assert(ua_chan);
	assert(registry);

	rcu_read_lock();
	health_code_update();

	/* Get the right consumer socket for the application. */
	socket = consumer_find_socket_by_bitness(bitness, usess->consumer);
	if (!socket) {
		ret = -EINVAL;
		goto error;
	}

	health_code_update();

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create channel");
		goto error;
	}

	/*
	 * Ask consumer to create channel. The consumer will return the number of
	 * stream we have to expect.
	 */
	ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
			registry);
	if (ret < 0) {
		goto error_ask;
	}

	/*
	 * Compute the number of fd needed before receiving them. It must be 2 per
	 * stream (2 being the default value here).
	 */
	nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;

	/* Reserve the amount of file descriptor we need. */
	ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create channel");
		goto error_fd_get_stream;
	}

	health_code_update();

	/*
	 * Now get the channel from the consumer. This call wil populate the stream
	 * list of that channel and set the ust objects.
	 */
	if (usess->consumer->enabled) {
		ret = ust_consumer_get_channel(socket, ua_chan);
		if (ret < 0) {
			goto error_destroy;
		}
	}

	rcu_read_unlock();
	return 0;

	/* Unwind in reverse order of acquisition. */
error_destroy:
	/* Release the per-stream fds reserved above. */
	lttng_fd_put(LTTNG_FD_APPS, nb_fd);
error_fd_get_stream:
	/*
	 * Initiate a destroy channel on the consumer since we had an error
	 * handling it on our side. The return value is of no importance since we
	 * already have a ret value set by the previous error that we need to
	 * return.
	 */
	(void) ust_consumer_destroy_channel(socket, ua_chan);
error_ask:
	/* Release the single channel fd. */
	lttng_fd_put(LTTNG_FD_APPS, 1);
error:
	health_code_update();
	rcu_read_unlock();
	return ret;
}
2568
2569 /*
2570 * Duplicate the ust data object of the ust app stream and save it in the
2571 * buffer registry stream.
2572 *
2573 * Return 0 on success or else a negative value.
2574 */
2575 static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
2576 struct ust_app_stream *stream)
2577 {
2578 int ret;
2579
2580 assert(reg_stream);
2581 assert(stream);
2582
2583 /* Reserve the amount of file descriptor we need. */
2584 ret = lttng_fd_get(LTTNG_FD_APPS, 2);
2585 if (ret < 0) {
2586 ERR("Exhausted number of available FD upon duplicate stream");
2587 goto error;
2588 }
2589
2590 /* Duplicate object for stream once the original is in the registry. */
2591 ret = ustctl_duplicate_ust_object_data(&stream->obj,
2592 reg_stream->obj.ust);
2593 if (ret < 0) {
2594 ERR("Duplicate stream obj from %p to %p failed with ret %d",
2595 reg_stream->obj.ust, stream->obj, ret);
2596 lttng_fd_put(LTTNG_FD_APPS, 2);
2597 goto error;
2598 }
2599 stream->handle = stream->obj->handle;
2600
2601 error:
2602 return ret;
2603 }
2604
2605 /*
2606 * Duplicate the ust data object of the ust app. channel and save it in the
2607 * buffer registry channel.
2608 *
2609 * Return 0 on success or else a negative value.
2610 */
static int duplicate_channel_object(struct buffer_reg_channel *reg_chan,
		struct ust_app_channel *ua_chan)
{
	int ret;

	assert(reg_chan);
	assert(ua_chan);

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon duplicate channel");
		goto error_fd_get;
	}

	/* Duplicate object for stream once the original is in the registry. */
	ret = ustctl_duplicate_ust_object_data(&ua_chan->obj, reg_chan->obj.ust);
	if (ret < 0) {
		ERR("Duplicate channel obj from %p to %p failed with ret: %d",
				reg_chan->obj.ust, ua_chan->obj, ret);
		goto error;
	}
	/* The duplicated object carries the tracer-side handle. */
	ua_chan->handle = ua_chan->obj->handle;

	return 0;

error:
	/* Release the fd reserved above since duplication failed. */
	lttng_fd_put(LTTNG_FD_APPS, 1);
error_fd_get:
	return ret;
}
2642
2643 /*
2644 * For a given channel buffer registry, setup all streams of the given ust
2645 * application channel.
2646 *
2647 * Return 0 on success or else a negative value.
2648 */
2649 static int setup_buffer_reg_streams(struct buffer_reg_channel *reg_chan,
2650 struct ust_app_channel *ua_chan,
2651 struct ust_app *app)
2652 {
2653 int ret = 0;
2654 struct ust_app_stream *stream, *stmp;
2655
2656 assert(reg_chan);
2657 assert(ua_chan);
2658
2659 DBG2("UST app setup buffer registry stream");
2660
2661 /* Send all streams to application. */
2662 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
2663 struct buffer_reg_stream *reg_stream;
2664
2665 ret = buffer_reg_stream_create(&reg_stream);
2666 if (ret < 0) {
2667 goto error;
2668 }
2669
2670 /*
2671 * Keep original pointer and nullify it in the stream so the delete
2672 * stream call does not release the object.
2673 */
2674 reg_stream->obj.ust = stream->obj;
2675 stream->obj = NULL;
2676 buffer_reg_stream_add(reg_stream, reg_chan);
2677
2678 /* We don't need the streams anymore. */
2679 cds_list_del(&stream->list);
2680 delete_ust_app_stream(-1, stream, app);
2681 }
2682
2683 error:
2684 return ret;
2685 }
2686
2687 /*
2688 * Create a buffer registry channel for the given session registry and
2689 * application channel object. If regp pointer is valid, it's set with the
2690 * created object. Important, the created object is NOT added to the session
2691 * registry hash table.
2692 *
2693 * Return 0 on success else a negative value.
2694 */
static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
		struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp)
{
	int ret;
	struct buffer_reg_channel *reg_chan = NULL;

	assert(reg_sess);
	assert(ua_chan);

	DBG2("UST app creating buffer registry channel for %s", ua_chan->name);

	/* Create buffer registry channel. */
	ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &reg_chan);
	if (ret < 0) {
		goto error_create;
	}
	assert(reg_chan);
	/* Mirror the app channel's consumer key and buffer geometry. */
	reg_chan->consumer_key = ua_chan->key;
	reg_chan->subbuf_size = ua_chan->attr.subbuf_size;
	reg_chan->num_subbuf = ua_chan->attr.num_subbuf;

	/* Create and add a channel registry to session. */
	ret = ust_registry_channel_add(reg_sess->reg.ust,
			ua_chan->tracing_channel_id);
	if (ret < 0) {
		goto error;
	}
	buffer_reg_channel_add(reg_sess, reg_chan);

	/* regp is optional; caller may only care about the side effects. */
	if (regp) {
		*regp = reg_chan;
	}

	return 0;

error:
	/* Safe because the registry channel object was not added to any HT. */
	buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
error_create:
	return ret;
}
2736
2737 /*
2738 * Setup buffer registry channel for the given session registry and application
2739 * channel object. If regp pointer is valid, it's set with the created object.
2740 *
2741 * Return 0 on success else a negative value.
2742 */
static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
		struct ust_app_channel *ua_chan, struct buffer_reg_channel *reg_chan,
		struct ust_app *app)
{
	int ret;

	assert(reg_sess);
	assert(reg_chan);
	assert(ua_chan);
	assert(ua_chan->obj);

	DBG2("UST app setup buffer registry channel for %s", ua_chan->name);

	/* Setup all streams for the registry. */
	ret = setup_buffer_reg_streams(reg_chan, ua_chan, app);
	if (ret < 0) {
		goto error;
	}

	/*
	 * Transfer ownership of the channel's ust object to the registry;
	 * nullifying it keeps the app channel teardown from releasing it.
	 */
	reg_chan->obj.ust = ua_chan->obj;
	ua_chan->obj = NULL;

	return 0;

error:
	/* The caller added reg_chan to reg_sess; undo that on failure. */
	buffer_reg_channel_remove(reg_sess, reg_chan);
	buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
	return ret;
}
2772
2773 /*
2774 * Send buffer registry channel to the application.
2775 *
2776 * Return 0 on success else a negative value.
2777 */
static int send_channel_uid_to_ust(struct buffer_reg_channel *reg_chan,
		struct ust_app *app, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct buffer_reg_stream *reg_stream;

	assert(reg_chan);
	assert(app);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app sending buffer registry channel to ust sock %d", app->sock);

	/* Duplicate the registry channel object for this application. */
	ret = duplicate_channel_object(reg_chan, ua_chan);
	if (ret < 0) {
		goto error;
	}

	/* Send channel to the application. */
	ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
	if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
		ret = -ENOTCONN; /* Caused by app exiting. */
		goto error;
	} else if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Send all streams to application. */
	pthread_mutex_lock(&reg_chan->stream_list_lock);
	cds_list_for_each_entry(reg_stream, &reg_chan->streams, lnode) {
		/* Per-iteration duplicate; released before the next stream. */
		struct ust_app_stream stream;

		ret = duplicate_stream_object(reg_stream, &stream);
		if (ret < 0) {
			goto error_stream_unlock;
		}

		ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
		if (ret < 0) {
			(void) release_ust_app_stream(-1, &stream, app);
			if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
				ret = -ENOTCONN; /* Caused by app exiting. */
			}
			goto error_stream_unlock;
		}

		/*
		 * The return value is not important here. This function will output an
		 * error if needed.
		 */
		(void) release_ust_app_stream(-1, &stream, app);
	}
	/* Channel and all its streams reached the application. */
	ua_chan->is_sent = 1;

error_stream_unlock:
	pthread_mutex_unlock(&reg_chan->stream_list_lock);
error:
	return ret;
}
2840
2841 /*
2842 * Create and send to the application the created buffers with per UID buffers.
2843 *
2844 * Return 0 on success else a negative value.
2845 */
static int create_channel_per_uid(struct ust_app *app,
		struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct buffer_reg_uid *reg_uid;
	struct buffer_reg_channel *reg_chan;
	/* Only notify the notification thread for channels created here. */
	bool created = false;

	assert(app);
	assert(usess);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app creating channel %s with per UID buffers", ua_chan->name);

	reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
	/*
	 * The session creation handles the creation of this global registry
	 * object. If none can be find, there is a code flow problem or a
	 * teardown race.
	 */
	assert(reg_uid);

	reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
			reg_uid);
	if (!reg_chan) {
		/* Create the buffer registry channel object. */
		ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &reg_chan);
		if (ret < 0) {
			ERR("Error creating the UST channel \"%s\" registry instance",
				ua_chan->name);
			goto error;
		}
		assert(reg_chan);

		/*
		 * Create the buffers on the consumer side. This call populates the
		 * ust app channel object with all streams and data object.
		 */
		ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
				app->bits_per_long, reg_uid->registry->reg.ust);
		if (ret < 0) {
			ERR("Error creating UST channel \"%s\" on the consumer daemon",
				ua_chan->name);

			/*
			 * Let's remove the previously created buffer registry channel so
			 * it's not visible anymore in the session registry.
			 */
			ust_registry_channel_del_free(reg_uid->registry->reg.ust,
					ua_chan->tracing_channel_id, false);
			buffer_reg_channel_remove(reg_uid->registry, reg_chan);
			buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
			goto error;
		}

		/*
		 * Setup the streams and add it to the session registry.
		 */
		ret = setup_buffer_reg_channel(reg_uid->registry,
				ua_chan, reg_chan, app);
		if (ret < 0) {
			ERR("Error setting up UST channel \"%s\"",
				ua_chan->name);
			goto error;
		}
		created = true;
	}

	/* Send buffers to the application. */
	ret = send_channel_uid_to_ust(reg_chan, app, ua_sess, ua_chan);
	if (ret < 0) {
		if (ret != -ENOTCONN) {
			ERR("Error sending channel to application");
		}
		goto error;
	}

	if (created) {
		enum lttng_error_code cmd_ret;
		struct ltt_session *session;
		uint64_t chan_reg_key;
		struct ust_registry_channel *chan_reg;

		rcu_read_lock();
		chan_reg_key = ua_chan->tracing_channel_id;

		/* Record the consumer key in the session's channel registry. */
		pthread_mutex_lock(&reg_uid->registry->reg.ust->lock);
		chan_reg = ust_registry_channel_find(reg_uid->registry->reg.ust,
				chan_reg_key);
		assert(chan_reg);
		chan_reg->consumer_key = ua_chan->key;
		chan_reg = NULL;
		pthread_mutex_unlock(&reg_uid->registry->reg.ust->lock);

		session = session_find_by_id(ua_sess->tracing_id);
		assert(session);

		/* Make the notification thread aware of the new channel. */
		cmd_ret = notification_thread_command_add_channel(
				notification_thread_handle, session->name,
				ua_sess->euid, ua_sess->egid,
				ua_chan->name,
				ua_chan->key,
				LTTNG_DOMAIN_UST,
				ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
		rcu_read_unlock();
		if (cmd_ret != LTTNG_OK) {
			ret = - (int) cmd_ret;
			ERR("Failed to add channel to notification thread");
			goto error;
		}
	}

error:
	return ret;
}
2963
2964 /*
2965 * Create and send to the application the created buffers with per PID buffers.
2966 *
2967 * Called with UST app session lock held.
2968 *
2969 * Return 0 on success else a negative value.
2970 */
2971 static int create_channel_per_pid(struct ust_app *app,
2972 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2973 struct ust_app_channel *ua_chan)
2974 {
2975 int ret;
2976 struct ust_registry_session *registry;
2977 enum lttng_error_code cmd_ret;
2978 struct ltt_session *session;
2979 uint64_t chan_reg_key;
2980 struct ust_registry_channel *chan_reg;
2981
2982 assert(app);
2983 assert(usess);
2984 assert(ua_sess);
2985 assert(ua_chan);
2986
2987 DBG("UST app creating channel %s with per PID buffers", ua_chan->name);
2988
2989 rcu_read_lock();
2990
2991 registry = get_session_registry(ua_sess);
2992 /* The UST app session lock is held, registry shall not be null. */
2993 assert(registry);
2994
2995 /* Create and add a new channel registry to session. */
2996 ret = ust_registry_channel_add(registry, ua_chan->key);
2997 if (ret < 0) {
2998 ERR("Error creating the UST channel \"%s\" registry instance",
2999 ua_chan->name);
3000 goto error;
3001 }
3002
3003 /* Create and get channel on the consumer side. */
3004 ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
3005 app->bits_per_long, registry);
3006 if (ret < 0) {
3007 ERR("Error creating UST channel \"%s\" on the consumer daemon",
3008 ua_chan->name);
3009 goto error;
3010 }
3011
3012 ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
3013 if (ret < 0) {
3014 if (ret != -ENOTCONN) {
3015 ERR("Error sending channel to application");
3016 }
3017 goto error;
3018 }
3019
3020 session = session_find_by_id(ua_sess->tracing_id);
3021 assert(session);
3022
3023 chan_reg_key = ua_chan->key;
3024 pthread_mutex_lock(&registry->lock);
3025 chan_reg = ust_registry_channel_find(registry, chan_reg_key);
3026 assert(chan_reg);
3027 chan_reg->consumer_key = ua_chan->key;
3028 pthread_mutex_unlock(&registry->lock);
3029
3030 cmd_ret = notification_thread_command_add_channel(
3031 notification_thread_handle, session->name,
3032 ua_sess->euid, ua_sess->egid,
3033 ua_chan->name,
3034 ua_chan->key,
3035 LTTNG_DOMAIN_UST,
3036 ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
3037 if (cmd_ret != LTTNG_OK) {
3038 ret = - (int) cmd_ret;
3039 ERR("Failed to add channel to notification thread");
3040 goto error;
3041 }
3042
3043 error:
3044 rcu_read_unlock();
3045 return ret;
3046 }
3047
3048 /*
3049 * From an already allocated ust app channel, create the channel buffers if
3050 * need and send it to the application. This MUST be called with a RCU read
3051 * side lock acquired.
3052 *
3053 * Called with UST app session lock held.
3054 *
3055 * Return 0 on success or else a negative value. Returns -ENOTCONN if
3056 * the application exited concurrently.
3057 */
3058 static int do_create_channel(struct ust_app *app,
3059 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
3060 struct ust_app_channel *ua_chan)
3061 {
3062 int ret;
3063
3064 assert(app);
3065 assert(usess);
3066 assert(ua_sess);
3067 assert(ua_chan);
3068
3069 /* Handle buffer type before sending the channel to the application. */
3070 switch (usess->buffer_type) {
3071 case LTTNG_BUFFER_PER_UID:
3072 {
3073 ret = create_channel_per_uid(app, usess, ua_sess, ua_chan);
3074 if (ret < 0) {
3075 goto error;
3076 }
3077 break;
3078 }
3079 case LTTNG_BUFFER_PER_PID:
3080 {
3081 ret = create_channel_per_pid(app, usess, ua_sess, ua_chan);
3082 if (ret < 0) {
3083 goto error;
3084 }
3085 break;
3086 }
3087 default:
3088 assert(0);
3089 ret = -EINVAL;
3090 goto error;
3091 }
3092
3093 /* Initialize ust objd object using the received handle and add it. */
3094 lttng_ht_node_init_ulong(&ua_chan->ust_objd_node, ua_chan->handle);
3095 lttng_ht_add_unique_ulong(app->ust_objd, &ua_chan->ust_objd_node);
3096
3097 /* If channel is not enabled, disable it on the tracer */
3098 if (!ua_chan->enabled) {
3099 ret = disable_ust_channel(app, ua_sess, ua_chan);
3100 if (ret < 0) {
3101 goto error;
3102 }
3103 }
3104
3105 error:
3106 return ret;
3107 }
3108
3109 /*
3110 * Create UST app channel and create it on the tracer. Set ua_chanp of the
3111 * newly created channel if not NULL.
3112 *
3113 * Called with UST app session lock and RCU read-side lock held.
3114 *
3115 * Return 0 on success or else a negative value. Returns -ENOTCONN if
3116 * the application exited concurrently.
3117 */
static int create_ust_app_channel(struct ust_app_session *ua_sess,
		struct ltt_ust_channel *uchan, struct ust_app *app,
		enum lttng_ust_chan_type type, struct ltt_ust_session *usess,
		struct ust_app_channel **ua_chanp)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app_channel *ua_chan;

	/* Lookup channel in the ust app session */
	lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
	ua_chan_node = lttng_ht_iter_get_node_str(&iter);
	if (ua_chan_node != NULL) {
		/* Channel already exists; just hand it back to the caller. */
		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
		goto end;
	}

	ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
	if (ua_chan == NULL) {
		/* Only malloc can fail here */
		ret = -ENOMEM;
		goto error_alloc;
	}
	shadow_copy_channel(ua_chan, uchan);

	/* Set channel type. */
	ua_chan->attr.type = type;

	/* Create the channel buffers and send them to the application. */
	ret = do_create_channel(app, usess, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

	DBG2("UST app create channel %s for PID %d completed", ua_chan->name,
			app->pid);

	/* Only add the channel if successful on the tracer side. */
	lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
end:
	if (ua_chanp) {
		*ua_chanp = ua_chan;
	}

	/* Everything went well. */
	return 0;

error:
	/* Only release on the app socket if the channel reached the app. */
	delete_ust_app_channel(ua_chan->is_sent ? app->sock : -1, ua_chan, app);
error_alloc:
	return ret;
}
3170
3171 /*
3172 * Create UST app event and create it on the tracer side.
3173 *
3174 * Called with ust app session mutex held.
3175 */
3176 static
3177 int create_ust_app_event(struct ust_app_session *ua_sess,
3178 struct ust_app_channel *ua_chan, struct ltt_ust_event *uevent,
3179 struct ust_app *app)
3180 {
3181 int ret = 0;
3182 struct ust_app_event *ua_event;
3183
3184 /* Get event node */
3185 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
3186 uevent->filter, uevent->attr.loglevel, uevent->exclusion);
3187 if (ua_event != NULL) {
3188 ret = -EEXIST;
3189 goto end;
3190 }
3191
3192 /* Does not exist so create one */
3193 ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
3194 if (ua_event == NULL) {
3195 /* Only malloc can failed so something is really wrong */
3196 ret = -ENOMEM;
3197 goto end;
3198 }
3199 shadow_copy_event(ua_event, uevent);
3200
3201 /* Create it on the tracer side */
3202 ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
3203 if (ret < 0) {
3204 /* Not found previously means that it does not exist on the tracer */
3205 assert(ret != -LTTNG_UST_ERR_EXIST);
3206 goto error;
3207 }
3208
3209 add_unique_ust_app_event(ua_chan, ua_event);
3210
3211 DBG2("UST app create event %s for PID %d completed", ua_event->name,
3212 app->pid);
3213
3214 end:
3215 return ret;
3216
3217 error:
3218 /* Valid. Calling here is already in a read side lock */
3219 delete_ust_app_event(-1, ua_event, app);
3220 return ret;
3221 }
3222
3223 /*
3224 * Create UST metadata and open it on the tracer side.
3225 *
3226 * Called with UST app session lock held and RCU read side lock.
3227 */
static int create_ust_app_metadata(struct ust_app_session *ua_sess,
		struct ust_app *app, struct consumer_output *consumer)
{
	int ret = 0;
	struct ust_app_channel *metadata;
	struct consumer_socket *socket;
	struct ust_registry_session *registry;

	assert(ua_sess);
	assert(app);
	assert(consumer);

	registry = get_session_registry(ua_sess);
	/* The UST app session is held registry shall not be null. */
	assert(registry);

	pthread_mutex_lock(&registry->lock);

	/* Metadata already exists for this registry or it was closed previously */
	if (registry->metadata_key || registry->metadata_closed) {
		ret = 0;
		goto error;
	}

	/* Allocate UST metadata */
	metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, NULL);
	if (!metadata) {
		/* malloc() failed */
		ret = -ENOMEM;
		goto error;
	}

	memcpy(&metadata->attr, &ua_sess->metadata_attr, sizeof(metadata->attr));

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create metadata");
		goto error;
	}

	/* Get the right consumer socket for the application. */
	socket = consumer_find_socket_by_bitness(app->bits_per_long, consumer);
	if (!socket) {
		ret = -EINVAL;
		goto error_consumer;
	}

	/*
	 * Keep metadata key so we can identify it on the consumer side. Assign it
	 * to the registry *before* we ask the consumer so we avoid the race of the
	 * consumer requesting the metadata and the ask_channel call on our side
	 * did not returned yet.
	 */
	registry->metadata_key = metadata->key;

	/*
	 * Ask the metadata channel creation to the consumer. The metadata object
	 * will be created by the consumer and kept their. However, the stream is
	 * never added or monitored until we do a first push metadata to the
	 * consumer.
	 */
	ret = ust_consumer_ask_channel(ua_sess, metadata, consumer, socket,
			registry);
	if (ret < 0) {
		/* Nullify the metadata key so we don't try to close it later on. */
		registry->metadata_key = 0;
		goto error_consumer;
	}

	/*
	 * The setup command will make the metadata stream be sent to the relayd,
	 * if applicable, and the thread managing the metadatas. This is important
	 * because after this point, if an error occurs, the only way the stream
	 * can be deleted is to be monitored in the consumer.
	 */
	ret = consumer_setup_metadata(socket, metadata->key);
	if (ret < 0) {
		/* Nullify the metadata key so we don't try to close it later on. */
		registry->metadata_key = 0;
		goto error_consumer;
	}

	DBG2("UST metadata with key %" PRIu64 " created for app pid %d",
			metadata->key, app->pid);

	/*
	 * Success also falls through here on purpose: the consumer now owns
	 * the metadata channel, so the local object and its fd are released
	 * in all cases.
	 */
error_consumer:
	lttng_fd_put(LTTNG_FD_APPS, 1);
	delete_ust_app_channel(-1, metadata, app);
error:
	pthread_mutex_unlock(&registry->lock);
	return ret;
}
3321
3322 /*
3323 * Return ust app pointer or NULL if not found. RCU read side lock MUST be
3324 * acquired before calling this function.
3325 */
3326 struct ust_app *ust_app_find_by_pid(pid_t pid)
3327 {
3328 struct ust_app *app = NULL;
3329 struct lttng_ht_node_ulong *node;
3330 struct lttng_ht_iter iter;
3331
3332 lttng_ht_lookup(ust_app_ht, (void *)((unsigned long) pid), &iter);
3333 node = lttng_ht_iter_get_node_ulong(&iter);
3334 if (node == NULL) {
3335 DBG2("UST app no found with pid %d", pid);
3336 goto error;
3337 }
3338
3339 DBG2("Found UST app by pid %d", pid);
3340
3341 app = caa_container_of(node, struct ust_app, pid_n);
3342
3343 error:
3344 return app;
3345 }
3346
3347 /*
3348 * Allocate and init an UST app object using the registration information and
3349 * the command socket. This is called when the command socket connects to the
3350 * session daemon.
3351 *
3352 * The object is returned on success or else NULL.
3353 */
3354 struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
3355 {
3356 struct ust_app *lta = NULL;
3357
3358 assert(msg);
3359 assert(sock >= 0);
3360
3361 DBG3("UST app creating application for socket %d", sock);
3362
3363 if ((msg->bits_per_long == 64 &&
3364 (uatomic_read(&ust_consumerd64_fd) == -EINVAL))
3365 || (msg->bits_per_long == 32 &&
3366 (uatomic_read(&ust_consumerd32_fd) == -EINVAL))) {
3367 ERR("Registration failed: application \"%s\" (pid: %d) has "
3368 "%d-bit long, but no consumerd for this size is available.\n",
3369 msg->name, msg->pid, msg->bits_per_long);
3370 goto error;
3371 }
3372
3373 lta = zmalloc(sizeof(struct ust_app));
3374 if (lta == NULL) {
3375 PERROR("malloc");
3376 goto error;
3377 }
3378
3379 lta->ppid = msg->ppid;
3380 lta->uid = msg->uid;
3381 lta->gid = msg->gid;
3382
3383 lta->bits_per_long = msg->bits_per_long;
3384 lta->uint8_t_alignment = msg->uint8_t_alignment;
3385 lta->uint16_t_alignment = msg->uint16_t_alignment;
3386 lta->uint32_t_alignment = msg->uint32_t_alignment;
3387 lta->uint64_t_alignment = msg->uint64_t_alignment;
3388 lta->long_alignment = msg->long_alignment;
3389 lta->byte_order = msg->byte_order;
3390
3391 lta->v_major = msg->major;
3392 lta->v_minor = msg->minor;
3393 lta->sessions = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
3394 lta->ust_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3395 lta->ust_sessions_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3396 lta->notify_sock = -1;
3397
3398 /* Copy name and make sure it's NULL terminated. */
3399 strncpy(lta->name, msg->name, sizeof(lta->name));
3400 lta->name[UST_APP_PROCNAME_LEN] = '\0';
3401
3402 /*
3403 * Before this can be called, when receiving the registration information,
3404 * the application compatibility is checked. So, at this point, the
3405 * application can work with this session daemon.
3406 */
3407 lta->compatible = 1;
3408
3409 lta->pid = msg->pid;
3410 lttng_ht_node_init_ulong(&lta->pid_n, (unsigned long) lta->pid);
3411 lta->sock = sock;
3412 pthread_mutex_init(&lta->sock_lock, NULL);
3413 lttng_ht_node_init_ulong(&lta->sock_n, (unsigned long) lta->sock);
3414
3415 CDS_INIT_LIST_HEAD(&lta->teardown_head);
3416 error:
3417 return lta;
3418 }
3419
3420 /*
3421 * For a given application object, add it to every hash table.
3422 */
3423 void ust_app_add(struct ust_app *app)
3424 {
3425 assert(app);
3426 assert(app->notify_sock >= 0);
3427
3428 rcu_read_lock();
3429
3430 /*
3431 * On a re-registration, we want to kick out the previous registration of
3432 * that pid
3433 */
3434 lttng_ht_add_replace_ulong(ust_app_ht, &app->pid_n);
3435
3436 /*
3437 * The socket _should_ be unique until _we_ call close. So, a add_unique
3438 * for the ust_app_ht_by_sock is used which asserts fail if the entry was
3439 * already in the table.
3440 */
3441 lttng_ht_add_unique_ulong(ust_app_ht_by_sock, &app->sock_n);
3442
3443 /* Add application to the notify socket hash table. */
3444 lttng_ht_node_init_ulong(&app->notify_sock_n, app->notify_sock);
3445 lttng_ht_add_unique_ulong(ust_app_ht_by_notify_sock, &app->notify_sock_n);
3446
3447 DBG("App registered with pid:%d ppid:%d uid:%d gid:%d sock:%d name:%s "
3448 "notify_sock:%d (version %d.%d)", app->pid, app->ppid, app->uid,
3449 app->gid, app->sock, app->name, app->notify_sock, app->v_major,
3450 app->v_minor);
3451
3452 rcu_read_unlock();
3453 }
3454
3455 /*
3456 * Set the application version into the object.
3457 *
3458 * Return 0 on success else a negative value either an errno code or a
3459 * LTTng-UST error code.
3460 */
3461 int ust_app_version(struct ust_app *app)
3462 {
3463 int ret;
3464
3465 assert(app);
3466
3467 pthread_mutex_lock(&app->sock_lock);
3468 ret = ustctl_tracer_version(app->sock, &app->version);
3469 pthread_mutex_unlock(&app->sock_lock);
3470 if (ret < 0) {
3471 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
3472 ERR("UST app %d version failed with ret %d", app->sock, ret);
3473 } else {
3474 DBG3("UST app %d version failed. Application is dead", app->sock);
3475 }
3476 }
3477
3478 return ret;
3479 }
3480
3481 /*
3482 * Unregister app by removing it from the global traceable app list and freeing
3483 * the data struct.
3484 *
3485 * The socket is already closed at this point so no close to sock.
3486 */
3487 void ust_app_unregister(int sock)
3488 {
3489 struct ust_app *lta;
3490 struct lttng_ht_node_ulong *node;
3491 struct lttng_ht_iter ust_app_sock_iter;
3492 struct lttng_ht_iter iter;
3493 struct ust_app_session *ua_sess;
3494 int ret;
3495
3496 rcu_read_lock();
3497
3498 /* Get the node reference for a call_rcu */
3499 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &ust_app_sock_iter);
3500 node = lttng_ht_iter_get_node_ulong(&ust_app_sock_iter);
3501 assert(node);
3502
3503 lta = caa_container_of(node, struct ust_app, sock_n);
3504 DBG("PID %d unregistering with sock %d", lta->pid, sock);
3505
3506 /*
3507 * For per-PID buffers, perform "push metadata" and flush all
3508 * application streams before removing app from hash tables,
3509 * ensuring proper behavior of data_pending check.
3510 * Remove sessions so they are not visible during deletion.
3511 */
3512 cds_lfht_for_each_entry(lta->sessions->ht, &iter.iter, ua_sess,
3513 node.node) {
3514 struct ust_registry_session *registry;
3515
3516 ret = lttng_ht_del(lta->sessions, &iter);
3517 if (ret) {
3518 /* The session was already removed so scheduled for teardown. */
3519 continue;
3520 }
3521
3522 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
3523 (void) ust_app_flush_app_session(lta, ua_sess);
3524 }
3525
3526 /*
3527 * Add session to list for teardown. This is safe since at this point we
3528 * are the only one using this list.
3529 */
3530 pthread_mutex_lock(&ua_sess->lock);
3531
3532 if (ua_sess->deleted) {
3533 pthread_mutex_unlock(&ua_sess->lock);
3534 continue;
3535 }
3536
3537 /*
3538 * Normally, this is done in the delete session process which is
3539 * executed in the call rcu below. However, upon registration we can't
3540 * afford to wait for the grace period before pushing data or else the
3541 * data pending feature can race between the unregistration and stop
3542 * command where the data pending command is sent *before* the grace
3543 * period ended.
3544 *
3545 * The close metadata below nullifies the metadata pointer in the
3546 * session so the delete session will NOT push/close a second time.
3547 */
3548 registry = get_session_registry(ua_sess);
3549 if (registry) {
3550 /* Push metadata for application before freeing the application. */
3551 (void) push_metadata(registry, ua_sess->consumer);
3552
3553 /*
3554 * Don't ask to close metadata for global per UID buffers. Close
3555 * metadata only on destroy trace session in this case. Also, the
3556 * previous push metadata could have flag the metadata registry to
3557 * close so don't send a close command if closed.
3558 */
3559 if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
3560 /* And ask to close it for this session registry. */
3561 (void) close_metadata(registry, ua_sess->consumer);
3562 }
3563 }
3564 cds_list_add(&ua_sess->teardown_node, &lta->teardown_head);
3565
3566 pthread_mutex_unlock(&ua_sess->lock);
3567 }
3568
3569 /* Remove application from PID hash table */
3570 ret = lttng_ht_del(ust_app_ht_by_sock, &ust_app_sock_iter);
3571 assert(!ret);
3572
3573 /*
3574 * Remove application from notify hash table. The thread handling the
3575 * notify socket could have deleted the node so ignore on error because
3576 * either way it's valid. The close of that socket is handled by the other
3577 * thread.
3578 */
3579 iter.iter.node = &lta->notify_sock_n.node;
3580 (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
3581
3582 /*
3583 * Ignore return value since the node might have been removed before by an
3584 * add replace during app registration because the PID can be reassigned by
3585 * the OS.
3586 */
3587 iter.iter.node = &lta->pid_n.node;
3588 ret = lttng_ht_del(ust_app_ht, &iter);
3589 if (ret) {
3590 DBG3("Unregister app by PID %d failed. This can happen on pid reuse",
3591 lta->pid);
3592 }
3593
3594 /* Free memory */
3595 call_rcu(&lta->pid_n.head, delete_ust_app_rcu);
3596
3597 rcu_read_unlock();
3598 return;
3599 }
3600
3601 /*
3602 * Fill events array with all events name of all registered apps.
3603 */
3604 int ust_app_list_events(struct lttng_event **events)
3605 {
3606 int ret, handle;
3607 size_t nbmem, count = 0;
3608 struct lttng_ht_iter iter;
3609 struct ust_app *app;
3610 struct lttng_event *tmp_event;
3611
3612 nbmem = UST_APP_EVENT_LIST_SIZE;
3613 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event));
3614 if (tmp_event == NULL) {
3615 PERROR("zmalloc ust app events");
3616 ret = -ENOMEM;
3617 goto error;
3618 }
3619
3620 rcu_read_lock();
3621
3622 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3623 struct lttng_ust_tracepoint_iter uiter;
3624
3625 health_code_update();
3626
3627 if (!app->compatible) {
3628 /*
3629 * TODO: In time, we should notice the caller of this error by
3630 * telling him that this is a version error.
3631 */
3632 continue;
3633 }
3634 pthread_mutex_lock(&app->sock_lock);
3635 handle = ustctl_tracepoint_list(app->sock);
3636 if (handle < 0) {
3637 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
3638 ERR("UST app list events getting handle failed for app pid %d",
3639 app->pid);
3640 }
3641 pthread_mutex_unlock(&app->sock_lock);
3642 continue;
3643 }
3644
3645 while ((ret = ustctl_tracepoint_list_get(app->sock, handle,
3646 &uiter)) != -LTTNG_UST_ERR_NOENT) {
3647 /* Handle ustctl error. */
3648 if (ret < 0) {
3649 int release_ret;
3650
3651 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
3652 ERR("UST app tp list get failed for app %d with ret %d",
3653 app->sock, ret);
3654 } else {
3655 DBG3("UST app tp list get failed. Application is dead");
3656 /*
3657 * This is normal behavior, an application can die during the
3658 * creation process. Don't report an error so the execution can
3659 * continue normally. Continue normal execution.
3660 */
3661 break;
3662 }
3663 free(tmp_event);
3664 release_ret = ustctl_release_handle(app->sock, handle);
3665 if (release_ret < 0 &&
3666 release_ret != -LTTNG_UST_ERR_EXITING &&
3667 release_ret != -EPIPE) {
3668 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
3669 }
3670 pthread_mutex_unlock(&app->sock_lock);
3671 goto rcu_error;
3672 }
3673
3674 health_code_update();
3675 if (count >= nbmem) {
3676 /* In case the realloc fails, we free the memory */
3677 struct lttng_event *new_tmp_event;
3678 size_t new_nbmem;
3679
3680 new_nbmem = nbmem << 1;
3681 DBG2("Reallocating event list from %zu to %zu entries",
3682 nbmem, new_nbmem);
3683 new_tmp_event = realloc(tmp_event,
3684 new_nbmem * sizeof(struct lttng_event));
3685 if (new_tmp_event == NULL) {
3686 int release_ret;
3687
3688 PERROR("realloc ust app events");
3689 free(tmp_event);
3690 ret = -ENOMEM;
3691 release_ret = ustctl_release_handle(app->sock, handle);
3692 if (release_ret < 0 &&
3693 release_ret != -LTTNG_UST_ERR_EXITING &&
3694 release_ret != -EPIPE) {
3695 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
3696 }
3697 pthread_mutex_unlock(&app->sock_lock);
3698 goto rcu_error;
3699 }
3700 /* Zero the new memory */
3701 memset(new_tmp_event + nbmem, 0,
3702 (new_nbmem - nbmem) * sizeof(struct lttng_event));
3703 nbmem = new_nbmem;
3704 tmp_event = new_tmp_event;
3705 }
3706 memcpy(tmp_event[count].name, uiter.name, LTTNG_UST_SYM_NAME_LEN);
3707 tmp_event[count].loglevel = uiter.loglevel;
3708 tmp_event[count].type = (enum lttng_event_type) LTTNG_UST_TRACEPOINT;
3709 tmp_event[count].pid = app->pid;
3710 tmp_event[count].enabled = -1;
3711 count++;
3712 }
3713 ret = ustctl_release_handle(app->sock, handle);
3714 pthread_mutex_unlock(&app->sock_lock);
3715 if (ret < 0 && ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
3716 ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
3717 }
3718 }
3719
3720 ret = count;
3721 *events = tmp_event;
3722
3723 DBG2("UST app list events done (%zu events)", count);
3724
3725 rcu_error:
3726 rcu_read_unlock();
3727 error:
3728 health_code_update();
3729 return ret;
3730 }
3731
3732 /*
3733 * Fill events array with all events name of all registered apps.
3734 */
3735 int ust_app_list_event_fields(struct lttng_event_field **fields)
3736 {
3737 int ret, handle;
3738 size_t nbmem, count = 0;
3739 struct lttng_ht_iter iter;
3740 struct ust_app *app;
3741 struct lttng_event_field *tmp_event;
3742
3743 nbmem = UST_APP_EVENT_LIST_SIZE;
3744 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event_field));
3745 if (tmp_event == NULL) {
3746 PERROR("zmalloc ust app event fields");
3747 ret = -ENOMEM;
3748 goto error;
3749 }
3750
3751 rcu_read_lock();
3752
3753 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3754 struct lttng_ust_field_iter uiter;
3755
3756 health_code_update();
3757
3758 if (!app->compatible) {
3759 /*
3760 * TODO: In time, we should notice the caller of this error by
3761 * telling him that this is a version error.
3762 */
3763 continue;
3764 }
3765 pthread_mutex_lock(&app->sock_lock);
3766 handle = ustctl_tracepoint_field_list(app->sock);
3767 if (handle < 0) {
3768 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
3769 ERR("UST app list field getting handle failed for app pid %d",
3770 app->pid);
3771 }
3772 pthread_mutex_unlock(&app->sock_lock);
3773 continue;
3774 }
3775
3776 while ((ret = ustctl_tracepoint_field_list_get(app->sock, handle,
3777 &uiter)) != -LTTNG_UST_ERR_NOENT) {
3778 /* Handle ustctl error. */
3779 if (ret < 0) {
3780 int release_ret;
3781
3782 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
3783 ERR("UST app tp list field failed for app %d with ret %d",
3784 app->sock, ret);
3785 } else {
3786 DBG3("UST app tp list field failed. Application is dead");
3787 /*
3788 * This is normal behavior, an application can die during the
3789 * creation process. Don't report an error so the execution can
3790 * continue normally. Reset list and count for next app.
3791 */
3792 break;
3793 }
3794 free(tmp_event);
3795 release_ret = ustctl_release_handle(app->sock, handle);
3796 pthread_mutex_unlock(&app->sock_lock);
3797 if (release_ret < 0 &&
3798 release_ret != -LTTNG_UST_ERR_EXITING &&
3799 release_ret != -EPIPE) {
3800 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
3801 }
3802 goto rcu_error;
3803 }
3804
3805 health_code_update();
3806 if (count >= nbmem) {
3807 /* In case the realloc fails, we free the memory */
3808 struct lttng_event_field *new_tmp_event;
3809 size_t new_nbmem;
3810
3811 new_nbmem = nbmem << 1;
3812 DBG2("Reallocating event field list from %zu to %zu entries",
3813 nbmem, new_nbmem);
3814 new_tmp_event = realloc(tmp_event,
3815 new_nbmem * sizeof(struct lttng_event_field));
3816 if (new_tmp_event == NULL) {
3817 int release_ret;
3818
3819 PERROR("realloc ust app event fields");
3820 free(tmp_event);
3821 ret = -ENOMEM;
3822 release_ret = ustctl_release_handle(app->sock, handle);
3823 pthread_mutex_unlock(&app->sock_lock);
3824 if (release_ret &&
3825 release_ret != -LTTNG_UST_ERR_EXITING &&
3826 release_ret != -EPIPE) {
3827 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
3828 }
3829 goto rcu_error;
3830 }
3831 /* Zero the new memory */
3832 memset(new_tmp_event + nbmem, 0,
3833 (new_nbmem - nbmem) * sizeof(struct lttng_event_field));
3834 nbmem = new_nbmem;
3835 tmp_event = new_tmp_event;
3836 }
3837
3838 memcpy(tmp_event[count].field_name, uiter.field_name, LTTNG_UST_SYM_NAME_LEN);
3839 /* Mapping between these enums matches 1 to 1. */
3840 tmp_event[count].type = (enum lttng_event_field_type) uiter.type;
3841 tmp_event[count].nowrite = uiter.nowrite;
3842
3843 memcpy(tmp_event[count].event.name, uiter.event_name, LTTNG_UST_SYM_NAME_LEN);
3844 tmp_event[count].event.loglevel = uiter.loglevel;
3845 tmp_event[count].event.type = LTTNG_EVENT_TRACEPOINT;
3846 tmp_event[count].event.pid = app->pid;
3847 tmp_event[count].event.enabled = -1;
3848 count++;
3849 }
3850 ret = ustctl_release_handle(app->sock, handle);
3851 pthread_mutex_unlock(&app->sock_lock);
3852 if (ret < 0 &&
3853 ret != -LTTNG_UST_ERR_EXITING &&
3854 ret != -EPIPE) {
3855 ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
3856 }
3857 }
3858
3859 ret = count;
3860 *fields = tmp_event;
3861
3862 DBG2("UST app list event fields done (%zu events)", count);
3863
3864 rcu_error:
3865 rcu_read_unlock();
3866 error:
3867 health_code_update();
3868 return ret;
3869 }
3870
3871 /*
3872 * Free and clean all traceable apps of the global list.
3873 *
3874 * Should _NOT_ be called with RCU read-side lock held.
3875 */
3876 void ust_app_clean_list(void)
3877 {
3878 int ret;
3879 struct ust_app *app;
3880 struct lttng_ht_iter iter;
3881
3882 DBG2("UST app cleaning registered apps hash table");
3883
3884 rcu_read_lock();
3885
3886 if (ust_app_ht) {
3887 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3888 ret = lttng_ht_del(ust_app_ht, &iter);
3889 assert(!ret);
3890 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
3891 }
3892 }
3893
3894 /* Cleanup socket hash table */
3895 if (ust_app_ht_by_sock) {
3896 cds_lfht_for_each_entry(ust_app_ht_by_sock->ht, &iter.iter, app,
3897 sock_n.node) {
3898 ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
3899 assert(!ret);
3900 }
3901 }
3902
3903 /* Cleanup notify socket hash table */
3904 if (ust_app_ht_by_notify_sock) {
3905 cds_lfht_for_each_entry(ust_app_ht_by_notify_sock->ht, &iter.iter, app,
3906 notify_sock_n.node) {
3907 ret = lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
3908 assert(!ret);
3909 }
3910 }
3911 rcu_read_unlock();
3912
3913 /* Destroy is done only when the ht is empty */
3914 if (ust_app_ht) {
3915 ht_cleanup_push(ust_app_ht);
3916 }
3917 if (ust_app_ht_by_sock) {
3918 ht_cleanup_push(ust_app_ht_by_sock);
3919 }
3920 if (ust_app_ht_by_notify_sock) {
3921 ht_cleanup_push(ust_app_ht_by_notify_sock);
3922 }
3923 }
3924
3925 /*
3926 * Init UST app hash table.
3927 */
3928 int ust_app_ht_alloc(void)
3929 {
3930 ust_app_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3931 if (!ust_app_ht) {
3932 return -1;
3933 }
3934 ust_app_ht_by_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3935 if (!ust_app_ht_by_sock) {
3936 return -1;
3937 }
3938 ust_app_ht_by_notify_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3939 if (!ust_app_ht_by_notify_sock) {
3940 return -1;
3941 }
3942 return 0;
3943 }
3944
3945 /*
3946 * For a specific UST session, disable the channel for all registered apps.
3947 */
3948 int ust_app_disable_channel_glb(struct ltt_ust_session *usess,
3949 struct ltt_ust_channel *uchan)
3950 {
3951 int ret = 0;
3952 struct lttng_ht_iter iter;
3953 struct lttng_ht_node_str *ua_chan_node;
3954 struct ust_app *app;
3955 struct ust_app_session *ua_sess;
3956 struct ust_app_channel *ua_chan;
3957
3958 if (usess == NULL || uchan == NULL) {
3959 ERR("Disabling UST global channel with NULL values");
3960 ret = -1;
3961 goto error;
3962 }
3963
3964 DBG2("UST app disabling channel %s from global domain for session id %" PRIu64,
3965 uchan->name, usess->id);
3966
3967 rcu_read_lock();
3968
3969 /* For every registered applications */
3970 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3971 struct lttng_ht_iter uiter;
3972 if (!app->compatible) {
3973 /*
3974 * TODO: In time, we should notice the caller of this error by
3975 * telling him that this is a version error.
3976 */
3977 continue;
3978 }
3979 ua_sess = lookup_session_by_app(usess, app);
3980 if (ua_sess == NULL) {
3981 continue;
3982 }
3983
3984 /* Get channel */
3985 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3986 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3987 /* If the session if found for the app, the channel must be there */
3988 assert(ua_chan_node);
3989
3990 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3991 /* The channel must not be already disabled */
3992 assert(ua_chan->enabled == 1);
3993
3994 /* Disable channel onto application */
3995 ret = disable_ust_app_channel(ua_sess, ua_chan, app);
3996 if (ret < 0) {
3997 /* XXX: We might want to report this error at some point... */
3998 continue;
3999 }
4000 }
4001
4002 rcu_read_unlock();
4003
4004 error:
4005 return ret;
4006 }
4007
4008 /*
4009 * For a specific UST session, enable the channel for all registered apps.
4010 */
4011 int ust_app_enable_channel_glb(struct ltt_ust_session *usess,
4012 struct ltt_ust_channel *uchan)
4013 {
4014 int ret = 0;
4015 struct lttng_ht_iter iter;
4016 struct ust_app *app;
4017 struct ust_app_session *ua_sess;
4018
4019 if (usess == NULL || uchan == NULL) {
4020 ERR("Adding UST global channel to NULL values");
4021 ret = -1;
4022 goto error;
4023 }
4024
4025 DBG2("UST app enabling channel %s to global domain for session id %" PRIu64,
4026 uchan->name, usess->id);
4027
4028 rcu_read_lock();
4029
4030 /* For every registered applications */
4031 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4032 if (!app->compatible) {
4033 /*
4034 * TODO: In time, we should notice the caller of this error by
4035 * telling him that this is a version error.
4036 */
4037 continue;
4038 }
4039 ua_sess = lookup_session_by_app(usess, app);
4040 if (ua_sess == NULL) {
4041 continue;
4042 }
4043
4044 /* Enable channel onto application */
4045 ret = enable_ust_app_channel(ua_sess, uchan, app);
4046 if (ret < 0) {
4047 /* XXX: We might want to report this error at some point... */
4048 continue;
4049 }
4050 }
4051
4052 rcu_read_unlock();
4053
4054 error:
4055 return ret;
4056 }
4057
4058 /*
4059 * Disable an event in a channel and for a specific session.
4060 */
4061 int ust_app_disable_event_glb(struct ltt_ust_session *usess,
4062 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
4063 {
4064 int ret = 0;
4065 struct lttng_ht_iter iter, uiter;
4066 struct lttng_ht_node_str *ua_chan_node;
4067 struct ust_app *app;
4068 struct ust_app_session *ua_sess;
4069 struct ust_app_channel *ua_chan;
4070 struct ust_app_event *ua_event;
4071
4072 DBG("UST app disabling event %s for all apps in channel "
4073 "%s for session id %" PRIu64,
4074 uevent->attr.name, uchan->name, usess->id);
4075
4076 rcu_read_lock();
4077
4078 /* For all registered applications */
4079 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4080 if (!app->compatible) {
4081 /*
4082 * TODO: In time, we should notice the caller of this error by
4083 * telling him that this is a version error.
4084 */
4085 continue;
4086 }
4087 ua_sess = lookup_session_by_app(usess, app);
4088 if (ua_sess == NULL) {
4089 /* Next app */
4090 continue;
4091 }
4092
4093 /* Lookup channel in the ust app session */
4094 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4095 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
4096 if (ua_chan_node == NULL) {
4097 DBG2("Channel %s not found in session id %" PRIu64 " for app pid %d."
4098 "Skipping", uchan->name, usess->id, app->pid);
4099 continue;
4100 }
4101 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4102
4103 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
4104 uevent->filter, uevent->attr.loglevel,
4105 uevent->exclusion);
4106 if (ua_event == NULL) {
4107 DBG2("Event %s not found in channel %s for app pid %d."
4108 "Skipping", uevent->attr.name, uchan->name, app->pid);
4109 continue;
4110 }
4111
4112 ret = disable_ust_app_event(ua_sess, ua_event, app);
4113 if (ret < 0) {
4114 /* XXX: Report error someday... */
4115 continue;
4116 }
4117 }
4118
4119 rcu_read_unlock();
4120
4121 return ret;
4122 }
4123
4124 /*
4125 * For a specific UST session, create the channel for all registered apps.
4126 */
4127 int ust_app_create_channel_glb(struct ltt_ust_session *usess,
4128 struct ltt_ust_channel *uchan)
4129 {
4130 int ret = 0, created;
4131 struct lttng_ht_iter iter;
4132 struct ust_app *app;
4133 struct ust_app_session *ua_sess = NULL;
4134
4135 /* Very wrong code flow */
4136 assert(usess);
4137 assert(uchan);
4138
4139 DBG2("UST app adding channel %s to UST domain for session id %" PRIu64,
4140 uchan->name, usess->id);
4141
4142 rcu_read_lock();
4143
4144 /* For every registered applications */
4145 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4146 if (!app->compatible) {
4147 /*
4148 * TODO: In time, we should notice the caller of this error by
4149 * telling him that this is a version error.
4150 */
4151 continue;
4152 }
4153 if (!trace_ust_pid_tracker_lookup(usess, app->pid)) {
4154 /* Skip. */
4155 continue;
4156 }
4157
4158 /*
4159 * Create session on the tracer side and add it to app session HT. Note
4160 * that if session exist, it will simply return a pointer to the ust
4161 * app session.
4162 */
4163 ret = create_ust_app_session(usess, app, &ua_sess, &created);
4164 if (ret < 0) {
4165 switch (ret) {
4166 case -ENOTCONN:
4167 /*
4168 * The application's socket is not valid. Either a bad socket
4169 * or a timeout on it. We can't inform the caller that for a
4170 * specific app, the session failed so lets continue here.
4171 */
4172 ret = 0; /* Not an error. */
4173 continue;
4174 case -ENOMEM:
4175 default:
4176 goto error_rcu_unlock;
4177 }
4178 }
4179 assert(ua_sess);
4180
4181 pthread_mutex_lock(&ua_sess->lock);
4182
4183 if (ua_sess->deleted) {
4184 pthread_mutex_unlock(&ua_sess->lock);
4185 continue;
4186 }
4187
4188 if (!strncmp(uchan->name, DEFAULT_METADATA_NAME,
4189 sizeof(uchan->name))) {
4190 copy_channel_attr_to_ustctl(&ua_sess->metadata_attr, &uchan->attr);
4191 ret = 0;
4192 } else {
4193 /* Create channel onto application. We don't need the chan ref. */
4194 ret = create_ust_app_channel(ua_sess, uchan, app,
4195 LTTNG_UST_CHAN_PER_CPU, usess, NULL);
4196 }
4197 pthread_mutex_unlock(&ua_sess->lock);
4198 if (ret < 0) {
4199 /* Cleanup the created session if it's the case. */
4200 if (created) {
4201 destroy_app_session(app, ua_sess);
4202 }
4203 switch (ret) {
4204 case -ENOTCONN:
4205 /*
4206 * The application's socket is not valid. Either a bad socket
4207 * or a timeout on it. We can't inform the caller that for a
4208 * specific app, the session failed so lets continue here.
4209 */
4210 ret = 0; /* Not an error. */
4211 continue;
4212 case -ENOMEM:
4213 default:
4214 goto error_rcu_unlock;
4215 }
4216 }
4217 }
4218
4219 error_rcu_unlock:
4220 rcu_read_unlock();
4221 return ret;
4222 }
4223
4224 /*
4225 * Enable event for a specific session and channel on the tracer.
4226 */
4227 int ust_app_enable_event_glb(struct ltt_ust_session *usess,
4228 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
4229 {
4230 int ret = 0;
4231 struct lttng_ht_iter iter, uiter;
4232 struct lttng_ht_node_str *ua_chan_node;
4233 struct ust_app *app;
4234 struct ust_app_session *ua_sess;
4235 struct ust_app_channel *ua_chan;
4236 struct ust_app_event *ua_event;
4237
4238 DBG("UST app enabling event %s for all apps for session id %" PRIu64,
4239 uevent->attr.name, usess->id);
4240
4241 /*
4242 * NOTE: At this point, this function is called only if the session and
4243 * channel passed are already created for all apps. and enabled on the
4244 * tracer also.
4245 */
4246
4247 rcu_read_lock();
4248
4249 /* For all registered applications */
4250 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4251 if (!app->compatible) {
4252 /*
4253 * TODO: In time, we should notice the caller of this error by
4254 * telling him that this is a version error.
4255 */
4256 continue;
4257 }
4258 ua_sess = lookup_session_by_app(usess, app);
4259 if (!ua_sess) {
4260 /* The application has problem or is probably dead. */
4261 continue;
4262 }
4263
4264 pthread_mutex_lock(&ua_sess->lock);
4265
4266 if (ua_sess->deleted) {
4267 pthread_mutex_unlock(&ua_sess->lock);
4268 continue;
4269 }
4270
4271 /* Lookup channel in the ust app session */
4272 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4273 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
4274 /*
4275 * It is possible that the channel cannot be found is
4276 * the channel/event creation occurs concurrently with
4277 * an application exit.
4278 */
4279 if (!ua_chan_node) {
4280 pthread_mutex_unlock(&ua_sess->lock);
4281 continue;
4282 }
4283
4284 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4285
4286 /* Get event node */
4287 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
4288 uevent->filter, uevent->attr.loglevel, uevent->exclusion);
4289 if (ua_event == NULL) {
4290 DBG3("UST app enable event %s not found for app PID %d."
4291 "Skipping app", uevent->attr.name, app->pid);
4292 goto next_app;
4293 }
4294
4295 ret = enable_ust_app_event(ua_sess, ua_event, app);
4296 if (ret < 0) {
4297 pthread_mutex_unlock(&ua_sess->lock);
4298 goto error;
4299 }
4300 next_app:
4301 pthread_mutex_unlock(&ua_sess->lock);
4302 }
4303
4304 error:
4305 rcu_read_unlock();
4306 return ret;
4307 }
4308
4309 /*
4310 * For a specific existing UST session and UST channel, creates the event for
4311 * all registered apps.
4312 */
4313 int ust_app_create_event_glb(struct ltt_ust_session *usess,
4314 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
4315 {
4316 int ret = 0;
4317 struct lttng_ht_iter iter, uiter;
4318 struct lttng_ht_node_str *ua_chan_node;
4319 struct ust_app *app;
4320 struct ust_app_session *ua_sess;
4321 struct ust_app_channel *ua_chan;
4322
4323 DBG("UST app creating event %s for all apps for session id %" PRIu64,
4324 uevent->attr.name, usess->id);
4325
4326 rcu_read_lock();
4327
4328 /* For all registered applications */
4329 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4330 if (!app->compatible) {
4331 /*
4332 * TODO: In time, we should notice the caller of this error by
4333 * telling him that this is a version error.
4334 */
4335 continue;
4336 }
4337 ua_sess = lookup_session_by_app(usess, app);
4338 if (!ua_sess) {
4339 /* The application has problem or is probably dead. */
4340 continue;
4341 }
4342
4343 pthread_mutex_lock(&ua_sess->lock);
4344
4345 if (ua_sess->deleted) {
4346 pthread_mutex_unlock(&ua_sess->lock);
4347 continue;
4348 }
4349
4350 /* Lookup channel in the ust app session */
4351 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4352 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
4353 /* If the channel is not found, there is a code flow error */
4354 assert(ua_chan_node);
4355
4356 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4357
4358 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
4359 pthread_mutex_unlock(&ua_sess->lock);
4360 if (ret < 0) {
4361 if (ret != -LTTNG_UST_ERR_EXIST) {
4362 /* Possible value at this point: -ENOMEM. If so, we stop! */
4363 break;
4364 }
4365 DBG2("UST app event %s already exist on app PID %d",
4366 uevent->attr.name, app->pid);
4367 continue;
4368 }
4369 }
4370
4371 rcu_read_unlock();
4372
4373 return ret;
4374 }
4375
4376 /*
4377 * Start tracing for a specific UST session and app.
4378 *
4379 * Called with UST app session lock held.
4380 *
4381 */
static
int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app)
{
	int ret = 0;
	struct ust_app_session *ua_sess;

	DBG("Starting tracing for ust app pid %d", app->pid);

	rcu_read_lock();

	/* Incompatible (wrong ABI version) apps are silently skipped. */
	if (!app->compatible) {
		goto end;
	}

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		/* The session is in teardown process. Ignore and continue. */
		goto end;
	}

	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		/* Session is being destroyed concurrently; nothing to start. */
		pthread_mutex_unlock(&ua_sess->lock);
		goto end;
	}

	/* Upon restart, we skip the setup, already done */
	if (ua_sess->started) {
		goto skip_setup;
	}

	/* Create directories if consumer is LOCAL and has a path defined. */
	if (usess->consumer->type == CONSUMER_DST_LOCAL &&
			strlen(usess->consumer->dst.trace_path) > 0) {
		ret = run_as_mkdir_recursive(usess->consumer->dst.trace_path,
				S_IRWXU | S_IRWXG, ua_sess->euid, ua_sess->egid);
		if (ret < 0) {
			/* An already-existing trace directory is not an error. */
			if (errno != EEXIST) {
				ERR("Trace directory creation error");
				goto error_unlock;
			}
		}
	}

	/*
	 * Create the metadata for the application. This returns gracefully if a
	 * metadata was already set for the session.
	 */
	ret = create_ust_app_metadata(ua_sess, app, usess->consumer);
	if (ret < 0) {
		goto error_unlock;
	}

	health_code_update();

skip_setup:
	/* This start the UST tracing */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_start_session(app->sock, ua_sess->handle);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Error starting tracing for app pid: %d (ret: %d)",
					app->pid, ret);
		} else {
			DBG("UST app start session failed. Application is dead.");
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			pthread_mutex_unlock(&ua_sess->lock);
			goto end;
		}
		goto error_unlock;
	}

	/* Indicate that the session has been started once */
	ua_sess->started = 1;

	pthread_mutex_unlock(&ua_sess->lock);

	health_code_update();

	/* Quiescent wait after starting trace */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_wait_quiescent(app->sock);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
		/* Non-fatal: only log the quiescent wait failure. */
		ERR("UST app wait quiescent failed for app pid %d ret %d",
				app->pid, ret);
	}

end:
	rcu_read_unlock();
	health_code_update();
	return 0;

error_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
	rcu_read_unlock();
	health_code_update();
	return -1;
}
4487
4488 /*
4489 * Stop tracing for a specific UST session and app.
4490 */
static
int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
{
	int ret = 0;
	struct ust_app_session *ua_sess;
	struct ust_registry_session *registry;

	DBG("Stopping tracing for ust app pid %d", app->pid);

	rcu_read_lock();

	/* Incompatible apps were never started; nothing to stop. */
	if (!app->compatible) {
		goto end_no_session;
	}

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		goto end_no_session;
	}

	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		pthread_mutex_unlock(&ua_sess->lock);
		goto end_no_session;
	}

	/*
	 * If started = 0, it means that stop trace has been called for a session
	 * that was never started. It's possible since we can have a fail start
	 * from either the application manager thread or the command thread. Simply
	 * indicate that this is a stop error.
	 */
	if (!ua_sess->started) {
		goto error_rcu_unlock;
	}

	health_code_update();

	/* This inhibits UST tracing */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_stop_session(app->sock, ua_sess->handle);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Error stopping tracing for app pid: %d (ret: %d)",
					app->pid, ret);
		} else {
			DBG("UST app stop session failed. Application is dead.");
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			goto end_unlock;
		}
		goto error_rcu_unlock;
	}

	health_code_update();

	/* Quiescent wait after stopping trace */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_wait_quiescent(app->sock);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
		/* Non-fatal: only log the quiescent wait failure. */
		ERR("UST app wait quiescent failed for app pid %d ret %d",
				app->pid, ret);
	}

	health_code_update();

	registry = get_session_registry(ua_sess);

	/* The UST app session is held registry shall not be null. */
	assert(registry);

	/* Push metadata for application before freeing the application. */
	(void) push_metadata(registry, ua_sess->consumer);

end_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
end_no_session:
	rcu_read_unlock();
	health_code_update();
	return 0;

error_rcu_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
	rcu_read_unlock();
	health_code_update();
	return -1;
}
4584
4585 static
4586 int ust_app_flush_app_session(struct ust_app *app,
4587 struct ust_app_session *ua_sess)
4588 {
4589 int ret, retval = 0;
4590 struct lttng_ht_iter iter;
4591 struct ust_app_channel *ua_chan;
4592 struct consumer_socket *socket;
4593
4594 DBG("Flushing app session buffers for ust app pid %d", app->pid);
4595
4596 rcu_read_lock();
4597
4598 if (!app->compatible) {
4599 goto end_not_compatible;
4600 }
4601
4602 pthread_mutex_lock(&ua_sess->lock);
4603
4604 if (ua_sess->deleted) {
4605 goto end_deleted;
4606 }
4607
4608 health_code_update();
4609
4610 /* Flushing buffers */
4611 socket = consumer_find_socket_by_bitness(app->bits_per_long,
4612 ua_sess->consumer);
4613
4614 /* Flush buffers and push metadata. */
4615 switch (ua_sess->buffer_type) {
4616 case LTTNG_BUFFER_PER_PID:
4617 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
4618 node.node) {
4619 health_code_update();
4620 ret = consumer_flush_channel(socket, ua_chan->key);
4621 if (ret) {
4622 ERR("Error flushing consumer channel");
4623 retval = -1;
4624 continue;
4625 }
4626 }
4627 break;
4628 case LTTNG_BUFFER_PER_UID:
4629 default:
4630 assert(0);
4631 break;
4632 }
4633
4634 health_code_update();
4635
4636 end_deleted:
4637 pthread_mutex_unlock(&ua_sess->lock);
4638
4639 end_not_compatible:
4640 rcu_read_unlock();
4641 health_code_update();
4642 return retval;
4643 }
4644
4645 /*
4646 * Flush buffers for all applications for a specific UST session.
4647 * Called with UST session lock held.
4648 */
static
int ust_app_flush_session(struct ltt_ust_session *usess)

{
	int ret = 0;

	DBG("Flushing session buffers for all ust apps");

	rcu_read_lock();

	/* Flush buffers and push metadata. */
	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;
		struct lttng_ht_iter iter;

		/* Flush all per UID buffers associated to that session. */
		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct ust_registry_session *ust_session_reg;
			struct buffer_reg_channel *reg_chan;
			struct consumer_socket *socket;

			/* Get consumer socket to use to push the metadata.*/
			socket = consumer_find_socket_by_bitness(reg->bits_per_long,
					usess->consumer);
			if (!socket) {
				/* Ignore request if no consumer is found for the session. */
				continue;
			}

			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
					reg_chan, node.node) {
				/*
				 * The following call will print error values so the return
				 * code is of little importance because whatever happens, we
				 * have to try them all.
				 */
				(void) consumer_flush_channel(socket, reg_chan->consumer_key);
			}

			ust_session_reg = reg->registry->reg.ust;
			/* Push metadata. */
			(void) push_metadata(ust_session_reg, usess->consumer);
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		struct ust_app_session *ua_sess;
		struct lttng_ht_iter iter;
		struct ust_app *app;

		/* Per-PID buffers are flushed app by app. */
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			ua_sess = lookup_session_by_app(usess, app);
			if (ua_sess == NULL) {
				continue;
			}
			/* Best-effort: per-app failures are logged by the callee. */
			(void) ust_app_flush_app_session(app, ua_sess);
		}
		break;
	}
	default:
		ret = -1;
		assert(0);
		break;
	}

	rcu_read_unlock();
	health_code_update();
	return ret;
}
4721
4722 static
4723 int ust_app_clear_quiescent_app_session(struct ust_app *app,
4724 struct ust_app_session *ua_sess)
4725 {
4726 int ret = 0;
4727 struct lttng_ht_iter iter;
4728 struct ust_app_channel *ua_chan;
4729 struct consumer_socket *socket;
4730
4731 DBG("Clearing stream quiescent state for ust app pid %d", app->pid);
4732
4733 rcu_read_lock();
4734
4735 if (!app->compatible) {
4736 goto end_not_compatible;
4737 }
4738
4739 pthread_mutex_lock(&ua_sess->lock);
4740
4741 if (ua_sess->deleted) {
4742 goto end_unlock;
4743 }
4744
4745 health_code_update();
4746
4747 socket = consumer_find_socket_by_bitness(app->bits_per_long,
4748 ua_sess->consumer);
4749 if (!socket) {
4750 ERR("Failed to find consumer (%" PRIu32 ") socket",
4751 app->bits_per_long);
4752 ret = -1;
4753 goto end_unlock;
4754 }
4755
4756 /* Clear quiescent state. */
4757 switch (ua_sess->buffer_type) {
4758 case LTTNG_BUFFER_PER_PID:
4759 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter,
4760 ua_chan, node.node) {
4761 health_code_update();
4762 ret = consumer_clear_quiescent_channel(socket,
4763 ua_chan->key);
4764 if (ret) {
4765 ERR("Error clearing quiescent state for consumer channel");
4766 ret = -1;
4767 continue;
4768 }
4769 }
4770 break;
4771 case LTTNG_BUFFER_PER_UID:
4772 default:
4773 assert(0);
4774 ret = -1;
4775 break;
4776 }
4777
4778 health_code_update();
4779
4780 end_unlock:
4781 pthread_mutex_unlock(&ua_sess->lock);
4782
4783 end_not_compatible:
4784 rcu_read_unlock();
4785 health_code_update();
4786 return ret;
4787 }
4788
4789 /*
4790 * Clear quiescent state in each stream for all applications for a
4791 * specific UST session.
4792 * Called with UST session lock held.
4793 */
static
int ust_app_clear_quiescent_session(struct ltt_ust_session *usess)

{
	int ret = 0;

	DBG("Clearing stream quiescent state for all ust apps");

	rcu_read_lock();

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct lttng_ht_iter iter;
		struct buffer_reg_uid *reg;

		/*
		 * Clear quiescent for all per UID buffers associated to
		 * that session.
		 */
		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct consumer_socket *socket;
			struct buffer_reg_channel *reg_chan;

			/* Get associated consumer socket.*/
			socket = consumer_find_socket_by_bitness(
					reg->bits_per_long, usess->consumer);
			if (!socket) {
				/*
				 * Ignore request if no consumer is found for
				 * the session.
				 */
				continue;
			}

			cds_lfht_for_each_entry(reg->registry->channels->ht,
					&iter.iter, reg_chan, node.node) {
				/*
				 * The following call will print error values so
				 * the return code is of little importance
				 * because whatever happens, we have to try them
				 * all.
				 */
				(void) consumer_clear_quiescent_channel(socket,
						reg_chan->consumer_key);
			}
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		struct ust_app_session *ua_sess;
		struct lttng_ht_iter iter;
		struct ust_app *app;

		/* Per-PID buffers are cleared app by app. */
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app,
				pid_n.node) {
			ua_sess = lookup_session_by_app(usess, app);
			if (ua_sess == NULL) {
				continue;
			}
			/* Best-effort: per-app failures are logged by the callee. */
			(void) ust_app_clear_quiescent_app_session(app,
					ua_sess);
		}
		break;
	}
	default:
		ret = -1;
		assert(0);
		break;
	}

	rcu_read_unlock();
	health_code_update();
	return ret;
}
4870
4871 /*
4872 * Destroy a specific UST session in apps.
4873 */
4874 static int destroy_trace(struct ltt_ust_session *usess, struct ust_app *app)
4875 {
4876 int ret;
4877 struct ust_app_session *ua_sess;
4878 struct lttng_ht_iter iter;
4879 struct lttng_ht_node_u64 *node;
4880
4881 DBG("Destroy tracing for ust app pid %d", app->pid);
4882
4883 rcu_read_lock();
4884
4885 if (!app->compatible) {
4886 goto end;
4887 }
4888
4889 __lookup_session_by_app(usess, app, &iter);
4890 node = lttng_ht_iter_get_node_u64(&iter);
4891 if (node == NULL) {
4892 /* Session is being or is deleted. */
4893 goto end;
4894 }
4895 ua_sess = caa_container_of(node, struct ust_app_session, node);
4896
4897 health_code_update();
4898 destroy_app_session(app, ua_sess);
4899
4900 health_code_update();
4901
4902 /* Quiescent wait after stopping trace */
4903 pthread_mutex_lock(&app->sock_lock);
4904 ret = ustctl_wait_quiescent(app->sock);
4905 pthread_mutex_unlock(&app->sock_lock);
4906 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4907 ERR("UST app wait quiescent failed for app pid %d ret %d",
4908 app->pid, ret);
4909 }
4910 end:
4911 rcu_read_unlock();
4912 health_code_update();
4913 return 0;
4914 }
4915
4916 /*
4917 * Start tracing for the UST session.
4918 */
4919 int ust_app_start_trace_all(struct ltt_ust_session *usess)
4920 {
4921 int ret = 0;
4922 struct lttng_ht_iter iter;
4923 struct ust_app *app;
4924
4925 DBG("Starting all UST traces");
4926
4927 rcu_read_lock();
4928
4929 /*
4930 * In a start-stop-start use-case, we need to clear the quiescent state
4931 * of each channel set by the prior stop command, thus ensuring that a
4932 * following stop or destroy is sure to grab a timestamp_end near those
4933 * operations, even if the packet is empty.
4934 */
4935 (void) ust_app_clear_quiescent_session(usess);
4936
4937 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4938 ret = ust_app_start_trace(usess, app);
4939 if (ret < 0) {
4940 /* Continue to next apps even on error */
4941 continue;
4942 }
4943 }
4944
4945 rcu_read_unlock();
4946
4947 return 0;
4948 }
4949
/*
 * Stop tracing for the UST session.
 * Called with UST session lock held.
 */
4954 int ust_app_stop_trace_all(struct ltt_ust_session *usess)
4955 {
4956 int ret = 0;
4957 struct lttng_ht_iter iter;
4958 struct ust_app *app;
4959
4960 DBG("Stopping all UST traces");
4961
4962 rcu_read_lock();
4963
4964 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4965 ret = ust_app_stop_trace(usess, app);
4966 if (ret < 0) {
4967 /* Continue to next apps even on error */
4968 continue;
4969 }
4970 }
4971
4972 (void) ust_app_flush_session(usess);
4973
4974 rcu_read_unlock();
4975
4976 return 0;
4977 }
4978
4979 /*
4980 * Destroy app UST session.
4981 */
4982 int ust_app_destroy_trace_all(struct ltt_ust_session *usess)
4983 {
4984 int ret = 0;
4985 struct lttng_ht_iter iter;
4986 struct ust_app *app;
4987
4988 DBG("Destroy all UST traces");
4989
4990 rcu_read_lock();
4991
4992 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4993 ret = destroy_trace(usess, app);
4994 if (ret < 0) {
4995 /* Continue to next apps even on error */
4996 continue;
4997 }
4998 }
4999
5000 rcu_read_unlock();
5001
5002 return 0;
5003 }
5004
static
void ust_app_global_create(struct ltt_ust_session *usess, struct ust_app *app)
{
	int ret = 0;
	struct lttng_ht_iter iter, uiter;
	struct ust_app_session *ua_sess = NULL;
	struct ust_app_channel *ua_chan;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;
	int is_created = 0;

	/* Create (or look up) the per-app shadow copy of the session. */
	ret = create_ust_app_session(usess, app, &ua_sess, &is_created);
	if (ret < 0) {
		/* Tracer is probably gone or ENOMEM. */
		goto error;
	}
	if (!is_created) {
		/* App session already created. */
		goto end;
	}
	assert(ua_sess);

	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		/* Session is being torn down concurrently; do nothing. */
		pthread_mutex_unlock(&ua_sess->lock);
		goto end;
	}

	/*
	 * We can iterate safely here over all UST app session since the create ust
	 * app session above made a shadow copy of the UST global domain from the
	 * ltt ust session.
	 */
	cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
			node.node) {
		ret = do_create_channel(app, usess, ua_sess, ua_chan);
		if (ret < 0 && ret != -ENOTCONN) {
			/*
			 * Stop everything. On error, the application
			 * failed, no more file descriptor are available
			 * or ENOMEM so stopping here is the only thing
			 * we can do for now. The only exception is
			 * -ENOTCONN, which indicates that the application
			 * has exit.
			 */
			goto error_unlock;
		}

		/*
		 * Add context using the list so they are enabled in the same order the
		 * user added them.
		 */
		cds_list_for_each_entry(ua_ctx, &ua_chan->ctx_list, list) {
			ret = create_ust_channel_context(ua_chan, ua_ctx, app);
			if (ret < 0) {
				goto error_unlock;
			}
		}


		/* For each events */
		cds_lfht_for_each_entry(ua_chan->events->ht, &uiter.iter, ua_event,
				node.node) {
			ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
			if (ret < 0) {
				goto error_unlock;
			}
		}
	}

	pthread_mutex_unlock(&ua_sess->lock);

	if (usess->active) {
		/* The session is already started; start this app's trace too. */
		ret = ust_app_start_trace(usess, app);
		if (ret < 0) {
			goto error;
		}

		DBG2("UST trace started for app pid %d", app->pid);
	}
end:
	/* Everything went well at this point. */
	return;

error_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
error:
	/* On failure, tear down the partially-created app session. */
	if (ua_sess) {
		destroy_app_session(app, ua_sess);
	}
	return;
}
5098
static
void ust_app_global_destroy(struct ltt_ust_session *usess, struct ust_app *app)
{
	struct ust_app_session *ua_sess;

	/* Nothing to tear down if the app never created a session. */
	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess) {
		destroy_app_session(app, ua_sess);
	}
}
5110
5111 /*
5112 * Add channels/events from UST global domain to registered apps at sock.
5113 *
5114 * Called with session lock held.
5115 * Called with RCU read-side lock held.
5116 */
5117 void ust_app_global_update(struct ltt_ust_session *usess, struct ust_app *app)
5118 {
5119 assert(usess);
5120
5121 DBG2("UST app global update for app sock %d for session id %" PRIu64,
5122 app->sock, usess->id);
5123
5124 if (!app->compatible) {
5125 return;
5126 }
5127
5128 if (trace_ust_pid_tracker_lookup(usess, app->pid)) {
5129 ust_app_global_create(usess, app);
5130 } else {
5131 ust_app_global_destroy(usess, app);
5132 }
5133 }
5134
5135 /*
5136 * Called with session lock held.
5137 */
5138 void ust_app_global_update_all(struct ltt_ust_session *usess)
5139 {
5140 struct lttng_ht_iter iter;
5141 struct ust_app *app;
5142
5143 rcu_read_lock();
5144 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5145 ust_app_global_update(usess, app);
5146 }
5147 rcu_read_unlock();
5148 }
5149
5150 /*
5151 * Add context to a specific channel for global UST domain.
5152 */
5153 int ust_app_add_ctx_channel_glb(struct ltt_ust_session *usess,
5154 struct ltt_ust_channel *uchan, struct ltt_ust_context *uctx)
5155 {
5156 int ret = 0;
5157 struct lttng_ht_node_str *ua_chan_node;
5158 struct lttng_ht_iter iter, uiter;
5159 struct ust_app_channel *ua_chan = NULL;
5160 struct ust_app_session *ua_sess;
5161 struct ust_app *app;
5162
5163 rcu_read_lock();
5164
5165 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5166 if (!app->compatible) {
5167 /*
5168 * TODO: In time, we should notice the caller of this error by
5169 * telling him that this is a version error.
5170 */
5171 continue;
5172 }
5173 ua_sess = lookup_session_by_app(usess, app);
5174 if (ua_sess == NULL) {
5175 continue;
5176 }
5177
5178 pthread_mutex_lock(&ua_sess->lock);
5179
5180 if (ua_sess->deleted) {
5181 pthread_mutex_unlock(&ua_sess->lock);
5182 continue;
5183 }
5184
5185 /* Lookup channel in the ust app session */
5186 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
5187 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
5188 if (ua_chan_node == NULL) {
5189 goto next_app;
5190 }
5191 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel,
5192 node);
5193 ret = create_ust_app_channel_context(ua_sess, ua_chan, &uctx->ctx, app);
5194 if (ret < 0) {
5195 goto next_app;
5196 }
5197 next_app:
5198 pthread_mutex_unlock(&ua_sess->lock);
5199 }
5200
5201 rcu_read_unlock();
5202 return ret;
5203 }
5204
5205 /*
5206 * Enable event for a channel from a UST session for a specific PID.
5207 */
int ust_app_enable_event_pid(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent, pid_t pid)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;
	struct ust_app_event *ua_event;

	DBG("UST app enabling event %s for PID %d", uevent->attr.name, pid);

	rcu_read_lock();

	app = ust_app_find_by_pid(pid);
	if (app == NULL) {
		ERR("UST app enable event per PID %d not found", pid);
		ret = -1;
		goto end;
	}

	/* An incompatible app is not an error for the caller. */
	if (!app->compatible) {
		ret = 0;
		goto end;
	}

	ua_sess = lookup_session_by_app(usess, app);
	if (!ua_sess) {
		/* The application has problem or is probably dead. */
		ret = 0;
		goto end;
	}

	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		/* Session being torn down; report success and bail out. */
		ret = 0;
		goto end_unlock;
	}

	/* Lookup channel in the ust app session */
	lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
	ua_chan_node = lttng_ht_iter_get_node_str(&iter);
	/* If the channel is not found, there is a code flow error */
	assert(ua_chan_node);

	ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

	/* Create the event if this app does not have it yet; else enable it. */
	ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
			uevent->filter, uevent->attr.loglevel, uevent->exclusion);
	if (ua_event == NULL) {
		ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
		if (ret < 0) {
			goto end_unlock;
		}
	} else {
		ret = enable_ust_app_event(ua_sess, ua_event, app);
		if (ret < 0) {
			goto end_unlock;
		}
	}

end_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
end:
	rcu_read_unlock();
	return ret;
}
5277
5278 /*
5279 * Receive registration and populate the given msg structure.
5280 *
5281 * On success return 0 else a negative value returned by the ustctl call.
5282 */
5283 int ust_app_recv_registration(int sock, struct ust_register_msg *msg)
5284 {
5285 int ret;
5286 uint32_t pid, ppid, uid, gid;
5287
5288 assert(msg);
5289
5290 ret = ustctl_recv_reg_msg(sock, &msg->type, &msg->major, &msg->minor,
5291 &pid, &ppid, &uid, &gid,
5292 &msg->bits_per_long,
5293 &msg->uint8_t_alignment,
5294 &msg->uint16_t_alignment,
5295 &msg->uint32_t_alignment,
5296 &msg->uint64_t_alignment,
5297 &msg->long_alignment,
5298 &msg->byte_order,
5299 msg->name);
5300 if (ret < 0) {
5301 switch (-ret) {
5302 case EPIPE:
5303 case ECONNRESET:
5304 case LTTNG_UST_ERR_EXITING:
5305 DBG3("UST app recv reg message failed. Application died");
5306 break;
5307 case LTTNG_UST_ERR_UNSUP_MAJOR:
5308 ERR("UST app recv reg unsupported version %d.%d. Supporting %d.%d",
5309 msg->major, msg->minor, LTTNG_UST_ABI_MAJOR_VERSION,
5310 LTTNG_UST_ABI_MINOR_VERSION);
5311 break;
5312 default:
5313 ERR("UST app recv reg message failed with ret %d", ret);
5314 break;
5315 }
5316 goto error;
5317 }
5318 msg->pid = (pid_t) pid;
5319 msg->ppid = (pid_t) ppid;
5320 msg->uid = (uid_t) uid;
5321 msg->gid = (gid_t) gid;
5322
5323 error:
5324 return ret;
5325 }
5326
/*
 * Return a ust app session object using the application object and the
 * session object descriptor as a key. If not found, NULL is returned.
 * A RCU read side lock MUST be acquired when calling this function.
 */
5332 static struct ust_app_session *find_session_by_objd(struct ust_app *app,
5333 int objd)
5334 {
5335 struct lttng_ht_node_ulong *node;
5336 struct lttng_ht_iter iter;
5337 struct ust_app_session *ua_sess = NULL;
5338
5339 assert(app);
5340
5341 lttng_ht_lookup(app->ust_sessions_objd, (void *)((unsigned long) objd), &iter);
5342 node = lttng_ht_iter_get_node_ulong(&iter);
5343 if (node == NULL) {
5344 DBG2("UST app session find by objd %d not found", objd);
5345 goto error;
5346 }
5347
5348 ua_sess = caa_container_of(node, struct ust_app_session, ust_objd_node);
5349
5350 error:
5351 return ua_sess;
5352 }
5353
/*
 * Return a ust app channel object using the application object and the channel
 * object descriptor as a key. If not found, NULL is returned. A RCU read side
 * lock MUST be acquired before calling this function.
 */
5358 */
5359 static struct ust_app_channel *find_channel_by_objd(struct ust_app *app,
5360 int objd)
5361 {
5362 struct lttng_ht_node_ulong *node;
5363 struct lttng_ht_iter iter;
5364 struct ust_app_channel *ua_chan = NULL;
5365
5366 assert(app);
5367
5368 lttng_ht_lookup(app->ust_objd, (void *)((unsigned long) objd), &iter);
5369 node = lttng_ht_iter_get_node_ulong(&iter);
5370 if (node == NULL) {
5371 DBG2("UST app channel find by objd %d not found", objd);
5372 goto error;
5373 }
5374
5375 ua_chan = caa_container_of(node, struct ust_app_channel, ust_objd_node);
5376
5377 error:
5378 return ua_chan;
5379 }
5380
5381 /*
5382 * Reply to a register channel notification from an application on the notify
5383 * socket. The channel metadata is also created.
5384 *
5385 * The session UST registry lock is acquired in this function.
5386 *
5387 * On success 0 is returned else a negative value.
5388 */
5389 static int reply_ust_register_channel(int sock, int sobjd, int cobjd,
5390 size_t nr_fields, struct ustctl_field *fields)
5391 {
5392 int ret, ret_code = 0;
5393 uint32_t chan_id, reg_count;
5394 uint64_t chan_reg_key;
5395 enum ustctl_channel_header type;
5396 struct ust_app *app;
5397 struct ust_app_channel *ua_chan;
5398 struct ust_app_session *ua_sess;
5399 struct ust_registry_session *registry;
5400 struct ust_registry_channel *chan_reg;
5401
5402 rcu_read_lock();
5403
5404 /* Lookup application. If not found, there is a code flow error. */
5405 app = find_app_by_notify_sock(sock);
5406 if (!app) {
5407 DBG("Application socket %d is being torn down. Abort event notify",
5408 sock);
5409 ret = 0;
5410 goto error_rcu_unlock;
5411 }
5412
5413 /* Lookup channel by UST object descriptor. */
5414 ua_chan = find_channel_by_objd(app, cobjd);
5415 if (!ua_chan) {
5416 DBG("Application channel is being torn down. Abort event notify");
5417 ret = 0;
5418 goto error_rcu_unlock;
5419 }
5420
5421 assert(ua_chan->session);
5422 ua_sess = ua_chan->session;
5423
5424 /* Get right session registry depending on the session buffer type. */
5425 registry = get_session_registry(ua_sess);
5426 if (!registry) {
5427 DBG("Application session is being torn down. Abort event notify");
5428 ret = 0;
5429 goto error_rcu_unlock;
5430 };
5431
5432 /* Depending on the buffer type, a different channel key is used. */
5433 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
5434 chan_reg_key = ua_chan->tracing_channel_id;
5435 } else {
5436 chan_reg_key = ua_chan->key;
5437 }
5438
5439 pthread_mutex_lock(&registry->lock);
5440
5441 chan_reg = ust_registry_channel_find(registry, chan_reg_key);
5442 assert(chan_reg);
5443
5444 if (!chan_reg->register_done) {
5445 reg_count = ust_registry_get_event_count(chan_reg);
5446 if (reg_count < 31) {
5447 type = USTCTL_CHANNEL_HEADER_COMPACT;
5448 } else {
5449 type = USTCTL_CHANNEL_HEADER_LARGE;
5450 }
5451
5452 chan_reg->nr_ctx_fields = nr_fields;
5453 chan_reg->ctx_fields = fields;
5454 fields = NULL;
5455 chan_reg->header_type = type;
5456 } else {
5457 /* Get current already assigned values. */
5458 type = chan_reg->header_type;
5459 }
5460 /* Channel id is set during the object creation. */
5461 chan_id = chan_reg->chan_id;
5462
5463 /* Append to metadata */
5464 if (!chan_reg->metadata_dumped) {
5465 ret_code = ust_metadata_channel_statedump(registry, chan_reg);
5466 if (ret_code) {
5467 ERR("Error appending channel metadata (errno = %d)", ret_code);
5468 goto reply;
5469 }
5470 }
5471
5472 reply:
5473 DBG3("UST app replying to register channel key %" PRIu64
5474 " with id %u, type: %d, ret: %d", chan_reg_key, chan_id, type,
5475 ret_code);
5476
5477 ret = ustctl_reply_register_channel(sock, chan_id, type, ret_code);
5478 if (ret < 0) {
5479 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5480 ERR("UST app reply channel failed with ret %d", ret);
5481 } else {
5482 DBG3("UST app reply channel failed. Application died");
5483 }
5484 goto error;
5485 }
5486
5487 /* This channel registry registration is completed. */
5488 chan_reg->register_done = 1;
5489
5490 error:
5491 pthread_mutex_unlock(&registry->lock);
5492 error_rcu_unlock:
5493 rcu_read_unlock();
5494 free(fields);
5495 return ret;
5496 }
5497
/*
 * Add event to the UST channel registry. When the event is added to the
 * registry, the metadata is also created. Once done, this replies to the
 * application with the appropriate error code.
 *
 * Ownership of sig, fields and model_emf_uri is taken by this function:
 * they are freed on every path (either by ust_registry_create_event() or
 * by the trailing free() calls), so the caller must not touch them after
 * this call.
 *
 * The session UST registry lock is acquired in the function.
 *
 * On success 0 is returned else a negative value.
 */
static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
		char *sig, size_t nr_fields, struct ustctl_field *fields,
		int loglevel_value, char *model_emf_uri)
{
	int ret, ret_code;
	uint32_t event_id = 0;
	uint64_t chan_reg_key;
	struct ust_app *app;
	struct ust_app_channel *ua_chan;
	struct ust_app_session *ua_sess;
	struct ust_registry_session *registry;

	rcu_read_lock();

	/* Lookup application. If not found, there is a code flow error. */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		/* Teardown race: not an error, reply is simply skipped. */
		DBG("Application socket %d is being torn down. Abort event notify",
				sock);
		ret = 0;
		goto error_rcu_unlock;
	}

	/* Lookup channel by UST object descriptor. */
	ua_chan = find_channel_by_objd(app, cobjd);
	if (!ua_chan) {
		DBG("Application channel is being torn down. Abort event notify");
		ret = 0;
		goto error_rcu_unlock;
	}

	assert(ua_chan->session);
	ua_sess = ua_chan->session;

	registry = get_session_registry(ua_sess);
	if (!registry) {
		DBG("Application session is being torn down. Abort event notify");
		ret = 0;
		goto error_rcu_unlock;
	}

	/* Per-UID buffers share one registry keyed by the tracing channel id. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
		chan_reg_key = ua_chan->tracing_channel_id;
	} else {
		chan_reg_key = ua_chan->key;
	}

	pthread_mutex_lock(&registry->lock);

	/*
	 * From this point on, this call acquires the ownership of the sig, fields
	 * and model_emf_uri meaning any free are done inside it if needed. These
	 * three variables MUST NOT be read/write after this.
	 */
	ret_code = ust_registry_create_event(registry, chan_reg_key,
			sobjd, cobjd, name, sig, nr_fields, fields,
			loglevel_value, model_emf_uri, ua_sess->buffer_type,
			&event_id, app);
	sig = NULL;
	fields = NULL;
	model_emf_uri = NULL;

	/*
	 * The return value is returned to ustctl so in case of an error, the
	 * application can be notified. In case of an error, it's important not to
	 * return a negative error or else the application will get closed.
	 */
	ret = ustctl_reply_register_event(sock, event_id, ret_code);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app reply event failed with ret %d", ret);
		} else {
			DBG3("UST app reply event failed. Application died");
		}
		/*
		 * No need to wipe the create event since the application socket will
		 * get close on error hence cleaning up everything by itself.
		 */
		goto error;
	}

	DBG3("UST registry event %s with id %" PRId32 " added successfully",
			name, event_id);

error:
	pthread_mutex_unlock(&registry->lock);
error_rcu_unlock:
	rcu_read_unlock();
	/* These are NULL if ownership was transferred to the registry above. */
	free(sig);
	free(fields);
	free(model_emf_uri);
	return ret;
}
5600
/*
 * Add enum to the UST session registry. Once done, this replies to the
 * application with the appropriate error code.
 *
 * Ownership of entries is taken by this function: it is freed on every
 * path (either here on early exits or by
 * ust_registry_create_or_find_enum()), so the caller must not touch it
 * after this call.
 *
 * The session UST registry lock is acquired within this function.
 *
 * On success 0 is returned else a negative value.
 */
static int add_enum_ust_registry(int sock, int sobjd, char *name,
		struct ustctl_enum_entry *entries, size_t nr_entries)
{
	int ret = 0, ret_code;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_registry_session *registry;
	uint64_t enum_id = -1ULL;

	rcu_read_lock();

	/* Lookup application. If not found, there is a code flow error. */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		/* Do not return an error: the teardown race is expected. */
		DBG("Application socket %d is being torn down. Aborting enum registration",
				sock);
		free(entries);
		goto error_rcu_unlock;
	}

	/* Lookup session by UST object descriptor. */
	ua_sess = find_session_by_objd(app, sobjd);
	if (!ua_sess) {
		/* Do not return an error: the teardown race is expected. */
		DBG("Application session is being torn down (session not found). Aborting enum registration.");
		free(entries);
		goto error_rcu_unlock;
	}

	registry = get_session_registry(ua_sess);
	if (!registry) {
		DBG("Application session is being torn down (registry not found). Aborting enum registration.");
		free(entries);
		goto error_rcu_unlock;
	}

	pthread_mutex_lock(&registry->lock);

	/*
	 * From this point on, the callee acquires the ownership of
	 * entries. The variable entries MUST NOT be read/written after
	 * call.
	 */
	ret_code = ust_registry_create_or_find_enum(registry, sobjd, name,
			entries, nr_entries, &enum_id);
	entries = NULL;

	/*
	 * The return value is returned to ustctl so in case of an error, the
	 * application can be notified. In case of an error, it's important not to
	 * return a negative error or else the application will get closed.
	 */
	ret = ustctl_reply_register_enum(sock, enum_id, ret_code);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app reply enum failed with ret %d", ret);
		} else {
			DBG3("UST app reply enum failed. Application died");
		}
		/*
		 * No need to wipe the create enum since the application socket will
		 * get close on error hence cleaning up everything by itself.
		 */
		goto error;
	}

	DBG3("UST registry enum %s added successfully or already found", name);

error:
	pthread_mutex_unlock(&registry->lock);
error_rcu_unlock:
	rcu_read_unlock();
	return ret;
}
5684
/*
 * Handle application notification through the given notify socket.
 *
 * Receives one notify command (event, channel or enum registration) and
 * dispatches it to the matching registry handler, which also sends the
 * reply back to the application.
 *
 * Return 0 on success or else a negative value.
 */
int ust_app_recv_notify(int sock)
{
	int ret;
	enum ustctl_notify_cmd cmd;

	DBG3("UST app receiving notify from sock %d", sock);

	ret = ustctl_recv_notify(sock, &cmd);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app recv notify failed with ret %d", ret);
		} else {
			DBG3("UST app recv notify failed. Application died");
		}
		goto error;
	}

	switch (cmd) {
	case USTCTL_NOTIFY_CMD_EVENT:
	{
		int sobjd, cobjd, loglevel_value;
		char name[LTTNG_UST_SYM_NAME_LEN], *sig, *model_emf_uri;
		size_t nr_fields;
		struct ustctl_field *fields;

		DBG2("UST app ustctl register event received");

		ret = ustctl_recv_register_event(sock, &sobjd, &cobjd, name,
				&loglevel_value, &sig, &nr_fields, &fields,
				&model_emf_uri);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app recv event failed with ret %d", ret);
			} else {
				DBG3("UST app recv event failed. Application died");
			}
			goto error;
		}

		/*
		 * Add event to the UST registry coming from the notify socket. This
		 * call transfers ownership of sig, fields and model_emf_uri to the
		 * callee, which frees them if needed. They must not be accessed
		 * here afterwards.
		 */
		ret = add_event_ust_registry(sock, sobjd, cobjd, name, sig, nr_fields,
				fields, loglevel_value, model_emf_uri);
		if (ret < 0) {
			goto error;
		}

		break;
	}
	case USTCTL_NOTIFY_CMD_CHANNEL:
	{
		int sobjd, cobjd;
		size_t nr_fields;
		struct ustctl_field *fields;

		DBG2("UST app ustctl register channel received");

		ret = ustctl_recv_register_channel(sock, &sobjd, &cobjd, &nr_fields,
				&fields);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app recv channel failed with ret %d", ret);
			} else {
				DBG3("UST app recv channel failed. Application died");
			}
			goto error;
		}

		/*
		 * Ownership of fields is transferred to this function call,
		 * meaning that it will be freed there if needed. After this,
		 * it's invalid to access fields or clean it up here.
		 */
		ret = reply_ust_register_channel(sock, sobjd, cobjd, nr_fields,
				fields);
		if (ret < 0) {
			goto error;
		}

		break;
	}
	case USTCTL_NOTIFY_CMD_ENUM:
	{
		int sobjd;
		char name[LTTNG_UST_SYM_NAME_LEN];
		size_t nr_entries;
		struct ustctl_enum_entry *entries;

		DBG2("UST app ustctl register enum received");

		ret = ustctl_recv_register_enum(sock, &sobjd, name,
				&entries, &nr_entries);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app recv enum failed with ret %d", ret);
			} else {
				DBG3("UST app recv enum failed. Application died");
			}
			goto error;
		}

		/* Callee assumes ownership of entries */
		ret = add_enum_ust_registry(sock, sobjd, name,
				entries, nr_entries);
		if (ret < 0) {
			goto error;
		}

		break;
	}
	default:
		/* Should NEVER happen. */
		assert(0);
	}

error:
	return ret;
}
5812
/*
 * Once the notify socket hangs up, this is called. First, it tries to find the
 * corresponding application. On failure, the call_rcu to close the socket is
 * executed. If an application is found, it tries to delete it from the notify
 * socket hash table. Whatever the result, it proceeds to the call_rcu.
 *
 * Note that an object needs to be allocated here so on ENOMEM failure, the
 * call RCU is not done but the rest of the cleanup is.
 */
void ust_app_notify_sock_unregister(int sock)
{
	int err_enomem = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct ust_app_notify_sock_obj *obj;

	assert(sock >= 0);

	rcu_read_lock();

	obj = zmalloc(sizeof(*obj));
	if (!obj) {
		/*
		 * An ENOMEM is kind of uncool. If this strikes we continue the
		 * procedure but the call_rcu will not be called. In this case, we
		 * accept the fd leak rather than possibly creating an unsynchronized
		 * state between threads.
		 *
		 * TODO: The notify object should be created once the notify socket is
		 * registered and stored independently from the ust app object. The
		 * tricky part is to synchronize the teardown of the application and
		 * this notify object. Let's keep that in mind so we can avoid this
		 * kind of shenanigans with ENOMEM in the teardown path.
		 */
		err_enomem = 1;
	} else {
		obj->fd = sock;
	}

	DBG("UST app notify socket unregister %d", sock);

	/*
	 * Lookup application by notify socket. If this fails, this means that the
	 * hash table delete has already been done by the application
	 * unregistration process so we can safely close the notify socket in a
	 * call RCU.
	 */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		goto close_socket;
	}

	iter.iter.node = &app->notify_sock_n.node;

	/*
	 * Whatever happens here either we fail or succeed, in both cases we have
	 * to close the socket after a grace period to continue to the call RCU
	 * here. If the deletion is successful, the application is not visible
	 * anymore by other threads and if it fails it means that it was already
	 * deleted from the hash table so either way we just have to close the
	 * socket.
	 */
	(void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);

close_socket:
	rcu_read_unlock();

	/*
	 * Close socket after a grace period to avoid for the socket to be reused
	 * before the application object is freed creating potential race between
	 * threads trying to add unique in the global hash table.
	 */
	if (!err_enomem) {
		call_rcu(&obj->head, close_notify_sock_rcu);
	}
}
5889
5890 /*
5891 * Destroy a ust app data structure and free its memory.
5892 */
5893 void ust_app_destroy(struct ust_app *app)
5894 {
5895 if (!app) {
5896 return;
5897 }
5898
5899 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
5900 }
5901
5902 /*
5903 * Take a snapshot for a given UST session. The snapshot is sent to the given
5904 * output.
5905 *
5906 * Return 0 on success or else a negative value.
5907 */
5908 int ust_app_snapshot_record(struct ltt_ust_session *usess,
5909 struct snapshot_output *output, int wait,
5910 uint64_t nb_packets_per_stream)
5911 {
5912 int ret = 0;
5913 struct lttng_ht_iter iter;
5914 struct ust_app *app;
5915 char pathname[PATH_MAX];
5916
5917 assert(usess);
5918 assert(output);
5919
5920 rcu_read_lock();
5921
5922 switch (usess->buffer_type) {
5923 case LTTNG_BUFFER_PER_UID:
5924 {
5925 struct buffer_reg_uid *reg;
5926
5927 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
5928 struct buffer_reg_channel *reg_chan;
5929 struct consumer_socket *socket;
5930
5931 /* Get consumer socket to use to push the metadata.*/
5932 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
5933 usess->consumer);
5934 if (!socket) {
5935 ret = -EINVAL;
5936 goto error;
5937 }
5938
5939 memset(pathname, 0, sizeof(pathname));
5940 ret = snprintf(pathname, sizeof(pathname),
5941 DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH,
5942 reg->uid, reg->bits_per_long);
5943 if (ret < 0) {
5944 PERROR("snprintf snapshot path");
5945 goto error;
5946 }
5947
5948 /* Add the UST default trace dir to path. */
5949 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
5950 reg_chan, node.node) {
5951 ret = consumer_snapshot_channel(socket, reg_chan->consumer_key,
5952 output, 0, usess->uid, usess->gid, pathname, wait,
5953 nb_packets_per_stream);
5954 if (ret < 0) {
5955 goto error;
5956 }
5957 }
5958 ret = consumer_snapshot_channel(socket,
5959 reg->registry->reg.ust->metadata_key, output, 1,
5960 usess->uid, usess->gid, pathname, wait, 0);
5961 if (ret < 0) {
5962 goto error;
5963 }
5964 }
5965 break;
5966 }
5967 case LTTNG_BUFFER_PER_PID:
5968 {
5969 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5970 struct consumer_socket *socket;
5971 struct lttng_ht_iter chan_iter;
5972 struct ust_app_channel *ua_chan;
5973 struct ust_app_session *ua_sess;
5974 struct ust_registry_session *registry;
5975
5976 ua_sess = lookup_session_by_app(usess, app);
5977 if (!ua_sess) {
5978 /* Session not associated with this app. */
5979 continue;
5980 }
5981
5982 /* Get the right consumer socket for the application. */
5983 socket = consumer_find_socket_by_bitness(app->bits_per_long,
5984 output->consumer);
5985 if (!socket) {
5986 ret = -EINVAL;
5987 goto error;
5988 }
5989
5990 /* Add the UST default trace dir to path. */
5991 memset(pathname, 0, sizeof(pathname));
5992 ret = snprintf(pathname, sizeof(pathname), DEFAULT_UST_TRACE_DIR "/%s",
5993 ua_sess->path);
5994 if (ret < 0) {
5995 PERROR("snprintf snapshot path");
5996 goto error;
5997 }
5998
5999 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
6000 ua_chan, node.node) {
6001 ret = consumer_snapshot_channel(socket, ua_chan->key, output,
6002 0, ua_sess->euid, ua_sess->egid, pathname, wait,
6003 nb_packets_per_stream);
6004 if (ret < 0) {
6005 goto error;
6006 }
6007 }
6008
6009 registry = get_session_registry(ua_sess);
6010 if (!registry) {
6011 DBG("Application session is being torn down. Abort snapshot record.");
6012 ret = -1;
6013 goto error;
6014 }
6015 ret = consumer_snapshot_channel(socket, registry->metadata_key, output,
6016 1, ua_sess->euid, ua_sess->egid, pathname, wait, 0);
6017 if (ret < 0) {
6018 goto error;
6019 }
6020 }
6021 break;
6022 }
6023 default:
6024 assert(0);
6025 break;
6026 }
6027
6028 error:
6029 rcu_read_unlock();
6030 return ret;
6031 }
6032
6033 /*
6034 * Return the size taken by one more packet per stream.
6035 */
6036 uint64_t ust_app_get_size_one_more_packet_per_stream(struct ltt_ust_session *usess,
6037 uint64_t cur_nr_packets)
6038 {
6039 uint64_t tot_size = 0;
6040 struct ust_app *app;
6041 struct lttng_ht_iter iter;
6042
6043 assert(usess);
6044
6045 switch (usess->buffer_type) {
6046 case LTTNG_BUFFER_PER_UID:
6047 {
6048 struct buffer_reg_uid *reg;
6049
6050 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
6051 struct buffer_reg_channel *reg_chan;
6052
6053 rcu_read_lock();
6054 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
6055 reg_chan, node.node) {
6056 if (cur_nr_packets >= reg_chan->num_subbuf) {
6057 /*
6058 * Don't take channel into account if we
6059 * already grab all its packets.
6060 */
6061 continue;
6062 }
6063 tot_size += reg_chan->subbuf_size * reg_chan->stream_count;
6064 }
6065 rcu_read_unlock();
6066 }
6067 break;
6068 }
6069 case LTTNG_BUFFER_PER_PID:
6070 {
6071 rcu_read_lock();
6072 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6073 struct ust_app_channel *ua_chan;
6074 struct ust_app_session *ua_sess;
6075 struct lttng_ht_iter chan_iter;
6076
6077 ua_sess = lookup_session_by_app(usess, app);
6078 if (!ua_sess) {
6079 /* Session not associated with this app. */
6080 continue;
6081 }
6082
6083 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
6084 ua_chan, node.node) {
6085 if (cur_nr_packets >= ua_chan->attr.num_subbuf) {
6086 /*
6087 * Don't take channel into account if we
6088 * already grab all its packets.
6089 */
6090 continue;
6091 }
6092 tot_size += ua_chan->attr.subbuf_size * ua_chan->streams.count;
6093 }
6094 }
6095 rcu_read_unlock();
6096 break;
6097 }
6098 default:
6099 assert(0);
6100 break;
6101 }
6102
6103 return tot_size;
6104 }
6105
/*
 * Fetch runtime statistics for a per-UID channel.
 *
 * Depending on the channel mode, fills either *lost (overwrite mode) or
 * *discarded (discard mode); the other counter is zeroed.
 *
 * Return 0 on success or else a negative value.
 */
int ust_app_uid_get_channel_runtime_stats(uint64_t ust_session_id,
		struct cds_list_head *buffer_reg_uid_list,
		struct consumer_output *consumer, uint64_t uchan_id,
		int overwrite, uint64_t *discarded, uint64_t *lost)
{
	uint64_t consumer_chan_key;
	int ret;

	/* Resolve the consumer-side key of this channel. */
	ret = buffer_reg_uid_consumer_channel_key(
			buffer_reg_uid_list, ust_session_id,
			uchan_id, &consumer_chan_key);
	if (ret < 0) {
		return ret;
	}

	if (overwrite) {
		ret = consumer_get_lost_packets(ust_session_id,
				consumer_chan_key, consumer, lost);
		*discarded = 0;
	} else {
		ret = consumer_get_discarded_events(ust_session_id,
				consumer_chan_key, consumer, discarded);
		*lost = 0;
	}

	return ret;
}
6134
6135 int ust_app_pid_get_channel_runtime_stats(struct ltt_ust_session *usess,
6136 struct ltt_ust_channel *uchan,
6137 struct consumer_output *consumer, int overwrite,
6138 uint64_t *discarded, uint64_t *lost)
6139 {
6140 int ret = 0;
6141 struct lttng_ht_iter iter;
6142 struct lttng_ht_node_str *ua_chan_node;
6143 struct ust_app *app;
6144 struct ust_app_session *ua_sess;
6145 struct ust_app_channel *ua_chan;
6146
6147 rcu_read_lock();
6148 /*
6149 * Iterate over every registered applications, return when we
6150 * found one in the right session and channel.
6151 */
6152 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6153 struct lttng_ht_iter uiter;
6154
6155 ua_sess = lookup_session_by_app(usess, app);
6156 if (ua_sess == NULL) {
6157 continue;
6158 }
6159
6160 /* Get channel */
6161 lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
6162 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
6163 /* If the session is found for the app, the channel must be there */
6164 assert(ua_chan_node);
6165
6166 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
6167
6168 if (overwrite) {
6169 ret = consumer_get_lost_packets(usess->id, ua_chan->key,
6170 consumer, lost);
6171 *discarded = 0;
6172 goto end;
6173 } else {
6174 ret = consumer_get_discarded_events(usess->id,
6175 ua_chan->key, consumer, discarded);
6176 *lost = 0;
6177 goto end;
6178 }
6179 }
6180
6181 end:
6182 rcu_read_unlock();
6183 return ret;
6184 }
6185
/*
 * Ask one application to regenerate its statedump for the given session.
 *
 * Lock order matters here: the session lock is taken first, the deleted
 * flag is re-checked under it, and only then is the app socket lock taken
 * for the ustctl call.
 *
 * Return 0 on success (or if the session is being torn down) else a
 * negative value.
 */
static
int ust_app_regenerate_statedump(struct ltt_ust_session *usess,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_session *ua_sess;

	DBG("Regenerating the metadata for ust app pid %d", app->pid);

	rcu_read_lock();

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		/* The session is in teardown process. Ignore and continue. */
		goto end;
	}

	pthread_mutex_lock(&ua_sess->lock);

	/* Session may have been flagged for deletion while awaiting the lock. */
	if (ua_sess->deleted) {
		goto end_unlock;
	}

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_regenerate_statedump(app->sock, ua_sess->handle);
	pthread_mutex_unlock(&app->sock_lock);

end_unlock:
	pthread_mutex_unlock(&ua_sess->lock);

end:
	rcu_read_unlock();
	health_code_update();
	return ret;
}
6221
6222 /*
6223 * Regenerate the statedump for each app in the session.
6224 */
6225 int ust_app_regenerate_statedump_all(struct ltt_ust_session *usess)
6226 {
6227 int ret = 0;
6228 struct lttng_ht_iter iter;
6229 struct ust_app *app;
6230
6231 DBG("Regenerating the metadata for all UST apps");
6232
6233 rcu_read_lock();
6234
6235 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6236 if (!app->compatible) {
6237 continue;
6238 }
6239
6240 ret = ust_app_regenerate_statedump(usess, app);
6241 if (ret < 0) {
6242 /* Continue to the next app even on error */
6243 continue;
6244 }
6245 }
6246
6247 rcu_read_unlock();
6248
6249 return 0;
6250 }
This page took 0.217119 seconds and 3 git commands to generate.