/*
 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
 * Copyright (C) 2016 - Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#define _LGPL_SOURCE
#include <errno.h>
#include <inttypes.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <urcu/compiler.h>
#include <lttng/ust-error.h>
#include <signal.h>

#include <common/common.h>
#include <common/sessiond-comm/sessiond-comm.h>

#include "buffer-registry.h"
#include "fd-limit.h"
#include "health-sessiond.h"
#include "ust-app.h"
#include "ust-consumer.h"
#include "ust-ctl.h"
#include "utils.h"
#include "session.h"

static
int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess);

/* Next available channel key. Access under next_channel_key_lock. */
static uint64_t _next_channel_key;
static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;

/* Next available session ID. Access under next_session_id_lock. */
static uint64_t _next_session_id;
static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Return the incremented value of next_channel_key, protected by
 * next_channel_key_lock.
 */
static uint64_t get_next_channel_key(void)
{
	uint64_t ret;

	pthread_mutex_lock(&next_channel_key_lock);
	ret = ++_next_channel_key;
	pthread_mutex_unlock(&next_channel_key_lock);
	return ret;
}

/*
 * Return the incremented value of next_session_id, protected by
 * next_session_id_lock.
 */
static uint64_t get_next_session_id(void)
{
	uint64_t ret;

	pthread_mutex_lock(&next_session_id_lock);
	ret = ++_next_session_id;
	pthread_mutex_unlock(&next_session_id_lock);
	return ret;
}
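
/*
 * Note: both generators above are serialized with a plain mutex. A
 * minimal equivalent sketch using C11 atomics (illustrative only, this
 * is not what the file actually uses) would be:
 *
 *   #include <stdatomic.h>
 *
 *   static _Atomic uint64_t _next_session_id;
 *
 *   static uint64_t get_next_session_id(void)
 *   {
 *           return atomic_fetch_add(&_next_session_id, 1) + 1;
 *   }
 */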

static void copy_channel_attr_to_ustctl(
		struct ustctl_consumer_channel_attr *attr,
		struct lttng_ust_channel_attr *uattr)
{
	/* Copy channel attributes field by field since the layouts differ. */
	attr->subbuf_size = uattr->subbuf_size;
	attr->num_subbuf = uattr->num_subbuf;
	attr->overwrite = uattr->overwrite;
	attr->switch_timer_interval = uattr->switch_timer_interval;
	attr->read_timer_interval = uattr->read_timer_interval;
	attr->output = uattr->output;
}

/*
 * Match function for the hash table lookup.
 *
 * It matches an ust app event based on four attributes: the event name,
 * the filter bytecode, the loglevel and the exclusions.
 */
static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
{
	struct ust_app_event *event;
	const struct ust_app_ht_key *key;
	int ev_loglevel_value;

	assert(node);
	assert(_key);

	event = caa_container_of(node, struct ust_app_event, node.node);
	key = _key;
	ev_loglevel_value = event->attr.loglevel;

	/* Match the 4 elements of the key: name, filter, loglevel, exclusions. */

	/* Event name */
	if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
		goto no_match;
	}

	/* Event loglevel. */
	if (ev_loglevel_value != key->loglevel_type) {
		if (event->attr.loglevel_type == LTTNG_UST_LOGLEVEL_ALL
				&& key->loglevel_type == 0 &&
				ev_loglevel_value == -1) {
			/*
			 * Match is accepted. This is because on event creation, the
			 * loglevel is set to -1 if the event loglevel type is ALL so 0 and
			 * -1 are accepted for this loglevel type since 0 is the one set by
			 * the API when receiving an enable event.
			 */
		} else {
			goto no_match;
		}
	}

	/* One of the filters is NULL, fail. */
	if ((key->filter && !event->filter) || (!key->filter && event->filter)) {
		goto no_match;
	}

	if (key->filter && event->filter) {
		/* Both filters exist; check length followed by the bytecode. */
		if (event->filter->len != key->filter->len ||
				memcmp(event->filter->data, key->filter->data,
					event->filter->len) != 0) {
			goto no_match;
		}
	}

	/* One of the exclusions is NULL, fail. */
	if ((key->exclusion && !event->exclusion) || (!key->exclusion && event->exclusion)) {
		goto no_match;
	}

	if (key->exclusion && event->exclusion) {
		/* Both exclusions exist; check count followed by the names. */
		if (event->exclusion->count != key->exclusion->count ||
				memcmp(event->exclusion->names, key->exclusion->names,
					event->exclusion->count * LTTNG_UST_SYM_NAME_LEN) != 0) {
			goto no_match;
		}
	}

	/* Match. */
	return 1;

no_match:
	return 0;
}
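
/*
 * The return convention above follows the urcu cds_lfht match function
 * contract: non-zero means "match", zero means "no match". The same
 * predicate serves both lookups (see find_ust_app_event below) and
 * unique insertions (add_unique_ust_app_event).
 */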

/*
 * Unique add of an ust app event in the given ht. This uses the custom
 * ht_match_ust_app_event match function and the event name as hash.
 */
static void add_unique_ust_app_event(struct ust_app_channel *ua_chan,
		struct ust_app_event *event)
{
	struct cds_lfht_node *node_ptr;
	struct ust_app_ht_key key;
	struct lttng_ht *ht;

	assert(ua_chan);
	assert(ua_chan->events);
	assert(event);

	ht = ua_chan->events;
	key.name = event->attr.name;
	key.filter = event->filter;
	key.loglevel_type = event->attr.loglevel;
	key.exclusion = event->exclusion;

	node_ptr = cds_lfht_add_unique(ht->ht,
			ht->hash_fct(event->node.key, lttng_ht_seed),
			ht_match_ust_app_event, &key, &event->node.node);
	assert(node_ptr == &event->node.node);
}
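
/*
 * Note: cds_lfht_add_unique() returns the node it just added on success
 * or the pre-existing matching node otherwise, so the assert() above
 * encodes the invariant that callers never insert a duplicate
 * name/filter/loglevel/exclusion key into a channel's event table.
 */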

/*
 * Close the notify socket from the given RCU head object. This MUST be called
 * through a call_rcu().
 */
static void close_notify_sock_rcu(struct rcu_head *head)
{
	int ret;
	struct ust_app_notify_sock_obj *obj =
		caa_container_of(head, struct ust_app_notify_sock_obj, head);

	/* Must have a valid fd here. */
	assert(obj->fd >= 0);

	ret = close(obj->fd);
	if (ret) {
		ERR("close notify sock %d RCU", obj->fd);
	}
	lttng_fd_put(LTTNG_FD_APPS, 1);

	free(obj);
}
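
/*
 * This callback is only meant to be scheduled through the RCU engine,
 * e.g. (sketch):
 *
 *   call_rcu(&obj->head, close_notify_sock_rcu);
 *
 * which guarantees the fd is closed only after every pre-existing RCU
 * reader that might still reference the notify socket has completed.
 */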

/*
 * Return the session registry according to the buffer type of the given
 * session.
 *
 * A per-UID registry object MUST exist before calling this function;
 * NULL is returned if the registry cannot be found. The RCU read side
 * lock must be held while calling this function and using the result.
 */
static struct ust_registry_session *get_session_registry(
		struct ust_app_session *ua_sess)
{
	struct ust_registry_session *registry = NULL;

	assert(ua_sess);

	switch (ua_sess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
	{
		struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
		if (!reg_pid) {
			goto error;
		}
		registry = reg_pid->registry->reg.ust;
		break;
	}
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
				ua_sess->tracing_id, ua_sess->bits_per_long, ua_sess->uid);
		if (!reg_uid) {
			goto error;
		}
		registry = reg_uid->registry->reg.ust;
		break;
	}
	default:
		assert(0);
	}

error:
	return registry;
}
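
/*
 * Caller-side sketch: the RCU read-side lock must span both the lookup
 * and every use of the returned registry pointer, e.g.:
 *
 *   rcu_read_lock();
 *   registry = get_session_registry(ua_sess);
 *   if (registry) {
 *           ... use registry ...
 *   }
 *   rcu_read_unlock();
 */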

/*
 * Delete ust context safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx,
		struct ust_app *app)
{
	int ret;

	assert(ua_ctx);

	if (ua_ctx->obj) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_object(sock, ua_ctx->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release ctx obj handle %d failed with ret %d",
					sock, ua_ctx->obj->handle, ret);
		}
		free(ua_ctx->obj);
	}
	free(ua_ctx);
}

/*
 * Delete ust app event safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_event(int sock, struct ust_app_event *ua_event,
		struct ust_app *app)
{
	int ret;

	assert(ua_event);

	free(ua_event->filter);
	/* free(NULL) is a no-op, no need to test for NULL first. */
	free(ua_event->exclusion);
	if (ua_event->obj != NULL) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_object(sock, ua_event->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release event obj failed with ret %d",
					sock, ret);
		}
		free(ua_event->obj);
	}
	free(ua_event);
}

/*
 * Release ust data object of the given stream.
 *
 * Return 0 on success or else a negative value.
 */
static int release_ust_app_stream(int sock, struct ust_app_stream *stream,
		struct ust_app *app)
{
	int ret = 0;

	assert(stream);

	if (stream->obj) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_object(sock, stream->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release stream obj failed with ret %d",
					sock, ret);
		}
		lttng_fd_put(LTTNG_FD_APPS, 2);
		free(stream->obj);
	}

	return ret;
}

/*
 * Delete ust app stream safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_stream(int sock, struct ust_app_stream *stream,
		struct ust_app *app)
{
	assert(stream);

	(void) release_ust_app_stream(sock, stream, app);
	free(stream);
}

/*
 * We need to execute ht_destroy outside of RCU read-side critical
 * section and outside of call_rcu thread, so we postpone its execution
 * using ht_cleanup_push. It is simpler than to change the semantic of
 * the many callers of delete_ust_app_session().
 */
static
void delete_ust_app_channel_rcu(struct rcu_head *head)
{
	struct ust_app_channel *ua_chan =
		caa_container_of(head, struct ust_app_channel, rcu_head);

	ht_cleanup_push(ua_chan->ctx);
	ht_cleanup_push(ua_chan->events);
	free(ua_chan);
}

/*
 * Extract the lost packet or discarded events counter when the channel is
 * being deleted and store the value in the parent channel so we can
 * access it from the lttng list command and at stop/destroy.
 *
 * The session list lock must be held by the caller.
 */
static
void save_per_pid_lost_discarded_counters(struct ust_app_channel *ua_chan)
{
	uint64_t discarded = 0, lost = 0;
	struct ltt_session *session;
	struct ltt_ust_channel *uchan;

	if (ua_chan->attr.type != LTTNG_UST_CHAN_PER_CPU) {
		return;
	}

	rcu_read_lock();
	session = session_find_by_id(ua_chan->session->tracing_id);
	if (!session || !session->ust_session) {
		/*
		 * Not finding the session is not an error because there are
		 * multiple ways the channels can be torn down.
		 *
		 * 1) The session daemon can initiate the destruction of the
		 *    ust app session after receiving a destroy command or
		 *    during its shutdown/teardown.
		 * 2) The application, since we are in per-pid tracing, is
		 *    unregistering and tearing down its ust app session.
		 *
		 * Both paths are protected by the session list lock which
		 * ensures that the accounting of lost packets and discarded
		 * events is done exactly once. The session is then unpublished
		 * from the session list, resulting in this condition.
		 */
		goto end;
	}

	if (ua_chan->attr.overwrite) {
		consumer_get_lost_packets(ua_chan->session->tracing_id,
				ua_chan->key, session->ust_session->consumer,
				&lost);
	} else {
		consumer_get_discarded_events(ua_chan->session->tracing_id,
				ua_chan->key, session->ust_session->consumer,
				&discarded);
	}
	uchan = trace_ust_find_channel_by_name(
			session->ust_session->domain_global.channels,
			ua_chan->name);
	if (!uchan) {
		ERR("Missing UST channel to store discarded counters");
		goto end;
	}

	uchan->per_pid_closed_app_discarded += discarded;
	uchan->per_pid_closed_app_lost += lost;

end:
	rcu_read_unlock();
}

/*
 * Delete ust app channel safely. RCU read lock must be held before calling
 * this function.
 *
 * The session list lock must be held by the caller.
 */
static
void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;
	struct ust_app_stream *stream, *stmp;
	struct ust_registry_session *registry;

	assert(ua_chan);

	DBG3("UST app deleting channel %s", ua_chan->name);

	/* Wipe stream */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		cds_list_del(&stream->list);
		delete_ust_app_stream(sock, stream, app);
	}

	/* Wipe context */
	cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
		cds_list_del(&ua_ctx->list);
		ret = lttng_ht_del(ua_chan->ctx, &iter);
		assert(!ret);
		delete_ust_app_ctx(sock, ua_ctx, app);
	}

	/* Wipe events */
	cds_lfht_for_each_entry(ua_chan->events->ht, &iter.iter, ua_event,
			node.node) {
		ret = lttng_ht_del(ua_chan->events, &iter);
		assert(!ret);
		delete_ust_app_event(sock, ua_event, app);
	}

	if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
		/* Wipe and free registry from session registry. */
		registry = get_session_registry(ua_chan->session);
		if (registry) {
			ust_registry_channel_del_free(registry, ua_chan->key);
		}
		save_per_pid_lost_discarded_counters(ua_chan);
	}

	if (ua_chan->obj != NULL) {
		/* Remove channel from application UST object descriptor. */
		iter.iter.node = &ua_chan->ust_objd_node.node;
		ret = lttng_ht_del(app->ust_objd, &iter);
		assert(!ret);
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_object(sock, ua_chan->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release channel obj failed with ret %d",
					sock, ret);
		}
		lttng_fd_put(LTTNG_FD_APPS, 1);
		free(ua_chan->obj);
	}
	call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
}

int ust_app_register_done(struct ust_app *app)
{
	int ret;

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_register_done(app->sock);
	pthread_mutex_unlock(&app->sock_lock);
	return ret;
}

int ust_app_release_object(struct ust_app *app, struct lttng_ust_object_data *data)
{
	int ret, sock;

	if (app) {
		pthread_mutex_lock(&app->sock_lock);
		sock = app->sock;
	} else {
		sock = -1;
	}
	ret = ustctl_release_object(sock, data);
	if (app) {
		pthread_mutex_unlock(&app->sock_lock);
	}
	return ret;
}
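
/*
 * Note on the sock == -1 case above: when no application is provided,
 * the intent is a local-only release; ustctl_release_object() is then
 * expected to skip the application command and only reclaim the
 * sessiond-side resources (an assumption about its behavior with a
 * negative socket, see the delete_ust_app_stream(-1, ...) callers
 * further below).
 */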

/*
 * Push metadata to consumer socket.
 *
 * RCU read-side lock must be held to guarantee existence of socket.
 * Must be called with the ust app session lock held.
 * Must be called with the registry lock held.
 *
 * On success, return the len of metadata pushed or else a negative value.
 * Returning -EPIPE means we could not send the metadata, but it can be
 * caused by recoverable errors (e.g. the application has terminated
 * concurrently).
 */
ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
		struct consumer_socket *socket, int send_zero_data)
{
	int ret;
	char *metadata_str = NULL;
	size_t len, offset, new_metadata_len_sent;
	ssize_t ret_val;
	uint64_t metadata_key, metadata_version;

	assert(registry);
	assert(socket);

	metadata_key = registry->metadata_key;

	/*
	 * Means that no metadata was assigned to the session. This can
	 * happen if no start has been done previously.
	 */
	if (!metadata_key) {
		return 0;
	}

	offset = registry->metadata_len_sent;
	len = registry->metadata_len - registry->metadata_len_sent;
	new_metadata_len_sent = registry->metadata_len;
	metadata_version = registry->metadata_version;
	if (len == 0) {
		DBG3("No metadata to push for metadata key %" PRIu64,
				registry->metadata_key);
		ret_val = len;
		if (send_zero_data) {
			DBG("No metadata to push");
			goto push_data;
		}
		goto end;
	}

	/* Allocate only what we have to send. */
	metadata_str = zmalloc(len);
	if (!metadata_str) {
		PERROR("zmalloc ust app metadata string");
		ret_val = -ENOMEM;
		goto error;
	}
	/* Copy what we haven't sent out. */
	memcpy(metadata_str, registry->metadata + offset, len);

push_data:
	pthread_mutex_unlock(&registry->lock);
	/*
	 * We need to unlock the registry while we push metadata to
	 * break a circular dependency between the consumerd metadata
	 * lock and the sessiond registry lock. Indeed, pushing metadata
	 * to the consumerd waits until it has been pushed all the way
	 * to the relayd, but doing so requires grabbing the metadata
	 * lock. If a concurrent metadata request is being performed by
	 * consumerd, this can try to grab the registry lock on the
	 * sessiond while holding the metadata lock on the consumer
	 * daemon. Those push and pull schemes are performed on two
	 * different bidirectional communication sockets.
	 */
	ret = consumer_push_metadata(socket, metadata_key,
			metadata_str, len, offset, metadata_version);
	pthread_mutex_lock(&registry->lock);
	if (ret < 0) {
		/*
		 * There is an acceptable race here between the registry
		 * metadata key assignment and the creation on the
		 * consumer. The session daemon can concurrently push
		 * metadata for this registry while being created on the
		 * consumer since the metadata key of the registry is
		 * assigned *before* it is set up to avoid the consumer
		 * asking for metadata that could possibly not be found
		 * in the session daemon.
		 *
		 * The metadata will get pushed either by the session
		 * being stopped or the consumer requesting metadata if
		 * that race is triggered.
		 */
		if (ret == -LTTCOMM_CONSUMERD_CHANNEL_FAIL) {
			ret = 0;
		} else {
			ERR("Error pushing metadata to consumer");
		}
		ret_val = ret;
		goto error_push;
	} else {
		/*
		 * Metadata may have been concurrently pushed, since
		 * we're not holding the registry lock while pushing to
		 * consumer. This is handled by the fact that we send
		 * the metadata content, size, and the offset at which
		 * that metadata belongs. This may arrive out of order
		 * on the consumer side, and the consumer is able to
		 * deal with overlapping fragments, which must be
		 * contiguous starting from offset 0. We keep the
		 * largest metadata_len_sent value of the concurrent
		 * send.
		 */
		registry->metadata_len_sent =
			max_t(size_t, registry->metadata_len_sent,
					new_metadata_len_sent);
	}
	free(metadata_str);
	return len;

end:
error:
	if (ret_val) {
		/*
		 * On error, flag the registry that the metadata is
		 * closed. We were unable to push anything and this
		 * means that either the consumer is not responding or
		 * the metadata cache has been destroyed on the
		 * consumer.
		 */
		registry->metadata_closed = 1;
	}
error_push:
	free(metadata_str);
	return ret_val;
}
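
/*
 * Caller-side sketch for ust_app_push_metadata(): the registry lock is
 * held on entry and on return, even though the function drops it
 * internally around consumer_push_metadata() (see push_data above):
 *
 *   pthread_mutex_lock(&registry->lock);
 *   ret = ust_app_push_metadata(registry, socket, 0);
 *   pthread_mutex_unlock(&registry->lock);
 *
 * push_metadata() below is exactly this pattern.
 */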

/*
 * For a given application and session, push metadata to consumer.
 * The consumer socket used to send the metadata is looked up from the
 * given consumer output based on its bitness.
 * RCU read-side lock must be held while calling this function,
 * therefore ensuring existence of registry. It also ensures existence
 * of socket throughout this function.
 *
 * Return 0 on success else a negative error.
 * Returning -EPIPE means we could not send the metadata, but it can be
 * caused by recoverable errors (e.g. the application has terminated
 * concurrently).
 */
static int push_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret_val;
	ssize_t ret;
	struct consumer_socket *socket;

	assert(registry);
	assert(consumer);

	pthread_mutex_lock(&registry->lock);
	if (registry->metadata_closed) {
		ret_val = -EPIPE;
		goto error;
	}

	/* Get consumer socket to use to push the metadata. */
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret_val = -1;
		goto error;
	}

	ret = ust_app_push_metadata(registry, socket, 0);
	if (ret < 0) {
		ret_val = ret;
		goto error;
	}
	pthread_mutex_unlock(&registry->lock);
	return 0;

error:
	pthread_mutex_unlock(&registry->lock);
	return ret_val;
}

/*
 * Send to the consumer a close metadata command for the given session. Once
 * done, the metadata channel is deleted and the session metadata pointer is
 * nullified. The session lock MUST be held unless the application is
 * in the destroy path.
 *
 * Return 0 on success else a negative value.
 */
static int close_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret;
	struct consumer_socket *socket;

	assert(registry);
	assert(consumer);

	rcu_read_lock();

	pthread_mutex_lock(&registry->lock);

	if (!registry->metadata_key || registry->metadata_closed) {
		ret = 0;
		goto end;
	}

	/* Get consumer socket to use to push the metadata. */
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret = -1;
		goto error;
	}

	ret = consumer_close_metadata(socket, registry->metadata_key);
	if (ret < 0) {
		goto error;
	}

error:
	/*
	 * Metadata closed. Even on error this means that the consumer is not
	 * responding or not found so either way a second close should NOT be
	 * emitted for this registry.
	 */
	registry->metadata_closed = 1;
end:
	pthread_mutex_unlock(&registry->lock);
	rcu_read_unlock();
	return ret;
}

/*
 * We need to execute ht_destroy outside of RCU read-side critical
 * section and outside of call_rcu thread, so we postpone its execution
 * using ht_cleanup_push. It is simpler than to change the semantic of
 * the many callers of delete_ust_app_session().
 */
static
void delete_ust_app_session_rcu(struct rcu_head *head)
{
	struct ust_app_session *ua_sess =
		caa_container_of(head, struct ust_app_session, rcu_head);

	ht_cleanup_push(ua_sess->channels);
	free(ua_sess);
}

/*
 * Delete ust app session safely. RCU read lock must be held before calling
 * this function.
 *
 * The session list lock must be held by the caller.
 */
static
void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_channel *ua_chan;
	struct ust_registry_session *registry;

	assert(ua_sess);

	pthread_mutex_lock(&ua_sess->lock);

	assert(!ua_sess->deleted);
	ua_sess->deleted = true;

	registry = get_session_registry(ua_sess);
	if (registry) {
		/* Push metadata for application before freeing the application. */
		(void) push_metadata(registry, ua_sess->consumer);

		/*
		 * Don't ask to close metadata for global per UID buffers. Close
		 * metadata only on destroy trace session in this case. Also, the
		 * previous push metadata could have flagged the metadata registry
		 * to close so don't send a close command if closed.
		 */
		if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
			/* And ask to close it for this session registry. */
			(void) close_metadata(registry, ua_sess->consumer);
		}
	}

	cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
			node.node) {
		ret = lttng_ht_del(ua_sess->channels, &iter);
		assert(!ret);
		delete_ust_app_channel(sock, ua_chan, app);
	}

	/* In case of per PID, the registry is kept in the session. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
		struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
		if (reg_pid) {
			buffer_reg_pid_remove(reg_pid);
			buffer_reg_pid_destroy(reg_pid);
		}
	}

	if (ua_sess->handle != -1) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_handle(sock, ua_sess->handle);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release session handle failed with ret %d",
					sock, ret);
		}
		/* Remove session from application UST object descriptor. */
		iter.iter.node = &ua_sess->ust_objd_node.node;
		ret = lttng_ht_del(app->ust_sessions_objd, &iter);
		assert(!ret);
	}

	pthread_mutex_unlock(&ua_sess->lock);

	consumer_output_put(ua_sess->consumer);

	call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
}
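
/*
 * Teardown locking summary for the function above: the session list
 * lock is taken first by the caller, then ua_sess->lock here. The
 * assert(!ua_sess->deleted) documents that only one teardown path may
 * ever reach delete_ust_app_session() for a given session; other code
 * paths are expected to check the deleted flag under ua_sess->lock
 * before touching the session.
 */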

/*
 * Delete a traceable application structure from the global list. Never call
 * this function outside of a call_rcu call.
 *
 * RCU read side lock should _NOT_ be held when calling this function.
 */
static
void delete_ust_app(struct ust_app *app)
{
	int ret, sock;
	struct ust_app_session *ua_sess, *tmp_ua_sess;

	/*
	 * The session list lock must be held during this function to guarantee
	 * the existence of ua_sess.
	 */
	session_lock_list();
	/* Delete ust app sessions info */
	sock = app->sock;
	app->sock = -1;

	/* Wipe sessions */
	cds_list_for_each_entry_safe(ua_sess, tmp_ua_sess, &app->teardown_head,
			teardown_node) {
		/* Free every object in the session and the session. */
		rcu_read_lock();
		delete_ust_app_session(sock, ua_sess, app);
		rcu_read_unlock();
	}

	ht_cleanup_push(app->sessions);
	ht_cleanup_push(app->ust_sessions_objd);
	ht_cleanup_push(app->ust_objd);

	/*
	 * Wait until we have deleted the application from the sock hash table
	 * before closing this socket, otherwise an application could re-use the
	 * socket ID and race with the teardown, using the same hash table entry.
	 *
	 * It's OK to leave the close in call_rcu. We want it to stay unique for
	 * all RCU readers that could run concurrently with unregister app,
	 * therefore we _need_ to only close that socket after a grace period. So
	 * it should stay in this RCU callback.
	 *
	 * This close() is a very important step of the synchronization model so
	 * every modification to this function must be carefully reviewed.
	 */
	ret = close(sock);
	if (ret) {
		PERROR("close");
	}
	lttng_fd_put(LTTNG_FD_APPS, 1);

	DBG2("UST app pid %d deleted", app->pid);
	free(app);
	session_unlock_list();
}

/*
 * URCU intermediate call to delete an UST app.
 */
static
void delete_ust_app_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_ulong *node =
		caa_container_of(head, struct lttng_ht_node_ulong, head);
	struct ust_app *app =
		caa_container_of(node, struct ust_app, pid_n);

	DBG3("Call RCU deleting app PID %d", app->pid);
	delete_ust_app(app);
}

/*
 * Delete the session from the application ht and delete the data structure by
 * freeing every object inside and releasing them.
 *
 * The session list lock must be held by the caller.
 */
static void destroy_app_session(struct ust_app *app,
		struct ust_app_session *ua_sess)
{
	int ret;
	struct lttng_ht_iter iter;

	assert(app);
	assert(ua_sess);

	iter.iter.node = &ua_sess->node.node;
	ret = lttng_ht_del(app->sessions, &iter);
	if (ret) {
		/* Already scheduled for teardown. */
		goto end;
	}

	/* Once deleted, free the data structure. */
	delete_ust_app_session(app->sock, ua_sess, app);

end:
	return;
}

/*
 * Alloc new UST app session.
 */
static
struct ust_app_session *alloc_ust_app_session(struct ust_app *app)
{
	struct ust_app_session *ua_sess;

	/* Initialize most of the default values by allocating and zeroing. */
	ua_sess = zmalloc(sizeof(struct ust_app_session));
	if (ua_sess == NULL) {
		PERROR("malloc");
		goto error_free;
	}

	ua_sess->handle = -1;
	ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
	ua_sess->metadata_attr.type = LTTNG_UST_CHAN_METADATA;
	pthread_mutex_init(&ua_sess->lock, NULL);

	return ua_sess;

error_free:
	return NULL;
}

/*
 * Alloc new UST app channel.
 */
static
struct ust_app_channel *alloc_ust_app_channel(char *name,
		struct ust_app_session *ua_sess,
		struct lttng_ust_channel_attr *attr)
{
	struct ust_app_channel *ua_chan;

	/* Initialize most of the default values by allocating and zeroing. */
	ua_chan = zmalloc(sizeof(struct ust_app_channel));
	if (ua_chan == NULL) {
		PERROR("malloc");
		goto error;
	}

	/* Setup channel name */
	strncpy(ua_chan->name, name, sizeof(ua_chan->name));
	ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';

	ua_chan->enabled = 1;
	ua_chan->handle = -1;
	ua_chan->session = ua_sess;
	ua_chan->key = get_next_channel_key();
	ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
	lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);

	CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
	CDS_INIT_LIST_HEAD(&ua_chan->ctx_list);

	/* Copy attributes */
	if (attr) {
		/* Translate from lttng_ust_channel to ustctl_consumer_channel_attr. */
		ua_chan->attr.subbuf_size = attr->subbuf_size;
		ua_chan->attr.num_subbuf = attr->num_subbuf;
		ua_chan->attr.overwrite = attr->overwrite;
		ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
		ua_chan->attr.read_timer_interval = attr->read_timer_interval;
		ua_chan->attr.output = attr->output;
	}
	/* By default, the channel is a per cpu channel. */
	ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;

	DBG3("UST app channel %s allocated", ua_chan->name);

	return ua_chan;

error:
	return NULL;
}

/*
 * Allocate and initialize a UST app stream.
 *
 * Return newly allocated stream pointer or NULL on error.
 */
struct ust_app_stream *ust_app_alloc_stream(void)
{
	struct ust_app_stream *stream = NULL;

	stream = zmalloc(sizeof(*stream));
	if (stream == NULL) {
		PERROR("zmalloc ust app stream");
		goto error;
	}

	/* Zero could be a valid value for a handle so set it to -1. */
	stream->handle = -1;

error:
	return stream;
}

/*
 * Alloc new UST app event.
 */
static
struct ust_app_event *alloc_ust_app_event(char *name,
		struct lttng_ust_event *attr)
{
	struct ust_app_event *ua_event;

	/* Initialize most of the default values by allocating and zeroing. */
	ua_event = zmalloc(sizeof(struct ust_app_event));
	if (ua_event == NULL) {
		PERROR("malloc");
		goto error;
	}

	ua_event->enabled = 1;
	strncpy(ua_event->name, name, sizeof(ua_event->name));
	ua_event->name[sizeof(ua_event->name) - 1] = '\0';
	lttng_ht_node_init_str(&ua_event->node, ua_event->name);

	/* Copy attributes */
	if (attr) {
		memcpy(&ua_event->attr, attr, sizeof(ua_event->attr));
	}

	DBG3("UST app event %s allocated", ua_event->name);

	return ua_event;

error:
	return NULL;
}

/*
 * Alloc new UST app context.
 */
static
struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context_attr *uctx)
{
	struct ust_app_ctx *ua_ctx;

	ua_ctx = zmalloc(sizeof(struct ust_app_ctx));
	if (ua_ctx == NULL) {
		goto error;
	}

	CDS_INIT_LIST_HEAD(&ua_ctx->list);

	if (uctx) {
		memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
		if (uctx->ctx == LTTNG_UST_CONTEXT_APP_CONTEXT) {
			char *provider_name = NULL, *ctx_name = NULL;

			provider_name = strdup(uctx->u.app_ctx.provider_name);
			ctx_name = strdup(uctx->u.app_ctx.ctx_name);
			if (!provider_name || !ctx_name) {
				free(provider_name);
				free(ctx_name);
				goto error;
			}

			ua_ctx->ctx.u.app_ctx.provider_name = provider_name;
			ua_ctx->ctx.u.app_ctx.ctx_name = ctx_name;
		}
	}

	DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);
	return ua_ctx;
error:
	free(ua_ctx);
	return NULL;
}

/*
 * Allocate a filter and copy the given original filter.
 *
 * Return allocated filter or NULL on error.
 */
static struct lttng_filter_bytecode *copy_filter_bytecode(
		struct lttng_filter_bytecode *orig_f)
{
	struct lttng_filter_bytecode *filter = NULL;

	/* Copy filter bytecode */
	filter = zmalloc(sizeof(*filter) + orig_f->len);
	if (!filter) {
		PERROR("zmalloc alloc filter bytecode");
		goto error;
	}

	memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);

error:
	return filter;
}

/*
 * Create a liblttng-ust filter bytecode from given bytecode.
 *
 * Return allocated filter or NULL on error.
 */
static struct lttng_ust_filter_bytecode *create_ust_bytecode_from_bytecode(
		struct lttng_filter_bytecode *orig_f)
{
	struct lttng_ust_filter_bytecode *filter = NULL;

	/* Copy filter bytecode */
	filter = zmalloc(sizeof(*filter) + orig_f->len);
	if (!filter) {
		PERROR("zmalloc alloc ust filter bytecode");
		goto error;
	}

	assert(sizeof(struct lttng_filter_bytecode) ==
			sizeof(struct lttng_ust_filter_bytecode));
	memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
error:
	return filter;
}
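
/*
 * The runtime assert() above only checks that the two bytecode structs
 * have the same size. Under a C11 toolchain, the same invariant could
 * be enforced at build time (sketch):
 *
 *   _Static_assert(sizeof(struct lttng_filter_bytecode) ==
 *           sizeof(struct lttng_ust_filter_bytecode),
 *           "filter bytecode ABI mismatch");
 */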

/*
 * Find an ust_app using the sock and return it. RCU read side lock must be
 * held before calling this helper function.
 */
struct ust_app *ust_app_find_by_sock(int sock)
{
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;

	lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node == NULL) {
		DBG2("UST app find by sock %d not found", sock);
		goto error;
	}

	return caa_container_of(node, struct ust_app, sock_n);

error:
	return NULL;
}

/*
 * Find an ust_app using the notify sock and return it. RCU read side lock must
 * be held before calling this helper function.
 */
static struct ust_app *find_app_by_notify_sock(int sock)
{
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;

	lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
			&iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node == NULL) {
		DBG2("UST app find by notify sock %d not found", sock);
		goto error;
	}

	return caa_container_of(node, struct ust_app, notify_sock_n);

error:
	return NULL;
}

/*
 * Look up an ust app event based on the event name, filter bytecode,
 * event loglevel and exclusions.
 *
 * Return an ust_app_event object or NULL on error.
 */
static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
		char *name, struct lttng_filter_bytecode *filter,
		int loglevel_value,
		const struct lttng_event_exclusion *exclusion)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *node;
	struct ust_app_event *event = NULL;
	struct ust_app_ht_key key;

	assert(name);
	assert(ht);

	/* Setup key for event lookup. */
	key.name = name;
	key.filter = filter;
	key.loglevel_type = loglevel_value;
	/* lttng_event_exclusion and lttng_ust_event_exclusion structures are similar */
	key.exclusion = exclusion;

	/* Lookup using the event name as hash and a custom match fct. */
	cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
			ht_match_ust_app_event, &key, &iter.iter);
	node = lttng_ht_iter_get_node_str(&iter);
	if (node == NULL) {
		goto end;
	}

	event = caa_container_of(node, struct ust_app_event, node);

end:
	return event;
}

/*
 * Create the channel context on the tracer.
 *
 * Called with UST app session lock held.
 */
static
int create_ust_channel_context(struct ust_app_channel *ua_chan,
		struct ust_app_ctx *ua_ctx, struct ust_app *app)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_add_context(app->sock, &ua_ctx->ctx,
			ua_chan->obj, &ua_ctx->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app create channel context failed for app (pid: %d) "
					"with ret %d", app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app add context failed. Application is dead.");
		}
		goto error;
	}

	ua_ctx->handle = ua_ctx->obj->handle;

	DBG2("UST app context handle %d created successfully for channel %s",
			ua_ctx->handle, ua_chan->name);

error:
	health_code_update();
	return ret;
}
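
/*
 * The error handling above is the canonical pattern for every ustctl_*()
 * command in this file: -EPIPE and -LTTNG_UST_ERR_EXITING mean that the
 * application died mid-command, which is expected, so the error is
 * demoted to a DBG3 message and ret is forced to 0; anything else is
 * reported as a real error.
 */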

/*
 * Set the filter on the tracer.
 */
static
int set_ust_event_filter(struct ust_app_event *ua_event,
		struct ust_app *app)
{
	int ret;
	struct lttng_ust_filter_bytecode *ust_bytecode = NULL;

	health_code_update();

	if (!ua_event->filter) {
		ret = 0;
		goto error;
	}

	ust_bytecode = create_ust_bytecode_from_bytecode(ua_event->filter);
	if (!ust_bytecode) {
		ret = -LTTNG_ERR_NOMEM;
		goto error;
	}
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_set_filter(app->sock, ust_bytecode,
			ua_event->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app event %s filter failed for app (pid: %d) "
					"with ret %d", ua_event->attr.name, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app filter event failed. Application is dead.");
		}
		goto error;
	}

	DBG2("UST filter set successfully for event %s", ua_event->name);

error:
	health_code_update();
	free(ust_bytecode);
	return ret;
}

static
struct lttng_ust_event_exclusion *create_ust_exclusion_from_exclusion(
		struct lttng_event_exclusion *exclusion)
{
	struct lttng_ust_event_exclusion *ust_exclusion = NULL;
	size_t exclusion_alloc_size = sizeof(struct lttng_ust_event_exclusion) +
			LTTNG_UST_SYM_NAME_LEN * exclusion->count;

	ust_exclusion = zmalloc(exclusion_alloc_size);
	if (!ust_exclusion) {
		PERROR("malloc");
		goto end;
	}

	assert(sizeof(struct lttng_event_exclusion) ==
			sizeof(struct lttng_ust_event_exclusion));
	memcpy(ust_exclusion, exclusion, exclusion_alloc_size);
end:
	return ust_exclusion;
}

/*
 * Set event exclusions on the tracer.
 */
static
int set_ust_event_exclusion(struct ust_app_event *ua_event,
		struct ust_app *app)
{
	int ret;
	struct lttng_ust_event_exclusion *ust_exclusion = NULL;

	health_code_update();

	if (!ua_event->exclusion || !ua_event->exclusion->count) {
		ret = 0;
		goto error;
	}

	ust_exclusion = create_ust_exclusion_from_exclusion(
			ua_event->exclusion);
	if (!ust_exclusion) {
		ret = -LTTNG_ERR_NOMEM;
		goto error;
	}
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_set_exclusion(app->sock, ust_exclusion, ua_event->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app event %s exclusions failed for app (pid: %d) "
					"with ret %d", ua_event->attr.name, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app event exclusion failed. Application is dead.");
		}
		goto error;
	}

	DBG2("UST exclusion set successfully for event %s", ua_event->name);

error:
	health_code_update();
	free(ust_exclusion);
	return ret;
}

/*
 * Disable the specified event on the UST tracer for the UST session.
 */
static int disable_ust_event(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_disable(app->sock, ua_event->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app event %s disable failed for app (pid: %d) "
					"and session handle %d with ret %d",
					ua_event->attr.name, app->pid, ua_sess->handle, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app disable event failed. Application is dead.");
		}
		goto error;
	}

	DBG2("UST app event %s disabled successfully for app (pid: %d)",
			ua_event->attr.name, app->pid);

error:
	health_code_update();
	return ret;
}

/*
 * Disable the specified channel on the UST tracer for the UST session.
 */
static int disable_ust_channel(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_disable(app->sock, ua_chan->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app channel %s disable failed for app (pid: %d) "
					"and session handle %d with ret %d",
					ua_chan->name, app->pid, ua_sess->handle, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app disable channel failed. Application is dead.");
		}
		goto error;
	}

	DBG2("UST app channel %s disabled successfully for app (pid: %d)",
			ua_chan->name, app->pid);

error:
	health_code_update();
	return ret;
}

/*
 * Enable the specified channel on the UST tracer for the UST session.
 */
static int enable_ust_channel(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_enable(app->sock, ua_chan->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app channel %s enable failed for app (pid: %d) "
					"and session handle %d with ret %d",
					ua_chan->name, app->pid, ua_sess->handle, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app enable channel failed. Application is dead.");
		}
		goto error;
	}

	ua_chan->enabled = 1;

	DBG2("UST app channel %s enabled successfully for app (pid: %d)",
			ua_chan->name, app->pid);

error:
	health_code_update();
	return ret;
}

/*
 * Enable the specified event on the UST tracer for the UST session.
 */
static int enable_ust_event(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_enable(app->sock, ua_event->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app event %s enable failed for app (pid: %d) "
					"and session handle %d with ret %d",
					ua_event->attr.name, app->pid, ua_sess->handle, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app enable event failed. Application is dead.");
		}
		goto error;
	}

	DBG2("UST app event %s enabled successfully for app (pid: %d)",
			ua_event->attr.name, app->pid);

error:
	health_code_update();
	return ret;
}

/*
 * Send the channel and its stream buffers to the application.
 *
 * Return 0 on success. On error, a negative value is returned.
 */
static int send_channel_pid_to_ust(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
{
	int ret;
	struct ust_app_stream *stream, *stmp;

	assert(app);
	assert(ua_sess);
	assert(ua_chan);

	health_code_update();

	DBG("UST app sending channel %s to UST app sock %d", ua_chan->name,
			app->sock);

	/* Send channel to the application. */
	ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
	if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
		ret = -ENOTCONN;	/* Caused by app exiting. */
		goto error;
	} else if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Send all streams to application. */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			ret = -ENOTCONN;	/* Caused by app exiting. */
			goto error;
		} else if (ret < 0) {
			goto error;
		}
		/* We don't need the stream anymore once sent to the tracer. */
		cds_list_del(&stream->list);
		delete_ust_app_stream(-1, stream, app);
	}
	/* Flag the channel that it is sent to the application. */
	ua_chan->is_sent = 1;

error:
	health_code_update();
	return ret;
}
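
/*
 * Note: -EPIPE and -LTTNG_UST_ERR_EXITING are remapped to -ENOTCONN
 * above so that callers can tell "the application exited" apart from
 * genuine communication errors without knowing the ustctl error space.
 */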

/*
 * Create the specified event on the UST tracer for a UST session.
 *
 * Should be called with session mutex held.
 */
static
int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
{
	int ret = 0;

	health_code_update();

	/* Create UST event on tracer */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
			&ua_event->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Error ustctl create event %s for app pid: %d with ret %d",
					ua_event->attr.name, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app create event failed. Application is dead.");
		}
		goto error;
	}

	ua_event->handle = ua_event->obj->handle;

	DBG2("UST app event %s created successfully for pid:%d",
			ua_event->attr.name, app->pid);

	health_code_update();

	/* Set filter if one is present. */
	if (ua_event->filter) {
		ret = set_ust_event_filter(ua_event, app);
		if (ret < 0) {
			goto error;
		}
	}

	/* Set exclusions for the event */
	if (ua_event->exclusion) {
		ret = set_ust_event_exclusion(ua_event, app);
		if (ret < 0) {
			goto error;
		}
	}

	/* Events are disabled at creation; enable this one if needed. */
	if (ua_event->enabled) {
		/*
		 * We now need to explicitly enable the event, since it
		 * is now disabled at creation.
		 */
		ret = enable_ust_event(app, ua_sess, ua_event);
		if (ret < 0) {
			/*
			 * If we hit an EPERM, something is wrong with our enable call. If
			 * we get an EEXIST, there is a problem on the tracer side since we
			 * just created it.
			 */
			switch (ret) {
			case -LTTNG_UST_ERR_PERM:
				/* Code flow problem */
				assert(0);
			case -LTTNG_UST_ERR_EXIST:
				/* It's OK for our use case. */
				ret = 0;
				break;
			default:
				break;
			}
			goto error;
		}
	}

error:
	health_code_update();
	return ret;
}

/*
 * Copy data between a UST app event and an LTT event.
 */
static void shadow_copy_event(struct ust_app_event *ua_event,
		struct ltt_ust_event *uevent)
{
	size_t exclusion_alloc_size;

	strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
	ua_event->name[sizeof(ua_event->name) - 1] = '\0';

	ua_event->enabled = uevent->enabled;

	/* Copy event attributes */
	memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));

	/* Copy filter bytecode */
	if (uevent->filter) {
		ua_event->filter = copy_filter_bytecode(uevent->filter);
		/* Filter might be NULL here in case of ENOMEM. */
	}

	/* Copy exclusion data */
	if (uevent->exclusion) {
		exclusion_alloc_size = sizeof(struct lttng_event_exclusion) +
				LTTNG_UST_SYM_NAME_LEN * uevent->exclusion->count;
		ua_event->exclusion = zmalloc(exclusion_alloc_size);
		if (ua_event->exclusion == NULL) {
			PERROR("malloc");
		} else {
			memcpy(ua_event->exclusion, uevent->exclusion,
					exclusion_alloc_size);
		}
	}
}

/*
 * Copy data between a UST app channel and an LTT channel.
 */
static void shadow_copy_channel(struct ust_app_channel *ua_chan,
		struct ltt_ust_channel *uchan)
{
	struct lttng_ht_iter iter;
	struct ltt_ust_event *uevent;
	struct ltt_ust_context *uctx;
	struct ust_app_event *ua_event;

	DBG2("UST app shadow copy of channel %s started", ua_chan->name);

	strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
	ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';

	ua_chan->tracefile_size = uchan->tracefile_size;
	ua_chan->tracefile_count = uchan->tracefile_count;

	/* Copy channel attributes field by field since the layouts differ. */
	ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
	ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
	ua_chan->attr.overwrite = uchan->attr.overwrite;
	ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
	ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
	ua_chan->attr.output = uchan->attr.output;
	/*
	 * Note that the attribute channel type is not set since the channel on the
	 * tracing registry side does not have this information.
	 */

	ua_chan->enabled = uchan->enabled;
	ua_chan->tracing_channel_id = uchan->id;

	cds_list_for_each_entry(uctx, &uchan->ctx_list, list) {
		struct ust_app_ctx *ua_ctx = alloc_ust_app_ctx(&uctx->ctx);

		if (ua_ctx == NULL) {
			continue;
		}
		lttng_ht_node_init_ulong(&ua_ctx->node,
				(unsigned long) ua_ctx->ctx.ctx);
		lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
		cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
	}

	/* Copy all events from ltt ust channel to ust app channel */
	cds_lfht_for_each_entry(uchan->events->ht, &iter.iter, uevent, node.node) {
		ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
				uevent->filter, uevent->attr.loglevel, uevent->exclusion);
		if (ua_event == NULL) {
			DBG2("UST event %s not found on shadow copy channel",
					uevent->attr.name);
			ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
			if (ua_event == NULL) {
				continue;
			}
			shadow_copy_event(ua_event, uevent);
			add_unique_ust_app_event(ua_chan, ua_event);
		}
	}

	DBG3("UST app shadow copy of channel %s done", ua_chan->name);
}
1835
1836 /*
1837 * Copy data between a UST app session and a regular LTT session.
1838 */
1839 static void shadow_copy_session(struct ust_app_session *ua_sess,
1840 struct ltt_ust_session *usess, struct ust_app *app)
1841 {
1842 struct lttng_ht_node_str *ua_chan_node;
1843 struct lttng_ht_iter iter;
1844 struct ltt_ust_channel *uchan;
1845 struct ust_app_channel *ua_chan;
1846 time_t rawtime;
1847 struct tm *timeinfo;
1848 char datetime[16];
1849 int ret;
1850 char tmp_shm_path[PATH_MAX];
1851
1852 /* Get date and time for unique app path */
1853 time(&rawtime);
1854 timeinfo = localtime(&rawtime);
1855 strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);
1856
1857 DBG2("Shadow copy of session handle %d", ua_sess->handle);
1858
1859 ua_sess->tracing_id = usess->id;
1860 ua_sess->id = get_next_session_id();
1861 ua_sess->uid = app->uid;
1862 ua_sess->gid = app->gid;
1863 ua_sess->euid = usess->uid;
1864 ua_sess->egid = usess->gid;
1865 ua_sess->buffer_type = usess->buffer_type;
1866 ua_sess->bits_per_long = app->bits_per_long;
1867
1868 /* There is only one consumer object per session possible. */
1869 consumer_output_get(usess->consumer);
1870 ua_sess->consumer = usess->consumer;
1871
1872 ua_sess->output_traces = usess->output_traces;
1873 ua_sess->live_timer_interval = usess->live_timer_interval;
1874 copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
1875 &usess->metadata_attr);
1876
1877 switch (ua_sess->buffer_type) {
1878 case LTTNG_BUFFER_PER_PID:
1879 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
1880 DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
1881 datetime);
1882 break;
1883 case LTTNG_BUFFER_PER_UID:
1884 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
1885 DEFAULT_UST_TRACE_UID_PATH, ua_sess->uid, app->bits_per_long);
1886 break;
1887 default:
1888 assert(0);
1889 goto error;
1890 }
1891 if (ret < 0) {
1892 PERROR("asprintf UST shadow copy session");
1893 assert(0);
1894 goto error;
1895 }
1896
1897 strncpy(ua_sess->root_shm_path, usess->root_shm_path,
1898 sizeof(ua_sess->root_shm_path));
1899 ua_sess->root_shm_path[sizeof(ua_sess->root_shm_path) - 1] = '\0';
1900 strncpy(ua_sess->shm_path, usess->shm_path,
1901 sizeof(ua_sess->shm_path));
1902 ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
1903 if (ua_sess->shm_path[0]) {
1904 switch (ua_sess->buffer_type) {
1905 case LTTNG_BUFFER_PER_PID:
1906 ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
1907 DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s",
1908 app->name, app->pid, datetime);
1909 break;
1910 case LTTNG_BUFFER_PER_UID:
1911 ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
1912 DEFAULT_UST_TRACE_UID_PATH,
1913 app->uid, app->bits_per_long);
1914 break;
1915 default:
1916 assert(0);
1917 goto error;
1918 }
1919 if (ret < 0) {
1920 PERROR("snprintf UST shadow copy session");
1921 assert(0);
1922 goto error;
1923 }
1924 strncat(ua_sess->shm_path, tmp_shm_path,
1925 sizeof(ua_sess->shm_path) - strlen(ua_sess->shm_path) - 1);
1926 ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
1927 }
1928
1929 /* Iterate over all channels in global domain. */
1930 cds_lfht_for_each_entry(usess->domain_global.channels->ht, &iter.iter,
1931 uchan, node.node) {
1932 struct lttng_ht_iter uiter;
1933
1934 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
1935 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
1936 if (ua_chan_node != NULL) {
1937 /* Channel already exists. Continuing. */
1938 continue;
1939 }
1940
1941 DBG2("Channel %s not found on shadow session copy, creating it",
1942 uchan->name);
1943 ua_chan = alloc_ust_app_channel(uchan->name, ua_sess,
1944 &uchan->attr);
1945 if (ua_chan == NULL) {
1946 /* malloc failed. FIXME: we might want to handle ENOMEM here instead of skipping. */
1947 continue;
1948 }
1949 shadow_copy_channel(ua_chan, uchan);
1950 /*
1951 * The concept of metadata channel does not exist on the tracing
1952 * registry side of the session daemon so this can only be a per CPU
1953 * channel and not metadata.
1954 */
1955 ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
1956
1957 lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
1958 }
1959 return;
1960
1961 error:
1962 consumer_output_put(ua_sess->consumer);
1963 }
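/*
 * Editor's sketch (not part of the original source): how the per-PID trace
 * path above is composed. All values below are hypothetical; the real
 * inputs come from the application registration and the datetime stamp
 * computed at the top of shadow_copy_session().
 *
 *   char path[PATH_MAX];
 *   int ret;
 *
 *   ret = snprintf(path, sizeof(path),
 *           DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s",
 *           "my-app", 1234, "20240101-120000");
 *   if (ret < 0 || (size_t) ret >= sizeof(path)) {
 *       // Formatting error or truncation.
 *   }
 *
 * Note that snprintf() returns the number of characters that *would* have
 * been written, so comparing ret against sizeof(path) also detects
 * truncation, which the code above does not check for.
 */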
1964
1965 /*
1966 * Lookup session wrapper.
1967 */
1968 static
1969 void __lookup_session_by_app(struct ltt_ust_session *usess,
1970 struct ust_app *app, struct lttng_ht_iter *iter)
1971 {
1972 /* Get right UST app session from app */
1973 lttng_ht_lookup(app->sessions, &usess->id, iter);
1974 }
1975
1976 /*
1977 * Return ust app session from the app session hashtable using the UST session
1978 * id.
1979 */
1980 static struct ust_app_session *lookup_session_by_app(
1981 struct ltt_ust_session *usess, struct ust_app *app)
1982 {
1983 struct lttng_ht_iter iter;
1984 struct lttng_ht_node_u64 *node;
1985
1986 __lookup_session_by_app(usess, app, &iter);
1987 node = lttng_ht_iter_get_node_u64(&iter);
1988 if (node == NULL) {
1989 goto error;
1990 }
1991
1992 return caa_container_of(node, struct ust_app_session, node);
1993
1994 error:
1995 return NULL;
1996 }
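/*
 * Usage sketch (editor's addition, hypothetical caller): the lookup returns
 * a pointer to an RCU-protected node, so callers must hold the RCU
 * read-side lock across the lookup and any use of the result.
 *
 *   rcu_read_lock();
 *   ua_sess = lookup_session_by_app(usess, app);
 *   if (ua_sess) {
 *       // Use ua_sess here; do not keep the pointer past the unlock.
 *   }
 *   rcu_read_unlock();
 */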
1997
1998 /*
1999 * Setup buffer registry per PID for the given session and application. If none
2000 * is found, a new one is created, added to the global registry and
2001 * initialized. If regp is valid, it's set with the newly created object.
2002 *
2003 * Return 0 on success or else a negative value.
2004 */
2005 static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
2006 struct ust_app *app, struct buffer_reg_pid **regp)
2007 {
2008 int ret = 0;
2009 struct buffer_reg_pid *reg_pid;
2010
2011 assert(ua_sess);
2012 assert(app);
2013
2014 rcu_read_lock();
2015
2016 reg_pid = buffer_reg_pid_find(ua_sess->id);
2017 if (!reg_pid) {
2018 /*
2019 * This is the create channel path meaning that if there is NO
2020 * registry available, we have to create one for this session.
2021 */
2022 ret = buffer_reg_pid_create(ua_sess->id, &reg_pid,
2023 ua_sess->root_shm_path, ua_sess->shm_path);
2024 if (ret < 0) {
2025 goto error;
2026 }
2027 } else {
2028 goto end;
2029 }
2030
2031 /* Initialize registry. */
2032 ret = ust_registry_session_init(&reg_pid->registry->reg.ust, app,
2033 app->bits_per_long, app->uint8_t_alignment,
2034 app->uint16_t_alignment, app->uint32_t_alignment,
2035 app->uint64_t_alignment, app->long_alignment,
2036 app->byte_order, app->version.major,
2037 app->version.minor, reg_pid->root_shm_path,
2038 reg_pid->shm_path,
2039 ua_sess->euid, ua_sess->egid);
2040 if (ret < 0) {
2041 /*
2042 * reg_pid->registry->reg.ust is NULL upon error, so we need to
2043 * destroy the buffer registry, because it is always expected
2044 * that if the buffer registry can be found, its ust registry is
2045 * non-NULL.
2046 */
2047 buffer_reg_pid_destroy(reg_pid);
2048 goto error;
2049 }
2050
2051 buffer_reg_pid_add(reg_pid);
2052
2053 DBG3("UST app buffer registry per PID created successfully");
2054
2055 end:
2056 if (regp) {
2057 *regp = reg_pid;
2058 }
2059 error:
2060 rcu_read_unlock();
2061 return ret;
2062 }
2063
2064 /*
2065 * Setup buffer registry per UID for the given session and application. If none
2066 * is found, a new one is created, added to the global registry and
2067 * initialized. If regp is valid, it's set with the newly created object.
2068 *
2069 * Return 0 on success or else a negative value.
2070 */
2071 static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
2072 struct ust_app_session *ua_sess,
2073 struct ust_app *app, struct buffer_reg_uid **regp)
2074 {
2075 int ret = 0;
2076 struct buffer_reg_uid *reg_uid;
2077
2078 assert(usess);
2079 assert(app);
2080
2081 rcu_read_lock();
2082
2083 reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
2084 if (!reg_uid) {
2085 /*
2086 * This is the create channel path meaning that if there is NO
2087 * registry available, we have to create one for this session.
2088 */
2089 ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
2090 LTTNG_DOMAIN_UST, &reg_uid,
2091 ua_sess->root_shm_path, ua_sess->shm_path);
2092 if (ret < 0) {
2093 goto error;
2094 }
2095 } else {
2096 goto end;
2097 }
2098
2099 /* Initialize registry. */
2100 ret = ust_registry_session_init(&reg_uid->registry->reg.ust, NULL,
2101 app->bits_per_long, app->uint8_t_alignment,
2102 app->uint16_t_alignment, app->uint32_t_alignment,
2103 app->uint64_t_alignment, app->long_alignment,
2104 app->byte_order, app->version.major,
2105 app->version.minor, reg_uid->root_shm_path,
2106 reg_uid->shm_path, usess->uid, usess->gid);
2107 if (ret < 0) {
2108 /*
2109 * reg_uid->registry->reg.ust is NULL upon error, so we need to
2110 * destroy the buffer registry, because it is always expected
2111 * that if the buffer registry can be found, its ust registry is
2112 * non-NULL.
2113 */
2114 buffer_reg_uid_destroy(reg_uid, NULL);
2115 goto error;
2116 }
2117 /* Add node to teardown list of the session. */
2118 cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);
2119
2120 buffer_reg_uid_add(reg_uid);
2121
2122 DBG3("UST app buffer registry per UID created successfully");
2123 end:
2124 if (regp) {
2125 *regp = reg_uid;
2126 }
2127 error:
2128 rcu_read_unlock();
2129 return ret;
2130 }
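/*
 * Editor's sketch of the find-or-create idiom shared by
 * setup_buffer_reg_pid() and setup_buffer_reg_uid() above. This is the
 * generic shape with hypothetical helper names, not a third registry type;
 * the publish step comes last so that a partially initialized registry is
 * never visible to other lookups.
 *
 *   reg = find(key);
 *   if (!reg) {
 *       ret = create(key, &reg, ...);  // allocate the registry
 *       if (ret < 0)
 *           goto error;
 *       ret = init(reg, ...);          // may leave reg->reg.ust NULL
 *       if (ret < 0) {
 *           destroy(reg);              // never publish a broken registry
 *           goto error;
 *       }
 *       add(reg);                      // publish to the global table
 *   }
 */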
2131
2132 /*
2133 * Create a session on the tracer side for the given app.
2134 *
2135 * On success, ua_sess_ptr is populated with the session pointer or else left
2136 * untouched. If the session was created, is_created is set to 1. On error,
2137 * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
2138 * be NULL.
2139 *
2140 * Returns 0 on success or else a negative code: either -ENOMEM or
2141 * -ENOTCONN, the default code when ustctl_create_session() fails.
2142 */
2143 static int create_ust_app_session(struct ltt_ust_session *usess,
2144 struct ust_app *app, struct ust_app_session **ua_sess_ptr,
2145 int *is_created)
2146 {
2147 int ret, created = 0;
2148 struct ust_app_session *ua_sess;
2149
2150 assert(usess);
2151 assert(app);
2152 assert(ua_sess_ptr);
2153
2154 health_code_update();
2155
2156 ua_sess = lookup_session_by_app(usess, app);
2157 if (ua_sess == NULL) {
2158 DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
2159 app->pid, usess->id);
2160 ua_sess = alloc_ust_app_session(app);
2161 if (ua_sess == NULL) {
2162 /* Only malloc can fail here, so something is really wrong. */
2163 ret = -ENOMEM;
2164 goto error;
2165 }
2166 shadow_copy_session(ua_sess, usess, app);
2167 created = 1;
2168 }
2169
2170 switch (usess->buffer_type) {
2171 case LTTNG_BUFFER_PER_PID:
2172 /* Init local registry. */
2173 ret = setup_buffer_reg_pid(ua_sess, app, NULL);
2174 if (ret < 0) {
2175 delete_ust_app_session(-1, ua_sess, app);
2176 goto error;
2177 }
2178 break;
2179 case LTTNG_BUFFER_PER_UID:
2180 /* Look for a global registry. If none exists, create one. */
2181 ret = setup_buffer_reg_uid(usess, ua_sess, app, NULL);
2182 if (ret < 0) {
2183 delete_ust_app_session(-1, ua_sess, app);
2184 goto error;
2185 }
2186 break;
2187 default:
2188 assert(0);
2189 ret = -EINVAL;
2190 goto error;
2191 }
2192
2193 health_code_update();
2194
2195 if (ua_sess->handle == -1) {
2196 pthread_mutex_lock(&app->sock_lock);
2197 ret = ustctl_create_session(app->sock);
2198 pthread_mutex_unlock(&app->sock_lock);
2199 if (ret < 0) {
2200 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
2201 ERR("Creating session for app pid %d with ret %d",
2202 app->pid, ret);
2203 } else {
2204 DBG("UST app creating session failed. Application is dead");
2205 /*
2206 * This is normal behavior, an application can die during the
2207 * creation process. Don't report an error so the execution can
2208 * continue normally. This will get flagged ENOTCONN and the
2209 * caller will handle it.
2210 */
2211 ret = 0;
2212 }
2213 delete_ust_app_session(-1, ua_sess, app);
2214 if (ret != -ENOMEM) {
2215 /*
2216 * Tracer is probably gone or hit an internal error, so behave
2217 * as if it will soon unregister or is no longer usable.
2218 */
2219 ret = -ENOTCONN;
2220 }
2221 goto error;
2222 }
2223
2224 ua_sess->handle = ret;
2225
2226 /* Add ust app session to app's HT */
2227 lttng_ht_node_init_u64(&ua_sess->node,
2228 ua_sess->tracing_id);
2229 lttng_ht_add_unique_u64(app->sessions, &ua_sess->node);
2230 lttng_ht_node_init_ulong(&ua_sess->ust_objd_node, ua_sess->handle);
2231 lttng_ht_add_unique_ulong(app->ust_sessions_objd,
2232 &ua_sess->ust_objd_node);
2233
2234 DBG2("UST app session created successfully with handle %d", ret);
2235 }
2236
2237 *ua_sess_ptr = ua_sess;
2238 if (is_created) {
2239 *is_created = created;
2240 }
2241
2242 /* Everything went well. */
2243 ret = 0;
2244
2245 error:
2246 health_code_update();
2247 return ret;
2248 }
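/*
 * Caller-side sketch (editor's addition, hypothetical per-app loop): how
 * the two error codes documented above are typically told apart.
 * -ENOTCONN means the application exited during creation and can simply be
 * skipped; anything else (e.g. -ENOMEM) is fatal for the command.
 *
 *   ret = create_ust_app_session(usess, app, &ua_sess, &created);
 *   if (ret < 0) {
 *       switch (ret) {
 *       case -ENOTCONN:
 *           // Application is gone; skip it and keep going.
 *           continue;
 *       default:
 *           // -ENOMEM or another fatal error.
 *           goto error;
 *       }
 *   }
 */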
2249
2250 /*
2251 * Match function for a hash table lookup of ust_app_ctx.
2252 *
2253 * It matches an ust app context based on the context type and, in the case
2254 * of perf counters, their name.
2255 */
2256 static int ht_match_ust_app_ctx(struct cds_lfht_node *node, const void *_key)
2257 {
2258 struct ust_app_ctx *ctx;
2259 const struct lttng_ust_context_attr *key;
2260
2261 assert(node);
2262 assert(_key);
2263
2264 ctx = caa_container_of(node, struct ust_app_ctx, node.node);
2265 key = _key;
2266
2267 /* Context type */
2268 if (ctx->ctx.ctx != key->ctx) {
2269 goto no_match;
2270 }
2271
2272 switch (key->ctx) {
2273 case LTTNG_UST_CONTEXT_PERF_THREAD_COUNTER:
2274 if (strncmp(key->u.perf_counter.name,
2275 ctx->ctx.u.perf_counter.name,
2276 sizeof(key->u.perf_counter.name))) {
2277 goto no_match;
2278 }
2279 break;
2280 case LTTNG_UST_CONTEXT_APP_CONTEXT:
2281 if (strcmp(key->u.app_ctx.provider_name,
2282 ctx->ctx.u.app_ctx.provider_name) ||
2283 strcmp(key->u.app_ctx.ctx_name,
2284 ctx->ctx.u.app_ctx.ctx_name)) {
2285 goto no_match;
2286 }
2287 break;
2288 default:
2289 break;
2290 }
2291
2292 /* Match. */
2293 return 1;
2294
2295 no_match:
2296 return 0;
2297 }
2298
2299 /*
2300 * Look up an ust app context from an lttng_ust_context.
2301 *
2302 * Must be called while holding RCU read side lock.
2303 * Return an ust_app_ctx object or NULL on error.
2304 */
2305 static
2306 struct ust_app_ctx *find_ust_app_context(struct lttng_ht *ht,
2307 struct lttng_ust_context_attr *uctx)
2308 {
2309 struct lttng_ht_iter iter;
2310 struct lttng_ht_node_ulong *node;
2311 struct ust_app_ctx *app_ctx = NULL;
2312
2313 assert(uctx);
2314 assert(ht);
2315
2316 /* Lookup using the lttng_ust_context_type and a custom match fct. */
2317 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) uctx->ctx, lttng_ht_seed),
2318 ht_match_ust_app_ctx, uctx, &iter.iter);
2319 node = lttng_ht_iter_get_node_ulong(&iter);
2320 if (!node) {
2321 goto end;
2322 }
2323
2324 app_ctx = caa_container_of(node, struct ust_app_ctx, node);
2325
2326 end:
2327 return app_ctx;
2328 }
2329
2330 /*
2331 * Create a context for the channel on the tracer.
2332 *
2333 * Called with UST app session lock held and a RCU read side lock.
2334 */
2335 static
2336 int create_ust_app_channel_context(struct ust_app_session *ua_sess,
2337 struct ust_app_channel *ua_chan,
2338 struct lttng_ust_context_attr *uctx,
2339 struct ust_app *app)
2340 {
2341 int ret = 0;
2342 struct ust_app_ctx *ua_ctx;
2343
2344 DBG2("UST app adding context to channel %s", ua_chan->name);
2345
2346 ua_ctx = find_ust_app_context(ua_chan->ctx, uctx);
2347 if (ua_ctx) {
2348 ret = -EEXIST;
2349 goto error;
2350 }
2351
2352 ua_ctx = alloc_ust_app_ctx(uctx);
2353 if (ua_ctx == NULL) {
2354 /* malloc failed */
2355 ret = -1;
2356 goto error;
2357 }
2358
2359 lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
2360 lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
2361 cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
2362
2363 ret = create_ust_channel_context(ua_chan, ua_ctx, app);
2364 if (ret < 0) {
2365 goto error;
2366 }
2367
2368 error:
2369 return ret;
2370 }
2371
2372 /*
2373 * Enable on the tracer side a ust app event for the session and channel.
2374 *
2375 * Called with UST app session lock held.
2376 */
2377 static
2378 int enable_ust_app_event(struct ust_app_session *ua_sess,
2379 struct ust_app_event *ua_event, struct ust_app *app)
2380 {
2381 int ret;
2382
2383 ret = enable_ust_event(app, ua_sess, ua_event);
2384 if (ret < 0) {
2385 goto error;
2386 }
2387
2388 ua_event->enabled = 1;
2389
2390 error:
2391 return ret;
2392 }
2393
2394 /*
2395 * Disable on the tracer side a ust app event for the session and channel.
2396 */
2397 static int disable_ust_app_event(struct ust_app_session *ua_sess,
2398 struct ust_app_event *ua_event, struct ust_app *app)
2399 {
2400 int ret;
2401
2402 ret = disable_ust_event(app, ua_sess, ua_event);
2403 if (ret < 0) {
2404 goto error;
2405 }
2406
2407 ua_event->enabled = 0;
2408
2409 error:
2410 return ret;
2411 }
2412
2413 /*
2414 * Lookup ust app channel for session and disable it on the tracer side.
2415 */
2416 static
2417 int disable_ust_app_channel(struct ust_app_session *ua_sess,
2418 struct ust_app_channel *ua_chan, struct ust_app *app)
2419 {
2420 int ret;
2421
2422 ret = disable_ust_channel(app, ua_sess, ua_chan);
2423 if (ret < 0) {
2424 goto error;
2425 }
2426
2427 ua_chan->enabled = 0;
2428
2429 error:
2430 return ret;
2431 }
2432
2433 /*
2434 * Lookup ust app channel for session and enable it on the tracer side. This
2435 * MUST be called with a RCU read side lock acquired.
2436 */
2437 static int enable_ust_app_channel(struct ust_app_session *ua_sess,
2438 struct ltt_ust_channel *uchan, struct ust_app *app)
2439 {
2440 int ret = 0;
2441 struct lttng_ht_iter iter;
2442 struct lttng_ht_node_str *ua_chan_node;
2443 struct ust_app_channel *ua_chan;
2444
2445 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
2446 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
2447 if (ua_chan_node == NULL) {
2448 DBG2("Unable to find channel %s in ust session id %" PRIu64,
2449 uchan->name, ua_sess->tracing_id);
2450 goto error;
2451 }
2452
2453 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
2454
2455 ret = enable_ust_channel(app, ua_sess, ua_chan);
2456 if (ret < 0) {
2457 goto error;
2458 }
2459
2460 error:
2461 return ret;
2462 }
2463
2464 /*
2465 * Ask the consumer to create a channel and get it if successful.
2466 *
2467 * Return 0 on success or else a negative value.
2468 */
2469 static int do_consumer_create_channel(struct ltt_ust_session *usess,
2470 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
2471 int bitness, struct ust_registry_session *registry)
2472 {
2473 int ret;
2474 unsigned int nb_fd = 0;
2475 struct consumer_socket *socket;
2476
2477 assert(usess);
2478 assert(ua_sess);
2479 assert(ua_chan);
2480 assert(registry);
2481
2482 rcu_read_lock();
2483 health_code_update();
2484
2485 /* Get the right consumer socket for the application. */
2486 socket = consumer_find_socket_by_bitness(bitness, usess->consumer);
2487 if (!socket) {
2488 ret = -EINVAL;
2489 goto error;
2490 }
2491
2492 health_code_update();
2493
2494 /* Need one fd for the channel. */
2495 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
2496 if (ret < 0) {
2497 ERR("Exhausted number of available FD upon create channel");
2498 goto error;
2499 }
2500
2501 /*
2502 * Ask the consumer to create the channel. The consumer will return the
2503 * number of streams we have to expect.
2504 */
2505 ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
2506 registry);
2507 if (ret < 0) {
2508 goto error_ask;
2509 }
2510
2511 /*
2512 * Compute the number of fds needed before receiving them. It must be 2
2513 * per stream (2 being the default value here).
2514 */
2515 nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;
2516
2517 /* Reserve the number of file descriptors we need. */
2518 ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
2519 if (ret < 0) {
2520 ERR("Exhausted number of available FD upon create channel");
2521 goto error_fd_get_stream;
2522 }
2523
2524 health_code_update();
2525
2526 /*
2527 * Now get the channel from the consumer. This call will populate the stream
2528 * list of that channel and set the ust objects.
2529 */
2530 if (usess->consumer->enabled) {
2531 ret = ust_consumer_get_channel(socket, ua_chan);
2532 if (ret < 0) {
2533 goto error_destroy;
2534 }
2535 }
2536
2537 rcu_read_unlock();
2538 return 0;
2539
2540 error_destroy:
2541 lttng_fd_put(LTTNG_FD_APPS, nb_fd);
2542 error_fd_get_stream:
2543 /*
2544 * Initiate a destroy channel on the consumer since we had an error
2545 * handling it on our side. The return value is of no importance since we
2546 * already have a ret value set by the previous error that we need to
2547 * return.
2548 */
2549 (void) ust_consumer_destroy_channel(socket, ua_chan);
2550 error_ask:
2551 lttng_fd_put(LTTNG_FD_APPS, 1);
2552 error:
2553 health_code_update();
2554 rcu_read_unlock();
2555 return ret;
2556 }
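/*
 * FD accounting sketch (editor's addition): every lttng_fd_get() in the
 * function above is balanced by a lttng_fd_put() with the same count on
 * the error paths. For a channel expecting N streams the reservation is:
 *
 *   // 1 fd for the channel object itself, reserved first:
 *   ret = lttng_fd_get(LTTNG_FD_APPS, 1);
 *   // then DEFAULT_UST_STREAM_FD_NUM (2) fds per expected stream:
 *   nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;
 *   ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
 *
 * On failure after the reservations, the matching puts release 1 and
 * nb_fd descriptors respectively, as seen in the error labels above.
 */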
2557
2558 /*
2559 * Duplicate the ust data object of the ust app stream and save it in the
2560 * buffer registry stream.
2561 *
2562 * Return 0 on success or else a negative value.
2563 */
2564 static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
2565 struct ust_app_stream *stream)
2566 {
2567 int ret;
2568
2569 assert(reg_stream);
2570 assert(stream);
2571
2572 /* Reserve the number of file descriptors we need. */
2573 ret = lttng_fd_get(LTTNG_FD_APPS, 2);
2574 if (ret < 0) {
2575 ERR("Exhausted number of available FD upon duplicate stream");
2576 goto error;
2577 }
2578
2579 /* Duplicate object for stream once the original is in the registry. */
2580 ret = ustctl_duplicate_ust_object_data(&stream->obj,
2581 reg_stream->obj.ust);
2582 if (ret < 0) {
2583 ERR("Duplicate stream obj from %p to %p failed with ret %d",
2584 reg_stream->obj.ust, stream->obj, ret);
2585 lttng_fd_put(LTTNG_FD_APPS, 2);
2586 goto error;
2587 }
2588 stream->handle = stream->obj->handle;
2589
2590 error:
2591 return ret;
2592 }
2593
2594 /*
2595 * Duplicate the ust data object of the ust app channel and save it in the
2596 * buffer registry channel.
2597 *
2598 * Return 0 on success or else a negative value.
2599 */
2600 static int duplicate_channel_object(struct buffer_reg_channel *reg_chan,
2601 struct ust_app_channel *ua_chan)
2602 {
2603 int ret;
2604
2605 assert(reg_chan);
2606 assert(ua_chan);
2607
2608 /* Need one fd for the channel. */
2609 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
2610 if (ret < 0) {
2611 ERR("Exhausted number of available FD upon duplicate channel");
2612 goto error_fd_get;
2613 }
2614
2615 /* Duplicate object for stream once the original is in the registry. */
2616 ret = ustctl_duplicate_ust_object_data(&ua_chan->obj, reg_chan->obj.ust);
2617 if (ret < 0) {
2618 ERR("Duplicate channel obj from %p to %p failed with ret: %d",
2619 reg_chan->obj.ust, ua_chan->obj, ret);
2620 goto error;
2621 }
2622 ua_chan->handle = ua_chan->obj->handle;
2623
2624 return 0;
2625
2626 error:
2627 lttng_fd_put(LTTNG_FD_APPS, 1);
2628 error_fd_get:
2629 return ret;
2630 }
2631
2632 /*
2633 * For a given channel buffer registry, setup all streams of the given ust
2634 * application channel.
2635 *
2636 * Return 0 on success or else a negative value.
2637 */
2638 static int setup_buffer_reg_streams(struct buffer_reg_channel *reg_chan,
2639 struct ust_app_channel *ua_chan,
2640 struct ust_app *app)
2641 {
2642 int ret = 0;
2643 struct ust_app_stream *stream, *stmp;
2644
2645 assert(reg_chan);
2646 assert(ua_chan);
2647
2648 DBG2("UST app setup buffer registry stream");
2649
2650 /* Send all streams to application. */
2651 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
2652 struct buffer_reg_stream *reg_stream;
2653
2654 ret = buffer_reg_stream_create(&reg_stream);
2655 if (ret < 0) {
2656 goto error;
2657 }
2658
2659 /*
2660 * Keep original pointer and nullify it in the stream so the delete
2661 * stream call does not release the object.
2662 */
2663 reg_stream->obj.ust = stream->obj;
2664 stream->obj = NULL;
2665 buffer_reg_stream_add(reg_stream, reg_chan);
2666
2667 /* We don't need the streams anymore. */
2668 cds_list_del(&stream->list);
2669 delete_ust_app_stream(-1, stream, app);
2670 }
2671
2672 error:
2673 return ret;
2674 }
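/*
 * Ownership-transfer sketch (editor's addition): the loop above moves each
 * ust object from the application stream into the registry stream and
 * nullifies the source pointer, so deleting the source cannot release the
 * object a second time:
 *
 *   reg_stream->obj.ust = stream->obj;       // registry owns it now
 *   stream->obj = NULL;                      // stream gave it up
 *   delete_ust_app_stream(-1, stream, app);  // safe: obj is NULL
 */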
2675
2676 /*
2677 * Create a buffer registry channel for the given session registry and
2678 * application channel object. If regp pointer is valid, it's set with the
2679 * created object. Important, the created object is NOT added to the session
2680 * registry hash table.
2681 *
2682 * Return 0 on success else a negative value.
2683 */
2684 static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
2685 struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp)
2686 {
2687 int ret;
2688 struct buffer_reg_channel *reg_chan = NULL;
2689
2690 assert(reg_sess);
2691 assert(ua_chan);
2692
2693 DBG2("UST app creating buffer registry channel for %s", ua_chan->name);
2694
2695 /* Create buffer registry channel. */
2696 ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &reg_chan);
2697 if (ret < 0) {
2698 goto error_create;
2699 }
2700 assert(reg_chan);
2701 reg_chan->consumer_key = ua_chan->key;
2702 reg_chan->subbuf_size = ua_chan->attr.subbuf_size;
2703 reg_chan->num_subbuf = ua_chan->attr.num_subbuf;
2704
2705 /* Create and add a channel registry to session. */
2706 ret = ust_registry_channel_add(reg_sess->reg.ust,
2707 ua_chan->tracing_channel_id);
2708 if (ret < 0) {
2709 goto error;
2710 }
2711 buffer_reg_channel_add(reg_sess, reg_chan);
2712
2713 if (regp) {
2714 *regp = reg_chan;
2715 }
2716
2717 return 0;
2718
2719 error:
2720 /* Safe because the registry channel object was not added to any HT. */
2721 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2722 error_create:
2723 return ret;
2724 }
2725
2726 /*
2727 * Setup buffer registry channel for the given session registry and application
2728 * channel object. If regp pointer is valid, it's set with the created object.
2729 *
2730 * Return 0 on success else a negative value.
2731 */
2732 static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
2733 struct ust_app_channel *ua_chan, struct buffer_reg_channel *reg_chan,
2734 struct ust_app *app)
2735 {
2736 int ret;
2737
2738 assert(reg_sess);
2739 assert(reg_chan);
2740 assert(ua_chan);
2741 assert(ua_chan->obj);
2742
2743 DBG2("UST app setup buffer registry channel for %s", ua_chan->name);
2744
2745 /* Setup all streams for the registry. */
2746 ret = setup_buffer_reg_streams(reg_chan, ua_chan, app);
2747 if (ret < 0) {
2748 goto error;
2749 }
2750
2751 reg_chan->obj.ust = ua_chan->obj;
2752 ua_chan->obj = NULL;
2753
2754 return 0;
2755
2756 error:
2757 buffer_reg_channel_remove(reg_sess, reg_chan);
2758 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2759 return ret;
2760 }
2761
2762 /*
2763 * Send buffer registry channel to the application.
2764 *
2765 * Return 0 on success else a negative value.
2766 */
2767 static int send_channel_uid_to_ust(struct buffer_reg_channel *reg_chan,
2768 struct ust_app *app, struct ust_app_session *ua_sess,
2769 struct ust_app_channel *ua_chan)
2770 {
2771 int ret;
2772 struct buffer_reg_stream *reg_stream;
2773
2774 assert(reg_chan);
2775 assert(app);
2776 assert(ua_sess);
2777 assert(ua_chan);
2778
2779 DBG("UST app sending buffer registry channel to ust sock %d", app->sock);
2780
2781 ret = duplicate_channel_object(reg_chan, ua_chan);
2782 if (ret < 0) {
2783 goto error;
2784 }
2785
2786 /* Send channel to the application. */
2787 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
2788 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
2789 ret = -ENOTCONN; /* Caused by app exiting. */
2790 goto error;
2791 } else if (ret < 0) {
2792 goto error;
2793 }
2794
2795 health_code_update();
2796
2797 /* Send all streams to application. */
2798 pthread_mutex_lock(&reg_chan->stream_list_lock);
2799 cds_list_for_each_entry(reg_stream, &reg_chan->streams, lnode) {
2800 struct ust_app_stream stream;
2801
2802 ret = duplicate_stream_object(reg_stream, &stream);
2803 if (ret < 0) {
2804 goto error_stream_unlock;
2805 }
2806
2807 ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
2808 if (ret < 0) {
2809 (void) release_ust_app_stream(-1, &stream, app);
2810 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
2811 ret = -ENOTCONN; /* Caused by app exiting. */
2812 }
2813 goto error_stream_unlock;
2814 }
2815
2816 /*
2817 * The return value is not important here. This function will output an
2818 * error if needed.
2819 */
2820 (void) release_ust_app_stream(-1, &stream, app);
2821 }
2822 ua_chan->is_sent = 1;
2823
2824 error_stream_unlock:
2825 pthread_mutex_unlock(&reg_chan->stream_list_lock);
2826 error:
2827 return ret;
2828 }
2829
2830 /*
2831 * Create and send to the application the created buffers with per UID buffers.
2832 *
2833 * Return 0 on success else a negative value.
2834 */
2835 static int create_channel_per_uid(struct ust_app *app,
2836 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2837 struct ust_app_channel *ua_chan)
2838 {
2839 int ret;
2840 struct buffer_reg_uid *reg_uid;
2841 struct buffer_reg_channel *reg_chan;
2842
2843 assert(app);
2844 assert(usess);
2845 assert(ua_sess);
2846 assert(ua_chan);
2847
2848 DBG("UST app creating channel %s with per UID buffers", ua_chan->name);
2849
2850 reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
2851 /*
2852 * The session creation handles the creation of this global registry
2853 * object. If none can be found, there is a code flow problem or a
2854 * teardown race.
2855 */
2856 assert(reg_uid);
2857
2858 reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
2859 reg_uid);
2860 if (!reg_chan) {
2861 /* Create the buffer registry channel object. */
2862 ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &reg_chan);
2863 if (ret < 0) {
2864 ERR("Error creating the UST channel \"%s\" registry instance",
2865 ua_chan->name);
2866 goto error;
2867 }
2868 assert(reg_chan);
2869
2870 /*
2871 * Create the buffers on the consumer side. This call populates the
2872 * ust app channel object with all streams and data object.
2873 */
2874 ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
2875 app->bits_per_long, reg_uid->registry->reg.ust);
2876 if (ret < 0) {
2877 ERR("Error creating UST channel \"%s\" on the consumer daemon",
2878 ua_chan->name);
2879
2880 /*
2881 * Let's remove the previously created buffer registry channel so
2882 * it's not visible anymore in the session registry.
2883 */
2884 ust_registry_channel_del_free(reg_uid->registry->reg.ust,
2885 ua_chan->tracing_channel_id);
2886 buffer_reg_channel_remove(reg_uid->registry, reg_chan);
2887 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2888 goto error;
2889 }
2890
2891 /*
2892 * Set up the streams and add them to the session registry.
2893 */
2894 ret = setup_buffer_reg_channel(reg_uid->registry,
2895 ua_chan, reg_chan, app);
2896 if (ret < 0) {
2897 ERR("Error setting up UST channel \"%s\"",
2898 ua_chan->name);
2899 goto error;
2900 }
2901
2902 }
2903
2904 /* Send buffers to the application. */
2905 ret = send_channel_uid_to_ust(reg_chan, app, ua_sess, ua_chan);
2906 if (ret < 0) {
2907 if (ret != -ENOTCONN) {
2908 ERR("Error sending channel to application");
2909 }
2910 goto error;
2911 }
2912
2913 error:
2914 return ret;
2915 }
2916
2917 /*
2918 * Create and send to the application the created buffers with per PID buffers.
2919 *
2920 * Return 0 on success else a negative value.
2921 */
2922 static int create_channel_per_pid(struct ust_app *app,
2923 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2924 struct ust_app_channel *ua_chan)
2925 {
2926 int ret;
2927 struct ust_registry_session *registry;
2928
2929 assert(app);
2930 assert(usess);
2931 assert(ua_sess);
2932 assert(ua_chan);
2933
2934 DBG("UST app creating channel %s with per PID buffers", ua_chan->name);
2935
2936 rcu_read_lock();
2937
2938 registry = get_session_registry(ua_sess);
2939 assert(registry);
2940
2941 /* Create and add a new channel registry to session. */
2942 ret = ust_registry_channel_add(registry, ua_chan->key);
2943 if (ret < 0) {
2944 ERR("Error creating the UST channel \"%s\" registry instance",
2945 ua_chan->name);
2946 goto error;
2947 }
2948
2949 /* Create and get channel on the consumer side. */
2950 ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
2951 app->bits_per_long, registry);
2952 if (ret < 0) {
2953 ERR("Error creating UST channel \"%s\" on the consumer daemon",
2954 ua_chan->name);
2955 goto error;
2956 }
2957
2958 ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
2959 if (ret < 0) {
2960 if (ret != -ENOTCONN) {
2961 ERR("Error sending channel to application");
2962 }
2963 goto error;
2964 }
2965
2966 error:
2967 rcu_read_unlock();
2968 return ret;
2969 }
2970
2971 /*
2972 * From an already allocated ust app channel, create the channel buffers if
2973 * needed and send them to the application. This MUST be called with a RCU read
2974 * side lock acquired.
2975 *
2976 * Return 0 on success or else a negative value. Returns -ENOTCONN if
2977 * the application exited concurrently.
2978 */
2979 static int do_create_channel(struct ust_app *app,
2980 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2981 struct ust_app_channel *ua_chan)
2982 {
2983 int ret;
2984
2985 assert(app);
2986 assert(usess);
2987 assert(ua_sess);
2988 assert(ua_chan);
2989
2990 /* Handle buffer type before sending the channel to the application. */
2991 switch (usess->buffer_type) {
2992 case LTTNG_BUFFER_PER_UID:
2993 {
2994 ret = create_channel_per_uid(app, usess, ua_sess, ua_chan);
2995 if (ret < 0) {
2996 goto error;
2997 }
2998 break;
2999 }
3000 case LTTNG_BUFFER_PER_PID:
3001 {
3002 ret = create_channel_per_pid(app, usess, ua_sess, ua_chan);
3003 if (ret < 0) {
3004 goto error;
3005 }
3006 break;
3007 }
3008 default:
3009 assert(0);
3010 ret = -EINVAL;
3011 goto error;
3012 }
3013
3014 /* Initialize ust objd object using the received handle and add it. */
3015 lttng_ht_node_init_ulong(&ua_chan->ust_objd_node, ua_chan->handle);
3016 lttng_ht_add_unique_ulong(app->ust_objd, &ua_chan->ust_objd_node);
3017
3018 /* If channel is not enabled, disable it on the tracer */
3019 if (!ua_chan->enabled) {
3020 ret = disable_ust_channel(app, ua_sess, ua_chan);
3021 if (ret < 0) {
3022 goto error;
3023 }
3024 }
3025
3026 error:
3027 return ret;
3028 }
3029
3030 /*
3031 * Create a UST app channel and create it on the tracer. If ua_chanp is
3032 * not NULL, it is set to the newly created channel.
3033 *
3034 * Called with UST app session lock and RCU read-side lock held.
3035 *
3036 * Return 0 on success or else a negative value. Returns -ENOTCONN if
3037 * the application exited concurrently.
3038 */
3039 static int create_ust_app_channel(struct ust_app_session *ua_sess,
3040 struct ltt_ust_channel *uchan, struct ust_app *app,
3041 enum lttng_ust_chan_type type, struct ltt_ust_session *usess,
3042 struct ust_app_channel **ua_chanp)
3043 {
3044 int ret = 0;
3045 struct lttng_ht_iter iter;
3046 struct lttng_ht_node_str *ua_chan_node;
3047 struct ust_app_channel *ua_chan;
3048
3049 /* Lookup channel in the ust app session */
3050 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
3051 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
3052 if (ua_chan_node != NULL) {
3053 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3054 goto end;
3055 }
3056
3057 ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
3058 if (ua_chan == NULL) {
3059 /* Only malloc can fail here */
3060 ret = -ENOMEM;
3061 goto error_alloc;
3062 }
3063 shadow_copy_channel(ua_chan, uchan);
3064
3065 /* Set channel type. */
3066 ua_chan->attr.type = type;
3067
3068 ret = do_create_channel(app, usess, ua_sess, ua_chan);
3069 if (ret < 0) {
3070 goto error;
3071 }
3072
3073 DBG2("UST app create channel %s for PID %d completed", ua_chan->name,
3074 app->pid);
3075
3076 /* Only add the channel if successful on the tracer side. */
3077 lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
3078
3079 end:
3080 if (ua_chanp) {
3081 *ua_chanp = ua_chan;
3082 }
3083
3084 /* Everything went well. */
3085 return 0;
3086
3087 error:
3088 delete_ust_app_channel(ua_chan->is_sent ? app->sock : -1, ua_chan, app);
3089 error_alloc:
3090 return ret;
3091 }
3092
3093 /*
3094 * Create UST app event and create it on the tracer side.
3095 *
3096 * Called with ust app session mutex held.
3097 */
3098 static
3099 int create_ust_app_event(struct ust_app_session *ua_sess,
3100 struct ust_app_channel *ua_chan, struct ltt_ust_event *uevent,
3101 struct ust_app *app)
3102 {
3103 int ret = 0;
3104 struct ust_app_event *ua_event;
3105
3106 /* Get event node */
3107 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
3108 uevent->filter, uevent->attr.loglevel, uevent->exclusion);
3109 if (ua_event != NULL) {
3110 ret = -EEXIST;
3111 goto end;
3112 }
3113
3114 /* Does not exist so create one */
3115 ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
3116 if (ua_event == NULL) {
3117 /* Only malloc can fail here, so something is really wrong. */
3118 ret = -ENOMEM;
3119 goto end;
3120 }
3121 shadow_copy_event(ua_event, uevent);
3122
3123 /* Create it on the tracer side */
3124 ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
3125 if (ret < 0) {
3126 /* Not found previously means that it does not exist on the tracer */
3127 assert(ret != -LTTNG_UST_ERR_EXIST);
3128 goto error;
3129 }
3130
3131 add_unique_ust_app_event(ua_chan, ua_event);
3132
3133 DBG2("UST app create event %s for PID %d completed", ua_event->name,
3134 app->pid);
3135
3136 end:
3137 return ret;
3138
3139 error:
3140 /* Valid: callers already hold the read side lock at this point. */
3141 delete_ust_app_event(-1, ua_event, app);
3142 return ret;
3143 }
3144
3145 /*
3146 * Create UST metadata and open it on the tracer side.
3147 *
3148 * Called with UST app session lock held and RCU read side lock.
3149 */
3150 static int create_ust_app_metadata(struct ust_app_session *ua_sess,
3151 struct ust_app *app, struct consumer_output *consumer)
3152 {
3153 int ret = 0;
3154 struct ust_app_channel *metadata;
3155 struct consumer_socket *socket;
3156 struct ust_registry_session *registry;
3157
3158 assert(ua_sess);
3159 assert(app);
3160 assert(consumer);
3161
3162 registry = get_session_registry(ua_sess);
3163 assert(registry);
3164
3165 pthread_mutex_lock(&registry->lock);
3166
3167 /* Metadata already exists for this registry or it was closed previously */
3168 if (registry->metadata_key || registry->metadata_closed) {
3169 ret = 0;
3170 goto error;
3171 }
3172
3173 /* Allocate UST metadata */
3174 metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, NULL);
3175 if (!metadata) {
3176 /* malloc() failed */
3177 ret = -ENOMEM;
3178 goto error;
3179 }
3180
3181 memcpy(&metadata->attr, &ua_sess->metadata_attr, sizeof(metadata->attr));
3182
3183 /* Need one fd for the channel. */
3184 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
3185 if (ret < 0) {
3186 ERR("Exhausted number of available FD upon create metadata");
3187 goto error;
3188 }
3189
3190 /* Get the right consumer socket for the application. */
3191 socket = consumer_find_socket_by_bitness(app->bits_per_long, consumer);
3192 if (!socket) {
3193 ret = -EINVAL;
3194 goto error_consumer;
3195 }
3196
3197 /*
3198 * Keep the metadata key so we can identify it on the consumer side. Assign
3199 * it to the registry *before* we ask the consumer so we avoid the race
3200 * where the consumer requests the metadata before our ask_channel call
3201 * has returned.
3202 */
3203 registry->metadata_key = metadata->key;
3204
3205 /*
3206 * Ask the consumer to create the metadata channel. The metadata object
3207 * will be created by the consumer and kept there. However, the stream is
3208 * never added or monitored until we do a first push metadata to the
3209 * consumer.
3210 */
3211 ret = ust_consumer_ask_channel(ua_sess, metadata, consumer, socket,
3212 registry);
3213 if (ret < 0) {
3214 /* Nullify the metadata key so we don't try to close it later on. */
3215 registry->metadata_key = 0;
3216 goto error_consumer;
3217 }
3218
3219 /*
3220 * The setup command will make the metadata stream be sent to the relayd,
3221 * if applicable, and to the thread managing the metadata. This is important
3222 * because after this point, if an error occurs, the only way the stream
3223 * can be deleted is to be monitored in the consumer.
3224 */
3225 ret = consumer_setup_metadata(socket, metadata->key);
3226 if (ret < 0) {
3227 /* Nullify the metadata key so we don't try to close it later on. */
3228 registry->metadata_key = 0;
3229 goto error_consumer;
3230 }
3231
3232 DBG2("UST metadata with key %" PRIu64 " created for app pid %d",
3233 metadata->key, app->pid);
3234
3235 error_consumer:
3236 lttng_fd_put(LTTNG_FD_APPS, 1);
3237 delete_ust_app_channel(-1, metadata, app);
3238 error:
3239 pthread_mutex_unlock(&registry->lock);
3240 return ret;
3241 }
3242
3243 /*
3244 * Return ust app pointer or NULL if not found. RCU read side lock MUST be
3245 * acquired before calling this function.
3246 */
3247 struct ust_app *ust_app_find_by_pid(pid_t pid)
3248 {
3249 struct ust_app *app = NULL;
3250 struct lttng_ht_node_ulong *node;
3251 struct lttng_ht_iter iter;
3252
3253 lttng_ht_lookup(ust_app_ht, (void *)((unsigned long) pid), &iter);
3254 node = lttng_ht_iter_get_node_ulong(&iter);
3255 if (node == NULL) {
3256 DBG2("UST app not found with pid %d", pid);
3257 goto error;
3258 }
3259
3260 DBG2("Found UST app by pid %d", pid);
3261
3262 app = caa_container_of(node, struct ust_app, pid_n);
3263
3264 error:
3265 return app;
3266 }
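/*
 * Usage sketch (editor's addition, hypothetical caller): as stated in the
 * comment above, the RCU read-side lock must be held before the call and
 * for as long as the returned pointer is used.
 *
 *   rcu_read_lock();
 *   app = ust_app_find_by_pid(pid);
 *   if (app) {
 *       // Use app here; the pointer is only valid under the lock.
 *   }
 *   rcu_read_unlock();
 */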
3267
3268 /*
3269 * Allocate and init an UST app object using the registration information and
3270 * the command socket. This is called when the command socket connects to the
3271 * session daemon.
3272 *
3273 * The object is returned on success or else NULL.
3274 */
3275 struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
3276 {
3277 struct ust_app *lta = NULL;
3278
3279 assert(msg);
3280 assert(sock >= 0);
3281
3282 DBG3("UST app creating application for socket %d", sock);
3283
3284 if ((msg->bits_per_long == 64 &&
3285 (uatomic_read(&ust_consumerd64_fd) == -EINVAL))
3286 || (msg->bits_per_long == 32 &&
3287 (uatomic_read(&ust_consumerd32_fd) == -EINVAL))) {
3288 ERR("Registration failed: application \"%s\" (pid: %d) has "
3289 "%d-bit long, but no consumerd for this size is available.\n",
3290 msg->name, msg->pid, msg->bits_per_long);
3291 goto error;
3292 }
3293
3294 lta = zmalloc(sizeof(struct ust_app));
3295 if (lta == NULL) {
3296 PERROR("malloc");
3297 goto error;
3298 }
3299
3300 lta->ppid = msg->ppid;
3301 lta->uid = msg->uid;
3302 lta->gid = msg->gid;
3303
3304 lta->bits_per_long = msg->bits_per_long;
3305 lta->uint8_t_alignment = msg->uint8_t_alignment;
3306 lta->uint16_t_alignment = msg->uint16_t_alignment;
3307 lta->uint32_t_alignment = msg->uint32_t_alignment;
3308 lta->uint64_t_alignment = msg->uint64_t_alignment;
3309 lta->long_alignment = msg->long_alignment;
3310 lta->byte_order = msg->byte_order;
3311
3312 lta->v_major = msg->major;
3313 lta->v_minor = msg->minor;
3314 lta->sessions = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
3315 lta->ust_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3316 lta->ust_sessions_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3317 lta->notify_sock = -1;
3318
3319 /* Copy name and make sure it's NULL terminated. */
3320 strncpy(lta->name, msg->name, sizeof(lta->name));
3321 lta->name[UST_APP_PROCNAME_LEN] = '\0';
3322
3323 /*
3324 * The application's compatibility is checked when its registration
3325 * information is received, before this function is called. So, at this
3326 * point, the application can work with this session daemon.
3327 */
3328 lta->compatible = 1;
3329
3330 lta->pid = msg->pid;
3331 lttng_ht_node_init_ulong(&lta->pid_n, (unsigned long) lta->pid);
3332 lta->sock = sock;
3333 pthread_mutex_init(&lta->sock_lock, NULL);
3334 lttng_ht_node_init_ulong(&lta->sock_n, (unsigned long) lta->sock);
3335
3336 CDS_INIT_LIST_HEAD(&lta->teardown_head);
3337 error:
3338 return lta;
3339 }
3340
3341 /*
3342 * For a given application object, add it to every hash table.
3343 */
3344 void ust_app_add(struct ust_app *app)
3345 {
3346 assert(app);
3347 assert(app->notify_sock >= 0);
3348
3349 rcu_read_lock();
3350
3351 /*
3352 * On a re-registration, we want to kick out the previous registration of
3353 * that pid
3354 */
3355 lttng_ht_add_replace_ulong(ust_app_ht, &app->pid_n);
3356
3357 /*
3358 * The socket _should_ be unique until _we_ call close. So, an add_unique
3359 * is used for ust_app_ht_by_sock, which makes the assert fail if the
3360 * entry was already in the table.
3361 */
3362 lttng_ht_add_unique_ulong(ust_app_ht_by_sock, &app->sock_n);
3363
3364 /* Add application to the notify socket hash table. */
3365 lttng_ht_node_init_ulong(&app->notify_sock_n, app->notify_sock);
3366 lttng_ht_add_unique_ulong(ust_app_ht_by_notify_sock, &app->notify_sock_n);
3367
3368 DBG("App registered with pid:%d ppid:%d uid:%d gid:%d sock:%d name:%s "
3369 "notify_sock:%d (version %d.%d)", app->pid, app->ppid, app->uid,
3370 app->gid, app->sock, app->name, app->notify_sock, app->v_major,
3371 app->v_minor);
3372
3373 rcu_read_unlock();
3374 }
3375
3376 /*
3377 * Set the application version into the object.
3378 *
3379 * Return 0 on success or else a negative value, either an errno code or
3380 * an LTTng-UST error code.
3381 */
3382 int ust_app_version(struct ust_app *app)
3383 {
3384 int ret;
3385
3386 assert(app);
3387
3388 pthread_mutex_lock(&app->sock_lock);
3389 ret = ustctl_tracer_version(app->sock, &app->version);
3390 pthread_mutex_unlock(&app->sock_lock);
3391 if (ret < 0) {
3392 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
3393 ERR("UST app %d version failed with ret %d", app->sock, ret);
3394 } else {
3395 DBG3("UST app %d version failed. Application is dead", app->sock);
3396 }
3397 }
3398
3399 return ret;
3400 }
3401
3402 /*
3403 * Unregister app by removing it from the global traceable app list and freeing
3404 * the data struct.
3405 *
3406 * The socket is already closed at this point, so there is no need to close it.
3407 */
3408 void ust_app_unregister(int sock)
3409 {
3410 struct ust_app *lta;
3411 struct lttng_ht_node_ulong *node;
3412 struct lttng_ht_iter ust_app_sock_iter;
3413 struct lttng_ht_iter iter;
3414 struct ust_app_session *ua_sess;
3415 int ret;
3416
3417 rcu_read_lock();
3418
3419 /* Get the node reference for a call_rcu */
3420 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &ust_app_sock_iter);
3421 node = lttng_ht_iter_get_node_ulong(&ust_app_sock_iter);
3422 assert(node);
3423
3424 lta = caa_container_of(node, struct ust_app, sock_n);
3425 DBG("PID %d unregistering with sock %d", lta->pid, sock);
3426
3427 /*
3428 * For per-PID buffers, perform "push metadata" and flush all
3429 * application streams before removing app from hash tables,
3430 * ensuring proper behavior of data_pending check.
3431 * Remove sessions so they are not visible during deletion.
3432 */
3433 cds_lfht_for_each_entry(lta->sessions->ht, &iter.iter, ua_sess,
3434 node.node) {
3435 struct ust_registry_session *registry;
3436
3437 ret = lttng_ht_del(lta->sessions, &iter);
3438 if (ret) {
3439 /* The session was already removed and is scheduled for teardown. */
3440 continue;
3441 }
3442
3443 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
3444 (void) ust_app_flush_app_session(lta, ua_sess);
3445 }
3446
3447 /*
3448 * Add session to list for teardown. This is safe since at this point we
3449 * are the only one using this list.
3450 */
3451 pthread_mutex_lock(&ua_sess->lock);
3452
3453 if (ua_sess->deleted) {
3454 pthread_mutex_unlock(&ua_sess->lock);
3455 continue;
3456 }
3457
3458 /*
3459 * Normally, this is done in the delete session process which is
3460 * executed in the call rcu below. However, upon unregistration we can't
3461 * afford to wait for the grace period before pushing data, or else the
3462 * data pending feature can race between the unregistration and stop
3463 * command, where the data pending command is sent *before* the grace
3464 * period ends.
3465 *
3466 * The close metadata below nullifies the metadata pointer in the
3467 * session so the delete session will NOT push/close a second time.
3468 */
3469 registry = get_session_registry(ua_sess);
3470 if (registry) {
3471 /* Push metadata for the application before freeing it. */
3472 (void) push_metadata(registry, ua_sess->consumer);
3473
3474 /*
3475 * Don't ask to close metadata for global per UID buffers. In that case,
3476 * close metadata only when the trace session is destroyed. Also, the
3477 * previous push metadata could have flagged the metadata registry to
3478 * close, so don't send a close command if it is already closed.
3479 */
3480 if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
3481 /* And ask to close it for this session registry. */
3482 (void) close_metadata(registry, ua_sess->consumer);
3483 }
3484 }
3485 cds_list_add(&ua_sess->teardown_node, &lta->teardown_head);
3486
3487 pthread_mutex_unlock(&ua_sess->lock);
3488 }
3489
3490 /* Remove application from socket hash table */
3491 ret = lttng_ht_del(ust_app_ht_by_sock, &ust_app_sock_iter);
3492 assert(!ret);
3493
3494 /*
3495 * Remove the application from the notify hash table. The thread handling
3496 * the notify socket could have deleted the node already, so ignore errors;
3497 * either way the state is valid. The close of that socket is handled by the other
3498 * thread.
3499 */
3500 iter.iter.node = &lta->notify_sock_n.node;
3501 (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
3502
3503 /*
3504 * Ignore return value since the node might have been removed before by an
3505 * add replace during app registration because the PID can be reassigned by
3506 * the OS.
3507 */
3508 iter.iter.node = &lta->pid_n.node;
3509 ret = lttng_ht_del(ust_app_ht, &iter);
3510 if (ret) {
3511 DBG3("Unregister app by PID %d failed. This can happen on pid reuse",
3512 lta->pid);
3513 }
3514
3515 /* Free memory */
3516 call_rcu(&lta->pid_n.head, delete_ust_app_rcu);
3517
3518 rcu_read_unlock();
3519 return;
3520 }
3521
3522 /*
3523 * Fill the events array with the names of all events of all registered apps.
3524 */
3525 int ust_app_list_events(struct lttng_event **events)
3526 {
3527 int ret, handle;
3528 size_t nbmem, count = 0;
3529 struct lttng_ht_iter iter;
3530 struct ust_app *app;
3531 struct lttng_event *tmp_event;
3532
3533 nbmem = UST_APP_EVENT_LIST_SIZE;
3534 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event));
3535 if (tmp_event == NULL) {
3536 PERROR("zmalloc ust app events");
3537 ret = -ENOMEM;
3538 goto error;
3539 }
3540
3541 rcu_read_lock();
3542
3543 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3544 struct lttng_ust_tracepoint_iter uiter;
3545
3546 health_code_update();
3547
3548 if (!app->compatible) {
3549 /*
3550 * TODO: In time, we should notify the caller of this error by
3551 * indicating that this is a version error.
3552 */
3553 continue;
3554 }
3555 pthread_mutex_lock(&app->sock_lock);
3556 handle = ustctl_tracepoint_list(app->sock);
3557 if (handle < 0) {
3558 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
3559 ERR("UST app list events getting handle failed for app pid %d",
3560 app->pid);
3561 }
3562 pthread_mutex_unlock(&app->sock_lock);
3563 continue;
3564 }
3565
3566 while ((ret = ustctl_tracepoint_list_get(app->sock, handle,
3567 &uiter)) != -LTTNG_UST_ERR_NOENT) {
3568 /* Handle ustctl error. */
3569 if (ret < 0) {
3570 int release_ret;
3571
3572 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
3573 ERR("UST app tp list get failed for app %d with ret %d",
3574 app->sock, ret);
3575 } else {
3576 DBG3("UST app tp list get failed. Application is dead");
3577 /*
3578 * This is normal behavior: an application can die while its
3579 * events are being listed. Don't report an error so the
3580 * execution can continue normally.
3581 */
3582 break;
3583 }
3584 free(tmp_event);
3585 release_ret = ustctl_release_handle(app->sock, handle);
3586 if (release_ret < 0 &&
3587 release_ret != -LTTNG_UST_ERR_EXITING &&
3588 release_ret != -EPIPE) {
3589 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
3590 }
3591 pthread_mutex_unlock(&app->sock_lock);
3592 goto rcu_error;
3593 }
3594
3595 health_code_update();
3596 if (count >= nbmem) {
3597 /* In case the realloc fails, we free the memory */
3598 struct lttng_event *new_tmp_event;
3599 size_t new_nbmem;
3600
3601 new_nbmem = nbmem << 1;
3602 DBG2("Reallocating event list from %zu to %zu entries",
3603 nbmem, new_nbmem);
3604 new_tmp_event = realloc(tmp_event,
3605 new_nbmem * sizeof(struct lttng_event));
3606 if (new_tmp_event == NULL) {
3607 int release_ret;
3608
3609 PERROR("realloc ust app events");
3610 free(tmp_event);
3611 ret = -ENOMEM;
3612 release_ret = ustctl_release_handle(app->sock, handle);
3613 if (release_ret < 0 &&
3614 release_ret != -LTTNG_UST_ERR_EXITING &&
3615 release_ret != -EPIPE) {
3616 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
3617 }
3618 pthread_mutex_unlock(&app->sock_lock);
3619 goto rcu_error;
3620 }
3621 /* Zero the new memory */
3622 memset(new_tmp_event + nbmem, 0,
3623 (new_nbmem - nbmem) * sizeof(struct lttng_event));
3624 nbmem = new_nbmem;
3625 tmp_event = new_tmp_event;
3626 }
3627 memcpy(tmp_event[count].name, uiter.name, LTTNG_UST_SYM_NAME_LEN);
3628 tmp_event[count].loglevel = uiter.loglevel;
3629 tmp_event[count].type = (enum lttng_event_type) LTTNG_UST_TRACEPOINT;
3630 tmp_event[count].pid = app->pid;
3631 tmp_event[count].enabled = -1;
3632 count++;
3633 }
3634 ret = ustctl_release_handle(app->sock, handle);
3635 pthread_mutex_unlock(&app->sock_lock);
3636 if (ret < 0 && ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
3637 ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
3638 }
3639 }
3640
3641 ret = count;
3642 *events = tmp_event;
3643
3644 DBG2("UST app list events done (%zu events)", count);
3645
3646 rcu_error:
3647 rcu_read_unlock();
3648 error:
3649 health_code_update();
3650 return ret;
3651 }
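/*
 * Growth-pattern sketch (editor's addition): the event array above is
 * grown geometrically (doubling), which keeps the amortized cost of an
 * append constant, and the old buffer is freed when realloc() fails so
 * nothing leaks. Generic shape with hypothetical names:
 *
 *   if (count >= nbmem) {
 *       size_t new_nbmem = nbmem << 1;
 *       struct lttng_event *new_buf;
 *
 *       new_buf = realloc(buf, new_nbmem * sizeof(*buf));
 *       if (!new_buf) {
 *           free(buf);  // avoid leaking the original buffer
 *           return -ENOMEM;
 *       }
 *       // Zero only the newly added tail of the array.
 *       memset(new_buf + nbmem, 0,
 *               (new_nbmem - nbmem) * sizeof(*new_buf));
 *       buf = new_buf;
 *       nbmem = new_nbmem;
 *   }
 */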
3652
3653 /*
3654 * Fill the fields array with the event fields of all registered apps.
3655 */
3656 int ust_app_list_event_fields(struct lttng_event_field **fields)
3657 {
3658 int ret, handle;
3659 size_t nbmem, count = 0;
3660 struct lttng_ht_iter iter;
3661 struct ust_app *app;
3662 struct lttng_event_field *tmp_event;
3663
3664 nbmem = UST_APP_EVENT_LIST_SIZE;
3665 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event_field));
3666 if (tmp_event == NULL) {
3667 PERROR("zmalloc ust app event fields");
3668 ret = -ENOMEM;
3669 goto error;
3670 }
3671
3672 rcu_read_lock();
3673
3674 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3675 struct lttng_ust_field_iter uiter;
3676
3677 health_code_update();
3678
3679 if (!app->compatible) {
3680 /*
3681 * TODO: In time, we should notify the caller of this error by
3682 * indicating that this is a version error.
3683 */
3684 continue;
3685 }
3686 pthread_mutex_lock(&app->sock_lock);
3687 handle = ustctl_tracepoint_field_list(app->sock);
3688 if (handle < 0) {
3689 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
3690 ERR("UST app list field getting handle failed for app pid %d",
3691 app->pid);
3692 }
3693 pthread_mutex_unlock(&app->sock_lock);
3694 continue;
3695 }
3696
3697 while ((ret = ustctl_tracepoint_field_list_get(app->sock, handle,
3698 &uiter)) != -LTTNG_UST_ERR_NOENT) {
3699 /* Handle ustctl error. */
3700 if (ret < 0) {
3701 int release_ret;
3702
3703 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
3704 ERR("UST app tp list field failed for app %d with ret %d",
3705 app->sock, ret);
3706 } else {
3707 DBG3("UST app tp list field failed. Application is dead");
3708 /*
3709 * This is normal behavior: an application can die while its
3710 * event fields are being listed. Don't report an error so the
3711 * execution can continue normally.
3712 */
3713 break;
3714 }
3715 free(tmp_event);
3716 release_ret = ustctl_release_handle(app->sock, handle);
3717 pthread_mutex_unlock(&app->sock_lock);
3718 if (release_ret < 0 &&
3719 release_ret != -LTTNG_UST_ERR_EXITING &&
3720 release_ret != -EPIPE) {
3721 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);