Fix: per-pid ust buffers flush race with application unregister
[lttng-tools.git] / src / bin / lttng-sessiond / ust-app.c
1 /*
2 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
3 * Copyright (C) 2016 - Jérémie Galarneau <jeremie.galarneau@efficios.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2 only,
7 * as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
17 */
18
19 #define _LGPL_SOURCE
20 #include <errno.h>
21 #include <inttypes.h>
22 #include <pthread.h>
23 #include <stdio.h>
24 #include <stdlib.h>
25 #include <string.h>
26 #include <sys/stat.h>
27 #include <sys/types.h>
28 #include <unistd.h>
29 #include <urcu/compiler.h>
30 #include <lttng/ust-error.h>
31 #include <signal.h>
32
33 #include <common/common.h>
34 #include <common/sessiond-comm/sessiond-comm.h>
35
36 #include "buffer-registry.h"
37 #include "fd-limit.h"
38 #include "health-sessiond.h"
39 #include "ust-app.h"
40 #include "ust-consumer.h"
41 #include "ust-ctl.h"
42 #include "utils.h"
43
44 static
45 int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess);
46
47 /* Next available channel key. Access under next_channel_key_lock. */
48 static uint64_t _next_channel_key;
49 static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;
50
51 /* Next available session ID. Access under next_session_id_lock. */
52 static uint64_t _next_session_id;
53 static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;
54
55 /*
56 * Return the incremented value of next_channel_key.
57 */
58 static uint64_t get_next_channel_key(void)
59 {
60 uint64_t ret;
61
62 pthread_mutex_lock(&next_channel_key_lock);
63 ret = ++_next_channel_key;
64 pthread_mutex_unlock(&next_channel_key_lock);
65 return ret;
66 }
67
68 /*
69 * Return the atomically incremented value of next_session_id.
70 */
71 static uint64_t get_next_session_id(void)
72 {
73 uint64_t ret;
74
75 pthread_mutex_lock(&next_session_id_lock);
76 ret = ++_next_session_id;
77 pthread_mutex_unlock(&next_session_id_lock);
78 return ret;
79 }
80
81 static void copy_channel_attr_to_ustctl(
82 struct ustctl_consumer_channel_attr *attr,
83 struct lttng_ust_channel_attr *uattr)
84 {
85 /* Copy event attributes since the layout is different. */
86 attr->subbuf_size = uattr->subbuf_size;
87 attr->num_subbuf = uattr->num_subbuf;
88 attr->overwrite = uattr->overwrite;
89 attr->switch_timer_interval = uattr->switch_timer_interval;
90 attr->read_timer_interval = uattr->read_timer_interval;
91 attr->output = uattr->output;
92 }
93
/*
 * Match function for the event hash table lookup.
 *
 * An ust app event is matched against the ust_app_ht_key on four
 * attributes: event name, filter bytecode, loglevel, and exclusion list.
 *
 * Return 1 on a full match, 0 otherwise.
 */
static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
{
	struct ust_app_event *event;
	const struct ust_app_ht_key *key;
	int ev_loglevel_value;

	assert(node);
	assert(_key);

	event = caa_container_of(node, struct ust_app_event, node.node);
	key = _key;
	ev_loglevel_value = event->attr.loglevel;

	/* Match the 4 elements of the key: name, filter, loglevel, exclusions */

	/* Event name: bounded by the size of the stored event name. */
	if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
		goto no_match;
	}

	/* Event loglevel. */
	if (ev_loglevel_value != key->loglevel_type) {
		if (event->attr.loglevel_type == LTTNG_UST_LOGLEVEL_ALL
				&& key->loglevel_type == 0 &&
				ev_loglevel_value == -1) {
			/*
			 * Match is accepted. This is because on event creation, the
			 * loglevel is set to -1 if the event loglevel type is ALL so 0 and
			 * -1 are accepted for this loglevel type since 0 is the one set by
			 * the API when receiving an enable event.
			 */
		} else {
			goto no_match;
		}
	}

	/* One of the filters is NULL, fail. */
	if ((key->filter && !event->filter) || (!key->filter && event->filter)) {
		goto no_match;
	}

	if (key->filter && event->filter) {
		/* Both filters exists, check length followed by the bytecode. */
		if (event->filter->len != key->filter->len ||
				memcmp(event->filter->data, key->filter->data,
					event->filter->len) != 0) {
			goto no_match;
		}
	}

	/* One of the exclusions is NULL, fail. */
	if ((key->exclusion && !event->exclusion) || (!key->exclusion && event->exclusion)) {
		goto no_match;
	}

	if (key->exclusion && event->exclusion) {
		/* Both exclusions exists, check count followed by the names. */
		if (event->exclusion->count != key->exclusion->count ||
				memcmp(event->exclusion->names, key->exclusion->names,
					event->exclusion->count * LTTNG_UST_SYM_NAME_LEN) != 0) {
			goto no_match;
		}
	}


	/* Match. */
	return 1;

no_match:
	return 0;
}
171
172 /*
173 * Unique add of an ust app event in the given ht. This uses the custom
174 * ht_match_ust_app_event match function and the event name as hash.
175 */
176 static void add_unique_ust_app_event(struct ust_app_channel *ua_chan,
177 struct ust_app_event *event)
178 {
179 struct cds_lfht_node *node_ptr;
180 struct ust_app_ht_key key;
181 struct lttng_ht *ht;
182
183 assert(ua_chan);
184 assert(ua_chan->events);
185 assert(event);
186
187 ht = ua_chan->events;
188 key.name = event->attr.name;
189 key.filter = event->filter;
190 key.loglevel_type = event->attr.loglevel;
191 key.exclusion = event->exclusion;
192
193 node_ptr = cds_lfht_add_unique(ht->ht,
194 ht->hash_fct(event->node.key, lttng_ht_seed),
195 ht_match_ust_app_event, &key, &event->node.node);
196 assert(node_ptr == &event->node.node);
197 }
198
199 /*
200 * Close the notify socket from the given RCU head object. This MUST be called
201 * through a call_rcu().
202 */
203 static void close_notify_sock_rcu(struct rcu_head *head)
204 {
205 int ret;
206 struct ust_app_notify_sock_obj *obj =
207 caa_container_of(head, struct ust_app_notify_sock_obj, head);
208
209 /* Must have a valid fd here. */
210 assert(obj->fd >= 0);
211
212 ret = close(obj->fd);
213 if (ret) {
214 ERR("close notify sock %d RCU", obj->fd);
215 }
216 lttng_fd_put(LTTNG_FD_APPS, 1);
217
218 free(obj);
219 }
220
221 /*
222 * Return the session registry according to the buffer type of the given
223 * session.
224 *
225 * A registry per UID object MUST exists before calling this function or else
226 * it assert() if not found. RCU read side lock must be acquired.
227 */
228 static struct ust_registry_session *get_session_registry(
229 struct ust_app_session *ua_sess)
230 {
231 struct ust_registry_session *registry = NULL;
232
233 assert(ua_sess);
234
235 switch (ua_sess->buffer_type) {
236 case LTTNG_BUFFER_PER_PID:
237 {
238 struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
239 if (!reg_pid) {
240 goto error;
241 }
242 registry = reg_pid->registry->reg.ust;
243 break;
244 }
245 case LTTNG_BUFFER_PER_UID:
246 {
247 struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
248 ua_sess->tracing_id, ua_sess->bits_per_long, ua_sess->uid);
249 if (!reg_uid) {
250 goto error;
251 }
252 registry = reg_uid->registry->reg.ust;
253 break;
254 }
255 default:
256 assert(0);
257 };
258
259 error:
260 return registry;
261 }
262
263 /*
264 * Delete ust context safely. RCU read lock must be held before calling
265 * this function.
266 */
267 static
268 void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx,
269 struct ust_app *app)
270 {
271 int ret;
272
273 assert(ua_ctx);
274
275 if (ua_ctx->obj) {
276 pthread_mutex_lock(&app->sock_lock);
277 ret = ustctl_release_object(sock, ua_ctx->obj);
278 pthread_mutex_unlock(&app->sock_lock);
279 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
280 ERR("UST app sock %d release ctx obj handle %d failed with ret %d",
281 sock, ua_ctx->obj->handle, ret);
282 }
283 free(ua_ctx->obj);
284 }
285 free(ua_ctx);
286 }
287
288 /*
289 * Delete ust app event safely. RCU read lock must be held before calling
290 * this function.
291 */
292 static
293 void delete_ust_app_event(int sock, struct ust_app_event *ua_event,
294 struct ust_app *app)
295 {
296 int ret;
297
298 assert(ua_event);
299
300 free(ua_event->filter);
301 if (ua_event->exclusion != NULL)
302 free(ua_event->exclusion);
303 if (ua_event->obj != NULL) {
304 pthread_mutex_lock(&app->sock_lock);
305 ret = ustctl_release_object(sock, ua_event->obj);
306 pthread_mutex_unlock(&app->sock_lock);
307 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
308 ERR("UST app sock %d release event obj failed with ret %d",
309 sock, ret);
310 }
311 free(ua_event->obj);
312 }
313 free(ua_event);
314 }
315
316 /*
317 * Release ust data object of the given stream.
318 *
319 * Return 0 on success or else a negative value.
320 */
321 static int release_ust_app_stream(int sock, struct ust_app_stream *stream,
322 struct ust_app *app)
323 {
324 int ret = 0;
325
326 assert(stream);
327
328 if (stream->obj) {
329 pthread_mutex_lock(&app->sock_lock);
330 ret = ustctl_release_object(sock, stream->obj);
331 pthread_mutex_unlock(&app->sock_lock);
332 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
333 ERR("UST app sock %d release stream obj failed with ret %d",
334 sock, ret);
335 }
336 lttng_fd_put(LTTNG_FD_APPS, 2);
337 free(stream->obj);
338 }
339
340 return ret;
341 }
342
/*
 * Delete ust app stream safely. RCU read lock must be held before calling
 * this function.
 *
 * The tracer-side stream object is released first (errors deliberately
 * ignored on this deletion path), then the stream structure is freed.
 */
static
void delete_ust_app_stream(int sock, struct ust_app_stream *stream,
		struct ust_app *app)
{
	assert(stream);

	(void) release_ust_app_stream(sock, stream, app);
	free(stream);
}
356
357 /*
358 * We need to execute ht_destroy outside of RCU read-side critical
359 * section and outside of call_rcu thread, so we postpone its execution
360 * using ht_cleanup_push. It is simpler than to change the semantic of
361 * the many callers of delete_ust_app_session().
362 */
363 static
364 void delete_ust_app_channel_rcu(struct rcu_head *head)
365 {
366 struct ust_app_channel *ua_chan =
367 caa_container_of(head, struct ust_app_channel, rcu_head);
368
369 ht_cleanup_push(ua_chan->ctx);
370 ht_cleanup_push(ua_chan->events);
371 free(ua_chan);
372 }
373
/*
 * Delete ust app channel safely. RCU read lock must be held before calling
 * this function.
 *
 * Wipes, in order, the channel's streams, contexts and events, removes the
 * channel from the per-PID session registry when applicable, releases the
 * tracer-side channel object, and finally frees the channel through a
 * call_rcu callback.
 */
static
void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;
	struct ust_app_stream *stream, *stmp;
	struct ust_registry_session *registry;

	assert(ua_chan);

	DBG3("UST app deleting channel %s", ua_chan->name);

	/* Wipe stream */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		cds_list_del(&stream->list);
		delete_ust_app_stream(sock, stream, app);
	}

	/* Wipe context */
	cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
		cds_list_del(&ua_ctx->list);
		ret = lttng_ht_del(ua_chan->ctx, &iter);
		assert(!ret);
		delete_ust_app_ctx(sock, ua_ctx, app);
	}

	/* Wipe events */
	cds_lfht_for_each_entry(ua_chan->events->ht, &iter.iter, ua_event,
			node.node) {
		ret = lttng_ht_del(ua_chan->events, &iter);
		assert(!ret);
		delete_ust_app_event(sock, ua_event, app);
	}

	if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
		/* Wipe and free registry from session registry. */
		registry = get_session_registry(ua_chan->session);
		if (registry) {
			ust_registry_channel_del_free(registry, ua_chan->key);
		}
	}

	if (ua_chan->obj != NULL) {
		/* Remove channel from application UST object descriptor. */
		iter.iter.node = &ua_chan->ust_objd_node.node;
		ret = lttng_ht_del(app->ust_objd, &iter);
		assert(!ret);
		/* Commands on the application socket are serialized by sock_lock. */
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_object(sock, ua_chan->obj);
		pthread_mutex_unlock(&app->sock_lock);
		/* EPIPE/EXITING simply mean the application is gone. */
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release channel obj failed with ret %d",
					sock, ret);
		}
		lttng_fd_put(LTTNG_FD_APPS, 1);
		free(ua_chan->obj);
	}
	/* Hash tables are destroyed later; see delete_ust_app_channel_rcu. */
	call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
}
440
441 int ust_app_register_done(struct ust_app *app)
442 {
443 int ret;
444
445 pthread_mutex_lock(&app->sock_lock);
446 ret = ustctl_register_done(app->sock);
447 pthread_mutex_unlock(&app->sock_lock);
448 return ret;
449 }
450
451 int ust_app_release_object(struct ust_app *app, struct lttng_ust_object_data *data)
452 {
453 int ret, sock;
454
455 if (app) {
456 pthread_mutex_lock(&app->sock_lock);
457 sock = app->sock;
458 } else {
459 sock = -1;
460 }
461 ret = ustctl_release_object(sock, data);
462 if (app) {
463 pthread_mutex_unlock(&app->sock_lock);
464 }
465 return ret;
466 }
467
/*
 * Push metadata to consumer socket.
 *
 * RCU read-side lock must be held to guarantee existance of socket.
 * Must be called with the ust app session lock held.
 * Must be called with the registry lock held; note that the registry lock
 * is temporarily released around the actual push (see below) and
 * reacquired before returning.
 *
 * On success, return the len of metadata pushed or else a negative value.
 * Returning a -EPIPE return value means we could not send the metadata,
 * but it can be caused by recoverable errors (e.g. the application has
 * terminated concurrently).
 */
ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
		struct consumer_socket *socket, int send_zero_data)
{
	int ret;
	char *metadata_str = NULL;
	size_t len, offset, new_metadata_len_sent;
	ssize_t ret_val;
	uint64_t metadata_key;

	assert(registry);
	assert(socket);

	metadata_key = registry->metadata_key;

	/*
	 * Means that no metadata was assigned to the session. This can
	 * happens if no start has been done previously.
	 */
	if (!metadata_key) {
		return 0;
	}

	/*
	 * On a push metadata error either the consumer is dead or the
	 * metadata channel has been destroyed because its endpoint
	 * might have died (e.g: relayd), or because the application has
	 * exited. If so, the metadata closed flag is set to 1 so we
	 * deny pushing metadata again which is not valid anymore on the
	 * consumer side.
	 */
	if (registry->metadata_closed) {
		return -EPIPE;
	}

	/* Send only the portion that has not been sent yet. */
	offset = registry->metadata_len_sent;
	len = registry->metadata_len - registry->metadata_len_sent;
	new_metadata_len_sent = registry->metadata_len;
	if (len == 0) {
		DBG3("No metadata to push for metadata key %" PRIu64,
				registry->metadata_key);
		ret_val = len;
		if (send_zero_data) {
			DBG("No metadata to push");
			goto push_data;
		}
		goto end;
	}

	/* Allocate only what we have to send. */
	metadata_str = zmalloc(len);
	if (!metadata_str) {
		PERROR("zmalloc ust app metadata string");
		ret_val = -ENOMEM;
		goto error;
	}
	/* Copy what we haven't sent out. */
	memcpy(metadata_str, registry->metadata + offset, len);

push_data:
	pthread_mutex_unlock(&registry->lock);
	/*
	 * We need to unlock the registry while we push metadata to
	 * break a circular dependency between the consumerd metadata
	 * lock and the sessiond registry lock. Indeed, pushing metadata
	 * to the consumerd awaits that it gets pushed all the way to
	 * relayd, but doing so requires grabbing the metadata lock. If
	 * a concurrent metadata request is being performed by
	 * consumerd, this can try to grab the registry lock on the
	 * sessiond while holding the metadata lock on the consumer
	 * daemon. Those push and pull schemes are performed on two
	 * different bidirectionnal communication sockets.
	 */
	ret = consumer_push_metadata(socket, metadata_key,
			metadata_str, len, offset);
	pthread_mutex_lock(&registry->lock);
	if (ret < 0) {
		/*
		 * There is an acceptable race here between the registry
		 * metadata key assignment and the creation on the
		 * consumer. The session daemon can concurrently push
		 * metadata for this registry while being created on the
		 * consumer since the metadata key of the registry is
		 * assigned *before* it is setup to avoid the consumer
		 * to ask for metadata that could possibly be not found
		 * in the session daemon.
		 *
		 * The metadata will get pushed either by the session
		 * being stopped or the consumer requesting metadata if
		 * that race is triggered.
		 */
		if (ret == -LTTCOMM_CONSUMERD_CHANNEL_FAIL) {
			ret = 0;
		} else {
			ERR("Error pushing metadata to consumer");
		}
		ret_val = ret;
		goto error_push;
	} else {
		/*
		 * Metadata may have been concurrently pushed, since
		 * we're not holding the registry lock while pushing to
		 * consumer. This is handled by the fact that we send
		 * the metadata content, size, and the offset at which
		 * that metadata belongs. This may arrive out of order
		 * on the consumer side, and the consumer is able to
		 * deal with overlapping fragments. The consumer
		 * supports overlapping fragments, which must be
		 * contiguous starting from offset 0. We keep the
		 * largest metadata_len_sent value of the concurrent
		 * send.
		 */
		registry->metadata_len_sent =
			max_t(size_t, registry->metadata_len_sent,
				new_metadata_len_sent);
	}
	free(metadata_str);
	return len;

end:
error:
	/* NOTE: reached with ret_val == 0 on the "nothing to send" path. */
	if (ret_val) {
		/*
		 * On error, flag the registry that the metadata is
		 * closed. We were unable to push anything and this
		 * means that either the consumer is not responding or
		 * the metadata cache has been destroyed on the
		 * consumer.
		 */
		registry->metadata_closed = 1;
	}
error_push:
	free(metadata_str);
	return ret_val;
}
614
615 /*
616 * For a given application and session, push metadata to consumer.
617 * Either sock or consumer is required : if sock is NULL, the default
618 * socket to send the metadata is retrieved from consumer, if sock
619 * is not NULL we use it to send the metadata.
620 * RCU read-side lock must be held while calling this function,
621 * therefore ensuring existance of registry. It also ensures existance
622 * of socket throughout this function.
623 *
624 * Return 0 on success else a negative error.
625 * Returning a -EPIPE return value means we could not send the metadata,
626 * but it can be caused by recoverable errors (e.g. the application has
627 * terminated concurrently).
628 */
629 static int push_metadata(struct ust_registry_session *registry,
630 struct consumer_output *consumer)
631 {
632 int ret_val;
633 ssize_t ret;
634 struct consumer_socket *socket;
635
636 assert(registry);
637 assert(consumer);
638
639 pthread_mutex_lock(&registry->lock);
640 if (registry->metadata_closed) {
641 ret_val = -EPIPE;
642 goto error;
643 }
644
645 /* Get consumer socket to use to push the metadata.*/
646 socket = consumer_find_socket_by_bitness(registry->bits_per_long,
647 consumer);
648 if (!socket) {
649 ret_val = -1;
650 goto error;
651 }
652
653 ret = ust_app_push_metadata(registry, socket, 0);
654 if (ret < 0) {
655 ret_val = ret;
656 goto error;
657 }
658 pthread_mutex_unlock(&registry->lock);
659 return 0;
660
661 error:
662 pthread_mutex_unlock(&registry->lock);
663 return ret_val;
664 }
665
/*
 * Send to the consumer a close metadata command for the given session. Once
 * done, the metadata channel is deleted and the session metadata pointer is
 * nullified. The session lock MUST be held unless the application is
 * in the destroy path.
 *
 * Return 0 on success else a negative value.
 */
static int close_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret;
	struct consumer_socket *socket;

	assert(registry);
	assert(consumer);

	rcu_read_lock();

	pthread_mutex_lock(&registry->lock);

	/* Nothing to close if no metadata was created or it is already closed. */
	if (!registry->metadata_key || registry->metadata_closed) {
		ret = 0;
		goto end;
	}

	/* Get consumer socket to use to push the metadata.*/
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret = -1;
		goto error;
	}

	ret = consumer_close_metadata(socket, registry->metadata_key);
	if (ret < 0) {
		goto error;
	}

	/* NOTE: success deliberately falls through to the error label below. */
error:
	/*
	 * Metadata closed. Even on error this means that the consumer is not
	 * responding or not found so either way a second close should NOT be emit
	 * for this registry.
	 */
	registry->metadata_closed = 1;
end:
	pthread_mutex_unlock(&registry->lock);
	rcu_read_unlock();
	return ret;
}
717
718 /*
719 * We need to execute ht_destroy outside of RCU read-side critical
720 * section and outside of call_rcu thread, so we postpone its execution
721 * using ht_cleanup_push. It is simpler than to change the semantic of
722 * the many callers of delete_ust_app_session().
723 */
724 static
725 void delete_ust_app_session_rcu(struct rcu_head *head)
726 {
727 struct ust_app_session *ua_sess =
728 caa_container_of(head, struct ust_app_session, rcu_head);
729
730 ht_cleanup_push(ua_sess->channels);
731 free(ua_sess);
732 }
733
/*
 * Delete ust app session safely. RCU read lock must be held before calling
 * this function.
 *
 * Pushes any remaining metadata to the consumer, tears down every channel,
 * removes the per-PID buffer registry (if any), releases the tracer-side
 * session handle, and finally frees the session through call_rcu.
 */
static
void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_channel *ua_chan;
	struct ust_registry_session *registry;

	assert(ua_sess);

	pthread_mutex_lock(&ua_sess->lock);

	/* A session must only ever go through deletion once. */
	assert(!ua_sess->deleted);
	ua_sess->deleted = true;

	registry = get_session_registry(ua_sess);
	if (registry) {
		/* Push metadata for application before freeing the application. */
		(void) push_metadata(registry, ua_sess->consumer);

		/*
		 * Don't ask to close metadata for global per UID buffers. Close
		 * metadata only on destroy trace session in this case. Also, the
		 * previous push metadata could have flag the metadata registry to
		 * close so don't send a close command if closed.
		 */
		if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
			/* And ask to close it for this session registry. */
			(void) close_metadata(registry, ua_sess->consumer);
		}
	}

	/* Tear down every channel of the session. */
	cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
			node.node) {
		ret = lttng_ht_del(ua_sess->channels, &iter);
		assert(!ret);
		delete_ust_app_channel(sock, ua_chan, app);
	}

	/* In case of per PID, the registry is kept in the session. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
		struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
		if (reg_pid) {
			buffer_reg_pid_remove(reg_pid);
			buffer_reg_pid_destroy(reg_pid);
		}
	}

	if (ua_sess->handle != -1) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_handle(sock, ua_sess->handle);
		pthread_mutex_unlock(&app->sock_lock);
		/* EPIPE/EXITING simply mean the application is gone. */
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release session handle failed with ret %d",
					sock, ret);
		}
		/* Remove session from application UST object descriptor. */
		iter.iter.node = &ua_sess->ust_objd_node.node;
		ret = lttng_ht_del(app->ust_sessions_objd, &iter);
		assert(!ret);
	}

	pthread_mutex_unlock(&ua_sess->lock);

	/* Drop the reference taken on the session's consumer output. */
	consumer_output_put(ua_sess->consumer);

	call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
}
807
/*
 * Delete a traceable application structure from the global list. Never call
 * this function outside of a call_rcu call.
 *
 * RCU read side lock should _NOT_ be held when calling this function.
 */
static
void delete_ust_app(struct ust_app *app)
{
	int ret, sock;
	struct ust_app_session *ua_sess, *tmp_ua_sess;

	/* Delete ust app sessions info */
	sock = app->sock;
	/* Invalidate the socket so no further command can use it. */
	app->sock = -1;

	/* Wipe sessions */
	cds_list_for_each_entry_safe(ua_sess, tmp_ua_sess, &app->teardown_head,
			teardown_node) {
		/* Free every object in the session and the session. */
		rcu_read_lock();
		delete_ust_app_session(sock, ua_sess, app);
		rcu_read_unlock();
	}

	/* Hash table destruction is deferred to the ht-cleanup thread. */
	ht_cleanup_push(app->sessions);
	ht_cleanup_push(app->ust_sessions_objd);
	ht_cleanup_push(app->ust_objd);

	/*
	 * Wait until we have deleted the application from the sock hash table
	 * before closing this socket, otherwise an application could re-use the
	 * socket ID and race with the teardown, using the same hash table entry.
	 *
	 * It's OK to leave the close in call_rcu. We want it to stay unique for
	 * all RCU readers that could run concurrently with unregister app,
	 * therefore we _need_ to only close that socket after a grace period. So
	 * it should stay in this RCU callback.
	 *
	 * This close() is a very important step of the synchronization model so
	 * every modification to this function must be carefully reviewed.
	 */
	ret = close(sock);
	if (ret) {
		PERROR("close");
	}
	lttng_fd_put(LTTNG_FD_APPS, 1);

	DBG2("UST app pid %d deleted", app->pid);
	free(app);
}
859
860 /*
861 * URCU intermediate call to delete an UST app.
862 */
863 static
864 void delete_ust_app_rcu(struct rcu_head *head)
865 {
866 struct lttng_ht_node_ulong *node =
867 caa_container_of(head, struct lttng_ht_node_ulong, head);
868 struct ust_app *app =
869 caa_container_of(node, struct ust_app, pid_n);
870
871 DBG3("Call RCU deleting app PID %d", app->pid);
872 delete_ust_app(app);
873 }
874
875 /*
876 * Delete the session from the application ht and delete the data structure by
877 * freeing every object inside and releasing them.
878 */
879 static void destroy_app_session(struct ust_app *app,
880 struct ust_app_session *ua_sess)
881 {
882 int ret;
883 struct lttng_ht_iter iter;
884
885 assert(app);
886 assert(ua_sess);
887
888 iter.iter.node = &ua_sess->node.node;
889 ret = lttng_ht_del(app->sessions, &iter);
890 if (ret) {
891 /* Already scheduled for teardown. */
892 goto end;
893 }
894
895 /* Once deleted, free the data structure. */
896 delete_ust_app_session(app->sock, ua_sess, app);
897
898 end:
899 return;
900 }
901
902 /*
903 * Alloc new UST app session.
904 */
905 static
906 struct ust_app_session *alloc_ust_app_session(struct ust_app *app)
907 {
908 struct ust_app_session *ua_sess;
909
910 /* Init most of the default value by allocating and zeroing */
911 ua_sess = zmalloc(sizeof(struct ust_app_session));
912 if (ua_sess == NULL) {
913 PERROR("malloc");
914 goto error_free;
915 }
916
917 ua_sess->handle = -1;
918 ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
919 ua_sess->metadata_attr.type = LTTNG_UST_CHAN_METADATA;
920 pthread_mutex_init(&ua_sess->lock, NULL);
921
922 return ua_sess;
923
924 error_free:
925 return NULL;
926 }
927
928 /*
929 * Alloc new UST app channel.
930 */
931 static
932 struct ust_app_channel *alloc_ust_app_channel(char *name,
933 struct ust_app_session *ua_sess,
934 struct lttng_ust_channel_attr *attr)
935 {
936 struct ust_app_channel *ua_chan;
937
938 /* Init most of the default value by allocating and zeroing */
939 ua_chan = zmalloc(sizeof(struct ust_app_channel));
940 if (ua_chan == NULL) {
941 PERROR("malloc");
942 goto error;
943 }
944
945 /* Setup channel name */
946 strncpy(ua_chan->name, name, sizeof(ua_chan->name));
947 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
948
949 ua_chan->enabled = 1;
950 ua_chan->handle = -1;
951 ua_chan->session = ua_sess;
952 ua_chan->key = get_next_channel_key();
953 ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
954 ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
955 lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);
956
957 CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
958 CDS_INIT_LIST_HEAD(&ua_chan->ctx_list);
959
960 /* Copy attributes */
961 if (attr) {
962 /* Translate from lttng_ust_channel to ustctl_consumer_channel_attr. */
963 ua_chan->attr.subbuf_size = attr->subbuf_size;
964 ua_chan->attr.num_subbuf = attr->num_subbuf;
965 ua_chan->attr.overwrite = attr->overwrite;
966 ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
967 ua_chan->attr.read_timer_interval = attr->read_timer_interval;
968 ua_chan->attr.output = attr->output;
969 }
970 /* By default, the channel is a per cpu channel. */
971 ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
972
973 DBG3("UST app channel %s allocated", ua_chan->name);
974
975 return ua_chan;
976
977 error:
978 return NULL;
979 }
980
981 /*
982 * Allocate and initialize a UST app stream.
983 *
984 * Return newly allocated stream pointer or NULL on error.
985 */
986 struct ust_app_stream *ust_app_alloc_stream(void)
987 {
988 struct ust_app_stream *stream = NULL;
989
990 stream = zmalloc(sizeof(*stream));
991 if (stream == NULL) {
992 PERROR("zmalloc ust app stream");
993 goto error;
994 }
995
996 /* Zero could be a valid value for a handle so flag it to -1. */
997 stream->handle = -1;
998
999 error:
1000 return stream;
1001 }
1002
1003 /*
1004 * Alloc new UST app event.
1005 */
1006 static
1007 struct ust_app_event *alloc_ust_app_event(char *name,
1008 struct lttng_ust_event *attr)
1009 {
1010 struct ust_app_event *ua_event;
1011
1012 /* Init most of the default value by allocating and zeroing */
1013 ua_event = zmalloc(sizeof(struct ust_app_event));
1014 if (ua_event == NULL) {
1015 PERROR("malloc");
1016 goto error;
1017 }
1018
1019 ua_event->enabled = 1;
1020 strncpy(ua_event->name, name, sizeof(ua_event->name));
1021 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
1022 lttng_ht_node_init_str(&ua_event->node, ua_event->name);
1023
1024 /* Copy attributes */
1025 if (attr) {
1026 memcpy(&ua_event->attr, attr, sizeof(ua_event->attr));
1027 }
1028
1029 DBG3("UST app event %s allocated", ua_event->name);
1030
1031 return ua_event;
1032
1033 error:
1034 return NULL;
1035 }
1036
/*
 * Alloc new UST app context.
 *
 * The given attributes, if any, are copied into the new context. For
 * application-provided contexts, the provider and context names are
 * deep-copied so the returned object owns its own strings.
 *
 * Return the allocated context or NULL on allocation/copy failure.
 */
static
struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context_attr *uctx)
{
	struct ust_app_ctx *ua_ctx;

	ua_ctx = zmalloc(sizeof(struct ust_app_ctx));
	if (ua_ctx == NULL) {
		goto error;
	}

	CDS_INIT_LIST_HEAD(&ua_ctx->list);

	if (uctx) {
		memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
		if (uctx->ctx == LTTNG_UST_CONTEXT_APP_CONTEXT) {
			char *provider_name = NULL, *ctx_name = NULL;

			/*
			 * The shallow memcpy above would leave the name pointers
			 * aliasing the caller's buffers; duplicate them so the
			 * context owns its strings.
			 */
			provider_name = strdup(uctx->u.app_ctx.provider_name);
			ctx_name = strdup(uctx->u.app_ctx.ctx_name);
			if (!provider_name || !ctx_name) {
				free(provider_name);
				free(ctx_name);
				goto error;
			}

			ua_ctx->ctx.u.app_ctx.provider_name = provider_name;
			ua_ctx->ctx.u.app_ctx.ctx_name = ctx_name;
		}
	}

	DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);
	return ua_ctx;
error:
	free(ua_ctx);
	return NULL;
}
1076
1077 /*
1078 * Allocate a filter and copy the given original filter.
1079 *
1080 * Return allocated filter or NULL on error.
1081 */
1082 static struct lttng_filter_bytecode *copy_filter_bytecode(
1083 struct lttng_filter_bytecode *orig_f)
1084 {
1085 struct lttng_filter_bytecode *filter = NULL;
1086
1087 /* Copy filter bytecode */
1088 filter = zmalloc(sizeof(*filter) + orig_f->len);
1089 if (!filter) {
1090 PERROR("zmalloc alloc filter bytecode");
1091 goto error;
1092 }
1093
1094 memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
1095
1096 error:
1097 return filter;
1098 }
1099
1100 /*
1101 * Create a liblttng-ust filter bytecode from given bytecode.
1102 *
1103 * Return allocated filter or NULL on error.
1104 */
1105 static struct lttng_ust_filter_bytecode *create_ust_bytecode_from_bytecode(
1106 struct lttng_filter_bytecode *orig_f)
1107 {
1108 struct lttng_ust_filter_bytecode *filter = NULL;
1109
1110 /* Copy filter bytecode */
1111 filter = zmalloc(sizeof(*filter) + orig_f->len);
1112 if (!filter) {
1113 PERROR("zmalloc alloc ust filter bytecode");
1114 goto error;
1115 }
1116
1117 assert(sizeof(struct lttng_filter_bytecode) ==
1118 sizeof(struct lttng_ust_filter_bytecode));
1119 memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
1120 error:
1121 return filter;
1122 }
1123
1124 /*
1125 * Find an ust_app using the sock and return it. RCU read side lock must be
1126 * held before calling this helper function.
1127 */
1128 struct ust_app *ust_app_find_by_sock(int sock)
1129 {
1130 struct lttng_ht_node_ulong *node;
1131 struct lttng_ht_iter iter;
1132
1133 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
1134 node = lttng_ht_iter_get_node_ulong(&iter);
1135 if (node == NULL) {
1136 DBG2("UST app find by sock %d not found", sock);
1137 goto error;
1138 }
1139
1140 return caa_container_of(node, struct ust_app, sock_n);
1141
1142 error:
1143 return NULL;
1144 }
1145
1146 /*
1147 * Find an ust_app using the notify sock and return it. RCU read side lock must
1148 * be held before calling this helper function.
1149 */
1150 static struct ust_app *find_app_by_notify_sock(int sock)
1151 {
1152 struct lttng_ht_node_ulong *node;
1153 struct lttng_ht_iter iter;
1154
1155 lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
1156 &iter);
1157 node = lttng_ht_iter_get_node_ulong(&iter);
1158 if (node == NULL) {
1159 DBG2("UST app find by notify sock %d not found", sock);
1160 goto error;
1161 }
1162
1163 return caa_container_of(node, struct ust_app, notify_sock_n);
1164
1165 error:
1166 return NULL;
1167 }
1168
1169 /*
1170 * Lookup for an ust app event based on event name, filter bytecode and the
1171 * event loglevel.
1172 *
1173 * Return an ust_app_event object or NULL on error.
1174 */
1175 static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
1176 char *name, struct lttng_filter_bytecode *filter,
1177 int loglevel_value,
1178 const struct lttng_event_exclusion *exclusion)
1179 {
1180 struct lttng_ht_iter iter;
1181 struct lttng_ht_node_str *node;
1182 struct ust_app_event *event = NULL;
1183 struct ust_app_ht_key key;
1184
1185 assert(name);
1186 assert(ht);
1187
1188 /* Setup key for event lookup. */
1189 key.name = name;
1190 key.filter = filter;
1191 key.loglevel_type = loglevel_value;
1192 /* lttng_event_exclusion and lttng_ust_event_exclusion structures are similar */
1193 key.exclusion = exclusion;
1194
1195 /* Lookup using the event name as hash and a custom match fct. */
1196 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
1197 ht_match_ust_app_event, &key, &iter.iter);
1198 node = lttng_ht_iter_get_node_str(&iter);
1199 if (node == NULL) {
1200 goto end;
1201 }
1202
1203 event = caa_container_of(node, struct ust_app_event, node);
1204
1205 end:
1206 return event;
1207 }
1208
1209 /*
1210 * Create the channel context on the tracer.
1211 *
1212 * Called with UST app session lock held.
1213 */
1214 static
1215 int create_ust_channel_context(struct ust_app_channel *ua_chan,
1216 struct ust_app_ctx *ua_ctx, struct ust_app *app)
1217 {
1218 int ret;
1219
1220 health_code_update();
1221
1222 pthread_mutex_lock(&app->sock_lock);
1223 ret = ustctl_add_context(app->sock, &ua_ctx->ctx,
1224 ua_chan->obj, &ua_ctx->obj);
1225 pthread_mutex_unlock(&app->sock_lock);
1226 if (ret < 0) {
1227 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1228 ERR("UST app create channel context failed for app (pid: %d) "
1229 "with ret %d", app->pid, ret);
1230 } else {
1231 /*
1232 * This is normal behavior, an application can die during the
1233 * creation process. Don't report an error so the execution can
1234 * continue normally.
1235 */
1236 ret = 0;
1237 DBG3("UST app disable event failed. Application is dead.");
1238 }
1239 goto error;
1240 }
1241
1242 ua_ctx->handle = ua_ctx->obj->handle;
1243
1244 DBG2("UST app context handle %d created successfully for channel %s",
1245 ua_ctx->handle, ua_chan->name);
1246
1247 error:
1248 health_code_update();
1249 return ret;
1250 }
1251
1252 /*
1253 * Set the filter on the tracer.
1254 */
1255 static
1256 int set_ust_event_filter(struct ust_app_event *ua_event,
1257 struct ust_app *app)
1258 {
1259 int ret;
1260 struct lttng_ust_filter_bytecode *ust_bytecode = NULL;
1261
1262 health_code_update();
1263
1264 if (!ua_event->filter) {
1265 ret = 0;
1266 goto error;
1267 }
1268
1269 ust_bytecode = create_ust_bytecode_from_bytecode(ua_event->filter);
1270 if (!ust_bytecode) {
1271 ret = -LTTNG_ERR_NOMEM;
1272 goto error;
1273 }
1274 pthread_mutex_lock(&app->sock_lock);
1275 ret = ustctl_set_filter(app->sock, ust_bytecode,
1276 ua_event->obj);
1277 pthread_mutex_unlock(&app->sock_lock);
1278 if (ret < 0) {
1279 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1280 ERR("UST app event %s filter failed for app (pid: %d) "
1281 "with ret %d", ua_event->attr.name, app->pid, ret);
1282 } else {
1283 /*
1284 * This is normal behavior, an application can die during the
1285 * creation process. Don't report an error so the execution can
1286 * continue normally.
1287 */
1288 ret = 0;
1289 DBG3("UST app filter event failed. Application is dead.");
1290 }
1291 goto error;
1292 }
1293
1294 DBG2("UST filter set successfully for event %s", ua_event->name);
1295
1296 error:
1297 health_code_update();
1298 free(ust_bytecode);
1299 return ret;
1300 }
1301
1302 static
1303 struct lttng_ust_event_exclusion *create_ust_exclusion_from_exclusion(
1304 struct lttng_event_exclusion *exclusion)
1305 {
1306 struct lttng_ust_event_exclusion *ust_exclusion = NULL;
1307 size_t exclusion_alloc_size = sizeof(struct lttng_ust_event_exclusion) +
1308 LTTNG_UST_SYM_NAME_LEN * exclusion->count;
1309
1310 ust_exclusion = zmalloc(exclusion_alloc_size);
1311 if (!ust_exclusion) {
1312 PERROR("malloc");
1313 goto end;
1314 }
1315
1316 assert(sizeof(struct lttng_event_exclusion) ==
1317 sizeof(struct lttng_ust_event_exclusion));
1318 memcpy(ust_exclusion, exclusion, exclusion_alloc_size);
1319 end:
1320 return ust_exclusion;
1321 }
1322
1323 /*
1324 * Set event exclusions on the tracer.
1325 */
1326 static
1327 int set_ust_event_exclusion(struct ust_app_event *ua_event,
1328 struct ust_app *app)
1329 {
1330 int ret;
1331 struct lttng_ust_event_exclusion *ust_exclusion = NULL;
1332
1333 health_code_update();
1334
1335 if (!ua_event->exclusion || !ua_event->exclusion->count) {
1336 ret = 0;
1337 goto error;
1338 }
1339
1340 ust_exclusion = create_ust_exclusion_from_exclusion(
1341 ua_event->exclusion);
1342 if (!ust_exclusion) {
1343 ret = -LTTNG_ERR_NOMEM;
1344 goto error;
1345 }
1346 pthread_mutex_lock(&app->sock_lock);
1347 ret = ustctl_set_exclusion(app->sock, ust_exclusion, ua_event->obj);
1348 pthread_mutex_unlock(&app->sock_lock);
1349 if (ret < 0) {
1350 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1351 ERR("UST app event %s exclusions failed for app (pid: %d) "
1352 "with ret %d", ua_event->attr.name, app->pid, ret);
1353 } else {
1354 /*
1355 * This is normal behavior, an application can die during the
1356 * creation process. Don't report an error so the execution can
1357 * continue normally.
1358 */
1359 ret = 0;
1360 DBG3("UST app event exclusion failed. Application is dead.");
1361 }
1362 goto error;
1363 }
1364
1365 DBG2("UST exclusion set successfully for event %s", ua_event->name);
1366
1367 error:
1368 health_code_update();
1369 free(ust_exclusion);
1370 return ret;
1371 }
1372
1373 /*
1374 * Disable the specified event on to UST tracer for the UST session.
1375 */
1376 static int disable_ust_event(struct ust_app *app,
1377 struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
1378 {
1379 int ret;
1380
1381 health_code_update();
1382
1383 pthread_mutex_lock(&app->sock_lock);
1384 ret = ustctl_disable(app->sock, ua_event->obj);
1385 pthread_mutex_unlock(&app->sock_lock);
1386 if (ret < 0) {
1387 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1388 ERR("UST app event %s disable failed for app (pid: %d) "
1389 "and session handle %d with ret %d",
1390 ua_event->attr.name, app->pid, ua_sess->handle, ret);
1391 } else {
1392 /*
1393 * This is normal behavior, an application can die during the
1394 * creation process. Don't report an error so the execution can
1395 * continue normally.
1396 */
1397 ret = 0;
1398 DBG3("UST app disable event failed. Application is dead.");
1399 }
1400 goto error;
1401 }
1402
1403 DBG2("UST app event %s disabled successfully for app (pid: %d)",
1404 ua_event->attr.name, app->pid);
1405
1406 error:
1407 health_code_update();
1408 return ret;
1409 }
1410
1411 /*
1412 * Disable the specified channel on to UST tracer for the UST session.
1413 */
1414 static int disable_ust_channel(struct ust_app *app,
1415 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1416 {
1417 int ret;
1418
1419 health_code_update();
1420
1421 pthread_mutex_lock(&app->sock_lock);
1422 ret = ustctl_disable(app->sock, ua_chan->obj);
1423 pthread_mutex_unlock(&app->sock_lock);
1424 if (ret < 0) {
1425 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1426 ERR("UST app channel %s disable failed for app (pid: %d) "
1427 "and session handle %d with ret %d",
1428 ua_chan->name, app->pid, ua_sess->handle, ret);
1429 } else {
1430 /*
1431 * This is normal behavior, an application can die during the
1432 * creation process. Don't report an error so the execution can
1433 * continue normally.
1434 */
1435 ret = 0;
1436 DBG3("UST app disable channel failed. Application is dead.");
1437 }
1438 goto error;
1439 }
1440
1441 DBG2("UST app channel %s disabled successfully for app (pid: %d)",
1442 ua_chan->name, app->pid);
1443
1444 error:
1445 health_code_update();
1446 return ret;
1447 }
1448
1449 /*
1450 * Enable the specified channel on to UST tracer for the UST session.
1451 */
1452 static int enable_ust_channel(struct ust_app *app,
1453 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1454 {
1455 int ret;
1456
1457 health_code_update();
1458
1459 pthread_mutex_lock(&app->sock_lock);
1460 ret = ustctl_enable(app->sock, ua_chan->obj);
1461 pthread_mutex_unlock(&app->sock_lock);
1462 if (ret < 0) {
1463 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1464 ERR("UST app channel %s enable failed for app (pid: %d) "
1465 "and session handle %d with ret %d",
1466 ua_chan->name, app->pid, ua_sess->handle, ret);
1467 } else {
1468 /*
1469 * This is normal behavior, an application can die during the
1470 * creation process. Don't report an error so the execution can
1471 * continue normally.
1472 */
1473 ret = 0;
1474 DBG3("UST app enable channel failed. Application is dead.");
1475 }
1476 goto error;
1477 }
1478
1479 ua_chan->enabled = 1;
1480
1481 DBG2("UST app channel %s enabled successfully for app (pid: %d)",
1482 ua_chan->name, app->pid);
1483
1484 error:
1485 health_code_update();
1486 return ret;
1487 }
1488
1489 /*
1490 * Enable the specified event on to UST tracer for the UST session.
1491 */
1492 static int enable_ust_event(struct ust_app *app,
1493 struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
1494 {
1495 int ret;
1496
1497 health_code_update();
1498
1499 pthread_mutex_lock(&app->sock_lock);
1500 ret = ustctl_enable(app->sock, ua_event->obj);
1501 pthread_mutex_unlock(&app->sock_lock);
1502 if (ret < 0) {
1503 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1504 ERR("UST app event %s enable failed for app (pid: %d) "
1505 "and session handle %d with ret %d",
1506 ua_event->attr.name, app->pid, ua_sess->handle, ret);
1507 } else {
1508 /*
1509 * This is normal behavior, an application can die during the
1510 * creation process. Don't report an error so the execution can
1511 * continue normally.
1512 */
1513 ret = 0;
1514 DBG3("UST app enable event failed. Application is dead.");
1515 }
1516 goto error;
1517 }
1518
1519 DBG2("UST app event %s enabled successfully for app (pid: %d)",
1520 ua_event->attr.name, app->pid);
1521
1522 error:
1523 health_code_update();
1524 return ret;
1525 }
1526
/*
 * Send channel and stream buffer to application.
 *
 * Each stream is removed from the channel's stream list and released on the
 * session daemon side once successfully sent. On success, the channel is
 * flagged as sent (ua_chan->is_sent).
 *
 * Return 0 on success, -ENOTCONN if the application exited during the
 * transfer, or another negative value on error. On error, streams not yet
 * sent remain in the channel's stream list.
 */
static int send_channel_pid_to_ust(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
{
	int ret;
	struct ust_app_stream *stream, *stmp;

	assert(app);
	assert(ua_sess);
	assert(ua_chan);

	health_code_update();

	DBG("UST app sending channel %s to UST app sock %d", ua_chan->name,
			app->sock);

	/* Send channel to the application. */
	ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
	if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
		ret = -ENOTCONN;	/* Caused by app exiting. */
		goto error;
	} else if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Send all streams to application. */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			ret = -ENOTCONN;	/* Caused by app exiting. */
			goto error;
		} else if (ret < 0) {
			goto error;
		}
		/* We don't need the stream anymore once sent to the tracer. */
		cds_list_del(&stream->list);
		delete_ust_app_stream(-1, stream, app);
	}
	/* Flag the channel that it is sent to the application. */
	ua_chan->is_sent = 1;

error:
	health_code_update();
	return ret;
}
1578
/*
 * Create the specified event onto the UST tracer for a UST session.
 *
 * Also applies the event's filter and exclusions, if any, and explicitly
 * enables the event on the tracer when it is marked enabled (events are
 * created disabled).
 *
 * Should be called with session mutex held.
 *
 * Return 0 on success (including when the application died during the
 * operation) or a negative error code.
 */
static
int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
{
	int ret = 0;

	health_code_update();

	/* Create UST event on tracer */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
			&ua_event->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Error ustctl create event %s for app pid: %d with ret %d",
					ua_event->attr.name, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app create event failed. Application is dead.");
		}
		goto error;
	}

	ua_event->handle = ua_event->obj->handle;

	DBG2("UST app event %s created successfully for pid:%d",
			ua_event->attr.name, app->pid);

	health_code_update();

	/* Set filter if one is present. */
	if (ua_event->filter) {
		ret = set_ust_event_filter(ua_event, app);
		if (ret < 0) {
			goto error;
		}
	}

	/* Set exclusions for the event */
	if (ua_event->exclusion) {
		ret = set_ust_event_exclusion(ua_event, app);
		if (ret < 0) {
			goto error;
		}
	}

	/* Events are created disabled; enable on the tracer when requested. */
	if (ua_event->enabled) {
		/*
		 * We now need to explicitly enable the event, since it
		 * is now disabled at creation.
		 */
		ret = enable_ust_event(app, ua_sess, ua_event);
		if (ret < 0) {
			/*
			 * If we hit an EPERM, something is wrong with our enable call. If
			 * we get an EEXIST, there is a problem on the tracer side since we
			 * just created it.
			 */
			switch (ret) {
			case -LTTNG_UST_ERR_PERM:
				/* Code flow problem */
				assert(0);
				/* fallthrough (only reachable with NDEBUG) */
			case -LTTNG_UST_ERR_EXIST:
				/* It's OK for our use case. */
				ret = 0;
				break;
			default:
				break;
			}
			goto error;
		}
	}

error:
	health_code_update();
	return ret;
}
1668
/*
 * Copy data between an UST app event and a LTT event.
 *
 * Shadow-copies the name, enabled state and tracer attributes, and makes
 * owned duplicates of the filter bytecode and exclusion data when present.
 * Allocation failures on filter/exclusion are best-effort: the copy
 * proceeds without the corresponding data.
 */
static void shadow_copy_event(struct ust_app_event *ua_event,
		struct ltt_ust_event *uevent)
{
	size_t exclusion_alloc_size;

	strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
	ua_event->name[sizeof(ua_event->name) - 1] = '\0';

	ua_event->enabled = uevent->enabled;

	/* Copy event attributes */
	memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));

	/* Copy filter bytecode */
	if (uevent->filter) {
		ua_event->filter = copy_filter_bytecode(uevent->filter);
		/* Filter might be NULL here in case of ENOMEM. */
	}

	/* Copy exclusion data */
	if (uevent->exclusion) {
		exclusion_alloc_size = sizeof(struct lttng_event_exclusion) +
				LTTNG_UST_SYM_NAME_LEN * uevent->exclusion->count;
		ua_event->exclusion = zmalloc(exclusion_alloc_size);
		if (ua_event->exclusion == NULL) {
			/* Best effort: continue without exclusion data. */
			PERROR("malloc");
		} else {
			memcpy(ua_event->exclusion, uevent->exclusion,
					exclusion_alloc_size);
		}
	}
}
1704
/*
 * Copy data between a UST app channel and a LTT channel.
 *
 * Copies the name, tracefile settings, tracer attributes, enabled state,
 * contexts and events from the tracing registry channel (uchan) into the
 * per-application channel (ua_chan). Events already present in ua_chan are
 * left untouched.
 */
static void shadow_copy_channel(struct ust_app_channel *ua_chan,
		struct ltt_ust_channel *uchan)
{
	struct lttng_ht_iter iter;
	struct ltt_ust_event *uevent;
	struct ltt_ust_context *uctx;
	struct ust_app_event *ua_event;

	DBG2("UST app shadow copy of channel %s started", ua_chan->name);

	strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
	ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';

	ua_chan->tracefile_size = uchan->tracefile_size;
	ua_chan->tracefile_count = uchan->tracefile_count;

	/* Copy event attributes since the layout is different. */
	ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
	ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
	ua_chan->attr.overwrite = uchan->attr.overwrite;
	ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
	ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
	ua_chan->attr.output = uchan->attr.output;
	/*
	 * Note that the attribute channel type is not set since the channel on the
	 * tracing registry side does not have this information.
	 */

	ua_chan->enabled = uchan->enabled;
	ua_chan->tracing_channel_id = uchan->id;

	/* Duplicate every context attached to the registry channel. */
	cds_list_for_each_entry(uctx, &uchan->ctx_list, list) {
		struct ust_app_ctx *ua_ctx = alloc_ust_app_ctx(&uctx->ctx);

		if (ua_ctx == NULL) {
			/* Best effort: skip this context on allocation failure. */
			continue;
		}
		lttng_ht_node_init_ulong(&ua_ctx->node,
				(unsigned long) ua_ctx->ctx.ctx);
		lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
		cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
	}

	/* Copy all events from ltt ust channel to ust app channel */
	cds_lfht_for_each_entry(uchan->events->ht, &iter.iter, uevent, node.node) {
		ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
				uevent->filter, uevent->attr.loglevel, uevent->exclusion);
		if (ua_event == NULL) {
			DBG2("UST event %s not found on shadow copy channel",
					uevent->attr.name);
			ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
			if (ua_event == NULL) {
				/* Best effort: skip this event on allocation failure. */
				continue;
			}
			shadow_copy_event(ua_event, uevent);
			add_unique_ust_app_event(ua_chan, ua_event);
		}
	}

	DBG3("UST app shadow copy of channel %s done", ua_chan->name);
}
1769
/*
 * Copy data between a UST app session and a regular LTT session.
 *
 * Initializes ua_sess from usess and app: identifiers, credentials, buffer
 * type, consumer output (a reference is taken), trace and shm paths, and a
 * shadow copy of every channel of the global UST domain. On error, the
 * consumer output reference is released.
 */
static void shadow_copy_session(struct ust_app_session *ua_sess,
		struct ltt_ust_session *usess, struct ust_app *app)
{
	struct lttng_ht_node_str *ua_chan_node;
	struct lttng_ht_iter iter;
	struct ltt_ust_channel *uchan;
	struct ust_app_channel *ua_chan;
	time_t rawtime;
	struct tm *timeinfo;
	char datetime[16];
	int ret;
	char tmp_shm_path[PATH_MAX];

	/* Get date and time for unique app path */
	time(&rawtime);
	timeinfo = localtime(&rawtime);
	strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);

	DBG2("Shadow copy of session handle %d", ua_sess->handle);

	ua_sess->tracing_id = usess->id;
	ua_sess->id = get_next_session_id();
	/* uid/gid taken from the application itself. */
	ua_sess->uid = app->uid;
	ua_sess->gid = app->gid;
	/* Effective uid/gid taken from the tracing session. */
	ua_sess->euid = usess->uid;
	ua_sess->egid = usess->gid;
	ua_sess->buffer_type = usess->buffer_type;
	ua_sess->bits_per_long = app->bits_per_long;

	/* There is only one consumer object per session possible. */
	consumer_output_get(usess->consumer);
	ua_sess->consumer = usess->consumer;

	ua_sess->output_traces = usess->output_traces;
	ua_sess->live_timer_interval = usess->live_timer_interval;
	copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
			&usess->metadata_attr);

	/* Build the trace path according to the buffering scheme. */
	switch (ua_sess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
				DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
				datetime);
		break;
	case LTTNG_BUFFER_PER_UID:
		ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
				DEFAULT_UST_TRACE_UID_PATH, ua_sess->uid, app->bits_per_long);
		break;
	default:
		assert(0);
		goto error;
	}
	if (ret < 0) {
		/* NOTE(review): message says asprintf but snprintf is used above. */
		PERROR("asprintf UST shadow copy session");
		assert(0);
		goto error;
	}

	/* Copy the shm paths, keeping them NUL-terminated. */
	strncpy(ua_sess->root_shm_path, usess->root_shm_path,
			sizeof(ua_sess->root_shm_path));
	ua_sess->root_shm_path[sizeof(ua_sess->root_shm_path) - 1] = '\0';
	strncpy(ua_sess->shm_path, usess->shm_path,
			sizeof(ua_sess->shm_path));
	ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
	if (ua_sess->shm_path[0]) {
		/* Append the per-app (or per-uid) subdirectory to the shm path. */
		switch (ua_sess->buffer_type) {
		case LTTNG_BUFFER_PER_PID:
			ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
					DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s",
					app->name, app->pid, datetime);
			break;
		case LTTNG_BUFFER_PER_UID:
			ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
					DEFAULT_UST_TRACE_UID_PATH,
					app->uid, app->bits_per_long);
			break;
		default:
			assert(0);
			goto error;
		}
		if (ret < 0) {
			PERROR("sprintf UST shadow copy session");
			assert(0);
			goto error;
		}
		strncat(ua_sess->shm_path, tmp_shm_path,
				sizeof(ua_sess->shm_path) - strlen(ua_sess->shm_path) - 1);
		ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
	}

	/* Iterate over all channels in global domain. */
	cds_lfht_for_each_entry(usess->domain_global.channels->ht, &iter.iter,
			uchan, node.node) {
		struct lttng_ht_iter uiter;

		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		if (ua_chan_node != NULL) {
			/* Channel already exists; continue. */
			continue;
		}

		DBG2("Channel %s not found on shadow session copy, creating it",
				uchan->name);
		ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
		if (ua_chan == NULL) {
			/* malloc failed FIXME: Might want to do handle ENOMEM .. */
			continue;
		}
		shadow_copy_channel(ua_chan, uchan);
		/*
		 * The concept of metadata channel does not exist on the tracing
		 * registry side of the session daemon so this can only be a per CPU
		 * channel and not metadata.
		 */
		ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;

		lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
	}
	return;

error:
	consumer_output_put(ua_sess->consumer);
}
1897
/*
 * Lookup session wrapper.
 *
 * Looks up, in the application's session hash table, the UST app session
 * matching the tracing session id of usess. The result is retrieved through
 * iter by the caller.
 */
static
void __lookup_session_by_app(struct ltt_ust_session *usess,
		struct ust_app *app, struct lttng_ht_iter *iter)
{
	/* Get right UST app session from app */
	lttng_ht_lookup(app->sessions, &usess->id, iter);
}
1908
1909 /*
1910 * Return ust app session from the app session hashtable using the UST session
1911 * id.
1912 */
1913 static struct ust_app_session *lookup_session_by_app(
1914 struct ltt_ust_session *usess, struct ust_app *app)
1915 {
1916 struct lttng_ht_iter iter;
1917 struct lttng_ht_node_u64 *node;
1918
1919 __lookup_session_by_app(usess, app, &iter);
1920 node = lttng_ht_iter_get_node_u64(&iter);
1921 if (node == NULL) {
1922 goto error;
1923 }
1924
1925 return caa_container_of(node, struct ust_app_session, node);
1926
1927 error:
1928 return NULL;
1929 }
1930
/*
 * Setup buffer registry per PID for the given session and application. If none
 * is found, a new one is created, added to the global registry and
 * initialized. If regp is valid, it's set with the newly created object.
 *
 * Return 0 on success or else a negative value.
 */
static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
		struct ust_app *app, struct buffer_reg_pid **regp)
{
	int ret = 0;
	struct buffer_reg_pid *reg_pid;

	assert(ua_sess);
	assert(app);

	rcu_read_lock();

	reg_pid = buffer_reg_pid_find(ua_sess->id);
	if (!reg_pid) {
		/*
		 * This is the create channel path meaning that if there is NO
		 * registry available, we have to create one for this session.
		 */
		ret = buffer_reg_pid_create(ua_sess->id, &reg_pid,
				ua_sess->root_shm_path, ua_sess->shm_path);
		if (ret < 0) {
			goto error;
		}
	} else {
		/* Registry already exists; nothing more to initialize. */
		goto end;
	}

	/* Initialize registry. */
	ret = ust_registry_session_init(&reg_pid->registry->reg.ust, app,
			app->bits_per_long, app->uint8_t_alignment,
			app->uint16_t_alignment, app->uint32_t_alignment,
			app->uint64_t_alignment, app->long_alignment,
			app->byte_order, app->version.major,
			app->version.minor, reg_pid->root_shm_path,
			reg_pid->shm_path,
			ua_sess->euid, ua_sess->egid);
	if (ret < 0) {
		/*
		 * reg_pid->registry->reg.ust is NULL upon error, so we need to
		 * destroy the buffer registry, because it is always expected
		 * that if the buffer registry can be found, its ust registry is
		 * non-NULL.
		 */
		buffer_reg_pid_destroy(reg_pid);
		goto error;
	}

	buffer_reg_pid_add(reg_pid);

	DBG3("UST app buffer registry per PID created successfully");

end:
	/* Hand the (found or newly created) registry to the caller. */
	if (regp) {
		*regp = reg_pid;
	}
error:
	rcu_read_unlock();
	return ret;
}
1996
/*
 * Setup buffer registry per UID for the given session and application. If none
 * is found, a new one is created, added to the global registry and
 * initialized. If regp is valid, it's set with the newly created object.
 *
 * Return 0 on success or else a negative value.
 */
static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
		struct ust_app_session *ua_sess,
		struct ust_app *app, struct buffer_reg_uid **regp)
{
	int ret = 0;
	struct buffer_reg_uid *reg_uid;

	assert(usess);
	assert(app);

	rcu_read_lock();

	reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
	if (!reg_uid) {
		/*
		 * This is the create channel path meaning that if there is NO
		 * registry available, we have to create one for this session.
		 */
		ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
				LTTNG_DOMAIN_UST, &reg_uid,
				ua_sess->root_shm_path, ua_sess->shm_path);
		if (ret < 0) {
			goto error;
		}
	} else {
		/* Registry already exists; nothing more to initialize. */
		goto end;
	}

	/* Initialize registry. */
	ret = ust_registry_session_init(&reg_uid->registry->reg.ust, NULL,
			app->bits_per_long, app->uint8_t_alignment,
			app->uint16_t_alignment, app->uint32_t_alignment,
			app->uint64_t_alignment, app->long_alignment,
			app->byte_order, app->version.major,
			app->version.minor, reg_uid->root_shm_path,
			reg_uid->shm_path, usess->uid, usess->gid);
	if (ret < 0) {
		/*
		 * reg_uid->registry->reg.ust is NULL upon error, so we need to
		 * destroy the buffer registry, because it is always expected
		 * that if the buffer registry can be found, its ust registry is
		 * non-NULL.
		 */
		buffer_reg_uid_destroy(reg_uid, NULL);
		goto error;
	}
	/* Add node to teardown list of the session. */
	cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);

	buffer_reg_uid_add(reg_uid);

	DBG3("UST app buffer registry per UID created successfully");
end:
	/* Hand the (found or newly created) registry to the caller. */
	if (regp) {
		*regp = reg_uid;
	}
error:
	rcu_read_unlock();
	return ret;
}
2064
/*
 * Create a session on the tracer side for the given app.
 *
 * On success, ua_sess_ptr is populated with the session pointer or else left
 * untouched. If the session was created, is_created is set to 1. On error,
 * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
 * be NULL.
 *
 * Returns 0 on success or else a negative code which is either -ENOMEM or
 * -ENOTCONN which is the default code if the ustctl_create_session fails.
 */
static int create_ust_app_session(struct ltt_ust_session *usess,
		struct ust_app *app, struct ust_app_session **ua_sess_ptr,
		int *is_created)
{
	int ret, created = 0;
	struct ust_app_session *ua_sess;

	assert(usess);
	assert(app);
	assert(ua_sess_ptr);

	health_code_update();

	/* Reuse an existing app session for this tracing session if any. */
	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
				app->pid, usess->id);
		ua_sess = alloc_ust_app_session(app);
		if (ua_sess == NULL) {
			/* Only malloc can failed so something is really wrong */
			ret = -ENOMEM;
			goto error;
		}
		shadow_copy_session(ua_sess, usess, app);
		created = 1;
	}

	/* Make sure the matching buffer registry (per-PID or per-UID) exists. */
	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		/* Init local registry. */
		ret = setup_buffer_reg_pid(ua_sess, app, NULL);
		if (ret < 0) {
			delete_ust_app_session(-1, ua_sess, app);
			goto error;
		}
		break;
	case LTTNG_BUFFER_PER_UID:
		/* Look for a global registry. If none exists, create one. */
		ret = setup_buffer_reg_uid(usess, ua_sess, app, NULL);
		if (ret < 0) {
			delete_ust_app_session(-1, ua_sess, app);
			goto error;
		}
		break;
	default:
		assert(0);
		ret = -EINVAL;
		goto error;
	}

	health_code_update();

	/* A handle of -1 means the tracer-side session was never created. */
	if (ua_sess->handle == -1) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_create_session(app->sock);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("Creating session for app pid %d with ret %d",
						app->pid, ret);
			} else {
				DBG("UST app creating session failed. Application is dead");
				/*
				 * This is normal behavior, an application can die during the
				 * creation process. Don't report an error so the execution can
				 * continue normally. This will get flagged ENOTCONN and the
				 * caller will handle it.
				 */
				ret = 0;
			}
			delete_ust_app_session(-1, ua_sess, app);
			if (ret != -ENOMEM) {
				/*
				 * Tracer is probably gone or got an internal error so let's
				 * behave like it will soon unregister or not usable.
				 */
				ret = -ENOTCONN;
			}
			goto error;
		}

		ua_sess->handle = ret;

		/* Add ust app session to app's HT */
		lttng_ht_node_init_u64(&ua_sess->node,
				ua_sess->tracing_id);
		lttng_ht_add_unique_u64(app->sessions, &ua_sess->node);
		lttng_ht_node_init_ulong(&ua_sess->ust_objd_node, ua_sess->handle);
		lttng_ht_add_unique_ulong(app->ust_sessions_objd,
				&ua_sess->ust_objd_node);

		DBG2("UST app session created successfully with handle %d", ret);
	}

	*ua_sess_ptr = ua_sess;
	if (is_created) {
		*is_created = created;
	}

	/* Everything went well. */
	ret = 0;

error:
	health_code_update();
	return ret;
}
2182
2183 /*
2184 * Match function for a hash table lookup of ust_app_ctx.
2185 *
2186 * It matches an ust app context based on the context type and, in the case
2187 * of perf counters, their name.
2188 */
2189 static int ht_match_ust_app_ctx(struct cds_lfht_node *node, const void *_key)
2190 {
2191 struct ust_app_ctx *ctx;
2192 const struct lttng_ust_context_attr *key;
2193
2194 assert(node);
2195 assert(_key);
2196
2197 ctx = caa_container_of(node, struct ust_app_ctx, node.node);
2198 key = _key;
2199
2200 /* Context type */
2201 if (ctx->ctx.ctx != key->ctx) {
2202 goto no_match;
2203 }
2204
2205 switch(key->ctx) {
2206 case LTTNG_UST_CONTEXT_PERF_THREAD_COUNTER:
2207 if (strncmp(key->u.perf_counter.name,
2208 ctx->ctx.u.perf_counter.name,
2209 sizeof(key->u.perf_counter.name))) {
2210 goto no_match;
2211 }
2212 break;
2213 case LTTNG_UST_CONTEXT_APP_CONTEXT:
2214 if (strcmp(key->u.app_ctx.provider_name,
2215 ctx->ctx.u.app_ctx.provider_name) ||
2216 strcmp(key->u.app_ctx.ctx_name,
2217 ctx->ctx.u.app_ctx.ctx_name)) {
2218 goto no_match;
2219 }
2220 break;
2221 default:
2222 break;
2223 }
2224
2225 /* Match. */
2226 return 1;
2227
2228 no_match:
2229 return 0;
2230 }
2231
2232 /*
2233 * Lookup for an ust app context from an lttng_ust_context.
2234 *
2235 * Must be called while holding RCU read side lock.
2236 * Return an ust_app_ctx object or NULL on error.
2237 */
2238 static
2239 struct ust_app_ctx *find_ust_app_context(struct lttng_ht *ht,
2240 struct lttng_ust_context_attr *uctx)
2241 {
2242 struct lttng_ht_iter iter;
2243 struct lttng_ht_node_ulong *node;
2244 struct ust_app_ctx *app_ctx = NULL;
2245
2246 assert(uctx);
2247 assert(ht);
2248
2249 /* Lookup using the lttng_ust_context_type and a custom match fct. */
2250 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) uctx->ctx, lttng_ht_seed),
2251 ht_match_ust_app_ctx, uctx, &iter.iter);
2252 node = lttng_ht_iter_get_node_ulong(&iter);
2253 if (!node) {
2254 goto end;
2255 }
2256
2257 app_ctx = caa_container_of(node, struct ust_app_ctx, node);
2258
2259 end:
2260 return app_ctx;
2261 }
2262
/*
 * Create a context for the channel on the tracer.
 *
 * The context is first registered locally (hash table keyed on the context
 * type plus an ordered list), then created on the tracer side.
 *
 * Called with UST app session lock held and a RCU read side lock.
 *
 * Return 0 on success, -EEXIST if the context is already present on the
 * channel, or a negative value on error.
 */
static
int create_ust_app_channel_context(struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan,
		struct lttng_ust_context_attr *uctx,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_ctx *ua_ctx;

	DBG2("UST app adding context to channel %s", ua_chan->name);

	/* Reject duplicate contexts on the same channel. */
	ua_ctx = find_ust_app_context(ua_chan->ctx, uctx);
	if (ua_ctx) {
		ret = -EEXIST;
		goto error;
	}

	ua_ctx = alloc_ust_app_ctx(uctx);
	if (ua_ctx == NULL) {
		/* malloc failed */
		ret = -1;
		goto error;
	}

	/* Publish in the channel's context hash table and ordered list. */
	lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
	lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
	cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);

	/*
	 * NOTE(review): on tracer-side failure the context stays registered in
	 * ua_chan->ctx/ctx_list; presumably it is reclaimed with the channel
	 * teardown — confirm.
	 */
	ret = create_ust_channel_context(ua_chan, ua_ctx, app);
	if (ret < 0) {
		goto error;
	}

error:
	return ret;
}
2304
2305 /*
2306 * Enable on the tracer side a ust app event for the session and channel.
2307 *
2308 * Called with UST app session lock held.
2309 */
2310 static
2311 int enable_ust_app_event(struct ust_app_session *ua_sess,
2312 struct ust_app_event *ua_event, struct ust_app *app)
2313 {
2314 int ret;
2315
2316 ret = enable_ust_event(app, ua_sess, ua_event);
2317 if (ret < 0) {
2318 goto error;
2319 }
2320
2321 ua_event->enabled = 1;
2322
2323 error:
2324 return ret;
2325 }
2326
2327 /*
2328 * Disable on the tracer side a ust app event for the session and channel.
2329 */
2330 static int disable_ust_app_event(struct ust_app_session *ua_sess,
2331 struct ust_app_event *ua_event, struct ust_app *app)
2332 {
2333 int ret;
2334
2335 ret = disable_ust_event(app, ua_sess, ua_event);
2336 if (ret < 0) {
2337 goto error;
2338 }
2339
2340 ua_event->enabled = 0;
2341
2342 error:
2343 return ret;
2344 }
2345
2346 /*
2347 * Lookup ust app channel for session and disable it on the tracer side.
2348 */
2349 static
2350 int disable_ust_app_channel(struct ust_app_session *ua_sess,
2351 struct ust_app_channel *ua_chan, struct ust_app *app)
2352 {
2353 int ret;
2354
2355 ret = disable_ust_channel(app, ua_sess, ua_chan);
2356 if (ret < 0) {
2357 goto error;
2358 }
2359
2360 ua_chan->enabled = 0;
2361
2362 error:
2363 return ret;
2364 }
2365
2366 /*
2367 * Lookup ust app channel for session and enable it on the tracer side. This
2368 * MUST be called with a RCU read side lock acquired.
2369 */
2370 static int enable_ust_app_channel(struct ust_app_session *ua_sess,
2371 struct ltt_ust_channel *uchan, struct ust_app *app)
2372 {
2373 int ret = 0;
2374 struct lttng_ht_iter iter;
2375 struct lttng_ht_node_str *ua_chan_node;
2376 struct ust_app_channel *ua_chan;
2377
2378 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
2379 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
2380 if (ua_chan_node == NULL) {
2381 DBG2("Unable to find channel %s in ust session id %" PRIu64,
2382 uchan->name, ua_sess->tracing_id);
2383 goto error;
2384 }
2385
2386 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
2387
2388 ret = enable_ust_channel(app, ua_sess, ua_chan);
2389 if (ret < 0) {
2390 goto error;
2391 }
2392
2393 error:
2394 return ret;
2395 }
2396
/*
 * Ask the consumer to create a channel and get it if successful.
 *
 * The consumer socket is chosen from the application bitness. One fd is
 * reserved for the channel itself and DEFAULT_UST_STREAM_FD_NUM fds per
 * expected stream; all reservations are released on the error paths.
 *
 * Return 0 on success or else a negative value.
 */
static int do_consumer_create_channel(struct ltt_ust_session *usess,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
		int bitness, struct ust_registry_session *registry)
{
	int ret;
	unsigned int nb_fd = 0;
	struct consumer_socket *socket;

	assert(usess);
	assert(ua_sess);
	assert(ua_chan);
	assert(registry);

	rcu_read_lock();
	health_code_update();

	/* Get the right consumer socket for the application. */
	socket = consumer_find_socket_by_bitness(bitness, usess->consumer);
	if (!socket) {
		ret = -EINVAL;
		goto error;
	}

	health_code_update();

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create channel");
		goto error;
	}

	/*
	 * Ask consumer to create channel. The consumer will return the number of
	 * stream we have to expect.
	 */
	ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
			registry);
	if (ret < 0) {
		goto error_ask;
	}

	/*
	 * Compute the number of fd needed before receiving them. It must be 2 per
	 * stream (2 being the default value here).
	 */
	nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;

	/* Reserve the amount of file descriptor we need. */
	ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create channel");
		goto error_fd_get_stream;
	}

	health_code_update();

	/*
	 * Now get the channel from the consumer. This call will populate the stream
	 * list of that channel and set the ust objects.
	 */
	if (usess->consumer->enabled) {
		ret = ust_consumer_get_channel(socket, ua_chan);
		if (ret < 0) {
			goto error_destroy;
		}
	}

	rcu_read_unlock();
	return 0;

error_destroy:
	/* Release the per-stream fds reserved above; none were consumed. */
	lttng_fd_put(LTTNG_FD_APPS, nb_fd);
error_fd_get_stream:
	/*
	 * Initiate a destroy channel on the consumer since we had an error
	 * handling it on our side. The return value is of no importance since we
	 * already have a ret value set by the previous error that we need to
	 * return.
	 */
	(void) ust_consumer_destroy_channel(socket, ua_chan);
error_ask:
	/* Give back the channel fd reserved at the start. */
	lttng_fd_put(LTTNG_FD_APPS, 1);
error:
	health_code_update();
	rcu_read_unlock();
	return ret;
}
2490
2491 /*
2492 * Duplicate the ust data object of the ust app stream and save it in the
2493 * buffer registry stream.
2494 *
2495 * Return 0 on success or else a negative value.
2496 */
2497 static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
2498 struct ust_app_stream *stream)
2499 {
2500 int ret;
2501
2502 assert(reg_stream);
2503 assert(stream);
2504
2505 /* Reserve the amount of file descriptor we need. */
2506 ret = lttng_fd_get(LTTNG_FD_APPS, 2);
2507 if (ret < 0) {
2508 ERR("Exhausted number of available FD upon duplicate stream");
2509 goto error;
2510 }
2511
2512 /* Duplicate object for stream once the original is in the registry. */
2513 ret = ustctl_duplicate_ust_object_data(&stream->obj,
2514 reg_stream->obj.ust);
2515 if (ret < 0) {
2516 ERR("Duplicate stream obj from %p to %p failed with ret %d",
2517 reg_stream->obj.ust, stream->obj, ret);
2518 lttng_fd_put(LTTNG_FD_APPS, 2);
2519 goto error;
2520 }
2521 stream->handle = stream->obj->handle;
2522
2523 error:
2524 return ret;
2525 }
2526
/*
 * Duplicate the ust data object of the ust app channel and save it in the
 * buffer registry channel.
 *
 * Return 0 on success or else a negative value.
 */
static int duplicate_channel_object(struct buffer_reg_channel *reg_chan,
		struct ust_app_channel *ua_chan)
{
	int ret;

	assert(reg_chan);
	assert(ua_chan);

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon duplicate channel");
		goto error_fd_get;
	}

	/* Duplicate object for channel once the original is in the registry. */
	ret = ustctl_duplicate_ust_object_data(&ua_chan->obj, reg_chan->obj.ust);
	if (ret < 0) {
		ERR("Duplicate channel obj from %p to %p failed with ret: %d",
				reg_chan->obj.ust, ua_chan->obj, ret);
		goto error;
	}
	ua_chan->handle = ua_chan->obj->handle;

	return 0;

error:
	/* Release the fd reserved above. */
	lttng_fd_put(LTTNG_FD_APPS, 1);
error_fd_get:
	return ret;
}
2564
/*
 * For a given channel buffer registry, setup all streams of the given ust
 * application channel.
 *
 * Ownership of each stream's ust object is transferred to a new buffer
 * registry stream; the ust app stream objects themselves are then deleted.
 *
 * Return 0 on success or else a negative value.
 */
static int setup_buffer_reg_streams(struct buffer_reg_channel *reg_chan,
		struct ust_app_channel *ua_chan,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_stream *stream, *stmp;

	assert(reg_chan);
	assert(ua_chan);

	DBG2("UST app setup buffer registry stream");

	/* Send all streams to application. */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		struct buffer_reg_stream *reg_stream;

		ret = buffer_reg_stream_create(&reg_stream);
		if (ret < 0) {
			/* Streams already moved to the registry stay there. */
			goto error;
		}

		/*
		 * Keep original pointer and nullify it in the stream so the delete
		 * stream call does not release the object.
		 */
		reg_stream->obj.ust = stream->obj;
		stream->obj = NULL;
		buffer_reg_stream_add(reg_stream, reg_chan);

		/* We don't need the streams anymore. */
		cds_list_del(&stream->list);
		delete_ust_app_stream(-1, stream, app);
	}

error:
	return ret;
}
2608
/*
 * Create a buffer registry channel for the given session registry and
 * application channel object. If regp pointer is valid, it's set with the
 * created object. Important, the created object is NOT added to the session
 * registry hash table.
 *
 * Return 0 on success else a negative value.
 */
static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
		struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp)
{
	int ret;
	struct buffer_reg_channel *reg_chan = NULL;

	assert(reg_sess);
	assert(ua_chan);

	DBG2("UST app creating buffer registry channel for %s", ua_chan->name);

	/* Create buffer registry channel. */
	ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &reg_chan);
	if (ret < 0) {
		goto error_create;
	}
	assert(reg_chan);
	/* Mirror the consumer key and buffer attributes of the app channel. */
	reg_chan->consumer_key = ua_chan->key;
	reg_chan->subbuf_size = ua_chan->attr.subbuf_size;
	reg_chan->num_subbuf = ua_chan->attr.num_subbuf;

	/* Create and add a channel registry to session. */
	ret = ust_registry_channel_add(reg_sess->reg.ust,
			ua_chan->tracing_channel_id);
	if (ret < 0) {
		goto error;
	}
	buffer_reg_channel_add(reg_sess, reg_chan);

	if (regp) {
		*regp = reg_chan;
	}

	return 0;

error:
	/* Safe because the registry channel object was not added to any HT. */
	buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
error_create:
	return ret;
}
2658
2659 /*
2660 * Setup buffer registry channel for the given session registry and application
2661 * channel object. If regp pointer is valid, it's set with the created object.
2662 *
2663 * Return 0 on success else a negative value.
2664 */
2665 static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
2666 struct ust_app_channel *ua_chan, struct buffer_reg_channel *reg_chan,
2667 struct ust_app *app)
2668 {
2669 int ret;
2670
2671 assert(reg_sess);
2672 assert(reg_chan);
2673 assert(ua_chan);
2674 assert(ua_chan->obj);
2675
2676 DBG2("UST app setup buffer registry channel for %s", ua_chan->name);
2677
2678 /* Setup all streams for the registry. */
2679 ret = setup_buffer_reg_streams(reg_chan, ua_chan, app);
2680 if (ret < 0) {
2681 goto error;
2682 }
2683
2684 reg_chan->obj.ust = ua_chan->obj;
2685 ua_chan->obj = NULL;
2686
2687 return 0;
2688
2689 error:
2690 buffer_reg_channel_remove(reg_sess, reg_chan);
2691 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2692 return ret;
2693 }
2694
2695 /*
2696 * Send buffer registry channel to the application.
2697 *
2698 * Return 0 on success else a negative value.
2699 */
2700 static int send_channel_uid_to_ust(struct buffer_reg_channel *reg_chan,
2701 struct ust_app *app, struct ust_app_session *ua_sess,
2702 struct ust_app_channel *ua_chan)
2703 {
2704 int ret;
2705 struct buffer_reg_stream *reg_stream;
2706
2707 assert(reg_chan);
2708 assert(app);
2709 assert(ua_sess);
2710 assert(ua_chan);
2711
2712 DBG("UST app sending buffer registry channel to ust sock %d", app->sock);
2713
2714 ret = duplicate_channel_object(reg_chan, ua_chan);
2715 if (ret < 0) {
2716 goto error;
2717 }
2718
2719 /* Send channel to the application. */
2720 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
2721 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
2722 ret = -ENOTCONN; /* Caused by app exiting. */
2723 goto error;
2724 } else if (ret < 0) {
2725 goto error;
2726 }
2727
2728 health_code_update();
2729
2730 /* Send all streams to application. */
2731 pthread_mutex_lock(&reg_chan->stream_list_lock);
2732 cds_list_for_each_entry(reg_stream, &reg_chan->streams, lnode) {
2733 struct ust_app_stream stream;
2734
2735 ret = duplicate_stream_object(reg_stream, &stream);
2736 if (ret < 0) {
2737 goto error_stream_unlock;
2738 }
2739
2740 ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
2741 if (ret < 0) {
2742 (void) release_ust_app_stream(-1, &stream, app);
2743 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
2744 ret = -ENOTCONN; /* Caused by app exiting. */
2745 goto error_stream_unlock;
2746 } else if (ret < 0) {
2747 goto error_stream_unlock;
2748 }
2749 goto error_stream_unlock;
2750 }
2751
2752 /*
2753 * The return value is not important here. This function will output an
2754 * error if needed.
2755 */
2756 (void) release_ust_app_stream(-1, &stream, app);
2757 }
2758 ua_chan->is_sent = 1;
2759
2760 error_stream_unlock:
2761 pthread_mutex_unlock(&reg_chan->stream_list_lock);
2762 error:
2763 return ret;
2764 }
2765
/*
 * Create and send to the application the created buffers with per UID buffers.
 *
 * The per-UID registry channel is created on first use; subsequent
 * applications sharing the same uid/bitness reuse the existing registry
 * channel and only get the buffers sent to them.
 *
 * Return 0 on success else a negative value.
 */
static int create_channel_per_uid(struct ust_app *app,
		struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct buffer_reg_uid *reg_uid;
	struct buffer_reg_channel *reg_chan;

	assert(app);
	assert(usess);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app creating channel %s with per UID buffers", ua_chan->name);

	reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
	/*
	 * The session creation handles the creation of this global registry
	 * object. If none can be find, there is a code flow problem or a
	 * teardown race.
	 */
	assert(reg_uid);

	reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
			reg_uid);
	if (!reg_chan) {
		/* Create the buffer registry channel object. */
		ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &reg_chan);
		if (ret < 0) {
			ERR("Error creating the UST channel \"%s\" registry instance",
				ua_chan->name);
			goto error;
		}
		assert(reg_chan);

		/*
		 * Create the buffers on the consumer side. This call populates the
		 * ust app channel object with all streams and data object.
		 */
		ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
				app->bits_per_long, reg_uid->registry->reg.ust);
		if (ret < 0) {
			ERR("Error creating UST channel \"%s\" on the consumer daemon",
				ua_chan->name);

			/*
			 * Let's remove the previously created buffer registry channel so
			 * it's not visible anymore in the session registry.
			 */
			ust_registry_channel_del_free(reg_uid->registry->reg.ust,
					ua_chan->tracing_channel_id);
			buffer_reg_channel_remove(reg_uid->registry, reg_chan);
			buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
			goto error;
		}

		/*
		 * Setup the streams and add it to the session registry.
		 * On failure, setup_buffer_reg_channel removes and destroys the
		 * registry channel itself.
		 */
		ret = setup_buffer_reg_channel(reg_uid->registry,
				ua_chan, reg_chan, app);
		if (ret < 0) {
			ERR("Error setting up UST channel \"%s\"",
				ua_chan->name);
			goto error;
		}

	}

	/* Send buffers to the application. */
	ret = send_channel_uid_to_ust(reg_chan, app, ua_sess, ua_chan);
	if (ret < 0) {
		if (ret != -ENOTCONN) {
			ERR("Error sending channel to application");
		}
		goto error;
	}

error:
	return ret;
}
2852
/*
 * Create and send to the application the created buffers with per PID buffers.
 *
 * Return 0 on success else a negative value.
 */
static int create_channel_per_pid(struct ust_app *app,
		struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct ust_registry_session *registry;

	assert(app);
	assert(usess);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app creating channel %s with per PID buffers", ua_chan->name);

	rcu_read_lock();

	registry = get_session_registry(ua_sess);
	assert(registry);

	/* Create and add a new channel registry to session. */
	ret = ust_registry_channel_add(registry, ua_chan->key);
	if (ret < 0) {
		ERR("Error creating the UST channel \"%s\" registry instance",
			ua_chan->name);
		goto error;
	}

	/*
	 * Create and get channel on the consumer side.
	 * NOTE(review): on failure the channel registry added above is not
	 * removed here; presumably the per-pid registry is torn down with the
	 * app session — confirm.
	 */
	ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
			app->bits_per_long, registry);
	if (ret < 0) {
		ERR("Error creating UST channel \"%s\" on the consumer daemon",
			ua_chan->name);
		goto error;
	}

	ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
	if (ret < 0) {
		if (ret != -ENOTCONN) {
			ERR("Error sending channel to application");
		}
		goto error;
	}

error:
	rcu_read_unlock();
	return ret;
}
2906
/*
 * From an already allocated ust app channel, create the channel buffers if
 * need and send it to the application. This MUST be called with a RCU read
 * side lock acquired.
 *
 * Dispatches on the session buffer type (per-UID or per-PID), then registers
 * the channel's ust object descriptor in the app's ust_objd hash table.
 *
 * Return 0 on success or else a negative value. Returns -ENOTCONN if
 * the application exited concurrently.
 */
static int do_create_channel(struct ust_app *app,
		struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;

	assert(app);
	assert(usess);
	assert(ua_sess);
	assert(ua_chan);

	/* Handle buffer type before sending the channel to the application. */
	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		ret = create_channel_per_uid(app, usess, ua_sess, ua_chan);
		if (ret < 0) {
			goto error;
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		ret = create_channel_per_pid(app, usess, ua_sess, ua_chan);
		if (ret < 0) {
			goto error;
		}
		break;
	}
	default:
		assert(0);
		ret = -EINVAL;
		goto error;
	}

	/* Initialize ust objd object using the received handle and add it. */
	lttng_ht_node_init_ulong(&ua_chan->ust_objd_node, ua_chan->handle);
	lttng_ht_add_unique_ulong(app->ust_objd, &ua_chan->ust_objd_node);

	/* If channel is not enabled, disable it on the tracer */
	if (!ua_chan->enabled) {
		ret = disable_ust_channel(app, ua_sess, ua_chan);
		if (ret < 0) {
			goto error;
		}
	}

error:
	return ret;
}
2965
/*
 * Create UST app channel and create it on the tracer. Set ua_chanp of the
 * newly created channel if not NULL.
 *
 * If the channel already exists in the app session, it is returned as-is
 * (idempotent behavior; 0 is returned).
 *
 * Called with UST app session lock and RCU read-side lock held.
 *
 * Return 0 on success or else a negative value. Returns -ENOTCONN if
 * the application exited concurrently.
 */
static int create_ust_app_channel(struct ust_app_session *ua_sess,
		struct ltt_ust_channel *uchan, struct ust_app *app,
		enum lttng_ust_chan_type type, struct ltt_ust_session *usess,
		struct ust_app_channel **ua_chanp)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app_channel *ua_chan;

	/* Lookup channel in the ust app session */
	lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
	ua_chan_node = lttng_ht_iter_get_node_str(&iter);
	if (ua_chan_node != NULL) {
		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
		goto end;
	}

	ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
	if (ua_chan == NULL) {
		/* Only malloc can fail here */
		ret = -ENOMEM;
		goto error_alloc;
	}
	shadow_copy_channel(ua_chan, uchan);

	/* Set channel type. */
	ua_chan->attr.type = type;

	ret = do_create_channel(app, usess, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

	DBG2("UST app create channel %s for PID %d completed", ua_chan->name,
			app->pid);

	/* Only add the channel if successful on the tracer side. */
	lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);

end:
	if (ua_chanp) {
		*ua_chanp = ua_chan;
	}

	/* Everything went well. */
	return 0;

error:
	/* Only notify the tracer side if the channel was actually sent to it. */
	delete_ust_app_channel(ua_chan->is_sent ? app->sock : -1, ua_chan, app);
error_alloc:
	return ret;
}
3028
/*
 * Create UST app event and create it on the tracer side.
 *
 * Called with ust app session mutex held.
 *
 * Return 0 on success, -EEXIST if the event already exists for this channel,
 * or a negative value on error.
 */
static
int create_ust_app_event(struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan, struct ltt_ust_event *uevent,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_event *ua_event;

	/* Get event node */
	ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
			uevent->filter, uevent->attr.loglevel, uevent->exclusion);
	if (ua_event != NULL) {
		ret = -EEXIST;
		goto end;
	}

	/* Does not exist so create one */
	ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
	if (ua_event == NULL) {
		/* Only malloc can failed so something is really wrong */
		ret = -ENOMEM;
		goto end;
	}
	shadow_copy_event(ua_event, uevent);

	/* Create it on the tracer side */
	ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
	if (ret < 0) {
		/* Not found previously means that it does not exist on the tracer */
		assert(ret != -LTTNG_UST_ERR_EXIST);
		goto error;
	}

	/* Publish in the channel's event hash table. */
	add_unique_ust_app_event(ua_chan, ua_event);

	DBG2("UST app create event %s for PID %d completed", ua_event->name,
			app->pid);

end:
	return ret;

error:
	/* Valid. Calling here is already in a read side lock */
	delete_ust_app_event(-1, ua_event, app);
	return ret;
}
3080
/*
 * Create UST metadata and open it on the tracer side.
 *
 * The registry lock is held for the whole operation so the metadata key
 * assignment and the consumer ask/setup sequence appear atomic to readers.
 * The local metadata channel object is always deleted before returning; the
 * consumer keeps ownership of the actual metadata channel.
 *
 * Called with UST app session lock held and RCU read side lock.
 */
static int create_ust_app_metadata(struct ust_app_session *ua_sess,
		struct ust_app *app, struct consumer_output *consumer)
{
	int ret = 0;
	struct ust_app_channel *metadata;
	struct consumer_socket *socket;
	struct ust_registry_session *registry;

	assert(ua_sess);
	assert(app);
	assert(consumer);

	registry = get_session_registry(ua_sess);
	assert(registry);

	pthread_mutex_lock(&registry->lock);

	/* Metadata already exists for this registry or it was closed previously */
	if (registry->metadata_key || registry->metadata_closed) {
		ret = 0;
		goto error;
	}

	/* Allocate UST metadata */
	metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, NULL);
	if (!metadata) {
		/* malloc() failed */
		ret = -ENOMEM;
		goto error;
	}

	memcpy(&metadata->attr, &ua_sess->metadata_attr, sizeof(metadata->attr));

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create metadata");
		goto error;
	}

	/* Get the right consumer socket for the application. */
	socket = consumer_find_socket_by_bitness(app->bits_per_long, consumer);
	if (!socket) {
		ret = -EINVAL;
		goto error_consumer;
	}

	/*
	 * Keep metadata key so we can identify it on the consumer side. Assign it
	 * to the registry *before* we ask the consumer so we avoid the race of the
	 * consumer requesting the metadata and the ask_channel call on our side
	 * did not returned yet.
	 */
	registry->metadata_key = metadata->key;

	/*
	 * Ask the metadata channel creation to the consumer. The metadata object
	 * will be created by the consumer and kept their. However, the stream is
	 * never added or monitored until we do a first push metadata to the
	 * consumer.
	 */
	ret = ust_consumer_ask_channel(ua_sess, metadata, consumer, socket,
			registry);
	if (ret < 0) {
		/* Nullify the metadata key so we don't try to close it later on. */
		registry->metadata_key = 0;
		goto error_consumer;
	}

	/*
	 * The setup command will make the metadata stream be sent to the relayd,
	 * if applicable, and the thread managing the metadatas. This is important
	 * because after this point, if an error occurs, the only way the stream
	 * can be deleted is to be monitored in the consumer.
	 */
	ret = consumer_setup_metadata(socket, metadata->key);
	if (ret < 0) {
		/* Nullify the metadata key so we don't try to close it later on. */
		registry->metadata_key = 0;
		goto error_consumer;
	}

	DBG2("UST metadata with key %" PRIu64 " created for app pid %d",
			metadata->key, app->pid);

error_consumer:
	/* Release the channel fd and the local object in all cases. */
	lttng_fd_put(LTTNG_FD_APPS, 1);
	delete_ust_app_channel(-1, metadata, app);
error:
	pthread_mutex_unlock(&registry->lock);
	return ret;
}
3178
3179 /*
3180 * Return ust app pointer or NULL if not found. RCU read side lock MUST be
3181 * acquired before calling this function.
3182 */
3183 struct ust_app *ust_app_find_by_pid(pid_t pid)
3184 {
3185 struct ust_app *app = NULL;
3186 struct lttng_ht_node_ulong *node;
3187 struct lttng_ht_iter iter;
3188
3189 lttng_ht_lookup(ust_app_ht, (void *)((unsigned long) pid), &iter);
3190 node = lttng_ht_iter_get_node_ulong(&iter);
3191 if (node == NULL) {
3192 DBG2("UST app no found with pid %d", pid);
3193 goto error;
3194 }
3195
3196 DBG2("Found UST app by pid %d", pid);
3197
3198 app = caa_container_of(node, struct ust_app, pid_n);
3199
3200 error:
3201 return app;
3202 }
3203
/*
 * Allocate and init an UST app object using the registration information and
 * the command socket. This is called when the command socket connects to the
 * session daemon.
 *
 * The object is returned on success or else NULL.
 */
struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
{
	struct ust_app *lta = NULL;

	assert(msg);
	assert(sock >= 0);

	DBG3("UST app creating application for socket %d", sock);

	/* Refuse registration when no consumerd matches the app's bitness. */
	if ((msg->bits_per_long == 64 &&
				(uatomic_read(&ust_consumerd64_fd) == -EINVAL))
			|| (msg->bits_per_long == 32 &&
				(uatomic_read(&ust_consumerd32_fd) == -EINVAL))) {
		ERR("Registration failed: application \"%s\" (pid: %d) has "
				"%d-bit long, but no consumerd for this size is available.\n",
				msg->name, msg->pid, msg->bits_per_long);
		goto error;
	}

	lta = zmalloc(sizeof(struct ust_app));
	if (lta == NULL) {
		PERROR("malloc");
		goto error;
	}

	/* Identity of the registering process. */
	lta->ppid = msg->ppid;
	lta->uid = msg->uid;
	lta->gid = msg->gid;

	/* ABI description used when interpreting the app's trace data. */
	lta->bits_per_long = msg->bits_per_long;
	lta->uint8_t_alignment = msg->uint8_t_alignment;
	lta->uint16_t_alignment = msg->uint16_t_alignment;
	lta->uint32_t_alignment = msg->uint32_t_alignment;
	lta->uint64_t_alignment = msg->uint64_t_alignment;
	lta->long_alignment = msg->long_alignment;
	lta->byte_order = msg->byte_order;

	lta->v_major = msg->major;
	lta->v_minor = msg->minor;
	/*
	 * NOTE(review): the lttng_ht_new() results are not checked for NULL
	 * here; presumably later users would crash on allocation failure —
	 * confirm whether this is acceptable or should bail out.
	 */
	lta->sessions = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	lta->ust_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	lta->ust_sessions_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	lta->notify_sock = -1;

	/* Copy name and make sure it's NULL terminated. */
	strncpy(lta->name, msg->name, sizeof(lta->name));
	lta->name[UST_APP_PROCNAME_LEN] = '\0';

	/*
	 * Before this can be called, when receiving the registration information,
	 * the application compatibility is checked. So, at this point, the
	 * application can work with this session daemon.
	 */
	lta->compatible = 1;

	lta->pid = msg->pid;
	lttng_ht_node_init_ulong(&lta->pid_n, (unsigned long) lta->pid);
	lta->sock = sock;
	pthread_mutex_init(&lta->sock_lock, NULL);
	lttng_ht_node_init_ulong(&lta->sock_n, (unsigned long) lta->sock);

	CDS_INIT_LIST_HEAD(&lta->teardown_head);
error:
	return lta;
}
3276
/*
 * For a given application object, add it to every hash table.
 *
 * The caller must have completed the notify socket registration first
 * (app->notify_sock must be valid). Called from the registration path; on
 * PID reuse the previous entry is replaced.
 */
void ust_app_add(struct ust_app *app)
{
	assert(app);
	assert(app->notify_sock >= 0);

	rcu_read_lock();

	/*
	 * On a re-registration, we want to kick out the previous registration of
	 * that pid
	 */
	lttng_ht_add_replace_ulong(ust_app_ht, &app->pid_n);

	/*
	 * The socket _should_ be unique until _we_ call close. So, a add_unique
	 * for the ust_app_ht_by_sock is used which asserts fail if the entry was
	 * already in the table.
	 */
	lttng_ht_add_unique_ulong(ust_app_ht_by_sock, &app->sock_n);

	/* Add application to the notify socket hash table. */
	lttng_ht_node_init_ulong(&app->notify_sock_n, app->notify_sock);
	lttng_ht_add_unique_ulong(ust_app_ht_by_notify_sock, &app->notify_sock_n);

	DBG("App registered with pid:%d ppid:%d uid:%d gid:%d sock:%d name:%s "
			"notify_sock:%d (version %d.%d)", app->pid, app->ppid, app->uid,
			app->gid, app->sock, app->name, app->notify_sock, app->v_major,
			app->v_minor);

	rcu_read_unlock();
}
3311
3312 /*
3313 * Set the application version into the object.
3314 *
3315 * Return 0 on success else a negative value either an errno code or a
3316 * LTTng-UST error code.
3317 */
3318 int ust_app_version(struct ust_app *app)
3319 {
3320 int ret;
3321
3322 assert(app);
3323
3324 pthread_mutex_lock(&app->sock_lock);
3325 ret = ustctl_tracer_version(app->sock, &app->version);
3326 pthread_mutex_unlock(&app->sock_lock);
3327 if (ret < 0) {
3328 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
3329 ERR("UST app %d version failed with ret %d", app->sock, ret);
3330 } else {
3331 DBG3("UST app %d version failed. Application is dead", app->sock);
3332 }
3333 }
3334
3335 return ret;
3336 }
3337
/*
 * Unregister app by removing it from the global traceable app list and freeing
 * the data struct.
 *
 * The socket is already closed at this point so no close to sock.
 *
 * NOTE(ordering): per-PID buffers must be flushed and metadata pushed
 * *before* the app is removed from the hash tables, otherwise a concurrent
 * data_pending check could miss data still sitting in the app's buffers.
 */
void ust_app_unregister(int sock)
{
	struct ust_app *lta;
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter ust_app_sock_iter;
	struct lttng_ht_iter iter;
	struct ust_app_session *ua_sess;
	int ret;

	rcu_read_lock();

	/* Get the node reference for a call_rcu */
	lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &ust_app_sock_iter);
	node = lttng_ht_iter_get_node_ulong(&ust_app_sock_iter);
	assert(node);

	lta = caa_container_of(node, struct ust_app, sock_n);
	DBG("PID %d unregistering with sock %d", lta->pid, sock);

	/*
	 * For per-PID buffers, perform "push metadata" and flush all
	 * application streams before removing app from hash tables,
	 * ensuring proper behavior of data_pending check.
	 * Remove sessions so they are not visible during deletion.
	 */
	cds_lfht_for_each_entry(lta->sessions->ht, &iter.iter, ua_sess,
			node.node) {
		struct ust_registry_session *registry;

		ret = lttng_ht_del(lta->sessions, &iter);
		if (ret) {
			/* The session was already removed so scheduled for teardown. */
			continue;
		}

		if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
			/*
			 * Flush while the tracer-side objects are still alive so the
			 * consumer holds everything the application produced.
			 */
			(void) ust_app_flush_app_session(lta, ua_sess);
		}

		/*
		 * Add session to list for teardown. This is safe since at this point we
		 * are the only one using this list.
		 */
		pthread_mutex_lock(&ua_sess->lock);

		if (ua_sess->deleted) {
			/* Deletion already in progress elsewhere; nothing left to do. */
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		/*
		 * Normally, this is done in the delete session process which is
		 * executed in the call rcu below. However, upon registration we can't
		 * afford to wait for the grace period before pushing data or else the
		 * data pending feature can race between the unregistration and stop
		 * command where the data pending command is sent *before* the grace
		 * period ended.
		 *
		 * The close metadata below nullifies the metadata pointer in the
		 * session so the delete session will NOT push/close a second time.
		 */
		registry = get_session_registry(ua_sess);
		if (registry) {
			/* Push metadata for application before freeing the application. */
			(void) push_metadata(registry, ua_sess->consumer);

			/*
			 * Don't ask to close metadata for global per UID buffers. Close
			 * metadata only on destroy trace session in this case. Also, the
			 * previous push metadata could have flag the metadata registry to
			 * close so don't send a close command if closed.
			 */
			if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
				/* And ask to close it for this session registry. */
				(void) close_metadata(registry, ua_sess->consumer);
			}
		}
		cds_list_add(&ua_sess->teardown_node, &lta->teardown_head);

		pthread_mutex_unlock(&ua_sess->lock);
	}

	/* Remove application from the socket hash table. */
	ret = lttng_ht_del(ust_app_ht_by_sock, &ust_app_sock_iter);
	assert(!ret);

	/*
	 * Remove application from notify hash table. The thread handling the
	 * notify socket could have deleted the node so ignore on error because
	 * either way it's valid. The close of that socket is handled by the other
	 * thread.
	 */
	iter.iter.node = &lta->notify_sock_n.node;
	(void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);

	/*
	 * Ignore return value since the node might have been removed before by an
	 * add replace during app registration because the PID can be reassigned by
	 * the OS.
	 */
	iter.iter.node = &lta->pid_n.node;
	ret = lttng_ht_del(ust_app_ht, &iter);
	if (ret) {
		DBG3("Unregister app by PID %d failed. This can happen on pid reuse",
				lta->pid);
	}

	/* Free memory after the RCU grace period so lockless readers stay safe. */
	call_rcu(&lta->pid_n.head, delete_ust_app_rcu);

	rcu_read_unlock();
	return;
}
3457
/*
 * Fill events array with all events name of all registered apps.
 *
 * On success, the number of events is returned and *events points to a
 * newly allocated array the caller must free. On error a negative value
 * (-ENOMEM or an ustctl error code) is returned and *events is untouched.
 */
int ust_app_list_events(struct lttng_event **events)
{
	int ret, handle;
	size_t nbmem, count = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct lttng_event *tmp_event;

	/* Initial capacity; doubled on demand in the loop below. */
	nbmem = UST_APP_EVENT_LIST_SIZE;
	tmp_event = zmalloc(nbmem * sizeof(struct lttng_event));
	if (tmp_event == NULL) {
		PERROR("zmalloc ust app events");
		ret = -ENOMEM;
		goto error;
	}

	rcu_read_lock();

	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		struct lttng_ust_tracepoint_iter uiter;

		health_code_update();

		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		/* sock_lock serializes all ustctl commands on this app's socket. */
		pthread_mutex_lock(&app->sock_lock);
		handle = ustctl_tracepoint_list(app->sock);
		if (handle < 0) {
			if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app list events getting handle failed for app pid %d",
						app->pid);
			}
			pthread_mutex_unlock(&app->sock_lock);
			continue;
		}

		while ((ret = ustctl_tracepoint_list_get(app->sock, handle,
				&uiter)) != -LTTNG_UST_ERR_NOENT) {
			/* Handle ustctl error. */
			if (ret < 0) {
				int release_ret;

				if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
					ERR("UST app tp list get failed for app %d with ret %d",
							app->sock, ret);
				} else {
					DBG3("UST app tp list get failed. Application is dead");
					/*
					 * This is normal behavior, an application can die during the
					 * creation process. Don't report an error so the execution can
					 * continue normally. Continue normal execution.
					 */
					break;
				}
				/* Hard error: drop the partial list and bail out. */
				free(tmp_event);
				release_ret = ustctl_release_handle(app->sock, handle);
				if (release_ret < 0 &&
						release_ret != -LTTNG_UST_ERR_EXITING &&
						release_ret != -EPIPE) {
					ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
				}
				pthread_mutex_unlock(&app->sock_lock);
				goto rcu_error;
			}

			health_code_update();
			if (count >= nbmem) {
				/* In case the realloc fails, we free the memory */
				struct lttng_event *new_tmp_event;
				size_t new_nbmem;

				new_nbmem = nbmem << 1;
				DBG2("Reallocating event list from %zu to %zu entries",
						nbmem, new_nbmem);
				new_tmp_event = realloc(tmp_event,
						new_nbmem * sizeof(struct lttng_event));
				if (new_tmp_event == NULL) {
					int release_ret;

					PERROR("realloc ust app events");
					free(tmp_event);
					ret = -ENOMEM;
					release_ret = ustctl_release_handle(app->sock, handle);
					if (release_ret < 0 &&
							release_ret != -LTTNG_UST_ERR_EXITING &&
							release_ret != -EPIPE) {
						ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
					}
					pthread_mutex_unlock(&app->sock_lock);
					goto rcu_error;
				}
				/* Zero the new memory */
				memset(new_tmp_event + nbmem, 0,
						(new_nbmem - nbmem) * sizeof(struct lttng_event));
				nbmem = new_nbmem;
				tmp_event = new_tmp_event;
			}
			memcpy(tmp_event[count].name, uiter.name, LTTNG_UST_SYM_NAME_LEN);
			tmp_event[count].loglevel = uiter.loglevel;
			tmp_event[count].type = (enum lttng_event_type) LTTNG_UST_TRACEPOINT;
			tmp_event[count].pid = app->pid;
			tmp_event[count].enabled = -1;
			count++;
		}
		/* Done with this app; release the tracepoint listing handle. */
		ret = ustctl_release_handle(app->sock, handle);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
			ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
		}
	}

	ret = count;
	*events = tmp_event;

	DBG2("UST app list events done (%zu events)", count);

rcu_error:
	rcu_read_unlock();
error:
	health_code_update();
	return ret;
}
3588
3589 /*
3590 * Fill events array with all events name of all registered apps.
3591 */
3592 int ust_app_list_event_fields(struct lttng_event_field **fields)
3593 {
3594 int ret, handle;
3595 size_t nbmem, count = 0;
3596 struct lttng_ht_iter iter;
3597 struct ust_app *app;
3598 struct lttng_event_field *tmp_event;
3599
3600 nbmem = UST_APP_EVENT_LIST_SIZE;
3601 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event_field));
3602 if (tmp_event == NULL) {
3603 PERROR("zmalloc ust app event fields");
3604 ret = -ENOMEM;
3605 goto error;
3606 }
3607
3608 rcu_read_lock();
3609
3610 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3611 struct lttng_ust_field_iter uiter;
3612
3613 health_code_update();
3614
3615 if (!app->compatible) {
3616 /*
3617 * TODO: In time, we should notice the caller of this error by
3618 * telling him that this is a version error.
3619 */
3620 continue;
3621 }
3622 pthread_mutex_lock(&app->sock_lock);
3623 handle = ustctl_tracepoint_field_list(app->sock);
3624 if (handle < 0) {
3625 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
3626 ERR("UST app list field getting handle failed for app pid %d",
3627 app->pid);
3628 }
3629 pthread_mutex_unlock(&app->sock_lock);
3630 continue;
3631 }
3632
3633 while ((ret = ustctl_tracepoint_field_list_get(app->sock, handle,
3634 &uiter)) != -LTTNG_UST_ERR_NOENT) {
3635 /* Handle ustctl error. */
3636 if (ret < 0) {
3637 int release_ret;
3638
3639 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
3640 ERR("UST app tp list field failed for app %d with ret %d",
3641 app->sock, ret);
3642 } else {
3643 DBG3("UST app tp list field failed. Application is dead");
3644 /*
3645 * This is normal behavior, an application can die during the
3646 * creation process. Don't report an error so the execution can
3647 * continue normally. Reset list and count for next app.
3648 */
3649 break;
3650 }
3651 free(tmp_event);
3652 release_ret = ustctl_release_handle(app->sock, handle);
3653 pthread_mutex_unlock(&app->sock_lock);
3654 if (release_ret < 0 &&
3655 release_ret != -LTTNG_UST_ERR_EXITING &&
3656 release_ret != -EPIPE) {
3657 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
3658 }
3659 goto rcu_error;
3660 }
3661
3662 health_code_update();
3663 if (count >= nbmem) {
3664 /* In case the realloc fails, we free the memory */
3665 struct lttng_event_field *new_tmp_event;
3666 size_t new_nbmem;
3667
3668 new_nbmem = nbmem << 1;
3669 DBG2("Reallocating event field list from %zu to %zu entries",
3670 nbmem, new_nbmem);
3671 new_tmp_event = realloc(tmp_event,
3672 new_nbmem * sizeof(struct lttng_event_field));
3673 if (new_tmp_event == NULL) {
3674 int release_ret;
3675
3676 PERROR("realloc ust app event fields");
3677 free(tmp_event);
3678 ret = -ENOMEM;
3679 release_ret = ustctl_release_handle(app->sock, handle);
3680 pthread_mutex_unlock(&app->sock_lock);
3681 if (release_ret &&
3682 release_ret != -LTTNG_UST_ERR_EXITING &&
3683 release_ret != -EPIPE) {
3684 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
3685 }
3686 goto rcu_error;
3687 }
3688 /* Zero the new memory */
3689 memset(new_tmp_event + nbmem, 0,
3690 (new_nbmem - nbmem) * sizeof(struct lttng_event_field));
3691 nbmem = new_nbmem;
3692 tmp_event = new_tmp_event;
3693 }
3694
3695 memcpy(tmp_event[count].field_name, uiter.field_name, LTTNG_UST_SYM_NAME_LEN);
3696 /* Mapping between these enums matches 1 to 1. */
3697 tmp_event[count].type = (enum lttng_event_field_type) uiter.type;
3698 tmp_event[count].nowrite = uiter.nowrite;
3699
3700 memcpy(tmp_event[count].event.name, uiter.event_name, LTTNG_UST_SYM_NAME_LEN);
3701 tmp_event[count].event.loglevel = uiter.loglevel;
3702 tmp_event[count].event.type = LTTNG_EVENT_TRACEPOINT;
3703 tmp_event[count].event.pid = app->pid;
3704 tmp_event[count].event.enabled = -1;
3705 count++;
3706 }
3707 ret = ustctl_release_handle(app->sock, handle);
3708 pthread_mutex_unlock(&app->sock_lock);
3709 if (ret < 0 &&
3710 ret != -LTTNG_UST_ERR_EXITING &&
3711 ret != -EPIPE) {
3712 ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
3713 }
3714 }
3715
3716 ret = count;
3717 *fields = tmp_event;
3718
3719 DBG2("UST app list event fields done (%zu events)", count);
3720
3721 rcu_error:
3722 rcu_read_unlock();
3723 error:
3724 health_code_update();
3725 return ret;
3726 }
3727
/*
 * Free and clean all traceable apps of the global list.
 *
 * Empties the three global application hash tables, schedules every app
 * object for RCU-deferred deletion, then hands the (now empty) tables to
 * the ht-cleanup thread for destruction.
 *
 * Should _NOT_ be called with RCU read-side lock held.
 */
void ust_app_clean_list(void)
{
	int ret;
	struct ust_app *app;
	struct lttng_ht_iter iter;

	DBG2("UST app cleaning registered apps hash table");

	rcu_read_lock();

	if (ust_app_ht) {
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			ret = lttng_ht_del(ust_app_ht, &iter);
			assert(!ret);
			/* Free the app only after the current grace period. */
			call_rcu(&app->pid_n.head, delete_ust_app_rcu);
		}
	}

	/* Cleanup socket hash table */
	if (ust_app_ht_by_sock) {
		cds_lfht_for_each_entry(ust_app_ht_by_sock->ht, &iter.iter, app,
				sock_n.node) {
			ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
			assert(!ret);
		}
	}

	/* Cleanup notify socket hash table */
	if (ust_app_ht_by_notify_sock) {
		cds_lfht_for_each_entry(ust_app_ht_by_notify_sock->ht, &iter.iter, app,
				notify_sock_n.node) {
			ret = lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
			assert(!ret);
		}
	}
	rcu_read_unlock();

	/* Destroy is done only when the ht is empty */
	if (ust_app_ht) {
		ht_cleanup_push(ust_app_ht);
	}
	if (ust_app_ht_by_sock) {
		ht_cleanup_push(ust_app_ht_by_sock);
	}
	if (ust_app_ht_by_notify_sock) {
		ht_cleanup_push(ust_app_ht_by_notify_sock);
	}
}
3781
3782 /*
3783 * Init UST app hash table.
3784 */
3785 int ust_app_ht_alloc(void)
3786 {
3787 ust_app_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3788 if (!ust_app_ht) {
3789 return -1;
3790 }
3791 ust_app_ht_by_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3792 if (!ust_app_ht_by_sock) {
3793 return -1;
3794 }
3795 ust_app_ht_by_notify_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3796 if (!ust_app_ht_by_notify_sock) {
3797 return -1;
3798 }
3799 return 0;
3800 }
3801
3802 /*
3803 * For a specific UST session, disable the channel for all registered apps.
3804 */
3805 int ust_app_disable_channel_glb(struct ltt_ust_session *usess,
3806 struct ltt_ust_channel *uchan)
3807 {
3808 int ret = 0;
3809 struct lttng_ht_iter iter;
3810 struct lttng_ht_node_str *ua_chan_node;
3811 struct ust_app *app;
3812 struct ust_app_session *ua_sess;
3813 struct ust_app_channel *ua_chan;
3814
3815 if (usess == NULL || uchan == NULL) {
3816 ERR("Disabling UST global channel with NULL values");
3817 ret = -1;
3818 goto error;
3819 }
3820
3821 DBG2("UST app disabling channel %s from global domain for session id %" PRIu64,
3822 uchan->name, usess->id);
3823
3824 rcu_read_lock();
3825
3826 /* For every registered applications */
3827 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3828 struct lttng_ht_iter uiter;
3829 if (!app->compatible) {
3830 /*
3831 * TODO: In time, we should notice the caller of this error by
3832 * telling him that this is a version error.
3833 */
3834 continue;
3835 }
3836 ua_sess = lookup_session_by_app(usess, app);
3837 if (ua_sess == NULL) {
3838 continue;
3839 }
3840
3841 /* Get channel */
3842 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3843 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3844 /* If the session if found for the app, the channel must be there */
3845 assert(ua_chan_node);
3846
3847 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3848 /* The channel must not be already disabled */
3849 assert(ua_chan->enabled == 1);
3850
3851 /* Disable channel onto application */
3852 ret = disable_ust_app_channel(ua_sess, ua_chan, app);
3853 if (ret < 0) {
3854 /* XXX: We might want to report this error at some point... */
3855 continue;
3856 }
3857 }
3858
3859 rcu_read_unlock();
3860
3861 error:
3862 return ret;
3863 }
3864
3865 /*
3866 * For a specific UST session, enable the channel for all registered apps.
3867 */
3868 int ust_app_enable_channel_glb(struct ltt_ust_session *usess,
3869 struct ltt_ust_channel *uchan)
3870 {
3871 int ret = 0;
3872 struct lttng_ht_iter iter;
3873 struct ust_app *app;
3874 struct ust_app_session *ua_sess;
3875
3876 if (usess == NULL || uchan == NULL) {
3877 ERR("Adding UST global channel to NULL values");
3878 ret = -1;
3879 goto error;
3880 }
3881
3882 DBG2("UST app enabling channel %s to global domain for session id %" PRIu64,
3883 uchan->name, usess->id);
3884
3885 rcu_read_lock();
3886
3887 /* For every registered applications */
3888 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3889 if (!app->compatible) {
3890 /*
3891 * TODO: In time, we should notice the caller of this error by
3892 * telling him that this is a version error.
3893 */
3894 continue;
3895 }
3896 ua_sess = lookup_session_by_app(usess, app);
3897 if (ua_sess == NULL) {
3898 continue;
3899 }
3900
3901 /* Enable channel onto application */
3902 ret = enable_ust_app_channel(ua_sess, uchan, app);
3903 if (ret < 0) {
3904 /* XXX: We might want to report this error at some point... */
3905 continue;
3906 }
3907 }
3908
3909 rcu_read_unlock();
3910
3911 error:
3912 return ret;
3913 }
3914
3915 /*
3916 * Disable an event in a channel and for a specific session.
3917 */
3918 int ust_app_disable_event_glb(struct ltt_ust_session *usess,
3919 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
3920 {
3921 int ret = 0;
3922 struct lttng_ht_iter iter, uiter;
3923 struct lttng_ht_node_str *ua_chan_node;
3924 struct ust_app *app;
3925 struct ust_app_session *ua_sess;
3926 struct ust_app_channel *ua_chan;
3927 struct ust_app_event *ua_event;
3928
3929 DBG("UST app disabling event %s for all apps in channel "
3930 "%s for session id %" PRIu64,
3931 uevent->attr.name, uchan->name, usess->id);
3932
3933 rcu_read_lock();
3934
3935 /* For all registered applications */
3936 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3937 if (!app->compatible) {
3938 /*
3939 * TODO: In time, we should notice the caller of this error by
3940 * telling him that this is a version error.
3941 */
3942 continue;
3943 }
3944 ua_sess = lookup_session_by_app(usess, app);
3945 if (ua_sess == NULL) {
3946 /* Next app */
3947 continue;
3948 }
3949
3950 /* Lookup channel in the ust app session */
3951 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3952 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3953 if (ua_chan_node == NULL) {
3954 DBG2("Channel %s not found in session id %" PRIu64 " for app pid %d."
3955 "Skipping", uchan->name, usess->id, app->pid);
3956 continue;
3957 }
3958 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3959
3960 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
3961 uevent->filter, uevent->attr.loglevel,
3962 uevent->exclusion);
3963 if (ua_event == NULL) {
3964 DBG2("Event %s not found in channel %s for app pid %d."
3965 "Skipping", uevent->attr.name, uchan->name, app->pid);
3966 continue;
3967 }
3968
3969 ret = disable_ust_app_event(ua_sess, ua_event, app);
3970 if (ret < 0) {
3971 /* XXX: Report error someday... */
3972 continue;
3973 }
3974 }
3975
3976 rcu_read_unlock();
3977
3978 return ret;
3979 }
3980
/*
 * For a specific UST session, create the channel for all registered apps.
 *
 * Return 0 on success (per-app failures caused by a dead application are
 * not reported) or a negative value on fatal error (e.g. -ENOMEM).
 */
int ust_app_create_channel_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan)
{
	int ret = 0, created;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct ust_app_session *ua_sess = NULL;

	/* Very wrong code flow */
	assert(usess);
	assert(uchan);

	DBG2("UST app adding channel %s to UST domain for session id %" PRIu64,
			uchan->name, usess->id);

	rcu_read_lock();

	/* For every registered applications */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		if (!trace_ust_pid_tracker_lookup(usess, app->pid)) {
			/* Skip. */
			continue;
		}

		/*
		 * Create session on the tracer side and add it to app session HT. Note
		 * that if session exist, it will simply return a pointer to the ust
		 * app session.
		 */
		ret = create_ust_app_session(usess, app, &ua_sess, &created);
		if (ret < 0) {
			switch (ret) {
			case -ENOTCONN:
				/*
				 * The application's socket is not valid. Either a bad socket
				 * or a timeout on it. We can't inform the caller that for a
				 * specific app, the session failed so lets continue here.
				 */
				ret = 0;	/* Not an error. */
				continue;
			case -ENOMEM:
			default:
				goto error_rcu_unlock;
			}
		}
		assert(ua_sess);

		pthread_mutex_lock(&ua_sess->lock);

		if (ua_sess->deleted) {
			/* Session torn down concurrently; skip this app. */
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		if (!strncmp(uchan->name, DEFAULT_METADATA_NAME,
				sizeof(uchan->name))) {
			/* Metadata channel: only record the attributes for later. */
			copy_channel_attr_to_ustctl(&ua_sess->metadata_attr, &uchan->attr);
			ret = 0;
		} else {
			/* Create channel onto application. We don't need the chan ref. */
			ret = create_ust_app_channel(ua_sess, uchan, app,
					LTTNG_UST_CHAN_PER_CPU, usess, NULL);
		}
		pthread_mutex_unlock(&ua_sess->lock);
		if (ret < 0) {
			/* Cleanup the created session if it's the case. */
			if (created) {
				destroy_app_session(app, ua_sess);
			}
			switch (ret) {
			case -ENOTCONN:
				/*
				 * The application's socket is not valid. Either a bad socket
				 * or a timeout on it. We can't inform the caller that for a
				 * specific app, the session failed so lets continue here.
				 */
				ret = 0;	/* Not an error. */
				continue;
			case -ENOMEM:
			default:
				goto error_rcu_unlock;
			}
		}
	}

error_rcu_unlock:
	rcu_read_unlock();
	return ret;
}
4080
/*
 * Enable event for a specific session and channel on the tracer.
 *
 * Return 0 on success or the first fatal per-application error. Dead or
 * incomplete applications (missing channel/event) are skipped.
 */
int ust_app_enable_event_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
{
	int ret = 0;
	struct lttng_ht_iter iter, uiter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;
	struct ust_app_event *ua_event;

	DBG("UST app enabling event %s for all apps for session id %" PRIu64,
			uevent->attr.name, usess->id);

	/*
	 * NOTE: At this point, this function is called only if the session and
	 * channel passed are already created for all apps. and enabled on the
	 * tracer also.
	 */

	rcu_read_lock();

	/* For all registered applications */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (!ua_sess) {
			/* The application has problem or is probably dead. */
			continue;
		}

		/* Serialize against concurrent session modification/teardown. */
		pthread_mutex_lock(&ua_sess->lock);

		if (ua_sess->deleted) {
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		/* Lookup channel in the ust app session */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		/*
		 * It is possible that the channel cannot be found is
		 * the channel/event creation occurs concurrently with
		 * an application exit.
		 */
		if (!ua_chan_node) {
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

		/* Get event node */
		ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
				uevent->filter, uevent->attr.loglevel, uevent->exclusion);
		if (ua_event == NULL) {
			DBG3("UST app enable event %s not found for app PID %d."
					"Skipping app", uevent->attr.name, app->pid);
			goto next_app;
		}

		ret = enable_ust_app_event(ua_sess, ua_event, app);
		if (ret < 0) {
			/* Fatal for this command; stop iterating applications. */
			pthread_mutex_unlock(&ua_sess->lock);
			goto error;
		}
	next_app:
		pthread_mutex_unlock(&ua_sess->lock);
	}

error:
	rcu_read_unlock();
	return ret;
}
4165
/*
 * For a specific existing UST session and UST channel, creates the event for
 * all registered apps.
 *
 * Return 0 on success; an already-existing event on an app is not an error.
 * A fatal creation error (e.g. -ENOMEM) stops the iteration and is returned.
 */
int ust_app_create_event_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
{
	int ret = 0;
	struct lttng_ht_iter iter, uiter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;

	DBG("UST app creating event %s for all apps for session id %" PRIu64,
			uevent->attr.name, usess->id);

	rcu_read_lock();

	/* For all registered applications */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (!ua_sess) {
			/* The application has problem or is probably dead. */
			continue;
		}

		/* Serialize against concurrent session modification/teardown. */
		pthread_mutex_lock(&ua_sess->lock);

		if (ua_sess->deleted) {
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		/* Lookup channel in the ust app session */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		/* If the channel is not found, there is a code flow error */
		assert(ua_chan_node);

		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

		ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
		pthread_mutex_unlock(&ua_sess->lock);
		if (ret < 0) {
			if (ret != -LTTNG_UST_ERR_EXIST) {
				/* Possible value at this point: -ENOMEM. If so, we stop! */
				break;
			}
			DBG2("UST app event %s already exist on app PID %d",
					uevent->attr.name, app->pid);
			continue;
		}
	}

	rcu_read_unlock();

	return ret;
}
4232
/*
 * Start tracing for a specific UST session and app.
 *
 * Return 0 on success — including when the application died or the session
 * is being torn down, which are not reported as errors — and -1 on failure
 * (directory creation, metadata setup or a non-EPIPE start error).
 */
static
int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app)
{
	int ret = 0;
	struct ust_app_session *ua_sess;

	DBG("Starting tracing for ust app pid %d", app->pid);

	rcu_read_lock();

	if (!app->compatible) {
		goto end;
	}

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		/* The session is in teardown process. Ignore and continue. */
		goto end;
	}

	/* Held until the start command completed (or hand-off on error paths). */
	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		pthread_mutex_unlock(&ua_sess->lock);
		goto end;
	}

	/* Upon restart, we skip the setup, already done */
	if (ua_sess->started) {
		goto skip_setup;
	}

	/* Create directories if consumer is LOCAL and has a path defined. */
	if (usess->consumer->type == CONSUMER_DST_LOCAL &&
			strlen(usess->consumer->dst.trace_path) > 0) {
		ret = run_as_mkdir_recursive(usess->consumer->dst.trace_path,
				S_IRWXU | S_IRWXG, ua_sess->euid, ua_sess->egid);
		if (ret < 0) {
			/* An already-existing trace directory is fine. */
			if (errno != EEXIST) {
				ERR("Trace directory creation error");
				goto error_unlock;
			}
		}
	}

	/*
	 * Create the metadata for the application. This returns gracefully if a
	 * metadata was already set for the session.
	 */
	ret = create_ust_app_metadata(ua_sess, app, usess->consumer);
	if (ret < 0) {
		goto error_unlock;
	}

	health_code_update();

skip_setup:
	/* This start the UST tracing */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_start_session(app->sock, ua_sess->handle);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Error starting tracing for app pid: %d (ret: %d)",
					app->pid, ret);
		} else {
			DBG("UST app start session failed. Application is dead.");
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			pthread_mutex_unlock(&ua_sess->lock);
			goto end;
		}
		goto error_unlock;
	}

	/* Indicate that the session has been started once */
	ua_sess->started = 1;

	pthread_mutex_unlock(&ua_sess->lock);

	health_code_update();

	/* Quiescent wait after starting trace */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_wait_quiescent(app->sock);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
		ERR("UST app wait quiescent failed for app pid %d ret %d",
				app->pid, ret);
	}

end:
	rcu_read_unlock();
	health_code_update();
	return 0;

error_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
	rcu_read_unlock();
	health_code_update();
	return -1;
}
4341
/*
 * Stop tracing for a specific UST session and app.
 *
 * Stops the tracer session, waits for quiescence and pushes any pending
 * metadata to the consumer. Return 0 on success or on benign conditions
 * (incompatible app, no app session, application died); -1 on error.
 */
static
int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
{
	int ret = 0;
	struct ust_app_session *ua_sess;
	struct ust_registry_session *registry;

	DBG("Stopping tracing for ust app pid %d", app->pid);

	rcu_read_lock();

	if (!app->compatible) {
		goto end_no_session;
	}

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		goto end_no_session;
	}

	/* Serialize against concurrent deletion of the app session. */
	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		pthread_mutex_unlock(&ua_sess->lock);
		goto end_no_session;
	}

	/*
	 * If started = 0, it means that stop trace has been called for a session
	 * that was never started. It's possible since we can have a fail start
	 * from either the application manager thread or the command thread. Simply
	 * indicate that this is a stop error.
	 */
	if (!ua_sess->started) {
		goto error_rcu_unlock;
	}

	health_code_update();

	/* This inhibits UST tracing */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_stop_session(app->sock, ua_sess->handle);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Error stopping tracing for app pid: %d (ret: %d)",
					app->pid, ret);
		} else {
			DBG("UST app stop session failed. Application is dead.");
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			goto end_unlock;
		}
		goto error_rcu_unlock;
	}

	health_code_update();

	/* Quiescent wait after stopping trace */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_wait_quiescent(app->sock);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
		ERR("UST app wait quiescent failed for app pid %d ret %d",
				app->pid, ret);
	}

	health_code_update();

	registry = get_session_registry(ua_sess);
	assert(registry);

	/* Push metadata for application before freeing the application. */
	(void) push_metadata(registry, ua_sess->consumer);

end_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
end_no_session:
	rcu_read_unlock();
	health_code_update();
	return 0;

error_rcu_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
	rcu_read_unlock();
	health_code_update();
	return -1;
}
4436
4437 static
4438 int ust_app_flush_app_session(struct ust_app *app,
4439 struct ust_app_session *ua_sess)
4440 {
4441 int ret, retval = 0;
4442 struct lttng_ht_iter iter;
4443 struct ust_app_channel *ua_chan;
4444 struct consumer_socket *socket;
4445
4446 DBG("Flushing app session buffers for ust app pid %d", app->pid);
4447
4448 rcu_read_lock();
4449
4450 if (!app->compatible) {
4451 goto end_not_compatible;
4452 }
4453
4454 pthread_mutex_lock(&ua_sess->lock);
4455
4456 if (ua_sess->deleted) {
4457 goto end_deleted;
4458 }
4459
4460 health_code_update();
4461
4462 /* Flushing buffers */
4463 socket = consumer_find_socket_by_bitness(app->bits_per_long,
4464 ua_sess->consumer);
4465
4466 /* Flush buffers and push metadata. */
4467 switch (ua_sess->buffer_type) {
4468 case LTTNG_BUFFER_PER_PID:
4469 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
4470 node.node) {
4471 health_code_update();
4472 ret = consumer_flush_channel(socket, ua_chan->key);
4473 if (ret) {
4474 ERR("Error flushing consumer channel");
4475 retval = -1;
4476 continue;
4477 }
4478 }
4479 break;
4480 case LTTNG_BUFFER_PER_UID:
4481 default:
4482 assert(0);
4483 break;
4484 }
4485
4486 health_code_update();
4487
4488 end_deleted:
4489 pthread_mutex_unlock(&ua_sess->lock);
4490
4491 end_not_compatible:
4492 rcu_read_unlock();
4493 health_code_update();
4494 return retval;
4495 }
4496
/*
 * Flush buffers for all applications for a specific UST session.
 * Called with UST session lock held.
 *
 * In per-UID mode, the flush goes through the buffer registry since the
 * buffers outlive any single application. In per-PID mode, each registered
 * application's own session is flushed; ust_app_flush_app_session() takes
 * the app session lock to handle a concurrent application unregistration.
 *
 * Return 0; -1 only on an unknown buffer type (should never happen).
 */
static
int ust_app_flush_session(struct ltt_ust_session *usess)

{
	int ret = 0;

	DBG("Flushing session buffers for all ust apps");

	rcu_read_lock();

	/* Flush buffers and push metadata. */
	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;
		struct lttng_ht_iter iter;

		/* Flush all per UID buffers associated to that session. */
		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct ust_registry_session *ust_session_reg;
			struct buffer_reg_channel *reg_chan;
			struct consumer_socket *socket;

			/* Get consumer socket to use to push the metadata.*/
			socket = consumer_find_socket_by_bitness(reg->bits_per_long,
					usess->consumer);
			if (!socket) {
				/* Ignore request if no consumer is found for the session. */
				continue;
			}

			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
					reg_chan, node.node) {
				/*
				 * The following call will print error values so the return
				 * code is of little importance because whatever happens, we
				 * have to try them all.
				 */
				(void) consumer_flush_channel(socket, reg_chan->consumer_key);
			}

			ust_session_reg = reg->registry->reg.ust;
			/* Push metadata. */
			(void) push_metadata(ust_session_reg, usess->consumer);
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		struct ust_app_session *ua_sess;
		struct lttng_ht_iter iter;
		struct ust_app *app;

		/* Flush the session of every registered application. */
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			ua_sess = lookup_session_by_app(usess, app);
			if (ua_sess == NULL) {
				continue;
			}
			/* Errors are logged by the callee; try every app. */
			(void) ust_app_flush_app_session(app, ua_sess);
		}
		break;
	}
	default:
		ret = -1;
		assert(0);
		break;
	}

	rcu_read_unlock();
	health_code_update();
	return ret;
}
4573
/*
 * Destroy a specific UST session in apps.
 *
 * Always return 0; a missing per-app session (already deleted or being
 * deleted) is not an error.
 */
static int destroy_trace(struct ltt_ust_session *usess, struct ust_app *app)
{
	int ret;
	struct ust_app_session *ua_sess;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;

	DBG("Destroy tracing for ust app pid %d", app->pid);

	rcu_read_lock();

	if (!app->compatible) {
		goto end;
	}

	__lookup_session_by_app(usess, app, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node == NULL) {
		/* Session is being or is deleted. */
		goto end;
	}
	ua_sess = caa_container_of(node, struct ust_app_session, node);

	health_code_update();
	destroy_app_session(app, ua_sess);

	health_code_update();

	/* Quiescent wait after stopping trace */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_wait_quiescent(app->sock);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
		ERR("UST app wait quiescent failed for app pid %d ret %d",
				app->pid, ret);
	}
end:
	rcu_read_unlock();
	health_code_update();
	return 0;
}
4618
4619 /*
4620 * Start tracing for the UST session.
4621 */
4622 int ust_app_start_trace_all(struct ltt_ust_session *usess)
4623 {
4624 int ret = 0;
4625 struct lttng_ht_iter iter;
4626 struct ust_app *app;
4627
4628 DBG("Starting all UST traces");
4629
4630 rcu_read_lock();
4631
4632 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4633 ret = ust_app_start_trace(usess, app);
4634 if (ret < 0) {
4635 /* Continue to next apps even on error */
4636 continue;
4637 }
4638 }
4639
4640 rcu_read_unlock();
4641
4642 return 0;
4643 }
4644
4645 /*
4646 * Start tracing for the UST session.
4647 * Called with UST session lock held.
4648 */
4649 int ust_app_stop_trace_all(struct ltt_ust_session *usess)
4650 {
4651 int ret = 0;
4652 struct lttng_ht_iter iter;
4653 struct ust_app *app;
4654
4655 DBG("Stopping all UST traces");
4656
4657 rcu_read_lock();
4658
4659 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4660 ret = ust_app_stop_trace(usess, app);
4661 if (ret < 0) {
4662 /* Continue to next apps even on error */
4663 continue;
4664 }
4665 }
4666
4667 (void) ust_app_flush_session(usess);
4668
4669 rcu_read_unlock();
4670
4671 return 0;
4672 }
4673
4674 /*
4675 * Destroy app UST session.
4676 */
4677 int ust_app_destroy_trace_all(struct ltt_ust_session *usess)
4678 {
4679 int ret = 0;
4680 struct lttng_ht_iter iter;
4681 struct ust_app *app;
4682
4683 DBG("Destroy all UST traces");
4684
4685 rcu_read_lock();
4686
4687 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4688 ret = destroy_trace(usess, app);
4689 if (ret < 0) {
4690 /* Continue to next apps even on error */
4691 continue;
4692 }
4693 }
4694
4695 rcu_read_unlock();
4696
4697 return 0;
4698 }
4699
/*
 * Create the whole UST session state (channels, contexts, events) for one
 * application from the shadow copy of the UST global domain, then start
 * tracing if the session is active. On any error, the partially-created
 * app session is torn down.
 */
static
void ust_app_global_create(struct ltt_ust_session *usess, struct ust_app *app)
{
	int ret = 0;
	struct lttng_ht_iter iter, uiter;
	struct ust_app_session *ua_sess = NULL;
	struct ust_app_channel *ua_chan;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;
	int is_created = 0;

	ret = create_ust_app_session(usess, app, &ua_sess, &is_created);
	if (ret < 0) {
		/* Tracer is probably gone or ENOMEM. */
		goto error;
	}
	if (!is_created) {
		/* App session already created. */
		goto end;
	}
	assert(ua_sess);

	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		pthread_mutex_unlock(&ua_sess->lock);
		goto end;
	}

	/*
	 * We can iterate safely here over all UST app session since the create ust
	 * app session above made a shadow copy of the UST global domain from the
	 * ltt ust session.
	 */
	cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
			node.node) {
		ret = do_create_channel(app, usess, ua_sess, ua_chan);
		if (ret < 0 && ret != -ENOTCONN) {
			/*
			 * Stop everything. On error, the application
			 * failed, no more file descriptor are available
			 * or ENOMEM so stopping here is the only thing
			 * we can do for now. The only exception is
			 * -ENOTCONN, which indicates that the application
			 * has exit.
			 */
			goto error_unlock;
		}

		/*
		 * Add context using the list so they are enabled in the same order the
		 * user added them.
		 */
		cds_list_for_each_entry(ua_ctx, &ua_chan->ctx_list, list) {
			ret = create_ust_channel_context(ua_chan, ua_ctx, app);
			if (ret < 0) {
				goto error_unlock;
			}
		}


		/* For each events */
		cds_lfht_for_each_entry(ua_chan->events->ht, &uiter.iter, ua_event,
				node.node) {
			ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
			if (ret < 0) {
				goto error_unlock;
			}
		}
	}

	pthread_mutex_unlock(&ua_sess->lock);

	if (usess->active) {
		ret = ust_app_start_trace(usess, app);
		if (ret < 0) {
			goto error;
		}

		DBG2("UST trace started for app pid %d", app->pid);
	}
end:
	/* Everything went well at this point. */
	return;

error_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
error:
	/* Tear down whatever was created before the failure. */
	if (ua_sess) {
		destroy_app_session(app, ua_sess);
	}
	return;
}
4793
/*
 * Destroy the app session of "app" for the given UST session, if any.
 */
static
void ust_app_global_destroy(struct ltt_ust_session *usess, struct ust_app *app)
{
	struct ust_app_session *ua_sess = lookup_session_by_app(usess, app);

	if (ua_sess) {
		destroy_app_session(app, ua_sess);
	}
}
4805
4806 /*
4807 * Add channels/events from UST global domain to registered apps at sock.
4808 *
4809 * Called with session lock held.
4810 * Called with RCU read-side lock held.
4811 */
4812 void ust_app_global_update(struct ltt_ust_session *usess, struct ust_app *app)
4813 {
4814 assert(usess);
4815
4816 DBG2("UST app global update for app sock %d for session id %" PRIu64,
4817 app->sock, usess->id);
4818
4819 if (!app->compatible) {
4820 return;
4821 }
4822
4823 if (trace_ust_pid_tracker_lookup(usess, app->pid)) {
4824 ust_app_global_create(usess, app);
4825 } else {
4826 ust_app_global_destroy(usess, app);
4827 }
4828 }
4829
4830 /*
4831 * Called with session lock held.
4832 */
4833 void ust_app_global_update_all(struct ltt_ust_session *usess)
4834 {
4835 struct lttng_ht_iter iter;
4836 struct ust_app *app;
4837
4838 rcu_read_lock();
4839 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4840 ust_app_global_update(usess, app);
4841 }
4842 rcu_read_unlock();
4843 }
4844
/*
 * Add context to a specific channel for global UST domain.
 *
 * Iterates over every registered application; per-app failures do not stop
 * the iteration. Return 0 or the status of the last context creation that
 * was attempted.
 */
int ust_app_add_ctx_channel_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_context *uctx)
{
	int ret = 0;
	struct lttng_ht_node_str *ua_chan_node;
	struct lttng_ht_iter iter, uiter;
	struct ust_app_channel *ua_chan = NULL;
	struct ust_app_session *ua_sess;
	struct ust_app *app;

	rcu_read_lock();

	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (ua_sess == NULL) {
			continue;
		}

		/* Serialize against concurrent deletion of the app session. */
		pthread_mutex_lock(&ua_sess->lock);

		if (ua_sess->deleted) {
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		/* Lookup channel in the ust app session */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		if (ua_chan_node == NULL) {
			goto next_app;
		}
		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel,
				node);
		ret = create_ust_app_channel_context(ua_sess, ua_chan, &uctx->ctx, app);
		if (ret < 0) {
			goto next_app;
		}
	next_app:
		pthread_mutex_unlock(&ua_sess->lock);
	}

	rcu_read_unlock();
	return ret;
}
4899
/*
 * Enable event for a channel from a UST session for a specific PID.
 *
 * Return 0 on success (including when the app is incompatible or has no
 * session), -1 if no application with that PID is registered, or a negative
 * value on event creation/enabling error.
 */
int ust_app_enable_event_pid(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent, pid_t pid)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;
	struct ust_app_event *ua_event;

	DBG("UST app enabling event %s for PID %d", uevent->attr.name, pid);

	rcu_read_lock();

	app = ust_app_find_by_pid(pid);
	if (app == NULL) {
		ERR("UST app enable event per PID %d not found", pid);
		ret = -1;
		goto end;
	}

	if (!app->compatible) {
		ret = 0;
		goto end;
	}

	ua_sess = lookup_session_by_app(usess, app);
	if (!ua_sess) {
		/* The application has problem or is probably dead. */
		ret = 0;
		goto end;
	}

	/* Serialize against concurrent deletion of the app session. */
	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		ret = 0;
		goto end_unlock;
	}

	/* Lookup channel in the ust app session */
	lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
	ua_chan_node = lttng_ht_iter_get_node_str(&iter);
	/* If the channel is not found, there is a code flow error */
	assert(ua_chan_node);

	ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

	/* Create the event if it does not exist yet, otherwise just enable it. */
	ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
			uevent->filter, uevent->attr.loglevel, uevent->exclusion);
	if (ua_event == NULL) {
		ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
		if (ret < 0) {
			goto end_unlock;
		}
	} else {
		ret = enable_ust_app_event(ua_sess, ua_event, app);
		if (ret < 0) {
			goto end_unlock;
		}
	}

end_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
end:
	rcu_read_unlock();
	return ret;
}
4972
/*
 * Calibrate registered applications.
 *
 * Iterates over every registered application. Return the status of the last
 * calibration attempted (-ENOSYS from the tracer is mapped to 0).
 */
int ust_app_calibrate_glb(struct lttng_ust_calibrate *calibrate)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;

	rcu_read_lock();

	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}

		health_code_update();

		/* Serialize access to the application command socket. */
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_calibrate(app->sock, calibrate);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0) {
			switch (ret) {
			case -ENOSYS:
				/* Means that it's not implemented on the tracer side. */
				ret = 0;
				break;
			default:
				DBG2("Calibrate app PID %d returned with error %d",
						app->pid, ret);
				break;
			}
		}
	}

	DBG("UST app global domain calibration finished");

	rcu_read_unlock();

	health_code_update();

	return ret;
}
5020
/*
 * Receive registration and populate the given msg structure.
 *
 * The registration message carries pid/ppid/uid/gid as fixed-width 32-bit
 * values; they are converted to the system types on success.
 *
 * On success return 0 else a negative value returned by the ustctl call.
 */
int ust_app_recv_registration(int sock, struct ust_register_msg *msg)
{
	int ret;
	uint32_t pid, ppid, uid, gid;

	assert(msg);

	ret = ustctl_recv_reg_msg(sock, &msg->type, &msg->major, &msg->minor,
			&pid, &ppid, &uid, &gid,
			&msg->bits_per_long,
			&msg->uint8_t_alignment,
			&msg->uint16_t_alignment,
			&msg->uint32_t_alignment,
			&msg->uint64_t_alignment,
			&msg->long_alignment,
			&msg->byte_order,
			msg->name);
	if (ret < 0) {
		switch (-ret) {
		case EPIPE:
		case ECONNRESET:
		case LTTNG_UST_ERR_EXITING:
			/* Expected when the application dies mid-registration. */
			DBG3("UST app recv reg message failed. Application died");
			break;
		case LTTNG_UST_ERR_UNSUP_MAJOR:
			ERR("UST app recv reg unsupported version %d.%d. Supporting %d.%d",
					msg->major, msg->minor, LTTNG_UST_ABI_MAJOR_VERSION,
					LTTNG_UST_ABI_MINOR_VERSION);
			break;
		default:
			ERR("UST app recv reg message failed with ret %d", ret);
			break;
		}
		goto error;
	}
	/* Convert the wire representation to the system credential types. */
	msg->pid = (pid_t) pid;
	msg->ppid = (pid_t) ppid;
	msg->uid = (uid_t) uid;
	msg->gid = (gid_t) gid;

error:
	return ret;
}
5069
5070 /*
5071 * Return a ust app session object using the application object and the
5072 * session object descriptor has a key. If not found, NULL is returned.
5073 * A RCU read side lock MUST be acquired when calling this function.
5074 */
5075 static struct ust_app_session *find_session_by_objd(struct ust_app *app,
5076 int objd)
5077 {
5078 struct lttng_ht_node_ulong *node;
5079 struct lttng_ht_iter iter;
5080 struct ust_app_session *ua_sess = NULL;
5081
5082 assert(app);
5083
5084 lttng_ht_lookup(app->ust_sessions_objd, (void *)((unsigned long) objd), &iter);
5085 node = lttng_ht_iter_get_node_ulong(&iter);
5086 if (node == NULL) {
5087 DBG2("UST app session find by objd %d not found", objd);
5088 goto error;
5089 }
5090
5091 ua_sess = caa_container_of(node, struct ust_app_session, ust_objd_node);
5092
5093 error:
5094 return ua_sess;
5095 }
5096
5097 /*
5098 * Return a ust app channel object using the application object and the channel
5099 * object descriptor has a key. If not found, NULL is returned. A RCU read side
5100 * lock MUST be acquired before calling this function.
5101 */
5102 static struct ust_app_channel *find_channel_by_objd(struct ust_app *app,
5103 int objd)
5104 {
5105 struct lttng_ht_node_ulong *node;
5106 struct lttng_ht_iter iter;
5107 struct ust_app_channel *ua_chan = NULL;
5108
5109 assert(app);
5110
5111 lttng_ht_lookup(app->ust_objd, (void *)((unsigned long) objd), &iter);
5112 node = lttng_ht_iter_get_node_ulong(&iter);
5113 if (node == NULL) {
5114 DBG2("UST app channel find by objd %d not found", objd);
5115 goto error;
5116 }
5117
5118 ua_chan = caa_container_of(node, struct ust_app_channel, ust_objd_node);
5119
5120 error:
5121 return ua_chan;
5122 }
5123
5124 /*
5125 * Reply to a register channel notification from an application on the notify
5126 * socket. The channel metadata is also created.
5127 *
5128 * The session UST registry lock is acquired in this function.
5129 *
5130 * On success 0 is returned else a negative value.
5131 */
5132 static int reply_ust_register_channel(int sock, int sobjd, int cobjd,
5133 size_t nr_fields, struct ustctl_field *fields)
5134 {
5135 int ret, ret_code = 0;
5136 uint32_t chan_id, reg_count;
5137 uint64_t chan_reg_key;
5138 enum ustctl_channel_header type;
5139 struct ust_app *app;
5140 struct ust_app_channel *ua_chan;
5141 struct ust_app_session *ua_sess;
5142 struct ust_registry_session *registry;
5143 struct ust_registry_channel *chan_reg;
5144
5145 rcu_read_lock();
5146
5147 /* Lookup application. If not found, there is a code flow error. */
5148 app = find_app_by_notify_sock(sock);
5149 if (!app) {
5150 DBG("Application socket %d is being teardown. Abort event notify",
5151 sock);
5152 ret = 0;
5153 free(fields);
5154 goto error_rcu_unlock;
5155 }
5156
5157 /* Lookup channel by UST object descriptor. */
5158 ua_chan = find_channel_by_objd(app, cobjd);
5159 if (!ua_chan) {
5160 DBG("Application channel is being teardown. Abort event notify");
5161 ret = 0;
5162 free(fields);
5163 goto error_rcu_unlock;
5164 }
5165
5166 assert(ua_chan->session);
5167 ua_sess = ua_chan->session;
5168
5169 /* Get right session registry depending on the session buffer type. */
5170 registry = get_session_registry(ua_sess);
5171 assert(registry);
5172
5173 /* Depending on the buffer type, a different channel key is used. */
5174 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
5175 chan_reg_key = ua_chan->tracing_channel_id;
5176 } else {
5177 chan_reg_key = ua_chan->key;
5178 }
5179
5180 pthread_mutex_lock(&registry->lock);
5181
5182 chan_reg = ust_registry_channel_find(registry, chan_reg_key);
5183 assert(chan_reg);
5184
5185 if (!chan_reg->register_done) {
5186 reg_count = ust_registry_get_event_count(chan_reg);
5187 if (reg_count < 31) {
5188 type = USTCTL_CHANNEL_HEADER_COMPACT;
5189 } else {
5190 type = USTCTL_CHANNEL_HEADER_LARGE;
5191 }
5192
5193 chan_reg->nr_ctx_fields = nr_fields;
5194 chan_reg->ctx_fields = fields;
5195 chan_reg->header_type = type;
5196 } else {
5197 /* Get current already assigned values. */
5198 type = chan_reg->header_type;
5199 free(fields);
5200 /* Set to NULL so the error path does not do a double free. */
5201 fields = NULL;
5202 }
5203 /* Channel id is set during the object creation. */
5204 chan_id = chan_reg->chan_id;
5205
5206 /* Append to metadata */
5207 if (!chan_reg->metadata_dumped) {
5208 ret_code = ust_metadata_channel_statedump(registry, chan_reg);
5209 if (ret_code) {
5210 ERR("Error appending channel metadata (errno = %d)", ret_code);
5211 goto reply;
5212 }
5213 }
5214
5215 reply:
5216 DBG3("UST app replying to register channel key %" PRIu64
5217 " with id %u, type: %d, ret: %d", chan_reg_key, chan_id, type,
5218 ret_code);
5219
5220 ret = ustctl_reply_register_channel(sock, chan_id, type, ret_code);
5221 if (ret < 0) {
5222 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5223 ERR("UST app reply channel failed with ret %d", ret);
5224 } else {
5225 DBG3("UST app reply channel failed. Application died");
5226 }
5227 goto error;
5228 }
5229
5230 /* This channel registry registration is completed. */
5231 chan_reg->register_done = 1;
5232
5233 error:
5234 pthread_mutex_unlock(&registry->lock);
5235 error_rcu_unlock:
5236 rcu_read_unlock();
5237 if (ret) {
5238 free(fields);
5239 }
5240 return ret;
5241 }
5242
/*
 * Add event to the UST channel registry. When the event is added to the
 * registry, the metadata is also created. Once done, this replies to the
 * application with the appropriate error code.
 *
 * The session UST registry lock is acquired in the function.
 *
 * This function takes ownership of sig, fields and model_emf_uri: they are
 * either freed on the early-exit paths or handed to
 * ust_registry_create_event().
 *
 * On success 0 is returned else a negative value.
 */
static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
		char *sig, size_t nr_fields, struct ustctl_field *fields,
		int loglevel_value, char *model_emf_uri)
{
	int ret, ret_code;
	uint32_t event_id = 0;
	uint64_t chan_reg_key;
	struct ust_app *app;
	struct ust_app_channel *ua_chan;
	struct ust_app_session *ua_sess;
	struct ust_registry_session *registry;

	rcu_read_lock();

	/* Lookup application. If not found, there is a code flow error. */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		DBG("Application socket %d is being teardown. Abort event notify",
				sock);
		ret = 0;
		free(sig);
		free(fields);
		free(model_emf_uri);
		goto error_rcu_unlock;
	}

	/* Lookup channel by UST object descriptor. */
	ua_chan = find_channel_by_objd(app, cobjd);
	if (!ua_chan) {
		DBG("Application channel is being teardown. Abort event notify");
		ret = 0;
		free(sig);
		free(fields);
		free(model_emf_uri);
		goto error_rcu_unlock;
	}

	assert(ua_chan->session);
	ua_sess = ua_chan->session;

	registry = get_session_registry(ua_sess);
	assert(registry);

	/* Depending on the buffer type, a different channel key is used. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
		chan_reg_key = ua_chan->tracing_channel_id;
	} else {
		chan_reg_key = ua_chan->key;
	}

	pthread_mutex_lock(&registry->lock);

	/*
	 * From this point on, this call acquires the ownership of the sig, fields
	 * and model_emf_uri meaning any free are done inside it if needed. These
	 * three variables MUST NOT be read/write after this.
	 */
	ret_code = ust_registry_create_event(registry, chan_reg_key,
			sobjd, cobjd, name, sig, nr_fields, fields,
			loglevel_value, model_emf_uri, ua_sess->buffer_type,
			&event_id, app);

	/*
	 * The return value is returned to ustctl so in case of an error, the
	 * application can be notified. In case of an error, it's important not to
	 * return a negative error or else the application will get closed.
	 */
	ret = ustctl_reply_register_event(sock, event_id, ret_code);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app reply event failed with ret %d", ret);
		} else {
			DBG3("UST app reply event failed. Application died");
		}
		/*
		 * No need to wipe the create event since the application socket will
		 * get close on error hence cleaning up everything by itself.
		 */
		goto error;
	}

	DBG3("UST registry event %s with id %" PRId32 " added successfully",
			name, event_id);

error:
	pthread_mutex_unlock(&registry->lock);
error_rcu_unlock:
	rcu_read_unlock();
	return ret;
}
5341
/*
 * Add enum to the UST session registry. Once done, this replies to the
 * application with the appropriate error code.
 *
 * The session UST registry lock is acquired within this function.
 *
 * This function takes ownership of "entries": it is either freed on the
 * early-exit paths or handed to ust_registry_create_or_find_enum().
 *
 * On success 0 is returned else a negative value.
 */
static int add_enum_ust_registry(int sock, int sobjd, char *name,
		struct ustctl_enum_entry *entries, size_t nr_entries)
{
	int ret = 0, ret_code;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_registry_session *registry;
	uint64_t enum_id = -1ULL;

	rcu_read_lock();

	/* Lookup application. If not found, there is a code flow error. */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		/* Not an error; the application is simply being torn down. */
		DBG("Application socket %d is being torn down. Aborting enum registration",
				sock);
		free(entries);
		goto error_rcu_unlock;
	}

	/* Lookup session by UST object descriptor. */
	ua_sess = find_session_by_objd(app, sobjd);
	if (!ua_sess) {
		/* Not an error; the application session is being torn down. */
		DBG("Application session is being torn down. Aborting enum registration.");
		free(entries);
		goto error_rcu_unlock;
	}

	registry = get_session_registry(ua_sess);
	assert(registry);

	pthread_mutex_lock(&registry->lock);

	/*
	 * From this point on, the callee acquires the ownership of
	 * entries. The variable entries MUST NOT be read/written after
	 * call.
	 */
	ret_code = ust_registry_create_or_find_enum(registry, sobjd, name,
			entries, nr_entries, &enum_id);
	entries = NULL;

	/*
	 * The return value is returned to ustctl so in case of an error, the
	 * application can be notified. In case of an error, it's important not to
	 * return a negative error or else the application will get closed.
	 */
	ret = ustctl_reply_register_enum(sock, enum_id, ret_code);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app reply enum failed with ret %d", ret);
		} else {
			DBG3("UST app reply enum failed. Application died");
		}
		/*
		 * No need to wipe the create enum since the application socket will
		 * get close on error hence cleaning up everything by itself.
		 */
		goto error;
	}

	DBG3("UST registry enum %s added successfully or already found", name);

error:
	pthread_mutex_unlock(&registry->lock);
error_rcu_unlock:
	rcu_read_unlock();
	return ret;
}
5421
/*
 * Handle application notification through the given notify socket.
 *
 * Reads the notify command then dispatches to the matching registration
 * handler (event, channel or enum). Dynamic payloads received from ustctl
 * are owned by the handler, which frees them as needed.
 *
 * Return 0 on success or else a negative value.
 */
int ust_app_recv_notify(int sock)
{
	int ret;
	enum ustctl_notify_cmd cmd;

	DBG3("UST app receiving notify from sock %d", sock);

	ret = ustctl_recv_notify(sock, &cmd);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app recv notify failed with ret %d", ret);
		} else {
			DBG3("UST app recv notify failed. Application died");
		}
		goto error;
	}

	switch (cmd) {
	case USTCTL_NOTIFY_CMD_EVENT:
	{
		int sobjd, cobjd, loglevel_value;
		char name[LTTNG_UST_SYM_NAME_LEN], *sig, *model_emf_uri;
		size_t nr_fields;
		struct ustctl_field *fields;

		DBG2("UST app ustctl register event received");

		ret = ustctl_recv_register_event(sock, &sobjd, &cobjd, name,
				&loglevel_value, &sig, &nr_fields, &fields,
				&model_emf_uri);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app recv event failed with ret %d", ret);
			} else {
				DBG3("UST app recv event failed. Application died");
			}
			goto error;
		}

		/*
		 * Add event to the UST registry coming from the notify socket. This
		 * call will free if needed the sig, fields and model_emf_uri. This
		 * code path loses the ownership of these variables and transfers
		 * them to the called function.
		 */
		ret = add_event_ust_registry(sock, sobjd, cobjd, name, sig, nr_fields,
				fields, loglevel_value, model_emf_uri);
		if (ret < 0) {
			goto error;
		}

		break;
	}
	case USTCTL_NOTIFY_CMD_CHANNEL:
	{
		int sobjd, cobjd;
		size_t nr_fields;
		struct ustctl_field *fields;

		DBG2("UST app ustctl register channel received");

		ret = ustctl_recv_register_channel(sock, &sobjd, &cobjd, &nr_fields,
				&fields);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app recv channel failed with ret %d", ret);
			} else {
				DBG3("UST app recv channel failed. Application died");
			}
			goto error;
		}

		/*
		 * The fields ownership is transferred to this function call meaning
		 * that if needed it will be freed. After this, it's invalid to access
		 * fields or clean it up.
		 */
		ret = reply_ust_register_channel(sock, sobjd, cobjd, nr_fields,
				fields);
		if (ret < 0) {
			goto error;
		}

		break;
	}
	case USTCTL_NOTIFY_CMD_ENUM:
	{
		int sobjd;
		char name[LTTNG_UST_SYM_NAME_LEN];
		size_t nr_entries;
		struct ustctl_enum_entry *entries;

		DBG2("UST app ustctl register enum received");

		ret = ustctl_recv_register_enum(sock, &sobjd, name,
				&entries, &nr_entries);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app recv enum failed with ret %d", ret);
			} else {
				DBG3("UST app recv enum failed. Application died");
			}
			goto error;
		}

		/* Callee assumes ownership of entries */
		ret = add_enum_ust_registry(sock, sobjd, name,
				entries, nr_entries);
		if (ret < 0) {
			goto error;
		}

		break;
	}
	default:
		/* Should NEVER happen. */
		assert(0);
	}

error:
	return ret;
}
5549
5550 /*
5551 * Once the notify socket hangs up, this is called. First, it tries to find the
5552 * corresponding application. On failure, the call_rcu to close the socket is
5553 * executed. If an application is found, it tries to delete it from the notify
5554 * socket hash table. Whathever the result, it proceeds to the call_rcu.
5555 *
5556 * Note that an object needs to be allocated here so on ENOMEM failure, the
5557 * call RCU is not done but the rest of the cleanup is.
5558 */
5559 void ust_app_notify_sock_unregister(int sock)
5560 {
5561 int err_enomem = 0;
5562 struct lttng_ht_iter iter;
5563 struct ust_app *app;
5564 struct ust_app_notify_sock_obj *obj;
5565
5566 assert(sock >= 0);
5567
5568 rcu_read_lock();
5569
5570 obj = zmalloc(sizeof(*obj));
5571 if (!obj) {
5572 /*
5573 * An ENOMEM is kind of uncool. If this strikes we continue the
5574 * procedure but the call_rcu will not be called. In this case, we
5575 * accept the fd leak rather than possibly creating an unsynchronized
5576 * state between threads.
5577 *
5578 * TODO: The notify object should be created once the notify socket is
5579 * registered and stored independantely from the ust app object. The
5580 * tricky part is to synchronize the teardown of the application and
5581 * this notify object. Let's keep that in mind so we can avoid this
5582 * kind of shenanigans with ENOMEM in the teardown path.
5583 */
5584 err_enomem = 1;
5585 } else {
5586 obj->fd = sock;
5587 }
5588
5589 DBG("UST app notify socket unregister %d", sock);
5590
5591 /*
5592 * Lookup application by notify socket. If this fails, this means that the
5593 * hash table delete has already been done by the application
5594 * unregistration process so we can safely close the notify socket in a
5595 * call RCU.
5596 */
5597 app = find_app_by_notify_sock(sock);
5598 if (!app) {
5599 goto close_socket;
5600 }
5601
5602 iter.iter.node = &app->notify_sock_n.node;
5603
5604 /*
5605 * Whatever happens here either we fail or succeed, in both cases we have
5606 * to close the socket after a grace period to continue to the call RCU
5607 * here. If the deletion is successful, the application is not visible
5608 * anymore by other threads and is it fails it means that it was already
5609 * deleted from the hash table so either way we just have to close the
5610 * socket.
5611 */
5612 (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
5613
5614 close_socket:
5615 rcu_read_unlock();
5616
5617 /*
5618 * Close socket after a grace period to avoid for the socket to be reused
5619 * before the application object is freed creating potential race between
5620 * threads trying to add unique in the global hash table.
5621 */
5622 if (!err_enomem) {
5623 call_rcu(&obj->head, close_notify_sock_rcu);
5624 }
5625 }
5626
5627 /*
5628 * Destroy a ust app data structure and free its memory.
5629 */
5630 void ust_app_destroy(struct ust_app *app)
5631 {
5632 if (!app) {
5633 return;
5634 }
5635
5636 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
5637 }
5638
5639 /*
5640 * Take a snapshot for a given UST session. The snapshot is sent to the given
5641 * output.
5642 *
5643 * Return 0 on success or else a negative value.
5644 */
5645 int ust_app_snapshot_record(struct ltt_ust_session *usess,
5646 struct snapshot_output *output, int wait,
5647 uint64_t nb_packets_per_stream)
5648 {
5649 int ret = 0;
5650 unsigned int snapshot_done = 0;
5651 struct lttng_ht_iter iter;
5652 struct ust_app *app;
5653 char pathname[PATH_MAX];
5654
5655 assert(usess);
5656 assert(output);
5657
5658 rcu_read_lock();
5659
5660 switch (usess->buffer_type) {
5661 case LTTNG_BUFFER_PER_UID:
5662 {
5663 struct buffer_reg_uid *reg;
5664
5665 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
5666 struct buffer_reg_channel *reg_chan;
5667 struct consumer_socket *socket;
5668
5669 /* Get consumer socket to use to push the metadata.*/
5670 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
5671 usess->consumer);
5672 if (!socket) {
5673 ret = -EINVAL;
5674 goto error;
5675 }
5676
5677 memset(pathname, 0, sizeof(pathname));
5678 ret = snprintf(pathname, sizeof(pathname),
5679 DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH,
5680 reg->uid, reg->bits_per_long);
5681 if (ret < 0) {
5682 PERROR("snprintf snapshot path");
5683 goto error;
5684 }
5685
5686 /* Add the UST default trace dir to path. */
5687 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
5688 reg_chan, node.node) {
5689 ret = consumer_snapshot_channel(socket, reg_chan->consumer_key,
5690 output, 0, usess->uid, usess->gid, pathname, wait,
5691 nb_packets_per_stream);
5692 if (ret < 0) {
5693 goto error;
5694 }
5695 }
5696 ret = consumer_snapshot_channel(socket,
5697 reg->registry->reg.ust->metadata_key, output, 1,
5698 usess->uid, usess->gid, pathname, wait, 0);
5699 if (ret < 0) {
5700 goto error;
5701 }
5702 snapshot_done = 1;
5703 }
5704 break;
5705 }
5706 case LTTNG_BUFFER_PER_PID:
5707 {
5708 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5709 struct consumer_socket *socket;
5710 struct lttng_ht_iter chan_iter;
5711 struct ust_app_channel *ua_chan;
5712 struct ust_app_session *ua_sess;
5713 struct ust_registry_session *registry;
5714
5715 ua_sess = lookup_session_by_app(usess, app);
5716 if (!ua_sess) {
5717 /* Session not associated with this app. */
5718 continue;
5719 }
5720
5721 /* Get the right consumer socket for the application. */
5722 socket = consumer_find_socket_by_bitness(app->bits_per_long,
5723 output->consumer);
5724 if (!socket) {
5725 ret = -EINVAL;
5726 goto error;
5727 }
5728
5729 /* Add the UST default trace dir to path. */
5730 memset(pathname, 0, sizeof(pathname));
5731 ret = snprintf(pathname, sizeof(pathname), DEFAULT_UST_TRACE_DIR "/%s",
5732 ua_sess->path);
5733 if (ret < 0) {
5734 PERROR("snprintf snapshot path");
5735 goto error;
5736 }
5737
5738 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
5739 ua_chan, node.node) {
5740 ret = consumer_snapshot_channel(socket, ua_chan->key, output,
5741 0, ua_sess->euid, ua_sess->egid, pathname, wait,
5742 nb_packets_per_stream);
5743 if (ret < 0) {
5744 goto error;
5745 }
5746 }
5747
5748 registry = get_session_registry(ua_sess);
5749 assert(registry);
5750 ret = consumer_snapshot_channel(socket, registry->metadata_key, output,
5751 1, ua_sess->euid, ua_sess->egid, pathname, wait, 0);
5752 if (ret < 0) {
5753 goto error;
5754 }
5755 snapshot_done = 1;
5756 }
5757 break;
5758 }
5759 default:
5760 assert(0);
5761 break;
5762 }
5763
5764 if (!snapshot_done) {
5765 /*
5766 * If no snapshot was made and we are not in the error path, this means
5767 * that there are no buffers thus no (prior) application to snapshot
5768 * data from so we have simply NO data.
5769 */
5770 ret = -ENODATA;
5771 }
5772
5773 error:
5774 rcu_read_unlock();
5775 return ret;
5776 }
5777
5778 /*
5779 * Return the size taken by one more packet per stream.
5780 */
5781 uint64_t ust_app_get_size_one_more_packet_per_stream(struct ltt_ust_session *usess,
5782 uint64_t cur_nr_packets)
5783 {
5784 uint64_t tot_size = 0;
5785 struct ust_app *app;
5786 struct lttng_ht_iter iter;
5787
5788 assert(usess);
5789
5790 switch (usess->buffer_type) {
5791 case LTTNG_BUFFER_PER_UID:
5792 {
5793 struct buffer_reg_uid *reg;
5794
5795 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
5796 struct buffer_reg_channel *reg_chan;
5797
5798 rcu_read_lock();
5799 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
5800 reg_chan, node.node) {
5801 if (cur_nr_packets >= reg_chan->num_subbuf) {
5802 /*
5803 * Don't take channel into account if we
5804 * already grab all its packets.
5805 */
5806 continue;
5807 }
5808 tot_size += reg_chan->subbuf_size * reg_chan->stream_count;
5809 }
5810 rcu_read_unlock();
5811 }
5812 break;
5813 }
5814 case LTTNG_BUFFER_PER_PID:
5815 {
5816 rcu_read_lock();
5817 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5818 struct ust_app_channel *ua_chan;
5819 struct ust_app_session *ua_sess;
5820 struct lttng_ht_iter chan_iter;
5821
5822 ua_sess = lookup_session_by_app(usess, app);
5823 if (!ua_sess) {
5824 /* Session not associated with this app. */
5825 continue;
5826 }
5827
5828 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
5829 ua_chan, node.node) {
5830 if (cur_nr_packets >= ua_chan->attr.num_subbuf) {
5831 /*
5832 * Don't take channel into account if we
5833 * already grab all its packets.
5834 */
5835 continue;
5836 }
5837 tot_size += ua_chan->attr.subbuf_size * ua_chan->streams.count;
5838 }
5839 }
5840 rcu_read_unlock();
5841 break;
5842 }
5843 default:
5844 assert(0);
5845 break;
5846 }
5847
5848 return tot_size;
5849 }
This page took 0.194121 seconds and 5 git commands to generate.