d8adccde214925f2f9cd68d20ef65dadc3617951
[lttng-tools.git] / src / bin / lttng-sessiond / ust-app.c
1 /*
2 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License, version 2 only,
6 * as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License along
14 * with this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
16 */
17
18 #define _GNU_SOURCE
19 #include <errno.h>
20 #include <inttypes.h>
21 #include <pthread.h>
22 #include <stdio.h>
23 #include <stdlib.h>
24 #include <string.h>
25 #include <sys/stat.h>
26 #include <sys/types.h>
27 #include <unistd.h>
28 #include <urcu/compiler.h>
29 #include <lttng/ust-error.h>
30 #include <signal.h>
31
32 #include <common/common.h>
33 #include <common/sessiond-comm/sessiond-comm.h>
34
35 #include "buffer-registry.h"
36 #include "fd-limit.h"
37 #include "health-sessiond.h"
38 #include "ust-app.h"
39 #include "ust-consumer.h"
40 #include "ust-ctl.h"
41 #include "utils.h"
42
43 /* Next available channel key. Access under next_channel_key_lock. */
44 static uint64_t _next_channel_key;
45 static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;
46
47 /* Next available session ID. Access under next_session_id_lock. */
48 static uint64_t _next_session_id;
49 static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;
50
51 /*
52 * Return the incremented value of next_channel_key.
53 */
54 static uint64_t get_next_channel_key(void)
55 {
56 uint64_t ret;
57
58 pthread_mutex_lock(&next_channel_key_lock);
59 ret = ++_next_channel_key;
60 pthread_mutex_unlock(&next_channel_key_lock);
61 return ret;
62 }
63
64 /*
65 * Return the atomically incremented value of next_session_id.
66 */
67 static uint64_t get_next_session_id(void)
68 {
69 uint64_t ret;
70
71 pthread_mutex_lock(&next_session_id_lock);
72 ret = ++_next_session_id;
73 pthread_mutex_unlock(&next_session_id_lock);
74 return ret;
75 }
76
77 static void copy_channel_attr_to_ustctl(
78 struct ustctl_consumer_channel_attr *attr,
79 struct lttng_ust_channel_attr *uattr)
80 {
81 /* Copy event attributes since the layout is different. */
82 attr->subbuf_size = uattr->subbuf_size;
83 attr->num_subbuf = uattr->num_subbuf;
84 attr->overwrite = uattr->overwrite;
85 attr->switch_timer_interval = uattr->switch_timer_interval;
86 attr->read_timer_interval = uattr->read_timer_interval;
87 attr->output = uattr->output;
88 }
89
90 /*
91 * Match function for the hash table lookup.
92 *
93 * It matches an ust app event based on three attributes which are the event
94 * name, the filter bytecode and the loglevel.
95 */
96 static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
97 {
98 struct ust_app_event *event;
99 const struct ust_app_ht_key *key;
100
101 assert(node);
102 assert(_key);
103
104 event = caa_container_of(node, struct ust_app_event, node.node);
105 key = _key;
106
107 /* Match the 4 elements of the key: name, filter, loglevel, exclusions */
108
109 /* Event name */
110 if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
111 goto no_match;
112 }
113
114 /* Event loglevel. */
115 if (event->attr.loglevel != key->loglevel) {
116 if (event->attr.loglevel_type == LTTNG_UST_LOGLEVEL_ALL
117 && key->loglevel == 0 && event->attr.loglevel == -1) {
118 /*
119 * Match is accepted. This is because on event creation, the
120 * loglevel is set to -1 if the event loglevel type is ALL so 0 and
121 * -1 are accepted for this loglevel type since 0 is the one set by
122 * the API when receiving an enable event.
123 */
124 } else {
125 goto no_match;
126 }
127 }
128
129 /* One of the filters is NULL, fail. */
130 if ((key->filter && !event->filter) || (!key->filter && event->filter)) {
131 goto no_match;
132 }
133
134 if (key->filter && event->filter) {
135 /* Both filters exists, check length followed by the bytecode. */
136 if (event->filter->len != key->filter->len ||
137 memcmp(event->filter->data, key->filter->data,
138 event->filter->len) != 0) {
139 goto no_match;
140 }
141 }
142
143 /* One of the exclusions is NULL, fail. */
144 if ((key->exclusion && !event->exclusion) || (!key->exclusion && event->exclusion)) {
145 goto no_match;
146 }
147
148 if (key->exclusion && event->exclusion) {
149 /* Both exclusions exists, check count followed by the names. */
150 if (event->exclusion->count != key->exclusion->count ||
151 memcmp(event->exclusion->names, key->exclusion->names,
152 event->exclusion->count * LTTNG_UST_SYM_NAME_LEN) != 0) {
153 goto no_match;
154 }
155 }
156
157
158 /* Match. */
159 return 1;
160
161 no_match:
162 return 0;
163 }
164
165 /*
166 * Unique add of an ust app event in the given ht. This uses the custom
167 * ht_match_ust_app_event match function and the event name as hash.
168 */
169 static void add_unique_ust_app_event(struct ust_app_channel *ua_chan,
170 struct ust_app_event *event)
171 {
172 struct cds_lfht_node *node_ptr;
173 struct ust_app_ht_key key;
174 struct lttng_ht *ht;
175
176 assert(ua_chan);
177 assert(ua_chan->events);
178 assert(event);
179
180 ht = ua_chan->events;
181 key.name = event->attr.name;
182 key.filter = event->filter;
183 key.loglevel = event->attr.loglevel;
184 key.exclusion = event->exclusion;
185
186 node_ptr = cds_lfht_add_unique(ht->ht,
187 ht->hash_fct(event->node.key, lttng_ht_seed),
188 ht_match_ust_app_event, &key, &event->node.node);
189 assert(node_ptr == &event->node.node);
190 }
191
192 /*
193 * Close the notify socket from the given RCU head object. This MUST be called
194 * through a call_rcu().
195 */
196 static void close_notify_sock_rcu(struct rcu_head *head)
197 {
198 int ret;
199 struct ust_app_notify_sock_obj *obj =
200 caa_container_of(head, struct ust_app_notify_sock_obj, head);
201
202 /* Must have a valid fd here. */
203 assert(obj->fd >= 0);
204
205 ret = close(obj->fd);
206 if (ret) {
207 ERR("close notify sock %d RCU", obj->fd);
208 }
209 lttng_fd_put(LTTNG_FD_APPS, 1);
210
211 free(obj);
212 }
213
214 /*
215 * Return the session registry according to the buffer type of the given
216 * session.
217 *
218 * A registry per UID object MUST exists before calling this function or else
219 * it assert() if not found. RCU read side lock must be acquired.
220 */
221 static struct ust_registry_session *get_session_registry(
222 struct ust_app_session *ua_sess)
223 {
224 struct ust_registry_session *registry = NULL;
225
226 assert(ua_sess);
227
228 switch (ua_sess->buffer_type) {
229 case LTTNG_BUFFER_PER_PID:
230 {
231 struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
232 if (!reg_pid) {
233 goto error;
234 }
235 registry = reg_pid->registry->reg.ust;
236 break;
237 }
238 case LTTNG_BUFFER_PER_UID:
239 {
240 struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
241 ua_sess->tracing_id, ua_sess->bits_per_long, ua_sess->uid);
242 if (!reg_uid) {
243 goto error;
244 }
245 registry = reg_uid->registry->reg.ust;
246 break;
247 }
248 default:
249 assert(0);
250 };
251
252 error:
253 return registry;
254 }
255
256 /*
257 * Delete ust context safely. RCU read lock must be held before calling
258 * this function.
259 */
260 static
261 void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx)
262 {
263 int ret;
264
265 assert(ua_ctx);
266
267 if (ua_ctx->obj) {
268 ret = ustctl_release_object(sock, ua_ctx->obj);
269 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
270 ERR("UST app sock %d release ctx obj handle %d failed with ret %d",
271 sock, ua_ctx->obj->handle, ret);
272 }
273 free(ua_ctx->obj);
274 }
275 free(ua_ctx);
276 }
277
278 /*
279 * Delete ust app event safely. RCU read lock must be held before calling
280 * this function.
281 */
282 static
283 void delete_ust_app_event(int sock, struct ust_app_event *ua_event)
284 {
285 int ret;
286
287 assert(ua_event);
288
289 free(ua_event->filter);
290 if (ua_event->exclusion != NULL)
291 free(ua_event->exclusion);
292 if (ua_event->obj != NULL) {
293 ret = ustctl_release_object(sock, ua_event->obj);
294 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
295 ERR("UST app sock %d release event obj failed with ret %d",
296 sock, ret);
297 }
298 free(ua_event->obj);
299 }
300 free(ua_event);
301 }
302
303 /*
304 * Release ust data object of the given stream.
305 *
306 * Return 0 on success or else a negative value.
307 */
308 static int release_ust_app_stream(int sock, struct ust_app_stream *stream)
309 {
310 int ret = 0;
311
312 assert(stream);
313
314 if (stream->obj) {
315 ret = ustctl_release_object(sock, stream->obj);
316 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
317 ERR("UST app sock %d release stream obj failed with ret %d",
318 sock, ret);
319 }
320 lttng_fd_put(LTTNG_FD_APPS, 2);
321 free(stream->obj);
322 }
323
324 return ret;
325 }
326
/*
 * Delete ust app stream safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_stream(int sock, struct ust_app_stream *stream)
{
	assert(stream);

	/* Best effort: errors are already reported by the release helper. */
	(void) release_ust_app_stream(sock, stream);
	free(stream);
}
339
340 /*
341 * We need to execute ht_destroy outside of RCU read-side critical
342 * section and outside of call_rcu thread, so we postpone its execution
343 * using ht_cleanup_push. It is simpler than to change the semantic of
344 * the many callers of delete_ust_app_session().
345 */
346 static
347 void delete_ust_app_channel_rcu(struct rcu_head *head)
348 {
349 struct ust_app_channel *ua_chan =
350 caa_container_of(head, struct ust_app_channel, rcu_head);
351
352 ht_cleanup_push(ua_chan->ctx);
353 ht_cleanup_push(ua_chan->events);
354 free(ua_chan);
355 }
356
/*
 * Delete ust app channel safely. RCU read lock must be held before calling
 * this function.
 *
 * Teardown order: streams, contexts, events, the per-PID registry entry
 * (if any), then the tracer-side channel object. The structure itself and
 * its hash tables are freed after a grace period via call_rcu.
 */
static
void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;
	struct ust_app_stream *stream, *stmp;
	struct ust_registry_session *registry;

	assert(ua_chan);

	DBG3("UST app deleting channel %s", ua_chan->name);

	/* Wipe stream */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		cds_list_del(&stream->list);
		delete_ust_app_stream(sock, stream);
	}

	/* Wipe context */
	cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
		cds_list_del(&ua_ctx->list);
		ret = lttng_ht_del(ua_chan->ctx, &iter);
		assert(!ret);
		delete_ust_app_ctx(sock, ua_ctx);
	}

	/* Wipe events */
	cds_lfht_for_each_entry(ua_chan->events->ht, &iter.iter, ua_event,
			node.node) {
		ret = lttng_ht_del(ua_chan->events, &iter);
		assert(!ret);
		delete_ust_app_event(sock, ua_event);
	}

	if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
		/* Wipe and free registry from session registry. */
		registry = get_session_registry(ua_chan->session);
		if (registry) {
			ust_registry_channel_del_free(registry, ua_chan->key);
		}
	}

	if (ua_chan->obj != NULL) {
		/* Remove channel from application UST object descriptor. */
		iter.iter.node = &ua_chan->ust_objd_node.node;
		lttng_ht_del(app->ust_objd, &iter);
		/* EPIPE/EXITING simply mean the application died. */
		ret = ustctl_release_object(sock, ua_chan->obj);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release channel obj failed with ret %d",
					sock, ret);
		}
		/* Give back the fd slot accounted to this channel object. */
		lttng_fd_put(LTTNG_FD_APPS, 1);
		free(ua_chan->obj);
	}
	/* Hash tables are destroyed after a grace period; see the RCU callback. */
	call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
}
420
/*
 * Push metadata to consumer socket.
 *
 * The socket lock MUST be acquired.
 * The ust app session lock MUST be acquired.
 *
 * If send_zero_data is non-zero, a zero-length push is issued to the
 * consumer even when there is no new metadata to send.
 *
 * On success, return the len of metadata pushed or else a negative value.
 */
ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
		struct consumer_socket *socket, int send_zero_data)
{
	int ret;
	char *metadata_str = NULL;
	size_t len, offset;
	ssize_t ret_val;

	assert(registry);
	assert(socket);

	/*
	 * On a push metadata error either the consumer is dead or the metadata
	 * channel has been destroyed because its endpoint might have died (e.g:
	 * relayd). If so, the metadata closed flag is set to 1 so we deny pushing
	 * metadata again which is not valid anymore on the consumer side.
	 *
	 * The ust app session mutex locked allows us to make this check without
	 * the registry lock.
	 */
	if (registry->metadata_closed) {
		return -EPIPE;
	}

	/* The registry lock protects the metadata_len/metadata_len_sent sample. */
	pthread_mutex_lock(&registry->lock);

	/* Only send the part that has not been sent yet. */
	offset = registry->metadata_len_sent;
	len = registry->metadata_len - registry->metadata_len_sent;
	if (len == 0) {
		DBG3("No metadata to push for metadata key %" PRIu64,
				registry->metadata_key);
		ret_val = len;
		if (send_zero_data) {
			DBG("No metadata to push");
			/* metadata_str stays NULL; a zero-length push is sent. */
			goto push_data;
		}
		goto end;
	}

	/* Allocate only what we have to send. */
	metadata_str = zmalloc(len);
	if (!metadata_str) {
		PERROR("zmalloc ust app metadata string");
		ret_val = -ENOMEM;
		goto error;
	}
	/* Copy what we haven't send out. */
	memcpy(metadata_str, registry->metadata + offset, len);
	registry->metadata_len_sent += len;

push_data:
	/*
	 * Drop the registry lock before the consumer call; the segment to
	 * send has already been sampled and accounted above.
	 */
	pthread_mutex_unlock(&registry->lock);
	ret = consumer_push_metadata(socket, registry->metadata_key,
			metadata_str, len, offset);
	if (ret < 0) {
		ret_val = ret;
		goto error_push;
	}

	free(metadata_str);
	return len;

end:
error:
	pthread_mutex_unlock(&registry->lock);
error_push:
	/* The lock is already released on this path. */
	free(metadata_str);
	return ret_val;
}
498
/*
 * For a given application and session, push metadata to consumer. The session
 * lock MUST be acquired here before calling this.
 * Either sock or consumer is required : if sock is NULL, the default
 * socket to send the metadata is retrieved from consumer, if sock
 * is not NULL we use it to send the metadata.
 *
 * Return 0 on success else a negative error.
 */
static int push_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret_val;
	ssize_t ret;
	struct consumer_socket *socket;

	assert(registry);
	assert(consumer);

	/* The consumer socket lookup below is RCU protected. */
	rcu_read_lock();

	/*
	 * Means that no metadata was assigned to the session. This can happens if
	 * no start has been done previously.
	 */
	if (!registry->metadata_key) {
		/* Nothing to push; not an error. */
		ret_val = 0;
		goto end_rcu_unlock;
	}

	/* Get consumer socket to use to push the metadata.*/
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret_val = -1;
		goto error_rcu_unlock;
	}

	/*
	 * TODO: Currently, we hold the socket lock around sampling of the next
	 * metadata segment to ensure we send metadata over the consumer socket in
	 * the correct order. This makes the registry lock nest inside the socket
	 * lock.
	 *
	 * Please note that this is a temporary measure: we should move this lock
	 * back into ust_consumer_push_metadata() when the consumer gets the
	 * ability to reorder the metadata it receives.
	 */
	pthread_mutex_lock(socket->lock);
	ret = ust_app_push_metadata(registry, socket, 0);
	pthread_mutex_unlock(socket->lock);
	if (ret < 0) {
		ret_val = ret;
		goto error_rcu_unlock;
	}

	rcu_read_unlock();
	return 0;

error_rcu_unlock:
	/*
	 * On error, flag the registry that the metadata is closed. We were unable
	 * to push anything and this means that either the consumer is not
	 * responding or the metadata cache has been destroyed on the consumer.
	 */
	registry->metadata_closed = 1;
end_rcu_unlock:
	rcu_read_unlock();
	return ret_val;
}
569
/*
 * Send to the consumer a close metadata command for the given session. Once
 * done, the metadata channel is deleted and the session metadata pointer is
 * nullified. The session lock MUST be acquired here unless the application is
 * in the destroy path.
 *
 * Return 0 on success else a negative value.
 */
static int close_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret;
	struct consumer_socket *socket;

	assert(registry);
	assert(consumer);

	/* The consumer socket lookup below is RCU protected. */
	rcu_read_lock();

	/* Nothing to close if no key was ever assigned or already closed. */
	if (!registry->metadata_key || registry->metadata_closed) {
		ret = 0;
		goto end;
	}

	/* Get consumer socket to use to push the metadata.*/
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret = -1;
		goto error;
	}

	ret = consumer_close_metadata(socket, registry->metadata_key);
	if (ret < 0) {
		goto error;
	}

error:
	/*
	 * Metadata closed. Even on error this means that the consumer is not
	 * responding or not found so either way a second close should NOT be emit
	 * for this registry.
	 */
	registry->metadata_closed = 1;
end:
	rcu_read_unlock();
	return ret;
}
618
619 /*
620 * We need to execute ht_destroy outside of RCU read-side critical
621 * section and outside of call_rcu thread, so we postpone its execution
622 * using ht_cleanup_push. It is simpler than to change the semantic of
623 * the many callers of delete_ust_app_session().
624 */
625 static
626 void delete_ust_app_session_rcu(struct rcu_head *head)
627 {
628 struct ust_app_session *ua_sess =
629 caa_container_of(head, struct ust_app_session, rcu_head);
630
631 ht_cleanup_push(ua_sess->channels);
632 free(ua_sess);
633 }
634
/*
 * Delete ust app session safely. RCU read lock must be held before calling
 * this function.
 *
 * Pushes (and possibly closes) the metadata, deletes every channel, removes
 * the per-PID buffer registry (if any), releases the tracer-side session
 * handle and defers the final free past an RCU grace period.
 */
static
void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_channel *ua_chan;
	struct ust_registry_session *registry;

	assert(ua_sess);

	/* Serialize against concurrent users of this session. */
	pthread_mutex_lock(&ua_sess->lock);

	registry = get_session_registry(ua_sess);
	if (registry && !registry->metadata_closed) {
		/* Push metadata for application before freeing the application. */
		(void) push_metadata(registry, ua_sess->consumer);

		/*
		 * Don't ask to close metadata for global per UID buffers. Close
		 * metadata only on destroy trace session in this case. Also, the
		 * previous push metadata could have flag the metadata registry to
		 * close so don't send a close command if closed.
		 */
		if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID &&
				!registry->metadata_closed) {
			/* And ask to close it for this session registry. */
			(void) close_metadata(registry, ua_sess->consumer);
		}
	}

	/* Tear down every channel of the session. */
	cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
			node.node) {
		ret = lttng_ht_del(ua_sess->channels, &iter);
		assert(!ret);
		delete_ust_app_channel(sock, ua_chan, app);
	}

	/* In case of per PID, the registry is kept in the session. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
		struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
		if (reg_pid) {
			buffer_reg_pid_remove(reg_pid);
			buffer_reg_pid_destroy(reg_pid);
		}
	}

	/* -1 means the tracer-side session handle was never created. */
	if (ua_sess->handle != -1) {
		ret = ustctl_release_handle(sock, ua_sess->handle);
		/* EPIPE/EXITING simply mean the application died. */
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release session handle failed with ret %d",
					sock, ret);
		}
	}
	pthread_mutex_unlock(&ua_sess->lock);

	/* Hash table destruction and freeing are deferred past a grace period. */
	call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
}
697
/*
 * Delete a traceable application structure from the global list. Never call
 * this function outside of a call_rcu call.
 *
 * RCU read side lock should _NOT_ be held when calling this function.
 */
static
void delete_ust_app(struct ust_app *app)
{
	int ret, sock;
	struct ust_app_session *ua_sess, *tmp_ua_sess;

	/* Delete ust app sessions info */
	sock = app->sock;
	/* Invalidate the socket in the structure before closing it below. */
	app->sock = -1;

	/* Wipe sessions */
	cds_list_for_each_entry_safe(ua_sess, tmp_ua_sess, &app->teardown_head,
			teardown_node) {
		/* Free every object in the session and the session. */
		rcu_read_lock();
		delete_ust_app_session(sock, ua_sess, app);
		rcu_read_unlock();
	}

	/* Defer hash table destruction to the ht_cleanup thread. */
	ht_cleanup_push(app->sessions);
	ht_cleanup_push(app->ust_objd);

	/*
	 * Wait until we have deleted the application from the sock hash table
	 * before closing this socket, otherwise an application could re-use the
	 * socket ID and race with the teardown, using the same hash table entry.
	 *
	 * It's OK to leave the close in call_rcu. We want it to stay unique for
	 * all RCU readers that could run concurrently with unregister app,
	 * therefore we _need_ to only close that socket after a grace period. So
	 * it should stay in this RCU callback.
	 *
	 * This close() is a very important step of the synchronization model so
	 * every modification to this function must be carefully reviewed.
	 */
	ret = close(sock);
	if (ret) {
		PERROR("close");
	}
	/* Give back the fd slot accounted to this application socket. */
	lttng_fd_put(LTTNG_FD_APPS, 1);

	DBG2("UST app pid %d deleted", app->pid);
	free(app);
}
748
749 /*
750 * URCU intermediate call to delete an UST app.
751 */
752 static
753 void delete_ust_app_rcu(struct rcu_head *head)
754 {
755 struct lttng_ht_node_ulong *node =
756 caa_container_of(head, struct lttng_ht_node_ulong, head);
757 struct ust_app *app =
758 caa_container_of(node, struct ust_app, pid_n);
759
760 DBG3("Call RCU deleting app PID %d", app->pid);
761 delete_ust_app(app);
762 }
763
764 /*
765 * Delete the session from the application ht and delete the data structure by
766 * freeing every object inside and releasing them.
767 */
768 static void destroy_app_session(struct ust_app *app,
769 struct ust_app_session *ua_sess)
770 {
771 int ret;
772 struct lttng_ht_iter iter;
773
774 assert(app);
775 assert(ua_sess);
776
777 iter.iter.node = &ua_sess->node.node;
778 ret = lttng_ht_del(app->sessions, &iter);
779 if (ret) {
780 /* Already scheduled for teardown. */
781 goto end;
782 }
783
784 /* Once deleted, free the data structure. */
785 delete_ust_app_session(app->sock, ua_sess, app);
786
787 end:
788 return;
789 }
790
791 /*
792 * Alloc new UST app session.
793 */
794 static
795 struct ust_app_session *alloc_ust_app_session(struct ust_app *app)
796 {
797 struct ust_app_session *ua_sess;
798
799 /* Init most of the default value by allocating and zeroing */
800 ua_sess = zmalloc(sizeof(struct ust_app_session));
801 if (ua_sess == NULL) {
802 PERROR("malloc");
803 goto error_free;
804 }
805
806 ua_sess->handle = -1;
807 ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
808 pthread_mutex_init(&ua_sess->lock, NULL);
809
810 return ua_sess;
811
812 error_free:
813 return NULL;
814 }
815
816 /*
817 * Alloc new UST app channel.
818 */
819 static
820 struct ust_app_channel *alloc_ust_app_channel(char *name,
821 struct ust_app_session *ua_sess,
822 struct lttng_ust_channel_attr *attr)
823 {
824 struct ust_app_channel *ua_chan;
825
826 /* Init most of the default value by allocating and zeroing */
827 ua_chan = zmalloc(sizeof(struct ust_app_channel));
828 if (ua_chan == NULL) {
829 PERROR("malloc");
830 goto error;
831 }
832
833 /* Setup channel name */
834 strncpy(ua_chan->name, name, sizeof(ua_chan->name));
835 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
836
837 ua_chan->enabled = 1;
838 ua_chan->handle = -1;
839 ua_chan->session = ua_sess;
840 ua_chan->key = get_next_channel_key();
841 ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
842 ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
843 lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);
844
845 CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
846 CDS_INIT_LIST_HEAD(&ua_chan->ctx_list);
847
848 /* Copy attributes */
849 if (attr) {
850 /* Translate from lttng_ust_channel to ustctl_consumer_channel_attr. */
851 ua_chan->attr.subbuf_size = attr->subbuf_size;
852 ua_chan->attr.num_subbuf = attr->num_subbuf;
853 ua_chan->attr.overwrite = attr->overwrite;
854 ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
855 ua_chan->attr.read_timer_interval = attr->read_timer_interval;
856 ua_chan->attr.output = attr->output;
857 }
858 /* By default, the channel is a per cpu channel. */
859 ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
860
861 DBG3("UST app channel %s allocated", ua_chan->name);
862
863 return ua_chan;
864
865 error:
866 return NULL;
867 }
868
869 /*
870 * Allocate and initialize a UST app stream.
871 *
872 * Return newly allocated stream pointer or NULL on error.
873 */
874 struct ust_app_stream *ust_app_alloc_stream(void)
875 {
876 struct ust_app_stream *stream = NULL;
877
878 stream = zmalloc(sizeof(*stream));
879 if (stream == NULL) {
880 PERROR("zmalloc ust app stream");
881 goto error;
882 }
883
884 /* Zero could be a valid value for a handle so flag it to -1. */
885 stream->handle = -1;
886
887 error:
888 return stream;
889 }
890
891 /*
892 * Alloc new UST app event.
893 */
894 static
895 struct ust_app_event *alloc_ust_app_event(char *name,
896 struct lttng_ust_event *attr)
897 {
898 struct ust_app_event *ua_event;
899
900 /* Init most of the default value by allocating and zeroing */
901 ua_event = zmalloc(sizeof(struct ust_app_event));
902 if (ua_event == NULL) {
903 PERROR("malloc");
904 goto error;
905 }
906
907 ua_event->enabled = 1;
908 strncpy(ua_event->name, name, sizeof(ua_event->name));
909 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
910 lttng_ht_node_init_str(&ua_event->node, ua_event->name);
911
912 /* Copy attributes */
913 if (attr) {
914 memcpy(&ua_event->attr, attr, sizeof(ua_event->attr));
915 }
916
917 DBG3("UST app event %s allocated", ua_event->name);
918
919 return ua_event;
920
921 error:
922 return NULL;
923 }
924
925 /*
926 * Alloc new UST app context.
927 */
928 static
929 struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context *uctx)
930 {
931 struct ust_app_ctx *ua_ctx;
932
933 ua_ctx = zmalloc(sizeof(struct ust_app_ctx));
934 if (ua_ctx == NULL) {
935 goto error;
936 }
937
938 CDS_INIT_LIST_HEAD(&ua_ctx->list);
939
940 if (uctx) {
941 memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
942 }
943
944 DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);
945
946 error:
947 return ua_ctx;
948 }
949
950 /*
951 * Allocate a filter and copy the given original filter.
952 *
953 * Return allocated filter or NULL on error.
954 */
955 static struct lttng_ust_filter_bytecode *alloc_copy_ust_app_filter(
956 struct lttng_ust_filter_bytecode *orig_f)
957 {
958 struct lttng_ust_filter_bytecode *filter = NULL;
959
960 /* Copy filter bytecode */
961 filter = zmalloc(sizeof(*filter) + orig_f->len);
962 if (!filter) {
963 PERROR("zmalloc alloc ust app filter");
964 goto error;
965 }
966
967 memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
968
969 error:
970 return filter;
971 }
972
973 /*
974 * Find an ust_app using the sock and return it. RCU read side lock must be
975 * held before calling this helper function.
976 */
977 struct ust_app *ust_app_find_by_sock(int sock)
978 {
979 struct lttng_ht_node_ulong *node;
980 struct lttng_ht_iter iter;
981
982 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
983 node = lttng_ht_iter_get_node_ulong(&iter);
984 if (node == NULL) {
985 DBG2("UST app find by sock %d not found", sock);
986 goto error;
987 }
988
989 return caa_container_of(node, struct ust_app, sock_n);
990
991 error:
992 return NULL;
993 }
994
995 /*
996 * Find an ust_app using the notify sock and return it. RCU read side lock must
997 * be held before calling this helper function.
998 */
999 static struct ust_app *find_app_by_notify_sock(int sock)
1000 {
1001 struct lttng_ht_node_ulong *node;
1002 struct lttng_ht_iter iter;
1003
1004 lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
1005 &iter);
1006 node = lttng_ht_iter_get_node_ulong(&iter);
1007 if (node == NULL) {
1008 DBG2("UST app find by notify sock %d not found", sock);
1009 goto error;
1010 }
1011
1012 return caa_container_of(node, struct ust_app, notify_sock_n);
1013
1014 error:
1015 return NULL;
1016 }
1017
1018 /*
1019 * Lookup for an ust app event based on event name, filter bytecode and the
1020 * event loglevel.
1021 *
1022 * Return an ust_app_event object or NULL on error.
1023 */
1024 static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
1025 char *name, struct lttng_ust_filter_bytecode *filter, int loglevel,
1026 const struct lttng_event_exclusion *exclusion)
1027 {
1028 struct lttng_ht_iter iter;
1029 struct lttng_ht_node_str *node;
1030 struct ust_app_event *event = NULL;
1031 struct ust_app_ht_key key;
1032
1033 assert(name);
1034 assert(ht);
1035
1036 /* Setup key for event lookup. */
1037 key.name = name;
1038 key.filter = filter;
1039 key.loglevel = loglevel;
1040 /* lttng_event_exclusion and lttng_ust_event_exclusion structures are similar */
1041 key.exclusion = (struct lttng_ust_event_exclusion *)exclusion;
1042
1043 /* Lookup using the event name as hash and a custom match fct. */
1044 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
1045 ht_match_ust_app_event, &key, &iter.iter);
1046 node = lttng_ht_iter_get_node_str(&iter);
1047 if (node == NULL) {
1048 goto end;
1049 }
1050
1051 event = caa_container_of(node, struct ust_app_event, node);
1052
1053 end:
1054 return event;
1055 }
1056
1057 /*
1058 * Create the channel context on the tracer.
1059 *
1060 * Called with UST app session lock held.
1061 */
1062 static
1063 int create_ust_channel_context(struct ust_app_channel *ua_chan,
1064 struct ust_app_ctx *ua_ctx, struct ust_app *app)
1065 {
1066 int ret;
1067
1068 health_code_update();
1069
1070 ret = ustctl_add_context(app->sock, &ua_ctx->ctx,
1071 ua_chan->obj, &ua_ctx->obj);
1072 if (ret < 0) {
1073 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1074 ERR("UST app create channel context failed for app (pid: %d) "
1075 "with ret %d", app->pid, ret);
1076 } else {
1077 /*
1078 * This is normal behavior, an application can die during the
1079 * creation process. Don't report an error so the execution can
1080 * continue normally.
1081 */
1082 ret = 0;
1083 DBG3("UST app disable event failed. Application is dead.");
1084 }
1085 goto error;
1086 }
1087
1088 ua_ctx->handle = ua_ctx->obj->handle;
1089
1090 DBG2("UST app context handle %d created successfully for channel %s",
1091 ua_ctx->handle, ua_chan->name);
1092
1093 error:
1094 health_code_update();
1095 return ret;
1096 }
1097
1098 /*
1099 * Set the filter on the tracer.
1100 */
1101 static
1102 int set_ust_event_filter(struct ust_app_event *ua_event,
1103 struct ust_app *app)
1104 {
1105 int ret;
1106
1107 health_code_update();
1108
1109 if (!ua_event->filter) {
1110 ret = 0;
1111 goto error;
1112 }
1113
1114 ret = ustctl_set_filter(app->sock, ua_event->filter,
1115 ua_event->obj);
1116 if (ret < 0) {
1117 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1118 ERR("UST app event %s filter failed for app (pid: %d) "
1119 "with ret %d", ua_event->attr.name, app->pid, ret);
1120 } else {
1121 /*
1122 * This is normal behavior, an application can die during the
1123 * creation process. Don't report an error so the execution can
1124 * continue normally.
1125 */
1126 ret = 0;
1127 DBG3("UST app filter event failed. Application is dead.");
1128 }
1129 goto error;
1130 }
1131
1132 DBG2("UST filter set successfully for event %s", ua_event->name);
1133
1134 error:
1135 health_code_update();
1136 return ret;
1137 }
1138
1139 /*
1140 * Set event exclusions on the tracer.
1141 */
1142 static
1143 int set_ust_event_exclusion(struct ust_app_event *ua_event,
1144 struct ust_app *app)
1145 {
1146 int ret;
1147
1148 health_code_update();
1149
1150 if (!ua_event->exclusion || !ua_event->exclusion->count) {
1151 ret = 0;
1152 goto error;
1153 }
1154
1155 ret = ustctl_set_exclusion(app->sock, ua_event->exclusion,
1156 ua_event->obj);
1157 if (ret < 0) {
1158 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1159 ERR("UST app event %s exclusions failed for app (pid: %d) "
1160 "with ret %d", ua_event->attr.name, app->pid, ret);
1161 } else {
1162 /*
1163 * This is normal behavior, an application can die during the
1164 * creation process. Don't report an error so the execution can
1165 * continue normally.
1166 */
1167 ret = 0;
1168 DBG3("UST app event exclusion failed. Application is dead.");
1169 }
1170 goto error;
1171 }
1172
1173 DBG2("UST exclusion set successfully for event %s", ua_event->name);
1174
1175 error:
1176 health_code_update();
1177 return ret;
1178 }
1179
1180 /*
1181 * Disable the specified event on to UST tracer for the UST session.
1182 */
1183 static int disable_ust_event(struct ust_app *app,
1184 struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
1185 {
1186 int ret;
1187
1188 health_code_update();
1189
1190 ret = ustctl_disable(app->sock, ua_event->obj);
1191 if (ret < 0) {
1192 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1193 ERR("UST app event %s disable failed for app (pid: %d) "
1194 "and session handle %d with ret %d",
1195 ua_event->attr.name, app->pid, ua_sess->handle, ret);
1196 } else {
1197 /*
1198 * This is normal behavior, an application can die during the
1199 * creation process. Don't report an error so the execution can
1200 * continue normally.
1201 */
1202 ret = 0;
1203 DBG3("UST app disable event failed. Application is dead.");
1204 }
1205 goto error;
1206 }
1207
1208 DBG2("UST app event %s disabled successfully for app (pid: %d)",
1209 ua_event->attr.name, app->pid);
1210
1211 error:
1212 health_code_update();
1213 return ret;
1214 }
1215
1216 /*
1217 * Disable the specified channel on to UST tracer for the UST session.
1218 */
1219 static int disable_ust_channel(struct ust_app *app,
1220 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1221 {
1222 int ret;
1223
1224 health_code_update();
1225
1226 ret = ustctl_disable(app->sock, ua_chan->obj);
1227 if (ret < 0) {
1228 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1229 ERR("UST app channel %s disable failed for app (pid: %d) "
1230 "and session handle %d with ret %d",
1231 ua_chan->name, app->pid, ua_sess->handle, ret);
1232 } else {
1233 /*
1234 * This is normal behavior, an application can die during the
1235 * creation process. Don't report an error so the execution can
1236 * continue normally.
1237 */
1238 ret = 0;
1239 DBG3("UST app disable channel failed. Application is dead.");
1240 }
1241 goto error;
1242 }
1243
1244 DBG2("UST app channel %s disabled successfully for app (pid: %d)",
1245 ua_chan->name, app->pid);
1246
1247 error:
1248 health_code_update();
1249 return ret;
1250 }
1251
1252 /*
1253 * Enable the specified channel on to UST tracer for the UST session.
1254 */
1255 static int enable_ust_channel(struct ust_app *app,
1256 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1257 {
1258 int ret;
1259
1260 health_code_update();
1261
1262 ret = ustctl_enable(app->sock, ua_chan->obj);
1263 if (ret < 0) {
1264 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1265 ERR("UST app channel %s enable failed for app (pid: %d) "
1266 "and session handle %d with ret %d",
1267 ua_chan->name, app->pid, ua_sess->handle, ret);
1268 } else {
1269 /*
1270 * This is normal behavior, an application can die during the
1271 * creation process. Don't report an error so the execution can
1272 * continue normally.
1273 */
1274 ret = 0;
1275 DBG3("UST app enable channel failed. Application is dead.");
1276 }
1277 goto error;
1278 }
1279
1280 ua_chan->enabled = 1;
1281
1282 DBG2("UST app channel %s enabled successfully for app (pid: %d)",
1283 ua_chan->name, app->pid);
1284
1285 error:
1286 health_code_update();
1287 return ret;
1288 }
1289
1290 /*
1291 * Enable the specified event on to UST tracer for the UST session.
1292 */
1293 static int enable_ust_event(struct ust_app *app,
1294 struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
1295 {
1296 int ret;
1297
1298 health_code_update();
1299
1300 ret = ustctl_enable(app->sock, ua_event->obj);
1301 if (ret < 0) {
1302 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1303 ERR("UST app event %s enable failed for app (pid: %d) "
1304 "and session handle %d with ret %d",
1305 ua_event->attr.name, app->pid, ua_sess->handle, ret);
1306 } else {
1307 /*
1308 * This is normal behavior, an application can die during the
1309 * creation process. Don't report an error so the execution can
1310 * continue normally.
1311 */
1312 ret = 0;
1313 DBG3("UST app enable event failed. Application is dead.");
1314 }
1315 goto error;
1316 }
1317
1318 DBG2("UST app event %s enabled successfully for app (pid: %d)",
1319 ua_event->attr.name, app->pid);
1320
1321 error:
1322 health_code_update();
1323 return ret;
1324 }
1325
/*
 * Send channel and stream buffer to application.
 *
 * Return 0 on success. On error, a negative value is returned.
 */
static int send_channel_pid_to_ust(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
{
	int ret;
	struct ust_app_stream *stream, *stmp;

	assert(app);
	assert(ua_sess);
	assert(ua_chan);

	health_code_update();

	DBG("UST app sending channel %s to UST app sock %d", ua_chan->name,
			app->sock);

	/* Send channel to the application. */
	ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Send all streams to application. */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
		if (ret < 0) {
			/*
			 * NOTE(review): streams already sent in earlier iterations have
			 * been unlinked and freed; the remaining list is left as-is for
			 * the caller's cleanup path — confirm against callers.
			 */
			goto error;
		}
		/* We don't need the stream anymore once sent to the tracer. */
		cds_list_del(&stream->list);
		delete_ust_app_stream(-1, stream);
	}
	/* Flag the channel that it is sent to the application. */
	ua_chan->is_sent = 1;

error:
	health_code_update();
	return ret;
}
1371
/*
 * Create the specified event onto the UST tracer for a UST session.
 *
 * Should be called with session mutex held.
 *
 * After creation, the event's filter and exclusions (if any) are pushed to
 * the tracer, and the event is disabled again when it was not flagged
 * enabled. Returns 0 on success (including "application died") or a
 * negative error code.
 */
static
int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
{
	int ret = 0;

	health_code_update();

	/* Create UST event on tracer */
	ret = ustctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
			&ua_event->obj);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Error ustctl create event %s for app pid: %d with ret %d",
					ua_event->attr.name, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app create event failed. Application is dead.");
		}
		goto error;
	}

	/* Cache the tracer-side handle for later commands on this event. */
	ua_event->handle = ua_event->obj->handle;

	DBG2("UST app event %s created successfully for pid:%d",
			ua_event->attr.name, app->pid);

	health_code_update();

	/* Set filter if one is present. */
	if (ua_event->filter) {
		ret = set_ust_event_filter(ua_event, app);
		if (ret < 0) {
			goto error;
		}
	}

	/* Set exclusions for the event */
	if (ua_event->exclusion) {
		ret = set_ust_event_exclusion(ua_event, app);
		if (ret < 0) {
			goto error;
		}
	}

	/* If event not enabled, disable it on the tracer */
	if (ua_event->enabled == 0) {
		ret = disable_ust_event(app, ua_sess, ua_event);
		if (ret < 0) {
			/*
			 * If we hit an EPERM, something is wrong with our disable call. If
			 * we get an EEXIST, there is a problem on the tracer side since we
			 * just created it.
			 */
			switch (ret) {
			case -LTTNG_UST_ERR_PERM:
				/* Code flow problem */
				assert(0);
				/*
				 * NOTE(review): assert(0) compiles out under NDEBUG, making
				 * this case fall through to the EXIST handling below —
				 * confirm this fallthrough is intended.
				 */
			case -LTTNG_UST_ERR_EXIST:
				/* It's OK for our use case. */
				ret = 0;
				break;
			default:
				break;
			}
			goto error;
		}
	}

error:
	health_code_update();
	return ret;
}
1455
1456 /*
1457 * Copy data between an UST app event and a LTT event.
1458 */
1459 static void shadow_copy_event(struct ust_app_event *ua_event,
1460 struct ltt_ust_event *uevent)
1461 {
1462 size_t exclusion_alloc_size;
1463
1464 strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
1465 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
1466
1467 ua_event->enabled = uevent->enabled;
1468
1469 /* Copy event attributes */
1470 memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));
1471
1472 /* Copy filter bytecode */
1473 if (uevent->filter) {
1474 ua_event->filter = alloc_copy_ust_app_filter(uevent->filter);
1475 /* Filter might be NULL here in case of ENONEM. */
1476 }
1477
1478 /* Copy exclusion data */
1479 if (uevent->exclusion) {
1480 exclusion_alloc_size = sizeof(struct lttng_ust_event_exclusion) +
1481 LTTNG_UST_SYM_NAME_LEN * uevent->exclusion->count;
1482 ua_event->exclusion = zmalloc(exclusion_alloc_size);
1483 if (ua_event->exclusion) {
1484 memcpy(ua_event->exclusion, uevent->exclusion, exclusion_alloc_size);
1485 }
1486 }
1487 }
1488
/*
 * Copy data between an UST app channel and a LTT channel.
 *
 * Shadow-copies the name, tracefile settings and buffer attributes of the
 * registry-side channel, then duplicates its contexts and events into the
 * per-application channel hash tables.
 */
static void shadow_copy_channel(struct ust_app_channel *ua_chan,
		struct ltt_ust_channel *uchan)
{
	struct lttng_ht_iter iter;
	struct ltt_ust_event *uevent;
	struct ltt_ust_context *uctx;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;

	DBG2("UST app shadow copy of channel %s started", ua_chan->name);

	/* Truncating copy of the channel name, always NUL-terminated. */
	strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
	ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';

	ua_chan->tracefile_size = uchan->tracefile_size;
	ua_chan->tracefile_count = uchan->tracefile_count;

	/* Copy event attributes since the layout is different. */
	ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
	ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
	ua_chan->attr.overwrite = uchan->attr.overwrite;
	ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
	ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
	ua_chan->attr.output = uchan->attr.output;
	/*
	 * Note that the attribute channel type is not set since the channel on the
	 * tracing registry side does not have this information.
	 */

	ua_chan->enabled = uchan->enabled;
	ua_chan->tracing_channel_id = uchan->id;

	/* Duplicate each context of the source channel. */
	cds_list_for_each_entry(uctx, &uchan->ctx_list, list) {
		ua_ctx = alloc_ust_app_ctx(&uctx->ctx);
		if (ua_ctx == NULL) {
			/* NOTE(review): allocation failure is silently skipped. */
			continue;
		}
		lttng_ht_node_init_ulong(&ua_ctx->node,
				(unsigned long) ua_ctx->ctx.ctx);
		lttng_ht_add_unique_ulong(ua_chan->ctx, &ua_ctx->node);
		cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
	}

	/* Copy all events from ltt ust channel to ust app channel */
	cds_lfht_for_each_entry(uchan->events->ht, &iter.iter, uevent, node.node) {
		ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
				uevent->filter, uevent->attr.loglevel, uevent->exclusion);
		if (ua_event == NULL) {
			DBG2("UST event %s not found on shadow copy channel",
					uevent->attr.name);
			ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
			if (ua_event == NULL) {
				/* NOTE(review): allocation failure is silently skipped. */
				continue;
			}
			shadow_copy_event(ua_event, uevent);
			add_unique_ust_app_event(ua_chan, ua_event);
		}
	}

	DBG3("UST app shadow copy of channel %s done", ua_chan->name);
}
1553
1554 /*
1555 * Copy data between a UST app session and a regular LTT session.
1556 */
1557 static void shadow_copy_session(struct ust_app_session *ua_sess,
1558 struct ltt_ust_session *usess, struct ust_app *app)
1559 {
1560 struct lttng_ht_node_str *ua_chan_node;
1561 struct lttng_ht_iter iter;
1562 struct ltt_ust_channel *uchan;
1563 struct ust_app_channel *ua_chan;
1564 time_t rawtime;
1565 struct tm *timeinfo;
1566 char datetime[16];
1567 int ret;
1568
1569 /* Get date and time for unique app path */
1570 time(&rawtime);
1571 timeinfo = localtime(&rawtime);
1572 strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);
1573
1574 DBG2("Shadow copy of session handle %d", ua_sess->handle);
1575
1576 ua_sess->tracing_id = usess->id;
1577 ua_sess->id = get_next_session_id();
1578 ua_sess->uid = app->uid;
1579 ua_sess->gid = app->gid;
1580 ua_sess->euid = usess->uid;
1581 ua_sess->egid = usess->gid;
1582 ua_sess->buffer_type = usess->buffer_type;
1583 ua_sess->bits_per_long = app->bits_per_long;
1584 /* There is only one consumer object per session possible. */
1585 ua_sess->consumer = usess->consumer;
1586 ua_sess->output_traces = usess->output_traces;
1587 ua_sess->live_timer_interval = usess->live_timer_interval;
1588
1589 switch (ua_sess->buffer_type) {
1590 case LTTNG_BUFFER_PER_PID:
1591 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
1592 DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
1593 datetime);
1594 break;
1595 case LTTNG_BUFFER_PER_UID:
1596 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
1597 DEFAULT_UST_TRACE_UID_PATH, ua_sess->uid, app->bits_per_long);
1598 break;
1599 default:
1600 assert(0);
1601 goto error;
1602 }
1603 if (ret < 0) {
1604 PERROR("asprintf UST shadow copy session");
1605 assert(0);
1606 goto error;
1607 }
1608
1609 /* Iterate over all channels in global domain. */
1610 cds_lfht_for_each_entry(usess->domain_global.channels->ht, &iter.iter,
1611 uchan, node.node) {
1612 struct lttng_ht_iter uiter;
1613
1614 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
1615 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
1616 if (ua_chan_node != NULL) {
1617 /* Session exist. Contiuing. */
1618 continue;
1619 }
1620
1621 DBG2("Channel %s not found on shadow session copy, creating it",
1622 uchan->name);
1623 ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
1624 if (ua_chan == NULL) {
1625 /* malloc failed FIXME: Might want to do handle ENOMEM .. */
1626 continue;
1627 }
1628 shadow_copy_channel(ua_chan, uchan);
1629 /*
1630 * The concept of metadata channel does not exist on the tracing
1631 * registry side of the session daemon so this can only be a per CPU
1632 * channel and not metadata.
1633 */
1634 ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
1635
1636 lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
1637 }
1638
1639 error:
1640 return;
1641 }
1642
/*
 * Lookup session wrapper.
 *
 * Positions iter on the app's session entry keyed by the tracing session id.
 */
static
void __lookup_session_by_app(struct ltt_ust_session *usess,
		struct ust_app *app, struct lttng_ht_iter *iter)
{
	/* Get right UST app session from app */
	lttng_ht_lookup(app->sessions, &usess->id, iter);
}
1653
1654 /*
1655 * Return ust app session from the app session hashtable using the UST session
1656 * id.
1657 */
1658 static struct ust_app_session *lookup_session_by_app(
1659 struct ltt_ust_session *usess, struct ust_app *app)
1660 {
1661 struct lttng_ht_iter iter;
1662 struct lttng_ht_node_u64 *node;
1663
1664 __lookup_session_by_app(usess, app, &iter);
1665 node = lttng_ht_iter_get_node_u64(&iter);
1666 if (node == NULL) {
1667 goto error;
1668 }
1669
1670 return caa_container_of(node, struct ust_app_session, node);
1671
1672 error:
1673 return NULL;
1674 }
1675
1676 /*
1677 * Setup buffer registry per PID for the given session and application. If none
1678 * is found, a new one is created, added to the global registry and
1679 * initialized. If regp is valid, it's set with the newly created object.
1680 *
1681 * Return 0 on success or else a negative value.
1682 */
1683 static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
1684 struct ust_app *app, struct buffer_reg_pid **regp)
1685 {
1686 int ret = 0;
1687 struct buffer_reg_pid *reg_pid;
1688
1689 assert(ua_sess);
1690 assert(app);
1691
1692 rcu_read_lock();
1693
1694 reg_pid = buffer_reg_pid_find(ua_sess->id);
1695 if (!reg_pid) {
1696 /*
1697 * This is the create channel path meaning that if there is NO
1698 * registry available, we have to create one for this session.
1699 */
1700 ret = buffer_reg_pid_create(ua_sess->id, &reg_pid);
1701 if (ret < 0) {
1702 goto error;
1703 }
1704 buffer_reg_pid_add(reg_pid);
1705 } else {
1706 goto end;
1707 }
1708
1709 /* Initialize registry. */
1710 ret = ust_registry_session_init(&reg_pid->registry->reg.ust, app,
1711 app->bits_per_long, app->uint8_t_alignment,
1712 app->uint16_t_alignment, app->uint32_t_alignment,
1713 app->uint64_t_alignment, app->long_alignment,
1714 app->byte_order, app->version.major,
1715 app->version.minor);
1716 if (ret < 0) {
1717 goto error;
1718 }
1719
1720 DBG3("UST app buffer registry per PID created successfully");
1721
1722 end:
1723 if (regp) {
1724 *regp = reg_pid;
1725 }
1726 error:
1727 rcu_read_unlock();
1728 return ret;
1729 }
1730
1731 /*
1732 * Setup buffer registry per UID for the given session and application. If none
1733 * is found, a new one is created, added to the global registry and
1734 * initialized. If regp is valid, it's set with the newly created object.
1735 *
1736 * Return 0 on success or else a negative value.
1737 */
1738 static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
1739 struct ust_app *app, struct buffer_reg_uid **regp)
1740 {
1741 int ret = 0;
1742 struct buffer_reg_uid *reg_uid;
1743
1744 assert(usess);
1745 assert(app);
1746
1747 rcu_read_lock();
1748
1749 reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
1750 if (!reg_uid) {
1751 /*
1752 * This is the create channel path meaning that if there is NO
1753 * registry available, we have to create one for this session.
1754 */
1755 ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
1756 LTTNG_DOMAIN_UST, &reg_uid);
1757 if (ret < 0) {
1758 goto error;
1759 }
1760 buffer_reg_uid_add(reg_uid);
1761 } else {
1762 goto end;
1763 }
1764
1765 /* Initialize registry. */
1766 ret = ust_registry_session_init(&reg_uid->registry->reg.ust, NULL,
1767 app->bits_per_long, app->uint8_t_alignment,
1768 app->uint16_t_alignment, app->uint32_t_alignment,
1769 app->uint64_t_alignment, app->long_alignment,
1770 app->byte_order, app->version.major,
1771 app->version.minor);
1772 if (ret < 0) {
1773 goto error;
1774 }
1775 /* Add node to teardown list of the session. */
1776 cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);
1777
1778 DBG3("UST app buffer registry per UID created successfully");
1779
1780 end:
1781 if (regp) {
1782 *regp = reg_uid;
1783 }
1784 error:
1785 rcu_read_unlock();
1786 return ret;
1787 }
1788
/*
 * Create a session on the tracer side for the given app.
 *
 * On success, ua_sess_ptr is populated with the session pointer or else left
 * untouched. If the session was created, is_created is set to 1. On error,
 * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
 * be NULL.
 *
 * Returns 0 on success or else a negative code which is either -ENOMEM or
 * -ENOTCONN which is the default code if the ustctl_create_session fails.
 */
static int create_ust_app_session(struct ltt_ust_session *usess,
		struct ust_app *app, struct ust_app_session **ua_sess_ptr,
		int *is_created)
{
	int ret, created = 0;
	struct ust_app_session *ua_sess;

	assert(usess);
	assert(app);
	assert(ua_sess_ptr);

	health_code_update();

	/* Reuse an existing app session for this tracing session if any. */
	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
				app->pid, usess->id);
		ua_sess = alloc_ust_app_session(app);
		if (ua_sess == NULL) {
			/* Only malloc can failed so something is really wrong */
			ret = -ENOMEM;
			goto error;
		}
		shadow_copy_session(ua_sess, usess, app);
		created = 1;
	}

	/* Make sure the matching buffer registry exists for this buffer type. */
	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		/* Init local registry. */
		ret = setup_buffer_reg_pid(ua_sess, app, NULL);
		if (ret < 0) {
			goto error;
		}
		break;
	case LTTNG_BUFFER_PER_UID:
		/* Look for a global registry. If none exists, create one. */
		ret = setup_buffer_reg_uid(usess, app, NULL);
		if (ret < 0) {
			goto error;
		}
		break;
	default:
		assert(0);
		ret = -EINVAL;
		goto error;
	}

	health_code_update();

	/* handle == -1 means the tracer-side session was never created. */
	if (ua_sess->handle == -1) {
		ret = ustctl_create_session(app->sock);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("Creating session for app pid %d with ret %d",
						app->pid, ret);
			} else {
				DBG("UST app creating session failed. Application is dead");
				/*
				 * This is normal behavior, an application can die during the
				 * creation process. Don't report an error so the execution can
				 * continue normally. This will get flagged ENOTCONN and the
				 * caller will handle it.
				 */
				ret = 0;
			}
			delete_ust_app_session(-1, ua_sess, app);
			if (ret != -ENOMEM) {
				/*
				 * Tracer is probably gone or got an internal error so let's
				 * behave like it will soon unregister or not usable.
				 */
				ret = -ENOTCONN;
			}
			goto error;
		}

		/* ustctl_create_session returns the tracer-side session handle. */
		ua_sess->handle = ret;

		/* Add ust app session to app's HT */
		lttng_ht_node_init_u64(&ua_sess->node,
				ua_sess->tracing_id);
		lttng_ht_add_unique_u64(app->sessions, &ua_sess->node);

		DBG2("UST app session created successfully with handle %d", ret);
	}

	*ua_sess_ptr = ua_sess;
	if (is_created) {
		*is_created = created;
	}

	/* Everything went well. */
	ret = 0;

error:
	health_code_update();
	return ret;
}
1899
1900 /*
1901 * Create a context for the channel on the tracer.
1902 *
1903 * Called with UST app session lock held and a RCU read side lock.
1904 */
1905 static
1906 int create_ust_app_channel_context(struct ust_app_session *ua_sess,
1907 struct ust_app_channel *ua_chan, struct lttng_ust_context *uctx,
1908 struct ust_app *app)
1909 {
1910 int ret = 0;
1911 struct lttng_ht_iter iter;
1912 struct lttng_ht_node_ulong *node;
1913 struct ust_app_ctx *ua_ctx;
1914
1915 DBG2("UST app adding context to channel %s", ua_chan->name);
1916
1917 lttng_ht_lookup(ua_chan->ctx, (void *)((unsigned long)uctx->ctx), &iter);
1918 node = lttng_ht_iter_get_node_ulong(&iter);
1919 if (node != NULL) {
1920 ret = -EEXIST;
1921 goto error;
1922 }
1923
1924 ua_ctx = alloc_ust_app_ctx(uctx);
1925 if (ua_ctx == NULL) {
1926 /* malloc failed */
1927 ret = -1;
1928 goto error;
1929 }
1930
1931 lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
1932 lttng_ht_add_unique_ulong(ua_chan->ctx, &ua_ctx->node);
1933 cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
1934
1935 ret = create_ust_channel_context(ua_chan, ua_ctx, app);
1936 if (ret < 0) {
1937 goto error;
1938 }
1939
1940 error:
1941 return ret;
1942 }
1943
1944 /*
1945 * Enable on the tracer side a ust app event for the session and channel.
1946 *
1947 * Called with UST app session lock held.
1948 */
1949 static
1950 int enable_ust_app_event(struct ust_app_session *ua_sess,
1951 struct ust_app_event *ua_event, struct ust_app *app)
1952 {
1953 int ret;
1954
1955 ret = enable_ust_event(app, ua_sess, ua_event);
1956 if (ret < 0) {
1957 goto error;
1958 }
1959
1960 ua_event->enabled = 1;
1961
1962 error:
1963 return ret;
1964 }
1965
1966 /*
1967 * Disable on the tracer side a ust app event for the session and channel.
1968 */
1969 static int disable_ust_app_event(struct ust_app_session *ua_sess,
1970 struct ust_app_event *ua_event, struct ust_app *app)
1971 {
1972 int ret;
1973
1974 ret = disable_ust_event(app, ua_sess, ua_event);
1975 if (ret < 0) {
1976 goto error;
1977 }
1978
1979 ua_event->enabled = 0;
1980
1981 error:
1982 return ret;
1983 }
1984
1985 /*
1986 * Lookup ust app channel for session and disable it on the tracer side.
1987 */
1988 static
1989 int disable_ust_app_channel(struct ust_app_session *ua_sess,
1990 struct ust_app_channel *ua_chan, struct ust_app *app)
1991 {
1992 int ret;
1993
1994 ret = disable_ust_channel(app, ua_sess, ua_chan);
1995 if (ret < 0) {
1996 goto error;
1997 }
1998
1999 ua_chan->enabled = 0;
2000
2001 error:
2002 return ret;
2003 }
2004
/*
 * Lookup ust app channel for session and enable it on the tracer side. This
 * MUST be called with a RCU read side lock acquired.
 */
static int enable_ust_app_channel(struct ust_app_session *ua_sess,
		struct ltt_ust_channel *uchan, struct ust_app *app)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app_channel *ua_chan;

	lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
	ua_chan_node = lttng_ht_iter_get_node_str(&iter);
	if (ua_chan_node == NULL) {
		DBG2("Unable to find channel %s in ust session id %" PRIu64,
				uchan->name, ua_sess->tracing_id);
		/*
		 * NOTE(review): ret is still 0 here, so a missing channel is
		 * reported as success to the caller — confirm this best-effort
		 * behavior is intended.
		 */
		goto error;
	}

	ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

	ret = enable_ust_channel(app, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

error:
	return ret;
}
2035
/*
 * Ask the consumer to create a channel and get it if successful.
 *
 * Return 0 on success or else a negative value.
 *
 * FD accounting: one FD is reserved for the channel itself, then
 * DEFAULT_UST_STREAM_FD_NUM per expected stream; every error path below
 * releases exactly what was reserved up to that point.
 */
static int do_consumer_create_channel(struct ltt_ust_session *usess,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
		int bitness, struct ust_registry_session *registry)
{
	int ret;
	unsigned int nb_fd = 0;
	struct consumer_socket *socket;

	assert(usess);
	assert(ua_sess);
	assert(ua_chan);
	assert(registry);

	rcu_read_lock();
	health_code_update();

	/* Get the right consumer socket for the application. */
	socket = consumer_find_socket_by_bitness(bitness, usess->consumer);
	if (!socket) {
		ret = -EINVAL;
		goto error;
	}

	health_code_update();

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create channel");
		goto error;
	}

	/*
	 * Ask consumer to create channel. The consumer will return the number of
	 * stream we have to expect.
	 */
	ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
			registry);
	if (ret < 0) {
		goto error_ask;
	}

	/*
	 * Compute the number of fd needed before receiving them. It must be 2 per
	 * stream (2 being the default value here).
	 */
	nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;

	/* Reserve the amount of file descriptor we need. */
	ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create channel");
		goto error_fd_get_stream;
	}

	health_code_update();

	/*
	 * Now get the channel from the consumer. This call wil populate the stream
	 * list of that channel and set the ust objects.
	 */
	if (usess->consumer->enabled) {
		ret = ust_consumer_get_channel(socket, ua_chan);
		if (ret < 0) {
			goto error_destroy;
		}
	}

	rcu_read_unlock();
	return 0;

error_destroy:
	/* Release the stream FDs reserved just above. */
	lttng_fd_put(LTTNG_FD_APPS, nb_fd);
error_fd_get_stream:
	/*
	 * Initiate a destroy channel on the consumer since we had an error
	 * handling it on our side. The return value is of no importance since we
	 * already have a ret value set by the previous error that we need to
	 * return.
	 */
	(void) ust_consumer_destroy_channel(socket, ua_chan);
error_ask:
	/* Release the channel FD reserved at the top. */
	lttng_fd_put(LTTNG_FD_APPS, 1);
error:
	health_code_update();
	rcu_read_unlock();
	return ret;
}
2129
2130 /*
2131 * Duplicate the ust data object of the ust app stream and save it in the
2132 * buffer registry stream.
2133 *
2134 * Return 0 on success or else a negative value.
2135 */
2136 static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
2137 struct ust_app_stream *stream)
2138 {
2139 int ret;
2140
2141 assert(reg_stream);
2142 assert(stream);
2143
2144 /* Reserve the amount of file descriptor we need. */
2145 ret = lttng_fd_get(LTTNG_FD_APPS, 2);
2146 if (ret < 0) {
2147 ERR("Exhausted number of available FD upon duplicate stream");
2148 goto error;
2149 }
2150
2151 /* Duplicate object for stream once the original is in the registry. */
2152 ret = ustctl_duplicate_ust_object_data(&stream->obj,
2153 reg_stream->obj.ust);
2154 if (ret < 0) {
2155 ERR("Duplicate stream obj from %p to %p failed with ret %d",
2156 reg_stream->obj.ust, stream->obj, ret);
2157 lttng_fd_put(LTTNG_FD_APPS, 2);
2158 goto error;
2159 }
2160 stream->handle = stream->obj->handle;
2161
2162 error:
2163 return ret;
2164 }
2165
/*
 * Duplicate the ust data object of the ust app. channel and save it in the
 * buffer registry channel.
 *
 * Return 0 on success or else a negative value.
 */
static int duplicate_channel_object(struct buffer_reg_channel *reg_chan,
		struct ust_app_channel *ua_chan)
{
	int ret;

	assert(reg_chan);
	assert(ua_chan);

	/* Need one fd for the channel. (Comment fixed: the code reserves 1.) */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon duplicate channel");
		goto error_fd_get;
	}

	/* Duplicate object for stream once the original is in the registry. */
	ret = ustctl_duplicate_ust_object_data(&ua_chan->obj, reg_chan->obj.ust);
	if (ret < 0) {
		ERR("Duplicate channel obj from %p to %p failed with ret: %d",
				reg_chan->obj.ust, ua_chan->obj, ret);
		goto error;
	}
	/* Cache the tracer-side handle of the duplicated object. */
	ua_chan->handle = ua_chan->obj->handle;

	return 0;

error:
	/* Release the FD reserved above on failure. */
	lttng_fd_put(LTTNG_FD_APPS, 1);
error_fd_get:
	return ret;
}
2203
2204 /*
2205 * For a given channel buffer registry, setup all streams of the given ust
2206 * application channel.
2207 *
2208 * Return 0 on success or else a negative value.
2209 */
static int setup_buffer_reg_streams(struct buffer_reg_channel *reg_chan,
		struct ust_app_channel *ua_chan)
{
	int ret = 0;
	struct ust_app_stream *stream, *stmp;

	assert(reg_chan);
	assert(ua_chan);

	DBG2("UST app setup buffer registry stream");

	/*
	 * Migrate every stream of the application channel into the buffer
	 * registry channel. The _safe iterator is required because each
	 * stream is unlinked and deleted inside the loop body.
	 */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		struct buffer_reg_stream *reg_stream;

		ret = buffer_reg_stream_create(&reg_stream);
		if (ret < 0) {
			goto error;
		}

		/*
		 * Keep original pointer and nullify it in the stream so the delete
		 * stream call does not release the object.
		 */
		reg_stream->obj.ust = stream->obj;
		stream->obj = NULL;
		buffer_reg_stream_add(reg_stream, reg_chan);

		/* We don't need the streams anymore. */
		cds_list_del(&stream->list);
		delete_ust_app_stream(-1, stream);
	}

error:
	return ret;
}
2246
2247 /*
2248 * Create a buffer registry channel for the given session registry and
2249 * application channel object. If regp pointer is valid, it's set with the
2250 * created object. Important, the created object is NOT added to the session
2251 * registry hash table.
2252 *
2253 * Return 0 on success else a negative value.
2254 */
static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
		struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp)
{
	int ret;
	struct buffer_reg_channel *reg_chan = NULL;

	assert(reg_sess);
	assert(ua_chan);

	DBG2("UST app creating buffer registry channel for %s", ua_chan->name);

	/* Create buffer registry channel. */
	ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &reg_chan);
	if (ret < 0) {
		goto error_create;
	}
	assert(reg_chan);
	/* Mirror consumer key and subbuffer size for later lookups. */
	reg_chan->consumer_key = ua_chan->key;
	reg_chan->subbuf_size = ua_chan->attr.subbuf_size;

	/* Create and add a channel registry to session. */
	ret = ust_registry_channel_add(reg_sess->reg.ust,
			ua_chan->tracing_channel_id);
	if (ret < 0) {
		goto error;
	}
	buffer_reg_channel_add(reg_sess, reg_chan);

	/* Hand the created object back to the caller when requested. */
	if (regp) {
		*regp = reg_chan;
	}

	return 0;

error:
	/* Safe because the registry channel object was not added to any HT. */
	buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
error_create:
	return ret;
}
2295
2296 /*
2297 * Setup buffer registry channel for the given session registry and application
2298 * channel object. If regp pointer is valid, it's set with the created object.
2299 *
2300 * Return 0 on success else a negative value.
2301 */
static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
		struct ust_app_channel *ua_chan, struct buffer_reg_channel *reg_chan)
{
	int ret;

	assert(reg_sess);
	assert(reg_chan);
	assert(ua_chan);
	assert(ua_chan->obj);

	DBG2("UST app setup buffer registry channel for %s", ua_chan->name);

	/* Setup all streams for the registry. */
	ret = setup_buffer_reg_streams(reg_chan, ua_chan);
	if (ret < 0) {
		goto error;
	}

	/*
	 * Transfer ownership of the channel object to the registry; nullify
	 * the app channel pointer so a later delete does not release it.
	 */
	reg_chan->obj.ust = ua_chan->obj;
	ua_chan->obj = NULL;

	return 0;

error:
	/* Undo registration; the registry channel is not referenced anymore. */
	buffer_reg_channel_remove(reg_sess, reg_chan);
	buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
	return ret;
}
2330
2331 /*
2332 * Send buffer registry channel to the application.
2333 *
2334 * Return 0 on success else a negative value.
2335 */
static int send_channel_uid_to_ust(struct buffer_reg_channel *reg_chan,
		struct ust_app *app, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct buffer_reg_stream *reg_stream;

	assert(reg_chan);
	assert(app);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app sending buffer registry channel to ust sock %d", app->sock);

	/* Duplicate the registry's channel object into the app channel. */
	ret = duplicate_channel_object(reg_chan, ua_chan);
	if (ret < 0) {
		goto error;
	}

	/* Send channel to the application. */
	ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Send all streams to application. */
	pthread_mutex_lock(&reg_chan->stream_list_lock);
	cds_list_for_each_entry(reg_stream, &reg_chan->streams, lnode) {
		/* Stack-allocated temporary; released after each send. */
		struct ust_app_stream stream;

		ret = duplicate_stream_object(reg_stream, &stream);
		if (ret < 0) {
			goto error_stream_unlock;
		}

		ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
		if (ret < 0) {
			(void) release_ust_app_stream(-1, &stream);
			goto error_stream_unlock;
		}

		/*
		 * The return value is not important here. This function will output an
		 * error if needed.
		 */
		(void) release_ust_app_stream(-1, &stream);
	}
	/* Only mark as sent once channel and all streams made it through. */
	ua_chan->is_sent = 1;

error_stream_unlock:
	pthread_mutex_unlock(&reg_chan->stream_list_lock);
error:
	return ret;
}
2392
2393 /*
2394 * Create and send to the application the created buffers with per UID buffers.
2395 *
2396 * Return 0 on success else a negative value.
2397 */
static int create_channel_per_uid(struct ust_app *app,
		struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct buffer_reg_uid *reg_uid;
	struct buffer_reg_channel *reg_chan;

	assert(app);
	assert(usess);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app creating channel %s with per UID buffers", ua_chan->name);

	reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
	/*
	 * The session creation handles the creation of this global registry
	 * object. If none can be found, there is a code flow problem or a
	 * teardown race.
	 */
	assert(reg_uid);

	reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
			reg_uid);
	if (!reg_chan) {
		/* First app for this UID/bitness: create the shared buffers. */

		/* Create the buffer registry channel object. */
		ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &reg_chan);
		if (ret < 0) {
			goto error;
		}
		assert(reg_chan);

		/*
		 * Create the buffers on the consumer side. This call populates the
		 * ust app channel object with all streams and data object.
		 */
		ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
				app->bits_per_long, reg_uid->registry->reg.ust);
		if (ret < 0) {
			/*
			 * Let's remove the previously created buffer registry channel so
			 * it's not visible anymore in the session registry.
			 */
			ust_registry_channel_del_free(reg_uid->registry->reg.ust,
					ua_chan->tracing_channel_id);
			buffer_reg_channel_remove(reg_uid->registry, reg_chan);
			buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
			goto error;
		}

		/*
		 * Setup the streams and add it to the session registry.
		 */
		ret = setup_buffer_reg_channel(reg_uid->registry, ua_chan, reg_chan);
		if (ret < 0) {
			goto error;
		}

	}

	/* Send buffers to the application. */
	ret = send_channel_uid_to_ust(reg_chan, app, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

error:
	return ret;
}
2468
2469 /*
2470 * Create and send to the application the created buffers with per PID buffers.
2471 *
2472 * Return 0 on success else a negative value.
2473 */
static int create_channel_per_pid(struct ust_app *app,
		struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct ust_registry_session *registry;

	assert(app);
	assert(usess);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app creating channel %s with per PID buffers", ua_chan->name);

	/* RCU read lock protects the registry lookup below. */
	rcu_read_lock();

	registry = get_session_registry(ua_sess);
	assert(registry);

	/* Create and add a new channel registry to session. */
	ret = ust_registry_channel_add(registry, ua_chan->key);
	if (ret < 0) {
		goto error;
	}

	/* Create and get channel on the consumer side. */
	ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
			app->bits_per_long, registry);
	if (ret < 0) {
		goto error;
	}

	ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

error:
	rcu_read_unlock();
	return ret;
}
2515
2516 /*
2517 * From an already allocated ust app channel, create the channel buffers if
2518 * need and send it to the application. This MUST be called with a RCU read
2519 * side lock acquired.
2520 *
2521 * Return 0 on success or else a negative value.
2522 */
2523 static int do_create_channel(struct ust_app *app,
2524 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2525 struct ust_app_channel *ua_chan)
2526 {
2527 int ret;
2528
2529 assert(app);
2530 assert(usess);
2531 assert(ua_sess);
2532 assert(ua_chan);
2533
2534 /* Handle buffer type before sending the channel to the application. */
2535 switch (usess->buffer_type) {
2536 case LTTNG_BUFFER_PER_UID:
2537 {
2538 ret = create_channel_per_uid(app, usess, ua_sess, ua_chan);
2539 if (ret < 0) {
2540 goto error;
2541 }
2542 break;
2543 }
2544 case LTTNG_BUFFER_PER_PID:
2545 {
2546 ret = create_channel_per_pid(app, usess, ua_sess, ua_chan);
2547 if (ret < 0) {
2548 goto error;
2549 }
2550 break;
2551 }
2552 default:
2553 assert(0);
2554 ret = -EINVAL;
2555 goto error;
2556 }
2557
2558 /* Initialize ust objd object using the received handle and add it. */
2559 lttng_ht_node_init_ulong(&ua_chan->ust_objd_node, ua_chan->handle);
2560 lttng_ht_add_unique_ulong(app->ust_objd, &ua_chan->ust_objd_node);
2561
2562 /* If channel is not enabled, disable it on the tracer */
2563 if (!ua_chan->enabled) {
2564 ret = disable_ust_channel(app, ua_sess, ua_chan);
2565 if (ret < 0) {
2566 goto error;
2567 }
2568 }
2569
2570 error:
2571 return ret;
2572 }
2573
2574 /*
2575 * Create UST app channel and create it on the tracer. Set ua_chanp of the
2576 * newly created channel if not NULL.
2577 *
2578 * Called with UST app session lock and RCU read-side lock held.
2579 *
2580 * Return 0 on success or else a negative value.
2581 */
static int create_ust_app_channel(struct ust_app_session *ua_sess,
		struct ltt_ust_channel *uchan, struct ust_app *app,
		enum lttng_ust_chan_type type, struct ltt_ust_session *usess,
		struct ust_app_channel **ua_chanp)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app_channel *ua_chan;

	/* Lookup channel in the ust app session */
	lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
	ua_chan_node = lttng_ht_iter_get_node_str(&iter);
	if (ua_chan_node != NULL) {
		/* Channel already exists: reuse it, nothing to create. */
		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
		goto end;
	}

	ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
	if (ua_chan == NULL) {
		/* Only malloc can fail here */
		ret = -ENOMEM;
		goto error_alloc;
	}
	shadow_copy_channel(ua_chan, uchan);

	/* Set channel type. */
	ua_chan->attr.type = type;

	/* Create buffers and send the channel to the application. */
	ret = do_create_channel(app, usess, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

	DBG2("UST app create channel %s for PID %d completed", ua_chan->name,
			app->pid);

	/* Only add the channel if successful on the tracer side. */
	lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);

end:
	if (ua_chanp) {
		*ua_chanp = ua_chan;
	}

	/* Everything went well. */
	return 0;

error:
	/* Close app-side object only if the channel was actually sent. */
	delete_ust_app_channel(ua_chan->is_sent ? app->sock : -1, ua_chan, app);
error_alloc:
	return ret;
}
2635
2636 /*
2637 * Create UST app event and create it on the tracer side.
2638 *
2639 * Called with ust app session mutex held.
2640 */
static
int create_ust_app_event(struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan, struct ltt_ust_event *uevent,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_event *ua_event;

	/* Get event node */
	ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
			uevent->filter, uevent->attr.loglevel, uevent->exclusion);
	if (ua_event != NULL) {
		/* Event already exists for this channel; report it. */
		ret = -EEXIST;
		goto end;
	}

	/* Does not exist so create one */
	ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
	if (ua_event == NULL) {
		/* Only malloc can failed so something is really wrong */
		ret = -ENOMEM;
		goto end;
	}
	shadow_copy_event(ua_event, uevent);

	/* Create it on the tracer side */
	ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
	if (ret < 0) {
		/* Not found previously means that it does not exist on the tracer */
		assert(ret != -LTTNG_UST_ERR_EXIST);
		goto error;
	}

	add_unique_ust_app_event(ua_chan, ua_event);

	DBG2("UST app create event %s for PID %d completed", ua_event->name,
			app->pid);

end:
	return ret;

error:
	/* Valid. Calling here is already in a read side lock */
	delete_ust_app_event(-1, ua_event);
	return ret;
}
2687
2688 /*
2689 * Create UST metadata and open it on the tracer side.
2690 *
2691 * Called with UST app session lock held and RCU read side lock.
2692 */
static int create_ust_app_metadata(struct ust_app_session *ua_sess,
		struct ust_app *app, struct consumer_output *consumer,
		struct ustctl_consumer_channel_attr *attr)
{
	int ret = 0;
	struct ust_app_channel *metadata;
	struct consumer_socket *socket;
	struct ust_registry_session *registry;

	assert(ua_sess);
	assert(app);
	assert(consumer);

	registry = get_session_registry(ua_sess);
	assert(registry);

	/* Metadata already exists for this registry or it was closed previously */
	if (registry->metadata_key || registry->metadata_closed) {
		ret = 0;
		goto error;
	}

	/* Allocate UST metadata */
	metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, NULL);
	if (!metadata) {
		/* malloc() failed */
		ret = -ENOMEM;
		goto error;
	}

	if (!attr) {
		/* Set default attributes for metadata. */
		metadata->attr.overwrite = DEFAULT_CHANNEL_OVERWRITE;
		metadata->attr.subbuf_size = default_get_metadata_subbuf_size();
		metadata->attr.num_subbuf = DEFAULT_METADATA_SUBBUF_NUM;
		metadata->attr.switch_timer_interval = DEFAULT_METADATA_SWITCH_TIMER;
		metadata->attr.read_timer_interval = DEFAULT_METADATA_READ_TIMER;
		metadata->attr.output = LTTNG_UST_MMAP;
		metadata->attr.type = LTTNG_UST_CHAN_METADATA;
	} else {
		/* Caller-provided attributes; force output mode and type. */
		memcpy(&metadata->attr, attr, sizeof(metadata->attr));
		metadata->attr.output = LTTNG_UST_MMAP;
		metadata->attr.type = LTTNG_UST_CHAN_METADATA;
	}

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create metadata");
		goto error;
	}

	/* Get the right consumer socket for the application. */
	socket = consumer_find_socket_by_bitness(app->bits_per_long, consumer);
	if (!socket) {
		ret = -EINVAL;
		goto error_consumer;
	}

	/*
	 * Keep metadata key so we can identify it on the consumer side. Assign it
	 * to the registry *before* we ask the consumer so we avoid the race of the
	 * consumer requesting the metadata and the ask_channel call on our side
	 * did not returned yet.
	 */
	registry->metadata_key = metadata->key;

	/*
	 * Ask the metadata channel creation to the consumer. The metadata object
	 * will be created by the consumer and kept their. However, the stream is
	 * never added or monitored until we do a first push metadata to the
	 * consumer.
	 */
	ret = ust_consumer_ask_channel(ua_sess, metadata, consumer, socket,
			registry);
	if (ret < 0) {
		/* Nullify the metadata key so we don't try to close it later on. */
		registry->metadata_key = 0;
		goto error_consumer;
	}

	/*
	 * The setup command will make the metadata stream be sent to the relayd,
	 * if applicable, and the thread managing the metadatas. This is important
	 * because after this point, if an error occurs, the only way the stream
	 * can be deleted is to be monitored in the consumer.
	 */
	ret = consumer_setup_metadata(socket, metadata->key);
	if (ret < 0) {
		/* Nullify the metadata key so we don't try to close it later on. */
		registry->metadata_key = 0;
		goto error_consumer;
	}

	DBG2("UST metadata with key %" PRIu64 " created for app pid %d",
			metadata->key, app->pid);

error_consumer:
	/*
	 * Executed on success as well: the local metadata channel object and
	 * the FD slot are always released here; the consumer now holds the
	 * channel, identified via registry->metadata_key.
	 */
	lttng_fd_put(LTTNG_FD_APPS, 1);
	delete_ust_app_channel(-1, metadata, app);
error:
	return ret;
}
2796
2797 /*
2798 * Return pointer to traceable apps list.
2799 */
2800 struct lttng_ht *ust_app_get_ht(void)
2801 {
2802 return ust_app_ht;
2803 }
2804
2805 /*
2806 * Return ust app pointer or NULL if not found. RCU read side lock MUST be
2807 * acquired before calling this function.
2808 */
2809 struct ust_app *ust_app_find_by_pid(pid_t pid)
2810 {
2811 struct ust_app *app = NULL;
2812 struct lttng_ht_node_ulong *node;
2813 struct lttng_ht_iter iter;
2814
2815 lttng_ht_lookup(ust_app_ht, (void *)((unsigned long) pid), &iter);
2816 node = lttng_ht_iter_get_node_ulong(&iter);
2817 if (node == NULL) {
2818 DBG2("UST app no found with pid %d", pid);
2819 goto error;
2820 }
2821
2822 DBG2("Found UST app by pid %d", pid);
2823
2824 app = caa_container_of(node, struct ust_app, pid_n);
2825
2826 error:
2827 return app;
2828 }
2829
2830 /*
2831 * Allocate and init an UST app object using the registration information and
2832 * the command socket. This is called when the command socket connects to the
2833 * session daemon.
2834 *
2835 * The object is returned on success or else NULL.
2836 */
struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
{
	struct ust_app *lta = NULL;

	assert(msg);
	assert(sock >= 0);

	DBG3("UST app creating application for socket %d", sock);

	/* Refuse registration if no consumerd matches the app's bitness. */
	if ((msg->bits_per_long == 64 &&
				(uatomic_read(&ust_consumerd64_fd) == -EINVAL))
			|| (msg->bits_per_long == 32 &&
				(uatomic_read(&ust_consumerd32_fd) == -EINVAL))) {
		ERR("Registration failed: application \"%s\" (pid: %d) has "
				"%d-bit long, but no consumerd for this size is available.\n",
				msg->name, msg->pid, msg->bits_per_long);
		goto error;
	}

	lta = zmalloc(sizeof(struct ust_app));
	if (lta == NULL) {
		PERROR("malloc");
		goto error;
	}

	lta->ppid = msg->ppid;
	lta->uid = msg->uid;
	lta->gid = msg->gid;

	/* ABI description used when decoding the app's trace data. */
	lta->bits_per_long = msg->bits_per_long;
	lta->uint8_t_alignment = msg->uint8_t_alignment;
	lta->uint16_t_alignment = msg->uint16_t_alignment;
	lta->uint32_t_alignment = msg->uint32_t_alignment;
	lta->uint64_t_alignment = msg->uint64_t_alignment;
	lta->long_alignment = msg->long_alignment;
	lta->byte_order = msg->byte_order;

	lta->v_major = msg->major;
	lta->v_minor = msg->minor;
	/*
	 * NOTE(review): lttng_ht_new() results are not checked here; a NULL
	 * table would crash on first use. Confirm allocation policy upstream.
	 */
	lta->sessions = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	lta->ust_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	/* Notify socket is attached later, at notify registration time. */
	lta->notify_sock = -1;

	/* Copy name and make sure it's NULL terminated. */
	strncpy(lta->name, msg->name, sizeof(lta->name));
	lta->name[UST_APP_PROCNAME_LEN] = '\0';

	/*
	 * Before this can be called, when receiving the registration information,
	 * the application compatibility is checked. So, at this point, the
	 * application can work with this session daemon.
	 */
	lta->compatible = 1;

	lta->pid = msg->pid;
	lttng_ht_node_init_ulong(&lta->pid_n, (unsigned long) lta->pid);
	lta->sock = sock;
	lttng_ht_node_init_ulong(&lta->sock_n, (unsigned long) lta->sock);

	CDS_INIT_LIST_HEAD(&lta->teardown_head);

error:
	/* NULL on incompatible consumerd or allocation failure. */
	return lta;
}
2901
2902 /*
2903 * For a given application object, add it to every hash table.
2904 */
void ust_app_add(struct ust_app *app)
{
	assert(app);
	assert(app->notify_sock >= 0);

	/* RCU read lock required by the lock-free hash table operations. */
	rcu_read_lock();

	/*
	 * On a re-registration, we want to kick out the previous registration of
	 * that pid
	 */
	lttng_ht_add_replace_ulong(ust_app_ht, &app->pid_n);

	/*
	 * The socket _should_ be unique until _we_ call close. So, a add_unique
	 * for the ust_app_ht_by_sock is used which asserts fail if the entry was
	 * already in the table.
	 */
	lttng_ht_add_unique_ulong(ust_app_ht_by_sock, &app->sock_n);

	/* Add application to the notify socket hash table. */
	lttng_ht_node_init_ulong(&app->notify_sock_n, app->notify_sock);
	lttng_ht_add_unique_ulong(ust_app_ht_by_notify_sock, &app->notify_sock_n);

	DBG("App registered with pid:%d ppid:%d uid:%d gid:%d sock:%d name:%s "
			"notify_sock:%d (version %d.%d)", app->pid, app->ppid, app->uid,
			app->gid, app->sock, app->name, app->notify_sock, app->v_major,
			app->v_minor);

	rcu_read_unlock();
}
2936
2937 /*
2938 * Set the application version into the object.
2939 *
2940 * Return 0 on success else a negative value either an errno code or a
2941 * LTTng-UST error code.
2942 */
2943 int ust_app_version(struct ust_app *app)
2944 {
2945 int ret;
2946
2947 assert(app);
2948
2949 ret = ustctl_tracer_version(app->sock, &app->version);
2950 if (ret < 0) {
2951 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
2952 ERR("UST app %d verson failed with ret %d", app->sock, ret);
2953 } else {
2954 DBG3("UST app %d verion failed. Application is dead", app->sock);
2955 }
2956 }
2957
2958 return ret;
2959 }
2960
2961 /*
2962 * Unregister app by removing it from the global traceable app list and freeing
2963 * the data struct.
2964 *
2965 * The socket is already closed at this point so no close to sock.
2966 */
void ust_app_unregister(int sock)
{
	struct ust_app *lta;
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;
	struct ust_app_session *ua_sess;
	int ret;

	rcu_read_lock();

	/* Get the node reference for a call_rcu */
	lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	assert(node);

	lta = caa_container_of(node, struct ust_app, sock_n);
	DBG("PID %d unregistering with sock %d", lta->pid, sock);

	/* Remove application from the socket hash table. */
	ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
	assert(!ret);

	/*
	 * Remove application from notify hash table. The thread handling the
	 * notify socket could have deleted the node so ignore on error because
	 * either way it's valid. The close of that socket is handled by the other
	 * thread.
	 */
	iter.iter.node = &lta->notify_sock_n.node;
	(void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);

	/*
	 * Ignore return value since the node might have been removed before by an
	 * add replace during app registration because the PID can be reassigned by
	 * the OS.
	 */
	iter.iter.node = &lta->pid_n.node;
	ret = lttng_ht_del(ust_app_ht, &iter);
	if (ret) {
		DBG3("Unregister app by PID %d failed. This can happen on pid reuse",
				lta->pid);
	}

	/* Remove sessions so they are not visible during deletion.*/
	cds_lfht_for_each_entry(lta->sessions->ht, &iter.iter, ua_sess,
			node.node) {
		struct ust_registry_session *registry;

		ret = lttng_ht_del(lta->sessions, &iter);
		if (ret) {
			/* The session was already removed so scheduled for teardown. */
			continue;
		}

		/*
		 * Add session to list for teardown. This is safe since at this point we
		 * are the only one using this list.
		 */
		pthread_mutex_lock(&ua_sess->lock);

		/*
		 * Normally, this is done in the delete session process which is
		 * executed in the call rcu below. However, upon registration we can't
		 * afford to wait for the grace period before pushing data or else the
		 * data pending feature can race between the unregistration and stop
		 * command where the data pending command is sent *before* the grace
		 * period ended.
		 *
		 * The close metadata below nullifies the metadata pointer in the
		 * session so the delete session will NOT push/close a second time.
		 */
		registry = get_session_registry(ua_sess);
		if (registry && !registry->metadata_closed) {
			/* Push metadata for application before freeing the application. */
			(void) push_metadata(registry, ua_sess->consumer);

			/*
			 * Don't ask to close metadata for global per UID buffers. Close
			 * metadata only on destroy trace session in this case. Also, the
			 * previous push metadata could have flag the metadata registry to
			 * close so don't send a close command if closed.
			 */
			if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID &&
					!registry->metadata_closed) {
				/* And ask to close it for this session registry. */
				(void) close_metadata(registry, ua_sess->consumer);
			}
		}

		cds_list_add(&ua_sess->teardown_node, &lta->teardown_head);
		pthread_mutex_unlock(&ua_sess->lock);
	}

	/* Free memory after the RCU grace period. */
	call_rcu(&lta->pid_n.head, delete_ust_app_rcu);

	rcu_read_unlock();
	return;
}
3066
3067 /*
3068 * Return traceable_app_count
3069 */
3070 unsigned long ust_app_list_count(void)
3071 {
3072 unsigned long count;
3073
3074 rcu_read_lock();
3075 count = lttng_ht_get_count(ust_app_ht);
3076 rcu_read_unlock();
3077
3078 return count;
3079 }
3080
3081 /*
3082 * Fill events array with all events name of all registered apps.
3083 */
3084 int ust_app_list_events(struct lttng_event **events)
3085 {
3086 int ret, handle;
3087 size_t nbmem, count = 0;
3088 struct lttng_ht_iter iter;
3089 struct ust_app *app;
3090 struct lttng_event *tmp_event;
3091
3092 nbmem = UST_APP_EVENT_LIST_SIZE;
3093 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event));
3094 if (tmp_event == NULL) {
3095 PERROR("zmalloc ust app events");
3096 ret = -ENOMEM;
3097 goto error;
3098 }
3099
3100 rcu_read_lock();
3101
3102 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3103 struct lttng_ust_tracepoint_iter uiter;
3104
3105 health_code_update();
3106
3107 if (!app->compatible) {
3108 /*
3109 * TODO: In time, we should notice the caller of this error by
3110 * telling him that this is a version error.
3111 */
3112 continue;
3113 }
3114 handle = ustctl_tracepoint_list(app->sock);
3115 if (handle < 0) {
3116 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
3117 ERR("UST app list events getting handle failed for app pid %d",
3118 app->pid);
3119 }
3120 continue;
3121 }
3122
3123 while ((ret = ustctl_tracepoint_list_get(app->sock, handle,
3124 &uiter)) != -LTTNG_UST_ERR_NOENT) {
3125 /* Handle ustctl error. */
3126 if (ret < 0) {
3127 free(tmp_event);
3128 if (ret != -LTTNG_UST_ERR_EXITING || ret != -EPIPE) {
3129 ERR("UST app tp list get failed for app %d with ret %d",
3130 app->sock, ret);
3131 } else {
3132 DBG3("UST app tp list get failed. Application is dead");
3133 /*
3134 * This is normal behavior, an application can die during the
3135 * creation process. Don't report an error so the execution can
3136 * continue normally. Continue normal execution.
3137 */
3138 break;
3139 }
3140 goto rcu_error;
3141 }
3142
3143 health_code_update();
3144 if (count >= nbmem) {
3145 /* In case the realloc fails, we free the memory */
3146 void *ptr;
3147
3148 DBG2("Reallocating event list from %zu to %zu entries", nbmem,
3149 2 * nbmem);
3150 nbmem *= 2;
3151 ptr = realloc(tmp_event, nbmem * sizeof(struct lttng_event));
3152 if (ptr == NULL) {
3153 PERROR("realloc ust app events");
3154 free(tmp_event);
3155 ret = -ENOMEM;
3156 goto rcu_error;
3157 }
3158 tmp_event = ptr;
3159 }
3160 memcpy(tmp_event[count].name, uiter.name, LTTNG_UST_SYM_NAME_LEN);
3161 tmp_event[count].loglevel = uiter.loglevel;
3162 tmp_event[count].type = (enum lttng_event_type) LTTNG_UST_TRACEPOINT;
3163 tmp_event[count].pid = app->pid;
3164 tmp_event[count].enabled = -1;
3165 count++;
3166 }
3167 }
3168
3169 ret = count;
3170 *events = tmp_event;
3171
3172 DBG2("UST app list events done (%zu events)", count);
3173
3174 rcu_error:
3175 rcu_read_unlock();
3176 error:
3177 health_code_update();
3178 return ret;
3179 }
3180
3181 /*
3182 * Fill events array with all events name of all registered apps.
3183 */
3184 int ust_app_list_event_fields(struct lttng_event_field **fields)
3185 {
3186 int ret, handle;
3187 size_t nbmem, count = 0;
3188 struct lttng_ht_iter iter;
3189 struct ust_app *app;
3190 struct lttng_event_field *tmp_event;
3191
3192 nbmem = UST_APP_EVENT_LIST_SIZE;
3193 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event_field));
3194 if (tmp_event == NULL) {
3195 PERROR("zmalloc ust app event fields");
3196 ret = -ENOMEM;
3197 goto error;
3198 }
3199
3200 rcu_read_lock();
3201
3202 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3203 struct lttng_ust_field_iter uiter;
3204
3205 health_code_update();
3206
3207 if (!app->compatible) {
3208 /*
3209 * TODO: In time, we should notice the caller of this error by
3210 * telling him that this is a version error.
3211 */
3212 continue;
3213 }
3214 handle = ustctl_tracepoint_field_list(app->sock);
3215 if (handle < 0) {
3216 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
3217 ERR("UST app list field getting handle failed for app pid %d",
3218 app->pid);
3219 }
3220 continue;
3221 }
3222
3223 while ((ret = ustctl_tracepoint_field_list_get(app->sock, handle,
3224 &uiter)) != -LTTNG_UST_ERR_NOENT) {
3225 /* Handle ustctl error. */
3226 if (ret < 0) {
3227 free(tmp_event);
3228 if (ret != -LTTNG_UST_ERR_EXITING || ret != -EPIPE) {
3229 ERR("UST app tp list field failed for app %d with ret %d",
3230 app->sock, ret);
3231 } else {
3232 DBG3("UST app tp list field failed. Application is dead");
3233 /*
3234 * This is normal behavior, an application can die during the
3235 * creation process. Don't report an error so the execution can
3236 * continue normally.
3237 */
3238 break;
3239 }
3240 goto rcu_error;
3241 }
3242
3243 health_code_update();
3244 if (count >= nbmem) {
3245 /* In case the realloc fails, we free the memory */
3246 void *ptr;
3247
3248 DBG2("Reallocating event field list from %zu to %zu entries", nbmem