255a41b0d7ca46bd0ec7da805065f33c1d399c35
[lttng-tools.git] / src / bin / lttng-sessiond / ust-app.c
1 /*
2 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License, version 2 only,
6 * as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License along
14 * with this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
16 */
17
18 #define _GNU_SOURCE
19 #include <errno.h>
20 #include <inttypes.h>
21 #include <pthread.h>
22 #include <stdio.h>
23 #include <stdlib.h>
24 #include <string.h>
25 #include <sys/stat.h>
26 #include <sys/types.h>
27 #include <unistd.h>
28 #include <urcu/compiler.h>
29 #include <lttng/ust-error.h>
30 #include <signal.h>
31
32 #include <common/common.h>
33 #include <common/sessiond-comm/sessiond-comm.h>
34
35 #include "buffer-registry.h"
36 #include "fd-limit.h"
37 #include "health-sessiond.h"
38 #include "ust-app.h"
39 #include "ust-consumer.h"
40 #include "ust-ctl.h"
41 #include "utils.h"
42
43 /* Next available channel key. Access under next_channel_key_lock. */
44 static uint64_t _next_channel_key;
45 static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;
46
47 /* Next available session ID. Access under next_session_id_lock. */
48 static uint64_t _next_session_id;
49 static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;
50
51 /*
52 * Return the incremented value of next_channel_key.
53 */
54 static uint64_t get_next_channel_key(void)
55 {
56 uint64_t ret;
57
58 pthread_mutex_lock(&next_channel_key_lock);
59 ret = ++_next_channel_key;
60 pthread_mutex_unlock(&next_channel_key_lock);
61 return ret;
62 }
63
64 /*
65 * Return the atomically incremented value of next_session_id.
66 */
67 static uint64_t get_next_session_id(void)
68 {
69 uint64_t ret;
70
71 pthread_mutex_lock(&next_session_id_lock);
72 ret = ++_next_session_id;
73 pthread_mutex_unlock(&next_session_id_lock);
74 return ret;
75 }
76
/*
 * Copy channel attributes from the lttng_ust layout (uattr) to the ustctl
 * consumer layout (attr). The two structures differ, so the common fields
 * are copied one by one instead of with a memcpy.
 */
static void copy_channel_attr_to_ustctl(
		struct ustctl_consumer_channel_attr *attr,
		struct lttng_ust_channel_attr *uattr)
{
	/* Copy channel attributes since the layout is different. */
	attr->subbuf_size = uattr->subbuf_size;
	attr->num_subbuf = uattr->num_subbuf;
	attr->overwrite = uattr->overwrite;
	attr->switch_timer_interval = uattr->switch_timer_interval;
	attr->read_timer_interval = uattr->read_timer_interval;
	attr->output = uattr->output;
}
89
/*
 * Match function for the hash table lookup.
 *
 * It matches an ust app event based on four attributes which are the event
 * name, the loglevel, the filter bytecode and the exclusion list.
 *
 * Return 1 on a full match, 0 otherwise (cds_lfht match convention).
 */
static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
{
	struct ust_app_event *event;
	const struct ust_app_ht_key *key;

	assert(node);
	assert(_key);

	event = caa_container_of(node, struct ust_app_event, node.node);
	key = _key;

	/* Match the 4 elements of the key: name, filter, loglevel, exclusions */

	/* Event name */
	if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
		goto no_match;
	}

	/* Event loglevel. */
	if (event->attr.loglevel != key->loglevel) {
		if (event->attr.loglevel_type == LTTNG_UST_LOGLEVEL_ALL
				&& key->loglevel == 0 && event->attr.loglevel == -1) {
			/*
			 * Match is accepted. This is because on event creation, the
			 * loglevel is set to -1 if the event loglevel type is ALL so 0 and
			 * -1 are accepted for this loglevel type since 0 is the one set by
			 * the API when receiving an enable event.
			 */
		} else {
			goto no_match;
		}
	}

	/* One of the filters is NULL, fail. */
	if ((key->filter && !event->filter) || (!key->filter && event->filter)) {
		goto no_match;
	}

	if (key->filter && event->filter) {
		/* Both filters exists, check length followed by the bytecode. */
		if (event->filter->len != key->filter->len ||
				memcmp(event->filter->data, key->filter->data,
					event->filter->len) != 0) {
			goto no_match;
		}
	}

	/* One of the exclusions is NULL, fail. */
	if ((key->exclusion && !event->exclusion) || (!key->exclusion && event->exclusion)) {
		goto no_match;
	}

	if (key->exclusion && event->exclusion) {
		/* Both exclusions exists, check count followed by the names. */
		if (event->exclusion->count != key->exclusion->count ||
				memcmp(event->exclusion->names, key->exclusion->names,
					event->exclusion->count * LTTNG_UST_SYM_NAME_LEN) != 0) {
			goto no_match;
		}
	}


	/* Match. */
	return 1;

no_match:
	return 0;
}
164
165 /*
166 * Unique add of an ust app event in the given ht. This uses the custom
167 * ht_match_ust_app_event match function and the event name as hash.
168 */
169 static void add_unique_ust_app_event(struct ust_app_channel *ua_chan,
170 struct ust_app_event *event)
171 {
172 struct cds_lfht_node *node_ptr;
173 struct ust_app_ht_key key;
174 struct lttng_ht *ht;
175
176 assert(ua_chan);
177 assert(ua_chan->events);
178 assert(event);
179
180 ht = ua_chan->events;
181 key.name = event->attr.name;
182 key.filter = event->filter;
183 key.loglevel = event->attr.loglevel;
184 key.exclusion = event->exclusion;
185
186 node_ptr = cds_lfht_add_unique(ht->ht,
187 ht->hash_fct(event->node.key, lttng_ht_seed),
188 ht_match_ust_app_event, &key, &event->node.node);
189 assert(node_ptr == &event->node.node);
190 }
191
192 /*
193 * Close the notify socket from the given RCU head object. This MUST be called
194 * through a call_rcu().
195 */
196 static void close_notify_sock_rcu(struct rcu_head *head)
197 {
198 int ret;
199 struct ust_app_notify_sock_obj *obj =
200 caa_container_of(head, struct ust_app_notify_sock_obj, head);
201
202 /* Must have a valid fd here. */
203 assert(obj->fd >= 0);
204
205 ret = close(obj->fd);
206 if (ret) {
207 ERR("close notify sock %d RCU", obj->fd);
208 }
209 lttng_fd_put(LTTNG_FD_APPS, 1);
210
211 free(obj);
212 }
213
214 /*
215 * Return the session registry according to the buffer type of the given
216 * session.
217 *
218 * A registry per UID object MUST exists before calling this function or else
219 * it assert() if not found. RCU read side lock must be acquired.
220 */
221 static struct ust_registry_session *get_session_registry(
222 struct ust_app_session *ua_sess)
223 {
224 struct ust_registry_session *registry = NULL;
225
226 assert(ua_sess);
227
228 switch (ua_sess->buffer_type) {
229 case LTTNG_BUFFER_PER_PID:
230 {
231 struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
232 if (!reg_pid) {
233 goto error;
234 }
235 registry = reg_pid->registry->reg.ust;
236 break;
237 }
238 case LTTNG_BUFFER_PER_UID:
239 {
240 struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
241 ua_sess->tracing_id, ua_sess->bits_per_long, ua_sess->uid);
242 if (!reg_uid) {
243 goto error;
244 }
245 registry = reg_uid->registry->reg.ust;
246 break;
247 }
248 default:
249 assert(0);
250 };
251
252 error:
253 return registry;
254 }
255
256 /*
257 * Delete ust context safely. RCU read lock must be held before calling
258 * this function.
259 */
260 static
261 void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx)
262 {
263 int ret;
264
265 assert(ua_ctx);
266
267 if (ua_ctx->obj) {
268 ret = ustctl_release_object(sock, ua_ctx->obj);
269 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
270 ERR("UST app sock %d release ctx obj handle %d failed with ret %d",
271 sock, ua_ctx->obj->handle, ret);
272 }
273 free(ua_ctx->obj);
274 }
275 free(ua_ctx);
276 }
277
278 /*
279 * Delete ust app event safely. RCU read lock must be held before calling
280 * this function.
281 */
282 static
283 void delete_ust_app_event(int sock, struct ust_app_event *ua_event)
284 {
285 int ret;
286
287 assert(ua_event);
288
289 free(ua_event->filter);
290 if (ua_event->exclusion != NULL)
291 free(ua_event->exclusion);
292 if (ua_event->obj != NULL) {
293 ret = ustctl_release_object(sock, ua_event->obj);
294 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
295 ERR("UST app sock %d release event obj failed with ret %d",
296 sock, ret);
297 }
298 free(ua_event->obj);
299 }
300 free(ua_event);
301 }
302
/*
 * Release ust data object of the given stream.
 *
 * Return 0 on success or else a negative value.
 */
static int release_ust_app_stream(int sock, struct ust_app_stream *stream)
{
	int ret = 0;

	assert(stream);

	if (stream->obj) {
		ret = ustctl_release_object(sock, stream->obj);
		/* EPIPE/EXITING mean the app died, which is not an error here. */
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release stream obj failed with ret %d",
					sock, ret);
		}
		/*
		 * NOTE(review): a stream accounts for 2 FDs here (presumably the
		 * shm fd and the wakeup fd) -- confirm against the stream
		 * allocation/reception site.
		 */
		lttng_fd_put(LTTNG_FD_APPS, 2);
		free(stream->obj);
	}

	return ret;
}
326
/*
 * Delete ust app stream safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_stream(int sock, struct ust_app_stream *stream)
{
	assert(stream);

	/* Best effort release; failures are logged by the callee. */
	(void) release_ust_app_stream(sock, stream);
	free(stream);
}
339
340 /*
341 * We need to execute ht_destroy outside of RCU read-side critical
342 * section and outside of call_rcu thread, so we postpone its execution
343 * using ht_cleanup_push. It is simpler than to change the semantic of
344 * the many callers of delete_ust_app_session().
345 */
346 static
347 void delete_ust_app_channel_rcu(struct rcu_head *head)
348 {
349 struct ust_app_channel *ua_chan =
350 caa_container_of(head, struct ust_app_channel, rcu_head);
351
352 ht_cleanup_push(ua_chan->ctx);
353 ht_cleanup_push(ua_chan->events);
354 free(ua_chan);
355 }
356
/*
 * Delete ust app channel safely. RCU read lock must be held before calling
 * this function.
 *
 * Wipes streams, contexts and events, removes the channel from the per-PID
 * registry when applicable, releases the tracer-side object, and finally
 * frees the channel through call_rcu().
 */
static
void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;
	struct ust_app_stream *stream, *stmp;
	struct ust_registry_session *registry;

	assert(ua_chan);

	DBG3("UST app deleting channel %s", ua_chan->name);

	/* Wipe stream */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		cds_list_del(&stream->list);
		delete_ust_app_stream(sock, stream);
	}

	/* Wipe context */
	cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
		/* Contexts are linked in both a list and the ctx hash table. */
		cds_list_del(&ua_ctx->list);
		ret = lttng_ht_del(ua_chan->ctx, &iter);
		assert(!ret);
		delete_ust_app_ctx(sock, ua_ctx);
	}

	/* Wipe events */
	cds_lfht_for_each_entry(ua_chan->events->ht, &iter.iter, ua_event,
			node.node) {
		ret = lttng_ht_del(ua_chan->events, &iter);
		assert(!ret);
		delete_ust_app_event(sock, ua_event);
	}

	if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
		/* Wipe and free registry from session registry. */
		registry = get_session_registry(ua_chan->session);
		if (registry) {
			ust_registry_channel_del_free(registry, ua_chan->key);
		}
	}

	if (ua_chan->obj != NULL) {
		/* Remove channel from application UST object descriptor. */
		iter.iter.node = &ua_chan->ust_objd_node.node;
		lttng_ht_del(app->ust_objd, &iter);
		ret = ustctl_release_object(sock, ua_chan->obj);
		/* EPIPE/EXITING mean the app died, which is not an error here. */
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release channel obj failed with ret %d",
					sock, ret);
		}
		lttng_fd_put(LTTNG_FD_APPS, 1);
		free(ua_chan->obj);
	}
	/* Free the channel (and its hash tables) after a grace period. */
	call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
}
420
/*
 * Push metadata to consumer socket.
 *
 * The socket lock MUST be acquired.
 * The ust app session lock MUST be acquired.
 *
 * On success, return the len of metadata pushed or else a negative value.
 */
ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
		struct consumer_socket *socket, int send_zero_data)
{
	int ret;
	char *metadata_str = NULL;
	size_t len, offset;
	ssize_t ret_val;

	assert(registry);
	assert(socket);

	/*
	 * On a push metadata error either the consumer is dead or the metadata
	 * channel has been destroyed because its endpoint might have died (e.g:
	 * relayd). If so, the metadata closed flag is set to 1 so we deny pushing
	 * metadata again which is not valid anymore on the consumer side.
	 *
	 * The ust app session mutex locked allows us to make this check without
	 * the registry lock.
	 */
	if (registry->metadata_closed) {
		return -EPIPE;
	}

	pthread_mutex_lock(&registry->lock);

	/* Send only what has not been sent yet. */
	offset = registry->metadata_len_sent;
	len = registry->metadata_len - registry->metadata_len_sent;
	if (len == 0) {
		DBG3("No metadata to push for metadata key %" PRIu64,
				registry->metadata_key);
		ret_val = len;
		if (send_zero_data) {
			DBG("No metadata to push");
			/* Zero-length push: metadata_str stays NULL, len is 0. */
			goto push_data;
		}
		goto end;
	}

	/* Allocate only what we have to send. */
	metadata_str = zmalloc(len);
	if (!metadata_str) {
		PERROR("zmalloc ust app metadata string");
		ret_val = -ENOMEM;
		goto error;
	}
	/* Copy what we haven't send out. */
	memcpy(metadata_str, registry->metadata + offset, len);
	/*
	 * Account the bytes as sent before dropping the registry lock so a
	 * concurrent pusher does not send the same segment twice.
	 */
	registry->metadata_len_sent += len;

push_data:
	/* Registry lock is released before the (blocking) consumer send. */
	pthread_mutex_unlock(&registry->lock);
	ret = consumer_push_metadata(socket, registry->metadata_key,
			metadata_str, len, offset);
	if (ret < 0) {
		ret_val = ret;
		goto error_push;
	}

	free(metadata_str);
	return len;

end:
error:
	pthread_mutex_unlock(&registry->lock);
error_push:
	/* free(NULL) is safe on the zero-length path. */
	free(metadata_str);
	return ret_val;
}
498
/*
 * For a given application and session, push metadata to consumer. The session
 * lock MUST be acquired here before calling this.
 * Either sock or consumer is required : if sock is NULL, the default
 * socket to send the metadata is retrieved from consumer, if sock
 * is not NULL we use it to send the metadata.
 *
 * Return 0 on success else a negative error.
 */
static int push_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret_val;
	ssize_t ret;
	struct consumer_socket *socket;

	assert(registry);
	assert(consumer);

	rcu_read_lock();

	/*
	 * Means that no metadata was assigned to the session. This can happens if
	 * no start has been done previously.
	 */
	if (!registry->metadata_key) {
		/* Nothing to push; not an error. */
		ret_val = 0;
		goto end_rcu_unlock;
	}

	/* Get consumer socket to use to push the metadata.*/
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret_val = -1;
		goto error_rcu_unlock;
	}

	/*
	 * TODO: Currently, we hold the socket lock around sampling of the next
	 * metadata segment to ensure we send metadata over the consumer socket in
	 * the correct order. This makes the registry lock nest inside the socket
	 * lock.
	 *
	 * Please note that this is a temporary measure: we should move this lock
	 * back into ust_consumer_push_metadata() when the consumer gets the
	 * ability to reorder the metadata it receives.
	 */
	pthread_mutex_lock(socket->lock);
	ret = ust_app_push_metadata(registry, socket, 0);
	pthread_mutex_unlock(socket->lock);
	if (ret < 0) {
		ret_val = ret;
		goto error_rcu_unlock;
	}

	rcu_read_unlock();
	return 0;

error_rcu_unlock:
	/*
	 * On error, flag the registry that the metadata is closed. We were unable
	 * to push anything and this means that either the consumer is not
	 * responding or the metadata cache has been destroyed on the consumer.
	 */
	registry->metadata_closed = 1;
end_rcu_unlock:
	rcu_read_unlock();
	return ret_val;
}
569
/*
 * Send to the consumer a close metadata command for the given session. Once
 * done, the metadata channel is deleted and the session metadata pointer is
 * nullified. The session lock MUST be acquired here unless the application is
 * in the destroy path.
 *
 * Return 0 on success else a negative value.
 */
static int close_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret;
	struct consumer_socket *socket;

	assert(registry);
	assert(consumer);

	rcu_read_lock();

	/* Already closed or never assigned: nothing to do. */
	if (!registry->metadata_key || registry->metadata_closed) {
		ret = 0;
		goto end;
	}

	/* Get consumer socket to use to push the metadata.*/
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret = -1;
		goto error;
	}

	ret = consumer_close_metadata(socket, registry->metadata_key);
	if (ret < 0) {
		goto error;
	}

	/* Deliberate fallthrough into the error label; see comment below. */
error:
	/*
	 * Metadata closed. Even on error this means that the consumer is not
	 * responding or not found so either way a second close should NOT be emit
	 * for this registry.
	 */
	registry->metadata_closed = 1;
end:
	rcu_read_unlock();
	return ret;
}
618
619 /*
620 * We need to execute ht_destroy outside of RCU read-side critical
621 * section and outside of call_rcu thread, so we postpone its execution
622 * using ht_cleanup_push. It is simpler than to change the semantic of
623 * the many callers of delete_ust_app_session().
624 */
625 static
626 void delete_ust_app_session_rcu(struct rcu_head *head)
627 {
628 struct ust_app_session *ua_sess =
629 caa_container_of(head, struct ust_app_session, rcu_head);
630
631 ht_cleanup_push(ua_sess->channels);
632 free(ua_sess);
633 }
634
/*
 * Delete ust app session safely. RCU read lock must be held before calling
 * this function.
 *
 * Pushes/closes metadata as needed, wipes all channels, removes the per-PID
 * buffer registry when applicable, releases the tracer-side session handle,
 * then frees the session through call_rcu().
 */
static
void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_channel *ua_chan;
	struct ust_registry_session *registry;

	assert(ua_sess);

	pthread_mutex_lock(&ua_sess->lock);

	registry = get_session_registry(ua_sess);
	if (registry && !registry->metadata_closed) {
		/* Push metadata for application before freeing the application. */
		(void) push_metadata(registry, ua_sess->consumer);

		/*
		 * Don't ask to close metadata for global per UID buffers. Close
		 * metadata only on destroy trace session in this case. Also, the
		 * previous push metadata could have flag the metadata registry to
		 * close so don't send a close command if closed.
		 */
		if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID &&
				!registry->metadata_closed) {
			/* And ask to close it for this session registry. */
			(void) close_metadata(registry, ua_sess->consumer);
		}
	}

	/* Wipe every channel of the session. */
	cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
			node.node) {
		ret = lttng_ht_del(ua_sess->channels, &iter);
		assert(!ret);
		delete_ust_app_channel(sock, ua_chan, app);
	}

	/* In case of per PID, the registry is kept in the session. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
		struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
		if (reg_pid) {
			buffer_reg_pid_remove(reg_pid);
			buffer_reg_pid_destroy(reg_pid);
		}
	}

	/* -1 means the tracer-side handle was never created. */
	if (ua_sess->handle != -1) {
		ret = ustctl_release_handle(sock, ua_sess->handle);
		/* EPIPE/EXITING mean the app died, which is not an error here. */
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release session handle failed with ret %d",
					sock, ret);
		}
	}
	pthread_mutex_unlock(&ua_sess->lock);

	/* Free the session (and its channel ht) after a grace period. */
	call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
}
697
/*
 * Delete a traceable application structure from the global list. Never call
 * this function outside of a call_rcu call.
 *
 * RCU read side lock should _NOT_ be held when calling this function.
 */
static
void delete_ust_app(struct ust_app *app)
{
	int ret, sock;
	struct ust_app_session *ua_sess, *tmp_ua_sess;

	/* Delete ust app sessions info */
	sock = app->sock;
	/* Invalidate the app socket so no further command uses it. */
	app->sock = -1;

	/* Wipe sessions */
	cds_list_for_each_entry_safe(ua_sess, tmp_ua_sess, &app->teardown_head,
			teardown_node) {
		/* Free every object in the session and the session. */
		rcu_read_lock();
		delete_ust_app_session(sock, ua_sess, app);
		rcu_read_unlock();
	}

	/* Defer hash table destruction to the ht-cleanup thread. */
	ht_cleanup_push(app->sessions);
	ht_cleanup_push(app->ust_objd);

	/*
	 * Wait until we have deleted the application from the sock hash table
	 * before closing this socket, otherwise an application could re-use the
	 * socket ID and race with the teardown, using the same hash table entry.
	 *
	 * It's OK to leave the close in call_rcu. We want it to stay unique for
	 * all RCU readers that could run concurrently with unregister app,
	 * therefore we _need_ to only close that socket after a grace period. So
	 * it should stay in this RCU callback.
	 *
	 * This close() is a very important step of the synchronization model so
	 * every modification to this function must be carefully reviewed.
	 */
	ret = close(sock);
	if (ret) {
		PERROR("close");
	}
	lttng_fd_put(LTTNG_FD_APPS, 1);

	DBG2("UST app pid %d deleted", app->pid);
	free(app);
}
748
749 /*
750 * URCU intermediate call to delete an UST app.
751 */
752 static
753 void delete_ust_app_rcu(struct rcu_head *head)
754 {
755 struct lttng_ht_node_ulong *node =
756 caa_container_of(head, struct lttng_ht_node_ulong, head);
757 struct ust_app *app =
758 caa_container_of(node, struct ust_app, pid_n);
759
760 DBG3("Call RCU deleting app PID %d", app->pid);
761 delete_ust_app(app);
762 }
763
764 /*
765 * Delete the session from the application ht and delete the data structure by
766 * freeing every object inside and releasing them.
767 */
768 static void destroy_app_session(struct ust_app *app,
769 struct ust_app_session *ua_sess)
770 {
771 int ret;
772 struct lttng_ht_iter iter;
773
774 assert(app);
775 assert(ua_sess);
776
777 iter.iter.node = &ua_sess->node.node;
778 ret = lttng_ht_del(app->sessions, &iter);
779 if (ret) {
780 /* Already scheduled for teardown. */
781 goto end;
782 }
783
784 /* Once deleted, free the data structure. */
785 delete_ust_app_session(app->sock, ua_sess, app);
786
787 end:
788 return;
789 }
790
791 /*
792 * Alloc new UST app session.
793 */
794 static
795 struct ust_app_session *alloc_ust_app_session(struct ust_app *app)
796 {
797 struct ust_app_session *ua_sess;
798
799 /* Init most of the default value by allocating and zeroing */
800 ua_sess = zmalloc(sizeof(struct ust_app_session));
801 if (ua_sess == NULL) {
802 PERROR("malloc");
803 goto error_free;
804 }
805
806 ua_sess->handle = -1;
807 ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
808 pthread_mutex_init(&ua_sess->lock, NULL);
809
810 return ua_sess;
811
812 error_free:
813 return NULL;
814 }
815
816 /*
817 * Alloc new UST app channel.
818 */
819 static
820 struct ust_app_channel *alloc_ust_app_channel(char *name,
821 struct ust_app_session *ua_sess,
822 struct lttng_ust_channel_attr *attr)
823 {
824 struct ust_app_channel *ua_chan;
825
826 /* Init most of the default value by allocating and zeroing */
827 ua_chan = zmalloc(sizeof(struct ust_app_channel));
828 if (ua_chan == NULL) {
829 PERROR("malloc");
830 goto error;
831 }
832
833 /* Setup channel name */
834 strncpy(ua_chan->name, name, sizeof(ua_chan->name));
835 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
836
837 ua_chan->enabled = 1;
838 ua_chan->handle = -1;
839 ua_chan->session = ua_sess;
840 ua_chan->key = get_next_channel_key();
841 ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
842 ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
843 lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);
844
845 CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
846 CDS_INIT_LIST_HEAD(&ua_chan->ctx_list);
847
848 /* Copy attributes */
849 if (attr) {
850 /* Translate from lttng_ust_channel to ustctl_consumer_channel_attr. */
851 ua_chan->attr.subbuf_size = attr->subbuf_size;
852 ua_chan->attr.num_subbuf = attr->num_subbuf;
853 ua_chan->attr.overwrite = attr->overwrite;
854 ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
855 ua_chan->attr.read_timer_interval = attr->read_timer_interval;
856 ua_chan->attr.output = attr->output;
857 }
858 /* By default, the channel is a per cpu channel. */
859 ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
860
861 DBG3("UST app channel %s allocated", ua_chan->name);
862
863 return ua_chan;
864
865 error:
866 return NULL;
867 }
868
869 /*
870 * Allocate and initialize a UST app stream.
871 *
872 * Return newly allocated stream pointer or NULL on error.
873 */
874 struct ust_app_stream *ust_app_alloc_stream(void)
875 {
876 struct ust_app_stream *stream = NULL;
877
878 stream = zmalloc(sizeof(*stream));
879 if (stream == NULL) {
880 PERROR("zmalloc ust app stream");
881 goto error;
882 }
883
884 /* Zero could be a valid value for a handle so flag it to -1. */
885 stream->handle = -1;
886
887 error:
888 return stream;
889 }
890
891 /*
892 * Alloc new UST app event.
893 */
894 static
895 struct ust_app_event *alloc_ust_app_event(char *name,
896 struct lttng_ust_event *attr)
897 {
898 struct ust_app_event *ua_event;
899
900 /* Init most of the default value by allocating and zeroing */
901 ua_event = zmalloc(sizeof(struct ust_app_event));
902 if (ua_event == NULL) {
903 PERROR("malloc");
904 goto error;
905 }
906
907 ua_event->enabled = 1;
908 strncpy(ua_event->name, name, sizeof(ua_event->name));
909 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
910 lttng_ht_node_init_str(&ua_event->node, ua_event->name);
911
912 /* Copy attributes */
913 if (attr) {
914 memcpy(&ua_event->attr, attr, sizeof(ua_event->attr));
915 }
916
917 DBG3("UST app event %s allocated", ua_event->name);
918
919 return ua_event;
920
921 error:
922 return NULL;
923 }
924
925 /*
926 * Alloc new UST app context.
927 */
928 static
929 struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context *uctx)
930 {
931 struct ust_app_ctx *ua_ctx;
932
933 ua_ctx = zmalloc(sizeof(struct ust_app_ctx));
934 if (ua_ctx == NULL) {
935 goto error;
936 }
937
938 CDS_INIT_LIST_HEAD(&ua_ctx->list);
939
940 if (uctx) {
941 memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
942 }
943
944 DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);
945
946 error:
947 return ua_ctx;
948 }
949
950 /*
951 * Allocate a filter and copy the given original filter.
952 *
953 * Return allocated filter or NULL on error.
954 */
955 static struct lttng_ust_filter_bytecode *alloc_copy_ust_app_filter(
956 struct lttng_ust_filter_bytecode *orig_f)
957 {
958 struct lttng_ust_filter_bytecode *filter = NULL;
959
960 /* Copy filter bytecode */
961 filter = zmalloc(sizeof(*filter) + orig_f->len);
962 if (!filter) {
963 PERROR("zmalloc alloc ust app filter");
964 goto error;
965 }
966
967 memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
968
969 error:
970 return filter;
971 }
972
973 /*
974 * Find an ust_app using the sock and return it. RCU read side lock must be
975 * held before calling this helper function.
976 */
977 struct ust_app *ust_app_find_by_sock(int sock)
978 {
979 struct lttng_ht_node_ulong *node;
980 struct lttng_ht_iter iter;
981
982 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
983 node = lttng_ht_iter_get_node_ulong(&iter);
984 if (node == NULL) {
985 DBG2("UST app find by sock %d not found", sock);
986 goto error;
987 }
988
989 return caa_container_of(node, struct ust_app, sock_n);
990
991 error:
992 return NULL;
993 }
994
995 /*
996 * Find an ust_app using the notify sock and return it. RCU read side lock must
997 * be held before calling this helper function.
998 */
999 static struct ust_app *find_app_by_notify_sock(int sock)
1000 {
1001 struct lttng_ht_node_ulong *node;
1002 struct lttng_ht_iter iter;
1003
1004 lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
1005 &iter);
1006 node = lttng_ht_iter_get_node_ulong(&iter);
1007 if (node == NULL) {
1008 DBG2("UST app find by notify sock %d not found", sock);
1009 goto error;
1010 }
1011
1012 return caa_container_of(node, struct ust_app, notify_sock_n);
1013
1014 error:
1015 return NULL;
1016 }
1017
/*
 * Lookup for an ust app event based on event name, filter bytecode, the
 * event loglevel and the exclusion list.
 *
 * Return an ust_app_event object or NULL if not found.
 */
static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
		char *name, struct lttng_ust_filter_bytecode *filter, int loglevel,
		const struct lttng_event_exclusion *exclusion)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *node;
	struct ust_app_event *event = NULL;
	struct ust_app_ht_key key;

	assert(name);
	assert(ht);

	/* Setup key for event lookup. */
	key.name = name;
	key.filter = filter;
	key.loglevel = loglevel;
	/* lttng_event_exclusion and lttng_ust_event_exclusion structures are similar */
	key.exclusion = (struct lttng_ust_event_exclusion *)exclusion;

	/* Lookup using the event name as hash and a custom match fct. */
	cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
			ht_match_ust_app_event, &key, &iter.iter);
	node = lttng_ht_iter_get_node_str(&iter);
	if (node == NULL) {
		goto end;
	}

	event = caa_container_of(node, struct ust_app_event, node);

end:
	return event;
}
1056
1057 /*
1058 * Create the channel context on the tracer.
1059 *
1060 * Called with UST app session lock held.
1061 */
1062 static
1063 int create_ust_channel_context(struct ust_app_channel *ua_chan,
1064 struct ust_app_ctx *ua_ctx, struct ust_app *app)
1065 {
1066 int ret;
1067
1068 health_code_update();
1069
1070 ret = ustctl_add_context(app->sock, &ua_ctx->ctx,
1071 ua_chan->obj, &ua_ctx->obj);
1072 if (ret < 0) {
1073 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1074 ERR("UST app create channel context failed for app (pid: %d) "
1075 "with ret %d", app->pid, ret);
1076 } else {
1077 /*
1078 * This is normal behavior, an application can die during the
1079 * creation process. Don't report an error so the execution can
1080 * continue normally.
1081 */
1082 ret = 0;
1083 DBG3("UST app disable event failed. Application is dead.");
1084 }
1085 goto error;
1086 }
1087
1088 ua_ctx->handle = ua_ctx->obj->handle;
1089
1090 DBG2("UST app context handle %d created successfully for channel %s",
1091 ua_ctx->handle, ua_chan->name);
1092
1093 error:
1094 health_code_update();
1095 return ret;
1096 }
1097
1098 /*
1099 * Set the filter on the tracer.
1100 */
1101 static
1102 int set_ust_event_filter(struct ust_app_event *ua_event,
1103 struct ust_app *app)
1104 {
1105 int ret;
1106
1107 health_code_update();
1108
1109 if (!ua_event->filter) {
1110 ret = 0;
1111 goto error;
1112 }
1113
1114 ret = ustctl_set_filter(app->sock, ua_event->filter,
1115 ua_event->obj);
1116 if (ret < 0) {
1117 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1118 ERR("UST app event %s filter failed for app (pid: %d) "
1119 "with ret %d", ua_event->attr.name, app->pid, ret);
1120 } else {
1121 /*
1122 * This is normal behavior, an application can die during the
1123 * creation process. Don't report an error so the execution can
1124 * continue normally.
1125 */
1126 ret = 0;
1127 DBG3("UST app filter event failed. Application is dead.");
1128 }
1129 goto error;
1130 }
1131
1132 DBG2("UST filter set successfully for event %s", ua_event->name);
1133
1134 error:
1135 health_code_update();
1136 return ret;
1137 }
1138
1139 /*
1140 * Disable the specified event on to UST tracer for the UST session.
1141 */
1142 static int disable_ust_event(struct ust_app *app,
1143 struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
1144 {
1145 int ret;
1146
1147 health_code_update();
1148
1149 ret = ustctl_disable(app->sock, ua_event->obj);
1150 if (ret < 0) {
1151 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1152 ERR("UST app event %s disable failed for app (pid: %d) "
1153 "and session handle %d with ret %d",
1154 ua_event->attr.name, app->pid, ua_sess->handle, ret);
1155 } else {
1156 /*
1157 * This is normal behavior, an application can die during the
1158 * creation process. Don't report an error so the execution can
1159 * continue normally.
1160 */
1161 ret = 0;
1162 DBG3("UST app disable event failed. Application is dead.");
1163 }
1164 goto error;
1165 }
1166
1167 DBG2("UST app event %s disabled successfully for app (pid: %d)",
1168 ua_event->attr.name, app->pid);
1169
1170 error:
1171 health_code_update();
1172 return ret;
1173 }
1174
1175 /*
1176 * Disable the specified channel on to UST tracer for the UST session.
1177 */
1178 static int disable_ust_channel(struct ust_app *app,
1179 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1180 {
1181 int ret;
1182
1183 health_code_update();
1184
1185 ret = ustctl_disable(app->sock, ua_chan->obj);
1186 if (ret < 0) {
1187 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1188 ERR("UST app channel %s disable failed for app (pid: %d) "
1189 "and session handle %d with ret %d",
1190 ua_chan->name, app->pid, ua_sess->handle, ret);
1191 } else {
1192 /*
1193 * This is normal behavior, an application can die during the
1194 * creation process. Don't report an error so the execution can
1195 * continue normally.
1196 */
1197 ret = 0;
1198 DBG3("UST app disable channel failed. Application is dead.");
1199 }
1200 goto error;
1201 }
1202
1203 DBG2("UST app channel %s disabled successfully for app (pid: %d)",
1204 ua_chan->name, app->pid);
1205
1206 error:
1207 health_code_update();
1208 return ret;
1209 }
1210
1211 /*
1212 * Enable the specified channel on to UST tracer for the UST session.
1213 */
1214 static int enable_ust_channel(struct ust_app *app,
1215 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1216 {
1217 int ret;
1218
1219 health_code_update();
1220
1221 ret = ustctl_enable(app->sock, ua_chan->obj);
1222 if (ret < 0) {
1223 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1224 ERR("UST app channel %s enable failed for app (pid: %d) "
1225 "and session handle %d with ret %d",
1226 ua_chan->name, app->pid, ua_sess->handle, ret);
1227 } else {
1228 /*
1229 * This is normal behavior, an application can die during the
1230 * creation process. Don't report an error so the execution can
1231 * continue normally.
1232 */
1233 ret = 0;
1234 DBG3("UST app enable channel failed. Application is dead.");
1235 }
1236 goto error;
1237 }
1238
1239 ua_chan->enabled = 1;
1240
1241 DBG2("UST app channel %s enabled successfully for app (pid: %d)",
1242 ua_chan->name, app->pid);
1243
1244 error:
1245 health_code_update();
1246 return ret;
1247 }
1248
1249 /*
1250 * Enable the specified event on to UST tracer for the UST session.
1251 */
1252 static int enable_ust_event(struct ust_app *app,
1253 struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
1254 {
1255 int ret;
1256
1257 health_code_update();
1258
1259 ret = ustctl_enable(app->sock, ua_event->obj);
1260 if (ret < 0) {
1261 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1262 ERR("UST app event %s enable failed for app (pid: %d) "
1263 "and session handle %d with ret %d",
1264 ua_event->attr.name, app->pid, ua_sess->handle, ret);
1265 } else {
1266 /*
1267 * This is normal behavior, an application can die during the
1268 * creation process. Don't report an error so the execution can
1269 * continue normally.
1270 */
1271 ret = 0;
1272 DBG3("UST app enable event failed. Application is dead.");
1273 }
1274 goto error;
1275 }
1276
1277 DBG2("UST app event %s enabled successfully for app (pid: %d)",
1278 ua_event->attr.name, app->pid);
1279
1280 error:
1281 health_code_update();
1282 return ret;
1283 }
1284
/*
 * Send channel and stream buffer to application.
 *
 * The channel object is sent first, then each stream. Streams successfully
 * sent are removed from the channel's stream list and freed; on a mid-loop
 * failure, the remaining streams stay queued on the channel.
 *
 * Return 0 on success. On error, a negative value is returned.
 */
static int send_channel_pid_to_ust(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
{
	int ret;
	struct ust_app_stream *stream, *stmp;

	assert(app);
	assert(ua_sess);
	assert(ua_chan);

	health_code_update();

	DBG("UST app sending channel %s to UST app sock %d", ua_chan->name,
			app->sock);

	/* Send channel to the application. */
	ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Send all streams to application. */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
		if (ret < 0) {
			goto error;
		}
		/* We don't need the stream anymore once sent to the tracer. */
		cds_list_del(&stream->list);
		delete_ust_app_stream(-1, stream);
	}
	/* Flag the channel that it is sent to the application. */
	ua_chan->is_sent = 1;

error:
	health_code_update();
	return ret;
}
1330
1331 /*
1332 * Create the specified event onto the UST tracer for a UST session.
1333 *
1334 * Should be called with session mutex held.
1335 */
1336 static
1337 int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
1338 struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
1339 {
1340 int ret = 0;
1341
1342 health_code_update();
1343
1344 /* Create UST event on tracer */
1345 ret = ustctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
1346 &ua_event->obj);
1347 if (ret < 0) {
1348 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1349 ERR("Error ustctl create event %s for app pid: %d with ret %d",
1350 ua_event->attr.name, app->pid, ret);
1351 } else {
1352 /*
1353 * This is normal behavior, an application can die during the
1354 * creation process. Don't report an error so the execution can
1355 * continue normally.
1356 */
1357 ret = 0;
1358 DBG3("UST app create event failed. Application is dead.");
1359 }
1360 goto error;
1361 }
1362
1363 ua_event->handle = ua_event->obj->handle;
1364
1365 DBG2("UST app event %s created successfully for pid:%d",
1366 ua_event->attr.name, app->pid);
1367
1368 health_code_update();
1369
1370 /* Set filter if one is present. */
1371 if (ua_event->filter) {
1372 ret = set_ust_event_filter(ua_event, app);
1373 if (ret < 0) {
1374 goto error;
1375 }
1376 }
1377
1378 /* If event not enabled, disable it on the tracer */
1379 if (ua_event->enabled == 0) {
1380 ret = disable_ust_event(app, ua_sess, ua_event);
1381 if (ret < 0) {
1382 /*
1383 * If we hit an EPERM, something is wrong with our disable call. If
1384 * we get an EEXIST, there is a problem on the tracer side since we
1385 * just created it.
1386 */
1387 switch (ret) {
1388 case -LTTNG_UST_ERR_PERM:
1389 /* Code flow problem */
1390 assert(0);
1391 case -LTTNG_UST_ERR_EXIST:
1392 /* It's OK for our use case. */
1393 ret = 0;
1394 break;
1395 default:
1396 break;
1397 }
1398 goto error;
1399 }
1400 }
1401
1402 error:
1403 health_code_update();
1404 return ret;
1405 }
1406
/*
 * Copy data between an UST app event and a LTT event.
 *
 * Copies name, enabled state, attributes, filter bytecode and exclusion
 * data. Allocation failures for filter/exclusion leave the corresponding
 * pointer NULL without reporting an error (best-effort copy).
 */
static void shadow_copy_event(struct ust_app_event *ua_event,
		struct ltt_ust_event *uevent)
{
	size_t exclusion_alloc_size;

	/* Bounded copy; explicitly NUL-terminate since strncpy may not. */
	strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
	ua_event->name[sizeof(ua_event->name) - 1] = '\0';

	ua_event->enabled = uevent->enabled;

	/* Copy event attributes */
	memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));

	/* Copy filter bytecode */
	if (uevent->filter) {
		ua_event->filter = alloc_copy_ust_app_filter(uevent->filter);
		/* Filter might be NULL here in case of ENOMEM. */
	}

	/* Copy exclusion data */
	if (uevent->exclusion) {
		/* Header plus one symbol name slot per excluded name. */
		exclusion_alloc_size = sizeof(struct lttng_ust_event_exclusion) +
				LTTNG_UST_SYM_NAME_LEN * uevent->exclusion->count;
		ua_event->exclusion = zmalloc(exclusion_alloc_size);
		if (ua_event->exclusion) {
			memcpy(ua_event->exclusion, uevent->exclusion, exclusion_alloc_size);
		}
	}
}
1439
/*
 * Copy data between an UST app channel and a LTT channel.
 *
 * Copies name, tracefile settings, buffer attributes, contexts and events.
 * Events already present in the app channel (same name/filter/loglevel/
 * exclusion) are left untouched.
 */
static void shadow_copy_channel(struct ust_app_channel *ua_chan,
		struct ltt_ust_channel *uchan)
{
	struct lttng_ht_iter iter;
	struct ltt_ust_event *uevent;
	struct ltt_ust_context *uctx;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;

	DBG2("UST app shadow copy of channel %s started", ua_chan->name);

	/* Bounded copy; explicitly NUL-terminate since strncpy may not. */
	strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
	ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';

	ua_chan->tracefile_size = uchan->tracefile_size;
	ua_chan->tracefile_count = uchan->tracefile_count;

	/* Copy event attributes since the layout is different. */
	ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
	ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
	ua_chan->attr.overwrite = uchan->attr.overwrite;
	ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
	ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
	ua_chan->attr.output = uchan->attr.output;
	/*
	 * Note that the attribute channel type is not set since the channel on the
	 * tracing registry side does not have this information.
	 */

	ua_chan->enabled = uchan->enabled;
	ua_chan->tracing_channel_id = uchan->id;

	/* Copy every context attached to the LTT channel. */
	cds_list_for_each_entry(uctx, &uchan->ctx_list, list) {
		ua_ctx = alloc_ust_app_ctx(&uctx->ctx);
		if (ua_ctx == NULL) {
			/* NOTE(review): allocation failure is silently skipped here. */
			continue;
		}
		lttng_ht_node_init_ulong(&ua_ctx->node,
				(unsigned long) ua_ctx->ctx.ctx);
		lttng_ht_add_unique_ulong(ua_chan->ctx, &ua_ctx->node);
		cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
	}

	/* Copy all events from ltt ust channel to ust app channel */
	cds_lfht_for_each_entry(uchan->events->ht, &iter.iter, uevent, node.node) {
		ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
				uevent->filter, uevent->attr.loglevel, uevent->exclusion);
		if (ua_event == NULL) {
			DBG2("UST event %s not found on shadow copy channel",
					uevent->attr.name);
			ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
			if (ua_event == NULL) {
				/* NOTE(review): allocation failure is silently skipped here. */
				continue;
			}
			shadow_copy_event(ua_event, uevent);
			add_unique_ust_app_event(ua_chan, ua_event);
		}
	}

	DBG3("UST app shadow copy of channel %s done", ua_chan->name);
}
1504
1505 /*
1506 * Copy data between a UST app session and a regular LTT session.
1507 */
1508 static void shadow_copy_session(struct ust_app_session *ua_sess,
1509 struct ltt_ust_session *usess, struct ust_app *app)
1510 {
1511 struct lttng_ht_node_str *ua_chan_node;
1512 struct lttng_ht_iter iter;
1513 struct ltt_ust_channel *uchan;
1514 struct ust_app_channel *ua_chan;
1515 time_t rawtime;
1516 struct tm *timeinfo;
1517 char datetime[16];
1518 int ret;
1519
1520 /* Get date and time for unique app path */
1521 time(&rawtime);
1522 timeinfo = localtime(&rawtime);
1523 strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);
1524
1525 DBG2("Shadow copy of session handle %d", ua_sess->handle);
1526
1527 ua_sess->tracing_id = usess->id;
1528 ua_sess->id = get_next_session_id();
1529 ua_sess->uid = app->uid;
1530 ua_sess->gid = app->gid;
1531 ua_sess->euid = usess->uid;
1532 ua_sess->egid = usess->gid;
1533 ua_sess->buffer_type = usess->buffer_type;
1534 ua_sess->bits_per_long = app->bits_per_long;
1535 /* There is only one consumer object per session possible. */
1536 ua_sess->consumer = usess->consumer;
1537 ua_sess->output_traces = usess->output_traces;
1538 ua_sess->live_timer_interval = usess->live_timer_interval;
1539
1540 switch (ua_sess->buffer_type) {
1541 case LTTNG_BUFFER_PER_PID:
1542 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
1543 DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
1544 datetime);
1545 break;
1546 case LTTNG_BUFFER_PER_UID:
1547 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
1548 DEFAULT_UST_TRACE_UID_PATH, ua_sess->uid, app->bits_per_long);
1549 break;
1550 default:
1551 assert(0);
1552 goto error;
1553 }
1554 if (ret < 0) {
1555 PERROR("asprintf UST shadow copy session");
1556 assert(0);
1557 goto error;
1558 }
1559
1560 /* Iterate over all channels in global domain. */
1561 cds_lfht_for_each_entry(usess->domain_global.channels->ht, &iter.iter,
1562 uchan, node.node) {
1563 struct lttng_ht_iter uiter;
1564
1565 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
1566 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
1567 if (ua_chan_node != NULL) {
1568 /* Session exist. Contiuing. */
1569 continue;
1570 }
1571
1572 DBG2("Channel %s not found on shadow session copy, creating it",
1573 uchan->name);
1574 ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
1575 if (ua_chan == NULL) {
1576 /* malloc failed FIXME: Might want to do handle ENOMEM .. */
1577 continue;
1578 }
1579 shadow_copy_channel(ua_chan, uchan);
1580 /*
1581 * The concept of metadata channel does not exist on the tracing
1582 * registry side of the session daemon so this can only be a per CPU
1583 * channel and not metadata.
1584 */
1585 ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
1586
1587 lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
1588 }
1589
1590 error:
1591 return;
1592 }
1593
/*
 * Lookup session wrapper.
 *
 * Populates iter with the result of looking up the UST session id in the
 * application's session hash table; use lttng_ht_iter_get_node_u64() on
 * iter to retrieve the node.
 */
static
void __lookup_session_by_app(struct ltt_ust_session *usess,
		struct ust_app *app, struct lttng_ht_iter *iter)
{
	/* Get right UST app session from app */
	lttng_ht_lookup(app->sessions, &usess->id, iter);
}
1604
1605 /*
1606 * Return ust app session from the app session hashtable using the UST session
1607 * id.
1608 */
1609 static struct ust_app_session *lookup_session_by_app(
1610 struct ltt_ust_session *usess, struct ust_app *app)
1611 {
1612 struct lttng_ht_iter iter;
1613 struct lttng_ht_node_u64 *node;
1614
1615 __lookup_session_by_app(usess, app, &iter);
1616 node = lttng_ht_iter_get_node_u64(&iter);
1617 if (node == NULL) {
1618 goto error;
1619 }
1620
1621 return caa_container_of(node, struct ust_app_session, node);
1622
1623 error:
1624 return NULL;
1625 }
1626
/*
 * Setup buffer registry per PID for the given session and application. If none
 * is found, a new one is created, added to the global registry and
 * initialized. If regp is valid, it's set with the newly created object.
 *
 * Note that when a registry already exists, initialization is skipped and
 * the existing registry is returned via regp.
 *
 * Return 0 on success or else a negative value.
 */
static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
		struct ust_app *app, struct buffer_reg_pid **regp)
{
	int ret = 0;
	struct buffer_reg_pid *reg_pid;

	assert(ua_sess);
	assert(app);

	rcu_read_lock();

	reg_pid = buffer_reg_pid_find(ua_sess->id);
	if (!reg_pid) {
		/*
		 * This is the create channel path meaning that if there is NO
		 * registry available, we have to create one for this session.
		 */
		ret = buffer_reg_pid_create(ua_sess->id, &reg_pid);
		if (ret < 0) {
			goto error;
		}
		buffer_reg_pid_add(reg_pid);
	} else {
		/* Registry already exists; skip re-initialization. */
		goto end;
	}

	/* Initialize registry with the application's ABI description. */
	ret = ust_registry_session_init(&reg_pid->registry->reg.ust, app,
			app->bits_per_long, app->uint8_t_alignment,
			app->uint16_t_alignment, app->uint32_t_alignment,
			app->uint64_t_alignment, app->long_alignment,
			app->byte_order, app->version.major,
			app->version.minor);
	if (ret < 0) {
		goto error;
	}

	DBG3("UST app buffer registry per PID created successfully");

end:
	if (regp) {
		*regp = reg_pid;
	}
error:
	rcu_read_unlock();
	return ret;
}
1681
/*
 * Setup buffer registry per UID for the given session and application. If none
 * is found, a new one is created, added to the global registry and
 * initialized. If regp is valid, it's set with the newly created object.
 *
 * The registry is keyed by (session id, bitness, uid) so 32-bit and 64-bit
 * applications of the same user get distinct registries. When one already
 * exists, initialization is skipped.
 *
 * Return 0 on success or else a negative value.
 */
static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
		struct ust_app *app, struct buffer_reg_uid **regp)
{
	int ret = 0;
	struct buffer_reg_uid *reg_uid;

	assert(usess);
	assert(app);

	rcu_read_lock();

	reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
	if (!reg_uid) {
		/*
		 * This is the create channel path meaning that if there is NO
		 * registry available, we have to create one for this session.
		 */
		ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
				LTTNG_DOMAIN_UST, &reg_uid);
		if (ret < 0) {
			goto error;
		}
		buffer_reg_uid_add(reg_uid);
	} else {
		/* Registry already exists; skip re-initialization. */
		goto end;
	}

	/* Initialize registry. Per-UID registries are not tied to one app (NULL). */
	ret = ust_registry_session_init(&reg_uid->registry->reg.ust, NULL,
			app->bits_per_long, app->uint8_t_alignment,
			app->uint16_t_alignment, app->uint32_t_alignment,
			app->uint64_t_alignment, app->long_alignment,
			app->byte_order, app->version.major,
			app->version.minor);
	if (ret < 0) {
		goto error;
	}
	/* Add node to teardown list of the session. */
	cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);

	DBG3("UST app buffer registry per UID created successfully");

end:
	if (regp) {
		*regp = reg_uid;
	}
error:
	rcu_read_unlock();
	return ret;
}
1739
/*
 * Create a session on the tracer side for the given app.
 *
 * On success, ua_sess_ptr is populated with the session pointer or else left
 * untouched. If the session was created, is_created is set to 1. On error,
 * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
 * be NULL.
 *
 * Returns 0 on success or else a negative code which is either -ENOMEM or
 * -ENOTCONN which is the default code if the ustctl_create_session fails.
 */
static int create_ust_app_session(struct ltt_ust_session *usess,
		struct ust_app *app, struct ust_app_session **ua_sess_ptr,
		int *is_created)
{
	int ret, created = 0;
	struct ust_app_session *ua_sess;

	assert(usess);
	assert(app);
	assert(ua_sess_ptr);

	health_code_update();

	/* Reuse the app session if one already exists for this UST session. */
	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
				app->pid, usess->id);
		ua_sess = alloc_ust_app_session(app);
		if (ua_sess == NULL) {
			/* Only malloc can failed so something is really wrong */
			ret = -ENOMEM;
			goto error;
		}
		shadow_copy_session(ua_sess, usess, app);
		created = 1;
	}

	/* Make sure the matching buffer registry exists for this buffer scheme. */
	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		/* Init local registry. */
		ret = setup_buffer_reg_pid(ua_sess, app, NULL);
		if (ret < 0) {
			goto error;
		}
		break;
	case LTTNG_BUFFER_PER_UID:
		/* Look for a global registry. If none exists, create one. */
		ret = setup_buffer_reg_uid(usess, app, NULL);
		if (ret < 0) {
			goto error;
		}
		break;
	default:
		assert(0);
		ret = -EINVAL;
		goto error;
	}

	health_code_update();

	/* A handle of -1 means the tracer-side session is not created yet. */
	if (ua_sess->handle == -1) {
		ret = ustctl_create_session(app->sock);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("Creating session for app pid %d with ret %d",
						app->pid, ret);
			} else {
				DBG("UST app creating session failed. Application is dead");
				/*
				 * This is normal behavior, an application can die during the
				 * creation process. Don't report an error so the execution can
				 * continue normally. This will get flagged ENOTCONN and the
				 * caller will handle it.
				 */
				ret = 0;
			}
			/* Tear down the partially-created app session. */
			delete_ust_app_session(-1, ua_sess, app);
			if (ret != -ENOMEM) {
				/*
				 * Tracer is probably gone or got an internal error so let's
				 * behave like it will soon unregister or not usable.
				 */
				ret = -ENOTCONN;
			}
			goto error;
		}

		ua_sess->handle = ret;

		/* Add ust app session to app's HT */
		lttng_ht_node_init_u64(&ua_sess->node,
				ua_sess->tracing_id);
		lttng_ht_add_unique_u64(app->sessions, &ua_sess->node);

		DBG2("UST app session created successfully with handle %d", ret);
	}

	*ua_sess_ptr = ua_sess;
	if (is_created) {
		*is_created = created;
	}

	/* Everything went well. */
	ret = 0;

error:
	health_code_update();
	return ret;
}
1850
1851 /*
1852 * Create a context for the channel on the tracer.
1853 *
1854 * Called with UST app session lock held and a RCU read side lock.
1855 */
1856 static
1857 int create_ust_app_channel_context(struct ust_app_session *ua_sess,
1858 struct ust_app_channel *ua_chan, struct lttng_ust_context *uctx,
1859 struct ust_app *app)
1860 {
1861 int ret = 0;
1862 struct lttng_ht_iter iter;
1863 struct lttng_ht_node_ulong *node;
1864 struct ust_app_ctx *ua_ctx;
1865
1866 DBG2("UST app adding context to channel %s", ua_chan->name);
1867
1868 lttng_ht_lookup(ua_chan->ctx, (void *)((unsigned long)uctx->ctx), &iter);
1869 node = lttng_ht_iter_get_node_ulong(&iter);
1870 if (node != NULL) {
1871 ret = -EEXIST;
1872 goto error;
1873 }
1874
1875 ua_ctx = alloc_ust_app_ctx(uctx);
1876 if (ua_ctx == NULL) {
1877 /* malloc failed */
1878 ret = -1;
1879 goto error;
1880 }
1881
1882 lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
1883 lttng_ht_add_unique_ulong(ua_chan->ctx, &ua_ctx->node);
1884 cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
1885
1886 ret = create_ust_channel_context(ua_chan, ua_ctx, app);
1887 if (ret < 0) {
1888 goto error;
1889 }
1890
1891 error:
1892 return ret;
1893 }
1894
1895 /*
1896 * Enable on the tracer side a ust app event for the session and channel.
1897 *
1898 * Called with UST app session lock held.
1899 */
1900 static
1901 int enable_ust_app_event(struct ust_app_session *ua_sess,
1902 struct ust_app_event *ua_event, struct ust_app *app)
1903 {
1904 int ret;
1905
1906 ret = enable_ust_event(app, ua_sess, ua_event);
1907 if (ret < 0) {
1908 goto error;
1909 }
1910
1911 ua_event->enabled = 1;
1912
1913 error:
1914 return ret;
1915 }
1916
1917 /*
1918 * Disable on the tracer side a ust app event for the session and channel.
1919 */
1920 static int disable_ust_app_event(struct ust_app_session *ua_sess,
1921 struct ust_app_event *ua_event, struct ust_app *app)
1922 {
1923 int ret;
1924
1925 ret = disable_ust_event(app, ua_sess, ua_event);
1926 if (ret < 0) {
1927 goto error;
1928 }
1929
1930 ua_event->enabled = 0;
1931
1932 error:
1933 return ret;
1934 }
1935
1936 /*
1937 * Lookup ust app channel for session and disable it on the tracer side.
1938 */
1939 static
1940 int disable_ust_app_channel(struct ust_app_session *ua_sess,
1941 struct ust_app_channel *ua_chan, struct ust_app *app)
1942 {
1943 int ret;
1944
1945 ret = disable_ust_channel(app, ua_sess, ua_chan);
1946 if (ret < 0) {
1947 goto error;
1948 }
1949
1950 ua_chan->enabled = 0;
1951
1952 error:
1953 return ret;
1954 }
1955
1956 /*
1957 * Lookup ust app channel for session and enable it on the tracer side. This
1958 * MUST be called with a RCU read side lock acquired.
1959 */
1960 static int enable_ust_app_channel(struct ust_app_session *ua_sess,
1961 struct ltt_ust_channel *uchan, struct ust_app *app)
1962 {
1963 int ret = 0;
1964 struct lttng_ht_iter iter;
1965 struct lttng_ht_node_str *ua_chan_node;
1966 struct ust_app_channel *ua_chan;
1967
1968 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
1969 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
1970 if (ua_chan_node == NULL) {
1971 DBG2("Unable to find channel %s in ust session id %" PRIu64,
1972 uchan->name, ua_sess->tracing_id);
1973 goto error;
1974 }
1975
1976 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
1977
1978 ret = enable_ust_channel(app, ua_sess, ua_chan);
1979 if (ret < 0) {
1980 goto error;
1981 }
1982
1983 error:
1984 return ret;
1985 }
1986
/*
 * Ask the consumer to create a channel and get it if successful.
 *
 * File descriptor accounting: one fd is reserved for the channel object and
 * DEFAULT_UST_STREAM_FD_NUM per expected stream; each reservation is
 * released on its matching error path below.
 *
 * Return 0 on success or else a negative value.
 */
static int do_consumer_create_channel(struct ltt_ust_session *usess,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
		int bitness, struct ust_registry_session *registry)
{
	int ret;
	unsigned int nb_fd = 0;
	struct consumer_socket *socket;

	assert(usess);
	assert(ua_sess);
	assert(ua_chan);
	assert(registry);

	rcu_read_lock();
	health_code_update();

	/* Get the right consumer socket for the application. */
	socket = consumer_find_socket_by_bitness(bitness, usess->consumer);
	if (!socket) {
		ret = -EINVAL;
		goto error;
	}

	health_code_update();

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create channel");
		goto error;
	}

	/*
	 * Ask consumer to create channel. The consumer will return the number of
	 * stream we have to expect.
	 */
	ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
			registry);
	if (ret < 0) {
		goto error_ask;
	}

	/*
	 * Compute the number of fd needed before receiving them. It must be 2 per
	 * stream (2 being the default value here).
	 */
	nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;

	/* Reserve the amount of file descriptor we need. */
	ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create channel");
		goto error_fd_get_stream;
	}

	health_code_update();

	/*
	 * Now get the channel from the consumer. This call wil populate the stream
	 * list of that channel and set the ust objects.
	 */
	if (usess->consumer->enabled) {
		ret = ust_consumer_get_channel(socket, ua_chan);
		if (ret < 0) {
			goto error_destroy;
		}
	}

	rcu_read_unlock();
	return 0;

error_destroy:
	/* Release the stream fds reserved above. */
	lttng_fd_put(LTTNG_FD_APPS, nb_fd);
error_fd_get_stream:
	/*
	 * Initiate a destroy channel on the consumer since we had an error
	 * handling it on our side. The return value is of no importance since we
	 * already have a ret value set by the previous error that we need to
	 * return.
	 */
	(void) ust_consumer_destroy_channel(socket, ua_chan);
error_ask:
	/* Release the channel fd reserved above. */
	lttng_fd_put(LTTNG_FD_APPS, 1);
error:
	health_code_update();
	rcu_read_unlock();
	return ret;
}
2080
/*
 * Duplicate the ust data object of the ust app stream and save it in the
 * buffer registry stream.
 *
 * Two fds are reserved for the duplicated stream object; they are released
 * again if the duplication fails.
 *
 * Return 0 on success or else a negative value.
 */
static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
		struct ust_app_stream *stream)
{
	int ret;

	assert(reg_stream);
	assert(stream);

	/* Reserve the amount of file descriptor we need. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 2);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon duplicate stream");
		goto error;
	}

	/* Duplicate object for stream once the original is in the registry. */
	ret = ustctl_duplicate_ust_object_data(&stream->obj,
			reg_stream->obj.ust);
	if (ret < 0) {
		ERR("Duplicate stream obj from %p to %p failed with ret %d",
				reg_stream->obj.ust, stream->obj, ret);
		lttng_fd_put(LTTNG_FD_APPS, 2);
		goto error;
	}
	stream->handle = stream->obj->handle;

error:
	return ret;
}
2116
/*
 * Duplicate the ust data object of the ust app. channel and save it in the
 * buffer registry channel.
 *
 * Return 0 on success or else a negative value.
 */
static int duplicate_channel_object(struct buffer_reg_channel *reg_chan,
		struct ust_app_channel *ua_chan)
{
	int ret;

	assert(reg_chan);
	assert(ua_chan);

	/*
	 * Need one fd for the channel. (NOTE(review): a previous comment said
	 * "two fds" but only 1 is reserved and released on the error path.)
	 */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon duplicate channel");
		goto error_fd_get;
	}

	/* Duplicate object for stream once the original is in the registry. */
	ret = ustctl_duplicate_ust_object_data(&ua_chan->obj, reg_chan->obj.ust);
	if (ret < 0) {
		ERR("Duplicate channel obj from %p to %p failed with ret: %d",
				reg_chan->obj.ust, ua_chan->obj, ret);
		goto error;
	}
	ua_chan->handle = ua_chan->obj->handle;

	return 0;

error:
	lttng_fd_put(LTTNG_FD_APPS, 1);
error_fd_get:
	return ret;
}
2154
/*
 * For a given channel buffer registry, setup all streams of the given ust
 * application channel.
 *
 * Ownership of each stream's ust object is transferred to the registry
 * stream; the app stream is then removed from the channel and freed.
 *
 * Return 0 on success or else a negative value.
 */
static int setup_buffer_reg_streams(struct buffer_reg_channel *reg_chan,
		struct ust_app_channel *ua_chan)
{
	int ret = 0;
	struct ust_app_stream *stream, *stmp;

	assert(reg_chan);
	assert(ua_chan);

	DBG2("UST app setup buffer registry stream");

	/* Send all streams to application. */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		struct buffer_reg_stream *reg_stream;

		ret = buffer_reg_stream_create(&reg_stream);
		if (ret < 0) {
			goto error;
		}

		/*
		 * Keep original pointer and nullify it in the stream so the delete
		 * stream call does not release the object.
		 */
		reg_stream->obj.ust = stream->obj;
		stream->obj = NULL;
		buffer_reg_stream_add(reg_stream, reg_chan);

		/* We don't need the streams anymore. */
		cds_list_del(&stream->list);
		delete_ust_app_stream(-1, stream);
	}

error:
	return ret;
}
2197
/*
 * Create a buffer registry channel for the given session registry and
 * application channel object. If regp pointer is valid, it's set with the
 * created object. Important, the created object is NOT added to the session
 * registry hash table.
 *
 * Return 0 on success else a negative value.
 */
static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
		struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp)
{
	int ret;
	struct buffer_reg_channel *reg_chan = NULL;

	assert(reg_sess);
	assert(ua_chan);

	DBG2("UST app creating buffer registry channel for %s", ua_chan->name);

	/* Create buffer registry channel keyed by the tracing channel id. */
	ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &reg_chan);
	if (ret < 0) {
		goto error_create;
	}
	assert(reg_chan);
	reg_chan->consumer_key = ua_chan->key;
	reg_chan->subbuf_size = ua_chan->attr.subbuf_size;

	/* Create and add a channel registry to session. */
	ret = ust_registry_channel_add(reg_sess->reg.ust,
			ua_chan->tracing_channel_id);
	if (ret < 0) {
		goto error;
	}
	buffer_reg_channel_add(reg_sess, reg_chan);

	if (regp) {
		*regp = reg_chan;
	}

	return 0;

error:
	/* Safe because the registry channel object was not added to any HT. */
	buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
error_create:
	return ret;
}
2246
/*
 * Setup buffer registry channel for the given session registry and application
 * channel object: move the streams and the channel UST object into the
 * registry channel. On error, reg_chan is removed from the session registry
 * and destroyed, so the caller must NOT touch it afterwards.
 *
 * Return 0 on success else a negative value.
 */
static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
		struct ust_app_channel *ua_chan, struct buffer_reg_channel *reg_chan)
{
	int ret;

	assert(reg_sess);
	assert(reg_chan);
	assert(ua_chan);
	assert(ua_chan->obj);

	DBG2("UST app setup buffer registry channel for %s", ua_chan->name);

	/* Setup all streams for the registry. */
	ret = setup_buffer_reg_streams(reg_chan, ua_chan);
	if (ret < 0) {
		goto error;
	}

	/* Transfer ownership of the channel UST object to the registry. */
	reg_chan->obj.ust = ua_chan->obj;
	ua_chan->obj = NULL;

	return 0;

error:
	buffer_reg_channel_remove(reg_sess, reg_chan);
	buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
	return ret;
}
2281
/*
 * Send buffer registry channel to the application: duplicate the channel
 * object into ua_chan, send it, then duplicate and send every registry
 * stream. The registry stream list is walked under its lock.
 *
 * Return 0 on success else a negative value.
 */
static int send_channel_uid_to_ust(struct buffer_reg_channel *reg_chan,
		struct ust_app *app, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct buffer_reg_stream *reg_stream;

	assert(reg_chan);
	assert(app);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app sending buffer registry channel to ust sock %d", app->sock);

	ret = duplicate_channel_object(reg_chan, ua_chan);
	if (ret < 0) {
		goto error;
	}

	/* Send channel to the application. */
	ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Send all streams to application. */
	pthread_mutex_lock(&reg_chan->stream_list_lock);
	cds_list_for_each_entry(reg_stream, &reg_chan->streams, lnode) {
		struct ust_app_stream stream;

		/* Per-stream duplicate; released again after the send below. */
		ret = duplicate_stream_object(reg_stream, &stream);
		if (ret < 0) {
			goto error_stream_unlock;
		}

		ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
		if (ret < 0) {
			(void) release_ust_app_stream(-1, &stream);
			goto error_stream_unlock;
		}

		/*
		 * The return value is not important here. This function will output an
		 * error if needed.
		 */
		(void) release_ust_app_stream(-1, &stream);
	}
	/* Mark as sent so teardown knows the app holds the channel. */
	ua_chan->is_sent = 1;

error_stream_unlock:
	pthread_mutex_unlock(&reg_chan->stream_list_lock);
error:
	return ret;
}
2343
/*
 * Create and send to the application the created buffers with per UID buffers.
 *
 * The per-UID buffer registry channel is created only once (first app of that
 * UID/bitness); subsequent apps only get the existing buffers re-sent.
 *
 * Return 0 on success else a negative value.
 */
static int create_channel_per_uid(struct ust_app *app,
		struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct buffer_reg_uid *reg_uid;
	struct buffer_reg_channel *reg_chan;

	assert(app);
	assert(usess);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app creating channel %s with per UID buffers", ua_chan->name);

	reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
	/*
	 * The session creation handles the creation of this global registry
	 * object. If none can be find, there is a code flow problem or a
	 * teardown race.
	 */
	assert(reg_uid);

	reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
			reg_uid);
	if (!reg_chan) {
		/* Create the buffer registry channel object. */
		ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &reg_chan);
		if (ret < 0) {
			goto error;
		}
		assert(reg_chan);

		/*
		 * Create the buffers on the consumer side. This call populates the
		 * ust app channel object with all streams and data object.
		 */
		ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
				app->bits_per_long, reg_uid->registry->reg.ust);
		if (ret < 0) {
			/*
			 * Let's remove the previously created buffer registry channel so
			 * it's not visible anymore in the session registry.
			 */
			ust_registry_channel_del_free(reg_uid->registry->reg.ust,
					ua_chan->tracing_channel_id);
			buffer_reg_channel_remove(reg_uid->registry, reg_chan);
			buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
			goto error;
		}

		/*
		 * Setup the streams and add it to the session registry.
		 *
		 * NOTE(review): on failure, setup_buffer_reg_channel() destroys
		 * reg_chan, but the ust_registry_channel_add() entry created in
		 * create_buffer_reg_channel() appears to be left behind — confirm
		 * whether ust_registry_channel_del_free() is needed here too.
		 */
		ret = setup_buffer_reg_channel(reg_uid->registry, ua_chan, reg_chan);
		if (ret < 0) {
			goto error;
		}

	}

	/* Send buffers to the application. */
	ret = send_channel_uid_to_ust(reg_chan, app, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

error:
	return ret;
}
2419
2420 /*
2421 * Create and send to the application the created buffers with per PID buffers.
2422 *
2423 * Return 0 on success else a negative value.
2424 */
2425 static int create_channel_per_pid(struct ust_app *app,
2426 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2427 struct ust_app_channel *ua_chan)
2428 {
2429 int ret;
2430 struct ust_registry_session *registry;
2431
2432 assert(app);
2433 assert(usess);
2434 assert(ua_sess);
2435 assert(ua_chan);
2436
2437 DBG("UST app creating channel %s with per PID buffers", ua_chan->name);
2438
2439 rcu_read_lock();
2440
2441 registry = get_session_registry(ua_sess);
2442 assert(registry);
2443
2444 /* Create and add a new channel registry to session. */
2445 ret = ust_registry_channel_add(registry, ua_chan->key);
2446 if (ret < 0) {
2447 goto error;
2448 }
2449
2450 /* Create and get channel on the consumer side. */
2451 ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
2452 app->bits_per_long, registry);
2453 if (ret < 0) {
2454 goto error;
2455 }
2456
2457 ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
2458 if (ret < 0) {
2459 goto error;
2460 }
2461
2462 error:
2463 rcu_read_unlock();
2464 return ret;
2465 }
2466
2467 /*
2468 * From an already allocated ust app channel, create the channel buffers if
2469 * need and send it to the application. This MUST be called with a RCU read
2470 * side lock acquired.
2471 *
2472 * Return 0 on success or else a negative value.
2473 */
2474 static int do_create_channel(struct ust_app *app,
2475 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2476 struct ust_app_channel *ua_chan)
2477 {
2478 int ret;
2479
2480 assert(app);
2481 assert(usess);
2482 assert(ua_sess);
2483 assert(ua_chan);
2484
2485 /* Handle buffer type before sending the channel to the application. */
2486 switch (usess->buffer_type) {
2487 case LTTNG_BUFFER_PER_UID:
2488 {
2489 ret = create_channel_per_uid(app, usess, ua_sess, ua_chan);
2490 if (ret < 0) {
2491 goto error;
2492 }
2493 break;
2494 }
2495 case LTTNG_BUFFER_PER_PID:
2496 {
2497 ret = create_channel_per_pid(app, usess, ua_sess, ua_chan);
2498 if (ret < 0) {
2499 goto error;
2500 }
2501 break;
2502 }
2503 default:
2504 assert(0);
2505 ret = -EINVAL;
2506 goto error;
2507 }
2508
2509 /* Initialize ust objd object using the received handle and add it. */
2510 lttng_ht_node_init_ulong(&ua_chan->ust_objd_node, ua_chan->handle);
2511 lttng_ht_add_unique_ulong(app->ust_objd, &ua_chan->ust_objd_node);
2512
2513 /* If channel is not enabled, disable it on the tracer */
2514 if (!ua_chan->enabled) {
2515 ret = disable_ust_channel(app, ua_sess, ua_chan);
2516 if (ret < 0) {
2517 goto error;
2518 }
2519 }
2520
2521 error:
2522 return ret;
2523 }
2524
/*
 * Create UST app channel and create it on the tracer. Set ua_chanp of the
 * newly created channel if not NULL. If the channel already exists in the
 * app session, it is simply returned through ua_chanp.
 *
 * Called with UST app session lock and RCU read-side lock held.
 *
 * Return 0 on success or else a negative value.
 */
static int create_ust_app_channel(struct ust_app_session *ua_sess,
		struct ltt_ust_channel *uchan, struct ust_app *app,
		enum lttng_ust_chan_type type, struct ltt_ust_session *usess,
		struct ust_app_channel **ua_chanp)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app_channel *ua_chan;

	/* Lookup channel in the ust app session */
	lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
	ua_chan_node = lttng_ht_iter_get_node_str(&iter);
	if (ua_chan_node != NULL) {
		/* Already created; short-circuit to the output pointer. */
		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
		goto end;
	}

	ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
	if (ua_chan == NULL) {
		/* Only malloc can fail here */
		ret = -ENOMEM;
		goto error_alloc;
	}
	shadow_copy_channel(ua_chan, uchan);

	/* Set channel type. */
	ua_chan->attr.type = type;

	ret = do_create_channel(app, usess, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

	DBG2("UST app create channel %s for PID %d completed", ua_chan->name,
			app->pid);

	/* Only add the channel if successful on the tracer side. */
	lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);

end:
	if (ua_chanp) {
		*ua_chanp = ua_chan;
	}

	/* Everything went well. */
	return 0;

error:
	/* Only pass the app socket if the channel actually reached the app. */
	delete_ust_app_channel(ua_chan->is_sent ? app->sock : -1, ua_chan, app);
error_alloc:
	return ret;
}
2586
/*
 * Create UST app event and create it on the tracer side.
 *
 * Return 0 on success, -EEXIST if the event already exists in the channel,
 * or another negative value on error.
 *
 * Called with ust app session mutex held.
 */
static
int create_ust_app_event(struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan, struct ltt_ust_event *uevent,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_event *ua_event;

	/* Get event node */
	ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
			uevent->filter, uevent->attr.loglevel, uevent->exclusion);
	if (ua_event != NULL) {
		ret = -EEXIST;
		goto end;
	}

	/* Does not exist so create one */
	ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
	if (ua_event == NULL) {
		/* Only malloc can failed so something is really wrong */
		ret = -ENOMEM;
		goto end;
	}
	shadow_copy_event(ua_event, uevent);

	/* Create it on the tracer side */
	ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
	if (ret < 0) {
		/* Not found previously means that it does not exist on the tracer */
		assert(ret != -LTTNG_UST_ERR_EXIST);
		goto error;
	}

	add_unique_ust_app_event(ua_chan, ua_event);

	DBG2("UST app create event %s for PID %d completed", ua_event->name,
			app->pid);

end:
	return ret;

error:
	/* Valid. Calling here is already in a read side lock */
	delete_ust_app_event(-1, ua_event);
	return ret;
}
2638
/*
 * Create UST metadata and open it on the tracer side.
 *
 * The metadata channel is created on the consumer, which keeps ownership of
 * it; the local ust_app_channel object is always deleted before returning
 * (only the registry's metadata_key survives).
 *
 * Called with UST app session lock held and RCU read side lock.
 *
 * Return 0 on success else a negative value.
 */
static int create_ust_app_metadata(struct ust_app_session *ua_sess,
		struct ust_app *app, struct consumer_output *consumer,
		struct ustctl_consumer_channel_attr *attr)
{
	int ret = 0;
	struct ust_app_channel *metadata;
	struct consumer_socket *socket;
	struct ust_registry_session *registry;

	assert(ua_sess);
	assert(app);
	assert(consumer);

	registry = get_session_registry(ua_sess);
	assert(registry);

	/* Metadata already exists for this registry or it was closed previously */
	if (registry->metadata_key || registry->metadata_closed) {
		ret = 0;
		goto error;
	}

	/* Allocate UST metadata */
	metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, NULL);
	if (!metadata) {
		/* malloc() failed */
		ret = -ENOMEM;
		goto error;
	}

	if (!attr) {
		/* Set default attributes for metadata. */
		metadata->attr.overwrite = DEFAULT_CHANNEL_OVERWRITE;
		metadata->attr.subbuf_size = default_get_metadata_subbuf_size();
		metadata->attr.num_subbuf = DEFAULT_METADATA_SUBBUF_NUM;
		metadata->attr.switch_timer_interval = DEFAULT_METADATA_SWITCH_TIMER;
		metadata->attr.read_timer_interval = DEFAULT_METADATA_READ_TIMER;
		metadata->attr.output = LTTNG_UST_MMAP;
		metadata->attr.type = LTTNG_UST_CHAN_METADATA;
	} else {
		/* Caller-provided attributes; force metadata output and type. */
		memcpy(&metadata->attr, attr, sizeof(metadata->attr));
		metadata->attr.output = LTTNG_UST_MMAP;
		metadata->attr.type = LTTNG_UST_CHAN_METADATA;
	}

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create metadata");
		goto error;
	}

	/* Get the right consumer socket for the application. */
	socket = consumer_find_socket_by_bitness(app->bits_per_long, consumer);
	if (!socket) {
		ret = -EINVAL;
		goto error_consumer;
	}

	/*
	 * Keep metadata key so we can identify it on the consumer side. Assign it
	 * to the registry *before* we ask the consumer so we avoid the race of the
	 * consumer requesting the metadata and the ask_channel call on our side
	 * did not returned yet.
	 */
	registry->metadata_key = metadata->key;

	/*
	 * Ask the metadata channel creation to the consumer. The metadata object
	 * will be created by the consumer and kept their. However, the stream is
	 * never added or monitored until we do a first push metadata to the
	 * consumer.
	 */
	ret = ust_consumer_ask_channel(ua_sess, metadata, consumer, socket,
			registry);
	if (ret < 0) {
		/* Nullify the metadata key so we don't try to close it later on. */
		registry->metadata_key = 0;
		goto error_consumer;
	}

	/*
	 * The setup command will make the metadata stream be sent to the relayd,
	 * if applicable, and the thread managing the metadatas. This is important
	 * because after this point, if an error occurs, the only way the stream
	 * can be deleted is to be monitored in the consumer.
	 */
	ret = consumer_setup_metadata(socket, metadata->key);
	if (ret < 0) {
		/* Nullify the metadata key so we don't try to close it later on. */
		registry->metadata_key = 0;
		goto error_consumer;
	}

	DBG2("UST metadata with key %" PRIu64 " created for app pid %d",
			metadata->key, app->pid);

error_consumer:
	/*
	 * Runs on BOTH success and failure: the fd reservation and the local
	 * channel object are no longer needed once the consumer owns (or failed
	 * to create) the metadata channel.
	 */
	lttng_fd_put(LTTNG_FD_APPS, 1);
	delete_ust_app_channel(-1, metadata, app);
error:
	return ret;
}
2747
2748 /*
2749 * Return pointer to traceable apps list.
2750 */
2751 struct lttng_ht *ust_app_get_ht(void)
2752 {
2753 return ust_app_ht;
2754 }
2755
2756 /*
2757 * Return ust app pointer or NULL if not found. RCU read side lock MUST be
2758 * acquired before calling this function.
2759 */
2760 struct ust_app *ust_app_find_by_pid(pid_t pid)
2761 {
2762 struct ust_app *app = NULL;
2763 struct lttng_ht_node_ulong *node;
2764 struct lttng_ht_iter iter;
2765
2766 lttng_ht_lookup(ust_app_ht, (void *)((unsigned long) pid), &iter);
2767 node = lttng_ht_iter_get_node_ulong(&iter);
2768 if (node == NULL) {
2769 DBG2("UST app no found with pid %d", pid);
2770 goto error;
2771 }
2772
2773 DBG2("Found UST app by pid %d", pid);
2774
2775 app = caa_container_of(node, struct ust_app, pid_n);
2776
2777 error:
2778 return app;
2779 }
2780
2781 /*
2782 * Allocate and init an UST app object using the registration information and
2783 * the command socket. This is called when the command socket connects to the
2784 * session daemon.
2785 *
2786 * The object is returned on success or else NULL.
2787 */
2788 struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
2789 {
2790 struct ust_app *lta = NULL;
2791
2792 assert(msg);
2793 assert(sock >= 0);
2794
2795 DBG3("UST app creating application for socket %d", sock);
2796
2797 if ((msg->bits_per_long == 64 &&
2798 (uatomic_read(&ust_consumerd64_fd) == -EINVAL))
2799 || (msg->bits_per_long == 32 &&
2800 (uatomic_read(&ust_consumerd32_fd) == -EINVAL))) {
2801 ERR("Registration failed: application \"%s\" (pid: %d) has "
2802 "%d-bit long, but no consumerd for this size is available.\n",
2803 msg->name, msg->pid, msg->bits_per_long);
2804 goto error;
2805 }
2806
2807 lta = zmalloc(sizeof(struct ust_app));
2808 if (lta == NULL) {
2809 PERROR("malloc");
2810 goto error;
2811 }
2812
2813 lta->ppid = msg->ppid;
2814 lta->uid = msg->uid;
2815 lta->gid = msg->gid;
2816
2817 lta->bits_per_long = msg->bits_per_long;
2818 lta->uint8_t_alignment = msg->uint8_t_alignment;
2819 lta->uint16_t_alignment = msg->uint16_t_alignment;
2820 lta->uint32_t_alignment = msg->uint32_t_alignment;
2821 lta->uint64_t_alignment = msg->uint64_t_alignment;
2822 lta->long_alignment = msg->long_alignment;
2823 lta->byte_order = msg->byte_order;
2824
2825 lta->v_major = msg->major;
2826 lta->v_minor = msg->minor;
2827 lta->sessions = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
2828 lta->ust_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
2829 lta->notify_sock = -1;
2830
2831 /* Copy name and make sure it's NULL terminated. */
2832 strncpy(lta->name, msg->name, sizeof(lta->name));
2833 lta->name[UST_APP_PROCNAME_LEN] = '\0';
2834
2835 /*
2836 * Before this can be called, when receiving the registration information,
2837 * the application compatibility is checked. So, at this point, the
2838 * application can work with this session daemon.
2839 */
2840 lta->compatible = 1;
2841
2842 lta->pid = msg->pid;
2843 lttng_ht_node_init_ulong(&lta->pid_n, (unsigned long) lta->pid);
2844 lta->sock = sock;
2845 lttng_ht_node_init_ulong(&lta->sock_n, (unsigned long) lta->sock);
2846
2847 CDS_INIT_LIST_HEAD(&lta->teardown_head);
2848
2849 error:
2850 return lta;
2851 }
2852
2853 /*
2854 * For a given application object, add it to every hash table.
2855 */
2856 void ust_app_add(struct ust_app *app)
2857 {
2858 assert(app);
2859 assert(app->notify_sock >= 0);
2860
2861 rcu_read_lock();
2862
2863 /*
2864 * On a re-registration, we want to kick out the previous registration of
2865 * that pid
2866 */
2867 lttng_ht_add_replace_ulong(ust_app_ht, &app->pid_n);
2868
2869 /*
2870 * The socket _should_ be unique until _we_ call close. So, a add_unique
2871 * for the ust_app_ht_by_sock is used which asserts fail if the entry was
2872 * already in the table.
2873 */
2874 lttng_ht_add_unique_ulong(ust_app_ht_by_sock, &app->sock_n);
2875
2876 /* Add application to the notify socket hash table. */
2877 lttng_ht_node_init_ulong(&app->notify_sock_n, app->notify_sock);
2878 lttng_ht_add_unique_ulong(ust_app_ht_by_notify_sock, &app->notify_sock_n);
2879
2880 DBG("App registered with pid:%d ppid:%d uid:%d gid:%d sock:%d name:%s "
2881 "notify_sock:%d (version %d.%d)", app->pid, app->ppid, app->uid,
2882 app->gid, app->sock, app->name, app->notify_sock, app->v_major,
2883 app->v_minor);
2884
2885 rcu_read_unlock();
2886 }
2887
2888 /*
2889 * Set the application version into the object.
2890 *
2891 * Return 0 on success else a negative value either an errno code or a
2892 * LTTng-UST error code.
2893 */
2894 int ust_app_version(struct ust_app *app)
2895 {
2896 int ret;
2897
2898 assert(app);
2899
2900 ret = ustctl_tracer_version(app->sock, &app->version);
2901 if (ret < 0) {
2902 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
2903 ERR("UST app %d verson failed with ret %d", app->sock, ret);
2904 } else {
2905 DBG3("UST app %d verion failed. Application is dead", app->sock);
2906 }
2907 }
2908
2909 return ret;
2910 }
2911
/*
 * Unregister app by removing it from the global traceable app list and freeing
 * the data struct.
 *
 * The socket is already closed at this point so no close to sock.
 */
void ust_app_unregister(int sock)
{
	struct ust_app *lta;
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;
	struct ust_app_session *ua_sess;
	int ret;

	rcu_read_lock();

	/* Get the node reference for a call_rcu */
	lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	assert(node);

	lta = caa_container_of(node, struct ust_app, sock_n);
	DBG("PID %d unregistering with sock %d", lta->pid, sock);

	/* Remove application from PID hash table */
	ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
	assert(!ret);

	/*
	 * Remove application from notify hash table. The thread handling the
	 * notify socket could have deleted the node so ignore on error because
	 * either way it's valid. The close of that socket is handled by the other
	 * thread.
	 */
	iter.iter.node = &lta->notify_sock_n.node;
	(void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);

	/*
	 * Ignore return value since the node might have been removed before by an
	 * add replace during app registration because the PID can be reassigned by
	 * the OS.
	 */
	iter.iter.node = &lta->pid_n.node;
	ret = lttng_ht_del(ust_app_ht, &iter);
	if (ret) {
		DBG3("Unregister app by PID %d failed. This can happen on pid reuse",
				lta->pid);
	}

	/* Remove sessions so they are not visible during deletion.*/
	cds_lfht_for_each_entry(lta->sessions->ht, &iter.iter, ua_sess,
			node.node) {
		struct ust_registry_session *registry;

		ret = lttng_ht_del(lta->sessions, &iter);
		if (ret) {
			/* The session was already removed so scheduled for teardown. */
			continue;
		}

		/*
		 * Add session to list for teardown. This is safe since at this point we
		 * are the only one using this list.
		 */
		pthread_mutex_lock(&ua_sess->lock);

		/*
		 * Normally, this is done in the delete session process which is
		 * executed in the call rcu below. However, upon registration we can't
		 * afford to wait for the grace period before pushing data or else the
		 * data pending feature can race between the unregistration and stop
		 * command where the data pending command is sent *before* the grace
		 * period ended.
		 *
		 * The close metadata below nullifies the metadata pointer in the
		 * session so the delete session will NOT push/close a second time.
		 */
		registry = get_session_registry(ua_sess);
		if (registry && !registry->metadata_closed) {
			/* Push metadata for application before freeing the application. */
			(void) push_metadata(registry, ua_sess->consumer);

			/*
			 * Don't ask to close metadata for global per UID buffers. Close
			 * metadata only on destroy trace session in this case. Also, the
			 * previous push metadata could have flag the metadata registry to
			 * close so don't send a close command if closed.
			 */
			if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID &&
					!registry->metadata_closed) {
				/* And ask to close it for this session registry. */
				(void) close_metadata(registry, ua_sess->consumer);
			}
		}

		/* Queue the session for the deferred teardown done under call_rcu. */
		cds_list_add(&ua_sess->teardown_node, &lta->teardown_head);
		pthread_mutex_unlock(&ua_sess->lock);
	}

	/* Free memory after the RCU grace period. */
	call_rcu(&lta->pid_n.head, delete_ust_app_rcu);

	rcu_read_unlock();
	return;
}
3017
3018 /*
3019 * Return traceable_app_count
3020 */
3021 unsigned long ust_app_list_count(void)
3022 {
3023 unsigned long count;
3024
3025 rcu_read_lock();
3026 count = lttng_ht_get_count(ust_app_ht);
3027 rcu_read_unlock();
3028
3029 return count;
3030 }
3031
3032 /*
3033 * Fill events array with all events name of all registered apps.
3034 */
3035 int ust_app_list_events(struct lttng_event **events)
3036 {
3037 int ret, handle;
3038 size_t nbmem, count = 0;
3039 struct lttng_ht_iter iter;
3040 struct ust_app *app;
3041 struct lttng_event *tmp_event;
3042
3043 nbmem = UST_APP_EVENT_LIST_SIZE;
3044 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event));
3045 if (tmp_event == NULL) {
3046 PERROR("zmalloc ust app events");
3047 ret = -ENOMEM;
3048 goto error;
3049 }
3050
3051 rcu_read_lock();
3052
3053 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3054 struct lttng_ust_tracepoint_iter uiter;
3055
3056 health_code_update();
3057
3058 if (!app->compatible) {
3059 /*
3060 * TODO: In time, we should notice the caller of this error by
3061 * telling him that this is a version error.
3062 */
3063 continue;
3064 }
3065 handle = ustctl_tracepoint_list(app->sock);
3066 if (handle < 0) {
3067 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
3068 ERR("UST app list events getting handle failed for app pid %d",
3069 app->pid);
3070 }
3071 continue;
3072 }
3073
3074 while ((ret = ustctl_tracepoint_list_get(app->sock, handle,
3075 &uiter)) != -LTTNG_UST_ERR_NOENT) {
3076 /* Handle ustctl error. */
3077 if (ret < 0) {
3078 free(tmp_event);
3079 if (ret != -LTTNG_UST_ERR_EXITING || ret != -EPIPE) {
3080 ERR("UST app tp list get failed for app %d with ret %d",
3081 app->sock, ret);
3082 } else {
3083 DBG3("UST app tp list get failed. Application is dead");
3084 /*
3085 * This is normal behavior, an application can die during the
3086 * creation process. Don't report an error so the execution can
3087 * continue normally. Continue normal execution.
3088 */
3089 break;
3090 }
3091 goto rcu_error;
3092 }
3093
3094 health_code_update();
3095 if (count >= nbmem) {
3096 /* In case the realloc fails, we free the memory */
3097 void *ptr;
3098
3099 DBG2("Reallocating event list from %zu to %zu entries", nbmem,
3100 2 * nbmem);
3101 nbmem *= 2;
3102 ptr = realloc(tmp_event, nbmem * sizeof(struct lttng_event));
3103 if (ptr == NULL) {
3104 PERROR("realloc ust app events");
3105 free(tmp_event);
3106 ret = -ENOMEM;
3107 goto rcu_error;
3108 }
3109 tmp_event = ptr;
3110 }
3111 memcpy(tmp_event[count].name, uiter.name, LTTNG_UST_SYM_NAME_LEN);
3112 tmp_event[count].loglevel = uiter.loglevel;
3113 tmp_event[count].type = (enum lttng_event_type) LTTNG_UST_TRACEPOINT;
3114 tmp_event[count].pid = app->pid;
3115 tmp_event[count].enabled = -1;
3116 count++;
3117 }
3118 }
3119
3120 ret = count;
3121 *events = tmp_event;
3122
3123 DBG2("UST app list events done (%zu events)", count);
3124
3125 rcu_error:
3126 rcu_read_unlock();
3127 error:
3128 health_code_update();
3129 return ret;
3130 }
3131
3132 /*
3133 * Fill events array with all events name of all registered apps.
3134 */
3135 int ust_app_list_event_fields(struct lttng_event_field **fields)
3136 {
3137 int ret, handle;
3138 size_t nbmem, count = 0;
3139 struct lttng_ht_iter iter;
3140 struct ust_app *app;
3141 struct lttng_event_field *tmp_event;
3142
3143 nbmem = UST_APP_EVENT_LIST_SIZE;
3144 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event_field));
3145 if (tmp_event == NULL) {
3146 PERROR("zmalloc ust app event fields");
3147 ret = -ENOMEM;
3148 goto error;
3149 }
3150
3151 rcu_read_lock();
3152
3153 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3154 struct lttng_ust_field_iter uiter;
3155
3156 health_code_update();
3157
3158 if (!app->compatible) {
3159 /*
3160 * TODO: In time, we should notice the caller of this error by
3161 * telling him that this is a version error.
3162 */
3163 continue;
3164 }
3165 handle = ustctl_tracepoint_field_list(app->sock);
3166 if (handle < 0) {
3167 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
3168 ERR("UST app list field getting handle failed for app pid %d",
3169 app->pid);
3170 }
3171 continue;
3172 }
3173
3174 while ((ret = ustctl_tracepoint_field_list_get(app->sock, handle,
3175 &uiter)) != -LTTNG_UST_ERR_NOENT) {
3176 /* Handle ustctl error. */
3177 if (ret < 0) {
3178 free(tmp_event);
3179 if (ret != -LTTNG_UST_ERR_EXITING || ret != -EPIPE) {
3180 ERR("UST app tp list field failed for app %d with ret %d",
3181 app->sock, ret);
3182 } else {
3183 DBG3("UST app tp list field failed. Application is dead");
3184 /*
3185 * This is normal behavior, an application can die during the
3186 * creation process. Don't report an error so the execution can
3187 * continue normally.
3188 */
3189 break;
3190 }
3191 goto rcu_error;
3192 }
3193
3194 health_code_update();
3195 if (count >= nbmem) {
3196 /* In case the realloc fails, we free the memory */
3197 void *ptr;
3198
3199 DBG2("Reallocating event field list from %zu to %zu entries", nbmem,
3200 2 * nbmem);
3201 nbmem *= 2;
3202 ptr = realloc(tmp_event, nbmem * sizeof(struct lttng_event_field));
3203 if (ptr == NULL) {
3204 PERROR("realloc ust app event fields");
3205 free(tmp_event);
3206 ret = -ENOMEM;
3207 goto rcu_error;
3208 }
3209 tmp_event = ptr;
3210 }
3211
3212 memcpy(tmp_event[count].field_name, uiter.field_name, LTTNG_UST_SYM_NAME_LEN);
3213 tmp_event[count].type = uiter.type;
3214 tmp_event[count].nowrite = uiter.nowrite;
3215
3216 memcpy(tmp_event[count].event.name, uiter.event_name, LTTNG_UST_SYM_NAME_LEN);
3217 tmp_event[count].event.loglevel = uiter.loglevel;
3218 tmp_event[count].event.type = LTTNG_UST_TRACEPOINT;
3219 tmp_event[count].event.pid = app->pid;
3220 tmp_event[count].event.enabled = -1;
3221 count++;
3222 }
3223 }
3224
3225 ret = count;
3226 *fields = tmp_event;
3227
3228 DBG2("UST app list event fields done (%zu events)", count);
3229
3230 rcu_error:
3231 rcu_read_unlock();
3232 error:
3233 health_code_update();
3234 return ret;
3235 }
3236
3237 /*
3238 * Free and clean all traceable apps of the global list.
3239 *
3240 * Should _NOT_ be called with RCU read-side lock held.
3241 */
3242 void ust_app_clean_list(void)
3243 {
3244 int ret;
3245 struct ust_app *app;
3246 struct lttng_ht_iter iter