Copy event exclusion data in add_unique_ust_app_event
[lttng-tools.git] / src / bin / lttng-sessiond / ust-app.c
1 /*
2 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License, version 2 only,
6 * as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License along
14 * with this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
16 */
17
18 #define _GNU_SOURCE
19 #include <errno.h>
20 #include <inttypes.h>
21 #include <pthread.h>
22 #include <stdio.h>
23 #include <stdlib.h>
24 #include <string.h>
25 #include <sys/stat.h>
26 #include <sys/types.h>
27 #include <unistd.h>
28 #include <urcu/compiler.h>
29 #include <lttng/ust-error.h>
30 #include <signal.h>
31
32 #include <common/common.h>
33 #include <common/sessiond-comm/sessiond-comm.h>
34
35 #include "buffer-registry.h"
36 #include "fd-limit.h"
37 #include "health-sessiond.h"
38 #include "ust-app.h"
39 #include "ust-consumer.h"
40 #include "ust-ctl.h"
41 #include "utils.h"
42
43 /* Next available channel key. Access under next_channel_key_lock. */
44 static uint64_t _next_channel_key;
45 static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;
46
47 /* Next available session ID. Access under next_session_id_lock. */
48 static uint64_t _next_session_id;
49 static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;
50
51 /*
52 * Return the incremented value of next_channel_key.
53 */
54 static uint64_t get_next_channel_key(void)
55 {
56 uint64_t ret;
57
58 pthread_mutex_lock(&next_channel_key_lock);
59 ret = ++_next_channel_key;
60 pthread_mutex_unlock(&next_channel_key_lock);
61 return ret;
62 }
63
64 /*
65 * Return the atomically incremented value of next_session_id.
66 */
67 static uint64_t get_next_session_id(void)
68 {
69 uint64_t ret;
70
71 pthread_mutex_lock(&next_session_id_lock);
72 ret = ++_next_session_id;
73 pthread_mutex_unlock(&next_session_id_lock);
74 return ret;
75 }
76
77 static void copy_channel_attr_to_ustctl(
78 struct ustctl_consumer_channel_attr *attr,
79 struct lttng_ust_channel_attr *uattr)
80 {
81 /* Copy event attributes since the layout is different. */
82 attr->subbuf_size = uattr->subbuf_size;
83 attr->num_subbuf = uattr->num_subbuf;
84 attr->overwrite = uattr->overwrite;
85 attr->switch_timer_interval = uattr->switch_timer_interval;
86 attr->read_timer_interval = uattr->read_timer_interval;
87 attr->output = uattr->output;
88 }
89
90 /*
91 * Match function for the hash table lookup.
92 *
93 * It matches an ust app event based on three attributes which are the event
94 * name, the filter bytecode and the loglevel.
95 */
96 static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
97 {
98 struct ust_app_event *event;
99 const struct ust_app_ht_key *key;
100
101 assert(node);
102 assert(_key);
103
104 event = caa_container_of(node, struct ust_app_event, node.node);
105 key = _key;
106
107 /* Match the 4 elements of the key: name, filter, loglevel, exclusions */
108
109 /* Event name */
110 if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
111 goto no_match;
112 }
113
114 /* Event loglevel. */
115 if (event->attr.loglevel != key->loglevel) {
116 if (event->attr.loglevel_type == LTTNG_UST_LOGLEVEL_ALL
117 && key->loglevel == 0 && event->attr.loglevel == -1) {
118 /*
119 * Match is accepted. This is because on event creation, the
120 * loglevel is set to -1 if the event loglevel type is ALL so 0 and
121 * -1 are accepted for this loglevel type since 0 is the one set by
122 * the API when receiving an enable event.
123 */
124 } else {
125 goto no_match;
126 }
127 }
128
129 /* One of the filters is NULL, fail. */
130 if ((key->filter && !event->filter) || (!key->filter && event->filter)) {
131 goto no_match;
132 }
133
134 if (key->filter && event->filter) {
135 /* Both filters exists, check length followed by the bytecode. */
136 if (event->filter->len != key->filter->len ||
137 memcmp(event->filter->data, key->filter->data,
138 event->filter->len) != 0) {
139 goto no_match;
140 }
141 }
142
143 /* One of the exclusions is NULL, fail. */
144 if ((key->exclusion && !event->exclusion) || (!key->exclusion && event->exclusion)) {
145 goto no_match;
146 }
147
148 if (key->exclusion && event->exclusion) {
149 /* Both exclusions exists, check count followed by the names. */
150 if (event->exclusion->count != key->exclusion->count ||
151 memcmp(event->exclusion->names, key->exclusion->names,
152 event->exclusion->count * LTTNG_UST_SYM_NAME_LEN) != 0) {
153 goto no_match;
154 }
155 }
156
157
158 /* Match. */
159 return 1;
160
161 no_match:
162 return 0;
163 }
164
165 /*
166 * Unique add of an ust app event in the given ht. This uses the custom
167 * ht_match_ust_app_event match function and the event name as hash.
168 */
169 static void add_unique_ust_app_event(struct ust_app_channel *ua_chan,
170 struct ust_app_event *event)
171 {
172 struct cds_lfht_node *node_ptr;
173 struct ust_app_ht_key key;
174 struct lttng_ht *ht;
175
176 assert(ua_chan);
177 assert(ua_chan->events);
178 assert(event);
179
180 ht = ua_chan->events;
181 key.name = event->attr.name;
182 key.filter = event->filter;
183 key.loglevel = event->attr.loglevel;
184 key.exclusion = event->exclusion;
185
186 node_ptr = cds_lfht_add_unique(ht->ht,
187 ht->hash_fct(event->node.key, lttng_ht_seed),
188 ht_match_ust_app_event, &key, &event->node.node);
189 assert(node_ptr == &event->node.node);
190 }
191
192 /*
193 * Close the notify socket from the given RCU head object. This MUST be called
194 * through a call_rcu().
195 */
196 static void close_notify_sock_rcu(struct rcu_head *head)
197 {
198 int ret;
199 struct ust_app_notify_sock_obj *obj =
200 caa_container_of(head, struct ust_app_notify_sock_obj, head);
201
202 /* Must have a valid fd here. */
203 assert(obj->fd >= 0);
204
205 ret = close(obj->fd);
206 if (ret) {
207 ERR("close notify sock %d RCU", obj->fd);
208 }
209 lttng_fd_put(LTTNG_FD_APPS, 1);
210
211 free(obj);
212 }
213
214 /*
215 * Return the session registry according to the buffer type of the given
216 * session.
217 *
218 * A registry per UID object MUST exists before calling this function or else
219 * it assert() if not found. RCU read side lock must be acquired.
220 */
221 static struct ust_registry_session *get_session_registry(
222 struct ust_app_session *ua_sess)
223 {
224 struct ust_registry_session *registry = NULL;
225
226 assert(ua_sess);
227
228 switch (ua_sess->buffer_type) {
229 case LTTNG_BUFFER_PER_PID:
230 {
231 struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
232 if (!reg_pid) {
233 goto error;
234 }
235 registry = reg_pid->registry->reg.ust;
236 break;
237 }
238 case LTTNG_BUFFER_PER_UID:
239 {
240 struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
241 ua_sess->tracing_id, ua_sess->bits_per_long, ua_sess->uid);
242 if (!reg_uid) {
243 goto error;
244 }
245 registry = reg_uid->registry->reg.ust;
246 break;
247 }
248 default:
249 assert(0);
250 };
251
252 error:
253 return registry;
254 }
255
256 /*
257 * Delete ust context safely. RCU read lock must be held before calling
258 * this function.
259 */
260 static
261 void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx)
262 {
263 int ret;
264
265 assert(ua_ctx);
266
267 if (ua_ctx->obj) {
268 ret = ustctl_release_object(sock, ua_ctx->obj);
269 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
270 ERR("UST app sock %d release ctx obj handle %d failed with ret %d",
271 sock, ua_ctx->obj->handle, ret);
272 }
273 free(ua_ctx->obj);
274 }
275 free(ua_ctx);
276 }
277
278 /*
279 * Delete ust app event safely. RCU read lock must be held before calling
280 * this function.
281 */
282 static
283 void delete_ust_app_event(int sock, struct ust_app_event *ua_event)
284 {
285 int ret;
286
287 assert(ua_event);
288
289 free(ua_event->filter);
290
291 if (ua_event->obj != NULL) {
292 ret = ustctl_release_object(sock, ua_event->obj);
293 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
294 ERR("UST app sock %d release event obj failed with ret %d",
295 sock, ret);
296 }
297 free(ua_event->obj);
298 }
299 free(ua_event);
300 }
301
302 /*
303 * Release ust data object of the given stream.
304 *
305 * Return 0 on success or else a negative value.
306 */
307 static int release_ust_app_stream(int sock, struct ust_app_stream *stream)
308 {
309 int ret = 0;
310
311 assert(stream);
312
313 if (stream->obj) {
314 ret = ustctl_release_object(sock, stream->obj);
315 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
316 ERR("UST app sock %d release stream obj failed with ret %d",
317 sock, ret);
318 }
319 lttng_fd_put(LTTNG_FD_APPS, 2);
320 free(stream->obj);
321 }
322
323 return ret;
324 }
325
/*
 * Delete ust app stream safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_stream(int sock, struct ust_app_stream *stream)
{
	assert(stream);

	/* Release errors are already logged by the helper; best effort. */
	(void) release_ust_app_stream(sock, stream);
	free(stream);
}
338
339 /*
340 * We need to execute ht_destroy outside of RCU read-side critical
341 * section and outside of call_rcu thread, so we postpone its execution
342 * using ht_cleanup_push. It is simpler than to change the semantic of
343 * the many callers of delete_ust_app_session().
344 */
345 static
346 void delete_ust_app_channel_rcu(struct rcu_head *head)
347 {
348 struct ust_app_channel *ua_chan =
349 caa_container_of(head, struct ust_app_channel, rcu_head);
350
351 ht_cleanup_push(ua_chan->ctx);
352 ht_cleanup_push(ua_chan->events);
353 free(ua_chan);
354 }
355
/*
 * Delete ust app channel safely. RCU read lock must be held before calling
 * this function.
 *
 * Tears down, in order: the channel streams, contexts and events, then the
 * per-PID registry entry (if applicable) and the tracer-side object, and
 * finally schedules the channel structure itself for RCU reclamation.
 */
static
void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;
	struct ust_app_stream *stream, *stmp;
	struct ust_registry_session *registry;

	assert(ua_chan);

	DBG3("UST app deleting channel %s", ua_chan->name);

	/* Wipe stream */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		cds_list_del(&stream->list);
		delete_ust_app_stream(sock, stream);
	}

	/* Wipe context */
	cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
		cds_list_del(&ua_ctx->list);
		ret = lttng_ht_del(ua_chan->ctx, &iter);
		assert(!ret);
		delete_ust_app_ctx(sock, ua_ctx);
	}

	/* Wipe events */
	cds_lfht_for_each_entry(ua_chan->events->ht, &iter.iter, ua_event,
			node.node) {
		ret = lttng_ht_del(ua_chan->events, &iter);
		assert(!ret);
		delete_ust_app_event(sock, ua_event);
	}

	if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
		/* Wipe and free registry from session registry. */
		registry = get_session_registry(ua_chan->session);
		if (registry) {
			ust_registry_channel_del_free(registry, ua_chan->key);
		}
	}

	if (ua_chan->obj != NULL) {
		/* Remove channel from application UST object descriptor. */
		iter.iter.node = &ua_chan->ust_objd_node.node;
		lttng_ht_del(app->ust_objd, &iter);
		ret = ustctl_release_object(sock, ua_chan->obj);
		/* EPIPE/EXITING simply mean the application went away. */
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release channel obj failed with ret %d",
					sock, ret);
		}
		lttng_fd_put(LTTNG_FD_APPS, 1);
		free(ua_chan->obj);
	}
	/* Struct freed (and its hash tables destroyed) after a grace period. */
	call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
}
419
/*
 * Push metadata to consumer socket.
 *
 * The socket lock MUST be acquired.
 * The ust app session lock MUST be acquired.
 *
 * On success, return the len of metadata pushed or else a negative value.
 */
ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
		struct consumer_socket *socket, int send_zero_data)
{
	int ret;
	char *metadata_str = NULL;
	size_t len, offset;
	ssize_t ret_val;

	assert(registry);
	assert(socket);

	/*
	 * On a push metadata error either the consumer is dead or the metadata
	 * channel has been destroyed because its endpoint might have died (e.g:
	 * relayd). If so, the metadata closed flag is set to 1 so we deny pushing
	 * metadata again which is not valid anymore on the consumer side.
	 *
	 * The ust app session mutex locked allows us to make this check without
	 * the registry lock.
	 */
	if (registry->metadata_closed) {
		return -EPIPE;
	}

	/* Registry lock protects metadata_len / metadata_len_sent sampling. */
	pthread_mutex_lock(&registry->lock);

	offset = registry->metadata_len_sent;
	len = registry->metadata_len - registry->metadata_len_sent;
	if (len == 0) {
		DBG3("No metadata to push for metadata key %" PRIu64,
				registry->metadata_key);
		ret_val = len;
		if (send_zero_data) {
			DBG("No metadata to push");
			/* Push an empty payload (metadata_str stays NULL, len 0). */
			goto push_data;
		}
		goto end;
	}

	/* Allocate only what we have to send. */
	metadata_str = zmalloc(len);
	if (!metadata_str) {
		PERROR("zmalloc ust app metadata string");
		ret_val = -ENOMEM;
		goto error;
	}
	/* Copy what we haven't send out. */
	memcpy(metadata_str, registry->metadata + offset, len);
	/* Account the copied bytes as sent before dropping the lock. */
	registry->metadata_len_sent += len;

push_data:
	/* The registry lock must not be held across the consumer call. */
	pthread_mutex_unlock(&registry->lock);
	ret = consumer_push_metadata(socket, registry->metadata_key,
			metadata_str, len, offset);
	if (ret < 0) {
		ret_val = ret;
		goto error_push;
	}

	free(metadata_str);
	return len;

end:
error:
	pthread_mutex_unlock(&registry->lock);
error_push:
	/* free(NULL) is a no-op on the zero-length/send-zero paths. */
	free(metadata_str);
	return ret_val;
}
497
/*
 * For a given application and session, push metadata to consumer. The session
 * lock MUST be acquired here before calling this.
 * Either sock or consumer is required : if sock is NULL, the default
 * socket to send the metadata is retrieved from consumer, if sock
 * is not NULL we use it to send the metadata.
 *
 * On any error, the registry is flagged metadata_closed so no further push
 * is attempted for it.
 *
 * Return 0 on success else a negative error.
 */
static int push_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret_val;
	ssize_t ret;
	struct consumer_socket *socket;

	assert(registry);
	assert(consumer);

	rcu_read_lock();

	/*
	 * Means that no metadata was assigned to the session. This can happens if
	 * no start has been done previously.
	 */
	if (!registry->metadata_key) {
		ret_val = 0;
		goto end_rcu_unlock;
	}

	/* Get consumer socket to use to push the metadata.*/
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret_val = -1;
		goto error_rcu_unlock;
	}

	/*
	 * TODO: Currently, we hold the socket lock around sampling of the next
	 * metadata segment to ensure we send metadata over the consumer socket in
	 * the correct order. This makes the registry lock nest inside the socket
	 * lock.
	 *
	 * Please note that this is a temporary measure: we should move this lock
	 * back into ust_consumer_push_metadata() when the consumer gets the
	 * ability to reorder the metadata it receives.
	 */
	pthread_mutex_lock(socket->lock);
	ret = ust_app_push_metadata(registry, socket, 0);
	pthread_mutex_unlock(socket->lock);
	if (ret < 0) {
		ret_val = ret;
		goto error_rcu_unlock;
	}

	rcu_read_unlock();
	return 0;

error_rcu_unlock:
	/*
	 * On error, flag the registry that the metadata is closed. We were unable
	 * to push anything and this means that either the consumer is not
	 * responding or the metadata cache has been destroyed on the consumer.
	 */
	registry->metadata_closed = 1;
end_rcu_unlock:
	rcu_read_unlock();
	return ret_val;
}
568
569 /*
570 * Send to the consumer a close metadata command for the given session. Once
571 * done, the metadata channel is deleted and the session metadata pointer is
572 * nullified. The session lock MUST be acquired here unless the application is
573 * in the destroy path.
574 *
575 * Return 0 on success else a negative value.
576 */
577 static int close_metadata(struct ust_registry_session *registry,
578 struct consumer_output *consumer)
579 {
580 int ret;
581 struct consumer_socket *socket;
582
583 assert(registry);
584 assert(consumer);
585
586 rcu_read_lock();
587
588 if (!registry->metadata_key || registry->metadata_closed) {
589 ret = 0;
590 goto end;
591 }
592
593 /* Get consumer socket to use to push the metadata.*/
594 socket = consumer_find_socket_by_bitness(registry->bits_per_long,
595 consumer);
596 if (!socket) {
597 ret = -1;
598 goto error;
599 }
600
601 ret = consumer_close_metadata(socket, registry->metadata_key);
602 if (ret < 0) {
603 goto error;
604 }
605
606 error:
607 /*
608 * Metadata closed. Even on error this means that the consumer is not
609 * responding or not found so either way a second close should NOT be emit
610 * for this registry.
611 */
612 registry->metadata_closed = 1;
613 end:
614 rcu_read_unlock();
615 return ret;
616 }
617
618 /*
619 * We need to execute ht_destroy outside of RCU read-side critical
620 * section and outside of call_rcu thread, so we postpone its execution
621 * using ht_cleanup_push. It is simpler than to change the semantic of
622 * the many callers of delete_ust_app_session().
623 */
624 static
625 void delete_ust_app_session_rcu(struct rcu_head *head)
626 {
627 struct ust_app_session *ua_sess =
628 caa_container_of(head, struct ust_app_session, rcu_head);
629
630 ht_cleanup_push(ua_sess->channels);
631 free(ua_sess);
632 }
633
/*
 * Delete ust app session safely. RCU read lock must be held before calling
 * this function.
 *
 * Flushes (and possibly closes) the registry metadata, tears down every
 * channel, removes the per-PID buffer registry when applicable, releases the
 * tracer-side session handle and finally schedules the structure for RCU
 * reclamation.
 */
static
void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_channel *ua_chan;
	struct ust_registry_session *registry;

	assert(ua_sess);

	/* Serialize against concurrent users of this app session. */
	pthread_mutex_lock(&ua_sess->lock);

	registry = get_session_registry(ua_sess);
	if (registry && !registry->metadata_closed) {
		/* Push metadata for application before freeing the application. */
		(void) push_metadata(registry, ua_sess->consumer);

		/*
		 * Don't ask to close metadata for global per UID buffers. Close
		 * metadata only on destroy trace session in this case. Also, the
		 * previous push metadata could have flag the metadata registry to
		 * close so don't send a close command if closed.
		 */
		if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID &&
				!registry->metadata_closed) {
			/* And ask to close it for this session registry. */
			(void) close_metadata(registry, ua_sess->consumer);
		}
	}

	/* Wipe every channel of the session. */
	cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
			node.node) {
		ret = lttng_ht_del(ua_sess->channels, &iter);
		assert(!ret);
		delete_ust_app_channel(sock, ua_chan, app);
	}

	/* In case of per PID, the registry is kept in the session. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
		struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
		if (reg_pid) {
			buffer_reg_pid_remove(reg_pid);
			buffer_reg_pid_destroy(reg_pid);
		}
	}

	/* -1 means the tracer-side handle was never created. */
	if (ua_sess->handle != -1) {
		ret = ustctl_release_handle(sock, ua_sess->handle);
		/* EPIPE/EXITING simply mean the application went away. */
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release session handle failed with ret %d",
					sock, ret);
		}
	}
	pthread_mutex_unlock(&ua_sess->lock);

	/* Struct freed (and channels ht destroyed) after a grace period. */
	call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
}
696
/*
 * Delete a traceable application structure from the global list. Never call
 * this function outside of a call_rcu call.
 *
 * RCU read side lock should _NOT_ be held when calling this function.
 */
static
void delete_ust_app(struct ust_app *app)
{
	int ret, sock;
	struct ust_app_session *ua_sess, *tmp_ua_sess;

	/* Delete ust app sessions info */
	/* Keep the fd locally and invalidate the one stored in the app. */
	sock = app->sock;
	app->sock = -1;

	/* Wipe sessions */
	cds_list_for_each_entry_safe(ua_sess, tmp_ua_sess, &app->teardown_head,
			teardown_node) {
		/* Free every object in the session and the session. */
		rcu_read_lock();
		delete_ust_app_session(sock, ua_sess, app);
		rcu_read_unlock();
	}

	/* Defer hash table destruction to the ht-cleanup mechanism. */
	ht_cleanup_push(app->sessions);
	ht_cleanup_push(app->ust_objd);

	/*
	 * Wait until we have deleted the application from the sock hash table
	 * before closing this socket, otherwise an application could re-use the
	 * socket ID and race with the teardown, using the same hash table entry.
	 *
	 * It's OK to leave the close in call_rcu. We want it to stay unique for
	 * all RCU readers that could run concurrently with unregister app,
	 * therefore we _need_ to only close that socket after a grace period. So
	 * it should stay in this RCU callback.
	 *
	 * This close() is a very important step of the synchronization model so
	 * every modification to this function must be carefully reviewed.
	 */
	ret = close(sock);
	if (ret) {
		PERROR("close");
	}
	lttng_fd_put(LTTNG_FD_APPS, 1);

	DBG2("UST app pid %d deleted", app->pid);
	free(app);
}
747
748 /*
749 * URCU intermediate call to delete an UST app.
750 */
751 static
752 void delete_ust_app_rcu(struct rcu_head *head)
753 {
754 struct lttng_ht_node_ulong *node =
755 caa_container_of(head, struct lttng_ht_node_ulong, head);
756 struct ust_app *app =
757 caa_container_of(node, struct ust_app, pid_n);
758
759 DBG3("Call RCU deleting app PID %d", app->pid);
760 delete_ust_app(app);
761 }
762
763 /*
764 * Delete the session from the application ht and delete the data structure by
765 * freeing every object inside and releasing them.
766 */
767 static void destroy_app_session(struct ust_app *app,
768 struct ust_app_session *ua_sess)
769 {
770 int ret;
771 struct lttng_ht_iter iter;
772
773 assert(app);
774 assert(ua_sess);
775
776 iter.iter.node = &ua_sess->node.node;
777 ret = lttng_ht_del(app->sessions, &iter);
778 if (ret) {
779 /* Already scheduled for teardown. */
780 goto end;
781 }
782
783 /* Once deleted, free the data structure. */
784 delete_ust_app_session(app->sock, ua_sess, app);
785
786 end:
787 return;
788 }
789
790 /*
791 * Alloc new UST app session.
792 */
793 static
794 struct ust_app_session *alloc_ust_app_session(struct ust_app *app)
795 {
796 struct ust_app_session *ua_sess;
797
798 /* Init most of the default value by allocating and zeroing */
799 ua_sess = zmalloc(sizeof(struct ust_app_session));
800 if (ua_sess == NULL) {
801 PERROR("malloc");
802 goto error_free;
803 }
804
805 ua_sess->handle = -1;
806 ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
807 pthread_mutex_init(&ua_sess->lock, NULL);
808
809 return ua_sess;
810
811 error_free:
812 return NULL;
813 }
814
815 /*
816 * Alloc new UST app channel.
817 */
818 static
819 struct ust_app_channel *alloc_ust_app_channel(char *name,
820 struct ust_app_session *ua_sess,
821 struct lttng_ust_channel_attr *attr)
822 {
823 struct ust_app_channel *ua_chan;
824
825 /* Init most of the default value by allocating and zeroing */
826 ua_chan = zmalloc(sizeof(struct ust_app_channel));
827 if (ua_chan == NULL) {
828 PERROR("malloc");
829 goto error;
830 }
831
832 /* Setup channel name */
833 strncpy(ua_chan->name, name, sizeof(ua_chan->name));
834 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
835
836 ua_chan->enabled = 1;
837 ua_chan->handle = -1;
838 ua_chan->session = ua_sess;
839 ua_chan->key = get_next_channel_key();
840 ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
841 ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
842 lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);
843
844 CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
845 CDS_INIT_LIST_HEAD(&ua_chan->ctx_list);
846
847 /* Copy attributes */
848 if (attr) {
849 /* Translate from lttng_ust_channel to ustctl_consumer_channel_attr. */
850 ua_chan->attr.subbuf_size = attr->subbuf_size;
851 ua_chan->attr.num_subbuf = attr->num_subbuf;
852 ua_chan->attr.overwrite = attr->overwrite;
853 ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
854 ua_chan->attr.read_timer_interval = attr->read_timer_interval;
855 ua_chan->attr.output = attr->output;
856 }
857 /* By default, the channel is a per cpu channel. */
858 ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
859
860 DBG3("UST app channel %s allocated", ua_chan->name);
861
862 return ua_chan;
863
864 error:
865 return NULL;
866 }
867
868 /*
869 * Allocate and initialize a UST app stream.
870 *
871 * Return newly allocated stream pointer or NULL on error.
872 */
873 struct ust_app_stream *ust_app_alloc_stream(void)
874 {
875 struct ust_app_stream *stream = NULL;
876
877 stream = zmalloc(sizeof(*stream));
878 if (stream == NULL) {
879 PERROR("zmalloc ust app stream");
880 goto error;
881 }
882
883 /* Zero could be a valid value for a handle so flag it to -1. */
884 stream->handle = -1;
885
886 error:
887 return stream;
888 }
889
890 /*
891 * Alloc new UST app event.
892 */
893 static
894 struct ust_app_event *alloc_ust_app_event(char *name,
895 struct lttng_ust_event *attr)
896 {
897 struct ust_app_event *ua_event;
898
899 /* Init most of the default value by allocating and zeroing */
900 ua_event = zmalloc(sizeof(struct ust_app_event));
901 if (ua_event == NULL) {
902 PERROR("malloc");
903 goto error;
904 }
905
906 ua_event->enabled = 1;
907 strncpy(ua_event->name, name, sizeof(ua_event->name));
908 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
909 lttng_ht_node_init_str(&ua_event->node, ua_event->name);
910
911 /* Copy attributes */
912 if (attr) {
913 memcpy(&ua_event->attr, attr, sizeof(ua_event->attr));
914 }
915
916 DBG3("UST app event %s allocated", ua_event->name);
917
918 return ua_event;
919
920 error:
921 return NULL;
922 }
923
924 /*
925 * Alloc new UST app context.
926 */
927 static
928 struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context *uctx)
929 {
930 struct ust_app_ctx *ua_ctx;
931
932 ua_ctx = zmalloc(sizeof(struct ust_app_ctx));
933 if (ua_ctx == NULL) {
934 goto error;
935 }
936
937 CDS_INIT_LIST_HEAD(&ua_ctx->list);
938
939 if (uctx) {
940 memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
941 }
942
943 DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);
944
945 error:
946 return ua_ctx;
947 }
948
949 /*
950 * Allocate a filter and copy the given original filter.
951 *
952 * Return allocated filter or NULL on error.
953 */
954 static struct lttng_ust_filter_bytecode *alloc_copy_ust_app_filter(
955 struct lttng_ust_filter_bytecode *orig_f)
956 {
957 struct lttng_ust_filter_bytecode *filter = NULL;
958
959 /* Copy filter bytecode */
960 filter = zmalloc(sizeof(*filter) + orig_f->len);
961 if (!filter) {
962 PERROR("zmalloc alloc ust app filter");
963 goto error;
964 }
965
966 memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
967
968 error:
969 return filter;
970 }
971
972 /*
973 * Find an ust_app using the sock and return it. RCU read side lock must be
974 * held before calling this helper function.
975 */
976 struct ust_app *ust_app_find_by_sock(int sock)
977 {
978 struct lttng_ht_node_ulong *node;
979 struct lttng_ht_iter iter;
980
981 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
982 node = lttng_ht_iter_get_node_ulong(&iter);
983 if (node == NULL) {
984 DBG2("UST app find by sock %d not found", sock);
985 goto error;
986 }
987
988 return caa_container_of(node, struct ust_app, sock_n);
989
990 error:
991 return NULL;
992 }
993
994 /*
995 * Find an ust_app using the notify sock and return it. RCU read side lock must
996 * be held before calling this helper function.
997 */
998 static struct ust_app *find_app_by_notify_sock(int sock)
999 {
1000 struct lttng_ht_node_ulong *node;
1001 struct lttng_ht_iter iter;
1002
1003 lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
1004 &iter);
1005 node = lttng_ht_iter_get_node_ulong(&iter);
1006 if (node == NULL) {
1007 DBG2("UST app find by notify sock %d not found", sock);
1008 goto error;
1009 }
1010
1011 return caa_container_of(node, struct ust_app, notify_sock_n);
1012
1013 error:
1014 return NULL;
1015 }
1016
1017 /*
1018 * Lookup for an ust app event based on event name, filter bytecode and the
1019 * event loglevel.
1020 *
1021 * Return an ust_app_event object or NULL on error.
1022 */
1023 static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
1024 char *name, struct lttng_ust_filter_bytecode *filter, int loglevel)
1025 {
1026 struct lttng_ht_iter iter;
1027 struct lttng_ht_node_str *node;
1028 struct ust_app_event *event = NULL;
1029 struct ust_app_ht_key key;
1030
1031 assert(name);
1032 assert(ht);
1033
1034 /* Setup key for event lookup. */
1035 key.name = name;
1036 key.filter = filter;
1037 key.loglevel = loglevel;
1038
1039 /* Lookup using the event name as hash and a custom match fct. */
1040 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
1041 ht_match_ust_app_event, &key, &iter.iter);
1042 node = lttng_ht_iter_get_node_str(&iter);
1043 if (node == NULL) {
1044 goto end;
1045 }
1046
1047 event = caa_container_of(node, struct ust_app_event, node);
1048
1049 end:
1050 return event;
1051 }
1052
1053 /*
1054 * Create the channel context on the tracer.
1055 *
1056 * Called with UST app session lock held.
1057 */
1058 static
1059 int create_ust_channel_context(struct ust_app_channel *ua_chan,
1060 struct ust_app_ctx *ua_ctx, struct ust_app *app)
1061 {
1062 int ret;
1063
1064 health_code_update();
1065
1066 ret = ustctl_add_context(app->sock, &ua_ctx->ctx,
1067 ua_chan->obj, &ua_ctx->obj);
1068 if (ret < 0) {
1069 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1070 ERR("UST app create channel context failed for app (pid: %d) "
1071 "with ret %d", app->pid, ret);
1072 } else {
1073 /*
1074 * This is normal behavior, an application can die during the
1075 * creation process. Don't report an error so the execution can
1076 * continue normally.
1077 */
1078 ret = 0;
1079 DBG3("UST app disable event failed. Application is dead.");
1080 }
1081 goto error;
1082 }
1083
1084 ua_ctx->handle = ua_ctx->obj->handle;
1085
1086 DBG2("UST app context handle %d created successfully for channel %s",
1087 ua_ctx->handle, ua_chan->name);
1088
1089 error:
1090 health_code_update();
1091 return ret;
1092 }
1093
1094 /*
1095 * Set the filter on the tracer.
1096 */
1097 static
1098 int set_ust_event_filter(struct ust_app_event *ua_event,
1099 struct ust_app *app)
1100 {
1101 int ret;
1102
1103 health_code_update();
1104
1105 if (!ua_event->filter) {
1106 ret = 0;
1107 goto error;
1108 }
1109
1110 ret = ustctl_set_filter(app->sock, ua_event->filter,
1111 ua_event->obj);
1112 if (ret < 0) {
1113 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1114 ERR("UST app event %s filter failed for app (pid: %d) "
1115 "with ret %d", ua_event->attr.name, app->pid, ret);
1116 } else {
1117 /*
1118 * This is normal behavior, an application can die during the
1119 * creation process. Don't report an error so the execution can
1120 * continue normally.
1121 */
1122 ret = 0;
1123 DBG3("UST app filter event failed. Application is dead.");
1124 }
1125 goto error;
1126 }
1127
1128 DBG2("UST filter set successfully for event %s", ua_event->name);
1129
1130 error:
1131 health_code_update();
1132 return ret;
1133 }
1134
1135 /*
1136 * Disable the specified event on to UST tracer for the UST session.
1137 */
1138 static int disable_ust_event(struct ust_app *app,
1139 struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
1140 {
1141 int ret;
1142
1143 health_code_update();
1144
1145 ret = ustctl_disable(app->sock, ua_event->obj);
1146 if (ret < 0) {
1147 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1148 ERR("UST app event %s disable failed for app (pid: %d) "
1149 "and session handle %d with ret %d",
1150 ua_event->attr.name, app->pid, ua_sess->handle, ret);
1151 } else {
1152 /*
1153 * This is normal behavior, an application can die during the
1154 * creation process. Don't report an error so the execution can
1155 * continue normally.
1156 */
1157 ret = 0;
1158 DBG3("UST app disable event failed. Application is dead.");
1159 }
1160 goto error;
1161 }
1162
1163 DBG2("UST app event %s disabled successfully for app (pid: %d)",
1164 ua_event->attr.name, app->pid);
1165
1166 error:
1167 health_code_update();
1168 return ret;
1169 }
1170
1171 /*
1172 * Disable the specified channel on to UST tracer for the UST session.
1173 */
1174 static int disable_ust_channel(struct ust_app *app,
1175 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1176 {
1177 int ret;
1178
1179 health_code_update();
1180
1181 ret = ustctl_disable(app->sock, ua_chan->obj);
1182 if (ret < 0) {
1183 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1184 ERR("UST app channel %s disable failed for app (pid: %d) "
1185 "and session handle %d with ret %d",
1186 ua_chan->name, app->pid, ua_sess->handle, ret);
1187 } else {
1188 /*
1189 * This is normal behavior, an application can die during the
1190 * creation process. Don't report an error so the execution can
1191 * continue normally.
1192 */
1193 ret = 0;
1194 DBG3("UST app disable channel failed. Application is dead.");
1195 }
1196 goto error;
1197 }
1198
1199 DBG2("UST app channel %s disabled successfully for app (pid: %d)",
1200 ua_chan->name, app->pid);
1201
1202 error:
1203 health_code_update();
1204 return ret;
1205 }
1206
1207 /*
1208 * Enable the specified channel on to UST tracer for the UST session.
1209 */
1210 static int enable_ust_channel(struct ust_app *app,
1211 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1212 {
1213 int ret;
1214
1215 health_code_update();
1216
1217 ret = ustctl_enable(app->sock, ua_chan->obj);
1218 if (ret < 0) {
1219 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1220 ERR("UST app channel %s enable failed for app (pid: %d) "
1221 "and session handle %d with ret %d",
1222 ua_chan->name, app->pid, ua_sess->handle, ret);
1223 } else {
1224 /*
1225 * This is normal behavior, an application can die during the
1226 * creation process. Don't report an error so the execution can
1227 * continue normally.
1228 */
1229 ret = 0;
1230 DBG3("UST app enable channel failed. Application is dead.");
1231 }
1232 goto error;
1233 }
1234
1235 ua_chan->enabled = 1;
1236
1237 DBG2("UST app channel %s enabled successfully for app (pid: %d)",
1238 ua_chan->name, app->pid);
1239
1240 error:
1241 health_code_update();
1242 return ret;
1243 }
1244
1245 /*
1246 * Enable the specified event on to UST tracer for the UST session.
1247 */
1248 static int enable_ust_event(struct ust_app *app,
1249 struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
1250 {
1251 int ret;
1252
1253 health_code_update();
1254
1255 ret = ustctl_enable(app->sock, ua_event->obj);
1256 if (ret < 0) {
1257 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1258 ERR("UST app event %s enable failed for app (pid: %d) "
1259 "and session handle %d with ret %d",
1260 ua_event->attr.name, app->pid, ua_sess->handle, ret);
1261 } else {
1262 /*
1263 * This is normal behavior, an application can die during the
1264 * creation process. Don't report an error so the execution can
1265 * continue normally.
1266 */
1267 ret = 0;
1268 DBG3("UST app enable event failed. Application is dead.");
1269 }
1270 goto error;
1271 }
1272
1273 DBG2("UST app event %s enabled successfully for app (pid: %d)",
1274 ua_event->attr.name, app->pid);
1275
1276 error:
1277 health_code_update();
1278 return ret;
1279 }
1280
1281 /*
1282 * Send channel and stream buffer to application.
1283 *
1284 * Return 0 on success. On error, a negative value is returned.
1285 */
1286 static int send_channel_pid_to_ust(struct ust_app *app,
1287 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1288 {
1289 int ret;
1290 struct ust_app_stream *stream, *stmp;
1291
1292 assert(app);
1293 assert(ua_sess);
1294 assert(ua_chan);
1295
1296 health_code_update();
1297
1298 DBG("UST app sending channel %s to UST app sock %d", ua_chan->name,
1299 app->sock);
1300
1301 /* Send channel to the application. */
1302 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
1303 if (ret < 0) {
1304 goto error;
1305 }
1306
1307 health_code_update();
1308
1309 /* Send all streams to application. */
1310 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
1311 ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
1312 if (ret < 0) {
1313 goto error;
1314 }
1315 /* We don't need the stream anymore once sent to the tracer. */
1316 cds_list_del(&stream->list);
1317 delete_ust_app_stream(-1, stream);
1318 }
1319 /* Flag the channel that it is sent to the application. */
1320 ua_chan->is_sent = 1;
1321
1322 error:
1323 health_code_update();
1324 return ret;
1325 }
1326
1327 /*
1328 * Create the specified event onto the UST tracer for a UST session.
1329 *
1330 * Should be called with session mutex held.
1331 */
static
int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
{
	int ret = 0;

	health_code_update();

	/* Create UST event on tracer */
	ret = ustctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
			&ua_event->obj);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Error ustctl create event %s for app pid: %d with ret %d",
					ua_event->attr.name, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app create event failed. Application is dead.");
		}
		goto error;
	}

	/* Keep the tracer-assigned handle for later event operations. */
	ua_event->handle = ua_event->obj->handle;

	DBG2("UST app event %s created successfully for pid:%d",
			ua_event->attr.name, app->pid);

	health_code_update();

	/* Set filter if one is present. */
	if (ua_event->filter) {
		ret = set_ust_event_filter(ua_event, app);
		if (ret < 0) {
			goto error;
		}
	}

	/* If event not enabled, disable it on the tracer */
	if (ua_event->enabled == 0) {
		ret = disable_ust_event(app, ua_sess, ua_event);
		if (ret < 0) {
			/*
			 * If we hit an EPERM, something is wrong with our disable call. If
			 * we get an EEXIST, there is a problem on the tracer side since we
			 * just created it.
			 */
			switch (ret) {
			case -LTTNG_UST_ERR_PERM:
				/* Code flow problem */
				assert(0);
				/* Falls through when NDEBUG disables assert(). */
			case -LTTNG_UST_ERR_EXIST:
				/* It's OK for our use case. */
				ret = 0;
				break;
			default:
				break;
			}
			goto error;
		}
	}

error:
	health_code_update();
	return ret;
}
1402
1403 /*
1404 * Copy data between an UST app event and a LTT event.
1405 */
static void shadow_copy_event(struct ust_app_event *ua_event,
		struct ltt_ust_event *uevent)
{
	/* Copy the event name and force NUL termination. */
	strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
	ua_event->name[sizeof(ua_event->name) - 1] = '\0';

	ua_event->enabled = uevent->enabled;

	/* Copy event attributes */
	memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));

	/* Copy filter bytecode */
	if (uevent->filter) {
		ua_event->filter = alloc_copy_ust_app_filter(uevent->filter);
		/* Filter might be NULL here in case of ENOMEM. */
	}
}
1423
1424 /*
1425 * Copy data between an UST app channel and a LTT channel.
1426 */
static void shadow_copy_channel(struct ust_app_channel *ua_chan,
		struct ltt_ust_channel *uchan)
{
	struct lttng_ht_iter iter;
	struct ltt_ust_event *uevent;
	struct ltt_ust_context *uctx;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;

	DBG2("UST app shadow copy of channel %s started", ua_chan->name);

	/* Copy channel name and force NUL termination. */
	strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
	ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';

	ua_chan->tracefile_size = uchan->tracefile_size;
	ua_chan->tracefile_count = uchan->tracefile_count;

	/* Copy event attributes since the layout is different. */
	ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
	ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
	ua_chan->attr.overwrite = uchan->attr.overwrite;
	ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
	ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
	ua_chan->attr.output = uchan->attr.output;
	/*
	 * Note that the attribute channel type is not set since the channel on the
	 * tracing registry side does not have this information.
	 */

	ua_chan->enabled = uchan->enabled;
	ua_chan->tracing_channel_id = uchan->id;

	/* Replicate every registry context into the app channel. */
	cds_list_for_each_entry(uctx, &uchan->ctx_list, list) {
		ua_ctx = alloc_ust_app_ctx(&uctx->ctx);
		if (ua_ctx == NULL) {
			/* Allocation failed; this context is silently skipped. */
			continue;
		}
		lttng_ht_node_init_ulong(&ua_ctx->node,
				(unsigned long) ua_ctx->ctx.ctx);
		lttng_ht_add_unique_ulong(ua_chan->ctx, &ua_ctx->node);
		cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
	}

	/* Copy all events from ltt ust channel to ust app channel */
	cds_lfht_for_each_entry(uchan->events->ht, &iter.iter, uevent, node.node) {
		/* Match on name, filter and loglevel to avoid duplicating events. */
		ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
				uevent->filter, uevent->attr.loglevel);
		if (ua_event == NULL) {
			DBG2("UST event %s not found on shadow copy channel",
					uevent->attr.name);
			ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
			if (ua_event == NULL) {
				/* Allocation failed; this event is silently skipped. */
				continue;
			}
			shadow_copy_event(ua_event, uevent);
			add_unique_ust_app_event(ua_chan, ua_event);
		}
	}

	DBG3("UST app shadow copy of channel %s done", ua_chan->name);
}
1488
1489 /*
1490 * Copy data between a UST app session and a regular LTT session.
1491 */
1492 static void shadow_copy_session(struct ust_app_session *ua_sess,
1493 struct ltt_ust_session *usess, struct ust_app *app)
1494 {
1495 struct lttng_ht_node_str *ua_chan_node;
1496 struct lttng_ht_iter iter;
1497 struct ltt_ust_channel *uchan;
1498 struct ust_app_channel *ua_chan;
1499 time_t rawtime;
1500 struct tm *timeinfo;
1501 char datetime[16];
1502 int ret;
1503
1504 /* Get date and time for unique app path */
1505 time(&rawtime);
1506 timeinfo = localtime(&rawtime);
1507 strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);
1508
1509 DBG2("Shadow copy of session handle %d", ua_sess->handle);
1510
1511 ua_sess->tracing_id = usess->id;
1512 ua_sess->id = get_next_session_id();
1513 ua_sess->uid = app->uid;
1514 ua_sess->gid = app->gid;
1515 ua_sess->euid = usess->uid;
1516 ua_sess->egid = usess->gid;
1517 ua_sess->buffer_type = usess->buffer_type;
1518 ua_sess->bits_per_long = app->bits_per_long;
1519 /* There is only one consumer object per session possible. */
1520 ua_sess->consumer = usess->consumer;
1521 ua_sess->output_traces = usess->output_traces;
1522 ua_sess->live_timer_interval = usess->live_timer_interval;
1523
1524 switch (ua_sess->buffer_type) {
1525 case LTTNG_BUFFER_PER_PID:
1526 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
1527 DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
1528 datetime);
1529 break;
1530 case LTTNG_BUFFER_PER_UID:
1531 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
1532 DEFAULT_UST_TRACE_UID_PATH, ua_sess->uid, app->bits_per_long);
1533 break;
1534 default:
1535 assert(0);
1536 goto error;
1537 }
1538 if (ret < 0) {
1539 PERROR("asprintf UST shadow copy session");
1540 assert(0);
1541 goto error;
1542 }
1543
1544 /* Iterate over all channels in global domain. */
1545 cds_lfht_for_each_entry(usess->domain_global.channels->ht, &iter.iter,
1546 uchan, node.node) {
1547 struct lttng_ht_iter uiter;
1548
1549 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
1550 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
1551 if (ua_chan_node != NULL) {
1552 /* Session exist. Contiuing. */
1553 continue;
1554 }
1555
1556 DBG2("Channel %s not found on shadow session copy, creating it",
1557 uchan->name);
1558 ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
1559 if (ua_chan == NULL) {
1560 /* malloc failed FIXME: Might want to do handle ENOMEM .. */
1561 continue;
1562 }
1563 shadow_copy_channel(ua_chan, uchan);
1564 /*
1565 * The concept of metadata channel does not exist on the tracing
1566 * registry side of the session daemon so this can only be a per CPU
1567 * channel and not metadata.
1568 */
1569 ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
1570
1571 lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
1572 }
1573
1574 error:
1575 return;
1576 }
1577
/*
 * Lookup session wrapper.
 */
static
void __lookup_session_by_app(struct ltt_ust_session *usess,
		struct ust_app *app, struct lttng_ht_iter *iter)
{
	/* Get right UST app session from app */
	/* The app's sessions hash table is keyed by the tracing session id. */
	lttng_ht_lookup(app->sessions, &usess->id, iter);
}
1588
1589 /*
1590 * Return ust app session from the app session hashtable using the UST session
1591 * id.
1592 */
1593 static struct ust_app_session *lookup_session_by_app(
1594 struct ltt_ust_session *usess, struct ust_app *app)
1595 {
1596 struct lttng_ht_iter iter;
1597 struct lttng_ht_node_u64 *node;
1598
1599 __lookup_session_by_app(usess, app, &iter);
1600 node = lttng_ht_iter_get_node_u64(&iter);
1601 if (node == NULL) {
1602 goto error;
1603 }
1604
1605 return caa_container_of(node, struct ust_app_session, node);
1606
1607 error:
1608 return NULL;
1609 }
1610
1611 /*
1612 * Setup buffer registry per PID for the given session and application. If none
1613 * is found, a new one is created, added to the global registry and
1614 * initialized. If regp is valid, it's set with the newly created object.
1615 *
1616 * Return 0 on success or else a negative value.
1617 */
static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
		struct ust_app *app, struct buffer_reg_pid **regp)
{
	int ret = 0;
	struct buffer_reg_pid *reg_pid;

	assert(ua_sess);
	assert(app);

	rcu_read_lock();

	reg_pid = buffer_reg_pid_find(ua_sess->id);
	if (!reg_pid) {
		/*
		 * This is the create channel path meaning that if there is NO
		 * registry available, we have to create one for this session.
		 */
		ret = buffer_reg_pid_create(ua_sess->id, &reg_pid);
		if (ret < 0) {
			goto error;
		}
		buffer_reg_pid_add(reg_pid);
	} else {
		/* Registry already exists; skip re-initialization. */
		goto end;
	}

	/* Initialize registry. */
	ret = ust_registry_session_init(&reg_pid->registry->reg.ust, app,
			app->bits_per_long, app->uint8_t_alignment,
			app->uint16_t_alignment, app->uint32_t_alignment,
			app->uint64_t_alignment, app->long_alignment,
			app->byte_order, app->version.major,
			app->version.minor);
	if (ret < 0) {
		goto error;
	}

	DBG3("UST app buffer registry per PID created successfully");

end:
	/* On success, optionally hand back the (found or created) registry. */
	if (regp) {
		*regp = reg_pid;
	}
error:
	rcu_read_unlock();
	return ret;
}
1665
1666 /*
1667 * Setup buffer registry per UID for the given session and application. If none
1668 * is found, a new one is created, added to the global registry and
1669 * initialized. If regp is valid, it's set with the newly created object.
1670 *
1671 * Return 0 on success or else a negative value.
1672 */
static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
		struct ust_app *app, struct buffer_reg_uid **regp)
{
	int ret = 0;
	struct buffer_reg_uid *reg_uid;

	assert(usess);
	assert(app);

	rcu_read_lock();

	/* Per-UID registries are keyed by session id, bitness and uid. */
	reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
	if (!reg_uid) {
		/*
		 * This is the create channel path meaning that if there is NO
		 * registry available, we have to create one for this session.
		 */
		ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
				LTTNG_DOMAIN_UST, &reg_uid);
		if (ret < 0) {
			goto error;
		}
		buffer_reg_uid_add(reg_uid);
	} else {
		/* Registry already exists; skip re-initialization. */
		goto end;
	}

	/* Initialize registry. */
	ret = ust_registry_session_init(&reg_uid->registry->reg.ust, NULL,
			app->bits_per_long, app->uint8_t_alignment,
			app->uint16_t_alignment, app->uint32_t_alignment,
			app->uint64_t_alignment, app->long_alignment,
			app->byte_order, app->version.major,
			app->version.minor);
	if (ret < 0) {
		goto error;
	}
	/* Add node to teardown list of the session. */
	cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);

	DBG3("UST app buffer registry per UID created successfully");

end:
	/* On success, optionally hand back the (found or created) registry. */
	if (regp) {
		*regp = reg_uid;
	}
error:
	rcu_read_unlock();
	return ret;
}
1723
1724 /*
1725 * Create a session on the tracer side for the given app.
1726 *
1727 * On success, ua_sess_ptr is populated with the session pointer or else left
1728 * untouched. If the session was created, is_created is set to 1. On error,
1729 * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
1730 * be NULL.
1731 *
1732 * Returns 0 on success or else a negative code which is either -ENOMEM or
1733 * -ENOTCONN which is the default code if the ustctl_create_session fails.
1734 */
static int create_ust_app_session(struct ltt_ust_session *usess,
		struct ust_app *app, struct ust_app_session **ua_sess_ptr,
		int *is_created)
{
	int ret, created = 0;
	struct ust_app_session *ua_sess;

	assert(usess);
	assert(app);
	assert(ua_sess_ptr);

	health_code_update();

	/* Reuse an existing app session if one was already created. */
	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
				app->pid, usess->id);
		ua_sess = alloc_ust_app_session(app);
		if (ua_sess == NULL) {
			/* Only malloc can failed so something is really wrong */
			ret = -ENOMEM;
			goto error;
		}
		shadow_copy_session(ua_sess, usess, app);
		created = 1;
	}

	/* Make sure the buffer registry matching the buffering scheme exists. */
	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		/* Init local registry. */
		ret = setup_buffer_reg_pid(ua_sess, app, NULL);
		if (ret < 0) {
			goto error;
		}
		break;
	case LTTNG_BUFFER_PER_UID:
		/* Look for a global registry. If none exists, create one. */
		ret = setup_buffer_reg_uid(usess, app, NULL);
		if (ret < 0) {
			goto error;
		}
		break;
	default:
		assert(0);
		ret = -EINVAL;
		goto error;
	}

	health_code_update();

	/* A handle of -1 means no tracer-side session exists yet. */
	if (ua_sess->handle == -1) {
		ret = ustctl_create_session(app->sock);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("Creating session for app pid %d with ret %d",
						app->pid, ret);
			} else {
				DBG("UST app creating session failed. Application is dead");
				/*
				 * This is normal behavior, an application can die during the
				 * creation process. Don't report an error so the execution can
				 * continue normally. This will get flagged ENOTCONN and the
				 * caller will handle it.
				 */
				ret = 0;
			}
			delete_ust_app_session(-1, ua_sess, app);
			if (ret != -ENOMEM) {
				/*
				 * Tracer is probably gone or got an internal error so let's
				 * behave like it will soon unregister or not usable.
				 */
				ret = -ENOTCONN;
			}
			goto error;
		}

		/* ustctl_create_session() returns the tracer-side handle. */
		ua_sess->handle = ret;

		/* Add ust app session to app's HT */
		lttng_ht_node_init_u64(&ua_sess->node,
				ua_sess->tracing_id);
		lttng_ht_add_unique_u64(app->sessions, &ua_sess->node);

		DBG2("UST app session created successfully with handle %d", ret);
	}

	*ua_sess_ptr = ua_sess;
	if (is_created) {
		*is_created = created;
	}

	/* Everything went well. */
	ret = 0;

error:
	health_code_update();
	return ret;
}
1834
1835 /*
1836 * Create a context for the channel on the tracer.
1837 *
1838 * Called with UST app session lock held and a RCU read side lock.
1839 */
1840 static
1841 int create_ust_app_channel_context(struct ust_app_session *ua_sess,
1842 struct ust_app_channel *ua_chan, struct lttng_ust_context *uctx,
1843 struct ust_app *app)
1844 {
1845 int ret = 0;
1846 struct lttng_ht_iter iter;
1847 struct lttng_ht_node_ulong *node;
1848 struct ust_app_ctx *ua_ctx;
1849
1850 DBG2("UST app adding context to channel %s", ua_chan->name);
1851
1852 lttng_ht_lookup(ua_chan->ctx, (void *)((unsigned long)uctx->ctx), &iter);
1853 node = lttng_ht_iter_get_node_ulong(&iter);
1854 if (node != NULL) {
1855 ret = -EEXIST;
1856 goto error;
1857 }
1858
1859 ua_ctx = alloc_ust_app_ctx(uctx);
1860 if (ua_ctx == NULL) {
1861 /* malloc failed */
1862 ret = -1;
1863 goto error;
1864 }
1865
1866 lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
1867 lttng_ht_add_unique_ulong(ua_chan->ctx, &ua_ctx->node);
1868 cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
1869
1870 ret = create_ust_channel_context(ua_chan, ua_ctx, app);
1871 if (ret < 0) {
1872 goto error;
1873 }
1874
1875 error:
1876 return ret;
1877 }
1878
1879 /*
1880 * Enable on the tracer side a ust app event for the session and channel.
1881 *
1882 * Called with UST app session lock held.
1883 */
1884 static
1885 int enable_ust_app_event(struct ust_app_session *ua_sess,
1886 struct ust_app_event *ua_event, struct ust_app *app)
1887 {
1888 int ret;
1889
1890 ret = enable_ust_event(app, ua_sess, ua_event);
1891 if (ret < 0) {
1892 goto error;
1893 }
1894
1895 ua_event->enabled = 1;
1896
1897 error:
1898 return ret;
1899 }
1900
1901 /*
1902 * Disable on the tracer side a ust app event for the session and channel.
1903 */
1904 static int disable_ust_app_event(struct ust_app_session *ua_sess,
1905 struct ust_app_event *ua_event, struct ust_app *app)
1906 {
1907 int ret;
1908
1909 ret = disable_ust_event(app, ua_sess, ua_event);
1910 if (ret < 0) {
1911 goto error;
1912 }
1913
1914 ua_event->enabled = 0;
1915
1916 error:
1917 return ret;
1918 }
1919
1920 /*
1921 * Lookup ust app channel for session and disable it on the tracer side.
1922 */
1923 static
1924 int disable_ust_app_channel(struct ust_app_session *ua_sess,
1925 struct ust_app_channel *ua_chan, struct ust_app *app)
1926 {
1927 int ret;
1928
1929 ret = disable_ust_channel(app, ua_sess, ua_chan);
1930 if (ret < 0) {
1931 goto error;
1932 }
1933
1934 ua_chan->enabled = 0;
1935
1936 error:
1937 return ret;
1938 }
1939
1940 /*
1941 * Lookup ust app channel for session and enable it on the tracer side. This
1942 * MUST be called with a RCU read side lock acquired.
1943 */
1944 static int enable_ust_app_channel(struct ust_app_session *ua_sess,
1945 struct ltt_ust_channel *uchan, struct ust_app *app)
1946 {
1947 int ret = 0;
1948 struct lttng_ht_iter iter;
1949 struct lttng_ht_node_str *ua_chan_node;
1950 struct ust_app_channel *ua_chan;
1951
1952 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
1953 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
1954 if (ua_chan_node == NULL) {
1955 DBG2("Unable to find channel %s in ust session id %" PRIu64,
1956 uchan->name, ua_sess->tracing_id);
1957 goto error;
1958 }
1959
1960 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
1961
1962 ret = enable_ust_channel(app, ua_sess, ua_chan);
1963 if (ret < 0) {
1964 goto error;
1965 }
1966
1967 error:
1968 return ret;
1969 }
1970
1971 /*
1972 * Ask the consumer to create a channel and get it if successful.
1973 *
1974 * Return 0 on success or else a negative value.
1975 */
static int do_consumer_create_channel(struct ltt_ust_session *usess,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
		int bitness, struct ust_registry_session *registry)
{
	int ret;
	unsigned int nb_fd = 0;
	struct consumer_socket *socket;

	assert(usess);
	assert(ua_sess);
	assert(ua_chan);
	assert(registry);

	rcu_read_lock();
	health_code_update();

	/* Get the right consumer socket for the application. */
	socket = consumer_find_socket_by_bitness(bitness, usess->consumer);
	if (!socket) {
		ret = -EINVAL;
		goto error;
	}

	health_code_update();

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create channel");
		goto error;
	}

	/*
	 * Ask consumer to create channel. The consumer will return the number of
	 * stream we have to expect.
	 */
	ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
			registry);
	if (ret < 0) {
		goto error_ask;
	}

	/*
	 * Compute the number of fd needed before receiving them. It must be 2 per
	 * stream (2 being the default value here).
	 */
	nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;

	/* Reserve the amount of file descriptor we need. */
	ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create channel");
		goto error_fd_get_stream;
	}

	health_code_update();

	/*
	 * Now get the channel from the consumer. This call will populate the stream
	 * list of that channel and set the ust objects.
	 */
	if (usess->consumer->enabled) {
		ret = ust_consumer_get_channel(socket, ua_chan);
		if (ret < 0) {
			goto error_destroy;
		}
	}

	rcu_read_unlock();
	return 0;

error_destroy:
	/* Give back the stream fds reserved above. */
	lttng_fd_put(LTTNG_FD_APPS, nb_fd);
error_fd_get_stream:
	/*
	 * Initiate a destroy channel on the consumer since we had an error
	 * handling it on our side. The return value is of no importance since we
	 * already have a ret value set by the previous error that we need to
	 * return.
	 */
	(void) ust_consumer_destroy_channel(socket, ua_chan);
error_ask:
	/* Give back the channel fd reserved above. */
	lttng_fd_put(LTTNG_FD_APPS, 1);
error:
	health_code_update();
	rcu_read_unlock();
	return ret;
}
2064
2065 /*
2066 * Duplicate the ust data object of the ust app stream and save it in the
2067 * buffer registry stream.
2068 *
2069 * Return 0 on success or else a negative value.
2070 */
2071 static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
2072 struct ust_app_stream *stream)
2073 {
2074 int ret;
2075
2076 assert(reg_stream);
2077 assert(stream);
2078
2079 /* Reserve the amount of file descriptor we need. */
2080 ret = lttng_fd_get(LTTNG_FD_APPS, 2);
2081 if (ret < 0) {
2082 ERR("Exhausted number of available FD upon duplicate stream");
2083 goto error;
2084 }
2085
2086 /* Duplicate object for stream once the original is in the registry. */
2087 ret = ustctl_duplicate_ust_object_data(&stream->obj,
2088 reg_stream->obj.ust);
2089 if (ret < 0) {
2090 ERR("Duplicate stream obj from %p to %p failed with ret %d",
2091 reg_stream->obj.ust, stream->obj, ret);
2092 lttng_fd_put(LTTNG_FD_APPS, 2);
2093 goto error;
2094 }
2095 stream->handle = stream->obj->handle;
2096
2097 error:
2098 return ret;
2099 }
2100
2101 /*
2102 * Duplicate the ust data object of the ust app. channel and save it in the
2103 * buffer registry channel.
2104 *
2105 * Return 0 on success or else a negative value.
2106 */
static int duplicate_channel_object(struct buffer_reg_channel *reg_chan,
		struct ust_app_channel *ua_chan)
{
	int ret;

	assert(reg_chan);
	assert(ua_chan);

	/* Need one fd for the channel. (Old comment said "two"; code gets 1.) */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon duplicate channel");
		goto error_fd_get;
	}

	/* Duplicate object for stream once the original is in the registry. */
	ret = ustctl_duplicate_ust_object_data(&ua_chan->obj, reg_chan->obj.ust);
	if (ret < 0) {
		ERR("Duplicate channel obj from %p to %p failed with ret: %d",
				reg_chan->obj.ust, ua_chan->obj, ret);
		goto error;
	}
	/* Keep the tracer-side handle of the duplicated object. */
	ua_chan->handle = ua_chan->obj->handle;

	return 0;

error:
	/* Give back the fd reserved above. */
	lttng_fd_put(LTTNG_FD_APPS, 1);
error_fd_get:
	return ret;
}
2138
2139 /*
2140 * For a given channel buffer registry, setup all streams of the given ust
2141 * application channel.
2142 *
2143 * Return 0 on success or else a negative value.
2144 */
2145 static int setup_buffer_reg_streams(struct buffer_reg_channel *reg_chan,
2146 struct ust_app_channel *ua_chan)
2147 {
2148 int ret = 0;
2149 struct ust_app_stream *stream, *stmp;
2150
2151 assert(reg_chan);
2152 assert(ua_chan);
2153
2154 DBG2("UST app setup buffer registry stream");
2155
2156 /* Send all streams to application. */
2157 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
2158 struct buffer_reg_stream *reg_stream;
2159
2160 ret = buffer_reg_stream_create(&reg_stream);
2161 if (ret < 0) {
2162 goto error;
2163 }
2164
2165 /*
2166 * Keep original pointer and nullify it in the stream so the delete
2167 * stream call does not release the object.
2168 */
2169 reg_stream->obj.ust = stream->obj;
2170 stream->obj = NULL;
2171 buffer_reg_stream_add(reg_stream, reg_chan);
2172
2173 /* We don't need the streams anymore. */
2174 cds_list_del(&stream->list);
2175 delete_ust_app_stream(-1, stream);
2176 }
2177
2178 error:
2179 return ret;
2180 }
2181
2182 /*
2183 * Create a buffer registry channel for the given session registry and
2184 * application channel object. If regp pointer is valid, it's set with the
2185 * created object. Important, the created object is NOT added to the session
2186 * registry hash table.
2187 *
2188 * Return 0 on success else a negative value.
2189 */
2190 static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
2191 struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp)
2192 {
2193 int ret;
2194 struct buffer_reg_channel *reg_chan = NULL;
2195
2196 assert(reg_sess);
2197 assert(ua_chan);
2198
2199 DBG2("UST app creating buffer registry channel for %s", ua_chan->name);
2200
2201 /* Create buffer registry channel. */
2202 ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &reg_chan);
2203 if (ret < 0) {
2204 goto error_create;
2205 }
2206 assert(reg_chan);
2207 reg_chan->consumer_key = ua_chan->key;
2208 reg_chan->subbuf_size = ua_chan->attr.subbuf_size;
2209
2210 /* Create and add a channel registry to session. */
2211 ret = ust_registry_channel_add(reg_sess->reg.ust,
2212 ua_chan->tracing_channel_id);
2213 if (ret < 0) {
2214 goto error;
2215 }
2216 buffer_reg_channel_add(reg_sess, reg_chan);
2217
2218 if (regp) {
2219 *regp = reg_chan;
2220 }
2221
2222 return 0;
2223
2224 error:
2225 /* Safe because the registry channel object was not added to any HT. */
2226 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2227 error_create:
2228 return ret;
2229 }
2230
2231 /*
2232 * Setup buffer registry channel for the given session registry and application
2233 * channel object. If regp pointer is valid, it's set with the created object.
2234 *
2235 * Return 0 on success else a negative value.
2236 */
2237 static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
2238 struct ust_app_channel *ua_chan, struct buffer_reg_channel *reg_chan)
2239 {
2240 int ret;
2241
2242 assert(reg_sess);
2243 assert(reg_chan);
2244 assert(ua_chan);
2245 assert(ua_chan->obj);
2246
2247 DBG2("UST app setup buffer registry channel for %s", ua_chan->name);
2248
2249 /* Setup all streams for the registry. */
2250 ret = setup_buffer_reg_streams(reg_chan, ua_chan);
2251 if (ret < 0) {
2252 goto error;
2253 }
2254
2255 reg_chan->obj.ust = ua_chan->obj;
2256 ua_chan->obj = NULL;
2257
2258 return 0;
2259
2260 error:
2261 buffer_reg_channel_remove(reg_sess, reg_chan);
2262 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2263 return ret;
2264 }
2265
/*
 * Send buffer registry channel to the application.
 *
 * Duplicates the registry channel object for this app, sends the channel and
 * then every registered stream to the application. The channel is marked
 * "sent" only if all streams made it through.
 *
 * Return 0 on success else a negative value.
 */
static int send_channel_uid_to_ust(struct buffer_reg_channel *reg_chan,
		struct ust_app *app, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct buffer_reg_stream *reg_stream;

	assert(reg_chan);
	assert(app);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app sending buffer registry channel to ust sock %d", app->sock);

	/* Per-app duplicate of the registry channel object. */
	ret = duplicate_channel_object(reg_chan, ua_chan);
	if (ret < 0) {
		goto error;
	}

	/* Send channel to the application. */
	ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Send all streams to application. List is walked under its lock. */
	pthread_mutex_lock(&reg_chan->stream_list_lock);
	cds_list_for_each_entry(reg_stream, &reg_chan->streams, lnode) {
		struct ust_app_stream stream;

		/* Per-app duplicate of the registry stream object. */
		ret = duplicate_stream_object(reg_stream, &stream);
		if (ret < 0) {
			goto error_stream_unlock;
		}

		ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
		if (ret < 0) {
			(void) release_ust_app_stream(-1, &stream);
			goto error_stream_unlock;
		}

		/*
		 * The return value is not important here. This function will output an
		 * error if needed.
		 */
		(void) release_ust_app_stream(-1, &stream);
	}
	/* Only flag the channel as sent once every stream was delivered. */
	ua_chan->is_sent = 1;

error_stream_unlock:
	pthread_mutex_unlock(&reg_chan->stream_list_lock);
error:
	return ret;
}
2327
/*
 * Create and send to the application the created buffers with per UID buffers.
 *
 * The per-UID buffer registry channel is created lazily: the first app of a
 * given uid/bitness creates the registry channel and the consumer-side
 * buffers; subsequent apps only get the existing buffers sent to them.
 *
 * Return 0 on success else a negative value.
 */
static int create_channel_per_uid(struct ust_app *app,
		struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct buffer_reg_uid *reg_uid;
	struct buffer_reg_channel *reg_chan;

	assert(app);
	assert(usess);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app creating channel %s with per UID buffers", ua_chan->name);

	reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
	/*
	 * The session creation handles the creation of this global registry
	 * object. If none can be found, there is a code flow problem or a
	 * teardown race.
	 */
	assert(reg_uid);

	reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
			reg_uid);
	if (!reg_chan) {
		/* Create the buffer registry channel object. */
		ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &reg_chan);
		if (ret < 0) {
			goto error;
		}
		assert(reg_chan);

		/*
		 * Create the buffers on the consumer side. This call populates the
		 * ust app channel object with all streams and data object.
		 */
		ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
				app->bits_per_long, reg_uid->registry->reg.ust);
		if (ret < 0) {
			/*
			 * Let's remove the previously created buffer registry channel so
			 * it's not visible anymore in the session registry.
			 */
			ust_registry_channel_del_free(reg_uid->registry->reg.ust,
					ua_chan->tracing_channel_id);
			buffer_reg_channel_remove(reg_uid->registry, reg_chan);
			buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
			goto error;
		}

		/*
		 * Setup the streams and add it to the session registry.
		 */
		ret = setup_buffer_reg_channel(reg_uid->registry, ua_chan, reg_chan);
		if (ret < 0) {
			goto error;
		}

	}

	/* Send buffers to the application. */
	ret = send_channel_uid_to_ust(reg_chan, app, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

error:
	return ret;
}
2403
/*
 * Create and send to the application the created buffers with per PID buffers.
 *
 * Per-PID buffers are private to this application: the channel registry
 * entry, the consumer-side buffers and the channel are all created here for
 * this single app.
 *
 * Return 0 on success else a negative value.
 */
static int create_channel_per_pid(struct ust_app *app,
		struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct ust_registry_session *registry;

	assert(app);
	assert(usess);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app creating channel %s with per PID buffers", ua_chan->name);

	rcu_read_lock();

	registry = get_session_registry(ua_sess);
	assert(registry);

	/* Create and add a new channel registry to session. */
	ret = ust_registry_channel_add(registry, ua_chan->key);
	if (ret < 0) {
		goto error;
	}

	/* Create and get channel on the consumer side. */
	ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
			app->bits_per_long, registry);
	if (ret < 0) {
		goto error;
	}

	ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

error:
	rcu_read_unlock();
	return ret;
}
2450
2451 /*
2452 * From an already allocated ust app channel, create the channel buffers if
2453 * need and send it to the application. This MUST be called with a RCU read
2454 * side lock acquired.
2455 *
2456 * Return 0 on success or else a negative value.
2457 */
2458 static int do_create_channel(struct ust_app *app,
2459 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2460 struct ust_app_channel *ua_chan)
2461 {
2462 int ret;
2463
2464 assert(app);
2465 assert(usess);
2466 assert(ua_sess);
2467 assert(ua_chan);
2468
2469 /* Handle buffer type before sending the channel to the application. */
2470 switch (usess->buffer_type) {
2471 case LTTNG_BUFFER_PER_UID:
2472 {
2473 ret = create_channel_per_uid(app, usess, ua_sess, ua_chan);
2474 if (ret < 0) {
2475 goto error;
2476 }
2477 break;
2478 }
2479 case LTTNG_BUFFER_PER_PID:
2480 {
2481 ret = create_channel_per_pid(app, usess, ua_sess, ua_chan);
2482 if (ret < 0) {
2483 goto error;
2484 }
2485 break;
2486 }
2487 default:
2488 assert(0);
2489 ret = -EINVAL;
2490 goto error;
2491 }
2492
2493 /* Initialize ust objd object using the received handle and add it. */
2494 lttng_ht_node_init_ulong(&ua_chan->ust_objd_node, ua_chan->handle);
2495 lttng_ht_add_unique_ulong(app->ust_objd, &ua_chan->ust_objd_node);
2496
2497 /* If channel is not enabled, disable it on the tracer */
2498 if (!ua_chan->enabled) {
2499 ret = disable_ust_channel(app, ua_sess, ua_chan);
2500 if (ret < 0) {
2501 goto error;
2502 }
2503 }
2504
2505 error:
2506 return ret;
2507 }
2508
/*
 * Create UST app channel and create it on the tracer. Set ua_chanp of the
 * newly created channel if not NULL.
 *
 * Called with UST app session lock and RCU read-side lock held.
 *
 * Return 0 on success or else a negative value.
 */
static int create_ust_app_channel(struct ust_app_session *ua_sess,
		struct ltt_ust_channel *uchan, struct ust_app *app,
		enum lttng_ust_chan_type type, struct ltt_ust_session *usess,
		struct ust_app_channel **ua_chanp)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app_channel *ua_chan;

	/* Lookup channel in the ust app session */
	lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
	ua_chan_node = lttng_ht_iter_get_node_str(&iter);
	if (ua_chan_node != NULL) {
		/* Channel already exists for this app session; reuse it. */
		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
		goto end;
	}

	ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
	if (ua_chan == NULL) {
		/* Only malloc can fail here */
		ret = -ENOMEM;
		goto error_alloc;
	}
	shadow_copy_channel(ua_chan, uchan);

	/* Set channel type. */
	ua_chan->attr.type = type;

	/* Create the buffers and send the channel to the application. */
	ret = do_create_channel(app, usess, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

	DBG2("UST app create channel %s for PID %d completed", ua_chan->name,
			app->pid);

	/* Only add the channel if successful on the tracer side. */
	lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);

end:
	if (ua_chanp) {
		*ua_chanp = ua_chan;
	}

	/* Everything went well. */
	return 0;

error:
	/* Only close on the tracer side if the channel was actually sent. */
	delete_ust_app_channel(ua_chan->is_sent ? app->sock : -1, ua_chan, app);
error_alloc:
	return ret;
}
2570
/*
 * Create UST app event and create it on the tracer side.
 *
 * Called with ust app session mutex held.
 *
 * Return 0 on success, -EEXIST if the event already exists for this channel,
 * or another negative value on error.
 */
static
int create_ust_app_event(struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan, struct ltt_ust_event *uevent,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_event *ua_event;

	/*
	 * Get event node. NOTE(review): the lookup key here is name + filter +
	 * loglevel only; event exclusion data is not part of the match — confirm
	 * that find_ust_app_event/add_unique_ust_app_event account for
	 * exclusions.
	 */
	ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
			uevent->filter, uevent->attr.loglevel);
	if (ua_event != NULL) {
		ret = -EEXIST;
		goto end;
	}

	/* Does not exist so create one */
	ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
	if (ua_event == NULL) {
		/* Only malloc can failed so something is really wrong */
		ret = -ENOMEM;
		goto end;
	}
	shadow_copy_event(ua_event, uevent);

	/* Create it on the tracer side */
	ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
	if (ret < 0) {
		/* Not found previously means that it does not exist on the tracer */
		assert(ret != -LTTNG_UST_ERR_EXIST);
		goto error;
	}

	add_unique_ust_app_event(ua_chan, ua_event);

	DBG2("UST app create event %s for PID %d completed", ua_event->name,
			app->pid);

end:
	return ret;

error:
	/* Valid. Calling here is already in a read side lock */
	delete_ust_app_event(-1, ua_event);
	return ret;
}
2622
/*
 * Create UST metadata and open it on the tracer side.
 *
 * Called with UST app session lock held and RCU read side lock.
 *
 * Return 0 on success (including when metadata already exists or was closed)
 * else a negative value.
 */
static int create_ust_app_metadata(struct ust_app_session *ua_sess,
		struct ust_app *app, struct consumer_output *consumer,
		struct ustctl_consumer_channel_attr *attr)
{
	int ret = 0;
	struct ust_app_channel *metadata;
	struct consumer_socket *socket;
	struct ust_registry_session *registry;

	assert(ua_sess);
	assert(app);
	assert(consumer);

	registry = get_session_registry(ua_sess);
	assert(registry);

	/* Metadata already exists for this registry or it was closed previously */
	if (registry->metadata_key || registry->metadata_closed) {
		ret = 0;
		goto error;
	}

	/* Allocate UST metadata */
	metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, NULL);
	if (!metadata) {
		/* malloc() failed */
		ret = -ENOMEM;
		goto error;
	}

	if (!attr) {
		/* Set default attributes for metadata. */
		metadata->attr.overwrite = DEFAULT_CHANNEL_OVERWRITE;
		metadata->attr.subbuf_size = default_get_metadata_subbuf_size();
		metadata->attr.num_subbuf = DEFAULT_METADATA_SUBBUF_NUM;
		metadata->attr.switch_timer_interval = DEFAULT_METADATA_SWITCH_TIMER;
		metadata->attr.read_timer_interval = DEFAULT_METADATA_READ_TIMER;
		metadata->attr.output = LTTNG_UST_MMAP;
		metadata->attr.type = LTTNG_UST_CHAN_METADATA;
	} else {
		/* Caller-provided attributes; output and type are forced anyway. */
		memcpy(&metadata->attr, attr, sizeof(metadata->attr));
		metadata->attr.output = LTTNG_UST_MMAP;
		metadata->attr.type = LTTNG_UST_CHAN_METADATA;
	}

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create metadata");
		goto error;
	}

	/* Get the right consumer socket for the application. */
	socket = consumer_find_socket_by_bitness(app->bits_per_long, consumer);
	if (!socket) {
		ret = -EINVAL;
		goto error_consumer;
	}

	/*
	 * Keep metadata key so we can identify it on the consumer side. Assign it
	 * to the registry *before* we ask the consumer so we avoid the race of the
	 * consumer requesting the metadata and the ask_channel call on our side
	 * has not returned yet.
	 */
	registry->metadata_key = metadata->key;

	/*
	 * Ask the metadata channel creation to the consumer. The metadata object
	 * will be created by the consumer and kept there. However, the stream is
	 * never added or monitored until we do a first push metadata to the
	 * consumer.
	 */
	ret = ust_consumer_ask_channel(ua_sess, metadata, consumer, socket,
			registry);
	if (ret < 0) {
		/* Nullify the metadata key so we don't try to close it later on. */
		registry->metadata_key = 0;
		goto error_consumer;
	}

	/*
	 * The setup command will make the metadata stream be sent to the relayd,
	 * if applicable, and the thread managing the metadatas. This is important
	 * because after this point, if an error occurs, the only way the stream
	 * can be deleted is to be monitored in the consumer.
	 */
	ret = consumer_setup_metadata(socket, metadata->key);
	if (ret < 0) {
		/* Nullify the metadata key so we don't try to close it later on. */
		registry->metadata_key = 0;
		goto error_consumer;
	}

	DBG2("UST metadata with key %" PRIu64 " created for app pid %d",
			metadata->key, app->pid);

error_consumer:
	/*
	 * Reached on both success and failure: the fd reference and the local
	 * metadata channel object are always released here. The consumer holds
	 * the channel, identified from now on by registry->metadata_key.
	 */
	lttng_fd_put(LTTNG_FD_APPS, 1);
	delete_ust_app_channel(-1, metadata, app);
error:
	return ret;
}
2731
/*
 * Return pointer to traceable apps list.
 *
 * This is the global hash table of registered applications (looked up by PID
 * elsewhere in this file).
 */
struct lttng_ht *ust_app_get_ht(void)
{
	return ust_app_ht;
}
2739
2740 /*
2741 * Return ust app pointer or NULL if not found. RCU read side lock MUST be
2742 * acquired before calling this function.
2743 */
2744 struct ust_app *ust_app_find_by_pid(pid_t pid)
2745 {
2746 struct ust_app *app = NULL;
2747 struct lttng_ht_node_ulong *node;
2748 struct lttng_ht_iter iter;
2749
2750 lttng_ht_lookup(ust_app_ht, (void *)((unsigned long) pid), &iter);
2751 node = lttng_ht_iter_get_node_ulong(&iter);
2752 if (node == NULL) {
2753 DBG2("UST app no found with pid %d", pid);
2754 goto error;
2755 }
2756
2757 DBG2("Found UST app by pid %d", pid);
2758
2759 app = caa_container_of(node, struct ust_app, pid_n);
2760
2761 error:
2762 return app;
2763 }
2764
/*
 * Allocate and init an UST app object using the registration information and
 * the command socket. This is called when the command socket connects to the
 * session daemon.
 *
 * The object is returned on success or else NULL.
 */
struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
{
	struct ust_app *lta = NULL;

	assert(msg);
	assert(sock >= 0);

	DBG3("UST app creating application for socket %d", sock);

	/* Refuse registration when no consumerd of the app's bitness exists. */
	if ((msg->bits_per_long == 64 &&
				(uatomic_read(&ust_consumerd64_fd) == -EINVAL))
			|| (msg->bits_per_long == 32 &&
				(uatomic_read(&ust_consumerd32_fd) == -EINVAL))) {
		ERR("Registration failed: application \"%s\" (pid: %d) has "
				"%d-bit long, but no consumerd for this size is available.\n",
				msg->name, msg->pid, msg->bits_per_long);
		goto error;
	}

	lta = zmalloc(sizeof(struct ust_app));
	if (lta == NULL) {
		PERROR("malloc");
		goto error;
	}

	/* Credentials from the registration message. */
	lta->ppid = msg->ppid;
	lta->uid = msg->uid;
	lta->gid = msg->gid;

	/* ABI description of the application: bitness, alignments, endianness. */
	lta->bits_per_long = msg->bits_per_long;
	lta->uint8_t_alignment = msg->uint8_t_alignment;
	lta->uint16_t_alignment = msg->uint16_t_alignment;
	lta->uint32_t_alignment = msg->uint32_t_alignment;
	lta->uint64_t_alignment = msg->uint64_t_alignment;
	lta->long_alignment = msg->long_alignment;
	lta->byte_order = msg->byte_order;

	lta->v_major = msg->major;
	lta->v_minor = msg->minor;
	/*
	 * NOTE(review): lttng_ht_new() return values are unchecked; an
	 * allocation failure would leave NULL hash tables behind — confirm this
	 * cannot be hit or add error handling.
	 */
	lta->sessions = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	lta->ust_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	lta->notify_sock = -1;

	/* Copy name and make sure it's NULL terminated. */
	strncpy(lta->name, msg->name, sizeof(lta->name));
	lta->name[UST_APP_PROCNAME_LEN] = '\0';

	/*
	 * Before this can be called, when receiving the registration information,
	 * the application compatibility is checked. So, at this point, the
	 * application can work with this session daemon.
	 */
	lta->compatible = 1;

	lta->pid = msg->pid;
	lttng_ht_node_init_ulong(&lta->pid_n, (unsigned long) lta->pid);
	lta->sock = sock;
	lttng_ht_node_init_ulong(&lta->sock_n, (unsigned long) lta->sock);

	CDS_INIT_LIST_HEAD(&lta->teardown_head);

error:
	return lta;
}
2836
/*
 * For a given application object, add it to every hash table.
 *
 * The app's notify socket must already be set (asserted below).
 */
void ust_app_add(struct ust_app *app)
{
	assert(app);
	assert(app->notify_sock >= 0);

	rcu_read_lock();

	/*
	 * On a re-registration, we want to kick out the previous registration of
	 * that pid
	 */
	lttng_ht_add_replace_ulong(ust_app_ht, &app->pid_n);

	/*
	 * The socket _should_ be unique until _we_ call close. So, a add_unique
	 * for the ust_app_ht_by_sock is used which asserts fail if the entry was
	 * already in the table.
	 */
	lttng_ht_add_unique_ulong(ust_app_ht_by_sock, &app->sock_n);

	/* Add application to the notify socket hash table. */
	lttng_ht_node_init_ulong(&app->notify_sock_n, app->notify_sock);
	lttng_ht_add_unique_ulong(ust_app_ht_by_notify_sock, &app->notify_sock_n);

	DBG("App registered with pid:%d ppid:%d uid:%d gid:%d sock:%d name:%s "
			"notify_sock:%d (version %d.%d)", app->pid, app->ppid, app->uid,
			app->gid, app->sock, app->name, app->notify_sock, app->v_major,
			app->v_minor);

	rcu_read_unlock();
}
2871
2872 /*
2873 * Set the application version into the object.
2874 *
2875 * Return 0 on success else a negative value either an errno code or a
2876 * LTTng-UST error code.
2877 */
2878 int ust_app_version(struct ust_app *app)
2879 {
2880 int ret;
2881
2882 assert(app);
2883
2884 ret = ustctl_tracer_version(app->sock, &app->version);
2885 if (ret < 0) {
2886 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
2887 ERR("UST app %d verson failed with ret %d", app->sock, ret);
2888 } else {
2889 DBG3("UST app %d verion failed. Application is dead", app->sock);
2890 }
2891 }
2892
2893 return ret;
2894 }
2895
/*
 * Unregister app by removing it from the global traceable app list and freeing
 * the data struct.
 *
 * The socket is already closed at this point so no close to sock.
 */
void ust_app_unregister(int sock)
{
	struct ust_app *lta;
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;
	struct ust_app_session *ua_sess;
	int ret;

	rcu_read_lock();

	/* Get the node reference for a call_rcu */
	lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	assert(node);

	lta = caa_container_of(node, struct ust_app, sock_n);
	DBG("PID %d unregistering with sock %d", lta->pid, sock);

	/* Remove application from socket hash table. */
	ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
	assert(!ret);

	/*
	 * Remove application from notify hash table. The thread handling the
	 * notify socket could have deleted the node so ignore on error because
	 * either way it's valid. The close of that socket is handled by the other
	 * thread.
	 */
	iter.iter.node = &lta->notify_sock_n.node;
	(void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);

	/*
	 * Ignore return value since the node might have been removed before by an
	 * add replace during app registration because the PID can be reassigned by
	 * the OS.
	 */
	iter.iter.node = &lta->pid_n.node;
	ret = lttng_ht_del(ust_app_ht, &iter);
	if (ret) {
		DBG3("Unregister app by PID %d failed. This can happen on pid reuse",
				lta->pid);
	}

	/* Remove sessions so they are not visible during deletion.*/
	cds_lfht_for_each_entry(lta->sessions->ht, &iter.iter, ua_sess,
			node.node) {
		struct ust_registry_session *registry;

		ret = lttng_ht_del(lta->sessions, &iter);
		if (ret) {
			/* The session was already removed so scheduled for teardown. */
			continue;
		}

		/*
		 * Add session to list for teardown. This is safe since at this point we
		 * are the only one using this list.
		 */
		pthread_mutex_lock(&ua_sess->lock);

		/*
		 * Normally, this is done in the delete session process which is
		 * executed in the call rcu below. However, upon registration we can't
		 * afford to wait for the grace period before pushing data or else the
		 * data pending feature can race between the unregistration and stop
		 * command where the data pending command is sent *before* the grace
		 * period ended.
		 *
		 * The close metadata below nullifies the metadata pointer in the
		 * session so the delete session will NOT push/close a second time.
		 */
		registry = get_session_registry(ua_sess);
		if (registry && !registry->metadata_closed) {
			/* Push metadata for application before freeing the application. */
			(void) push_metadata(registry, ua_sess->consumer);

			/*
			 * Don't ask to close metadata for global per UID buffers. Close
			 * metadata only on destroy trace session in this case. Also, the
			 * previous push metadata could have flag the metadata registry to
			 * close so don't send a close command if closed.
			 */
			if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID &&
					!registry->metadata_closed) {
				/* And ask to close it for this session registry. */
				(void) close_metadata(registry, ua_sess->consumer);
			}
		}

		cds_list_add(&ua_sess->teardown_node, &lta->teardown_head);
		pthread_mutex_unlock(&ua_sess->lock);
	}

	/* Free memory after the RCU grace period. */
	call_rcu(&lta->pid_n.head, delete_ust_app_rcu);

	rcu_read_unlock();
	return;
}
3001
3002 /*
3003 * Return traceable_app_count
3004 */
3005 unsigned long ust_app_list_count(void)
3006 {
3007 unsigned long count;
3008
3009 rcu_read_lock();
3010 count = lttng_ht_get_count(ust_app_ht);
3011 rcu_read_unlock();
3012
3013 return count;
3014 }
3015
3016 /*
3017 * Fill events array with all events name of all registered apps.
3018 */
3019 int ust_app_list_events(struct lttng_event **events)
3020 {
3021 int ret, handle;
3022 size_t nbmem, count = 0;
3023 struct lttng_ht_iter iter;
3024 struct ust_app *app;
3025 struct lttng_event *tmp_event;
3026
3027 nbmem = UST_APP_EVENT_LIST_SIZE;
3028 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event));
3029 if (tmp_event == NULL) {
3030 PERROR("zmalloc ust app events");
3031 ret = -ENOMEM;
3032 goto error;
3033 }
3034
3035 rcu_read_lock();
3036
3037 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3038 struct lttng_ust_tracepoint_iter uiter;
3039
3040 health_code_update();
3041
3042 if (!app->compatible) {
3043 /*
3044 * TODO: In time, we should notice the caller of this error by
3045 * telling him that this is a version error.
3046 */
3047 continue;
3048 }
3049 handle = ustctl_tracepoint_list(app->sock);
3050 if (handle < 0) {
3051 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
3052 ERR("UST app list events getting handle failed for app pid %d",
3053 app->pid);
3054 }
3055 continue;
3056 }
3057
3058 while ((ret = ustctl_tracepoint_list_get(app->sock, handle,
3059 &uiter)) != -LTTNG_UST_ERR_NOENT) {
3060 /* Handle ustctl error. */
3061 if (ret < 0) {
3062 free(tmp_event);
3063 if (ret != -LTTNG_UST_ERR_EXITING || ret != -EPIPE) {
3064 ERR("UST app tp list get failed for app %d with ret %d",
3065 app->sock, ret);
3066 } else {
3067 DBG3("UST app tp list get failed. Application is dead");
3068 /*
3069 * This is normal behavior, an application can die during the
3070 * creation process. Don't report an error so the execution can
3071 * continue normally. Continue normal execution.
3072 */
3073 break;
3074 }
3075 goto rcu_error;
3076 }
3077
3078 health_code_update();
3079 if (count >= nbmem) {
3080 /* In case the realloc fails, we free the memory */
3081 void *ptr;
3082
3083 DBG2("Reallocating event list from %zu to %zu entries", nbmem,
3084 2 * nbmem);
3085 nbmem *= 2;
3086 ptr = realloc(tmp_event, nbmem * sizeof(struct lttng_event));
3087 if (ptr == NULL) {
3088 PERROR("realloc ust app events");
3089 free(tmp_event);
3090 ret = -ENOMEM;
3091 goto rcu_error;
3092 }
3093 tmp_event = ptr;
3094 }
3095 memcpy(tmp_event[count].name, uiter.name, LTTNG_UST_SYM_NAME_LEN);
3096 tmp_event[count].loglevel = uiter.loglevel;
3097 tmp_event[count].type = (enum lttng_event_type) LTTNG_UST_TRACEPOINT;
3098 tmp_event[count].pid = app->pid;
3099 tmp_event[count].enabled = -1;
3100 count++;
3101 }
3102 }
3103
3104 ret = count;
3105 *events = tmp_event;
3106
3107 DBG2("UST app list events done (%zu events)", count);
3108
3109 rcu_error:
3110 rcu_read_unlock();
3111 error:
3112 health_code_update();
3113 return ret;
3114 }
3115
3116 /*
3117 * Fill events array with all events name of all registered apps.
3118 */
3119 int ust_app_list_event_fields(struct lttng_event_field **fields)
3120 {
3121 int ret, handle;
3122 size_t nbmem, count = 0;
3123 struct lttng_ht_iter iter;
3124 struct ust_app *app;
3125 struct lttng_event_field *tmp_event;
3126
3127 nbmem = UST_APP_EVENT_LIST_SIZE;
3128 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event_field));
3129 if (tmp_event == NULL) {
3130 PERROR("zmalloc ust app event fields");
3131 ret = -ENOMEM;
3132 goto error;
3133 }
3134
3135 rcu_read_lock();
3136
3137 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3138 struct lttng_ust_field_iter uiter;
3139
3140 health_code_update();
3141
3142 if (!app->compatible) {
3143 /*
3144 * TODO: In time, we should notice the caller of this error by
3145 * telling him that this is a version error.
3146 */
3147 continue;
3148 }
3149 handle = ustctl_tracepoint_field_list(app->sock);
3150 if (handle < 0) {
3151 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
3152 ERR("UST app list field getting handle failed for app pid %d",
3153 app->pid);
3154 }
3155 continue;
3156 }
3157
3158 while ((ret = ustctl_tracepoint_field_list_get(app->sock, handle,
3159 &uiter)) != -LTTNG_UST_ERR_NOENT) {
3160 /* Handle ustctl error. */
3161 if (ret < 0) {
3162 free(tmp_event);
3163 if (ret != -LTTNG_UST_ERR_EXITING || ret != -EPIPE) {
3164 ERR("UST app tp list field failed for app %d with ret %d",
3165 app->sock, ret);
3166 } else {
3167 DBG3("UST app tp list field failed. Application is dead");
3168 /*
3169 * This is normal behavior, an application can die during the
3170 * creation process. Don't report an error so the execution can
3171 * continue normally.
3172 */
3173 break;
3174 }
3175 goto rcu_error;
3176 }
3177
3178 health_code_update();
3179 if (count >= nbmem) {
3180 /* In case the realloc fails, we free the memory */
3181 void *ptr;
3182
3183 DBG2("Reallocating event field list from %zu to %zu entries", nbmem,
3184 2 * nbmem);
3185 nbmem *= 2;
3186 ptr = realloc(tmp_event, nbmem * sizeof(struct lttng_event_field));
3187 if (ptr == NULL) {
3188 PERROR("realloc ust app event fields");
3189 free(tmp_event);
3190 ret = -ENOMEM;
3191 goto rcu_error;
3192 }
3193 tmp_event = ptr;
3194 }
3195
3196 memcpy(tmp_event[count].field_name, uiter.field_name, LTTNG_UST_SYM_NAME_LEN);
3197 tmp_event[count].type = uiter.type;
3198 tmp_event[count].nowrite = uiter.nowrite;
3199
3200 memcpy(tmp_event[count].event.name, uiter.event_name, LTTNG_UST_SYM_NAME_LEN);
3201 tmp_event[count].event.loglevel = uiter.loglevel;
3202 tmp_event[count].event.type = LTTNG_UST_TRACEPOINT;
3203 tmp_event[count].event.pid = app->pid;
3204 tmp_event[count].event.enabled = -1;
3205 count++;
3206 }
3207 }
3208
3209 ret = count;
3210 *fields = tmp_event;
3211
3212 DBG2("UST app list event fields done (%zu events)", count);
3213
3214 rcu_error:
3215 rcu_read_unlock();
3216 error:
3217 health_code_update();
3218 return ret;
3219 }
3220
3221 /*
3222 * Free and clean all traceable apps of the global list.
3223 *
3224 * Should _NOT_ be called with RCU read-side lock held.
3225 */
3226 void ust_app_clean_list(void)
3227 {
3228 int ret;
3229 struct ust_app *app;
3230 struct lttng_ht_iter iter;
3231
3232 DBG2("UST app cleaning registered apps hash table");
3233
3234 rcu_read_lock();
3235
3236 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3237 ret = lttng_ht_del(ust_app_ht, &iter);
3238 assert(!ret);
3239 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
3240 }
3241
3242 /* Cleanup socket hash table */
3243 cds_lfht_for_each_entry(ust_app_ht_by_sock->ht, &iter.iter, app,
3244 sock_n.node) {