lttng-tools: src/bin/lttng-sessiond/ust-app.c (commit: Add ust event exclusion UST ABI to internal copy)
1 /*
2 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License, version 2 only,
6 * as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License along
14 * with this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
16 */
17
18 #define _GNU_SOURCE
19 #include <errno.h>
20 #include <inttypes.h>
21 #include <pthread.h>
22 #include <stdio.h>
23 #include <stdlib.h>
24 #include <string.h>
25 #include <sys/stat.h>
26 #include <sys/types.h>
27 #include <unistd.h>
28 #include <urcu/compiler.h>
29 #include <lttng/ust-error.h>
30 #include <signal.h>
31
32 #include <common/common.h>
33 #include <common/sessiond-comm/sessiond-comm.h>
34
35 #include "buffer-registry.h"
36 #include "fd-limit.h"
37 #include "health-sessiond.h"
38 #include "ust-app.h"
39 #include "ust-consumer.h"
40 #include "ust-ctl.h"
41 #include "utils.h"
42
43 /* Next available channel key. Access under next_channel_key_lock. */
44 static uint64_t _next_channel_key;
45 static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;
46
47 /* Next available session ID. Access under next_session_id_lock. */
48 static uint64_t _next_session_id;
49 static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;
50
51 /*
52 * Return the incremented value of next_channel_key.
53 */
54 static uint64_t get_next_channel_key(void)
55 {
56 uint64_t ret;
57
58 pthread_mutex_lock(&next_channel_key_lock);
59 ret = ++_next_channel_key;
60 pthread_mutex_unlock(&next_channel_key_lock);
61 return ret;
62 }
63
64 /*
65 * Return the atomically incremented value of next_session_id.
66 */
67 static uint64_t get_next_session_id(void)
68 {
69 uint64_t ret;
70
71 pthread_mutex_lock(&next_session_id_lock);
72 ret = ++_next_session_id;
73 pthread_mutex_unlock(&next_session_id_lock);
74 return ret;
75 }
76
77 static void copy_channel_attr_to_ustctl(
78 struct ustctl_consumer_channel_attr *attr,
79 struct lttng_ust_channel_attr *uattr)
80 {
81 /* Copy channel attributes since the layout is different. */
82 attr->subbuf_size = uattr->subbuf_size;
83 attr->num_subbuf = uattr->num_subbuf;
84 attr->overwrite = uattr->overwrite;
85 attr->switch_timer_interval = uattr->switch_timer_interval;
86 attr->read_timer_interval = uattr->read_timer_interval;
87 attr->output = uattr->output;
88 }
89
90 /*
91 * Match function for the hash table lookup.
92 *
93 * It matches an ust app event based on four attributes: the event name, the
94 * filter bytecode, the loglevel and the exclusions.
95 */
96 static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
97 {
98 struct ust_app_event *event;
99 const struct ust_app_ht_key *key;
100
101 assert(node);
102 assert(_key);
103
104 event = caa_container_of(node, struct ust_app_event, node.node);
105 key = _key;
106
107 /* Match the 4 elements of the key: name, filter, loglevel, exclusions */
108
109 /* Event name */
110 if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
111 goto no_match;
112 }
113
114 /* Event loglevel. */
115 if (event->attr.loglevel != key->loglevel) {
116 if (event->attr.loglevel_type == LTTNG_UST_LOGLEVEL_ALL
117 && key->loglevel == 0 && event->attr.loglevel == -1) {
118 /*
119 * Match is accepted. On event creation, the loglevel is set to -1
120 * when the loglevel type is ALL, while the enable event received
121 * from the API carries 0, so both 0 and -1 are accepted for this
122 * loglevel type.
123 */
124 } else {
125 goto no_match;
126 }
127 }
128
129 /* Only one of the two filters is set: no match. */
130 if ((key->filter && !event->filter) || (!key->filter && event->filter)) {
131 goto no_match;
132 }
133
134 if (key->filter && event->filter) {
135 /* Both filters exist, check length followed by the bytecode. */
136 if (event->filter->len != key->filter->len ||
137 memcmp(event->filter->data, key->filter->data,
138 event->filter->len) != 0) {
139 goto no_match;
140 }
141 }
142
143 /* Only one of the two exclusion lists is set: no match. */
144 if ((key->exclusion && !event->exclusion) || (!key->exclusion && event->exclusion)) {
145 goto no_match;
146 }
147
148 if (key->exclusion && event->exclusion) {
149 /* Both exclusions exist, check count followed by the names. */
150 if (event->exclusion->count != key->exclusion->count ||
151 memcmp(event->exclusion->names, key->exclusion->names,
152 event->exclusion->count * LTTNG_UST_SYM_NAME_LEN) != 0) {
153 goto no_match;
154 }
155 }
156
157
158 /* Match. */
159 return 1;
160
161 no_match:
162 return 0;
163 }
164
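/*
 * Note: the hash is computed on the event name only, so events sharing a
 * name but differing in filter, loglevel or exclusions land in the same
 * bucket and are told apart by this match function (see
 * add_unique_ust_app_event() and find_ust_app_event() below).
 */
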
165 /*
166 * Unique add of an ust app event in the given ht. This uses the custom
167 * ht_match_ust_app_event match function and the event name as hash.
168 */
169 static void add_unique_ust_app_event(struct ust_app_channel *ua_chan,
170 struct ust_app_event *event)
171 {
172 struct cds_lfht_node *node_ptr;
173 struct ust_app_ht_key key;
174 struct lttng_ht *ht;
175
176 assert(ua_chan);
177 assert(ua_chan->events);
178 assert(event);
179
180 ht = ua_chan->events;
181 key.name = event->attr.name;
182 key.filter = event->filter;
183 key.loglevel = event->attr.loglevel;
184 key.exclusion = event->exclusion;
185
186 node_ptr = cds_lfht_add_unique(ht->ht,
187 ht->hash_fct(event->node.key, lttng_ht_seed),
188 ht_match_ust_app_event, &key, &event->node.node);
189 assert(node_ptr == &event->node.node);
190 }
191
192 /*
193 * Close the notify socket from the given RCU head object. This MUST be called
194 * through a call_rcu().
195 */
196 static void close_notify_sock_rcu(struct rcu_head *head)
197 {
198 int ret;
199 struct ust_app_notify_sock_obj *obj =
200 caa_container_of(head, struct ust_app_notify_sock_obj, head);
201
202 /* Must have a valid fd here. */
203 assert(obj->fd >= 0);
204
205 ret = close(obj->fd);
206 if (ret) {
207 ERR("close notify sock %d RCU", obj->fd);
208 }
209 lttng_fd_put(LTTNG_FD_APPS, 1);
210
211 free(obj);
212 }
213
214 /*
215 * Return the session registry according to the buffer type of the given
216 * session.
217 *
218 * A registry per UID or per PID object MUST exist before calling this function
219 * or else NULL is returned. RCU read side lock must be acquired.
220 */
221 static struct ust_registry_session *get_session_registry(
222 struct ust_app_session *ua_sess)
223 {
224 struct ust_registry_session *registry = NULL;
225
226 assert(ua_sess);
227
228 switch (ua_sess->buffer_type) {
229 case LTTNG_BUFFER_PER_PID:
230 {
231 struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
232 if (!reg_pid) {
233 goto error;
234 }
235 registry = reg_pid->registry->reg.ust;
236 break;
237 }
238 case LTTNG_BUFFER_PER_UID:
239 {
240 struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
241 ua_sess->tracing_id, ua_sess->bits_per_long, ua_sess->uid);
242 if (!reg_uid) {
243 goto error;
244 }
245 registry = reg_uid->registry->reg.ust;
246 break;
247 }
248 default:
249 assert(0);
250 };
251
252 error:
253 return registry;
254 }
255
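/*
 * Note: per-PID registries are keyed on the ust app session id only, while
 * per-UID registries are shared by every application with the same tracing
 * session id, bitness and uid (see setup_buffer_reg_pid() and
 * setup_buffer_reg_uid() below).
 */
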
256 /*
257 * Delete ust context safely. RCU read lock must be held before calling
258 * this function.
259 */
260 static
261 void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx)
262 {
263 int ret;
264
265 assert(ua_ctx);
266
267 if (ua_ctx->obj) {
268 ret = ustctl_release_object(sock, ua_ctx->obj);
269 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
270 ERR("UST app sock %d release ctx obj handle %d failed with ret %d",
271 sock, ua_ctx->obj->handle, ret);
272 }
273 free(ua_ctx->obj);
274 }
275 free(ua_ctx);
276 }
277
278 /*
279 * Delete ust app event safely. RCU read lock must be held before calling
280 * this function.
281 */
282 static
283 void delete_ust_app_event(int sock, struct ust_app_event *ua_event)
284 {
285 int ret;
286
287 assert(ua_event);
288
289 free(ua_event->filter);
290 free(ua_event->exclusion);
292 if (ua_event->obj != NULL) {
293 ret = ustctl_release_object(sock, ua_event->obj);
294 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
295 ERR("UST app sock %d release event obj failed with ret %d",
296 sock, ret);
297 }
298 free(ua_event->obj);
299 }
300 free(ua_event);
301 }
302
303 /*
304 * Release ust data object of the given stream.
305 *
306 * Return 0 on success or else a negative value.
307 */
308 static int release_ust_app_stream(int sock, struct ust_app_stream *stream)
309 {
310 int ret = 0;
311
312 assert(stream);
313
314 if (stream->obj) {
315 ret = ustctl_release_object(sock, stream->obj);
316 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
317 ERR("UST app sock %d release stream obj failed with ret %d",
318 sock, ret);
319 }
320 lttng_fd_put(LTTNG_FD_APPS, 2);
321 free(stream->obj);
322 }
323
324 return ret;
325 }
326
327 /*
328 * Delete ust app stream safely. RCU read lock must be held before calling
329 * this function.
330 */
331 static
332 void delete_ust_app_stream(int sock, struct ust_app_stream *stream)
333 {
334 assert(stream);
335
336 (void) release_ust_app_stream(sock, stream);
337 free(stream);
338 }
339
340 /*
341 * We need to execute ht_destroy outside of RCU read-side critical
342 * section and outside of call_rcu thread, so we postpone its execution
343 * using ht_cleanup_push. It is simpler than changing the semantics of
344 * the many callers of delete_ust_app_channel().
345 */
346 static
347 void delete_ust_app_channel_rcu(struct rcu_head *head)
348 {
349 struct ust_app_channel *ua_chan =
350 caa_container_of(head, struct ust_app_channel, rcu_head);
351
352 ht_cleanup_push(ua_chan->ctx);
353 ht_cleanup_push(ua_chan->events);
354 free(ua_chan);
355 }
356
357 /*
358 * Delete ust app channel safely. RCU read lock must be held before calling
359 * this function.
360 */
361 static
362 void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
363 struct ust_app *app)
364 {
365 int ret;
366 struct lttng_ht_iter iter;
367 struct ust_app_event *ua_event;
368 struct ust_app_ctx *ua_ctx;
369 struct ust_app_stream *stream, *stmp;
370 struct ust_registry_session *registry;
371
372 assert(ua_chan);
373
374 DBG3("UST app deleting channel %s", ua_chan->name);
375
376 /* Wipe stream */
377 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
378 cds_list_del(&stream->list);
379 delete_ust_app_stream(sock, stream);
380 }
381
382 /* Wipe context */
383 cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
384 cds_list_del(&ua_ctx->list);
385 ret = lttng_ht_del(ua_chan->ctx, &iter);
386 assert(!ret);
387 delete_ust_app_ctx(sock, ua_ctx);
388 }
389
390 /* Wipe events */
391 cds_lfht_for_each_entry(ua_chan->events->ht, &iter.iter, ua_event,
392 node.node) {
393 ret = lttng_ht_del(ua_chan->events, &iter);
394 assert(!ret);
395 delete_ust_app_event(sock, ua_event);
396 }
397
398 if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
399 /* Wipe and free registry from session registry. */
400 registry = get_session_registry(ua_chan->session);
401 if (registry) {
402 ust_registry_channel_del_free(registry, ua_chan->key);
403 }
404 }
405
406 if (ua_chan->obj != NULL) {
407 /* Remove channel from application UST object descriptor. */
408 iter.iter.node = &ua_chan->ust_objd_node.node;
409 lttng_ht_del(app->ust_objd, &iter);
410 ret = ustctl_release_object(sock, ua_chan->obj);
411 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
412 ERR("UST app sock %d release channel obj failed with ret %d",
413 sock, ret);
414 }
415 lttng_fd_put(LTTNG_FD_APPS, 1);
416 free(ua_chan->obj);
417 }
418 call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
419 }
420
421 /*
422 * Push metadata to consumer socket.
423 *
424 * The socket lock MUST be acquired.
425 * The ust app session lock MUST be acquired.
426 *
427 * On success, return the len of metadata pushed or else a negative value.
428 */
429 ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
430 struct consumer_socket *socket, int send_zero_data)
431 {
432 int ret;
433 char *metadata_str = NULL;
434 size_t len, offset;
435 ssize_t ret_val;
436
437 assert(registry);
438 assert(socket);
439
440 /*
441 * On a push metadata error either the consumer is dead or the metadata
442 * channel has been destroyed because its endpoint might have died (e.g:
443 * relayd). If so, the metadata closed flag is set to 1 so we deny pushing
444 * metadata again which is not valid anymore on the consumer side.
445 *
446 * The ust app session mutex locked allows us to make this check without
447 * the registry lock.
448 */
449 if (registry->metadata_closed) {
450 return -EPIPE;
451 }
452
453 pthread_mutex_lock(&registry->lock);
454
455 offset = registry->metadata_len_sent;
456 len = registry->metadata_len - registry->metadata_len_sent;
457 if (len == 0) {
458 DBG3("No metadata to push for metadata key %" PRIu64,
459 registry->metadata_key);
460 ret_val = len;
461 if (send_zero_data) {
462 DBG("No metadata to push");
463 goto push_data;
464 }
465 goto end;
466 }
467
468 /* Allocate only what we have to send. */
469 metadata_str = zmalloc(len);
470 if (!metadata_str) {
471 PERROR("zmalloc ust app metadata string");
472 ret_val = -ENOMEM;
473 goto error;
474 }
475 /* Copy what we haven't sent out yet. */
476 memcpy(metadata_str, registry->metadata + offset, len);
477 registry->metadata_len_sent += len;
478
479 push_data:
480 pthread_mutex_unlock(&registry->lock);
481 ret = consumer_push_metadata(socket, registry->metadata_key,
482 metadata_str, len, offset);
483 if (ret < 0) {
484 ret_val = ret;
485 goto error_push;
486 }
487
488 free(metadata_str);
489 return len;
490
491 end:
492 error:
493 pthread_mutex_unlock(&registry->lock);
494 error_push:
495 free(metadata_str);
496 return ret_val;
497 }
498
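/*
 * Minimal usage sketch (not from the original file): a caller answering an
 * explicit metadata request from the consumer holds the socket lock and
 * passes a non-zero send_zero_data so an empty reply is still sent when
 * there is nothing new to push. The helper name is hypothetical and the ust
 * app session lock is assumed to be held by the caller, as documented above.
 */
static ssize_t example_answer_metadata_request(
		struct ust_registry_session *registry,
		struct consumer_socket *socket)
{
	ssize_t ret;

	/* Serialize pushes on this consumer socket. */
	pthread_mutex_lock(socket->lock);
	ret = ust_app_push_metadata(registry, socket, 1);
	pthread_mutex_unlock(socket->lock);
	return ret;
}
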
499 /*
500 * For a given application and session, push metadata to consumer. The
501 * session lock MUST be acquired before calling this function. The consumer
502 * socket to use is looked up in the given consumer output according to the
503 * registry's bitness.
505 *
506 * Return 0 on success else a negative error.
507 */
508 static int push_metadata(struct ust_registry_session *registry,
509 struct consumer_output *consumer)
510 {
511 int ret_val;
512 ssize_t ret;
513 struct consumer_socket *socket;
514
515 assert(registry);
516 assert(consumer);
517
518 rcu_read_lock();
519
520 /*
521 * Means that no metadata was assigned to the session. This can happen if
522 * no start has been done previously.
523 */
524 if (!registry->metadata_key) {
525 ret_val = 0;
526 goto end_rcu_unlock;
527 }
528
529 /* Get consumer socket to use to push the metadata. */
530 socket = consumer_find_socket_by_bitness(registry->bits_per_long,
531 consumer);
532 if (!socket) {
533 ret_val = -1;
534 goto error_rcu_unlock;
535 }
536
537 /*
538 * TODO: Currently, we hold the socket lock around sampling of the next
539 * metadata segment to ensure we send metadata over the consumer socket in
540 * the correct order. This makes the registry lock nest inside the socket
541 * lock.
542 *
543 * Please note that this is a temporary measure: we should move this lock
544 * back into ust_consumer_push_metadata() when the consumer gets the
545 * ability to reorder the metadata it receives.
546 */
547 pthread_mutex_lock(socket->lock);
548 ret = ust_app_push_metadata(registry, socket, 0);
549 pthread_mutex_unlock(socket->lock);
550 if (ret < 0) {
551 ret_val = ret;
552 goto error_rcu_unlock;
553 }
554
555 rcu_read_unlock();
556 return 0;
557
558 error_rcu_unlock:
559 /*
560 * On error, flag the registry that the metadata is closed. We were unable
561 * to push anything and this means that either the consumer is not
562 * responding or the metadata cache has been destroyed on the consumer.
563 */
564 registry->metadata_closed = 1;
565 end_rcu_unlock:
566 rcu_read_unlock();
567 return ret_val;
568 }
569
570 /*
571 * Send to the consumer a close metadata command for the given session. Once
572 * done, the metadata channel is deleted and the session metadata pointer is
573 * nullified. The session lock MUST be acquired here unless the application is
574 * in the destroy path.
575 *
576 * Return 0 on success else a negative value.
577 */
578 static int close_metadata(struct ust_registry_session *registry,
579 struct consumer_output *consumer)
580 {
581 int ret;
582 struct consumer_socket *socket;
583
584 assert(registry);
585 assert(consumer);
586
587 rcu_read_lock();
588
589 if (!registry->metadata_key || registry->metadata_closed) {
590 ret = 0;
591 goto end;
592 }
593
594 /* Get consumer socket to use to push the metadata. */
595 socket = consumer_find_socket_by_bitness(registry->bits_per_long,
596 consumer);
597 if (!socket) {
598 ret = -1;
599 goto error;
600 }
601
602 ret = consumer_close_metadata(socket, registry->metadata_key);
603 if (ret < 0) {
604 goto error;
605 }
606
607 error:
608 /*
609 * Metadata closed. Even on error this means that the consumer is not
610 * responding or not found, so either way a second close should NOT be
611 * emitted for this registry.
612 */
613 registry->metadata_closed = 1;
614 end:
615 rcu_read_unlock();
616 return ret;
617 }
618
619 /*
620 * We need to execute ht_destroy outside of RCU read-side critical
621 * section and outside of call_rcu thread, so we postpone its execution
622 * using ht_cleanup_push. It is simpler than changing the semantics of
623 * the many callers of delete_ust_app_session().
624 */
625 static
626 void delete_ust_app_session_rcu(struct rcu_head *head)
627 {
628 struct ust_app_session *ua_sess =
629 caa_container_of(head, struct ust_app_session, rcu_head);
630
631 ht_cleanup_push(ua_sess->channels);
632 free(ua_sess);
633 }
634
635 /*
636 * Delete ust app session safely. RCU read lock must be held before calling
637 * this function.
638 */
639 static
640 void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
641 struct ust_app *app)
642 {
643 int ret;
644 struct lttng_ht_iter iter;
645 struct ust_app_channel *ua_chan;
646 struct ust_registry_session *registry;
647
648 assert(ua_sess);
649
650 pthread_mutex_lock(&ua_sess->lock);
651
652 registry = get_session_registry(ua_sess);
653 if (registry && !registry->metadata_closed) {
654 /* Push metadata for application before freeing the application. */
655 (void) push_metadata(registry, ua_sess->consumer);
656
657 /*
658 * Don't ask to close metadata for global per UID buffers. In that case,
659 * metadata is only closed when the trace session is destroyed. Also, the
660 * previous push metadata could have flagged the metadata registry as
661 * closed, so don't send a close command if it is already closed.
662 */
663 if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID &&
664 !registry->metadata_closed) {
665 /* And ask to close it for this session registry. */
666 (void) close_metadata(registry, ua_sess->consumer);
667 }
668 }
669
670 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
671 node.node) {
672 ret = lttng_ht_del(ua_sess->channels, &iter);
673 assert(!ret);
674 delete_ust_app_channel(sock, ua_chan, app);
675 }
676
677 /* In case of per PID, the registry is kept in the session. */
678 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
679 struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
680 if (reg_pid) {
681 buffer_reg_pid_remove(reg_pid);
682 buffer_reg_pid_destroy(reg_pid);
683 }
684 }
685
686 if (ua_sess->handle != -1) {
687 ret = ustctl_release_handle(sock, ua_sess->handle);
688 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
689 ERR("UST app sock %d release session handle failed with ret %d",
690 sock, ret);
691 }
692 }
693 pthread_mutex_unlock(&ua_sess->lock);
694
695 call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
696 }
697
698 /*
699 * Delete a traceable application structure from the global list. Never call
700 * this function outside of a call_rcu call.
701 *
702 * RCU read side lock should _NOT_ be held when calling this function.
703 */
704 static
705 void delete_ust_app(struct ust_app *app)
706 {
707 int ret, sock;
708 struct ust_app_session *ua_sess, *tmp_ua_sess;
709
710 /* Delete ust app sessions info */
711 sock = app->sock;
712 app->sock = -1;
713
714 /* Wipe sessions */
715 cds_list_for_each_entry_safe(ua_sess, tmp_ua_sess, &app->teardown_head,
716 teardown_node) {
717 /* Free every object in the session and the session. */
718 rcu_read_lock();
719 delete_ust_app_session(sock, ua_sess, app);
720 rcu_read_unlock();
721 }
722
723 ht_cleanup_push(app->sessions);
724 ht_cleanup_push(app->ust_objd);
725
726 /*
727 * Wait until we have deleted the application from the sock hash table
728 * before closing this socket, otherwise an application could re-use the
729 * socket ID and race with the teardown, using the same hash table entry.
730 *
731 * It's OK to leave the close in call_rcu. We want it to stay unique for
732 * all RCU readers that could run concurrently with unregister app,
733 * therefore we _need_ to only close that socket after a grace period. So
734 * it should stay in this RCU callback.
735 *
736 * This close() is a very important step of the synchronization model so
737 * every modification to this function must be carefully reviewed.
738 */
739 ret = close(sock);
740 if (ret) {
741 PERROR("close");
742 }
743 lttng_fd_put(LTTNG_FD_APPS, 1);
744
745 DBG2("UST app pid %d deleted", app->pid);
746 free(app);
747 }
748
749 /*
750 * URCU intermediate call to delete an UST app.
751 */
752 static
753 void delete_ust_app_rcu(struct rcu_head *head)
754 {
755 struct lttng_ht_node_ulong *node =
756 caa_container_of(head, struct lttng_ht_node_ulong, head);
757 struct ust_app *app =
758 caa_container_of(node, struct ust_app, pid_n);
759
760 DBG3("Call RCU deleting app PID %d", app->pid);
761 delete_ust_app(app);
762 }
763
764 /*
765 * Delete the session from the application ht and delete the data structure by
766 * freeing every object inside and releasing them.
767 */
768 static void destroy_app_session(struct ust_app *app,
769 struct ust_app_session *ua_sess)
770 {
771 int ret;
772 struct lttng_ht_iter iter;
773
774 assert(app);
775 assert(ua_sess);
776
777 iter.iter.node = &ua_sess->node.node;
778 ret = lttng_ht_del(app->sessions, &iter);
779 if (ret) {
780 /* Already scheduled for teardown. */
781 goto end;
782 }
783
784 /* Once deleted, free the data structure. */
785 delete_ust_app_session(app->sock, ua_sess, app);
786
787 end:
788 return;
789 }
790
791 /*
792 * Alloc new UST app session.
793 */
794 static
795 struct ust_app_session *alloc_ust_app_session(struct ust_app *app)
796 {
797 struct ust_app_session *ua_sess;
798
799 /* Init most of the default values by allocating and zeroing */
800 ua_sess = zmalloc(sizeof(struct ust_app_session));
801 if (ua_sess == NULL) {
802 PERROR("malloc");
803 goto error_free;
804 }
805
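/* A handle of -1 means the tracer-side session has not been created yet; see create_ust_app_session(). */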
806 ua_sess->handle = -1;
807 ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
808 pthread_mutex_init(&ua_sess->lock, NULL);
809
810 return ua_sess;
811
812 error_free:
813 return NULL;
814 }
815
816 /*
817 * Alloc new UST app channel.
818 */
819 static
820 struct ust_app_channel *alloc_ust_app_channel(char *name,
821 struct ust_app_session *ua_sess,
822 struct lttng_ust_channel_attr *attr)
823 {
824 struct ust_app_channel *ua_chan;
825
826 /* Init most of the default values by allocating and zeroing */
827 ua_chan = zmalloc(sizeof(struct ust_app_channel));
828 if (ua_chan == NULL) {
829 PERROR("malloc");
830 goto error;
831 }
832
833 /* Setup channel name */
834 strncpy(ua_chan->name, name, sizeof(ua_chan->name));
835 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
836
837 ua_chan->enabled = 1;
838 ua_chan->handle = -1;
839 ua_chan->session = ua_sess;
840 ua_chan->key = get_next_channel_key();
841 ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
842 ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
843 lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);
844
845 CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
846 CDS_INIT_LIST_HEAD(&ua_chan->ctx_list);
847
848 /* Copy attributes */
849 if (attr) {
850 /* Translate from lttng_ust_channel to ustctl_consumer_channel_attr. */
851 ua_chan->attr.subbuf_size = attr->subbuf_size;
852 ua_chan->attr.num_subbuf = attr->num_subbuf;
853 ua_chan->attr.overwrite = attr->overwrite;
854 ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
855 ua_chan->attr.read_timer_interval = attr->read_timer_interval;
856 ua_chan->attr.output = attr->output;
857 }
858 /* By default, the channel is a per cpu channel. */
859 ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
860
861 DBG3("UST app channel %s allocated", ua_chan->name);
862
863 return ua_chan;
864
865 error:
866 return NULL;
867 }
868
869 /*
870 * Allocate and initialize a UST app stream.
871 *
872 * Return newly allocated stream pointer or NULL on error.
873 */
874 struct ust_app_stream *ust_app_alloc_stream(void)
875 {
876 struct ust_app_stream *stream = NULL;
877
878 stream = zmalloc(sizeof(*stream));
879 if (stream == NULL) {
880 PERROR("zmalloc ust app stream");
881 goto error;
882 }
883
884 /* Zero could be a valid value for a handle so set it to -1. */
885 stream->handle = -1;
886
887 error:
888 return stream;
889 }
890
891 /*
892 * Alloc new UST app event.
893 */
894 static
895 struct ust_app_event *alloc_ust_app_event(char *name,
896 struct lttng_ust_event *attr)
897 {
898 struct ust_app_event *ua_event;
899
900 /* Init most of the default values by allocating and zeroing */
901 ua_event = zmalloc(sizeof(struct ust_app_event));
902 if (ua_event == NULL) {
903 PERROR("malloc");
904 goto error;
905 }
906
907 ua_event->enabled = 1;
908 strncpy(ua_event->name, name, sizeof(ua_event->name));
909 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
910 lttng_ht_node_init_str(&ua_event->node, ua_event->name);
911
912 /* Copy attributes */
913 if (attr) {
914 memcpy(&ua_event->attr, attr, sizeof(ua_event->attr));
915 }
916
917 DBG3("UST app event %s allocated", ua_event->name);
918
919 return ua_event;
920
921 error:
922 return NULL;
923 }
924
925 /*
926 * Alloc new UST app context.
927 */
928 static
929 struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context *uctx)
930 {
931 struct ust_app_ctx *ua_ctx;
932
933 ua_ctx = zmalloc(sizeof(struct ust_app_ctx));
934 if (ua_ctx == NULL) {
935 goto error;
936 }
937
938 CDS_INIT_LIST_HEAD(&ua_ctx->list);
939
940 if (uctx) {
941 memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
942 }
943
944 DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);
945
946 error:
947 return ua_ctx;
948 }
949
950 /*
951 * Allocate a filter and copy the given original filter.
952 *
953 * Return allocated filter or NULL on error.
954 */
955 static struct lttng_ust_filter_bytecode *alloc_copy_ust_app_filter(
956 struct lttng_ust_filter_bytecode *orig_f)
957 {
958 struct lttng_ust_filter_bytecode *filter = NULL;
959
960 /* Copy filter bytecode */
961 filter = zmalloc(sizeof(*filter) + orig_f->len);
962 if (!filter) {
963 PERROR("zmalloc alloc ust app filter");
964 goto error;
965 }
966
967 memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
968
969 error:
970 return filter;
971 }
972
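/*
 * Minimal illustrative sketch (not from the original file): the filter
 * bytecode is a fixed header followed by `len` bytes of bytecode stored
 * inline, which is why the copy above is a single allocation and memcpy of
 * header plus payload. Building one from a raw bytecode buffer could look
 * like this; the helper name is hypothetical and only the fields used in
 * this file are filled in.
 */
static struct lttng_ust_filter_bytecode *example_filter_from_buffer(
		const void *buf, uint32_t len)
{
	struct lttng_ust_filter_bytecode *filter;

	filter = zmalloc(sizeof(*filter) + len);
	if (!filter) {
		return NULL;
	}
	filter->len = len;
	memcpy(filter->data, buf, len);
	return filter;
}
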
973 /*
974 * Find an ust_app using the sock and return it. RCU read side lock must be
975 * held before calling this helper function.
976 */
977 struct ust_app *ust_app_find_by_sock(int sock)
978 {
979 struct lttng_ht_node_ulong *node;
980 struct lttng_ht_iter iter;
981
982 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
983 node = lttng_ht_iter_get_node_ulong(&iter);
984 if (node == NULL) {
985 DBG2("UST app find by sock %d not found", sock);
986 goto error;
987 }
988
989 return caa_container_of(node, struct ust_app, sock_n);
990
991 error:
992 return NULL;
993 }
994
995 /*
996 * Find an ust_app using the notify sock and return it. RCU read side lock must
997 * be held before calling this helper function.
998 */
999 static struct ust_app *find_app_by_notify_sock(int sock)
1000 {
1001 struct lttng_ht_node_ulong *node;
1002 struct lttng_ht_iter iter;
1003
1004 lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
1005 &iter);
1006 node = lttng_ht_iter_get_node_ulong(&iter);
1007 if (node == NULL) {
1008 DBG2("UST app find by notify sock %d not found", sock);
1009 goto error;
1010 }
1011
1012 return caa_container_of(node, struct ust_app, notify_sock_n);
1013
1014 error:
1015 return NULL;
1016 }
1017
1018 /*
1019 * Lookup for an ust app event based on event name, filter bytecode,
1020 * loglevel and exclusions.
1021 *
1022 * Return an ust_app_event object or NULL on error.
1023 */
1024 static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
1025 char *name, struct lttng_ust_filter_bytecode *filter, int loglevel,
1026 const struct lttng_event_exclusion *exclusion)
1027 {
1028 struct lttng_ht_iter iter;
1029 struct lttng_ht_node_str *node;
1030 struct ust_app_event *event = NULL;
1031 struct ust_app_ht_key key;
1032
1033 assert(name);
1034 assert(ht);
1035
1036 /* Setup key for event lookup. */
1037 key.name = name;
1038 key.filter = filter;
1039 key.loglevel = loglevel;
1040 /* lttng_event_exclusion and lttng_ust_event_exclusion structures are similar */
1041 key.exclusion = (struct lttng_ust_event_exclusion *)exclusion;
1042
1043 /* Lookup using the event name as hash and a custom match fct. */
1044 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
1045 ht_match_ust_app_event, &key, &iter.iter);
1046 node = lttng_ht_iter_get_node_str(&iter);
1047 if (node == NULL) {
1048 goto end;
1049 }
1050
1051 event = caa_container_of(node, struct ust_app_event, node);
1052
1053 end:
1054 return event;
1055 }
1056
1057 /*
1058 * Create the channel context on the tracer.
1059 *
1060 * Called with UST app session lock held.
1061 */
1062 static
1063 int create_ust_channel_context(struct ust_app_channel *ua_chan,
1064 struct ust_app_ctx *ua_ctx, struct ust_app *app)
1065 {
1066 int ret;
1067
1068 health_code_update();
1069
1070 ret = ustctl_add_context(app->sock, &ua_ctx->ctx,
1071 ua_chan->obj, &ua_ctx->obj);
1072 if (ret < 0) {
1073 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1074 ERR("UST app create channel context failed for app (pid: %d) "
1075 "with ret %d", app->pid, ret);
1076 } else {
1077 /*
1078 * This is normal behavior, an application can die during the
1079 * creation process. Don't report an error so the execution can
1080 * continue normally.
1081 */
1082 ret = 0;
1083 DBG3("UST app disable event failed. Application is dead.");
1084 }
1085 goto error;
1086 }
1087
1088 ua_ctx->handle = ua_ctx->obj->handle;
1089
1090 DBG2("UST app context handle %d created successfully for channel %s",
1091 ua_ctx->handle, ua_chan->name);
1092
1093 error:
1094 health_code_update();
1095 return ret;
1096 }
1097
1098 /*
1099 * Set the filter on the tracer.
1100 */
1101 static
1102 int set_ust_event_filter(struct ust_app_event *ua_event,
1103 struct ust_app *app)
1104 {
1105 int ret;
1106
1107 health_code_update();
1108
1109 if (!ua_event->filter) {
1110 ret = 0;
1111 goto error;
1112 }
1113
1114 ret = ustctl_set_filter(app->sock, ua_event->filter,
1115 ua_event->obj);
1116 if (ret < 0) {
1117 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1118 ERR("UST app event %s filter failed for app (pid: %d) "
1119 "with ret %d", ua_event->attr.name, app->pid, ret);
1120 } else {
1121 /*
1122 * This is normal behavior, an application can die during the
1123 * creation process. Don't report an error so the execution can
1124 * continue normally.
1125 */
1126 ret = 0;
1127 DBG3("UST app filter event failed. Application is dead.");
1128 }
1129 goto error;
1130 }
1131
1132 DBG2("UST filter set successfully for event %s", ua_event->name);
1133
1134 error:
1135 health_code_update();
1136 return ret;
1137 }
1138
1139 /*
1140 * Set event exclusions on the tracer.
1141 */
1142 static
1143 int set_ust_event_exclusion(struct ust_app_event *ua_event,
1144 struct ust_app *app)
1145 {
1146 int ret;
1147
1148 health_code_update();
1149
1150 if (!ua_event->exclusion || !ua_event->exclusion->count) {
1151 ret = 0;
1152 goto error;
1153 }
1154
1155 ret = ustctl_set_exclusion(app->sock, ua_event->exclusion,
1156 ua_event->obj);
1157 if (ret < 0) {
1158 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1159 ERR("UST app event %s exclusions failed for app (pid: %d) "
1160 "with ret %d", ua_event->attr.name, app->pid, ret);
1161 } else {
1162 /*
1163 * This is normal behavior, an application can die during the
1164 * creation process. Don't report an error so the execution can
1165 * continue normally.
1166 */
1167 ret = 0;
1168 DBG3("UST app event exclusion failed. Application is dead.");
1169 }
1170 goto error;
1171 }
1172
1173 DBG2("UST exclusion set successfully for event %s", ua_event->name);
1174
1175 error:
1176 health_code_update();
1177 return ret;
1178 }
1179
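/*
 * Minimal illustrative sketch (not from the original file): an exclusion
 * object is a count followed by `count` names of LTTNG_UST_SYM_NAME_LEN
 * bytes each, hence the size computation also used by shadow_copy_event()
 * below. The helper and the excluded names are hypothetical.
 */
static struct lttng_ust_event_exclusion *example_build_exclusion(void)
{
	const char *names[] = { "my_provider:debug*", "my_provider:internal*" };
	size_t i, count = sizeof(names) / sizeof(*names);
	struct lttng_ust_event_exclusion *exclusion;

	exclusion = zmalloc(sizeof(*exclusion) + count * LTTNG_UST_SYM_NAME_LEN);
	if (!exclusion) {
		return NULL;
	}
	exclusion->count = count;
	for (i = 0; i < count; i++) {
		char *dst = (char *) exclusion->names + i * LTTNG_UST_SYM_NAME_LEN;

		strncpy(dst, names[i], LTTNG_UST_SYM_NAME_LEN);
		dst[LTTNG_UST_SYM_NAME_LEN - 1] = '\0';
	}
	return exclusion;
}
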
1180 /*
1181 * Disable the specified event on the UST tracer for the UST session.
1182 */
1183 static int disable_ust_event(struct ust_app *app,
1184 struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
1185 {
1186 int ret;
1187
1188 health_code_update();
1189
1190 ret = ustctl_disable(app->sock, ua_event->obj);
1191 if (ret < 0) {
1192 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1193 ERR("UST app event %s disable failed for app (pid: %d) "
1194 "and session handle %d with ret %d",
1195 ua_event->attr.name, app->pid, ua_sess->handle, ret);
1196 } else {
1197 /*
1198 * This is normal behavior, an application can die during the
1199 * creation process. Don't report an error so the execution can
1200 * continue normally.
1201 */
1202 ret = 0;
1203 DBG3("UST app disable event failed. Application is dead.");
1204 }
1205 goto error;
1206 }
1207
1208 DBG2("UST app event %s disabled successfully for app (pid: %d)",
1209 ua_event->attr.name, app->pid);
1210
1211 error:
1212 health_code_update();
1213 return ret;
1214 }
1215
1216 /*
1217 * Disable the specified channel on the UST tracer for the UST session.
1218 */
1219 static int disable_ust_channel(struct ust_app *app,
1220 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1221 {
1222 int ret;
1223
1224 health_code_update();
1225
1226 ret = ustctl_disable(app->sock, ua_chan->obj);
1227 if (ret < 0) {
1228 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1229 ERR("UST app channel %s disable failed for app (pid: %d) "
1230 "and session handle %d with ret %d",
1231 ua_chan->name, app->pid, ua_sess->handle, ret);
1232 } else {
1233 /*
1234 * This is normal behavior, an application can die during the
1235 * creation process. Don't report an error so the execution can
1236 * continue normally.
1237 */
1238 ret = 0;
1239 DBG3("UST app disable channel failed. Application is dead.");
1240 }
1241 goto error;
1242 }
1243
1244 DBG2("UST app channel %s disabled successfully for app (pid: %d)",
1245 ua_chan->name, app->pid);
1246
1247 error:
1248 health_code_update();
1249 return ret;
1250 }
1251
1252 /*
1253 * Enable the specified channel on the UST tracer for the UST session.
1254 */
1255 static int enable_ust_channel(struct ust_app *app,
1256 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1257 {
1258 int ret;
1259
1260 health_code_update();
1261
1262 ret = ustctl_enable(app->sock, ua_chan->obj);
1263 if (ret < 0) {
1264 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1265 ERR("UST app channel %s enable failed for app (pid: %d) "
1266 "and session handle %d with ret %d",
1267 ua_chan->name, app->pid, ua_sess->handle, ret);
1268 } else {
1269 /*
1270 * This is normal behavior, an application can die during the
1271 * creation process. Don't report an error so the execution can
1272 * continue normally.
1273 */
1274 ret = 0;
1275 DBG3("UST app enable channel failed. Application is dead.");
1276 }
1277 goto error;
1278 }
1279
1280 ua_chan->enabled = 1;
1281
1282 DBG2("UST app channel %s enabled successfully for app (pid: %d)",
1283 ua_chan->name, app->pid);
1284
1285 error:
1286 health_code_update();
1287 return ret;
1288 }
1289
1290 /*
1291 * Enable the specified event on the UST tracer for the UST session.
1292 */
1293 static int enable_ust_event(struct ust_app *app,
1294 struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
1295 {
1296 int ret;
1297
1298 health_code_update();
1299
1300 ret = ustctl_enable(app->sock, ua_event->obj);
1301 if (ret < 0) {
1302 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1303 ERR("UST app event %s enable failed for app (pid: %d) "
1304 "and session handle %d with ret %d",
1305 ua_event->attr.name, app->pid, ua_sess->handle, ret);
1306 } else {
1307 /*
1308 * This is normal behavior, an application can die during the
1309 * creation process. Don't report an error so the execution can
1310 * continue normally.
1311 */
1312 ret = 0;
1313 DBG3("UST app enable event failed. Application is dead.");
1314 }
1315 goto error;
1316 }
1317
1318 DBG2("UST app event %s enabled successfully for app (pid: %d)",
1319 ua_event->attr.name, app->pid);
1320
1321 error:
1322 health_code_update();
1323 return ret;
1324 }
1325
1326 /*
1327 * Send channel and stream buffer to application.
1328 *
1329 * Return 0 on success. On error, a negative value is returned.
1330 */
1331 static int send_channel_pid_to_ust(struct ust_app *app,
1332 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1333 {
1334 int ret;
1335 struct ust_app_stream *stream, *stmp;
1336
1337 assert(app);
1338 assert(ua_sess);
1339 assert(ua_chan);
1340
1341 health_code_update();
1342
1343 DBG("UST app sending channel %s to UST app sock %d", ua_chan->name,
1344 app->sock);
1345
1346 /* Send channel to the application. */
1347 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
1348 if (ret < 0) {
1349 goto error;
1350 }
1351
1352 health_code_update();
1353
1354 /* Send all streams to application. */
1355 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
1356 ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
1357 if (ret < 0) {
1358 goto error;
1359 }
1360 /* We don't need the stream anymore once sent to the tracer. */
1361 cds_list_del(&stream->list);
1362 delete_ust_app_stream(-1, stream);
1363 }
1364 /* Flag the channel that it is sent to the application. */
1365 ua_chan->is_sent = 1;
1366
1367 error:
1368 health_code_update();
1369 return ret;
1370 }
1371
1372 /*
1373 * Create the specified event onto the UST tracer for a UST session.
1374 *
1375 * Should be called with session mutex held.
1376 */
1377 static
1378 int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
1379 struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
1380 {
1381 int ret = 0;
1382
1383 health_code_update();
1384
1385 /* Create UST event on tracer */
1386 ret = ustctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
1387 &ua_event->obj);
1388 if (ret < 0) {
1389 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1390 ERR("Error ustctl create event %s for app pid: %d with ret %d",
1391 ua_event->attr.name, app->pid, ret);
1392 } else {
1393 /*
1394 * This is normal behavior, an application can die during the
1395 * creation process. Don't report an error so the execution can
1396 * continue normally.
1397 */
1398 ret = 0;
1399 DBG3("UST app create event failed. Application is dead.");
1400 }
1401 goto error;
1402 }
1403
1404 ua_event->handle = ua_event->obj->handle;
1405
1406 DBG2("UST app event %s created successfully for pid:%d",
1407 ua_event->attr.name, app->pid);
1408
1409 health_code_update();
1410
1411 /* Set filter if one is present. */
1412 if (ua_event->filter) {
1413 ret = set_ust_event_filter(ua_event, app);
1414 if (ret < 0) {
1415 goto error;
1416 }
1417 }
1418
1419 /* Set exclusions for the event */
1420 if (ua_event->exclusion) {
1421 ret = set_ust_event_exclusion(ua_event, app);
1422 if (ret < 0) {
1423 goto error;
1424 }
1425 }
1426
1427 /* If event not enabled, disable it on the tracer */
1428 if (ua_event->enabled == 0) {
1429 ret = disable_ust_event(app, ua_sess, ua_event);
1430 if (ret < 0) {
1431 /*
1432 * If we hit an EPERM, something is wrong with our disable call. If
1433 * we get an EEXIST, there is a problem on the tracer side since we
1434 * just created it.
1435 */
1436 switch (ret) {
1437 case -LTTNG_UST_ERR_PERM:
1438 /* Code flow problem */
1439 assert(0);
1440 case -LTTNG_UST_ERR_EXIST:
1441 /* It's OK for our use case. */
1442 ret = 0;
1443 break;
1444 default:
1445 break;
1446 }
1447 goto error;
1448 }
1449 }
1450
1451 error:
1452 health_code_update();
1453 return ret;
1454 }
1455
1456 /*
1457 * Copy data between an UST app event and a LTT event.
1458 */
1459 static void shadow_copy_event(struct ust_app_event *ua_event,
1460 struct ltt_ust_event *uevent)
1461 {
1462 size_t exclusion_alloc_size;
1463
1464 strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
1465 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
1466
1467 ua_event->enabled = uevent->enabled;
1468
1469 /* Copy event attributes */
1470 memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));
1471
1472 /* Copy filter bytecode */
1473 if (uevent->filter) {
1474 ua_event->filter = alloc_copy_ust_app_filter(uevent->filter);
1475 /* Filter might be NULL here in case of ENOMEM. */
1476 }
1477
1478 /* Copy exclusion data */
1479 if (uevent->exclusion) {
1480 exclusion_alloc_size = sizeof(struct lttng_ust_event_exclusion) +
1481 LTTNG_UST_SYM_NAME_LEN * uevent->exclusion->count;
1482 ua_event->exclusion = zmalloc(exclusion_alloc_size);
1483 if (ua_event->exclusion) {
1484 memcpy(ua_event->exclusion, uevent->exclusion, exclusion_alloc_size);
1485 }
1486 }
1487 }
1488
1489 /*
1490 * Copy data between an UST app channel and a LTT channel.
1491 */
1492 static void shadow_copy_channel(struct ust_app_channel *ua_chan,
1493 struct ltt_ust_channel *uchan)
1494 {
1495 struct lttng_ht_iter iter;
1496 struct ltt_ust_event *uevent;
1497 struct ltt_ust_context *uctx;
1498 struct ust_app_event *ua_event;
1499 struct ust_app_ctx *ua_ctx;
1500
1501 DBG2("UST app shadow copy of channel %s started", ua_chan->name);
1502
1503 strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
1504 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
1505
1506 ua_chan->tracefile_size = uchan->tracefile_size;
1507 ua_chan->tracefile_count = uchan->tracefile_count;
1508
1509 /* Copy event attributes since the layout is different. */
1510 ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
1511 ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
1512 ua_chan->attr.overwrite = uchan->attr.overwrite;
1513 ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
1514 ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
1515 ua_chan->attr.output = uchan->attr.output;
1516 /*
1517 * Note that the attribute channel type is not set since the channel on the
1518 * tracing registry side does not have this information.
1519 */
1520
1521 ua_chan->enabled = uchan->enabled;
1522 ua_chan->tracing_channel_id = uchan->id;
1523
1524 cds_list_for_each_entry(uctx, &uchan->ctx_list, list) {
1525 ua_ctx = alloc_ust_app_ctx(&uctx->ctx);
1526 if (ua_ctx == NULL) {
1527 continue;
1528 }
1529 lttng_ht_node_init_ulong(&ua_ctx->node,
1530 (unsigned long) ua_ctx->ctx.ctx);
1531 lttng_ht_add_unique_ulong(ua_chan->ctx, &ua_ctx->node);
1532 cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
1533 }
1534
1535 /* Copy all events from ltt ust channel to ust app channel */
1536 cds_lfht_for_each_entry(uchan->events->ht, &iter.iter, uevent, node.node) {
1537 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
1538 uevent->filter, uevent->attr.loglevel, uevent->exclusion);
1539 if (ua_event == NULL) {
1540 DBG2("UST event %s not found on shadow copy channel",
1541 uevent->attr.name);
1542 ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
1543 if (ua_event == NULL) {
1544 continue;
1545 }
1546 shadow_copy_event(ua_event, uevent);
1547 add_unique_ust_app_event(ua_chan, ua_event);
1548 }
1549 }
1550
1551 DBG3("UST app shadow copy of channel %s done", ua_chan->name);
1552 }
1553
1554 /*
1555 * Copy data between a UST app session and a regular LTT session.
1556 */
1557 static void shadow_copy_session(struct ust_app_session *ua_sess,
1558 struct ltt_ust_session *usess, struct ust_app *app)
1559 {
1560 struct lttng_ht_node_str *ua_chan_node;
1561 struct lttng_ht_iter iter;
1562 struct ltt_ust_channel *uchan;
1563 struct ust_app_channel *ua_chan;
1564 time_t rawtime;
1565 struct tm *timeinfo;
1566 char datetime[16];
1567 int ret;
1568
1569 /* Get date and time for unique app path */
1570 time(&rawtime);
1571 timeinfo = localtime(&rawtime);
1572 strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);
1573
1574 DBG2("Shadow copy of session handle %d", ua_sess->handle);
1575
1576 ua_sess->tracing_id = usess->id;
1577 ua_sess->id = get_next_session_id();
1578 ua_sess->uid = app->uid;
1579 ua_sess->gid = app->gid;
1580 ua_sess->euid = usess->uid;
1581 ua_sess->egid = usess->gid;
1582 ua_sess->buffer_type = usess->buffer_type;
1583 ua_sess->bits_per_long = app->bits_per_long;
1584 /* There is only one consumer object per session possible. */
1585 ua_sess->consumer = usess->consumer;
1586 ua_sess->output_traces = usess->output_traces;
1587 ua_sess->live_timer_interval = usess->live_timer_interval;
1588
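/*
 * Per-PID buffers get an application-unique path (<name>-<pid>-<datetime>)
 * while per-UID buffers share a path derived from the uid and the
 * application's bitness.
 */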
1589 switch (ua_sess->buffer_type) {
1590 case LTTNG_BUFFER_PER_PID:
1591 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
1592 DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
1593 datetime);
1594 break;
1595 case LTTNG_BUFFER_PER_UID:
1596 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
1597 DEFAULT_UST_TRACE_UID_PATH, ua_sess->uid, app->bits_per_long);
1598 break;
1599 default:
1600 assert(0);
1601 goto error;
1602 }
1603 if (ret < 0) {
1604 PERROR("asprintf UST shadow copy session");
1605 assert(0);
1606 goto error;
1607 }
1608
1609 /* Iterate over all channels in global domain. */
1610 cds_lfht_for_each_entry(usess->domain_global.channels->ht, &iter.iter,
1611 uchan, node.node) {
1612 struct lttng_ht_iter uiter;
1613
1614 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
1615 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
1616 if (ua_chan_node != NULL) {
1617 /* Channel already exists. Continuing. */
1618 continue;
1619 }
1620
1621 DBG2("Channel %s not found on shadow session copy, creating it",
1622 uchan->name);
1623 ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
1624 if (ua_chan == NULL) {
1625 /* malloc failed. FIXME: might want to handle ENOMEM. */
1626 continue;
1627 }
1628 shadow_copy_channel(ua_chan, uchan);
1629 /*
1630 * The concept of metadata channel does not exist on the tracing
1631 * registry side of the session daemon so this can only be a per CPU
1632 * channel and not metadata.
1633 */
1634 ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
1635
1636 lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
1637 }
1638
1639 error:
1640 return;
1641 }
1642
1643 /*
1644 * Lookup session wrapper.
1645 */
1646 static
1647 void __lookup_session_by_app(struct ltt_ust_session *usess,
1648 struct ust_app *app, struct lttng_ht_iter *iter)
1649 {
1650 /* Get right UST app session from app */
1651 lttng_ht_lookup(app->sessions, &usess->id, iter);
1652 }
1653
1654 /*
1655 * Return ust app session from the app session hashtable using the UST session
1656 * id.
1657 */
1658 static struct ust_app_session *lookup_session_by_app(
1659 struct ltt_ust_session *usess, struct ust_app *app)
1660 {
1661 struct lttng_ht_iter iter;
1662 struct lttng_ht_node_u64 *node;
1663
1664 __lookup_session_by_app(usess, app, &iter);
1665 node = lttng_ht_iter_get_node_u64(&iter);
1666 if (node == NULL) {
1667 goto error;
1668 }
1669
1670 return caa_container_of(node, struct ust_app_session, node);
1671
1672 error:
1673 return NULL;
1674 }
1675
1676 /*
1677 * Setup buffer registry per PID for the given session and application. If none
1678 * is found, a new one is created, added to the global registry and
1679 * initialized. If regp is valid, it's set with the newly created object.
1680 *
1681 * Return 0 on success or else a negative value.
1682 */
1683 static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
1684 struct ust_app *app, struct buffer_reg_pid **regp)
1685 {
1686 int ret = 0;
1687 struct buffer_reg_pid *reg_pid;
1688
1689 assert(ua_sess);
1690 assert(app);
1691
1692 rcu_read_lock();
1693
1694 reg_pid = buffer_reg_pid_find(ua_sess->id);
1695 if (!reg_pid) {
1696 /*
1697 * This is the create channel path meaning that if there is NO
1698 * registry available, we have to create one for this session.
1699 */
1700 ret = buffer_reg_pid_create(ua_sess->id, &reg_pid);
1701 if (ret < 0) {
1702 goto error;
1703 }
1704 buffer_reg_pid_add(reg_pid);
1705 } else {
1706 goto end;
1707 }
1708
1709 /* Initialize registry. */
1710 ret = ust_registry_session_init(&reg_pid->registry->reg.ust, app,
1711 app->bits_per_long, app->uint8_t_alignment,
1712 app->uint16_t_alignment, app->uint32_t_alignment,
1713 app->uint64_t_alignment, app->long_alignment,
1714 app->byte_order, app->version.major,
1715 app->version.minor);
1716 if (ret < 0) {
1717 goto error;
1718 }
1719
1720 DBG3("UST app buffer registry per PID created successfully");
1721
1722 end:
1723 if (regp) {
1724 *regp = reg_pid;
1725 }
1726 error:
1727 rcu_read_unlock();
1728 return ret;
1729 }
1730
1731 /*
1732 * Setup buffer registry per UID for the given session and application. If none
1733 * is found, a new one is created, added to the global registry and
1734 * initialized. If regp is valid, it's set with the newly created object.
1735 *
1736 * Return 0 on success or else a negative value.
1737 */
1738 static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
1739 struct ust_app *app, struct buffer_reg_uid **regp)
1740 {
1741 int ret = 0;
1742 struct buffer_reg_uid *reg_uid;
1743
1744 assert(usess);
1745 assert(app);
1746
1747 rcu_read_lock();
1748
1749 reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
1750 if (!reg_uid) {
1751 /*
1752 * This is the create channel path meaning that if there is NO
1753 * registry available, we have to create one for this session.
1754 */
1755 ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
1756 LTTNG_DOMAIN_UST, &reg_uid);
1757 if (ret < 0) {
1758 goto error;
1759 }
1760 buffer_reg_uid_add(reg_uid);
1761 } else {
1762 goto end;
1763 }
1764
1765 /* Initialize registry. */
1766 ret = ust_registry_session_init(&reg_uid->registry->reg.ust, NULL,
1767 app->bits_per_long, app->uint8_t_alignment,
1768 app->uint16_t_alignment, app->uint32_t_alignment,
1769 app->uint64_t_alignment, app->long_alignment,
1770 app->byte_order, app->version.major,
1771 app->version.minor);
1772 if (ret < 0) {
1773 goto error;
1774 }
1775 /* Add node to teardown list of the session. */
1776 cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);
1777
1778 DBG3("UST app buffer registry per UID created successfully");
1779
1780 end:
1781 if (regp) {
1782 *regp = reg_uid;
1783 }
1784 error:
1785 rcu_read_unlock();
1786 return ret;
1787 }
1788
1789 /*
1790 * Create a session on the tracer side for the given app.
1791 *
1792 * On success, ua_sess_ptr is populated with the session pointer or else left
1793 * untouched. If the session was created, is_created is set to 1. On error,
1794 * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
1795 * be NULL.
1796 *
1797 * Returns 0 on success or else a negative code which is either -ENOMEM or
1798 * -ENOTCONN which is the default code if the ustctl_create_session fails.
1799 */
1800 static int create_ust_app_session(struct ltt_ust_session *usess,
1801 struct ust_app *app, struct ust_app_session **ua_sess_ptr,
1802 int *is_created)
1803 {
1804 int ret, created = 0;
1805 struct ust_app_session *ua_sess;
1806
1807 assert(usess);
1808 assert(app);
1809 assert(ua_sess_ptr);
1810
1811 health_code_update();
1812
1813 ua_sess = lookup_session_by_app(usess, app);
1814 if (ua_sess == NULL) {
1815 DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
1816 app->pid, usess->id);
1817 ua_sess = alloc_ust_app_session(app);
1818 if (ua_sess == NULL) {
1819 /* Only malloc can fail so something is really wrong */
1820 ret = -ENOMEM;
1821 goto error;
1822 }
1823 shadow_copy_session(ua_sess, usess, app);
1824 created = 1;
1825 }
1826
1827 switch (usess->buffer_type) {
1828 case LTTNG_BUFFER_PER_PID:
1829 /* Init local registry. */
1830 ret = setup_buffer_reg_pid(ua_sess, app, NULL);
1831 if (ret < 0) {
1832 goto error;
1833 }
1834 break;
1835 case LTTNG_BUFFER_PER_UID:
1836 /* Look for a global registry. If none exists, create one. */
1837 ret = setup_buffer_reg_uid(usess, app, NULL);
1838 if (ret < 0) {
1839 goto error;
1840 }
1841 break;
1842 default:
1843 assert(0);
1844 ret = -EINVAL;
1845 goto error;
1846 }
1847
1848 health_code_update();
1849
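/* The tracer-side session was not created yet (handle set to -1 in alloc_ust_app_session()). */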
1850 if (ua_sess->handle == -1) {
1851 ret = ustctl_create_session(app->sock);
1852 if (ret < 0) {
1853 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1854 ERR("Creating session for app pid %d with ret %d",
1855 app->pid, ret);
1856 } else {
1857 DBG("UST app creating session failed. Application is dead");
1858 /*
1859 * This is normal behavior, an application can die during the
1860 * creation process. Don't report an error so the execution can
1861 * continue normally. This will get flagged ENOTCONN and the
1862 * caller will handle it.
1863 */
1864 ret = 0;
1865 }
1866 delete_ust_app_session(-1, ua_sess, app);
1867 if (ret != -ENOMEM) {
1868 /*
1869 * Tracer is probably gone or hit an internal error, so let's
1870 * behave as if it will soon unregister or is no longer usable.
1871 */
1872 ret = -ENOTCONN;
1873 }
1874 goto error;
1875 }
1876
1877 ua_sess->handle = ret;
1878
1879 /* Add ust app session to app's HT */
1880 lttng_ht_node_init_u64(&ua_sess->node,
1881 ua_sess->tracing_id);
1882 lttng_ht_add_unique_u64(app->sessions, &ua_sess->node);
1883
1884 DBG2("UST app session created successfully with handle %d", ret);
1885 }
1886
1887 *ua_sess_ptr = ua_sess;
1888 if (is_created) {
1889 *is_created = created;
1890 }
1891
1892 /* Everything went well. */
1893 ret = 0;
1894
1895 error:
1896 health_code_update();
1897 return ret;
1898 }
1899
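/*
 * Usage note: the creation paths further down in this file first ensure the
 * app session exists via create_ust_app_session(), then hold ua_sess->lock
 * while creating channels, contexts and events on the returned session.
 */
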
1900 /*
1901 * Create a context for the channel on the tracer.
1902 *
1903 * Called with UST app session lock held and a RCU read side lock.
1904 */
1905 static
1906 int create_ust_app_channel_context(struct ust_app_session *ua_sess,
1907 struct ust_app_channel *ua_chan, struct lttng_ust_context *uctx,
1908 struct ust_app *app)
1909 {
1910 int ret = 0;
1911 struct lttng_ht_iter iter;
1912 struct lttng_ht_node_ulong *node;
1913 struct ust_app_ctx *ua_ctx;
1914
1915 DBG2("UST app adding context to channel %s", ua_chan->name);
1916
1917 lttng_ht_lookup(ua_chan->ctx, (void *)((unsigned long)uctx->ctx), &iter);
1918 node = lttng_ht_iter_get_node_ulong(&iter);
1919 if (node != NULL) {
1920 ret = -EEXIST;
1921 goto error;
1922 }
1923
1924 ua_ctx = alloc_ust_app_ctx(uctx);
1925 if (ua_ctx == NULL) {
1926 /* malloc failed */
1927 ret = -1;
1928 goto error;
1929 }
1930
1931 lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
1932 lttng_ht_add_unique_ulong(ua_chan->ctx, &ua_ctx->node);
1933 cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
1934
1935 ret = create_ust_channel_context(ua_chan, ua_ctx, app);
1936 if (ret < 0) {
1937 goto error;
1938 }
1939
1940 error:
1941 return ret;
1942 }
1943
1944 /*
1945 * Enable on the tracer side a ust app event for the session and channel.
1946 *
1947 * Called with UST app session lock held.
1948 */
1949 static
1950 int enable_ust_app_event(struct ust_app_session *ua_sess,
1951 struct ust_app_event *ua_event, struct ust_app *app)
1952 {
1953 int ret;
1954
1955 ret = enable_ust_event(app, ua_sess, ua_event);
1956 if (ret < 0) {
1957 goto error;
1958 }
1959
1960 ua_event->enabled = 1;
1961
1962 error:
1963 return ret;
1964 }
1965
1966 /*
1967 * Disable on the tracer side a ust app event for the session and channel.
1968 */
1969 static int disable_ust_app_event(struct ust_app_session *ua_sess,
1970 struct ust_app_event *ua_event, struct ust_app *app)
1971 {
1972 int ret;
1973
1974 ret = disable_ust_event(app, ua_sess, ua_event);
1975 if (ret < 0) {
1976 goto error;
1977 }
1978
1979 ua_event->enabled = 0;
1980
1981 error:
1982 return ret;
1983 }
1984
1985 /*
1986 * Lookup ust app channel for session and disable it on the tracer side.
1987 */
1988 static
1989 int disable_ust_app_channel(struct ust_app_session *ua_sess,
1990 struct ust_app_channel *ua_chan, struct ust_app *app)
1991 {
1992 int ret;
1993
1994 ret = disable_ust_channel(app, ua_sess, ua_chan);
1995 if (ret < 0) {
1996 goto error;
1997 }
1998
1999 ua_chan->enabled = 0;
2000
2001 error:
2002 return ret;
2003 }
2004
2005 /*
2006 * Lookup ust app channel for session and enable it on the tracer side. This
2007 * MUST be called with a RCU read side lock acquired.
2008 */
2009 static int enable_ust_app_channel(struct ust_app_session *ua_sess,
2010 struct ltt_ust_channel *uchan, struct ust_app *app)
2011 {
2012 int ret = 0;
2013 struct lttng_ht_iter iter;
2014 struct lttng_ht_node_str *ua_chan_node;
2015 struct ust_app_channel *ua_chan;
2016
2017 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
2018 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
2019 if (ua_chan_node == NULL) {
2020 DBG2("Unable to find channel %s in ust session id %" PRIu64,
2021 uchan->name, ua_sess->tracing_id);
2022 goto error;
2023 }
2024
2025 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
2026
2027 ret = enable_ust_channel(app, ua_sess, ua_chan);
2028 if (ret < 0) {
2029 goto error;
2030 }
2031
2032 error:
2033 return ret;
2034 }
2035
2036 /*
2037 * Ask the consumer to create a channel and get it if successful.
2038 *
2039 * Return 0 on success or else a negative value.
2040 */
2041 static int do_consumer_create_channel(struct ltt_ust_session *usess,
2042 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
2043 int bitness, struct ust_registry_session *registry)
2044 {
2045 int ret;
2046 unsigned int nb_fd = 0;
2047 struct consumer_socket *socket;
2048
2049 assert(usess);
2050 assert(ua_sess);
2051 assert(ua_chan);
2052 assert(registry);
2053
2054 rcu_read_lock();
2055 health_code_update();
2056
2057 /* Get the right consumer socket for the application. */
2058 socket = consumer_find_socket_by_bitness(bitness, usess->consumer);
2059 if (!socket) {
2060 ret = -EINVAL;
2061 goto error;
2062 }
2063
2064 health_code_update();
2065
2066 /* Need one fd for the channel. */
2067 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
2068 if (ret < 0) {
2069 ERR("Exhausted number of available FD upon create channel");
2070 goto error;
2071 }
2072
2073 /*
2074 * Ask consumer to create channel. The consumer will return the number of
2075 * streams we have to expect.
2076 */
2077 ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
2078 registry);
2079 if (ret < 0) {
2080 goto error_ask;
2081 }
2082
2083 /*
2084 * Compute the number of fds needed before receiving them. It must be 2 per
2085 * stream (2 being the default value here).
2086 */
2087 nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;
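/*
 * Example: with the default of 2 fds per stream, a channel for which the
 * consumer announced 4 streams reserves 8 descriptors below. The two
 * per-stream descriptors are presumably the stream's shared memory fd and
 * its wakeup fd received from the consumer.
 */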
2088
2089 /* Reserve the number of file descriptors we need. */
2090 ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
2091 if (ret < 0) {
2092 ERR("Exhausted number of available FD upon create channel");
2093 goto error_fd_get_stream;
2094 }
2095
2096 health_code_update();
2097
2098 /*
2099 * Now get the channel from the consumer. This call will populate the stream
2100 * list of that channel and set the ust objects.
2101 */
2102 if (usess->consumer->enabled) {
2103 ret = ust_consumer_get_channel(socket, ua_chan);
2104 if (ret < 0) {
2105 goto error_destroy;
2106 }
2107 }
2108
2109 rcu_read_unlock();
2110 return 0;
2111
2112 error_destroy:
2113 lttng_fd_put(LTTNG_FD_APPS, nb_fd);
2114 error_fd_get_stream:
2115 /*
2116 * Initiate a destroy channel on the consumer since we had an error
2117 * handling it on our side. The return value is of no importance since we
2118 * already have a ret value set by the previous error that we need to
2119 * return.
2120 */
2121 (void) ust_consumer_destroy_channel(socket, ua_chan);
2122 error_ask:
2123 lttng_fd_put(LTTNG_FD_APPS, 1);
2124 error:
2125 health_code_update();
2126 rcu_read_unlock();
2127 return ret;
2128 }
2129
2130 /*
2131 * Duplicate the ust data object of the ust app stream and save it in the
2132 * buffer registry stream.
2133 *
2134 * Return 0 on success or else a negative value.
2135 */
2136 static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
2137 struct ust_app_stream *stream)
2138 {
2139 int ret;
2140
2141 assert(reg_stream);
2142 assert(stream);
2143
2144 /* Reserve the number of file descriptors we need. */
2145 ret = lttng_fd_get(LTTNG_FD_APPS, 2);
2146 if (ret < 0) {
2147 ERR("Exhausted number of available FD upon duplicate stream");
2148 goto error;
2149 }
2150
2151 /* Duplicate object for stream once the original is in the registry. */
2152 ret = ustctl_duplicate_ust_object_data(&stream->obj,
2153 reg_stream->obj.ust);
2154 if (ret < 0) {
2155 ERR("Duplicate stream obj from %p to %p failed with ret %d",
2156 reg_stream->obj.ust, stream->obj, ret);
2157 lttng_fd_put(LTTNG_FD_APPS, 2);
2158 goto error;
2159 }
2160 stream->handle = stream->obj->handle;
2161
2162 error:
2163 return ret;
2164 }
2165
2166 /*
2167 * Duplicate the ust data object of the ust app channel and save it in the
2168 * buffer registry channel.
2169 *
2170 * Return 0 on success or else a negative value.
2171 */
2172 static int duplicate_channel_object(struct buffer_reg_channel *reg_chan,
2173 struct ust_app_channel *ua_chan)
2174 {
2175 int ret;
2176
2177 assert(reg_chan);
2178 assert(ua_chan);
2179
2180 /* Need one fd for the channel. */
2181 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
2182 if (ret < 0) {
2183 ERR("Exhausted number of available FD upon duplicate channel");
2184 goto error_fd_get;
2185 }
2186
2187 /* Duplicate object for stream once the original is in the registry. */
2188 ret = ustctl_duplicate_ust_object_data(&ua_chan->obj, reg_chan->obj.ust);
2189 if (ret < 0) {
2190 ERR("Duplicate channel obj from %p to %p failed with ret: %d",
2191 reg_chan->obj.ust, ua_chan->obj, ret);
2192 goto error;
2193 }
2194 ua_chan->handle = ua_chan->obj->handle;
2195
2196 return 0;
2197
2198 error:
2199 lttng_fd_put(LTTNG_FD_APPS, 1);
2200 error_fd_get:
2201 return ret;
2202 }
2203
2204 /*
2205 * For a given channel buffer registry, setup all streams of the given ust
2206 * application channel.
2207 *
2208 * Return 0 on success or else a negative value.
2209 */
2210 static int setup_buffer_reg_streams(struct buffer_reg_channel *reg_chan,
2211 struct ust_app_channel *ua_chan)
2212 {
2213 int ret = 0;
2214 struct ust_app_stream *stream, *stmp;
2215
2216 assert(reg_chan);
2217 assert(ua_chan);
2218
2219 DBG2("UST app setup buffer registry stream");
2220
2221 /* Send all streams to application. */
2222 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
2223 struct buffer_reg_stream *reg_stream;
2224
2225 ret = buffer_reg_stream_create(&reg_stream);
2226 if (ret < 0) {
2227 goto error;
2228 }
2229
2230 /*
2231 * Keep original pointer and nullify it in the stream so the delete
2232 * stream call does not release the object.
2233 */
2234 reg_stream->obj.ust = stream->obj;
2235 stream->obj = NULL;
2236 buffer_reg_stream_add(reg_stream, reg_chan);
2237
2238 /* We don't need the streams anymore. */
2239 cds_list_del(&stream->list);
2240 delete_ust_app_stream(-1, stream);
2241 }
2242
2243 error:
2244 return ret;
2245 }
2246
2247 /*
2248 * Create a buffer registry channel for the given session registry and
2249 * application channel object. If regp pointer is valid, it's set with the
2250 * created object. Important, the created object is NOT added to the session
2251 * registry hash table.
2252 *
2253 * Return 0 on success else a negative value.
2254 */
2255 static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
2256 struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp)
2257 {
2258 int ret;
2259 struct buffer_reg_channel *reg_chan = NULL;
2260
2261 assert(reg_sess);
2262 assert(ua_chan);
2263
2264 DBG2("UST app creating buffer registry channel for %s", ua_chan->name);
2265
2266 /* Create buffer registry channel. */
2267 ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &reg_chan);
2268 if (ret < 0) {
2269 goto error_create;
2270 }
2271 assert(reg_chan);
2272 reg_chan->consumer_key = ua_chan->key;
2273 reg_chan->subbuf_size = ua_chan->attr.subbuf_size;
2274
2275 /* Create and add a channel registry to session. */
2276 ret = ust_registry_channel_add(reg_sess->reg.ust,
2277 ua_chan->tracing_channel_id);
2278 if (ret < 0) {
2279 goto error;
2280 }
2281 buffer_reg_channel_add(reg_sess, reg_chan);
2282
2283 if (regp) {
2284 *regp = reg_chan;
2285 }
2286
2287 return 0;
2288
2289 error:
2290 /* Safe because the registry channel object was not added to any HT. */
2291 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2292 error_create:
2293 return ret;
2294 }
2295
2296 /*
2297 * Setup buffer registry channel for the given session registry and application
2298 * channel object. If regp pointer is valid, it's set with the created object.
2299 *
2300 * Return 0 on success else a negative value.
2301 */
2302 static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
2303 struct ust_app_channel *ua_chan, struct buffer_reg_channel *reg_chan)
2304 {
2305 int ret;
2306
2307 assert(reg_sess);
2308 assert(reg_chan);
2309 assert(ua_chan);
2310 assert(ua_chan->obj);
2311
2312 DBG2("UST app setup buffer registry channel for %s", ua_chan->name);
2313
2314 /* Setup all streams for the registry. */
2315 ret = setup_buffer_reg_streams(reg_chan, ua_chan);
2316 if (ret < 0) {
2317 goto error;
2318 }
2319
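/*
 * Transfer ownership of the channel object to the registry. Applications
 * attaching to the same per-UID buffers later do not re-create it; they
 * receive a copy made by duplicate_channel_object() instead.
 */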
2320 reg_chan->obj.ust = ua_chan->obj;
2321 ua_chan->obj = NULL;
2322
2323 return 0;
2324
2325 error:
2326 buffer_reg_channel_remove(reg_sess, reg_chan);
2327 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2328 return ret;
2329 }
2330
2331 /*
2332 * Send buffer registry channel to the application.
2333 *
2334 * Return 0 on success else a negative value.
2335 */
2336 static int send_channel_uid_to_ust(struct buffer_reg_channel *reg_chan,
2337 struct ust_app *app, struct ust_app_session *ua_sess,
2338 struct ust_app_channel *ua_chan)
2339 {
2340 int ret;
2341 struct buffer_reg_stream *reg_stream;
2342
2343 assert(reg_chan);
2344 assert(app);
2345 assert(ua_sess);
2346 assert(ua_chan);
2347
2348 DBG("UST app sending buffer registry channel to ust sock %d", app->sock);
2349
2350 ret = duplicate_channel_object(reg_chan, ua_chan);
2351 if (ret < 0) {
2352 goto error;
2353 }
2354
2355 /* Send channel to the application. */
2356 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
2357 if (ret < 0) {
2358 goto error;
2359 }
2360
2361 health_code_update();
2362
2363 /* Send all streams to application. */
2364 pthread_mutex_lock(&reg_chan->stream_list_lock);
2365 cds_list_for_each_entry(reg_stream, &reg_chan->streams, lnode) {
2366 struct ust_app_stream stream;
2367
2368 ret = duplicate_stream_object(reg_stream, &stream);
2369 if (ret < 0) {
2370 goto error_stream_unlock;
2371 }
2372
2373 ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
2374 if (ret < 0) {
2375 (void) release_ust_app_stream(-1, &stream);
2376 goto error_stream_unlock;
2377 }
2378
2379 /*
2380 * The return value is not important here. This function will output an
2381 * error if needed.
2382 */
2383 (void) release_ust_app_stream(-1, &stream);
2384 }
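/*
 * Mark the channel as sent to the application; error paths rely on this
 * flag to decide whether the tracer-side object must be released on the
 * application socket (see the delete_ust_app_channel() call in
 * create_ust_app_channel()).
 */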
2385 ua_chan->is_sent = 1;
2386
2387 error_stream_unlock:
2388 pthread_mutex_unlock(&reg_chan->stream_list_lock);
2389 error:
2390 return ret;
2391 }
2392
2393 /*
2394 * Create and send to the application the created buffers with per UID buffers.
2395 *
2396 * Return 0 on success else a negative value.
2397 */
2398 static int create_channel_per_uid(struct ust_app *app,
2399 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2400 struct ust_app_channel *ua_chan)
2401 {
2402 int ret;
2403 struct buffer_reg_uid *reg_uid;
2404 struct buffer_reg_channel *reg_chan;
2405
2406 assert(app);
2407 assert(usess);
2408 assert(ua_sess);
2409 assert(ua_chan);
2410
2411 DBG("UST app creating channel %s with per UID buffers", ua_chan->name);
2412
2413 reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
2414 /*
2415 * The session creation handles the creation of this global registry
2416 * object. If none can be found, there is a code flow problem or a
2417 * teardown race.
2418 */
2419 assert(reg_uid);
2420
2421 reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
2422 reg_uid);
2423 if (!reg_chan) {
2424 /* Create the buffer registry channel object. */
2425 ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &reg_chan);
2426 if (ret < 0) {
2427 goto error;
2428 }
2429 assert(reg_chan);
2430
2431 /*
2432 * Create the buffers on the consumer side. This call populates the
2433 * ust app channel object with all streams and data object.
2434 */
2435 ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
2436 app->bits_per_long, reg_uid->registry->reg.ust);
2437 if (ret < 0) {
2438 /*
2439 * Let's remove the previously created buffer registry channel so
2440 * it's not visible anymore in the session registry.
2441 */
2442 ust_registry_channel_del_free(reg_uid->registry->reg.ust,
2443 ua_chan->tracing_channel_id);
2444 buffer_reg_channel_remove(reg_uid->registry, reg_chan);
2445 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2446 goto error;
2447 }
2448
2449 /*
2450 * Setup the streams and add them to the session registry.
2451 */
2452 ret = setup_buffer_reg_channel(reg_uid->registry, ua_chan, reg_chan);
2453 if (ret < 0) {
2454 goto error;
2455 }
2456
2457 }
2458
2459 /* Send buffers to the application. */
2460 ret = send_channel_uid_to_ust(reg_chan, app, ua_sess, ua_chan);
2461 if (ret < 0) {
2462 goto error;
2463 }
2464
2465 error:
2466 return ret;
2467 }
2468
2469 /*
2470 * Create and send to the application the created buffers with per PID buffers.
2471 *
2472 * Return 0 on success else a negative value.
2473 */
2474 static int create_channel_per_pid(struct ust_app *app,
2475 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2476 struct ust_app_channel *ua_chan)
2477 {
2478 int ret;
2479 struct ust_registry_session *registry;
2480
2481 assert(app);
2482 assert(usess);
2483 assert(ua_sess);
2484 assert(ua_chan);
2485
2486 DBG("UST app creating channel %s with per PID buffers", ua_chan->name);
2487
2488 rcu_read_lock();
2489
2490 registry = get_session_registry(ua_sess);
2491 assert(registry);
2492
2493 /* Create and add a new channel registry to session. */
2494 ret = ust_registry_channel_add(registry, ua_chan->key);
2495 if (ret < 0) {
2496 goto error;
2497 }
2498
2499 /* Create and get channel on the consumer side. */
2500 ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
2501 app->bits_per_long, registry);
2502 if (ret < 0) {
2503 goto error;
2504 }
2505
2506 ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
2507 if (ret < 0) {
2508 goto error;
2509 }
2510
2511 error:
2512 rcu_read_unlock();
2513 return ret;
2514 }
2515
2516 /*
2517 * From an already allocated ust app channel, create the channel buffers if
2518 * need and send it to the application. This MUST be called with a RCU read
2519 * side lock acquired.
2520 *
2521 * Return 0 on success or else a negative value.
2522 */
2523 static int do_create_channel(struct ust_app *app,
2524 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2525 struct ust_app_channel *ua_chan)
2526 {
2527 int ret;
2528
2529 assert(app);
2530 assert(usess);
2531 assert(ua_sess);
2532 assert(ua_chan);
2533
2534 /* Handle buffer type before sending the channel to the application. */
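/*
 * With LTTNG_BUFFER_PER_UID, buffers are shared through the buffer
 * registry by every application of the same UID and bitness, so only the
 * first application triggers the consumer-side creation. With
 * LTTNG_BUFFER_PER_PID, each application gets its own buffers.
 */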
2535 switch (usess->buffer_type) {
2536 case LTTNG_BUFFER_PER_UID:
2537 {
2538 ret = create_channel_per_uid(app, usess, ua_sess, ua_chan);
2539 if (ret < 0) {
2540 goto error;
2541 }
2542 break;
2543 }
2544 case LTTNG_BUFFER_PER_PID:
2545 {
2546 ret = create_channel_per_pid(app, usess, ua_sess, ua_chan);
2547 if (ret < 0) {
2548 goto error;
2549 }
2550 break;
2551 }
2552 default:
2553 assert(0);
2554 ret = -EINVAL;
2555 goto error;
2556 }
2557
2558 /* Initialize ust objd object using the received handle and add it. */
2559 lttng_ht_node_init_ulong(&ua_chan->ust_objd_node, ua_chan->handle);
2560 lttng_ht_add_unique_ulong(app->ust_objd, &ua_chan->ust_objd_node);
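/*
 * Indexing the channel by its tracer-side object descriptor lets it be
 * retrieved later from that descriptor alone (assumed to be used when
 * handling application notifications referring to this channel).
 */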
2561
2562 /* If channel is not enabled, disable it on the tracer */
2563 if (!ua_chan->enabled) {
2564 ret = disable_ust_channel(app, ua_sess, ua_chan);
2565 if (ret < 0) {
2566 goto error;
2567 }
2568 }
2569
2570 error:
2571 return ret;
2572 }
2573
2574 /*
2575 * Create UST app channel and create it on the tracer. Set ua_chanp of the
2576 * newly created channel if not NULL.
2577 *
2578 * Called with UST app session lock and RCU read-side lock held.
2579 *
2580 * Return 0 on success or else a negative value.
2581 */
2582 static int create_ust_app_channel(struct ust_app_session *ua_sess,
2583 struct ltt_ust_channel *uchan, struct ust_app *app,
2584 enum lttng_ust_chan_type type, struct ltt_ust_session *usess,
2585 struct ust_app_channel **ua_chanp)
2586 {
2587 int ret = 0;
2588 struct lttng_ht_iter iter;
2589 struct lttng_ht_node_str *ua_chan_node;
2590 struct ust_app_channel *ua_chan;
2591
2592 /* Lookup channel in the ust app session */
2593 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
2594 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
2595 if (ua_chan_node != NULL) {
2596 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
2597 goto end;
2598 }
2599
2600 ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
2601 if (ua_chan == NULL) {
2602 /* Only malloc can fail here */
2603 ret = -ENOMEM;
2604 goto error_alloc;
2605 }
2606 shadow_copy_channel(ua_chan, uchan);
2607
2608 /* Set channel type. */
2609 ua_chan->attr.type = type;
2610
2611 ret = do_create_channel(app, usess, ua_sess, ua_chan);
2612 if (ret < 0) {
2613 goto error;
2614 }
2615
2616 DBG2("UST app create channel %s for PID %d completed", ua_chan->name,
2617 app->pid);
2618
2619 /* Only add the channel if successful on the tracer side. */
2620 lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
2621
2622 end:
2623 if (ua_chanp) {
2624 *ua_chanp = ua_chan;
2625 }
2626
2627 /* Everything went well. */
2628 return 0;
2629
2630 error:
2631 delete_ust_app_channel(ua_chan->is_sent ? app->sock : -1, ua_chan, app);
2632 error_alloc:
2633 return ret;
2634 }
2635
2636 /*
2637 * Create UST app event and create it on the tracer side.
2638 *
2639 * Called with ust app session mutex held.
2640 */
2641 static
2642 int create_ust_app_event(struct ust_app_session *ua_sess,
2643 struct ust_app_channel *ua_chan, struct ltt_ust_event *uevent,
2644 struct ust_app *app)
2645 {
2646 int ret = 0;
2647 struct ust_app_event *ua_event;
2648
2649 /* Get event node */
2650 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
2651 uevent->filter, uevent->attr.loglevel, uevent->exclusion);
2652 if (ua_event != NULL) {
2653 ret = -EEXIST;
2654 goto end;
2655 }
2656
2657 /* Does not exist so create one */
2658 ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
2659 if (ua_event == NULL) {
2660 /* Only malloc can fail so something is really wrong */
2661 ret = -ENOMEM;
2662 goto end;
2663 }
2664 shadow_copy_event(ua_event, uevent);
2665
2666 /* Create it on the tracer side */
2667 ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
2668 if (ret < 0) {
2669 /* Not found previously means that it does not exist on the tracer */
2670 assert(ret != -LTTNG_UST_ERR_EXIST);
2671 goto error;
2672 }
2673
2674 add_unique_ust_app_event(ua_chan, ua_event);
2675
2676 DBG2("UST app create event %s for PID %d completed", ua_event->name,
2677 app->pid);
2678
2679 end:
2680 return ret;
2681
2682 error:
2683 /* Valid. Calling here is already in a read side lock */
2684 delete_ust_app_event(-1, ua_event);
2685 return ret;
2686 }
2687
2688 /*
2689 * Create UST metadata and open it on the tracer side.
2690 *
2691 * Called with UST app session lock held and RCU read side lock.
2692 */
2693 static int create_ust_app_metadata(struct ust_app_session *ua_sess,
2694 struct ust_app *app, struct consumer_output *consumer,
2695 struct ustctl_consumer_channel_attr *attr)
2696 {
2697 int ret = 0;
2698 struct ust_app_channel *metadata;
2699 struct consumer_socket *socket;
2700 struct ust_registry_session *registry;
2701
2702 assert(ua_sess);
2703 assert(app);
2704 assert(consumer);
2705
2706 registry = get_session_registry(ua_sess);
2707 assert(registry);
2708
2709 /* Metadata already exists for this registry or it was closed previously */
2710 if (registry->metadata_key || registry->metadata_closed) {
2711 ret = 0;
2712 goto error;
2713 }
2714
2715 /* Allocate UST metadata */
2716 metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, NULL);
2717 if (!metadata) {
2718 /* malloc() failed */
2719 ret = -ENOMEM;
2720 goto error;
2721 }
2722
2723 if (!attr) {
2724 /* Set default attributes for metadata. */
2725 metadata->attr.overwrite = DEFAULT_CHANNEL_OVERWRITE;
2726 metadata->attr.subbuf_size = default_get_metadata_subbuf_size();
2727 metadata->attr.num_subbuf = DEFAULT_METADATA_SUBBUF_NUM;
2728 metadata->attr.switch_timer_interval = DEFAULT_METADATA_SWITCH_TIMER;
2729 metadata->attr.read_timer_interval = DEFAULT_METADATA_READ_TIMER;
2730 metadata->attr.output = LTTNG_UST_MMAP;
2731 metadata->attr.type = LTTNG_UST_CHAN_METADATA;
2732 } else {
2733 memcpy(&metadata->attr, attr, sizeof(metadata->attr));
2734 metadata->attr.output = LTTNG_UST_MMAP;
2735 metadata->attr.type = LTTNG_UST_CHAN_METADATA;
2736 }
2737
2738 /* Need one fd for the channel. */
2739 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
2740 if (ret < 0) {
2741 ERR("Exhausted number of available FD upon create metadata");
2742 goto error;
2743 }
2744
2745 /* Get the right consumer socket for the application. */
2746 socket = consumer_find_socket_by_bitness(app->bits_per_long, consumer);
2747 if (!socket) {
2748 ret = -EINVAL;
2749 goto error_consumer;
2750 }
2751
2752 /*
2753 * Keep metadata key so we can identify it on the consumer side. Assign it
2754 * to the registry *before* we ask the consumer so we avoid the race where the
2755 * consumer requests the metadata before the ask_channel call on our side
2756 * has returned.
2757 */
2758 registry->metadata_key = metadata->key;
2759
2760 /*
2761 * Ask the consumer to create the metadata channel. The metadata object
2762 * will be created by the consumer and kept there. However, the stream is
2763 * never added or monitored until we do a first push metadata to the
2764 * consumer.
2765 */
2766 ret = ust_consumer_ask_channel(ua_sess, metadata, consumer, socket,
2767 registry);
2768 if (ret < 0) {
2769 /* Nullify the metadata key so we don't try to close it later on. */
2770 registry->metadata_key = 0;
2771 goto error_consumer;
2772 }
2773
2774 /*
2775 * The setup command will make the metadata stream be sent to the relayd,
2776 * if applicable, and the thread managing the metadata. This is important
2777 * because after this point, if an error occurs, the only way the stream
2778 * can be deleted is to be monitored in the consumer.
2779 */
2780 ret = consumer_setup_metadata(socket, metadata->key);
2781 if (ret < 0) {
2782 /* Nullify the metadata key so we don't try to close it later on. */
2783 registry->metadata_key = 0;
2784 goto error_consumer;
2785 }
2786
2787 DBG2("UST metadata with key %" PRIu64 " created for app pid %d",
2788 metadata->key, app->pid);
2789
2790 error_consumer:
2791 lttng_fd_put(LTTNG_FD_APPS, 1);
2792 delete_ust_app_channel(-1, metadata, app);
2793 error:
2794 return ret;
2795 }
2796
2797 /*
2798 * Return a pointer to the traceable applications hash table.
2799 */
2800 struct lttng_ht *ust_app_get_ht(void)
2801 {
2802 return ust_app_ht;
2803 }
2804
2805 /*
2806 * Return ust app pointer or NULL if not found. RCU read side lock MUST be
2807 * acquired before calling this function.
2808 */
2809 struct ust_app *ust_app_find_by_pid(pid_t pid)
2810 {
2811 struct ust_app *app = NULL;
2812 struct lttng_ht_node_ulong *node;
2813 struct lttng_ht_iter iter;
2814
2815 lttng_ht_lookup(ust_app_ht, (void *)((unsigned long) pid), &iter);
2816 node = lttng_ht_iter_get_node_ulong(&iter);
2817 if (node == NULL) {
2818 DBG2("UST app not found with pid %d", pid);
2819 goto error;
2820 }
2821
2822 DBG2("Found UST app by pid %d", pid);
2823
2824 app = caa_container_of(node, struct ust_app, pid_n);
2825
2826 error:
2827 return app;
2828 }
2829
2830 /*
2831 * Allocate and init an UST app object using the registration information and
2832 * the command socket. This is called when the command socket connects to the
2833 * session daemon.
2834 *
2835 * The object is returned on success or else NULL.
2836 */
2837 struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
2838 {
2839 struct ust_app *lta = NULL;
2840
2841 assert(msg);
2842 assert(sock >= 0);
2843
2844 DBG3("UST app creating application for socket %d", sock);
2845
2846 if ((msg->bits_per_long == 64 &&
2847 (uatomic_read(&ust_consumerd64_fd) == -EINVAL))
2848 || (msg->bits_per_long == 32 &&
2849 (uatomic_read(&ust_consumerd32_fd) == -EINVAL))) {
2850 ERR("Registration failed: application \"%s\" (pid: %d) has "
2851 "%d-bit longs, but no consumerd for this size is available.\n",
2852 msg->name, msg->pid, msg->bits_per_long);
2853 goto error;
2854 }
2855
2856 lta = zmalloc(sizeof(struct ust_app));
2857 if (lta == NULL) {
2858 PERROR("malloc");
2859 goto error;
2860 }
2861
2862 lta->ppid = msg->ppid;
2863 lta->uid = msg->uid;
2864 lta->gid = msg->gid;
2865
2866 lta->bits_per_long = msg->bits_per_long;
2867 lta->uint8_t_alignment = msg->uint8_t_alignment;
2868 lta->uint16_t_alignment = msg->uint16_t_alignment;
2869 lta->uint32_t_alignment = msg->uint32_t_alignment;
2870 lta->uint64_t_alignment = msg->uint64_t_alignment;
2871 lta->long_alignment = msg->long_alignment;
2872 lta->byte_order = msg->byte_order;
2873
2874 lta->v_major = msg->major;
2875 lta->v_minor = msg->minor;
2876 lta->sessions = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
2877 lta->ust_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
2878 lta->notify_sock = -1;
2879
2880 /* Copy name and make sure it's NULL terminated. */
2881 strncpy(lta->name, msg->name, sizeof(lta->name));
2882 lta->name[UST_APP_PROCNAME_LEN] = '\0';
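/*
 * strncpy() does not NUL-terminate when the source fills the destination,
 * hence the explicit terminator above; the name buffer is expected to hold
 * UST_APP_PROCNAME_LEN + 1 bytes.
 */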
2883
2884 /*
2885 * Before this can be called, when receiving the registration information,
2886 * the application compatibility is checked. So, at this point, the
2887 * application can work with this session daemon.
2888 */
2889 lta->compatible = 1;
2890
2891 lta->pid = msg->pid;
2892 lttng_ht_node_init_ulong(&lta->pid_n, (unsigned long) lta->pid);
2893 lta->sock = sock;
2894 lttng_ht_node_init_ulong(&lta->sock_n, (unsigned long) lta->sock);
2895
2896 CDS_INIT_LIST_HEAD(&lta->teardown_head);
2897
2898 error:
2899 return lta;
2900 }
2901
2902 /*
2903 * For a given application object, add it to every hash table.
2904 */
2905 void ust_app_add(struct ust_app *app)
2906 {
2907 assert(app);
2908 assert(app->notify_sock >= 0);
2909
2910 rcu_read_lock();
2911
2912 /*
2913 * On a re-registration, we want to kick out the previous registration of
2914 * that pid
2915 */
2916 lttng_ht_add_replace_ulong(ust_app_ht, &app->pid_n);
2917
2918 /*
2919 * The socket _should_ be unique until _we_ call close. So, an add_unique
2920 * is used for the ust_app_ht_by_sock, which asserts if the entry is
2921 * already in the table.
2922 */
2923 lttng_ht_add_unique_ulong(ust_app_ht_by_sock, &app->sock_n);
2924
2925 /* Add application to the notify socket hash table. */
2926 lttng_ht_node_init_ulong(&app->notify_sock_n, app->notify_sock);
2927 lttng_ht_add_unique_ulong(ust_app_ht_by_notify_sock, &app->notify_sock_n);
2928
2929 DBG("App registered with pid:%d ppid:%d uid:%d gid:%d sock:%d name:%s "
2930 "notify_sock:%d (version %d.%d)", app->pid, app->ppid, app->uid,
2931 app->gid, app->sock, app->name, app->notify_sock, app->v_major,
2932 app->v_minor);
2933
2934 rcu_read_unlock();
2935 }
2936
2937 /*
2938 * Set the application version into the object.
2939 *
2940 * Return 0 on success, else a negative value that is either an errno code
2941 * or an LTTng-UST error code.
2942 */
2943 int ust_app_version(struct ust_app *app)
2944 {
2945 int ret;
2946
2947 assert(app);
2948
2949 ret = ustctl_tracer_version(app->sock, &app->version);
2950 if (ret < 0) {
2951 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
2952 ERR("UST app %d version failed with ret %d", app->sock, ret);
2953 } else {
2954 DBG3("UST app %d version failed. Application is dead", app->sock);
2955 }
2956 }
2957
2958 return ret;
2959 }
2960
2961 /*
2962 * Unregister app by removing it from the global traceable app list and freeing
2963 * the data struct.
2964 *
2965 * The socket is already closed at this point, so there is no need to close it.
2966 */
2967 void ust_app_unregister(int sock)
2968 {
2969 struct ust_app *lta;
2970 struct lttng_ht_node_ulong *node;
2971 struct lttng_ht_iter iter;
2972 struct ust_app_session *ua_sess;
2973 int ret;
2974
2975 rcu_read_lock();
2976
2977 /* Get the node reference for a call_rcu */
2978 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
2979 node = lttng_ht_iter_get_node_ulong(&iter);
2980 assert(node);
2981
2982 lta = caa_container_of(node, struct ust_app, sock_n);
2983 DBG("PID %d unregistering with sock %d", lta->pid, sock);
2984
2985 /* Remove application from the socket hash table */
2986 ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
2987 assert(!ret);
2988
2989 /*
2990 * Remove application from notify hash table. The thread handling the
2991 * notify socket could have deleted the node so ignore on error because
2992 * either way it's valid. The close of that socket is handled by the other
2993 * thread.
2994 */
2995 iter.iter.node = &lta->notify_sock_n.node;
2996 (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
2997
2998 /*
2999 * Ignore return value since the node might have been removed before by an
3000 * add replace during app registration because the PID can be reassigned by
3001 * the OS.
3002 */
3003 iter.iter.node = &lta->pid_n.node;
3004 ret = lttng_ht_del(ust_app_ht, &iter);
3005 if (ret) {
3006 DBG3("Unregister app by PID %d failed. This can happen on pid reuse",
3007 lta->pid);
3008 }
3009
3010 /* Remove sessions so they are not visible during deletion. */
3011 cds_lfht_for_each_entry(lta->sessions->ht, &iter.iter, ua_sess,
3012 node.node) {
3013 struct ust_registry_session *registry;
3014
3015 ret = lttng_ht_del(lta->sessions, &iter);
3016 if (ret) {
3017 /* The session was already removed so it is scheduled for teardown. */
3018 continue;
3019 }
3020
3021 /*
3022 * Add session to list for teardown. This is safe since at this point we
3023 * are the only one using this list.
3024 */
3025 pthread_mutex_lock(&ua_sess->lock);
3026
3027 /*
3028 * Normally, this is done in the delete session process which is
3029 * executed in the call rcu below. However, upon unregistration we can't
3030 * afford to wait for the grace period before pushing data or else the
3031 * data pending feature can race between the unregistration and stop
3032 * command where the data pending command is sent *before* the grace
3033 * period ended.
3034 *
3035 * The close metadata below nullifies the metadata pointer in the
3036 * session so the delete session will NOT push/close a second time.
3037 */
3038 registry = get_session_registry(ua_sess);
3039 if (registry && !registry->metadata_closed) {
3040 /* Push metadata for application before freeing the application. */
3041 (void) push_metadata(registry, ua_sess->consumer);
3042
3043 /*
3044 * Don't ask to close metadata for global per UID buffers. Close
3045 * metadata only on destroy trace session in this case. Also, the
3046 * previous push metadata could have flagged the metadata registry to
3047 * close, so don't send a close command if it is already closed.
3048 */
3049 if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID &&
3050 !registry->metadata_closed) {
3051 /* And ask to close it for this session registry. */
3052 (void) close_metadata(registry, ua_sess->consumer);
3053 }
3054 }
3055
3056 cds_list_add(&ua_sess->teardown_node, &lta->teardown_head);
3057 pthread_mutex_unlock(&ua_sess->lock);
3058 }
3059
3060 /* Free memory */
3061 call_rcu(&lta->pid_n.head, delete_ust_app_rcu);
3062
3063 rcu_read_unlock();
3064 return;
3065 }
3066
3067 /*
3068 * Return traceable_app_count
3069 */
3070 unsigned long ust_app_list_count(void)
3071 {
3072 unsigned long count;
3073
3074 rcu_read_lock();
3075 count = lttng_ht_get_count(ust_app_ht);
3076 rcu_read_unlock();
3077
3078 return count;
3079 }
3080
3081 /*
3082 * Fill the events array with the names of all events of all registered apps.
3083 */
3084 int ust_app_list_events(struct lttng_event **events)
3085 {
3086 int ret, handle;
3087 size_t nbmem, count = 0;
3088 struct lttng_ht_iter iter;
3089 struct ust_app *app;
3090 struct lttng_event *tmp_event;
3091
3092 nbmem = UST_APP_EVENT_LIST_SIZE;
3093 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event));
3094 if (tmp_event == NULL) {
3095 PERROR("zmalloc ust app events");
3096 ret = -ENOMEM;
3097 goto error;
3098 }
3099
3100 rcu_read_lock();
3101
3102 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3103 struct lttng_ust_tracepoint_iter uiter;
3104
3105 health_code_update();
3106
3107 if (!app->compatible) {
3108 /*
3109 * TODO: In time, we should notify the caller of this error by
3110 * telling them that this is a version error.
3111 */
3112 continue;
3113 }
3114 handle = ustctl_tracepoint_list(app->sock);
3115 if (handle < 0) {
3116 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
3117 ERR("UST app list events getting handle failed for app pid %d",
3118 app->pid);
3119 }
3120 continue;
3121 }
3122
3123 while ((ret = ustctl_tracepoint_list_get(app->sock, handle,
3124 &uiter)) != -LTTNG_UST_ERR_NOENT) {
3125 /* Handle ustctl error. */
3126 if (ret < 0) {
3127 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
3128 ERR("UST app tp list get failed for app %d with ret %d",
3129 app->sock, ret);
3130 } else {
3131 DBG3("UST app tp list get failed. Application is dead");
3132 /*
3133 * This is normal behavior, an application can die during the
3134 * listing process. Don't report an error so the execution can
3135 * continue normally. Keep the events gathered so far.
3136 */
3137 break;
3138 }
3139 free(tmp_event);
3140 goto rcu_error;
3141 }
3142
3143 health_code_update();
3144 if (count >= nbmem) {
3145 /* In case the realloc fails, we free the memory */
3146 void *ptr;
3147
3148 DBG2("Reallocating event list from %zu to %zu entries", nbmem,
3149 2 * nbmem);
3150 nbmem *= 2;
3151 ptr = realloc(tmp_event, nbmem * sizeof(struct lttng_event));
3152 if (ptr == NULL) {
3153 PERROR("realloc ust app events");
3154 free(tmp_event);
3155 ret = -ENOMEM;
3156 goto rcu_error;
3157 }
3158 tmp_event = ptr;
3159 }
3160 memcpy(tmp_event[count].name, uiter.name, LTTNG_UST_SYM_NAME_LEN);
3161 tmp_event[count].loglevel = uiter.loglevel;
3162 tmp_event[count].type = (enum lttng_event_type) LTTNG_UST_TRACEPOINT;
3163 tmp_event[count].pid = app->pid;
3164 tmp_event[count].enabled = -1;
3165 count++;
3166 }
3167 }
3168
3169 ret = count;
3170 *events = tmp_event;
3171
3172 DBG2("UST app list events done (%zu events)", count);
3173
3174 rcu_error:
3175 rcu_read_unlock();
3176 error:
3177 health_code_update();
3178 return ret;
3179 }
3180
3181 /*
3182 * Fill the fields array with all event fields of all registered apps.
3183 */
3184 int ust_app_list_event_fields(struct lttng_event_field **fields)
3185 {
3186 int ret, handle;
3187 size_t nbmem, count = 0;
3188 struct lttng_ht_iter iter;
3189 struct ust_app *app;
3190 struct lttng_event_field *tmp_event;
3191
3192 nbmem = UST_APP_EVENT_LIST_SIZE;
3193 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event_field));
3194 if (tmp_event == NULL) {
3195 PERROR("zmalloc ust app event fields");
3196 ret = -ENOMEM;
3197 goto error;
3198 }
3199
3200 rcu_read_lock();
3201
3202 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3203 struct lttng_ust_field_iter uiter;
3204
3205 health_code_update();
3206
3207 if (!app->compatible) {
3208 /*
3209 * TODO: In time, we should notify the caller of this error by
3210 * telling them that this is a version error.
3211 */
3212 continue;
3213 }
3214 handle = ustctl_tracepoint_field_list(app->sock);
3215 if (handle < 0) {
3216 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
3217 ERR("UST app list field getting handle failed for app pid %d",
3218 app->pid);
3219 }
3220 continue;
3221 }
3222
3223 while ((ret = ustctl_tracepoint_field_list_get(app->sock, handle,
3224 &uiter)) != -LTTNG_UST_ERR_NOENT) {
3225 /* Handle ustctl error. */
3226 if (ret < 0) {
3227 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
3228 ERR("UST app tp list field failed for app %d with ret %d",
3229 app->sock, ret);
3230 } else {
3231 DBG3("UST app tp list field failed. Application is dead");
3232 /*
3233 * This is normal behavior, an application can die during the
3234 * listing process. Don't report an error so the execution can
3235 * continue normally. Keep the fields gathered so far.
3236 */
3237 break;
3238 }
3239 free(tmp_event);
3240 goto rcu_error;
3241 }
3242
3243 health_code_update();
3244 if (count >= nbmem) {
3245 /* In case the realloc fails, we free the memory */
3246 void *ptr;
3247
3248 DBG2("Reallocating event field list from %zu to %zu entries", nbmem,
3249 2 * nbmem);
3250 nbmem *= 2;
3251 ptr = realloc(tmp_event, nbmem * sizeof(struct lttng_event_field));
3252 if (ptr == NULL) {
3253 PERROR("realloc ust app event fields");
3254 free(tmp_event);
3255 ret = -ENOMEM;
3256 goto rcu_error;
3257 }
3258 tmp_event = ptr;
3259 }
3260
3261 memcpy(tmp_event[count].field_name, uiter.field_name, LTTNG_UST_SYM_NAME_LEN);
3262 tmp_event[count].type = uiter.type;
3263 tmp_event[count].nowrite = uiter.nowrite;
3264
3265 memcpy(tmp_event[count].event.name, uiter.event_name, LTTNG_UST_SYM_NAME_LEN);
3266 tmp_event[count].event.loglevel = uiter.loglevel;
3267 tmp_event[count].event.type = LTTNG_UST_TRACEPOINT;
3268 tmp_event[count].event.pid = app->pid;
3269 tmp_event[count].event.enabled = -1;
3270 count++;
3271 }
3272 }
3273
3274 ret = count;
3275 *fields = tmp_event;
3276
3277 DBG2("UST app list event fields done (%zu events)", count);
3278
3279 rcu_error:
3280 rcu_read_unlock();
3281 error:
3282 health_code_update();
3283 return ret;
3284 }
3285
3286 /*
3287 * Free and clean all traceable apps of the global list.
3288 *
3289 * Should _NOT_ be called with RCU read-side lock held.
3290 */
3291 void ust_app_clean_list(void)
3292 {
3293 int ret;
3294 struct ust_app *app;
3295 struct lttng_ht_iter iter;
3296
3297 DBG2("UST app cleaning registered apps hash table");
3298
3299 rcu_read_lock();
3300
3301 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3302 ret = lttng_ht_del(ust_app_ht, &iter);
3303 assert(!ret);
3304 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
3305 }
3306
3307 /* Cleanup socket hash table */
3308 cds_lfht_for_each_entry(ust_app_ht_by_sock->ht, &iter.iter, app,
3309 sock_n.node) {
3310 ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
3311 assert(!ret);
3312 }
3313
3314 /* Cleanup notify socket hash table */
3315 cds_lfht_for_each_entry(ust_app_ht_by_notify_sock->ht, &iter.iter, app,
3316 notify_sock_n.node) {
3317 ret = lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
3318 assert(!ret);
3319 }
3320 rcu_read_unlock();
3321
3322 /* Destroy is done only when the ht is empty */
3323 ht_cleanup_push(ust_app_ht);
3324 ht_cleanup_push(ust_app_ht_by_sock);
3325 ht_cleanup_push(ust_app_ht_by_notify_sock);
3326 }
3327
3328 /*
3329 * Init UST app hash table.
3330 */
3331 void ust_app_ht_alloc(void)
3332 {
3333 ust_app_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3334 ust_app_ht_by_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3335 ust_app_ht_by_notify_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3336 }
3337
3338 /*
3339 * For a specific UST session, disable the channel for all registered apps.
3340 */
3341 int ust_app_disable_channel_glb(struct ltt_ust_session *usess,
3342 struct ltt_ust_channel *uchan)
3343 {
3344 int ret = 0;
3345 struct lttng_ht_iter iter;
3346 struct lttng_ht_node_str *ua_chan_node;
3347 struct ust_app *app;
3348 struct ust_app_session *ua_sess;
3349 struct ust_app_channel *ua_chan;
3350
3351 if (usess == NULL || uchan == NULL) {
3352 ERR("Disabling UST global channel with NULL values");
3353 ret = -1;
3354 goto error;
3355 }
3356
3357 DBG2("UST app disabling channel %s from global domain for session id %" PRIu64,
3358 uchan->name, usess->id);
3359
3360 rcu_read_lock();
3361
3362 /* For every registered application */
3363 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3364 struct lttng_ht_iter uiter;
3365 if (!app->compatible) {
3366 /*
3367 * TODO: In time, we should notify the caller of this error by
3368 * telling them that this is a version error.
3369 */
3370 continue;
3371 }
3372 ua_sess = lookup_session_by_app(usess, app);
3373 if (ua_sess == NULL) {
3374 continue;
3375 }
3376
3377 /* Get channel */
3378 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3379 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3380 /* If the session is found for the app, the channel must be there */
3381 assert(ua_chan_node);
3382
3383 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3384 /* The channel must not be already disabled */
3385 assert(ua_chan->enabled == 1);
3386
3387 /* Disable channel onto application */
3388 ret = disable_ust_app_channel(ua_sess, ua_chan, app);
3389 if (ret < 0) {
3390 /* XXX: We might want to report this error at some point... */
3391 continue;
3392 }
3393 }
3394
3395 rcu_read_unlock();
3396
3397 error:
3398 return ret;
3399 }
3400
3401 /*
3402 * For a specific UST session, enable the channel for all registered apps.
3403 */
3404 int ust_app_enable_channel_glb(struct ltt_ust_session *usess,
3405 struct ltt_ust_channel *uchan)
3406 {
3407 int ret = 0;
3408 struct lttng_ht_iter iter;
3409 struct ust_app *app;
3410 struct ust_app_session *ua_sess;
3411
3412 if (usess == NULL || uchan == NULL) {
3413 ERR("Enabling UST global channel with NULL values");
3414 ret = -1;
3415 goto error;
3416 }
3417
3418 DBG2("UST app enabling channel %s to global domain for session id %" PRIu64,
3419 uchan->name, usess->id);
3420
3421 rcu_read_lock();
3422
3423 /* For every registered application */
3424 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3425 if (!app->compatible) {
3426 /*
3427 * TODO: In time, we should notify the caller of this error by
3428 * telling them that this is a version error.
3429 */
3430 continue;
3431 }
3432 ua_sess = lookup_session_by_app(usess, app);
3433 if (ua_sess == NULL) {
3434 continue;
3435 }
3436
3437 /* Enable channel onto application */
3438 ret = enable_ust_app_channel(ua_sess, uchan, app);
3439 if (ret < 0) {
3440 /* XXX: We might want to report this error at some point... */
3441 continue;
3442 }
3443 }
3444
3445 rcu_read_unlock();
3446
3447 error:
3448 return ret;
3449 }
3450
3451 /*
3452 * Disable an event in a channel and for a specific session.
3453 */
3454 int ust_app_disable_event_glb(struct ltt_ust_session *usess,
3455 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
3456 {
3457 int ret = 0;
3458 struct lttng_ht_iter iter, uiter;
3459 struct lttng_ht_node_str *ua_chan_node, *ua_event_node;
3460 struct ust_app *app;
3461 struct ust_app_session *ua_sess;
3462 struct ust_app_channel *ua_chan;
3463 struct ust_app_event *ua_event;
3464
3465 DBG("UST app disabling event %s for all apps in channel "
3466 "%s for session id %" PRIu64,
3467 uevent->attr.name, uchan->name, usess->id);
3468
3469 rcu_read_lock();
3470
3471 /* For all registered applications */
3472 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3473 if (!app->compatible) {
3474 /*
3475 * TODO: In time, we should notify the caller of this error by
3476 * telling them that this is a version error.
3477 */
3478 continue;
3479 }
3480 ua_sess = lookup_session_by_app(usess, app);
3481 if (ua_sess == NULL) {
3482 /* Next app */
3483 continue;
3484 }
3485
3486 /* Lookup channel in the ust app session */
3487 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3488 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3489 if (ua_chan_node == NULL) {
3490 DBG2("Channel %s not found in session id %" PRIu64 " for app pid %d."
3491 " Skipping", uchan->name, usess->id, app->pid);
3492 continue;
3493 }
3494 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3495
3496 lttng_ht_lookup(ua_chan->events, (void *)uevent->attr.name, &uiter);
3497 ua_event_node = lttng_ht_iter_get_node_str(&uiter);
3498 if (ua_event_node == NULL) {
3499 DBG2("Event %s not found in channel %s for app pid %d."
3500 " Skipping", uevent->attr.name, uchan->name, app->pid);
3501 continue;
3502 }
3503 ua_event = caa_container_of(ua_event_node, struct ust_app_event, node);
3504
3505 ret = disable_ust_app_event(ua_sess, ua_event, app);
3506 if (ret < 0) {
3507 /* XXX: Report error someday... */
3508 continue;
3509 }
3510 }
3511
3512 rcu_read_unlock();
3513
3514 return ret;
3515 }
3516
3517 /*
3518 * For a specific UST session and UST channel, disable all events for all
3519 * registered apps.
3520 */
3521 int ust_app_disable_all_event_glb(struct ltt_ust_session *usess,
3522 struct ltt_ust_channel *uchan)
3523 {
3524 int ret = 0;
3525 struct lttng_ht_iter iter, uiter;
3526 struct lttng_ht_node_str *ua_chan_node;
3527 struct ust_app *app;
3528 struct ust_app_session *ua_sess;
3529 struct ust_app_channel *ua_chan;
3530 struct ust_app_event *ua_event;
3531
3532 DBG("UST app disabling all events for all apps in channel "
3533 "%s for session id %" PRIu64, uchan->name, usess->id);
3534
3535 rcu_read_lock();
3536
3537 /* For all registered applications */
3538 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3539 if (!app->compatible) {
3540 /*
3541 * TODO: In time, we should notify the caller of this error by
3542 * telling them that this is a version error.
3543 */
3544 continue;
3545 }
3546 ua_sess = lookup_session_by_app(usess, app);
3547 if (!ua_sess) {
3548 /* The application has a problem or is probably dead. */
3549 continue;
3550 }
3551
3552 /* Lookup channel in the ust app session */
3553 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3554 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3555 /* If the channel is not found, there is a code flow error */
3556 assert(ua_chan_node);
3557
3558 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3559
3560 /* Disable each event of the channel */
3561 cds_lfht_for_each_entry(ua_chan->events->ht, &uiter.iter, ua_event,
3562 node.node) {
3563 ret = disable_ust_app_event(ua_sess, ua_event, app);
3564 if (ret < 0) {
3565 /* XXX: Report error someday... */
3566 continue;
3567 }
3568 }
3569 }
3570
3571 rcu_read_unlock();
3572
3573 return ret;
3574 }
3575
3576 /*
3577 * For a specific UST session, create the channel for all registered apps.
3578 */
3579 int ust_app_create_channel_glb(struct ltt_ust_session *usess,
3580 struct ltt_ust_channel *uchan)
3581 {
3582 int ret = 0, created;
3583 struct lttng_ht_iter iter;
3584 struct ust_app *app;
3585 struct ust_app_session *ua_sess = NULL;
3586
3587 /* Very wrong code flow */
3588 assert(usess);
3589 assert(uchan);
3590
3591 DBG2("UST app adding channel %s to UST domain for session id %" PRIu64,
3592 uchan->name, usess->id);
3593
3594 rcu_read_lock();
3595
3596 /* For every registered application */
3597 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3598 if (!app->compatible) {
3599 /*
3600 * TODO: In time, we should notify the caller of this error by
3601 * telling them that this is a version error.
3602 */
3603 continue;
3604 }
3605 /*
3606 * Create session on the tracer side and add it to app session HT. Note
3607 * that if the session already exists, this simply returns a pointer to
3608 * the existing ust app session.
3609 */
3610 ret = create_ust_app_session(usess, app, &ua_sess, &created);
3611 if (ret < 0) {
3612 switch (ret) {
3613 case -ENOTCONN:
3614 /*
3615 * The application's socket is not valid. Either a bad socket
3616 * or a timeout on it. We can't inform the caller that for a
3617 * specific app, the session failed so lets continue here.
3618 */
3619 continue;
3620 case -ENOMEM:
3621 default:
3622 goto error_rcu_unlock;
3623 }
3624 }
3625 assert(ua_sess);
3626
3627 pthread_mutex_lock(&ua_sess->lock);
3628 if (!strncmp(uchan->name, DEFAULT_METADATA_NAME,
3629 sizeof(uchan->name))) {
3630 struct ustctl_consumer_channel_attr attr;
3631 copy_channel_attr_to_ustctl(&attr, &uchan->attr);
3632 ret = create_ust_app_metadata(ua_sess, app, usess->consumer,
3633 &attr);
3634 } else {
3635 /* Create channel onto application. We don't need the chan ref. */
3636 ret = create_ust_app_channel(ua_sess, uchan, app,
3637 LTTNG_UST_CHAN_PER_CPU, usess, NULL);
3638 }
3639 pthread_mutex_unlock(&ua_sess->lock);
3640 if (ret < 0) {
3641 if (ret == -ENOMEM) {
3642 /* No more memory is a fatal error. Stop right now. */
3643 goto error_rcu_unlock;
3644 }
3645 /* Clean up the session if we just created it. */
3646 if (created) {
3647 destroy_app_session(app, ua_sess);
3648 }
3649 }
3650 }
3651
3652 error_rcu_unlock:
3653 rcu_read_unlock();
3654 return ret;
3655 }
3656
3657 /*
3658 * Enable event for a specific session and channel on the tracer.
3659 */
3660 int ust_app_enable_event_glb(struct ltt_ust_session *usess,
3661 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
3662 {
3663 int ret = 0;
3664 struct lttng_ht_iter iter, uiter;
3665 struct lttng_ht_node_str *ua_chan_node;
3666 struct ust_app *app;
3667 struct ust_app_session *ua_sess;
3668 struct ust_app_channel *ua_chan;
3669 struct ust_app_event *ua_event;
3670
3671 DBG("UST app enabling event %s for all apps for session id %" PRIu64,
3672 uevent->attr.name, usess->id);
3673
3674 /*
3675 * NOTE: At this point, this function is called only if the session and
3676 * channel passed are already created for all apps and enabled on the
3677 * tracer as well.
3678 */
3679
3680 rcu_read_lock();
3681
3682 /* For all registered applications */
3683 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3684 if (!app->compatible) {
3685 /*
3686 * TODO: In time, we should notify the caller of this error by
3687 * telling them that this is a version error.
3688 */
3689 continue;
3690 }
3691 ua_sess = lookup_session_by_app(usess, app);
3692 if (!ua_sess) {
3693 /* The application has problem or is probably dead. */
3694 continue;
3695 }
3696
3697 pthread_mutex_lock(&ua_sess->lock);
3698
3699 /* Lookup channel in the ust app session */
3700 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3701 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3702 /* If the channel is not found, there is a code flow error */
3703 assert(ua_chan_node);
3704
3705 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3706
3707 /* Get event node */
3708 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
3709 uevent->filter, uevent->attr.loglevel, uevent->exclusion);
3710 if (ua_event == NULL) {
3711 DBG3("UST app enable event %s not found for app PID %d."
3712 " Skipping app", uevent->attr.name, app->pid);
3713 goto next_app;
3714 }
3715
3716 ret = enable_ust_app_event(ua_sess, ua_event, app);
3717 if (ret < 0) {
3718 pthread_mutex_unlock(&ua_sess->lock);
3719 goto error;
3720 }
3721 next_app:
3722 pthread_mutex_unlock(&ua_sess->lock);
3723 }
3724
3725 error:
3726 rcu_read_unlock();
3727 return ret;
3728 }
3729
3730 /*
3731 * For a specific existing UST session and UST channel, creates the event for
3732 * all registered apps.
3733 */
3734 int ust_app_create_event_glb(struct ltt_ust_session *usess,
3735 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
3736 {
3737 int ret = 0;
3738 struct lttng_ht_iter iter, uiter;
3739 struct lttng_ht_node_str *ua_chan_node;
3740 struct ust_app *app;
3741 struct ust_app_session *ua_sess;
3742 struct ust_app_channel *ua_chan;
3743
3744 DBG("UST app creating event %s for all apps for session id %" PRIu64,
3745 uevent->attr.name, usess->id);
3746
3747 rcu_read_lock();
3748
3749 /* For all registered applications */
3750 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3751 if (!app->compatible) {
3752 /*
3753 * TODO: In time, we should notify the caller of this error by
3754 * telling them that this is a version error.
3755 */
3756 continue;
3757 }
3758 ua_sess = lookup_session_by_app(usess, app);
3759 if (!ua_sess) {
3760 /* The application has a problem or is probably dead. */
3761 continue;
3762 }
3763
3764 pthread_mutex_lock(&ua_sess->lock);
3765 /* Lookup channel in the ust app session */
3766 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3767 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3768 /* If the channel is not found, there is a code flow error */
3769 assert(ua_chan_node);
3770
3771 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3772
3773 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
3774 pthread_mutex_unlock(&ua_sess->lock);
3775 if (ret < 0) {
3776 if (ret != -LTTNG_UST_ERR_EXIST) {
3777 /* Possible value at this point: -ENOMEM. If so, we stop! */
3778 break;
3779 }
3780 DBG2("UST app event %s already exists on app PID %d",
3781 uevent->attr.name, app->pid);
3782 continue;
3783 }
3784 }
3785
3786 rcu_read_unlock();
3787
3788 return ret;
3789 }
3790
3791 /*
3792 * Start tracing for a specific UST session and app.
3793 */
3794 static
3795 int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app)
3796 {
3797 int ret = 0;
3798 struct ust_app_session *ua_sess;
3799
3800 DBG("Starting tracing for ust app pid %d", app->pid);
3801
3802 rcu_read_lock();
3803
3804 if (!app->compatible) {
3805 goto end;
3806 }
3807
3808 ua_sess = lookup_session_by_app(usess, app);
3809 if (ua_sess == NULL) {
3810 /* The session is in teardown process. Ignore and continue. */
3811 goto end;
3812 }
3813
3814 pthread_mutex_lock(&ua_sess->lock);
3815
3816 /* Upon restart, we skip the setup, already done */
3817 if (ua_sess->started) {
3818 goto skip_setup;
3819 }
3820
3821 /* Create directories if consumer is LOCAL and has a path defined. */
3822 if (usess->consumer->type == CONSUMER_DST_LOCAL &&
3823 strlen(usess->consumer->dst.trace_path) > 0) {
3824 ret = run_as_mkdir_recursive(usess->consumer->dst.trace_path,
3825 S_IRWXU | S_IRWXG, ua_sess->euid, ua_sess->egid);
3826 if (ret < 0) {
3827 if (ret != -EEXIST) {
3828 ERR("Trace directory creation error");
3829 goto error_unlock;
3830 }
3831 }
3832 }
3833
3834 /*
3835 * Create the metadata for the application. This returns gracefully if a
3836 * metadata was already set for the session.
3837 */
3838 ret = create_ust_app_metadata(ua_sess, app, usess->consumer, NULL);
3839 if (ret < 0) {
3840 goto error_unlock;
3841 }
3842
3843 health_code_update();
3844
3845 skip_setup:
3846 /* This starts the UST tracing */
3847 ret = ustctl_start_session(app->sock, ua_sess->handle);
3848 if (ret < 0) {
3849 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
3850 ERR("Error starting tracing for app pid: %d (ret: %d)",
3851 app->pid, ret);
3852 } else {
3853 DBG("UST app start session failed. Application is dead.");
3854 /*
3855 * This is normal behavior, an application can die during the
3856 * creation process. Don't report an error so the execution can
3857 * continue normally.
3858 */
3859 pthread_mutex_unlock(&ua_sess->lock);
3860 goto end;
3861 }
3862 goto error_unlock;
3863 }
3864
3865 /* Indicate that the session has been started once */
3866 ua_sess->started = 1;
3867
3868 pthread_mutex_unlock(&ua_sess->lock);
3869
3870 health_code_update();
3871
3872 /* Quiescent wait after starting trace */
3873 ret = ustctl_wait_quiescent(app->sock);
3874 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
3875 ERR("UST app wait quiescent failed for app pid %d ret %d",
3876 app->pid, ret);
3877 }
3878
3879 end:
3880 rcu_read_unlock();
3881 health_code_update();
3882 return 0;
3883
3884 error_unlock:
3885 pthread_mutex_unlock(&ua_sess->lock);
3886 rcu_read_unlock();
3887 health_code_update();
3888 return -1;
3889 }
3890
3891 /*
3892 * Stop tracing for a specific UST session and app.
3893 */
3894 static
3895 int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
3896 {
3897 int ret = 0;
3898 struct ust_app_session *ua_sess;
3899 struct ust_registry_session *registry;
3900
3901 DBG("Stopping tracing for ust app pid %d", app->pid);
3902
3903 rcu_read_lock();
3904
3905 if (!app->compatible) {
3906 goto end_no_session;
3907 }
3908
3909 ua_sess = lookup_session_by_app(usess, app);
3910 if (ua_sess == NULL) {
3911 goto end_no_session;
3912 }
3913
3914 pthread_mutex_lock(&ua_sess->lock);
3915
3916 /*
3917 * If started = 0, it means that stop trace has been called for a session
3918 * that was never started. This is possible since a start can fail from
3919 * either the application manager thread or the command thread. Simply
3920 * indicate that this is a stop error.
3921 */
3922 if (!ua_sess->started) {
3923 goto error_rcu_unlock;
3924 }
3925
3926 health_code_update();
3927
3928 /* This inhibits UST tracing */
3929 ret = ustctl_stop_session(app->sock, ua_sess->handle);
3930 if (ret < 0) {
3931 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
3932 ERR("Error stopping tracing for app pid: %d (ret: %d)",
3933 app->pid, ret);
3934 } else {
3935 DBG("UST app stop session failed. Application is dead.");
3936 /*
3937 * This is normal behavior; an application can die at any point,
3938 * including while the session is being stopped. Don't report an error
3939 * so that execution can continue normally.
3940 */
3941 goto end_unlock;
3942 }
3943 goto error_rcu_unlock;
3944 }
3945
3946 health_code_update();
3947
3948 /* Quiescent wait after stopping trace */
3949 ret = ustctl_wait_quiescent(app->sock);
3950 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
3951 ERR("UST app wait quiescent failed for app pid %d ret %d",
3952 app->pid, ret);
3953 }
3954
3955 health_code_update();
3956
3957 registry = get_session_registry(ua_sess);
3958 assert(registry);
3959
3960 if (!registry->metadata_closed) {
3961 /* Push metadata for application before freeing the application. */
3962 (void) push_metadata(registry, ua_sess->consumer);
3963 }
3964
3965 end_unlock:
3966 pthread_mutex_unlock(&ua_sess->lock);
3967 end_no_session:
3968 rcu_read_unlock();
3969 health_code_update();
3970 return 0;
3971
3972 error_rcu_unlock:
3973 pthread_mutex_unlock(&ua_sess->lock);
3974 rcu_read_unlock();
3975 health_code_update();
3976 return -1;
3977 }
3978
3979 /*
3980 * Flush buffers for a specific UST session and app.
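 *
 * Every channel of the application's session is flushed through the
 * application socket; a failure on one channel is logged and the loop
 * simply moves on to the next one.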
3981 */
3982 static
3983 int ust_app_flush_trace(struct ltt_ust_session *usess, struct ust_app *app)
3984 {
3985 int ret = 0;
3986 struct lttng_ht_iter iter;
3987 struct ust_app_session *ua_sess;
3988 struct ust_app_channel *ua_chan;
3989
3990 DBG("Flushing buffers for ust app pid %d", app->pid);
3991
3992 rcu_read_lock();
3993
3994 if (!app->compatible) {
3995 goto end_no_session;
3996 }
3997
3998 ua_sess = lookup_session_by_app(usess, app);
3999 if (ua_sess == NULL) {
4000 goto end_no_session;
4001 }
4002
4003 pthread_mutex_lock(&ua_sess->lock);
4004
4005 health_code_update();
4006
4007 /* Flushing buffers */
4008 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
4009 node.node) {
4010 health_code_update();
4011 assert(ua_chan->is_sent);
4012 ret = ustctl_sock_flush_buffer(app->sock, ua_chan->obj);
4013 if (ret < 0) {
4014 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4015 ERR("UST app PID %d channel %s flush failed with ret %d",
4016 app->pid, ua_chan->name, ret);
4017 } else {
4018 DBG3("UST app failed to flush %s. Application is dead.",
4019 ua_chan->name);
4020 /*
4021 * This is normal behavior; an application can die at any point,
4022 * including while its buffers are being flushed. Don't report an
4023 * error so that execution can continue normally.
4024 */
4025 }
4026 /* Continue flushing the remaining buffers. */
4027 continue;
4028 }
4029 }
4030
4031 health_code_update();
4032
4033 pthread_mutex_unlock(&ua_sess->lock);
4034 end_no_session:
4035 rcu_read_unlock();
4036 health_code_update();
4037 return 0;
4038 }
4039
4040 /*
4041 * Destroy a specific UST session in apps.
4042 */
4043 static int destroy_trace(struct ltt_ust_session *usess, struct ust_app *app)
4044 {
4045 int ret;
4046 struct ust_app_session *ua_sess;
4047 struct lttng_ht_iter iter;
4048 struct lttng_ht_node_u64 *node;
4049
4050 DBG("Destroy tracing for ust app pid %d", app->pid);
4051
4052 rcu_read_lock();
4053
4054 if (!app->compatible) {
4055 goto end;
4056 }
4057
4058 __lookup_session_by_app(usess, app, &iter);
4059 node = lttng_ht_iter_get_node_u64(&iter);
4060 if (node == NULL) {
4061 /* Session is being deleted or has already been deleted. */
4062 goto end;
4063 }
4064 ua_sess = caa_container_of(node, struct ust_app_session, node);
4065
4066 health_code_update();
4067 destroy_app_session(app, ua_sess);
4068
4069 health_code_update();
4070
4071 /* Quiescent wait after stopping trace */
4072 ret = ustctl_wait_quiescent(app->sock);
4073 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4074 ERR("UST app wait quiescent failed for app pid %d ret %d",
4075 app->pid, ret);
4076 }
4077 end:
4078 rcu_read_unlock();
4079 health_code_update();
4080 return 0;
4081 }
4082
4083 /*
4084 * Start tracing for the UST session.
4085 */
4086 int ust_app_start_trace_all(struct ltt_ust_session *usess)
4087 {
4088 int ret = 0;
4089 struct lttng_ht_iter iter;
4090 struct ust_app *app;
4091
4092 DBG("Starting all UST traces");
4093
4094 rcu_read_lock();
4095
4096 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4097 ret = ust_app_start_trace(usess, app);
4098 if (ret < 0) {
4099 /* Continue to the next app even on error. */
4100 continue;
4101 }
4102 }
4103
4104 rcu_read_unlock();
4105
4106 return 0;
4107 }
4108
4109 /*
4110 * Stop tracing for the UST session.
4111 */
4112 int ust_app_stop_trace_all(struct ltt_ust_session *usess)
4113 {
4114 int ret = 0;
4115 struct lttng_ht_iter iter;
4116 struct ust_app *app;
4117
4118 DBG("Stopping all UST traces");
4119
4120 rcu_read_lock();
4121
4122 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4123 ret = ust_app_stop_trace(usess, app);
4124 if (ret < 0) {
4125 /* Continue to the next app even on error. */
4126 continue;
4127 }
4128 }
4129
4130 /* Flush buffers and push metadata (for UID buffers). */
4131 switch (usess->buffer_type) {
4132 case LTTNG_BUFFER_PER_UID:
4133 {
4134 struct buffer_reg_uid *reg;
4135
4136 /* Flush all per UID buffers associated to that session. */
4137 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
4138 struct ust_registry_session *ust_session_reg;
4139 struct buffer_reg_channel *reg_chan;
4140 struct consumer_socket *socket;
4141
4142 /* Get consumer socket to use to push the metadata. */
4143 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
4144 usess->consumer);
4145 if (!socket) {
4146 /* Ignore request if no consumer is found for the session. */
4147 continue;
4148 }
4149
4150 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
4151 reg_chan, node.node) {
4152 /*
4153 * The following call will print error values, so the return
4154 * code is of little importance; whatever happens, we have to
4155 * try them all.
4156 */
4157 (void) consumer_flush_channel(socket, reg_chan->consumer_key);
4158 }
4159
4160 ust_session_reg = reg->registry->reg.ust;
4161 if (!ust_session_reg->metadata_closed) {
4162 /* Push metadata. */
4163 (void) push_metadata(ust_session_reg, usess->consumer);
4164 }
4165 }
4166
4167 break;
4168 }
4169 case LTTNG_BUFFER_PER_PID:
4170 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4171 ret = ust_app_flush_trace(usess, app);
4172 if (ret < 0) {
4173 /* Continue to the next app even on error. */
4174 continue;
4175 }
4176 }
4177 break;
4178 default:
4179 assert(0);
4180 break;
4181 }
4182
4183 rcu_read_unlock();
4184
4185 return 0;
4186 }
4187
4188 /*
4189 * Destroy app UST session.
4190 */
4191 int ust_app_destroy_trace_all(struct ltt_ust_session *usess)
4192 {
4193 int ret = 0;
4194 struct lttng_ht_iter iter;
4195 struct ust_app *app;
4196
4197 DBG("Destroy all UST traces");
4198
4199 rcu_read_lock();
4200
4201 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4202 ret = destroy_trace(usess, app);
4203 if (ret < 0) {
4204 /* Continue to the next app even on error. */
4205 continue;
4206 }
4207 }
4208
4209 rcu_read_unlock();
4210
4211 return 0;
4212 }
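
/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * *_all() entry points above are expected to be driven by the session
 * start/stop/destroy commands, roughly in this order over a session's
 * lifetime. Applications registering while tracing is active are picked
 * up separately through ust_app_global_update().
 *
 *	ret = ust_app_start_trace_all(usess);
 *	...
 *	ret = ust_app_stop_trace_all(usess);
 *	ret = ust_app_destroy_trace_all(usess);
 */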
4213
4214 /*
4215 * Add channels/events from UST global domain to registered apps at sock.
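 *
 * For each channel of the shadow-copied app session: the metadata channel
 * is created through create_ust_app_metadata() and then dropped from the
 * hash table, while regular channels are created on the tracer side,
 * followed by their contexts and events. If the session was already
 * started, tracing is also started for this application.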
4216 */
4217 void ust_app_global_update(struct ltt_ust_session *usess, int sock)
4218 {
4219 int ret = 0;
4220 struct lttng_ht_iter iter, uiter;
4221 struct ust_app *app;
4222 struct ust_app_session *ua_sess = NULL;
4223 struct ust_app_channel *ua_chan;
4224 struct ust_app_event *ua_event;
4225 struct ust_app_ctx *ua_ctx;
4226
4227 assert(usess);
4228 assert(sock >= 0);
4229
4230 DBG2("UST app global update for app sock %d for session id %" PRIu64, sock,
4231 usess->id);
4232
4233 rcu_read_lock();
4234
4235 app = ust_app_find_by_sock(sock);
4236 if (app == NULL) {
4237 /*
4238 * The application can unregister before this point, so this is
4239 * possible; simply stop the update.
4240 */
4241 DBG3("UST app update failed to find app sock %d", sock);
4242 goto error;
4243 }
4244
4245 if (!app->compatible) {
4246 goto error;
4247 }
4248
4249 ret = create_ust_app_session(usess, app, &ua_sess, NULL);
4250 if (ret < 0) {
4251 /* Tracer is probably gone or ENOMEM. */
4252 goto error;
4253 }
4254 assert(ua_sess);
4255
4256 pthread_mutex_lock(&ua_sess->lock);
4257
4258 /*
4259 * We can safely iterate over all UST app sessions here since the
4260 * create_ust_app_session() call above made a shadow copy of the UST
4261 * global domain from the ltt ust session.
4262 */
4263 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
4264 node.node) {
4265 /*
4266 * For a metadata channel, handle it differently.
4267 */
4268 if (!strncmp(ua_chan->name, DEFAULT_METADATA_NAME,
4269 sizeof(ua_chan->name))) {
4270 ret = create_ust_app_metadata(ua_sess, app, usess->consumer,
4271 &ua_chan->attr);
4272 if (ret < 0) {
4273 goto error_unlock;
4274 }
4275 /* Remove it from the hash table and continue! */
4276 ret = lttng_ht_del(ua_sess->channels, &iter);
4277 assert(!ret);
4278 delete_ust_app_channel(-1, ua_chan, app);
4279 continue;
4280 } else {
4281 ret = do_create_channel(app, usess, ua_sess, ua_chan);
4282 if (ret < 0) {
4283 /*
4284 * Stop everything. On error, either the application has failed, no
4285 * more file descriptors are available, or we are out of memory, so
4286 * stopping here is the only thing we can do for now.
4287 */
4288 goto error_unlock;
4289 }
4290 }
4291
4292 /*
4293 * Add contexts using the list so they are enabled in the same order the
4294 * user added them.
4295 */
4296 cds_list_for_each_entry(ua_ctx, &ua_chan->ctx_list, list) {
4297 ret = create_ust_channel_context(ua_chan, ua_ctx, app);
4298 if (ret < 0) {
4299 goto error_unlock;
4300 }
4301 }
4302
4303
4304 /* For each event */
4305 cds_lfht_for_each_entry(ua_chan->events->ht, &uiter.iter, ua_event,
4306 node.node) {
4307 ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
4308 if (ret < 0) {
4309 goto error_unlock;
4310 }
4311 }
4312 }
4313
4314 pthread_mutex_unlock(&ua_sess->lock);
4315
4316 if (usess->start_trace) {
4317 ret = ust_app_start_trace(usess, app);
4318 if (ret < 0) {
4319 goto error;
4320 }
4321
4322 DBG2("UST trace started for app pid %d", app->pid);
4323 }
4324
4325 /* Everything went well at this point. */
4326 rcu_read_unlock();
4327 return;
4328
4329 error_unlock:
4330 pthread_mutex_unlock(&ua_sess->lock);
4331 error:
4332 if (ua_sess) {
4333 destroy_app_session(app, ua_sess);
4334 }
4335 rcu_read_unlock();
4336 return;
4337 }
4338
4339 /*
4340 * Add context to a specific channel for global UST domain.
4341 */
4342 int ust_app_add_ctx_channel_glb(struct ltt_ust_session *usess,
4343 struct ltt_ust_channel *uchan, struct ltt_ust_context *uctx)
4344 {
4345 int ret = 0;
4346 struct lttng_ht_node_str *ua_chan_node;
4347 struct lttng_ht_iter iter, uiter;
4348 struct ust_app_channel *ua_chan = NULL;
4349 struct ust_app_session *ua_sess;
4350 struct ust_app *app;
4351
4352 rcu_read_lock();
4353
4354 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4355 if (!app->compatible) {
4356 /*
4357 * TODO: In time, we should notify the caller of this error by
4358 * telling them that this is a version error.
4359 */
4360 continue;
4361 }
4362 ua_sess = lookup_session_by_app(usess, app);
4363 if (ua_sess == NULL) {
4364 continue;
4365 }
4366
4367 pthread_mutex_lock(&ua_sess->lock);
4368 /* Lookup channel in the ust app session */
4369 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4370 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
4371 if (ua_chan_node == NULL) {
4372 goto next_app;
4373 }
4374 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel,
4375 node);
4376 ret = create_ust_app_channel_context(ua_sess, ua_chan, &uctx->ctx, app);
4377 if (ret < 0) {
4378 goto next_app;
4379 }
4380 next_app:
4381 pthread_mutex_unlock(&ua_sess->lock);
4382 }
4383
4384 rcu_read_unlock();
4385 return ret;
4386 }
4387
4388 /*
4389 * Enable event for a channel from a UST session for a specific PID.
4390 */
4391 int ust_app_enable_event_pid(struct ltt_ust_session *usess,
4392 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent, pid_t pid)
4393 {
4394 int ret = 0;
4395 struct lttng_ht_iter iter;
4396 struct lttng_ht_node_str *ua_chan_node;
4397 struct ust_app *app;
4398 struct ust_app_session *ua_sess;
4399 struct ust_app_channel *ua_chan;
4400 struct ust_app_event *ua_event;
4401
4402 DBG("UST app enabling event %s for PID %d", uevent->attr.name, pid);
4403
4404 rcu_read_lock();
4405
4406 app = ust_app_find_by_pid(pid);
4407 if (app == NULL) {
4408 ERR("UST app enable event per PID %d not found", pid);
4409 ret = -1;
4410 goto end;
4411 }
4412
4413 if (!app->compatible) {
4414 ret = 0;
4415 goto end;
4416 }
4417
4418 ua_sess = lookup_session_by_app(usess, app);
4419 if (!ua_sess) {
4420 /* The application has a problem or is probably dead. */
4421 ret = 0;
4422 goto end;
4423 }
4424
4425 pthread_mutex_lock(&ua_sess->lock);
4426 /* Lookup channel in the ust app session */
4427 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
4428 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
4429 /* If the channel is not found, there is a code flow error */
4430 assert(ua_chan_node);
4431
4432 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4433
4434 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
4435 uevent->filter, uevent->attr.loglevel, uevent->exclusion);
4436 if (ua_event == NULL) {
4437 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
4438 if (ret < 0) {
4439 goto end_unlock;
4440 }
4441 } else {
4442 ret = enable_ust_app_event(ua_sess, ua_event, app);
4443 if (ret < 0) {
4444 goto end_unlock;
4445 }
4446 }
4447
4448 end_unlock:
4449 pthread_mutex_unlock(&ua_sess->lock);
4450 end:
4451 rcu_read_unlock();
4452 return ret;
4453 }
4454
4455 /*
4456 * Disable event for a channel from a UST session for a specific PID.
4457 */
4458 int ust_app_disable_event_pid(struct ltt_ust_session *usess,
4459 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent, pid_t pid)
4460 {
4461 int ret = 0;
4462 struct lttng_ht_iter iter;
4463 struct lttng_ht_node_str *ua_chan_node, *ua_event_node;
4464 struct ust_app *app;
4465 struct ust_app_session *ua_sess;
4466 struct ust_app_channel *ua_chan;
4467 struct ust_app_event *ua_event;
4468
4469 DBG("UST app disabling event %s for PID %d", uevent->attr.name, pid);
4470
4471 rcu_read_lock();
4472
4473 app = ust_app_find_by_pid(pid);
4474 if (app == NULL) {
4475 ERR("UST app disable event per PID %d not found", pid);
4476 ret = -1;
4477 goto error;
4478 }
4479
4480 if (!app->compatible) {
4481 ret = 0;
4482 goto error;
4483 }
4484
4485 ua_sess = lookup_session_by_app(usess, app);
4486 if (!ua_sess) {
4487 /* The application has a problem or is probably dead. */
4488 goto error;
4489 }
4490
4491 /* Lookup channel in the ust app session */
4492 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
4493 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
4494 if (ua_chan_node == NULL) {
4495 /* Channel does not exist, skip disabling */
4496 goto error;
4497 }
4498 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4499
4500 lttng_ht_lookup(ua_chan->events, (void *)uevent->attr.name, &iter);
4501 ua_event_node = lttng_ht_iter_get_node_str(&iter);
4502 if (ua_event_node == NULL) {
4503 /* Event does not exist, skip disabling */
4504 goto error;
4505 }
4506 ua_event = caa_container_of(ua_event_node, struct ust_app_event, node);
4507
4508 ret = disable_ust_app_event(ua_sess, ua_event, app);
4509 if (ret < 0) {
4510 goto error;
4511 }
4512
4513 error:
4514 rcu_read_unlock();
4515 return ret;
4516 }
4517
4518 /*
4519 * Calibrate registered applications.
4520 */
4521 int ust_app_calibrate_glb(struct lttng_ust_calibrate *calibrate)
4522 {
4523 int ret = 0;
4524 struct lttng_ht_iter iter;
4525 struct ust_app *app;
4526
4527 rcu_read_lock();
4528
4529 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4530 if (!app->compatible) {
4531 /*
4532 * TODO: In time, we should notify the caller of this error by
4533 * telling them that this is a version error.
4534 */
4535 continue;
4536 }
4537
4538 health_code_update();
4539
4540 ret = ustctl_calibrate(app->sock, calibrate);
4541 if (ret < 0) {
4542 switch (ret) {
4543 case -ENOSYS:
4544 /* Means that it's not implemented on the tracer side. */
4545 ret = 0;
4546 break;
4547 default:
4548 DBG2("Calibrate app PID %d returned with error %d",
4549 app->pid, ret);
4550 break;
4551 }
4552 }
4553 }
4554
4555 DBG("UST app global domain calibration finished");
4556
4557 rcu_read_unlock();
4558
4559 health_code_update();
4560
4561 return ret;
4562 }
4563
4564 /*
4565 * Receive registration and populate the given msg structure.
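 * The pid, ppid, uid and gid fields are received as 32-bit integers and
 * cast back to their native types before being stored in the message.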
4566 *
4567 * On success return 0 else a negative value returned by the ustctl call.
4568 */
4569 int ust_app_recv_registration(int sock, struct ust_register_msg *msg)
4570 {
4571 int ret;
4572 uint32_t pid, ppid, uid, gid;
4573
4574 assert(msg);
4575
4576 ret = ustctl_recv_reg_msg(sock, &msg->type, &msg->major, &msg->minor,
4577 &pid, &ppid, &uid, &gid,
4578 &msg->bits_per_long,
4579 &msg->uint8_t_alignment,
4580 &msg->uint16_t_alignment,
4581 &msg->uint32_t_alignment,
4582 &msg->uint64_t_alignment,
4583 &msg->long_alignment,
4584 &msg->byte_order,
4585 msg->name);
4586 if (ret < 0) {
4587 switch (-ret) {
4588 case EPIPE:
4589 case ECONNRESET:
4590 case LTTNG_UST_ERR_EXITING:
4591 DBG3("UST app recv reg message failed. Application died");
4592 break;
4593 case LTTNG_UST_ERR_UNSUP_MAJOR:
4594 ERR("UST app recv reg unsupported version %d.%d. Supporting %d.%d",
4595 msg->major, msg->minor, LTTNG_UST_ABI_MAJOR_VERSION,
4596 LTTNG_UST_ABI_MINOR_VERSION);
4597 break;
4598 default:
4599 ERR("UST app recv reg message failed with ret %d", ret);
4600 break;
4601 }
4602 goto error;
4603 }
4604 msg->pid = (pid_t) pid;
4605 msg->ppid = (pid_t) ppid;
4606 msg->uid = (uid_t) uid;
4607 msg->gid = (gid_t) gid;
4608
4609 error:
4610 return ret;
4611 }
4612
4613 /*
4614 * Return a ust app channel object using the application object and the channel
4615 * object descriptor as a key. If not found, NULL is returned. An RCU read side
4616 * lock MUST be acquired before calling this function.
4617 */
4618 static struct ust_app_channel *find_channel_by_objd(struct ust_app *app,
4619 int objd)
4620 {
4621 struct lttng_ht_node_ulong *node;
4622 struct lttng_ht_iter iter;
4623 struct ust_app_channel *ua_chan = NULL;
4624
4625 assert(app);
4626
4627 lttng_ht_lookup(app->ust_objd, (void *)((unsigned long) objd), &iter);
4628 node = lttng_ht_iter_get_node_ulong(&iter);
4629 if (node == NULL) {
4630 DBG2("UST app channel find by objd %d not found", objd);
4631 goto error;
4632 }
4633
4634 ua_chan = caa_container_of(node, struct ust_app_channel, ust_objd_node);
4635
4636 error:
4637 return ua_chan;
4638 }
4639
4640 /*
4641 * Reply to a register channel notification from an application on the notify
4642 * socket. The channel metadata is also created.
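 *
 * This function takes ownership of the fields array: it is either stored
 * in the channel registry (first registration) or freed here.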
4643 *
4644 * The session UST registry lock is acquired in this function.
4645 *
4646 * On success 0 is returned else a negative value.
4647 */
4648 static int reply_ust_register_channel(int sock, int sobjd, int cobjd,
4649 size_t nr_fields, struct ustctl_field *fields)
4650 {
4651 int ret, ret_code = 0;
4652 uint32_t chan_id, reg_count;
4653 uint64_t chan_reg_key;
4654 enum ustctl_channel_header type;
4655 struct ust_app *app;
4656 struct ust_app_channel *ua_chan;
4657 struct ust_app_session *ua_sess;
4658 struct ust_registry_session *registry;
4659 struct ust_registry_channel *chan_reg;
4660
4661 rcu_read_lock();
4662
4663 /* Lookup application. If not found, there is a code flow error. */
4664 app = find_app_by_notify_sock(sock);
4665 if (!app) {
4666 DBG("Application socket %d is being torn down. Abort event notify",
4667 sock);
4668 ret = 0;
4669 free(fields);
4670 goto error_rcu_unlock;
4671 }
4672
4673 /* Lookup channel by UST object descriptor. */
4674 ua_chan = find_channel_by_objd(app, cobjd);
4675 if (!ua_chan) {
4676 DBG("Application channel is being torn down. Abort event notify");
4677 ret = 0;
4678 free(fields);
4679 goto error_rcu_unlock;
4680 }
4681
4682 assert(ua_chan->session);
4683 ua_sess = ua_chan->session;
4684
4685 /* Get right session registry depending on the session buffer type. */
4686 registry = get_session_registry(ua_sess);
4687 assert(registry);
4688
4689 /* Depending on the buffer type, a different channel key is used. */
4690 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
4691 chan_reg_key = ua_chan->tracing_channel_id;
4692 } else {
4693 chan_reg_key = ua_chan->key;
4694 }
4695
4696 pthread_mutex_lock(&registry->lock);
4697
4698 chan_reg = ust_registry_channel_find(registry, chan_reg_key);
4699 assert(chan_reg);
4700
4701 if (!chan_reg->register_done) {
4702 reg_count = ust_registry_get_event_count(chan_reg);
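		/*
		 * Pick the event header layout for this channel: the compact
		 * header only leaves room for a small event ID space, so 31 is
		 * used here as the cutoff before falling back to the large
		 * header for channels with many events.
		 */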
4703 if (reg_count < 31) {
4704 type = USTCTL_CHANNEL_HEADER_COMPACT;
4705 } else {
4706 type = USTCTL_CHANNEL_HEADER_LARGE;
4707 }
4708
4709 chan_reg->nr_ctx_fields = nr_fields;
4710 chan_reg->ctx_fields = fields;
4711 chan_reg->header_type = type;
4712 } else {
4713 /* Get current already assigned values. */
4714 type = chan_reg->header_type;
4715 free(fields);
4716 /* Set to NULL so the error path does not do a double free. */
4717 fields = NULL;
4718 }
4719 /* Channel id is set during the object creation. */
4720 chan_id = chan_reg->chan_id;
4721
4722 /* Append to metadata */
4723 if (!chan_reg->metadata_dumped) {
4724 ret_code = ust_metadata_channel_statedump(registry, chan_reg);
4725 if (ret_code) {
4726 ERR("Error appending channel metadata (errno = %d)", ret_code);
4727 goto reply;
4728 }
4729 }
4730
4731 reply:
4732 DBG3("UST app replying to register channel key %" PRIu64
4733 " with id %u, type: %d, ret: %d", chan_reg_key, chan_id, type,
4734 ret_code);
4735
4736 ret = ustctl_reply_register_channel(sock, chan_id, type, ret_code);
4737 if (ret < 0) {
4738 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4739 ERR("UST app reply channel failed with ret %d", ret);
4740 } else {
4741 DBG3("UST app reply channel failed. Application died");
4742 }
4743 goto error;
4744 }
4745
4746 /* This channel registry registration is completed. */
4747 chan_reg->register_done = 1;
4748
4749 error:
4750 pthread_mutex_unlock(&registry->lock);
4751 error_rcu_unlock:
4752 rcu_read_unlock();
4753 if (ret) {
4754 free(fields);
4755 }
4756 return ret;
4757 }
4758
4759 /*
4760 * Add event to the UST channel registry. When the event is added to the
4761 * registry, the metadata is also created. Once done, this replies to the
4762 * application with the appropriate error code.
4763 *
4764 * The session UST registry lock is acquired in the function.
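 *
 * Ownership of sig, fields and model_emf_uri is handed over to
 * ust_registry_create_event(), which frees them if needed.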
4765 *
4766 * On success 0 is returned else a negative value.
4767 */
4768 static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
4769 char *sig, size_t nr_fields, struct ustctl_field *fields, int loglevel,
4770 char *model_emf_uri)
4771 {
4772 int ret, ret_code;
4773 uint32_t event_id = 0;
4774 uint64_t chan_reg_key;
4775 struct ust_app *app;
4776 struct ust_app_channel *ua_chan;
4777 struct ust_app_session *ua_sess;
4778 struct ust_registry_session *registry;
4779
4780 rcu_read_lock();
4781
4782 /* Lookup application. If not found, there is a code flow error. */
4783 app = find_app_by_notify_sock(sock);
4784 if (!app) {
4785 DBG("Application socket %d is being torn down. Abort event notify",
4786 sock);
4787 ret = 0;
4788 free(sig);
4789 free(fields);
4790 free(model_emf_uri);
4791 goto error_rcu_unlock;
4792 }
4793
4794 /* Lookup channel by UST object descriptor. */
4795 ua_chan = find_channel_by_objd(app, cobjd);
4796 if (!ua_chan) {
4797 DBG("Application channel is being torn down. Abort event notify");
4798 ret = 0;
4799 free(sig);
4800 free(fields);
4801 free(model_emf_uri);
4802 goto error_rcu_unlock;
4803 }
4804
4805 assert(ua_chan->session);
4806 ua_sess = ua_chan->session;
4807
4808 registry = get_session_registry(ua_sess);
4809 assert(registry);
4810
4811 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
4812 chan_reg_key = ua_chan->tracing_channel_id;
4813 } else {
4814 chan_reg_key = ua_chan->key;
4815 }
4816
4817 pthread_mutex_lock(&registry->lock);
4818
4819 /*
4820 * From this point on, this call acquires ownership of the sig, fields
4821 * and model_emf_uri, meaning any frees are done inside it if needed. These
4822 * three variables MUST NOT be read or written after this.
4823 */
4824 ret_code = ust_registry_create_event(registry, chan_reg_key,
4825 sobjd, cobjd, name, sig, nr_fields, fields, loglevel,
4826 model_emf_uri, ua_sess->buffer_type, &event_id,
4827 app);
4828
4829 /*
4830 * The return value is sent back through ustctl so that, on error, the
4831 * application can be notified. It's important not to return a negative
4832 * error here or else the application will get closed.
4833 */
4834 ret = ustctl_reply_register_event(sock, event_id, ret_code);
4835 if (ret < 0) {
4836 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4837 ERR("UST app reply event failed with ret %d", ret);
4838 } else {
4839 DBG3("UST app reply event failed. Application died");
4840 }
4841 /*
4842 * No need to wipe the created event since the application socket will
4843 * get closed on error, hence cleaning up everything by itself.
4844 */
4845 goto error;
4846 }
4847
4848 DBG3("UST registry event %s with id %" PRId32 " added successfully",
4849 name, event_id);
4850
4851 error:
4852 pthread_mutex_unlock(&registry->lock);
4853 error_rcu_unlock:
4854 rcu_read_unlock();
4855 return ret;
4856 }
4857
4858 /*
4859 * Handle application notification through the given notify socket.
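 *
 * Two commands are currently handled: USTCTL_NOTIFY_CMD_EVENT, which adds
 * an event to the UST registry and replies with its event ID, and
 * USTCTL_NOTIFY_CMD_CHANNEL, which records the channel's context fields
 * and replies with the channel ID and header type.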
4860 *
4861 * Return 0 on success or else a negative value.
4862 */
4863 int ust_app_recv_notify(int sock)
4864 {
4865 int ret;
4866 enum ustctl_notify_cmd cmd;
4867
4868 DBG3("UST app receiving notify from sock %d", sock);
4869
4870 ret = ustctl_recv_notify(sock, &cmd);
4871 if (ret < 0) {
4872 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4873 ERR("UST app recv notify failed with ret %d", ret);
4874 } else {
4875 DBG3("UST app recv notify failed. Application died");
4876 }
4877 goto error;
4878 }
4879
4880 switch (cmd) {
4881 case USTCTL_NOTIFY_CMD_EVENT:
4882 {
4883 int sobjd, cobjd, loglevel;
4884 char name[LTTNG_UST_SYM_NAME_LEN], *sig, *model_emf_uri;
4885 size_t nr_fields;
4886 struct ustctl_field *fields;
4887
4888 DBG2("UST app ustctl register event received");
4889
4890 ret = ustctl_recv_register_event(sock, &sobjd, &cobjd, name, &loglevel,
4891 &sig, &nr_fields, &fields, &model_emf_uri);
4892 if (ret < 0) {
4893 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4894 ERR("UST app recv event failed with ret %d", ret);
4895 } else {
4896 DBG3("UST app recv event failed. Application died");
4897 }
4898 goto error;
4899 }
4900
4901 /*
4902 * Add event to the UST registry coming from the notify socket. This
4903 * call will free the sig, fields and model_emf_uri if needed. This
4904 * code path loses ownership of these variables and transfers them
4905 * to the called function.
4906 */
4907 ret = add_event_ust_registry(sock, sobjd, cobjd, name, sig, nr_fields,
4908 fields, loglevel, model_emf_uri);
4909 if (ret < 0) {
4910 goto error;
4911 }
4912
4913 break;
4914 }
4915 case USTCTL_NOTIFY_CMD_CHANNEL:
4916 {
4917 int sobjd, cobjd;
4918 size_t nr_fields;
4919 struct ustctl_field *fields;
4920
4921 DBG2("UST app ustctl register channel received");
4922
4923 ret = ustctl_recv_register_channel(sock, &sobjd, &cobjd, &nr_fields,
4924 &fields);
4925 if (ret < 0) {
4926 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4927 ERR("UST app recv channel failed with ret %d", ret);
4928 } else {
4929 DBG3("UST app recv channel failed. Application died");
4930 }
4931 goto error;
4932 }
4933
4934 /*
4935 * Ownership of the fields is transferred to this function call, meaning
4936 * that they will be freed if needed. After this, it's invalid to access
4937 * or clean up the fields.
4938 */
4939 ret = reply_ust_register_channel(sock, sobjd, cobjd, nr_fields,
4940 fields);
4941 if (ret < 0) {
4942 goto error;
4943 }
4944
4945 break;
4946 }
4947 default:
4948 /* Should NEVER happen. */
4949 assert(0);
4950 }
4951
4952 error:
4953 return ret;
4954 }
4955
4956 /*
4957 * Once the notify socket hangs up, this is called. First, it tries to find the
4958 * corresponding application. On failure, the call_rcu to close the socket is
4959 * executed. If an application is found, it tries to delete it from the notify
4960 * socket hash table. Whatever the result, it proceeds to the call_rcu.
4961 *
4962 * Note that an object needs to be allocated here so on ENOMEM failure, the
4963 * call RCU is not done but the rest of the cleanup is.
4964 */
4965 void ust_app_notify_sock_unregister(int sock)
4966 {
4967 int err_enomem = 0;
4968 struct lttng_ht_iter iter;
4969 struct ust_app *app;
4970 struct ust_app_notify_sock_obj *obj;
4971
4972 assert(sock >= 0);
4973
4974 rcu_read_lock();
4975
4976 obj = zmalloc(sizeof(*obj));
4977 if (!obj) {
4978 /*
4979 * An ENOMEM is kind of uncool. If this strikes we continue the
4980 * procedure but the call_rcu will not be called. In this case, we
4981 * accept the fd leak rather than possibly creating an unsynchronized
4982 * state between threads.
4983 *
4984 * TODO: The notify object should be created once the notify socket is
4985 * registered and stored independently from the ust app object. The
4986 * tricky part is to synchronize the teardown of the application and
4987 * this notify object. Let's keep that in mind so we can avoid this
4988 * kind of shenanigans with ENOMEM in the teardown path.
4989 */
4990 err_enomem = 1;
4991 } else {
4992 obj->fd = sock;
4993 }
4994
4995 DBG("UST app notify socket unregister %d", sock);
4996
4997 /*
4998 * Lookup application by notify socket. If this fails, this means that the
4999 * hash table delete has already been done by the application
5000 * unregistration process so we can safely close the notify socket in a
5001 * call RCU.
5002 */
5003 app = find_app_by_notify_sock(sock);
5004 if (!app) {
5005 goto close_socket;
5006 }
5007
5008 iter.iter.node = &app->notify_sock_n.node;
5009
5010 /*
5011 * Whatever happens here, whether we fail or succeed, we have to close
5012 * the socket after a grace period, so continue to the call RCU here.
5013 * If the deletion is successful, the application is no longer visible
5014 * to other threads, and if it fails it means that it was already
5015 * deleted from the hash table, so either way we just have to close the
5016 * socket.
5017 */
5018 (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
5019
5020 close_socket:
5021 rcu_read_unlock();
5022
5023 /*
5024 * Close the socket after a grace period to avoid it being reused before
5025 * the application object is freed, creating a potential race between
5026 * threads trying to add a unique entry to the global hash table.
5027 */
5028 if (!err_enomem) {
5029 call_rcu(&obj->head, close_notify_sock_rcu);
5030 }
5031 }
5032
5033 /*
5034 * Destroy a ust app data structure and free its memory.
5035 */
5036 void ust_app_destroy(struct ust_app *app)
5037 {
5038 if (!app) {
5039 return;
5040 }
5041
5042 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
5043 }
5044
5045 /*
5046 * Take a snapshot for a given UST session. The snapshot is sent to the given
5047 * output.
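 *
 * For per-UID buffers, the channels and metadata of every UID buffer
 * registry attached to the session are snapshotted; for per-PID buffers,
 * the same is done for every registered application's own channels and
 * metadata.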
5048 *
5049 * Return 0 on success or else a negative value.
5050 */
5051 int ust_app_snapshot_record(struct ltt_ust_session *usess,
5052 struct snapshot_output *output, int wait, unsigned int nb_streams)
5053 {
5054 int ret = 0;
5055 struct lttng_ht_iter iter;
5056 struct ust_app *app;
5057 char pathname[PATH_MAX];
5058 uint64_t max_stream_size = 0;
5059
5060 assert(usess);
5061 assert(output);
5062
5063 rcu_read_lock();
5064
5065 /*
5066 * Compute the maximum size of a single stream if a max size is requested by
5067 * the caller.
5068 */
5069 if (output->max_size > 0 && nb_streams > 0) {
5070 max_stream_size = output->max_size / nb_streams;
5071 }
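	/*
	 * Example with illustrative numbers: a 16 MiB max_size split across
	 * 4 streams caps each stream at 4 MiB; every channel's sub-buffer
	 * size is checked against this per-stream limit further down.
	 */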
5072
5073 switch (usess->buffer_type) {
5074 case LTTNG_BUFFER_PER_UID:
5075 {
5076 struct buffer_reg_uid *reg;
5077
5078 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
5079 struct buffer_reg_channel *reg_chan;
5080 struct consumer_socket *socket;
5081
5082 /* Get consumer socket to use to push the metadata. */
5083 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
5084 usess->consumer);
5085 if (!socket) {
5086 ret = -EINVAL;
5087 goto error;
5088 }
5089
5090 memset(pathname, 0, sizeof(pathname));
5091 ret = snprintf(pathname, sizeof(pathname),
5092 DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH,
5093 reg->uid, reg->bits_per_long);
5094 if (ret < 0) {
5095 PERROR("snprintf snapshot path");
5096 goto error;
5097 }
5098
5099 /* Snapshot every channel of this per-UID registry. */
5100 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
5101 reg_chan, node.node) {
5102
5103 /*
5104 * Make sure the maximum stream size is not lower than the
5105 * subbuffer size or else it's an error since we won't be able to
5106 * snapshot anything.
5107 */
5108 if (max_stream_size &&
5109 reg_chan->subbuf_size > max_stream_size) {
5110 ret = -EINVAL;
5111 DBG3("UST app snapshot record maximum stream size %" PRIu64
5112 " is smaller than subbuffer size of %zu",
5113 max_stream_size, reg_chan->subbuf_size);
5114 goto error;
5115 }
5116 ret = consumer_snapshot_channel(socket, reg_chan->consumer_key, output, 0,
5117 usess->uid, usess->gid, pathname, wait,
5118 max_stream_size);
5119 if (ret < 0) {
5120 goto error;
5121 }
5122 }
5123 ret = consumer_snapshot_channel(socket, reg->registry->reg.ust->metadata_key, output,
5124 1, usess->uid, usess->gid, pathname, wait,
5125 max_stream_size);
5126 if (ret < 0) {
5127 goto error;
5128 }
5129 }
5130 break;
5131 }
5132 case LTTNG_BUFFER_PER_PID:
5133 {
5134 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5135 struct consumer_socket *socket;
5136 struct lttng_ht_iter chan_iter;
5137 struct ust_app_channel *ua_chan;
5138 struct ust_app_session *ua_sess;
5139 struct ust_registry_session *registry;
5140
5141 ua_sess = lookup_session_by_app(usess, app);
5142 if (!ua_sess) {
5143 /* Session not associated with this app. */
5144 continue;
5145 }
5146
5147 /* Get the right consumer socket for the application. */
5148 socket = consumer_find_socket_by_bitness(app->bits_per_long,
5149 output->consumer);
5150 if (!socket) {
5151 ret = -EINVAL;
5152 goto error;
5153 }
5154
5155 /* Add the UST default trace dir to path. */
5156 memset(pathname, 0, sizeof(pathname));
5157 ret = snprintf(pathname, sizeof(pathname), DEFAULT_UST_TRACE_DIR "/%s",
5158 ua_sess->path);
5159 if (ret < 0) {
5160 PERROR("snprintf snapshot path");
5161 goto error;
5162 }
5163
5164 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
5165 ua_chan, node.node) {
5166 /*
5167 * Make sure the maximum stream size is not lower than the
5168 * subbuffer size or else it's an error since we won't be able to
5169 * snapshot anything.
5170 */
5171 if (max_stream_size &&
5172 ua_chan->attr.subbuf_size > max_stream_size) {
5173 ret = -EINVAL;
5174 DBG3("UST app snapshot record maximum stream size %" PRIu64
5175 " is smaller than subbuffer size of %" PRIu64,
5176 max_stream_size, ua_chan->attr.subbuf_size);
5177 goto error;
5178 }
5179
5180 ret = consumer_snapshot_channel(socket, ua_chan->key, output, 0,
5181 ua_sess->euid, ua_sess->egid, pathname, wait,
5182 max_stream_size);
5183 if (ret < 0) {
5184 goto error;
5185 }
5186 }
5187
5188 registry = get_session_registry(ua_sess);
5189 assert(registry);
5190 ret = consumer_snapshot_channel(socket, registry->metadata_key, output,
5191 1, ua_sess->euid, ua_sess->egid, pathname, wait,
5192 max_stream_size);
5193 if (ret < 0) {
5194 goto error;
5195 }
5196 }
5197 break;
5198 }
5199 default:
5200 assert(0);
5201 break;
5202 }
5203
5204 error:
5205 rcu_read_unlock();
5206 return ret;
5207 }
5208
5209 /*
5210 * Return the number of streams for a UST session.
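 *
 * For per-UID buffers this sums the stream count of every channel in each
 * UID buffer registry; for per-PID buffers it sums the stream count of
 * every channel of every registered application's session.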
5211 */
5212 unsigned int ust_app_get_nb_stream(struct ltt_ust_session *usess)
5213 {
5214 unsigned int ret = 0;
5215 struct ust_app *app;
5216 struct lttng_ht_iter iter;
5217
5218 assert(usess);
5219
5220 switch (usess->buffer_type) {
5221 case LTTNG_BUFFER_PER_UID:
5222 {
5223 struct buffer_reg_uid *reg;
5224
5225 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
5226 struct buffer_reg_channel *reg_chan;
5227
5228 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
5229 reg_chan, node.node) {
5230 ret += reg_chan->stream_count;
5231 }
5232 }
5233 break;
5234 }
5235 case LTTNG_BUFFER_PER_PID:
5236 {
5237 rcu_read_lock();
5238 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5239 struct ust_app_channel *ua_chan;
5240 struct ust_app_session *ua_sess;
5241 struct lttng_ht_iter chan_iter;
5242
5243 ua_sess = lookup_session_by_app(usess, app);
5244 if (!ua_sess) {
5245 /* Session not associated with this app. */
5246 continue;
5247 }
5248
5249 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
5250 ua_chan, node.node) {
5251 ret += ua_chan->streams.count;
5252 }
5253 }
5254 rcu_read_unlock();
5255 break;
5256 }
5257 default:
5258 assert(0);
5259 break;
5260 }
5261
5262 return ret;
5263 }