9f9919e637722bf0b7eab078edea58a9c25803d2
[lttng-tools.git] / src / bin / lttng-sessiond / ust-app.c
1 /*
2 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License, version 2 only,
6 * as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License along
14 * with this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
16 */
17
18 #define _GNU_SOURCE
19 #include <errno.h>
20 #include <inttypes.h>
21 #include <pthread.h>
22 #include <stdio.h>
23 #include <stdlib.h>
24 #include <string.h>
25 #include <sys/stat.h>
26 #include <sys/types.h>
27 #include <unistd.h>
28 #include <urcu/compiler.h>
29 #include <lttng/ust-error.h>
30 #include <signal.h>
31
32 #include <common/common.h>
33 #include <common/sessiond-comm/sessiond-comm.h>
34
35 #include "buffer-registry.h"
36 #include "fd-limit.h"
37 #include "health-sessiond.h"
38 #include "ust-app.h"
39 #include "ust-consumer.h"
40 #include "ust-ctl.h"
41 #include "utils.h"
42
43 /* Next available channel key. Access under next_channel_key_lock. */
44 static uint64_t _next_channel_key;
45 static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;
46
47 /* Next available session ID. Access under next_session_id_lock. */
48 static uint64_t _next_session_id;
49 static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;
50
51 /*
52 * Return the incremented value of next_channel_key.
53 */
54 static uint64_t get_next_channel_key(void)
55 {
56 uint64_t ret;
57
58 pthread_mutex_lock(&next_channel_key_lock);
59 ret = ++_next_channel_key;
60 pthread_mutex_unlock(&next_channel_key_lock);
61 return ret;
62 }
63
64 /*
65 * Return the atomically incremented value of next_session_id.
66 */
67 static uint64_t get_next_session_id(void)
68 {
69 uint64_t ret;
70
71 pthread_mutex_lock(&next_session_id_lock);
72 ret = ++_next_session_id;
73 pthread_mutex_unlock(&next_session_id_lock);
74 return ret;
75 }
76
77 static void copy_channel_attr_to_ustctl(
78 struct ustctl_consumer_channel_attr *attr,
79 struct lttng_ust_channel_attr *uattr)
80 {
81 /* Copy event attributes since the layout is different. */
82 attr->subbuf_size = uattr->subbuf_size;
83 attr->num_subbuf = uattr->num_subbuf;
84 attr->overwrite = uattr->overwrite;
85 attr->switch_timer_interval = uattr->switch_timer_interval;
86 attr->read_timer_interval = uattr->read_timer_interval;
87 attr->output = uattr->output;
88 }
89
90 /*
91 * Match function for the hash table lookup.
92 *
93 * It matches an ust app event based on three attributes which are the event
94 * name, the filter bytecode and the loglevel.
95 */
96 static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
97 {
98 struct ust_app_event *event;
99 const struct ust_app_ht_key *key;
100
101 assert(node);
102 assert(_key);
103
104 event = caa_container_of(node, struct ust_app_event, node.node);
105 key = _key;
106
107 /* Match the 4 elements of the key: name, filter, loglevel, exclusions */
108
109 /* Event name */
110 if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
111 goto no_match;
112 }
113
114 /* Event loglevel. */
115 if (event->attr.loglevel != key->loglevel) {
116 if (event->attr.loglevel_type == LTTNG_UST_LOGLEVEL_ALL
117 && key->loglevel == 0 && event->attr.loglevel == -1) {
118 /*
119 * Match is accepted. This is because on event creation, the
120 * loglevel is set to -1 if the event loglevel type is ALL so 0 and
121 * -1 are accepted for this loglevel type since 0 is the one set by
122 * the API when receiving an enable event.
123 */
124 } else {
125 goto no_match;
126 }
127 }
128
129 /* One of the filters is NULL, fail. */
130 if ((key->filter && !event->filter) || (!key->filter && event->filter)) {
131 goto no_match;
132 }
133
134 if (key->filter && event->filter) {
135 /* Both filters exists, check length followed by the bytecode. */
136 if (event->filter->len != key->filter->len ||
137 memcmp(event->filter->data, key->filter->data,
138 event->filter->len) != 0) {
139 goto no_match;
140 }
141 }
142
143 /* One of the exclusions is NULL, fail. */
144 if ((key->exclusion && !event->exclusion) || (!key->exclusion && event->exclusion)) {
145 goto no_match;
146 }
147
148 if (key->exclusion && event->exclusion) {
149 /* Both exclusions exists, check count followed by the names. */
150 if (event->exclusion->count != key->exclusion->count ||
151 memcmp(event->exclusion->names, key->exclusion->names,
152 event->exclusion->count * LTTNG_UST_SYM_NAME_LEN) != 0) {
153 goto no_match;
154 }
155 }
156
157
158 /* Match. */
159 return 1;
160
161 no_match:
162 return 0;
163 }
164
165 /*
166 * Unique add of an ust app event in the given ht. This uses the custom
167 * ht_match_ust_app_event match function and the event name as hash.
168 */
169 static void add_unique_ust_app_event(struct ust_app_channel *ua_chan,
170 struct ust_app_event *event)
171 {
172 struct cds_lfht_node *node_ptr;
173 struct ust_app_ht_key key;
174 struct lttng_ht *ht;
175
176 assert(ua_chan);
177 assert(ua_chan->events);
178 assert(event);
179
180 ht = ua_chan->events;
181 key.name = event->attr.name;
182 key.filter = event->filter;
183 key.loglevel = event->attr.loglevel;
184 key.exclusion = event->exclusion;
185
186 node_ptr = cds_lfht_add_unique(ht->ht,
187 ht->hash_fct(event->node.key, lttng_ht_seed),
188 ht_match_ust_app_event, &key, &event->node.node);
189 assert(node_ptr == &event->node.node);
190 }
191
192 /*
193 * Close the notify socket from the given RCU head object. This MUST be called
194 * through a call_rcu().
195 */
196 static void close_notify_sock_rcu(struct rcu_head *head)
197 {
198 int ret;
199 struct ust_app_notify_sock_obj *obj =
200 caa_container_of(head, struct ust_app_notify_sock_obj, head);
201
202 /* Must have a valid fd here. */
203 assert(obj->fd >= 0);
204
205 ret = close(obj->fd);
206 if (ret) {
207 ERR("close notify sock %d RCU", obj->fd);
208 }
209 lttng_fd_put(LTTNG_FD_APPS, 1);
210
211 free(obj);
212 }
213
214 /*
215 * Return the session registry according to the buffer type of the given
216 * session.
217 *
218 * A registry per UID object MUST exists before calling this function or else
219 * it assert() if not found. RCU read side lock must be acquired.
220 */
221 static struct ust_registry_session *get_session_registry(
222 struct ust_app_session *ua_sess)
223 {
224 struct ust_registry_session *registry = NULL;
225
226 assert(ua_sess);
227
228 switch (ua_sess->buffer_type) {
229 case LTTNG_BUFFER_PER_PID:
230 {
231 struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
232 if (!reg_pid) {
233 goto error;
234 }
235 registry = reg_pid->registry->reg.ust;
236 break;
237 }
238 case LTTNG_BUFFER_PER_UID:
239 {
240 struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
241 ua_sess->tracing_id, ua_sess->bits_per_long, ua_sess->uid);
242 if (!reg_uid) {
243 goto error;
244 }
245 registry = reg_uid->registry->reg.ust;
246 break;
247 }
248 default:
249 assert(0);
250 };
251
252 error:
253 return registry;
254 }
255
256 /*
257 * Delete ust context safely. RCU read lock must be held before calling
258 * this function.
259 */
260 static
261 void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx)
262 {
263 int ret;
264
265 assert(ua_ctx);
266
267 if (ua_ctx->obj) {
268 ret = ustctl_release_object(sock, ua_ctx->obj);
269 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
270 ERR("UST app sock %d release ctx obj handle %d failed with ret %d",
271 sock, ua_ctx->obj->handle, ret);
272 }
273 free(ua_ctx->obj);
274 }
275 free(ua_ctx);
276 }
277
278 /*
279 * Delete ust app event safely. RCU read lock must be held before calling
280 * this function.
281 */
282 static
283 void delete_ust_app_event(int sock, struct ust_app_event *ua_event)
284 {
285 int ret;
286
287 assert(ua_event);
288
289 free(ua_event->filter);
290 if (ua_event->exclusion != NULL)
291 free(ua_event->exclusion);
292 if (ua_event->obj != NULL) {
293 ret = ustctl_release_object(sock, ua_event->obj);
294 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
295 ERR("UST app sock %d release event obj failed with ret %d",
296 sock, ret);
297 }
298 free(ua_event->obj);
299 }
300 free(ua_event);
301 }
302
303 /*
304 * Release ust data object of the given stream.
305 *
306 * Return 0 on success or else a negative value.
307 */
308 static int release_ust_app_stream(int sock, struct ust_app_stream *stream)
309 {
310 int ret = 0;
311
312 assert(stream);
313
314 if (stream->obj) {
315 ret = ustctl_release_object(sock, stream->obj);
316 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
317 ERR("UST app sock %d release stream obj failed with ret %d",
318 sock, ret);
319 }
320 lttng_fd_put(LTTNG_FD_APPS, 2);
321 free(stream->obj);
322 }
323
324 return ret;
325 }
326
/*
 * Release and free a UST app stream. RCU read lock must be held before
 * calling this function.
 */
static
void delete_ust_app_stream(int sock, struct ust_app_stream *stream)
{
	assert(stream);

	/* Failures are already logged inside the release helper. */
	(void) release_ust_app_stream(sock, stream);
	free(stream);
}
339
340 /*
341 * We need to execute ht_destroy outside of RCU read-side critical
342 * section and outside of call_rcu thread, so we postpone its execution
343 * using ht_cleanup_push. It is simpler than to change the semantic of
344 * the many callers of delete_ust_app_session().
345 */
346 static
347 void delete_ust_app_channel_rcu(struct rcu_head *head)
348 {
349 struct ust_app_channel *ua_chan =
350 caa_container_of(head, struct ust_app_channel, rcu_head);
351
352 ht_cleanup_push(ua_chan->ctx);
353 ht_cleanup_push(ua_chan->events);
354 free(ua_chan);
355 }
356
/*
 * Delete ust app channel safely. RCU read lock must be held before calling
 * this function.
 *
 * Tears down, in order: the channel's streams, its contexts, its events,
 * the per-PID registry entry for the channel (when applicable) and the
 * tracer-side channel object, then defers the final free of the channel
 * structure to an RCU callback.
 */
static
void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;
	struct ust_app_stream *stream, *stmp;
	struct ust_registry_session *registry;

	assert(ua_chan);

	DBG3("UST app deleting channel %s", ua_chan->name);

	/* Wipe stream */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		cds_list_del(&stream->list);
		delete_ust_app_stream(sock, stream);
	}

	/* Wipe context */
	cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
		cds_list_del(&ua_ctx->list);
		ret = lttng_ht_del(ua_chan->ctx, &iter);
		/* Deletion cannot fail: we hold the only path to this node. */
		assert(!ret);
		delete_ust_app_ctx(sock, ua_ctx);
	}

	/* Wipe events */
	cds_lfht_for_each_entry(ua_chan->events->ht, &iter.iter, ua_event,
			node.node) {
		ret = lttng_ht_del(ua_chan->events, &iter);
		assert(!ret);
		delete_ust_app_event(sock, ua_event);
	}

	if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
		/* Wipe and free registry from session registry. */
		registry = get_session_registry(ua_chan->session);
		if (registry) {
			ust_registry_channel_del_free(registry, ua_chan->key);
		}
	}

	if (ua_chan->obj != NULL) {
		/* Remove channel from application UST object descriptor. */
		iter.iter.node = &ua_chan->ust_objd_node.node;
		lttng_ht_del(app->ust_objd, &iter);
		ret = ustctl_release_object(sock, ua_chan->obj);
		/* EPIPE/EXITING mean the application already died. */
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release channel obj failed with ret %d",
					sock, ret);
		}
		/* The channel object accounted for one fd slot. */
		lttng_fd_put(LTTNG_FD_APPS, 1);
		free(ua_chan->obj);
	}
	/* Defer the final free; RCU readers may still hold references. */
	call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
}
420
/*
 * Push metadata to consumer socket.
 *
 * The socket lock MUST be acquired.
 * The ust app session lock MUST be acquired.
 *
 * When send_zero_data is non-zero, a zero-length push is still sent to
 * the consumer even if no new metadata is pending.
 *
 * On success, return the len of metadata pushed or else a negative value.
 */
ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
		struct consumer_socket *socket, int send_zero_data)
{
	int ret;
	char *metadata_str = NULL;
	size_t len, offset;
	ssize_t ret_val;

	assert(registry);
	assert(socket);

	/*
	 * On a push metadata error either the consumer is dead or the metadata
	 * channel has been destroyed because its endpoint might have died (e.g:
	 * relayd). If so, the metadata closed flag is set to 1 so we deny pushing
	 * metadata again which is not valid anymore on the consumer side.
	 *
	 * The ust app session mutex locked allows us to make this check without
	 * the registry lock.
	 */
	if (registry->metadata_closed) {
		return -EPIPE;
	}

	pthread_mutex_lock(&registry->lock);

	/* Window of metadata not yet sent: [metadata_len_sent, metadata_len). */
	offset = registry->metadata_len_sent;
	len = registry->metadata_len - registry->metadata_len_sent;
	if (len == 0) {
		DBG3("No metadata to push for metadata key %" PRIu64,
				registry->metadata_key);
		ret_val = len;
		if (send_zero_data) {
			DBG("No metadata to push");
			/* metadata_str stays NULL here; a zero-length push is sent. */
			goto push_data;
		}
		goto end;
	}

	/* Allocate only what we have to send. */
	metadata_str = zmalloc(len);
	if (!metadata_str) {
		PERROR("zmalloc ust app metadata string");
		ret_val = -ENOMEM;
		goto error;
	}
	/* Copy what we haven't send out. */
	memcpy(metadata_str, registry->metadata + offset, len);
	/* Mark the window as sent before dropping the registry lock. */
	registry->metadata_len_sent += len;

push_data:
	pthread_mutex_unlock(&registry->lock);
	ret = consumer_push_metadata(socket, registry->metadata_key,
			metadata_str, len, offset);
	if (ret < 0) {
		ret_val = ret;
		goto error_push;
	}

	free(metadata_str);
	return len;

end:
error:
	/* These two paths still hold the registry lock; release it. */
	pthread_mutex_unlock(&registry->lock);
error_push:
	free(metadata_str);
	return ret_val;
}
498
/*
 * For a given application and session, push metadata to consumer. The session
 * lock MUST be acquired here before calling this.
 * Either sock or consumer is required : if sock is NULL, the default
 * socket to send the metadata is retrieved from consumer, if sock
 * is not NULL we use it to send the metadata.
 *
 * On any failure, the registry's metadata_closed flag is set so no
 * further pushes are attempted for it.
 *
 * Return 0 on success else a negative error.
 */
static int push_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret_val;
	ssize_t ret;
	struct consumer_socket *socket;

	assert(registry);
	assert(consumer);

	rcu_read_lock();

	/*
	 * Means that no metadata was assigned to the session. This can happens if
	 * no start has been done previously.
	 */
	if (!registry->metadata_key) {
		ret_val = 0;
		goto end_rcu_unlock;
	}

	/* Get consumer socket to use to push the metadata.*/
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret_val = -1;
		goto error_rcu_unlock;
	}

	/*
	 * TODO: Currently, we hold the socket lock around sampling of the next
	 * metadata segment to ensure we send metadata over the consumer socket in
	 * the correct order. This makes the registry lock nest inside the socket
	 * lock.
	 *
	 * Please note that this is a temporary measure: we should move this lock
	 * back into ust_consumer_push_metadata() when the consumer gets the
	 * ability to reorder the metadata it receives.
	 */
	pthread_mutex_lock(socket->lock);
	ret = ust_app_push_metadata(registry, socket, 0);
	pthread_mutex_unlock(socket->lock);
	if (ret < 0) {
		ret_val = ret;
		goto error_rcu_unlock;
	}

	rcu_read_unlock();
	return 0;

error_rcu_unlock:
	/*
	 * On error, flag the registry that the metadata is closed. We were unable
	 * to push anything and this means that either the consumer is not
	 * responding or the metadata cache has been destroyed on the consumer.
	 */
	registry->metadata_closed = 1;
end_rcu_unlock:
	rcu_read_unlock();
	return ret_val;
}
569
/*
 * Send to the consumer a close metadata command for the given session. Once
 * done, the metadata channel is deleted and the session metadata pointer is
 * nullified. The session lock MUST be acquired here unless the application is
 * in the destroy path.
 *
 * Return 0 on success else a negative value.
 */
static int close_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret;
	struct consumer_socket *socket;

	assert(registry);
	assert(consumer);

	rcu_read_lock();

	/* Nothing to close: never opened, or already closed. */
	if (!registry->metadata_key || registry->metadata_closed) {
		ret = 0;
		goto end;
	}

	/* Get consumer socket to use to push the metadata.*/
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret = -1;
		goto error;
	}

	ret = consumer_close_metadata(socket, registry->metadata_key);
	if (ret < 0) {
		goto error;
	}

	/* NOTE: the success path deliberately falls through to error. */
error:
	/*
	 * Metadata closed. Even on error this means that the consumer is not
	 * responding or not found so either way a second close should NOT be emit
	 * for this registry.
	 */
	registry->metadata_closed = 1;
end:
	rcu_read_unlock();
	return ret;
}
618
619 /*
620 * We need to execute ht_destroy outside of RCU read-side critical
621 * section and outside of call_rcu thread, so we postpone its execution
622 * using ht_cleanup_push. It is simpler than to change the semantic of
623 * the many callers of delete_ust_app_session().
624 */
625 static
626 void delete_ust_app_session_rcu(struct rcu_head *head)
627 {
628 struct ust_app_session *ua_sess =
629 caa_container_of(head, struct ust_app_session, rcu_head);
630
631 ht_cleanup_push(ua_sess->channels);
632 free(ua_sess);
633 }
634
/*
 * Delete ust app session safely. RCU read lock must be held before calling
 * this function.
 *
 * Flushes pending metadata to the consumer (and closes the metadata
 * channel for non per-UID sessions), deletes every channel, drops the
 * per-PID buffer registry, releases the tracer-side session handle and
 * finally defers the free of the session structure to an RCU callback.
 */
static
void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_channel *ua_chan;
	struct ust_registry_session *registry;

	assert(ua_sess);

	pthread_mutex_lock(&ua_sess->lock);

	registry = get_session_registry(ua_sess);
	if (registry && !registry->metadata_closed) {
		/* Push metadata for application before freeing the application. */
		(void) push_metadata(registry, ua_sess->consumer);

		/*
		 * Don't ask to close metadata for global per UID buffers. Close
		 * metadata only on destroy trace session in this case. Also, the
		 * previous push metadata could have flag the metadata registry to
		 * close so don't send a close command if closed.
		 */
		if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID &&
				!registry->metadata_closed) {
			/* And ask to close it for this session registry. */
			(void) close_metadata(registry, ua_sess->consumer);
		}
	}

	/* Tear down every channel owned by this session. */
	cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
			node.node) {
		ret = lttng_ht_del(ua_sess->channels, &iter);
		assert(!ret);
		delete_ust_app_channel(sock, ua_chan, app);
	}

	/* In case of per PID, the registry is kept in the session. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
		struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
		if (reg_pid) {
			buffer_reg_pid_remove(reg_pid);
			buffer_reg_pid_destroy(reg_pid);
		}
	}

	/* A handle of -1 means the tracer-side session was never created. */
	if (ua_sess->handle != -1) {
		ret = ustctl_release_handle(sock, ua_sess->handle);
		/* EPIPE/EXITING mean the application already died. */
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release session handle failed with ret %d",
					sock, ret);
		}
	}
	pthread_mutex_unlock(&ua_sess->lock);

	/* Defer the final free; RCU readers may still see this session. */
	call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
}
697
/*
 * Delete a traceable application structure from the global list. Never call
 * this function outside of a call_rcu call.
 *
 * RCU read side lock should _NOT_ be held when calling this function.
 */
static
void delete_ust_app(struct ust_app *app)
{
	int ret, sock;
	struct ust_app_session *ua_sess, *tmp_ua_sess;

	/* Delete ust app sessions info */
	sock = app->sock;
	/* Invalidate the socket so other paths see the app as unusable. */
	app->sock = -1;

	/* Wipe sessions */
	cds_list_for_each_entry_safe(ua_sess, tmp_ua_sess, &app->teardown_head,
			teardown_node) {
		/* Free every object in the session and the session. */
		rcu_read_lock();
		delete_ust_app_session(sock, ua_sess, app);
		rcu_read_unlock();
	}

	/* Hash table destruction must happen outside RCU contexts. */
	ht_cleanup_push(app->sessions);
	ht_cleanup_push(app->ust_objd);

	/*
	 * Wait until we have deleted the application from the sock hash table
	 * before closing this socket, otherwise an application could re-use the
	 * socket ID and race with the teardown, using the same hash table entry.
	 *
	 * It's OK to leave the close in call_rcu. We want it to stay unique for
	 * all RCU readers that could run concurrently with unregister app,
	 * therefore we _need_ to only close that socket after a grace period. So
	 * it should stay in this RCU callback.
	 *
	 * This close() is a very important step of the synchronization model so
	 * every modification to this function must be carefully reviewed.
	 */
	ret = close(sock);
	if (ret) {
		PERROR("close");
	}
	/* Give the socket's fd slot back to the apps fd accounting. */
	lttng_fd_put(LTTNG_FD_APPS, 1);

	DBG2("UST app pid %d deleted", app->pid);
	free(app);
}
748
749 /*
750 * URCU intermediate call to delete an UST app.
751 */
752 static
753 void delete_ust_app_rcu(struct rcu_head *head)
754 {
755 struct lttng_ht_node_ulong *node =
756 caa_container_of(head, struct lttng_ht_node_ulong, head);
757 struct ust_app *app =
758 caa_container_of(node, struct ust_app, pid_n);
759
760 DBG3("Call RCU deleting app PID %d", app->pid);
761 delete_ust_app(app);
762 }
763
764 /*
765 * Delete the session from the application ht and delete the data structure by
766 * freeing every object inside and releasing them.
767 */
768 static void destroy_app_session(struct ust_app *app,
769 struct ust_app_session *ua_sess)
770 {
771 int ret;
772 struct lttng_ht_iter iter;
773
774 assert(app);
775 assert(ua_sess);
776
777 iter.iter.node = &ua_sess->node.node;
778 ret = lttng_ht_del(app->sessions, &iter);
779 if (ret) {
780 /* Already scheduled for teardown. */
781 goto end;
782 }
783
784 /* Once deleted, free the data structure. */
785 delete_ust_app_session(app->sock, ua_sess, app);
786
787 end:
788 return;
789 }
790
791 /*
792 * Alloc new UST app session.
793 */
794 static
795 struct ust_app_session *alloc_ust_app_session(struct ust_app *app)
796 {
797 struct ust_app_session *ua_sess;
798
799 /* Init most of the default value by allocating and zeroing */
800 ua_sess = zmalloc(sizeof(struct ust_app_session));
801 if (ua_sess == NULL) {
802 PERROR("malloc");
803 goto error_free;
804 }
805
806 ua_sess->handle = -1;
807 ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
808 pthread_mutex_init(&ua_sess->lock, NULL);
809
810 return ua_sess;
811
812 error_free:
813 return NULL;
814 }
815
816 /*
817 * Alloc new UST app channel.
818 */
819 static
820 struct ust_app_channel *alloc_ust_app_channel(char *name,
821 struct ust_app_session *ua_sess,
822 struct lttng_ust_channel_attr *attr)
823 {
824 struct ust_app_channel *ua_chan;
825
826 /* Init most of the default value by allocating and zeroing */
827 ua_chan = zmalloc(sizeof(struct ust_app_channel));
828 if (ua_chan == NULL) {
829 PERROR("malloc");
830 goto error;
831 }
832
833 /* Setup channel name */
834 strncpy(ua_chan->name, name, sizeof(ua_chan->name));
835 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
836
837 ua_chan->enabled = 1;
838 ua_chan->handle = -1;
839 ua_chan->session = ua_sess;
840 ua_chan->key = get_next_channel_key();
841 ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
842 ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
843 lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);
844
845 CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
846 CDS_INIT_LIST_HEAD(&ua_chan->ctx_list);
847
848 /* Copy attributes */
849 if (attr) {
850 /* Translate from lttng_ust_channel to ustctl_consumer_channel_attr. */
851 ua_chan->attr.subbuf_size = attr->subbuf_size;
852 ua_chan->attr.num_subbuf = attr->num_subbuf;
853 ua_chan->attr.overwrite = attr->overwrite;
854 ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
855 ua_chan->attr.read_timer_interval = attr->read_timer_interval;
856 ua_chan->attr.output = attr->output;
857 }
858 /* By default, the channel is a per cpu channel. */
859 ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
860
861 DBG3("UST app channel %s allocated", ua_chan->name);
862
863 return ua_chan;
864
865 error:
866 return NULL;
867 }
868
869 /*
870 * Allocate and initialize a UST app stream.
871 *
872 * Return newly allocated stream pointer or NULL on error.
873 */
874 struct ust_app_stream *ust_app_alloc_stream(void)
875 {
876 struct ust_app_stream *stream = NULL;
877
878 stream = zmalloc(sizeof(*stream));
879 if (stream == NULL) {
880 PERROR("zmalloc ust app stream");
881 goto error;
882 }
883
884 /* Zero could be a valid value for a handle so flag it to -1. */
885 stream->handle = -1;
886
887 error:
888 return stream;
889 }
890
891 /*
892 * Alloc new UST app event.
893 */
894 static
895 struct ust_app_event *alloc_ust_app_event(char *name,
896 struct lttng_ust_event *attr)
897 {
898 struct ust_app_event *ua_event;
899
900 /* Init most of the default value by allocating and zeroing */
901 ua_event = zmalloc(sizeof(struct ust_app_event));
902 if (ua_event == NULL) {
903 PERROR("malloc");
904 goto error;
905 }
906
907 ua_event->enabled = 1;
908 strncpy(ua_event->name, name, sizeof(ua_event->name));
909 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
910 lttng_ht_node_init_str(&ua_event->node, ua_event->name);
911
912 /* Copy attributes */
913 if (attr) {
914 memcpy(&ua_event->attr, attr, sizeof(ua_event->attr));
915 }
916
917 DBG3("UST app event %s allocated", ua_event->name);
918
919 return ua_event;
920
921 error:
922 return NULL;
923 }
924
925 /*
926 * Alloc new UST app context.
927 */
928 static
929 struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context *uctx)
930 {
931 struct ust_app_ctx *ua_ctx;
932
933 ua_ctx = zmalloc(sizeof(struct ust_app_ctx));
934 if (ua_ctx == NULL) {
935 goto error;
936 }
937
938 CDS_INIT_LIST_HEAD(&ua_ctx->list);
939
940 if (uctx) {
941 memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
942 }
943
944 DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);
945
946 error:
947 return ua_ctx;
948 }
949
950 /*
951 * Allocate a filter and copy the given original filter.
952 *
953 * Return allocated filter or NULL on error.
954 */
955 static struct lttng_ust_filter_bytecode *alloc_copy_ust_app_filter(
956 struct lttng_ust_filter_bytecode *orig_f)
957 {
958 struct lttng_ust_filter_bytecode *filter = NULL;
959
960 /* Copy filter bytecode */
961 filter = zmalloc(sizeof(*filter) + orig_f->len);
962 if (!filter) {
963 PERROR("zmalloc alloc ust app filter");
964 goto error;
965 }
966
967 memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
968
969 error:
970 return filter;
971 }
972
973 /*
974 * Find an ust_app using the sock and return it. RCU read side lock must be
975 * held before calling this helper function.
976 */
977 struct ust_app *ust_app_find_by_sock(int sock)
978 {
979 struct lttng_ht_node_ulong *node;
980 struct lttng_ht_iter iter;
981
982 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
983 node = lttng_ht_iter_get_node_ulong(&iter);
984 if (node == NULL) {
985 DBG2("UST app find by sock %d not found", sock);
986 goto error;
987 }
988
989 return caa_container_of(node, struct ust_app, sock_n);
990
991 error:
992 return NULL;
993 }
994
995 /*
996 * Find an ust_app using the notify sock and return it. RCU read side lock must
997 * be held before calling this helper function.
998 */
999 static struct ust_app *find_app_by_notify_sock(int sock)
1000 {
1001 struct lttng_ht_node_ulong *node;
1002 struct lttng_ht_iter iter;
1003
1004 lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
1005 &iter);
1006 node = lttng_ht_iter_get_node_ulong(&iter);
1007 if (node == NULL) {
1008 DBG2("UST app find by notify sock %d not found", sock);
1009 goto error;
1010 }
1011
1012 return caa_container_of(node, struct ust_app, notify_sock_n);
1013
1014 error:
1015 return NULL;
1016 }
1017
1018 /*
1019 * Lookup for an ust app event based on event name, filter bytecode and the
1020 * event loglevel.
1021 *
1022 * Return an ust_app_event object or NULL on error.
1023 */
1024 static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
1025 char *name, struct lttng_ust_filter_bytecode *filter, int loglevel)
1026 {
1027 struct lttng_ht_iter iter;
1028 struct lttng_ht_node_str *node;
1029 struct ust_app_event *event = NULL;
1030 struct ust_app_ht_key key;
1031
1032 assert(name);
1033 assert(ht);
1034
1035 /* Setup key for event lookup. */
1036 key.name = name;
1037 key.filter = filter;
1038 key.loglevel = loglevel;
1039
1040 /* Lookup using the event name as hash and a custom match fct. */
1041 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
1042 ht_match_ust_app_event, &key, &iter.iter);
1043 node = lttng_ht_iter_get_node_str(&iter);
1044 if (node == NULL) {
1045 goto end;
1046 }
1047
1048 event = caa_container_of(node, struct ust_app_event, node);
1049
1050 end:
1051 return event;
1052 }
1053
1054 /*
1055 * Create the channel context on the tracer.
1056 *
1057 * Called with UST app session lock held.
1058 */
1059 static
1060 int create_ust_channel_context(struct ust_app_channel *ua_chan,
1061 struct ust_app_ctx *ua_ctx, struct ust_app *app)
1062 {
1063 int ret;
1064
1065 health_code_update();
1066
1067 ret = ustctl_add_context(app->sock, &ua_ctx->ctx,
1068 ua_chan->obj, &ua_ctx->obj);
1069 if (ret < 0) {
1070 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1071 ERR("UST app create channel context failed for app (pid: %d) "
1072 "with ret %d", app->pid, ret);
1073 } else {
1074 /*
1075 * This is normal behavior, an application can die during the
1076 * creation process. Don't report an error so the execution can
1077 * continue normally.
1078 */
1079 ret = 0;
1080 DBG3("UST app disable event failed. Application is dead.");
1081 }
1082 goto error;
1083 }
1084
1085 ua_ctx->handle = ua_ctx->obj->handle;
1086
1087 DBG2("UST app context handle %d created successfully for channel %s",
1088 ua_ctx->handle, ua_chan->name);
1089
1090 error:
1091 health_code_update();
1092 return ret;
1093 }
1094
1095 /*
1096 * Set the filter on the tracer.
1097 */
1098 static
1099 int set_ust_event_filter(struct ust_app_event *ua_event,
1100 struct ust_app *app)
1101 {
1102 int ret;
1103
1104 health_code_update();
1105
1106 if (!ua_event->filter) {
1107 ret = 0;
1108 goto error;
1109 }
1110
1111 ret = ustctl_set_filter(app->sock, ua_event->filter,
1112 ua_event->obj);
1113 if (ret < 0) {
1114 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1115 ERR("UST app event %s filter failed for app (pid: %d) "
1116 "with ret %d", ua_event->attr.name, app->pid, ret);
1117 } else {
1118 /*
1119 * This is normal behavior, an application can die during the
1120 * creation process. Don't report an error so the execution can
1121 * continue normally.
1122 */
1123 ret = 0;
1124 DBG3("UST app filter event failed. Application is dead.");
1125 }
1126 goto error;
1127 }
1128
1129 DBG2("UST filter set successfully for event %s", ua_event->name);
1130
1131 error:
1132 health_code_update();
1133 return ret;
1134 }
1135
1136 /*
1137 * Disable the specified event on to UST tracer for the UST session.
1138 */
1139 static int disable_ust_event(struct ust_app *app,
1140 struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
1141 {
1142 int ret;
1143
1144 health_code_update();
1145
1146 ret = ustctl_disable(app->sock, ua_event->obj);
1147 if (ret < 0) {
1148 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1149 ERR("UST app event %s disable failed for app (pid: %d) "
1150 "and session handle %d with ret %d",
1151 ua_event->attr.name, app->pid, ua_sess->handle, ret);
1152 } else {
1153 /*
1154 * This is normal behavior, an application can die during the
1155 * creation process. Don't report an error so the execution can
1156 * continue normally.
1157 */
1158 ret = 0;
1159 DBG3("UST app disable event failed. Application is dead.");
1160 }
1161 goto error;
1162 }
1163
1164 DBG2("UST app event %s disabled successfully for app (pid: %d)",
1165 ua_event->attr.name, app->pid);
1166
1167 error:
1168 health_code_update();
1169 return ret;
1170 }
1171
1172 /*
1173 * Disable the specified channel on to UST tracer for the UST session.
1174 */
1175 static int disable_ust_channel(struct ust_app *app,
1176 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1177 {
1178 int ret;
1179
1180 health_code_update();
1181
1182 ret = ustctl_disable(app->sock, ua_chan->obj);
1183 if (ret < 0) {
1184 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1185 ERR("UST app channel %s disable failed for app (pid: %d) "
1186 "and session handle %d with ret %d",
1187 ua_chan->name, app->pid, ua_sess->handle, ret);
1188 } else {
1189 /*
1190 * This is normal behavior, an application can die during the
1191 * creation process. Don't report an error so the execution can
1192 * continue normally.
1193 */
1194 ret = 0;
1195 DBG3("UST app disable channel failed. Application is dead.");
1196 }
1197 goto error;
1198 }
1199
1200 DBG2("UST app channel %s disabled successfully for app (pid: %d)",
1201 ua_chan->name, app->pid);
1202
1203 error:
1204 health_code_update();
1205 return ret;
1206 }
1207
1208 /*
1209 * Enable the specified channel on to UST tracer for the UST session.
1210 */
1211 static int enable_ust_channel(struct ust_app *app,
1212 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1213 {
1214 int ret;
1215
1216 health_code_update();
1217
1218 ret = ustctl_enable(app->sock, ua_chan->obj);
1219 if (ret < 0) {
1220 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1221 ERR("UST app channel %s enable failed for app (pid: %d) "
1222 "and session handle %d with ret %d",
1223 ua_chan->name, app->pid, ua_sess->handle, ret);
1224 } else {
1225 /*
1226 * This is normal behavior, an application can die during the
1227 * creation process. Don't report an error so the execution can
1228 * continue normally.
1229 */
1230 ret = 0;
1231 DBG3("UST app enable channel failed. Application is dead.");
1232 }
1233 goto error;
1234 }
1235
1236 ua_chan->enabled = 1;
1237
1238 DBG2("UST app channel %s enabled successfully for app (pid: %d)",
1239 ua_chan->name, app->pid);
1240
1241 error:
1242 health_code_update();
1243 return ret;
1244 }
1245
1246 /*
1247 * Enable the specified event on to UST tracer for the UST session.
1248 */
1249 static int enable_ust_event(struct ust_app *app,
1250 struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
1251 {
1252 int ret;
1253
1254 health_code_update();
1255
1256 ret = ustctl_enable(app->sock, ua_event->obj);
1257 if (ret < 0) {
1258 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1259 ERR("UST app event %s enable failed for app (pid: %d) "
1260 "and session handle %d with ret %d",
1261 ua_event->attr.name, app->pid, ua_sess->handle, ret);
1262 } else {
1263 /*
1264 * This is normal behavior, an application can die during the
1265 * creation process. Don't report an error so the execution can
1266 * continue normally.
1267 */
1268 ret = 0;
1269 DBG3("UST app enable event failed. Application is dead.");
1270 }
1271 goto error;
1272 }
1273
1274 DBG2("UST app event %s enabled successfully for app (pid: %d)",
1275 ua_event->attr.name, app->pid);
1276
1277 error:
1278 health_code_update();
1279 return ret;
1280 }
1281
1282 /*
1283 * Send channel and stream buffer to application.
1284 *
1285 * Return 0 on success. On error, a negative value is returned.
1286 */
1287 static int send_channel_pid_to_ust(struct ust_app *app,
1288 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1289 {
1290 int ret;
1291 struct ust_app_stream *stream, *stmp;
1292
1293 assert(app);
1294 assert(ua_sess);
1295 assert(ua_chan);
1296
1297 health_code_update();
1298
1299 DBG("UST app sending channel %s to UST app sock %d", ua_chan->name,
1300 app->sock);
1301
1302 /* Send channel to the application. */
1303 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
1304 if (ret < 0) {
1305 goto error;
1306 }
1307
1308 health_code_update();
1309
1310 /* Send all streams to application. */
1311 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
1312 ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
1313 if (ret < 0) {
1314 goto error;
1315 }
1316 /* We don't need the stream anymore once sent to the tracer. */
1317 cds_list_del(&stream->list);
1318 delete_ust_app_stream(-1, stream);
1319 }
1320 /* Flag the channel that it is sent to the application. */
1321 ua_chan->is_sent = 1;
1322
1323 error:
1324 health_code_update();
1325 return ret;
1326 }
1327
1328 /*
1329 * Create the specified event onto the UST tracer for a UST session.
1330 *
1331 * Should be called with session mutex held.
1332 */
1333 static
1334 int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
1335 struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
1336 {
1337 int ret = 0;
1338
1339 health_code_update();
1340
1341 /* Create UST event on tracer */
1342 ret = ustctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
1343 &ua_event->obj);
1344 if (ret < 0) {
1345 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1346 ERR("Error ustctl create event %s for app pid: %d with ret %d",
1347 ua_event->attr.name, app->pid, ret);
1348 } else {
1349 /*
1350 * This is normal behavior, an application can die during the
1351 * creation process. Don't report an error so the execution can
1352 * continue normally.
1353 */
1354 ret = 0;
1355 DBG3("UST app create event failed. Application is dead.");
1356 }
1357 goto error;
1358 }
1359
1360 ua_event->handle = ua_event->obj->handle;
1361
1362 DBG2("UST app event %s created successfully for pid:%d",
1363 ua_event->attr.name, app->pid);
1364
1365 health_code_update();
1366
1367 /* Set filter if one is present. */
1368 if (ua_event->filter) {
1369 ret = set_ust_event_filter(ua_event, app);
1370 if (ret < 0) {
1371 goto error;
1372 }
1373 }
1374
1375 /* If event not enabled, disable it on the tracer */
1376 if (ua_event->enabled == 0) {
1377 ret = disable_ust_event(app, ua_sess, ua_event);
1378 if (ret < 0) {
1379 /*
1380 * If we hit an EPERM, something is wrong with our disable call. If
1381 * we get an EEXIST, there is a problem on the tracer side since we
1382 * just created it.
1383 */
1384 switch (ret) {
1385 case -LTTNG_UST_ERR_PERM:
1386 /* Code flow problem */
1387 assert(0);
1388 case -LTTNG_UST_ERR_EXIST:
1389 /* It's OK for our use case. */
1390 ret = 0;
1391 break;
1392 default:
1393 break;
1394 }
1395 goto error;
1396 }
1397 }
1398
1399 error:
1400 health_code_update();
1401 return ret;
1402 }
1403
1404 /*
1405 * Copy data between an UST app event and a LTT event.
1406 */
1407 static void shadow_copy_event(struct ust_app_event *ua_event,
1408 struct ltt_ust_event *uevent)
1409 {
1410 strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
1411 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
1412
1413 ua_event->enabled = uevent->enabled;
1414
1415 /* Copy event attributes */
1416 memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));
1417
1418 /* Copy filter bytecode */
1419 if (uevent->filter) {
1420 ua_event->filter = alloc_copy_ust_app_filter(uevent->filter);
1421 /* Filter might be NULL here in case of ENONEM. */
1422 }
1423 }
1424
/*
 * Copy data between an UST app channel and a LTT channel.
 *
 * Shadow-copy the attributes, contexts and events of the tracing-registry
 * channel 'uchan' into the per-application channel 'ua_chan'. Context and
 * event allocation failures are skipped silently (best effort): the copy
 * continues with the remaining entries.
 */
static void shadow_copy_channel(struct ust_app_channel *ua_chan,
		struct ltt_ust_channel *uchan)
{
	struct lttng_ht_iter iter;
	struct ltt_ust_event *uevent;
	struct ltt_ust_context *uctx;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;

	DBG2("UST app shadow copy of channel %s started", ua_chan->name);

	strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
	/* strncpy does not guarantee termination; force it. */
	ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';

	ua_chan->tracefile_size = uchan->tracefile_size;
	ua_chan->tracefile_count = uchan->tracefile_count;

	/* Copy event attributes since the layout is different. */
	ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
	ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
	ua_chan->attr.overwrite = uchan->attr.overwrite;
	ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
	ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
	ua_chan->attr.output = uchan->attr.output;
	/*
	 * Note that the attribute channel type is not set since the channel on the
	 * tracing registry side does not have this information.
	 */

	ua_chan->enabled = uchan->enabled;
	ua_chan->tracing_channel_id = uchan->id;

	/* Duplicate every context of the registry channel. */
	cds_list_for_each_entry(uctx, &uchan->ctx_list, list) {
		ua_ctx = alloc_ust_app_ctx(&uctx->ctx);
		if (ua_ctx == NULL) {
			/* Allocation failed; skip this context (best effort). */
			continue;
		}
		lttng_ht_node_init_ulong(&ua_ctx->node,
				(unsigned long) ua_ctx->ctx.ctx);
		lttng_ht_add_unique_ulong(ua_chan->ctx, &ua_ctx->node);
		cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
	}

	/* Copy all events from ltt ust channel to ust app channel */
	cds_lfht_for_each_entry(uchan->events->ht, &iter.iter, uevent, node.node) {
		/* Only create events that do not already exist in the app channel. */
		ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
				uevent->filter, uevent->attr.loglevel);
		if (ua_event == NULL) {
			DBG2("UST event %s not found on shadow copy channel",
					uevent->attr.name);
			ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
			if (ua_event == NULL) {
				/* Allocation failed; skip this event (best effort). */
				continue;
			}
			shadow_copy_event(ua_event, uevent);
			add_unique_ust_app_event(ua_chan, ua_event);
		}
	}

	DBG3("UST app shadow copy of channel %s done", ua_chan->name);
}
1489
/*
 * Copy data between a UST app session and a regular LTT session.
 *
 * Fill 'ua_sess' from 'usess' and 'app': identifiers, credentials, buffer
 * type, consumer, trace output path (per-PID or per-UID layout) and all
 * channels of the global domain that are not already present.
 */
static void shadow_copy_session(struct ust_app_session *ua_sess,
		struct ltt_ust_session *usess, struct ust_app *app)
{
	struct lttng_ht_node_str *ua_chan_node;
	struct lttng_ht_iter iter;
	struct ltt_ust_channel *uchan;
	struct ust_app_channel *ua_chan;
	time_t rawtime;
	struct tm *timeinfo;
	char datetime[16];
	int ret;

	/* Get date and time for unique app path */
	time(&rawtime);
	timeinfo = localtime(&rawtime);
	strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);

	DBG2("Shadow copy of session handle %d", ua_sess->handle);

	ua_sess->tracing_id = usess->id;
	ua_sess->id = get_next_session_id();
	/* uid/gid are the application's credentials... */
	ua_sess->uid = app->uid;
	ua_sess->gid = app->gid;
	/* ...while euid/egid are the tracing session owner's. */
	ua_sess->euid = usess->uid;
	ua_sess->egid = usess->gid;
	ua_sess->buffer_type = usess->buffer_type;
	ua_sess->bits_per_long = app->bits_per_long;
	/* There is only one consumer object per session possible. */
	ua_sess->consumer = usess->consumer;
	ua_sess->output_traces = usess->output_traces;
	ua_sess->live_timer_interval = usess->live_timer_interval;

	/* Build the trace path; layout depends on the buffer scheme. */
	switch (ua_sess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
				DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
				datetime);
		break;
	case LTTNG_BUFFER_PER_UID:
		ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
				DEFAULT_UST_TRACE_UID_PATH, ua_sess->uid, app->bits_per_long);
		break;
	default:
		assert(0);
		goto error;
	}
	if (ret < 0) {
		/* NOTE(review): message says asprintf but snprintf is used above. */
		PERROR("asprintf UST shadow copy session");
		assert(0);
		goto error;
	}

	/* Iterate over all channels in global domain. */
	cds_lfht_for_each_entry(usess->domain_global.channels->ht, &iter.iter,
			uchan, node.node) {
		struct lttng_ht_iter uiter;

		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		if (ua_chan_node != NULL) {
			/* Channel already exists in the app session. Continuing. */
			continue;
		}

		DBG2("Channel %s not found on shadow session copy, creating it",
				uchan->name);
		ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
		if (ua_chan == NULL) {
			/* malloc failed FIXME: Might want to do handle ENOMEM .. */
			continue;
		}
		shadow_copy_channel(ua_chan, uchan);
		/*
		 * The concept of metadata channel does not exist on the tracing
		 * registry side of the session daemon so this can only be a per CPU
		 * channel and not metadata.
		 */
		ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;

		lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
	}

error:
	return;
}
1578
/*
 * Lookup session wrapper.
 *
 * Look up the app's session hash table with the UST session id as key;
 * the result is left in 'iter' for the caller to extract.
 */
static
void __lookup_session_by_app(struct ltt_ust_session *usess,
			struct ust_app *app, struct lttng_ht_iter *iter)
{
	/* Get right UST app session from app */
	lttng_ht_lookup(app->sessions, &usess->id, iter);
}
1589
1590 /*
1591 * Return ust app session from the app session hashtable using the UST session
1592 * id.
1593 */
1594 static struct ust_app_session *lookup_session_by_app(
1595 struct ltt_ust_session *usess, struct ust_app *app)
1596 {
1597 struct lttng_ht_iter iter;
1598 struct lttng_ht_node_u64 *node;
1599
1600 __lookup_session_by_app(usess, app, &iter);
1601 node = lttng_ht_iter_get_node_u64(&iter);
1602 if (node == NULL) {
1603 goto error;
1604 }
1605
1606 return caa_container_of(node, struct ust_app_session, node);
1607
1608 error:
1609 return NULL;
1610 }
1611
1612 /*
1613 * Setup buffer registry per PID for the given session and application. If none
1614 * is found, a new one is created, added to the global registry and
1615 * initialized. If regp is valid, it's set with the newly created object.
1616 *
1617 * Return 0 on success or else a negative value.
1618 */
1619 static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
1620 struct ust_app *app, struct buffer_reg_pid **regp)
1621 {
1622 int ret = 0;
1623 struct buffer_reg_pid *reg_pid;
1624
1625 assert(ua_sess);
1626 assert(app);
1627
1628 rcu_read_lock();
1629
1630 reg_pid = buffer_reg_pid_find(ua_sess->id);
1631 if (!reg_pid) {
1632 /*
1633 * This is the create channel path meaning that if there is NO
1634 * registry available, we have to create one for this session.
1635 */
1636 ret = buffer_reg_pid_create(ua_sess->id, &reg_pid);
1637 if (ret < 0) {
1638 goto error;
1639 }
1640 buffer_reg_pid_add(reg_pid);
1641 } else {
1642 goto end;
1643 }
1644
1645 /* Initialize registry. */
1646 ret = ust_registry_session_init(&reg_pid->registry->reg.ust, app,
1647 app->bits_per_long, app->uint8_t_alignment,
1648 app->uint16_t_alignment, app->uint32_t_alignment,
1649 app->uint64_t_alignment, app->long_alignment,
1650 app->byte_order, app->version.major,
1651 app->version.minor);
1652 if (ret < 0) {
1653 goto error;
1654 }
1655
1656 DBG3("UST app buffer registry per PID created successfully");
1657
1658 end:
1659 if (regp) {
1660 *regp = reg_pid;
1661 }
1662 error:
1663 rcu_read_unlock();
1664 return ret;
1665 }
1666
1667 /*
1668 * Setup buffer registry per UID for the given session and application. If none
1669 * is found, a new one is created, added to the global registry and
1670 * initialized. If regp is valid, it's set with the newly created object.
1671 *
1672 * Return 0 on success or else a negative value.
1673 */
1674 static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
1675 struct ust_app *app, struct buffer_reg_uid **regp)
1676 {
1677 int ret = 0;
1678 struct buffer_reg_uid *reg_uid;
1679
1680 assert(usess);
1681 assert(app);
1682
1683 rcu_read_lock();
1684
1685 reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
1686 if (!reg_uid) {
1687 /*
1688 * This is the create channel path meaning that if there is NO
1689 * registry available, we have to create one for this session.
1690 */
1691 ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
1692 LTTNG_DOMAIN_UST, &reg_uid);
1693 if (ret < 0) {
1694 goto error;
1695 }
1696 buffer_reg_uid_add(reg_uid);
1697 } else {
1698 goto end;
1699 }
1700
1701 /* Initialize registry. */
1702 ret = ust_registry_session_init(&reg_uid->registry->reg.ust, NULL,
1703 app->bits_per_long, app->uint8_t_alignment,
1704 app->uint16_t_alignment, app->uint32_t_alignment,
1705 app->uint64_t_alignment, app->long_alignment,
1706 app->byte_order, app->version.major,
1707 app->version.minor);
1708 if (ret < 0) {
1709 goto error;
1710 }
1711 /* Add node to teardown list of the session. */
1712 cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);
1713
1714 DBG3("UST app buffer registry per UID created successfully");
1715
1716 end:
1717 if (regp) {
1718 *regp = reg_uid;
1719 }
1720 error:
1721 rcu_read_unlock();
1722 return ret;
1723 }
1724
/*
 * Create a session on the tracer side for the given app.
 *
 * On success, ua_sess_ptr is populated with the session pointer or else left
 * untouched. If the session was created, is_created is set to 1. On error,
 * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
 * be NULL.
 *
 * Returns 0 on success or else a negative code which is either -ENOMEM or
 * -ENOTCONN which is the default code if the ustctl_create_session fails.
 */
static int create_ust_app_session(struct ltt_ust_session *usess,
		struct ust_app *app, struct ust_app_session **ua_sess_ptr,
		int *is_created)
{
	int ret, created = 0;
	struct ust_app_session *ua_sess;

	assert(usess);
	assert(app);
	assert(ua_sess_ptr);

	health_code_update();

	/* Reuse an existing app session if one exists for this UST session. */
	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
				app->pid, usess->id);
		ua_sess = alloc_ust_app_session(app);
		if (ua_sess == NULL) {
			/* Only malloc can failed so something is really wrong */
			ret = -ENOMEM;
			goto error;
		}
		shadow_copy_session(ua_sess, usess, app);
		created = 1;
	}

	/* Make sure the matching buffer registry (PID or UID) exists. */
	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		/* Init local registry. */
		ret = setup_buffer_reg_pid(ua_sess, app, NULL);
		if (ret < 0) {
			goto error;
		}
		break;
	case LTTNG_BUFFER_PER_UID:
		/* Look for a global registry. If none exists, create one. */
		ret = setup_buffer_reg_uid(usess, app, NULL);
		if (ret < 0) {
			goto error;
		}
		break;
	default:
		assert(0);
		ret = -EINVAL;
		goto error;
	}

	health_code_update();

	/* A handle of -1 means no tracer-side session exists yet. */
	if (ua_sess->handle == -1) {
		ret = ustctl_create_session(app->sock);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("Creating session for app pid %d with ret %d",
						app->pid, ret);
			} else {
				DBG("UST app creating session failed. Application is dead");
				/*
				 * This is normal behavior, an application can die during the
				 * creation process. Don't report an error so the execution can
				 * continue normally. This will get flagged ENOTCONN and the
				 * caller will handle it.
				 */
				ret = 0;
			}
			/* Tear down the partially created app session. */
			delete_ust_app_session(-1, ua_sess, app);
			if (ret != -ENOMEM) {
				/*
				 * Tracer is probably gone or got an internal error so let's
				 * behave like it will soon unregister or not usable.
				 */
				ret = -ENOTCONN;
			}
			goto error;
		}

		/* ustctl_create_session returns the tracer-side handle. */
		ua_sess->handle = ret;

		/* Add ust app session to app's HT */
		lttng_ht_node_init_u64(&ua_sess->node,
				ua_sess->tracing_id);
		lttng_ht_add_unique_u64(app->sessions, &ua_sess->node);

		DBG2("UST app session created successfully with handle %d", ret);
	}

	*ua_sess_ptr = ua_sess;
	if (is_created) {
		*is_created = created;
	}

	/* Everything went well. */
	ret = 0;

error:
	health_code_update();
	return ret;
}
1835
1836 /*
1837 * Create a context for the channel on the tracer.
1838 *
1839 * Called with UST app session lock held and a RCU read side lock.
1840 */
1841 static
1842 int create_ust_app_channel_context(struct ust_app_session *ua_sess,
1843 struct ust_app_channel *ua_chan, struct lttng_ust_context *uctx,
1844 struct ust_app *app)
1845 {
1846 int ret = 0;
1847 struct lttng_ht_iter iter;
1848 struct lttng_ht_node_ulong *node;
1849 struct ust_app_ctx *ua_ctx;
1850
1851 DBG2("UST app adding context to channel %s", ua_chan->name);
1852
1853 lttng_ht_lookup(ua_chan->ctx, (void *)((unsigned long)uctx->ctx), &iter);
1854 node = lttng_ht_iter_get_node_ulong(&iter);
1855 if (node != NULL) {
1856 ret = -EEXIST;
1857 goto error;
1858 }
1859
1860 ua_ctx = alloc_ust_app_ctx(uctx);
1861 if (ua_ctx == NULL) {
1862 /* malloc failed */
1863 ret = -1;
1864 goto error;
1865 }
1866
1867 lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
1868 lttng_ht_add_unique_ulong(ua_chan->ctx, &ua_ctx->node);
1869 cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
1870
1871 ret = create_ust_channel_context(ua_chan, ua_ctx, app);
1872 if (ret < 0) {
1873 goto error;
1874 }
1875
1876 error:
1877 return ret;
1878 }
1879
1880 /*
1881 * Enable on the tracer side a ust app event for the session and channel.
1882 *
1883 * Called with UST app session lock held.
1884 */
1885 static
1886 int enable_ust_app_event(struct ust_app_session *ua_sess,
1887 struct ust_app_event *ua_event, struct ust_app *app)
1888 {
1889 int ret;
1890
1891 ret = enable_ust_event(app, ua_sess, ua_event);
1892 if (ret < 0) {
1893 goto error;
1894 }
1895
1896 ua_event->enabled = 1;
1897
1898 error:
1899 return ret;
1900 }
1901
1902 /*
1903 * Disable on the tracer side a ust app event for the session and channel.
1904 */
1905 static int disable_ust_app_event(struct ust_app_session *ua_sess,
1906 struct ust_app_event *ua_event, struct ust_app *app)
1907 {
1908 int ret;
1909
1910 ret = disable_ust_event(app, ua_sess, ua_event);
1911 if (ret < 0) {
1912 goto error;
1913 }
1914
1915 ua_event->enabled = 0;
1916
1917 error:
1918 return ret;
1919 }
1920
1921 /*
1922 * Lookup ust app channel for session and disable it on the tracer side.
1923 */
1924 static
1925 int disable_ust_app_channel(struct ust_app_session *ua_sess,
1926 struct ust_app_channel *ua_chan, struct ust_app *app)
1927 {
1928 int ret;
1929
1930 ret = disable_ust_channel(app, ua_sess, ua_chan);
1931 if (ret < 0) {
1932 goto error;
1933 }
1934
1935 ua_chan->enabled = 0;
1936
1937 error:
1938 return ret;
1939 }
1940
1941 /*
1942 * Lookup ust app channel for session and enable it on the tracer side. This
1943 * MUST be called with a RCU read side lock acquired.
1944 */
1945 static int enable_ust_app_channel(struct ust_app_session *ua_sess,
1946 struct ltt_ust_channel *uchan, struct ust_app *app)
1947 {
1948 int ret = 0;
1949 struct lttng_ht_iter iter;
1950 struct lttng_ht_node_str *ua_chan_node;
1951 struct ust_app_channel *ua_chan;
1952
1953 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
1954 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
1955 if (ua_chan_node == NULL) {
1956 DBG2("Unable to find channel %s in ust session id %" PRIu64,
1957 uchan->name, ua_sess->tracing_id);
1958 goto error;
1959 }
1960
1961 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
1962
1963 ret = enable_ust_channel(app, ua_sess, ua_chan);
1964 if (ret < 0) {
1965 goto error;
1966 }
1967
1968 error:
1969 return ret;
1970 }
1971
/*
 * Ask the consumer to create a channel and get it if successful.
 *
 * Reserves the file descriptors the channel and its streams will need,
 * asks the consumer daemon to create the channel, then fetches the
 * resulting channel/stream objects back. On failure, all reserved fds are
 * released and the consumer-side channel is destroyed.
 *
 * Return 0 on success or else a negative value.
 */
static int do_consumer_create_channel(struct ltt_ust_session *usess,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
		int bitness, struct ust_registry_session *registry)
{
	int ret;
	unsigned int nb_fd = 0;
	struct consumer_socket *socket;

	assert(usess);
	assert(ua_sess);
	assert(ua_chan);
	assert(registry);

	rcu_read_lock();
	health_code_update();

	/* Get the right consumer socket for the application. */
	socket = consumer_find_socket_by_bitness(bitness, usess->consumer);
	if (!socket) {
		ret = -EINVAL;
		goto error;
	}

	health_code_update();

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create channel");
		goto error;
	}

	/*
	 * Ask consumer to create channel. The consumer will return the number of
	 * stream we have to expect.
	 */
	ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
			registry);
	if (ret < 0) {
		goto error_ask;
	}

	/*
	 * Compute the number of fd needed before receiving them. It must be 2 per
	 * stream (2 being the default value here).
	 */
	nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;

	/* Reserve the amount of file descriptor we need. */
	ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create channel");
		goto error_fd_get_stream;
	}

	health_code_update();

	/*
	 * Now get the channel from the consumer. This call will populate the
	 * stream list of that channel and set the ust objects.
	 */
	if (usess->consumer->enabled) {
		/* Skipped when the consumer output is disabled for this session. */
		ret = ust_consumer_get_channel(socket, ua_chan);
		if (ret < 0) {
			goto error_destroy;
		}
	}

	rcu_read_unlock();
	return 0;

error_destroy:
	lttng_fd_put(LTTNG_FD_APPS, nb_fd);
error_fd_get_stream:
	/*
	 * Initiate a destroy channel on the consumer since we had an error
	 * handling it on our side. The return value is of no importance since we
	 * already have a ret value set by the previous error that we need to
	 * return.
	 */
	(void) ust_consumer_destroy_channel(socket, ua_chan);
error_ask:
	lttng_fd_put(LTTNG_FD_APPS, 1);
error:
	health_code_update();
	rcu_read_unlock();
	return ret;
}
2065
2066 /*
2067 * Duplicate the ust data object of the ust app stream and save it in the
2068 * buffer registry stream.
2069 *
2070 * Return 0 on success or else a negative value.
2071 */
2072 static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
2073 struct ust_app_stream *stream)
2074 {
2075 int ret;
2076
2077 assert(reg_stream);
2078 assert(stream);
2079
2080 /* Reserve the amount of file descriptor we need. */
2081 ret = lttng_fd_get(LTTNG_FD_APPS, 2);
2082 if (ret < 0) {
2083 ERR("Exhausted number of available FD upon duplicate stream");
2084 goto error;
2085 }
2086
2087 /* Duplicate object for stream once the original is in the registry. */
2088 ret = ustctl_duplicate_ust_object_data(&stream->obj,
2089 reg_stream->obj.ust);
2090 if (ret < 0) {
2091 ERR("Duplicate stream obj from %p to %p failed with ret %d",
2092 reg_stream->obj.ust, stream->obj, ret);
2093 lttng_fd_put(LTTNG_FD_APPS, 2);
2094 goto error;
2095 }
2096 stream->handle = stream->obj->handle;
2097
2098 error:
2099 return ret;
2100 }
2101
/*
 * Duplicate the ust data object of the ust app. channel and save it in the
 * buffer registry channel.
 *
 * Return 0 on success or else a negative value.
 */
static int duplicate_channel_object(struct buffer_reg_channel *reg_chan,
		struct ust_app_channel *ua_chan)
{
	int ret;

	assert(reg_chan);
	assert(ua_chan);

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon duplicate channel");
		goto error_fd_get;
	}

	/* Duplicate object for stream once the original is in the registry. */
	ret = ustctl_duplicate_ust_object_data(&ua_chan->obj, reg_chan->obj.ust);
	if (ret < 0) {
		ERR("Duplicate channel obj from %p to %p failed with ret: %d",
				reg_chan->obj.ust, ua_chan->obj, ret);
		goto error;
	}
	/* Keep the tracer-assigned handle of the duplicated object. */
	ua_chan->handle = ua_chan->obj->handle;

	return 0;

error:
	/* Release the fd reserved above; it will not be used. */
	lttng_fd_put(LTTNG_FD_APPS, 1);
error_fd_get:
	return ret;
}
2139
2140 /*
2141 * For a given channel buffer registry, setup all streams of the given ust
2142 * application channel.
2143 *
2144 * Return 0 on success or else a negative value.
2145 */
2146 static int setup_buffer_reg_streams(struct buffer_reg_channel *reg_chan,
2147 struct ust_app_channel *ua_chan)
2148 {
2149 int ret = 0;
2150 struct ust_app_stream *stream, *stmp;
2151
2152 assert(reg_chan);
2153 assert(ua_chan);
2154
2155 DBG2("UST app setup buffer registry stream");
2156
2157 /* Send all streams to application. */
2158 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
2159 struct buffer_reg_stream *reg_stream;
2160
2161 ret = buffer_reg_stream_create(&reg_stream);
2162 if (ret < 0) {
2163 goto error;
2164 }
2165
2166 /*
2167 * Keep original pointer and nullify it in the stream so the delete
2168 * stream call does not release the object.
2169 */
2170 reg_stream->obj.ust = stream->obj;
2171 stream->obj = NULL;
2172 buffer_reg_stream_add(reg_stream, reg_chan);
2173
2174 /* We don't need the streams anymore. */
2175 cds_list_del(&stream->list);
2176 delete_ust_app_stream(-1, stream);
2177 }
2178
2179 error:
2180 return ret;
2181 }
2182
2183 /*
2184 * Create a buffer registry channel for the given session registry and
2185 * application channel object. If regp pointer is valid, it's set with the
2186 * created object. Important, the created object is NOT added to the session
2187 * registry hash table.
2188 *
2189 * Return 0 on success else a negative value.
2190 */
2191 static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
2192 struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp)
2193 {
2194 int ret;
2195 struct buffer_reg_channel *reg_chan = NULL;
2196
2197 assert(reg_sess);
2198 assert(ua_chan);
2199
2200 DBG2("UST app creating buffer registry channel for %s", ua_chan->name);
2201
2202 /* Create buffer registry channel. */
2203 ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &reg_chan);
2204 if (ret < 0) {
2205 goto error_create;
2206 }
2207 assert(reg_chan);
2208 reg_chan->consumer_key = ua_chan->key;
2209 reg_chan->subbuf_size = ua_chan->attr.subbuf_size;
2210
2211 /* Create and add a channel registry to session. */
2212 ret = ust_registry_channel_add(reg_sess->reg.ust,
2213 ua_chan->tracing_channel_id);
2214 if (ret < 0) {
2215 goto error;
2216 }
2217 buffer_reg_channel_add(reg_sess, reg_chan);
2218
2219 if (regp) {
2220 *regp = reg_chan;
2221 }
2222
2223 return 0;
2224
2225 error:
2226 /* Safe because the registry channel object was not added to any HT. */
2227 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2228 error_create:
2229 return ret;
2230 }
2231
2232 /*
2233 * Setup buffer registry channel for the given session registry and application
2234 * channel object. If regp pointer is valid, it's set with the created object.
2235 *
2236 * Return 0 on success else a negative value.
2237 */
2238 static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
2239 struct ust_app_channel *ua_chan, struct buffer_reg_channel *reg_chan)
2240 {
2241 int ret;
2242
2243 assert(reg_sess);
2244 assert(reg_chan);
2245 assert(ua_chan);
2246 assert(ua_chan->obj);
2247
2248 DBG2("UST app setup buffer registry channel for %s", ua_chan->name);
2249
2250 /* Setup all streams for the registry. */
2251 ret = setup_buffer_reg_streams(reg_chan, ua_chan);
2252 if (ret < 0) {
2253 goto error;
2254 }
2255
2256 reg_chan->obj.ust = ua_chan->obj;
2257 ua_chan->obj = NULL;
2258
2259 return 0;
2260
2261 error:
2262 buffer_reg_channel_remove(reg_sess, reg_chan);
2263 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2264 return ret;
2265 }
2266
2267 /*
2268 * Send buffer registry channel to the application.
2269 *
2270 * Return 0 on success else a negative value.
2271 */
/*
 * Send buffer registry channel to the application.
 *
 * Duplicates the registry channel object for this app, sends the channel to
 * the application, then duplicates and sends every registry stream. Stream
 * duplicates are released locally after each send; the registry keeps the
 * originals. On full success, ua_chan->is_sent is set.
 *
 * Return 0 on success else a negative value.
 */
static int send_channel_uid_to_ust(struct buffer_reg_channel *reg_chan,
		struct ust_app *app, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct buffer_reg_stream *reg_stream;

	assert(reg_chan);
	assert(app);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app sending buffer registry channel to ust sock %d", app->sock);

	ret = duplicate_channel_object(reg_chan, ua_chan);
	if (ret < 0) {
		goto error;
	}

	/* Send channel to the application. */
	ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Send all streams to application. */
	pthread_mutex_lock(&reg_chan->stream_list_lock);
	cds_list_for_each_entry(reg_stream, &reg_chan->streams, lnode) {
		struct ust_app_stream stream;

		/* Per-stream duplicate; released below whether the send works or not. */
		ret = duplicate_stream_object(reg_stream, &stream);
		if (ret < 0) {
			goto error_stream_unlock;
		}

		ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
		if (ret < 0) {
			(void) release_ust_app_stream(-1, &stream);
			goto error_stream_unlock;
		}

		/*
		 * The return value is not important here. This function will output an
		 * error if needed.
		 */
		(void) release_ust_app_stream(-1, &stream);
	}
	ua_chan->is_sent = 1;

error_stream_unlock:
	/* Reached on success too; the lock must be dropped on every path. */
	pthread_mutex_unlock(&reg_chan->stream_list_lock);
error:
	return ret;
}
2328
2329 /*
2330 * Create and send to the application the created buffers with per UID buffers.
2331 *
2332 * Return 0 on success else a negative value.
2333 */
/*
 * Create and send to the application the created buffers with per UID buffers.
 *
 * First application hitting a channel creates the shared (per-UID) buffer
 * registry channel and the consumer-side buffers; subsequent applications
 * only get the already-created buffers sent to them.
 *
 * Return 0 on success else a negative value.
 */
static int create_channel_per_uid(struct ust_app *app,
		struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct buffer_reg_uid *reg_uid;
	struct buffer_reg_channel *reg_chan;

	assert(app);
	assert(usess);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app creating channel %s with per UID buffers", ua_chan->name);

	reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
	/*
	 * The session creation handles the creation of this global registry
	 * object. If none can be find, there is a code flow problem or a
	 * teardown race.
	 */
	assert(reg_uid);

	reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
			reg_uid);
	if (!reg_chan) {
		/* Create the buffer registry channel object. */
		ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &reg_chan);
		if (ret < 0) {
			goto error;
		}
		assert(reg_chan);

		/*
		 * Create the buffers on the consumer side. This call populates the
		 * ust app channel object with all streams and data object.
		 */
		ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
				app->bits_per_long, reg_uid->registry->reg.ust);
		if (ret < 0) {
			/*
			 * Let's remove the previously created buffer registry channel so
			 * it's not visible anymore in the session registry.
			 */
			ust_registry_channel_del_free(reg_uid->registry->reg.ust,
					ua_chan->tracing_channel_id);
			buffer_reg_channel_remove(reg_uid->registry, reg_chan);
			buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
			goto error;
		}

		/*
		 * Setup the streams and add it to the session registry.
		 * On failure, setup_buffer_reg_channel removes and destroys reg_chan
		 * itself, so no rollback is needed here.
		 */
		ret = setup_buffer_reg_channel(reg_uid->registry, ua_chan, reg_chan);
		if (ret < 0) {
			goto error;
		}

	}

	/* Send buffers to the application. */
	ret = send_channel_uid_to_ust(reg_chan, app, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

error:
	return ret;
}
2404
2405 /*
2406 * Create and send to the application the created buffers with per PID buffers.
2407 *
2408 * Return 0 on success else a negative value.
2409 */
/*
 * Create and send to the application the created buffers with per PID buffers.
 *
 * Unlike the per-UID case, the channel registry and consumer buffers are
 * private to this application's session registry.
 *
 * Return 0 on success else a negative value.
 */
static int create_channel_per_pid(struct ust_app *app,
		struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct ust_registry_session *registry;

	assert(app);
	assert(usess);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app creating channel %s with per PID buffers", ua_chan->name);

	rcu_read_lock();

	registry = get_session_registry(ua_sess);
	assert(registry);

	/* Create and add a new channel registry to session. */
	ret = ust_registry_channel_add(registry, ua_chan->key);
	if (ret < 0) {
		goto error;
	}

	/* Create and get channel on the consumer side. */
	ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
			app->bits_per_long, registry);
	if (ret < 0) {
		goto error;
	}

	ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

error:
	rcu_read_unlock();
	return ret;
}
2451
2452 /*
2453 * From an already allocated ust app channel, create the channel buffers if
2454 * need and send it to the application. This MUST be called with a RCU read
2455 * side lock acquired.
2456 *
2457 * Return 0 on success or else a negative value.
2458 */
2459 static int do_create_channel(struct ust_app *app,
2460 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2461 struct ust_app_channel *ua_chan)
2462 {
2463 int ret;
2464
2465 assert(app);
2466 assert(usess);
2467 assert(ua_sess);
2468 assert(ua_chan);
2469
2470 /* Handle buffer type before sending the channel to the application. */
2471 switch (usess->buffer_type) {
2472 case LTTNG_BUFFER_PER_UID:
2473 {
2474 ret = create_channel_per_uid(app, usess, ua_sess, ua_chan);
2475 if (ret < 0) {
2476 goto error;
2477 }
2478 break;
2479 }
2480 case LTTNG_BUFFER_PER_PID:
2481 {
2482 ret = create_channel_per_pid(app, usess, ua_sess, ua_chan);
2483 if (ret < 0) {
2484 goto error;
2485 }
2486 break;
2487 }
2488 default:
2489 assert(0);
2490 ret = -EINVAL;
2491 goto error;
2492 }
2493
2494 /* Initialize ust objd object using the received handle and add it. */
2495 lttng_ht_node_init_ulong(&ua_chan->ust_objd_node, ua_chan->handle);
2496 lttng_ht_add_unique_ulong(app->ust_objd, &ua_chan->ust_objd_node);
2497
2498 /* If channel is not enabled, disable it on the tracer */
2499 if (!ua_chan->enabled) {
2500 ret = disable_ust_channel(app, ua_sess, ua_chan);
2501 if (ret < 0) {
2502 goto error;
2503 }
2504 }
2505
2506 error:
2507 return ret;
2508 }
2509
2510 /*
2511 * Create UST app channel and create it on the tracer. Set ua_chanp of the
2512 * newly created channel if not NULL.
2513 *
2514 * Called with UST app session lock and RCU read-side lock held.
2515 *
2516 * Return 0 on success or else a negative value.
2517 */
/*
 * Create UST app channel and create it on the tracer. Set ua_chanp of the
 * newly created channel if not NULL.
 *
 * If the channel already exists in the app session, it is returned as-is
 * (idempotent with respect to the channel name).
 *
 * Called with UST app session lock and RCU read-side lock held.
 *
 * Return 0 on success or else a negative value.
 */
static int create_ust_app_channel(struct ust_app_session *ua_sess,
		struct ltt_ust_channel *uchan, struct ust_app *app,
		enum lttng_ust_chan_type type, struct ltt_ust_session *usess,
		struct ust_app_channel **ua_chanp)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app_channel *ua_chan;

	/* Lookup channel in the ust app session */
	lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
	ua_chan_node = lttng_ht_iter_get_node_str(&iter);
	if (ua_chan_node != NULL) {
		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
		goto end;
	}

	ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
	if (ua_chan == NULL) {
		/* Only malloc can fail here */
		ret = -ENOMEM;
		goto error_alloc;
	}
	shadow_copy_channel(ua_chan, uchan);

	/* Set channel type. */
	ua_chan->attr.type = type;

	ret = do_create_channel(app, usess, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

	DBG2("UST app create channel %s for PID %d completed", ua_chan->name,
			app->pid);

	/* Only add the channel if successful on the tracer side. */
	lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);

end:
	if (ua_chanp) {
		*ua_chanp = ua_chan;
	}

	/* Everything went well. */
	return 0;

error:
	/* Only close the app socket if the channel was already sent to the app. */
	delete_ust_app_channel(ua_chan->is_sent ? app->sock : -1, ua_chan, app);
error_alloc:
	return ret;
}
2571
2572 /*
2573 * Create UST app event and create it on the tracer side.
2574 *
2575 * Called with ust app session mutex held.
2576 */
/*
 * Create UST app event and create it on the tracer side.
 *
 * Returns -EEXIST if an event with the same name/filter/loglevel already
 * exists in the channel.
 *
 * Called with ust app session mutex held.
 */
static
int create_ust_app_event(struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan, struct ltt_ust_event *uevent,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_event *ua_event;

	/* Get event node */
	ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
			uevent->filter, uevent->attr.loglevel);
	if (ua_event != NULL) {
		ret = -EEXIST;
		goto end;
	}

	/* Does not exist so create one */
	ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
	if (ua_event == NULL) {
		/* Only malloc can failed so something is really wrong */
		ret = -ENOMEM;
		goto end;
	}
	shadow_copy_event(ua_event, uevent);

	/* Create it on the tracer side */
	ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
	if (ret < 0) {
		/* Not found previously means that it does not exist on the tracer */
		assert(ret != -LTTNG_UST_ERR_EXIST);
		goto error;
	}

	add_unique_ust_app_event(ua_chan, ua_event);

	DBG2("UST app create event %s for PID %d completed", ua_event->name,
			app->pid);

end:
	return ret;

error:
	/* Valid. Calling here is already in a read side lock */
	delete_ust_app_event(-1, ua_event);
	return ret;
}
2623
2624 /*
2625 * Create UST metadata and open it on the tracer side.
2626 *
2627 * Called with UST app session lock held and RCU read side lock.
2628 */
/*
 * Create UST metadata and open it on the tracer side.
 *
 * If attr is NULL, default metadata channel attributes are used. No-op (and
 * success) if the registry already has a metadata key or metadata was closed.
 *
 * Called with UST app session lock held and RCU read side lock.
 */
static int create_ust_app_metadata(struct ust_app_session *ua_sess,
		struct ust_app *app, struct consumer_output *consumer,
		struct ustctl_consumer_channel_attr *attr)
{
	int ret = 0;
	struct ust_app_channel *metadata;
	struct consumer_socket *socket;
	struct ust_registry_session *registry;

	assert(ua_sess);
	assert(app);
	assert(consumer);

	registry = get_session_registry(ua_sess);
	assert(registry);

	/* Metadata already exists for this registry or it was closed previously */
	if (registry->metadata_key || registry->metadata_closed) {
		ret = 0;
		goto error;
	}

	/* Allocate UST metadata */
	metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, NULL);
	if (!metadata) {
		/* malloc() failed */
		ret = -ENOMEM;
		goto error;
	}

	if (!attr) {
		/* Set default attributes for metadata. */
		metadata->attr.overwrite = DEFAULT_CHANNEL_OVERWRITE;
		metadata->attr.subbuf_size = default_get_metadata_subbuf_size();
		metadata->attr.num_subbuf = DEFAULT_METADATA_SUBBUF_NUM;
		metadata->attr.switch_timer_interval = DEFAULT_METADATA_SWITCH_TIMER;
		metadata->attr.read_timer_interval = DEFAULT_METADATA_READ_TIMER;
		metadata->attr.output = LTTNG_UST_MMAP;
		metadata->attr.type = LTTNG_UST_CHAN_METADATA;
	} else {
		memcpy(&metadata->attr, attr, sizeof(metadata->attr));
		/* Output and type are forced regardless of the caller's attributes. */
		metadata->attr.output = LTTNG_UST_MMAP;
		metadata->attr.type = LTTNG_UST_CHAN_METADATA;
	}

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create metadata");
		goto error;
	}

	/* Get the right consumer socket for the application. */
	socket = consumer_find_socket_by_bitness(app->bits_per_long, consumer);
	if (!socket) {
		ret = -EINVAL;
		goto error_consumer;
	}

	/*
	 * Keep metadata key so we can identify it on the consumer side. Assign it
	 * to the registry *before* we ask the consumer so we avoid the race of the
	 * consumer requesting the metadata and the ask_channel call on our side
	 * did not returned yet.
	 */
	registry->metadata_key = metadata->key;

	/*
	 * Ask the metadata channel creation to the consumer. The metadata object
	 * will be created by the consumer and kept there. However, the stream is
	 * never added or monitored until we do a first push metadata to the
	 * consumer.
	 */
	ret = ust_consumer_ask_channel(ua_sess, metadata, consumer, socket,
			registry);
	if (ret < 0) {
		/* Nullify the metadata key so we don't try to close it later on. */
		registry->metadata_key = 0;
		goto error_consumer;
	}

	/*
	 * The setup command will make the metadata stream be sent to the relayd,
	 * if applicable, and the thread managing the metadatas. This is important
	 * because after this point, if an error occurs, the only way the stream
	 * can be deleted is to be monitored in the consumer.
	 */
	ret = consumer_setup_metadata(socket, metadata->key);
	if (ret < 0) {
		/* Nullify the metadata key so we don't try to close it later on. */
		registry->metadata_key = 0;
		goto error_consumer;
	}

	DBG2("UST metadata with key %" PRIu64 " created for app pid %d",
			metadata->key, app->pid);

error_consumer:
	/*
	 * Reached on success too: the local metadata channel object and its fd
	 * reservation are always released here; the consumer keeps its own
	 * objects, identified by registry->metadata_key.
	 */
	lttng_fd_put(LTTNG_FD_APPS, 1);
	delete_ust_app_channel(-1, metadata, app);
error:
	return ret;
}
2732
2733 /*
2734 * Return pointer to traceable apps list.
2735 */
/*
 * Return pointer to traceable apps list (the global PID-indexed hash table).
 */
struct lttng_ht *ust_app_get_ht(void)
{
	return ust_app_ht;
}
2740
2741 /*
2742 * Return ust app pointer or NULL if not found. RCU read side lock MUST be
2743 * acquired before calling this function.
2744 */
2745 struct ust_app *ust_app_find_by_pid(pid_t pid)
2746 {
2747 struct ust_app *app = NULL;
2748 struct lttng_ht_node_ulong *node;
2749 struct lttng_ht_iter iter;
2750
2751 lttng_ht_lookup(ust_app_ht, (void *)((unsigned long) pid), &iter);
2752 node = lttng_ht_iter_get_node_ulong(&iter);
2753 if (node == NULL) {
2754 DBG2("UST app no found with pid %d", pid);
2755 goto error;
2756 }
2757
2758 DBG2("Found UST app by pid %d", pid);
2759
2760 app = caa_container_of(node, struct ust_app, pid_n);
2761
2762 error:
2763 return app;
2764 }
2765
2766 /*
2767 * Allocate and init an UST app object using the registration information and
2768 * the command socket. This is called when the command socket connects to the
2769 * session daemon.
2770 *
2771 * The object is returned on success or else NULL.
2772 */
/*
 * Allocate and init an UST app object using the registration information and
 * the command socket. This is called when the command socket connects to the
 * session daemon.
 *
 * The object is returned on success or else NULL.
 */
struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
{
	struct ust_app *lta = NULL;

	assert(msg);
	assert(sock >= 0);

	DBG3("UST app creating application for socket %d", sock);

	/* Refuse registration when no consumerd matches the app's bitness. */
	if ((msg->bits_per_long == 64 &&
				(uatomic_read(&ust_consumerd64_fd) == -EINVAL))
			|| (msg->bits_per_long == 32 &&
				(uatomic_read(&ust_consumerd32_fd) == -EINVAL))) {
		ERR("Registration failed: application \"%s\" (pid: %d) has "
				"%d-bit long, but no consumerd for this size is available.\n",
				msg->name, msg->pid, msg->bits_per_long);
		goto error;
	}

	lta = zmalloc(sizeof(struct ust_app));
	if (lta == NULL) {
		PERROR("malloc");
		goto error;
	}

	lta->ppid = msg->ppid;
	lta->uid = msg->uid;
	lta->gid = msg->gid;

	/* ABI description of the application, used by the registry. */
	lta->bits_per_long = msg->bits_per_long;
	lta->uint8_t_alignment = msg->uint8_t_alignment;
	lta->uint16_t_alignment = msg->uint16_t_alignment;
	lta->uint32_t_alignment = msg->uint32_t_alignment;
	lta->uint64_t_alignment = msg->uint64_t_alignment;
	lta->long_alignment = msg->long_alignment;
	lta->byte_order = msg->byte_order;

	lta->v_major = msg->major;
	lta->v_minor = msg->minor;
	/* NOTE(review): lttng_ht_new() results are not NULL-checked here — TODO confirm callers tolerate this. */
	lta->sessions = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	lta->ust_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	lta->notify_sock = -1;

	/* Copy name and make sure it's NULL terminated. */
	strncpy(lta->name, msg->name, sizeof(lta->name));
	lta->name[UST_APP_PROCNAME_LEN] = '\0';

	/*
	 * Before this can be called, when receiving the registration information,
	 * the application compatibility is checked. So, at this point, the
	 * application can work with this session daemon.
	 */
	lta->compatible = 1;

	lta->pid = msg->pid;
	lttng_ht_node_init_ulong(&lta->pid_n, (unsigned long) lta->pid);
	lta->sock = sock;
	lttng_ht_node_init_ulong(&lta->sock_n, (unsigned long) lta->sock);

	CDS_INIT_LIST_HEAD(&lta->teardown_head);

error:
	return lta;
}
2837
2838 /*
2839 * For a given application object, add it to every hash table.
2840 */
/*
 * For a given application object, add it to every hash table.
 *
 * The app must have a valid notify socket (set after the notify-socket
 * registration step, before this call).
 */
void ust_app_add(struct ust_app *app)
{
	assert(app);
	assert(app->notify_sock >= 0);

	rcu_read_lock();

	/*
	 * On a re-registration, we want to kick out the previous registration of
	 * that pid
	 */
	lttng_ht_add_replace_ulong(ust_app_ht, &app->pid_n);

	/*
	 * The socket _should_ be unique until _we_ call close. So, a add_unique
	 * for the ust_app_ht_by_sock is used which asserts fail if the entry was
	 * already in the table.
	 */
	lttng_ht_add_unique_ulong(ust_app_ht_by_sock, &app->sock_n);

	/* Add application to the notify socket hash table. */
	lttng_ht_node_init_ulong(&app->notify_sock_n, app->notify_sock);
	lttng_ht_add_unique_ulong(ust_app_ht_by_notify_sock, &app->notify_sock_n);

	DBG("App registered with pid:%d ppid:%d uid:%d gid:%d sock:%d name:%s "
			"notify_sock:%d (version %d.%d)", app->pid, app->ppid, app->uid,
			app->gid, app->sock, app->name, app->notify_sock, app->v_major,
			app->v_minor);

	rcu_read_unlock();
}
2872
2873 /*
2874 * Set the application version into the object.
2875 *
2876 * Return 0 on success else a negative value either an errno code or a
2877 * LTTng-UST error code.
2878 */
2879 int ust_app_version(struct ust_app *app)
2880 {
2881 int ret;
2882
2883 assert(app);
2884
2885 ret = ustctl_tracer_version(app->sock, &app->version);
2886 if (ret < 0) {
2887 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
2888 ERR("UST app %d verson failed with ret %d", app->sock, ret);
2889 } else {
2890 DBG3("UST app %d verion failed. Application is dead", app->sock);
2891 }
2892 }
2893
2894 return ret;
2895 }
2896
2897 /*
2898 * Unregister app by removing it from the global traceable app list and freeing
2899 * the data struct.
2900 *
2901 * The socket is already closed at this point so no close to sock.
2902 */
/*
 * Unregister app by removing it from the global traceable app list and freeing
 * the data struct.
 *
 * The socket is already closed at this point so no close to sock.
 */
void ust_app_unregister(int sock)
{
	struct ust_app *lta;
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;
	struct ust_app_session *ua_sess;
	int ret;

	rcu_read_lock();

	/* Get the node reference for a call_rcu */
	lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	assert(node);

	lta = caa_container_of(node, struct ust_app, sock_n);
	DBG("PID %d unregistering with sock %d", lta->pid, sock);

	/* Remove application from socket hash table */
	ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
	assert(!ret);

	/*
	 * Remove application from notify hash table. The thread handling the
	 * notify socket could have deleted the node so ignore on error because
	 * either way it's valid. The close of that socket is handled by the other
	 * thread.
	 */
	iter.iter.node = &lta->notify_sock_n.node;
	(void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);

	/*
	 * Ignore return value since the node might have been removed before by an
	 * add replace during app registration because the PID can be reassigned by
	 * the OS.
	 */
	iter.iter.node = &lta->pid_n.node;
	ret = lttng_ht_del(ust_app_ht, &iter);
	if (ret) {
		DBG3("Unregister app by PID %d failed. This can happen on pid reuse",
				lta->pid);
	}

	/* Remove sessions so they are not visible during deletion.*/
	cds_lfht_for_each_entry(lta->sessions->ht, &iter.iter, ua_sess,
			node.node) {
		struct ust_registry_session *registry;

		ret = lttng_ht_del(lta->sessions, &iter);
		if (ret) {
			/* The session was already removed so scheduled for teardown. */
			continue;
		}

		/*
		 * Add session to list for teardown. This is safe since at this point we
		 * are the only one using this list.
		 */
		pthread_mutex_lock(&ua_sess->lock);

		/*
		 * Normally, this is done in the delete session process which is
		 * executed in the call rcu below. However, upon registration we can't
		 * afford to wait for the grace period before pushing data or else the
		 * data pending feature can race between the unregistration and stop
		 * command where the data pending command is sent *before* the grace
		 * period ended.
		 *
		 * The close metadata below nullifies the metadata pointer in the
		 * session so the delete session will NOT push/close a second time.
		 */
		registry = get_session_registry(ua_sess);
		if (registry && !registry->metadata_closed) {
			/* Push metadata for application before freeing the application. */
			(void) push_metadata(registry, ua_sess->consumer);

			/*
			 * Don't ask to close metadata for global per UID buffers. Close
			 * metadata only on destroy trace session in this case. Also, the
			 * previous push metadata could have flag the metadata registry to
			 * close so don't send a close command if closed.
			 */
			if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID &&
					!registry->metadata_closed) {
				/* And ask to close it for this session registry. */
				(void) close_metadata(registry, ua_sess->consumer);
			}
		}

		cds_list_add(&ua_sess->teardown_node, &lta->teardown_head);
		pthread_mutex_unlock(&ua_sess->lock);
	}

	/* Free memory after the RCU grace period ends. */
	call_rcu(&lta->pid_n.head, delete_ust_app_rcu);

	rcu_read_unlock();
	return;
}
3002
3003 /*
3004 * Return traceable_app_count
3005 */
3006 unsigned long ust_app_list_count(void)
3007 {
3008 unsigned long count;
3009
3010 rcu_read_lock();
3011 count = lttng_ht_get_count(ust_app_ht);
3012 rcu_read_unlock();
3013
3014 return count;
3015 }
3016
3017 /*
3018 * Fill events array with all events name of all registered apps.
3019 */
3020 int ust_app_list_events(struct lttng_event **events)
3021 {
3022 int ret, handle;
3023 size_t nbmem, count = 0;
3024 struct lttng_ht_iter iter;
3025 struct ust_app *app;
3026 struct lttng_event *tmp_event;
3027
3028 nbmem = UST_APP_EVENT_LIST_SIZE;
3029 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event));
3030 if (tmp_event == NULL) {
3031 PERROR("zmalloc ust app events");
3032 ret = -ENOMEM;
3033 goto error;
3034 }
3035
3036 rcu_read_lock();
3037
3038 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3039 struct lttng_ust_tracepoint_iter uiter;
3040
3041 health_code_update();
3042
3043 if (!app->compatible) {
3044 /*
3045 * TODO: In time, we should notice the caller of this error by
3046 * telling him that this is a version error.
3047 */
3048 continue;
3049 }
3050 handle = ustctl_tracepoint_list(app->sock);
3051 if (handle < 0) {
3052 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
3053 ERR("UST app list events getting handle failed for app pid %d",
3054 app->pid);
3055 }
3056 continue;
3057 }
3058
3059 while ((ret = ustctl_tracepoint_list_get(app->sock, handle,
3060 &uiter)) != -LTTNG_UST_ERR_NOENT) {
3061 /* Handle ustctl error. */
3062 if (ret < 0) {
3063 free(tmp_event);
3064 if (ret != -LTTNG_UST_ERR_EXITING || ret != -EPIPE) {
3065 ERR("UST app tp list get failed for app %d with ret %d",
3066 app->sock, ret);
3067 } else {
3068 DBG3("UST app tp list get failed. Application is dead");
3069 /*
3070 * This is normal behavior, an application can die during the
3071 * creation process. Don't report an error so the execution can
3072 * continue normally. Continue normal execution.
3073 */
3074 break;
3075 }
3076 goto rcu_error;
3077 }
3078
3079 health_code_update();
3080 if (count >= nbmem) {
3081 /* In case the realloc fails, we free the memory */
3082 void *ptr;
3083
3084 DBG2("Reallocating event list from %zu to %zu entries", nbmem,
3085 2 * nbmem);
3086 nbmem *= 2;
3087 ptr = realloc(tmp_event, nbmem * sizeof(struct lttng_event));
3088 if (ptr == NULL) {
3089 PERROR("realloc ust app events");
3090 free(tmp_event);
3091 ret = -ENOMEM;
3092 goto rcu_error;
3093 }
3094 tmp_event = ptr;
3095 }
3096 memcpy(tmp_event[count].name, uiter.name, LTTNG_UST_SYM_NAME_LEN);
3097 tmp_event[count].loglevel = uiter.loglevel;
3098 tmp_event[count].type = (enum lttng_event_type) LTTNG_UST_TRACEPOINT;
3099 tmp_event[count].pid = app->pid;
3100 tmp_event[count].enabled = -1;
3101 count++;
3102 }
3103 }
3104
3105 ret = count;
3106 *events = tmp_event;
3107
3108 DBG2("UST app list events done (%zu events)", count);
3109
3110 rcu_error:
3111 rcu_read_unlock();
3112 error:
3113 health_code_update();
3114 return ret;
3115 }
3116
3117 /*
3118 * Fill events array with all events name of all registered apps.
3119 */
3120 int ust_app_list_event_fields(struct lttng_event_field **fields)
3121 {
3122 int ret, handle;
3123 size_t nbmem, count = 0;
3124 struct lttng_ht_iter iter;
3125 struct ust_app *app;
3126 struct lttng_event_field *tmp_event;
3127
3128 nbmem = UST_APP_EVENT_LIST_SIZE;
3129 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event_field));
3130 if (tmp_event == NULL) {
3131 PERROR("zmalloc ust app event fields");
3132 ret = -ENOMEM;
3133 goto error;
3134 }
3135
3136 rcu_read_lock();
3137
3138 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3139 struct lttng_ust_field_iter uiter;
3140
3141 health_code_update();
3142
3143 if (!app->compatible) {
3144 /*
3145 * TODO: In time, we should notice the caller of this error by
3146 * telling him that this is a version error.
3147 */
3148 continue;
3149 }
3150 handle = ustctl_tracepoint_field_list(app->sock);
3151 if (handle < 0) {
3152 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
3153 ERR("UST app list field getting handle failed for app pid %d",
3154 app->pid);
3155 }
3156 continue;
3157 }
3158
3159 while ((ret = ustctl_tracepoint_field_list_get(app->sock, handle,
3160 &uiter)) != -LTTNG_UST_ERR_NOENT) {
3161 /* Handle ustctl error. */
3162 if (ret < 0) {
3163 free(tmp_event);
3164 if (ret != -LTTNG_UST_ERR_EXITING || ret != -EPIPE) {
3165 ERR("UST app tp list field failed for app %d with ret %d",
3166 app->sock, ret);
3167 } else {
3168 DBG3("UST app tp list field failed. Application is dead");
3169 /*
3170 * This is normal behavior, an application can die during the
3171 * creation process. Don't report an error so the execution can
3172 * continue normally.
3173 */
3174 break;
3175 }
3176 goto rcu_error;
3177 }
3178
3179 health_code_update();
3180 if (count >= nbmem) {
3181 /* In case the realloc fails, we free the memory */
3182 void *ptr;
3183
3184 DBG2("Reallocating event field list from %zu to %zu entries", nbmem,
3185 2 * nbmem);
3186 nbmem *= 2;
3187 ptr = realloc(tmp_event, nbmem * sizeof(struct lttng_event_field));
3188 if (ptr == NULL) {
3189 PERROR("realloc ust app event fields");
3190 free(tmp_event);
3191 ret = -ENOMEM;
3192 goto rcu_error;
3193 }
3194 tmp_event = ptr;
3195 }
3196
3197 memcpy(tmp_event[count].field_name, uiter.field_name, LTTNG_UST_SYM_NAME_LEN);
3198 tmp_event[count].type = uiter.type;
3199 tmp_event[count].nowrite = uiter.nowrite;
3200
3201 memcpy(tmp_event[count].event.name, uiter.event_name, LTTNG_UST_SYM_NAME_LEN);
3202 tmp_event[count].event.loglevel = uiter.loglevel;
3203 tmp_event[count].event.type = LTTNG_UST_TRACEPOINT;
3204 tmp_event[count].event.pid = app->pid;
3205 tmp_event[count].event.enabled = -1;
3206 count++;
3207 }
3208 }
3209
3210 ret = count;
3211 *fields = tmp_event;
3212
3213 DBG2("UST app list event fields done (%zu events)", count);
3214
3215 rcu_error:
3216 rcu_read_unlock();
3217 error:
3218 health_code_update();
3219 return ret;
3220 }
3221
3222 /*
3223 * Free and clean all traceable apps of the global list.
3224 *
3225 * Should _NOT_ be called with RCU read-side lock held.
3226 */
/*
 * Free and clean all traceable apps of the global list.
 *
 * Should _NOT_ be called with RCU read-side lock held (the deferred ht
 * destruction below must be able to observe the grace period).
 */
void ust_app_clean_list(void)
{
	int ret;
	struct ust_app *app;
	struct lttng_ht_iter iter;

	DBG2("UST app cleaning registered apps hash table");

	rcu_read_lock();

	/* App objects are owned by the PID table; free them via call_rcu. */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		ret = lttng_ht_del(ust_app_ht, &iter);
		assert(!ret);
		call_rcu(&app->pid_n.head, delete_ust_app_rcu);
	}

	/* Cleanup socket hash table */
	cds_lfht_for_each_entry(ust_app_ht_by_sock->ht, &iter.iter, app,
			sock_n.node) {
		ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
		assert(!ret);
	}

	/* Cleanup notify socket hash table */
	cds_lfht_for_each_entry(ust_app_ht_by_notify_sock->ht, &iter.iter, app,
			notify_sock_n.node) {
		ret = lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
		assert(!ret);
	}
	rcu_read_unlock();

	/* Destroy is done only when the ht is empty */
	ht_cleanup_push(ust_app_ht);
	ht_cleanup_push(ust_app_ht_by_sock);
	ht_cleanup_push(ust_app_ht_by_notify_sock);
}
3263
3264 /*
3265 * Init UST app hash table.
3266 */
/*
 * Init UST app hash tables (by PID, by command socket, by notify socket).
 *
 * NOTE(review): lttng_ht_new() return values are not checked here — TODO
 * confirm allocation failure is acceptable to defer to first use.
 */
void ust_app_ht_alloc(void)
{
	ust_app_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	ust_app_ht_by_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	ust_app_ht_by_notify_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
}
3273
3274 /*
3275 * For a specific UST session, disable the channel for all registered apps.
3276 */
/*
 * For a specific UST session, disable the channel for all registered apps.
 *
 * Apps without a session for usess are skipped; per-app disable failures are
 * also skipped (best-effort). Returns 0 or the last error encountered.
 */
int ust_app_disable_channel_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;

	if (usess == NULL || uchan == NULL) {
		ERR("Disabling UST global channel with NULL values");
		ret = -1;
		goto error;
	}

	DBG2("UST app disabling channel %s from global domain for session id %" PRIu64,
			uchan->name, usess->id);

	rcu_read_lock();

	/* For every registered applications */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		struct lttng_ht_iter uiter;
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (ua_sess == NULL) {
			continue;
		}

		/* Get channel */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		/* If the session if found for the app, the channel must be there */
		assert(ua_chan_node);

		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
		/* The channel must not be already disabled */
		assert(ua_chan->enabled == 1);

		/* Disable channel onto application */
		ret = disable_ust_app_channel(ua_sess, ua_chan, app);
		if (ret < 0) {
			/* XXX: We might want to report this error at some point... */
			continue;
		}
	}

	rcu_read_unlock();

error:
	return ret;
}
3336
3337 /*
3338 * For a specific UST session, enable the channel for all registered apps.
3339 */
3340 int ust_app_enable_channel_glb(struct ltt_ust_session *usess,
3341 struct ltt_ust_channel *uchan)
3342 {
3343 int ret = 0;
3344 struct lttng_ht_iter iter;
3345 struct ust_app *app;
3346 struct ust_app_session *ua_sess;
3347
3348 if (usess == NULL || uchan == NULL) {
3349 ERR("Adding UST global channel to NULL values");
3350 ret = -1;
3351 goto error;
3352 }
3353
3354 DBG2("UST app enabling channel %s to global domain for session id %" PRIu64,
3355 uchan->name, usess->id);
3356
3357 rcu_read_lock();
3358
3359 /* For every registered applications */
3360 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3361 if (!app->compatible) {
3362 /*
3363 * TODO: In time, we should notice the caller of this error by
3364 * telling him that this is a version error.
3365 */
3366 continue;
3367 }
3368 ua_sess = lookup_session_by_app(usess, app);
3369 if (ua_sess == NULL) {
3370 continue;
3371 }
3372
3373 /* Enable channel onto application */
3374 ret = enable_ust_app_channel(ua_sess, uchan, app);
3375 if (ret < 0) {
3376 /* XXX: We might want to report this error at some point... */
3377 continue;
3378 }
3379 }
3380
3381 rcu_read_unlock();
3382
3383 error:
3384 return ret;
3385 }
3386
3387 /*
3388 * Disable an event in a channel and for a specific session.
3389 */
3390 int ust_app_disable_event_glb(struct ltt_ust_session *usess,
3391 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
3392 {
3393 int ret = 0;
3394 struct lttng_ht_iter iter, uiter;
3395 struct lttng_ht_node_str *ua_chan_node, *ua_event_node;
3396 struct ust_app *app;
3397 struct ust_app_session *ua_sess;
3398 struct ust_app_channel *ua_chan;
3399 struct ust_app_event *ua_event;
3400
3401 DBG("UST app disabling event %s for all apps in channel "
3402 "%s for session id %" PRIu64,
3403 uevent->attr.name, uchan->name, usess->id);
3404
3405 rcu_read_lock();
3406
3407 /* For all registered applications */
3408 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3409 if (!app->compatible) {
3410 /*
3411 * TODO: In time, we should notice the caller of this error by
3412 * telling him that this is a version error.
3413 */
3414 continue;
3415 }
3416 ua_sess = lookup_session_by_app(usess, app);
3417 if (ua_sess == NULL) {
3418 /* Next app */
3419 continue;
3420 }
3421
3422 /* Lookup channel in the ust app session */
3423 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3424 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3425 if (ua_chan_node == NULL) {
3426 DBG2("Channel %s not found in session id %" PRIu64 " for app pid %d."
3427 "Skipping", uchan->name, usess->id, app->pid);
3428 continue;
3429 }
3430 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3431
3432 lttng_ht_lookup(ua_chan->events, (void *)uevent->attr.name, &uiter);
3433 ua_event_node = lttng_ht_iter_get_node_str(&uiter);
3434 if (ua_event_node == NULL) {
3435 DBG2("Event %s not found in channel %s for app pid %d."
3436 "Skipping", uevent->attr.name, uchan->name, app->pid);
3437 continue;
3438 }
3439 ua_event = caa_container_of(ua_event_node, struct ust_app_event, node);
3440
3441 ret = disable_ust_app_event(ua_sess, ua_event, app);
3442 if (ret < 0) {
3443 /* XXX: Report error someday... */
3444 continue;
3445 }
3446 }
3447
3448 rcu_read_unlock();
3449
3450 return ret;
3451 }
3452
3453 /*
3454 * For a specific UST session and UST channel, the event for all
3455 * registered apps.
3456 */
3457 int ust_app_disable_all_event_glb(struct ltt_ust_session *usess,
3458 struct ltt_ust_channel *uchan)
3459 {
3460 int ret = 0;
3461 struct lttng_ht_iter iter, uiter;
3462 struct lttng_ht_node_str *ua_chan_node;
3463 struct ust_app *app;
3464 struct ust_app_session *ua_sess;
3465 struct ust_app_channel *ua_chan;
3466 struct ust_app_event *ua_event;
3467
3468 DBG("UST app disabling all event for all apps in channel "
3469 "%s for session id %" PRIu64, uchan->name, usess->id);
3470
3471 rcu_read_lock();
3472
3473 /* For all registered applications */
3474 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3475 if (!app->compatible) {
3476 /*
3477 * TODO: In time, we should notice the caller of this error by
3478 * telling him that this is a version error.
3479 */
3480 continue;
3481 }
3482 ua_sess = lookup_session_by_app(usess, app);
3483 if (!ua_sess) {
3484 /* The application has problem or is probably dead. */
3485 continue;
3486 }
3487
3488 /* Lookup channel in the ust app session */
3489 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3490 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3491 /* If the channel is not found, there is a code flow error */
3492 assert(ua_chan_node);
3493
3494 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3495
3496 /* Disable each events of channel */
3497 cds_lfht_for_each_entry(ua_chan->events->ht, &uiter.iter, ua_event,
3498 node.node) {
3499 ret = disable_ust_app_event(ua_sess, ua_event, app);
3500 if (ret < 0) {
3501 /* XXX: Report error someday... */
3502 continue;
3503 }
3504 }
3505 }
3506
3507 rcu_read_unlock();
3508
3509 return ret;
3510 }
3511
3512 /*
3513 * For a specific UST session, create the channel for all registered apps.
3514 */
3515 int ust_app_create_channel_glb(struct ltt_ust_session *usess,
3516 struct ltt_ust_channel *uchan)
3517 {
3518 int ret = 0, created;
3519 struct lttng_ht_iter iter;
3520 struct ust_app *app;
3521 struct ust_app_session *ua_sess = NULL;
3522
3523 /* Very wrong code flow */
3524 assert(usess);
3525 assert(uchan);
3526
3527 DBG2("UST app adding channel %s to UST domain for session id %" PRIu64,
3528 uchan->name, usess->id);
3529
3530 rcu_read_lock();
3531
3532 /* For every registered applications */
3533 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3534 if (!app->compatible) {
3535 /*
3536 * TODO: In time, we should notice the caller of this error by
3537 * telling him that this is a version error.
3538 */
3539 continue;
3540 }
3541 /*
3542 * Create session on the tracer side and add it to app session HT. Note
3543 * that if session exist, it will simply return a pointer to the ust
3544 * app session.
3545 */
3546 ret = create_ust_app_session(usess, app, &ua_sess, &created);
3547 if (ret < 0) {
3548 switch (ret) {
3549 case -ENOTCONN:
3550 /*
3551 * The application's socket is not valid. Either a bad socket
3552 * or a timeout on it. We can't inform the caller that for a
3553 * specific app, the session failed so lets continue here.
3554 */
3555 continue;
3556 case -ENOMEM:
3557 default:
3558 goto error_rcu_unlock;
3559 }
3560 }
3561 assert(ua_sess);
3562
3563 pthread_mutex_lock(&ua_sess->lock);
3564 if (!strncmp(uchan->name, DEFAULT_METADATA_NAME,
3565 sizeof(uchan->name))) {
3566 struct ustctl_consumer_channel_attr attr;
3567 copy_channel_attr_to_ustctl(&attr, &uchan->attr);
3568 ret = create_ust_app_metadata(ua_sess, app, usess->consumer,
3569 &attr);
3570 } else {
3571 /* Create channel onto application. We don't need the chan ref. */
3572 ret = create_ust_app_channel(ua_sess, uchan, app,
3573 LTTNG_UST_CHAN_PER_CPU, usess, NULL);
3574 }
3575 pthread_mutex_unlock(&ua_sess->lock);
3576 if (ret < 0) {
3577 if (ret == -ENOMEM) {
3578 /* No more memory is a fatal error. Stop right now. */
3579 goto error_rcu_unlock;
3580 }
3581 /* Cleanup the created session if it's the case. */
3582 if (created) {
3583 destroy_app_session(app, ua_sess);
3584 }
3585 }
3586 }
3587
3588 error_rcu_unlock:
3589 rcu_read_unlock();
3590 return ret;
3591 }
3592
3593 /*
3594 * Enable event for a specific session and channel on the tracer.
3595 */
3596 int ust_app_enable_event_glb(struct ltt_ust_session *usess,
3597 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
3598 {
3599 int ret = 0;
3600 struct lttng_ht_iter iter, uiter;
3601 struct lttng_ht_node_str *ua_chan_node;
3602 struct ust_app *app;
3603 struct ust_app_session *ua_sess;
3604 struct ust_app_channel *ua_chan;
3605 struct ust_app_event *ua_event;
3606
3607 DBG("UST app enabling event %s for all apps for session id %" PRIu64,
3608 uevent->attr.name, usess->id);
3609
3610 /*
3611 * NOTE: At this point, this function is called only if the session and
3612 * channel passed are already created for all apps. and enabled on the
3613 * tracer also.
3614 */
3615
3616 rcu_read_lock();
3617
3618 /* For all registered applications */
3619 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3620 if (!app->compatible) {
3621 /*
3622 * TODO: In time, we should notice the caller of this error by
3623 * telling him that this is a version error.
3624 */
3625 continue;
3626 }
3627 ua_sess = lookup_session_by_app(usess, app);
3628 if (!ua_sess) {
3629 /* The application has problem or is probably dead. */
3630 continue;
3631 }
3632
3633 pthread_mutex_lock(&ua_sess->lock);
3634
3635 /* Lookup channel in the ust app session */
3636 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3637 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3638 /* If the channel is not found, there is a code flow error */
3639 assert(ua_chan_node);
3640
3641 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3642
3643 /* Get event node */
3644 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
3645 uevent->filter, uevent->attr.loglevel);
3646 if (ua_event == NULL) {
3647 DBG3("UST app enable event %s not found for app PID %d."
3648 "Skipping app", uevent->attr.name, app->pid);
3649 goto next_app;
3650 }
3651
3652 ret = enable_ust_app_event(ua_sess, ua_event, app);
3653 if (ret < 0) {
3654 pthread_mutex_unlock(&ua_sess->lock);
3655 goto error;
3656 }
3657 next_app:
3658 pthread_mutex_unlock(&ua_sess->lock);
3659 }
3660
3661 error:
3662 rcu_read_unlock();
3663 return ret;
3664 }
3665
3666 /*
3667 * For a specific existing UST session and UST channel, creates the event for
3668 * all registered apps.
3669 */
3670 int ust_app_create_event_glb(struct ltt_ust_session *usess,
3671 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
3672 {
3673 int ret = 0;
3674 struct lttng_ht_iter iter, uiter;
3675 struct lttng_ht_node_str *ua_chan_node;
3676 struct ust_app *app;
3677 struct ust_app_session *ua_sess;
3678 struct ust_app_channel *ua_chan;
3679
3680 DBG("UST app creating event %s for all apps for session id %" PRIu64,
3681 uevent->attr.name, usess->id);
3682
3683 rcu_read_lock();
3684
3685 /* For all registered applications */
3686 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3687 if (!app->compatible) {
3688 /*
3689 * TODO: In time, we should notice the caller of this error by
3690 * telling him that this is a version error.
3691 */
3692 continue;
3693 }
3694 ua_sess = lookup_session_by_app(usess, app);
3695 if (!ua_sess) {
3696 /* The application has problem or is probably dead. */
3697 continue;
3698 }
3699
3700 pthread_mutex_lock(&ua_sess->lock);
3701 /* Lookup channel in the ust app session */
3702 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3703 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3704 /* If the channel is not found, there is a code flow error */
3705 assert(ua_chan_node);
3706
3707 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3708
3709 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
3710 pthread_mutex_unlock(&ua_sess->lock);
3711 if (ret < 0) {
3712 if (ret != -LTTNG_UST_ERR_EXIST) {
3713 /* Possible value at this point: -ENOMEM. If so, we stop! */
3714 break;
3715 }
3716 DBG2("UST app event %s already exist on app PID %d",
3717 uevent->attr.name, app->pid);
3718 continue;
3719 }
3720 }
3721
3722 rcu_read_unlock();
3723
3724 return ret;
3725 }
3726
3727 /*
3728 * Start tracing for a specific UST session and app.
3729 */
3730 static
3731 int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app)
3732 {
3733 int ret = 0;
3734 struct ust_app_session *ua_sess;
3735
3736 DBG("Starting tracing for ust app pid %d", app->pid);
3737
3738 rcu_read_lock();
3739
3740 if (!app->compatible) {
3741 goto end;
3742 }
3743
3744 ua_sess = lookup_session_by_app(usess, app);
3745 if (ua_sess == NULL) {
3746 /* The session is in teardown process. Ignore and continue. */
3747 goto end;
3748 }
3749
3750 pthread_mutex_lock(&ua_sess->lock);
3751
3752 /* Upon restart, we skip the setup, already done */
3753 if (ua_sess->started) {
3754 goto skip_setup;
3755 }
3756
3757 /* Create directories if consumer is LOCAL and has a path defined. */
3758 if (usess->consumer->type == CONSUMER_DST_LOCAL &&
3759 strlen(usess->consumer->dst.trace_path) > 0) {
3760 ret = run_as_mkdir_recursive(usess->consumer->dst.trace_path,
3761 S_IRWXU | S_IRWXG, ua_sess->euid, ua_sess->egid);
3762 if (ret < 0) {
3763 if (ret != -EEXIST) {
3764 ERR("Trace directory creation error");
3765 goto error_unlock;
3766 }
3767 }
3768 }
3769
3770 /*
3771 * Create the metadata for the application. This returns gracefully if a
3772 * metadata was already set for the session.
3773 */
3774 ret = create_ust_app_metadata(ua_sess, app, usess->consumer, NULL);
3775 if (ret < 0) {
3776 goto error_unlock;
3777 }
3778
3779 health_code_update();
3780
3781 skip_setup:
3782 /* This start the UST tracing */
3783 ret = ustctl_start_session(app->sock, ua_sess->handle);
3784 if (ret < 0) {
3785 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
3786 ERR("Error starting tracing for app pid: %d (ret: %d)",
3787 app->pid, ret);
3788 } else {
3789 DBG("UST app start session failed. Application is dead.");
3790 /*
3791 * This is normal behavior, an application can die during the
3792 * creation process. Don't report an error so the execution can
3793 * continue normally.
3794 */
3795 pthread_mutex_unlock(&ua_sess->lock);
3796 goto end;
3797 }
3798 goto error_unlock;
3799 }
3800
3801 /* Indicate that the session has been started once */
3802 ua_sess->started = 1;
3803
3804 pthread_mutex_unlock(&ua_sess->lock);
3805
3806 health_code_update();
3807
3808 /* Quiescent wait after starting trace */
3809 ret = ustctl_wait_quiescent(app->sock);
3810 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
3811 ERR("UST app wait quiescent failed for app pid %d ret %d",
3812 app->pid, ret);
3813 }
3814
3815 end:
3816 rcu_read_unlock();
3817 health_code_update();
3818 return 0;
3819
3820 error_unlock:
3821 pthread_mutex_unlock(&ua_sess->lock);
3822 rcu_read_unlock();
3823 health_code_update();
3824 return -1;
3825 }
3826
3827 /*
3828 * Stop tracing for a specific UST session and app.
3829 */
3830 static
3831 int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
3832 {
3833 int ret = 0;
3834 struct ust_app_session *ua_sess;
3835 struct ust_registry_session *registry;
3836
3837 DBG("Stopping tracing for ust app pid %d", app->pid);
3838
3839 rcu_read_lock();
3840
3841 if (!app->compatible) {
3842 goto end_no_session;
3843 }
3844
3845 ua_sess = lookup_session_by_app(usess, app);
3846 if (ua_sess == NULL) {
3847 goto end_no_session;
3848 }
3849
3850 pthread_mutex_lock(&ua_sess->lock);
3851
3852 /*
3853 * If started = 0, it means that stop trace has been called for a session
3854 * that was never started. It's possible since we can have a fail start
3855 * from either the application manager thread or the command thread. Simply
3856 * indicate that this is a stop error.
3857 */
3858 if (!ua_sess->started) {
3859 goto error_rcu_unlock;
3860 }
3861
3862 health_code_update();
3863
3864 /* This inhibits UST tracing */
3865 ret = ustctl_stop_session(app->sock, ua_sess->handle);
3866 if (ret < 0) {
3867 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
3868 ERR("Error stopping tracing for app pid: %d (ret: %d)",
3869 app->pid, ret);
3870 } else {
3871 DBG("UST app stop session failed. Application is dead.");
3872 /*
3873 * This is normal behavior, an application can die during the
3874 * creation process. Don't report an error so the execution can
3875 * continue normally.
3876 */
3877 goto end_unlock;
3878 }
3879 goto error_rcu_unlock;
3880 }
3881
3882 health_code_update();
3883
3884 /* Quiescent wait after stopping trace */
3885 ret = ustctl_wait_quiescent(app->sock);
3886 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
3887 ERR("UST app wait quiescent failed for app pid %d ret %d",
3888 app->pid, ret);
3889 }
3890
3891 health_code_update();
3892
3893 registry = get_session_registry(ua_sess);
3894 assert(registry);
3895
3896 if (!registry->metadata_closed) {
3897 /* Push metadata for application before freeing the application. */
3898 (void) push_metadata(registry, ua_sess->consumer);
3899 }
3900
3901 end_unlock:
3902 pthread_mutex_unlock(&ua_sess->lock);
3903 end_no_session:
3904 rcu_read_unlock();
3905 health_code_update();
3906 return 0;
3907
3908 error_rcu_unlock:
3909 pthread_mutex_unlock(&ua_sess->lock);
3910 rcu_read_unlock();
3911 health_code_update();
3912 return -1;
3913 }
3914
3915 /*
3916 * Flush buffers for a specific UST session and app.
3917 */
3918 static
3919 int ust_app_flush_trace(struct ltt_ust_session *usess, struct ust_app *app)
3920 {
3921 int ret = 0;
3922 struct lttng_ht_iter iter;
3923 struct ust_app_session *ua_sess;
3924 struct ust_app_channel *ua_chan;
3925
3926 DBG("Flushing buffers for ust app pid %d", app->pid);
3927
3928 rcu_read_lock();
3929
3930 if (!app->compatible) {
3931 goto end_no_session;
3932 }
3933
3934 ua_sess = lookup_session_by_app(usess, app);
3935 if (ua_sess == NULL) {
3936 goto end_no_session;
3937 }
3938
3939 pthread_mutex_lock(&ua_sess->lock);
3940
3941 health_code_update();
3942
3943 /* Flushing buffers */
3944 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
3945 node.node) {
3946 health_code_update();
3947 assert(ua_chan->is_sent);
3948 ret = ustctl_sock_flush_buffer(app->sock, ua_chan->obj);
3949 if (ret < 0) {
3950 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
3951 ERR("UST app PID %d channel %s flush failed with ret %d",
3952 app->pid, ua_chan->name, ret);
3953 } else {
3954 DBG3("UST app failed to flush %s. Application is dead.",
3955 ua_chan->name);
3956 /*
3957 * This is normal behavior, an application can die during the
3958 * creation process. Don't report an error so the execution can
3959 * continue normally.
3960 */
3961 }
3962 /* Continuing flushing all buffers */
3963 continue;
3964 }
3965 }
3966
3967 health_code_update();
3968
3969 pthread_mutex_unlock(&ua_sess->lock);
3970 end_no_session:
3971 rcu_read_unlock();
3972 health_code_update();
3973 return 0;
3974 }
3975
3976 /*
3977 * Destroy a specific UST session in apps.
3978 */
3979 static int destroy_trace(struct ltt_ust_session *usess, struct ust_app *app)
3980 {
3981 int ret;
3982 struct ust_app_session *ua_sess;
3983 struct lttng_ht_iter iter;
3984 struct lttng_ht_node_u64 *node;
3985
3986 DBG("Destroy tracing for ust app pid %d", app->pid);
3987
3988 rcu_read_lock();
3989
3990 if (!app->compatible) {
3991 goto end;
3992 }
3993
3994 __lookup_session_by_app(usess, app, &iter);
3995 node = lttng_ht_iter_get_node_u64(&iter);
3996 if (node == NULL) {
3997 /* Session is being or is deleted. */
3998 goto end;
3999 }
4000 ua_sess = caa_container_of(node, struct ust_app_session, node);
4001
4002 health_code_update();
4003 destroy_app_session(app, ua_sess);
4004
4005 health_code_update();
4006
4007 /* Quiescent wait after stopping trace */
4008 ret = ustctl_wait_quiescent(app->sock);
4009 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4010 ERR("UST app wait quiescent failed for app pid %d ret %d",
4011 app->pid, ret);
4012 }
4013 end:
4014 rcu_read_unlock();
4015 health_code_update();
4016 return 0;
4017 }
4018
4019 /*
4020 * Start tracing for the UST session.
4021 */
4022 int ust_app_start_trace_all(struct ltt_ust_session *usess)
4023 {
4024 int ret = 0;
4025 struct lttng_ht_iter iter;
4026 struct ust_app *app;
4027
4028 DBG("Starting all UST traces");
4029
4030 rcu_read_lock();
4031
4032 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4033 ret = ust_app_start_trace(usess, app);
4034 if (ret < 0) {
4035 /* Continue to next apps even on error */
4036 continue;
4037 }
4038 }
4039
4040 rcu_read_unlock();
4041
4042 return 0;
4043 }
4044
4045 /*
4046 * Start tracing for the UST session.
4047 */
4048 int ust_app_stop_trace_all(struct ltt_ust_session *usess)
4049 {
4050 int ret = 0;
4051 struct lttng_ht_iter iter;
4052 struct ust_app *app;
4053
4054 DBG("Stopping all UST traces");
4055
4056 rcu_read_lock();
4057
4058 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4059 ret = ust_app_stop_trace(usess, app);
4060 if (ret < 0) {
4061 /* Continue to next apps even on error */
4062 continue;
4063 }
4064 }
4065
4066 /* Flush buffers and push metadata (for UID buffers). */
4067 switch (usess->buffer_type) {
4068 case LTTNG_BUFFER_PER_UID:
4069 {
4070 struct buffer_reg_uid *reg;
4071
4072 /* Flush all per UID buffers associated to that session. */
4073 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
4074 struct ust_registry_session *ust_session_reg;
4075 struct buffer_reg_channel *reg_chan;
4076 struct consumer_socket *socket;
4077
4078 /* Get consumer socket to use to push the metadata.*/
4079 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
4080 usess->consumer);
4081 if (!socket) {
4082 /* Ignore request if no consumer is found for the session. */
4083 continue;
4084 }
4085
4086 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
4087 reg_chan, node.node) {
4088 /*
4089 * The following call will print error values so the return
4090 * code is of little importance because whatever happens, we
4091 * have to try them all.
4092 */
4093 (void) consumer_flush_channel(socket, reg_chan->consumer_key);
4094 }
4095
4096 ust_session_reg = reg->registry->reg.ust;
4097 if (!ust_session_reg->metadata_closed) {
4098 /* Push metadata. */
4099 (void) push_metadata(ust_session_reg, usess->consumer);
4100 }
4101 }
4102
4103 break;
4104 }
4105 case LTTNG_BUFFER_PER_PID:
4106 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4107 ret = ust_app_flush_trace(usess, app);
4108 if (ret < 0) {
4109 /* Continue to next apps even on error */
4110 continue;
4111 }
4112 }
4113 break;
4114 default:
4115 assert(0);
4116 break;
4117 }
4118
4119 rcu_read_unlock();
4120
4121 return 0;
4122 }
4123
4124 /*
4125 * Destroy app UST session.
4126 */
4127 int ust_app_destroy_trace_all(struct ltt_ust_session *usess)
4128 {
4129 int ret = 0;
4130 struct lttng_ht_iter iter;
4131 struct ust_app *app;
4132
4133 DBG("Destroy all UST traces");
4134
4135 rcu_read_lock();
4136
4137 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4138 ret = destroy_trace(usess, app);
4139 if (ret < 0) {
4140 /* Continue to next apps even on error */
4141 continue;
4142 }
4143 }
4144
4145 rcu_read_unlock();
4146
4147 return 0;
4148 }
4149
4150 /*
4151 * Add channels/events from UST global domain to registered apps at sock.
4152 */
4153 void ust_app_global_update(struct ltt_ust_session *usess, int sock)
4154 {
4155 int ret = 0;
4156 struct lttng_ht_iter iter, uiter;
4157 struct ust_app *app;
4158 struct ust_app_session *ua_sess = NULL;
4159 struct ust_app_channel *ua_chan;
4160 struct ust_app_event *ua_event;
4161 struct ust_app_ctx *ua_ctx;
4162
4163 assert(usess);
4164 assert(sock >= 0);
4165
4166 DBG2("UST app global update for app sock %d for session id %" PRIu64, sock,
4167 usess->id);
4168
4169 rcu_read_lock();
4170
4171 app = ust_app_find_by_sock(sock);
4172 if (app == NULL) {
4173 /*
4174 * Application can be unregistered before so this is possible hence
4175 * simply stopping the update.
4176 */
4177 DBG3("UST app update failed to find app sock %d", sock);
4178 goto error;
4179 }
4180
4181 if (!app->compatible) {
4182 goto error;
4183 }
4184
4185 ret = create_ust_app_session(usess, app, &ua_sess, NULL);
4186 if (ret < 0) {
4187 /* Tracer is probably gone or ENOMEM. */
4188 goto error;
4189 }
4190 assert(ua_sess);
4191
4192 pthread_mutex_lock(&ua_sess->lock);
4193
4194 /*
4195 * We can iterate safely here over all UST app session since the create ust
4196 * app session above made a shadow copy of the UST global domain from the
4197 * ltt ust session.
4198 */
4199 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
4200 node.node) {
4201 /*
4202 * For a metadata channel, handle it differently.
4203 */
4204 if (!strncmp(ua_chan->name, DEFAULT_METADATA_NAME,
4205 sizeof(ua_chan->name))) {
4206 ret = create_ust_app_metadata(ua_sess, app, usess->consumer,
4207 &ua_chan->attr);
4208 if (ret < 0) {
4209 goto error_unlock;
4210 }
4211 /* Remove it from the hash table and continue!. */
4212 ret = lttng_ht_del(ua_sess->channels, &iter);
4213 assert(!ret);
4214 delete_ust_app_channel(-1, ua_chan, app);
4215 continue;
4216 } else {
4217 ret = do_create_channel(app, usess, ua_sess, ua_chan);
4218 if (ret < 0) {
4219 /*
4220 * Stop everything. On error, the application failed, no more
4221 * file descriptor are available or ENOMEM so stopping here is
4222 * the only thing we can do for now.
4223 */
4224 goto error_unlock;
4225 }
4226 }
4227
4228 /*
4229 * Add context using the list so they are enabled in the same order the
4230 * user added them.
4231 */
4232 cds_list_for_each_entry(ua_ctx, &ua_chan->ctx_list, list) {
4233 ret = create_ust_channel_context(ua_chan, ua_ctx, app);
4234 if (ret < 0) {
4235 goto error_unlock;
4236 }
4237 }
4238
4239
4240 /* For each events */
4241 cds_lfht_for_each_entry(ua_chan->events->ht, &uiter.iter, ua_event,
4242 node.node) {
4243 ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
4244 if (ret < 0) {
4245 goto error_unlock;
4246 }
4247 }
4248 }
4249
4250 pthread_mutex_unlock(&ua_sess->lock);
4251
4252 if (usess->start_trace) {
4253 ret = ust_app_start_trace(usess, app);
4254 if (ret < 0) {
4255 goto error;
4256 }
4257
4258 DBG2("UST trace started for app pid %d", app->pid);
4259 }
4260
4261 /* Everything went well at this point. */
4262 rcu_read_unlock();
4263 return;
4264
4265 error_unlock:
4266 pthread_mutex_unlock(&ua_sess->lock);
4267 error:
4268 if (ua_sess) {
4269 destroy_app_session(app, ua_sess);
4270 }
4271 rcu_read_unlock();
4272 return;
4273 }
4274
4275 /*
4276 * Add context to a specific channel for global UST domain.
4277 */
4278 int ust_app_add_ctx_channel_glb(struct ltt_ust_session *usess,
4279 struct ltt_ust_channel *uchan, struct ltt_ust_context *uctx)
4280 {
4281 int ret = 0;
4282 struct lttng_ht_node_str *ua_chan_node;
4283 struct lttng_ht_iter iter, uiter;
4284 struct ust_app_channel *ua_chan = NULL;
4285 struct ust_app_session *ua_sess;
4286 struct ust_app *app;
4287
4288 rcu_read_lock();
4289
4290 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4291 if (!app->compatible) {
4292 /*
4293 * TODO: In time, we should notice the caller of this error by
4294 * telling him that this is a version error.
4295 */
4296 continue;
4297 }
4298 ua_sess = lookup_session_by_app(usess, app);
4299 if (ua_sess == NULL) {
4300 continue;
4301 }
4302
4303 pthread_mutex_lock(&ua_sess->lock);
4304 /* Lookup channel in the ust app session */
4305 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4306 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
4307 if (ua_chan_node == NULL) {
4308 goto next_app;
4309 }
4310 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel,
4311 node);
4312 ret = create_ust_app_channel_context(ua_sess, ua_chan, &uctx->ctx, app);
4313 if (ret < 0) {
4314 goto next_app;
4315 }
4316 next_app:
4317 pthread_mutex_unlock(&ua_sess->lock);
4318 }
4319
4320 rcu_read_unlock();
4321 return ret;
4322 }
4323
4324 /*
4325 * Enable event for a channel from a UST session for a specific PID.
4326 */
4327 int ust_app_enable_event_pid(struct ltt_ust_session *usess,
4328 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent, pid_t pid)
4329 {
4330 int ret = 0;
4331 struct lttng_ht_iter iter;
4332 struct lttng_ht_node_str *ua_chan_node;
4333 struct ust_app *app;
4334 struct ust_app_session *ua_sess;
4335 struct ust_app_channel *ua_chan;
4336 struct ust_app_event *ua_event;
4337
4338 DBG("UST app enabling event %s for PID %d", uevent->attr.name, pid);
4339
4340 rcu_read_lock();
4341
4342 app = ust_app_find_by_pid(pid);
4343 if (app == NULL) {
4344 ERR("UST app enable event per PID %d not found", pid);
4345 ret = -1;
4346 goto end;
4347 }
4348
4349 if (!app->compatible) {
4350 ret = 0;
4351 goto end;
4352 }
4353
4354 ua_sess = lookup_session_by_app(usess, app);
4355 if (!ua_sess) {
4356 /* The application has problem or is probably dead. */
4357 ret = 0;
4358 goto end;
4359 }
4360
4361 pthread_mutex_lock(&ua_sess->lock);
4362 /* Lookup channel in the ust app session */
4363 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
4364 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
4365 /* If the channel is not found, there is a code flow error */
4366 assert(ua_chan_node);
4367
4368 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4369
4370 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
4371 uevent->filter, uevent->attr.loglevel);
4372 if (ua_event == NULL) {
4373 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
4374 if (ret < 0) {
4375 goto end_unlock;
4376 }
4377 } else {
4378 ret = enable_ust_app_event(ua_sess, ua_event, app);
4379 if (ret < 0) {
4380 goto end_unlock;
4381 }
4382 }
4383
4384 end_unlock:
4385 pthread_mutex_unlock(&ua_sess->lock);
4386 end:
4387 rcu_read_unlock();
4388 return ret;
4389 }
4390
4391 /*
4392 * Disable event for a channel from a UST session for a specific PID.
4393 */
4394 int ust_app_disable_event_pid(struct ltt_ust_session *usess,
4395 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent, pid_t pid)
4396 {
4397 int ret = 0;
4398 struct lttng_ht_iter iter;
4399 struct lttng_ht_node_str *ua_chan_node, *ua_event_node;
4400 struct ust_app *app;
4401 struct ust_app_session *ua_sess;
4402 struct ust_app_channel *ua_chan;
4403 struct ust_app_event *ua_event;
4404
4405 DBG("UST app disabling event %s for PID %d", uevent->attr.name, pid);
4406
4407 rcu_read_lock();
4408
4409 app = ust_app_find_by_pid(pid);
4410 if (app == NULL) {
4411 ERR("UST app disable event per PID %d not found", pid);
4412 ret = -1;
4413 goto error;
4414 }
4415
4416 if (!app->compatible) {
4417 ret = 0;
4418 goto error;
4419 }
4420
4421 ua_sess = lookup_session_by_app(usess, app);
4422 if (!ua_sess) {
4423 /* The application has problem or is probably dead. */
4424 goto error;
4425 }
4426
4427 /* Lookup channel in the ust app session */
4428 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
4429 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
4430 if (ua_chan_node == NULL) {
4431 /* Channel does not exist, skip disabling */
4432 goto error;
4433 }
4434 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4435
4436 lttng_ht_lookup(ua_chan->events, (void *)uevent->attr.name, &iter);
4437 ua_event_node = lttng_ht_iter_get_node_str(&iter);
4438 if (ua_event_node == NULL) {
4439 /* Event does not exist, skip disabling */
4440 goto error;
4441 }
4442 ua_event = caa_container_of(ua_event_node, struct ust_app_event, node);
4443
4444 ret = disable_ust_app_event(ua_sess, ua_event, app);
4445 if (ret < 0) {
4446 goto error;
4447 }
4448
4449 error:
4450 rcu_read_unlock();
4451 return ret;
4452 }
4453
4454 /*
4455 * Calibrate registered applications.
4456 */
4457 int ust_app_calibrate_glb(struct lttng_ust_calibrate *calibrate)
4458 {
4459 int ret = 0;
4460 struct lttng_ht_iter iter;
4461 struct ust_app *app;
4462
4463 rcu_read_lock();
4464
4465 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4466 if (!app->compatible) {
4467 /*
4468 * TODO: In time, we should notice the caller of this error by
4469 * telling him that this is a version error.
4470 */
4471 continue;
4472 }
4473
4474 health_code_update();
4475
4476 ret = ustctl_calibrate(app->sock, calibrate);
4477 if (ret < 0) {
4478 switch (ret) {
4479 case -ENOSYS:
4480 /* Means that it's not implemented on the tracer side. */
4481 ret = 0;
4482 break;
4483 default:
4484 DBG2("Calibrate app PID %d returned with error %d",
4485 app->pid, ret);
4486 break;
4487 }
4488 }
4489 }
4490
4491 DBG("UST app global domain calibration finished");
4492
4493 rcu_read_unlock();
4494
4495 health_code_update();
4496
4497 return ret;
4498 }
4499
4500 /*
4501 * Receive registration and populate the given msg structure.
4502 *
4503 * On success return 0 else a negative value returned by the ustctl call.
4504 */
4505 int ust_app_recv_registration(int sock, struct ust_register_msg *msg)
4506 {
4507 int ret;
4508 uint32_t pid, ppid, uid, gid;
4509
4510 assert(msg);
4511
4512 ret = ustctl_recv_reg_msg(sock, &msg->type, &msg->major, &msg->minor,
4513 &pid, &ppid, &uid, &gid,
4514 &msg->bits_per_long,
4515 &msg->uint8_t_alignment,
4516 &msg->uint16_t_alignment,
4517 &msg->uint32_t_alignment,
4518 &msg->uint64_t_alignment,
4519 &msg->long_alignment,
4520 &msg->byte_order,
4521 msg->name);
4522 if (ret < 0) {
4523 switch (-ret) {
4524 case EPIPE:
4525 case ECONNRESET:
4526 case LTTNG_UST_ERR_EXITING:
4527 DBG3("UST app recv reg message failed. Application died");
4528 break;
4529 case LTTNG_UST_ERR_UNSUP_MAJOR:
4530 ERR("UST app recv reg unsupported version %d.%d. Supporting %d.%d",
4531 msg->major, msg->minor, LTTNG_UST_ABI_MAJOR_VERSION,
4532 LTTNG_UST_ABI_MINOR_VERSION);
4533 break;
4534 default:
4535 ERR("UST app recv reg message failed with ret %d", ret);
4536 break;
4537 }
4538 goto error;
4539 }
4540 msg->pid = (pid_t) pid;
4541 msg->ppid = (pid_t) ppid;
4542 msg->uid = (uid_t) uid;
4543 msg->gid = (gid_t) gid;
4544
4545 error:
4546 return ret;
4547 }
4548
4549 /*
4550 * Return a ust app channel object using the application object and the channel
4551 * object descriptor has a key. If not found, NULL is returned. A RCU read side
4552 * lock MUST be acquired before calling this function.
4553 */
4554 static struct ust_app_channel *find_channel_by_objd(struct ust_app *app,
4555 int objd)
4556 {
4557 struct lttng_ht_node_ulong *node;
4558 struct lttng_ht_iter iter;
4559 struct ust_app_channel *ua_chan = NULL;
4560
4561 assert(app);
4562
4563 lttng_ht_lookup(app->ust_objd, (void *)((unsigned long) objd), &iter);
4564 node = lttng_ht_iter_get_node_ulong(&iter);
4565 if (node == NULL) {
4566 DBG2("UST app channel find by objd %d not found", objd);
4567 goto error;
4568 }
4569
4570 ua_chan = caa_container_of(node, struct ust_app_channel, ust_objd_node);
4571
4572 error:
4573 return ua_chan;
4574 }
4575
4576 /*
4577 * Reply to a register channel notification from an application on the notify
4578 * socket. The channel metadata is also created.
4579 *
4580 * The session UST registry lock is acquired in this function.
4581 *
4582 * On success 0 is returned else a negative value.
4583 */
4584 static int reply_ust_register_channel(int sock, int sobjd, int cobjd,
4585 size_t nr_fields, struct ustctl_field *fields)
4586 {
4587 int ret, ret_code = 0;
4588 uint32_t chan_id, reg_count;
4589 uint64_t chan_reg_key;
4590 enum ustctl_channel_header type;
4591 struct ust_app *app;
4592 struct ust_app_channel *ua_chan;
4593 struct ust_app_session *ua_sess;
4594 struct ust_registry_session *registry;
4595 struct ust_registry_channel *chan_reg;
4596
4597 rcu_read_lock();
4598
4599 /* Lookup application. If not found, there is a code flow error. */
4600 app = find_app_by_notify_sock(sock);
4601 if (!app) {
4602 DBG("Application socket %d is being teardown. Abort event notify",
4603 sock);
4604 ret = 0;
4605 free(fields);
4606 goto error_rcu_unlock;
4607 }
4608
4609 /* Lookup channel by UST object descriptor. */
4610 ua_chan = find_channel_by_objd(app, cobjd);
4611 if (!ua_chan) {
4612 DBG("Application channel is being teardown. Abort event notify");
4613 ret = 0;
4614 free(fields);
4615 goto error_rcu_unlock;
4616 }
4617
4618 assert(ua_chan->session);
4619 ua_sess = ua_chan->session;
4620
4621 /* Get right session registry depending on the session buffer type. */
4622 registry = get_session_registry(ua_sess);
4623 assert(registry);
4624
4625 /* Depending on the buffer type, a different channel key is used. */
4626 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
4627 chan_reg_key = ua_chan->tracing_channel_id;
4628 } else {
4629 chan_reg_key = ua_chan->key;
4630 }
4631
4632 pthread_mutex_lock(&registry->lock);
4633
4634 chan_reg = ust_registry_channel_find(registry, chan_reg_key);
4635 assert(chan_reg);
4636
4637 if (!chan_reg->register_done) {
4638 reg_count = ust_registry_get_event_count(chan_reg);
4639 if (reg_count < 31) {
4640 type = USTCTL_CHANNEL_HEADER_COMPACT;
4641 } else {
4642 type = USTCTL_CHANNEL_HEADER_LARGE;
4643 }
4644
4645 chan_reg->nr_ctx_fields = nr_fields;
4646 chan_reg->ctx_fields = fields;
4647 chan_reg->header_type = type;
4648 } else {
4649 /* Get current already assigned values. */
4650 type = chan_reg->header_type;
4651 free(fields);
4652 /* Set to NULL so the error path does not do a double free. */
4653 fields = NULL;
4654 }
4655 /* Channel id is set during the object creation. */
4656 chan_id = chan_reg->chan_id;
4657
4658 /* Append to metadata */
4659 if (!chan_reg->metadata_dumped) {
4660 ret_code = ust_metadata_channel_statedump(registry, chan_reg);
4661 if (ret_code) {
4662 ERR("Error appending channel metadata (errno = %d)", ret_code);
4663 goto reply;
4664 }
4665 }
4666
4667 reply:
4668 DBG3("UST app replying to register channel key %" PRIu64
4669 " with id %u, type: %d, ret: %d", chan_reg_key, chan_id, type,
4670 ret_code);
4671
4672 ret = ustctl_reply_register_channel(sock, chan_id, type, ret_code);
4673 if (ret < 0) {
4674 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4675 ERR("UST app reply channel failed with ret %d", ret);
4676 } else {
4677 DBG3("UST app reply channel failed. Application died");
4678 }
4679 goto error;
4680 }
4681
4682 /* This channel registry registration is completed. */
4683 chan_reg->register_done = 1;
4684
4685 error:
4686 pthread_mutex_unlock(&registry->lock);
4687 error_rcu_unlock:
4688 rcu_read_unlock();
4689 if (ret) {
4690 free(fields);
4691 }
4692 return ret;
4693 }
4694
/*
 * Add event to the UST channel registry. When the event is added to the
 * registry, the metadata is also created. Once done, this replies to the
 * application with the appropriate error code.
 *
 * Ownership note: this function owns sig, fields and model_emf_uri from the
 * moment it is called. They are either consumed by
 * ust_registry_create_event() or freed on the early-abort paths below.
 *
 * The session UST registry lock is acquired in the function.
 *
 * On success 0 is returned else a negative value.
 */
static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
		char *sig, size_t nr_fields, struct ustctl_field *fields, int loglevel,
		char *model_emf_uri)
{
	int ret, ret_code;
	uint32_t event_id = 0;
	uint64_t chan_reg_key;
	struct ust_app *app;
	struct ust_app_channel *ua_chan;
	struct ust_app_session *ua_sess;
	struct ust_registry_session *registry;

	rcu_read_lock();

	/* Lookup application. If not found, there is a code flow error. */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		DBG("Application socket %d is being teardown. Abort event notify",
				sock);
		ret = 0;
		/* Release the received buffers since no one will consume them. */
		free(sig);
		free(fields);
		free(model_emf_uri);
		goto error_rcu_unlock;
	}

	/* Lookup channel by UST object descriptor. */
	ua_chan = find_channel_by_objd(app, cobjd);
	if (!ua_chan) {
		DBG("Application channel is being teardown. Abort event notify");
		ret = 0;
		/* Same as above: drop ownership of the received buffers. */
		free(sig);
		free(fields);
		free(model_emf_uri);
		goto error_rcu_unlock;
	}

	assert(ua_chan->session);
	ua_sess = ua_chan->session;

	registry = get_session_registry(ua_sess);
	assert(registry);

	/* Per-UID buffers key on the tracing channel id, per-PID on the key. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
		chan_reg_key = ua_chan->tracing_channel_id;
	} else {
		chan_reg_key = ua_chan->key;
	}

	pthread_mutex_lock(&registry->lock);

	/*
	 * From this point on, this call acquires the ownership of the sig, fields
	 * and model_emf_uri meaning any free are done inside it if needed. These
	 * three variables MUST NOT be read/write after this.
	 */
	ret_code = ust_registry_create_event(registry, chan_reg_key,
			sobjd, cobjd, name, sig, nr_fields, fields, loglevel,
			model_emf_uri, ua_sess->buffer_type, &event_id,
			app);

	/*
	 * The return value is returned to ustctl so in case of an error, the
	 * application can be notified. In case of an error, it's important not to
	 * return a negative error or else the application will get closed.
	 */
	ret = ustctl_reply_register_event(sock, event_id, ret_code);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app reply event failed with ret %d", ret);
		} else {
			DBG3("UST app reply event failed. Application died");
		}
		/*
		 * No need to wipe the create event since the application socket will
		 * get close on error hence cleaning up everything by itself.
		 */
		goto error;
	}

	DBG3("UST registry event %s with id %" PRId32 " added successfully",
			name, event_id);

error:
	pthread_mutex_unlock(&registry->lock);
error_rcu_unlock:
	rcu_read_unlock();
	return ret;
}
4793
/*
 * Handle application notification through the given notify socket.
 *
 * Receives one command from the socket and dispatches it: either an event
 * registration or a channel registration coming from the application.
 *
 * Return 0 on success or else a negative value.
 */
int ust_app_recv_notify(int sock)
{
	int ret;
	enum ustctl_notify_cmd cmd;

	DBG3("UST app receiving notify from sock %d", sock);

	ret = ustctl_recv_notify(sock, &cmd);
	if (ret < 0) {
		/* EPIPE/EXITING simply means the application went away. */
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app recv notify failed with ret %d", ret);
		} else {
			DBG3("UST app recv notify failed. Application died");
		}
		goto error;
	}

	switch (cmd) {
	case USTCTL_NOTIFY_CMD_EVENT:
	{
		int sobjd, cobjd, loglevel;
		char name[LTTNG_UST_SYM_NAME_LEN], *sig, *model_emf_uri;
		size_t nr_fields;
		struct ustctl_field *fields;

		DBG2("UST app ustctl register event received");

		/* sig, fields and model_emf_uri are allocated by this call. */
		ret = ustctl_recv_register_event(sock, &sobjd, &cobjd, name, &loglevel,
				&sig, &nr_fields, &fields, &model_emf_uri);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app recv event failed with ret %d", ret);
			} else {
				DBG3("UST app recv event failed. Application died");
			}
			goto error;
		}

		/*
		 * Add event to the UST registry coming from the notify socket. This
		 * call will free if needed the sig, fields and model_emf_uri. This
		 * code path loses the ownsership of these variables and transfer them
		 * to the this function.
		 */
		ret = add_event_ust_registry(sock, sobjd, cobjd, name, sig, nr_fields,
				fields, loglevel, model_emf_uri);
		if (ret < 0) {
			goto error;
		}

		break;
	}
	case USTCTL_NOTIFY_CMD_CHANNEL:
	{
		int sobjd, cobjd;
		size_t nr_fields;
		struct ustctl_field *fields;

		DBG2("UST app ustctl register channel received");

		/* fields is allocated by this call on success. */
		ret = ustctl_recv_register_channel(sock, &sobjd, &cobjd, &nr_fields,
				&fields);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app recv channel failed with ret %d", ret);
			} else {
				DBG3("UST app recv channel failed. Application died");
			}
			goto error;
		}

		/*
		 * The fields ownership are transfered to this function call meaning
		 * that if needed it will be freed. After this, it's invalid to access
		 * fields or clean it up.
		 */
		ret = reply_ust_register_channel(sock, sobjd, cobjd, nr_fields,
				fields);
		if (ret < 0) {
			goto error;
		}

		break;
	}
	default:
		/* Should NEVER happen. */
		assert(0);
	}

error:
	return ret;
}
4891
/*
 * Once the notify socket hangs up, this is called. First, it tries to find the
 * corresponding application. On failure, the call_rcu to close the socket is
 * executed. If an application is found, it tries to delete it from the notify
 * socket hash table. Whatever the result, it proceeds to the call_rcu.
 *
 * Note that an object needs to be allocated here so on ENOMEM failure, the
 * call RCU is not done but the rest of the cleanup is.
 */
void ust_app_notify_sock_unregister(int sock)
{
	int err_enomem = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct ust_app_notify_sock_obj *obj;

	assert(sock >= 0);

	rcu_read_lock();

	/* Wrapper object carried through call_rcu to close the fd later. */
	obj = zmalloc(sizeof(*obj));
	if (!obj) {
		/*
		 * An ENOMEM is kind of uncool. If this strikes we continue the
		 * procedure but the call_rcu will not be called. In this case, we
		 * accept the fd leak rather than possibly creating an unsynchronized
		 * state between threads.
		 *
		 * TODO: The notify object should be created once the notify socket is
		 * registered and stored independently from the ust app object. The
		 * tricky part is to synchronize the teardown of the application and
		 * this notify object. Let's keep that in mind so we can avoid this
		 * kind of shenanigans with ENOMEM in the teardown path.
		 */
		err_enomem = 1;
	} else {
		obj->fd = sock;
	}

	DBG("UST app notify socket unregister %d", sock);

	/*
	 * Lookup application by notify socket. If this fails, this means that the
	 * hash table delete has already been done by the application
	 * unregistration process so we can safely close the notify socket in a
	 * call RCU.
	 */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		goto close_socket;
	}

	/* Point the iterator directly at this app's notify-sock node. */
	iter.iter.node = &app->notify_sock_n.node;

	/*
	 * Whatever happens here either we fail or succeed, in both cases we have
	 * to close the socket after a grace period to continue to the call RCU
	 * here. If the deletion is successful, the application is not visible
	 * anymore by other threads and is it fails it means that it was already
	 * deleted from the hash table so either way we just have to close the
	 * socket.
	 */
	(void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);

close_socket:
	rcu_read_unlock();

	/*
	 * Close socket after a grace period to avoid for the socket to be reused
	 * before the application object is freed creating potential race between
	 * threads trying to add unique in the global hash table.
	 */
	if (!err_enomem) {
		call_rcu(&obj->head, close_notify_sock_rcu);
	}
}
4968
4969 /*
4970 * Destroy a ust app data structure and free its memory.
4971 */
4972 void ust_app_destroy(struct ust_app *app)
4973 {
4974 if (!app) {
4975 return;
4976 }
4977
4978 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
4979 }
4980
/*
 * Take a snapshot for a given UST session. The snapshot is sent to the given
 * output.
 *
 * For per-UID buffers, every UID registry is snapshotted; for per-PID
 * buffers, every registered application attached to the session is. In both
 * cases each data channel is snapshotted, then the metadata channel.
 *
 * Return 0 on success or else a negative value.
 */
int ust_app_snapshot_record(struct ltt_ust_session *usess,
		struct snapshot_output *output, int wait, unsigned int nb_streams)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	char pathname[PATH_MAX];
	uint64_t max_stream_size = 0;

	assert(usess);
	assert(output);

	rcu_read_lock();

	/*
	 * Compute the maximum size of a single stream if a max size is asked by
	 * the caller.
	 */
	if (output->max_size > 0 && nb_streams > 0) {
		max_stream_size = output->max_size / nb_streams;
	}

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;

		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct buffer_reg_channel *reg_chan;
			struct consumer_socket *socket;

			/* Get consumer socket to use to push the metadata.*/
			socket = consumer_find_socket_by_bitness(reg->bits_per_long,
					usess->consumer);
			if (!socket) {
				ret = -EINVAL;
				goto error;
			}

			/* Build the per-UID destination path for this registry. */
			memset(pathname, 0, sizeof(pathname));
			ret = snprintf(pathname, sizeof(pathname),
					DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH,
					reg->uid, reg->bits_per_long);
			if (ret < 0) {
				PERROR("snprintf snapshot path");
				goto error;
			}

			/* Add the UST default trace dir to path. */
			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
					reg_chan, node.node) {

				/*
				 * Make sure the maximum stream size is not lower than the
				 * subbuffer size or else it's an error since we won't be able to
				 * snapshot anything.
				 */
				if (max_stream_size &&
						reg_chan->subbuf_size > max_stream_size) {
					ret = -EINVAL;
					DBG3("UST app snapshot record maximum stream size %" PRIu64
							" is smaller than subbuffer size of %zu",
							max_stream_size, reg_chan->subbuf_size);
					goto error;
				}
				ret = consumer_snapshot_channel(socket, reg_chan->consumer_key, output, 0,
						usess->uid, usess->gid, pathname, wait,
						max_stream_size);
				if (ret < 0) {
					goto error;
				}
			}
			/* Finally snapshot the metadata channel (flag set to 1). */
			ret = consumer_snapshot_channel(socket, reg->registry->reg.ust->metadata_key, output,
					1, usess->uid, usess->gid, pathname, wait,
					max_stream_size);
			if (ret < 0) {
				goto error;
			}
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			struct consumer_socket *socket;
			struct lttng_ht_iter chan_iter;
			struct ust_app_channel *ua_chan;
			struct ust_app_session *ua_sess;
			struct ust_registry_session *registry;

			ua_sess = lookup_session_by_app(usess, app);
			if (!ua_sess) {
				/* Session not associated with this app. */
				continue;
			}

			/* Get the right consumer socket for the application. */
			socket = consumer_find_socket_by_bitness(app->bits_per_long,
					output->consumer);
			if (!socket) {
				ret = -EINVAL;
				goto error;
			}

			/* Add the UST default trace dir to path. */
			memset(pathname, 0, sizeof(pathname));
			ret = snprintf(pathname, sizeof(pathname), DEFAULT_UST_TRACE_DIR "/%s",
					ua_sess->path);
			if (ret < 0) {
				PERROR("snprintf snapshot path");
				goto error;
			}

			cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
					ua_chan, node.node) {
				/*
				 * Make sure the maximum stream size is not lower than the
				 * subbuffer size or else it's an error since we won't be able to
				 * snapshot anything.
				 */
				if (max_stream_size &&
						ua_chan->attr.subbuf_size > max_stream_size) {
					ret = -EINVAL;
					DBG3("UST app snapshot record maximum stream size %" PRIu64
							" is smaller than subbuffer size of %" PRIu64,
							max_stream_size, ua_chan->attr.subbuf_size);
					goto error;
				}

				ret = consumer_snapshot_channel(socket, ua_chan->key, output, 0,
						ua_sess->euid, ua_sess->egid, pathname, wait,
						max_stream_size);
				if (ret < 0) {
					goto error;
				}
			}

			/* Then snapshot this app session's metadata channel. */
			registry = get_session_registry(ua_sess);
			assert(registry);
			ret = consumer_snapshot_channel(socket, registry->metadata_key, output,
					1, ua_sess->euid, ua_sess->egid, pathname, wait,
					max_stream_size);
			if (ret < 0) {
				goto error;
			}
		}
		break;
	}
	default:
		assert(0);
		break;
	}

error:
	rcu_read_unlock();
	return ret;
}
5144
5145 /*
5146 * Return the number of streams for a UST session.
5147 */
5148 unsigned int ust_app_get_nb_stream(struct ltt_ust_session *usess)
5149 {
5150 unsigned int ret = 0;
5151 struct ust_app *app;
5152 struct lttng_ht_iter iter;
5153
5154 assert(usess);
5155
5156 switch (usess->buffer_type) {
5157 case LTTNG_BUFFER_PER_UID:
5158 {
5159 struct buffer_reg_uid *reg;
5160
5161 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
5162 struct buffer_reg_channel *reg_chan;
5163
5164 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
5165 reg_chan, node.node) {
5166 ret += reg_chan->stream_count;
5167 }
5168 }
5169 break;
5170 }
5171 case LTTNG_BUFFER_PER_PID:
5172 {
5173 rcu_read_lock();
5174 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5175 struct ust_app_channel *ua_chan;
5176 struct ust_app_session *ua_sess;
5177 struct lttng_ht_iter chan_iter;
5178
5179 ua_sess = lookup_session_by_app(usess, app);
5180 if (!ua_sess) {
5181 /* Session not associated with this app. */
5182 continue;
5183 }
5184
5185 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
5186 ua_chan, node.node) {
5187 ret += ua_chan->streams.count;
5188 }
5189 }
5190 rcu_read_unlock();
5191 break;
5192 }
5193 default:
5194 assert(0);
5195 break;
5196 }
5197
5198 return ret;
5199 }
This page took 0.180751 seconds and 3 git commands to generate.