lttng-tools.git: src/bin/lttng-sessiond/ust-app.c (commit 2d8cb2c279c8306eb25cc8f6a02d56be70cbb31e)

1 /*
2 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License, version 2 only,
6 * as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License along
14 * with this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
16 */
17
18 #define _GNU_SOURCE
19 #include <errno.h>
20 #include <inttypes.h>
21 #include <pthread.h>
22 #include <stdio.h>
23 #include <stdlib.h>
24 #include <string.h>
25 #include <sys/stat.h>
26 #include <sys/types.h>
27 #include <unistd.h>
28 #include <urcu/compiler.h>
29 #include <lttng/ust-error.h>
30 #include <signal.h>
31
32 #include <common/common.h>
33 #include <common/sessiond-comm/sessiond-comm.h>
34
35 #include "buffer-registry.h"
36 #include "fd-limit.h"
37 #include "health-sessiond.h"
38 #include "ust-app.h"
39 #include "ust-consumer.h"
40 #include "ust-ctl.h"
41 #include "utils.h"
42
43 /* Next available channel key. Access under next_channel_key_lock. */
44 static uint64_t _next_channel_key;
45 static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;
46
47 /* Next available session ID. Access under next_session_id_lock. */
48 static uint64_t _next_session_id;
49 static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;
50
51 /*
52 * Return the incremented value of next_channel_key.
53 */
54 static uint64_t get_next_channel_key(void)
55 {
56 uint64_t ret;
57
58 pthread_mutex_lock(&next_channel_key_lock);
59 ret = ++_next_channel_key;
60 pthread_mutex_unlock(&next_channel_key_lock);
61 return ret;
62 }
63
64 /*
65 * Return the atomically incremented value of next_session_id.
66 */
67 static uint64_t get_next_session_id(void)
68 {
69 uint64_t ret;
70
71 pthread_mutex_lock(&next_session_id_lock);
72 ret = ++_next_session_id;
73 pthread_mutex_unlock(&next_session_id_lock);
74 return ret;
75 }
76
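/*
 * Annotation (added note): because of the pre-increment in the two helpers
 * above, the first channel key and session id handed out are 1, never 0, so
 * a zero value can safely be treated as "not assigned" by their users.
 */
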
77 static void copy_channel_attr_to_ustctl(
78 struct ustctl_consumer_channel_attr *attr,
79 struct lttng_ust_channel_attr *uattr)
80 {
81 /* Copy channel attributes since the layout is different. */
82 attr->subbuf_size = uattr->subbuf_size;
83 attr->num_subbuf = uattr->num_subbuf;
84 attr->overwrite = uattr->overwrite;
85 attr->switch_timer_interval = uattr->switch_timer_interval;
86 attr->read_timer_interval = uattr->read_timer_interval;
87 attr->output = uattr->output;
88 }
89
90 /*
91 * Match function for the hash table lookup.
92 *
93 * It matches an ust app event based on four attributes: the event name,
94 * the filter bytecode, the loglevel and the exclusion list.
95 */
96 static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
97 {
98 struct ust_app_event *event;
99 const struct ust_app_ht_key *key;
100
101 assert(node);
102 assert(_key);
103
104 event = caa_container_of(node, struct ust_app_event, node.node);
105 key = _key;
106
107 /* Match the 4 elements of the key: name, filter, loglevel, exclusions */
108
109 /* Event name */
110 if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
111 goto no_match;
112 }
113
114 /* Event loglevel. */
115 if (event->attr.loglevel != key->loglevel) {
116 if (event->attr.loglevel_type == LTTNG_UST_LOGLEVEL_ALL
117 && key->loglevel == 0 && event->attr.loglevel == -1) {
118 /*
119 * Match is accepted: on event creation, the loglevel is set to -1 when
120 * the loglevel type is ALL, while the API sends 0 when receiving an
121 * enable event. Both values therefore mean "all loglevels" and are
122 * accepted for this loglevel type.
123 */
124 } else {
125 goto no_match;
126 }
127 }
128
129 /* One of the filters is NULL, fail. */
130 if ((key->filter && !event->filter) || (!key->filter && event->filter)) {
131 goto no_match;
132 }
133
134 if (key->filter && event->filter) {
135 /* Both filters exist, check length followed by the bytecode. */
136 if (event->filter->len != key->filter->len ||
137 memcmp(event->filter->data, key->filter->data,
138 event->filter->len) != 0) {
139 goto no_match;
140 }
141 }
142
143 /* One of the exclusions is NULL, fail. */
144 if ((key->exclusion && !event->exclusion) || (!key->exclusion && event->exclusion)) {
145 goto no_match;
146 }
147
148 if (key->exclusion && event->exclusion) {
149 /* Both exclusions exist, check count followed by the names. */
150 if (event->exclusion->count != key->exclusion->count ||
151 memcmp(event->exclusion->names, key->exclusion->names,
152 event->exclusion->count * LTTNG_UST_SYM_NAME_LEN) != 0) {
153 goto no_match;
154 }
155 }
156
157
158 /* Match. */
159 return 1;
160
161 no_match:
162 return 0;
163 }
164
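/*
 * Usage sketch (illustration only; it mirrors add_unique_ust_app_event() and
 * find_ust_app_event() below): the hash is computed on the event name alone,
 * while this match function resolves collisions using the full key.
 *
 *   struct ust_app_ht_key key;
 *
 *   key.name = event->attr.name;
 *   key.filter = event->filter;
 *   key.loglevel = event->attr.loglevel;
 *   key.exclusion = event->exclusion;
 *   cds_lfht_lookup(ht->ht, ht->hash_fct((void *) key.name, lttng_ht_seed),
 *           ht_match_ust_app_event, &key, &iter.iter);
 */
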
165 /*
166 * Unique add of an ust app event in the given ht. This uses the custom
167 * ht_match_ust_app_event match function and the event name as hash.
168 */
169 static void add_unique_ust_app_event(struct ust_app_channel *ua_chan,
170 struct ust_app_event *event)
171 {
172 struct cds_lfht_node *node_ptr;
173 struct ust_app_ht_key key;
174 struct lttng_ht *ht;
175
176 assert(ua_chan);
177 assert(ua_chan->events);
178 assert(event);
179
180 ht = ua_chan->events;
181 key.name = event->attr.name;
182 key.filter = event->filter;
183 key.loglevel = event->attr.loglevel;
184 key.exclusion = event->exclusion;
185
186 node_ptr = cds_lfht_add_unique(ht->ht,
187 ht->hash_fct(event->node.key, lttng_ht_seed),
188 ht_match_ust_app_event, &key, &event->node.node);
189 assert(node_ptr == &event->node.node);
190 }
191
192 /*
193 * Close the notify socket from the given RCU head object. This MUST be called
194 * through a call_rcu().
195 */
196 static void close_notify_sock_rcu(struct rcu_head *head)
197 {
198 int ret;
199 struct ust_app_notify_sock_obj *obj =
200 caa_container_of(head, struct ust_app_notify_sock_obj, head);
201
202 /* Must have a valid fd here. */
203 assert(obj->fd >= 0);
204
205 ret = close(obj->fd);
206 if (ret) {
207 ERR("close notify sock %d RCU", obj->fd);
208 }
209 lttng_fd_put(LTTNG_FD_APPS, 1);
210
211 free(obj);
212 }
213
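/*
 * Caller-side sketch (assumed pattern, not taken from this file): the owner
 * of a notify socket wraps the fd in a ust_app_notify_sock_obj and defers
 * the close() to the RCU callback above:
 *
 *   obj->fd = sock;
 *   call_rcu(&obj->head, close_notify_sock_rcu);
 *
 * Closing from the callback ensures no RCU reader can still be using the
 * socket when it is finally released.
 */
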
214 /*
215 * Return the session registry according to the buffer type of the given
216 * session.
217 *
218 * A registry per UID or per PID object MUST exist before calling this function;
219 * NULL is returned if none is found. RCU read side lock must be acquired.
220 */
221 static struct ust_registry_session *get_session_registry(
222 struct ust_app_session *ua_sess)
223 {
224 struct ust_registry_session *registry = NULL;
225
226 assert(ua_sess);
227
228 switch (ua_sess->buffer_type) {
229 case LTTNG_BUFFER_PER_PID:
230 {
231 struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
232 if (!reg_pid) {
233 goto error;
234 }
235 registry = reg_pid->registry->reg.ust;
236 break;
237 }
238 case LTTNG_BUFFER_PER_UID:
239 {
240 struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
241 ua_sess->tracing_id, ua_sess->bits_per_long, ua_sess->uid);
242 if (!reg_uid) {
243 goto error;
244 }
245 registry = reg_uid->registry->reg.ust;
246 break;
247 }
248 default:
249 assert(0);
250 };
251
252 error:
253 return registry;
254 }
255
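/*
 * Lookup key note (annotation; mirrors the calls above and in
 * setup_buffer_reg_pid()/setup_buffer_reg_uid() below):
 *
 *   per-PID registry: buffer_reg_pid_find(ua_sess->id)
 *   per-UID registry: buffer_reg_uid_find(tracing_id, bits_per_long, uid)
 *
 * A per-UID registry is shared by all applications of the same user and
 * bitness within a tracing session; a per-PID registry belongs to a single
 * ust app session.
 */
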
256 /*
257 * Delete ust context safely. RCU read lock must be held before calling
258 * this function.
259 */
260 static
261 void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx)
262 {
263 int ret;
264
265 assert(ua_ctx);
266
267 if (ua_ctx->obj) {
268 ret = ustctl_release_object(sock, ua_ctx->obj);
269 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
270 ERR("UST app sock %d release ctx obj handle %d failed with ret %d",
271 sock, ua_ctx->obj->handle, ret);
272 }
273 free(ua_ctx->obj);
274 }
275 free(ua_ctx);
276 }
277
278 /*
279 * Delete ust app event safely. RCU read lock must be held before calling
280 * this function.
281 */
282 static
283 void delete_ust_app_event(int sock, struct ust_app_event *ua_event)
284 {
285 int ret;
286
287 assert(ua_event);
288
289 free(ua_event->filter);
290
291 if (ua_event->obj != NULL) {
292 ret = ustctl_release_object(sock, ua_event->obj);
293 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
294 ERR("UST app sock %d release event obj failed with ret %d",
295 sock, ret);
296 }
297 free(ua_event->obj);
298 }
299 free(ua_event);
300 }
301
302 /*
303 * Release ust data object of the given stream.
304 *
305 * Return 0 on success or else a negative value.
306 */
307 static int release_ust_app_stream(int sock, struct ust_app_stream *stream)
308 {
309 int ret = 0;
310
311 assert(stream);
312
313 if (stream->obj) {
314 ret = ustctl_release_object(sock, stream->obj);
315 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
316 ERR("UST app sock %d release stream obj failed with ret %d",
317 sock, ret);
318 }
319 lttng_fd_put(LTTNG_FD_APPS, 2);
320 free(stream->obj);
321 }
322
323 return ret;
324 }
325
326 /*
327 * Delete ust app stream safely. RCU read lock must be held before calling
328 * this function.
329 */
330 static
331 void delete_ust_app_stream(int sock, struct ust_app_stream *stream)
332 {
333 assert(stream);
334
335 (void) release_ust_app_stream(sock, stream);
336 free(stream);
337 }
338
339 /*
340 * We need to execute ht_destroy outside of RCU read-side critical
341 * section and outside of call_rcu thread, so we postpone its execution
342 * using ht_cleanup_push. It is simpler than to change the semantic of
343 * the many callers of delete_ust_app_channel().
344 */
345 static
346 void delete_ust_app_channel_rcu(struct rcu_head *head)
347 {
348 struct ust_app_channel *ua_chan =
349 caa_container_of(head, struct ust_app_channel, rcu_head);
350
351 ht_cleanup_push(ua_chan->ctx);
352 ht_cleanup_push(ua_chan->events);
353 free(ua_chan);
354 }
355
356 /*
357 * Delete ust app channel safely. RCU read lock must be held before calling
358 * this function.
359 */
360 static
361 void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
362 struct ust_app *app)
363 {
364 int ret;
365 struct lttng_ht_iter iter;
366 struct ust_app_event *ua_event;
367 struct ust_app_ctx *ua_ctx;
368 struct ust_app_stream *stream, *stmp;
369 struct ust_registry_session *registry;
370
371 assert(ua_chan);
372
373 DBG3("UST app deleting channel %s", ua_chan->name);
374
375 /* Wipe stream */
376 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
377 cds_list_del(&stream->list);
378 delete_ust_app_stream(sock, stream);
379 }
380
381 /* Wipe context */
382 cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
383 cds_list_del(&ua_ctx->list);
384 ret = lttng_ht_del(ua_chan->ctx, &iter);
385 assert(!ret);
386 delete_ust_app_ctx(sock, ua_ctx);
387 }
388
389 /* Wipe events */
390 cds_lfht_for_each_entry(ua_chan->events->ht, &iter.iter, ua_event,
391 node.node) {
392 ret = lttng_ht_del(ua_chan->events, &iter);
393 assert(!ret);
394 delete_ust_app_event(sock, ua_event);
395 }
396
397 if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
398 /* Wipe and free registry from session registry. */
399 registry = get_session_registry(ua_chan->session);
400 if (registry) {
401 ust_registry_channel_del_free(registry, ua_chan->key);
402 }
403 }
404
405 if (ua_chan->obj != NULL) {
406 /* Remove channel from application UST object descriptor. */
407 iter.iter.node = &ua_chan->ust_objd_node.node;
408 lttng_ht_del(app->ust_objd, &iter);
409 ret = ustctl_release_object(sock, ua_chan->obj);
410 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
411 ERR("UST app sock %d release channel obj failed with ret %d",
412 sock, ret);
413 }
414 lttng_fd_put(LTTNG_FD_APPS, 1);
415 free(ua_chan->obj);
416 }
417 call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
418 }
419
420 /*
421 * Push metadata to consumer socket.
422 *
423 * The socket lock MUST be acquired.
424 * The ust app session lock MUST be acquired.
425 *
426 * On success, return the len of metadata pushed or else a negative value.
427 */
428 ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
429 struct consumer_socket *socket, int send_zero_data)
430 {
431 int ret;
432 char *metadata_str = NULL;
433 size_t len, offset;
434 ssize_t ret_val;
435
436 assert(registry);
437 assert(socket);
438
439 /*
440 * On a push metadata error, either the consumer is dead or the metadata
441 * channel has been destroyed because its endpoint might have died (e.g.
442 * relayd). If so, the metadata-closed flag is set to 1 so we deny pushing
443 * metadata again, which is no longer valid on the consumer side.
444 *
445 * Holding the ust app session mutex allows us to make this check without
446 * taking the registry lock.
447 */
448 if (registry->metadata_closed) {
449 return -EPIPE;
450 }
451
452 pthread_mutex_lock(&registry->lock);
453
454 offset = registry->metadata_len_sent;
455 len = registry->metadata_len - registry->metadata_len_sent;
456 if (len == 0) {
457 DBG3("No metadata to push for metadata key %" PRIu64,
458 registry->metadata_key);
459 ret_val = len;
460 if (send_zero_data) {
461 DBG("No metadata to push");
462 goto push_data;
463 }
464 goto end;
465 }
466
467 /* Allocate only what we have to send. */
468 metadata_str = zmalloc(len);
469 if (!metadata_str) {
470 PERROR("zmalloc ust app metadata string");
471 ret_val = -ENOMEM;
472 goto error;
473 }
474 /* Copy what we haven't sent out yet. */
475 memcpy(metadata_str, registry->metadata + offset, len);
476 registry->metadata_len_sent += len;
477
478 push_data:
479 pthread_mutex_unlock(&registry->lock);
480 ret = consumer_push_metadata(socket, registry->metadata_key,
481 metadata_str, len, offset);
482 if (ret < 0) {
483 ret_val = ret;
484 goto error_push;
485 }
486
487 free(metadata_str);
488 return len;
489
490 end:
491 error:
492 pthread_mutex_unlock(&registry->lock);
493 error_push:
494 free(metadata_str);
495 return ret_val;
496 }
497
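/*
 * Lock-ordering sketch for callers (illustration only; it mirrors what
 * push_metadata() below does): the ust app session lock is taken first, the
 * consumer socket lock is taken around the push, and the registry lock nests
 * inside ust_app_push_metadata() itself.
 *
 *   pthread_mutex_lock(&ua_sess->lock);
 *   pthread_mutex_lock(socket->lock);
 *   ret = ust_app_push_metadata(registry, socket, 0);
 *   pthread_mutex_unlock(socket->lock);
 *   pthread_mutex_unlock(&ua_sess->lock);
 */
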
498 /*
499 * For a given application and session, push metadata to consumer. The session
500 * lock MUST be acquired here before calling this.
501 * The consumer socket used to send the metadata is looked up from the
502 * given consumer output according to the registry's bitness, so only the
503 * consumer output is required here.
504 *
505 * Return 0 on success else a negative error.
506 */
507 static int push_metadata(struct ust_registry_session *registry,
508 struct consumer_output *consumer)
509 {
510 int ret_val;
511 ssize_t ret;
512 struct consumer_socket *socket;
513
514 assert(registry);
515 assert(consumer);
516
517 rcu_read_lock();
518
519 /*
520 * Means that no metadata was assigned to the session. This can happen if
521 * no start has been done previously.
522 */
523 if (!registry->metadata_key) {
524 ret_val = 0;
525 goto end_rcu_unlock;
526 }
527
528 /* Get consumer socket to use to push the metadata. */
529 socket = consumer_find_socket_by_bitness(registry->bits_per_long,
530 consumer);
531 if (!socket) {
532 ret_val = -1;
533 goto error_rcu_unlock;
534 }
535
536 /*
537 * TODO: Currently, we hold the socket lock around sampling of the next
538 * metadata segment to ensure we send metadata over the consumer socket in
539 * the correct order. This makes the registry lock nest inside the socket
540 * lock.
541 *
542 * Please note that this is a temporary measure: we should move this lock
543 * back into ust_consumer_push_metadata() when the consumer gets the
544 * ability to reorder the metadata it receives.
545 */
546 pthread_mutex_lock(socket->lock);
547 ret = ust_app_push_metadata(registry, socket, 0);
548 pthread_mutex_unlock(socket->lock);
549 if (ret < 0) {
550 ret_val = ret;
551 goto error_rcu_unlock;
552 }
553
554 rcu_read_unlock();
555 return 0;
556
557 error_rcu_unlock:
558 /*
559 * On error, flag the registry that the metadata is closed. We were unable
560 * to push anything and this means that either the consumer is not
561 * responding or the metadata cache has been destroyed on the consumer.
562 */
563 registry->metadata_closed = 1;
564 end_rcu_unlock:
565 rcu_read_unlock();
566 return ret_val;
567 }
568
569 /*
570 * Send to the consumer a close metadata command for the given session. Once
571 * done, the metadata channel is deleted and the session metadata pointer is
572 * nullified. The session lock MUST be acquired here unless the application is
573 * in the destroy path.
574 *
575 * Return 0 on success else a negative value.
576 */
577 static int close_metadata(struct ust_registry_session *registry,
578 struct consumer_output *consumer)
579 {
580 int ret;
581 struct consumer_socket *socket;
582
583 assert(registry);
584 assert(consumer);
585
586 rcu_read_lock();
587
588 if (!registry->metadata_key || registry->metadata_closed) {
589 ret = 0;
590 goto end;
591 }
592
593 /* Get consumer socket to use to push the metadata. */
594 socket = consumer_find_socket_by_bitness(registry->bits_per_long,
595 consumer);
596 if (!socket) {
597 ret = -1;
598 goto error;
599 }
600
601 ret = consumer_close_metadata(socket, registry->metadata_key);
602 if (ret < 0) {
603 goto error;
604 }
605
606 error:
607 /*
608 * Metadata closed. Even on error this means that the consumer is not
609 * responding or was not found, so either way a second close should NOT be
610 * emitted for this registry.
611 */
612 registry->metadata_closed = 1;
613 end:
614 rcu_read_unlock();
615 return ret;
616 }
617
618 /*
619 * We need to execute ht_destroy outside of RCU read-side critical
620 * section and outside of call_rcu thread, so we postpone its execution
621 * using ht_cleanup_push. It is simpler than to change the semantic of
622 * the many callers of delete_ust_app_session().
623 */
624 static
625 void delete_ust_app_session_rcu(struct rcu_head *head)
626 {
627 struct ust_app_session *ua_sess =
628 caa_container_of(head, struct ust_app_session, rcu_head);
629
630 ht_cleanup_push(ua_sess->channels);
631 free(ua_sess);
632 }
633
634 /*
635 * Delete ust app session safely. RCU read lock must be held before calling
636 * this function.
637 */
638 static
639 void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
640 struct ust_app *app)
641 {
642 int ret;
643 struct lttng_ht_iter iter;
644 struct ust_app_channel *ua_chan;
645 struct ust_registry_session *registry;
646
647 assert(ua_sess);
648
649 pthread_mutex_lock(&ua_sess->lock);
650
651 registry = get_session_registry(ua_sess);
652 if (registry && !registry->metadata_closed) {
653 /* Push metadata for application before freeing the application. */
654 (void) push_metadata(registry, ua_sess->consumer);
655
656 /*
657 * Don't ask to close metadata for global per UID buffers. In that case,
658 * close the metadata only when the trace session is destroyed. Also, the
659 * previous push metadata could have flagged the metadata registry as
660 * closed, so don't send a close command if it already is.
661 */
662 if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID &&
663 !registry->metadata_closed) {
664 /* And ask to close it for this session registry. */
665 (void) close_metadata(registry, ua_sess->consumer);
666 }
667 }
668
669 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
670 node.node) {
671 ret = lttng_ht_del(ua_sess->channels, &iter);
672 assert(!ret);
673 delete_ust_app_channel(sock, ua_chan, app);
674 }
675
676 /* In case of per PID, the registry is kept in the session. */
677 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
678 struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
679 if (reg_pid) {
680 buffer_reg_pid_remove(reg_pid);
681 buffer_reg_pid_destroy(reg_pid);
682 }
683 }
684
685 if (ua_sess->handle != -1) {
686 ret = ustctl_release_handle(sock, ua_sess->handle);
687 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
688 ERR("UST app sock %d release session handle failed with ret %d",
689 sock, ret);
690 }
691 }
692 pthread_mutex_unlock(&ua_sess->lock);
693
694 call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
695 }
696
697 /*
698 * Delete a traceable application structure from the global list. Never call
699 * this function outside of a call_rcu call.
700 *
701 * RCU read side lock should _NOT_ be held when calling this function.
702 */
703 static
704 void delete_ust_app(struct ust_app *app)
705 {
706 int ret, sock;
707 struct ust_app_session *ua_sess, *tmp_ua_sess;
708
709 /* Delete ust app sessions info */
710 sock = app->sock;
711 app->sock = -1;
712
713 /* Wipe sessions */
714 cds_list_for_each_entry_safe(ua_sess, tmp_ua_sess, &app->teardown_head,
715 teardown_node) {
716 /* Free every object in the session and the session. */
717 rcu_read_lock();
718 delete_ust_app_session(sock, ua_sess, app);
719 rcu_read_unlock();
720 }
721
722 ht_cleanup_push(app->sessions);
723 ht_cleanup_push(app->ust_objd);
724
725 /*
726 * Wait until we have deleted the application from the sock hash table
727 * before closing this socket, otherwise an application could re-use the
728 * socket ID and race with the teardown, using the same hash table entry.
729 *
730 * It's OK to leave the close in call_rcu. We want it to stay unique for
731 * all RCU readers that could run concurrently with unregister app,
732 * therefore we _need_ to only close that socket after a grace period. So
733 * it should stay in this RCU callback.
734 *
735 * This close() is a very important step of the synchronization model so
736 * every modification to this function must be carefully reviewed.
737 */
738 ret = close(sock);
739 if (ret) {
740 PERROR("close");
741 }
742 lttng_fd_put(LTTNG_FD_APPS, 1);
743
744 DBG2("UST app pid %d deleted", app->pid);
745 free(app);
746 }
747
748 /*
749 * URCU intermediate call to delete an UST app.
750 */
751 static
752 void delete_ust_app_rcu(struct rcu_head *head)
753 {
754 struct lttng_ht_node_ulong *node =
755 caa_container_of(head, struct lttng_ht_node_ulong, head);
756 struct ust_app *app =
757 caa_container_of(node, struct ust_app, pid_n);
758
759 DBG3("Call RCU deleting app PID %d", app->pid);
760 delete_ust_app(app);
761 }
762
763 /*
764 * Delete the session from the application ht and delete the data structure by
765 * freeing every object inside and releasing them.
766 */
767 static void destroy_app_session(struct ust_app *app,
768 struct ust_app_session *ua_sess)
769 {
770 int ret;
771 struct lttng_ht_iter iter;
772
773 assert(app);
774 assert(ua_sess);
775
776 iter.iter.node = &ua_sess->node.node;
777 ret = lttng_ht_del(app->sessions, &iter);
778 if (ret) {
779 /* Already scheduled for teardown. */
780 goto end;
781 }
782
783 /* Once deleted, free the data structure. */
784 delete_ust_app_session(app->sock, ua_sess, app);
785
786 end:
787 return;
788 }
789
790 /*
791 * Alloc new UST app session.
792 */
793 static
794 struct ust_app_session *alloc_ust_app_session(struct ust_app *app)
795 {
796 struct ust_app_session *ua_sess;
797
798 /* Init most of the default values by allocating and zeroing. */
799 ua_sess = zmalloc(sizeof(struct ust_app_session));
800 if (ua_sess == NULL) {
801 PERROR("malloc");
802 goto error_free;
803 }
804
805 ua_sess->handle = -1;
806 ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
807 pthread_mutex_init(&ua_sess->lock, NULL);
808
809 return ua_sess;
810
811 error_free:
812 return NULL;
813 }
814
815 /*
816 * Alloc new UST app channel.
817 */
818 static
819 struct ust_app_channel *alloc_ust_app_channel(char *name,
820 struct ust_app_session *ua_sess,
821 struct lttng_ust_channel_attr *attr)
822 {
823 struct ust_app_channel *ua_chan;
824
825 /* Init most of the default values by allocating and zeroing. */
826 ua_chan = zmalloc(sizeof(struct ust_app_channel));
827 if (ua_chan == NULL) {
828 PERROR("malloc");
829 goto error;
830 }
831
832 /* Setup channel name */
833 strncpy(ua_chan->name, name, sizeof(ua_chan->name));
834 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
835
836 ua_chan->enabled = 1;
837 ua_chan->handle = -1;
838 ua_chan->session = ua_sess;
839 ua_chan->key = get_next_channel_key();
840 ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
841 ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
842 lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);
843
844 CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
845 CDS_INIT_LIST_HEAD(&ua_chan->ctx_list);
846
847 /* Copy attributes */
848 if (attr) {
849 /* Translate from lttng_ust_channel to ustctl_consumer_channel_attr. */
850 ua_chan->attr.subbuf_size = attr->subbuf_size;
851 ua_chan->attr.num_subbuf = attr->num_subbuf;
852 ua_chan->attr.overwrite = attr->overwrite;
853 ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
854 ua_chan->attr.read_timer_interval = attr->read_timer_interval;
855 ua_chan->attr.output = attr->output;
856 }
857 /* By default, the channel is a per cpu channel. */
858 ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
859
860 DBG3("UST app channel %s allocated", ua_chan->name);
861
862 return ua_chan;
863
864 error:
865 return NULL;
866 }
867
868 /*
869 * Allocate and initialize a UST app stream.
870 *
871 * Return newly allocated stream pointer or NULL on error.
872 */
873 struct ust_app_stream *ust_app_alloc_stream(void)
874 {
875 struct ust_app_stream *stream = NULL;
876
877 stream = zmalloc(sizeof(*stream));
878 if (stream == NULL) {
879 PERROR("zmalloc ust app stream");
880 goto error;
881 }
882
883 /* Zero could be a valid value for a handle, so initialize it to -1. */
884 stream->handle = -1;
885
886 error:
887 return stream;
888 }
889
890 /*
891 * Alloc new UST app event.
892 */
893 static
894 struct ust_app_event *alloc_ust_app_event(char *name,
895 struct lttng_ust_event *attr)
896 {
897 struct ust_app_event *ua_event;
898
899 /* Init most of the default values by allocating and zeroing. */
900 ua_event = zmalloc(sizeof(struct ust_app_event));
901 if (ua_event == NULL) {
902 PERROR("malloc");
903 goto error;
904 }
905
906 ua_event->enabled = 1;
907 strncpy(ua_event->name, name, sizeof(ua_event->name));
908 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
909 lttng_ht_node_init_str(&ua_event->node, ua_event->name);
910
911 /* Copy attributes */
912 if (attr) {
913 memcpy(&ua_event->attr, attr, sizeof(ua_event->attr));
914 }
915
916 DBG3("UST app event %s allocated", ua_event->name);
917
918 return ua_event;
919
920 error:
921 return NULL;
922 }
923
924 /*
925 * Alloc new UST app context.
926 */
927 static
928 struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context *uctx)
929 {
930 struct ust_app_ctx *ua_ctx;
931
932 ua_ctx = zmalloc(sizeof(struct ust_app_ctx));
933 if (ua_ctx == NULL) {
934 goto error;
935 }
936
937 CDS_INIT_LIST_HEAD(&ua_ctx->list);
938
939 if (uctx) {
940 memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
941 }
942
943 DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);
944
945 error:
946 return ua_ctx;
947 }
948
949 /*
950 * Allocate a filter and copy the given original filter.
951 *
952 * Return allocated filter or NULL on error.
953 */
954 static struct lttng_ust_filter_bytecode *alloc_copy_ust_app_filter(
955 struct lttng_ust_filter_bytecode *orig_f)
956 {
957 struct lttng_ust_filter_bytecode *filter = NULL;
958
959 /* Copy filter bytecode */
960 filter = zmalloc(sizeof(*filter) + orig_f->len);
961 if (!filter) {
962 PERROR("zmalloc alloc ust app filter");
963 goto error;
964 }
965
966 memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
967
968 error:
969 return filter;
970 }
971
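/*
 * Layout note (annotation): the bytecode is stored inline right after the
 * lttng_ust_filter_bytecode header (flexible data member), which is why a
 * single allocation and a single memcpy of sizeof(*filter) + orig_f->len
 * duplicate both the header and the bytecode.
 */
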
972 /*
973 * Find an ust_app using the sock and return it. RCU read side lock must be
974 * held before calling this helper function.
975 */
976 struct ust_app *ust_app_find_by_sock(int sock)
977 {
978 struct lttng_ht_node_ulong *node;
979 struct lttng_ht_iter iter;
980
981 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
982 node = lttng_ht_iter_get_node_ulong(&iter);
983 if (node == NULL) {
984 DBG2("UST app find by sock %d not found", sock);
985 goto error;
986 }
987
988 return caa_container_of(node, struct ust_app, sock_n);
989
990 error:
991 return NULL;
992 }
993
994 /*
995 * Find an ust_app using the notify sock and return it. RCU read side lock must
996 * be held before calling this helper function.
997 */
998 static struct ust_app *find_app_by_notify_sock(int sock)
999 {
1000 struct lttng_ht_node_ulong *node;
1001 struct lttng_ht_iter iter;
1002
1003 lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
1004 &iter);
1005 node = lttng_ht_iter_get_node_ulong(&iter);
1006 if (node == NULL) {
1007 DBG2("UST app find by notify sock %d not found", sock);
1008 goto error;
1009 }
1010
1011 return caa_container_of(node, struct ust_app, notify_sock_n);
1012
1013 error:
1014 return NULL;
1015 }
1016
1017 /*
1018 * Look up an ust app event based on the event name, filter bytecode and
1019 * event loglevel.
1020 *
1021 * Return an ust_app_event object or NULL if not found.
1022 */
1023 static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
1024 char *name, struct lttng_ust_filter_bytecode *filter, int loglevel)
1025 {
1026 struct lttng_ht_iter iter;
1027 struct lttng_ht_node_str *node;
1028 struct ust_app_event *event = NULL;
1029 struct ust_app_ht_key key;
1030
1031 assert(name);
1032 assert(ht);
1033
1034 /* Setup key for event lookup. */
1035 key.name = name;
1036 key.filter = filter;
1037 key.loglevel = loglevel;
1038
1039 /* Lookup using the event name as hash and a custom match fct. */
1040 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
1041 ht_match_ust_app_event, &key, &iter.iter);
1042 node = lttng_ht_iter_get_node_str(&iter);
1043 if (node == NULL) {
1044 goto end;
1045 }
1046
1047 event = caa_container_of(node, struct ust_app_event, node);
1048
1049 end:
1050 return event;
1051 }
1052
1053 /*
1054 * Create the channel context on the tracer.
1055 *
1056 * Called with UST app session lock held.
1057 */
1058 static
1059 int create_ust_channel_context(struct ust_app_channel *ua_chan,
1060 struct ust_app_ctx *ua_ctx, struct ust_app *app)
1061 {
1062 int ret;
1063
1064 health_code_update();
1065
1066 ret = ustctl_add_context(app->sock, &ua_ctx->ctx,
1067 ua_chan->obj, &ua_ctx->obj);
1068 if (ret < 0) {
1069 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1070 ERR("UST app create channel context failed for app (pid: %d) "
1071 "with ret %d", app->pid, ret);
1072 } else {
1073 /*
1074 * This is normal behavior, an application can die during the
1075 * creation process. Don't report an error so the execution can
1076 * continue normally.
1077 */
1078 ret = 0;
1079 DBG3("UST app disable event failed. Application is dead.");
1080 }
1081 goto error;
1082 }
1083
1084 ua_ctx->handle = ua_ctx->obj->handle;
1085
1086 DBG2("UST app context handle %d created successfully for channel %s",
1087 ua_ctx->handle, ua_chan->name);
1088
1089 error:
1090 health_code_update();
1091 return ret;
1092 }
1093
1094 /*
1095 * Set the filter on the tracer.
1096 */
1097 static
1098 int set_ust_event_filter(struct ust_app_event *ua_event,
1099 struct ust_app *app)
1100 {
1101 int ret;
1102
1103 health_code_update();
1104
1105 if (!ua_event->filter) {
1106 ret = 0;
1107 goto error;
1108 }
1109
1110 ret = ustctl_set_filter(app->sock, ua_event->filter,
1111 ua_event->obj);
1112 if (ret < 0) {
1113 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1114 ERR("UST app event %s filter failed for app (pid: %d) "
1115 "with ret %d", ua_event->attr.name, app->pid, ret);
1116 } else {
1117 /*
1118 * This is normal behavior, an application can die during the
1119 * creation process. Don't report an error so the execution can
1120 * continue normally.
1121 */
1122 ret = 0;
1123 DBG3("UST app filter event failed. Application is dead.");
1124 }
1125 goto error;
1126 }
1127
1128 DBG2("UST filter set successfully for event %s", ua_event->name);
1129
1130 error:
1131 health_code_update();
1132 return ret;
1133 }
1134
1135 /*
1136 * Disable the specified event on the UST tracer for the UST session.
1137 */
1138 static int disable_ust_event(struct ust_app *app,
1139 struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
1140 {
1141 int ret;
1142
1143 health_code_update();
1144
1145 ret = ustctl_disable(app->sock, ua_event->obj);
1146 if (ret < 0) {
1147 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1148 ERR("UST app event %s disable failed for app (pid: %d) "
1149 "and session handle %d with ret %d",
1150 ua_event->attr.name, app->pid, ua_sess->handle, ret);
1151 } else {
1152 /*
1153 * This is normal behavior, an application can die during the
1154 * creation process. Don't report an error so the execution can
1155 * continue normally.
1156 */
1157 ret = 0;
1158 DBG3("UST app disable event failed. Application is dead.");
1159 }
1160 goto error;
1161 }
1162
1163 DBG2("UST app event %s disabled successfully for app (pid: %d)",
1164 ua_event->attr.name, app->pid);
1165
1166 error:
1167 health_code_update();
1168 return ret;
1169 }
1170
1171 /*
1172 * Disable the specified channel on the UST tracer for the UST session.
1173 */
1174 static int disable_ust_channel(struct ust_app *app,
1175 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1176 {
1177 int ret;
1178
1179 health_code_update();
1180
1181 ret = ustctl_disable(app->sock, ua_chan->obj);
1182 if (ret < 0) {
1183 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1184 ERR("UST app channel %s disable failed for app (pid: %d) "
1185 "and session handle %d with ret %d",
1186 ua_chan->name, app->pid, ua_sess->handle, ret);
1187 } else {
1188 /*
1189 * This is normal behavior, an application can die during the
1190 * creation process. Don't report an error so the execution can
1191 * continue normally.
1192 */
1193 ret = 0;
1194 DBG3("UST app disable channel failed. Application is dead.");
1195 }
1196 goto error;
1197 }
1198
1199 DBG2("UST app channel %s disabled successfully for app (pid: %d)",
1200 ua_chan->name, app->pid);
1201
1202 error:
1203 health_code_update();
1204 return ret;
1205 }
1206
1207 /*
1208 * Enable the specified channel on the UST tracer for the UST session.
1209 */
1210 static int enable_ust_channel(struct ust_app *app,
1211 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1212 {
1213 int ret;
1214
1215 health_code_update();
1216
1217 ret = ustctl_enable(app->sock, ua_chan->obj);
1218 if (ret < 0) {
1219 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1220 ERR("UST app channel %s enable failed for app (pid: %d) "
1221 "and session handle %d with ret %d",
1222 ua_chan->name, app->pid, ua_sess->handle, ret);
1223 } else {
1224 /*
1225 * This is normal behavior, an application can die during the
1226 * creation process. Don't report an error so the execution can
1227 * continue normally.
1228 */
1229 ret = 0;
1230 DBG3("UST app enable channel failed. Application is dead.");
1231 }
1232 goto error;
1233 }
1234
1235 ua_chan->enabled = 1;
1236
1237 DBG2("UST app channel %s enabled successfully for app (pid: %d)",
1238 ua_chan->name, app->pid);
1239
1240 error:
1241 health_code_update();
1242 return ret;
1243 }
1244
1245 /*
1246 * Enable the specified event on the UST tracer for the UST session.
1247 */
1248 static int enable_ust_event(struct ust_app *app,
1249 struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
1250 {
1251 int ret;
1252
1253 health_code_update();
1254
1255 ret = ustctl_enable(app->sock, ua_event->obj);
1256 if (ret < 0) {
1257 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1258 ERR("UST app event %s enable failed for app (pid: %d) "
1259 "and session handle %d with ret %d",
1260 ua_event->attr.name, app->pid, ua_sess->handle, ret);
1261 } else {
1262 /*
1263 * This is normal behavior, an application can die during the
1264 * creation process. Don't report an error so the execution can
1265 * continue normally.
1266 */
1267 ret = 0;
1268 DBG3("UST app enable event failed. Application is dead.");
1269 }
1270 goto error;
1271 }
1272
1273 DBG2("UST app event %s enabled successfully for app (pid: %d)",
1274 ua_event->attr.name, app->pid);
1275
1276 error:
1277 health_code_update();
1278 return ret;
1279 }
1280
1281 /*
1282 * Send channel and stream buffer to application.
1283 *
1284 * Return 0 on success. On error, a negative value is returned.
1285 */
1286 static int send_channel_pid_to_ust(struct ust_app *app,
1287 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1288 {
1289 int ret;
1290 struct ust_app_stream *stream, *stmp;
1291
1292 assert(app);
1293 assert(ua_sess);
1294 assert(ua_chan);
1295
1296 health_code_update();
1297
1298 DBG("UST app sending channel %s to UST app sock %d", ua_chan->name,
1299 app->sock);
1300
1301 /* Send channel to the application. */
1302 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
1303 if (ret < 0) {
1304 goto error;
1305 }
1306
1307 health_code_update();
1308
1309 /* Send all streams to application. */
1310 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
1311 ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
1312 if (ret < 0) {
1313 goto error;
1314 }
1315 /* We don't need the stream anymore once sent to the tracer. */
1316 cds_list_del(&stream->list);
1317 delete_ust_app_stream(-1, stream);
1318 }
1319 /* Flag the channel that it is sent to the application. */
1320 ua_chan->is_sent = 1;
1321
1322 error:
1323 health_code_update();
1324 return ret;
1325 }
1326
1327 /*
1328 * Create the specified event onto the UST tracer for a UST session.
1329 *
1330 * Should be called with session mutex held.
1331 */
1332 static
1333 int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
1334 struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
1335 {
1336 int ret = 0;
1337
1338 health_code_update();
1339
1340 /* Create UST event on tracer */
1341 ret = ustctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
1342 &ua_event->obj);
1343 if (ret < 0) {
1344 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1345 ERR("Error ustctl create event %s for app pid: %d with ret %d",
1346 ua_event->attr.name, app->pid, ret);
1347 } else {
1348 /*
1349 * This is normal behavior, an application can die during the
1350 * creation process. Don't report an error so the execution can
1351 * continue normally.
1352 */
1353 ret = 0;
1354 DBG3("UST app create event failed. Application is dead.");
1355 }
1356 goto error;
1357 }
1358
1359 ua_event->handle = ua_event->obj->handle;
1360
1361 DBG2("UST app event %s created successfully for pid:%d",
1362 ua_event->attr.name, app->pid);
1363
1364 health_code_update();
1365
1366 /* Set filter if one is present. */
1367 if (ua_event->filter) {
1368 ret = set_ust_event_filter(ua_event, app);
1369 if (ret < 0) {
1370 goto error;
1371 }
1372 }
1373
1374 /* If event not enabled, disable it on the tracer */
1375 if (ua_event->enabled == 0) {
1376 ret = disable_ust_event(app, ua_sess, ua_event);
1377 if (ret < 0) {
1378 /*
1379 * If we hit an EPERM, something is wrong with our disable call. If
1380 * we get an EEXIST, there is a problem on the tracer side since we
1381 * just created it.
1382 */
1383 switch (ret) {
1384 case -LTTNG_UST_ERR_PERM:
1385 /* Code flow problem */
1386 assert(0);
1387 case -LTTNG_UST_ERR_EXIST:
1388 /* It's OK for our use case. */
1389 ret = 0;
1390 break;
1391 default:
1392 break;
1393 }
1394 goto error;
1395 }
1396 }
1397
1398 error:
1399 health_code_update();
1400 return ret;
1401 }
1402
1403 /*
1404 * Copy data between an UST app event and a LTT event.
1405 */
1406 static void shadow_copy_event(struct ust_app_event *ua_event,
1407 struct ltt_ust_event *uevent)
1408 {
1409 strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
1410 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
1411
1412 ua_event->enabled = uevent->enabled;
1413
1414 /* Copy event attributes */
1415 memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));
1416
1417 /* Copy filter bytecode */
1418 if (uevent->filter) {
1419 ua_event->filter = alloc_copy_ust_app_filter(uevent->filter);
1420 /* Filter might be NULL here in case of ENOMEM. */
1421 }
1422 }
1423
1424 /*
1425 * Copy data between an UST app channel and a LTT channel.
1426 */
1427 static void shadow_copy_channel(struct ust_app_channel *ua_chan,
1428 struct ltt_ust_channel *uchan)
1429 {
1430 struct lttng_ht_iter iter;
1431 struct ltt_ust_event *uevent;
1432 struct ltt_ust_context *uctx;
1433 struct ust_app_event *ua_event;
1434 struct ust_app_ctx *ua_ctx;
1435
1436 DBG2("UST app shadow copy of channel %s started", ua_chan->name);
1437
1438 strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
1439 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
1440
1441 ua_chan->tracefile_size = uchan->tracefile_size;
1442 ua_chan->tracefile_count = uchan->tracefile_count;
1443
1444 /* Copy event attributes since the layout is different. */
1445 ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
1446 ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
1447 ua_chan->attr.overwrite = uchan->attr.overwrite;
1448 ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
1449 ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
1450 ua_chan->attr.output = uchan->attr.output;
1451 /*
1452 * Note that the attribute channel type is not set since the channel on the
1453 * tracing registry side does not have this information.
1454 */
1455
1456 ua_chan->enabled = uchan->enabled;
1457 ua_chan->tracing_channel_id = uchan->id;
1458
1459 cds_list_for_each_entry(uctx, &uchan->ctx_list, list) {
1460 ua_ctx = alloc_ust_app_ctx(&uctx->ctx);
1461 if (ua_ctx == NULL) {
1462 continue;
1463 }
1464 lttng_ht_node_init_ulong(&ua_ctx->node,
1465 (unsigned long) ua_ctx->ctx.ctx);
1466 lttng_ht_add_unique_ulong(ua_chan->ctx, &ua_ctx->node);
1467 cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
1468 }
1469
1470 /* Copy all events from ltt ust channel to ust app channel */
1471 cds_lfht_for_each_entry(uchan->events->ht, &iter.iter, uevent, node.node) {
1472 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
1473 uevent->filter, uevent->attr.loglevel);
1474 if (ua_event == NULL) {
1475 DBG2("UST event %s not found on shadow copy channel",
1476 uevent->attr.name);
1477 ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
1478 if (ua_event == NULL) {
1479 continue;
1480 }
1481 shadow_copy_event(ua_event, uevent);
1482 add_unique_ust_app_event(ua_chan, ua_event);
1483 }
1484 }
1485
1486 DBG3("UST app shadow copy of channel %s done", ua_chan->name);
1487 }
1488
1489 /*
1490 * Copy data between a UST app session and a regular LTT session.
1491 */
1492 static void shadow_copy_session(struct ust_app_session *ua_sess,
1493 struct ltt_ust_session *usess, struct ust_app *app)
1494 {
1495 struct lttng_ht_node_str *ua_chan_node;
1496 struct lttng_ht_iter iter;
1497 struct ltt_ust_channel *uchan;
1498 struct ust_app_channel *ua_chan;
1499 time_t rawtime;
1500 struct tm *timeinfo;
1501 char datetime[16];
1502 int ret;
1503
1504 /* Get date and time for unique app path */
1505 time(&rawtime);
1506 timeinfo = localtime(&rawtime);
1507 strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);
1508
1509 DBG2("Shadow copy of session handle %d", ua_sess->handle);
1510
1511 ua_sess->tracing_id = usess->id;
1512 ua_sess->id = get_next_session_id();
1513 ua_sess->uid = app->uid;
1514 ua_sess->gid = app->gid;
1515 ua_sess->euid = usess->uid;
1516 ua_sess->egid = usess->gid;
1517 ua_sess->buffer_type = usess->buffer_type;
1518 ua_sess->bits_per_long = app->bits_per_long;
1519 /* There is only one consumer object per session possible. */
1520 ua_sess->consumer = usess->consumer;
1521 ua_sess->output_traces = usess->output_traces;
1522 ua_sess->live_timer_interval = usess->live_timer_interval;
1523
1524 switch (ua_sess->buffer_type) {
1525 case LTTNG_BUFFER_PER_PID:
1526 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
1527 DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
1528 datetime);
1529 break;
1530 case LTTNG_BUFFER_PER_UID:
1531 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
1532 DEFAULT_UST_TRACE_UID_PATH, ua_sess->uid, app->bits_per_long);
1533 break;
1534 default:
1535 assert(0);
1536 goto error;
1537 }
1538 if (ret < 0) {
1539 PERROR("asprintf UST shadow copy session");
1540 assert(0);
1541 goto error;
1542 }
1543
1544 /* Iterate over all channels in global domain. */
1545 cds_lfht_for_each_entry(usess->domain_global.channels->ht, &iter.iter,
1546 uchan, node.node) {
1547 struct lttng_ht_iter uiter;
1548
1549 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
1550 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
1551 if (ua_chan_node != NULL) {
1552 /* Channel already exists. Continuing. */
1553 continue;
1554 }
1555
1556 DBG2("Channel %s not found on shadow session copy, creating it",
1557 uchan->name);
1558 ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
1559 if (ua_chan == NULL) {
1560 /* malloc failed. FIXME: might want to handle ENOMEM here. */
1561 continue;
1562 }
1563 shadow_copy_channel(ua_chan, uchan);
1564 /*
1565 * The concept of metadata channel does not exist on the tracing
1566 * registry side of the session daemon so this can only be a per CPU
1567 * channel and not metadata.
1568 */
1569 ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
1570
1571 lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
1572 }
1573
1574 error:
1575 return;
1576 }
1577
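/*
 * Path format note (annotation; the exact prefixes come from
 * DEFAULT_UST_TRACE_PID_PATH and DEFAULT_UST_TRACE_UID_PATH): a per-PID
 * session path ends with "<app name>-<pid>-<YYYYmmdd-HHMMSS>", while a
 * per-UID path is derived from the uid and the application bitness.
 */
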
1578 /*
1579 * Lookup session wrapper.
1580 */
1581 static
1582 void __lookup_session_by_app(struct ltt_ust_session *usess,
1583 struct ust_app *app, struct lttng_ht_iter *iter)
1584 {
1585 /* Get right UST app session from app */
1586 lttng_ht_lookup(app->sessions, &usess->id, iter);
1587 }
1588
1589 /*
1590 * Return ust app session from the app session hashtable using the UST session
1591 * id.
1592 */
1593 static struct ust_app_session *lookup_session_by_app(
1594 struct ltt_ust_session *usess, struct ust_app *app)
1595 {
1596 struct lttng_ht_iter iter;
1597 struct lttng_ht_node_u64 *node;
1598
1599 __lookup_session_by_app(usess, app, &iter);
1600 node = lttng_ht_iter_get_node_u64(&iter);
1601 if (node == NULL) {
1602 goto error;
1603 }
1604
1605 return caa_container_of(node, struct ust_app_session, node);
1606
1607 error:
1608 return NULL;
1609 }
1610
1611 /*
1612 * Setup buffer registry per PID for the given session and application. If none
1613 * is found, a new one is created, added to the global registry and
1614 * initialized. If regp is valid, it's set with the newly created object.
1615 *
1616 * Return 0 on success or else a negative value.
1617 */
1618 static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
1619 struct ust_app *app, struct buffer_reg_pid **regp)
1620 {
1621 int ret = 0;
1622 struct buffer_reg_pid *reg_pid;
1623
1624 assert(ua_sess);
1625 assert(app);
1626
1627 rcu_read_lock();
1628
1629 reg_pid = buffer_reg_pid_find(ua_sess->id);
1630 if (!reg_pid) {
1631 /*
1632 * This is the create channel path meaning that if there is NO
1633 * registry available, we have to create one for this session.
1634 */
1635 ret = buffer_reg_pid_create(ua_sess->id, &reg_pid);
1636 if (ret < 0) {
1637 goto error;
1638 }
1639 buffer_reg_pid_add(reg_pid);
1640 } else {
1641 goto end;
1642 }
1643
1644 /* Initialize registry. */
1645 ret = ust_registry_session_init(&reg_pid->registry->reg.ust, app,
1646 app->bits_per_long, app->uint8_t_alignment,
1647 app->uint16_t_alignment, app->uint32_t_alignment,
1648 app->uint64_t_alignment, app->long_alignment,
1649 app->byte_order, app->version.major,
1650 app->version.minor);
1651 if (ret < 0) {
1652 goto error;
1653 }
1654
1655 DBG3("UST app buffer registry per PID created successfully");
1656
1657 end:
1658 if (regp) {
1659 *regp = reg_pid;
1660 }
1661 error:
1662 rcu_read_unlock();
1663 return ret;
1664 }
1665
1666 /*
1667 * Setup buffer registry per UID for the given session and application. If none
1668 * is found, a new one is created, added to the global registry and
1669 * initialized. If regp is valid, it's set with the newly created object.
1670 *
1671 * Return 0 on success or else a negative value.
1672 */
1673 static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
1674 struct ust_app *app, struct buffer_reg_uid **regp)
1675 {
1676 int ret = 0;
1677 struct buffer_reg_uid *reg_uid;
1678
1679 assert(usess);
1680 assert(app);
1681
1682 rcu_read_lock();
1683
1684 reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
1685 if (!reg_uid) {
1686 /*
1687 * This is the create channel path meaning that if there is NO
1688 * registry available, we have to create one for this session.
1689 */
1690 ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
1691 LTTNG_DOMAIN_UST, &reg_uid);
1692 if (ret < 0) {
1693 goto error;
1694 }
1695 buffer_reg_uid_add(reg_uid);
1696 } else {
1697 goto end;
1698 }
1699
1700 /* Initialize registry. */
1701 ret = ust_registry_session_init(&reg_uid->registry->reg.ust, NULL,
1702 app->bits_per_long, app->uint8_t_alignment,
1703 app->uint16_t_alignment, app->uint32_t_alignment,
1704 app->uint64_t_alignment, app->long_alignment,
1705 app->byte_order, app->version.major,
1706 app->version.minor);
1707 if (ret < 0) {
1708 goto error;
1709 }
1710 /* Add node to teardown list of the session. */
1711 cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);
1712
1713 DBG3("UST app buffer registry per UID created successfully");
1714
1715 end:
1716 if (regp) {
1717 *regp = reg_uid;
1718 }
1719 error:
1720 rcu_read_unlock();
1721 return ret;
1722 }
1723
1724 /*
1725 * Create a session on the tracer side for the given app.
1726 *
1727 * On success, ua_sess_ptr is populated with the session pointer or else left
1728 * untouched. If the session was created, is_created is set to 1. On error,
1729 * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
1730 * be NULL.
1731 *
1732 * Returns 0 on success or else a negative code: either -ENOMEM, or
1733 * -ENOTCONN, which is the default code when ustctl_create_session fails.
1734 */
1735 static int create_ust_app_session(struct ltt_ust_session *usess,
1736 struct ust_app *app, struct ust_app_session **ua_sess_ptr,
1737 int *is_created)
1738 {
1739 int ret, created = 0;
1740 struct ust_app_session *ua_sess;
1741
1742 assert(usess);
1743 assert(app);
1744 assert(ua_sess_ptr);
1745
1746 health_code_update();
1747
1748 ua_sess = lookup_session_by_app(usess, app);
1749 if (ua_sess == NULL) {
1750 DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
1751 app->pid, usess->id);
1752 ua_sess = alloc_ust_app_session(app);
1753 if (ua_sess == NULL) {
1754 /* Only malloc can fail, so something is really wrong. */
1755 ret = -ENOMEM;
1756 goto error;
1757 }
1758 shadow_copy_session(ua_sess, usess, app);
1759 created = 1;
1760 }
1761
1762 switch (usess->buffer_type) {
1763 case LTTNG_BUFFER_PER_PID:
1764 /* Init local registry. */
1765 ret = setup_buffer_reg_pid(ua_sess, app, NULL);
1766 if (ret < 0) {
1767 goto error;
1768 }
1769 break;
1770 case LTTNG_BUFFER_PER_UID:
1771 /* Look for a global registry. If none exists, create one. */
1772 ret = setup_buffer_reg_uid(usess, app, NULL);
1773 if (ret < 0) {
1774 goto error;
1775 }
1776 break;
1777 default:
1778 assert(0);
1779 ret = -EINVAL;
1780 goto error;
1781 }
1782
1783 health_code_update();
1784
1785 if (ua_sess->handle == -1) {
1786 ret = ustctl_create_session(app->sock);
1787 if (ret < 0) {
1788 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1789 ERR("Creating session for app pid %d with ret %d",
1790 app->pid, ret);
1791 } else {
1792 DBG("UST app creating session failed. Application is dead");
1793 /*
1794 * This is normal behavior, an application can die during the
1795 * creation process. Don't report an error so the execution can
1796 * continue normally. This will get flagged ENOTCONN and the
1797 * caller will handle it.
1798 */
1799 ret = 0;
1800 }
1801 delete_ust_app_session(-1, ua_sess, app);
1802 if (ret != -ENOMEM) {
1803 /*
1804 * The tracer is probably gone or hit an internal error, so behave as
1805 * if it will soon unregister or is no longer usable.
1806 */
1807 ret = -ENOTCONN;
1808 }
1809 goto error;
1810 }
1811
1812 ua_sess->handle = ret;
1813
1814 /* Add ust app session to app's HT */
1815 lttng_ht_node_init_u64(&ua_sess->node,
1816 ua_sess->tracing_id);
1817 lttng_ht_add_unique_u64(app->sessions, &ua_sess->node);
1818
1819 DBG2("UST app session created successfully with handle %d", ret);
1820 }
1821
1822 *ua_sess_ptr = ua_sess;
1823 if (is_created) {
1824 *is_created = created;
1825 }
1826
1827 /* Everything went well. */
1828 ret = 0;
1829
1830 error:
1831 health_code_update();
1832 return ret;
1833 }
1834
1835 /*
1836 * Create a context for the channel on the tracer.
1837 *
1838 * Called with UST app session lock held and a RCU read side lock.
1839 */
1840 static
1841 int create_ust_app_channel_context(struct ust_app_session *ua_sess,
1842 struct ust_app_channel *ua_chan, struct lttng_ust_context *uctx,
1843 struct ust_app *app)
1844 {
1845 int ret = 0;
1846 struct lttng_ht_iter iter;
1847 struct lttng_ht_node_ulong *node;
1848 struct ust_app_ctx *ua_ctx;
1849
1850 DBG2("UST app adding context to channel %s", ua_chan->name);
1851
1852 lttng_ht_lookup(ua_chan->ctx, (void *)((unsigned long)uctx->ctx), &iter);
1853 node = lttng_ht_iter_get_node_ulong(&iter);
1854 if (node != NULL) {
1855 ret = -EEXIST;
1856 goto error;
1857 }
1858
1859 ua_ctx = alloc_ust_app_ctx(uctx);
1860 if (ua_ctx == NULL) {
1861 /* malloc failed */
1862 ret = -1;
1863 goto error;
1864 }
1865
1866 lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
1867 lttng_ht_add_unique_ulong(ua_chan->ctx, &ua_ctx->node);
1868 cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
1869
1870 ret = create_ust_channel_context(ua_chan, ua_ctx, app);
1871 if (ret < 0) {
1872 goto error;
1873 }
1874
1875 error:
1876 return ret;
1877 }
1878
1879 /*
1880 * Enable on the tracer side a ust app event for the session and channel.
1881 *
1882 * Called with UST app session lock held.
1883 */
1884 static
1885 int enable_ust_app_event(struct ust_app_session *ua_sess,
1886 struct ust_app_event *ua_event, struct ust_app *app)
1887 {
1888 int ret;
1889
1890 ret = enable_ust_event(app, ua_sess, ua_event);
1891 if (ret < 0) {
1892 goto error;
1893 }
1894
1895 ua_event->enabled = 1;
1896
1897 error:
1898 return ret;
1899 }
1900
1901 /*
1902 * Disable on the tracer side a ust app event for the session and channel.
1903 */
1904 static int disable_ust_app_event(struct ust_app_session *ua_sess,
1905 struct ust_app_event *ua_event, struct ust_app *app)
1906 {
1907 int ret;
1908
1909 ret = disable_ust_event(app, ua_sess, ua_event);
1910 if (ret < 0) {
1911 goto error;
1912 }
1913
1914 ua_event->enabled = 0;
1915
1916 error:
1917 return ret;
1918 }
1919
1920 /*
1921 * Disable the given ust app channel on the tracer side for the session.
1922 */
1923 static
1924 int disable_ust_app_channel(struct ust_app_session *ua_sess,
1925 struct ust_app_channel *ua_chan, struct ust_app *app)
1926 {
1927 int ret;
1928
1929 ret = disable_ust_channel(app, ua_sess, ua_chan);
1930 if (ret < 0) {
1931 goto error;
1932 }
1933
1934 ua_chan->enabled = 0;
1935
1936 error:
1937 return ret;
1938 }
1939
1940 /*
1941 * Lookup ust app channel for session and enable it on the tracer side. This
1942 * MUST be called with a RCU read side lock acquired.
1943 */
1944 static int enable_ust_app_channel(struct ust_app_session *ua_sess,
1945 struct ltt_ust_channel *uchan, struct ust_app *app)
1946 {
1947 int ret = 0;
1948 struct lttng_ht_iter iter;
1949 struct lttng_ht_node_str *ua_chan_node;
1950 struct ust_app_channel *ua_chan;
1951
1952 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
1953 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
1954 if (ua_chan_node == NULL) {
1955 DBG2("Unable to find channel %s in ust session id %" PRIu64,
1956 uchan->name, ua_sess->tracing_id);
1957 goto error;
1958 }
1959
1960 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
1961
1962 ret = enable_ust_channel(app, ua_sess, ua_chan);
1963 if (ret < 0) {
1964 goto error;
1965 }
1966
1967 error:
1968 return ret;
1969 }
1970
1971 /*
1972 * Ask the consumer to create a channel and get it if successful.
1973 *
1974 * Return 0 on success or else a negative value.
1975 */
1976 static int do_consumer_create_channel(struct ltt_ust_session *usess,
1977 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
1978 int bitness, struct ust_registry_session *registry)
1979 {
1980 int ret;
1981 unsigned int nb_fd = 0;
1982 struct consumer_socket *socket;
1983
1984 assert(usess);
1985 assert(ua_sess);
1986 assert(ua_chan);
1987 assert(registry);
1988
1989 rcu_read_lock();
1990 health_code_update();
1991
1992 /* Get the right consumer socket for the application. */
1993 socket = consumer_find_socket_by_bitness(bitness, usess->consumer);
1994 if (!socket) {
1995 ret = -EINVAL;
1996 goto error;
1997 }
1998
1999 health_code_update();
2000
2001 /* Need one fd for the channel. */
2002 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
2003 if (ret < 0) {
2004 ERR("Exhausted number of available FD upon create channel");
2005 goto error;
2006 }
2007
2008 /*
2009 * Ask the consumer to create the channel. The consumer will return the
2010 * number of streams we should expect.
2011 */
2012 ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
2013 registry);
2014 if (ret < 0) {
2015 goto error_ask;
2016 }
2017
2018 /*
2019 * Compute the number of fds needed before receiving them. It must be 2 per
2020 * stream (2 being the default value here).
2021 */
2022 nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;
2023
2024 /* Reserve the number of file descriptors we need. */
2025 ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
2026 if (ret < 0) {
2027 ERR("Exhausted number of available FD upon create channel");
2028 goto error_fd_get_stream;
2029 }
2030
2031 health_code_update();
2032
2033 /*
2034 * Now get the channel from the consumer. This call will populate the stream
2035 * list of that channel and set the ust objects.
2036 */
2037 if (usess->consumer->enabled) {
2038 ret = ust_consumer_get_channel(socket, ua_chan);
2039 if (ret < 0) {
2040 goto error_destroy;
2041 }
2042 }
2043
2044 rcu_read_unlock();
2045 return 0;
2046
2047 error_destroy:
2048 lttng_fd_put(LTTNG_FD_APPS, nb_fd);
2049 error_fd_get_stream:
2050 /*
2051 * Initiate a destroy channel on the consumer since we had an error
2052 * handling it on our side. The return value is of no importance since we
2053 * already have a ret value set by the previous error that we need to
2054 * return.
2055 */
2056 (void) ust_consumer_destroy_channel(socket, ua_chan);
2057 error_ask:
2058 lttng_fd_put(LTTNG_FD_APPS, 1);
2059 error:
2060 health_code_update();
2061 rcu_read_unlock();
2062 return ret;
2063 }
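
/*
 * Editorial sketch (not part of the original source): the file descriptor
 * accounting done by do_consumer_create_channel() above, reduced to a
 * hypothetical helper. It assumes DEFAULT_UST_STREAM_FD_NUM is 2, as the
 * comment above states; the helper name is illustrative only.
 */
static inline unsigned int example_channel_fd_budget(
		unsigned int expected_stream_count)
{
	/* One fd is reserved to ask the consumer for the channel itself... */
	unsigned int budget = 1;

	/* ...plus DEFAULT_UST_STREAM_FD_NUM fds per expected stream. */
	budget += DEFAULT_UST_STREAM_FD_NUM * expected_stream_count;
	return budget;
}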
2064
2065 /*
2066 * Duplicate the ust data object of the ust app stream and save it in the
2067 * buffer registry stream.
2068 *
2069 * Return 0 on success or else a negative value.
2070 */
2071 static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
2072 struct ust_app_stream *stream)
2073 {
2074 int ret;
2075
2076 assert(reg_stream);
2077 assert(stream);
2078
2079 /* Reserve the number of file descriptors we need. */
2080 ret = lttng_fd_get(LTTNG_FD_APPS, 2);
2081 if (ret < 0) {
2082 ERR("Exhausted number of available FD upon duplicate stream");
2083 goto error;
2084 }
2085
2086 /* Duplicate object for stream once the original is in the registry. */
2087 ret = ustctl_duplicate_ust_object_data(&stream->obj,
2088 reg_stream->obj.ust);
2089 if (ret < 0) {
2090 ERR("Duplicate stream obj from %p to %p failed with ret %d",
2091 reg_stream->obj.ust, stream->obj, ret);
2092 lttng_fd_put(LTTNG_FD_APPS, 2);
2093 goto error;
2094 }
2095 stream->handle = stream->obj->handle;
2096
2097 error:
2098 return ret;
2099 }
2100
2101 /*
2102 * Duplicate the ust data object of the ust app channel and save it in the
2103 * buffer registry channel.
2104 *
2105 * Return 0 on success or else a negative value.
2106 */
2107 static int duplicate_channel_object(struct buffer_reg_channel *reg_chan,
2108 struct ust_app_channel *ua_chan)
2109 {
2110 int ret;
2111
2112 assert(reg_chan);
2113 assert(ua_chan);
2114
2115 /* Need one fd for the channel. */
2116 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
2117 if (ret < 0) {
2118 ERR("Exhausted number of available FD upon duplicate channel");
2119 goto error_fd_get;
2120 }
2121
2122 /* Duplicate object for the channel once the original is in the registry. */
2123 ret = ustctl_duplicate_ust_object_data(&ua_chan->obj, reg_chan->obj.ust);
2124 if (ret < 0) {
2125 ERR("Duplicate channel obj from %p to %p failed with ret: %d",
2126 reg_chan->obj.ust, ua_chan->obj, ret);
2127 goto error;
2128 }
2129 ua_chan->handle = ua_chan->obj->handle;
2130
2131 return 0;
2132
2133 error:
2134 lttng_fd_put(LTTNG_FD_APPS, 1);
2135 error_fd_get:
2136 return ret;
2137 }
2138
2139 /*
2140 * For a given channel buffer registry, setup all streams of the given ust
2141 * application channel.
2142 *
2143 * Return 0 on success or else a negative value.
2144 */
2145 static int setup_buffer_reg_streams(struct buffer_reg_channel *reg_chan,
2146 struct ust_app_channel *ua_chan)
2147 {
2148 int ret = 0;
2149 struct ust_app_stream *stream, *stmp;
2150
2151 assert(reg_chan);
2152 assert(ua_chan);
2153
2154 DBG2("UST app setup buffer registry stream");
2155
2156 /* Move all streams of the application channel into the registry. */
2157 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
2158 struct buffer_reg_stream *reg_stream;
2159
2160 ret = buffer_reg_stream_create(&reg_stream);
2161 if (ret < 0) {
2162 goto error;
2163 }
2164
2165 /*
2166 * Keep original pointer and nullify it in the stream so the delete
2167 * stream call does not release the object.
2168 */
2169 reg_stream->obj.ust = stream->obj;
2170 stream->obj = NULL;
2171 buffer_reg_stream_add(reg_stream, reg_chan);
2172
2173 /* We don't need the streams anymore. */
2174 cds_list_del(&stream->list);
2175 delete_ust_app_stream(-1, stream);
2176 }
2177
2178 error:
2179 return ret;
2180 }
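
/*
 * Editorial sketch (not part of the original source): the pointer-stealing
 * idiom used by setup_buffer_reg_streams() above, shown in isolation with a
 * hypothetical holder type (void * stands in for the ust object pointer).
 * Ownership moves to the destination and the source is nulled so that a
 * later delete of the source cannot release the object a second time.
 */
struct example_obj_holder {
	void *obj;
};

static void example_transfer_ownership(struct example_obj_holder *dst,
		struct example_obj_holder *src)
{
	dst->obj = src->obj;	/* The destination now owns the object. */
	src->obj = NULL;	/* The source must no longer free it. */
}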
2181
2182 /*
2183 * Create a buffer registry channel for the given session registry and
2184 * application channel object. If regp pointer is valid, it's set with the
2185 * created object. Important, the created object is NOT added to the session
2186 * registry hash table.
2187 *
2188 * Return 0 on success else a negative value.
2189 */
2190 static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
2191 struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp)
2192 {
2193 int ret;
2194 struct buffer_reg_channel *reg_chan = NULL;
2195
2196 assert(reg_sess);
2197 assert(ua_chan);
2198
2199 DBG2("UST app creating buffer registry channel for %s", ua_chan->name);
2200
2201 /* Create buffer registry channel. */
2202 ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &reg_chan);
2203 if (ret < 0) {
2204 goto error_create;
2205 }
2206 assert(reg_chan);
2207 reg_chan->consumer_key = ua_chan->key;
2208 reg_chan->subbuf_size = ua_chan->attr.subbuf_size;
2209
2210 /* Create and add a channel registry to session. */
2211 ret = ust_registry_channel_add(reg_sess->reg.ust,
2212 ua_chan->tracing_channel_id);
2213 if (ret < 0) {
2214 goto error;
2215 }
2216 buffer_reg_channel_add(reg_sess, reg_chan);
2217
2218 if (regp) {
2219 *regp = reg_chan;
2220 }
2221
2222 return 0;
2223
2224 error:
2225 /* Safe because the registry channel object was not added to any HT. */
2226 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2227 error_create:
2228 return ret;
2229 }
2230
2231 /*
2232 * Setup buffer registry channel for the given session registry and application
2233 * channel object.
2234 *
2235 * Return 0 on success else a negative value.
2236 */
2237 static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
2238 struct ust_app_channel *ua_chan, struct buffer_reg_channel *reg_chan)
2239 {
2240 int ret;
2241
2242 assert(reg_sess);
2243 assert(reg_chan);
2244 assert(ua_chan);
2245 assert(ua_chan->obj);
2246
2247 DBG2("UST app setup buffer registry channel for %s", ua_chan->name);
2248
2249 /* Setup all streams for the registry. */
2250 ret = setup_buffer_reg_streams(reg_chan, ua_chan);
2251 if (ret < 0) {
2252 goto error;
2253 }
2254
2255 reg_chan->obj.ust = ua_chan->obj;
2256 ua_chan->obj = NULL;
2257
2258 return 0;
2259
2260 error:
2261 buffer_reg_channel_remove(reg_sess, reg_chan);
2262 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2263 return ret;
2264 }
2265
2266 /*
2267 * Send buffer registry channel to the application.
2268 *
2269 * Return 0 on success else a negative value.
2270 */
2271 static int send_channel_uid_to_ust(struct buffer_reg_channel *reg_chan,
2272 struct ust_app *app, struct ust_app_session *ua_sess,
2273 struct ust_app_channel *ua_chan)
2274 {
2275 int ret;
2276 struct buffer_reg_stream *reg_stream;
2277
2278 assert(reg_chan);
2279 assert(app);
2280 assert(ua_sess);
2281 assert(ua_chan);
2282
2283 DBG("UST app sending buffer registry channel to ust sock %d", app->sock);
2284
2285 ret = duplicate_channel_object(reg_chan, ua_chan);
2286 if (ret < 0) {
2287 goto error;
2288 }
2289
2290 /* Send channel to the application. */
2291 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
2292 if (ret < 0) {
2293 goto error;
2294 }
2295
2296 health_code_update();
2297
2298 /* Send all streams to application. */
2299 pthread_mutex_lock(&reg_chan->stream_list_lock);
2300 cds_list_for_each_entry(reg_stream, &reg_chan->streams, lnode) {
2301 struct ust_app_stream stream;
2302
2303 ret = duplicate_stream_object(reg_stream, &stream);
2304 if (ret < 0) {
2305 goto error_stream_unlock;
2306 }
2307
2308 ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
2309 if (ret < 0) {
2310 (void) release_ust_app_stream(-1, &stream);
2311 goto error_stream_unlock;
2312 }
2313
2314 /*
2315 * The return value is not important here. This function will output an
2316 * error if needed.
2317 */
2318 (void) release_ust_app_stream(-1, &stream);
2319 }
2320 ua_chan->is_sent = 1;
2321
2322 error_stream_unlock:
2323 pthread_mutex_unlock(&reg_chan->stream_list_lock);
2324 error:
2325 return ret;
2326 }
2327
2328 /*
2329 * Create the buffers on a per-UID basis and send them to the application.
2330 *
2331 * Return 0 on success else a negative value.
2332 */
2333 static int create_channel_per_uid(struct ust_app *app,
2334 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2335 struct ust_app_channel *ua_chan)
2336 {
2337 int ret;
2338 struct buffer_reg_uid *reg_uid;
2339 struct buffer_reg_channel *reg_chan;
2340
2341 assert(app);
2342 assert(usess);
2343 assert(ua_sess);
2344 assert(ua_chan);
2345
2346 DBG("UST app creating channel %s with per UID buffers", ua_chan->name);
2347
2348 reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
2349 /*
2350 * The session creation handles the creation of this global registry
2351 * object. If none can be found, there is a code flow problem or a
2352 * teardown race.
2353 */
2354 assert(reg_uid);
2355
2356 reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
2357 reg_uid);
2358 if (!reg_chan) {
2359 /* Create the buffer registry channel object. */
2360 ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &reg_chan);
2361 if (ret < 0) {
2362 goto error;
2363 }
2364 assert(reg_chan);
2365
2366 /*
2367 * Create the buffers on the consumer side. This call populates the
2368 * ust app channel object with all streams and data objects.
2369 */
2370 ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
2371 app->bits_per_long, reg_uid->registry->reg.ust);
2372 if (ret < 0) {
2373 /*
2374 * Let's remove the previously created buffer registry channel so
2375 * it's not visible anymore in the session registry.
2376 */
2377 ust_registry_channel_del_free(reg_uid->registry->reg.ust,
2378 ua_chan->tracing_channel_id);
2379 buffer_reg_channel_remove(reg_uid->registry, reg_chan);
2380 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2381 goto error;
2382 }
2383
2384 /*
2385 * Set up the streams and add them to the session registry.
2386 */
2387 ret = setup_buffer_reg_channel(reg_uid->registry, ua_chan, reg_chan);
2388 if (ret < 0) {
2389 goto error;
2390 }
2391
2392 }
2393
2394 /* Send buffers to the application. */
2395 ret = send_channel_uid_to_ust(reg_chan, app, ua_sess, ua_chan);
2396 if (ret < 0) {
2397 goto error;
2398 }
2399
2400 error:
2401 return ret;
2402 }
2403
2404 /*
2405 * Create the buffers on a per-PID basis and send them to the application.
2406 *
2407 * Return 0 on success else a negative value.
2408 */
2409 static int create_channel_per_pid(struct ust_app *app,
2410 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2411 struct ust_app_channel *ua_chan)
2412 {
2413 int ret;
2414 struct ust_registry_session *registry;
2415
2416 assert(app);
2417 assert(usess);
2418 assert(ua_sess);
2419 assert(ua_chan);
2420
2421 DBG("UST app creating channel %s with per PID buffers", ua_chan->name);
2422
2423 rcu_read_lock();
2424
2425 registry = get_session_registry(ua_sess);
2426 assert(registry);
2427
2428 /* Create and add a new channel registry to session. */
2429 ret = ust_registry_channel_add(registry, ua_chan->key);
2430 if (ret < 0) {
2431 goto error;
2432 }
2433
2434 /* Create and get channel on the consumer side. */
2435 ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
2436 app->bits_per_long, registry);
2437 if (ret < 0) {
2438 goto error;
2439 }
2440
2441 ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
2442 if (ret < 0) {
2443 goto error;
2444 }
2445
2446 error:
2447 rcu_read_unlock();
2448 return ret;
2449 }
2450
2451 /*
2452 * From an already allocated ust app channel, create the channel buffers if
2453 * needed and send them to the application. This MUST be called with a RCU read
2454 * side lock acquired.
2455 *
2456 * Return 0 on success or else a negative value.
2457 */
2458 static int do_create_channel(struct ust_app *app,
2459 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2460 struct ust_app_channel *ua_chan)
2461 {
2462 int ret;
2463
2464 assert(app);
2465 assert(usess);
2466 assert(ua_sess);
2467 assert(ua_chan);
2468
2469 /* Handle buffer type before sending the channel to the application. */
2470 switch (usess->buffer_type) {
2471 case LTTNG_BUFFER_PER_UID:
2472 {
2473 ret = create_channel_per_uid(app, usess, ua_sess, ua_chan);
2474 if (ret < 0) {
2475 goto error;
2476 }
2477 break;
2478 }
2479 case LTTNG_BUFFER_PER_PID:
2480 {
2481 ret = create_channel_per_pid(app, usess, ua_sess, ua_chan);
2482 if (ret < 0) {
2483 goto error;
2484 }
2485 break;
2486 }
2487 default:
2488 assert(0);
2489 ret = -EINVAL;
2490 goto error;
2491 }
2492
2493 /* Initialize ust objd object using the received handle and add it. */
2494 lttng_ht_node_init_ulong(&ua_chan->ust_objd_node, ua_chan->handle);
2495 lttng_ht_add_unique_ulong(app->ust_objd, &ua_chan->ust_objd_node);
2496
2497 /* If channel is not enabled, disable it on the tracer */
2498 if (!ua_chan->enabled) {
2499 ret = disable_ust_channel(app, ua_sess, ua_chan);
2500 if (ret < 0) {
2501 goto error;
2502 }
2503 }
2504
2505 error:
2506 return ret;
2507 }
2508
2509 /*
2510 * Allocate a UST app channel and create it on the tracer. Set *ua_chanp to
2511 * the newly created channel if ua_chanp is not NULL.
2512 *
2513 * Called with UST app session lock and RCU read-side lock held.
2514 *
2515 * Return 0 on success or else a negative value.
2516 */
2517 static int create_ust_app_channel(struct ust_app_session *ua_sess,
2518 struct ltt_ust_channel *uchan, struct ust_app *app,
2519 enum lttng_ust_chan_type type, struct ltt_ust_session *usess,
2520 struct ust_app_channel **ua_chanp)
2521 {
2522 int ret = 0;
2523 struct lttng_ht_iter iter;
2524 struct lttng_ht_node_str *ua_chan_node;
2525 struct ust_app_channel *ua_chan;
2526
2527 /* Lookup channel in the ust app session */
2528 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
2529 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
2530 if (ua_chan_node != NULL) {
2531 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
2532 goto end;
2533 }
2534
2535 ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
2536 if (ua_chan == NULL) {
2537 /* Only malloc can fail here */
2538 ret = -ENOMEM;
2539 goto error_alloc;
2540 }
2541 shadow_copy_channel(ua_chan, uchan);
2542
2543 /* Set channel type. */
2544 ua_chan->attr.type = type;
2545
2546 ret = do_create_channel(app, usess, ua_sess, ua_chan);
2547 if (ret < 0) {
2548 goto error;
2549 }
2550
2551 DBG2("UST app create channel %s for PID %d completed", ua_chan->name,
2552 app->pid);
2553
2554 /* Only add the channel if successful on the tracer side. */
2555 lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
2556
2557 end:
2558 if (ua_chanp) {
2559 *ua_chanp = ua_chan;
2560 }
2561
2562 /* Everything went well. */
2563 return 0;
2564
2565 error:
2566 delete_ust_app_channel(ua_chan->is_sent ? app->sock : -1, ua_chan, app);
2567 error_alloc:
2568 return ret;
2569 }
2570
2571 /*
2572 * Create UST app event and create it on the tracer side.
2573 *
2574 * Called with ust app session mutex held.
2575 */
2576 static
2577 int create_ust_app_event(struct ust_app_session *ua_sess,
2578 struct ust_app_channel *ua_chan, struct ltt_ust_event *uevent,
2579 struct ust_app *app)
2580 {
2581 int ret = 0;
2582 struct ust_app_event *ua_event;
2583
2584 /* Get event node */
2585 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
2586 uevent->filter, uevent->attr.loglevel);
2587 if (ua_event != NULL) {
2588 ret = -EEXIST;
2589 goto end;
2590 }
2591
2592 /* Does not exist so create one */
2593 ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
2594 if (ua_event == NULL) {
2595 /* Only malloc can fail here, so something is really wrong */
2596 ret = -ENOMEM;
2597 goto end;
2598 }
2599 shadow_copy_event(ua_event, uevent);
2600
2601 /* Create it on the tracer side */
2602 ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
2603 if (ret < 0) {
2604 /* Not found previously means that it does not exist on the tracer */
2605 assert(ret != -LTTNG_UST_ERR_EXIST);
2606 goto error;
2607 }
2608
2609 add_unique_ust_app_event(ua_chan, ua_event);
2610
2611 DBG2("UST app create event %s for PID %d completed", ua_event->name,
2612 app->pid);
2613
2614 end:
2615 return ret;
2616
2617 error:
2618 /* Valid: the caller already holds the RCU read side lock. */
2619 delete_ust_app_event(-1, ua_event);
2620 return ret;
2621 }
2622
2623 /*
2624 * Create UST metadata and open it on the tracer side.
2625 *
2626 * Called with UST app session lock held and RCU read side lock.
2627 */
2628 static int create_ust_app_metadata(struct ust_app_session *ua_sess,
2629 struct ust_app *app, struct consumer_output *consumer,
2630 struct ustctl_consumer_channel_attr *attr)
2631 {
2632 int ret = 0;
2633 struct ust_app_channel *metadata;
2634 struct consumer_socket *socket;
2635 struct ust_registry_session *registry;
2636
2637 assert(ua_sess);
2638 assert(app);
2639 assert(consumer);
2640
2641 registry = get_session_registry(ua_sess);
2642 assert(registry);
2643
2644 /* Metadata already exists for this registry or it was closed previously */
2645 if (registry->metadata_key || registry->metadata_closed) {
2646 ret = 0;
2647 goto error;
2648 }
2649
2650 /* Allocate UST metadata */
2651 metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, NULL);
2652 if (!metadata) {
2653 /* malloc() failed */
2654 ret = -ENOMEM;
2655 goto error;
2656 }
2657
2658 if (!attr) {
2659 /* Set default attributes for metadata. */
2660 metadata->attr.overwrite = DEFAULT_CHANNEL_OVERWRITE;
2661 metadata->attr.subbuf_size = default_get_metadata_subbuf_size();
2662 metadata->attr.num_subbuf = DEFAULT_METADATA_SUBBUF_NUM;
2663 metadata->attr.switch_timer_interval = DEFAULT_METADATA_SWITCH_TIMER;
2664 metadata->attr.read_timer_interval = DEFAULT_METADATA_READ_TIMER;
2665 metadata->attr.output = LTTNG_UST_MMAP;
2666 metadata->attr.type = LTTNG_UST_CHAN_METADATA;
2667 } else {
2668 memcpy(&metadata->attr, attr, sizeof(metadata->attr));
2669 metadata->attr.output = LTTNG_UST_MMAP;
2670 metadata->attr.type = LTTNG_UST_CHAN_METADATA;
2671 }
2672
2673 /* Need one fd for the channel. */
2674 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
2675 if (ret < 0) {
2676 ERR("Exhausted number of available FD upon create metadata");
2677 goto error;
2678 }
2679
2680 /* Get the right consumer socket for the application. */
2681 socket = consumer_find_socket_by_bitness(app->bits_per_long, consumer);
2682 if (!socket) {
2683 ret = -EINVAL;
2684 goto error_consumer;
2685 }
2686
2687 /*
2688 * Keep metadata key so we can identify it on the consumer side. Assign it
2689 * to the registry *before* we ask the consumer so we avoid the race where
2690 * the consumer requests the metadata before our ask_channel call has
2691 * returned.
2692 */
2693 registry->metadata_key = metadata->key;
2694
2695 /*
2696 * Ask the consumer to create the metadata channel. The metadata object
2697 * will be created by the consumer and kept there. However, the stream is
2698 * never added or monitored until we do a first push metadata to the
2699 * consumer.
2700 */
2701 ret = ust_consumer_ask_channel(ua_sess, metadata, consumer, socket,
2702 registry);
2703 if (ret < 0) {
2704 /* Nullify the metadata key so we don't try to close it later on. */
2705 registry->metadata_key = 0;
2706 goto error_consumer;
2707 }
2708
2709 /*
2710 * The setup command will make the metadata stream be sent to the relayd,
2711 * if applicable, and to the thread managing the metadata. This is important
2712 * because after this point, if an error occurs, the only way the stream
2713 * can be deleted is through the consumer, which now monitors it.
2714 */
2715 ret = consumer_setup_metadata(socket, metadata->key);
2716 if (ret < 0) {
2717 /* Nullify the metadata key so we don't try to close it later on. */
2718 registry->metadata_key = 0;
2719 goto error_consumer;
2720 }
2721
2722 DBG2("UST metadata with key %" PRIu64 " created for app pid %d",
2723 metadata->key, app->pid);
2724
2725 error_consumer:
2726 lttng_fd_put(LTTNG_FD_APPS, 1);
2727 delete_ust_app_channel(-1, metadata, app);
2728 error:
2729 return ret;
2730 }
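
/*
 * Editorial sketch (not part of the original source): the ordering used by
 * create_ust_app_metadata() above, reduced to a generic pattern. The key is
 * published *before* the consumer is asked to create the channel so that a
 * concurrent metadata request can already be matched against it; on failure
 * the key is reset so nothing later tries to close a channel that was never
 * created. `ask_consumer' is a hypothetical stand-in for
 * ust_consumer_ask_channel().
 */
static int example_publish_key_then_ask(uint64_t *published_key, uint64_t key,
		int (*ask_consumer)(void))
{
	int ret;

	*published_key = key;
	ret = ask_consumer();
	if (ret < 0) {
		/* Roll back the published key on error. */
		*published_key = 0;
	}
	return ret;
}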
2731
2732 /*
2733 * Return a pointer to the hash table of traceable apps.
2734 */
2735 struct lttng_ht *ust_app_get_ht(void)
2736 {
2737 return ust_app_ht;
2738 }
2739
2740 /*
2741 * Return ust app pointer or NULL if not found. RCU read side lock MUST be
2742 * acquired before calling this function.
2743 */
2744 struct ust_app *ust_app_find_by_pid(pid_t pid)
2745 {
2746 struct ust_app *app = NULL;
2747 struct lttng_ht_node_ulong *node;
2748 struct lttng_ht_iter iter;
2749
2750 lttng_ht_lookup(ust_app_ht, (void *)((unsigned long) pid), &iter);
2751 node = lttng_ht_iter_get_node_ulong(&iter);
2752 if (node == NULL) {
2753 DBG2("UST app no found with pid %d", pid);
2754 goto error;
2755 }
2756
2757 DBG2("Found UST app by pid %d", pid);
2758
2759 app = caa_container_of(node, struct ust_app, pid_n);
2760
2761 error:
2762 return app;
2763 }
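
/*
 * Editorial sketch (not part of the original source): a minimal caller of
 * ust_app_find_by_pid(). As stated above, the RCU read-side lock must be
 * held across the lookup and for as long as the returned pointer is used,
 * since the application can be unregistered concurrently.
 */
static void example_use_app_by_pid(pid_t pid)
{
	struct ust_app *app;

	rcu_read_lock();
	app = ust_app_find_by_pid(pid);
	if (app) {
		DBG("Example: app %s (pid %d) uses LTTng-UST %d.%d",
				app->name, app->pid, app->v_major, app->v_minor);
	}
	rcu_read_unlock();
}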
2764
2765 /*
2766 * Allocate and init an UST app object using the registration information and
2767 * the command socket. This is called when the command socket connects to the
2768 * session daemon.
2769 *
2770 * The object is returned on success or else NULL.
2771 */
2772 struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
2773 {
2774 struct ust_app *lta = NULL;
2775
2776 assert(msg);
2777 assert(sock >= 0);
2778
2779 DBG3("UST app creating application for socket %d", sock);
2780
2781 if ((msg->bits_per_long == 64 &&
2782 (uatomic_read(&ust_consumerd64_fd) == -EINVAL))
2783 || (msg->bits_per_long == 32 &&
2784 (uatomic_read(&ust_consumerd32_fd) == -EINVAL))) {
2785 ERR("Registration failed: application \"%s\" (pid: %d) has "
2786 "%d-bit long, but no consumerd for this size is available.\n",
2787 msg->name, msg->pid, msg->bits_per_long);
2788 goto error;
2789 }
2790
2791 lta = zmalloc(sizeof(struct ust_app));
2792 if (lta == NULL) {
2793 PERROR("malloc");
2794 goto error;
2795 }
2796
2797 lta->ppid = msg->ppid;
2798 lta->uid = msg->uid;
2799 lta->gid = msg->gid;
2800
2801 lta->bits_per_long = msg->bits_per_long;
2802 lta->uint8_t_alignment = msg->uint8_t_alignment;
2803 lta->uint16_t_alignment = msg->uint16_t_alignment;
2804 lta->uint32_t_alignment = msg->uint32_t_alignment;
2805 lta->uint64_t_alignment = msg->uint64_t_alignment;
2806 lta->long_alignment = msg->long_alignment;
2807 lta->byte_order = msg->byte_order;
2808
2809 lta->v_major = msg->major;
2810 lta->v_minor = msg->minor;
2811 lta->sessions = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
2812 lta->ust_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
2813 lta->notify_sock = -1;
2814
2815 /* Copy name and make sure it's NULL terminated. */
2816 strncpy(lta->name, msg->name, sizeof(lta->name));
2817 lta->name[UST_APP_PROCNAME_LEN] = '\0';
2818
2819 /*
2820 * The application's compatibility is checked when its registration
2821 * information is received. So, at this point, the application is known to
2822 * work with this session daemon.
2823 */
2824 lta->compatible = 1;
2825
2826 lta->pid = msg->pid;
2827 lttng_ht_node_init_ulong(&lta->pid_n, (unsigned long) lta->pid);
2828 lta->sock = sock;
2829 lttng_ht_node_init_ulong(&lta->sock_n, (unsigned long) lta->sock);
2830
2831 CDS_INIT_LIST_HEAD(&lta->teardown_head);
2832
2833 error:
2834 return lta;
2835 }
2836
2837 /*
2838 * For a given application object, add it to every hash table.
2839 */
2840 void ust_app_add(struct ust_app *app)
2841 {
2842 assert(app);
2843 assert(app->notify_sock >= 0);
2844
2845 rcu_read_lock();
2846
2847 /*
2848 * On a re-registration, we want to kick out the previous registration of
2849 * that pid
2850 */
2851 lttng_ht_add_replace_ulong(ust_app_ht, &app->pid_n);
2852
2853 /*
2854 * The socket _should_ be unique until _we_ call close. So, an add_unique
2855 * is used for ust_app_ht_by_sock, which asserts if the entry is already in
2856 * the table.
2857 */
2858 lttng_ht_add_unique_ulong(ust_app_ht_by_sock, &app->sock_n);
2859
2860 /* Add application to the notify socket hash table. */
2861 lttng_ht_node_init_ulong(&app->notify_sock_n, app->notify_sock);
2862 lttng_ht_add_unique_ulong(ust_app_ht_by_notify_sock, &app->notify_sock_n);
2863
2864 DBG("App registered with pid:%d ppid:%d uid:%d gid:%d sock:%d name:%s "
2865 "notify_sock:%d (version %d.%d)", app->pid, app->ppid, app->uid,
2866 app->gid, app->sock, app->name, app->notify_sock, app->v_major,
2867 app->v_minor);
2868
2869 rcu_read_unlock();
2870 }
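
/*
 * Editorial sketch (not part of the original source): how a hypothetical
 * registration path could chain ust_app_create() and ust_app_add(). Since
 * ust_app_add() asserts notify_sock >= 0, the notify socket must be assigned
 * between the two calls; the error handling and socket setup of the real
 * registration threads is omitted here.
 */
static int example_register_app(struct ust_register_msg *msg, int sock,
		int notify_sock)
{
	struct ust_app *app;

	app = ust_app_create(msg, sock);
	if (!app) {
		return -1;
	}
	app->notify_sock = notify_sock;
	ust_app_add(app);
	return 0;
}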
2871
2872 /*
2873 * Set the application version into the object.
2874 *
2875 * Return 0 on success, else a negative value: either an errno code or an
2876 * LTTng-UST error code.
2877 */
2878 int ust_app_version(struct ust_app *app)
2879 {
2880 int ret;
2881
2882 assert(app);
2883
2884 ret = ustctl_tracer_version(app->sock, &app->version);
2885 if (ret < 0) {
2886 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
2887 ERR("UST app %d verson failed with ret %d", app->sock, ret);
2888 } else {
2889 DBG3("UST app %d verion failed. Application is dead", app->sock);
2890 }
2891 }
2892
2893 return ret;
2894 }
2895
2896 /*
2897 * Unregister app by removing it from the global traceable app list and freeing
2898 * the data struct.
2899 *
2900 * The socket is already closed at this point, so there is no need to close it.
2901 */
2902 void ust_app_unregister(int sock)
2903 {
2904 struct ust_app *lta;
2905 struct lttng_ht_node_ulong *node;
2906 struct lttng_ht_iter iter;
2907 struct ust_app_session *ua_sess;
2908 int ret;
2909
2910 rcu_read_lock();
2911
2912 /* Get the node reference for a call_rcu */
2913 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
2914 node = lttng_ht_iter_get_node_ulong(&iter);
2915 assert(node);
2916
2917 lta = caa_container_of(node, struct ust_app, sock_n);
2918 DBG("PID %d unregistering with sock %d", lta->pid, sock);
2919
2920 /* Remove application from socket hash table */
2921 ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
2922 assert(!ret);
2923
2924 /*
2925 * Remove application from notify hash table. The thread handling the
2926 * notify socket could have deleted the node, so ignore any error because
2927 * either way it's valid. The close of that socket is handled by the other
2928 * thread.
2929 */
2930 iter.iter.node = &lta->notify_sock_n.node;
2931 (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
2932
2933 /*
2934 * Ignore return value since the node might have been removed before by an
2935 * add replace during app registration because the PID can be reassigned by
2936 * the OS.
2937 */
2938 iter.iter.node = &lta->pid_n.node;
2939 ret = lttng_ht_del(ust_app_ht, &iter);
2940 if (ret) {
2941 DBG3("Unregister app by PID %d failed. This can happen on pid reuse",
2942 lta->pid);
2943 }
2944
2945 /* Remove sessions so they are not visible during deletion.*/
2946 cds_lfht_for_each_entry(lta->sessions->ht, &iter.iter, ua_sess,
2947 node.node) {
2948 struct ust_registry_session *registry;
2949
2950 ret = lttng_ht_del(lta->sessions, &iter);
2951 if (ret) {
2952 /* The session was already removed, so it is already scheduled for teardown. */
2953 continue;
2954 }
2955
2956 /*
2957 * Add session to list for teardown. This is safe since at this point we
2958 * are the only one using this list.
2959 */
2960 pthread_mutex_lock(&ua_sess->lock);
2961
2962 /*
2963 * Normally, this is done in the delete session process which is
2964 * executed in the call_rcu below. However, upon unregistration we can't
2965 * afford to wait for the grace period before pushing data or else the
2966 * data pending feature can race between the unregistration and stop
2967 * command where the data pending command is sent *before* the grace
2968 * period ended.
2969 *
2970 * The close metadata below nullifies the metadata pointer in the
2971 * session so the delete session will NOT push/close a second time.
2972 */
2973 registry = get_session_registry(ua_sess);
2974 if (registry && !registry->metadata_closed) {
2975 /* Push metadata for application before freeing the application. */
2976 (void) push_metadata(registry, ua_sess->consumer);
2977
2978 /*
2979 * Don't ask to close metadata for global per UID buffers. Close
2980 * metadata only when the trace session is destroyed in this case. Also, the
2981 * previous push metadata could have flagged the metadata registry to
2982 * close, so don't send a close command if it is already closed.
2983 */
2984 if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID &&
2985 !registry->metadata_closed) {
2986 /* And ask to close it for this session registry. */
2987 (void) close_metadata(registry, ua_sess->consumer);
2988 }
2989 }
2990
2991 cds_list_add(&ua_sess->teardown_node, &lta->teardown_head);
2992 pthread_mutex_unlock(&ua_sess->lock);
2993 }
2994
2995 /* Free memory */
2996 call_rcu(&lta->pid_n.head, delete_ust_app_rcu);
2997
2998 rcu_read_unlock();
2999 return;
3000 }
3001
3002 /*
3003 * Return the number of registered (traceable) applications.
3004 */
3005 unsigned long ust_app_list_count(void)
3006 {
3007 unsigned long count;
3008
3009 rcu_read_lock();
3010 count = lttng_ht_get_count(ust_app_ht);
3011 rcu_read_unlock();
3012
3013 return count;
3014 }
3015
3016 /*
3017 * Fill the events array with the names of all events of all registered apps.
3018 */
3019 int ust_app_list_events(struct lttng_event **events)
3020 {
3021 int ret, handle;
3022 size_t nbmem, count = 0;
3023 struct lttng_ht_iter iter;
3024 struct ust_app *app;
3025 struct lttng_event *tmp_event;
3026
3027 nbmem = UST_APP_EVENT_LIST_SIZE;
3028 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event));
3029 if (tmp_event == NULL) {
3030 PERROR("zmalloc ust app events");
3031 ret = -ENOMEM;
3032 goto error;
3033 }
3034
3035 rcu_read_lock();
3036
3037 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3038 struct lttng_ust_tracepoint_iter uiter;
3039
3040 health_code_update();
3041
3042 if (!app->compatible) {
3043 /*
3044 * TODO: In time, we should notify the caller of this error by
3045 * telling them that this is a version error.
3046 */
3047 continue;
3048 }
3049 handle = ustctl_tracepoint_list(app->sock);
3050 if (handle < 0) {
3051 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
3052 ERR("UST app list events getting handle failed for app pid %d",
3053 app->pid);
3054 }
3055 continue;
3056 }
3057
3058 while ((ret = ustctl_tracepoint_list_get(app->sock, handle,
3059 &uiter)) != -LTTNG_UST_ERR_NOENT) {
3060 /* Handle ustctl error. */
3061 if (ret < 0) {
3062 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
3063 ERR("UST app tp list get failed for app %d with ret %d",
3064 app->sock, ret);
3065 } else {
3066 DBG3("UST app tp list get failed. Application is dead");
3067 /*
3068 * This is normal behavior, an application can die during the
3069 * creation process. Don't report an error so the execution can
3070 * continue normally.
3071 */
3072 break;
3073 }
3074 free(tmp_event);
3075 goto rcu_error;
3076 }
3077
3078 health_code_update();
3079 if (count >= nbmem) {
3080 /* In case the realloc fails, we free the memory */
3081 void *ptr;
3082
3083 DBG2("Reallocating event list from %zu to %zu entries", nbmem,
3084 2 * nbmem);
3085 nbmem *= 2;
3086 ptr = realloc(tmp_event, nbmem * sizeof(struct lttng_event));
3087 if (ptr == NULL) {
3088 PERROR("realloc ust app events");
3089 free(tmp_event);
3090 ret = -ENOMEM;
3091 goto rcu_error;
3092 }
3093 tmp_event = ptr;
3094 }
3095 memcpy(tmp_event[count].name, uiter.name, LTTNG_UST_SYM_NAME_LEN);
3096 tmp_event[count].loglevel = uiter.loglevel;
3097 tmp_event[count].type = (enum lttng_event_type) LTTNG_UST_TRACEPOINT;
3098 tmp_event[count].pid = app->pid;
3099 tmp_event[count].enabled = -1;
3100 count++;
3101 }
3102 }
3103
3104 ret = count;
3105 *events = tmp_event;
3106
3107 DBG2("UST app list events done (%zu events)", count);
3108
3109 rcu_error:
3110 rcu_read_unlock();
3111 error:
3112 health_code_update();
3113 return ret;
3114 }
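
/*
 * Editorial sketch (not part of the original source): the grow-by-doubling
 * idiom used by ust_app_list_events() above, isolated into a hypothetical
 * helper. realloc() is done through a temporary pointer so that on failure
 * the original buffer is left intact and still owned by the caller.
 */
static int example_grow_event_array(struct lttng_event **array, size_t *nbmem)
{
	void *ptr;
	size_t new_nbmem = *nbmem * 2;

	ptr = realloc(*array, new_nbmem * sizeof(struct lttng_event));
	if (!ptr) {
		PERROR("realloc example event array");
		return -ENOMEM;
	}
	*array = ptr;
	*nbmem = new_nbmem;
	return 0;
}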
3115
3116 /*
3117 * Fill the fields array with all event fields of all registered apps.
3118 */
3119 int ust_app_list_event_fields(struct lttng_event_field **fields)
3120 {
3121 int ret, handle;
3122 size_t nbmem, count = 0;
3123 struct lttng_ht_iter iter;
3124 struct ust_app *app;
3125 struct lttng_event_field *tmp_event;
3126
3127 nbmem = UST_APP_EVENT_LIST_SIZE;
3128 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event_field));
3129 if (tmp_event == NULL) {
3130 PERROR("zmalloc ust app event fields");
3131 ret = -ENOMEM;
3132 goto error;
3133 }
3134
3135 rcu_read_lock();
3136
3137 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3138 struct lttng_ust_field_iter uiter;
3139
3140 health_code_update();
3141
3142 if (!app->compatible) {
3143 /*
3144 * TODO: In time, we should notify the caller of this error by
3145 * telling them that this is a version error.
3146 */
3147 continue;
3148 }
3149 handle = ustctl_tracepoint_field_list(app->sock);
3150 if (handle < 0) {
3151 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
3152 ERR("UST app list field getting handle failed for app pid %d",
3153 app->pid);
3154 }
3155 continue;
3156 }
3157
3158 while ((ret = ustctl_tracepoint_field_list_get(app->sock, handle,
3159 &uiter)) != -LTTNG_UST_ERR_NOENT) {
3160 /* Handle ustctl error. */
3161 if (ret < 0) {
3162 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
3163 ERR("UST app tp list field failed for app %d with ret %d",
3164 app->sock, ret);
3165 } else {
3166 DBG3("UST app tp list field failed. Application is dead");
3167 /*
3168 * This is normal behavior, an application can die during the
3169 * creation process. Don't report an error so the execution can
3170 * continue normally.
3171 */
3172 break;
3173 }
3174 free(tmp_event);
3175 goto rcu_error;
3176 }
3177
3178 health_code_update();
3179 if (count >= nbmem) {
3180 /* In case the realloc fails, we free the memory */
3181 void *ptr;
3182
3183 DBG2("Reallocating event field list from %zu to %zu entries", nbmem,
3184 2 * nbmem);
3185 nbmem *= 2;
3186 ptr = realloc(tmp_event, nbmem * sizeof(struct lttng_event_field));
3187 if (ptr == NULL) {
3188 PERROR("realloc ust app event fields");
3189 free(tmp_event);
3190 ret = -ENOMEM;
3191 goto rcu_error;
3192 }
3193 tmp_event = ptr;
3194 }
3195
3196 memcpy(tmp_event[count].field_name, uiter.field_name, LTTNG_UST_SYM_NAME_LEN);
3197 tmp_event[count].type = uiter.type;
3198 tmp_event[count].nowrite = uiter.nowrite;
3199
3200 memcpy(tmp_event[count].event.name, uiter.event_name, LTTNG_UST_SYM_NAME_LEN);
3201 tmp_event[count].event.loglevel = uiter.loglevel;
3202 tmp_event[count].event.type = LTTNG_UST_TRACEPOINT;
3203 tmp_event[count].event.pid = app->pid;
3204 tmp_event[count].event.enabled = -1;
3205 count++;
3206 }
3207 }
3208
3209 ret = count;
3210 *fields = tmp_event;
3211
3212 DBG2("UST app list event fields done (%zu events)", count);
3213
3214 rcu_error:
3215 rcu_read_unlock();
3216 error:
3217 health_code_update();
3218 return ret;
3219 }
3220
3221 /*
3222 * Free and clean all traceable apps of the global list.
3223 *
3224 * Should _NOT_ be called with RCU read-side lock held.
3225 */
3226 void ust_app_clean_list(void)
3227 {
3228 int ret;
3229 struct ust_app *app;
3230 struct lttng_ht_iter iter;
3231
3232 DBG2("UST app cleaning registered apps hash table");
3233
3234 rcu_read_lock();
3235
3236 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3237 ret = lttng_ht_del(ust_app_ht, &iter);
3238 assert(!ret);
3239 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
3240 }
3241
3242 /* Cleanup socket hash table */
3243 cds_lfht_for_each_entry(ust_app_ht_by_sock->ht, &iter.iter, app,
3244 sock_n.node) {
3245 ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
3246 assert(!ret);
3247 }
3248
3249 /* Cleanup notify socket hash table */
3250 cds_lfht_for_each_entry(ust_app_ht_by_notify_sock->ht, &iter.iter, app,
3251 notify_sock_n.node) {
3252 ret = lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
3253 assert(!ret);
3254 }
3255 rcu_read_unlock();
3256
3257 /* Destroy is done only when the ht is empty */
3258 ht_cleanup_push(ust_app_ht);
3259 ht_cleanup_push(ust_app_ht_by_sock);
3260 ht_cleanup_push(ust_app_ht_by_notify_sock);
3261 }
3262
3263 /*
3264 * Init UST app hash table.
3265 */
3266 void ust_app_ht_alloc(void)
3267 {
3268 ust_app_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3269 ust_app_ht_by_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3270 ust_app_ht_by_notify_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3271 }
3272
3273 /*
3274 * For a specific UST session, disable the channel for all registered apps.
3275 */
3276 int ust_app_disable_channel_glb(struct ltt_ust_session *usess,
3277 struct ltt_ust_channel *uchan)
3278 {
3279 int ret = 0;
3280 struct lttng_ht_iter iter;
3281 struct lttng_ht_node_str *ua_chan_node;
3282 struct ust_app *app;
3283 struct ust_app_session *ua_sess;
3284 struct ust_app_channel *ua_chan;
3285
3286 if (usess == NULL || uchan == NULL) {
3287 ERR("Disabling UST global channel with NULL values");
3288 ret = -1;
3289 goto error;
3290 }
3291
3292 DBG2("UST app disabling channel %s from global domain for session id %" PRIu64,
3293 uchan->name, usess->id);
3294
3295 rcu_read_lock();
3296
3297 /* For every registered application */
3298 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3299 struct lttng_ht_iter uiter;
3300 if (!app->compatible) {
3301 /*
3302 * TODO: In time, we should notify the caller of this error by
3303 * telling them that this is a version error.
3304 */
3305 continue;
3306 }
3307 ua_sess = lookup_session_by_app(usess, app);
3308 if (ua_sess == NULL) {
3309 continue;
3310 }
3311
3312 /* Get channel */
3313 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3314 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3315 /* If the session is found for the app, the channel must be there */
3316 assert(ua_chan_node);
3317
3318 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3319 /* The channel must not be already disabled */
3320 assert(ua_chan->enabled == 1);
3321
3322 /* Disable channel onto application */
3323 ret = disable_ust_app_channel(ua_sess, ua_chan, app);
3324 if (ret < 0) {
3325 /* XXX: We might want to report this error at some point... */
3326 continue;
3327 }
3328 }
3329
3330 rcu_read_unlock();
3331
3332 error:
3333 return ret;
3334 }
3335
3336 /*
3337 * For a specific UST session, enable the channel for all registered apps.
3338 */
3339 int ust_app_enable_channel_glb(struct ltt_ust_session *usess,
3340 struct ltt_ust_channel *uchan)
3341 {
3342 int ret = 0;
3343 struct lttng_ht_iter iter;
3344 struct ust_app *app;
3345 struct ust_app_session *ua_sess;
3346
3347 if (usess == NULL || uchan == NULL) {
3348 ERR("Adding UST global channel to NULL values");
3349 ret = -1;
3350 goto error;
3351 }
3352
3353 DBG2("UST app enabling channel %s to global domain for session id %" PRIu64,
3354 uchan->name, usess->id);
3355
3356 rcu_read_lock();
3357
3358 /* For every registered application */
3359 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3360 if (!app->compatible) {
3361 /*
3362 * TODO: In time, we should notify the caller of this error by
3363 * telling them that this is a version error.
3364 */
3365 continue;
3366 }
3367 ua_sess = lookup_session_by_app(usess, app);
3368 if (ua_sess == NULL) {
3369 continue;
3370 }
3371
3372 /* Enable channel onto application */
3373 ret = enable_ust_app_channel(ua_sess, uchan, app);
3374 if (ret < 0) {
3375 /* XXX: We might want to report this error at some point... */
3376 continue;
3377 }
3378 }
3379
3380 rcu_read_unlock();
3381
3382 error:
3383 return ret;
3384 }
3385
3386 /*
3387 * Disable an event in a channel and for a specific session.
3388 */
3389 int ust_app_disable_event_glb(struct ltt_ust_session *usess,
3390 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
3391 {
3392 int ret = 0;
3393 struct lttng_ht_iter iter, uiter;
3394 struct lttng_ht_node_str *ua_chan_node, *ua_event_node;
3395 struct ust_app *app;
3396 struct ust_app_session *ua_sess;
3397 struct ust_app_channel *ua_chan;
3398 struct ust_app_event *ua_event;
3399
3400 DBG("UST app disabling event %s for all apps in channel "
3401 "%s for session id %" PRIu64,
3402 uevent->attr.name, uchan->name, usess->id);
3403
3404 rcu_read_lock();
3405
3406 /* For all registered applications */
3407 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3408 if (!app->compatible) {
3409 /*
3410 * TODO: In time, we should notify the caller of this error by
3411 * telling them that this is a version error.
3412 */
3413 continue;
3414 }
3415 ua_sess = lookup_session_by_app(usess, app);
3416 if (ua_sess == NULL) {
3417 /* Next app */
3418 continue;
3419 }
3420
3421 /* Lookup channel in the ust app session */
3422 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3423 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3424 if (ua_chan_node == NULL) {
3425 DBG2("Channel %s not found in session id %" PRIu64 " for app pid %d."
3426 "Skipping", uchan->name, usess->id, app->pid);
3427 continue;
3428 }
3429 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3430
3431 lttng_ht_lookup(ua_chan->events, (void *)uevent->attr.name, &uiter);
3432 ua_event_node = lttng_ht_iter_get_node_str(&uiter);
3433 if (ua_event_node == NULL) {
3434 DBG2("Event %s not found in channel %s for app pid %d."
3435 "Skipping", uevent->attr.name, uchan->name, app->pid);
3436 continue;
3437 }
3438 ua_event = caa_container_of(ua_event_node, struct ust_app_event, node);
3439
3440 ret = disable_ust_app_event(ua_sess, ua_event, app);
3441 if (ret < 0) {
3442 /* XXX: Report error someday... */
3443 continue;
3444 }
3445 }
3446
3447 rcu_read_unlock();
3448
3449 return ret;
3450 }
3451
3452 /*
3453 * For a specific UST session and UST channel, disable all events for all
3454 * registered apps.
3455 */
3456 int ust_app_disable_all_event_glb(struct ltt_ust_session *usess,
3457 struct ltt_ust_channel *uchan)
3458 {
3459 int ret = 0;
3460 struct lttng_ht_iter iter, uiter;
3461 struct lttng_ht_node_str *ua_chan_node;
3462 struct ust_app *app;
3463 struct ust_app_session *ua_sess;
3464 struct ust_app_channel *ua_chan;
3465 struct ust_app_event *ua_event;
3466
3467 DBG("UST app disabling all event for all apps in channel "
3468 "%s for session id %" PRIu64, uchan->name, usess->id);
3469
3470 rcu_read_lock();
3471
3472 /* For all registered applications */
3473 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3474 if (!app->compatible) {
3475 /*
3476 * TODO: In time, we should notify the caller of this error by
3477 * telling them that this is a version error.
3478 */
3479 continue;
3480 }
3481 ua_sess = lookup_session_by_app(usess, app);
3482 if (!ua_sess) {
3483 /* The application has a problem or is probably dead. */
3484 continue;
3485 }
3486
3487 /* Lookup channel in the ust app session */
3488 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3489 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3490 /* If the channel is not found, there is a code flow error */
3491 assert(ua_chan_node);
3492
3493 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3494
3495 /* Disable each event of the channel */
3496 cds_lfht_for_each_entry(ua_chan->events->ht, &uiter.iter, ua_event,
3497 node.node) {
3498 ret = disable_ust_app_event(ua_sess, ua_event, app);
3499 if (ret < 0) {
3500 /* XXX: Report error someday... */
3501 continue;
3502 }
3503 }
3504 }
3505
3506 rcu_read_unlock();
3507
3508 return ret;
3509 }
3510
3511 /*
3512 * For a specific UST session, create the channel for all registered apps.
3513 */
3514 int ust_app_create_channel_glb(struct ltt_ust_session *usess,
3515 struct ltt_ust_channel *uchan)
3516 {
3517 int ret = 0, created;
3518 struct lttng_ht_iter iter;
3519 struct ust_app *app;
3520 struct ust_app_session *ua_sess = NULL;
3521
3522 /* Very wrong code flow */
3523 assert(usess);
3524 assert(uchan);
3525
3526 DBG2("UST app adding channel %s to UST domain for session id %" PRIu64,
3527 uchan->name, usess->id);
3528
3529 rcu_read_lock();
3530
3531 /* For every registered application */
3532 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3533 if (!app->compatible) {
3534 /*
3535 * TODO: In time, we should notify the caller of this error by
3536 * telling them that this is a version error.
3537 */
3538 continue;
3539 }
3540 /*
3541 * Create session on the tracer side and add it to app session HT. Note
3542 * that if the session exists, it will simply return a pointer to the ust
3543 * app session.
3544 */
3545 ret = create_ust_app_session(usess, app, &ua_sess, &created);
3546 if (ret < 0) {
3547 switch (ret) {
3548 case -ENOTCONN:
3549 /*
3550 * The application's socket is not valid. Either a bad socket
3551 * or a timeout on it. We can't inform the caller that for a
3552 * specific app the session failed, so let's continue here.
3553 */
3554 continue;
3555 case -ENOMEM:
3556 default:
3557 goto error_rcu_unlock;
3558 }
3559 }
3560 assert(ua_sess);
3561
3562 pthread_mutex_lock(&ua_sess->lock);
3563 if (!strncmp(uchan->name, DEFAULT_METADATA_NAME,
3564 sizeof(uchan->name))) {
3565 struct ustctl_consumer_channel_attr attr;
3566 copy_channel_attr_to_ustctl(&attr, &uchan->attr);
3567 ret = create_ust_app_metadata(ua_sess, app, usess->consumer,
3568 &attr);
3569 } else {
3570 /* Create channel onto application. We don't need the chan ref. */
3571 ret = create_ust_app_channel(ua_sess, uchan, app,
3572 LTTNG_UST_CHAN_PER_CPU, usess, NULL);
3573 }
3574 pthread_mutex_unlock(&ua_sess->lock);
3575 if (ret < 0) {
3576 if (ret == -ENOMEM) {
3577 /* No more memory is a fatal error. Stop right now. */
3578 goto error_rcu_unlock;
3579 }
3580 /* Clean up the session if we just created it. */
3581 if (created) {
3582 destroy_app_session(app, ua_sess);
3583 }
3584 }
3585 }
3586
3587 error_rcu_unlock:
3588 rcu_read_unlock();
3589 return ret;
3590 }
3591
3592 /*
3593 * Enable event for a specific session and channel on the tracer.
3594 */
3595 int ust_app_enable_event_glb(struct ltt_ust_session *usess,
3596 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
3597 {
3598 int ret = 0;
3599 struct lttng_ht_iter iter, uiter;
3600 struct lttng_ht_node_str *ua_chan_node;
3601 struct ust_app *app;
3602 struct ust_app_session *ua_sess;
3603 struct ust_app_channel *ua_chan;
3604 struct ust_app_event *ua_event;
3605
3606 DBG("UST app enabling event %s for all apps for session id %" PRIu64,
3607 uevent->attr.name, usess->id);
3608
3609 /*
3610 * NOTE: At this point, this function is called only if the session and
3611 * channel passed are already created for all apps and enabled on the
3612 * tracer as well.
3613 */
3614
3615 rcu_read_lock();
3616
3617 /* For all registered applications */
3618 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3619 if (!app->compatible) {
3620 /*
3621 * TODO: In time, we should notify the caller of this error by
3622 * telling them that this is a version error.
3623 */
3624 continue;
3625 }
3626 ua_sess = lookup_session_by_app(usess, app);
3627 if (!ua_sess) {
3628 /* The application has a problem or is probably dead. */
3629 continue;
3630 }
3631
3632 pthread_mutex_lock(&ua_sess->lock);
3633
3634 /* Lookup channel in the ust app session */
3635 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3636 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3637 /* If the channel is not found, there is a code flow error */
3638 assert(ua_chan_node);
3639
3640 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3641
3642 /* Get event node */
3643 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
3644 uevent->filter, uevent->attr.loglevel);
3645 if (ua_event == NULL) {
3646 DBG3("UST app enable event %s not found for app PID %d."
3647 "Skipping app", uevent->attr.name, app->pid);
3648 goto next_app;
3649 }
3650
3651 ret = enable_ust_app_event(ua_sess, ua_event, app);
3652 if (ret < 0) {
3653 pthread_mutex_unlock(&ua_sess->lock);
3654 goto error;
3655 }
3656 next_app:
3657 pthread_mutex_unlock(&ua_sess->lock);
3658 }
3659
3660 error:
3661 rcu_read_unlock();
3662 return ret;
3663 }
3664
3665 /*
3666 * For a specific existing UST session and UST channel, creates the event for
3667 * all registered apps.
3668 */
3669 int ust_app_create_event_glb(struct ltt_ust_session *usess,
3670 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
3671 {
3672 int ret = 0;
3673 struct lttng_ht_iter iter, uiter;
3674 struct lttng_ht_node_str *ua_chan_node;
3675 struct ust_app *app;
3676 struct ust_app_session *ua_sess;
3677 struct ust_app_channel *ua_chan;
3678
3679 DBG("UST app creating event %s for all apps for session id %" PRIu64,
3680 uevent->attr.name, usess->id);
3681
3682 rcu_read_lock();
3683
3684 /* For all registered applications */
3685 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3686 if (!app->compatible) {
3687 /*
3688 * TODO: In time, we should notify the caller of this error by
3689 * telling them that this is a version error.
3690 */
3691 continue;
3692 }
3693 ua_sess = lookup_session_by_app(usess, app);
3694 if (!ua_sess) {
3695 /* The application has a problem or is probably dead. */
3696 continue;
3697 }
3698
3699 pthread_mutex_lock(&ua_sess->lock);
3700 /* Lookup channel in the ust app session */
3701 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3702 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3703 /* If the channel is not found, there is a code flow error */
3704 assert(ua_chan_node);
3705
3706 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3707
3708 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
3709 pthread_mutex_unlock(&ua_sess->lock);
3710 if (ret < 0) {
3711 if (ret != -LTTNG_UST_ERR_EXIST) {
3712 /* Possible value at this point: -ENOMEM. If so, we stop! */
3713 break;
3714 }
3715 DBG2("UST app event %s already exist on app PID %d",
3716 uevent->attr.name, app->pid);
3717 continue;
3718 }
3719 }
3720
3721 rcu_read_unlock();
3722
3723 return ret;
3724 }
3725
3726 /*
3727 * Start tracing for a specific UST session and app.
3728 */
3729 static
3730 int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app)
3731 {
3732 int ret = 0;
3733 struct ust_app_session *ua_sess;
3734
3735 DBG("Starting tracing for ust app pid %d", app->pid);
3736
3737 rcu_read_lock();
3738
3739 if (!app->compatible) {
3740 goto end;
3741 }
3742
3743 ua_sess = lookup_session_by_app(usess, app);
3744 if (ua_sess == NULL) {
3745 /* The session is in teardown process. Ignore and continue. */
3746 goto end;
3747 }
3748
3749 pthread_mutex_lock(&ua_sess->lock);
3750
3751 /* Upon restart, we skip the setup, already done */
3752 if (ua_sess->started) {
3753 goto skip_setup;
3754 }
3755
3756 /* Create directories if consumer is LOCAL and has a path defined. */
3757 if (usess->consumer->type == CONSUMER_DST_LOCAL &&
3758 strlen(usess->consumer->dst.trace_path) > 0) {
3759 ret = run_as_mkdir_recursive(usess->consumer->dst.trace_path,
3760 S_IRWXU | S_IRWXG, ua_sess->euid, ua_sess->egid);
3761 if (ret < 0) {
3762 if (ret != -EEXIST) {
3763 ERR("Trace directory creation error");
3764 goto error_unlock;
3765 }
3766 }
3767 }
3768
3769 /*
3770 * Create the metadata for the application. This returns gracefully if a
3771 * metadata was already set for the session.
3772 */
3773 ret = create_ust_app_metadata(ua_sess, app, usess->consumer, NULL);
3774 if (ret < 0) {
3775 goto error_unlock;
3776 }
3777
3778 health_code_update();
3779
3780 skip_setup:
3781 /* This starts the UST tracing */
3782 ret = ustctl_start_session(app->sock, ua_sess->handle);
3783 if (ret < 0) {
3784 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
3785 ERR("Error starting tracing for app pid: %d (ret: %d)",
3786 app->pid, ret);
3787 } else {
3788 DBG("UST app start session failed. Application is dead.");
3789 /*
3790 * This is normal behavior, an application can die during the
3791 * creation process. Don't report an error so the execution can
3792 * continue normally.
3793 */
3794 pthread_mutex_unlock(&ua_sess->lock);
3795 goto end;
3796 }
3797 goto error_unlock;
3798 }
3799
3800 /* Indicate that the session has been started once */
3801 ua_sess->started = 1;
3802
3803 pthread_mutex_unlock(&ua_sess->lock);
3804
3805 health_code_update();
3806
3807 /* Quiescent wait after starting trace */
3808 ret = ustctl_wait_quiescent(app->sock);
3809 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
3810 ERR("UST app wait quiescent failed for app pid %d ret %d",
3811 app->pid, ret);
3812 }
3813
3814 end:
3815 rcu_read_unlock();
3816 health_code_update();
3817 return 0;
3818
3819 error_unlock:
3820 pthread_mutex_unlock(&ua_sess->lock);
3821 rcu_read_unlock();
3822 health_code_update();
3823 return -1;
3824 }
3825
3826 /*
3827 * Stop tracing for a specific UST session and app.
3828 */
3829 static
3830 int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
3831 {
3832 int ret = 0;
3833 struct ust_app_session *ua_sess;
3834 struct ust_registry_session *registry;
3835
3836 DBG("Stopping tracing for ust app pid %d", app->pid);
3837
3838 rcu_read_lock();
3839
3840 if (!app->compatible) {
3841 goto end_no_session;
3842 }
3843
3844 ua_sess = lookup_session_by_app(usess, app);
3845 if (ua_sess == NULL) {
3846 goto end_no_session;
3847 }
3848
3849 pthread_mutex_lock(&ua_sess->lock);
3850
3851 /*
3852 * If started = 0, it means that stop trace has been called for a session
3853 * that was never started. It's possible since we can have a failed start
3854 * from either the application manager thread or the command thread. Simply
3855 * indicate that this is a stop error.
3856 */
3857 if (!ua_sess->started) {
3858 goto error_rcu_unlock;
3859 }
3860
3861 health_code_update();
3862
3863 /* This inhibits UST tracing */
3864 ret = ustctl_stop_session(app->sock, ua_sess->handle);
3865 if (ret < 0) {
3866 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
3867 ERR("Error stopping tracing for app pid: %d (ret: %d)",
3868 app->pid, ret);
3869 } else {
3870 DBG("UST app stop session failed. Application is dead.");
3871 /*
3872 * This is normal behavior, an application can die during the
3873 * creation process. Don't report an error so the execution can
3874 * continue normally.
3875 */
3876 goto end_unlock;
3877 }
3878 goto error_rcu_unlock;
3879 }
3880
3881 health_code_update();
3882
3883 /* Quiescent wait after stopping trace */
3884 ret = ustctl_wait_quiescent(app->sock);
3885 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
3886 ERR("UST app wait quiescent failed for app pid %d ret %d",
3887 app->pid, ret);
3888 }
3889
3890 health_code_update();
3891
3892 registry = get_session_registry(ua_sess);
3893 assert(registry);
3894
3895 if (!registry->metadata_closed) {
3896 /* Push metadata for application before freeing the application. */
3897 (void) push_metadata(registry, ua_sess->consumer);
3898 }
3899
3900 end_unlock:
3901 pthread_mutex_unlock(&ua_sess->lock);
3902 end_no_session:
3903 rcu_read_unlock();
3904 health_code_update();
3905 return 0;
3906
3907 error_rcu_unlock:
3908 pthread_mutex_unlock(&ua_sess->lock);
3909 rcu_read_unlock();
3910 health_code_update();
3911 return -1;
3912 }
3913
3914 /*
3915 * Flush buffers for a specific UST session and app.
3916 */
3917 static
3918 int ust_app_flush_trace(struct ltt_ust_session *usess, struct ust_app *app)
3919 {
3920 int ret = 0;
3921 struct lttng_ht_iter iter;
3922 struct ust_app_session *ua_sess;
3923 struct ust_app_channel *ua_chan;
3924
3925 DBG("Flushing buffers for ust app pid %d", app->pid);
3926
3927 rcu_read_lock();
3928
3929 if (!app->compatible) {
3930 goto end_no_session;
3931 }
3932
3933 ua_sess = lookup_session_by_app(usess, app);
3934 if (ua_sess == NULL) {
3935 goto end_no_session;
3936 }
3937
3938 pthread_mutex_lock(&ua_sess->lock);
3939
3940 health_code_update();
3941
3942 /* Flushing buffers */
3943 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
3944 node.node) {
3945 health_code_update();
3946 assert(ua_chan->is_sent);
3947 ret = ustctl_sock_flush_buffer(app->sock, ua_chan->obj);
3948 if (ret < 0) {
3949 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
3950 ERR("UST app PID %d channel %s flush failed with ret %d",
3951 app->pid, ua_chan->name, ret);
3952 } else {
3953 DBG3("UST app failed to flush %s. Application is dead.",
3954 ua_chan->name);
3955 /*
3956 * This is normal behavior, an application can die during the
3957 * creation process. Don't report an error so the execution can
3958 * continue normally.
3959 */
3960 }
3961 /* Continue flushing all buffers. */
3962 continue;
3963 }
3964 }
3965
3966 health_code_update();
3967
3968 pthread_mutex_unlock(&ua_sess->lock);
3969 end_no_session:
3970 rcu_read_unlock();
3971 health_code_update();
3972 return 0;
3973 }
3974
3975 /*
3976 * Destroy a specific UST session in apps.
3977 */
3978 static int destroy_trace(struct ltt_ust_session *usess, struct ust_app *app)
3979 {
3980 int ret;
3981 struct ust_app_session *ua_sess;
3982 struct lttng_ht_iter iter;
3983 struct lttng_ht_node_u64 *node;
3984
3985 DBG("Destroy tracing for ust app pid %d", app->pid);
3986
3987 rcu_read_lock();
3988
3989 if (!app->compatible) {
3990 goto end;
3991 }
3992
3993 __lookup_session_by_app(usess, app, &iter);
3994 node = lttng_ht_iter_get_node_u64(&iter);
3995 if (node == NULL) {
3996 /* Session is being deleted or has already been deleted. */
3997 goto end;
3998 }
3999 ua_sess = caa_container_of(node, struct ust_app_session, node);
4000
4001 health_code_update();
4002 destroy_app_session(app, ua_sess);
4003
4004 health_code_update();
4005
4006 /* Quiescent wait after stopping trace */
4007 ret = ustctl_wait_quiescent(app->sock);
4008 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4009 ERR("UST app wait quiescent failed for app pid %d ret %d",
4010 app->pid, ret);
4011 }
4012 end:
4013 rcu_read_unlock();
4014 health_code_update();
4015 return 0;
4016 }
4017
4018 /*
4019 * Start tracing for the UST session.
4020 */
4021 int ust_app_start_trace_all(struct ltt_ust_session *usess)
4022 {
4023 int ret = 0;
4024 struct lttng_ht_iter iter;
4025 struct ust_app *app;
4026
4027 DBG("Starting all UST traces");
4028
4029 rcu_read_lock();
4030
4031 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4032 ret = ust_app_start_trace(usess, app);
4033 if (ret < 0) {
4034 /* Continue to the next app even on error. */
4035 continue;
4036 }
4037 }
4038
4039 rcu_read_unlock();
4040
4041 return 0;
4042 }
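
/*
 * Illustrative sketch only (kept in a comment, not compiled): how a
 * hypothetical command-side caller might start tracing for every registered
 * application. The example_start_ust() wrapper is an assumption for this
 * example; note that per-application failures are deliberately ignored above
 * and the function always returns 0.
 *
 *    static int example_start_ust(struct ltt_ust_session *usess)
 *    {
 *        assert(usess);
 *        return ust_app_start_trace_all(usess);
 *    }
 */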
4043
4044 /*
4045 * Stop tracing for the UST session.
4046 */
4047 int ust_app_stop_trace_all(struct ltt_ust_session *usess)
4048 {
4049 int ret = 0;
4050 struct lttng_ht_iter iter;
4051 struct ust_app *app;
4052
4053 DBG("Stopping all UST traces");
4054
4055 rcu_read_lock();
4056
4057 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4058 ret = ust_app_stop_trace(usess, app);
4059 if (ret < 0) {
4060 /* Continue to the next app even on error. */
4061 continue;
4062 }
4063 }
4064
4065 /* Flush buffers and push metadata (for UID buffers). */
4066 switch (usess->buffer_type) {
4067 case LTTNG_BUFFER_PER_UID:
4068 {
4069 struct buffer_reg_uid *reg;
4070
4071 /* Flush all per UID buffers associated to that session. */
4072 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
4073 struct ust_registry_session *ust_session_reg;
4074 struct buffer_reg_channel *reg_chan;
4075 struct consumer_socket *socket;
4076
4077 /* Get consumer socket to use to push the metadata. */
4078 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
4079 usess->consumer);
4080 if (!socket) {
4081 /* Ignore request if no consumer is found for the session. */
4082 continue;
4083 }
4084
4085 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
4086 reg_chan, node.node) {
4087 /*
4088 * The following call will print error values so the return
4089 * code is of little importance because whatever happens, we
4090 * have to try them all.
4091 */
4092 (void) consumer_flush_channel(socket, reg_chan->consumer_key);
4093 }
4094
4095 ust_session_reg = reg->registry->reg.ust;
4096 if (!ust_session_reg->metadata_closed) {
4097 /* Push metadata. */
4098 (void) push_metadata(ust_session_reg, usess->consumer);
4099 }
4100 }
4101
4102 break;
4103 }
4104 case LTTNG_BUFFER_PER_PID:
4105 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4106 ret = ust_app_flush_trace(usess, app);
4107 if (ret < 0) {
4108 /* Continue to the next app even on error. */
4109 continue;
4110 }
4111 }
4112 break;
4113 default:
4114 assert(0);
4115 break;
4116 }
4117
4118 rcu_read_unlock();
4119
4120 return 0;
4121 }
4122
4123 /*
4124 * Destroy the UST app session of every registered application.
4125 */
4126 int ust_app_destroy_trace_all(struct ltt_ust_session *usess)
4127 {
4128 int ret = 0;
4129 struct lttng_ht_iter iter;
4130 struct ust_app *app;
4131
4132 DBG("Destroy all UST traces");
4133
4134 rcu_read_lock();
4135
4136 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4137 ret = destroy_trace(usess, app);
4138 if (ret < 0) {
4139 /* Continue to the next app even on error. */
4140 continue;
4141 }
4142 }
4143
4144 rcu_read_unlock();
4145
4146 return 0;
4147 }
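
/*
 * Illustrative sketch only (kept in a comment, not compiled): a plausible
 * teardown order for a UST session, stopping all traces (which also flushes
 * buffers and pushes metadata) before destroying the per-application
 * sessions. The example_teardown_ust() wrapper is an assumption for this
 * example.
 *
 *    static void example_teardown_ust(struct ltt_ust_session *usess)
 *    {
 *        (void) ust_app_stop_trace_all(usess);
 *        (void) ust_app_destroy_trace_all(usess);
 *    }
 */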
4148
4149 /*
4150 * Add channels/events from UST global domain to registered apps at sock.
4151 */
4152 void ust_app_global_update(struct ltt_ust_session *usess, int sock)
4153 {
4154 int ret = 0;
4155 struct lttng_ht_iter iter, uiter;
4156 struct ust_app *app;
4157 struct ust_app_session *ua_sess = NULL;
4158 struct ust_app_channel *ua_chan;
4159 struct ust_app_event *ua_event;
4160 struct ust_app_ctx *ua_ctx;
4161
4162 assert(usess);
4163 assert(sock >= 0);
4164
4165 DBG2("UST app global update for app sock %d for session id %" PRIu64, sock,
4166 usess->id);
4167
4168 rcu_read_lock();
4169
4170 app = ust_app_find_by_sock(sock);
4171 if (app == NULL) {
4172 /*
4173 * The application can already have been unregistered, so this is
4174 * possible; simply stop the update.
4175 */
4176 DBG3("UST app update failed to find app sock %d", sock);
4177 goto error;
4178 }
4179
4180 if (!app->compatible) {
4181 goto error;
4182 }
4183
4184 ret = create_ust_app_session(usess, app, &ua_sess, NULL);
4185 if (ret < 0) {
4186 /* Tracer is probably gone or ENOMEM. */
4187 goto error;
4188 }
4189 assert(ua_sess);
4190
4191 pthread_mutex_lock(&ua_sess->lock);
4192
4193 /*
4194 * We can safely iterate here over all UST app sessions since the create
4195 * ust app session call above made a shadow copy of the UST global domain
4196 * from the ltt ust session.
4197 */
4198 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
4199 node.node) {
4200 /*
4201 * For a metadata channel, handle it differently.
4202 */
4203 if (!strncmp(ua_chan->name, DEFAULT_METADATA_NAME,
4204 sizeof(ua_chan->name))) {
4205 ret = create_ust_app_metadata(ua_sess, app, usess->consumer,
4206 &ua_chan->attr);
4207 if (ret < 0) {
4208 goto error_unlock;
4209 }
4210 /* Remove it from the hash table and continue. */
4211 ret = lttng_ht_del(ua_sess->channels, &iter);
4212 assert(!ret);
4213 delete_ust_app_channel(-1, ua_chan, app);
4214 continue;
4215 } else {
4216 ret = do_create_channel(app, usess, ua_sess, ua_chan);
4217 if (ret < 0) {
4218 /*
4219 * Stop everything. On error, either the application has failed, no
4220 * more file descriptors are available, or we hit ENOMEM, so stopping
4221 * here is the only thing we can do for now.
4222 */
4223 goto error_unlock;
4224 }
4225 }
4226
4227 /*
4228 * Add contexts using the list so they are enabled in the same order the
4229 * user added them.
4230 */
4231 cds_list_for_each_entry(ua_ctx, &ua_chan->ctx_list, list) {
4232 ret = create_ust_channel_context(ua_chan, ua_ctx, app);
4233 if (ret < 0) {
4234 goto error_unlock;
4235 }
4236 }
4237
4239 /* For each event */
4240 cds_lfht_for_each_entry(ua_chan->events->ht, &uiter.iter, ua_event,
4241 node.node) {
4242 ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
4243 if (ret < 0) {
4244 goto error_unlock;
4245 }
4246 }
4247 }
4248
4249 pthread_mutex_unlock(&ua_sess->lock);
4250
4251 if (usess->start_trace) {
4252 ret = ust_app_start_trace(usess, app);
4253 if (ret < 0) {
4254 goto error;
4255 }
4256
4257 DBG2("UST trace started for app pid %d", app->pid);
4258 }
4259
4260 /* Everything went well at this point. */
4261 rcu_read_unlock();
4262 return;
4263
4264 error_unlock:
4265 pthread_mutex_unlock(&ua_sess->lock);
4266 error:
4267 if (ua_sess) {
4268 destroy_app_session(app, ua_sess);
4269 }
4270 rcu_read_unlock();
4271 return;
4272 }
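
/*
 * Illustrative sketch only (kept in a comment, not compiled): the
 * registration path is expected to push the existing UST configuration to a
 * newly registered application by calling ust_app_global_update() with the
 * application's command socket. The example_app_registered() wrapper and the
 * single-session argument are assumptions for this example.
 *
 *    static void example_app_registered(struct ltt_ust_session *usess, int sock)
 *    {
 *        if (usess) {
 *            ust_app_global_update(usess, sock);
 *        }
 *    }
 */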
4273
4274 /*
4275 * Add context to a specific channel for global UST domain.
4276 */
4277 int ust_app_add_ctx_channel_glb(struct ltt_ust_session *usess,
4278 struct ltt_ust_channel *uchan, struct ltt_ust_context *uctx)
4279 {
4280 int ret = 0;
4281 struct lttng_ht_node_str *ua_chan_node;
4282 struct lttng_ht_iter iter, uiter;
4283 struct ust_app_channel *ua_chan = NULL;
4284 struct ust_app_session *ua_sess;
4285 struct ust_app *app;
4286
4287 rcu_read_lock();
4288
4289 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4290 if (!app->compatible) {
4291 /*
4292 * TODO: In time, we should notify the caller of this error by
4293 * telling them that this is a version error.
4294 */
4295 continue;
4296 }
4297 ua_sess = lookup_session_by_app(usess, app);
4298 if (ua_sess == NULL) {
4299 continue;
4300 }
4301
4302 pthread_mutex_lock(&ua_sess->lock);
4303 /* Lookup channel in the ust app session */
4304 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4305 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
4306 if (ua_chan_node == NULL) {
4307 goto next_app;
4308 }
4309 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel,
4310 node);
4311 ret = create_ust_app_channel_context(ua_sess, ua_chan, &uctx->ctx, app);
4312 if (ret < 0) {
4313 goto next_app;
4314 }
4315 next_app:
4316 pthread_mutex_unlock(&ua_sess->lock);
4317 }
4318
4319 rcu_read_unlock();
4320 return ret;
4321 }
4322
4323 /*
4324 * Enable event for a channel from a UST session for a specific PID.
4325 */
4326 int ust_app_enable_event_pid(struct ltt_ust_session *usess,
4327 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent, pid_t pid)
4328 {
4329 int ret = 0;
4330 struct lttng_ht_iter iter;
4331 struct lttng_ht_node_str *ua_chan_node;
4332 struct ust_app *app;
4333 struct ust_app_session *ua_sess;
4334 struct ust_app_channel *ua_chan;
4335 struct ust_app_event *ua_event;
4336
4337 DBG("UST app enabling event %s for PID %d", uevent->attr.name, pid);
4338
4339 rcu_read_lock();
4340
4341 app = ust_app_find_by_pid(pid);
4342 if (app == NULL) {
4343 ERR("UST app enable event per PID %d not found", pid);
4344 ret = -1;
4345 goto end;
4346 }
4347
4348 if (!app->compatible) {
4349 ret = 0;
4350 goto end;
4351 }
4352
4353 ua_sess = lookup_session_by_app(usess, app);
4354 if (!ua_sess) {
4355 /* The application has a problem or is probably dead. */
4356 ret = 0;
4357 goto end;
4358 }
4359
4360 pthread_mutex_lock(&ua_sess->lock);
4361 /* Lookup channel in the ust app session */
4362 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
4363 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
4364 /* If the channel is not found, there is a code flow error */
4365 assert(ua_chan_node);
4366
4367 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4368
4369 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
4370 uevent->filter, uevent->attr.loglevel);
4371 if (ua_event == NULL) {
4372 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
4373 if (ret < 0) {
4374 goto end_unlock;
4375 }
4376 } else {
4377 ret = enable_ust_app_event(ua_sess, ua_event, app);
4378 if (ret < 0) {
4379 goto end_unlock;
4380 }
4381 }
4382
4383 end_unlock:
4384 pthread_mutex_unlock(&ua_sess->lock);
4385 end:
4386 rcu_read_unlock();
4387 return ret;
4388 }
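
/*
 * Illustrative sketch only (kept in a comment, not compiled): enabling one
 * event for a single traced application. As implemented above, 0 is also
 * returned when the application is incompatible or has no session for this
 * usess. The example_enable_one_pid() wrapper is an assumption for this
 * example.
 *
 *    static int example_enable_one_pid(struct ltt_ust_session *usess,
 *            struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent,
 *            pid_t pid)
 *    {
 *        int ret = ust_app_enable_event_pid(usess, uchan, uevent, pid);
 *
 *        if (ret < 0) {
 *            ERR("Failed to enable event %s for PID %d", uevent->attr.name, pid);
 *        }
 *        return ret;
 *    }
 */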
4389
4390 /*
4391 * Disable event for a channel from a UST session for a specific PID.
4392 */
4393 int ust_app_disable_event_pid(struct ltt_ust_session *usess,
4394 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent, pid_t pid)
4395 {
4396 int ret = 0;
4397 struct lttng_ht_iter iter;
4398 struct lttng_ht_node_str *ua_chan_node, *ua_event_node;
4399 struct ust_app *app;
4400 struct ust_app_session *ua_sess;
4401 struct ust_app_channel *ua_chan;
4402 struct ust_app_event *ua_event;
4403
4404 DBG("UST app disabling event %s for PID %d", uevent->attr.name, pid);
4405
4406 rcu_read_lock();
4407
4408 app = ust_app_find_by_pid(pid);
4409 if (app == NULL) {
4410 ERR("UST app disable event per PID %d not found", pid);
4411 ret = -1;
4412 goto error;
4413 }
4414
4415 if (!app->compatible) {
4416 ret = 0;
4417 goto error;
4418 }
4419
4420 ua_sess = lookup_session_by_app(usess, app);
4421 if (!ua_sess) {
4422 /* The application has a problem or is probably dead. */
4423 goto error;
4424 }
4425
4426 /* Lookup channel in the ust app session */
4427 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
4428 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
4429 if (ua_chan_node == NULL) {
4430 /* Channel does not exist, skip disabling */
4431 goto error;
4432 }
4433 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4434
4435 lttng_ht_lookup(ua_chan->events, (void *)uevent->attr.name, &iter);
4436 ua_event_node = lttng_ht_iter_get_node_str(&iter);
4437 if (ua_event_node == NULL) {
4438 /* Event does not exist, skip disabling */
4439 goto error;
4440 }
4441 ua_event = caa_container_of(ua_event_node, struct ust_app_event, node);
4442
4443 ret = disable_ust_app_event(ua_sess, ua_event, app);
4444 if (ret < 0) {
4445 goto error;
4446 }
4447
4448 error:
4449 rcu_read_unlock();
4450 return ret;
4451 }
4452
4453 /*
4454 * Calibrate registered applications.
4455 */
4456 int ust_app_calibrate_glb(struct lttng_ust_calibrate *calibrate)
4457 {
4458 int ret = 0;
4459 struct lttng_ht_iter iter;
4460 struct ust_app *app;
4461
4462 rcu_read_lock();
4463
4464 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4465 if (!app->compatible) {
4466 /*
4467 * TODO: In time, we should notify the caller of this error by
4468 * telling them that this is a version error.
4469 */
4470 continue;
4471 }
4472
4473 health_code_update();
4474
4475 ret = ustctl_calibrate(app->sock, calibrate);
4476 if (ret < 0) {
4477 switch (ret) {
4478 case -ENOSYS:
4479 /* Means that it's not implemented on the tracer side. */
4480 ret = 0;
4481 break;
4482 default:
4483 DBG2("Calibrate app PID %d returned with error %d",
4484 app->pid, ret);
4485 break;
4486 }
4487 }
4488 }
4489
4490 DBG("UST app global domain calibration finished");
4491
4492 rcu_read_unlock();
4493
4494 health_code_update();
4495
4496 return ret;
4497 }
4498
4499 /*
4500 * Receive registration and populate the given msg structure.
4501 *
4502 * On success, return 0; otherwise, return the negative value from the ustctl call.
4503 */
4504 int ust_app_recv_registration(int sock, struct ust_register_msg *msg)
4505 {
4506 int ret;
4507 uint32_t pid, ppid, uid, gid;
4508
4509 assert(msg);
4510
4511 ret = ustctl_recv_reg_msg(sock, &msg->type, &msg->major, &msg->minor,
4512 &pid, &ppid, &uid, &gid,
4513 &msg->bits_per_long,
4514 &msg->uint8_t_alignment,
4515 &msg->uint16_t_alignment,
4516 &msg->uint32_t_alignment,
4517 &msg->uint64_t_alignment,
4518 &msg->long_alignment,
4519 &msg->byte_order,
4520 msg->name);
4521 if (ret < 0) {
4522 switch (-ret) {
4523 case EPIPE:
4524 case ECONNRESET:
4525 case LTTNG_UST_ERR_EXITING:
4526 DBG3("UST app recv reg message failed. Application died");
4527 break;
4528 case LTTNG_UST_ERR_UNSUP_MAJOR:
4529 ERR("UST app recv reg unsupported version %d.%d. Supporting %d.%d",
4530 msg->major, msg->minor, LTTNG_UST_ABI_MAJOR_VERSION,
4531 LTTNG_UST_ABI_MINOR_VERSION);
4532 break;
4533 default:
4534 ERR("UST app recv reg message failed with ret %d", ret);
4535 break;
4536 }
4537 goto error;
4538 }
4539 msg->pid = (pid_t) pid;
4540 msg->ppid = (pid_t) ppid;
4541 msg->uid = (uid_t) uid;
4542 msg->gid = (gid_t) gid;
4543
4544 error:
4545 return ret;
4546 }
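
/*
 * Illustrative sketch only (kept in a comment, not compiled): a possible
 * receive path on a freshly accepted application socket. The
 * example_handle_registration() wrapper and its logging are assumptions for
 * this example; only ust_app_recv_registration() and struct ust_register_msg
 * come from this code base.
 *
 *    static int example_handle_registration(int sock)
 *    {
 *        int ret;
 *        struct ust_register_msg msg;
 *
 *        ret = ust_app_recv_registration(sock, &msg);
 *        if (ret < 0) {
 *            return ret;
 *        }
 *        DBG("App PID %d registered with tracer version %u.%u",
 *                msg.pid, msg.major, msg.minor);
 *        return 0;
 *    }
 */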
4547
4548 /*
4549 * Return a ust app channel object using the application object and the channel
4550 * object descriptor as a key. If not found, NULL is returned. An RCU read side
4551 * lock MUST be acquired before calling this function.
4552 */
4553 static struct ust_app_channel *find_channel_by_objd(struct ust_app *app,
4554 int objd)
4555 {
4556 struct lttng_ht_node_ulong *node;
4557 struct lttng_ht_iter iter;
4558 struct ust_app_channel *ua_chan = NULL;
4559
4560 assert(app);
4561
4562 lttng_ht_lookup(app->ust_objd, (void *)((unsigned long) objd), &iter);
4563 node = lttng_ht_iter_get_node_ulong(&iter);
4564 if (node == NULL) {
4565 DBG2("UST app channel find by objd %d not found", objd);
4566 goto error;
4567 }
4568
4569 ua_chan = caa_container_of(node, struct ust_app_channel, ust_objd_node);
4570
4571 error:
4572 return ua_chan;
4573 }
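
/*
 * Illustrative sketch only (kept in a comment, not compiled):
 * find_channel_by_objd() must be called with the RCU read side lock held and
 * the returned pointer is only safe to use while that lock is held. The
 * example_lookup_channel() wrapper is an assumption for this example.
 *
 *    static void example_lookup_channel(struct ust_app *app, int objd)
 *    {
 *        struct ust_app_channel *ua_chan;
 *
 *        rcu_read_lock();
 *        ua_chan = find_channel_by_objd(app, objd);
 *        if (ua_chan) {
 *            DBG("Found channel %s for objd %d", ua_chan->name, objd);
 *        }
 *        rcu_read_unlock();
 *    }
 */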
4574
4575 /*
4576 * Reply to a register channel notification from an application on the notify
4577 * socket. The channel metadata is also created.
4578 *
4579 * The session UST registry lock is acquired in this function.
4580 *
4581 * On success 0 is returned else a negative value.
4582 */
4583 static int reply_ust_register_channel(int sock, int sobjd, int cobjd,
4584 size_t nr_fields, struct ustctl_field *fields)
4585 {
4586 int ret, ret_code = 0;
4587 uint32_t chan_id, reg_count;
4588 uint64_t chan_reg_key;
4589 enum ustctl_channel_header type;
4590 struct ust_app *app;
4591 struct ust_app_channel *ua_chan;
4592 struct ust_app_session *ua_sess;
4593 struct ust_registry_session *registry;
4594 struct ust_registry_channel *chan_reg;
4595
4596 rcu_read_lock();
4597
4598 /* Lookup application. If not found, there is a code flow error. */
4599 app = find_app_by_notify_sock(sock);
4600 if (!app) {
4601 DBG("Application socket %d is being teardown. Abort event notify",
4602 sock);
4603 ret = 0;
4604 free(fields);
4605 goto error_rcu_unlock;
4606 }
4607
4608 /* Lookup channel by UST object descriptor. */
4609 ua_chan = find_channel_by_objd(app, cobjd);
4610 if (!ua_chan) {
4611 DBG("Application channel is being teardown. Abort event notify");
4612 ret = 0;
4613 free(fields);
4614 goto error_rcu_unlock;
4615 }
4616
4617 assert(ua_chan->session);
4618 ua_sess = ua_chan->session;
4619
4620 /* Get right session registry depending on the session buffer type. */
4621 registry = get_session_registry(ua_sess);
4622 assert(registry);
4623
4624 /* Depending on the buffer type, a different channel key is used. */
4625 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
4626 chan_reg_key = ua_chan->tracing_channel_id;
4627 } else {
4628 chan_reg_key = ua_chan->key;
4629 }
4630
4631 pthread_mutex_lock(&registry->lock);
4632
4633 chan_reg = ust_registry_channel_find(registry, chan_reg_key);
4634 assert(chan_reg);
4635
4636 if (!chan_reg->register_done) {
4637 reg_count = ust_registry_get_event_count(chan_reg);
4638 if (reg_count < 31) {
4639 type = USTCTL_CHANNEL_HEADER_COMPACT;
4640 } else {
4641 type = USTCTL_CHANNEL_HEADER_LARGE;
4642 }
4643
4644 chan_reg->nr_ctx_fields = nr_fields;
4645 chan_reg->ctx_fields = fields;
4646 chan_reg->header_type = type;
4647 } else {
4648 /* Get current already assigned values. */
4649 type = chan_reg->header_type;
4650 free(fields);
4651 /* Set to NULL so the error path does not do a double free. */
4652 fields = NULL;
4653 }
4654 /* Channel id is set during the object creation. */
4655 chan_id = chan_reg->chan_id;
4656
4657 /* Append to metadata */
4658 if (!chan_reg->metadata_dumped) {
4659 ret_code = ust_metadata_channel_statedump(registry, chan_reg);
4660 if (ret_code) {
4661 ERR("Error appending channel metadata (errno = %d)", ret_code);
4662 goto reply;
4663 }
4664 }
4665
4666 reply:
4667 DBG3("UST app replying to register channel key %" PRIu64
4668 " with id %u, type: %d, ret: %d", chan_reg_key, chan_id, type,
4669 ret_code);
4670
4671 ret = ustctl_reply_register_channel(sock, chan_id, type, ret_code);
4672 if (ret < 0) {
4673 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4674 ERR("UST app reply channel failed with ret %d", ret);
4675 } else {
4676 DBG3("UST app reply channel failed. Application died");
4677 }
4678 goto error;
4679 }
4680
4681 /* This channel registry registration is completed. */
4682 chan_reg->register_done = 1;
4683
4684 error:
4685 pthread_mutex_unlock(&registry->lock);
4686 error_rcu_unlock:
4687 rcu_read_unlock();
4688 if (ret) {
4689 free(fields);
4690 }
4691 return ret;
4692 }
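
/*
 * Illustrative sketch only (kept in a comment, not compiled): the header type
 * selection used above. A channel whose registered event count still fits the
 * compact event header (fewer than 31 events at registration time) keeps the
 * compact layout, otherwise the large header is used. The helper name is an
 * assumption for this example.
 *
 *    static enum ustctl_channel_header example_pick_header(uint32_t reg_count)
 *    {
 *        return reg_count < 31 ? USTCTL_CHANNEL_HEADER_COMPACT :
 *                USTCTL_CHANNEL_HEADER_LARGE;
 *    }
 */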
4693
4694 /*
4695 * Add event to the UST channel registry. When the event is added to the
4696 * registry, the metadata is also created. Once done, this replies to the
4697 * application with the appropriate error code.
4698 *
4699 * The session UST registry lock is acquired in the function.
4700 *
4701 * On success 0 is returned else a negative value.
4702 */
4703 static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
4704 char *sig, size_t nr_fields, struct ustctl_field *fields, int loglevel,
4705 char *model_emf_uri)
4706 {
4707 int ret, ret_code;
4708 uint32_t event_id = 0;
4709 uint64_t chan_reg_key;
4710 struct ust_app *app;
4711 struct ust_app_channel *ua_chan;
4712 struct ust_app_session *ua_sess;
4713 struct ust_registry_session *registry;
4714
4715 rcu_read_lock();
4716
4717 /* Lookup application. If not found, there is a code flow error. */
4718 app = find_app_by_notify_sock(sock);
4719 if (!app) {
4720 DBG("Application socket %d is being teardown. Abort event notify",
4721 sock);
4722 ret = 0;
4723 free(sig);
4724 free(fields);
4725 free(model_emf_uri);
4726 goto error_rcu_unlock;
4727 }
4728
4729 /* Lookup channel by UST object descriptor. */
4730 ua_chan = find_channel_by_objd(app, cobjd);
4731 if (!ua_chan) {
4732 DBG("Application channel is being teardown. Abort event notify");
4733 ret = 0;
4734 free(sig);
4735 free(fields);
4736 free(model_emf_uri);
4737 goto error_rcu_unlock;
4738 }
4739
4740 assert(ua_chan->session);
4741 ua_sess = ua_chan->session;
4742
4743 registry = get_session_registry(ua_sess);
4744 assert(registry);
4745
4746 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
4747 chan_reg_key = ua_chan->tracing_channel_id;
4748 } else {
4749 chan_reg_key = ua_chan->key;
4750 }
4751
4752 pthread_mutex_lock(&registry->lock);
4753
4754 /*
4755 * From this point on, this call acquires the ownership of the sig, fields
4756 * and model_emf_uri, meaning any needed free is done inside it. These
4757 * three variables MUST NOT be read or written after this point.
4758 */
4759 ret_code = ust_registry_create_event(registry, chan_reg_key,
4760 sobjd, cobjd, name, sig, nr_fields, fields, loglevel,
4761 model_emf_uri, ua_sess->buffer_type, &event_id,
4762 app);
4763
4764 /*
4765 * The return value is passed back to ustctl so that, in case of an error,
4766 * the application can be notified. It's important not to return a negative
4767 * error or else the application will get closed.
4768 */
4769 ret = ustctl_reply_register_event(sock, event_id, ret_code);
4770 if (ret < 0) {
4771 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4772 ERR("UST app reply event failed with ret %d", ret);
4773 } else {
4774 DBG3("UST app reply event failed. Application died");
4775 }
4776 /*
4777 * No need to wipe the created event since the application socket will
4778 * get closed on error, hence cleaning up everything by itself.
4779 */
4780 goto error;
4781 }
4782
4783 DBG3("UST registry event %s with id %" PRId32 " added successfully",
4784 name, event_id);
4785
4786 error:
4787 pthread_mutex_unlock(&registry->lock);
4788 error_rcu_unlock:
4789 rcu_read_unlock();
4790 return ret;
4791 }
4792
4793 /*
4794 * Handle application notification through the given notify socket.
4795 *
4796 * Return 0 on success or else a negative value.
4797 */
4798 int ust_app_recv_notify(int sock)
4799 {
4800 int ret;
4801 enum ustctl_notify_cmd cmd;
4802
4803 DBG3("UST app receiving notify from sock %d", sock);
4804
4805 ret = ustctl_recv_notify(sock, &cmd);
4806 if (ret < 0) {
4807 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4808 ERR("UST app recv notify failed with ret %d", ret);
4809 } else {
4810 DBG3("UST app recv notify failed. Application died");
4811 }
4812 goto error;
4813 }
4814
4815 switch (cmd) {
4816 case USTCTL_NOTIFY_CMD_EVENT:
4817 {
4818 int sobjd, cobjd, loglevel;
4819 char name[LTTNG_UST_SYM_NAME_LEN], *sig, *model_emf_uri;
4820 size_t nr_fields;
4821 struct ustctl_field *fields;
4822
4823 DBG2("UST app ustctl register event received");
4824
4825 ret = ustctl_recv_register_event(sock, &sobjd, &cobjd, name, &loglevel,
4826 &sig, &nr_fields, &fields, &model_emf_uri);
4827 if (ret < 0) {
4828 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4829 ERR("UST app recv event failed with ret %d", ret);
4830 } else {
4831 DBG3("UST app recv event failed. Application died");
4832 }
4833 goto error;
4834 }
4835
4836 /*
4837 * Add event to the UST registry coming from the notify socket. This
4838 * call will free, if needed, the sig, fields and model_emf_uri. This
4839 * code path loses the ownership of these variables and transfers it
4840 * to that function.
4841 */
4842 ret = add_event_ust_registry(sock, sobjd, cobjd, name, sig, nr_fields,
4843 fields, loglevel, model_emf_uri);
4844 if (ret < 0) {
4845 goto error;
4846 }
4847
4848 break;
4849 }
4850 case USTCTL_NOTIFY_CMD_CHANNEL:
4851 {
4852 int sobjd, cobjd;
4853 size_t nr_fields;
4854 struct ustctl_field *fields;
4855
4856 DBG2("UST app ustctl register channel received");
4857
4858 ret = ustctl_recv_register_channel(sock, &sobjd, &cobjd, &nr_fields,
4859 &fields);
4860 if (ret < 0) {
4861 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4862 ERR("UST app recv channel failed with ret %d", ret);
4863 } else {
4864 DBG3("UST app recv channel failed. Application died");
4865 }
4866 goto error;
4867 }
4868
4869 /*
4870 * Ownership of the fields is transferred to this function call, meaning
4871 * that they will be freed if needed. After this, it's invalid to access
4872 * the fields or clean them up.
4873 */
4874 ret = reply_ust_register_channel(sock, sobjd, cobjd, nr_fields,
4875 fields);
4876 if (ret < 0) {
4877 goto error;
4878 }
4879
4880 break;
4881 }
4882 default:
4883 /* Should NEVER happen. */
4884 assert(0);
4885 }
4886
4887 error:
4888 return ret;
4889 }
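
/*
 * Illustrative sketch only (kept in a comment, not compiled): the notify
 * thread is expected to call ust_app_recv_notify() when the notify socket
 * becomes readable and to unregister the socket once a command fails or the
 * peer hangs up. The example_notify_ready() wrapper is an assumption for this
 * example.
 *
 *    static void example_notify_ready(int sock)
 *    {
 *        if (ust_app_recv_notify(sock) < 0) {
 *            ust_app_notify_sock_unregister(sock);
 *        }
 *    }
 */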
4890
4891 /*
4892 * Once the notify socket hangs up, this is called. First, it tries to find the
4893 * corresponding application. On failure, the call_rcu to close the socket is
4894 * executed. If an application is found, it tries to delete it from the notify
4895 * socket hash table. Whatever the result, it proceeds to the call_rcu.
4896 *
4897 * Note that an object needs to be allocated here so on ENOMEM failure, the
4898 * call RCU is not done but the rest of the cleanup is.
4899 */
4900 void ust_app_notify_sock_unregister(int sock)
4901 {
4902 int err_enomem = 0;
4903 struct lttng_ht_iter iter;
4904 struct ust_app *app;
4905 struct ust_app_notify_sock_obj *obj;
4906
4907 assert(sock >= 0);
4908
4909 rcu_read_lock();
4910
4911 obj = zmalloc(sizeof(*obj));
4912 if (!obj) {
4913 /*
4914 * An ENOMEM is kind of uncool. If this strikes, we continue the
4915 * procedure but the call_rcu will not be called. In this case, we
4916 * accept the fd leak rather than possibly creating an unsynchronized
4917 * state between threads.
4918 *
4919 * TODO: The notify object should be created once the notify socket is
4920 * registered and stored independently from the ust app object. The
4921 * tricky part is to synchronize the teardown of the application and
4922 * this notify object. Let's keep that in mind so we can avoid this
4923 * kind of shenanigans with ENOMEM in the teardown path.
4924 */
4925 err_enomem = 1;
4926 } else {
4927 obj->fd = sock;
4928 }
4929
4930 DBG("UST app notify socket unregister %d", sock);
4931
4932 /*
4933 * Lookup application by notify socket. If this fails, this means that the
4934 * hash table delete has already been done by the application
4935 * unregistration process so we can safely close the notify socket in a
4936 * call RCU.
4937 */
4938 app = find_app_by_notify_sock(sock);
4939 if (!app) {
4940 goto close_socket;
4941 }
4942
4943 iter.iter.node = &app->notify_sock_n.node;
4944
4945 /*
4946 * Whatever happens here, whether we fail or succeed, in both cases we have
4947 * to close the socket after a grace period, hence the call_rcu below. If
4948 * the deletion is successful, the application is not visible anymore to
4949 * other threads, and if it fails it means that it was already deleted
4950 * from the hash table, so either way we just have to close the
4951 * socket.
4952 */
4953 (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
4954
4955 close_socket:
4956 rcu_read_unlock();
4957
4958 /*
4959 * Close the socket after a grace period to avoid the socket being reused
4960 * before the application object is freed, creating a potential race
4961 * between threads trying to add a unique entry to the global hash table.
4962 */
4963 if (!err_enomem) {
4964 call_rcu(&obj->head, close_notify_sock_rcu);
4965 }
4966 }
4967
4968 /*
4969 * Destroy a ust app data structure and free its memory.
4970 */
4971 void ust_app_destroy(struct ust_app *app)
4972 {
4973 if (!app) {
4974 return;
4975 }
4976
4977 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
4978 }
4979
4980 /*
4981 * Take a snapshot for a given UST session. The snapshot is sent to the given
4982 * output.
4983 *
4984 * Return 0 on success or else a negative value.
4985 */
4986 int ust_app_snapshot_record(struct ltt_ust_session *usess,
4987 struct snapshot_output *output, int wait, unsigned int nb_streams)
4988 {
4989 int ret = 0;
4990 struct lttng_ht_iter iter;
4991 struct ust_app *app;
4992 char pathname[PATH_MAX];
4993 uint64_t max_stream_size = 0;
4994
4995 assert(usess);
4996 assert(output);
4997
4998 rcu_read_lock();
4999
5000 /*
5001 * Compute the maximum size of a single stream if a maximum size is
5002 * requested by the caller.
5003 */
5004 if (output->max_size > 0 && nb_streams > 0) {
5005 max_stream_size = output->max_size / nb_streams;
5006 }
5007
5008 switch (usess->buffer_type) {
5009 case LTTNG_BUFFER_PER_UID:
5010 {
5011 struct buffer_reg_uid *reg;
5012
5013 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
5014 struct buffer_reg_channel *reg_chan;
5015 struct consumer_socket *socket;
5016
5017 /* Get consumer socket to use to push the metadata. */
5018 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
5019 usess->consumer);
5020 if (!socket) {
5021 ret = -EINVAL;
5022 goto error;
5023 }
5024
5025 memset(pathname, 0, sizeof(pathname));
5026 ret = snprintf(pathname, sizeof(pathname),
5027 DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH,
5028 reg->uid, reg->bits_per_long);
5029 if (ret < 0) {
5030 PERROR("snprintf snapshot path");
5031 goto error;
5032 }
5033
5034 /* Snapshot each channel registered for this UID buffer registry. */
5035 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
5036 reg_chan, node.node) {
5037
5038 /*
5039 * Make sure the maximum stream size is not lower than the
5040 * subbuffer size or else it's an error since we won't be able to
5041 * snapshot anything.
5042 */
5043 if (max_stream_size &&
5044 reg_chan->subbuf_size > max_stream_size) {
5045 ret = -EINVAL;
5046 DBG3("UST app snapshot record maximum stream size %" PRIu64
5047 " is smaller than subbuffer size of %zu",
5048 max_stream_size, reg_chan->subbuf_size);
5049 goto error;
5050 }
5051 ret = consumer_snapshot_channel(socket, reg_chan->consumer_key, output, 0,
5052 usess->uid, usess->gid, pathname, wait,
5053 max_stream_size);
5054 if (ret < 0) {
5055 goto error;
5056 }
5057 }
5058 ret = consumer_snapshot_channel(socket, reg->registry->reg.ust->metadata_key, output,
5059 1, usess->uid, usess->gid, pathname, wait,
5060 max_stream_size);
5061 if (ret < 0) {
5062 goto error;
5063 }
5064 }
5065 break;
5066 }
5067 case LTTNG_BUFFER_PER_PID:
5068 {
5069 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5070 struct consumer_socket *socket;
5071 struct lttng_ht_iter chan_iter;
5072 struct ust_app_channel *ua_chan;
5073 struct ust_app_session *ua_sess;
5074 struct ust_registry_session *registry;
5075
5076 ua_sess = lookup_session_by_app(usess, app);
5077 if (!ua_sess) {
5078 /* Session not associated with this app. */
5079 continue;
5080 }
5081
5082 /* Get the right consumer socket for the application. */
5083 socket = consumer_find_socket_by_bitness(app->bits_per_long,
5084 output->consumer);
5085 if (!socket) {
5086 ret = -EINVAL;
5087 goto error;
5088 }
5089
5090 /* Add the UST default trace dir to path. */
5091 memset(pathname, 0, sizeof(pathname));
5092 ret = snprintf(pathname, sizeof(pathname), DEFAULT_UST_TRACE_DIR "/%s",
5093 ua_sess->path);
5094 if (ret < 0) {
5095 PERROR("snprintf snapshot path");
5096 goto error;
5097 }
5098
5099 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
5100 ua_chan, node.node) {
5101 /*
5102 * Make sure the maximum stream size is not lower than the
5103 * subbuffer size or else it's an error since we won't be able to
5104 * snapshot anything.
5105 */
5106 if (max_stream_size &&
5107 ua_chan->attr.subbuf_size > max_stream_size) {
5108 ret = -EINVAL;
5109 DBG3("UST app snapshot record maximum stream size %" PRIu64
5110 " is smaller than subbuffer size of %" PRIu64,
5111 max_stream_size, ua_chan->attr.subbuf_size);
5112 goto error;
5113 }
5114
5115 ret = consumer_snapshot_channel(socket, ua_chan->key, output, 0,
5116 ua_sess->euid, ua_sess->egid, pathname, wait,
5117 max_stream_size);
5118 if (ret < 0) {
5119 goto error;
5120 }
5121 }
5122
5123 registry = get_session_registry(ua_sess);
5124 assert(registry);
5125 ret = consumer_snapshot_channel(socket, registry->metadata_key, output,
5126 1, ua_sess->euid, ua_sess->egid, pathname, wait,
5127 max_stream_size);
5128 if (ret < 0) {
5129 goto error;
5130 }
5131 }
5132 break;
5133 }
5134 default:
5135 assert(0);
5136 break;
5137 }
5138
5139 error:
5140 rcu_read_unlock();
5141 return ret;
5142 }
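
/*
 * Illustrative sketch only (kept in a comment, not compiled): the nb_streams
 * argument lets this function spread output->max_size evenly across streams,
 * so a natural caller pairs it with ust_app_get_nb_stream(). The
 * example_snapshot_ust() wrapper and the wait value are assumptions for this
 * example.
 *
 *    static int example_snapshot_ust(struct ltt_ust_session *usess,
 *            struct snapshot_output *output)
 *    {
 *        unsigned int nb_streams;
 *
 *        nb_streams = ust_app_get_nb_stream(usess);
 *        return ust_app_snapshot_record(usess, output, 1, nb_streams);
 *    }
 */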
5143
5144 /*
5145 * Return the number of streams for a UST session.
5146 */
5147 unsigned int ust_app_get_nb_stream(struct ltt_ust_session *usess)
5148 {
5149 unsigned int ret = 0;
5150 struct ust_app *app;
5151 struct lttng_ht_iter iter;
5152
5153 assert(usess);
5154
5155 switch (usess->buffer_type) {
5156 case LTTNG_BUFFER_PER_UID:
5157 {
5158 struct buffer_reg_uid *reg;
5159
5160 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
5161 struct buffer_reg_channel *reg_chan;
5162
5163 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
5164 reg_chan, node.node) {
5165 ret += reg_chan->stream_count;
5166 }
5167 }
5168 break;
5169 }
5170 case LTTNG_BUFFER_PER_PID:
5171 {
5172 rcu_read_lock();
5173 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5174 struct ust_app_channel *ua_chan;
5175 struct ust_app_session *ua_sess;
5176 struct lttng_ht_iter chan_iter;
5177
5178 ua_sess = lookup_session_by_app(usess, app);
5179 if (!ua_sess) {
5180 /* Session not associated with this app. */
5181 continue;
5182 }
5183
5184 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
5185 ua_chan, node.node) {
5186 ret += ua_chan->streams.count;
5187 }
5188 }
5189 rcu_read_unlock();
5190 break;
5191 }
5192 default:
5193 assert(0);
5194 break;
5195 }
5196
5197 return ret;
5198 }