Fix: don't fail on push metadata if no channel
[lttng-tools.git] / src / bin / lttng-sessiond / ust-app.c
1 /*
2 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License, version 2 only,
6 * as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License along
14 * with this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
16 */
17
18 #define _GNU_SOURCE
19 #include <errno.h>
20 #include <inttypes.h>
21 #include <pthread.h>
22 #include <stdio.h>
23 #include <stdlib.h>
24 #include <string.h>
25 #include <sys/stat.h>
26 #include <sys/types.h>
27 #include <unistd.h>
28 #include <urcu/compiler.h>
29 #include <lttng/ust-error.h>
30 #include <signal.h>
31
32 #include <common/common.h>
33 #include <common/sessiond-comm/sessiond-comm.h>
34
35 #include "buffer-registry.h"
36 #include "fd-limit.h"
37 #include "health-sessiond.h"
38 #include "ust-app.h"
39 #include "ust-consumer.h"
40 #include "ust-ctl.h"
41 #include "utils.h"
42
/*
 * Next available channel key. Never read or written directly; access only
 * through get_next_channel_key() which takes next_channel_key_lock.
 */
static uint64_t _next_channel_key;
static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Next available session ID. Never read or written directly; access only
 * through get_next_session_id() which takes next_session_id_lock.
 */
static uint64_t _next_session_id;
static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;
50
51 /*
52 * Return the incremented value of next_channel_key.
53 */
54 static uint64_t get_next_channel_key(void)
55 {
56 uint64_t ret;
57
58 pthread_mutex_lock(&next_channel_key_lock);
59 ret = ++_next_channel_key;
60 pthread_mutex_unlock(&next_channel_key_lock);
61 return ret;
62 }
63
64 /*
65 * Return the atomically incremented value of next_session_id.
66 */
67 static uint64_t get_next_session_id(void)
68 {
69 uint64_t ret;
70
71 pthread_mutex_lock(&next_session_id_lock);
72 ret = ++_next_session_id;
73 pthread_mutex_unlock(&next_session_id_lock);
74 return ret;
75 }
76
77 static void copy_channel_attr_to_ustctl(
78 struct ustctl_consumer_channel_attr *attr,
79 struct lttng_ust_channel_attr *uattr)
80 {
81 /* Copy event attributes since the layout is different. */
82 attr->subbuf_size = uattr->subbuf_size;
83 attr->num_subbuf = uattr->num_subbuf;
84 attr->overwrite = uattr->overwrite;
85 attr->switch_timer_interval = uattr->switch_timer_interval;
86 attr->read_timer_interval = uattr->read_timer_interval;
87 attr->output = uattr->output;
88 }
89
/*
 * Match function for the hash table lookup.
 *
 * It matches an ust app event based on four attributes of the key: the
 * event name, the filter bytecode, the loglevel and the exclusion list.
 *
 * Returns 1 on a full match, 0 otherwise.
 */
static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
{
	struct ust_app_event *event;
	const struct ust_app_ht_key *key;

	assert(node);
	assert(_key);

	event = caa_container_of(node, struct ust_app_event, node.node);
	key = _key;

	/* Match the 4 elements of the key: name, filter, loglevel, exclusions */

	/* Event name */
	if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
		goto no_match;
	}

	/* Event loglevel. */
	if (event->attr.loglevel != key->loglevel) {
		if (event->attr.loglevel_type == LTTNG_UST_LOGLEVEL_ALL
				&& key->loglevel == 0 && event->attr.loglevel == -1) {
			/*
			 * Match is accepted. This is because on event creation, the
			 * loglevel is set to -1 if the event loglevel type is ALL so 0 and
			 * -1 are accepted for this loglevel type since 0 is the one set by
			 * the API when receiving an enable event.
			 */
		} else {
			goto no_match;
		}
	}

	/* One of the filters is NULL, fail. */
	if ((key->filter && !event->filter) || (!key->filter && event->filter)) {
		goto no_match;
	}

	if (key->filter && event->filter) {
		/* Both filters exists, check length followed by the bytecode. */
		if (event->filter->len != key->filter->len ||
				memcmp(event->filter->data, key->filter->data,
					event->filter->len) != 0) {
			goto no_match;
		}
	}

	/* One of the exclusions is NULL, fail. */
	if ((key->exclusion && !event->exclusion) || (!key->exclusion && event->exclusion)) {
		goto no_match;
	}

	if (key->exclusion && event->exclusion) {
		/* Both exclusions exists, check count followed by the names. */
		if (event->exclusion->count != key->exclusion->count ||
				memcmp(event->exclusion->names, key->exclusion->names,
					event->exclusion->count * LTTNG_UST_SYM_NAME_LEN) != 0) {
			goto no_match;
		}
	}


	/* Match. */
	return 1;

no_match:
	return 0;
}
164
/*
 * Unique add of an ust app event in the given ht. This uses the custom
 * ht_match_ust_app_event match function and the event name as hash.
 *
 * The caller must guarantee (e.g. by a prior lookup) that no equivalent
 * event is already present: a duplicate insertion trips the assert below.
 */
static void add_unique_ust_app_event(struct ust_app_channel *ua_chan,
		struct ust_app_event *event)
{
	struct cds_lfht_node *node_ptr;
	struct ust_app_ht_key key;
	struct lttng_ht *ht;

	assert(ua_chan);
	assert(ua_chan->events);
	assert(event);

	ht = ua_chan->events;
	/* Build the full match key from the event's own attributes. */
	key.name = event->attr.name;
	key.filter = event->filter;
	key.loglevel = event->attr.loglevel;
	key.exclusion = event->exclusion;

	node_ptr = cds_lfht_add_unique(ht->ht,
			ht->hash_fct(event->node.key, lttng_ht_seed),
			ht_match_ust_app_event, &key, &event->node.node);
	/* add_unique returns our own node only if no duplicate existed. */
	assert(node_ptr == &event->node.node);
}
191
/*
 * Close the notify socket from the given RCU head object. This MUST be called
 * through a call_rcu().
 *
 * Closes the fd, releases its LTTNG_FD_APPS accounting slot and frees the
 * container object.
 */
static void close_notify_sock_rcu(struct rcu_head *head)
{
	int ret;
	struct ust_app_notify_sock_obj *obj =
		caa_container_of(head, struct ust_app_notify_sock_obj, head);

	/* Must have a valid fd here. */
	assert(obj->fd >= 0);

	ret = close(obj->fd);
	if (ret) {
		ERR("close notify sock %d RCU", obj->fd);
	}
	/* Give back the fd slot taken when the notify socket was accepted. */
	lttng_fd_put(LTTNG_FD_APPS, 1);

	free(obj);
}
213
/*
 * Return the session registry according to the buffer type of the given
 * session.
 *
 * A registry per UID object MUST exists before calling this function or else
 * it assert() if not found. RCU read side lock must be acquired.
 *
 * Returns NULL when no matching per-PID/per-UID buffer registry exists.
 */
static struct ust_registry_session *get_session_registry(
		struct ust_app_session *ua_sess)
{
	struct ust_registry_session *registry = NULL;

	assert(ua_sess);

	switch (ua_sess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
	{
		/* Per-PID registries are keyed by the app session id. */
		struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
		if (!reg_pid) {
			goto error;
		}
		registry = reg_pid->registry->reg.ust;
		break;
	}
	case LTTNG_BUFFER_PER_UID:
	{
		/* Per-UID registries are keyed by tracing session, bitness and uid. */
		struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
				ua_sess->tracing_id, ua_sess->bits_per_long, ua_sess->uid);
		if (!reg_uid) {
			goto error;
		}
		registry = reg_uid->registry->reg.ust;
		break;
	}
	default:
		/* Unknown buffer type is a programming error. */
		assert(0);
	};

error:
	return registry;
}
255
256 /*
257 * Delete ust context safely. RCU read lock must be held before calling
258 * this function.
259 */
260 static
261 void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx)
262 {
263 int ret;
264
265 assert(ua_ctx);
266
267 if (ua_ctx->obj) {
268 ret = ustctl_release_object(sock, ua_ctx->obj);
269 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
270 ERR("UST app sock %d release ctx obj handle %d failed with ret %d",
271 sock, ua_ctx->obj->handle, ret);
272 }
273 free(ua_ctx->obj);
274 }
275 free(ua_ctx);
276 }
277
278 /*
279 * Delete ust app event safely. RCU read lock must be held before calling
280 * this function.
281 */
282 static
283 void delete_ust_app_event(int sock, struct ust_app_event *ua_event)
284 {
285 int ret;
286
287 assert(ua_event);
288
289 free(ua_event->filter);
290 if (ua_event->exclusion != NULL)
291 free(ua_event->exclusion);
292 if (ua_event->obj != NULL) {
293 ret = ustctl_release_object(sock, ua_event->obj);
294 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
295 ERR("UST app sock %d release event obj failed with ret %d",
296 sock, ret);
297 }
298 free(ua_event->obj);
299 }
300 free(ua_event);
301 }
302
/*
 * Release ust data object of the given stream.
 *
 * Return 0 on success or else a negative value.
 */
static int release_ust_app_stream(int sock, struct ust_app_stream *stream)
{
	int ret = 0;

	assert(stream);

	if (stream->obj) {
		ret = ustctl_release_object(sock, stream->obj);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release stream obj failed with ret %d",
					sock, ret);
		}
		/*
		 * A stream accounts for two fds (presumably shm + wakeup fd —
		 * see where the stream object is created); give both back.
		 */
		lttng_fd_put(LTTNG_FD_APPS, 2);
		free(stream->obj);
	}

	return ret;
}
326
/*
 * Delete ust app stream safely. RCU read lock must be held before calling
 * this function.
 *
 * Release errors are already logged by release_ust_app_stream(); the
 * stream is freed unconditionally.
 */
static
void delete_ust_app_stream(int sock, struct ust_app_stream *stream)
{
	assert(stream);

	(void) release_ust_app_stream(sock, stream);
	free(stream);
}
339
/*
 * We need to execute ht_destroy outside of RCU read-side critical
 * section and outside of call_rcu thread, so we postpone its execution
 * using ht_cleanup_push. It is simpler than to change the semantic of
 * the many callers of delete_ust_app_session().
 *
 * RCU callback freeing a channel: queues both hash tables for deferred
 * destruction, then frees the channel structure itself.
 */
static
void delete_ust_app_channel_rcu(struct rcu_head *head)
{
	struct ust_app_channel *ua_chan =
		caa_container_of(head, struct ust_app_channel, rcu_head);

	ht_cleanup_push(ua_chan->ctx);
	ht_cleanup_push(ua_chan->events);
	free(ua_chan);
}
356
/*
 * Delete ust app channel safely. RCU read lock must be held before calling
 * this function.
 *
 * Teardown order: streams, contexts, events, the per-PID registry entry,
 * and finally the tracer-side channel object. The channel structure itself
 * is freed through call_rcu() (see delete_ust_app_channel_rcu).
 */
static
void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;
	struct ust_app_stream *stream, *stmp;
	struct ust_registry_session *registry;

	assert(ua_chan);

	DBG3("UST app deleting channel %s", ua_chan->name);

	/* Wipe stream */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		cds_list_del(&stream->list);
		delete_ust_app_stream(sock, stream);
	}

	/* Wipe context */
	cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
		cds_list_del(&ua_ctx->list);
		ret = lttng_ht_del(ua_chan->ctx, &iter);
		assert(!ret);
		delete_ust_app_ctx(sock, ua_ctx);
	}

	/* Wipe events */
	cds_lfht_for_each_entry(ua_chan->events->ht, &iter.iter, ua_event,
			node.node) {
		ret = lttng_ht_del(ua_chan->events, &iter);
		assert(!ret);
		delete_ust_app_event(sock, ua_event);
	}

	if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
		/* Wipe and free registry from session registry. */
		registry = get_session_registry(ua_chan->session);
		if (registry) {
			ust_registry_channel_del_free(registry, ua_chan->key);
		}
	}

	if (ua_chan->obj != NULL) {
		/* Remove channel from application UST object descriptor. */
		iter.iter.node = &ua_chan->ust_objd_node.node;
		lttng_ht_del(app->ust_objd, &iter);
		ret = ustctl_release_object(sock, ua_chan->obj);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release channel obj failed with ret %d",
					sock, ret);
		}
		/* Give back the fd slot held by the channel object. */
		lttng_fd_put(LTTNG_FD_APPS, 1);
		free(ua_chan->obj);
	}
	call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
}
420
/*
 * Push metadata to consumer socket.
 *
 * The socket lock MUST be acquired.
 * The ust app session lock MUST be acquired.
 *
 * On success, return the len of metadata pushed or else a negative value.
 *
 * Locking note: registry->lock protects the sampling of the unsent
 * metadata segment (offset/len and metadata_len_sent update) and is
 * released BEFORE the actual network push; ordering across concurrent
 * pushers is guaranteed by the socket lock held by the caller.
 */
ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
		struct consumer_socket *socket, int send_zero_data)
{
	int ret;
	char *metadata_str = NULL;
	size_t len, offset;
	ssize_t ret_val;

	assert(registry);
	assert(socket);

	/*
	 * On a push metadata error either the consumer is dead or the metadata
	 * channel has been destroyed because its endpoint might have died (e.g:
	 * relayd). If so, the metadata closed flag is set to 1 so we deny pushing
	 * metadata again which is not valid anymore on the consumer side.
	 *
	 * The ust app session mutex locked allows us to make this check without
	 * the registry lock.
	 */
	if (registry->metadata_closed) {
		return -EPIPE;
	}

	pthread_mutex_lock(&registry->lock);

	/* Only the not-yet-sent tail of the metadata cache is pushed. */
	offset = registry->metadata_len_sent;
	len = registry->metadata_len - registry->metadata_len_sent;
	if (len == 0) {
		DBG3("No metadata to push for metadata key %" PRIu64,
				registry->metadata_key);
		ret_val = len;
		if (send_zero_data) {
			DBG("No metadata to push");
			/* Zero-length push: metadata_str stays NULL, len is 0. */
			goto push_data;
		}
		goto end;
	}

	/* Allocate only what we have to send. */
	metadata_str = zmalloc(len);
	if (!metadata_str) {
		PERROR("zmalloc ust app metadata string");
		ret_val = -ENOMEM;
		goto error;
	}
	/* Copy what we haven't send out. */
	memcpy(metadata_str, registry->metadata + offset, len);
	/*
	 * Account the segment as sent before the push; a failed push flags
	 * the registry closed, so the counter is never relied on afterwards.
	 */
	registry->metadata_len_sent += len;

push_data:
	pthread_mutex_unlock(&registry->lock);
	ret = consumer_push_metadata(socket, registry->metadata_key,
			metadata_str, len, offset);
	if (ret < 0) {
		/*
		 * There is an acceptable race here between the registry metadata key
		 * assignment and the creation on the consumer. The session daemon can
		 * concurrently push metadata for this registry while being created on
		 * the consumer since the metadata key of the registry is assigned
		 * *before* it is setup to avoid the consumer to ask for metadata that
		 * could possibly be not found in the session daemon.
		 *
		 * The metadata will get pushed either by the session being stopped or
		 * the consumer requesting metadata if that race is triggered.
		 */
		if (ret == -LTTCOMM_CONSUMERD_CHANNEL_FAIL) {
			ret = 0;
		}
		ret_val = ret;
		goto error_push;
	}

	free(metadata_str);
	return len;

end:
error:
	/* Reached only with registry->lock still held. */
	pthread_mutex_unlock(&registry->lock);
error_push:
	free(metadata_str);
	return ret_val;
}
512
/*
 * For a given application and session, push metadata to consumer. The session
 * lock MUST be acquired here before calling this.
 *
 * The consumer socket is looked up from the given consumer output by the
 * registry's bitness.
 *
 * Return 0 on success else a negative error. On any failure the registry
 * is flagged metadata_closed so no further push is attempted on it.
 */
static int push_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret_val;
	ssize_t ret;
	struct consumer_socket *socket;

	assert(registry);
	assert(consumer);

	rcu_read_lock();

	/*
	 * Means that no metadata was assigned to the session. This can happens if
	 * no start has been done previously.
	 */
	if (!registry->metadata_key) {
		ret_val = 0;
		goto end_rcu_unlock;
	}

	/* Get consumer socket to use to push the metadata.*/
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret_val = -1;
		goto error_rcu_unlock;
	}

	/*
	 * TODO: Currently, we hold the socket lock around sampling of the next
	 * metadata segment to ensure we send metadata over the consumer socket in
	 * the correct order. This makes the registry lock nest inside the socket
	 * lock.
	 *
	 * Please note that this is a temporary measure: we should move this lock
	 * back into ust_consumer_push_metadata() when the consumer gets the
	 * ability to reorder the metadata it receives.
	 */
	pthread_mutex_lock(socket->lock);
	ret = ust_app_push_metadata(registry, socket, 0);
	pthread_mutex_unlock(socket->lock);
	if (ret < 0) {
		ret_val = ret;
		goto error_rcu_unlock;
	}

	rcu_read_unlock();
	return 0;

error_rcu_unlock:
	/*
	 * On error, flag the registry that the metadata is closed. We were unable
	 * to push anything and this means that either the consumer is not
	 * responding or the metadata cache has been destroyed on the consumer.
	 */
	registry->metadata_closed = 1;
end_rcu_unlock:
	rcu_read_unlock();
	return ret_val;
}
583
/*
 * Send to the consumer a close metadata command for the given session. Once
 * done, the metadata channel is deleted and the session metadata pointer is
 * nullified. The session lock MUST be acquired here unless the application is
 * in the destroy path.
 *
 * Return 0 on success else a negative value.
 */
static int close_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret;
	struct consumer_socket *socket;

	assert(registry);
	assert(consumer);

	rcu_read_lock();

	/* Nothing to do if never started or already closed. */
	if (!registry->metadata_key || registry->metadata_closed) {
		ret = 0;
		goto end;
	}

	/* Get consumer socket to use to push the metadata.*/
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret = -1;
		goto error;
	}

	ret = consumer_close_metadata(socket, registry->metadata_key);
	if (ret < 0) {
		goto error;
	}

	/* Success path deliberately falls through: the flag is set either way. */
error:
	/*
	 * Metadata closed. Even on error this means that the consumer is not
	 * responding or not found so either way a second close should NOT be emit
	 * for this registry.
	 */
	registry->metadata_closed = 1;
end:
	rcu_read_unlock();
	return ret;
}
632
/*
 * We need to execute ht_destroy outside of RCU read-side critical
 * section and outside of call_rcu thread, so we postpone its execution
 * using ht_cleanup_push. It is simpler than to change the semantic of
 * the many callers of delete_ust_app_session().
 *
 * RCU callback freeing an app session: queues the channel hash table for
 * deferred destruction, then frees the session structure.
 */
static
void delete_ust_app_session_rcu(struct rcu_head *head)
{
	struct ust_app_session *ua_sess =
		caa_container_of(head, struct ust_app_session, rcu_head);

	ht_cleanup_push(ua_sess->channels);
	free(ua_sess);
}
648
/*
 * Delete ust app session safely. RCU read lock must be held before calling
 * this function.
 *
 * Flushes remaining metadata to the consumer, wipes all channels, drops the
 * per-PID buffer registry if applicable and releases the tracer-side session
 * handle. The structure itself is freed through call_rcu().
 */
static
void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_channel *ua_chan;
	struct ust_registry_session *registry;

	assert(ua_sess);

	pthread_mutex_lock(&ua_sess->lock);

	registry = get_session_registry(ua_sess);
	if (registry && !registry->metadata_closed) {
		/* Push metadata for application before freeing the application. */
		(void) push_metadata(registry, ua_sess->consumer);

		/*
		 * Don't ask to close metadata for global per UID buffers. Close
		 * metadata only on destroy trace session in this case. Also, the
		 * previous push metadata could have flag the metadata registry to
		 * close so don't send a close command if closed.
		 */
		if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID &&
				!registry->metadata_closed) {
			/* And ask to close it for this session registry. */
			(void) close_metadata(registry, ua_sess->consumer);
		}
	}

	/* Wipe every channel of this session. */
	cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
			node.node) {
		ret = lttng_ht_del(ua_sess->channels, &iter);
		assert(!ret);
		delete_ust_app_channel(sock, ua_chan, app);
	}

	/* In case of per PID, the registry is kept in the session. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
		struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
		if (reg_pid) {
			buffer_reg_pid_remove(reg_pid);
			buffer_reg_pid_destroy(reg_pid);
		}
	}

	if (ua_sess->handle != -1) {
		ret = ustctl_release_handle(sock, ua_sess->handle);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release session handle failed with ret %d",
					sock, ret);
		}
	}
	pthread_mutex_unlock(&ua_sess->lock);

	call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
}
711
/*
 * Delete a traceable application structure from the global list. Never call
 * this function outside of a call_rcu call.
 *
 * RCU read side lock should _NOT_ be held when calling this function.
 */
static
void delete_ust_app(struct ust_app *app)
{
	int ret, sock;
	struct ust_app_session *ua_sess, *tmp_ua_sess;

	/* Delete ust app sessions info */
	sock = app->sock;
	/* Invalidate before teardown so nothing else uses the socket. */
	app->sock = -1;

	/* Wipe sessions */
	cds_list_for_each_entry_safe(ua_sess, tmp_ua_sess, &app->teardown_head,
			teardown_node) {
		/* Free every object in the session and the session. */
		rcu_read_lock();
		delete_ust_app_session(sock, ua_sess, app);
		rcu_read_unlock();
	}

	/* Hash tables are destroyed outside RCU read-side critical sections. */
	ht_cleanup_push(app->sessions);
	ht_cleanup_push(app->ust_objd);

	/*
	 * Wait until we have deleted the application from the sock hash table
	 * before closing this socket, otherwise an application could re-use the
	 * socket ID and race with the teardown, using the same hash table entry.
	 *
	 * It's OK to leave the close in call_rcu. We want it to stay unique for
	 * all RCU readers that could run concurrently with unregister app,
	 * therefore we _need_ to only close that socket after a grace period. So
	 * it should stay in this RCU callback.
	 *
	 * This close() is a very important step of the synchronization model so
	 * every modification to this function must be carefully reviewed.
	 */
	ret = close(sock);
	if (ret) {
		PERROR("close");
	}
	lttng_fd_put(LTTNG_FD_APPS, 1);

	DBG2("UST app pid %d deleted", app->pid);
	free(app);
}
762
/*
 * URCU intermediate call to delete an UST app.
 *
 * Recovers the ust_app from the pid-keyed hash node embedded in it, then
 * performs the full teardown.
 */
static
void delete_ust_app_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_ulong *node =
		caa_container_of(head, struct lttng_ht_node_ulong, head);
	struct ust_app *app =
		caa_container_of(node, struct ust_app, pid_n);

	DBG3("Call RCU deleting app PID %d", app->pid);
	delete_ust_app(app);
}
777
778 /*
779 * Delete the session from the application ht and delete the data structure by
780 * freeing every object inside and releasing them.
781 */
782 static void destroy_app_session(struct ust_app *app,
783 struct ust_app_session *ua_sess)
784 {
785 int ret;
786 struct lttng_ht_iter iter;
787
788 assert(app);
789 assert(ua_sess);
790
791 iter.iter.node = &ua_sess->node.node;
792 ret = lttng_ht_del(app->sessions, &iter);
793 if (ret) {
794 /* Already scheduled for teardown. */
795 goto end;
796 }
797
798 /* Once deleted, free the data structure. */
799 delete_ust_app_session(app->sock, ua_sess, app);
800
801 end:
802 return;
803 }
804
805 /*
806 * Alloc new UST app session.
807 */
808 static
809 struct ust_app_session *alloc_ust_app_session(struct ust_app *app)
810 {
811 struct ust_app_session *ua_sess;
812
813 /* Init most of the default value by allocating and zeroing */
814 ua_sess = zmalloc(sizeof(struct ust_app_session));
815 if (ua_sess == NULL) {
816 PERROR("malloc");
817 goto error_free;
818 }
819
820 ua_sess->handle = -1;
821 ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
822 pthread_mutex_init(&ua_sess->lock, NULL);
823
824 return ua_sess;
825
826 error_free:
827 return NULL;
828 }
829
830 /*
831 * Alloc new UST app channel.
832 */
833 static
834 struct ust_app_channel *alloc_ust_app_channel(char *name,
835 struct ust_app_session *ua_sess,
836 struct lttng_ust_channel_attr *attr)
837 {
838 struct ust_app_channel *ua_chan;
839
840 /* Init most of the default value by allocating and zeroing */
841 ua_chan = zmalloc(sizeof(struct ust_app_channel));
842 if (ua_chan == NULL) {
843 PERROR("malloc");
844 goto error;
845 }
846
847 /* Setup channel name */
848 strncpy(ua_chan->name, name, sizeof(ua_chan->name));
849 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
850
851 ua_chan->enabled = 1;
852 ua_chan->handle = -1;
853 ua_chan->session = ua_sess;
854 ua_chan->key = get_next_channel_key();
855 ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
856 ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
857 lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);
858
859 CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
860 CDS_INIT_LIST_HEAD(&ua_chan->ctx_list);
861
862 /* Copy attributes */
863 if (attr) {
864 /* Translate from lttng_ust_channel to ustctl_consumer_channel_attr. */
865 ua_chan->attr.subbuf_size = attr->subbuf_size;
866 ua_chan->attr.num_subbuf = attr->num_subbuf;
867 ua_chan->attr.overwrite = attr->overwrite;
868 ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
869 ua_chan->attr.read_timer_interval = attr->read_timer_interval;
870 ua_chan->attr.output = attr->output;
871 }
872 /* By default, the channel is a per cpu channel. */
873 ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
874
875 DBG3("UST app channel %s allocated", ua_chan->name);
876
877 return ua_chan;
878
879 error:
880 return NULL;
881 }
882
883 /*
884 * Allocate and initialize a UST app stream.
885 *
886 * Return newly allocated stream pointer or NULL on error.
887 */
888 struct ust_app_stream *ust_app_alloc_stream(void)
889 {
890 struct ust_app_stream *stream = NULL;
891
892 stream = zmalloc(sizeof(*stream));
893 if (stream == NULL) {
894 PERROR("zmalloc ust app stream");
895 goto error;
896 }
897
898 /* Zero could be a valid value for a handle so flag it to -1. */
899 stream->handle = -1;
900
901 error:
902 return stream;
903 }
904
905 /*
906 * Alloc new UST app event.
907 */
908 static
909 struct ust_app_event *alloc_ust_app_event(char *name,
910 struct lttng_ust_event *attr)
911 {
912 struct ust_app_event *ua_event;
913
914 /* Init most of the default value by allocating and zeroing */
915 ua_event = zmalloc(sizeof(struct ust_app_event));
916 if (ua_event == NULL) {
917 PERROR("malloc");
918 goto error;
919 }
920
921 ua_event->enabled = 1;
922 strncpy(ua_event->name, name, sizeof(ua_event->name));
923 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
924 lttng_ht_node_init_str(&ua_event->node, ua_event->name);
925
926 /* Copy attributes */
927 if (attr) {
928 memcpy(&ua_event->attr, attr, sizeof(ua_event->attr));
929 }
930
931 DBG3("UST app event %s allocated", ua_event->name);
932
933 return ua_event;
934
935 error:
936 return NULL;
937 }
938
939 /*
940 * Alloc new UST app context.
941 */
942 static
943 struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context *uctx)
944 {
945 struct ust_app_ctx *ua_ctx;
946
947 ua_ctx = zmalloc(sizeof(struct ust_app_ctx));
948 if (ua_ctx == NULL) {
949 goto error;
950 }
951
952 CDS_INIT_LIST_HEAD(&ua_ctx->list);
953
954 if (uctx) {
955 memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
956 }
957
958 DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);
959
960 error:
961 return ua_ctx;
962 }
963
964 /*
965 * Allocate a filter and copy the given original filter.
966 *
967 * Return allocated filter or NULL on error.
968 */
969 static struct lttng_ust_filter_bytecode *alloc_copy_ust_app_filter(
970 struct lttng_ust_filter_bytecode *orig_f)
971 {
972 struct lttng_ust_filter_bytecode *filter = NULL;
973
974 /* Copy filter bytecode */
975 filter = zmalloc(sizeof(*filter) + orig_f->len);
976 if (!filter) {
977 PERROR("zmalloc alloc ust app filter");
978 goto error;
979 }
980
981 memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
982
983 error:
984 return filter;
985 }
986
/*
 * Find an ust_app using the sock and return it. RCU read side lock must be
 * held before calling this helper function.
 *
 * Returns NULL when no application is registered under this socket.
 */
struct ust_app *ust_app_find_by_sock(int sock)
{
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;

	lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node == NULL) {
		DBG2("UST app find by sock %d not found", sock);
		goto error;
	}

	return caa_container_of(node, struct ust_app, sock_n);

error:
	return NULL;
}
1008
1009 /*
1010 * Find an ust_app using the notify sock and return it. RCU read side lock must
1011 * be held before calling this helper function.
1012 */
1013 static struct ust_app *find_app_by_notify_sock(int sock)
1014 {
1015 struct lttng_ht_node_ulong *node;
1016 struct lttng_ht_iter iter;
1017
1018 lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
1019 &iter);
1020 node = lttng_ht_iter_get_node_ulong(&iter);
1021 if (node == NULL) {
1022 DBG2("UST app find by notify sock %d not found", sock);
1023 goto error;
1024 }
1025
1026 return caa_container_of(node, struct ust_app, notify_sock_n);
1027
1028 error:
1029 return NULL;
1030 }
1031
/*
 * Lookup for an ust app event based on event name, filter bytecode and the
 * event loglevel.
 *
 * The lookup hashes on the name only; full matching (name, filter,
 * loglevel, exclusion) is done by ht_match_ust_app_event.
 *
 * Return an ust_app_event object or NULL on error.
 */
static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
		char *name, struct lttng_ust_filter_bytecode *filter, int loglevel,
		const struct lttng_event_exclusion *exclusion)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *node;
	struct ust_app_event *event = NULL;
	struct ust_app_ht_key key;

	assert(name);
	assert(ht);

	/* Setup key for event lookup. */
	key.name = name;
	key.filter = filter;
	key.loglevel = loglevel;
	/* lttng_event_exclusion and lttng_ust_event_exclusion structures are similar */
	key.exclusion = (struct lttng_ust_event_exclusion *)exclusion;

	/* Lookup using the event name as hash and a custom match fct. */
	cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
			ht_match_ust_app_event, &key, &iter.iter);
	node = lttng_ht_iter_get_node_str(&iter);
	if (node == NULL) {
		goto end;
	}

	event = caa_container_of(node, struct ust_app_event, node);

end:
	return event;
}
1070
1071 /*
1072 * Create the channel context on the tracer.
1073 *
1074 * Called with UST app session lock held.
1075 */
1076 static
1077 int create_ust_channel_context(struct ust_app_channel *ua_chan,
1078 struct ust_app_ctx *ua_ctx, struct ust_app *app)
1079 {
1080 int ret;
1081
1082 health_code_update();
1083
1084 ret = ustctl_add_context(app->sock, &ua_ctx->ctx,
1085 ua_chan->obj, &ua_ctx->obj);
1086 if (ret < 0) {
1087 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1088 ERR("UST app create channel context failed for app (pid: %d) "
1089 "with ret %d", app->pid, ret);
1090 } else {
1091 /*
1092 * This is normal behavior, an application can die during the
1093 * creation process. Don't report an error so the execution can
1094 * continue normally.
1095 */
1096 ret = 0;
1097 DBG3("UST app disable event failed. Application is dead.");
1098 }
1099 goto error;
1100 }
1101
1102 ua_ctx->handle = ua_ctx->obj->handle;
1103
1104 DBG2("UST app context handle %d created successfully for channel %s",
1105 ua_ctx->handle, ua_chan->name);
1106
1107 error:
1108 health_code_update();
1109 return ret;
1110 }
1111
/*
 * Set the filter on the tracer.
 *
 * A NULL filter on the event is not an error: there is simply nothing to
 * set. An application dying mid-operation is also treated as success.
 */
static
int set_ust_event_filter(struct ust_app_event *ua_event,
		struct ust_app *app)
{
	int ret;

	health_code_update();

	if (!ua_event->filter) {
		/* No filter attached to this event; nothing to send. */
		ret = 0;
		goto error;
	}

	ret = ustctl_set_filter(app->sock, ua_event->filter,
			ua_event->obj);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app event %s filter failed for app (pid: %d) "
					"with ret %d", ua_event->attr.name, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app filter event failed. Application is dead.");
		}
		goto error;
	}

	DBG2("UST filter set successfully for event %s", ua_event->name);

error:
	health_code_update();
	return ret;
}
1152
1153 /*
1154 * Set event exclusions on the tracer.
1155 */
1156 static
1157 int set_ust_event_exclusion(struct ust_app_event *ua_event,
1158 struct ust_app *app)
1159 {
1160 int ret;
1161
1162 health_code_update();
1163
1164 if (!ua_event->exclusion || !ua_event->exclusion->count) {
1165 ret = 0;
1166 goto error;
1167 }
1168
1169 ret = ustctl_set_exclusion(app->sock, ua_event->exclusion,
1170 ua_event->obj);
1171 if (ret < 0) {
1172 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1173 ERR("UST app event %s exclusions failed for app (pid: %d) "
1174 "with ret %d", ua_event->attr.name, app->pid, ret);
1175 } else {
1176 /*
1177 * This is normal behavior, an application can die during the
1178 * creation process. Don't report an error so the execution can
1179 * continue normally.
1180 */
1181 ret = 0;
1182 DBG3("UST app event exclusion failed. Application is dead.");
1183 }
1184 goto error;
1185 }
1186
1187 DBG2("UST exclusion set successfully for event %s", ua_event->name);
1188
1189 error:
1190 health_code_update();
1191 return ret;
1192 }
1193
1194 /*
1195 * Disable the specified event on to UST tracer for the UST session.
1196 */
1197 static int disable_ust_event(struct ust_app *app,
1198 struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
1199 {
1200 int ret;
1201
1202 health_code_update();
1203
1204 ret = ustctl_disable(app->sock, ua_event->obj);
1205 if (ret < 0) {
1206 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1207 ERR("UST app event %s disable failed for app (pid: %d) "
1208 "and session handle %d with ret %d",
1209 ua_event->attr.name, app->pid, ua_sess->handle, ret);
1210 } else {
1211 /*
1212 * This is normal behavior, an application can die during the
1213 * creation process. Don't report an error so the execution can
1214 * continue normally.
1215 */
1216 ret = 0;
1217 DBG3("UST app disable event failed. Application is dead.");
1218 }
1219 goto error;
1220 }
1221
1222 DBG2("UST app event %s disabled successfully for app (pid: %d)",
1223 ua_event->attr.name, app->pid);
1224
1225 error:
1226 health_code_update();
1227 return ret;
1228 }
1229
1230 /*
1231 * Disable the specified channel on to UST tracer for the UST session.
1232 */
1233 static int disable_ust_channel(struct ust_app *app,
1234 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1235 {
1236 int ret;
1237
1238 health_code_update();
1239
1240 ret = ustctl_disable(app->sock, ua_chan->obj);
1241 if (ret < 0) {
1242 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1243 ERR("UST app channel %s disable failed for app (pid: %d) "
1244 "and session handle %d with ret %d",
1245 ua_chan->name, app->pid, ua_sess->handle, ret);
1246 } else {
1247 /*
1248 * This is normal behavior, an application can die during the
1249 * creation process. Don't report an error so the execution can
1250 * continue normally.
1251 */
1252 ret = 0;
1253 DBG3("UST app disable channel failed. Application is dead.");
1254 }
1255 goto error;
1256 }
1257
1258 DBG2("UST app channel %s disabled successfully for app (pid: %d)",
1259 ua_chan->name, app->pid);
1260
1261 error:
1262 health_code_update();
1263 return ret;
1264 }
1265
1266 /*
1267 * Enable the specified channel on to UST tracer for the UST session.
1268 */
1269 static int enable_ust_channel(struct ust_app *app,
1270 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1271 {
1272 int ret;
1273
1274 health_code_update();
1275
1276 ret = ustctl_enable(app->sock, ua_chan->obj);
1277 if (ret < 0) {
1278 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1279 ERR("UST app channel %s enable failed for app (pid: %d) "
1280 "and session handle %d with ret %d",
1281 ua_chan->name, app->pid, ua_sess->handle, ret);
1282 } else {
1283 /*
1284 * This is normal behavior, an application can die during the
1285 * creation process. Don't report an error so the execution can
1286 * continue normally.
1287 */
1288 ret = 0;
1289 DBG3("UST app enable channel failed. Application is dead.");
1290 }
1291 goto error;
1292 }
1293
1294 ua_chan->enabled = 1;
1295
1296 DBG2("UST app channel %s enabled successfully for app (pid: %d)",
1297 ua_chan->name, app->pid);
1298
1299 error:
1300 health_code_update();
1301 return ret;
1302 }
1303
1304 /*
1305 * Enable the specified event on to UST tracer for the UST session.
1306 */
1307 static int enable_ust_event(struct ust_app *app,
1308 struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
1309 {
1310 int ret;
1311
1312 health_code_update();
1313
1314 ret = ustctl_enable(app->sock, ua_event->obj);
1315 if (ret < 0) {
1316 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1317 ERR("UST app event %s enable failed for app (pid: %d) "
1318 "and session handle %d with ret %d",
1319 ua_event->attr.name, app->pid, ua_sess->handle, ret);
1320 } else {
1321 /*
1322 * This is normal behavior, an application can die during the
1323 * creation process. Don't report an error so the execution can
1324 * continue normally.
1325 */
1326 ret = 0;
1327 DBG3("UST app enable event failed. Application is dead.");
1328 }
1329 goto error;
1330 }
1331
1332 DBG2("UST app event %s enabled successfully for app (pid: %d)",
1333 ua_event->attr.name, app->pid);
1334
1335 error:
1336 health_code_update();
1337 return ret;
1338 }
1339
1340 /*
1341 * Send channel and stream buffer to application.
1342 *
1343 * Return 0 on success. On error, a negative value is returned.
1344 */
static int send_channel_pid_to_ust(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
{
	int ret;
	struct ust_app_stream *stream, *stmp;

	assert(app);
	assert(ua_sess);
	assert(ua_chan);

	health_code_update();

	DBG("UST app sending channel %s to UST app sock %d", ua_chan->name,
			app->sock);

	/* Send channel to the application. */
	ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Send all streams to application. */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
		if (ret < 0) {
			/*
			 * Streams not yet sent stay on ua_chan->streams and are
			 * released together with the channel.
			 */
			goto error;
		}
		/* We don't need the stream anymore once sent to the tracer. */
		cds_list_del(&stream->list);
		delete_ust_app_stream(-1, stream);
	}
	/* Flag the channel that it is sent to the application. */
	ua_chan->is_sent = 1;

error:
	health_code_update();
	return ret;
}
1385
1386 /*
1387 * Create the specified event onto the UST tracer for a UST session.
1388 *
1389 * Should be called with session mutex held.
1390 */
static
int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
{
	int ret = 0;

	health_code_update();

	/* Create UST event on tracer */
	ret = ustctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
			&ua_event->obj);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Error ustctl create event %s for app pid: %d with ret %d",
					ua_event->attr.name, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app create event failed. Application is dead.");
		}
		goto error;
	}

	ua_event->handle = ua_event->obj->handle;

	DBG2("UST app event %s created successfully for pid:%d",
			ua_event->attr.name, app->pid);

	health_code_update();

	/* Set filter if one is present. */
	if (ua_event->filter) {
		ret = set_ust_event_filter(ua_event, app);
		if (ret < 0) {
			goto error;
		}
	}

	/* Set exclusions for the event */
	if (ua_event->exclusion) {
		ret = set_ust_event_exclusion(ua_event, app);
		if (ret < 0) {
			goto error;
		}
	}

	/* If event not enabled, disable it on the tracer */
	if (ua_event->enabled == 0) {
		ret = disable_ust_event(app, ua_sess, ua_event);
		if (ret < 0) {
			/*
			 * If we hit an EPERM, something is wrong with our disable call. If
			 * we get an EEXIST, there is a problem on the tracer side since we
			 * just created it.
			 */
			switch (ret) {
			case -LTTNG_UST_ERR_PERM:
				/* Code flow problem */
				assert(0);
				/* Fallthrough; only reachable when NDEBUG disables assert. */
			case -LTTNG_UST_ERR_EXIST:
				/* It's OK for our use case. */
				ret = 0;
				break;
			default:
				break;
			}
			goto error;
		}
	}

error:
	health_code_update();
	return ret;
}
1469
1470 /*
1471 * Copy data between an UST app event and a LTT event.
1472 */
static void shadow_copy_event(struct ust_app_event *ua_event,
		struct ltt_ust_event *uevent)
{
	size_t exclusion_alloc_size;

	/* Copy the event name, guaranteeing NUL termination. */
	strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
	ua_event->name[sizeof(ua_event->name) - 1] = '\0';

	ua_event->enabled = uevent->enabled;

	/* Copy event attributes */
	memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));

	/* Copy filter bytecode */
	if (uevent->filter) {
		ua_event->filter = alloc_copy_ust_app_filter(uevent->filter);
		/* Filter might be NULL here in case of ENOMEM. */
	}

	/* Copy exclusion data */
	if (uevent->exclusion) {
		/* The exclusion names are stored right after the header struct. */
		exclusion_alloc_size = sizeof(struct lttng_ust_event_exclusion) +
				LTTNG_UST_SYM_NAME_LEN * uevent->exclusion->count;
		ua_event->exclusion = zmalloc(exclusion_alloc_size);
		if (ua_event->exclusion == NULL) {
			/* Best effort: the event is copied without its exclusions. */
			PERROR("malloc");
		} else {
			memcpy(ua_event->exclusion, uevent->exclusion,
					exclusion_alloc_size);
		}
	}
}
1505
1506 /*
1507 * Copy data between an UST app channel and a LTT channel.
1508 */
static void shadow_copy_channel(struct ust_app_channel *ua_chan,
		struct ltt_ust_channel *uchan)
{
	struct lttng_ht_iter iter;
	struct ltt_ust_event *uevent;
	struct ltt_ust_context *uctx;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;

	DBG2("UST app shadow copy of channel %s started", ua_chan->name);

	/* Copy the channel name, guaranteeing NUL termination. */
	strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
	ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';

	ua_chan->tracefile_size = uchan->tracefile_size;
	ua_chan->tracefile_count = uchan->tracefile_count;

	/* Copy event attributes since the layout is different. */
	ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
	ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
	ua_chan->attr.overwrite = uchan->attr.overwrite;
	ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
	ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
	ua_chan->attr.output = uchan->attr.output;
	/*
	 * Note that the attribute channel type is not set since the channel on the
	 * tracing registry side does not have this information.
	 */

	ua_chan->enabled = uchan->enabled;
	ua_chan->tracing_channel_id = uchan->id;

	/* Copy every context attached to the source channel. */
	cds_list_for_each_entry(uctx, &uchan->ctx_list, list) {
		ua_ctx = alloc_ust_app_ctx(&uctx->ctx);
		if (ua_ctx == NULL) {
			/* Best effort: skip this context on allocation failure. */
			continue;
		}
		lttng_ht_node_init_ulong(&ua_ctx->node,
				(unsigned long) ua_ctx->ctx.ctx);
		lttng_ht_add_unique_ulong(ua_chan->ctx, &ua_ctx->node);
		cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
	}

	/* Copy all events from ltt ust channel to ust app channel */
	cds_lfht_for_each_entry(uchan->events->ht, &iter.iter, uevent, node.node) {
		ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
				uevent->filter, uevent->attr.loglevel, uevent->exclusion);
		if (ua_event == NULL) {
			DBG2("UST event %s not found on shadow copy channel",
					uevent->attr.name);
			ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
			if (ua_event == NULL) {
				/* Best effort: skip this event on allocation failure. */
				continue;
			}
			shadow_copy_event(ua_event, uevent);
			add_unique_ust_app_event(ua_chan, ua_event);
		}
	}

	DBG3("UST app shadow copy of channel %s done", ua_chan->name);
}
1570
1571 /*
1572 * Copy data between a UST app session and a regular LTT session.
1573 */
1574 static void shadow_copy_session(struct ust_app_session *ua_sess,
1575 struct ltt_ust_session *usess, struct ust_app *app)
1576 {
1577 struct lttng_ht_node_str *ua_chan_node;
1578 struct lttng_ht_iter iter;
1579 struct ltt_ust_channel *uchan;
1580 struct ust_app_channel *ua_chan;
1581 time_t rawtime;
1582 struct tm *timeinfo;
1583 char datetime[16];
1584 int ret;
1585
1586 /* Get date and time for unique app path */
1587 time(&rawtime);
1588 timeinfo = localtime(&rawtime);
1589 strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);
1590
1591 DBG2("Shadow copy of session handle %d", ua_sess->handle);
1592
1593 ua_sess->tracing_id = usess->id;
1594 ua_sess->id = get_next_session_id();
1595 ua_sess->uid = app->uid;
1596 ua_sess->gid = app->gid;
1597 ua_sess->euid = usess->uid;
1598 ua_sess->egid = usess->gid;
1599 ua_sess->buffer_type = usess->buffer_type;
1600 ua_sess->bits_per_long = app->bits_per_long;
1601 /* There is only one consumer object per session possible. */
1602 ua_sess->consumer = usess->consumer;
1603 ua_sess->output_traces = usess->output_traces;
1604 ua_sess->live_timer_interval = usess->live_timer_interval;
1605
1606 switch (ua_sess->buffer_type) {
1607 case LTTNG_BUFFER_PER_PID:
1608 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
1609 DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
1610 datetime);
1611 break;
1612 case LTTNG_BUFFER_PER_UID:
1613 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
1614 DEFAULT_UST_TRACE_UID_PATH, ua_sess->uid, app->bits_per_long);
1615 break;
1616 default:
1617 assert(0);
1618 goto error;
1619 }
1620 if (ret < 0) {
1621 PERROR("asprintf UST shadow copy session");
1622 assert(0);
1623 goto error;
1624 }
1625
1626 /* Iterate over all channels in global domain. */
1627 cds_lfht_for_each_entry(usess->domain_global.channels->ht, &iter.iter,
1628 uchan, node.node) {
1629 struct lttng_ht_iter uiter;
1630
1631 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
1632 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
1633 if (ua_chan_node != NULL) {
1634 /* Session exist. Contiuing. */
1635 continue;
1636 }
1637
1638 DBG2("Channel %s not found on shadow session copy, creating it",
1639 uchan->name);
1640 ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
1641 if (ua_chan == NULL) {
1642 /* malloc failed FIXME: Might want to do handle ENOMEM .. */
1643 continue;
1644 }
1645 shadow_copy_channel(ua_chan, uchan);
1646 /*
1647 * The concept of metadata channel does not exist on the tracing
1648 * registry side of the session daemon so this can only be a per CPU
1649 * channel and not metadata.
1650 */
1651 ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
1652
1653 lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
1654 }
1655
1656 error:
1657 return;
1658 }
1659
1660 /*
 * Lookup session wrapper.
1662 */
static
void __lookup_session_by_app(struct ltt_ust_session *usess,
		struct ust_app *app, struct lttng_ht_iter *iter)
{
	/* Get right UST app session from app, keyed by tracing session id. */
	lttng_ht_lookup(app->sessions, &usess->id, iter);
}
1670
1671 /*
1672 * Return ust app session from the app session hashtable using the UST session
1673 * id.
1674 */
1675 static struct ust_app_session *lookup_session_by_app(
1676 struct ltt_ust_session *usess, struct ust_app *app)
1677 {
1678 struct lttng_ht_iter iter;
1679 struct lttng_ht_node_u64 *node;
1680
1681 __lookup_session_by_app(usess, app, &iter);
1682 node = lttng_ht_iter_get_node_u64(&iter);
1683 if (node == NULL) {
1684 goto error;
1685 }
1686
1687 return caa_container_of(node, struct ust_app_session, node);
1688
1689 error:
1690 return NULL;
1691 }
1692
1693 /*
1694 * Setup buffer registry per PID for the given session and application. If none
1695 * is found, a new one is created, added to the global registry and
1696 * initialized. If regp is valid, it's set with the newly created object.
1697 *
1698 * Return 0 on success or else a negative value.
1699 */
static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
		struct ust_app *app, struct buffer_reg_pid **regp)
{
	int ret = 0;
	struct buffer_reg_pid *reg_pid;

	assert(ua_sess);
	assert(app);

	rcu_read_lock();

	reg_pid = buffer_reg_pid_find(ua_sess->id);
	if (!reg_pid) {
		/*
		 * This is the create channel path meaning that if there is NO
		 * registry available, we have to create one for this session.
		 */
		ret = buffer_reg_pid_create(ua_sess->id, &reg_pid);
		if (ret < 0) {
			goto error;
		}
		buffer_reg_pid_add(reg_pid);
	} else {
		/* Registry already exists; skip initialization below. */
		goto end;
	}

	/* Initialize registry. */
	ret = ust_registry_session_init(&reg_pid->registry->reg.ust, app,
			app->bits_per_long, app->uint8_t_alignment,
			app->uint16_t_alignment, app->uint32_t_alignment,
			app->uint64_t_alignment, app->long_alignment,
			app->byte_order, app->version.major,
			app->version.minor);
	if (ret < 0) {
		goto error;
	}

	DBG3("UST app buffer registry per PID created successfully");

end:
	/* Success path (new or existing registry): report it to the caller. */
	if (regp) {
		*regp = reg_pid;
	}
	/* Note: "end" falls through to "error", which only unlocks RCU. */
error:
	rcu_read_unlock();
	return ret;
}
1747
1748 /*
1749 * Setup buffer registry per UID for the given session and application. If none
1750 * is found, a new one is created, added to the global registry and
1751 * initialized. If regp is valid, it's set with the newly created object.
1752 *
1753 * Return 0 on success or else a negative value.
1754 */
static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
		struct ust_app *app, struct buffer_reg_uid **regp)
{
	int ret = 0;
	struct buffer_reg_uid *reg_uid;

	assert(usess);
	assert(app);

	rcu_read_lock();

	/* Per-UID registries are also keyed by the app's bitness. */
	reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
	if (!reg_uid) {
		/*
		 * This is the create channel path meaning that if there is NO
		 * registry available, we have to create one for this session.
		 */
		ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
				LTTNG_DOMAIN_UST, &reg_uid);
		if (ret < 0) {
			goto error;
		}
		buffer_reg_uid_add(reg_uid);
	} else {
		/* Registry already exists; skip initialization below. */
		goto end;
	}

	/* Initialize registry. */
	ret = ust_registry_session_init(&reg_uid->registry->reg.ust, NULL,
			app->bits_per_long, app->uint8_t_alignment,
			app->uint16_t_alignment, app->uint32_t_alignment,
			app->uint64_t_alignment, app->long_alignment,
			app->byte_order, app->version.major,
			app->version.minor);
	if (ret < 0) {
		goto error;
	}
	/* Add node to teardown list of the session. */
	cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);

	DBG3("UST app buffer registry per UID created successfully");

end:
	/* Success path (new or existing registry): report it to the caller. */
	if (regp) {
		*regp = reg_uid;
	}
	/* Note: "end" falls through to "error", which only unlocks RCU. */
error:
	rcu_read_unlock();
	return ret;
}
1805
1806 /*
1807 * Create a session on the tracer side for the given app.
1808 *
1809 * On success, ua_sess_ptr is populated with the session pointer or else left
1810 * untouched. If the session was created, is_created is set to 1. On error,
1811 * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
1812 * be NULL.
1813 *
1814 * Returns 0 on success or else a negative code which is either -ENOMEM or
1815 * -ENOTCONN which is the default code if the ustctl_create_session fails.
1816 */
static int create_ust_app_session(struct ltt_ust_session *usess,
		struct ust_app *app, struct ust_app_session **ua_sess_ptr,
		int *is_created)
{
	int ret, created = 0;
	struct ust_app_session *ua_sess;

	assert(usess);
	assert(app);
	assert(ua_sess_ptr);

	health_code_update();

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
				app->pid, usess->id);
		ua_sess = alloc_ust_app_session(app);
		if (ua_sess == NULL) {
			/* Only malloc can failed so something is really wrong */
			ret = -ENOMEM;
			goto error;
		}
		shadow_copy_session(ua_sess, usess, app);
		created = 1;
	}

	/* Make sure the buffer registry matching the buffering scheme exists. */
	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		/* Init local registry. */
		ret = setup_buffer_reg_pid(ua_sess, app, NULL);
		if (ret < 0) {
			goto error;
		}
		break;
	case LTTNG_BUFFER_PER_UID:
		/* Look for a global registry. If none exists, create one. */
		ret = setup_buffer_reg_uid(usess, app, NULL);
		if (ret < 0) {
			goto error;
		}
		break;
	default:
		assert(0);
		ret = -EINVAL;
		goto error;
	}

	health_code_update();

	/* A handle of -1 means the tracer-side session was never created. */
	if (ua_sess->handle == -1) {
		ret = ustctl_create_session(app->sock);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("Creating session for app pid %d with ret %d",
						app->pid, ret);
			} else {
				DBG("UST app creating session failed. Application is dead");
				/*
				 * This is normal behavior, an application can die during the
				 * creation process. Don't report an error so the execution can
				 * continue normally. This will get flagged ENOTCONN and the
				 * caller will handle it.
				 */
				ret = 0;
			}
			delete_ust_app_session(-1, ua_sess, app);
			if (ret != -ENOMEM) {
				/*
				 * Tracer is probably gone or got an internal error so let's
				 * behave like it will soon unregister or not usable.
				 */
				ret = -ENOTCONN;
			}
			goto error;
		}

		/* On success, ustctl_create_session returns the session handle. */
		ua_sess->handle = ret;

		/* Add ust app session to app's HT */
		lttng_ht_node_init_u64(&ua_sess->node,
				ua_sess->tracing_id);
		lttng_ht_add_unique_u64(app->sessions, &ua_sess->node);

		DBG2("UST app session created successfully with handle %d", ret);
	}

	*ua_sess_ptr = ua_sess;
	if (is_created) {
		*is_created = created;
	}

	/* Everything went well. */
	ret = 0;

error:
	health_code_update();
	return ret;
}
1916
1917 /*
1918 * Create a context for the channel on the tracer.
1919 *
1920 * Called with UST app session lock held and a RCU read side lock.
1921 */
1922 static
1923 int create_ust_app_channel_context(struct ust_app_session *ua_sess,
1924 struct ust_app_channel *ua_chan, struct lttng_ust_context *uctx,
1925 struct ust_app *app)
1926 {
1927 int ret = 0;
1928 struct lttng_ht_iter iter;
1929 struct lttng_ht_node_ulong *node;
1930 struct ust_app_ctx *ua_ctx;
1931
1932 DBG2("UST app adding context to channel %s", ua_chan->name);
1933
1934 lttng_ht_lookup(ua_chan->ctx, (void *)((unsigned long)uctx->ctx), &iter);
1935 node = lttng_ht_iter_get_node_ulong(&iter);
1936 if (node != NULL) {
1937 ret = -EEXIST;
1938 goto error;
1939 }
1940
1941 ua_ctx = alloc_ust_app_ctx(uctx);
1942 if (ua_ctx == NULL) {
1943 /* malloc failed */
1944 ret = -1;
1945 goto error;
1946 }
1947
1948 lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
1949 lttng_ht_add_unique_ulong(ua_chan->ctx, &ua_ctx->node);
1950 cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
1951
1952 ret = create_ust_channel_context(ua_chan, ua_ctx, app);
1953 if (ret < 0) {
1954 goto error;
1955 }
1956
1957 error:
1958 return ret;
1959 }
1960
1961 /*
1962 * Enable on the tracer side a ust app event for the session and channel.
1963 *
1964 * Called with UST app session lock held.
1965 */
1966 static
1967 int enable_ust_app_event(struct ust_app_session *ua_sess,
1968 struct ust_app_event *ua_event, struct ust_app *app)
1969 {
1970 int ret;
1971
1972 ret = enable_ust_event(app, ua_sess, ua_event);
1973 if (ret < 0) {
1974 goto error;
1975 }
1976
1977 ua_event->enabled = 1;
1978
1979 error:
1980 return ret;
1981 }
1982
1983 /*
1984 * Disable on the tracer side a ust app event for the session and channel.
1985 */
1986 static int disable_ust_app_event(struct ust_app_session *ua_sess,
1987 struct ust_app_event *ua_event, struct ust_app *app)
1988 {
1989 int ret;
1990
1991 ret = disable_ust_event(app, ua_sess, ua_event);
1992 if (ret < 0) {
1993 goto error;
1994 }
1995
1996 ua_event->enabled = 0;
1997
1998 error:
1999 return ret;
2000 }
2001
2002 /*
2003 * Lookup ust app channel for session and disable it on the tracer side.
2004 */
2005 static
2006 int disable_ust_app_channel(struct ust_app_session *ua_sess,
2007 struct ust_app_channel *ua_chan, struct ust_app *app)
2008 {
2009 int ret;
2010
2011 ret = disable_ust_channel(app, ua_sess, ua_chan);
2012 if (ret < 0) {
2013 goto error;
2014 }
2015
2016 ua_chan->enabled = 0;
2017
2018 error:
2019 return ret;
2020 }
2021
2022 /*
2023 * Lookup ust app channel for session and enable it on the tracer side. This
2024 * MUST be called with a RCU read side lock acquired.
2025 */
2026 static int enable_ust_app_channel(struct ust_app_session *ua_sess,
2027 struct ltt_ust_channel *uchan, struct ust_app *app)
2028 {
2029 int ret = 0;
2030 struct lttng_ht_iter iter;
2031 struct lttng_ht_node_str *ua_chan_node;
2032 struct ust_app_channel *ua_chan;
2033
2034 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
2035 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
2036 if (ua_chan_node == NULL) {
2037 DBG2("Unable to find channel %s in ust session id %" PRIu64,
2038 uchan->name, ua_sess->tracing_id);
2039 goto error;
2040 }
2041
2042 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
2043
2044 ret = enable_ust_channel(app, ua_sess, ua_chan);
2045 if (ret < 0) {
2046 goto error;
2047 }
2048
2049 error:
2050 return ret;
2051 }
2052
2053 /*
2054 * Ask the consumer to create a channel and get it if successful.
2055 *
2056 * Return 0 on success or else a negative value.
2057 */
static int do_consumer_create_channel(struct ltt_ust_session *usess,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
		int bitness, struct ust_registry_session *registry)
{
	int ret;
	unsigned int nb_fd = 0;
	struct consumer_socket *socket;

	assert(usess);
	assert(ua_sess);
	assert(ua_chan);
	assert(registry);

	rcu_read_lock();
	health_code_update();

	/* Get the right consumer socket for the application. */
	socket = consumer_find_socket_by_bitness(bitness, usess->consumer);
	if (!socket) {
		ret = -EINVAL;
		goto error;
	}

	health_code_update();

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create channel");
		goto error;
	}

	/*
	 * Ask consumer to create channel. The consumer will return the number of
	 * stream we have to expect.
	 */
	ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
			registry);
	if (ret < 0) {
		goto error_ask;
	}

	/*
	 * Compute the number of fd needed before receiving them. It must be 2 per
	 * stream (2 being the default value here).
	 */
	nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;

	/* Reserve the amount of file descriptor we need. */
	ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create channel");
		goto error_fd_get_stream;
	}

	health_code_update();

	/*
	 * Now get the channel from the consumer. This call will populate the stream
	 * list of that channel and set the ust objects.
	 */
	if (usess->consumer->enabled) {
		ret = ust_consumer_get_channel(socket, ua_chan);
		if (ret < 0) {
			goto error_destroy;
		}
	}

	rcu_read_unlock();
	return 0;

	/*
	 * Error labels unwind in reverse order of the acquisitions above:
	 * stream fds, then consumer-side channel, then the channel fd.
	 */
error_destroy:
	lttng_fd_put(LTTNG_FD_APPS, nb_fd);
error_fd_get_stream:
	/*
	 * Initiate a destroy channel on the consumer since we had an error
	 * handling it on our side. The return value is of no importance since we
	 * already have a ret value set by the previous error that we need to
	 * return.
	 */
	(void) ust_consumer_destroy_channel(socket, ua_chan);
error_ask:
	lttng_fd_put(LTTNG_FD_APPS, 1);
error:
	health_code_update();
	rcu_read_unlock();
	return ret;
}
2146
2147 /*
2148 * Duplicate the ust data object of the ust app stream and save it in the
2149 * buffer registry stream.
2150 *
2151 * Return 0 on success or else a negative value.
2152 */
static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
		struct ust_app_stream *stream)
{
	int ret;

	assert(reg_stream);
	assert(stream);

	/* Reserve the amount of file descriptor we need. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 2);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon duplicate stream");
		goto error;
	}

	/* Duplicate object for stream once the original is in the registry. */
	ret = ustctl_duplicate_ust_object_data(&stream->obj,
			reg_stream->obj.ust);
	if (ret < 0) {
		ERR("Duplicate stream obj from %p to %p failed with ret %d",
				reg_stream->obj.ust, stream->obj, ret);
		/* Give back the reserved file descriptors on failure. */
		lttng_fd_put(LTTNG_FD_APPS, 2);
		goto error;
	}
	stream->handle = stream->obj->handle;

error:
	return ret;
}
2182
2183 /*
 * Duplicate the ust data object of the ust app channel and save it in the
2185 * buffer registry channel.
2186 *
2187 * Return 0 on success or else a negative value.
2188 */
static int duplicate_channel_object(struct buffer_reg_channel *reg_chan,
		struct ust_app_channel *ua_chan)
{
	int ret;

	assert(reg_chan);
	assert(ua_chan);

	/* Need one fd for the channel (the old "two fds" comment was stale). */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon duplicate channel");
		goto error_fd_get;
	}

	/* Duplicate object for stream once the original is in the registry. */
	ret = ustctl_duplicate_ust_object_data(&ua_chan->obj, reg_chan->obj.ust);
	if (ret < 0) {
		ERR("Duplicate channel obj from %p to %p failed with ret: %d",
				reg_chan->obj.ust, ua_chan->obj, ret);
		goto error;
	}
	ua_chan->handle = ua_chan->obj->handle;

	return 0;

error:
	/* Release the reserved fd; it was never attached to an object. */
	lttng_fd_put(LTTNG_FD_APPS, 1);
error_fd_get:
	return ret;
}
2220
2221 /*
2222 * For a given channel buffer registry, setup all streams of the given ust
2223 * application channel.
2224 *
2225 * Return 0 on success or else a negative value.
2226 */
2227 static int setup_buffer_reg_streams(struct buffer_reg_channel *reg_chan,
2228 struct ust_app_channel *ua_chan)
2229 {
2230 int ret = 0;
2231 struct ust_app_stream *stream, *stmp;
2232
2233 assert(reg_chan);
2234 assert(ua_chan);
2235
2236 DBG2("UST app setup buffer registry stream");
2237
2238 /* Send all streams to application. */
2239 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
2240 struct buffer_reg_stream *reg_stream;
2241
2242 ret = buffer_reg_stream_create(&reg_stream);
2243 if (ret < 0) {
2244 goto error;
2245 }
2246
2247 /*
2248 * Keep original pointer and nullify it in the stream so the delete
2249 * stream call does not release the object.
2250 */
2251 reg_stream->obj.ust = stream->obj;
2252 stream->obj = NULL;
2253 buffer_reg_stream_add(reg_stream, reg_chan);
2254
2255 /* We don't need the streams anymore. */
2256 cds_list_del(&stream->list);
2257 delete_ust_app_stream(-1, stream);
2258 }
2259
2260 error:
2261 return ret;
2262 }
2263
/*
 * Create a buffer registry channel for the given session registry and
 * application channel object. If regp pointer is valid, it's set with the
 * created object. Important, the created object is NOT added to the session
 * registry hash table.
 *
 * Return 0 on success else a negative value.
 */
static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
		struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp)
{
	int ret;
	struct buffer_reg_channel *reg_chan = NULL;

	assert(reg_sess);
	assert(ua_chan);

	DBG2("UST app creating buffer registry channel for %s", ua_chan->name);

	/* Create buffer registry channel. */
	ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &reg_chan);
	if (ret < 0) {
		goto error_create;
	}
	assert(reg_chan);
	/* Mirror the consumer key and sub-buffer size from the app channel. */
	reg_chan->consumer_key = ua_chan->key;
	reg_chan->subbuf_size = ua_chan->attr.subbuf_size;

	/* Create and add a channel registry to session. */
	ret = ust_registry_channel_add(reg_sess->reg.ust,
			ua_chan->tracing_channel_id);
	if (ret < 0) {
		goto error;
	}
	buffer_reg_channel_add(reg_sess, reg_chan);

	/* Hand the created object back to the caller if requested. */
	if (regp) {
		*regp = reg_chan;
	}

	return 0;

error:
	/* Safe because the registry channel object was not added to any HT. */
	buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
error_create:
	return ret;
}
2312
2313 /*
2314 * Setup buffer registry channel for the given session registry and application
2315 * channel object. If regp pointer is valid, it's set with the created object.
2316 *
2317 * Return 0 on success else a negative value.
2318 */
2319 static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
2320 struct ust_app_channel *ua_chan, struct buffer_reg_channel *reg_chan)
2321 {
2322 int ret;
2323
2324 assert(reg_sess);
2325 assert(reg_chan);
2326 assert(ua_chan);
2327 assert(ua_chan->obj);
2328
2329 DBG2("UST app setup buffer registry channel for %s", ua_chan->name);
2330
2331 /* Setup all streams for the registry. */
2332 ret = setup_buffer_reg_streams(reg_chan, ua_chan);
2333 if (ret < 0) {
2334 goto error;
2335 }
2336
2337 reg_chan->obj.ust = ua_chan->obj;
2338 ua_chan->obj = NULL;
2339
2340 return 0;
2341
2342 error:
2343 buffer_reg_channel_remove(reg_sess, reg_chan);
2344 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2345 return ret;
2346 }
2347
/*
 * Send buffer registry channel to the application.
 *
 * Return 0 on success else a negative value.
 */
static int send_channel_uid_to_ust(struct buffer_reg_channel *reg_chan,
		struct ust_app *app, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct buffer_reg_stream *reg_stream;

	assert(reg_chan);
	assert(app);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app sending buffer registry channel to ust sock %d", app->sock);

	/* Duplicate the registry's channel object into the app channel. */
	ret = duplicate_channel_object(reg_chan, ua_chan);
	if (ret < 0) {
		goto error;
	}

	/* Send channel to the application. */
	ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Send all streams to application; hold the list lock for the walk. */
	pthread_mutex_lock(&reg_chan->stream_list_lock);
	cds_list_for_each_entry(reg_stream, &reg_chan->streams, lnode) {
		struct ust_app_stream stream;

		/* Per-app duplicate so each application gets its own object. */
		ret = duplicate_stream_object(reg_stream, &stream);
		if (ret < 0) {
			goto error_stream_unlock;
		}

		ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
		if (ret < 0) {
			(void) release_ust_app_stream(-1, &stream);
			goto error_stream_unlock;
		}

		/*
		 * The return value is not important here. This function will output an
		 * error if needed.
		 */
		(void) release_ust_app_stream(-1, &stream);
	}
	/* Only flag the channel as sent once every stream made it through. */
	ua_chan->is_sent = 1;

error_stream_unlock:
	pthread_mutex_unlock(&reg_chan->stream_list_lock);
error:
	return ret;
}
2409
/*
 * Create and send to the application the created buffers with per UID buffers.
 *
 * Return 0 on success else a negative value.
 */
static int create_channel_per_uid(struct ust_app *app,
		struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct buffer_reg_uid *reg_uid;
	struct buffer_reg_channel *reg_chan;

	assert(app);
	assert(usess);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app creating channel %s with per UID buffers", ua_chan->name);

	reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
	/*
	 * The session creation handles the creation of this global registry
	 * object. If none can be find, there is a code flow problem or a
	 * teardown race.
	 */
	assert(reg_uid);

	/*
	 * Only the first application of a given UID/bitness creates the shared
	 * registry channel and consumer buffers; later ones reuse them.
	 */
	reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
			reg_uid);
	if (!reg_chan) {
		/* Create the buffer registry channel object. */
		ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &reg_chan);
		if (ret < 0) {
			goto error;
		}
		assert(reg_chan);

		/*
		 * Create the buffers on the consumer side. This call populates the
		 * ust app channel object with all streams and data object.
		 */
		ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
				app->bits_per_long, reg_uid->registry->reg.ust);
		if (ret < 0) {
			/*
			 * Let's remove the previously created buffer registry channel so
			 * it's not visible anymore in the session registry.
			 */
			ust_registry_channel_del_free(reg_uid->registry->reg.ust,
					ua_chan->tracing_channel_id);
			buffer_reg_channel_remove(reg_uid->registry, reg_chan);
			buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
			goto error;
		}

		/*
		 * Setup the streams and add it to the session registry.
		 */
		ret = setup_buffer_reg_channel(reg_uid->registry, ua_chan, reg_chan);
		if (ret < 0) {
			goto error;
		}

	}

	/* Send buffers to the application. */
	ret = send_channel_uid_to_ust(reg_chan, app, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

error:
	return ret;
}
2485
2486 /*
2487 * Create and send to the application the created buffers with per PID buffers.
2488 *
2489 * Return 0 on success else a negative value.
2490 */
2491 static int create_channel_per_pid(struct ust_app *app,
2492 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2493 struct ust_app_channel *ua_chan)
2494 {
2495 int ret;
2496 struct ust_registry_session *registry;
2497
2498 assert(app);
2499 assert(usess);
2500 assert(ua_sess);
2501 assert(ua_chan);
2502
2503 DBG("UST app creating channel %s with per PID buffers", ua_chan->name);
2504
2505 rcu_read_lock();
2506
2507 registry = get_session_registry(ua_sess);
2508 assert(registry);
2509
2510 /* Create and add a new channel registry to session. */
2511 ret = ust_registry_channel_add(registry, ua_chan->key);
2512 if (ret < 0) {
2513 goto error;
2514 }
2515
2516 /* Create and get channel on the consumer side. */
2517 ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
2518 app->bits_per_long, registry);
2519 if (ret < 0) {
2520 goto error;
2521 }
2522
2523 ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
2524 if (ret < 0) {
2525 goto error;
2526 }
2527
2528 error:
2529 rcu_read_unlock();
2530 return ret;
2531 }
2532
2533 /*
2534 * From an already allocated ust app channel, create the channel buffers if
2535 * need and send it to the application. This MUST be called with a RCU read
2536 * side lock acquired.
2537 *
2538 * Return 0 on success or else a negative value.
2539 */
2540 static int do_create_channel(struct ust_app *app,
2541 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2542 struct ust_app_channel *ua_chan)
2543 {
2544 int ret;
2545
2546 assert(app);
2547 assert(usess);
2548 assert(ua_sess);
2549 assert(ua_chan);
2550
2551 /* Handle buffer type before sending the channel to the application. */
2552 switch (usess->buffer_type) {
2553 case LTTNG_BUFFER_PER_UID:
2554 {
2555 ret = create_channel_per_uid(app, usess, ua_sess, ua_chan);
2556 if (ret < 0) {
2557 goto error;
2558 }
2559 break;
2560 }
2561 case LTTNG_BUFFER_PER_PID:
2562 {
2563 ret = create_channel_per_pid(app, usess, ua_sess, ua_chan);
2564 if (ret < 0) {
2565 goto error;
2566 }
2567 break;
2568 }
2569 default:
2570 assert(0);
2571 ret = -EINVAL;
2572 goto error;
2573 }
2574
2575 /* Initialize ust objd object using the received handle and add it. */
2576 lttng_ht_node_init_ulong(&ua_chan->ust_objd_node, ua_chan->handle);
2577 lttng_ht_add_unique_ulong(app->ust_objd, &ua_chan->ust_objd_node);
2578
2579 /* If channel is not enabled, disable it on the tracer */
2580 if (!ua_chan->enabled) {
2581 ret = disable_ust_channel(app, ua_sess, ua_chan);
2582 if (ret < 0) {
2583 goto error;
2584 }
2585 }
2586
2587 error:
2588 return ret;
2589 }
2590
/*
 * Create UST app channel and create it on the tracer. Set ua_chanp of the
 * newly created channel if not NULL.
 *
 * Called with UST app session lock and RCU read-side lock held.
 *
 * Return 0 on success or else a negative value.
 */
static int create_ust_app_channel(struct ust_app_session *ua_sess,
		struct ltt_ust_channel *uchan, struct ust_app *app,
		enum lttng_ust_chan_type type, struct ltt_ust_session *usess,
		struct ust_app_channel **ua_chanp)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app_channel *ua_chan;

	/* Lookup channel in the ust app session; reuse it if already there. */
	lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
	ua_chan_node = lttng_ht_iter_get_node_str(&iter);
	if (ua_chan_node != NULL) {
		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
		goto end;
	}

	ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
	if (ua_chan == NULL) {
		/* Only malloc can fail here */
		ret = -ENOMEM;
		goto error_alloc;
	}
	shadow_copy_channel(ua_chan, uchan);

	/* Set channel type. */
	ua_chan->attr.type = type;

	ret = do_create_channel(app, usess, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

	DBG2("UST app create channel %s for PID %d completed", ua_chan->name,
			app->pid);

	/* Only add the channel if successful on the tracer side. */
	lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);

end:
	if (ua_chanp) {
		*ua_chanp = ua_chan;
	}

	/* Everything went well. */
	return 0;

error:
	/* Close the app socket only if the channel was already sent to it. */
	delete_ust_app_channel(ua_chan->is_sent ? app->sock : -1, ua_chan, app);
error_alloc:
	return ret;
}
2652
/*
 * Create UST app event and create it on the tracer side.
 *
 * Called with ust app session mutex held.
 *
 * Return 0 on success, -EEXIST if the event already exists in the channel, or
 * else a negative value.
 */
static
int create_ust_app_event(struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan, struct ltt_ust_event *uevent,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_event *ua_event;

	/* Get event node */
	ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
			uevent->filter, uevent->attr.loglevel, uevent->exclusion);
	if (ua_event != NULL) {
		ret = -EEXIST;
		goto end;
	}

	/* Does not exist so create one */
	ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
	if (ua_event == NULL) {
		/* Only malloc can failed so something is really wrong */
		ret = -ENOMEM;
		goto end;
	}
	shadow_copy_event(ua_event, uevent);

	/* Create it on the tracer side */
	ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
	if (ret < 0) {
		/* Not found previously means that it does not exist on the tracer */
		assert(ret != -LTTNG_UST_ERR_EXIST);
		goto error;
	}

	add_unique_ust_app_event(ua_chan, ua_event);

	DBG2("UST app create event %s for PID %d completed", ua_event->name,
			app->pid);

end:
	return ret;

error:
	/* Valid. Calling here is already in a read side lock */
	delete_ust_app_event(-1, ua_event);
	return ret;
}
2704
2705 /*
2706 * Create UST metadata and open it on the tracer side.
2707 *
2708 * Called with UST app session lock held and RCU read side lock.
2709 */
2710 static int create_ust_app_metadata(struct ust_app_session *ua_sess,
2711 struct ust_app *app, struct consumer_output *consumer,
2712 struct ustctl_consumer_channel_attr *attr)
2713 {
2714 int ret = 0;
2715 struct ust_app_channel *metadata;
2716 struct consumer_socket *socket;
2717 struct ust_registry_session *registry;
2718
2719 assert(ua_sess);
2720 assert(app);
2721 assert(consumer);
2722
2723 registry = get_session_registry(ua_sess);
2724 assert(registry);
2725
2726 /* Metadata already exists for this registry or it was closed previously */
2727 if (registry->metadata_key || registry->metadata_closed) {
2728 ret = 0;
2729 goto error;
2730 }
2731
2732 /* Allocate UST metadata */
2733 metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, NULL);
2734 if (!metadata) {
2735 /* malloc() failed */
2736 ret = -ENOMEM;
2737 goto error;
2738 }
2739
2740 if (!attr) {
2741 /* Set default attributes for metadata. */
2742 metadata->attr.overwrite = DEFAULT_CHANNEL_OVERWRITE;
2743 metadata->attr.subbuf_size = default_get_metadata_subbuf_size();
2744 metadata->attr.num_subbuf = DEFAULT_METADATA_SUBBUF_NUM;
2745 metadata->attr.switch_timer_interval = DEFAULT_METADATA_SWITCH_TIMER;
2746 metadata->attr.read_timer_interval = DEFAULT_METADATA_READ_TIMER;
2747 metadata->attr.output = LTTNG_UST_MMAP;
2748 metadata->attr.type = LTTNG_UST_CHAN_METADATA;
2749 } else {
2750 memcpy(&metadata->attr, attr, sizeof(metadata->attr));
2751 metadata->attr.output = LTTNG_UST_MMAP;
2752 metadata->attr.type = LTTNG_UST_CHAN_METADATA;
2753 }
2754
2755 /* Need one fd for the channel. */
2756 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
2757 if (ret < 0) {
2758 ERR("Exhausted number of available FD upon create metadata");
2759 goto error;
2760 }
2761
2762 /* Get the right consumer socket for the application. */
2763 socket = consumer_find_socket_by_bitness(app->bits_per_long, consumer);
2764 if (!socket) {
2765 ret = -EINVAL;
2766 goto error_consumer;
2767 }
2768
2769 /*
2770 * Keep metadata key so we can identify it on the consumer side. Assign it
2771 * to the registry *before* we ask the consumer so we avoid the race of the
2772 * consumer requesting the metadata and the ask_channel call on our side
2773 * did not returned yet.
2774 */
2775 registry->metadata_key = metadata->key;
2776
2777 /*
2778 * Ask the metadata channel creation to the consumer. The metadata object
2779 * will be created by the consumer and kept their. However, the stream is
2780 * never added or monitored until we do a first push metadata to the
2781 * consumer.
2782 */
2783 ret = ust_consumer_ask_channel(ua_sess, metadata, consumer, socket,
2784 registry);
2785 if (ret < 0) {
2786 /* Nullify the metadata key so we don't try to close it later on. */
2787 registry->metadata_key = 0;
2788 goto error_consumer;
2789 }
2790
2791 /*
2792 * The setup command will make the metadata stream be sent to the relayd,
2793 * if applicable, and the thread managing the metadatas. This is important
2794 * because after this point, if an error occurs, the only way the stream
2795 * can be deleted is to be monitored in the consumer.
2796 */
2797 ret = consumer_setup_metadata(socket, metadata->key);
2798 if (ret < 0) {
2799 /* Nullify the metadata key so we don't try to close it later on. */
2800 registry->metadata_key = 0;
2801 goto error_consumer;
2802 }
2803
2804 DBG2("UST metadata with key %" PRIu64 " created for app pid %d",
2805 metadata->key, app->pid);
2806
2807 error_consumer:
2808 lttng_fd_put(LTTNG_FD_APPS, 1);
2809 delete_ust_app_channel(-1, metadata, app);
2810 error:
2811 return ret;
2812 }
2813
/*
 * Return pointer to traceable apps list.
 */
struct lttng_ht *ust_app_get_ht(void)
{
	/* Global hash table of registered applications, looked up by PID. */
	return ust_app_ht;
}
2821
2822 /*
2823 * Return ust app pointer or NULL if not found. RCU read side lock MUST be
2824 * acquired before calling this function.
2825 */
2826 struct ust_app *ust_app_find_by_pid(pid_t pid)
2827 {
2828 struct ust_app *app = NULL;
2829 struct lttng_ht_node_ulong *node;
2830 struct lttng_ht_iter iter;
2831
2832 lttng_ht_lookup(ust_app_ht, (void *)((unsigned long) pid), &iter);
2833 node = lttng_ht_iter_get_node_ulong(&iter);
2834 if (node == NULL) {
2835 DBG2("UST app no found with pid %d", pid);
2836 goto error;
2837 }
2838
2839 DBG2("Found UST app by pid %d", pid);
2840
2841 app = caa_container_of(node, struct ust_app, pid_n);
2842
2843 error:
2844 return app;
2845 }
2846
/*
 * Allocate and init an UST app object using the registration information and
 * the command socket. This is called when the command socket connects to the
 * session daemon.
 *
 * The object is returned on success or else NULL.
 */
struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
{
	struct ust_app *lta = NULL;

	assert(msg);
	assert(sock >= 0);

	DBG3("UST app creating application for socket %d", sock);

	/* Refuse registration when no consumerd of matching bitness exists. */
	if ((msg->bits_per_long == 64 &&
				(uatomic_read(&ust_consumerd64_fd) == -EINVAL))
			|| (msg->bits_per_long == 32 &&
				(uatomic_read(&ust_consumerd32_fd) == -EINVAL))) {
		ERR("Registration failed: application \"%s\" (pid: %d) has "
				"%d-bit long, but no consumerd for this size is available.\n",
				msg->name, msg->pid, msg->bits_per_long);
		goto error;
	}

	lta = zmalloc(sizeof(struct ust_app));
	if (lta == NULL) {
		PERROR("malloc");
		goto error;
	}

	/* Copy identity and ABI layout info from the registration message. */
	lta->ppid = msg->ppid;
	lta->uid = msg->uid;
	lta->gid = msg->gid;

	lta->bits_per_long = msg->bits_per_long;
	lta->uint8_t_alignment = msg->uint8_t_alignment;
	lta->uint16_t_alignment = msg->uint16_t_alignment;
	lta->uint32_t_alignment = msg->uint32_t_alignment;
	lta->uint64_t_alignment = msg->uint64_t_alignment;
	lta->long_alignment = msg->long_alignment;
	lta->byte_order = msg->byte_order;

	lta->v_major = msg->major;
	lta->v_minor = msg->minor;
	/* NOTE(review): lttng_ht_new() results are not NULL-checked here. */
	lta->sessions = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	lta->ust_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	/* Notify socket is assigned later; ust_app_add() asserts it is set. */
	lta->notify_sock = -1;

	/* Copy name and make sure it's NULL terminated. */
	strncpy(lta->name, msg->name, sizeof(lta->name));
	lta->name[UST_APP_PROCNAME_LEN] = '\0';

	/*
	 * Before this can be called, when receiving the registration information,
	 * the application compatibility is checked. So, at this point, the
	 * application can work with this session daemon.
	 */
	lta->compatible = 1;

	lta->pid = msg->pid;
	lttng_ht_node_init_ulong(&lta->pid_n, (unsigned long) lta->pid);
	lta->sock = sock;
	lttng_ht_node_init_ulong(&lta->sock_n, (unsigned long) lta->sock);

	CDS_INIT_LIST_HEAD(&lta->teardown_head);

error:
	return lta;
}
2918
/*
 * For a given application object, add it to every hash table.
 */
void ust_app_add(struct ust_app *app)
{
	assert(app);
	/* The notify socket must have been received before adding the app. */
	assert(app->notify_sock >= 0);

	rcu_read_lock();

	/*
	 * On a re-registration, we want to kick out the previous registration of
	 * that pid
	 */
	lttng_ht_add_replace_ulong(ust_app_ht, &app->pid_n);

	/*
	 * The socket _should_ be unique until _we_ call close. So, a add_unique
	 * for the ust_app_ht_by_sock is used which asserts fail if the entry was
	 * already in the table.
	 */
	lttng_ht_add_unique_ulong(ust_app_ht_by_sock, &app->sock_n);

	/* Add application to the notify socket hash table. */
	lttng_ht_node_init_ulong(&app->notify_sock_n, app->notify_sock);
	lttng_ht_add_unique_ulong(ust_app_ht_by_notify_sock, &app->notify_sock_n);

	DBG("App registered with pid:%d ppid:%d uid:%d gid:%d sock:%d name:%s "
			"notify_sock:%d (version %d.%d)", app->pid, app->ppid, app->uid,
			app->gid, app->sock, app->name, app->notify_sock, app->v_major,
			app->v_minor);

	rcu_read_unlock();
}
2953
2954 /*
2955 * Set the application version into the object.
2956 *
2957 * Return 0 on success else a negative value either an errno code or a
2958 * LTTng-UST error code.
2959 */
2960 int ust_app_version(struct ust_app *app)
2961 {
2962 int ret;
2963
2964 assert(app);
2965
2966 ret = ustctl_tracer_version(app->sock, &app->version);
2967 if (ret < 0) {
2968 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
2969 ERR("UST app %d verson failed with ret %d", app->sock, ret);
2970 } else {
2971 DBG3("UST app %d verion failed. Application is dead", app->sock);
2972 }
2973 }
2974
2975 return ret;
2976 }
2977
/*
 * Unregister app by removing it from the global traceable app list and freeing
 * the data struct.
 *
 * The socket is already closed at this point so no close to sock.
 */
void ust_app_unregister(int sock)
{
	struct ust_app *lta;
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;
	struct ust_app_session *ua_sess;
	int ret;

	rcu_read_lock();

	/* Get the node reference for a call_rcu */
	lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	assert(node);

	lta = caa_container_of(node, struct ust_app, sock_n);
	DBG("PID %d unregistering with sock %d", lta->pid, sock);

	/* Remove application from the socket hash table */
	ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
	assert(!ret);

	/*
	 * Remove application from notify hash table. The thread handling the
	 * notify socket could have deleted the node so ignore on error because
	 * either way it's valid. The close of that socket is handled by the other
	 * thread.
	 */
	iter.iter.node = &lta->notify_sock_n.node;
	(void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);

	/*
	 * Ignore return value since the node might have been removed before by an
	 * add replace during app registration because the PID can be reassigned by
	 * the OS.
	 */
	iter.iter.node = &lta->pid_n.node;
	ret = lttng_ht_del(ust_app_ht, &iter);
	if (ret) {
		DBG3("Unregister app by PID %d failed. This can happen on pid reuse",
				lta->pid);
	}

	/* Remove sessions so they are not visible during deletion.*/
	cds_lfht_for_each_entry(lta->sessions->ht, &iter.iter, ua_sess,
			node.node) {
		struct ust_registry_session *registry;

		ret = lttng_ht_del(lta->sessions, &iter);
		if (ret) {
			/* The session was already removed so scheduled for teardown. */
			continue;
		}

		/*
		 * Add session to list for teardown. This is safe since at this point we
		 * are the only one using this list.
		 */
		pthread_mutex_lock(&ua_sess->lock);

		/*
		 * Normally, this is done in the delete session process which is
		 * executed in the call rcu below. However, upon registration we can't
		 * afford to wait for the grace period before pushing data or else the
		 * data pending feature can race between the unregistration and stop
		 * command where the data pending command is sent *before* the grace
		 * period ended.
		 *
		 * The close metadata below nullifies the metadata pointer in the
		 * session so the delete session will NOT push/close a second time.
		 */
		registry = get_session_registry(ua_sess);
		if (registry && !registry->metadata_closed) {
			/* Push metadata for application before freeing the application. */
			(void) push_metadata(registry, ua_sess->consumer);

			/*
			 * Don't ask to close metadata for global per UID buffers. Close
			 * metadata only on destroy trace session in this case. Also, the
			 * previous push metadata could have flag the metadata registry to
			 * close so don't send a close command if closed.
			 */
			if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID &&
					!registry->metadata_closed) {
				/* And ask to close it for this session registry. */
				(void) close_metadata(registry, ua_sess->consumer);
			}
		}

		cds_list_add(&ua_sess->teardown_node, &lta->teardown_head);
		pthread_mutex_unlock(&ua_sess->lock);
	}

	/* Free memory after the RCU grace period. */
	call_rcu(&lta->pid_n.head, delete_ust_app_rcu);

	rcu_read_unlock();
	return;
}
3083
3084 /*
3085 * Return traceable_app_count
3086 */
3087 unsigned long ust_app_list_count(void)
3088 {
3089 unsigned long count;
3090
3091 rcu_read_lock();
3092 count = lttng_ht_get_count(ust_app_ht);
3093 rcu_read_unlock();
3094
3095 return count;
3096 }
3097
3098 /*
3099 * Fill events array with all events name of all registered apps.
3100 */
3101 int ust_app_list_events(struct lttng_event **events)
3102 {
3103 int ret, handle;
3104 size_t nbmem, count = 0;
3105 struct lttng_ht_iter iter;
3106 struct ust_app *app;
3107 struct lttng_event *tmp_event;
3108
3109 nbmem = UST_APP_EVENT_LIST_SIZE;
3110 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event));
3111 if (tmp_event == NULL) {
3112 PERROR("zmalloc ust app events");
3113 ret = -ENOMEM;
3114 goto error;
3115 }
3116
3117 rcu_read_lock();
3118
3119 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3120 struct lttng_ust_tracepoint_iter uiter;
3121
3122 health_code_update();
3123
3124 if (!app->compatible) {
3125 /*
3126 * TODO: In time, we should notice the caller of this error by
3127 * telling him that this is a version error.
3128 */
3129 continue;
3130 }
3131 handle = ustctl_tracepoint_list(app->sock);
3132 if (handle < 0) {
3133 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
3134 ERR("UST app list events getting handle failed for app pid %d",
3135 app->pid);
3136 }
3137 continue;
3138 }
3139
3140 while ((ret = ustctl_tracepoint_list_get(app->sock, handle,
3141 &uiter)) != -LTTNG_UST_ERR_NOENT) {
3142 /* Handle ustctl error. */
3143 if (ret < 0) {
3144 free(tmp_event);
3145 if (ret != -LTTNG_UST_ERR_EXITING || ret != -EPIPE) {
3146 ERR("UST app tp list get failed for app %d with ret %d",
3147 app->sock, ret);
3148 } else {
3149 DBG3("UST app tp list get failed. Application is dead");
3150 /*
3151 * This is normal behavior, an application can die during the
3152 * creation process. Don't report an error so the execution can
3153 * continue normally. Continue normal execution.
3154 */
3155 break;
3156 }
3157 goto rcu_error;
3158 }
3159
3160 health_code_update();
3161 if (count >= nbmem) {
3162 /* In case the realloc fails, we free the memory */
3163 void *ptr;
3164
3165 DBG2("Reallocating event list from %zu to %zu entries", nbmem,
3166 2 * nbmem);
3167 nbmem *= 2;
3168 ptr = realloc(tmp_event, nbmem * sizeof(struct lttng_event));
3169 if (ptr == NULL) {
3170 PERROR("realloc ust app events");
3171 free(tmp_event);
3172 ret = -ENOMEM;
3173 goto rcu_error;
3174 }
3175 tmp_event = ptr;
3176 }
3177 memcpy(tmp_event[count].name, uiter.name, LTTNG_UST_SYM_NAME_LEN);
3178 tmp_event[count].loglevel = uiter.loglevel;
3179 tmp_event[count].type = (enum lttng_event_type) LTTNG_UST_TRACEPOINT;
3180 tmp_event[count].pid = app->pid;
3181 tmp_event[count].enabled = -1;
3182 count++;
3183 }
3184 }
3185
3186 ret = count;
3187 *events = tmp_event;
3188
3189 DBG2("UST app list events done (%zu events)", count);
3190
3191 rcu_error:
3192 rcu_read_unlock();
3193 error:
3194 health_code_update();
3195 return ret;
3196 }
3197
3198 /*
3199 * Fill events array with all events name of all registered apps.
3200 */
3201 int ust_app_list_event_fields(struct lttng_event_field **fields)
3202 {
3203 int ret, handle;
3204 size_t nbmem, count = 0;
3205 struct lttng_ht_iter iter;
3206 struct ust_app *app;
3207 struct lttng_event_field *tmp_event;
3208
3209 nbmem = UST_APP_EVENT_LIST_SIZE;
3210 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event_field));
3211 if (tmp_event == NULL) {
3212 PERROR("zmalloc ust app event fields");
3213 ret = -ENOMEM;
3214 goto error;
3215 }
3216
3217 rcu_read_lock();
3218
3219 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3220 struct lttng_ust_field_iter uiter;
3221
3222 health_code_update();
3223
3224 if (!app->compatible) {
3225 /*
3226 * TODO: In time, we should notice the caller of this error by
3227 * telling him that this is a version error.
3228 */
3229 continue;
3230 }
3231 handle = ustctl_tracepoint_field_list(app->sock);
3232 if (handle < 0) {
3233 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
3234 ERR("UST app list field getting handle failed for app pid %d",
3235 app->pid);
3236 }
3237 continue;
3238 }
3239
3240 while ((ret = ustctl_tracepoint_field_list_get(app->sock, handle,
3241 &uiter)) != -LTTNG_UST_ERR_NOENT) {
3242 /* Handle ustctl error. */
3243 if (ret < 0) {
3244 free(tmp_event);
3245 if (ret != -LTTNG_UST_ERR_EXITING || ret != -EPIPE) {
3246 ERR("UST app tp list field failed for app %d with ret %d",
3247 app->sock, ret);
3248 } else {
3249 DBG3("UST app tp list field failed. Application is dead");
3250 /*
3251 * This is normal behavior, an application can die during the
3252 * creation process. Don't report an error so the execution can
3253 * continue normally.
3254 */
3255 break;
3256 }
3257 goto rcu_error;
3258 }
3259
3260 health_code_update();
3261 if (count >= nbmem) {
3262 /* In case the realloc fails, we free the memory */
3263 void *ptr;
3264
3265 DBG2("Reallocating event field list from %zu to %zu entries", nbmem,
3266 2 * nbmem);
3267 nbmem *= 2;
3268 ptr = realloc(tmp_event, nbmem * sizeof(struct lttng_event_field));
3269 if (ptr == NULL) {
3270 PERROR("realloc ust app event fields");
3271 free(tmp_event);
3272 ret = -ENOMEM;
3273 goto rcu_error;
3274 }
3275 tmp_event = ptr;
3276 }
3277
3278 memcpy(tmp_event[count].field_name, uiter.field_name, LTTNG_UST_SYM_NAME_LEN);
3279 /* Mapping between these enums matches 1 to 1. */
3280 tmp_event[count].type = (enum lttng_event_field_type) uiter.type;
3281 tmp_event[count].nowrite = uiter.nowrite;
3282
3283 memcpy(tmp_event[count].event.name, uiter.event_name, LTTNG_UST_SYM_NAME_LEN);
3284 tmp_event[count].event.loglevel = uiter.loglevel;
3285 tmp_event[count].event.type = LTTNG_EVENT_TRACEPOINT;
3286 tmp_event[count].event.pid = app->pid;
3287 tmp_event[count].event.enabled = -1;
3288 count++;
3289 }
3290 }
3291
3292 ret = count;
3293 *fields = tmp_event;
3294
3295 DBG2("UST app list event fields done (%zu events)", count);
3296
3297 rcu_error:
3298 rcu_read_unlock();
3299 error:
3300 health_code_update();
3301 return ret;
3302 }
3303
3304 /*
3305 * Free and clean all traceable apps of the global list.
3306 *
3307 * Should _NOT_ be called with RCU read-side lock held.
3308 */
3309 void ust_app_clean_list(void)
3310 {
3311 int ret;
3312 struct ust_app *app;
3313 struct lttng_ht_iter iter;
3314
3315 DBG2("UST app cleaning registered apps hash table");
3316
3317 rcu_read_lock();
3318
3319 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3320 ret = lttng_ht_del(ust_app_ht, &iter);
3321 assert(!ret);
3322 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
3323 }
3324
3325 /* Cleanup socket hash table */
3326 cds_lfht_for_each_entry(ust_app_ht_by_sock->ht, &iter.iter, app,
3327 sock_n.node) {
3328 ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
3329 assert(!ret);
3330 }
3331
3332 /* Cleanup notify socket hash table */
3333 cds_lfht_for_each_entry(ust_app_ht_by_notify_sock->ht, &iter.iter, app,
3334 notify_sock_n.node) {
3335 ret = lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
3336 assert(!ret);
3337 }
3338 rcu_read_unlock();
3339
3340 /* Destroy is done only when the ht is empty */
3341 ht_cleanup_push(ust_app_ht);
3342 ht_cleanup_push(ust_app_ht_by_sock);
3343 ht_cleanup_push(ust_app_ht_by_notify_sock);
3344 }
3345
3346 /*
3347 * Init UST app hash table.
3348 */
3349 void ust_app_ht_alloc(void)
3350 {
3351 ust_app_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3352 ust_app_ht_by_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3353 ust_app_ht_by_notify_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3354 }
3355
3356 /*
3357 * For a specific UST session, disable the channel for all registered apps.
3358 */
3359 int ust_app_disable_channel_glb(struct ltt_ust_session *usess,
3360 struct ltt_ust_channel *uchan)
3361 {
3362 int ret = 0;
3363 struct lttng_ht_iter iter;
3364 struct lttng_ht_node_str *ua_chan_node;
3365 struct ust_app *app;
3366 struct ust_app_session *ua_sess;
3367 struct ust_app_channel *ua_chan;
3368
3369 if (usess == NULL || uchan == NULL) {
3370 ERR("Disabling UST global channel with NULL values");
3371 ret = -1;
3372 goto error;
3373 }
3374
3375 DBG2("UST app disabling channel %s from global domain for session id %" PRIu64,
3376 uchan->name, usess->id);
3377
3378 rcu_read_lock();
3379
3380 /* For every registered applications */
3381 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3382 struct lttng_ht_iter uiter;
3383 if (!app->compatible) {
3384 /*
3385 * TODO: In time, we should notice the caller of this error by
3386 * telling him that this is a version error.
3387 */
3388 continue;
3389 }
3390 ua_sess = lookup_session_by_app(usess, app);
3391 if (ua_sess == NULL) {
3392 continue;
3393 }
3394
3395 /* Get channel */
3396 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3397 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3398 /* If the session if found for the app, the channel must be there */
3399 assert(ua_chan_node);
3400
3401 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3402 /* The channel must not be already disabled */
3403 assert(ua_chan->enabled == 1);
3404
3405 /* Disable channel onto application */
3406 ret = disable_ust_app_channel(ua_sess, ua_chan, app);
3407 if (ret < 0) {
3408 /* XXX: We might want to report this error at some point... */
3409 continue;
3410 }
3411 }
3412
3413 rcu_read_unlock();
3414
3415 error:
3416 return ret;
3417 }
3418
3419 /*
3420 * For a specific UST session, enable the channel for all registered apps.
3421 */
3422 int ust_app_enable_channel_glb(struct ltt_ust_session *usess,
3423 struct ltt_ust_channel *uchan)
3424 {
3425 int ret = 0;
3426 struct lttng_ht_iter iter;
3427 struct ust_app *app;
3428 struct ust_app_session *ua_sess;
3429
3430 if (usess == NULL || uchan == NULL) {
3431 ERR("Adding UST global channel to NULL values");
3432 ret = -1;
3433 goto error;
3434 }
3435
3436 DBG2("UST app enabling channel %s to global domain for session id %" PRIu64,
3437 uchan->name, usess->id);
3438
3439 rcu_read_lock();
3440
3441 /* For every registered applications */
3442 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3443 if (!app->compatible) {
3444 /*
3445 * TODO: In time, we should notice the caller of this error by
3446 * telling him that this is a version error.
3447 */
3448 continue;
3449 }
3450 ua_sess = lookup_session_by_app(usess, app);
3451 if (ua_sess == NULL) {
3452 continue;
3453 }
3454
3455 /* Enable channel onto application */
3456 ret = enable_ust_app_channel(ua_sess, uchan, app);
3457 if (ret < 0) {
3458 /* XXX: We might want to report this error at some point... */
3459 continue;
3460 }
3461 }
3462
3463 rcu_read_unlock();
3464
3465 error:
3466 return ret;
3467 }
3468
3469 /*
3470 * Disable an event in a channel and for a specific session.
3471 */
3472 int ust_app_disable_event_glb(struct ltt_ust_session *usess,
3473 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
3474 {
3475 int ret = 0;
3476 struct lttng_ht_iter iter, uiter;
3477 struct lttng_ht_node_str *ua_chan_node, *ua_event_node;
3478 struct ust_app *app;
3479 struct ust_app_session *ua_sess;
3480 struct ust_app_channel *ua_chan;
3481 struct ust_app_event *ua_event;
3482
3483 DBG("UST app disabling event %s for all apps in channel "
3484 "%s for session id %" PRIu64,
3485 uevent->attr.name, uchan->name, usess->id);
3486
3487 rcu_read_lock();
3488
3489 /* For all registered applications */
3490 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3491 if (!app->compatible) {
3492 /*
3493 * TODO: In time, we should notice the caller of this error by
3494 * telling him that this is a version error.
3495 */
3496 continue;
3497 }
3498 ua_sess = lookup_session_by_app(usess, app);
3499 if (ua_sess == NULL) {
3500 /* Next app */
3501 continue;
3502 }
3503
3504 /* Lookup channel in the ust app session */
3505 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3506 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3507 if (ua_chan_node == NULL) {
3508 DBG2("Channel %s not found in session id %" PRIu64 " for app pid %d."
3509 "Skipping", uchan->name, usess->id, app->pid);
3510 continue;
3511 }
3512 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3513
3514 lttng_ht_lookup(ua_chan->events, (void *)uevent->attr.name, &uiter);
3515 ua_event_node = lttng_ht_iter_get_node_str(&uiter);
3516 if (ua_event_node == NULL) {
3517 DBG2("Event %s not found in channel %s for app pid %d."
3518 "Skipping", uevent->attr.name, uchan->name, app->pid);
3519 continue;
3520 }
3521 ua_event = caa_container_of(ua_event_node, struct ust_app_event, node);
3522
3523 ret = disable_ust_app_event(ua_sess, ua_event, app);
3524 if (ret < 0) {
3525 /* XXX: Report error someday... */
3526 continue;
3527 }
3528 }
3529
3530 rcu_read_unlock();
3531
3532 return ret;
3533 }
3534
3535 /*
3536 * For a specific UST session and UST channel, the event for all
3537 * registered apps.
3538 */
3539 int ust_app_disable_all_event_glb(struct ltt_ust_session *usess,
3540 struct ltt_ust_channel *uchan)
3541 {
3542 int ret = 0;
3543 struct lttng_ht_iter iter, uiter;
3544 struct lttng_ht_node_str *ua_chan_node;
3545 struct ust_app *app;
3546 struct ust_app_session *ua_sess;
3547 struct ust_app_channel *ua_chan;
3548 struct ust_app_event *ua_event;
3549
3550 DBG("UST app disabling all event for all apps in channel "
3551 "%s for session id %" PRIu64, uchan->name, usess->id);
3552
3553 rcu_read_lock();
3554
3555 /* For all registered applications */
3556 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3557 if (!app->compatible) {
3558 /*
3559 * TODO: In time, we should notice the caller of this error by
3560 * telling him that this is a version error.
3561 */
3562 continue;
3563 }
3564 ua_sess = lookup_session_by_app(usess, app);
3565 if (!ua_sess) {
3566 /* The application has problem or is probably dead. */
3567 continue;
3568 }
3569
3570 /* Lookup channel in the ust app session */
3571 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3572 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3573 /* If the channel is not found, there is a code flow error */
3574 assert(ua_chan_node);
3575
3576 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3577
3578 /* Disable each events of channel */
3579 cds_lfht_for_each_entry(ua_chan->events->ht, &uiter.iter, ua_event,
3580 node.node) {
3581 ret = disable_ust_app_event(ua_sess, ua_event, app);
3582 if (ret < 0) {
3583 /* XXX: Report error someday... */
3584 continue;
3585 }
3586 }
3587 }
3588
3589 rcu_read_unlock();
3590
3591 return ret;
3592 }
3593
3594 /*
3595 * For a specific UST session, create the channel for all registered apps.
3596 */
3597 int ust_app_create_channel_glb(struct ltt_ust_session *usess,
3598 struct ltt_ust_channel *uchan)
3599 {
3600 int ret = 0, created;
3601 struct lttng_ht_iter iter;
3602 struct ust_app *app;
3603 struct ust_app_session *ua_sess = NULL;
3604
3605 /* Very wrong code flow */
3606 assert(usess);
3607 assert(uchan);
3608
3609 DBG2("UST app adding channel %s to UST domain for session id %" PRIu64,
3610 uchan->name, usess->id);
3611
3612 rcu_read_lock();
3613
3614 /* For every registered applications */
3615 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3616 if (!app->compatible) {
3617 /*
3618 * TODO: In time, we should notice the caller of this error by
3619 * telling him that this is a version error.
3620 */
3621 continue;
3622 }
3623 /*
3624 * Create session on the tracer side and add it to app session HT. Note
3625 * that if session exist, it will simply return a pointer to the ust
3626 * app session.
3627 */
3628 ret = create_ust_app_session(usess, app, &ua_sess, &created);
3629 if (ret < 0) {
3630 switch (ret) {
3631 case -ENOTCONN:
3632 /*
3633 * The application's socket is not valid. Either a bad socket
3634 * or a timeout on it. We can't inform the caller that for a
3635 * specific app, the session failed so lets continue here.
3636 */
3637 continue;
3638 case -ENOMEM:
3639 default:
3640 goto error_rcu_unlock;
3641 }
3642 }
3643 assert(ua_sess);
3644
3645 pthread_mutex_lock(&ua_sess->lock);
3646 if (!strncmp(uchan->name, DEFAULT_METADATA_NAME,
3647 sizeof(uchan->name))) {
3648 struct ustctl_consumer_channel_attr attr;
3649 copy_channel_attr_to_ustctl(&attr, &uchan->attr);
3650 ret = create_ust_app_metadata(ua_sess, app, usess->consumer,
3651 &attr);
3652 } else {
3653 /* Create channel onto application. We don't need the chan ref. */
3654 ret = create_ust_app_channel(ua_sess, uchan, app,
3655 LTTNG_UST_CHAN_PER_CPU, usess, NULL);
3656 }
3657 pthread_mutex_unlock(&ua_sess->lock);
3658 if (ret < 0) {
3659 if (ret == -ENOMEM) {
3660 /* No more memory is a fatal error. Stop right now. */
3661 goto error_rcu_unlock;
3662 }
3663 /* Cleanup the created session if it's the case. */
3664 if (created) {
3665 destroy_app_session(app, ua_sess);
3666 }
3667 }
3668 }
3669
3670 error_rcu_unlock:
3671 rcu_read_unlock();
3672 return ret;
3673 }
3674
3675 /*
3676 * Enable event for a specific session and channel on the tracer.
3677 */
3678 int ust_app_enable_event_glb(struct ltt_ust_session *usess,
3679 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
3680 {
3681 int ret = 0;
3682 struct lttng_ht_iter iter, uiter;
3683 struct lttng_ht_node_str *ua_chan_node;
3684 struct ust_app *app;
3685 struct ust_app_session *ua_sess;
3686 struct ust_app_channel *ua_chan;
3687 struct ust_app_event *ua_event;
3688
3689 DBG("UST app enabling event %s for all apps for session id %" PRIu64,
3690 uevent->attr.name, usess->id);
3691
3692 /*
3693 * NOTE: At this point, this function is called only if the session and
3694 * channel passed are already created for all apps. and enabled on the
3695 * tracer also.
3696 */
3697
3698 rcu_read_lock();
3699
3700 /* For all registered applications */
3701 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3702 if (!app->compatible) {
3703 /*
3704 * TODO: In time, we should notice the caller of this error by
3705 * telling him that this is a version error.
3706 */
3707 continue;
3708 }
3709 ua_sess = lookup_session_by_app(usess, app);
3710 if (!ua_sess) {
3711 /* The application has problem or is probably dead. */
3712 continue;
3713 }
3714
3715 pthread_mutex_lock(&ua_sess->lock);
3716
3717 /* Lookup channel in the ust app session */
3718 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3719 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3720 /* If the channel is not found, there is a code flow error */
3721 assert(ua_chan_node);
3722
3723 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3724
3725 /* Get event node */
3726 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
3727 uevent->filter, uevent->attr.loglevel, uevent->exclusion);
3728 if (ua_event == NULL) {
3729 DBG3("UST app enable event %s not found for app PID %d."
3730 "Skipping app", uevent->attr.name, app->pid);
3731 goto next_app;
3732 }
3733
3734 ret = enable_ust_app_event(ua_sess, ua_event, app);
3735 if (ret < 0) {
3736 pthread_mutex_unlock(&ua_sess->lock);
3737 goto error;
3738 }
3739 next_app:
3740 pthread_mutex_unlock(&ua_sess->lock);
3741 }
3742
3743 error:
3744 rcu_read_unlock();
3745 return ret;
3746 }
3747
3748 /*
3749 * For a specific existing UST session and UST channel, creates the event for
3750 * all registered apps.
3751 */
3752 int ust_app_create_event_glb(struct ltt_ust_session *usess,
3753 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
3754 {
3755 int ret = 0;
3756 struct lttng_ht_iter iter, uiter;
3757 struct lttng_ht_node_str *ua_chan_node;
3758 struct ust_app *app;
3759 struct ust_app_session *ua_sess;
3760 struct ust_app_channel *ua_chan;
3761
3762 DBG("UST app creating event %s for all apps for session id %" PRIu64,
3763 uevent->attr.name, usess->id);
3764
3765 rcu_read_lock();
3766
3767 /* For all registered applications */
3768 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3769 if (!app->compatible) {
3770 /*
3771 * TODO: In time, we should notice the caller of this error by
3772 * telling him that this is a version error.
3773 */
3774 continue;
3775 }
3776 ua_sess = lookup_session_by_app(usess, app);
3777 if (!ua_sess) {
3778 /* The application has problem or is probably dead. */
3779 continue;
3780 }
3781
3782 pthread_mutex_lock(&ua_sess->lock);
3783 /* Lookup channel in the ust app session */
3784 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3785 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3786 /* If the channel is not found, there is a code flow error */
3787 assert(ua_chan_node);
3788
3789 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3790
3791 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
3792 pthread_mutex_unlock(&ua_sess->lock);
3793 if (ret < 0) {
3794 if (ret != -LTTNG_UST_ERR_EXIST) {
3795 /* Possible value at this point: -ENOMEM. If so, we stop! */
3796 break;
3797 }
3798 DBG2("UST app event %s already exist on app PID %d",
3799 uevent->attr.name, app->pid);
3800 continue;
3801 }
3802 }
3803
3804 rcu_read_unlock();
3805
3806 return ret;
3807 }
3808
3809 /*
3810 * Start tracing for a specific UST session and app.
3811 */
3812 static
3813 int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app)
3814 {
3815 int ret = 0;
3816 struct ust_app_session *ua_sess;
3817
3818 DBG("Starting tracing for ust app pid %d", app->pid);
3819
3820 rcu_read_lock();
3821
3822 if (!app->compatible) {
3823 goto end;
3824 }
3825
3826 ua_sess = lookup_session_by_app(usess, app);
3827 if (ua_sess == NULL) {
3828 /* The session is in teardown process. Ignore and continue. */
3829 goto end;
3830 }
3831
3832 pthread_mutex_lock(&ua_sess->lock);
3833
3834 /* Upon restart, we skip the setup, already done */
3835 if (ua_sess->started) {
3836 goto skip_setup;
3837 }
3838
3839 /* Create directories if consumer is LOCAL and has a path defined. */
3840 if (usess->consumer->type == CONSUMER_DST_LOCAL &&
3841 strlen(usess->consumer->dst.trace_path) > 0) {
3842 ret = run_as_mkdir_recursive(usess->consumer->dst.trace_path,
3843 S_IRWXU | S_IRWXG, ua_sess->euid, ua_sess->egid);
3844 if (ret < 0) {
3845 if (ret != -EEXIST) {
3846 ERR("Trace directory creation error");
3847 goto error_unlock;
3848 }
3849 }
3850 }
3851
3852 /*
3853 * Create the metadata for the application. This returns gracefully if a
3854 * metadata was already set for the session.
3855 */
3856 ret = create_ust_app_metadata(ua_sess, app, usess->consumer, NULL);
3857 if (ret < 0) {
3858 goto error_unlock;
3859 }
3860
3861 health_code_update();
3862
3863 skip_setup:
3864 /* This start the UST tracing */
3865 ret = ustctl_start_session(app->sock, ua_sess->handle);
3866 if (ret < 0) {
3867 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
3868 ERR("Error starting tracing for app pid: %d (ret: %d)",
3869 app->pid, ret);
3870 } else {
3871 DBG("UST app start session failed. Application is dead.");
3872 /*
3873 * This is normal behavior, an application can die during the
3874 * creation process. Don't report an error so the execution can
3875 * continue normally.
3876 */
3877 pthread_mutex_unlock(&ua_sess->lock);
3878 goto end;
3879 }
3880 goto error_unlock;
3881 }
3882
3883 /* Indicate that the session has been started once */
3884 ua_sess->started = 1;
3885
3886 pthread_mutex_unlock(&ua_sess->lock);
3887
3888 health_code_update();
3889
3890 /* Quiescent wait after starting trace */
3891 ret = ustctl_wait_quiescent(app->sock);
3892 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
3893 ERR("UST app wait quiescent failed for app pid %d ret %d",
3894 app->pid, ret);
3895 }
3896
3897 end:
3898 rcu_read_unlock();
3899 health_code_update();
3900 return 0;
3901
3902 error_unlock:
3903 pthread_mutex_unlock(&ua_sess->lock);
3904 rcu_read_unlock();
3905 health_code_update();
3906 return -1;
3907 }
3908
3909 /*
3910 * Stop tracing for a specific UST session and app.
3911 */
3912 static
3913 int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
3914 {
3915 int ret = 0;
3916 struct ust_app_session *ua_sess;
3917 struct ust_registry_session *registry;
3918
3919 DBG("Stopping tracing for ust app pid %d", app->pid);
3920
3921 rcu_read_lock();
3922
3923 if (!app->compatible) {
3924 goto end_no_session;
3925 }
3926
3927 ua_sess = lookup_session_by_app(usess, app);
3928 if (ua_sess == NULL) {
3929 goto end_no_session;
3930 }
3931
3932 pthread_mutex_lock(&ua_sess->lock);
3933
3934 /*
3935 * If started = 0, it means that stop trace has been called for a session
3936 * that was never started. It's possible since we can have a fail start
3937 * from either the application manager thread or the command thread. Simply
3938 * indicate that this is a stop error.
3939 */
3940 if (!ua_sess->started) {
3941 goto error_rcu_unlock;
3942 }
3943
3944 health_code_update();
3945
3946 /* This inhibits UST tracing */
3947 ret = ustctl_stop_session(app->sock, ua_sess->handle);
3948 if (ret < 0) {
3949 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
3950 ERR("Error stopping tracing for app pid: %d (ret: %d)",
3951 app->pid, ret);
3952 } else {
3953 DBG("UST app stop session failed. Application is dead.");
3954 /*
3955 * This is normal behavior, an application can die during the
3956 * creation process. Don't report an error so the execution can
3957 * continue normally.
3958 */
3959 goto end_unlock;
3960 }
3961 goto error_rcu_unlock;
3962 }
3963
3964 health_code_update();
3965
3966 /* Quiescent wait after stopping trace */
3967 ret = ustctl_wait_quiescent(app->sock);
3968 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
3969 ERR("UST app wait quiescent failed for app pid %d ret %d",
3970 app->pid, ret);
3971 }
3972
3973 health_code_update();
3974
3975 registry = get_session_registry(ua_sess);
3976 assert(registry);
3977
3978 if (!registry->metadata_closed) {
3979 /* Push metadata for application before freeing the application. */
3980 (void) push_metadata(registry, ua_sess->consumer);
3981 }
3982
3983 end_unlock:
3984 pthread_mutex_unlock(&ua_sess->lock);
3985 end_no_session:
3986 rcu_read_unlock();
3987 health_code_update();
3988 return 0;
3989
3990 error_rcu_unlock:
3991 pthread_mutex_unlock(&ua_sess->lock);
3992 rcu_read_unlock();
3993 health_code_update();
3994 return -1;
3995 }
3996
3997 /*
3998 * Flush buffers for a specific UST session and app.
3999 */
4000 static
4001 int ust_app_flush_trace(struct ltt_ust_session *usess, struct ust_app *app)
4002 {
4003 int ret = 0;
4004 struct lttng_ht_iter iter;
4005 struct ust_app_session *ua_sess;
4006 struct ust_app_channel *ua_chan;
4007
4008 DBG("Flushing buffers for ust app pid %d", app->pid);
4009
4010 rcu_read_lock();
4011
4012 if (!app->compatible) {
4013 goto end_no_session;
4014 }
4015
4016 ua_sess = lookup_session_by_app(usess, app);
4017 if (ua_sess == NULL) {
4018 goto end_no_session;
4019 }
4020
4021 pthread_mutex_lock(&ua_sess->lock);
4022
4023 health_code_update();
4024
4025 /* Flushing buffers */
4026 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
4027 node.node) {
4028 health_code_update();
4029 assert(ua_chan->is_sent);
4030 ret = ustctl_sock_flush_buffer(app->sock, ua_chan->obj);
4031 if (ret < 0) {
4032 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4033 ERR("UST app PID %d channel %s flush failed with ret %d",
4034 app->pid, ua_chan->name, ret);
4035 } else {
4036 DBG3("UST app failed to flush %s. Application is dead.",
4037 ua_chan->name);
4038 /*
4039 * This is normal behavior, an application can die during the
4040 * creation process. Don't report an error so the execution can
4041 * continue normally.
4042 */
4043 }
4044 /* Continuing flushing all buffers */
4045 continue;
4046 }
4047 }
4048
4049 health_code_update();
4050
4051 pthread_mutex_unlock(&ua_sess->lock);
4052 end_no_session:
4053 rcu_read_unlock();
4054 health_code_update();
4055 return 0;
4056 }
4057
4058 /*
4059 * Destroy a specific UST session in apps.
4060 */
4061 static int destroy_trace(struct ltt_ust_session *usess, struct ust_app *app)
4062 {
4063 int ret;
4064 struct ust_app_session *ua_sess;
4065 struct lttng_ht_iter iter;
4066 struct lttng_ht_node_u64 *node;
4067
4068 DBG("Destroy tracing for ust app pid %d", app->pid);
4069
4070 rcu_read_lock();
4071
4072 if (!app->compatible) {
4073 goto end;
4074 }
4075
4076 __lookup_session_by_app(usess, app, &iter);
4077 node = lttng_ht_iter_get_node_u64(&iter);
4078 if (node == NULL) {
4079 /* Session is being or is deleted. */
4080 goto end;
4081 }
4082 ua_sess = caa_container_of(node, struct ust_app_session, node);
4083
4084 health_code_update();
4085 destroy_app_session(app, ua_sess);
4086
4087 health_code_update();
4088
4089 /* Quiescent wait after stopping trace */
4090 ret = ustctl_wait_quiescent(app->sock);
4091 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4092 ERR("UST app wait quiescent failed for app pid %d ret %d",
4093 app->pid, ret);
4094 }
4095 end:
4096 rcu_read_unlock();
4097 health_code_update();
4098 return 0;
4099 }
4100
4101 /*
4102 * Start tracing for the UST session.
4103 */
4104 int ust_app_start_trace_all(struct ltt_ust_session *usess)
4105 {
4106 int ret = 0;
4107 struct lttng_ht_iter iter;
4108 struct ust_app *app;
4109
4110 DBG("Starting all UST traces");
4111
4112 rcu_read_lock();
4113
4114 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4115 ret = ust_app_start_trace(usess, app);
4116 if (ret < 0) {
4117 /* Continue to next apps even on error */
4118 continue;
4119 }
4120 }
4121
4122 rcu_read_unlock();
4123
4124 return 0;
4125 }
4126
4127 /*
4128 * Start tracing for the UST session.
4129 */
4130 int ust_app_stop_trace_all(struct ltt_ust_session *usess)
4131 {
4132 int ret = 0;
4133 struct lttng_ht_iter iter;
4134 struct ust_app *app;
4135
4136 DBG("Stopping all UST traces");
4137
4138 rcu_read_lock();
4139
4140 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4141 ret = ust_app_stop_trace(usess, app);
4142 if (ret < 0) {
4143 /* Continue to next apps even on error */
4144 continue;
4145 }
4146 }
4147
4148 /* Flush buffers and push metadata (for UID buffers). */
4149 switch (usess->buffer_type) {
4150 case LTTNG_BUFFER_PER_UID:
4151 {
4152 struct buffer_reg_uid *reg;
4153
4154 /* Flush all per UID buffers associated to that session. */
4155 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
4156 struct ust_registry_session *ust_session_reg;
4157 struct buffer_reg_channel *reg_chan;
4158 struct consumer_socket *socket;
4159
4160 /* Get consumer socket to use to push the metadata.*/
4161 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
4162 usess->consumer);
4163 if (!socket) {
4164 /* Ignore request if no consumer is found for the session. */
4165 continue;
4166 }
4167
4168 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
4169 reg_chan, node.node) {
4170 /*
4171 * The following call will print error values so the return
4172 * code is of little importance because whatever happens, we
4173 * have to try them all.
4174 */
4175 (void) consumer_flush_channel(socket, reg_chan->consumer_key);
4176 }
4177
4178 ust_session_reg = reg->registry->reg.ust;
4179 if (!ust_session_reg->metadata_closed) {
4180 /* Push metadata. */
4181 (void) push_metadata(ust_session_reg, usess->consumer);
4182 }
4183 }
4184
4185 break;
4186 }
4187 case LTTNG_BUFFER_PER_PID:
4188 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4189 ret = ust_app_flush_trace(usess, app);
4190 if (ret < 0) {
4191 /* Continue to next apps even on error */
4192 continue;
4193 }
4194 }
4195 break;
4196 default:
4197 assert(0);
4198 break;
4199 }
4200
4201 rcu_read_unlock();
4202
4203 return 0;
4204 }
4205
4206 /*
4207 * Destroy app UST session.
4208 */
4209 int ust_app_destroy_trace_all(struct ltt_ust_session *usess)
4210 {
4211 int ret = 0;
4212 struct lttng_ht_iter iter;
4213 struct ust_app *app;
4214
4215 DBG("Destroy all UST traces");
4216
4217 rcu_read_lock();
4218
4219 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4220 ret = destroy_trace(usess, app);
4221 if (ret < 0) {
4222 /* Continue to next apps even on error */
4223 continue;
4224 }
4225 }
4226
4227 rcu_read_unlock();
4228
4229 return 0;
4230 }
4231
4232 /*
4233 * Add channels/events from UST global domain to registered apps at sock.
4234 */
4235 void ust_app_global_update(struct ltt_ust_session *usess, int sock)
4236 {
4237 int ret = 0;
4238 struct lttng_ht_iter iter, uiter;
4239 struct ust_app *app;
4240 struct ust_app_session *ua_sess = NULL;
4241 struct ust_app_channel *ua_chan;
4242 struct ust_app_event *ua_event;
4243 struct ust_app_ctx *ua_ctx;
4244
4245 assert(usess);
4246 assert(sock >= 0);
4247
4248 DBG2("UST app global update for app sock %d for session id %" PRIu64, sock,
4249 usess->id);
4250
4251 rcu_read_lock();
4252
4253 app = ust_app_find_by_sock(sock);
4254 if (app == NULL) {
4255 /*
4256 * Application can be unregistered before so this is possible hence
4257 * simply stopping the update.
4258 */
4259 DBG3("UST app update failed to find app sock %d", sock);
4260 goto error;
4261 }
4262
4263 if (!app->compatible) {
4264 goto error;
4265 }
4266
4267 ret = create_ust_app_session(usess, app, &ua_sess, NULL);
4268 if (ret < 0) {
4269 /* Tracer is probably gone or ENOMEM. */
4270 goto error;
4271 }
4272 assert(ua_sess);
4273
4274 pthread_mutex_lock(&ua_sess->lock);
4275
4276 /*
4277 * We can iterate safely here over all UST app session since the create ust
4278 * app session above made a shadow copy of the UST global domain from the
4279 * ltt ust session.
4280 */
4281 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
4282 node.node) {
4283 /*
4284 * For a metadata channel, handle it differently.
4285 */
4286 if (!strncmp(ua_chan->name, DEFAULT_METADATA_NAME,
4287 sizeof(ua_chan->name))) {
4288 ret = create_ust_app_metadata(ua_sess, app, usess->consumer,
4289 &ua_chan->attr);
4290 if (ret < 0) {
4291 goto error_unlock;
4292 }
4293 /* Remove it from the hash table and continue!. */
4294 ret = lttng_ht_del(ua_sess->channels, &iter);
4295 assert(!ret);
4296 delete_ust_app_channel(-1, ua_chan, app);
4297 continue;
4298 } else {
4299 ret = do_create_channel(app, usess, ua_sess, ua_chan);
4300 if (ret < 0) {
4301 /*
4302 * Stop everything. On error, the application failed, no more
4303 * file descriptor are available or ENOMEM so stopping here is
4304 * the only thing we can do for now.
4305 */
4306 goto error_unlock;
4307 }
4308 }
4309
4310 /*
4311 * Add context using the list so they are enabled in the same order the
4312 * user added them.
4313 */
4314 cds_list_for_each_entry(ua_ctx, &ua_chan->ctx_list, list) {
4315 ret = create_ust_channel_context(ua_chan, ua_ctx, app);
4316 if (ret < 0) {
4317 goto error_unlock;
4318 }
4319 }
4320
4321
4322 /* For each events */
4323 cds_lfht_for_each_entry(ua_chan->events->ht, &uiter.iter, ua_event,
4324 node.node) {
4325 ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
4326 if (ret < 0) {
4327 goto error_unlock;
4328 }
4329 }
4330 }
4331
4332 pthread_mutex_unlock(&ua_sess->lock);
4333
4334 if (usess->start_trace) {
4335 ret = ust_app_start_trace(usess, app);
4336 if (ret < 0) {
4337 goto error;
4338 }
4339
4340 DBG2("UST trace started for app pid %d", app->pid);
4341 }
4342
4343 /* Everything went well at this point. */
4344 rcu_read_unlock();
4345 return;
4346
4347 error_unlock:
4348 pthread_mutex_unlock(&ua_sess->lock);
4349 error:
4350 if (ua_sess) {
4351 destroy_app_session(app, ua_sess);
4352 }
4353 rcu_read_unlock();
4354 return;
4355 }
4356
4357 /*
4358 * Add context to a specific channel for global UST domain.
4359 */
4360 int ust_app_add_ctx_channel_glb(struct ltt_ust_session *usess,
4361 struct ltt_ust_channel *uchan, struct ltt_ust_context *uctx)
4362 {
4363 int ret = 0;
4364 struct lttng_ht_node_str *ua_chan_node;
4365 struct lttng_ht_iter iter, uiter;
4366 struct ust_app_channel *ua_chan = NULL;
4367 struct ust_app_session *ua_sess;
4368 struct ust_app *app;
4369
4370 rcu_read_lock();
4371
4372 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4373 if (!app->compatible) {
4374 /*
4375 * TODO: In time, we should notice the caller of this error by
4376 * telling him that this is a version error.
4377 */
4378 continue;
4379 }
4380 ua_sess = lookup_session_by_app(usess, app);
4381 if (ua_sess == NULL) {
4382 continue;
4383 }
4384
4385 pthread_mutex_lock(&ua_sess->lock);
4386 /* Lookup channel in the ust app session */
4387 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4388 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
4389 if (ua_chan_node == NULL) {
4390 goto next_app;
4391 }
4392 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel,
4393 node);
4394 ret = create_ust_app_channel_context(ua_sess, ua_chan, &uctx->ctx, app);
4395 if (ret < 0) {
4396 goto next_app;
4397 }
4398 next_app:
4399 pthread_mutex_unlock(&ua_sess->lock);
4400 }
4401
4402 rcu_read_unlock();
4403 return ret;
4404 }
4405
4406 /*
4407 * Enable event for a channel from a UST session for a specific PID.
4408 */
4409 int ust_app_enable_event_pid(struct ltt_ust_session *usess,
4410 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent, pid_t pid)
4411 {
4412 int ret = 0;
4413 struct lttng_ht_iter iter;
4414 struct lttng_ht_node_str *ua_chan_node;
4415 struct ust_app *app;
4416 struct ust_app_session *ua_sess;
4417 struct ust_app_channel *ua_chan;
4418 struct ust_app_event *ua_event;
4419
4420 DBG("UST app enabling event %s for PID %d", uevent->attr.name, pid);
4421
4422 rcu_read_lock();
4423
4424 app = ust_app_find_by_pid(pid);
4425 if (app == NULL) {
4426 ERR("UST app enable event per PID %d not found", pid);
4427 ret = -1;
4428 goto end;
4429 }
4430
4431 if (!app->compatible) {
4432 ret = 0;
4433 goto end;
4434 }
4435
4436 ua_sess = lookup_session_by_app(usess, app);
4437 if (!ua_sess) {
4438 /* The application has problem or is probably dead. */
4439 ret = 0;
4440 goto end;
4441 }
4442
4443 pthread_mutex_lock(&ua_sess->lock);
4444 /* Lookup channel in the ust app session */
4445 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
4446 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
4447 /* If the channel is not found, there is a code flow error */
4448 assert(ua_chan_node);
4449
4450 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4451
4452 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
4453 uevent->filter, uevent->attr.loglevel, uevent->exclusion);
4454 if (ua_event == NULL) {
4455 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
4456 if (ret < 0) {
4457 goto end_unlock;
4458 }
4459 } else {
4460 ret = enable_ust_app_event(ua_sess, ua_event, app);
4461 if (ret < 0) {
4462 goto end_unlock;
4463 }
4464 }
4465
4466 end_unlock:
4467 pthread_mutex_unlock(&ua_sess->lock);
4468 end:
4469 rcu_read_unlock();
4470 return ret;
4471 }
4472
4473 /*
4474 * Disable event for a channel from a UST session for a specific PID.
4475 */
4476 int ust_app_disable_event_pid(struct ltt_ust_session *usess,
4477 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent, pid_t pid)
4478 {
4479 int ret = 0;
4480 struct lttng_ht_iter iter;
4481 struct lttng_ht_node_str *ua_chan_node, *ua_event_node;
4482 struct ust_app *app;
4483 struct ust_app_session *ua_sess;
4484 struct ust_app_channel *ua_chan;
4485 struct ust_app_event *ua_event;
4486
4487 DBG("UST app disabling event %s for PID %d", uevent->attr.name, pid);
4488
4489 rcu_read_lock();
4490
4491 app = ust_app_find_by_pid(pid);
4492 if (app == NULL) {
4493 ERR("UST app disable event per PID %d not found", pid);
4494 ret = -1;
4495 goto error;
4496 }
4497
4498 if (!app->compatible) {
4499 ret = 0;
4500 goto error;
4501 }
4502
4503 ua_sess = lookup_session_by_app(usess, app);
4504 if (!ua_sess) {
4505 /* The application has problem or is probably dead. */
4506 goto error;
4507 }
4508
4509 /* Lookup channel in the ust app session */
4510 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
4511 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
4512 if (ua_chan_node == NULL) {
4513 /* Channel does not exist, skip disabling */
4514 goto error;
4515 }
4516 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4517
4518 lttng_ht_lookup(ua_chan->events, (void *)uevent->attr.name, &iter);
4519 ua_event_node = lttng_ht_iter_get_node_str(&iter);
4520 if (ua_event_node == NULL) {
4521 /* Event does not exist, skip disabling */
4522 goto error;
4523 }
4524 ua_event = caa_container_of(ua_event_node, struct ust_app_event, node);
4525
4526 ret = disable_ust_app_event(ua_sess, ua_event, app);
4527 if (ret < 0) {
4528 goto error;
4529 }
4530
4531 error:
4532 rcu_read_unlock();
4533 return ret;
4534 }
4535
4536 /*
4537 * Calibrate registered applications.
4538 */
4539 int ust_app_calibrate_glb(struct lttng_ust_calibrate *calibrate)
4540 {
4541 int ret = 0;
4542 struct lttng_ht_iter iter;
4543 struct ust_app *app;
4544
4545 rcu_read_lock();
4546
4547 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4548 if (!app->compatible) {
4549 /*
4550 * TODO: In time, we should notice the caller of this error by
4551 * telling him that this is a version error.
4552 */
4553 continue;
4554 }
4555
4556 health_code_update();
4557
4558 ret = ustctl_calibrate(app->sock, calibrate);
4559 if (ret < 0) {
4560 switch (ret) {
4561 case -ENOSYS:
4562 /* Means that it's not implemented on the tracer side. */
4563 ret = 0;
4564 break;
4565 default:
4566 DBG2("Calibrate app PID %d returned with error %d",
4567 app->pid, ret);
4568 break;
4569 }
4570 }
4571 }
4572
4573 DBG("UST app global domain calibration finished");
4574
4575 rcu_read_unlock();
4576
4577 health_code_update();
4578
4579 return ret;
4580 }
4581
4582 /*
4583 * Receive registration and populate the given msg structure.
4584 *
4585 * On success return 0 else a negative value returned by the ustctl call.
4586 */
4587 int ust_app_recv_registration(int sock, struct ust_register_msg *msg)
4588 {
4589 int ret;
4590 uint32_t pid, ppid, uid, gid;
4591
4592 assert(msg);
4593
4594 ret = ustctl_recv_reg_msg(sock, &msg->type, &msg->major, &msg->minor,
4595 &pid, &ppid, &uid, &gid,
4596 &msg->bits_per_long,
4597 &msg->uint8_t_alignment,
4598 &msg->uint16_t_alignment,
4599 &msg->uint32_t_alignment,
4600 &msg->uint64_t_alignment,
4601 &msg->long_alignment,
4602 &msg->byte_order,
4603 msg->name);
4604 if (ret < 0) {
4605 switch (-ret) {
4606 case EPIPE:
4607 case ECONNRESET:
4608 case LTTNG_UST_ERR_EXITING:
4609 DBG3("UST app recv reg message failed. Application died");
4610 break;
4611 case LTTNG_UST_ERR_UNSUP_MAJOR:
4612 ERR("UST app recv reg unsupported version %d.%d. Supporting %d.%d",
4613 msg->major, msg->minor, LTTNG_UST_ABI_MAJOR_VERSION,
4614 LTTNG_UST_ABI_MINOR_VERSION);
4615 break;
4616 default:
4617 ERR("UST app recv reg message failed with ret %d", ret);
4618 break;
4619 }
4620 goto error;
4621 }
4622 msg->pid = (pid_t) pid;
4623 msg->ppid = (pid_t) ppid;
4624 msg->uid = (uid_t) uid;
4625 msg->gid = (gid_t) gid;
4626
4627 error:
4628 return ret;
4629 }
4630
4631 /*
4632 * Return a ust app channel object using the application object and the channel
4633 * object descriptor has a key. If not found, NULL is returned. A RCU read side
4634 * lock MUST be acquired before calling this function.
4635 */
4636 static struct ust_app_channel *find_channel_by_objd(struct ust_app *app,
4637 int objd)
4638 {
4639 struct lttng_ht_node_ulong *node;
4640 struct lttng_ht_iter iter;
4641 struct ust_app_channel *ua_chan = NULL;
4642
4643 assert(app);
4644
4645 lttng_ht_lookup(app->ust_objd, (void *)((unsigned long) objd), &iter);
4646 node = lttng_ht_iter_get_node_ulong(&iter);
4647 if (node == NULL) {
4648 DBG2("UST app channel find by objd %d not found", objd);
4649 goto error;
4650 }
4651
4652 ua_chan = caa_container_of(node, struct ust_app_channel, ust_objd_node);
4653
4654 error:
4655 return ua_chan;
4656 }
4657
4658 /*
4659 * Reply to a register channel notification from an application on the notify
4660 * socket. The channel metadata is also created.
4661 *
4662 * The session UST registry lock is acquired in this function.
4663 *
4664 * On success 0 is returned else a negative value.
4665 */
4666 static int reply_ust_register_channel(int sock, int sobjd, int cobjd,
4667 size_t nr_fields, struct ustctl_field *fields)
4668 {
4669 int ret, ret_code = 0;
4670 uint32_t chan_id, reg_count;
4671 uint64_t chan_reg_key;
4672 enum ustctl_channel_header type;
4673 struct ust_app *app;
4674 struct ust_app_channel *ua_chan;
4675 struct ust_app_session *ua_sess;
4676 struct ust_registry_session *registry;
4677 struct ust_registry_channel *chan_reg;
4678
4679 rcu_read_lock();
4680
4681 /* Lookup application. If not found, there is a code flow error. */
4682 app = find_app_by_notify_sock(sock);
4683 if (!app) {
4684 DBG("Application socket %d is being teardown. Abort event notify",
4685 sock);
4686 ret = 0;
4687 free(fields);
4688 goto error_rcu_unlock;
4689 }
4690
4691 /* Lookup channel by UST object descriptor. */
4692 ua_chan = find_channel_by_objd(app, cobjd);
4693 if (!ua_chan) {
4694 DBG("Application channel is being teardown. Abort event notify");
4695 ret = 0;
4696 free(fields);
4697 goto error_rcu_unlock;
4698 }
4699
4700 assert(ua_chan->session);
4701 ua_sess = ua_chan->session;
4702
4703 /* Get right session registry depending on the session buffer type. */
4704 registry = get_session_registry(ua_sess);
4705 assert(registry);
4706
4707 /* Depending on the buffer type, a different channel key is used. */
4708 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
4709 chan_reg_key = ua_chan->tracing_channel_id;
4710 } else {
4711 chan_reg_key = ua_chan->key;
4712 }
4713
4714 pthread_mutex_lock(&registry->lock);
4715
4716 chan_reg = ust_registry_channel_find(registry, chan_reg_key);
4717 assert(chan_reg);
4718
4719 if (!chan_reg->register_done) {
4720 reg_count = ust_registry_get_event_count(chan_reg);
4721 if (reg_count < 31) {
4722 type = USTCTL_CHANNEL_HEADER_COMPACT;
4723 } else {
4724 type = USTCTL_CHANNEL_HEADER_LARGE;
4725 }
4726
4727 chan_reg->nr_ctx_fields = nr_fields;
4728 chan_reg->ctx_fields = fields;
4729 chan_reg->header_type = type;
4730 } else {
4731 /* Get current already assigned values. */
4732 type = chan_reg->header_type;
4733 free(fields);
4734 /* Set to NULL so the error path does not do a double free. */
4735 fields = NULL;
4736 }
4737 /* Channel id is set during the object creation. */
4738 chan_id = chan_reg->chan_id;
4739
4740 /* Append to metadata */
4741 if (!chan_reg->metadata_dumped) {
4742 ret_code = ust_metadata_channel_statedump(registry, chan_reg);
4743 if (ret_code) {
4744 ERR("Error appending channel metadata (errno = %d)", ret_code);
4745 goto reply;
4746 }
4747 }
4748
4749 reply:
4750 DBG3("UST app replying to register channel key %" PRIu64
4751 " with id %u, type: %d, ret: %d", chan_reg_key, chan_id, type,
4752 ret_code);
4753
4754 ret = ustctl_reply_register_channel(sock, chan_id, type, ret_code);
4755 if (ret < 0) {
4756 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4757 ERR("UST app reply channel failed with ret %d", ret);
4758 } else {
4759 DBG3("UST app reply channel failed. Application died");
4760 }
4761 goto error;
4762 }
4763
4764 /* This channel registry registration is completed. */
4765 chan_reg->register_done = 1;
4766
4767 error:
4768 pthread_mutex_unlock(&registry->lock);
4769 error_rcu_unlock:
4770 rcu_read_unlock();
4771 if (ret) {
4772 free(fields);
4773 }
4774 return ret;
4775 }
4776
/*
 * Add event to the UST channel registry. When the event is added to the
 * registry, the metadata is also created. Once done, this replies to the
 * application with the appropriate error code.
 *
 * This function owns the sig, fields and model_emf_uri heap buffers: they
 * are either freed on the early-exit paths below or handed over to
 * ust_registry_create_event().
 *
 * The session UST registry lock is acquired in the function.
 *
 * On success 0 is returned else a negative value.
 */
static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
		char *sig, size_t nr_fields, struct ustctl_field *fields, int loglevel,
		char *model_emf_uri)
{
	int ret, ret_code;
	uint32_t event_id = 0;
	uint64_t chan_reg_key;
	struct ust_app *app;
	struct ust_app_channel *ua_chan;
	struct ust_app_session *ua_sess;
	struct ust_registry_session *registry;

	rcu_read_lock();

	/* Lookup application. If not found, there is a code flow error. */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		DBG("Application socket %d is being teardown. Abort event notify",
				sock);
		ret = 0;
		/* Not an error; release the buffers we own. */
		free(sig);
		free(fields);
		free(model_emf_uri);
		goto error_rcu_unlock;
	}

	/* Lookup channel by UST object descriptor. */
	ua_chan = find_channel_by_objd(app, cobjd);
	if (!ua_chan) {
		DBG("Application channel is being teardown. Abort event notify");
		ret = 0;
		free(sig);
		free(fields);
		free(model_emf_uri);
		goto error_rcu_unlock;
	}

	assert(ua_chan->session);
	ua_sess = ua_chan->session;

	registry = get_session_registry(ua_sess);
	assert(registry);

	/* Per-UID buffers key the registry channel by the tracing channel id;
	 * per-PID buffers use the channel object key. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
		chan_reg_key = ua_chan->tracing_channel_id;
	} else {
		chan_reg_key = ua_chan->key;
	}

	pthread_mutex_lock(&registry->lock);

	/*
	 * From this point on, this call acquires the ownership of the sig, fields
	 * and model_emf_uri meaning any free are done inside it if needed. These
	 * three variables MUST NOT be read/write after this.
	 */
	ret_code = ust_registry_create_event(registry, chan_reg_key,
			sobjd, cobjd, name, sig, nr_fields, fields, loglevel,
			model_emf_uri, ua_sess->buffer_type, &event_id,
			app);

	/*
	 * The return value is returned to ustctl so in case of an error, the
	 * application can be notified. In case of an error, it's important not to
	 * return a negative error or else the application will get closed.
	 */
	ret = ustctl_reply_register_event(sock, event_id, ret_code);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app reply event failed with ret %d", ret);
		} else {
			DBG3("UST app reply event failed. Application died");
		}
		/*
		 * No need to wipe the create event since the application socket will
		 * get close on error hence cleaning up everything by itself.
		 */
		goto error;
	}

	DBG3("UST registry event %s with id %" PRId32 " added successfully",
			name, event_id);

error:
	pthread_mutex_unlock(&registry->lock);
error_rcu_unlock:
	rcu_read_unlock();
	return ret;
}
4875
/*
 * Handle application notification through the given notify socket.
 *
 * Reads one command from the socket and dispatches it: event registrations
 * go to the channel registry (add_event_ust_registry) and channel
 * registrations are replied to directly (reply_ust_register_channel). Both
 * callees take ownership of the heap buffers received from ustctl.
 *
 * Return 0 on success or else a negative value.
 */
int ust_app_recv_notify(int sock)
{
	int ret;
	enum ustctl_notify_cmd cmd;

	DBG3("UST app receiving notify from sock %d", sock);

	ret = ustctl_recv_notify(sock, &cmd);
	if (ret < 0) {
		/* EPIPE/EXITING mean the application died; not worth an ERR. */
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app recv notify failed with ret %d", ret);
		} else {
			DBG3("UST app recv notify failed. Application died");
		}
		goto error;
	}

	switch (cmd) {
	case USTCTL_NOTIFY_CMD_EVENT:
	{
		int sobjd, cobjd, loglevel;
		char name[LTTNG_UST_SYM_NAME_LEN], *sig, *model_emf_uri;
		size_t nr_fields;
		struct ustctl_field *fields;

		DBG2("UST app ustctl register event received");

		/* sig, fields and model_emf_uri are heap-allocated by ustctl. */
		ret = ustctl_recv_register_event(sock, &sobjd, &cobjd, name, &loglevel,
				&sig, &nr_fields, &fields, &model_emf_uri);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app recv event failed with ret %d", ret);
			} else {
				DBG3("UST app recv event failed. Application died");
			}
			goto error;
		}

		/*
		 * Add event to the UST registry coming from the notify socket. This
		 * call will free if needed the sig, fields and model_emf_uri. This
		 * code path loses the ownsership of these variables and transfer them
		 * to the this function.
		 */
		ret = add_event_ust_registry(sock, sobjd, cobjd, name, sig, nr_fields,
				fields, loglevel, model_emf_uri);
		if (ret < 0) {
			goto error;
		}

		break;
	}
	case USTCTL_NOTIFY_CMD_CHANNEL:
	{
		int sobjd, cobjd;
		size_t nr_fields;
		struct ustctl_field *fields;

		DBG2("UST app ustctl register channel received");

		/* fields is heap-allocated by ustctl. */
		ret = ustctl_recv_register_channel(sock, &sobjd, &cobjd, &nr_fields,
				&fields);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app recv channel failed with ret %d", ret);
			} else {
				DBG3("UST app recv channel failed. Application died");
			}
			goto error;
		}

		/*
		 * The fields ownership are transfered to this function call meaning
		 * that if needed it will be freed. After this, it's invalid to access
		 * fields or clean it up.
		 */
		ret = reply_ust_register_channel(sock, sobjd, cobjd, nr_fields,
				fields);
		if (ret < 0) {
			goto error;
		}

		break;
	}
	default:
		/* Should NEVER happen. */
		assert(0);
	}

error:
	return ret;
}
4973
/*
 * Once the notify socket hangs up, this is called. First, it tries to find the
 * corresponding application. On failure, the call_rcu to close the socket is
 * executed. If an application is found, it tries to delete it from the notify
 * socket hash table. Whathever the result, it proceeds to the call_rcu.
 *
 * Note that an object needs to be allocated here so on ENOMEM failure, the
 * call RCU is not done but the rest of the cleanup is.
 */
void ust_app_notify_sock_unregister(int sock)
{
	int err_enomem = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct ust_app_notify_sock_obj *obj;

	assert(sock >= 0);

	rcu_read_lock();

	/* Carries the fd to close once a grace period has elapsed. */
	obj = zmalloc(sizeof(*obj));
	if (!obj) {
		/*
		 * An ENOMEM is kind of uncool. If this strikes we continue the
		 * procedure but the call_rcu will not be called. In this case, we
		 * accept the fd leak rather than possibly creating an unsynchronized
		 * state between threads.
		 *
		 * TODO: The notify object should be created once the notify socket is
		 * registered and stored independantely from the ust app object. The
		 * tricky part is to synchronize the teardown of the application and
		 * this notify object. Let's keep that in mind so we can avoid this
		 * kind of shenanigans with ENOMEM in the teardown path.
		 */
		err_enomem = 1;
	} else {
		obj->fd = sock;
	}

	DBG("UST app notify socket unregister %d", sock);

	/*
	 * Lookup application by notify socket. If this fails, this means that the
	 * hash table delete has already been done by the application
	 * unregistration process so we can safely close the notify socket in a
	 * call RCU.
	 */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		goto close_socket;
	}

	iter.iter.node = &app->notify_sock_n.node;

	/*
	 * Whatever happens here either we fail or succeed, in both cases we have
	 * to close the socket after a grace period to continue to the call RCU
	 * here. If the deletion is successful, the application is not visible
	 * anymore by other threads and is it fails it means that it was already
	 * deleted from the hash table so either way we just have to close the
	 * socket.
	 */
	(void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);

close_socket:
	rcu_read_unlock();

	/*
	 * Close socket after a grace period to avoid for the socket to be reused
	 * before the application object is freed creating potential race between
	 * threads trying to add unique in the global hash table.
	 */
	if (!err_enomem) {
		call_rcu(&obj->head, close_notify_sock_rcu);
	}
}
5050
5051 /*
5052 * Destroy a ust app data structure and free its memory.
5053 */
5054 void ust_app_destroy(struct ust_app *app)
5055 {
5056 if (!app) {
5057 return;
5058 }
5059
5060 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
5061 }
5062
/*
 * Take a snapshot for a given UST session. The snapshot is sent to the given
 * output.
 *
 * Every data channel is snapshotted first, then the metadata channel, for
 * each per-UID registry or each per-PID application session depending on
 * the session buffer type.
 *
 * Return 0 on success or else a negative value.
 */
int ust_app_snapshot_record(struct ltt_ust_session *usess,
		struct snapshot_output *output, int wait, unsigned int nb_streams)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	char pathname[PATH_MAX];
	uint64_t max_stream_size = 0;

	assert(usess);
	assert(output);

	rcu_read_lock();

	/*
	 * Compute the maximum size of a single stream if a max size is asked by
	 * the caller.
	 */
	if (output->max_size > 0 && nb_streams > 0) {
		max_stream_size = output->max_size / nb_streams;
	}

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;

		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct buffer_reg_channel *reg_chan;
			struct consumer_socket *socket;

			/* Get consumer socket to use to push the metadata.*/
			socket = consumer_find_socket_by_bitness(reg->bits_per_long,
					usess->consumer);
			if (!socket) {
				ret = -EINVAL;
				goto error;
			}

			/* Build the per-UID trace directory path. */
			memset(pathname, 0, sizeof(pathname));
			ret = snprintf(pathname, sizeof(pathname),
					DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH,
					reg->uid, reg->bits_per_long);
			if (ret < 0) {
				PERROR("snprintf snapshot path");
				goto error;
			}

			/* Snapshot every data channel of this UID registry. */
			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
					reg_chan, node.node) {

				/*
				 * Make sure the maximum stream size is not lower than the
				 * subbuffer size or else it's an error since we won't be able to
				 * snapshot anything.
				 */
				if (max_stream_size &&
						reg_chan->subbuf_size > max_stream_size) {
					ret = -EINVAL;
					DBG3("UST app snapshot record maximum stream size %" PRIu64
							" is smaller than subbuffer size of %zu",
							max_stream_size, reg_chan->subbuf_size);
					goto error;
				}
				ret = consumer_snapshot_channel(socket, reg_chan->consumer_key, output, 0,
						usess->uid, usess->gid, pathname, wait,
						max_stream_size);
				if (ret < 0) {
					goto error;
				}
			}
			/* Then snapshot the metadata channel (second arg 1). */
			ret = consumer_snapshot_channel(socket, reg->registry->reg.ust->metadata_key, output,
					1, usess->uid, usess->gid, pathname, wait,
					max_stream_size);
			if (ret < 0) {
				goto error;
			}
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			struct consumer_socket *socket;
			struct lttng_ht_iter chan_iter;
			struct ust_app_channel *ua_chan;
			struct ust_app_session *ua_sess;
			struct ust_registry_session *registry;

			ua_sess = lookup_session_by_app(usess, app);
			if (!ua_sess) {
				/* Session not associated with this app. */
				continue;
			}

			/* Get the right consumer socket for the application. */
			socket = consumer_find_socket_by_bitness(app->bits_per_long,
					output->consumer);
			if (!socket) {
				ret = -EINVAL;
				goto error;
			}

			/* Add the UST default trace dir to path. */
			memset(pathname, 0, sizeof(pathname));
			ret = snprintf(pathname, sizeof(pathname), DEFAULT_UST_TRACE_DIR "/%s",
					ua_sess->path);
			if (ret < 0) {
				PERROR("snprintf snapshot path");
				goto error;
			}

			/* Snapshot every data channel of this app session. */
			cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
					ua_chan, node.node) {
				/*
				 * Make sure the maximum stream size is not lower than the
				 * subbuffer size or else it's an error since we won't be able to
				 * snapshot anything.
				 */
				if (max_stream_size &&
						ua_chan->attr.subbuf_size > max_stream_size) {
					ret = -EINVAL;
					DBG3("UST app snapshot record maximum stream size %" PRIu64
							" is smaller than subbuffer size of %" PRIu64,
							max_stream_size, ua_chan->attr.subbuf_size);
					goto error;
				}

				ret = consumer_snapshot_channel(socket, ua_chan->key, output, 0,
						ua_sess->euid, ua_sess->egid, pathname, wait,
						max_stream_size);
				if (ret < 0) {
					goto error;
				}
			}

			/* Then snapshot the metadata channel (second arg 1). */
			registry = get_session_registry(ua_sess);
			assert(registry);
			ret = consumer_snapshot_channel(socket, registry->metadata_key, output,
					1, ua_sess->euid, ua_sess->egid, pathname, wait,
					max_stream_size);
			if (ret < 0) {
				goto error;
			}
		}
		break;
	}
	default:
		assert(0);
		break;
	}

error:
	rcu_read_unlock();
	return ret;
}
5226
5227 /*
5228 * Return the number of streams for a UST session.
5229 */
5230 unsigned int ust_app_get_nb_stream(struct ltt_ust_session *usess)
5231 {
5232 unsigned int ret = 0;
5233 struct ust_app *app;
5234 struct lttng_ht_iter iter;
5235
5236 assert(usess);
5237
5238 switch (usess->buffer_type) {
5239 case LTTNG_BUFFER_PER_UID:
5240 {
5241 struct buffer_reg_uid *reg;
5242
5243 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
5244 struct buffer_reg_channel *reg_chan;
5245
5246 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
5247 reg_chan, node.node) {
5248 ret += reg_chan->stream_count;
5249 }
5250 }
5251 break;
5252 }
5253 case LTTNG_BUFFER_PER_PID:
5254 {
5255 rcu_read_lock();
5256 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5257 struct ust_app_channel *ua_chan;
5258 struct ust_app_session *ua_sess;
5259 struct lttng_ht_iter chan_iter;
5260
5261 ua_sess = lookup_session_by_app(usess, app);
5262 if (!ua_sess) {
5263 /* Session not associated with this app. */
5264 continue;
5265 }
5266
5267 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
5268 ua_chan, node.node) {
5269 ret += ua_chan->streams.count;
5270 }
5271 }
5272 rcu_read_unlock();
5273 break;
5274 }
5275 default:
5276 assert(0);
5277 break;
5278 }
5279
5280 return ret;
5281 }
This page took 0.175922 seconds and 5 git commands to generate.