Fix: add UST context in the same order the user enabled them
[lttng-tools.git] / src / bin / lttng-sessiond / ust-app.c
1 /*
2 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License, version 2 only,
6 * as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License along
14 * with this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
16 */
17
18 #define _GNU_SOURCE
19 #include <errno.h>
20 #include <inttypes.h>
21 #include <pthread.h>
22 #include <stdio.h>
23 #include <stdlib.h>
24 #include <string.h>
25 #include <sys/stat.h>
26 #include <sys/types.h>
27 #include <unistd.h>
28 #include <urcu/compiler.h>
29 #include <lttng/ust-error.h>
30 #include <signal.h>
31
32 #include <common/common.h>
33 #include <common/sessiond-comm/sessiond-comm.h>
34
35 #include "buffer-registry.h"
36 #include "fd-limit.h"
37 #include "health.h"
38 #include "ust-app.h"
39 #include "ust-consumer.h"
40 #include "ust-ctl.h"
41 #include "utils.h"
42
43 /* Next available channel key. Access under next_channel_key_lock. */
44 static uint64_t _next_channel_key;
45 static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;
46
47 /* Next available session ID. Access under next_session_id_lock. */
48 static uint64_t _next_session_id;
49 static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;
50
51 /*
52 * Return the incremented value of next_channel_key.
53 */
54 static uint64_t get_next_channel_key(void)
55 {
56 uint64_t ret;
57
58 pthread_mutex_lock(&next_channel_key_lock);
59 ret = ++_next_channel_key;
60 pthread_mutex_unlock(&next_channel_key_lock);
61 return ret;
62 }
63
64 /*
65 * Return the atomically incremented value of next_session_id.
66 */
67 static uint64_t get_next_session_id(void)
68 {
69 uint64_t ret;
70
71 pthread_mutex_lock(&next_session_id_lock);
72 ret = ++_next_session_id;
73 pthread_mutex_unlock(&next_session_id_lock);
74 return ret;
75 }
76
77 static void copy_channel_attr_to_ustctl(
78 struct ustctl_consumer_channel_attr *attr,
79 struct lttng_ust_channel_attr *uattr)
80 {
81 /* Copy event attributes since the layout is different. */
82 attr->subbuf_size = uattr->subbuf_size;
83 attr->num_subbuf = uattr->num_subbuf;
84 attr->overwrite = uattr->overwrite;
85 attr->switch_timer_interval = uattr->switch_timer_interval;
86 attr->read_timer_interval = uattr->read_timer_interval;
87 attr->output = uattr->output;
88 }
89
/*
 * Match function for the hash table lookup.
 *
 * It matches an ust app event based on three attributes which are the event
 * name, the filter bytecode and the loglevel.
 */
static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
{
	struct ust_app_event *event;
	const struct ust_app_ht_key *key;

	assert(node);
	assert(_key);

	/* Recover the enclosing event from its embedded hash table node. */
	event = caa_container_of(node, struct ust_app_event, node.node);
	key = _key;

	/* Match the 3 elements of the key: name, filter and loglevel. */

	/* Event name */
	if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
		goto no_match;
	}

	/* Event loglevel. */
	if (event->attr.loglevel != key->loglevel) {
		if (event->attr.loglevel_type == LTTNG_UST_LOGLEVEL_ALL
				&& key->loglevel == 0 && event->attr.loglevel == -1) {
			/*
			 * Match is accepted. This is because on event creation, the
			 * loglevel is set to -1 if the event loglevel type is ALL so 0 and
			 * -1 are accepted for this loglevel type since 0 is the one set by
			 * the API when receiving an enable event.
			 */
		} else {
			goto no_match;
		}
	}

	/* One of the filters is NULL, fail. */
	if ((key->filter && !event->filter) || (!key->filter && event->filter)) {
		goto no_match;
	}

	if (key->filter && event->filter) {
		/* Both filters exists, check length followed by the bytecode. */
		if (event->filter->len != key->filter->len ||
				memcmp(event->filter->data, key->filter->data,
					event->filter->len) != 0) {
			goto no_match;
		}
	}

	/* Match. */
	return 1;

no_match:
	return 0;
}
149
150 /*
151 * Unique add of an ust app event in the given ht. This uses the custom
152 * ht_match_ust_app_event match function and the event name as hash.
153 */
154 static void add_unique_ust_app_event(struct ust_app_channel *ua_chan,
155 struct ust_app_event *event)
156 {
157 struct cds_lfht_node *node_ptr;
158 struct ust_app_ht_key key;
159 struct lttng_ht *ht;
160
161 assert(ua_chan);
162 assert(ua_chan->events);
163 assert(event);
164
165 ht = ua_chan->events;
166 key.name = event->attr.name;
167 key.filter = event->filter;
168 key.loglevel = event->attr.loglevel;
169
170 node_ptr = cds_lfht_add_unique(ht->ht,
171 ht->hash_fct(event->node.key, lttng_ht_seed),
172 ht_match_ust_app_event, &key, &event->node.node);
173 assert(node_ptr == &event->node.node);
174 }
175
176 /*
177 * Close the notify socket from the given RCU head object. This MUST be called
178 * through a call_rcu().
179 */
180 static void close_notify_sock_rcu(struct rcu_head *head)
181 {
182 int ret;
183 struct ust_app_notify_sock_obj *obj =
184 caa_container_of(head, struct ust_app_notify_sock_obj, head);
185
186 /* Must have a valid fd here. */
187 assert(obj->fd >= 0);
188
189 ret = close(obj->fd);
190 if (ret) {
191 ERR("close notify sock %d RCU", obj->fd);
192 }
193 lttng_fd_put(LTTNG_FD_APPS, 1);
194
195 free(obj);
196 }
197
198 /*
199 * Return the session registry according to the buffer type of the given
200 * session.
201 *
202 * A registry per UID object MUST exists before calling this function or else
203 * it assert() if not found. RCU read side lock must be acquired.
204 */
205 static struct ust_registry_session *get_session_registry(
206 struct ust_app_session *ua_sess)
207 {
208 struct ust_registry_session *registry = NULL;
209
210 assert(ua_sess);
211
212 switch (ua_sess->buffer_type) {
213 case LTTNG_BUFFER_PER_PID:
214 {
215 struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
216 if (!reg_pid) {
217 goto error;
218 }
219 registry = reg_pid->registry->reg.ust;
220 break;
221 }
222 case LTTNG_BUFFER_PER_UID:
223 {
224 struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
225 ua_sess->tracing_id, ua_sess->bits_per_long, ua_sess->uid);
226 if (!reg_uid) {
227 goto error;
228 }
229 registry = reg_uid->registry->reg.ust;
230 break;
231 }
232 default:
233 assert(0);
234 };
235
236 error:
237 return registry;
238 }
239
240 /*
241 * Delete ust context safely. RCU read lock must be held before calling
242 * this function.
243 */
244 static
245 void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx)
246 {
247 int ret;
248
249 assert(ua_ctx);
250
251 if (ua_ctx->obj) {
252 ret = ustctl_release_object(sock, ua_ctx->obj);
253 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
254 ERR("UST app sock %d release ctx obj handle %d failed with ret %d",
255 sock, ua_ctx->obj->handle, ret);
256 }
257 free(ua_ctx->obj);
258 }
259 free(ua_ctx);
260 }
261
262 /*
263 * Delete ust app event safely. RCU read lock must be held before calling
264 * this function.
265 */
266 static
267 void delete_ust_app_event(int sock, struct ust_app_event *ua_event)
268 {
269 int ret;
270
271 assert(ua_event);
272
273 free(ua_event->filter);
274
275 if (ua_event->obj != NULL) {
276 ret = ustctl_release_object(sock, ua_event->obj);
277 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
278 ERR("UST app sock %d release event obj failed with ret %d",
279 sock, ret);
280 }
281 free(ua_event->obj);
282 }
283 free(ua_event);
284 }
285
286 /*
287 * Release ust data object of the given stream.
288 *
289 * Return 0 on success or else a negative value.
290 */
291 static int release_ust_app_stream(int sock, struct ust_app_stream *stream)
292 {
293 int ret = 0;
294
295 assert(stream);
296
297 if (stream->obj) {
298 ret = ustctl_release_object(sock, stream->obj);
299 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
300 ERR("UST app sock %d release stream obj failed with ret %d",
301 sock, ret);
302 }
303 lttng_fd_put(LTTNG_FD_APPS, 2);
304 free(stream->obj);
305 }
306
307 return ret;
308 }
309
/*
 * Delete ust app stream safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_stream(int sock, struct ust_app_stream *stream)
{
	assert(stream);

	/* Best effort release; the stream is freed regardless. */
	(void) release_ust_app_stream(sock, stream);
	free(stream);
}
322
323 /*
324 * We need to execute ht_destroy outside of RCU read-side critical
325 * section and outside of call_rcu thread, so we postpone its execution
326 * using ht_cleanup_push. It is simpler than to change the semantic of
327 * the many callers of delete_ust_app_session().
328 */
329 static
330 void delete_ust_app_channel_rcu(struct rcu_head *head)
331 {
332 struct ust_app_channel *ua_chan =
333 caa_container_of(head, struct ust_app_channel, rcu_head);
334
335 ht_cleanup_push(ua_chan->ctx);
336 ht_cleanup_push(ua_chan->events);
337 free(ua_chan);
338 }
339
/*
 * Delete ust app channel safely. RCU read lock must be held before calling
 * this function.
 *
 * Tears down, in order: streams, contexts, events, the per-PID registry
 * channel (if applicable) and finally the tracer-side channel object.
 * The channel structure itself is freed through call_rcu.
 */
static
void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;
	struct ust_app_stream *stream, *stmp;
	struct ust_registry_session *registry;

	assert(ua_chan);

	DBG3("UST app deleting channel %s", ua_chan->name);

	/* Wipe stream */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		cds_list_del(&stream->list);
		delete_ust_app_stream(sock, stream);
	}

	/* Wipe context */
	cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
		/* Contexts are on both a list and a hash table; remove from both. */
		cds_list_del(&ua_ctx->list);
		ret = lttng_ht_del(ua_chan->ctx, &iter);
		assert(!ret);
		delete_ust_app_ctx(sock, ua_ctx);
	}

	/* Wipe events */
	cds_lfht_for_each_entry(ua_chan->events->ht, &iter.iter, ua_event,
			node.node) {
		ret = lttng_ht_del(ua_chan->events, &iter);
		assert(!ret);
		delete_ust_app_event(sock, ua_event);
	}

	if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
		/* Wipe and free registry from session registry. */
		registry = get_session_registry(ua_chan->session);
		if (registry) {
			ust_registry_channel_del_free(registry, ua_chan->key);
		}
	}

	if (ua_chan->obj != NULL) {
		/* Remove channel from application UST object descriptor. */
		iter.iter.node = &ua_chan->ust_objd_node.node;
		lttng_ht_del(app->ust_objd, &iter);
		ret = ustctl_release_object(sock, ua_chan->obj);
		/* EPIPE/EXITING simply mean the application is gone. */
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release channel obj failed with ret %d",
					sock, ret);
		}
		lttng_fd_put(LTTNG_FD_APPS, 1);
		free(ua_chan->obj);
	}
	/* Free the channel (and its hash tables) after a grace period. */
	call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
}
403
/*
 * Push metadata to consumer socket.
 *
 * The socket lock MUST be acquired.
 * The ust app session lock MUST be acquired.
 *
 * On success, return the len of metadata pushed or else a negative value.
 */
ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
		struct consumer_socket *socket, int send_zero_data)
{
	int ret;
	char *metadata_str = NULL;
	size_t len, offset;
	ssize_t ret_val;

	assert(registry);
	assert(socket);

	/*
	 * On a push metadata error either the consumer is dead or the metadata
	 * channel has been destroyed because its endpoint might have died (e.g:
	 * relayd). If so, the metadata closed flag is set to 1 so we deny pushing
	 * metadata again which is not valid anymore on the consumer side.
	 *
	 * The ust app session mutex locked allows us to make this check without
	 * the registry lock.
	 */
	if (registry->metadata_closed) {
		return -EPIPE;
	}

	pthread_mutex_lock(&registry->lock);

	/* Only the portion not yet sent is pushed. */
	offset = registry->metadata_len_sent;
	len = registry->metadata_len - registry->metadata_len_sent;
	if (len == 0) {
		DBG3("No metadata to push for metadata key %" PRIu64,
				registry->metadata_key);
		ret_val = len;
		if (send_zero_data) {
			DBG("No metadata to push");
			/* metadata_str stays NULL; a zero-length push is sent. */
			goto push_data;
		}
		goto end;
	}

	/* Allocate only what we have to send. */
	metadata_str = zmalloc(len);
	if (!metadata_str) {
		PERROR("zmalloc ust app metadata string");
		ret_val = -ENOMEM;
		goto error;
	}
	/* Copy what we haven't send out. */
	memcpy(metadata_str, registry->metadata + offset, len);
	/*
	 * Account the bytes as sent before releasing the registry lock so a
	 * concurrent push samples the next segment.
	 */
	registry->metadata_len_sent += len;

push_data:
	/* The registry lock is released before the (blocking) socket send. */
	pthread_mutex_unlock(&registry->lock);
	ret = consumer_push_metadata(socket, registry->metadata_key,
			metadata_str, len, offset);
	if (ret < 0) {
		ret_val = ret;
		goto error_push;
	}

	free(metadata_str);
	return len;

end:
error:
	/* Reached only with the registry lock still held. */
	pthread_mutex_unlock(&registry->lock);
error_push:
	free(metadata_str);
	return ret_val;
}
481
/*
 * For a given application and session, push metadata to consumer. The session
 * lock MUST be acquired here before calling this.
 * Either sock or consumer is required : if sock is NULL, the default
 * socket to send the metadata is retrieved from consumer, if sock
 * is not NULL we use it to send the metadata.
 *
 * Return 0 on success else a negative error.
 */
static int push_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret_val;
	ssize_t ret;
	struct consumer_socket *socket;

	assert(registry);
	assert(consumer);

	rcu_read_lock();

	/*
	 * Means that no metadata was assigned to the session. This can happens if
	 * no start has been done previously.
	 */
	if (!registry->metadata_key) {
		/* Nothing to push; not an error. */
		ret_val = 0;
		goto end_rcu_unlock;
	}

	/* Get consumer socket to use to push the metadata.*/
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret_val = -1;
		goto error_rcu_unlock;
	}

	/*
	 * TODO: Currently, we hold the socket lock around sampling of the next
	 * metadata segment to ensure we send metadata over the consumer socket in
	 * the correct order. This makes the registry lock nest inside the socket
	 * lock.
	 *
	 * Please note that this is a temporary measure: we should move this lock
	 * back into ust_consumer_push_metadata() when the consumer gets the
	 * ability to reorder the metadata it receives.
	 */
	pthread_mutex_lock(socket->lock);
	ret = ust_app_push_metadata(registry, socket, 0);
	pthread_mutex_unlock(socket->lock);
	if (ret < 0) {
		ret_val = ret;
		goto error_rcu_unlock;
	}

	rcu_read_unlock();
	return 0;

error_rcu_unlock:
	/*
	 * On error, flag the registry that the metadata is closed. We were unable
	 * to push anything and this means that either the consumer is not
	 * responding or the metadata cache has been destroyed on the consumer.
	 */
	registry->metadata_closed = 1;
end_rcu_unlock:
	rcu_read_unlock();
	return ret_val;
}
552
/*
 * Send to the consumer a close metadata command for the given session. Once
 * done, the metadata channel is deleted and the session metadata pointer is
 * nullified. The session lock MUST be acquired here unless the application is
 * in the destroy path.
 *
 * Return 0 on success else a negative value.
 */
static int close_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret;
	struct consumer_socket *socket;

	assert(registry);
	assert(consumer);

	rcu_read_lock();

	/* Nothing to do if never assigned or already closed. */
	if (!registry->metadata_key || registry->metadata_closed) {
		ret = 0;
		goto end;
	}

	/* Get consumer socket to use to push the metadata.*/
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret = -1;
		goto error;
	}

	ret = consumer_close_metadata(socket, registry->metadata_key);
	if (ret < 0) {
		goto error;
	}

	/* Note: the success path intentionally falls through the error label. */
error:
	/*
	 * Metadata closed. Even on error this means that the consumer is not
	 * responding or not found so either way a second close should NOT be emit
	 * for this registry.
	 */
	registry->metadata_closed = 1;
end:
	rcu_read_unlock();
	return ret;
}
601
602 /*
603 * We need to execute ht_destroy outside of RCU read-side critical
604 * section and outside of call_rcu thread, so we postpone its execution
605 * using ht_cleanup_push. It is simpler than to change the semantic of
606 * the many callers of delete_ust_app_session().
607 */
608 static
609 void delete_ust_app_session_rcu(struct rcu_head *head)
610 {
611 struct ust_app_session *ua_sess =
612 caa_container_of(head, struct ust_app_session, rcu_head);
613
614 ht_cleanup_push(ua_sess->channels);
615 free(ua_sess);
616 }
617
/*
 * Delete ust app session safely. RCU read lock must be held before calling
 * this function.
 *
 * Flushes remaining metadata (and closes it for per-PID buffers), wipes every
 * channel, removes the per-PID buffer registry if any, then releases the
 * tracer-side session handle. The structure itself is freed via call_rcu.
 */
static
void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_channel *ua_chan;
	struct ust_registry_session *registry;

	assert(ua_sess);

	pthread_mutex_lock(&ua_sess->lock);

	registry = get_session_registry(ua_sess);
	if (registry && !registry->metadata_closed) {
		/* Push metadata for application before freeing the application. */
		(void) push_metadata(registry, ua_sess->consumer);

		/*
		 * Don't ask to close metadata for global per UID buffers. Close
		 * metadata only on destroy trace session in this case. Also, the
		 * previous push metadata could have flag the metadata registry to
		 * close so don't send a close command if closed.
		 */
		if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID &&
				!registry->metadata_closed) {
			/* And ask to close it for this session registry. */
			(void) close_metadata(registry, ua_sess->consumer);
		}
	}

	/* Wipe every channel of the session. */
	cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
			node.node) {
		ret = lttng_ht_del(ua_sess->channels, &iter);
		assert(!ret);
		delete_ust_app_channel(sock, ua_chan, app);
	}

	/* In case of per PID, the registry is kept in the session. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
		struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
		if (reg_pid) {
			buffer_reg_pid_remove(reg_pid);
			buffer_reg_pid_destroy(reg_pid);
		}
	}

	if (ua_sess->handle != -1) {
		ret = ustctl_release_handle(sock, ua_sess->handle);
		/* EPIPE/EXITING simply mean the application is gone. */
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release session handle failed with ret %d",
					sock, ret);
		}
	}
	pthread_mutex_unlock(&ua_sess->lock);

	/* Free the session (and its channels ht) after a grace period. */
	call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
}
680
/*
 * Delete a traceable application structure from the global list. Never call
 * this function outside of a call_rcu call.
 *
 * RCU read side lock should _NOT_ be held when calling this function.
 */
static
void delete_ust_app(struct ust_app *app)
{
	int ret, sock;
	struct ust_app_session *ua_sess, *tmp_ua_sess;

	/* Delete ust app sessions info */
	sock = app->sock;
	/* Invalidate the sock so no further command can use it. */
	app->sock = -1;

	/* Wipe sessions */
	cds_list_for_each_entry_safe(ua_sess, tmp_ua_sess, &app->teardown_head,
			teardown_node) {
		/* Free every object in the session and the session. */
		rcu_read_lock();
		delete_ust_app_session(sock, ua_sess, app);
		rcu_read_unlock();
	}

	/* Defer hash table destruction to the ht cleanup thread. */
	ht_cleanup_push(app->sessions);
	ht_cleanup_push(app->ust_objd);

	/*
	 * Wait until we have deleted the application from the sock hash table
	 * before closing this socket, otherwise an application could re-use the
	 * socket ID and race with the teardown, using the same hash table entry.
	 *
	 * It's OK to leave the close in call_rcu. We want it to stay unique for
	 * all RCU readers that could run concurrently with unregister app,
	 * therefore we _need_ to only close that socket after a grace period. So
	 * it should stay in this RCU callback.
	 *
	 * This close() is a very important step of the synchronization model so
	 * every modification to this function must be carefully reviewed.
	 */
	ret = close(sock);
	if (ret) {
		PERROR("close");
	}
	/* Release the fd slot accounted for the command socket. */
	lttng_fd_put(LTTNG_FD_APPS, 1);

	DBG2("UST app pid %d deleted", app->pid);
	free(app);
}
731
732 /*
733 * URCU intermediate call to delete an UST app.
734 */
735 static
736 void delete_ust_app_rcu(struct rcu_head *head)
737 {
738 struct lttng_ht_node_ulong *node =
739 caa_container_of(head, struct lttng_ht_node_ulong, head);
740 struct ust_app *app =
741 caa_container_of(node, struct ust_app, pid_n);
742
743 DBG3("Call RCU deleting app PID %d", app->pid);
744 delete_ust_app(app);
745 }
746
747 /*
748 * Delete the session from the application ht and delete the data structure by
749 * freeing every object inside and releasing them.
750 */
751 static void destroy_app_session(struct ust_app *app,
752 struct ust_app_session *ua_sess)
753 {
754 int ret;
755 struct lttng_ht_iter iter;
756
757 assert(app);
758 assert(ua_sess);
759
760 iter.iter.node = &ua_sess->node.node;
761 ret = lttng_ht_del(app->sessions, &iter);
762 if (ret) {
763 /* Already scheduled for teardown. */
764 goto end;
765 }
766
767 /* Once deleted, free the data structure. */
768 delete_ust_app_session(app->sock, ua_sess, app);
769
770 end:
771 return;
772 }
773
774 /*
775 * Alloc new UST app session.
776 */
777 static
778 struct ust_app_session *alloc_ust_app_session(struct ust_app *app)
779 {
780 struct ust_app_session *ua_sess;
781
782 /* Init most of the default value by allocating and zeroing */
783 ua_sess = zmalloc(sizeof(struct ust_app_session));
784 if (ua_sess == NULL) {
785 PERROR("malloc");
786 goto error_free;
787 }
788
789 ua_sess->handle = -1;
790 ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
791 pthread_mutex_init(&ua_sess->lock, NULL);
792
793 return ua_sess;
794
795 error_free:
796 return NULL;
797 }
798
799 /*
800 * Alloc new UST app channel.
801 */
802 static
803 struct ust_app_channel *alloc_ust_app_channel(char *name,
804 struct ust_app_session *ua_sess,
805 struct lttng_ust_channel_attr *attr)
806 {
807 struct ust_app_channel *ua_chan;
808
809 /* Init most of the default value by allocating and zeroing */
810 ua_chan = zmalloc(sizeof(struct ust_app_channel));
811 if (ua_chan == NULL) {
812 PERROR("malloc");
813 goto error;
814 }
815
816 /* Setup channel name */
817 strncpy(ua_chan->name, name, sizeof(ua_chan->name));
818 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
819
820 ua_chan->enabled = 1;
821 ua_chan->handle = -1;
822 ua_chan->session = ua_sess;
823 ua_chan->key = get_next_channel_key();
824 ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
825 ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
826 lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);
827
828 CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
829 CDS_INIT_LIST_HEAD(&ua_chan->ctx_list);
830
831 /* Copy attributes */
832 if (attr) {
833 /* Translate from lttng_ust_channel to ustctl_consumer_channel_attr. */
834 ua_chan->attr.subbuf_size = attr->subbuf_size;
835 ua_chan->attr.num_subbuf = attr->num_subbuf;
836 ua_chan->attr.overwrite = attr->overwrite;
837 ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
838 ua_chan->attr.read_timer_interval = attr->read_timer_interval;
839 ua_chan->attr.output = attr->output;
840 }
841 /* By default, the channel is a per cpu channel. */
842 ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
843
844 DBG3("UST app channel %s allocated", ua_chan->name);
845
846 return ua_chan;
847
848 error:
849 return NULL;
850 }
851
852 /*
853 * Allocate and initialize a UST app stream.
854 *
855 * Return newly allocated stream pointer or NULL on error.
856 */
857 struct ust_app_stream *ust_app_alloc_stream(void)
858 {
859 struct ust_app_stream *stream = NULL;
860
861 stream = zmalloc(sizeof(*stream));
862 if (stream == NULL) {
863 PERROR("zmalloc ust app stream");
864 goto error;
865 }
866
867 /* Zero could be a valid value for a handle so flag it to -1. */
868 stream->handle = -1;
869
870 error:
871 return stream;
872 }
873
874 /*
875 * Alloc new UST app event.
876 */
877 static
878 struct ust_app_event *alloc_ust_app_event(char *name,
879 struct lttng_ust_event *attr)
880 {
881 struct ust_app_event *ua_event;
882
883 /* Init most of the default value by allocating and zeroing */
884 ua_event = zmalloc(sizeof(struct ust_app_event));
885 if (ua_event == NULL) {
886 PERROR("malloc");
887 goto error;
888 }
889
890 ua_event->enabled = 1;
891 strncpy(ua_event->name, name, sizeof(ua_event->name));
892 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
893 lttng_ht_node_init_str(&ua_event->node, ua_event->name);
894
895 /* Copy attributes */
896 if (attr) {
897 memcpy(&ua_event->attr, attr, sizeof(ua_event->attr));
898 }
899
900 DBG3("UST app event %s allocated", ua_event->name);
901
902 return ua_event;
903
904 error:
905 return NULL;
906 }
907
908 /*
909 * Alloc new UST app context.
910 */
911 static
912 struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context *uctx)
913 {
914 struct ust_app_ctx *ua_ctx;
915
916 ua_ctx = zmalloc(sizeof(struct ust_app_ctx));
917 if (ua_ctx == NULL) {
918 goto error;
919 }
920
921 CDS_INIT_LIST_HEAD(&ua_ctx->list);
922
923 if (uctx) {
924 memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
925 }
926
927 DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);
928
929 error:
930 return ua_ctx;
931 }
932
933 /*
934 * Allocate a filter and copy the given original filter.
935 *
936 * Return allocated filter or NULL on error.
937 */
938 static struct lttng_ust_filter_bytecode *alloc_copy_ust_app_filter(
939 struct lttng_ust_filter_bytecode *orig_f)
940 {
941 struct lttng_ust_filter_bytecode *filter = NULL;
942
943 /* Copy filter bytecode */
944 filter = zmalloc(sizeof(*filter) + orig_f->len);
945 if (!filter) {
946 PERROR("zmalloc alloc ust app filter");
947 goto error;
948 }
949
950 memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
951
952 error:
953 return filter;
954 }
955
956 /*
957 * Find an ust_app using the sock and return it. RCU read side lock must be
958 * held before calling this helper function.
959 */
960 static
961 struct ust_app *find_app_by_sock(int sock)
962 {
963 struct lttng_ht_node_ulong *node;
964 struct lttng_ht_iter iter;
965
966 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
967 node = lttng_ht_iter_get_node_ulong(&iter);
968 if (node == NULL) {
969 DBG2("UST app find by sock %d not found", sock);
970 goto error;
971 }
972
973 return caa_container_of(node, struct ust_app, sock_n);
974
975 error:
976 return NULL;
977 }
978
979 /*
980 * Find an ust_app using the notify sock and return it. RCU read side lock must
981 * be held before calling this helper function.
982 */
983 static struct ust_app *find_app_by_notify_sock(int sock)
984 {
985 struct lttng_ht_node_ulong *node;
986 struct lttng_ht_iter iter;
987
988 lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
989 &iter);
990 node = lttng_ht_iter_get_node_ulong(&iter);
991 if (node == NULL) {
992 DBG2("UST app find by notify sock %d not found", sock);
993 goto error;
994 }
995
996 return caa_container_of(node, struct ust_app, notify_sock_n);
997
998 error:
999 return NULL;
1000 }
1001
1002 /*
1003 * Lookup for an ust app event based on event name, filter bytecode and the
1004 * event loglevel.
1005 *
1006 * Return an ust_app_event object or NULL on error.
1007 */
1008 static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
1009 char *name, struct lttng_ust_filter_bytecode *filter, int loglevel)
1010 {
1011 struct lttng_ht_iter iter;
1012 struct lttng_ht_node_str *node;
1013 struct ust_app_event *event = NULL;
1014 struct ust_app_ht_key key;
1015
1016 assert(name);
1017 assert(ht);
1018
1019 /* Setup key for event lookup. */
1020 key.name = name;
1021 key.filter = filter;
1022 key.loglevel = loglevel;
1023
1024 /* Lookup using the event name as hash and a custom match fct. */
1025 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
1026 ht_match_ust_app_event, &key, &iter.iter);
1027 node = lttng_ht_iter_get_node_str(&iter);
1028 if (node == NULL) {
1029 goto end;
1030 }
1031
1032 event = caa_container_of(node, struct ust_app_event, node);
1033
1034 end:
1035 return event;
1036 }
1037
1038 /*
1039 * Create the channel context on the tracer.
1040 *
1041 * Called with UST app session lock held.
1042 */
1043 static
1044 int create_ust_channel_context(struct ust_app_channel *ua_chan,
1045 struct ust_app_ctx *ua_ctx, struct ust_app *app)
1046 {
1047 int ret;
1048
1049 health_code_update();
1050
1051 ret = ustctl_add_context(app->sock, &ua_ctx->ctx,
1052 ua_chan->obj, &ua_ctx->obj);
1053 if (ret < 0) {
1054 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1055 ERR("UST app create channel context failed for app (pid: %d) "
1056 "with ret %d", app->pid, ret);
1057 } else {
1058 DBG3("UST app disable event failed. Application is dead.");
1059 }
1060 goto error;
1061 }
1062
1063 ua_ctx->handle = ua_ctx->obj->handle;
1064
1065 DBG2("UST app context handle %d created successfully for channel %s",
1066 ua_ctx->handle, ua_chan->name);
1067
1068 error:
1069 health_code_update();
1070 return ret;
1071 }
1072
1073 /*
1074 * Set the filter on the tracer.
1075 */
1076 static
1077 int set_ust_event_filter(struct ust_app_event *ua_event,
1078 struct ust_app *app)
1079 {
1080 int ret;
1081
1082 health_code_update();
1083
1084 if (!ua_event->filter) {
1085 ret = 0;
1086 goto error;
1087 }
1088
1089 ret = ustctl_set_filter(app->sock, ua_event->filter,
1090 ua_event->obj);
1091 if (ret < 0) {
1092 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1093 ERR("UST app event %s filter failed for app (pid: %d) "
1094 "with ret %d", ua_event->attr.name, app->pid, ret);
1095 } else {
1096 DBG3("UST app filter event failed. Application is dead.");
1097 }
1098 goto error;
1099 }
1100
1101 DBG2("UST filter set successfully for event %s", ua_event->name);
1102
1103 error:
1104 health_code_update();
1105 return ret;
1106 }
1107
1108 /*
1109 * Disable the specified event on to UST tracer for the UST session.
1110 */
1111 static int disable_ust_event(struct ust_app *app,
1112 struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
1113 {
1114 int ret;
1115
1116 health_code_update();
1117
1118 ret = ustctl_disable(app->sock, ua_event->obj);
1119 if (ret < 0) {
1120 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1121 ERR("UST app event %s disable failed for app (pid: %d) "
1122 "and session handle %d with ret %d",
1123 ua_event->attr.name, app->pid, ua_sess->handle, ret);
1124 } else {
1125 DBG3("UST app disable event failed. Application is dead.");
1126 }
1127 goto error;
1128 }
1129
1130 DBG2("UST app event %s disabled successfully for app (pid: %d)",
1131 ua_event->attr.name, app->pid);
1132
1133 error:
1134 health_code_update();
1135 return ret;
1136 }
1137
1138 /*
1139 * Disable the specified channel on to UST tracer for the UST session.
1140 */
1141 static int disable_ust_channel(struct ust_app *app,
1142 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1143 {
1144 int ret;
1145
1146 health_code_update();
1147
1148 ret = ustctl_disable(app->sock, ua_chan->obj);
1149 if (ret < 0) {
1150 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1151 ERR("UST app channel %s disable failed for app (pid: %d) "
1152 "and session handle %d with ret %d",
1153 ua_chan->name, app->pid, ua_sess->handle, ret);
1154 } else {
1155 DBG3("UST app disable channel failed. Application is dead.");
1156 }
1157 goto error;
1158 }
1159
1160 DBG2("UST app channel %s disabled successfully for app (pid: %d)",
1161 ua_chan->name, app->pid);
1162
1163 error:
1164 health_code_update();
1165 return ret;
1166 }
1167
1168 /*
1169 * Enable the specified channel on to UST tracer for the UST session.
1170 */
1171 static int enable_ust_channel(struct ust_app *app,
1172 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1173 {
1174 int ret;
1175
1176 health_code_update();
1177
1178 ret = ustctl_enable(app->sock, ua_chan->obj);
1179 if (ret < 0) {
1180 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1181 ERR("UST app channel %s enable failed for app (pid: %d) "
1182 "and session handle %d with ret %d",
1183 ua_chan->name, app->pid, ua_sess->handle, ret);
1184 } else {
1185 DBG3("UST app enable channel failed. Application is dead.");
1186 }
1187 goto error;
1188 }
1189
1190 ua_chan->enabled = 1;
1191
1192 DBG2("UST app channel %s enabled successfully for app (pid: %d)",
1193 ua_chan->name, app->pid);
1194
1195 error:
1196 health_code_update();
1197 return ret;
1198 }
1199
1200 /*
1201 * Enable the specified event on to UST tracer for the UST session.
1202 */
1203 static int enable_ust_event(struct ust_app *app,
1204 struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
1205 {
1206 int ret;
1207
1208 health_code_update();
1209
1210 ret = ustctl_enable(app->sock, ua_event->obj);
1211 if (ret < 0) {
1212 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1213 ERR("UST app event %s enable failed for app (pid: %d) "
1214 "and session handle %d with ret %d",
1215 ua_event->attr.name, app->pid, ua_sess->handle, ret);
1216 } else {
1217 DBG3("UST app enable event failed. Application is dead.");
1218 }
1219 goto error;
1220 }
1221
1222 DBG2("UST app event %s enabled successfully for app (pid: %d)",
1223 ua_event->attr.name, app->pid);
1224
1225 error:
1226 health_code_update();
1227 return ret;
1228 }
1229
1230 /*
1231 * Send channel and stream buffer to application.
1232 *
1233 * Return 0 on success. On error, a negative value is returned.
1234 */
1235 static int send_channel_pid_to_ust(struct ust_app *app,
1236 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1237 {
1238 int ret;
1239 struct ust_app_stream *stream, *stmp;
1240
1241 assert(app);
1242 assert(ua_sess);
1243 assert(ua_chan);
1244
1245 health_code_update();
1246
1247 DBG("UST app sending channel %s to UST app sock %d", ua_chan->name,
1248 app->sock);
1249
1250 /* Send channel to the application. */
1251 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
1252 if (ret < 0) {
1253 goto error;
1254 }
1255
1256 health_code_update();
1257
1258 /* Send all streams to application. */
1259 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
1260 ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
1261 if (ret < 0) {
1262 goto error;
1263 }
1264 /* We don't need the stream anymore once sent to the tracer. */
1265 cds_list_del(&stream->list);
1266 delete_ust_app_stream(-1, stream);
1267 }
1268 /* Flag the channel that it is sent to the application. */
1269 ua_chan->is_sent = 1;
1270
1271 error:
1272 health_code_update();
1273 return ret;
1274 }
1275
1276 /*
1277 * Create the specified event onto the UST tracer for a UST session.
1278 *
1279 * Should be called with session mutex held.
1280 */
1281 static
1282 int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
1283 struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
1284 {
1285 int ret = 0;
1286
1287 health_code_update();
1288
1289 /* Create UST event on tracer */
1290 ret = ustctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
1291 &ua_event->obj);
1292 if (ret < 0) {
1293 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1294 ERR("Error ustctl create event %s for app pid: %d with ret %d",
1295 ua_event->attr.name, app->pid, ret);
1296 } else {
1297 DBG3("UST app create event failed. Application is dead.");
1298 }
1299 goto error;
1300 }
1301
1302 ua_event->handle = ua_event->obj->handle;
1303
1304 DBG2("UST app event %s created successfully for pid:%d",
1305 ua_event->attr.name, app->pid);
1306
1307 health_code_update();
1308
1309 /* Set filter if one is present. */
1310 if (ua_event->filter) {
1311 ret = set_ust_event_filter(ua_event, app);
1312 if (ret < 0) {
1313 goto error;
1314 }
1315 }
1316
1317 /* If event not enabled, disable it on the tracer */
1318 if (ua_event->enabled == 0) {
1319 ret = disable_ust_event(app, ua_sess, ua_event);
1320 if (ret < 0) {
1321 /*
1322 * If we hit an EPERM, something is wrong with our disable call. If
1323 * we get an EEXIST, there is a problem on the tracer side since we
1324 * just created it.
1325 */
1326 switch (ret) {
1327 case -LTTNG_UST_ERR_PERM:
1328 /* Code flow problem */
1329 assert(0);
1330 case -LTTNG_UST_ERR_EXIST:
1331 /* It's OK for our use case. */
1332 ret = 0;
1333 break;
1334 default:
1335 break;
1336 }
1337 goto error;
1338 }
1339 }
1340
1341 error:
1342 health_code_update();
1343 return ret;
1344 }
1345
/*
 * Copy data between an UST app event and a LTT event.
 *
 * Copies name, enabled flag, attributes and (when present) the filter
 * bytecode from the registry event into the per-application event.
 */
static void shadow_copy_event(struct ust_app_event *ua_event,
		struct ltt_ust_event *uevent)
{
	strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
	/* strncpy does not guarantee termination; force it. */
	ua_event->name[sizeof(ua_event->name) - 1] = '\0';

	ua_event->enabled = uevent->enabled;

	/* Copy event attributes */
	memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));

	/* Copy filter bytecode */
	if (uevent->filter) {
		ua_event->filter = alloc_copy_ust_app_filter(uevent->filter);
		/* Filter might be NULL here in case of ENOMEM. */
	}
}
1366
/*
 * Copy data between an UST app channel and a LTT channel.
 *
 * ua_chan is the per-application view of the tracing registry channel
 * uchan: name, attributes, contexts and events are mirrored into it.
 */
static void shadow_copy_channel(struct ust_app_channel *ua_chan,
		struct ltt_ust_channel *uchan)
{
	struct lttng_ht_iter iter;
	struct ltt_ust_event *uevent;
	struct ltt_ust_context *uctx;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;

	DBG2("UST app shadow copy of channel %s started", ua_chan->name);

	strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
	/* strncpy does not guarantee termination; force it. */
	ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';

	ua_chan->tracefile_size = uchan->tracefile_size;
	ua_chan->tracefile_count = uchan->tracefile_count;

	/* Copy event attributes since the layout is different. */
	ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
	ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
	ua_chan->attr.overwrite = uchan->attr.overwrite;
	ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
	ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
	ua_chan->attr.output = uchan->attr.output;
	/*
	 * Note that the attribute channel type is not set since the channel on the
	 * tracing registry side does not have this information.
	 */

	ua_chan->enabled = uchan->enabled;
	ua_chan->tracing_channel_id = uchan->id;

	/*
	 * Walk the registry context list and append each copy to the app
	 * channel's ctx_list with cds_list_add_tail() so the contexts keep the
	 * exact order in which the user enabled them.
	 */
	cds_list_for_each_entry(uctx, &uchan->ctx_list, list) {
		ua_ctx = alloc_ust_app_ctx(&uctx->ctx);
		if (ua_ctx == NULL) {
			/* Allocation failed; skip this context. */
			continue;
		}
		lttng_ht_node_init_ulong(&ua_ctx->node,
				(unsigned long) ua_ctx->ctx.ctx);
		lttng_ht_add_unique_ulong(ua_chan->ctx, &ua_ctx->node);
		cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
	}

	/* Copy all events from ltt ust channel to ust app channel */
	cds_lfht_for_each_entry(uchan->events->ht, &iter.iter, uevent, node.node) {
		ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
				uevent->filter, uevent->attr.loglevel);
		if (ua_event == NULL) {
			DBG2("UST event %s not found on shadow copy channel",
					uevent->attr.name);
			ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
			if (ua_event == NULL) {
				/* Allocation failed; skip this event. */
				continue;
			}
			shadow_copy_event(ua_event, uevent);
			add_unique_ust_app_event(ua_chan, ua_event);
		}
	}

	DBG3("UST app shadow copy of channel %s done", ua_chan->name);
}
1431
1432 /*
1433 * Copy data between a UST app session and a regular LTT session.
1434 */
1435 static void shadow_copy_session(struct ust_app_session *ua_sess,
1436 struct ltt_ust_session *usess, struct ust_app *app)
1437 {
1438 struct lttng_ht_node_str *ua_chan_node;
1439 struct lttng_ht_iter iter;
1440 struct ltt_ust_channel *uchan;
1441 struct ust_app_channel *ua_chan;
1442 time_t rawtime;
1443 struct tm *timeinfo;
1444 char datetime[16];
1445 int ret;
1446
1447 /* Get date and time for unique app path */
1448 time(&rawtime);
1449 timeinfo = localtime(&rawtime);
1450 strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);
1451
1452 DBG2("Shadow copy of session handle %d", ua_sess->handle);
1453
1454 ua_sess->tracing_id = usess->id;
1455 ua_sess->id = get_next_session_id();
1456 ua_sess->uid = app->uid;
1457 ua_sess->gid = app->gid;
1458 ua_sess->euid = usess->uid;
1459 ua_sess->egid = usess->gid;
1460 ua_sess->buffer_type = usess->buffer_type;
1461 ua_sess->bits_per_long = app->bits_per_long;
1462 /* There is only one consumer object per session possible. */
1463 ua_sess->consumer = usess->consumer;
1464 ua_sess->output_traces = usess->output_traces;
1465
1466 switch (ua_sess->buffer_type) {
1467 case LTTNG_BUFFER_PER_PID:
1468 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
1469 DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
1470 datetime);
1471 break;
1472 case LTTNG_BUFFER_PER_UID:
1473 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
1474 DEFAULT_UST_TRACE_UID_PATH, ua_sess->uid, app->bits_per_long);
1475 break;
1476 default:
1477 assert(0);
1478 goto error;
1479 }
1480 if (ret < 0) {
1481 PERROR("asprintf UST shadow copy session");
1482 assert(0);
1483 goto error;
1484 }
1485
1486 /* Iterate over all channels in global domain. */
1487 cds_lfht_for_each_entry(usess->domain_global.channels->ht, &iter.iter,
1488 uchan, node.node) {
1489 struct lttng_ht_iter uiter;
1490
1491 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
1492 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
1493 if (ua_chan_node != NULL) {
1494 /* Session exist. Contiuing. */
1495 continue;
1496 }
1497
1498 DBG2("Channel %s not found on shadow session copy, creating it",
1499 uchan->name);
1500 ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
1501 if (ua_chan == NULL) {
1502 /* malloc failed FIXME: Might want to do handle ENOMEM .. */
1503 continue;
1504 }
1505 shadow_copy_channel(ua_chan, uchan);
1506 /*
1507 * The concept of metadata channel does not exist on the tracing
1508 * registry side of the session daemon so this can only be a per CPU
1509 * channel and not metadata.
1510 */
1511 ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
1512
1513 lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
1514 }
1515
1516 error:
1517 return;
1518 }
1519
/*
 * Lookup session wrapper.
 *
 * Positions iter on the app session matching the tracing session id
 * usess->id in the application's session hash table; the caller extracts
 * the node from iter.
 */
static
void __lookup_session_by_app(struct ltt_ust_session *usess,
		struct ust_app *app, struct lttng_ht_iter *iter)
{
	/* Get right UST app session from app */
	lttng_ht_lookup(app->sessions, &usess->id, iter);
}
1530
1531 /*
1532 * Return ust app session from the app session hashtable using the UST session
1533 * id.
1534 */
1535 static struct ust_app_session *lookup_session_by_app(
1536 struct ltt_ust_session *usess, struct ust_app *app)
1537 {
1538 struct lttng_ht_iter iter;
1539 struct lttng_ht_node_u64 *node;
1540
1541 __lookup_session_by_app(usess, app, &iter);
1542 node = lttng_ht_iter_get_node_u64(&iter);
1543 if (node == NULL) {
1544 goto error;
1545 }
1546
1547 return caa_container_of(node, struct ust_app_session, node);
1548
1549 error:
1550 return NULL;
1551 }
1552
/*
 * Setup buffer registry per PID for the given session and application. If none
 * is found, a new one is created, added to the global registry and
 * initialized. If regp is valid, it's set with the newly created object.
 *
 * Return 0 on success or else a negative value.
 */
static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
		struct ust_app *app, struct buffer_reg_pid **regp)
{
	int ret = 0;
	struct buffer_reg_pid *reg_pid;

	assert(ua_sess);
	assert(app);

	rcu_read_lock();

	reg_pid = buffer_reg_pid_find(ua_sess->id);
	if (!reg_pid) {
		/*
		 * This is the create channel path meaning that if there is NO
		 * registry available, we have to create one for this session.
		 */
		ret = buffer_reg_pid_create(ua_sess->id, &reg_pid);
		if (ret < 0) {
			goto error;
		}
		buffer_reg_pid_add(reg_pid);
	} else {
		/* Registry already exists and is initialized; just report it. */
		goto end;
	}

	/* Initialize registry. */
	ret = ust_registry_session_init(&reg_pid->registry->reg.ust, app,
			app->bits_per_long, app->uint8_t_alignment,
			app->uint16_t_alignment, app->uint32_t_alignment,
			app->uint64_t_alignment, app->long_alignment,
			app->byte_order, app->version.major,
			app->version.minor);
	if (ret < 0) {
		/*
		 * NOTE(review): the registry was already added to the global list
		 * above; on init failure it is not removed here — presumably the
		 * session teardown path reclaims it. Confirm.
		 */
		goto error;
	}

	DBG3("UST app buffer registry per PID created successfully");

end:
	if (regp) {
		*regp = reg_pid;
	}
error:
	rcu_read_unlock();
	return ret;
}
1607
/*
 * Setup buffer registry per UID for the given session and application. If none
 * is found, a new one is created, added to the global registry and
 * initialized. If regp is valid, it's set with the newly created object.
 *
 * Return 0 on success or else a negative value.
 */
static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
		struct ust_app *app, struct buffer_reg_uid **regp)
{
	int ret = 0;
	struct buffer_reg_uid *reg_uid;

	assert(usess);
	assert(app);

	rcu_read_lock();

	/* Per-UID registries are keyed by session id, bitness and uid. */
	reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
	if (!reg_uid) {
		/*
		 * This is the create channel path meaning that if there is NO
		 * registry available, we have to create one for this session.
		 */
		ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
				LTTNG_DOMAIN_UST, &reg_uid);
		if (ret < 0) {
			goto error;
		}
		buffer_reg_uid_add(reg_uid);
	} else {
		/* Registry already exists and is initialized; just report it. */
		goto end;
	}

	/*
	 * Initialize registry. The app pointer is NULL here since a per-UID
	 * registry is shared by all applications of that uid/bitness.
	 */
	ret = ust_registry_session_init(&reg_uid->registry->reg.ust, NULL,
			app->bits_per_long, app->uint8_t_alignment,
			app->uint16_t_alignment, app->uint32_t_alignment,
			app->uint64_t_alignment, app->long_alignment,
			app->byte_order, app->version.major,
			app->version.minor);
	if (ret < 0) {
		/*
		 * NOTE(review): the registry was already added to the global list
		 * above; on init failure it is not removed here — presumably the
		 * session teardown path reclaims it. Confirm.
		 */
		goto error;
	}
	/* Add node to teardown list of the session. */
	cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);

	DBG3("UST app buffer registry per UID created successfully");

end:
	if (regp) {
		*regp = reg_uid;
	}
error:
	rcu_read_unlock();
	return ret;
}
1665
/*
 * Create a session on the tracer side for the given app.
 *
 * On success, ua_sess_ptr is populated with the session pointer or else left
 * untouched. If the session was created, is_created is set to 1. On error,
 * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
 * be NULL.
 *
 * Returns 0 on success or else a negative code which is either -ENOMEM or
 * -ENOTCONN which is the default code if the ustctl_create_session fails.
 */
static int create_ust_app_session(struct ltt_ust_session *usess,
		struct ust_app *app, struct ust_app_session **ua_sess_ptr,
		int *is_created)
{
	int ret, created = 0;
	struct ust_app_session *ua_sess;

	assert(usess);
	assert(app);
	assert(ua_sess_ptr);

	health_code_update();

	/* Reuse an existing app session for this tracing session when found. */
	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
				app->pid, usess->id);
		ua_sess = alloc_ust_app_session(app);
		if (ua_sess == NULL) {
			/* Only malloc can failed so something is really wrong */
			ret = -ENOMEM;
			goto error;
		}
		shadow_copy_session(ua_sess, usess, app);
		created = 1;
	}

	/* Make sure the buffer registry matching the buffer scheme exists. */
	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		/* Init local registry. */
		ret = setup_buffer_reg_pid(ua_sess, app, NULL);
		if (ret < 0) {
			goto error;
		}
		break;
	case LTTNG_BUFFER_PER_UID:
		/* Look for a global registry. If none exists, create one. */
		ret = setup_buffer_reg_uid(usess, app, NULL);
		if (ret < 0) {
			goto error;
		}
		break;
	default:
		assert(0);
		ret = -EINVAL;
		goto error;
	}

	health_code_update();

	/* A handle of -1 means no tracer-side session exists yet. */
	if (ua_sess->handle == -1) {
		ret = ustctl_create_session(app->sock);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("Creating session for app pid %d with ret %d",
						app->pid, ret);
			} else {
				DBG("UST app creating session failed. Application is dead");
			}
			delete_ust_app_session(-1, ua_sess, app);
			if (ret != -ENOMEM) {
				/*
				 * Tracer is probably gone or got an internal error so let's
				 * behave like it will soon unregister or not usable.
				 */
				ret = -ENOTCONN;
			}
			goto error;
		}

		/* ustctl_create_session returns the new session handle. */
		ua_sess->handle = ret;

		/* Add ust app session to app's HT */
		lttng_ht_node_init_u64(&ua_sess->node,
				ua_sess->tracing_id);
		lttng_ht_add_unique_u64(app->sessions, &ua_sess->node);

		DBG2("UST app session created successfully with handle %d", ret);
	}

	*ua_sess_ptr = ua_sess;
	if (is_created) {
		*is_created = created;
	}

	/* Everything went well. */
	ret = 0;

error:
	health_code_update();
	return ret;
}
1769
/*
 * Create a context for the channel on the tracer.
 *
 * Called with UST app session lock held and a RCU read side lock.
 *
 * Return 0 on success, -EEXIST if the context type is already present on
 * the channel, or a negative value on allocation or tracer failure.
 */
static
int create_ust_app_channel_context(struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan, struct lttng_ust_context *uctx,
		struct ust_app *app)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_ulong *node;
	struct ust_app_ctx *ua_ctx;

	DBG2("UST app adding context to channel %s", ua_chan->name);

	/* Reject duplicates: only one context of a given type per channel. */
	lttng_ht_lookup(ua_chan->ctx, (void *)((unsigned long)uctx->ctx), &iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node != NULL) {
		ret = -EEXIST;
		goto error;
	}

	ua_ctx = alloc_ust_app_ctx(uctx);
	if (ua_ctx == NULL) {
		/* malloc failed */
		ret = -1;
		goto error;
	}

	lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
	lttng_ht_add_unique_ulong(ua_chan->ctx, &ua_ctx->node);
	/* Append so contexts keep the order in which the user enabled them. */
	cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);

	ret = create_ust_channel_context(ua_chan, ua_ctx, app);
	if (ret < 0) {
		/*
		 * NOTE(review): on tracer-side failure the context stays in the
		 * HT and ctx_list; presumably released with the channel — confirm.
		 */
		goto error;
	}

error:
	return ret;
}
1813
1814 /*
1815 * Enable on the tracer side a ust app event for the session and channel.
1816 *
1817 * Called with UST app session lock held.
1818 */
1819 static
1820 int enable_ust_app_event(struct ust_app_session *ua_sess,
1821 struct ust_app_event *ua_event, struct ust_app *app)
1822 {
1823 int ret;
1824
1825 ret = enable_ust_event(app, ua_sess, ua_event);
1826 if (ret < 0) {
1827 goto error;
1828 }
1829
1830 ua_event->enabled = 1;
1831
1832 error:
1833 return ret;
1834 }
1835
1836 /*
1837 * Disable on the tracer side a ust app event for the session and channel.
1838 */
1839 static int disable_ust_app_event(struct ust_app_session *ua_sess,
1840 struct ust_app_event *ua_event, struct ust_app *app)
1841 {
1842 int ret;
1843
1844 ret = disable_ust_event(app, ua_sess, ua_event);
1845 if (ret < 0) {
1846 goto error;
1847 }
1848
1849 ua_event->enabled = 0;
1850
1851 error:
1852 return ret;
1853 }
1854
1855 /*
1856 * Lookup ust app channel for session and disable it on the tracer side.
1857 */
1858 static
1859 int disable_ust_app_channel(struct ust_app_session *ua_sess,
1860 struct ust_app_channel *ua_chan, struct ust_app *app)
1861 {
1862 int ret;
1863
1864 ret = disable_ust_channel(app, ua_sess, ua_chan);
1865 if (ret < 0) {
1866 goto error;
1867 }
1868
1869 ua_chan->enabled = 0;
1870
1871 error:
1872 return ret;
1873 }
1874
/*
 * Lookup ust app channel for session and enable it on the tracer side. This
 * MUST be called with a RCU read side lock acquired.
 *
 * Return 0 on success (including when the channel is absent from this app
 * session), or a negative value on tracer failure.
 */
static int enable_ust_app_channel(struct ust_app_session *ua_sess,
		struct ltt_ust_channel *uchan, struct ust_app *app)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app_channel *ua_chan;

	/* Find the app channel by name in this app session. */
	lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
	ua_chan_node = lttng_ht_iter_get_node_str(&iter);
	if (ua_chan_node == NULL) {
		/*
		 * Channel not tracked by this application: ret stays 0.
		 * NOTE(review): presumably intentional best-effort — confirm
		 * callers expect success for apps without the channel.
		 */
		DBG2("Unable to find channel %s in ust session id %" PRIu64,
				uchan->name, ua_sess->tracing_id);
		goto error;
	}

	ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

	/* enable_ust_channel() also sets ua_chan->enabled on success. */
	ret = enable_ust_channel(app, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

error:
	return ret;
}
1905
/*
 * Ask the consumer to create a channel and get it if successful.
 *
 * bitness selects the matching consumer socket (32/64-bit application);
 * registry is the UST registry session associated with the channel. File
 * descriptors are reserved up front (1 for the channel, 2 per expected
 * stream) and released on every error path.
 *
 * Return 0 on success or else a negative value.
 */
static int do_consumer_create_channel(struct ltt_ust_session *usess,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
		int bitness, struct ust_registry_session *registry)
{
	int ret;
	unsigned int nb_fd = 0;
	struct consumer_socket *socket;

	assert(usess);
	assert(ua_sess);
	assert(ua_chan);
	assert(registry);

	rcu_read_lock();
	health_code_update();

	/* Get the right consumer socket for the application. */
	socket = consumer_find_socket_by_bitness(bitness, usess->consumer);
	if (!socket) {
		ret = -EINVAL;
		goto error;
	}

	health_code_update();

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create channel");
		goto error;
	}

	/*
	 * Ask consumer to create channel. The consumer will return the number of
	 * stream we have to expect.
	 */
	ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
			registry);
	if (ret < 0) {
		goto error_ask;
	}

	/*
	 * Compute the number of fd needed before receiving them. It must be 2 per
	 * stream (2 being the default value here).
	 */
	nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;

	/* Reserve the amount of file descriptor we need. */
	ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create channel");
		goto error_fd_get_stream;
	}

	health_code_update();

	/*
	 * Now get the channel from the consumer. This call will populate the
	 * stream list of that channel and set the ust objects.
	 */
	if (usess->consumer->enabled) {
		ret = ust_consumer_get_channel(socket, ua_chan);
		if (ret < 0) {
			goto error_destroy;
		}
	}

	rcu_read_unlock();
	return 0;

error_destroy:
	/* Release the per-stream fds reserved above. */
	lttng_fd_put(LTTNG_FD_APPS, nb_fd);
error_fd_get_stream:
	/*
	 * Initiate a destroy channel on the consumer since we had an error
	 * handling it on our side. The return value is of no importance since we
	 * already have a ret value set by the previous error that we need to
	 * return.
	 */
	(void) ust_consumer_destroy_channel(socket, ua_chan);
error_ask:
	/* Release the channel fd reserved above. */
	lttng_fd_put(LTTNG_FD_APPS, 1);
error:
	health_code_update();
	rcu_read_unlock();
	return ret;
}
1999
2000 /*
2001 * Duplicate the ust data object of the ust app stream and save it in the
2002 * buffer registry stream.
2003 *
2004 * Return 0 on success or else a negative value.
2005 */
2006 static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
2007 struct ust_app_stream *stream)
2008 {
2009 int ret;
2010
2011 assert(reg_stream);
2012 assert(stream);
2013
2014 /* Reserve the amount of file descriptor we need. */
2015 ret = lttng_fd_get(LTTNG_FD_APPS, 2);
2016 if (ret < 0) {
2017 ERR("Exhausted number of available FD upon duplicate stream");
2018 goto error;
2019 }
2020
2021 /* Duplicate object for stream once the original is in the registry. */
2022 ret = ustctl_duplicate_ust_object_data(&stream->obj,
2023 reg_stream->obj.ust);
2024 if (ret < 0) {
2025 ERR("Duplicate stream obj from %p to %p failed with ret %d",
2026 reg_stream->obj.ust, stream->obj, ret);
2027 lttng_fd_put(LTTNG_FD_APPS, 2);
2028 goto error;
2029 }
2030 stream->handle = stream->obj->handle;
2031
2032 error:
2033 return ret;
2034 }
2035
/*
 * Duplicate the ust data object of the ust app. channel and save it in the
 * buffer registry channel.
 *
 * Return 0 on success or else a negative value.
 */
static int duplicate_channel_object(struct buffer_reg_channel *reg_chan,
		struct ust_app_channel *ua_chan)
{
	int ret;

	assert(reg_chan);
	assert(ua_chan);

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon duplicate channel");
		goto error_fd_get;
	}

	/* Duplicate object for stream once the original is in the registry. */
	ret = ustctl_duplicate_ust_object_data(&ua_chan->obj, reg_chan->obj.ust);
	if (ret < 0) {
		ERR("Duplicate channel obj from %p to %p failed with ret: %d",
				reg_chan->obj.ust, ua_chan->obj, ret);
		goto error;
	}
	ua_chan->handle = ua_chan->obj->handle;

	return 0;

error:
	/* Give back the fd reserved above. */
	lttng_fd_put(LTTNG_FD_APPS, 1);
error_fd_get:
	return ret;
}
2073
2074 /*
2075 * For a given channel buffer registry, setup all streams of the given ust
2076 * application channel.
2077 *
2078 * Return 0 on success or else a negative value.
2079 */
2080 static int setup_buffer_reg_streams(struct buffer_reg_channel *reg_chan,
2081 struct ust_app_channel *ua_chan)
2082 {
2083 int ret = 0;
2084 struct ust_app_stream *stream, *stmp;
2085
2086 assert(reg_chan);
2087 assert(ua_chan);
2088
2089 DBG2("UST app setup buffer registry stream");
2090
2091 /* Send all streams to application. */
2092 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
2093 struct buffer_reg_stream *reg_stream;
2094
2095 ret = buffer_reg_stream_create(&reg_stream);
2096 if (ret < 0) {
2097 goto error;
2098 }
2099
2100 /*
2101 * Keep original pointer and nullify it in the stream so the delete
2102 * stream call does not release the object.
2103 */
2104 reg_stream->obj.ust = stream->obj;
2105 stream->obj = NULL;
2106 buffer_reg_stream_add(reg_stream, reg_chan);
2107
2108 /* We don't need the streams anymore. */
2109 cds_list_del(&stream->list);
2110 delete_ust_app_stream(-1, stream);
2111 }
2112
2113 error:
2114 return ret;
2115 }
2116
/*
 * Create a buffer registry channel for the given session registry and
 * application channel object. If regp pointer is valid, it's set with the
 * created object. Important, the created object is NOT added to the session
 * registry hash table.
 *
 * Return 0 on success else a negative value.
 */
static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
		struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp)
{
	int ret;
	struct buffer_reg_channel *reg_chan = NULL;

	assert(reg_sess);
	assert(ua_chan);

	DBG2("UST app creating buffer registry channel for %s", ua_chan->name);

	/* Create buffer registry channel. */
	ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &reg_chan);
	if (ret < 0) {
		goto error_create;
	}
	assert(reg_chan);
	/* Mirror consumer key and subbuffer size from the app channel. */
	reg_chan->consumer_key = ua_chan->key;
	reg_chan->subbuf_size = ua_chan->attr.subbuf_size;

	/* Create and add a channel registry to session. */
	ret = ust_registry_channel_add(reg_sess->reg.ust,
			ua_chan->tracing_channel_id);
	if (ret < 0) {
		goto error;
	}
	buffer_reg_channel_add(reg_sess, reg_chan);

	if (regp) {
		*regp = reg_chan;
	}

	return 0;

error:
	/* Safe because the registry channel object was not added to any HT. */
	buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
error_create:
	return ret;
}
2165
2166 /*
2167 * Setup buffer registry channel for the given session registry and application
2168 * channel object. If regp pointer is valid, it's set with the created object.
2169 *
2170 * Return 0 on success else a negative value.
2171 */
2172 static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
2173 struct ust_app_channel *ua_chan, struct buffer_reg_channel *reg_chan)
2174 {
2175 int ret;
2176
2177 assert(reg_sess);
2178 assert(reg_chan);
2179 assert(ua_chan);
2180 assert(ua_chan->obj);
2181
2182 DBG2("UST app setup buffer registry channel for %s", ua_chan->name);
2183
2184 /* Setup all streams for the registry. */
2185 ret = setup_buffer_reg_streams(reg_chan, ua_chan);
2186 if (ret < 0) {
2187 goto error;
2188 }
2189
2190 reg_chan->obj.ust = ua_chan->obj;
2191 ua_chan->obj = NULL;
2192
2193 return 0;
2194
2195 error:
2196 buffer_reg_channel_remove(reg_sess, reg_chan);
2197 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2198 return ret;
2199 }
2200
/*
 * Send buffer registry channel to the application.
 *
 * Duplicates the registry channel object for the application, sends it,
 * then duplicates and sends every registry stream under the stream list
 * lock. Stream duplicates are released whether or not the send succeeds.
 *
 * Return 0 on success else a negative value.
 */
static int send_channel_uid_to_ust(struct buffer_reg_channel *reg_chan,
		struct ust_app *app, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct buffer_reg_stream *reg_stream;

	assert(reg_chan);
	assert(app);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app sending buffer registry channel to ust sock %d", app->sock);

	ret = duplicate_channel_object(reg_chan, ua_chan);
	if (ret < 0) {
		goto error;
	}

	/* Send channel to the application. */
	ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Send all streams to application. */
	pthread_mutex_lock(&reg_chan->stream_list_lock);
	cds_list_for_each_entry(reg_stream, &reg_chan->streams, lnode) {
		struct ust_app_stream stream;

		ret = duplicate_stream_object(reg_stream, &stream);
		if (ret < 0) {
			goto error_stream_unlock;
		}

		ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
		if (ret < 0) {
			/* Release the duplicate before bailing out. */
			(void) release_ust_app_stream(-1, &stream);
			goto error_stream_unlock;
		}

		/*
		 * The return value is not important here. This function will output an
		 * error if needed.
		 */
		(void) release_ust_app_stream(-1, &stream);
	}
	/* Flag the channel as sent to the application. */
	ua_chan->is_sent = 1;

error_stream_unlock:
	pthread_mutex_unlock(&reg_chan->stream_list_lock);
error:
	return ret;
}
2262
2263 /*
2264 * Create and send to the application the created buffers with per UID buffers.
2265 *
2266 * Return 0 on success else a negative value.
2267 */
static int create_channel_per_uid(struct ust_app *app,
		struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct buffer_reg_uid *reg_uid;
	struct buffer_reg_channel *reg_chan;

	assert(app);
	assert(usess);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app creating channel %s with per UID buffers", ua_chan->name);

	reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
	/*
	 * The session creation handles the creation of this global registry
	 * object. If none can be found, there is a code flow problem or a
	 * teardown race.
	 */
	assert(reg_uid);

	/*
	 * Per-UID buffers are shared: if another app of the same UID/bitness
	 * already created this channel, only the send step below is needed.
	 */
	reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
			reg_uid);
	if (!reg_chan) {
		/* Create the buffer registry channel object. */
		ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &reg_chan);
		if (ret < 0) {
			goto error;
		}
		assert(reg_chan);

		/*
		 * Create the buffers on the consumer side. This call populates the
		 * ust app channel object with all streams and data object.
		 */
		ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
				app->bits_per_long, reg_uid->registry->reg.ust);
		if (ret < 0) {
			/*
			 * Let's remove the previously created buffer registry channel so
			 * it's not visible anymore in the session registry.
			 */
			ust_registry_channel_del_free(reg_uid->registry->reg.ust,
					ua_chan->tracing_channel_id);
			buffer_reg_channel_remove(reg_uid->registry, reg_chan);
			buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
			goto error;
		}

		/*
		 * Setup the streams and add it to the session registry. On error,
		 * setup_buffer_reg_channel removes and destroys reg_chan itself.
		 */
		ret = setup_buffer_reg_channel(reg_uid->registry, ua_chan, reg_chan);
		if (ret < 0) {
			goto error;
		}

	}

	/* Send buffers to the application. */
	ret = send_channel_uid_to_ust(reg_chan, app, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

error:
	return ret;
}
2338
2339 /*
2340 * Create and send to the application the created buffers with per PID buffers.
2341 *
2342 * Return 0 on success else a negative value.
2343 */
2344 static int create_channel_per_pid(struct ust_app *app,
2345 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2346 struct ust_app_channel *ua_chan)
2347 {
2348 int ret;
2349 struct ust_registry_session *registry;
2350
2351 assert(app);
2352 assert(usess);
2353 assert(ua_sess);
2354 assert(ua_chan);
2355
2356 DBG("UST app creating channel %s with per PID buffers", ua_chan->name);
2357
2358 rcu_read_lock();
2359
2360 registry = get_session_registry(ua_sess);
2361 assert(registry);
2362
2363 /* Create and add a new channel registry to session. */
2364 ret = ust_registry_channel_add(registry, ua_chan->key);
2365 if (ret < 0) {
2366 goto error;
2367 }
2368
2369 /* Create and get channel on the consumer side. */
2370 ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
2371 app->bits_per_long, registry);
2372 if (ret < 0) {
2373 goto error;
2374 }
2375
2376 ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
2377 if (ret < 0) {
2378 goto error;
2379 }
2380
2381 error:
2382 rcu_read_unlock();
2383 return ret;
2384 }
2385
2386 /*
2387 * From an already allocated ust app channel, create the channel buffers if
2388 * need and send it to the application. This MUST be called with a RCU read
2389 * side lock acquired.
2390 *
2391 * Return 0 on success or else a negative value.
2392 */
2393 static int do_create_channel(struct ust_app *app,
2394 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2395 struct ust_app_channel *ua_chan)
2396 {
2397 int ret;
2398
2399 assert(app);
2400 assert(usess);
2401 assert(ua_sess);
2402 assert(ua_chan);
2403
2404 /* Handle buffer type before sending the channel to the application. */
2405 switch (usess->buffer_type) {
2406 case LTTNG_BUFFER_PER_UID:
2407 {
2408 ret = create_channel_per_uid(app, usess, ua_sess, ua_chan);
2409 if (ret < 0) {
2410 goto error;
2411 }
2412 break;
2413 }
2414 case LTTNG_BUFFER_PER_PID:
2415 {
2416 ret = create_channel_per_pid(app, usess, ua_sess, ua_chan);
2417 if (ret < 0) {
2418 goto error;
2419 }
2420 break;
2421 }
2422 default:
2423 assert(0);
2424 ret = -EINVAL;
2425 goto error;
2426 }
2427
2428 /* Initialize ust objd object using the received handle and add it. */
2429 lttng_ht_node_init_ulong(&ua_chan->ust_objd_node, ua_chan->handle);
2430 lttng_ht_add_unique_ulong(app->ust_objd, &ua_chan->ust_objd_node);
2431
2432 /* If channel is not enabled, disable it on the tracer */
2433 if (!ua_chan->enabled) {
2434 ret = disable_ust_channel(app, ua_sess, ua_chan);
2435 if (ret < 0) {
2436 goto error;
2437 }
2438 }
2439
2440 error:
2441 return ret;
2442 }
2443
2444 /*
2445 * Create UST app channel and create it on the tracer. Set ua_chanp of the
2446 * newly created channel if not NULL.
2447 *
2448 * Called with UST app session lock and RCU read-side lock held.
2449 *
2450 * Return 0 on success or else a negative value.
2451 */
static int create_ust_app_channel(struct ust_app_session *ua_sess,
		struct ltt_ust_channel *uchan, struct ust_app *app,
		enum lttng_ust_chan_type type, struct ltt_ust_session *usess,
		struct ust_app_channel **ua_chanp)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app_channel *ua_chan;

	/* Lookup channel in the ust app session; reuse it if already created. */
	lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
	ua_chan_node = lttng_ht_iter_get_node_str(&iter);
	if (ua_chan_node != NULL) {
		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
		goto end;
	}

	ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
	if (ua_chan == NULL) {
		/* Only malloc can fail here */
		ret = -ENOMEM;
		goto error_alloc;
	}
	shadow_copy_channel(ua_chan, uchan);

	/* Set channel type. */
	ua_chan->attr.type = type;

	ret = do_create_channel(app, usess, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

	DBG2("UST app create channel %s for PID %d completed", ua_chan->name,
			app->pid);

	/* Only add the channel if successful on the tracer side. */
	lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);

end:
	/* Hand the (new or existing) channel back to the caller if requested. */
	if (ua_chanp) {
		*ua_chanp = ua_chan;
	}

	/* Everything went well. */
	return 0;

error:
	/*
	 * If the channel was already sent to the app (is_sent set by
	 * send_channel_*_to_ust), pass the app socket so the app-side object
	 * is released too; otherwise only local teardown is needed.
	 */
	delete_ust_app_channel(ua_chan->is_sent ? app->sock : -1, ua_chan, app);
error_alloc:
	return ret;
}
2505
2506 /*
2507 * Create UST app event and create it on the tracer side.
2508 *
2509 * Called with ust app session mutex held.
2510 */
2511 static
2512 int create_ust_app_event(struct ust_app_session *ua_sess,
2513 struct ust_app_channel *ua_chan, struct ltt_ust_event *uevent,
2514 struct ust_app *app)
2515 {
2516 int ret = 0;
2517 struct ust_app_event *ua_event;
2518
2519 /* Get event node */
2520 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
2521 uevent->filter, uevent->attr.loglevel);
2522 if (ua_event != NULL) {
2523 ret = -EEXIST;
2524 goto end;
2525 }
2526
2527 /* Does not exist so create one */
2528 ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
2529 if (ua_event == NULL) {
2530 /* Only malloc can failed so something is really wrong */
2531 ret = -ENOMEM;
2532 goto end;
2533 }
2534 shadow_copy_event(ua_event, uevent);
2535
2536 /* Create it on the tracer side */
2537 ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
2538 if (ret < 0) {
2539 /* Not found previously means that it does not exist on the tracer */
2540 assert(ret != -LTTNG_UST_ERR_EXIST);
2541 goto error;
2542 }
2543
2544 add_unique_ust_app_event(ua_chan, ua_event);
2545
2546 DBG2("UST app create event %s for PID %d completed", ua_event->name,
2547 app->pid);
2548
2549 end:
2550 return ret;
2551
2552 error:
2553 /* Valid. Calling here is already in a read side lock */
2554 delete_ust_app_event(-1, ua_event);
2555 return ret;
2556 }
2557
2558 /*
2559 * Create UST metadata and open it on the tracer side.
2560 *
2561 * Called with UST app session lock held and RCU read side lock.
2562 */
static int create_ust_app_metadata(struct ust_app_session *ua_sess,
		struct ust_app *app, struct consumer_output *consumer,
		struct ustctl_consumer_channel_attr *attr)
{
	int ret = 0;
	struct ust_app_channel *metadata;
	struct consumer_socket *socket;
	struct ust_registry_session *registry;

	assert(ua_sess);
	assert(app);
	assert(consumer);

	registry = get_session_registry(ua_sess);
	assert(registry);

	/* Metadata already exists for this registry or it was closed previously */
	if (registry->metadata_key || registry->metadata_closed) {
		ret = 0;
		goto error;
	}

	/* Allocate UST metadata */
	metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, NULL);
	if (!metadata) {
		/* malloc() failed */
		ret = -ENOMEM;
		goto error;
	}

	if (!attr) {
		/* Set default attributes for metadata. */
		metadata->attr.overwrite = DEFAULT_CHANNEL_OVERWRITE;
		metadata->attr.subbuf_size = default_get_metadata_subbuf_size();
		metadata->attr.num_subbuf = DEFAULT_METADATA_SUBBUF_NUM;
		metadata->attr.switch_timer_interval = DEFAULT_METADATA_SWITCH_TIMER;
		metadata->attr.read_timer_interval = DEFAULT_METADATA_READ_TIMER;
		metadata->attr.output = LTTNG_UST_MMAP;
		metadata->attr.type = LTTNG_UST_CHAN_METADATA;
	} else {
		/* Caller-provided attributes; output/type are forced regardless. */
		memcpy(&metadata->attr, attr, sizeof(metadata->attr));
		metadata->attr.output = LTTNG_UST_MMAP;
		metadata->attr.type = LTTNG_UST_CHAN_METADATA;
	}

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create metadata");
		/*
		 * NOTE(review): this goes to "error" without deleting the
		 * just-allocated metadata channel, which looks like a leak of the
		 * local object — confirm and consider a dedicated cleanup path
		 * that does not do the fd_put below.
		 */
		goto error;
	}

	/* Get the right consumer socket for the application. */
	socket = consumer_find_socket_by_bitness(app->bits_per_long, consumer);
	if (!socket) {
		ret = -EINVAL;
		goto error_consumer;
	}

	/*
	 * Keep metadata key so we can identify it on the consumer side. Assign it
	 * to the registry *before* we ask the consumer so we avoid the race of the
	 * consumer requesting the metadata and the ask_channel call on our side
	 * did not returned yet.
	 */
	registry->metadata_key = metadata->key;

	/*
	 * Ask the metadata channel creation to the consumer. The metadata object
	 * will be created by the consumer and kept their. However, the stream is
	 * never added or monitored until we do a first push metadata to the
	 * consumer.
	 */
	ret = ust_consumer_ask_channel(ua_sess, metadata, consumer, socket,
			registry);
	if (ret < 0) {
		/* Nullify the metadata key so we don't try to close it later on. */
		registry->metadata_key = 0;
		goto error_consumer;
	}

	/*
	 * The setup command will make the metadata stream be sent to the relayd,
	 * if applicable, and the thread managing the metadatas. This is important
	 * because after this point, if an error occurs, the only way the stream
	 * can be deleted is to be monitored in the consumer.
	 */
	ret = consumer_setup_metadata(socket, metadata->key);
	if (ret < 0) {
		/* Nullify the metadata key so we don't try to close it later on. */
		registry->metadata_key = 0;
		goto error_consumer;
	}

	DBG2("UST metadata with key %" PRIu64 " created for app pid %d",
			metadata->key, app->pid);

error_consumer:
	/*
	 * Reached on success as well as on error: the FD reference and the
	 * local channel object are always released here. Presumably the
	 * consumer owns the metadata channel from this point on and the
	 * registry's metadata_key is enough to refer to it — TODO confirm.
	 */
	lttng_fd_put(LTTNG_FD_APPS, 1);
	delete_ust_app_channel(-1, metadata, app);
error:
	return ret;
}
2666
2667 /*
2668 * Return pointer to traceable apps list.
2669 */
struct lttng_ht *ust_app_get_ht(void)
{
	/* Plain accessor; the table itself is created by ust_app_ht_alloc(). */
	return ust_app_ht;
}
2674
2675 /*
2676 * Return ust app pointer or NULL if not found. RCU read side lock MUST be
2677 * acquired before calling this function.
2678 */
2679 struct ust_app *ust_app_find_by_pid(pid_t pid)
2680 {
2681 struct ust_app *app = NULL;
2682 struct lttng_ht_node_ulong *node;
2683 struct lttng_ht_iter iter;
2684
2685 lttng_ht_lookup(ust_app_ht, (void *)((unsigned long) pid), &iter);
2686 node = lttng_ht_iter_get_node_ulong(&iter);
2687 if (node == NULL) {
2688 DBG2("UST app no found with pid %d", pid);
2689 goto error;
2690 }
2691
2692 DBG2("Found UST app by pid %d", pid);
2693
2694 app = caa_container_of(node, struct ust_app, pid_n);
2695
2696 error:
2697 return app;
2698 }
2699
2700 /*
2701 * Allocate and init an UST app object using the registration information and
2702 * the command socket. This is called when the command socket connects to the
2703 * session daemon.
2704 *
2705 * The object is returned on success or else NULL.
2706 */
struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
{
	struct ust_app *lta = NULL;

	assert(msg);
	assert(sock >= 0);

	DBG3("UST app creating application for socket %d", sock);

	/* Refuse registration when no consumerd of the app's bitness exists. */
	if ((msg->bits_per_long == 64 &&
				(uatomic_read(&ust_consumerd64_fd) == -EINVAL))
			|| (msg->bits_per_long == 32 &&
				(uatomic_read(&ust_consumerd32_fd) == -EINVAL))) {
		ERR("Registration failed: application \"%s\" (pid: %d) has "
				"%d-bit long, but no consumerd for this size is available.\n",
				msg->name, msg->pid, msg->bits_per_long);
		goto error;
	}

	lta = zmalloc(sizeof(struct ust_app));
	if (lta == NULL) {
		PERROR("malloc");
		goto error;
	}

	/* Identity of the registering process. */
	lta->ppid = msg->ppid;
	lta->uid = msg->uid;
	lta->gid = msg->gid;

	/* ABI description used when interpreting the app's trace data. */
	lta->bits_per_long = msg->bits_per_long;
	lta->uint8_t_alignment = msg->uint8_t_alignment;
	lta->uint16_t_alignment = msg->uint16_t_alignment;
	lta->uint32_t_alignment = msg->uint32_t_alignment;
	lta->uint64_t_alignment = msg->uint64_t_alignment;
	lta->long_alignment = msg->long_alignment;
	lta->byte_order = msg->byte_order;

	lta->v_major = msg->major;
	lta->v_minor = msg->minor;
	/*
	 * NOTE(review): lttng_ht_new() results are not checked here; a failed
	 * allocation would only surface later as a NULL dereference — confirm
	 * whether this is treated as fatal elsewhere.
	 */
	lta->sessions = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	lta->ust_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	/* Notify socket is set later, when the app's notify channel connects. */
	lta->notify_sock = -1;

	/* Copy name and make sure it's NULL terminated. */
	strncpy(lta->name, msg->name, sizeof(lta->name));
	lta->name[UST_APP_PROCNAME_LEN] = '\0';

	/*
	 * Before this can be called, when receiving the registration information,
	 * the application compatibility is checked. So, at this point, the
	 * application can work with this session daemon.
	 */
	lta->compatible = 1;

	lta->pid = msg->pid;
	lttng_ht_node_init_ulong(&lta->pid_n, (unsigned long) lta->pid);
	lta->sock = sock;
	lttng_ht_node_init_ulong(&lta->sock_n, (unsigned long) lta->sock);

	CDS_INIT_LIST_HEAD(&lta->teardown_head);

error:
	return lta;
}
2771
2772 /*
2773 * For a given application object, add it to every hash table.
2774 */
2775 void ust_app_add(struct ust_app *app)
2776 {
2777 assert(app);
2778 assert(app->notify_sock >= 0);
2779
2780 rcu_read_lock();
2781
2782 /*
2783 * On a re-registration, we want to kick out the previous registration of
2784 * that pid
2785 */
2786 lttng_ht_add_replace_ulong(ust_app_ht, &app->pid_n);
2787
2788 /*
2789 * The socket _should_ be unique until _we_ call close. So, a add_unique
2790 * for the ust_app_ht_by_sock is used which asserts fail if the entry was
2791 * already in the table.
2792 */
2793 lttng_ht_add_unique_ulong(ust_app_ht_by_sock, &app->sock_n);
2794
2795 /* Add application to the notify socket hash table. */
2796 lttng_ht_node_init_ulong(&app->notify_sock_n, app->notify_sock);
2797 lttng_ht_add_unique_ulong(ust_app_ht_by_notify_sock, &app->notify_sock_n);
2798
2799 DBG("App registered with pid:%d ppid:%d uid:%d gid:%d sock:%d name:%s "
2800 "notify_sock:%d (version %d.%d)", app->pid, app->ppid, app->uid,
2801 app->gid, app->sock, app->name, app->notify_sock, app->v_major,
2802 app->v_minor);
2803
2804 rcu_read_unlock();
2805 }
2806
2807 /*
2808 * Set the application version into the object.
2809 *
2810 * Return 0 on success else a negative value either an errno code or a
2811 * LTTng-UST error code.
2812 */
2813 int ust_app_version(struct ust_app *app)
2814 {
2815 int ret;
2816
2817 assert(app);
2818
2819 ret = ustctl_tracer_version(app->sock, &app->version);
2820 if (ret < 0) {
2821 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
2822 ERR("UST app %d verson failed with ret %d", app->sock, ret);
2823 } else {
2824 DBG3("UST app %d verion failed. Application is dead", app->sock);
2825 }
2826 }
2827
2828 return ret;
2829 }
2830
2831 /*
2832 * Unregister app by removing it from the global traceable app list and freeing
2833 * the data struct.
2834 *
2835 * The socket is already closed at this point so no close to sock.
2836 */
void ust_app_unregister(int sock)
{
	struct ust_app *lta;
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;
	struct ust_app_session *ua_sess;
	int ret;

	rcu_read_lock();

	/* Get the node reference for a call_rcu */
	lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	assert(node);

	lta = caa_container_of(node, struct ust_app, sock_n);
	DBG("PID %d unregistering with sock %d", lta->pid, sock);

	/* Remove application from the socket hash table. */
	ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
	assert(!ret);

	/*
	 * Remove application from notify hash table. The thread handling the
	 * notify socket could have deleted the node so ignore on error because
	 * either way it's valid. The close of that socket is handled by the other
	 * thread.
	 */
	iter.iter.node = &lta->notify_sock_n.node;
	(void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);

	/*
	 * Ignore return value since the node might have been removed before by an
	 * add replace during app registration because the PID can be reassigned by
	 * the OS.
	 */
	iter.iter.node = &lta->pid_n.node;
	ret = lttng_ht_del(ust_app_ht, &iter);
	if (ret) {
		DBG3("Unregister app by PID %d failed. This can happen on pid reuse",
				lta->pid);
	}

	/* Remove sessions so they are not visible during deletion.*/
	cds_lfht_for_each_entry(lta->sessions->ht, &iter.iter, ua_sess,
			node.node) {
		struct ust_registry_session *registry;

		ret = lttng_ht_del(lta->sessions, &iter);
		if (ret) {
			/* The session was already removed so scheduled for teardown. */
			continue;
		}

		/*
		 * Add session to list for teardown. This is safe since at this point we
		 * are the only one using this list.
		 */
		pthread_mutex_lock(&ua_sess->lock);

		/*
		 * Normally, this is done in the delete session process which is
		 * executed in the call rcu below. However, upon registration we can't
		 * afford to wait for the grace period before pushing data or else the
		 * data pending feature can race between the unregistration and stop
		 * command where the data pending command is sent *before* the grace
		 * period ended.
		 *
		 * The close metadata below nullifies the metadata pointer in the
		 * session so the delete session will NOT push/close a second time.
		 */
		registry = get_session_registry(ua_sess);
		if (registry && !registry->metadata_closed) {
			/* Push metadata for application before freeing the application. */
			(void) push_metadata(registry, ua_sess->consumer);

			/*
			 * Don't ask to close metadata for global per UID buffers. Close
			 * metadata only on destroy trace session in this case. Also, the
			 * previous push metadata could have flag the metadata registry to
			 * close so don't send a close command if closed.
			 */
			if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID &&
					!registry->metadata_closed) {
				/* And ask to close it for this session registry. */
				(void) close_metadata(registry, ua_sess->consumer);
			}
		}

		cds_list_add(&ua_sess->teardown_node, &lta->teardown_head);
		pthread_mutex_unlock(&ua_sess->lock);
	}

	/* Free memory after the RCU grace period; readers may still hold lta. */
	call_rcu(&lta->pid_n.head, delete_ust_app_rcu);

	rcu_read_unlock();
	return;
}
2936
2937 /*
2938 * Return traceable_app_count
2939 */
2940 unsigned long ust_app_list_count(void)
2941 {
2942 unsigned long count;
2943
2944 rcu_read_lock();
2945 count = lttng_ht_get_count(ust_app_ht);
2946 rcu_read_unlock();
2947
2948 return count;
2949 }
2950
2951 /*
2952 * Fill events array with all events name of all registered apps.
2953 */
2954 int ust_app_list_events(struct lttng_event **events)
2955 {
2956 int ret, handle;
2957 size_t nbmem, count = 0;
2958 struct lttng_ht_iter iter;
2959 struct ust_app *app;
2960 struct lttng_event *tmp_event;
2961
2962 nbmem = UST_APP_EVENT_LIST_SIZE;
2963 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event));
2964 if (tmp_event == NULL) {
2965 PERROR("zmalloc ust app events");
2966 ret = -ENOMEM;
2967 goto error;
2968 }
2969
2970 rcu_read_lock();
2971
2972 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
2973 struct lttng_ust_tracepoint_iter uiter;
2974
2975 health_code_update();
2976
2977 if (!app->compatible) {
2978 /*
2979 * TODO: In time, we should notice the caller of this error by
2980 * telling him that this is a version error.
2981 */
2982 continue;
2983 }
2984 handle = ustctl_tracepoint_list(app->sock);
2985 if (handle < 0) {
2986 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
2987 ERR("UST app list events getting handle failed for app pid %d",
2988 app->pid);
2989 }
2990 continue;
2991 }
2992
2993 while ((ret = ustctl_tracepoint_list_get(app->sock, handle,
2994 &uiter)) != -LTTNG_UST_ERR_NOENT) {
2995 /* Handle ustctl error. */
2996 if (ret < 0) {
2997 free(tmp_event);
2998 if (ret != -LTTNG_UST_ERR_EXITING || ret != -EPIPE) {
2999 ERR("UST app tp list get failed for app %d with ret %d",
3000 app->sock, ret);
3001 } else {
3002 DBG3("UST app tp list get failed. Application is dead");
3003 }
3004 goto rcu_error;
3005 }
3006
3007 health_code_update();
3008 if (count >= nbmem) {
3009 /* In case the realloc fails, we free the memory */
3010 void *ptr;
3011
3012 DBG2("Reallocating event list from %zu to %zu entries", nbmem,
3013 2 * nbmem);
3014 nbmem *= 2;
3015 ptr = realloc(tmp_event, nbmem * sizeof(struct lttng_event));
3016 if (ptr == NULL) {
3017 PERROR("realloc ust app events");
3018 free(tmp_event);
3019 ret = -ENOMEM;
3020 goto rcu_error;
3021 }
3022 tmp_event = ptr;
3023 }
3024 memcpy(tmp_event[count].name, uiter.name, LTTNG_UST_SYM_NAME_LEN);
3025 tmp_event[count].loglevel = uiter.loglevel;
3026 tmp_event[count].type = (enum lttng_event_type) LTTNG_UST_TRACEPOINT;
3027 tmp_event[count].pid = app->pid;
3028 tmp_event[count].enabled = -1;
3029 count++;
3030 }
3031 }
3032
3033 ret = count;
3034 *events = tmp_event;
3035
3036 DBG2("UST app list events done (%zu events)", count);
3037
3038 rcu_error:
3039 rcu_read_unlock();
3040 error:
3041 health_code_update();
3042 return ret;
3043 }
3044
3045 /*
3046 * Fill events array with all events name of all registered apps.
3047 */
3048 int ust_app_list_event_fields(struct lttng_event_field **fields)
3049 {
3050 int ret, handle;
3051 size_t nbmem, count = 0;
3052 struct lttng_ht_iter iter;
3053 struct ust_app *app;
3054 struct lttng_event_field *tmp_event;
3055
3056 nbmem = UST_APP_EVENT_LIST_SIZE;
3057 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event_field));
3058 if (tmp_event == NULL) {
3059 PERROR("zmalloc ust app event fields");
3060 ret = -ENOMEM;
3061 goto error;
3062 }
3063
3064 rcu_read_lock();
3065
3066 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3067 struct lttng_ust_field_iter uiter;
3068
3069 health_code_update();
3070
3071 if (!app->compatible) {
3072 /*
3073 * TODO: In time, we should notice the caller of this error by
3074 * telling him that this is a version error.
3075 */
3076 continue;
3077 }
3078 handle = ustctl_tracepoint_field_list(app->sock);
3079 if (handle < 0) {
3080 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
3081 ERR("UST app list field getting handle failed for app pid %d",
3082 app->pid);
3083 }
3084 continue;
3085 }
3086
3087 while ((ret = ustctl_tracepoint_field_list_get(app->sock, handle,
3088 &uiter)) != -LTTNG_UST_ERR_NOENT) {
3089 /* Handle ustctl error. */
3090 if (ret < 0) {
3091 free(tmp_event);
3092 if (ret != -LTTNG_UST_ERR_EXITING || ret != -EPIPE) {
3093 ERR("UST app tp list field failed for app %d with ret %d",
3094 app->sock, ret);
3095 } else {
3096 DBG3("UST app tp list field failed. Application is dead");
3097 }
3098 goto rcu_error;
3099 }
3100
3101 health_code_update();
3102 if (count >= nbmem) {
3103 /* In case the realloc fails, we free the memory */
3104 void *ptr;
3105
3106 DBG2("Reallocating event field list from %zu to %zu entries", nbmem,
3107 2 * nbmem);
3108 nbmem *= 2;
3109 ptr = realloc(tmp_event, nbmem * sizeof(struct lttng_event_field));
3110 if (ptr == NULL) {
3111 PERROR("realloc ust app event fields");
3112 free(tmp_event);
3113 ret = -ENOMEM;
3114 goto rcu_error;
3115 }
3116 tmp_event = ptr;
3117 }
3118
3119 memcpy(tmp_event[count].field_name, uiter.field_name, LTTNG_UST_SYM_NAME_LEN);
3120 tmp_event[count].type = uiter.type;
3121 tmp_event[count].nowrite = uiter.nowrite;
3122
3123 memcpy(tmp_event[count].event.name, uiter.event_name, LTTNG_UST_SYM_NAME_LEN);
3124 tmp_event[count].event.loglevel = uiter.loglevel;
3125 tmp_event[count].event.type = LTTNG_UST_TRACEPOINT;
3126 tmp_event[count].event.pid = app->pid;
3127 tmp_event[count].event.enabled = -1;
3128 count++;
3129 }
3130 }
3131
3132 ret = count;
3133 *fields = tmp_event;
3134
3135 DBG2("UST app list event fields done (%zu events)", count);
3136
3137 rcu_error:
3138 rcu_read_unlock();
3139 error:
3140 health_code_update();
3141 return ret;
3142 }
3143
3144 /*
3145 * Free and clean all traceable apps of the global list.
3146 *
3147 * Should _NOT_ be called with RCU read-side lock held.
3148 */
3149 void ust_app_clean_list(void)
3150 {
3151 int ret;
3152 struct ust_app *app;
3153 struct lttng_ht_iter iter;
3154
3155 DBG2("UST app cleaning registered apps hash table");
3156
3157 rcu_read_lock();
3158
3159 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3160 ret = lttng_ht_del(ust_app_ht, &iter);
3161 assert(!ret);
3162 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
3163 }
3164
3165 /* Cleanup socket hash table */
3166 cds_lfht_for_each_entry(ust_app_ht_by_sock->ht, &iter.iter, app,
3167 sock_n.node) {
3168 ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
3169 assert(!ret);
3170 }
3171
3172 /* Cleanup notify socket hash table */
3173 cds_lfht_for_each_entry(ust_app_ht_by_notify_sock->ht, &iter.iter, app,
3174 notify_sock_n.node) {
3175 ret = lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
3176 assert(!ret);
3177 }
3178 rcu_read_unlock();
3179
3180 /* Destroy is done only when the ht is empty */
3181 ht_cleanup_push(ust_app_ht);
3182 ht_cleanup_push(ust_app_ht_by_sock);
3183 ht_cleanup_push(ust_app_ht_by_notify_sock);
3184 }
3185
3186 /*
3187 * Init UST app hash table.
3188 */
void ust_app_ht_alloc(void)
{
	/* Applications indexed by PID. */
	ust_app_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	/* Same applications indexed by command socket. */
	ust_app_ht_by_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	/* Same applications indexed by notify socket. */
	ust_app_ht_by_notify_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	/*
	 * NOTE(review): allocation failures are not checked here; presumably
	 * this is called once at startup where OOM is fatal anyway — confirm.
	 */
}
3195
3196 /*
3197 * For a specific UST session, disable the channel for all registered apps.
3198 */
3199 int ust_app_disable_channel_glb(struct ltt_ust_session *usess,
3200 struct ltt_ust_channel *uchan)
3201 {
3202 int ret = 0;
3203 struct lttng_ht_iter iter;
3204 struct lttng_ht_node_str *ua_chan_node;
3205 struct ust_app *app;
3206 struct ust_app_session *ua_sess;
3207 struct ust_app_channel *ua_chan;
3208
3209 if (usess == NULL || uchan == NULL) {
3210 ERR("Disabling UST global channel with NULL values");
3211 ret = -1;
3212 goto error;
3213 }
3214
3215 DBG2("UST app disabling channel %s from global domain for session id %" PRIu64,
3216 uchan->name, usess->id);
3217
3218 rcu_read_lock();
3219
3220 /* For every registered applications */
3221 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3222 struct lttng_ht_iter uiter;
3223 if (!app->compatible) {
3224 /*
3225 * TODO: In time, we should notice the caller of this error by
3226 * telling him that this is a version error.
3227 */
3228 continue;
3229 }
3230 ua_sess = lookup_session_by_app(usess, app);
3231 if (ua_sess == NULL) {
3232 continue;
3233 }
3234
3235 /* Get channel */
3236 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3237 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3238 /* If the session if found for the app, the channel must be there */
3239 assert(ua_chan_node);
3240
3241 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3242 /* The channel must not be already disabled */
3243 assert(ua_chan->enabled == 1);
3244
3245 /* Disable channel onto application */
3246 ret = disable_ust_app_channel(ua_sess, ua_chan, app);
3247 if (ret < 0) {
3248 /* XXX: We might want to report this error at some point... */
3249 continue;
3250 }
3251 }
3252
3253 rcu_read_unlock();
3254
3255 error:
3256 return ret;
3257 }
3258
3259 /*
3260 * For a specific UST session, enable the channel for all registered apps.
3261 */
3262 int ust_app_enable_channel_glb(struct ltt_ust_session *usess,
3263 struct ltt_ust_channel *uchan)
3264 {
3265 int ret = 0;
3266 struct lttng_ht_iter iter;
3267 struct ust_app *app;
3268 struct ust_app_session *ua_sess;
3269
3270 if (usess == NULL || uchan == NULL) {
3271 ERR("Adding UST global channel to NULL values");
3272 ret = -1;
3273 goto error;
3274 }
3275
3276 DBG2("UST app enabling channel %s to global domain for session id %" PRIu64,
3277 uchan->name, usess->id);
3278
3279 rcu_read_lock();
3280
3281 /* For every registered applications */
3282 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3283 if (!app->compatible) {
3284 /*
3285 * TODO: In time, we should notice the caller of this error by
3286 * telling him that this is a version error.
3287 */
3288 continue;
3289 }
3290 ua_sess = lookup_session_by_app(usess, app);
3291 if (ua_sess == NULL) {
3292 continue;
3293 }
3294
3295 /* Enable channel onto application */
3296 ret = enable_ust_app_channel(ua_sess, uchan, app);
3297 if (ret < 0) {
3298 /* XXX: We might want to report this error at some point... */
3299 continue;
3300 }
3301 }
3302
3303 rcu_read_unlock();
3304
3305 error:
3306 return ret;
3307 }
3308
/*
 * Disable an event in a channel and for a specific session, for all
 * registered applications.
 *
 * Apps that are incompatible, have no session for 'usess', or do not have
 * the channel/event are skipped; the loop always continues to the next app.
 *
 * Return 0 on success else the last negative value returned by
 * disable_ust_app_event().
 */
int ust_app_disable_event_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
{
	int ret = 0;
	struct lttng_ht_iter iter, uiter;
	struct lttng_ht_node_str *ua_chan_node, *ua_event_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;
	struct ust_app_event *ua_event;

	DBG("UST app disabling event %s for all apps in channel "
			"%s for session id %" PRIu64,
			uevent->attr.name, uchan->name, usess->id);

	rcu_read_lock();

	/* For all registered applications */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (ua_sess == NULL) {
			/* Next app */
			continue;
		}

		/* Lookup channel in the ust app session */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		if (ua_chan_node == NULL) {
			DBG2("Channel %s not found in session id %" PRIu64 " for app pid %d."
					"Skipping", uchan->name, usess->id, app->pid);
			continue;
		}
		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

		/* Lookup event inside the app channel by name. */
		lttng_ht_lookup(ua_chan->events, (void *)uevent->attr.name, &uiter);
		ua_event_node = lttng_ht_iter_get_node_str(&uiter);
		if (ua_event_node == NULL) {
			DBG2("Event %s not found in channel %s for app pid %d."
					"Skipping", uevent->attr.name, uchan->name, app->pid);
			continue;
		}
		ua_event = caa_container_of(ua_event_node, struct ust_app_event, node);

		ret = disable_ust_app_event(ua_sess, ua_event, app);
		if (ret < 0) {
			/* XXX: Report error someday... */
			continue;
		}
	}

	rcu_read_unlock();

	return ret;
}
3374
/*
 * For a specific UST session and UST channel, disable all events for all
 * registered applications.
 *
 * Unlike ust_app_disable_event_glb(), the channel is expected to exist in
 * every app session here (asserted), since this path is reached only after
 * the channel was propagated to all apps.
 *
 * Return 0 on success else the last negative value returned by
 * disable_ust_app_event().
 */
int ust_app_disable_all_event_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan)
{
	int ret = 0;
	struct lttng_ht_iter iter, uiter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;
	struct ust_app_event *ua_event;

	DBG("UST app disabling all event for all apps in channel "
			"%s for session id %" PRIu64, uchan->name, usess->id);

	rcu_read_lock();

	/* For all registered applications */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (!ua_sess) {
			/* The application has problem or is probably dead. */
			continue;
		}

		/* Lookup channel in the ust app session */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		/* If the channel is not found, there is a code flow error */
		assert(ua_chan_node);

		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

		/* Disable each events of channel */
		cds_lfht_for_each_entry(ua_chan->events->ht, &uiter.iter, ua_event,
				node.node) {
			ret = disable_ust_app_event(ua_sess, ua_event, app);
			if (ret < 0) {
				/* XXX: Report error someday... */
				continue;
			}
		}
	}

	rcu_read_unlock();

	return ret;
}
3433
/*
 * For a specific UST session, create the channel for all registered apps.
 *
 * For each compatible application, the app session is created (or found)
 * on the tracer side, then either the metadata channel or a per-CPU data
 * channel is created under the app session lock. -ENOMEM is fatal and
 * aborts the whole loop; other per-app errors clean up the app session
 * (when it was created here) and move on to the next application.
 *
 * Return 0 on success else a negative value (last error encountered).
 */
int ust_app_create_channel_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan)
{
	int ret = 0, created;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct ust_app_session *ua_sess = NULL;

	/* Very wrong code flow */
	assert(usess);
	assert(uchan);

	DBG2("UST app adding channel %s to UST domain for session id %" PRIu64,
			uchan->name, usess->id);

	rcu_read_lock();

	/* For every registered applications */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		/*
		 * Create session on the tracer side and add it to app session HT. Note
		 * that if session exist, it will simply return a pointer to the ust
		 * app session.
		 */
		ret = create_ust_app_session(usess, app, &ua_sess, &created);
		if (ret < 0) {
			switch (ret) {
			case -ENOTCONN:
				/*
				 * The application's socket is not valid. Either a bad socket
				 * or a timeout on it. We can't inform the caller that for a
				 * specific app, the session failed so lets continue here.
				 */
				continue;
			case -ENOMEM:
			default:
				goto error_rcu_unlock;
			}
		}
		assert(ua_sess);

		pthread_mutex_lock(&ua_sess->lock);
		/* The metadata channel uses a dedicated creation path. */
		if (!strncmp(uchan->name, DEFAULT_METADATA_NAME,
					sizeof(uchan->name))) {
			struct ustctl_consumer_channel_attr attr;
			copy_channel_attr_to_ustctl(&attr, &uchan->attr);
			ret = create_ust_app_metadata(ua_sess, app, usess->consumer,
					&attr);
		} else {
			/* Create channel onto application. We don't need the chan ref. */
			ret = create_ust_app_channel(ua_sess, uchan, app,
					LTTNG_UST_CHAN_PER_CPU, usess, NULL);
		}
		pthread_mutex_unlock(&ua_sess->lock);
		if (ret < 0) {
			if (ret == -ENOMEM) {
				/* No more memory is a fatal error. Stop right now. */
				goto error_rcu_unlock;
			}
			/* Cleanup the created session if it's the case. */
			if (created) {
				destroy_app_session(app, ua_sess);
			}
		}
	}

error_rcu_unlock:
	rcu_read_unlock();
	return ret;
}
3514
/*
 * Enable event for a specific session and channel on the tracer, for all
 * registered applications.
 *
 * Each app session is handled under its own lock. The channel must exist
 * (asserted); an app missing the event is skipped. The first failure of
 * enable_ust_app_event() aborts the loop.
 *
 * Return 0 on success else a negative value.
 */
int ust_app_enable_event_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
{
	int ret = 0;
	struct lttng_ht_iter iter, uiter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;
	struct ust_app_event *ua_event;

	DBG("UST app enabling event %s for all apps for session id %" PRIu64,
			uevent->attr.name, usess->id);

	/*
	 * NOTE: At this point, this function is called only if the session and
	 * channel passed are already created for all apps. and enabled on the
	 * tracer also.
	 */

	rcu_read_lock();

	/* For all registered applications */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (!ua_sess) {
			/* The application has problem or is probably dead. */
			continue;
		}

		pthread_mutex_lock(&ua_sess->lock);

		/* Lookup channel in the ust app session */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		/* If the channel is not found, there is a code flow error */
		assert(ua_chan_node);

		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

		/* Get event node; matched by name, filter and loglevel. */
		ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
				uevent->filter, uevent->attr.loglevel);
		if (ua_event == NULL) {
			DBG3("UST app enable event %s not found for app PID %d."
					"Skipping app", uevent->attr.name, app->pid);
			goto next_app;
		}

		ret = enable_ust_app_event(ua_sess, ua_event, app);
		if (ret < 0) {
			/* Unlock before bailing out of the loop entirely. */
			pthread_mutex_unlock(&ua_sess->lock);
			goto error;
		}
	next_app:
		pthread_mutex_unlock(&ua_sess->lock);
	}

error:
	rcu_read_unlock();
	return ret;
}
3587
/*
 * For a specific existing UST session and UST channel, creates the event for
 * all registered apps.
 *
 * The channel must already exist in every app session (asserted). An event
 * that already exists on an app (-LTTNG_UST_ERR_EXIST) is not an error; any
 * other failure (e.g. -ENOMEM) stops the loop.
 *
 * Return 0 on success else a negative value.
 */
int ust_app_create_event_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
{
	int ret = 0;
	struct lttng_ht_iter iter, uiter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;

	DBG("UST app creating event %s for all apps for session id %" PRIu64,
			uevent->attr.name, usess->id);

	rcu_read_lock();

	/* For all registered applications */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (!ua_sess) {
			/* The application has problem or is probably dead. */
			continue;
		}

		pthread_mutex_lock(&ua_sess->lock);
		/* Lookup channel in the ust app session */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		/* If the channel is not found, there is a code flow error */
		assert(ua_chan_node);

		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

		ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
		pthread_mutex_unlock(&ua_sess->lock);
		if (ret < 0) {
			if (ret != -LTTNG_UST_ERR_EXIST) {
				/* Possible value at this point: -ENOMEM. If so, we stop! */
				break;
			}
			DBG2("UST app event %s already exist on app PID %d",
					uevent->attr.name, app->pid);
			continue;
		}
	}

	rcu_read_unlock();

	return ret;
}
3648
/*
 * Start tracing for a specific UST session and app.
 *
 * On the first start, the local trace directory (when the consumer is
 * local) and the application metadata are set up; on a restart
 * (ua_sess->started already set), setup is skipped and the session is
 * simply started again on the tracer.
 *
 * Return 0 on success (including app incompatibility or a session in
 * teardown, which are silently ignored) else -1.
 */
static
int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app)
{
	int ret = 0;
	struct ust_app_session *ua_sess;

	DBG("Starting tracing for ust app pid %d", app->pid);

	rcu_read_lock();

	if (!app->compatible) {
		goto end;
	}

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		/* The session is in teardown process. Ignore and continue. */
		goto end;
	}

	pthread_mutex_lock(&ua_sess->lock);

	/* Upon restart, we skip the setup, already done */
	if (ua_sess->started) {
		goto skip_setup;
	}

	/* Create directories if consumer is LOCAL and has a path defined. */
	if (usess->consumer->type == CONSUMER_DST_LOCAL &&
			strlen(usess->consumer->dst.trace_path) > 0) {
		/* Directory is created with the app session credentials. */
		ret = run_as_mkdir_recursive(usess->consumer->dst.trace_path,
				S_IRWXU | S_IRWXG, ua_sess->euid, ua_sess->egid);
		if (ret < 0) {
			if (ret != -EEXIST) {
				ERR("Trace directory creation error");
				goto error_unlock;
			}
		}
	}

	/*
	 * Create the metadata for the application. This returns gracefully if a
	 * metadata was already set for the session.
	 */
	ret = create_ust_app_metadata(ua_sess, app, usess->consumer, NULL);
	if (ret < 0) {
		goto error_unlock;
	}

	health_code_update();

skip_setup:
	/* This start the UST tracing */
	ret = ustctl_start_session(app->sock, ua_sess->handle);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Error starting tracing for app pid: %d (ret: %d)",
					app->pid, ret);
		} else {
			DBG("UST app start session failed. Application is dead.");
		}
		goto error_unlock;
	}

	/* Indicate that the session has been started once */
	ua_sess->started = 1;

	pthread_mutex_unlock(&ua_sess->lock);

	health_code_update();

	/* Quiescent wait after starting trace */
	ret = ustctl_wait_quiescent(app->sock);
	if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
		ERR("UST app wait quiescent failed for app pid %d ret %d",
				app->pid, ret);
	}

end:
	rcu_read_unlock();
	health_code_update();
	return 0;

error_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
	rcu_read_unlock();
	health_code_update();
	return -1;
}
3741
/*
 * Stop tracing for a specific UST session and app.
 *
 * Stops the session on the tracer, waits for quiescence, then pushes any
 * remaining metadata to the consumer (unless the metadata registry was
 * already closed).
 *
 * Return 0 on success (or if the app is incompatible / has no session)
 * else -1 — in particular when the session was never started.
 */
static
int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
{
	int ret = 0;
	struct ust_app_session *ua_sess;
	struct ust_registry_session *registry;

	DBG("Stopping tracing for ust app pid %d", app->pid);

	rcu_read_lock();

	if (!app->compatible) {
		goto end_no_session;
	}

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		goto end_no_session;
	}

	pthread_mutex_lock(&ua_sess->lock);

	/*
	 * If started = 0, it means that stop trace has been called for a session
	 * that was never started. It's possible since we can have a fail start
	 * from either the application manager thread or the command thread. Simply
	 * indicate that this is a stop error.
	 */
	if (!ua_sess->started) {
		goto error_rcu_unlock;
	}

	health_code_update();

	/* This inhibits UST tracing */
	ret = ustctl_stop_session(app->sock, ua_sess->handle);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Error stopping tracing for app pid: %d (ret: %d)",
					app->pid, ret);
		} else {
			DBG("UST app stop session failed. Application is dead.");
		}
		goto error_rcu_unlock;
	}

	health_code_update();

	/* Quiescent wait after stopping trace */
	ret = ustctl_wait_quiescent(app->sock);
	if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
		ERR("UST app wait quiescent failed for app pid %d ret %d",
				app->pid, ret);
	}

	health_code_update();

	registry = get_session_registry(ua_sess);
	assert(registry);

	if (!registry->metadata_closed) {
		/* Push metadata for application before freeing the application. */
		(void) push_metadata(registry, ua_sess->consumer);
	}

	pthread_mutex_unlock(&ua_sess->lock);
end_no_session:
	rcu_read_unlock();
	health_code_update();
	return 0;

error_rcu_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
	rcu_read_unlock();
	health_code_update();
	return -1;
}
3822
/*
 * Flush buffers for a specific UST session and app.
 *
 * Every channel of the app session is flushed on the tracer side. A dead
 * application (-EPIPE / -LTTNG_UST_ERR_EXITING) stops the loop early; other
 * per-channel errors are logged and the remaining channels are still
 * flushed.
 *
 * Always return 0.
 */
static
int ust_app_flush_trace(struct ltt_ust_session *usess, struct ust_app *app)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;

	DBG("Flushing buffers for ust app pid %d", app->pid);

	rcu_read_lock();

	if (!app->compatible) {
		goto end_no_session;
	}

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		goto end_no_session;
	}

	pthread_mutex_lock(&ua_sess->lock);

	health_code_update();

	/* Flushing buffers */
	cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
			node.node) {
		health_code_update();
		/* Channel must have been sent to the application already. */
		assert(ua_chan->is_sent);
		ret = ustctl_sock_flush_buffer(app->sock, ua_chan->obj);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app PID %d channel %s flush failed with ret %d",
						app->pid, ua_chan->name, ret);
			} else {
				DBG3("UST app failed to flush %s. Application is dead.",
						ua_chan->name);
				/* No need to continue. */
				break;
			}
			/* Continuing flushing all buffers */
			continue;
		}
	}

	health_code_update();

	pthread_mutex_unlock(&ua_sess->lock);
end_no_session:
	rcu_read_unlock();
	health_code_update();
	return 0;
}
3880
/*
 * Destroy a specific UST session in apps.
 *
 * Looks up the app session for 'usess' and tears it down, then waits for
 * the application to reach a quiescent state. A missing session (already
 * being deleted) is not an error.
 *
 * Always return 0.
 */
static int destroy_trace(struct ltt_ust_session *usess, struct ust_app *app)
{
	int ret;
	struct ust_app_session *ua_sess;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;

	DBG("Destroy tracing for ust app pid %d", app->pid);

	rcu_read_lock();

	if (!app->compatible) {
		goto end;
	}

	__lookup_session_by_app(usess, app, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node == NULL) {
		/* Session is being or is deleted. */
		goto end;
	}
	ua_sess = caa_container_of(node, struct ust_app_session, node);

	health_code_update();
	destroy_app_session(app, ua_sess);

	health_code_update();

	/* Quiescent wait after stopping trace */
	ret = ustctl_wait_quiescent(app->sock);
	if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
		ERR("UST app wait quiescent failed for app pid %d ret %d",
				app->pid, ret);
	}
end:
	rcu_read_unlock();
	health_code_update();
	return 0;
}
3923
3924 /*
3925 * Start tracing for the UST session.
3926 */
3927 int ust_app_start_trace_all(struct ltt_ust_session *usess)
3928 {
3929 int ret = 0;
3930 struct lttng_ht_iter iter;
3931 struct ust_app *app;
3932
3933 DBG("Starting all UST traces");
3934
3935 rcu_read_lock();
3936
3937 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3938 ret = ust_app_start_trace(usess, app);
3939 if (ret < 0) {
3940 /* Continue to next apps even on error */
3941 continue;
3942 }
3943 }
3944
3945 rcu_read_unlock();
3946
3947 return 0;
3948 }
3949
/*
 * Stop tracing for the UST session on every registered application, then
 * flush buffers and push metadata.
 *
 * For per-UID buffers, each UID registry's channels are flushed through the
 * consumer socket matching the registry bitness and its metadata is pushed.
 * For per-PID buffers, every app session is flushed individually.
 *
 * Always return 0; per-app and per-registry errors are ignored.
 */
int ust_app_stop_trace_all(struct ltt_ust_session *usess)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;

	DBG("Stopping all UST traces");

	rcu_read_lock();

	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		ret = ust_app_stop_trace(usess, app);
		if (ret < 0) {
			/* Continue to next apps even on error */
			continue;
		}
	}

	/* Flush buffers and push metadata (for UID buffers). */
	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;

		/* Flush all per UID buffers associated to that session. */
		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct ust_registry_session *ust_session_reg;
			struct buffer_reg_channel *reg_chan;
			struct consumer_socket *socket;

			/* Get consumer socket to use to push the metadata.*/
			socket = consumer_find_socket_by_bitness(reg->bits_per_long,
					usess->consumer);
			if (!socket) {
				/* Ignore request if no consumer is found for the session. */
				continue;
			}

			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
					reg_chan, node.node) {
				/*
				 * The following call will print error values so the return
				 * code is of little importance because whatever happens, we
				 * have to try them all.
				 */
				(void) consumer_flush_channel(socket, reg_chan->consumer_key);
			}

			ust_session_reg = reg->registry->reg.ust;
			if (!ust_session_reg->metadata_closed) {
				/* Push metadata. */
				(void) push_metadata(ust_session_reg, usess->consumer);
			}
		}

		break;
	}
	case LTTNG_BUFFER_PER_PID:
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			ret = ust_app_flush_trace(usess, app);
			if (ret < 0) {
				/* Continue to next apps even on error */
				continue;
			}
		}
		break;
	default:
		assert(0);
		break;
	}

	rcu_read_unlock();

	return 0;
}
4028
4029 /*
4030 * Destroy app UST session.
4031 */
4032 int ust_app_destroy_trace_all(struct ltt_ust_session *usess)
4033 {
4034 int ret = 0;
4035 struct lttng_ht_iter iter;
4036 struct ust_app *app;
4037
4038 DBG("Destroy all UST traces");
4039
4040 rcu_read_lock();
4041
4042 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4043 ret = destroy_trace(usess, app);
4044 if (ret < 0) {
4045 /* Continue to next apps even on error */
4046 continue;
4047 }
4048 }
4049
4050 rcu_read_unlock();
4051
4052 return 0;
4053 }
4054
/*
 * Add channels/events from UST global domain to registered apps at sock.
 *
 * Called when an application registers: the app session is created as a
 * shadow copy of the UST global domain, then every channel, its contexts
 * and its events are created on the tracer side. Contexts are added by
 * walking the channel's ctx_list so they are enabled in the same order the
 * user enabled them. If the session was already started, tracing is
 * started for this app as well.
 *
 * On any error, the app session is destroyed and the function returns.
 */
void ust_app_global_update(struct ltt_ust_session *usess, int sock)
{
	int ret = 0;
	struct lttng_ht_iter iter, uiter;
	struct ust_app *app;
	struct ust_app_session *ua_sess = NULL;
	struct ust_app_channel *ua_chan;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;

	assert(usess);
	assert(sock >= 0);

	DBG2("UST app global update for app sock %d for session id %" PRIu64, sock,
			usess->id);

	rcu_read_lock();

	app = find_app_by_sock(sock);
	if (app == NULL) {
		/*
		 * Application can be unregistered before so this is possible hence
		 * simply stopping the update.
		 */
		DBG3("UST app update failed to find app sock %d", sock);
		goto error;
	}

	if (!app->compatible) {
		goto error;
	}

	ret = create_ust_app_session(usess, app, &ua_sess, NULL);
	if (ret < 0) {
		/* Tracer is probably gone or ENOMEM. */
		goto error;
	}
	assert(ua_sess);

	pthread_mutex_lock(&ua_sess->lock);

	/*
	 * We can iterate safely here over all UST app session since the create ust
	 * app session above made a shadow copy of the UST global domain from the
	 * ltt ust session.
	 */
	cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
			node.node) {
		/*
		 * For a metadata channel, handle it differently.
		 */
		if (!strncmp(ua_chan->name, DEFAULT_METADATA_NAME,
					sizeof(ua_chan->name))) {
			ret = create_ust_app_metadata(ua_sess, app, usess->consumer,
					&ua_chan->attr);
			if (ret < 0) {
				goto error_unlock;
			}
			/* Remove it from the hash table and continue!. */
			ret = lttng_ht_del(ua_sess->channels, &iter);
			assert(!ret);
			delete_ust_app_channel(-1, ua_chan, app);
			continue;
		} else {
			ret = do_create_channel(app, usess, ua_sess, ua_chan);
			if (ret < 0) {
				/*
				 * Stop everything. On error, the application failed, no more
				 * file descriptor are available or ENOMEM so stopping here is
				 * the only thing we can do for now.
				 */
				goto error_unlock;
			}
		}

		/*
		 * Add context using the list so they are enabled in the same order the
		 * user added them.
		 */
		cds_list_for_each_entry(ua_ctx, &ua_chan->ctx_list, list) {
			ret = create_ust_channel_context(ua_chan, ua_ctx, app);
			if (ret < 0) {
				goto error_unlock;
			}
		}


		/* For each events */
		cds_lfht_for_each_entry(ua_chan->events->ht, &uiter.iter, ua_event,
				node.node) {
			ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
			if (ret < 0) {
				goto error_unlock;
			}
		}
	}

	pthread_mutex_unlock(&ua_sess->lock);

	/* Session already started: start tracing on this new app too. */
	if (usess->start_trace) {
		ret = ust_app_start_trace(usess, app);
		if (ret < 0) {
			goto error;
		}

		DBG2("UST trace started for app pid %d", app->pid);
	}

	/* Everything went well at this point. */
	rcu_read_unlock();
	return;

error_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
error:
	if (ua_sess) {
		destroy_app_session(app, ua_sess);
	}
	rcu_read_unlock();
	return;
}
4179
/*
 * Add context to a specific channel for global UST domain, for every
 * registered application.
 *
 * Each app session is handled under its own lock; apps without the session
 * or the channel are skipped, as are apps where the context creation fails.
 *
 * Return 0 on success else the last negative value returned by
 * create_ust_app_channel_context().
 */
int ust_app_add_ctx_channel_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_context *uctx)
{
	int ret = 0;
	struct lttng_ht_node_str *ua_chan_node;
	struct lttng_ht_iter iter, uiter;
	struct ust_app_channel *ua_chan = NULL;
	struct ust_app_session *ua_sess;
	struct ust_app *app;

	rcu_read_lock();

	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (ua_sess == NULL) {
			continue;
		}

		pthread_mutex_lock(&ua_sess->lock);
		/* Lookup channel in the ust app session */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		if (ua_chan_node == NULL) {
			goto next_app;
		}
		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel,
				node);
		ret = create_ust_app_channel_context(ua_sess, ua_chan, &uctx->ctx, app);
		if (ret < 0) {
			goto next_app;
		}
	next_app:
		pthread_mutex_unlock(&ua_sess->lock);
	}

	rcu_read_unlock();
	return ret;
}
4228
/*
 * Enable event for a channel from a UST session for a specific PID.
 *
 * The channel must exist in the app session (asserted). If the event does
 * not exist on the app yet, it is created; otherwise it is simply enabled.
 *
 * Return 0 on success (or if the app is incompatible / has no session),
 * -1 if the PID is not a registered app, else a negative value from the
 * create/enable helpers.
 */
int ust_app_enable_event_pid(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent, pid_t pid)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;
	struct ust_app_event *ua_event;

	DBG("UST app enabling event %s for PID %d", uevent->attr.name, pid);

	rcu_read_lock();

	app = ust_app_find_by_pid(pid);
	if (app == NULL) {
		ERR("UST app enable event per PID %d not found", pid);
		ret = -1;
		goto end;
	}

	if (!app->compatible) {
		ret = 0;
		goto end;
	}

	ua_sess = lookup_session_by_app(usess, app);
	if (!ua_sess) {
		/* The application has problem or is probably dead. */
		ret = 0;
		goto end;
	}

	pthread_mutex_lock(&ua_sess->lock);
	/* Lookup channel in the ust app session */
	lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
	ua_chan_node = lttng_ht_iter_get_node_str(&iter);
	/* If the channel is not found, there is a code flow error */
	assert(ua_chan_node);

	ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

	/* Event matched by name, filter and loglevel. */
	ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
			uevent->filter, uevent->attr.loglevel);
	if (ua_event == NULL) {
		ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
		if (ret < 0) {
			goto end_unlock;
		}
	} else {
		ret = enable_ust_app_event(ua_sess, ua_event, app);
		if (ret < 0) {
			goto end_unlock;
		}
	}

end_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
end:
	rcu_read_unlock();
	return ret;
}
4295
/*
 * Disable event for a channel from a UST session for a specific PID.
 *
 * A missing channel or event on the app is silently skipped (nothing to
 * disable).
 *
 * Return 0 on success, -1 if the PID is not a registered app, else a
 * negative value from disable_ust_app_event().
 */
int ust_app_disable_event_pid(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent, pid_t pid)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node, *ua_event_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;
	struct ust_app_event *ua_event;

	DBG("UST app disabling event %s for PID %d", uevent->attr.name, pid);

	rcu_read_lock();

	app = ust_app_find_by_pid(pid);
	if (app == NULL) {
		ERR("UST app disable event per PID %d not found", pid);
		ret = -1;
		goto error;
	}

	if (!app->compatible) {
		ret = 0;
		goto error;
	}

	ua_sess = lookup_session_by_app(usess, app);
	if (!ua_sess) {
		/* The application has problem or is probably dead. */
		goto error;
	}

	/* Lookup channel in the ust app session */
	lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
	ua_chan_node = lttng_ht_iter_get_node_str(&iter);
	if (ua_chan_node == NULL) {
		/* Channel does not exist, skip disabling */
		goto error;
	}
	ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

	/* Lookup event in the app channel by name. */
	lttng_ht_lookup(ua_chan->events, (void *)uevent->attr.name, &iter);
	ua_event_node = lttng_ht_iter_get_node_str(&iter);
	if (ua_event_node == NULL) {
		/* Event does not exist, skip disabling */
		goto error;
	}
	ua_event = caa_container_of(ua_event_node, struct ust_app_event, node);

	ret = disable_ust_app_event(ua_sess, ua_event, app);
	if (ret < 0) {
		goto error;
	}

error:
	rcu_read_unlock();
	return ret;
}
4358
/*
 * Calibrate registered applications.
 *
 * Sends the calibrate command to every compatible registered app. An app
 * that does not implement calibration (-ENOSYS) is treated as success;
 * other per-app errors are only logged.
 *
 * Return 0 on success else the last error returned by ustctl_calibrate().
 */
int ust_app_calibrate_glb(struct lttng_ust_calibrate *calibrate)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;

	rcu_read_lock();

	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}

		health_code_update();

		ret = ustctl_calibrate(app->sock, calibrate);
		if (ret < 0) {
			switch (ret) {
			case -ENOSYS:
				/* Means that it's not implemented on the tracer side. */
				ret = 0;
				break;
			default:
				DBG2("Calibrate app PID %d returned with error %d",
						app->pid, ret);
				break;
			}
		}
	}

	DBG("UST app global domain calibration finished");

	rcu_read_unlock();

	health_code_update();

	return ret;
}
4404
/*
 * Receive registration and populate the given msg structure.
 *
 * Reads the registration message from 'sock' (type, tracer version, ABI
 * layout information and process identity) and stores the pid/ppid/uid/gid
 * as their proper system types in 'msg'.
 *
 * On success return 0 else a negative value returned by the ustctl call.
 */
int ust_app_recv_registration(int sock, struct ust_register_msg *msg)
{
	int ret;
	uint32_t pid, ppid, uid, gid;

	assert(msg);

	ret = ustctl_recv_reg_msg(sock, &msg->type, &msg->major, &msg->minor,
			&pid, &ppid, &uid, &gid,
			&msg->bits_per_long,
			&msg->uint8_t_alignment,
			&msg->uint16_t_alignment,
			&msg->uint32_t_alignment,
			&msg->uint64_t_alignment,
			&msg->long_alignment,
			&msg->byte_order,
			msg->name);
	if (ret < 0) {
		switch (-ret) {
		case EPIPE:
		case ECONNRESET:
		case LTTNG_UST_ERR_EXITING:
			DBG3("UST app recv reg message failed. Application died");
			break;
		case LTTNG_UST_ERR_UNSUP_MAJOR:
			ERR("UST app recv reg unsupported version %d.%d. Supporting %d.%d",
					msg->major, msg->minor, LTTNG_UST_ABI_MAJOR_VERSION,
					LTTNG_UST_ABI_MINOR_VERSION);
			break;
		default:
			ERR("UST app recv reg message failed with ret %d", ret);
			break;
		}
		goto error;
	}
	/* Convert from the wire's fixed-width integers to system types. */
	msg->pid = (pid_t) pid;
	msg->ppid = (pid_t) ppid;
	msg->uid = (uid_t) uid;
	msg->gid = (gid_t) gid;

error:
	return ret;
}
4453
4454 /*
4455 * Return a ust app channel object using the application object and the channel
4456 * object descriptor has a key. If not found, NULL is returned. A RCU read side
4457 * lock MUST be acquired before calling this function.
4458 */
4459 static struct ust_app_channel *find_channel_by_objd(struct ust_app *app,
4460 int objd)
4461 {
4462 struct lttng_ht_node_ulong *node;
4463 struct lttng_ht_iter iter;
4464 struct ust_app_channel *ua_chan = NULL;
4465
4466 assert(app);
4467
4468 lttng_ht_lookup(app->ust_objd, (void *)((unsigned long) objd), &iter);
4469 node = lttng_ht_iter_get_node_ulong(&iter);
4470 if (node == NULL) {
4471 DBG2("UST app channel find by objd %d not found", objd);
4472 goto error;
4473 }
4474
4475 ua_chan = caa_container_of(node, struct ust_app_channel, ust_objd_node);
4476
4477 error:
4478 return ua_chan;
4479 }
4480
4481 /*
4482 * Reply to a register channel notification from an application on the notify
4483 * socket. The channel metadata is also created.
4484 *
4485 * The session UST registry lock is acquired in this function.
4486 *
4487 * On success 0 is returned else a negative value.
4488 */
4489 static int reply_ust_register_channel(int sock, int sobjd, int cobjd,
4490 size_t nr_fields, struct ustctl_field *fields)
4491 {
4492 int ret, ret_code = 0;
4493 uint32_t chan_id, reg_count;
4494 uint64_t chan_reg_key;
4495 enum ustctl_channel_header type;
4496 struct ust_app *app;
4497 struct ust_app_channel *ua_chan;
4498 struct ust_app_session *ua_sess;
4499 struct ust_registry_session *registry;
4500 struct ust_registry_channel *chan_reg;
4501
4502 rcu_read_lock();
4503
4504 /* Lookup application. If not found, there is a code flow error. */
4505 app = find_app_by_notify_sock(sock);
4506 if (!app) {
4507 DBG("Application socket %d is being teardown. Abort event notify",
4508 sock);
4509 ret = 0;
4510 free(fields);
4511 goto error_rcu_unlock;
4512 }
4513
4514 /* Lookup channel by UST object descriptor. */
4515 ua_chan = find_channel_by_objd(app, cobjd);
4516 if (!ua_chan) {
4517 DBG("Application channel is being teardown. Abort event notify");
4518 ret = 0;
4519 free(fields);
4520 goto error_rcu_unlock;
4521 }
4522
4523 assert(ua_chan->session);
4524 ua_sess = ua_chan->session;
4525
4526 /* Get right session registry depending on the session buffer type. */
4527 registry = get_session_registry(ua_sess);
4528 assert(registry);
4529
4530 /* Depending on the buffer type, a different channel key is used. */
4531 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
4532 chan_reg_key = ua_chan->tracing_channel_id;
4533 } else {
4534 chan_reg_key = ua_chan->key;
4535 }
4536
4537 pthread_mutex_lock(&registry->lock);
4538
4539 chan_reg = ust_registry_channel_find(registry, chan_reg_key);
4540 assert(chan_reg);
4541
4542 if (!chan_reg->register_done) {
4543 reg_count = ust_registry_get_event_count(chan_reg);
4544 if (reg_count < 31) {
4545 type = USTCTL_CHANNEL_HEADER_COMPACT;
4546 } else {
4547 type = USTCTL_CHANNEL_HEADER_LARGE;
4548 }
4549
4550 chan_reg->nr_ctx_fields = nr_fields;
4551 chan_reg->ctx_fields = fields;
4552 chan_reg->header_type = type;
4553 } else {
4554 /* Get current already assigned values. */
4555 type = chan_reg->header_type;
4556 free(fields);
4557 /* Set to NULL so the error path does not do a double free. */
4558 fields = NULL;
4559 }
4560 /* Channel id is set during the object creation. */
4561 chan_id = chan_reg->chan_id;
4562
4563 /* Append to metadata */
4564 if (!chan_reg->metadata_dumped) {
4565 ret_code = ust_metadata_channel_statedump(registry, chan_reg);
4566 if (ret_code) {
4567 ERR("Error appending channel metadata (errno = %d)", ret_code);
4568 goto reply;
4569 }
4570 }
4571
4572 reply:
4573 DBG3("UST app replying to register channel key %" PRIu64
4574 " with id %u, type: %d, ret: %d", chan_reg_key, chan_id, type,
4575 ret_code);
4576
4577 ret = ustctl_reply_register_channel(sock, chan_id, type, ret_code);
4578 if (ret < 0) {
4579 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4580 ERR("UST app reply channel failed with ret %d", ret);
4581 } else {
4582 DBG3("UST app reply channel failed. Application died");
4583 }
4584 goto error;
4585 }
4586
4587 /* This channel registry registration is completed. */
4588 chan_reg->register_done = 1;
4589
4590 error:
4591 pthread_mutex_unlock(&registry->lock);
4592 error_rcu_unlock:
4593 rcu_read_unlock();
4594 if (ret) {
4595 free(fields);
4596 }
4597 return ret;
4598 }
4599
/*
 * Add event to the UST channel registry. When the event is added to the
 * registry, the metadata is also created. Once done, this replies to the
 * application with the appropriate error code.
 *
 * On entry this function owns sig, fields and model_emf_uri: every return
 * path either frees them or hands them to ust_registry_create_event().
 *
 * The session UST registry lock is acquired in the function.
 *
 * On success 0 is returned else a negative value.
 */
static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
		char *sig, size_t nr_fields, struct ustctl_field *fields, int loglevel,
		char *model_emf_uri)
{
	int ret, ret_code;
	uint32_t event_id = 0;
	uint64_t chan_reg_key;
	struct ust_app *app;
	struct ust_app_channel *ua_chan;
	struct ust_app_session *ua_sess;
	struct ust_registry_session *registry;

	rcu_read_lock();

	/* Lookup application. If not found, there is a code flow error. */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		DBG("Application socket %d is being teardown. Abort event notify",
				sock);
		ret = 0;
		/* We still own the received buffers here; release them before bailing. */
		free(sig);
		free(fields);
		free(model_emf_uri);
		goto error_rcu_unlock;
	}

	/* Lookup channel by UST object descriptor. */
	ua_chan = find_channel_by_objd(app, cobjd);
	if (!ua_chan) {
		DBG("Application channel is being teardown. Abort event notify");
		ret = 0;
		free(sig);
		free(fields);
		free(model_emf_uri);
		goto error_rcu_unlock;
	}

	assert(ua_chan->session);
	ua_sess = ua_chan->session;

	registry = get_session_registry(ua_sess);
	assert(registry);

	/* Per-UID buffers are keyed by the tracing channel id, per-PID by the
	 * channel's own key. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
		chan_reg_key = ua_chan->tracing_channel_id;
	} else {
		chan_reg_key = ua_chan->key;
	}

	pthread_mutex_lock(&registry->lock);

	/*
	 * From this point on, this call acquires the ownership of the sig, fields
	 * and model_emf_uri meaning any free are done inside it if needed. These
	 * three variables MUST NOT be read/write after this.
	 */
	ret_code = ust_registry_create_event(registry, chan_reg_key,
			sobjd, cobjd, name, sig, nr_fields, fields, loglevel,
			model_emf_uri, ua_sess->buffer_type, &event_id);

	/*
	 * The return value is returned to ustctl so in case of an error, the
	 * application can be notified. In case of an error, it's important not to
	 * return a negative error or else the application will get closed.
	 */
	ret = ustctl_reply_register_event(sock, event_id, ret_code);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app reply event failed with ret %d", ret);
		} else {
			DBG3("UST app reply event failed. Application died");
		}
		/*
		 * No need to wipe the create event since the application socket will
		 * get close on error hence cleaning up everything by itself.
		 */
		goto error;
	}

	DBG3("UST registry event %s with id %" PRId32 " added successfully",
			name, event_id);

error:
	pthread_mutex_unlock(&registry->lock);
error_rcu_unlock:
	rcu_read_unlock();
	return ret;
}
4697
/*
 * Handle application notification through the given notify socket: read the
 * command then dispatch to the event or channel registration handler.
 *
 * Return 0 on success or else a negative value.
 */
int ust_app_recv_notify(int sock)
{
	int ret;
	enum ustctl_notify_cmd cmd;

	DBG3("UST app receiving notify from sock %d", sock);

	ret = ustctl_recv_notify(sock, &cmd);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app recv notify failed with ret %d", ret);
		} else {
			DBG3("UST app recv notify failed. Application died");
		}
		goto error;
	}

	switch (cmd) {
	case USTCTL_NOTIFY_CMD_EVENT:
	{
		int sobjd, cobjd, loglevel;
		char name[LTTNG_UST_SYM_NAME_LEN], *sig, *model_emf_uri;
		size_t nr_fields;
		struct ustctl_field *fields;

		DBG2("UST app ustctl register event received");

		/* On success, sig, fields and model_emf_uri are heap allocated
		 * and owned by this code path until handed off below. */
		ret = ustctl_recv_register_event(sock, &sobjd, &cobjd, name, &loglevel,
				&sig, &nr_fields, &fields, &model_emf_uri);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app recv event failed with ret %d", ret);
			} else {
				DBG3("UST app recv event failed. Application died");
			}
			goto error;
		}

		/*
		 * Add event to the UST registry coming from the notify socket. This
		 * call will free if needed the sig, fields and model_emf_uri. This
		 * code path loses the ownership of these variables and transfers
		 * them to that function; they must not be touched afterwards.
		 */
		ret = add_event_ust_registry(sock, sobjd, cobjd, name, sig, nr_fields,
				fields, loglevel, model_emf_uri);
		if (ret < 0) {
			goto error;
		}

		break;
	}
	case USTCTL_NOTIFY_CMD_CHANNEL:
	{
		int sobjd, cobjd;
		size_t nr_fields;
		struct ustctl_field *fields;

		DBG2("UST app ustctl register channel received");

		ret = ustctl_recv_register_channel(sock, &sobjd, &cobjd, &nr_fields,
				&fields);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app recv channel failed with ret %d", ret);
			} else {
				DBG3("UST app recv channel failed. Application died");
			}
			goto error;
		}

		/*
		 * The fields ownership is transferred to this function call meaning
		 * that if needed it will be freed. After this, it's invalid to access
		 * fields or clean it up.
		 */
		ret = reply_ust_register_channel(sock, sobjd, cobjd, nr_fields,
				fields);
		if (ret < 0) {
			goto error;
		}

		break;
	}
	default:
		/* Should NEVER happen. */
		assert(0);
	}

error:
	return ret;
}
4795
/*
 * Once the notify socket hangs up, this is called. First, it tries to find the
 * corresponding application. On failure, the call_rcu to close the socket is
 * executed. If an application is found, it tries to delete it from the notify
 * socket hash table. Whatever the result, it proceeds to the call_rcu.
 *
 * Note that an object needs to be allocated here so on ENOMEM failure, the
 * call RCU is not done but the rest of the cleanup is.
 */
void ust_app_notify_sock_unregister(int sock)
{
	int err_enomem = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct ust_app_notify_sock_obj *obj;

	assert(sock >= 0);

	rcu_read_lock();

	/* Container passed to call_rcu; carries the fd to close later. */
	obj = zmalloc(sizeof(*obj));
	if (!obj) {
		/*
		 * An ENOMEM is kind of uncool. If this strikes we continue the
		 * procedure but the call_rcu will not be called. In this case, we
		 * accept the fd leak rather than possibly creating an unsynchronized
		 * state between threads.
		 *
		 * TODO: The notify object should be created once the notify socket is
		 * registered and stored independently from the ust app object. The
		 * tricky part is to synchronize the teardown of the application and
		 * this notify object. Let's keep that in mind so we can avoid this
		 * kind of shenanigans with ENOMEM in the teardown path.
		 */
		err_enomem = 1;
	} else {
		obj->fd = sock;
	}

	DBG("UST app notify socket unregister %d", sock);

	/*
	 * Lookup application by notify socket. If this fails, this means that the
	 * hash table delete has already been done by the application
	 * unregistration process so we can safely close the notify socket in a
	 * call RCU.
	 */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		goto close_socket;
	}

	iter.iter.node = &app->notify_sock_n.node;

	/*
	 * Whatever happens here either we fail or succeed, in both cases we have
	 * to close the socket after a grace period to continue to the call RCU
	 * here. If the deletion is successful, the application is not visible
	 * anymore by other threads and if it fails it means that it was already
	 * deleted from the hash table so either way we just have to close the
	 * socket.
	 */
	(void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);

close_socket:
	rcu_read_unlock();

	/*
	 * Close socket after a grace period to avoid for the socket to be reused
	 * before the application object is freed creating potential race between
	 * threads trying to add unique in the global hash table.
	 */
	if (!err_enomem) {
		call_rcu(&obj->head, close_notify_sock_rcu);
	}
}
4872
4873 /*
4874 * Destroy a ust app data structure and free its memory.
4875 */
4876 void ust_app_destroy(struct ust_app *app)
4877 {
4878 if (!app) {
4879 return;
4880 }
4881
4882 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
4883 }
4884
/*
 * Take a snapshot for a given UST session. The snapshot is sent to the given
 * output.
 *
 * nb_streams is used to split output->max_size evenly across streams; 0
 * disables the per-stream cap.
 *
 * Return 0 on success or else a negative value.
 */
int ust_app_snapshot_record(struct ltt_ust_session *usess,
		struct snapshot_output *output, int wait, unsigned int nb_streams)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	char pathname[PATH_MAX];
	uint64_t max_stream_size = 0;

	assert(usess);
	assert(output);

	rcu_read_lock();

	/*
	 * Compute the maximum size of a single stream if a max size is asked by
	 * the caller.
	 */
	if (output->max_size > 0 && nb_streams > 0) {
		max_stream_size = output->max_size / nb_streams;
	}

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;

		/* One registry per (uid, bitness); snapshot each one. */
		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct buffer_reg_channel *reg_chan;
			struct consumer_socket *socket;

			/* Get consumer socket to use to push the metadata.*/
			socket = consumer_find_socket_by_bitness(reg->bits_per_long,
					usess->consumer);
			if (!socket) {
				ret = -EINVAL;
				goto error;
			}

			/* Build the per-uid UST trace directory path. */
			memset(pathname, 0, sizeof(pathname));
			ret = snprintf(pathname, sizeof(pathname),
					DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH,
					reg->uid, reg->bits_per_long);
			if (ret < 0) {
				PERROR("snprintf snapshot path");
				goto error;
			}

			/* Snapshot every data channel of this registry. */
			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
					reg_chan, node.node) {

				/*
				 * Make sure the maximum stream size is not lower than the
				 * subbuffer size or else it's an error since we won't be able to
				 * snapshot anything.
				 */
				if (max_stream_size &&
						reg_chan->subbuf_size > max_stream_size) {
					ret = -EINVAL;
					DBG3("UST app snapshot record maximum stream size %" PRIu64
							" is smaller than subbuffer size of %zu",
							max_stream_size, reg_chan->subbuf_size);
					goto error;
				}
				ret = consumer_snapshot_channel(socket, reg_chan->consumer_key, output, 0,
						usess->uid, usess->gid, pathname, wait,
						max_stream_size);
				if (ret < 0) {
					goto error;
				}
			}
			/* Then snapshot the metadata channel (metadata flag = 1). */
			ret = consumer_snapshot_channel(socket, reg->registry->reg.ust->metadata_key, output,
					1, usess->uid, usess->gid, pathname, wait,
					max_stream_size);
			if (ret < 0) {
				goto error;
			}
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		/* Per-PID buffers: walk every registered app tracing this session. */
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			struct consumer_socket *socket;
			struct lttng_ht_iter chan_iter;
			struct ust_app_channel *ua_chan;
			struct ust_app_session *ua_sess;
			struct ust_registry_session *registry;

			ua_sess = lookup_session_by_app(usess, app);
			if (!ua_sess) {
				/* Session not associated with this app. */
				continue;
			}

			/*
			 * Get the right consumer socket for the application.
			 * NOTE(review): this path looks sockets up in output->consumer
			 * while the per-UID path uses usess->consumer — confirm whether
			 * the asymmetry is intentional.
			 */
			socket = consumer_find_socket_by_bitness(app->bits_per_long,
					output->consumer);
			if (!socket) {
				ret = -EINVAL;
				goto error;
			}

			/* Add the UST default trace dir to path. */
			memset(pathname, 0, sizeof(pathname));
			ret = snprintf(pathname, sizeof(pathname), DEFAULT_UST_TRACE_DIR "/%s",
					ua_sess->path);
			if (ret < 0) {
				PERROR("snprintf snapshot path");
				goto error;
			}

			cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
					ua_chan, node.node) {
				/*
				 * Make sure the maximum stream size is not lower than the
				 * subbuffer size or else it's an error since we won't be able to
				 * snapshot anything.
				 */
				if (max_stream_size &&
						ua_chan->attr.subbuf_size > max_stream_size) {
					ret = -EINVAL;
					DBG3("UST app snapshot record maximum stream size %" PRIu64
							" is smaller than subbuffer size of %" PRIu64,
							max_stream_size, ua_chan->attr.subbuf_size);
					goto error;
				}

				ret = consumer_snapshot_channel(socket, ua_chan->key, output, 0,
						ua_sess->euid, ua_sess->egid, pathname, wait,
						max_stream_size);
				if (ret < 0) {
					goto error;
				}
			}

			/* Finally snapshot this app session's metadata channel. */
			registry = get_session_registry(ua_sess);
			assert(registry);
			ret = consumer_snapshot_channel(socket, registry->metadata_key, output,
					1, ua_sess->euid, ua_sess->egid, pathname, wait,
					max_stream_size);
			if (ret < 0) {
				goto error;
			}
		}
		break;
	}
	default:
		assert(0);
		break;
	}

error:
	rcu_read_unlock();
	return ret;
}
5048
5049 /*
5050 * Return the number of streams for a UST session.
5051 */
5052 unsigned int ust_app_get_nb_stream(struct ltt_ust_session *usess)
5053 {
5054 unsigned int ret = 0;
5055 struct ust_app *app;
5056 struct lttng_ht_iter iter;
5057
5058 assert(usess);
5059
5060 switch (usess->buffer_type) {
5061 case LTTNG_BUFFER_PER_UID:
5062 {
5063 struct buffer_reg_uid *reg;
5064
5065 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
5066 struct buffer_reg_channel *reg_chan;
5067
5068 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
5069 reg_chan, node.node) {
5070 ret += reg_chan->stream_count;
5071 }
5072 }
5073 break;
5074 }
5075 case LTTNG_BUFFER_PER_PID:
5076 {
5077 rcu_read_lock();
5078 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5079 struct ust_app_channel *ua_chan;
5080 struct ust_app_session *ua_sess;
5081 struct lttng_ht_iter chan_iter;
5082
5083 ua_sess = lookup_session_by_app(usess, app);
5084 if (!ua_sess) {
5085 /* Session not associated with this app. */
5086 continue;
5087 }
5088
5089 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
5090 ua_chan, node.node) {
5091 ret += ua_chan->streams.count;
5092 }
5093 }
5094 rcu_read_unlock();
5095 break;
5096 }
5097 default:
5098 assert(0);
5099 break;
5100 }
5101
5102 return ret;
5103 }
This page took 0.17814 seconds and 5 git commands to generate.