src/bin/lttng-sessiond/ust-app.c
/*
 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#define _GNU_SOURCE
#define _LGPL_SOURCE
#include <errno.h>
#include <inttypes.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <urcu/compiler.h>
#include <lttng/ust-error.h>
#include <signal.h>

#include <common/common.h>
#include <common/sessiond-comm/sessiond-comm.h>

#include "buffer-registry.h"
#include "fd-limit.h"
#include "health-sessiond.h"
#include "ust-app.h"
#include "ust-consumer.h"
#include "ust-ctl.h"
#include "utils.h"

static
int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess);

/* Next available channel key. Access under next_channel_key_lock. */
static uint64_t _next_channel_key;
static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;

/* Next available session ID. Access under next_session_id_lock. */
static uint64_t _next_session_id;
static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Return the incremented value of next_channel_key.
 */
static uint64_t get_next_channel_key(void)
{
	uint64_t ret;

	pthread_mutex_lock(&next_channel_key_lock);
	ret = ++_next_channel_key;
	pthread_mutex_unlock(&next_channel_key_lock);
	return ret;
}

/*
 * Return the atomically incremented value of next_session_id.
 */
static uint64_t get_next_session_id(void)
{
	uint64_t ret;

	pthread_mutex_lock(&next_session_id_lock);
	ret = ++_next_session_id;
	pthread_mutex_unlock(&next_session_id_lock);
	return ret;
}

static void copy_channel_attr_to_ustctl(
		struct ustctl_consumer_channel_attr *attr,
		struct lttng_ust_channel_attr *uattr)
{
	/* Copy event attributes since the layout is different. */
	attr->subbuf_size = uattr->subbuf_size;
	attr->num_subbuf = uattr->num_subbuf;
	attr->overwrite = uattr->overwrite;
	attr->switch_timer_interval = uattr->switch_timer_interval;
	attr->read_timer_interval = uattr->read_timer_interval;
	attr->output = uattr->output;
}

/*
 * Match function for the hash table lookup.
 *
 * It matches an ust app event based on four attributes: the event name,
 * the filter bytecode, the loglevel and the exclusions.
 */
static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
{
	struct ust_app_event *event;
	const struct ust_app_ht_key *key;

	assert(node);
	assert(_key);

	event = caa_container_of(node, struct ust_app_event, node.node);
	key = _key;

	/* Match the 4 elements of the key: name, filter, loglevel, exclusions */

	/* Event name */
	if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
		goto no_match;
	}

	/* Event loglevel. */
	if (event->attr.loglevel != key->loglevel) {
		if (event->attr.loglevel_type == LTTNG_UST_LOGLEVEL_ALL
				&& key->loglevel == 0 && event->attr.loglevel == -1) {
			/*
			 * Match is accepted. This is because on event creation, the
			 * loglevel is set to -1 if the event loglevel type is ALL so 0 and
			 * -1 are accepted for this loglevel type since 0 is the one set by
			 * the API when receiving an enable event.
			 */
		} else {
			goto no_match;
		}
	}

	/* One of the filters is NULL, fail. */
	if ((key->filter && !event->filter) || (!key->filter && event->filter)) {
		goto no_match;
	}

	if (key->filter && event->filter) {
		/* Both filters exist; check length followed by the bytecode. */
		if (event->filter->len != key->filter->len ||
				memcmp(event->filter->data, key->filter->data,
					event->filter->len) != 0) {
			goto no_match;
		}
	}

	/* One of the exclusions is NULL, fail. */
	if ((key->exclusion && !event->exclusion) || (!key->exclusion && event->exclusion)) {
		goto no_match;
	}

	if (key->exclusion && event->exclusion) {
		/* Both exclusions exist; check count followed by the names. */
		if (event->exclusion->count != key->exclusion->count ||
				memcmp(event->exclusion->names, key->exclusion->names,
					event->exclusion->count * LTTNG_UST_SYM_NAME_LEN) != 0) {
			goto no_match;
		}
	}


	/* Match. */
	return 1;

no_match:
	return 0;
}

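/*
 * Note on the hash/match pairing: events are hashed on their name only, so
 * several entries sharing a name can coexist in a channel's event hash
 * table. ht_match_ust_app_event() is the match callback that disambiguates
 * them by filter bytecode, loglevel and exclusions. See
 * add_unique_ust_app_event() for the insertion path and
 * find_ust_app_event() for the lookup path using the same key.
 */
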
/*
 * Unique add of an ust app event in the given ht. This uses the custom
 * ht_match_ust_app_event match function and the event name as hash.
 */
static void add_unique_ust_app_event(struct ust_app_channel *ua_chan,
		struct ust_app_event *event)
{
	struct cds_lfht_node *node_ptr;
	struct ust_app_ht_key key;
	struct lttng_ht *ht;

	assert(ua_chan);
	assert(ua_chan->events);
	assert(event);

	ht = ua_chan->events;
	key.name = event->attr.name;
	key.filter = event->filter;
	key.loglevel = event->attr.loglevel;
	key.exclusion = event->exclusion;

	node_ptr = cds_lfht_add_unique(ht->ht,
			ht->hash_fct(event->node.key, lttng_ht_seed),
			ht_match_ust_app_event, &key, &event->node.node);
	assert(node_ptr == &event->node.node);
}

/*
 * Close the notify socket from the given RCU head object. This MUST be called
 * through a call_rcu().
 */
static void close_notify_sock_rcu(struct rcu_head *head)
{
	int ret;
	struct ust_app_notify_sock_obj *obj =
		caa_container_of(head, struct ust_app_notify_sock_obj, head);

	/* Must have a valid fd here. */
	assert(obj->fd >= 0);

	ret = close(obj->fd);
	if (ret) {
		ERR("close notify sock %d RCU", obj->fd);
	}
	lttng_fd_put(LTTNG_FD_APPS, 1);

	free(obj);
}

/*
 * Return the session registry according to the buffer type of the given
 * session.
 *
 * A registry per UID object MUST exist before calling this function or else
 * it assert()s if not found. RCU read side lock must be acquired.
 */
static struct ust_registry_session *get_session_registry(
		struct ust_app_session *ua_sess)
{
	struct ust_registry_session *registry = NULL;

	assert(ua_sess);

	switch (ua_sess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
	{
		struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
		if (!reg_pid) {
			goto error;
		}
		registry = reg_pid->registry->reg.ust;
		break;
	}
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
				ua_sess->tracing_id, ua_sess->bits_per_long, ua_sess->uid);
		if (!reg_uid) {
			goto error;
		}
		registry = reg_uid->registry->reg.ust;
		break;
	}
	default:
		assert(0);
	};

error:
	return registry;
}

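/*
 * get_session_registry() returns NULL when no matching buffer registry is
 * found; callers such as delete_ust_app_session() and
 * delete_ust_app_channel() check the returned pointer before using it.
 */
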
/*
 * Delete ust context safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx,
		struct ust_app *app)
{
	int ret;

	assert(ua_ctx);

	if (ua_ctx->obj) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_object(sock, ua_ctx->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release ctx obj handle %d failed with ret %d",
					sock, ua_ctx->obj->handle, ret);
		}
		free(ua_ctx->obj);
	}
	free(ua_ctx);
}

/*
 * Delete ust app event safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_event(int sock, struct ust_app_event *ua_event,
		struct ust_app *app)
{
	int ret;

	assert(ua_event);

	free(ua_event->filter);
	if (ua_event->exclusion != NULL)
		free(ua_event->exclusion);
	if (ua_event->obj != NULL) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_object(sock, ua_event->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release event obj failed with ret %d",
					sock, ret);
		}
		free(ua_event->obj);
	}
	free(ua_event);
}

/*
 * Release ust data object of the given stream.
 *
 * Return 0 on success or else a negative value.
 */
static int release_ust_app_stream(int sock, struct ust_app_stream *stream,
		struct ust_app *app)
{
	int ret = 0;

	assert(stream);

	if (stream->obj) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_object(sock, stream->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release stream obj failed with ret %d",
					sock, ret);
		}
		lttng_fd_put(LTTNG_FD_APPS, 2);
		free(stream->obj);
	}

	return ret;
}

/*
 * Delete ust app stream safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_stream(int sock, struct ust_app_stream *stream,
		struct ust_app *app)
{
	assert(stream);

	(void) release_ust_app_stream(sock, stream, app);
	free(stream);
}

/*
 * We need to execute ht_destroy outside of RCU read-side critical
 * section and outside of call_rcu thread, so we postpone its execution
 * using ht_cleanup_push. It is simpler than to change the semantics of
 * the many callers of delete_ust_app_session().
 */
static
void delete_ust_app_channel_rcu(struct rcu_head *head)
{
	struct ust_app_channel *ua_chan =
		caa_container_of(head, struct ust_app_channel, rcu_head);

	ht_cleanup_push(ua_chan->ctx);
	ht_cleanup_push(ua_chan->events);
	free(ua_chan);
}

/*
 * Delete ust app channel safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;
	struct ust_app_stream *stream, *stmp;
	struct ust_registry_session *registry;

	assert(ua_chan);

	DBG3("UST app deleting channel %s", ua_chan->name);

	/* Wipe stream */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		cds_list_del(&stream->list);
		delete_ust_app_stream(sock, stream, app);
	}

	/* Wipe context */
	cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
		cds_list_del(&ua_ctx->list);
		ret = lttng_ht_del(ua_chan->ctx, &iter);
		assert(!ret);
		delete_ust_app_ctx(sock, ua_ctx, app);
	}

	/* Wipe events */
	cds_lfht_for_each_entry(ua_chan->events->ht, &iter.iter, ua_event,
			node.node) {
		ret = lttng_ht_del(ua_chan->events, &iter);
		assert(!ret);
		delete_ust_app_event(sock, ua_event, app);
	}

	if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
		/* Wipe and free registry from session registry. */
		registry = get_session_registry(ua_chan->session);
		if (registry) {
			ust_registry_channel_del_free(registry, ua_chan->key);
		}
	}

	if (ua_chan->obj != NULL) {
		/* Remove channel from application UST object descriptor. */
		iter.iter.node = &ua_chan->ust_objd_node.node;
		ret = lttng_ht_del(app->ust_objd, &iter);
		assert(!ret);
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_object(sock, ua_chan->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release channel obj failed with ret %d",
					sock, ret);
		}
		lttng_fd_put(LTTNG_FD_APPS, 1);
		free(ua_chan->obj);
	}
	call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
}

int ust_app_register_done(struct ust_app *app)
{
	int ret;

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_register_done(app->sock);
	pthread_mutex_unlock(&app->sock_lock);
	return ret;
}

int ust_app_release_object(struct ust_app *app, struct lttng_ust_object_data *data)
{
	int ret, sock;

	if (app) {
		pthread_mutex_lock(&app->sock_lock);
		sock = app->sock;
	} else {
		sock = -1;
	}
	ret = ustctl_release_object(sock, data);
	if (app) {
		pthread_mutex_unlock(&app->sock_lock);
	}
	return ret;
}

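/*
 * Passing a NULL app to ust_app_release_object() results in
 * ustctl_release_object() being called with an invalid socket (-1), the
 * same convention used by delete_ust_app_stream(-1, ...) in
 * send_channel_pid_to_ust(): the intent is to release the object without
 * notifying any (possibly already dead) application.
 */
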
/*
 * Push metadata to consumer socket.
 *
 * RCU read-side lock must be held to guarantee existence of socket.
 * Must be called with the ust app session lock held.
 * Must be called with the registry lock held.
 *
 * On success, return the length of metadata pushed or else a negative value.
 * Returning a -EPIPE return value means we could not send the metadata,
 * but it can be caused by recoverable errors (e.g. the application has
 * terminated concurrently).
 */
ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
		struct consumer_socket *socket, int send_zero_data)
{
	int ret;
	char *metadata_str = NULL;
	size_t len, offset, new_metadata_len_sent;
	ssize_t ret_val;
	uint64_t metadata_key;

	assert(registry);
	assert(socket);

	metadata_key = registry->metadata_key;

	/*
	 * Means that no metadata was assigned to the session. This can
	 * happen if no start has been done previously.
	 */
	if (!metadata_key) {
		return 0;
	}

	/*
	 * On a push metadata error either the consumer is dead or the
	 * metadata channel has been destroyed because its endpoint
	 * might have died (e.g: relayd), or because the application has
	 * exited. If so, the metadata closed flag is set to 1 so we
	 * deny pushing metadata again which is not valid anymore on the
	 * consumer side.
	 */
	if (registry->metadata_closed) {
		return -EPIPE;
	}

	offset = registry->metadata_len_sent;
	len = registry->metadata_len - registry->metadata_len_sent;
	new_metadata_len_sent = registry->metadata_len;
	if (len == 0) {
		DBG3("No metadata to push for metadata key %" PRIu64,
				registry->metadata_key);
		ret_val = len;
		if (send_zero_data) {
			DBG("No metadata to push");
			goto push_data;
		}
		goto end;
	}

	/* Allocate only what we have to send. */
	metadata_str = zmalloc(len);
	if (!metadata_str) {
		PERROR("zmalloc ust app metadata string");
		ret_val = -ENOMEM;
		goto error;
	}
	/* Copy what we haven't sent out. */
	memcpy(metadata_str, registry->metadata + offset, len);

push_data:
	pthread_mutex_unlock(&registry->lock);
	/*
	 * We need to unlock the registry while we push metadata to
	 * break a circular dependency between the consumerd metadata
	 * lock and the sessiond registry lock. Indeed, pushing metadata
	 * to the consumerd awaits that it gets pushed all the way to
	 * relayd, but doing so requires grabbing the metadata lock. If
	 * a concurrent metadata request is being performed by
	 * consumerd, this can try to grab the registry lock on the
	 * sessiond while holding the metadata lock on the consumer
	 * daemon. Those push and pull schemes are performed on two
	 * different bidirectional communication sockets.
	 */
	ret = consumer_push_metadata(socket, metadata_key,
			metadata_str, len, offset);
	pthread_mutex_lock(&registry->lock);
	if (ret < 0) {
		/*
		 * There is an acceptable race here between the registry
		 * metadata key assignment and the creation on the
		 * consumer. The session daemon can concurrently push
		 * metadata for this registry while it is being created on
		 * the consumer, since the metadata key of the registry is
		 * assigned *before* it is set up, to avoid the consumer
		 * asking for metadata that could not yet be found in the
		 * session daemon.
		 *
		 * The metadata will get pushed either by the session
		 * being stopped or by the consumer requesting metadata if
		 * that race is triggered.
		 */
		if (ret == -LTTCOMM_CONSUMERD_CHANNEL_FAIL) {
			ret = 0;
		} else {
			ERR("Error pushing metadata to consumer");
		}
		ret_val = ret;
		goto error_push;
	} else {
		/*
		 * Metadata may have been concurrently pushed, since
		 * we're not holding the registry lock while pushing to
		 * consumer. This is handled by the fact that we send
		 * the metadata content, size, and the offset at which
		 * that metadata belongs. This may arrive out of order
		 * on the consumer side, and the consumer is able to
		 * deal with overlapping fragments as long as they are
		 * contiguous starting from offset 0. We keep the
		 * largest metadata_len_sent value of the concurrent
		 * sends.
		 */
		registry->metadata_len_sent =
			max_t(size_t, registry->metadata_len_sent,
				new_metadata_len_sent);
	}
	free(metadata_str);
	return len;

end:
error:
	if (ret_val) {
		/*
		 * On error, flag the registry that the metadata is
		 * closed. We were unable to push anything and this
		 * means that either the consumer is not responding or
		 * the metadata cache has been destroyed on the
		 * consumer.
		 */
		registry->metadata_closed = 1;
	}
error_push:
	free(metadata_str);
	return ret_val;
}

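/*
 * Reminder for callers of ust_app_push_metadata(): the registry lock must be
 * held on entry and is dropped and re-acquired around consumer_push_metadata()
 * above, so registry state may change concurrently during the call. See
 * push_metadata() below for the canonical caller pattern (lock, check
 * metadata_closed, push, unlock).
 */
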
/*
 * For a given application and session, push metadata to consumer.
 * Either sock or consumer is required: if sock is NULL, the default
 * socket to send the metadata is retrieved from consumer, if sock
 * is not NULL we use it to send the metadata.
 * RCU read-side lock must be held while calling this function,
 * therefore ensuring existence of registry. It also ensures existence
 * of socket throughout this function.
 *
 * Return 0 on success else a negative error.
 * Returning a -EPIPE return value means we could not send the metadata,
 * but it can be caused by recoverable errors (e.g. the application has
 * terminated concurrently).
 */
static int push_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret_val;
	ssize_t ret;
	struct consumer_socket *socket;

	assert(registry);
	assert(consumer);

	pthread_mutex_lock(&registry->lock);
	if (registry->metadata_closed) {
		ret_val = -EPIPE;
		goto error;
	}

	/* Get consumer socket to use to push the metadata. */
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret_val = -1;
		goto error;
	}

	ret = ust_app_push_metadata(registry, socket, 0);
	if (ret < 0) {
		ret_val = ret;
		goto error;
	}
	pthread_mutex_unlock(&registry->lock);
	return 0;

error:
	pthread_mutex_unlock(&registry->lock);
	return ret_val;
}

/*
 * Send to the consumer a close metadata command for the given session. Once
 * done, the metadata channel is deleted and the session metadata pointer is
 * nullified. The session lock MUST be held unless the application is
 * in the destroy path.
 *
 * Return 0 on success else a negative value.
 */
static int close_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret;
	struct consumer_socket *socket;

	assert(registry);
	assert(consumer);

	rcu_read_lock();

	pthread_mutex_lock(&registry->lock);

	if (!registry->metadata_key || registry->metadata_closed) {
		ret = 0;
		goto end;
	}

	/* Get consumer socket to use to push the metadata. */
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret = -1;
		goto error;
	}

	ret = consumer_close_metadata(socket, registry->metadata_key);
	if (ret < 0) {
		goto error;
	}

error:
	/*
	 * Metadata closed. Even on error this means that the consumer is not
	 * responding or not found so either way a second close should NOT be
	 * emitted for this registry.
	 */
	registry->metadata_closed = 1;
end:
	pthread_mutex_unlock(&registry->lock);
	rcu_read_unlock();
	return ret;
}

/*
 * We need to execute ht_destroy outside of RCU read-side critical
 * section and outside of call_rcu thread, so we postpone its execution
 * using ht_cleanup_push. It is simpler than to change the semantics of
 * the many callers of delete_ust_app_session().
 */
static
void delete_ust_app_session_rcu(struct rcu_head *head)
{
	struct ust_app_session *ua_sess =
		caa_container_of(head, struct ust_app_session, rcu_head);

	ht_cleanup_push(ua_sess->channels);
	free(ua_sess);
}

/*
 * Delete ust app session safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_channel *ua_chan;
	struct ust_registry_session *registry;

	assert(ua_sess);

	pthread_mutex_lock(&ua_sess->lock);

	assert(!ua_sess->deleted);
	ua_sess->deleted = true;

	registry = get_session_registry(ua_sess);
	if (registry) {
		/* Push metadata for application before freeing the application. */
		(void) push_metadata(registry, ua_sess->consumer);

		/*
		 * Don't ask to close metadata for global per UID buffers. Close
		 * metadata only on destroy trace session in this case. Also, the
		 * previous push metadata could have flagged the metadata registry to
		 * close so don't send a close command if closed.
		 */
		if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
			/* And ask to close it for this session registry. */
			(void) close_metadata(registry, ua_sess->consumer);
		}
	}

	cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
			node.node) {
		ret = lttng_ht_del(ua_sess->channels, &iter);
		assert(!ret);
		delete_ust_app_channel(sock, ua_chan, app);
	}

	/* In case of per PID, the registry is kept in the session. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
		struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
		if (reg_pid) {
			buffer_reg_pid_remove(reg_pid);
			buffer_reg_pid_destroy(reg_pid);
		}
	}

	if (ua_sess->handle != -1) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_handle(sock, ua_sess->handle);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release session handle failed with ret %d",
					sock, ret);
		}
	}
	pthread_mutex_unlock(&ua_sess->lock);

	consumer_output_put(ua_sess->consumer);

	call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
}

/*
 * Delete a traceable application structure from the global list. Never call
 * this function outside of a call_rcu call.
 *
 * RCU read side lock should _NOT_ be held when calling this function.
 */
static
void delete_ust_app(struct ust_app *app)
{
	int ret, sock;
	struct ust_app_session *ua_sess, *tmp_ua_sess;

	/* Delete ust app sessions info */
	sock = app->sock;
	app->sock = -1;

	/* Wipe sessions */
	cds_list_for_each_entry_safe(ua_sess, tmp_ua_sess, &app->teardown_head,
			teardown_node) {
		/* Free every object in the session and the session. */
		rcu_read_lock();
		delete_ust_app_session(sock, ua_sess, app);
		rcu_read_unlock();
	}

	ht_cleanup_push(app->sessions);
	ht_cleanup_push(app->ust_objd);

	/*
	 * Wait until we have deleted the application from the sock hash table
	 * before closing this socket, otherwise an application could re-use the
	 * socket ID and race with the teardown, using the same hash table entry.
	 *
	 * It's OK to leave the close in call_rcu. We want it to stay unique for
	 * all RCU readers that could run concurrently with unregister app,
	 * therefore we _need_ to only close that socket after a grace period. So
	 * it should stay in this RCU callback.
	 *
	 * This close() is a very important step of the synchronization model so
	 * every modification to this function must be carefully reviewed.
	 */
	ret = close(sock);
	if (ret) {
		PERROR("close");
	}
	lttng_fd_put(LTTNG_FD_APPS, 1);

	DBG2("UST app pid %d deleted", app->pid);
	free(app);
}

/*
 * URCU intermediate call to delete an UST app.
 */
static
void delete_ust_app_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_ulong *node =
		caa_container_of(head, struct lttng_ht_node_ulong, head);
	struct ust_app *app =
		caa_container_of(node, struct ust_app, pid_n);

	DBG3("Call RCU deleting app PID %d", app->pid);
	delete_ust_app(app);
}

/*
 * Delete the session from the application ht and delete the data structure by
 * freeing every object inside and releasing them.
 */
static void destroy_app_session(struct ust_app *app,
		struct ust_app_session *ua_sess)
{
	int ret;
	struct lttng_ht_iter iter;

	assert(app);
	assert(ua_sess);

	iter.iter.node = &ua_sess->node.node;
	ret = lttng_ht_del(app->sessions, &iter);
	if (ret) {
		/* Already scheduled for teardown. */
		goto end;
	}

	/* Once deleted, free the data structure. */
	delete_ust_app_session(app->sock, ua_sess, app);

end:
	return;
}

/*
 * Alloc new UST app session.
 */
static
struct ust_app_session *alloc_ust_app_session(struct ust_app *app)
{
	struct ust_app_session *ua_sess;

	/* Init most of the default value by allocating and zeroing */
	ua_sess = zmalloc(sizeof(struct ust_app_session));
	if (ua_sess == NULL) {
		PERROR("malloc");
		goto error_free;
	}

	ua_sess->handle = -1;
	ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
	ua_sess->metadata_attr.type = LTTNG_UST_CHAN_METADATA;
	pthread_mutex_init(&ua_sess->lock, NULL);

	return ua_sess;

error_free:
	return NULL;
}

/*
 * Alloc new UST app channel.
 */
static
struct ust_app_channel *alloc_ust_app_channel(char *name,
		struct ust_app_session *ua_sess,
		struct lttng_ust_channel_attr *attr)
{
	struct ust_app_channel *ua_chan;

	/* Init most of the default value by allocating and zeroing */
	ua_chan = zmalloc(sizeof(struct ust_app_channel));
	if (ua_chan == NULL) {
		PERROR("malloc");
		goto error;
	}

	/* Setup channel name */
	strncpy(ua_chan->name, name, sizeof(ua_chan->name));
	ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';

	ua_chan->enabled = 1;
	ua_chan->handle = -1;
	ua_chan->session = ua_sess;
	ua_chan->key = get_next_channel_key();
	ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
	lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);

	CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
	CDS_INIT_LIST_HEAD(&ua_chan->ctx_list);

	/* Copy attributes */
	if (attr) {
		/* Translate from lttng_ust_channel to ustctl_consumer_channel_attr. */
		ua_chan->attr.subbuf_size = attr->subbuf_size;
		ua_chan->attr.num_subbuf = attr->num_subbuf;
		ua_chan->attr.overwrite = attr->overwrite;
		ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
		ua_chan->attr.read_timer_interval = attr->read_timer_interval;
		ua_chan->attr.output = attr->output;
	}
	/* By default, the channel is a per cpu channel. */
	ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;

	DBG3("UST app channel %s allocated", ua_chan->name);

	return ua_chan;

error:
	return NULL;
}

/*
 * Allocate and initialize a UST app stream.
 *
 * Return newly allocated stream pointer or NULL on error.
 */
struct ust_app_stream *ust_app_alloc_stream(void)
{
	struct ust_app_stream *stream = NULL;

	stream = zmalloc(sizeof(*stream));
	if (stream == NULL) {
		PERROR("zmalloc ust app stream");
		goto error;
	}

	/* Zero could be a valid value for a handle so flag it to -1. */
	stream->handle = -1;

error:
	return stream;
}

/*
 * Alloc new UST app event.
 */
static
struct ust_app_event *alloc_ust_app_event(char *name,
		struct lttng_ust_event *attr)
{
	struct ust_app_event *ua_event;

	/* Init most of the default value by allocating and zeroing */
	ua_event = zmalloc(sizeof(struct ust_app_event));
	if (ua_event == NULL) {
		PERROR("malloc");
		goto error;
	}

	ua_event->enabled = 1;
	strncpy(ua_event->name, name, sizeof(ua_event->name));
	ua_event->name[sizeof(ua_event->name) - 1] = '\0';
	lttng_ht_node_init_str(&ua_event->node, ua_event->name);

	/* Copy attributes */
	if (attr) {
		memcpy(&ua_event->attr, attr, sizeof(ua_event->attr));
	}

	DBG3("UST app event %s allocated", ua_event->name);

	return ua_event;

error:
	return NULL;
}

/*
 * Alloc new UST app context.
 */
static
struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context *uctx)
{
	struct ust_app_ctx *ua_ctx;

	ua_ctx = zmalloc(sizeof(struct ust_app_ctx));
	if (ua_ctx == NULL) {
		goto error;
	}

	CDS_INIT_LIST_HEAD(&ua_ctx->list);

	if (uctx) {
		memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
	}

	DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);

error:
	return ua_ctx;
}

/*
 * Allocate a filter and copy the given original filter.
 *
 * Return allocated filter or NULL on error.
 */
static struct lttng_filter_bytecode *copy_filter_bytecode(
		struct lttng_filter_bytecode *orig_f)
{
	struct lttng_filter_bytecode *filter = NULL;

	/* Copy filter bytecode */
	filter = zmalloc(sizeof(*filter) + orig_f->len);
	if (!filter) {
		PERROR("zmalloc alloc filter bytecode");
		goto error;
	}

	memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);

error:
	return filter;
}

/*
 * Create a liblttng-ust filter bytecode from given bytecode.
 *
 * Return allocated filter or NULL on error.
 */
static struct lttng_ust_filter_bytecode *create_ust_bytecode_from_bytecode(
		struct lttng_filter_bytecode *orig_f)
{
	struct lttng_ust_filter_bytecode *filter = NULL;

	/* Copy filter bytecode */
	filter = zmalloc(sizeof(*filter) + orig_f->len);
	if (!filter) {
		PERROR("zmalloc alloc ust filter bytecode");
		goto error;
	}

	assert(sizeof(struct lttng_filter_bytecode) ==
			sizeof(struct lttng_ust_filter_bytecode));
	memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
error:
	return filter;
}

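/*
 * copy_filter_bytecode() duplicates the sessiond-side representation
 * (struct lttng_filter_bytecode) while create_ust_bytecode_from_bytecode()
 * produces the struct lttng_ust_filter_bytecode expected by liblttng-ust;
 * both rely on the two layouts being identical, which is asserted above.
 */
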
/*
 * Find an ust_app using the sock and return it. RCU read side lock must be
 * held before calling this helper function.
 */
struct ust_app *ust_app_find_by_sock(int sock)
{
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;

	lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node == NULL) {
		DBG2("UST app find by sock %d not found", sock);
		goto error;
	}

	return caa_container_of(node, struct ust_app, sock_n);

error:
	return NULL;
}

/*
 * Find an ust_app using the notify sock and return it. RCU read side lock must
 * be held before calling this helper function.
 */
static struct ust_app *find_app_by_notify_sock(int sock)
{
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;

	lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
			&iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node == NULL) {
		DBG2("UST app find by notify sock %d not found", sock);
		goto error;
	}

	return caa_container_of(node, struct ust_app, notify_sock_n);

error:
	return NULL;
}

/*
 * Look up an ust app event based on event name, filter bytecode and the
 * event loglevel.
 *
 * Return an ust_app_event object or NULL on error.
 */
static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
		char *name, struct lttng_filter_bytecode *filter, int loglevel,
		const struct lttng_event_exclusion *exclusion)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *node;
	struct ust_app_event *event = NULL;
	struct ust_app_ht_key key;

	assert(name);
	assert(ht);

	/* Setup key for event lookup. */
	key.name = name;
	key.filter = filter;
	key.loglevel = loglevel;
	/* lttng_event_exclusion and lttng_ust_event_exclusion structures are similar */
	key.exclusion = exclusion;

	/* Lookup using the event name as hash and a custom match fct. */
	cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
			ht_match_ust_app_event, &key, &iter.iter);
	node = lttng_ht_iter_get_node_str(&iter);
	if (node == NULL) {
		goto end;
	}

	event = caa_container_of(node, struct ust_app_event, node);

end:
	return event;
}

/*
 * Create the channel context on the tracer.
 *
 * Called with UST app session lock held.
 */
static
int create_ust_channel_context(struct ust_app_channel *ua_chan,
		struct ust_app_ctx *ua_ctx, struct ust_app *app)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_add_context(app->sock, &ua_ctx->ctx,
			ua_chan->obj, &ua_ctx->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app create channel context failed for app (pid: %d) "
					"with ret %d", app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app create channel context failed. Application is dead.");
		}
		goto error;
	}

	ua_ctx->handle = ua_ctx->obj->handle;

	DBG2("UST app context handle %d created successfully for channel %s",
			ua_ctx->handle, ua_chan->name);

error:
	health_code_update();
	return ret;
}

/*
 * Set the filter on the tracer.
 */
static
int set_ust_event_filter(struct ust_app_event *ua_event,
		struct ust_app *app)
{
	int ret;
	struct lttng_ust_filter_bytecode *ust_bytecode = NULL;

	health_code_update();

	if (!ua_event->filter) {
		ret = 0;
		goto error;
	}

	ust_bytecode = create_ust_bytecode_from_bytecode(ua_event->filter);
	if (!ust_bytecode) {
		ret = -LTTNG_ERR_NOMEM;
		goto error;
	}
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_set_filter(app->sock, ust_bytecode,
			ua_event->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app event %s filter failed for app (pid: %d) "
					"with ret %d", ua_event->attr.name, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app filter event failed. Application is dead.");
		}
		goto error;
	}

	DBG2("UST filter set successfully for event %s", ua_event->name);

error:
	health_code_update();
	free(ust_bytecode);
	return ret;
}

static
struct lttng_ust_event_exclusion *create_ust_exclusion_from_exclusion(
		struct lttng_event_exclusion *exclusion)
{
	struct lttng_ust_event_exclusion *ust_exclusion = NULL;
	size_t exclusion_alloc_size = sizeof(struct lttng_ust_event_exclusion) +
		LTTNG_UST_SYM_NAME_LEN * exclusion->count;

	ust_exclusion = zmalloc(exclusion_alloc_size);
	if (!ust_exclusion) {
		PERROR("malloc");
		goto end;
	}

	assert(sizeof(struct lttng_event_exclusion) ==
			sizeof(struct lttng_ust_event_exclusion));
	memcpy(ust_exclusion, exclusion, exclusion_alloc_size);
end:
	return ust_exclusion;
}

/*
 * Set event exclusions on the tracer.
 */
static
int set_ust_event_exclusion(struct ust_app_event *ua_event,
		struct ust_app *app)
{
	int ret;
	struct lttng_ust_event_exclusion *ust_exclusion = NULL;

	health_code_update();

	if (!ua_event->exclusion || !ua_event->exclusion->count) {
		ret = 0;
		goto error;
	}

	ust_exclusion = create_ust_exclusion_from_exclusion(
			ua_event->exclusion);
	if (!ust_exclusion) {
		ret = -LTTNG_ERR_NOMEM;
		goto error;
	}
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_set_exclusion(app->sock, ust_exclusion, ua_event->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app event %s exclusions failed for app (pid: %d) "
					"with ret %d", ua_event->attr.name, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app event exclusion failed. Application is dead.");
		}
		goto error;
	}

	DBG2("UST exclusion set successfully for event %s", ua_event->name);

error:
	health_code_update();
	free(ust_exclusion);
	return ret;
}

/*
 * Disable the specified event on the UST tracer for the UST session.
 */
static int disable_ust_event(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_disable(app->sock, ua_event->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app event %s disable failed for app (pid: %d) "
					"and session handle %d with ret %d",
					ua_event->attr.name, app->pid, ua_sess->handle, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app disable event failed. Application is dead.");
		}
		goto error;
	}

	DBG2("UST app event %s disabled successfully for app (pid: %d)",
			ua_event->attr.name, app->pid);

error:
	health_code_update();
	return ret;
}

/*
 * Disable the specified channel on the UST tracer for the UST session.
 */
static int disable_ust_channel(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_disable(app->sock, ua_chan->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app channel %s disable failed for app (pid: %d) "
					"and session handle %d with ret %d",
					ua_chan->name, app->pid, ua_sess->handle, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app disable channel failed. Application is dead.");
		}
		goto error;
	}

	DBG2("UST app channel %s disabled successfully for app (pid: %d)",
			ua_chan->name, app->pid);

error:
	health_code_update();
	return ret;
}

/*
 * Enable the specified channel on the UST tracer for the UST session.
 */
static int enable_ust_channel(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_enable(app->sock, ua_chan->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app channel %s enable failed for app (pid: %d) "
					"and session handle %d with ret %d",
					ua_chan->name, app->pid, ua_sess->handle, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app enable channel failed. Application is dead.");
		}
		goto error;
	}

	ua_chan->enabled = 1;

	DBG2("UST app channel %s enabled successfully for app (pid: %d)",
			ua_chan->name, app->pid);

error:
	health_code_update();
	return ret;
}

/*
 * Enable the specified event on the UST tracer for the UST session.
 */
static int enable_ust_event(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_enable(app->sock, ua_event->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app event %s enable failed for app (pid: %d) "
					"and session handle %d with ret %d",
					ua_event->attr.name, app->pid, ua_sess->handle, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app enable event failed. Application is dead.");
		}
		goto error;
	}

	DBG2("UST app event %s enabled successfully for app (pid: %d)",
			ua_event->attr.name, app->pid);

error:
	health_code_update();
	return ret;
}

/*
 * Send channel and stream buffer to application.
 *
 * Return 0 on success. On error, a negative value is returned.
 */
static int send_channel_pid_to_ust(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
{
	int ret;
	struct ust_app_stream *stream, *stmp;

	assert(app);
	assert(ua_sess);
	assert(ua_chan);

	health_code_update();

	DBG("UST app sending channel %s to UST app sock %d", ua_chan->name,
			app->sock);

	/* Send channel to the application. */
	ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
	if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
		ret = -ENOTCONN;	/* Caused by app exiting. */
		goto error;
	} else if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Send all streams to application. */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			ret = -ENOTCONN;	/* Caused by app exiting. */
			goto error;
		} else if (ret < 0) {
			goto error;
		}
		/* We don't need the stream anymore once sent to the tracer. */
		cds_list_del(&stream->list);
		delete_ust_app_stream(-1, stream, app);
	}
	/* Flag the channel that it is sent to the application. */
	ua_chan->is_sent = 1;

error:
	health_code_update();
	return ret;
}

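/*
 * Both send paths above collapse -EPIPE and -LTTNG_UST_ERR_EXITING into
 * -ENOTCONN so that callers can tell "the application exited while we were
 * sending" apart from genuine communication errors.
 */
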
/*
 * Create the specified event onto the UST tracer for a UST session.
 *
 * Should be called with session mutex held.
 */
static
int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
{
	int ret = 0;

	health_code_update();

	/* Create UST event on tracer */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
			&ua_event->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Error ustctl create event %s for app pid: %d with ret %d",
					ua_event->attr.name, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app create event failed. Application is dead.");
		}
		goto error;
	}

	ua_event->handle = ua_event->obj->handle;

	DBG2("UST app event %s created successfully for pid:%d",
			ua_event->attr.name, app->pid);

	health_code_update();

	/* Set filter if one is present. */
	if (ua_event->filter) {
		ret = set_ust_event_filter(ua_event, app);
		if (ret < 0) {
			goto error;
		}
	}

	/* Set exclusions for the event */
	if (ua_event->exclusion) {
		ret = set_ust_event_exclusion(ua_event, app);
		if (ret < 0) {
			goto error;
		}
	}

	/* If the event is enabled, enable it on the tracer (it is created disabled). */
	if (ua_event->enabled) {
		/*
		 * We now need to explicitly enable the event, since it
		 * is now disabled at creation.
		 */
		ret = enable_ust_event(app, ua_sess, ua_event);
		if (ret < 0) {
			/*
			 * If we hit an EPERM, something is wrong with our enable call. If
			 * we get an EEXIST, there is a problem on the tracer side since we
			 * just created it.
			 */
			switch (ret) {
			case -LTTNG_UST_ERR_PERM:
				/* Code flow problem */
				assert(0);
			case -LTTNG_UST_ERR_EXIST:
				/* It's OK for our use case. */
				ret = 0;
				break;
			default:
				break;
			}
			goto error;
		}
	}

error:
	health_code_update();
	return ret;
}

/*
 * Copy data between an UST app event and a LTT event.
 */
static void shadow_copy_event(struct ust_app_event *ua_event,
		struct ltt_ust_event *uevent)
{
	size_t exclusion_alloc_size;

	strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
	ua_event->name[sizeof(ua_event->name) - 1] = '\0';

	ua_event->enabled = uevent->enabled;

	/* Copy event attributes */
	memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));

	/* Copy filter bytecode */
	if (uevent->filter) {
		ua_event->filter = copy_filter_bytecode(uevent->filter);
		/* Filter might be NULL here in case of ENOMEM. */
	}

	/* Copy exclusion data */
	if (uevent->exclusion) {
		exclusion_alloc_size = sizeof(struct lttng_event_exclusion) +
				LTTNG_UST_SYM_NAME_LEN * uevent->exclusion->count;
		ua_event->exclusion = zmalloc(exclusion_alloc_size);
		if (ua_event->exclusion == NULL) {
			PERROR("malloc");
		} else {
			memcpy(ua_event->exclusion, uevent->exclusion,
					exclusion_alloc_size);
		}
	}
}

/*
 * Copy data between an UST app channel and a LTT channel.
 */
static void shadow_copy_channel(struct ust_app_channel *ua_chan,
		struct ltt_ust_channel *uchan)
{
	struct lttng_ht_iter iter;
	struct ltt_ust_event *uevent;
	struct ltt_ust_context *uctx;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;

	DBG2("UST app shadow copy of channel %s started", ua_chan->name);

	strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
	ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';

	ua_chan->tracefile_size = uchan->tracefile_size;
	ua_chan->tracefile_count = uchan->tracefile_count;

	/* Copy event attributes since the layout is different. */
	ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
	ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
	ua_chan->attr.overwrite = uchan->attr.overwrite;
	ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
	ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
	ua_chan->attr.output = uchan->attr.output;
	/*
	 * Note that the attribute channel type is not set since the channel on the
	 * tracing registry side does not have this information.
	 */

	ua_chan->enabled = uchan->enabled;
	ua_chan->tracing_channel_id = uchan->id;

	cds_list_for_each_entry(uctx, &uchan->ctx_list, list) {
		ua_ctx = alloc_ust_app_ctx(&uctx->ctx);
		if (ua_ctx == NULL) {
			continue;
		}
		lttng_ht_node_init_ulong(&ua_ctx->node,
				(unsigned long) ua_ctx->ctx.ctx);
		lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
		cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
	}

	/* Copy all events from ltt ust channel to ust app channel */
	cds_lfht_for_each_entry(uchan->events->ht, &iter.iter, uevent, node.node) {
		ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
				uevent->filter, uevent->attr.loglevel, uevent->exclusion);
		if (ua_event == NULL) {
			DBG2("UST event %s not found on shadow copy channel",
					uevent->attr.name);
			ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
			if (ua_event == NULL) {
				continue;
			}
			shadow_copy_event(ua_event, uevent);
			add_unique_ust_app_event(ua_chan, ua_event);
		}
	}

	DBG3("UST app shadow copy of channel %s done", ua_chan->name);
}

5b4a0ec0
DG
1745/*
1746 * Copy data between a UST app session and a regular LTT session.
1747 */
421cb601 1748static void shadow_copy_session(struct ust_app_session *ua_sess,
bec39940 1749 struct ltt_ust_session *usess, struct ust_app *app)
48842b30 1750{
bec39940
DG
1751 struct lttng_ht_node_str *ua_chan_node;
1752 struct lttng_ht_iter iter;
48842b30
DG
1753 struct ltt_ust_channel *uchan;
1754 struct ust_app_channel *ua_chan;
477d7741
MD
1755 time_t rawtime;
1756 struct tm *timeinfo;
1757 char datetime[16];
1758 int ret;
d7ba1388 1759 char tmp_shm_path[PATH_MAX];
477d7741
MD
1760
1761 /* Get date and time for unique app path */
1762 time(&rawtime);
1763 timeinfo = localtime(&rawtime);
1764 strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);
48842b30 1765
421cb601 1766 DBG2("Shadow copy of session handle %d", ua_sess->handle);
48842b30 1767
7972aab2
DG
1768 ua_sess->tracing_id = usess->id;
1769 ua_sess->id = get_next_session_id();
1770 ua_sess->uid = app->uid;
1771 ua_sess->gid = app->gid;
1772 ua_sess->euid = usess->uid;
1773 ua_sess->egid = usess->gid;
1774 ua_sess->buffer_type = usess->buffer_type;
1775 ua_sess->bits_per_long = app->bits_per_long;
6addfa37 1776
7972aab2 1777 /* There is only one consumer object per session possible. */
6addfa37 1778 consumer_output_get(usess->consumer);
7972aab2 1779 ua_sess->consumer = usess->consumer;
6addfa37 1780
2bba9e53 1781 ua_sess->output_traces = usess->output_traces;
ecc48a90 1782 ua_sess->live_timer_interval = usess->live_timer_interval;
84ad93e8
DG
1783 copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
1784 &usess->metadata_attr);
7972aab2
DG
1785
1786 switch (ua_sess->buffer_type) {
1787 case LTTNG_BUFFER_PER_PID:
1788 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
dec56f6c 1789 DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
7972aab2
DG
1790 datetime);
1791 break;
1792 case LTTNG_BUFFER_PER_UID:
1793 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
1794 DEFAULT_UST_TRACE_UID_PATH, ua_sess->uid, app->bits_per_long);
1795 break;
1796 default:
1797 assert(0);
1798 goto error;
1799 }
477d7741
MD
1800 if (ret < 0) {
1801 PERROR("snprintf UST shadow copy session");
477d7741 1802 assert(0);
7972aab2 1803 goto error;
477d7741
MD
1804 }
1805
3d071855
MD
1806 strncpy(ua_sess->root_shm_path, usess->root_shm_path,
1807 sizeof(ua_sess->root_shm_path));
1808 ua_sess->root_shm_path[sizeof(ua_sess->root_shm_path) - 1] = '\0';
d7ba1388
MD
1809 strncpy(ua_sess->shm_path, usess->shm_path,
1810 sizeof(ua_sess->shm_path));
1811 ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
1812 if (ua_sess->shm_path[0]) {
1813 switch (ua_sess->buffer_type) {
1814 case LTTNG_BUFFER_PER_PID:
1815 ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
1816 DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s",
1817 app->name, app->pid, datetime);
1818 break;
1819 case LTTNG_BUFFER_PER_UID:
1820 ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
1821 DEFAULT_UST_TRACE_UID_PATH,
1822 app->uid, app->bits_per_long);
1823 break;
1824 default:
1825 assert(0);
1826 goto error;
1827 }
1828 if (ret < 0) {
1829 PERROR("snprintf UST shadow copy session");
1830 assert(0);
1831 goto error;
1832 }
1833 strncat(ua_sess->shm_path, tmp_shm_path,
1834 sizeof(ua_sess->shm_path) - strlen(ua_sess->shm_path) - 1);
1835 ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
1836 }
1837
48842b30 1838 /* Iterate over all channels in global domain. */
bec39940
DG
1839 cds_lfht_for_each_entry(usess->domain_global.channels->ht, &iter.iter,
1840 uchan, node.node) {
1841 struct lttng_ht_iter uiter;
ba767faf 1842
bec39940
DG
1843 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
1844 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
5b4a0ec0 1845 if (ua_chan_node != NULL) {
fc34caaa 1846 /* Channel already exists. Continue. */
5b4a0ec0
DG
1847 continue;
1848 }
421cb601 1849
5b4a0ec0
DG
1850 DBG2("Channel %s not found on shadow session copy, creating it",
1851 uchan->name);
d0b96690 1852 ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
5b4a0ec0 1853 if (ua_chan == NULL) {
fc34caaa 1854 /* malloc failed. FIXME: Might want to handle ENOMEM here. */
5b4a0ec0 1855 continue;
48842b30 1856 }
5b4a0ec0 1857 shadow_copy_channel(ua_chan, uchan);
ffe60014
DG
1858 /*
1859 * The concept of metadata channel does not exist on the tracing
1860 * registry side of the session daemon so this can only be a per CPU
1861 * channel and not metadata.
1862 */
1863 ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
1864
bec39940 1865 lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
48842b30 1866 }
6addfa37 1867 return;
7972aab2
DG
1868
1869error:
6addfa37 1870 consumer_output_put(ua_sess->consumer);
48842b30
DG
1871}
1872
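/*
 * Editor's sketch (not part of the original file): the per-PID session
 * sub-path produced above has the form "<app_name>-<pid>-<YYYYmmdd-HHMMSS>",
 * appended to DEFAULT_UST_TRACE_PID_PATH. A minimal stand-alone version of
 * that formatting, assuming only standard C:
 */
#include <stdio.h>
#include <time.h>

static int build_per_pid_suffix_sketch(char *buf, size_t len,
		const char *app_name, int pid)
{
	char datetime[16];
	time_t rawtime;
	struct tm *timeinfo;

	time(&rawtime);
	timeinfo = localtime(&rawtime);
	if (!timeinfo) {
		return -1;
	}
	strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);
	/* Mirrors the "%s-%d-%s" portion of the snprintf() calls above. */
	return snprintf(buf, len, "%s-%d-%s", app_name, pid, datetime);
}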
78f0bacd
DG
1873/*
1874 * Lookup session wrapper.
1875 */
84cd17c6
MD
1876static
1877void __lookup_session_by_app(struct ltt_ust_session *usess,
bec39940 1878 struct ust_app *app, struct lttng_ht_iter *iter)
84cd17c6
MD
1879{
1880 /* Get right UST app session from app */
d9bf3ca4 1881 lttng_ht_lookup(app->sessions, &usess->id, iter);
84cd17c6
MD
1882}
1883
421cb601
DG
1884/*
1885 * Return ust app session from the app session hashtable using the UST session
a991f516 1886 * id.
421cb601 1887 */
48842b30
DG
1888static struct ust_app_session *lookup_session_by_app(
1889 struct ltt_ust_session *usess, struct ust_app *app)
1890{
bec39940 1891 struct lttng_ht_iter iter;
d9bf3ca4 1892 struct lttng_ht_node_u64 *node;
48842b30 1893
84cd17c6 1894 __lookup_session_by_app(usess, app, &iter);
d9bf3ca4 1895 node = lttng_ht_iter_get_node_u64(&iter);
48842b30
DG
1896 if (node == NULL) {
1897 goto error;
1898 }
1899
1900 return caa_container_of(node, struct ust_app_session, node);
1901
1902error:
1903 return NULL;
1904}
1905
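/*
 * Editor's sketch (not part of the original file): the lookup above returns a
 * hash node embedded inside the session object and recovers the enclosing
 * struct with caa_container_of(). A generic equivalent of that idiom using
 * offsetof(); the two structs below are illustrative only.
 */
#include <stddef.h>

#define sketch_container_of(ptr, type, member) \
	((type *) ((char *) (ptr) - offsetof(type, member)))

struct sketch_node {
	unsigned long key;
};

struct sketch_session {
	int handle;
	struct sketch_node node;	/* embedded hash table node */
};

static struct sketch_session *sketch_session_from_node(struct sketch_node *n)
{
	return n ? sketch_container_of(n, struct sketch_session, node) : NULL;
}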
7972aab2
DG
1906/*
1907 * Setup buffer registry per PID for the given session and application. If none
1908 * is found, a new one is created, added to the global registry and
1909 * initialized. If regp is valid, it's set with the newly created object.
1910 *
1911 * Return 0 on success or else a negative value.
1912 */
1913static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
1914 struct ust_app *app, struct buffer_reg_pid **regp)
1915{
1916 int ret = 0;
1917 struct buffer_reg_pid *reg_pid;
1918
1919 assert(ua_sess);
1920 assert(app);
1921
1922 rcu_read_lock();
1923
1924 reg_pid = buffer_reg_pid_find(ua_sess->id);
1925 if (!reg_pid) {
1926 /*
1927 * This is the create channel path meaning that if there is NO
1928 * registry available, we have to create one for this session.
1929 */
d7ba1388 1930 ret = buffer_reg_pid_create(ua_sess->id, &reg_pid,
3d071855 1931 ua_sess->root_shm_path, ua_sess->shm_path);
7972aab2
DG
1932 if (ret < 0) {
1933 goto error;
1934 }
7972aab2
DG
1935 } else {
1936 goto end;
1937 }
1938
1939 /* Initialize registry. */
1940 ret = ust_registry_session_init(&reg_pid->registry->reg.ust, app,
1941 app->bits_per_long, app->uint8_t_alignment,
1942 app->uint16_t_alignment, app->uint32_t_alignment,
af6142cf
MD
1943 app->uint64_t_alignment, app->long_alignment,
1944 app->byte_order, app->version.major,
3d071855
MD
1945 app->version.minor, reg_pid->root_shm_path,
1946 reg_pid->shm_path,
d7ba1388 1947 ua_sess->euid, ua_sess->egid);
7972aab2 1948 if (ret < 0) {
286c991a
MD
1949 /*
1950 * reg_pid->registry->reg.ust is NULL upon error, so we need to
1951 * destroy the buffer registry, because it is always expected
1952 * that if the buffer registry can be found, its ust registry is
1953 * non-NULL.
1954 */
1955 buffer_reg_pid_destroy(reg_pid);
7972aab2
DG
1956 goto error;
1957 }
1958
286c991a
MD
1959 buffer_reg_pid_add(reg_pid);
1960
7972aab2
DG
1961 DBG3("UST app buffer registry per PID created successfully");
1962
1963end:
1964 if (regp) {
1965 *regp = reg_pid;
1966 }
1967error:
1968 rcu_read_unlock();
1969 return ret;
1970}
1971
1972/*
1973 * Setup buffer registry per UID for the given session and application. If none
1974 * is found, a new one is created, added to the global registry and
1975 * initialized. If regp is valid, it's set with the newly created object.
1976 *
1977 * Return 0 on success or else a negative value.
1978 */
1979static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
d7ba1388 1980 struct ust_app_session *ua_sess,
7972aab2
DG
1981 struct ust_app *app, struct buffer_reg_uid **regp)
1982{
1983 int ret = 0;
1984 struct buffer_reg_uid *reg_uid;
1985
1986 assert(usess);
1987 assert(app);
1988
1989 rcu_read_lock();
1990
1991 reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
1992 if (!reg_uid) {
1993 /*
1994 * This is the create channel path meaning that if there is NO
1995 * registry available, we have to create one for this session.
1996 */
1997 ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
3d071855
MD
1998 LTTNG_DOMAIN_UST, &reg_uid,
1999 ua_sess->root_shm_path, ua_sess->shm_path);
7972aab2
DG
2000 if (ret < 0) {
2001 goto error;
2002 }
7972aab2
DG
2003 } else {
2004 goto end;
2005 }
2006
2007 /* Initialize registry. */
af6142cf 2008 ret = ust_registry_session_init(&reg_uid->registry->reg.ust, NULL,
7972aab2
DG
2009 app->bits_per_long, app->uint8_t_alignment,
2010 app->uint16_t_alignment, app->uint32_t_alignment,
af6142cf
MD
2011 app->uint64_t_alignment, app->long_alignment,
2012 app->byte_order, app->version.major,
3d071855
MD
2013 app->version.minor, reg_uid->root_shm_path,
2014 reg_uid->shm_path, usess->uid, usess->gid);
7972aab2 2015 if (ret < 0) {
286c991a
MD
2016 /*
2017 * reg_uid->registry->reg.ust is NULL upon error, so we need to
2018 * destroy the buffer registry, because it is always expected
2019 * that if the buffer registry can be found, its ust registry is
2020 * non-NULL.
2021 */
2022 buffer_reg_uid_destroy(reg_uid, NULL);
7972aab2
DG
2023 goto error;
2024 }
2025 /* Add node to teardown list of the session. */
2026 cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);
2027
286c991a 2028 buffer_reg_uid_add(reg_uid);
7972aab2 2029
286c991a 2030 DBG3("UST app buffer registry per UID created successfully");
7972aab2
DG
2031end:
2032 if (regp) {
2033 *regp = reg_uid;
2034 }
2035error:
2036 rcu_read_unlock();
2037 return ret;
2038}
2039
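/*
 * Editor's sketch (not part of the original file): setup_buffer_reg_pid() and
 * setup_buffer_reg_uid() above share the same find-or-create shape: look the
 * registry up, create and initialize it only if missing, and tear it down
 * again if initialization fails, so that a registry that can be found is
 * always fully initialized. Generic outline with hypothetical sketch_reg_*
 * stubs standing in for the buffer_reg_* helpers:
 */
struct sketch_reg {
	int key;
	int initialized;
};

static struct sketch_reg *sketch_reg_find(int key) { (void) key; return NULL; }
static struct sketch_reg *sketch_reg_create(int key)
{ static struct sketch_reg r; r.key = key; r.initialized = 0; return &r; }
static int sketch_reg_init(struct sketch_reg *r) { r->initialized = 1; return 0; }
static void sketch_reg_destroy(struct sketch_reg *r) { r->initialized = 0; }
static void sketch_reg_publish(struct sketch_reg *r) { (void) r; }

static int setup_reg_sketch(int key)
{
	int ret;
	struct sketch_reg *reg;

	reg = sketch_reg_find(key);
	if (reg) {
		return 0;	/* Already created by an earlier call. */
	}
	reg = sketch_reg_create(key);
	if (!reg) {
		return -1;
	}
	ret = sketch_reg_init(reg);
	if (ret < 0) {
		/* Never publish a half-initialized registry. */
		sketch_reg_destroy(reg);
		return ret;
	}
	sketch_reg_publish(reg);	/* Visible only once fully set up. */
	return 0;
}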
421cb601 2040/*
3d8ca23b 2041 * Create a session on the tracer side for the given app.
421cb601 2042 *
3d8ca23b
DG
2043 * On success, ua_sess_ptr is populated with the session pointer or else left
2044 * untouched. If the session was created, is_created is set to 1. On error,
2045 * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
2046 * be NULL.
2047 *
2048 * Returns 0 on success or else a negative code which is either -ENOMEM or
2049 * -ENOTCONN which is the default code if the ustctl_create_session fails.
421cb601 2050 */
3d8ca23b
DG
2051static int create_ust_app_session(struct ltt_ust_session *usess,
2052 struct ust_app *app, struct ust_app_session **ua_sess_ptr,
2053 int *is_created)
421cb601 2054{
3d8ca23b 2055 int ret, created = 0;
421cb601
DG
2056 struct ust_app_session *ua_sess;
2057
3d8ca23b
DG
2058 assert(usess);
2059 assert(app);
2060 assert(ua_sess_ptr);
2061
840cb59c 2062 health_code_update();
86acf0da 2063
421cb601
DG
2064 ua_sess = lookup_session_by_app(usess, app);
2065 if (ua_sess == NULL) {
d9bf3ca4 2066 DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
852d0037 2067 app->pid, usess->id);
d0b96690 2068 ua_sess = alloc_ust_app_session(app);
421cb601
DG
2069 if (ua_sess == NULL) {
2070 /* Only malloc can fail here, so something is really wrong. */
3d8ca23b
DG
2071 ret = -ENOMEM;
2072 goto error;
421cb601 2073 }
477d7741 2074 shadow_copy_session(ua_sess, usess, app);
3d8ca23b 2075 created = 1;
421cb601
DG
2076 }
2077
7972aab2
DG
2078 switch (usess->buffer_type) {
2079 case LTTNG_BUFFER_PER_PID:
2080 /* Init local registry. */
2081 ret = setup_buffer_reg_pid(ua_sess, app, NULL);
421cb601 2082 if (ret < 0) {
e64207cf 2083 delete_ust_app_session(-1, ua_sess, app);
7972aab2
DG
2084 goto error;
2085 }
2086 break;
2087 case LTTNG_BUFFER_PER_UID:
2088 /* Look for a global registry. If none exists, create one. */
d7ba1388 2089 ret = setup_buffer_reg_uid(usess, ua_sess, app, NULL);
7972aab2 2090 if (ret < 0) {
e64207cf 2091 delete_ust_app_session(-1, ua_sess, app);
7972aab2
DG
2092 goto error;
2093 }
2094 break;
2095 default:
2096 assert(0);
2097 ret = -EINVAL;
2098 goto error;
2099 }
2100
2101 health_code_update();
2102
2103 if (ua_sess->handle == -1) {
fb45065e 2104 pthread_mutex_lock(&app->sock_lock);
7972aab2 2105 ret = ustctl_create_session(app->sock);
fb45065e 2106 pthread_mutex_unlock(&app->sock_lock);
7972aab2
DG
2107 if (ret < 0) {
2108 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
2109 ERR("Creating session for app pid %d with ret %d",
ffe60014
DG
2110 app->pid, ret);
2111 } else {
2112 DBG("UST app creating session failed. Application is dead");
3757b385
DG
2113 /*
2114 * This is normal behavior, an application can die during the
2115 * creation process. Don't report an error so the execution can
2116 * continue normally. This will get flagged ENOTCONN and the
2117 * caller will handle it.
2118 */
2119 ret = 0;
ffe60014 2120 }
d0b96690 2121 delete_ust_app_session(-1, ua_sess, app);
3d8ca23b
DG
2122 if (ret != -ENOMEM) {
2123 /*
2124 * Tracer is probably gone or got an internal error so let's
2125 * behave like it will soon unregister or not usable.
2126 */
2127 ret = -ENOTCONN;
2128 }
2129 goto error;
421cb601
DG
2130 }
2131
7972aab2
DG
2132 ua_sess->handle = ret;
2133
2134 /* Add ust app session to app's HT */
d9bf3ca4
MD
2135 lttng_ht_node_init_u64(&ua_sess->node,
2136 ua_sess->tracing_id);
2137 lttng_ht_add_unique_u64(app->sessions, &ua_sess->node);
7972aab2
DG
2138
2139 DBG2("UST app session created successfully with handle %d", ret);
2140 }
2141
2142 *ua_sess_ptr = ua_sess;
2143 if (is_created) {
2144 *is_created = created;
2145 }
2146
2147 /* Everything went well. */
2148 ret = 0;
2149
2150error:
2151 health_code_update();
2152 return ret;
2153}
2154
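/*
 * Editor's sketch (not part of the original file): one plausible caller shape
 * for create_ust_app_session(), following the contract documented above
 * (ua_sess_ptr is mandatory, is_created may be NULL, and -ENOTCONN means the
 * application went away, which callers in this file treat as non-fatal).
 * Illustrative only.
 */
static int create_session_caller_sketch(struct ltt_ust_session *usess,
		struct ust_app *app)
{
	int ret, created = 0;
	struct ust_app_session *ua_sess = NULL;

	ret = create_ust_app_session(usess, app, &ua_sess, &created);
	if (ret == -ENOTCONN) {
		/* Application died during creation; nothing more to do. */
		return 0;
	} else if (ret < 0) {
		return ret;
	}
	/* ua_sess is valid here; 'created' says whether it is brand new. */
	(void) created;
	(void) ua_sess;
	return 0;
}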
6a6b2068
JG
2155/*
2156 * Match function for a hash table lookup of ust_app_ctx.
2157 *
2158 * It matches an ust app context based on the context type and, in the case
2159 * of perf counters, their name.
2160 */
2161static int ht_match_ust_app_ctx(struct cds_lfht_node *node, const void *_key)
2162{
2163 struct ust_app_ctx *ctx;
2164 const struct lttng_ust_context *key;
2165
2166 assert(node);
2167 assert(_key);
2168
2169 ctx = caa_container_of(node, struct ust_app_ctx, node.node);
2170 key = _key;
2171
2172 /* Context type */
2173 if (ctx->ctx.ctx != key->ctx) {
2174 goto no_match;
2175 }
2176
2177 /* Check the name in the case of perf thread counters. */
2178 if (key->ctx == LTTNG_UST_CONTEXT_PERF_THREAD_COUNTER) {
2179 if (strncmp(key->u.perf_counter.name,
2180 ctx->ctx.u.perf_counter.name,
2181 sizeof(key->u.perf_counter.name))) {
2182 goto no_match;
2183 }
2184 }
2185
2186 /* Match. */
2187 return 1;
2188
2189no_match:
2190 return 0;
2191}
2192
2193/*
2194 * Look up an ust app context from an lttng_ust_context.
2195 *
be184a0f 2196 * Must be called while holding RCU read side lock.
6a6b2068
JG
2197 * Return an ust_app_ctx object or NULL on error.
2198 */
2199static
2200struct ust_app_ctx *find_ust_app_context(struct lttng_ht *ht,
2201 struct lttng_ust_context *uctx)
2202{
2203 struct lttng_ht_iter iter;
2204 struct lttng_ht_node_ulong *node;
2205 struct ust_app_ctx *app_ctx = NULL;
2206
2207 assert(uctx);
2208 assert(ht);
2209
2210 /* Lookup using the lttng_ust_context_type and a custom match fct. */
2211 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) uctx->ctx, lttng_ht_seed),
2212 ht_match_ust_app_ctx, uctx, &iter.iter);
2213 node = lttng_ht_iter_get_node_ulong(&iter);
2214 if (!node) {
2215 goto end;
2216 }
2217
2218 app_ctx = caa_container_of(node, struct ust_app_ctx, node);
2219
2220end:
2221 return app_ctx;
2222}
2223
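/*
 * Editor's sketch (not part of the original file): the lookup above hashes
 * only the primary key (the context type) and relies on a match callback to
 * compare secondary fields (the perf counter name). A generic two-level
 * comparison with the same contract (non-zero on match, 0 otherwise); the
 * types and the perf-counter constant are illustrative stand-ins.
 */
#include <string.h>

enum sketch_ctx_type {
	SKETCH_CTX_VTID = 0,
	SKETCH_CTX_PERF_THREAD_COUNTER = 1,
};

struct sketch_ctx_key {
	enum sketch_ctx_type type;
	char perf_name[64];
};

static int sketch_ctx_match(const struct sketch_ctx_key *a,
		const struct sketch_ctx_key *b)
{
	if (a->type != b->type) {
		return 0;
	}
	/* The name only disambiguates perf thread counters. */
	if (a->type == SKETCH_CTX_PERF_THREAD_COUNTER &&
			strncmp(a->perf_name, b->perf_name,
				sizeof(a->perf_name)) != 0) {
		return 0;
	}
	return 1;
}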
7972aab2
DG
2224/*
2225 * Create a context for the channel on the tracer.
2226 *
2227 * Called with UST app session lock held and a RCU read side lock.
2228 */
2229static
2230int create_ust_app_channel_context(struct ust_app_session *ua_sess,
2231 struct ust_app_channel *ua_chan, struct lttng_ust_context *uctx,
2232 struct ust_app *app)
2233{
2234 int ret = 0;
7972aab2
DG
2235 struct ust_app_ctx *ua_ctx;
2236
2237 DBG2("UST app adding context to channel %s", ua_chan->name);
2238
6a6b2068
JG
2239 ua_ctx = find_ust_app_context(ua_chan->ctx, uctx);
2240 if (ua_ctx) {
7972aab2
DG
2241 ret = -EEXIST;
2242 goto error;
2243 }
2244
2245 ua_ctx = alloc_ust_app_ctx(uctx);
2246 if (ua_ctx == NULL) {
2247 /* malloc failed */
2248 ret = -1;
2249 goto error;
2250 }
2251
2252 lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
aa3514e9 2253 lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
31746f93 2254 cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
7972aab2
DG
2255
2256 ret = create_ust_channel_context(ua_chan, ua_ctx, app);
2257 if (ret < 0) {
2258 goto error;
2259 }
2260
2261error:
2262 return ret;
2263}
2264
2265/*
2266 * Enable on the tracer side a ust app event for the session and channel.
2267 *
2268 * Called with UST app session lock held.
2269 */
2270static
2271int enable_ust_app_event(struct ust_app_session *ua_sess,
2272 struct ust_app_event *ua_event, struct ust_app *app)
2273{
2274 int ret;
2275
2276 ret = enable_ust_event(app, ua_sess, ua_event);
2277 if (ret < 0) {
2278 goto error;
2279 }
2280
2281 ua_event->enabled = 1;
2282
2283error:
2284 return ret;
2285}
2286
2287/*
2288 * Disable on the tracer side a ust app event for the session and channel.
2289 */
2290static int disable_ust_app_event(struct ust_app_session *ua_sess,
2291 struct ust_app_event *ua_event, struct ust_app *app)
2292{
2293 int ret;
2294
2295 ret = disable_ust_event(app, ua_sess, ua_event);
2296 if (ret < 0) {
2297 goto error;
2298 }
2299
2300 ua_event->enabled = 0;
2301
2302error:
2303 return ret;
2304}
2305
2306/*
2307 * Lookup ust app channel for session and disable it on the tracer side.
2308 */
2309static
2310int disable_ust_app_channel(struct ust_app_session *ua_sess,
2311 struct ust_app_channel *ua_chan, struct ust_app *app)
2312{
2313 int ret;
2314
2315 ret = disable_ust_channel(app, ua_sess, ua_chan);
2316 if (ret < 0) {
2317 goto error;
2318 }
2319
2320 ua_chan->enabled = 0;
2321
2322error:
2323 return ret;
2324}
2325
2326/*
2327 * Lookup ust app channel for session and enable it on the tracer side. This
2328 * MUST be called with a RCU read side lock acquired.
2329 */
2330static int enable_ust_app_channel(struct ust_app_session *ua_sess,
2331 struct ltt_ust_channel *uchan, struct ust_app *app)
2332{
2333 int ret = 0;
2334 struct lttng_ht_iter iter;
2335 struct lttng_ht_node_str *ua_chan_node;
2336 struct ust_app_channel *ua_chan;
2337
2338 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
2339 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
2340 if (ua_chan_node == NULL) {
d9bf3ca4 2341 DBG2("Unable to find channel %s in ust session id %" PRIu64,
7972aab2
DG
2342 uchan->name, ua_sess->tracing_id);
2343 goto error;
2344 }
2345
2346 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
2347
2348 ret = enable_ust_channel(app, ua_sess, ua_chan);
2349 if (ret < 0) {
2350 goto error;
2351 }
2352
2353error:
2354 return ret;
2355}
2356
2357/*
2358 * Ask the consumer to create a channel and get it if successful.
2359 *
2360 * Return 0 on success or else a negative value.
2361 */
2362static int do_consumer_create_channel(struct ltt_ust_session *usess,
2363 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
2364 int bitness, struct ust_registry_session *registry)
2365{
2366 int ret;
2367 unsigned int nb_fd = 0;
2368 struct consumer_socket *socket;
2369
2370 assert(usess);
2371 assert(ua_sess);
2372 assert(ua_chan);
2373 assert(registry);
2374
2375 rcu_read_lock();
2376 health_code_update();
2377
2378 /* Get the right consumer socket for the application. */
2379 socket = consumer_find_socket_by_bitness(bitness, usess->consumer);
2380 if (!socket) {
2381 ret = -EINVAL;
2382 goto error;
2383 }
2384
2385 health_code_update();
2386
2387 /* Need one fd for the channel. */
2388 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
2389 if (ret < 0) {
2390 ERR("Exhausted number of available FD upon create channel");
2391 goto error;
2392 }
2393
2394 /*
2395 * Ask the consumer to create the channel. The consumer will return the
2396 * number of streams to expect.
2397 */
2398 ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
2399 registry);
2400 if (ret < 0) {
2401 goto error_ask;
2402 }
2403
2404 /*
2405 * Compute the number of fds needed before receiving them. It must be 2 per
2406 * stream (2 being the default value here).
2407 */
2408 nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;
2409
2410 /* Reserve the amount of file descriptor we need. */
2411 ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
2412 if (ret < 0) {
2413 ERR("Exhausted number of available FD upon create channel");
2414 goto error_fd_get_stream;
2415 }
2416
2417 health_code_update();
2418
2419 /*
2420 * Now get the channel from the consumer. This call will populate the stream
2421 * list of that channel and set the ust objects.
2422 */
d9078d0c
DG
2423 if (usess->consumer->enabled) {
2424 ret = ust_consumer_get_channel(socket, ua_chan);
2425 if (ret < 0) {
2426 goto error_destroy;
2427 }
7972aab2
DG
2428 }
2429
2430 rcu_read_unlock();
2431 return 0;
2432
2433error_destroy:
2434 lttng_fd_put(LTTNG_FD_APPS, nb_fd);
2435error_fd_get_stream:
2436 /*
2437 * Initiate a destroy channel on the consumer since we had an error
2438 * handling it on our side. The return value is of no importance since we
2439 * already have a ret value set by the previous error that we need to
2440 * return.
2441 */
2442 (void) ust_consumer_destroy_channel(socket, ua_chan);
2443error_ask:
2444 lttng_fd_put(LTTNG_FD_APPS, 1);
2445error:
2446 health_code_update();
2447 rcu_read_unlock();
2448 return ret;
2449}
2450
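/*
 * Editor's sketch (not part of the original file): the function above
 * reserves one descriptor for the channel, then DEFAULT_UST_STREAM_FD_NUM (2)
 * per expected stream, and rolls back exactly what it reserved on each error
 * path. A condensed view of that reserve/rollback discipline, with
 * hypothetical sketch_fd_get()/sketch_fd_put() stubs standing in for
 * lttng_fd_get()/lttng_fd_put():
 */
static int sketch_fd_get(unsigned int nr) { (void) nr; return 0; }
static void sketch_fd_put(unsigned int nr) { (void) nr; }

static int channel_fd_accounting_sketch(unsigned int expected_stream_count)
{
	int ret;
	unsigned int stream_fds = 2 * expected_stream_count; /* 2 fds/stream */

	ret = sketch_fd_get(1);		/* the channel itself */
	if (ret < 0) {
		return ret;
	}
	ret = sketch_fd_get(stream_fds);
	if (ret < 0) {
		goto error_streams;
	}
	/* ... ask the consumer for the channel and receive its streams ... */
	return 0;

error_streams:
	sketch_fd_put(1);		/* roll back the channel reservation */
	return ret;
}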
2451/*
2452 * Duplicate the ust data object of the ust app stream and save it in the
2453 * buffer registry stream.
2454 *
2455 * Return 0 on success or else a negative value.
2456 */
2457static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
2458 struct ust_app_stream *stream)
2459{
2460 int ret;
2461
2462 assert(reg_stream);
2463 assert(stream);
2464
2465 /* Reserve the amount of file descriptor we need. */
2466 ret = lttng_fd_get(LTTNG_FD_APPS, 2);
2467 if (ret < 0) {
2468 ERR("Exhausted number of available FD upon duplicate stream");
2469 goto error;
2470 }
2471
2472 /* Duplicate object for stream once the original is in the registry. */
2473 ret = ustctl_duplicate_ust_object_data(&stream->obj,
2474 reg_stream->obj.ust);
2475 if (ret < 0) {
2476 ERR("Duplicate stream obj from %p to %p failed with ret %d",
2477 reg_stream->obj.ust, stream->obj, ret);
2478 lttng_fd_put(LTTNG_FD_APPS, 2);
2479 goto error;
2480 }
2481 stream->handle = stream->obj->handle;
2482
2483error:
2484 return ret;
2485}
2486
2487/*
2488 * Duplicate the ust data object of the ust app channel and save it in the
2489 * buffer registry channel.
2490 *
2491 * Return 0 on success or else a negative value.
2492 */
2493static int duplicate_channel_object(struct buffer_reg_channel *reg_chan,
2494 struct ust_app_channel *ua_chan)
2495{
2496 int ret;
2497
2498 assert(reg_chan);
2499 assert(ua_chan);
2500
2501 /* Reserve one fd for the channel object. */
2502 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
2503 if (ret < 0) {
2504 ERR("Exhausted number of available FD upon duplicate channel");
2505 goto error_fd_get;
2506 }
2507
2508 /* Duplicate object for stream once the original is in the registry. */
2509 ret = ustctl_duplicate_ust_object_data(&ua_chan->obj, reg_chan->obj.ust);
2510 if (ret < 0) {
2511 ERR("Duplicate channel obj from %p to %p failed with ret: %d",
2512 reg_chan->obj.ust, ua_chan->obj, ret);
2513 goto error;
2514 }
2515 ua_chan->handle = ua_chan->obj->handle;
2516
2517 return 0;
2518
2519error:
2520 lttng_fd_put(LTTNG_FD_APPS, 1);
2521error_fd_get:
2522 return ret;
2523}
2524
2525/*
2526 * For a given channel buffer registry, setup all streams of the given ust
2527 * application channel.
2528 *
2529 * Return 0 on success or else a negative value.
2530 */
2531static int setup_buffer_reg_streams(struct buffer_reg_channel *reg_chan,
fb45065e
MD
2532 struct ust_app_channel *ua_chan,
2533 struct ust_app *app)
7972aab2
DG
2534{
2535 int ret = 0;
2536 struct ust_app_stream *stream, *stmp;
2537
2538 assert(reg_chan);
2539 assert(ua_chan);
2540
2541 DBG2("UST app setup buffer registry stream");
2542
2543 /* Transfer the application streams to the buffer registry. */
2544 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
2545 struct buffer_reg_stream *reg_stream;
2546
2547 ret = buffer_reg_stream_create(&reg_stream);
2548 if (ret < 0) {
2549 goto error;
2550 }
2551
2552 /*
2553 * Keep original pointer and nullify it in the stream so the delete
2554 * stream call does not release the object.
2555 */
2556 reg_stream->obj.ust = stream->obj;
2557 stream->obj = NULL;
2558 buffer_reg_stream_add(reg_stream, reg_chan);
421cb601 2559
7972aab2
DG
2560 /* We don't need the streams anymore. */
2561 cds_list_del(&stream->list);
fb45065e 2562 delete_ust_app_stream(-1, stream, app);
7972aab2 2563 }
421cb601 2564
7972aab2
DG
2565error:
2566 return ret;
2567}
2568
2569/*
2570 * Create a buffer registry channel for the given session registry and
2571 * application channel object. If regp pointer is valid, it's set with the
2572 * created object. Important, the created object is NOT added to the session
2573 * registry hash table.
2574 *
2575 * Return 0 on success else a negative value.
2576 */
2577static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
2578 struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp)
2579{
2580 int ret;
2581 struct buffer_reg_channel *reg_chan = NULL;
2582
2583 assert(reg_sess);
2584 assert(ua_chan);
2585
2586 DBG2("UST app creating buffer registry channel for %s", ua_chan->name);
2587
2588 /* Create buffer registry channel. */
2589 ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &reg_chan);
2590 if (ret < 0) {
2591 goto error_create;
421cb601 2592 }
7972aab2
DG
2593 assert(reg_chan);
2594 reg_chan->consumer_key = ua_chan->key;
8c924c7b 2595 reg_chan->subbuf_size = ua_chan->attr.subbuf_size;
d07ceecd 2596 reg_chan->num_subbuf = ua_chan->attr.num_subbuf;
421cb601 2597
7972aab2
DG
2598 /* Create and add a channel registry to session. */
2599 ret = ust_registry_channel_add(reg_sess->reg.ust,
2600 ua_chan->tracing_channel_id);
2601 if (ret < 0) {
2602 goto error;
d88aee68 2603 }
7972aab2 2604 buffer_reg_channel_add(reg_sess, reg_chan);
d88aee68 2605
7972aab2
DG
2606 if (regp) {
2607 *regp = reg_chan;
3d8ca23b 2608 }
d88aee68 2609
7972aab2 2610 return 0;
3d8ca23b
DG
2611
2612error:
7972aab2
DG
2613 /* Safe because the registry channel object was not added to any HT. */
2614 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2615error_create:
3d8ca23b 2616 return ret;
421cb601
DG
2617}
2618
55cc08a6 2619/*
7972aab2
DG
2620 * Setup buffer registry channel for the given session registry and application
2621 * channel object.
d0b96690 2622 *
7972aab2 2623 * Return 0 on success else a negative value.
55cc08a6 2624 */
7972aab2 2625static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
fb45065e
MD
2626 struct ust_app_channel *ua_chan, struct buffer_reg_channel *reg_chan,
2627 struct ust_app *app)
55cc08a6 2628{
7972aab2 2629 int ret;
55cc08a6 2630
7972aab2
DG
2631 assert(reg_sess);
2632 assert(reg_chan);
2633 assert(ua_chan);
2634 assert(ua_chan->obj);
55cc08a6 2635
7972aab2 2636 DBG2("UST app setup buffer registry channel for %s", ua_chan->name);
55cc08a6 2637
7972aab2 2638 /* Setup all streams for the registry. */
fb45065e 2639 ret = setup_buffer_reg_streams(reg_chan, ua_chan, app);
7972aab2 2640 if (ret < 0) {
55cc08a6
DG
2641 goto error;
2642 }
2643
7972aab2
DG
2644 reg_chan->obj.ust = ua_chan->obj;
2645 ua_chan->obj = NULL;
55cc08a6 2646
7972aab2 2647 return 0;
55cc08a6
DG
2648
2649error:
7972aab2
DG
2650 buffer_reg_channel_remove(reg_sess, reg_chan);
2651 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
55cc08a6
DG
2652 return ret;
2653}
2654
edb67388 2655/*
7972aab2 2656 * Send buffer registry channel to the application.
d0b96690 2657 *
7972aab2 2658 * Return 0 on success else a negative value.
edb67388 2659 */
7972aab2
DG
2660static int send_channel_uid_to_ust(struct buffer_reg_channel *reg_chan,
2661 struct ust_app *app, struct ust_app_session *ua_sess,
2662 struct ust_app_channel *ua_chan)
edb67388
DG
2663{
2664 int ret;
7972aab2 2665 struct buffer_reg_stream *reg_stream;
edb67388 2666
7972aab2
DG
2667 assert(reg_chan);
2668 assert(app);
2669 assert(ua_sess);
2670 assert(ua_chan);
2671
2672 DBG("UST app sending buffer registry channel to ust sock %d", app->sock);
2673
2674 ret = duplicate_channel_object(reg_chan, ua_chan);
edb67388
DG
2675 if (ret < 0) {
2676 goto error;
2677 }
2678
7972aab2
DG
2679 /* Send channel to the application. */
2680 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
a7169585
MD
2681 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
2682 ret = -ENOTCONN; /* Caused by app exiting. */
2683 goto error;
2684 } else if (ret < 0) {
7972aab2
DG
2685 goto error;
2686 }
2687
2688 health_code_update();
2689
2690 /* Send all streams to application. */
2691 pthread_mutex_lock(&reg_chan->stream_list_lock);
2692 cds_list_for_each_entry(reg_stream, &reg_chan->streams, lnode) {
2693 struct ust_app_stream stream;
2694
2695 ret = duplicate_stream_object(reg_stream, &stream);
2696 if (ret < 0) {
2697 goto error_stream_unlock;
2698 }
2699
2700 ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
2701 if (ret < 0) {
fb45065e 2702 (void) release_ust_app_stream(-1, &stream, app);
a7169585
MD
2703 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
2704 ret = -ENOTCONN; /* Caused by app exiting. */
2705 goto error_stream_unlock;
2706 } else if (ret < 0) {
2707 goto error_stream_unlock;
2708 }
7972aab2
DG
2709 goto error_stream_unlock;
2710 }
edb67388 2711
7972aab2
DG
2712 /*
2713 * The return value is not important here. This function will output an
2714 * error if needed.
2715 */
fb45065e 2716 (void) release_ust_app_stream(-1, &stream, app);
7972aab2
DG
2717 }
2718 ua_chan->is_sent = 1;
2719
2720error_stream_unlock:
2721 pthread_mutex_unlock(&reg_chan->stream_list_lock);
edb67388
DG
2722error:
2723 return ret;
2724}
2725
9730260e 2726/*
7972aab2
DG
2727 * Create and send to the application the created buffers with per UID buffers.
2728 *
2729 * Return 0 on success else a negative value.
9730260e 2730 */
7972aab2
DG
2731static int create_channel_per_uid(struct ust_app *app,
2732 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2733 struct ust_app_channel *ua_chan)
9730260e
DG
2734{
2735 int ret;
7972aab2
DG
2736 struct buffer_reg_uid *reg_uid;
2737 struct buffer_reg_channel *reg_chan;
9730260e 2738
7972aab2
DG
2739 assert(app);
2740 assert(usess);
2741 assert(ua_sess);
2742 assert(ua_chan);
2743
2744 DBG("UST app creating channel %s with per UID buffers", ua_chan->name);
2745
2746 reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
2747 /*
2748 * The session creation handles the creation of this global registry
2749 * object. If none can be found, there is a code flow problem or a
2750 * teardown race.
2751 */
2752 assert(reg_uid);
2753
2754 reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
2755 reg_uid);
2756 if (!reg_chan) {
2757 /* Create the buffer registry channel object. */
2758 ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &reg_chan);
2759 if (ret < 0) {
f14256d6
MD
2760 ERR("Error creating the UST channel \"%s\" registry instance",
2761 ua_chan->name);
7972aab2
DG
2762 goto error;
2763 }
2764 assert(reg_chan);
2765
2766 /*
2767 * Create the buffers on the consumer side. This call populates the
2768 * ust app channel object with all streams and data object.
2769 */
2770 ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
2771 app->bits_per_long, reg_uid->registry->reg.ust);
2772 if (ret < 0) {
f14256d6
MD
2773 ERR("Error creating UST channel \"%s\" on the consumer daemon",
2774 ua_chan->name);
2775
07d2ae95
DG
2776 /*
2777 * Let's remove the previously created buffer registry channel so
2778 * it's not visible anymore in the session registry.
2779 */
2780 ust_registry_channel_del_free(reg_uid->registry->reg.ust,
2781 ua_chan->tracing_channel_id);
2782 buffer_reg_channel_remove(reg_uid->registry, reg_chan);
2783 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
7972aab2
DG
2784 goto error;
2785 }
2786
2787 /*
2788 * Setup the streams and add it to the session registry.
2789 */
fb45065e
MD
2790 ret = setup_buffer_reg_channel(reg_uid->registry,
2791 ua_chan, reg_chan, app);
7972aab2 2792 if (ret < 0) {
f14256d6
MD
2793 ERR("Error setting up UST channel \"%s\"",
2794 ua_chan->name);
7972aab2
DG
2795 goto error;
2796 }
2797
2798 }
2799
2800 /* Send buffers to the application. */
2801 ret = send_channel_uid_to_ust(reg_chan, app, ua_sess, ua_chan);
9730260e 2802 if (ret < 0) {
a7169585
MD
2803 if (ret != -ENOTCONN) {
2804 ERR("Error sending channel to application");
2805 }
9730260e
DG
2806 goto error;
2807 }
2808
9730260e
DG
2809error:
2810 return ret;
2811}
2812
78f0bacd 2813/*
7972aab2
DG
2814 * Create and send to the application the created buffers with per PID buffers.
2815 *
2816 * Return 0 on success else a negative value.
78f0bacd 2817 */
7972aab2
DG
2818static int create_channel_per_pid(struct ust_app *app,
2819 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2820 struct ust_app_channel *ua_chan)
78f0bacd 2821{
8535a6d9 2822 int ret;
7972aab2 2823 struct ust_registry_session *registry;
78f0bacd 2824
7972aab2
DG
2825 assert(app);
2826 assert(usess);
2827 assert(ua_sess);
2828 assert(ua_chan);
2829
2830 DBG("UST app creating channel %s with per PID buffers", ua_chan->name);
2831
2832 rcu_read_lock();
2833
2834 registry = get_session_registry(ua_sess);
2835 assert(registry);
2836
2837 /* Create and add a new channel registry to session. */
2838 ret = ust_registry_channel_add(registry, ua_chan->key);
78f0bacd 2839 if (ret < 0) {
f14256d6
MD
2840 ERR("Error creating the UST channel \"%s\" registry instance",
2841 ua_chan->name);
78f0bacd
DG
2842 goto error;
2843 }
2844
7972aab2
DG
2845 /* Create and get channel on the consumer side. */
2846 ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
2847 app->bits_per_long, registry);
2848 if (ret < 0) {
f14256d6
MD
2849 ERR("Error creating UST channel \"%s\" on the consumer daemon",
2850 ua_chan->name);
7972aab2
DG
2851 goto error;
2852 }
2853
2854 ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
2855 if (ret < 0) {
a7169585
MD
2856 if (ret != -ENOTCONN) {
2857 ERR("Error sending channel to application");
2858 }
7972aab2
DG
2859 goto error;
2860 }
8535a6d9 2861
78f0bacd 2862error:
7972aab2 2863 rcu_read_unlock();
78f0bacd
DG
2864 return ret;
2865}
2866
2867/*
7972aab2
DG
2868 * From an already allocated ust app channel, create the channel buffers if
2869 * need and send it to the application. This MUST be called with a RCU read
2870 * side lock acquired.
2871 *
a7169585
MD
2872 * Return 0 on success or else a negative value. Returns -ENOTCONN if
2873 * the application exited concurrently.
78f0bacd 2874 */
7972aab2
DG
2875static int do_create_channel(struct ust_app *app,
2876 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2877 struct ust_app_channel *ua_chan)
78f0bacd 2878{
7972aab2 2879 int ret;
78f0bacd 2880
7972aab2
DG
2881 assert(app);
2882 assert(usess);
2883 assert(ua_sess);
2884 assert(ua_chan);
2885
2886 /* Handle buffer type before sending the channel to the application. */
2887 switch (usess->buffer_type) {
2888 case LTTNG_BUFFER_PER_UID:
2889 {
2890 ret = create_channel_per_uid(app, usess, ua_sess, ua_chan);
2891 if (ret < 0) {
2892 goto error;
2893 }
2894 break;
2895 }
2896 case LTTNG_BUFFER_PER_PID:
2897 {
2898 ret = create_channel_per_pid(app, usess, ua_sess, ua_chan);
2899 if (ret < 0) {
2900 goto error;
2901 }
2902 break;
2903 }
2904 default:
2905 assert(0);
2906 ret = -EINVAL;
78f0bacd
DG
2907 goto error;
2908 }
2909
7972aab2
DG
2910 /* Initialize ust objd object using the received handle and add it. */
2911 lttng_ht_node_init_ulong(&ua_chan->ust_objd_node, ua_chan->handle);
2912 lttng_ht_add_unique_ulong(app->ust_objd, &ua_chan->ust_objd_node);
78f0bacd 2913
7972aab2
DG
2914 /* If channel is not enabled, disable it on the tracer */
2915 if (!ua_chan->enabled) {
2916 ret = disable_ust_channel(app, ua_sess, ua_chan);
2917 if (ret < 0) {
2918 goto error;
2919 }
78f0bacd
DG
2920 }
2921
2922error:
2923 return ret;
2924}
2925
284d8f55 2926/*
4d710ac2
DG
2927 * Create UST app channel and create it on the tracer. Set ua_chanp of the
2928 * newly created channel if not NULL.
d0b96690 2929 *
36b588ed 2930 * Called with UST app session lock and RCU read-side lock held.
7972aab2 2931 *
a7169585
MD
2932 * Return 0 on success or else a negative value. Returns -ENOTCONN if
2933 * the application exited concurrently.
284d8f55 2934 */
4d710ac2
DG
2935static int create_ust_app_channel(struct ust_app_session *ua_sess,
2936 struct ltt_ust_channel *uchan, struct ust_app *app,
7972aab2 2937 enum lttng_ust_chan_type type, struct ltt_ust_session *usess,
4d710ac2 2938 struct ust_app_channel **ua_chanp)
5b4a0ec0
DG
2939{
2940 int ret = 0;
bec39940
DG
2941 struct lttng_ht_iter iter;
2942 struct lttng_ht_node_str *ua_chan_node;
5b4a0ec0
DG
2943 struct ust_app_channel *ua_chan;
2944
2945 /* Lookup channel in the ust app session */
bec39940
DG
2946 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
2947 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
fc34caaa 2948 if (ua_chan_node != NULL) {
5b4a0ec0 2949 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
fc34caaa 2950 goto end;
5b4a0ec0
DG
2951 }
2952
d0b96690 2953 ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
fc34caaa
DG
2954 if (ua_chan == NULL) {
2955 /* Only malloc can fail here */
4d710ac2 2956 ret = -ENOMEM;
094d1690 2957 goto error_alloc;
fc34caaa
DG
2958 }
2959 shadow_copy_channel(ua_chan, uchan);
2960
ffe60014
DG
2961 /* Set channel type. */
2962 ua_chan->attr.type = type;
2963
7972aab2 2964 ret = do_create_channel(app, usess, ua_sess, ua_chan);
5b4a0ec0
DG
2965 if (ret < 0) {
2966 goto error;
2967 }
2968
fc34caaa 2969 DBG2("UST app create channel %s for PID %d completed", ua_chan->name,
852d0037 2970 app->pid);
fc34caaa 2971
d0b96690
DG
2972 /* Only add the channel if successful on the tracer side. */
2973 lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
2974
fc34caaa 2975end:
4d710ac2
DG
2976 if (ua_chanp) {
2977 *ua_chanp = ua_chan;
2978 }
2979
2980 /* Everything went well. */
2981 return 0;
5b4a0ec0
DG
2982
2983error:
d0b96690 2984 delete_ust_app_channel(ua_chan->is_sent ? app->sock : -1, ua_chan, app);
094d1690 2985error_alloc:
4d710ac2 2986 return ret;
5b4a0ec0
DG
2987}
2988
2989/*
2990 * Create UST app event and create it on the tracer side.
d0b96690
DG
2991 *
2992 * Called with ust app session mutex held.
5b4a0ec0 2993 */
edb67388
DG
2994static
2995int create_ust_app_event(struct ust_app_session *ua_sess,
2996 struct ust_app_channel *ua_chan, struct ltt_ust_event *uevent,
2997 struct ust_app *app)
284d8f55 2998{
edb67388 2999 int ret = 0;
5b4a0ec0 3000 struct ust_app_event *ua_event;
284d8f55 3001
5b4a0ec0 3002 /* Get event node */
18eace3b 3003 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
39c5a3a7 3004 uevent->filter, uevent->attr.loglevel, uevent->exclusion);
18eace3b 3005 if (ua_event != NULL) {
fc34caaa 3006 ret = -EEXIST;
edb67388
DG
3007 goto end;
3008 }
5b4a0ec0 3009
edb67388
DG
3010 /* Does not exist so create one */
3011 ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
3012 if (ua_event == NULL) {
3013 /* Only malloc can fail here, so something is really wrong. */
3014 ret = -ENOMEM;
fc34caaa 3015 goto end;
5b4a0ec0 3016 }
edb67388 3017 shadow_copy_event(ua_event, uevent);
5b4a0ec0 3018
edb67388 3019 /* Create it on the tracer side */
5b4a0ec0 3020 ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
284d8f55 3021 if (ret < 0) {
fc34caaa 3022 /* Not found previously means that it does not exist on the tracer */
76f66f63 3023 assert(ret != -LTTNG_UST_ERR_EXIST);
284d8f55
DG
3024 goto error;
3025 }
3026
d0b96690 3027 add_unique_ust_app_event(ua_chan, ua_event);
284d8f55 3028
fc34caaa 3029 DBG2("UST app create event %s for PID %d completed", ua_event->name,
852d0037 3030 app->pid);
7f79d3a1 3031
edb67388 3032end:
fc34caaa
DG
3033 return ret;
3034
5b4a0ec0 3035error:
fc34caaa 3036 /* Valid. The caller already holds the RCU read side lock. */
fb45065e 3037 delete_ust_app_event(-1, ua_event, app);
edb67388 3038 return ret;
5b4a0ec0
DG
3039}
3040
3041/*
3042 * Create UST metadata and open it on the tracer side.
d0b96690 3043 *
7972aab2 3044 * Called with UST app session lock held and RCU read side lock.
5b4a0ec0
DG
3045 */
3046static int create_ust_app_metadata(struct ust_app_session *ua_sess,
ad7a9107 3047 struct ust_app *app, struct consumer_output *consumer)
5b4a0ec0
DG
3048{
3049 int ret = 0;
ffe60014 3050 struct ust_app_channel *metadata;
d88aee68 3051 struct consumer_socket *socket;
7972aab2 3052 struct ust_registry_session *registry;
5b4a0ec0 3053
ffe60014
DG
3054 assert(ua_sess);
3055 assert(app);
d88aee68 3056 assert(consumer);
5b4a0ec0 3057
7972aab2
DG
3058 registry = get_session_registry(ua_sess);
3059 assert(registry);
3060
ce34fcd0
MD
3061 pthread_mutex_lock(&registry->lock);
3062
1b532a60
DG
3063 /* Metadata already exists for this registry or it was closed previously */
3064 if (registry->metadata_key || registry->metadata_closed) {
7972aab2
DG
3065 ret = 0;
3066 goto error;
5b4a0ec0
DG
3067 }
3068
ffe60014 3069 /* Allocate UST metadata */
d0b96690 3070 metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, NULL);
ffe60014
DG
3071 if (!metadata) {
3072 /* malloc() failed */
3073 ret = -ENOMEM;
3074 goto error;
3075 }
5b4a0ec0 3076
ad7a9107 3077 memcpy(&metadata->attr, &ua_sess->metadata_attr, sizeof(metadata->attr));
5b4a0ec0 3078
7972aab2
DG
3079 /* Need one fd for the channel. */
3080 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
3081 if (ret < 0) {
3082 ERR("Exhausted number of available FD upon create metadata");
3083 goto error;
3084 }
3085
4dc3dfc5
DG
3086 /* Get the right consumer socket for the application. */
3087 socket = consumer_find_socket_by_bitness(app->bits_per_long, consumer);
3088 if (!socket) {
3089 ret = -EINVAL;
3090 goto error_consumer;
3091 }
3092
331744e3
JD
3093 /*
3094 * Keep metadata key so we can identify it on the consumer side. Assign it
3095 * to the registry *before* we ask the consumer so we avoid the race where
3096 * the consumer requests the metadata before the ask_channel call on our
3097 * side has returned.
3098 */
3099 registry->metadata_key = metadata->key;
3100
d88aee68
DG
3101 /*
3102 * Ask the consumer to create the metadata channel. The metadata object
3103 * will be created by the consumer and kept there. However, the stream is
3104 * never added or monitored until we do a first push metadata to the
3105 * consumer.
3106 */
7972aab2
DG
3107 ret = ust_consumer_ask_channel(ua_sess, metadata, consumer, socket,
3108 registry);
d88aee68 3109 if (ret < 0) {
f2a444f1
DG
3110 /* Nullify the metadata key so we don't try to close it later on. */
3111 registry->metadata_key = 0;
d88aee68
DG
3112 goto error_consumer;
3113 }
3114
3115 /*
3116 * The setup command will make the metadata stream be sent to the relayd,
3117 * if applicable, and to the thread managing the metadata. This is important
3118 * because after this point, if an error occurs, the only way the stream
3119 * can be deleted is to be monitored in the consumer.
3120 */
7972aab2 3121 ret = consumer_setup_metadata(socket, metadata->key);
ffe60014 3122 if (ret < 0) {
f2a444f1
DG
3123 /* Nullify the metadata key so we don't try to close it later on. */
3124 registry->metadata_key = 0;
d88aee68 3125 goto error_consumer;
5b4a0ec0
DG
3126 }
3127
7972aab2
DG
3128 DBG2("UST metadata with key %" PRIu64 " created for app pid %d",
3129 metadata->key, app->pid);
5b4a0ec0 3130
d88aee68 3131error_consumer:
b80f0b6c 3132 lttng_fd_put(LTTNG_FD_APPS, 1);
d88aee68 3133 delete_ust_app_channel(-1, metadata, app);
5b4a0ec0 3134error:
ce34fcd0 3135 pthread_mutex_unlock(&registry->lock);
ffe60014 3136 return ret;
5b4a0ec0
DG
3137}
3138
5b4a0ec0 3139/*
d88aee68
DG
3140 * Return ust app pointer or NULL if not found. RCU read side lock MUST be
3141 * acquired before calling this function.
5b4a0ec0
DG
3142 */
3143struct ust_app *ust_app_find_by_pid(pid_t pid)
3144{
d88aee68 3145 struct ust_app *app = NULL;
bec39940
DG
3146 struct lttng_ht_node_ulong *node;
3147 struct lttng_ht_iter iter;
5b4a0ec0 3148
bec39940
DG
3149 lttng_ht_lookup(ust_app_ht, (void *)((unsigned long) pid), &iter);
3150 node = lttng_ht_iter_get_node_ulong(&iter);
5b4a0ec0
DG
3151 if (node == NULL) {
3152 DBG2("UST app not found with pid %d", pid);
3153 goto error;
3154 }
5b4a0ec0
DG
3155
3156 DBG2("Found UST app by pid %d", pid);
3157
d88aee68 3158 app = caa_container_of(node, struct ust_app, pid_n);
5b4a0ec0
DG
3159
3160error:
d88aee68 3161 return app;
5b4a0ec0
DG
3162}
3163
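/*
 * Editor's sketch (not part of the original file): as the comment above
 * states, the RCU read-side lock must be held across the lookup and across
 * any use of the returned pointer. A minimal caller shape:
 */
static int use_app_by_pid_sketch(pid_t pid)
{
	int found = 0;
	struct ust_app *app;

	rcu_read_lock();
	app = ust_app_find_by_pid(pid);
	if (app) {
		/* Only dereference 'app' while the read-side lock is held. */
		found = 1;
	}
	rcu_read_unlock();
	return found;
}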
d88aee68
DG
3164/*
3165 * Allocate and init an UST app object using the registration information and
3166 * the command socket. This is called when the command socket connects to the
3167 * session daemon.
3168 *
3169 * The object is returned on success or else NULL.
3170 */
d0b96690 3171struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
5b4a0ec0 3172{
d0b96690
DG
3173 struct ust_app *lta = NULL;
3174
3175 assert(msg);
3176 assert(sock >= 0);
3177
3178 DBG3("UST app creating application for socket %d", sock);
5b4a0ec0 3179
173af62f
DG
3180 if ((msg->bits_per_long == 64 &&
3181 (uatomic_read(&ust_consumerd64_fd) == -EINVAL))
3182 || (msg->bits_per_long == 32 &&
3183 (uatomic_read(&ust_consumerd32_fd) == -EINVAL))) {
f943b0fb 3184 ERR("Registration failed: application \"%s\" (pid: %d) has "
d0b96690
DG
3185 "%d-bit long, but no consumerd for this size is available.\n",
3186 msg->name, msg->pid, msg->bits_per_long);
3187 goto error;
3f2c5fcc 3188 }
d0b96690 3189
5b4a0ec0
DG
3190 lta = zmalloc(sizeof(struct ust_app));
3191 if (lta == NULL) {
3192 PERROR("malloc");
d0b96690 3193 goto error;
5b4a0ec0
DG
3194 }
3195
3196 lta->ppid = msg->ppid;
3197 lta->uid = msg->uid;
3198 lta->gid = msg->gid;
d0b96690 3199
7753dea8 3200 lta->bits_per_long = msg->bits_per_long;
d0b96690
DG
3201 lta->uint8_t_alignment = msg->uint8_t_alignment;
3202 lta->uint16_t_alignment = msg->uint16_t_alignment;
3203 lta->uint32_t_alignment = msg->uint32_t_alignment;
3204 lta->uint64_t_alignment = msg->uint64_t_alignment;
3205 lta->long_alignment = msg->long_alignment;
3206 lta->byte_order = msg->byte_order;
3207
5b4a0ec0
DG
3208 lta->v_major = msg->major;
3209 lta->v_minor = msg->minor;
d9bf3ca4 3210 lta->sessions = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
d0b96690
DG
3211 lta->ust_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3212 lta->notify_sock = -1;
d88aee68
DG
3213
3214 /* Copy name and make sure it's NULL terminated. */
3215 strncpy(lta->name, msg->name, sizeof(lta->name));
3216 lta->name[UST_APP_PROCNAME_LEN] = '\0';
3217
3218 /*
3219 * Before this can be called, when receiving the registration information,
3220 * the application compatibility is checked. So, at this point, the
3221 * application can work with this session daemon.
3222 */
d0b96690 3223 lta->compatible = 1;
5b4a0ec0 3224
852d0037 3225 lta->pid = msg->pid;
d0b96690 3226 lttng_ht_node_init_ulong(&lta->pid_n, (unsigned long) lta->pid);
852d0037 3227 lta->sock = sock;
fb45065e 3228 pthread_mutex_init(&lta->sock_lock, NULL);
d0b96690 3229 lttng_ht_node_init_ulong(&lta->sock_n, (unsigned long) lta->sock);
5b4a0ec0 3230
d42f20df 3231 CDS_INIT_LIST_HEAD(&lta->teardown_head);
d0b96690
DG
3232error:
3233 return lta;
3234}
3235
d88aee68
DG
3236/*
3237 * For a given application object, add it to every hash table.
3238 */
d0b96690
DG
3239void ust_app_add(struct ust_app *app)
3240{
3241 assert(app);
3242 assert(app->notify_sock >= 0);
3243
5b4a0ec0 3244 rcu_read_lock();
852d0037
DG
3245
3246 /*
3247 * On a re-registration, we want to kick out the previous registration of
3248 * that pid
3249 */
d0b96690 3250 lttng_ht_add_replace_ulong(ust_app_ht, &app->pid_n);
852d0037
DG
3251
3252 /*
3253 * The socket _should_ be unique until _we_ call close. So, a add_unique
3254 * for the ust_app_ht_by_sock is used which asserts fail if the entry was
3255 * already in the table.
3256 */
d0b96690 3257 lttng_ht_add_unique_ulong(ust_app_ht_by_sock, &app->sock_n);
852d0037 3258
d0b96690
DG
3259 /* Add application to the notify socket hash table. */
3260 lttng_ht_node_init_ulong(&app->notify_sock_n, app->notify_sock);
3261 lttng_ht_add_unique_ulong(ust_app_ht_by_notify_sock, &app->notify_sock_n);
5b4a0ec0 3262
d0b96690 3263 DBG("App registered with pid:%d ppid:%d uid:%d gid:%d sock:%d name:%s "
d88aee68
DG
3264 "notify_sock:%d (version %d.%d)", app->pid, app->ppid, app->uid,
3265 app->gid, app->sock, app->name, app->notify_sock, app->v_major,
3266 app->v_minor);
5b4a0ec0 3267
d0b96690
DG
3268 rcu_read_unlock();
3269}
3270
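/*
 * Editor's sketch (not part of the original file): the typical registration
 * flow implied by the two functions above. ust_app_add() asserts that the
 * notify socket is set, so it must be assigned between creation and
 * insertion. Error handling and the real notify-socket exchange are elided.
 */
static int register_app_sketch(struct ust_register_msg *msg, int sock,
		int notify_sock)
{
	struct ust_app *app;

	app = ust_app_create(msg, sock);
	if (!app) {
		return -1;
	}
	app->notify_sock = notify_sock;	/* required before ust_app_add() */
	ust_app_add(app);
	return 0;
}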
d88aee68
DG
3271/*
3272 * Set the application version into the object.
3273 *
3274 * Return 0 on success else a negative value either an errno code or a
3275 * LTTng-UST error code.
3276 */
d0b96690
DG
3277int ust_app_version(struct ust_app *app)
3278{
d88aee68
DG
3279 int ret;
3280
d0b96690 3281 assert(app);
d88aee68 3282
fb45065e 3283 pthread_mutex_lock(&app->sock_lock);
d88aee68 3284 ret = ustctl_tracer_version(app->sock, &app->version);
fb45065e 3285 pthread_mutex_unlock(&app->sock_lock);
d88aee68
DG
3286 if (ret < 0) {
3287 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
5368d366 3288 ERR("UST app %d version failed with ret %d", app->sock, ret);
d88aee68 3289 } else {
5368d366 3290 DBG3("UST app %d version failed. Application is dead", app->sock);
d88aee68
DG
3291 }
3292 }
3293
3294 return ret;
5b4a0ec0
DG
3295}
3296
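/*
 * Editor's sketch (not part of the original file): ust_app_version() above,
 * like most ustctl calls in this file, treats -EPIPE and
 * -LTTNG_UST_ERR_EXITING as "the application went away" rather than as real
 * errors. A small helper capturing that convention:
 */
static int app_is_gone_sketch(int ret)
{
	return ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING;
}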
3297/*
3298 * Unregister app by removing it from the global traceable app list and freeing
3299 * the data struct.
3300 *
3301 * The socket is already closed at this point so no close to sock.
3302 */
3303void ust_app_unregister(int sock)
3304{
3305 struct ust_app *lta;
bec39940 3306 struct lttng_ht_node_ulong *node;
c4b88406 3307 struct lttng_ht_iter ust_app_sock_iter;
bec39940 3308 struct lttng_ht_iter iter;
d42f20df 3309 struct ust_app_session *ua_sess;
525b0740 3310 int ret;
5b4a0ec0
DG
3311
3312 rcu_read_lock();
886459c6 3313
5b4a0ec0 3314 /* Get the node reference for a call_rcu */
c4b88406
MD
3315 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &ust_app_sock_iter);
3316 node = lttng_ht_iter_get_node_ulong(&ust_app_sock_iter);
d0b96690 3317 assert(node);
284d8f55 3318
852d0037 3319 lta = caa_container_of(node, struct ust_app, sock_n);
852d0037
DG
3320 DBG("PID %d unregistering with sock %d", lta->pid, sock);
3321
d88aee68 3322 /*
ce34fcd0
MD
3323 * For per-PID buffers, perform "push metadata" and flush all
3324 * application streams before removing app from hash tables,
3325 * ensuring proper behavior of data_pending check.
c4b88406 3326 * Remove sessions so they are not visible during deletion.
d88aee68 3327 */
d42f20df
DG
3328 cds_lfht_for_each_entry(lta->sessions->ht, &iter.iter, ua_sess,
3329 node.node) {
7972aab2
DG
3330 struct ust_registry_session *registry;
3331
d42f20df
DG
3332 ret = lttng_ht_del(lta->sessions, &iter);
3333 if (ret) {
3334 /* The session was already removed and is scheduled for teardown. */
3335 continue;
3336 }
3337
ce34fcd0
MD
3338 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
3339 (void) ust_app_flush_app_session(lta, ua_sess);
3340 }
c4b88406 3341
d42f20df
DG
3342 /*
3343 * Add session to list for teardown. This is safe since at this point we
3344 * are the only one using this list.
3345 */
d88aee68
DG
3346 pthread_mutex_lock(&ua_sess->lock);
3347
b161602a
MD
3348 if (ua_sess->deleted) {
3349 pthread_mutex_unlock(&ua_sess->lock);
3350 continue;
3351 }
3352
d88aee68
DG
3353 /*
3354 * Normally, this is done in the delete session process which is
3355 * executed in the call rcu below. However, upon registration we can't
3356 * afford to wait for the grace period before pushing data or else the
3357 * data pending feature can race between the unregistration and stop
3358 * command where the data pending command is sent *before* the grace
3359 * period ended.
3360 *
3361 * The close metadata below nullifies the metadata pointer in the
3362 * session so the delete session will NOT push/close a second time.
3363 */
7972aab2 3364 registry = get_session_registry(ua_sess);
ce34fcd0 3365 if (registry) {
7972aab2
DG
3366 /* Push metadata for application before freeing the application. */
3367 (void) push_metadata(registry, ua_sess->consumer);
3368
3369 /*
3370 * Don't ask to close metadata for global per UID buffers. Close
1b532a60
DG
3371 * metadata only on destroy trace session in this case. Also, the
3372 * previous push metadata could have flag the metadata registry to
3373 * close so don't send a close command if closed.
7972aab2 3374 */
ce34fcd0 3375 if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
7972aab2
DG
3376 /* And ask to close it for this session registry. */
3377 (void) close_metadata(registry, ua_sess->consumer);
3378 }
3379 }
d42f20df 3380 cds_list_add(&ua_sess->teardown_node, &lta->teardown_head);
c4b88406 3381
d88aee68 3382 pthread_mutex_unlock(&ua_sess->lock);
d42f20df
DG
3383 }
3384
c4b88406
MD
3385 /* Remove application from PID hash table */
3386 ret = lttng_ht_del(ust_app_ht_by_sock, &ust_app_sock_iter);
3387 assert(!ret);
3388
3389 /*
3390 * Remove application from notify hash table. The thread handling the
3391 * notify socket could have deleted the node so ignore on error because
3392 * either way it's valid. The close of that socket is handled by the other
3393 * thread.
3394 */
3395 iter.iter.node = &lta->notify_sock_n.node;
3396 (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
3397
3398 /*
3399 * Ignore return value since the node might have been removed before by an
3400 * add replace during app registration because the PID can be reassigned by
3401 * the OS.
3402 */
3403 iter.iter.node = &lta->pid_n.node;
3404 ret = lttng_ht_del(ust_app_ht, &iter);
3405 if (ret) {
3406 DBG3("Unregister app by PID %d failed. This can happen on pid reuse",
3407 lta->pid);
3408 }
3409
852d0037
DG
3410 /* Free memory */
3411 call_rcu(&lta->pid_n.head, delete_ust_app_rcu);
3412
5b4a0ec0
DG
3413 rcu_read_unlock();
3414 return;
284d8f55
DG
3415}
3416
5b4a0ec0
DG
3417/*
3418 * Fill events array with all events name of all registered apps.
3419 */
3420int ust_app_list_events(struct lttng_event **events)
421cb601 3421{
5b4a0ec0
DG
3422 int ret, handle;
3423 size_t nbmem, count = 0;
bec39940 3424 struct lttng_ht_iter iter;
5b4a0ec0 3425 struct ust_app *app;
c617c0c6 3426 struct lttng_event *tmp_event;
421cb601 3427
5b4a0ec0 3428 nbmem = UST_APP_EVENT_LIST_SIZE;
c617c0c6
MD
3429 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event));
3430 if (tmp_event == NULL) {
5b4a0ec0
DG
3431 PERROR("zmalloc ust app events");
3432 ret = -ENOMEM;
421cb601
DG
3433 goto error;
3434 }
3435
5b4a0ec0 3436 rcu_read_lock();
421cb601 3437
852d0037 3438 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
90eaa0d2 3439 struct lttng_ust_tracepoint_iter uiter;
ac3bd9c0 3440
840cb59c 3441 health_code_update();
86acf0da 3442
e0c7ec2b
DG
3443 if (!app->compatible) {
3444 /*
3445 * TODO: In time, we should notify the caller of this error by
3446 * reporting that this is a version error.
3447 */
3448 continue;
3449 }
fb45065e 3450 pthread_mutex_lock(&app->sock_lock);
852d0037 3451 handle = ustctl_tracepoint_list(app->sock);
5b4a0ec0 3452 if (handle < 0) {
ffe60014
DG
3453 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
3454 ERR("UST app list events getting handle failed for app pid %d",
3455 app->pid);
3456 }
fb45065e 3457 pthread_mutex_unlock(&app->sock_lock);
5b4a0ec0
DG
3458 continue;
3459 }
421cb601 3460
852d0037 3461 while ((ret = ustctl_tracepoint_list_get(app->sock, handle,
fb54cdbf 3462 &uiter)) != -LTTNG_UST_ERR_NOENT) {
ffe60014
DG
3463 /* Handle ustctl error. */
3464 if (ret < 0) {
fb45065e
MD
3465 int release_ret;
3466
a2ba1ab0 3467 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
ffe60014
DG
3468 ERR("UST app tp list get failed for app %d with ret %d",
3469 app->sock, ret);
3470 } else {
3471 DBG3("UST app tp list get failed. Application is dead");
3757b385
DG
3472 /*
3473 * This is normal behavior, an application can die during the
3474 * creation process. Don't report an error so the execution can
3475 * continue normally.
3476 */
3477 break;
ffe60014 3478 }
98f595d4 3479 free(tmp_event);
fb45065e
MD
3480 release_ret = ustctl_release_handle(app->sock, handle);
3481 if (release_ret != -LTTNG_UST_ERR_EXITING && release_ret != -EPIPE) {
3482 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
3483 }
3484 pthread_mutex_unlock(&app->sock_lock);
ffe60014
DG
3485 goto rcu_error;
3486 }
3487
840cb59c 3488 health_code_update();
815564d8 3489 if (count >= nbmem) {
d7b3776f 3490 /* In case the realloc fails, we free the memory */
53efb85a
MD
3491 struct lttng_event *new_tmp_event;
3492 size_t new_nbmem;
3493
3494 new_nbmem = nbmem << 1;
3495 DBG2("Reallocating event list from %zu to %zu entries",
3496 nbmem, new_nbmem);
3497 new_tmp_event = realloc(tmp_event,
3498 new_nbmem * sizeof(struct lttng_event));
3499 if (new_tmp_event == NULL) {
fb45065e
MD
3500 int release_ret;
3501
5b4a0ec0 3502 PERROR("realloc ust app events");
c617c0c6 3503 free(tmp_event);
5b4a0ec0 3504 ret = -ENOMEM;
fb45065e
MD
3505 release_ret = ustctl_release_handle(app->sock, handle);
3506 if (release_ret != -LTTNG_UST_ERR_EXITING && release_ret != -EPIPE) {
3507 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
3508 }
3509 pthread_mutex_unlock(&app->sock_lock);
5b4a0ec0
DG
3510 goto rcu_error;
3511 }
53efb85a
MD
3512 /* Zero the new memory */
3513 memset(new_tmp_event + nbmem, 0,
3514 (new_nbmem - nbmem) * sizeof(struct lttng_event));
3515 nbmem = new_nbmem;
3516 tmp_event = new_tmp_event;
5b4a0ec0 3517 }
c617c0c6
MD
3518 memcpy(tmp_event[count].name, uiter.name, LTTNG_UST_SYM_NAME_LEN);
3519 tmp_event[count].loglevel = uiter.loglevel;
3520 tmp_event[count].type = (enum lttng_event_type) LTTNG_UST_TRACEPOINT;
3521 tmp_event[count].pid = app->pid;
3522 tmp_event[count].enabled = -1;
5b4a0ec0 3523 count++;
421cb601 3524 }
fb45065e
MD
3525 ret = ustctl_release_handle(app->sock, handle);
3526 pthread_mutex_unlock(&app->sock_lock);
3527 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
3528 ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
3529 }
421cb601
DG
3530 }
3531
5b4a0ec0 3532 ret = count;
c617c0c6 3533 *events = tmp_event;
421cb601 3534
5b4a0ec0 3535 DBG2("UST app list events done (%zu events)", count);
421cb601 3536
5b4a0ec0
DG
3537rcu_error:
3538 rcu_read_unlock();
421cb601 3539error:
840cb59c 3540 health_code_update();
5b4a0ec0 3541 return ret;
421cb601
DG
3542}
3543
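/*
 * Illustrative sketch, not part of ust-app.c: how a caller is expected to use
 * ust_app_list_events(). On success the return value is the number of entries
 * and *events points to an allocated array owned by the caller, which must be
 * freed. The helper name is hypothetical and error handling is minimal.
 */
static void example_dump_ust_events(void)
{
	int i, nb_events;
	struct lttng_event *events = NULL;

	nb_events = ust_app_list_events(&events);
	if (nb_events < 0) {
		ERR("Listing UST app events failed (%d)", nb_events);
		return;
	}

	for (i = 0; i < nb_events; i++) {
		DBG("UST event %s (pid %d)", events[i].name, events[i].pid);
	}

	free(events);
}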
f37d259d
MD
3544/*
3545 * Fill the fields array with all event fields of all registered apps.
3546 */
3547int ust_app_list_event_fields(struct lttng_event_field **fields)
3548{
3549 int ret, handle;
3550 size_t nbmem, count = 0;
3551 struct lttng_ht_iter iter;
3552 struct ust_app *app;
c617c0c6 3553 struct lttng_event_field *tmp_event;
f37d259d
MD
3554
3555 nbmem = UST_APP_EVENT_LIST_SIZE;
c617c0c6
MD
3556 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event_field));
3557 if (tmp_event == NULL) {
f37d259d
MD
3558 PERROR("zmalloc ust app event fields");
3559 ret = -ENOMEM;
3560 goto error;
3561 }
3562
3563 rcu_read_lock();
3564
3565 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3566 struct lttng_ust_field_iter uiter;
3567
840cb59c 3568 health_code_update();
86acf0da 3569
f37d259d
MD
3570 if (!app->compatible) {
3571 /*
3572 * TODO: In time, we should notify the caller of this error by
3573 * reporting that this is a version error.
3574 */
3575 continue;
3576 }
fb45065e 3577 pthread_mutex_lock(&app->sock_lock);
f37d259d
MD
3578 handle = ustctl_tracepoint_field_list(app->sock);
3579 if (handle < 0) {
ffe60014
DG
3580 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
3581 ERR("UST app list field getting handle failed for app pid %d",
3582 app->pid);
3583 }
fb45065e 3584 pthread_mutex_unlock(&app->sock_lock);
f37d259d
MD
3585 continue;
3586 }
3587
3588 while ((ret = ustctl_tracepoint_field_list_get(app->sock, handle,
fb54cdbf 3589 &uiter)) != -LTTNG_UST_ERR_NOENT) {
ffe60014
DG
3590 /* Handle ustctl error. */
3591 if (ret < 0) {
fb45065e
MD
3592 int release_ret;
3593
a2ba1ab0 3594 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
ffe60014
DG
3595 ERR("UST app tp list field failed for app %d with ret %d",
3596 app->sock, ret);
3597 } else {
3598 DBG3("UST app tp list field failed. Application is dead");
3757b385
DG
3599 /*
3600 * This is normal behavior, an application can die during the
3601 * creation process. Don't report an error so the execution can
98f595d4 3602 * continue normally. Reset list and count for next app.
3757b385
DG
3603 */
3604 break;
ffe60014 3605 }
98f595d4 3606 free(tmp_event);
fb45065e
MD
3607 release_ret = ustctl_release_handle(app->sock, handle);
3608 pthread_mutex_unlock(&app->sock_lock);
3609 if (release_ret != -LTTNG_UST_ERR_EXITING && release_ret != -EPIPE) {
3610 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
3611 }
ffe60014
DG
3612 goto rcu_error;
3613 }
3614
840cb59c 3615 health_code_update();
f37d259d 3616 if (count >= nbmem) {
d7b3776f 3617 /* In case the realloc fails, we free the memory */
53efb85a
MD
3618 struct lttng_event_field *new_tmp_event;
3619 size_t new_nbmem;
3620
3621 new_nbmem = nbmem << 1;
3622 DBG2("Reallocating event field list from %zu to %zu entries",
3623 nbmem, new_nbmem);
3624 new_tmp_event = realloc(tmp_event,
3625 new_nbmem * sizeof(struct lttng_event_field));
3626 if (new_tmp_event == NULL) {
fb45065e
MD
3627 int release_ret;
3628
f37d259d 3629 PERROR("realloc ust app event fields");
c617c0c6 3630 free(tmp_event);
f37d259d 3631 ret = -ENOMEM;
fb45065e
MD
3632 release_ret = ustctl_release_handle(app->sock, handle);
3633 pthread_mutex_unlock(&app->sock_lock);
3634 if (release_ret != -LTTNG_UST_ERR_EXITING && release_ret != -EPIPE) {
3635 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
3636 }
f37d259d
MD
3637 goto rcu_error;
3638 }
53efb85a
MD
3639 /* Zero the new memory */
3640 memset(new_tmp_event + nbmem, 0,
3641 (new_nbmem - nbmem) * sizeof(struct lttng_event_field));
3642 nbmem = new_nbmem;
3643 tmp_event = new_tmp_event;
f37d259d 3644 }
f37d259d 3645
c617c0c6 3646 memcpy(tmp_event[count].field_name, uiter.field_name, LTTNG_UST_SYM_NAME_LEN);
2e84128e
DG
3647 /* Mapping between these enums matches 1 to 1. */
3648 tmp_event[count].type = (enum lttng_event_field_type) uiter.type;
c617c0c6 3649 tmp_event[count].nowrite = uiter.nowrite;
f37d259d 3650
c617c0c6
MD
3651 memcpy(tmp_event[count].event.name, uiter.event_name, LTTNG_UST_SYM_NAME_LEN);
3652 tmp_event[count].event.loglevel = uiter.loglevel;
2e84128e 3653 tmp_event[count].event.type = LTTNG_EVENT_TRACEPOINT;
c617c0c6
MD
3654 tmp_event[count].event.pid = app->pid;
3655 tmp_event[count].event.enabled = -1;
f37d259d
MD
3656 count++;
3657 }
fb45065e
MD
3658 ret = ustctl_release_handle(app->sock, handle);
3659 pthread_mutex_unlock(&app->sock_lock);
3660 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
3661 ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
3662 }
f37d259d
MD
3663 }
3664
3665 ret = count;
c617c0c6 3666 *fields = tmp_event;
f37d259d
MD
3667
3668 DBG2("UST app list event fields done (%zu events)", count);
3669
3670rcu_error:
3671 rcu_read_unlock();
3672error:
840cb59c 3673 health_code_update();
f37d259d
MD
3674 return ret;
3675}
3676
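/*
 * Illustrative sketch, not part of ust-app.c: the array growth strategy shared
 * by the two listing functions above. The buffer is doubled when full and only
 * the newly added half is zeroed; on realloc() failure the original pointer is
 * left untouched so the caller can still free it. grow_event_array() is a
 * hypothetical helper name.
 */
static int grow_event_array(struct lttng_event **array, size_t *nbmem)
{
	size_t new_nbmem = *nbmem << 1;
	struct lttng_event *new_array;

	new_array = realloc(*array, new_nbmem * sizeof(*new_array));
	if (!new_array) {
		/* *array is still valid and owned by the caller. */
		return -ENOMEM;
	}

	/* Zero only the new entries; the old ones are preserved by realloc. */
	memset(new_array + *nbmem, 0, (new_nbmem - *nbmem) * sizeof(*new_array));

	*array = new_array;
	*nbmem = new_nbmem;
	return 0;
}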
5b4a0ec0
DG
3677/*
3678 * Free and clean all traceable apps of the global list.
36b588ed
MD
3679 *
3680 * Should _NOT_ be called with RCU read-side lock held.
5b4a0ec0
DG
3681 */
3682void ust_app_clean_list(void)
421cb601 3683{
5b4a0ec0 3684 int ret;
659ed79f 3685 struct ust_app *app;
bec39940 3686 struct lttng_ht_iter iter;
421cb601 3687
5b4a0ec0 3688 DBG2("UST app cleaning registered apps hash table");
421cb601 3689
5b4a0ec0 3690 rcu_read_lock();
421cb601 3691
f1b711c4
MD
3692 if (ust_app_ht) {
3693 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3694 ret = lttng_ht_del(ust_app_ht, &iter);
3695 assert(!ret);
3696 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
3697 }
421cb601
DG
3698 }
3699
852d0037 3700 /* Cleanup socket hash table */
f1b711c4
MD
3701 if (ust_app_ht_by_sock) {
3702 cds_lfht_for_each_entry(ust_app_ht_by_sock->ht, &iter.iter, app,
3703 sock_n.node) {
3704 ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
3705 assert(!ret);
3706 }
bec39940 3707 }
852d0037 3708
d88aee68 3709 /* Cleanup notify socket hash table */
f1b711c4
MD
3710 if (ust_app_ht_by_notify_sock) {
3711 cds_lfht_for_each_entry(ust_app_ht_by_notify_sock->ht, &iter.iter, app,
3712 notify_sock_n.node) {
3713 ret = lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
3714 assert(!ret);
3715 }
d88aee68 3716 }
36b588ed 3717 rcu_read_unlock();
d88aee68 3718
bec39940 3719 /* Destroy is done only when the ht is empty */
f1b711c4
MD
3720 if (ust_app_ht) {
3721 ht_cleanup_push(ust_app_ht);
3722 }
3723 if (ust_app_ht_by_sock) {
3724 ht_cleanup_push(ust_app_ht_by_sock);
3725 }
3726 if (ust_app_ht_by_notify_sock) {
3727 ht_cleanup_push(ust_app_ht_by_notify_sock);
3728 }
5b4a0ec0
DG
3729}
3730
3731/*
3732 * Init UST app hash table.
3733 */
57703f6e 3734int ust_app_ht_alloc(void)
5b4a0ec0 3735{
bec39940 3736 ust_app_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
57703f6e
MD
3737 if (!ust_app_ht) {
3738 return -1;
3739 }
852d0037 3740 ust_app_ht_by_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
57703f6e
MD
3741 if (!ust_app_ht_by_sock) {
3742 return -1;
3743 }
d0b96690 3744 ust_app_ht_by_notify_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
57703f6e
MD
3745 if (!ust_app_ht_by_notify_sock) {
3746 return -1;
3747 }
3748 return 0;
421cb601
DG
3749}
3750
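/*
 * Illustrative sketch, not part of ust-app.c: the expected pairing of the hash
 * table allocation and cleanup entry points above inside the session daemon.
 * Note that ust_app_clean_list() must not be called with the RCU read-side
 * lock held. The surrounding setup function is hypothetical.
 */
static int example_ust_app_setup_and_teardown(void)
{
	if (ust_app_ht_alloc() < 0) {
		ERR("Failed to allocate the UST app hash tables");
		return -1;
	}

	/* ... registration threads run and populate the hash tables ... */

	ust_app_clean_list();
	return 0;
}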
78f0bacd
DG
3751/*
3752 * For a specific UST session, disable the channel for all registered apps.
3753 */
35a9059d 3754int ust_app_disable_channel_glb(struct ltt_ust_session *usess,
78f0bacd
DG
3755 struct ltt_ust_channel *uchan)
3756{
3757 int ret = 0;
bec39940
DG
3758 struct lttng_ht_iter iter;
3759 struct lttng_ht_node_str *ua_chan_node;
78f0bacd
DG
3760 struct ust_app *app;
3761 struct ust_app_session *ua_sess;
8535a6d9 3762 struct ust_app_channel *ua_chan;
78f0bacd
DG
3763
3764 if (usess == NULL || uchan == NULL) {
3765 ERR("Disabling UST global channel with NULL values");
3766 ret = -1;
3767 goto error;
3768 }
3769
d9bf3ca4 3770 DBG2("UST app disabling channel %s from global domain for session id %" PRIu64,
a991f516 3771 uchan->name, usess->id);
78f0bacd
DG
3772
3773 rcu_read_lock();
3774
3775 /* For every registered applications */
852d0037 3776 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
bec39940 3777 struct lttng_ht_iter uiter;
e0c7ec2b
DG
3778 if (!app->compatible) {
3779 /*
3780 * TODO: In time, we should notify the caller of this error by
3781 * reporting that this is a version error.
3782 */
3783 continue;
3784 }
78f0bacd
DG
3785 ua_sess = lookup_session_by_app(usess, app);
3786 if (ua_sess == NULL) {
3787 continue;
3788 }
3789
8535a6d9 3790 /* Get channel */
bec39940
DG
3791 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3792 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
8535a6d9
DG
3793 /* If the session is found for the app, the channel must be there */
3794 assert(ua_chan_node);
3795
3796 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3797 /* The channel must not be already disabled */
3798 assert(ua_chan->enabled == 1);
3799
3800 /* Disable channel onto application */
3801 ret = disable_ust_app_channel(ua_sess, ua_chan, app);
78f0bacd
DG
3802 if (ret < 0) {
3803 /* XXX: We might want to report this error at some point... */
3804 continue;
3805 }
3806 }
3807
3808 rcu_read_unlock();
3809
3810error:
3811 return ret;
3812}
3813
3814/*
3815 * For a specific UST session, enable the channel for all registered apps.
3816 */
35a9059d 3817int ust_app_enable_channel_glb(struct ltt_ust_session *usess,
78f0bacd
DG
3818 struct ltt_ust_channel *uchan)
3819{
3820 int ret = 0;
bec39940 3821 struct lttng_ht_iter iter;
78f0bacd
DG
3822 struct ust_app *app;
3823 struct ust_app_session *ua_sess;
3824
3825 if (usess == NULL || uchan == NULL) {
3826 ERR("Adding UST global channel to NULL values");
3827 ret = -1;
3828 goto error;
3829 }
3830
d9bf3ca4 3831 DBG2("UST app enabling channel %s to global domain for session id %" PRIu64,
a991f516 3832 uchan->name, usess->id);
78f0bacd
DG
3833
3834 rcu_read_lock();
3835
3836 /* For every registered applications */
852d0037 3837 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
e0c7ec2b
DG
3838 if (!app->compatible) {
3839 /*
3840 * TODO: In time, we should notify the caller of this error by
3841 * reporting that this is a version error.
3842 */
3843 continue;
3844 }
78f0bacd
DG
3845 ua_sess = lookup_session_by_app(usess, app);
3846 if (ua_sess == NULL) {
3847 continue;
3848 }
3849
3850 /* Enable channel onto application */
3851 ret = enable_ust_app_channel(ua_sess, uchan, app);
3852 if (ret < 0) {
3853 /* XXX: We might want to report this error at some point... */
3854 continue;
3855 }
3856 }
3857
3858 rcu_read_unlock();
3859
3860error:
3861 return ret;
3862}
3863
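/*
 * Illustrative sketch, not part of ust-app.c: the traversal idiom shared by
 * the *_glb helpers in this file. The global application hash table is walked
 * under the RCU read-side lock and incompatible applications are skipped.
 * The callback parameter is hypothetical.
 */
static void example_for_each_compatible_app(void (*cb)(struct ust_app *app))
{
	struct lttng_ht_iter iter;
	struct ust_app *app;

	rcu_read_lock();
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/* Tracer ABI mismatch: leave this application alone. */
			continue;
		}
		cb(app);
	}
	rcu_read_unlock();
}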
b0a40d28
DG
3864/*
3865 * Disable an event in a channel and for a specific session.
3866 */
35a9059d
DG
3867int ust_app_disable_event_glb(struct ltt_ust_session *usess,
3868 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
b0a40d28
DG
3869{
3870 int ret = 0;
bec39940
DG
3871 struct lttng_ht_iter iter, uiter;
3872 struct lttng_ht_node_str *ua_chan_node, *ua_event_node;
b0a40d28
DG
3873 struct ust_app *app;
3874 struct ust_app_session *ua_sess;
3875 struct ust_app_channel *ua_chan;
3876 struct ust_app_event *ua_event;
3877
3878 DBG("UST app disabling event %s for all apps in channel "
d9bf3ca4
MD
3879 "%s for session id %" PRIu64,
3880 uevent->attr.name, uchan->name, usess->id);
b0a40d28
DG
3881
3882 rcu_read_lock();
3883
3884 /* For all registered applications */
852d0037 3885 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
e0c7ec2b
DG
3886 if (!app->compatible) {
3887 /*
3888 * TODO: In time, we should notify the caller of this error by
3889 * reporting that this is a version error.
3890 */
3891 continue;
3892 }
b0a40d28
DG
3893 ua_sess = lookup_session_by_app(usess, app);
3894 if (ua_sess == NULL) {
3895 /* Next app */
3896 continue;
3897 }
3898
3899 /* Lookup channel in the ust app session */
bec39940
DG
3900 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3901 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
b0a40d28 3902 if (ua_chan_node == NULL) {
d9bf3ca4 3903 DBG2("Channel %s not found in session id %" PRIu64 " for app pid %d."
852d0037 3904 "Skipping", uchan->name, usess->id, app->pid);
b0a40d28
DG
3905 continue;
3906 }
3907 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3908
bec39940
DG
3909 lttng_ht_lookup(ua_chan->events, (void *)uevent->attr.name, &uiter);
3910 ua_event_node = lttng_ht_iter_get_node_str(&uiter);
b0a40d28
DG
3911 if (ua_event_node == NULL) {
3912 DBG2("Event %s not found in channel %s for app pid %d."
852d0037 3913 "Skipping", uevent->attr.name, uchan->name, app->pid);
b0a40d28
DG
3914 continue;
3915 }
3916 ua_event = caa_container_of(ua_event_node, struct ust_app_event, node);
3917
7f79d3a1 3918 ret = disable_ust_app_event(ua_sess, ua_event, app);
b0a40d28
DG
3919 if (ret < 0) {
3920 /* XXX: Report error someday... */
3921 continue;
3922 }
3923 }
3924
3925 rcu_read_unlock();
3926
3927 return ret;
3928}
3929
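/*
 * Illustrative sketch, not part of ust-app.c: the lookup idiom used above to
 * resolve a channel by name inside an application session. The string-keyed
 * hash table node is embedded in struct ust_app_channel, so the containing
 * object is recovered with caa_container_of(). The helper name is
 * hypothetical.
 */
static struct ust_app_channel *example_find_app_channel(
		struct ust_app_session *ua_sess, char *name)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;

	lttng_ht_lookup(ua_sess->channels, (void *) name, &iter);
	ua_chan_node = lttng_ht_iter_get_node_str(&iter);
	if (!ua_chan_node) {
		/* Channel not created for this application session. */
		return NULL;
	}
	return caa_container_of(ua_chan_node, struct ust_app_channel, node);
}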
421cb601 3930/*
5b4a0ec0 3931 * For a specific UST session, create the channel for all registered apps.
421cb601 3932 */
35a9059d 3933int ust_app_create_channel_glb(struct ltt_ust_session *usess,
48842b30
DG
3934 struct ltt_ust_channel *uchan)
3935{
3d8ca23b 3936 int ret = 0, created;
bec39940 3937 struct lttng_ht_iter iter;
48842b30 3938 struct ust_app *app;
3d8ca23b 3939 struct ust_app_session *ua_sess = NULL;
48842b30 3940
fc34caaa
DG
3941 /* Very wrong code flow */
3942 assert(usess);
3943 assert(uchan);
421cb601 3944
d9bf3ca4 3945 DBG2("UST app adding channel %s to UST domain for session id %" PRIu64,
a991f516 3946 uchan->name, usess->id);
48842b30
DG
3947
3948 rcu_read_lock();
421cb601 3949
5b4a0ec0 3950 /* For every registered applications */
852d0037 3951 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
e0c7ec2b
DG
3952 if (!app->compatible) {
3953 /*
3954 * TODO: In time, we should notify the caller of this error by
3955 * reporting that this is a version error.
3956 */
3957 continue;
3958 }
a9ad0c8f
MD
3959 if (!trace_ust_pid_tracker_lookup(usess, app->pid)) {
3960 /* Skip. */
3961 continue;
3962 }
3963
edb67388
DG
3964 /*
3965 * Create session on the tracer side and add it to app session HT. Note
3966 * that if session exist, it will simply return a pointer to the ust
3967 * app session.
3968 */
3d8ca23b
DG
3969 ret = create_ust_app_session(usess, app, &ua_sess, &created);
3970 if (ret < 0) {
3971 switch (ret) {
3972 case -ENOTCONN:
3973 /*
3974 * The application's socket is not valid. Either a bad socket
3975 * or a timeout on it. We can't inform the caller that for a
3976 * specific app, the session failed so let's continue here.
3977 */
a7169585 3978 ret = 0; /* Not an error. */
3d8ca23b
DG
3979 continue;
3980 case -ENOMEM:
3981 default:
3982 goto error_rcu_unlock;
3983 }
48842b30 3984 }
3d8ca23b 3985 assert(ua_sess);
48842b30 3986
d0b96690 3987 pthread_mutex_lock(&ua_sess->lock);
b161602a
MD
3988
3989 if (ua_sess->deleted) {
3990 pthread_mutex_unlock(&ua_sess->lock);
3991 continue;
3992 }
3993
d65d2de8
DG
3994 if (!strncmp(uchan->name, DEFAULT_METADATA_NAME,
3995 sizeof(uchan->name))) {
ad7a9107
DG
3996 copy_channel_attr_to_ustctl(&ua_sess->metadata_attr, &uchan->attr);
3997 ret = 0;
d65d2de8
DG
3998 } else {
3999 /* Create channel onto application. We don't need the chan ref. */
4000 ret = create_ust_app_channel(ua_sess, uchan, app,
4001 LTTNG_UST_CHAN_PER_CPU, usess, NULL);
4002 }
d0b96690 4003 pthread_mutex_unlock(&ua_sess->lock);
3d8ca23b 4004 if (ret < 0) {
3d8ca23b
DG
4005 /* Cleanup the created session if it's the case. */
4006 if (created) {
d0b96690 4007 destroy_app_session(app, ua_sess);
3d8ca23b 4008 }
a7169585
MD
4009 switch (ret) {
4010 case -ENOTCONN:
4011 /*
4012 * The application's socket is not valid. Either a bad socket
4013 * or a timeout on it. We can't inform the caller that for a
4014 * specific app, the session failed so let's continue here.
4015 */
4016 ret = 0; /* Not an error. */
4017 continue;
4018 case -ENOMEM:
4019 default:
4020 goto error_rcu_unlock;
4021 }
48842b30 4022 }
48842b30 4023 }
5b4a0ec0 4024
95e047ff 4025error_rcu_unlock:
48842b30 4026 rcu_read_unlock();
3c14c33f 4027 return ret;
48842b30
DG
4028}
4029
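/*
 * Illustrative sketch, not part of ust-app.c: the per-application error policy
 * applied above when iterating every registered application. A dead or
 * disconnected application (-ENOTCONN) is skipped silently while resource
 * errors such as -ENOMEM abort the whole operation. The helper name is
 * hypothetical.
 */
static int example_handle_per_app_error(int ret)
{
	switch (ret) {
	case -ENOTCONN:
		/* Bad or timed out application socket: skip this app. */
		return 0;
	case -ENOMEM:
	default:
		/* Global failure: propagate so the caller stops iterating. */
		return ret;
	}
}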
5b4a0ec0 4030/*
edb67388 4031 * Enable event for a specific session and channel on the tracer.
5b4a0ec0 4032 */
35a9059d 4033int ust_app_enable_event_glb(struct ltt_ust_session *usess,
48842b30
DG
4034 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
4035{
4036 int ret = 0;
bec39940 4037 struct lttng_ht_iter iter, uiter;
18eace3b 4038 struct lttng_ht_node_str *ua_chan_node;
48842b30
DG
4039 struct ust_app *app;
4040 struct ust_app_session *ua_sess;
4041 struct ust_app_channel *ua_chan;
4042 struct ust_app_event *ua_event;
48842b30 4043
d9bf3ca4 4044 DBG("UST app enabling event %s for all apps for session id %" PRIu64,
a991f516 4045 uevent->attr.name, usess->id);
48842b30 4046
edb67388
DG
4047 /*
4048 * NOTE: At this point, this function is called only if the session and
4049 * channel passed are already created for all apps and enabled on the
4050 * tracer as well.
4051 */
4052
48842b30 4053 rcu_read_lock();
421cb601
DG
4054
4055 /* For all registered applications */
852d0037 4056 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
e0c7ec2b
DG
4057 if (!app->compatible) {
4058 /*
4059 * TODO: In time, we should notify the caller of this error by
4060 * reporting that this is a version error.
4061 */
4062 continue;
4063 }
edb67388 4064 ua_sess = lookup_session_by_app(usess, app);
c4a1715b
DG
4065 if (!ua_sess) {
4066 /* The application has problem or is probably dead. */
4067 continue;
4068 }
ba767faf 4069
d0b96690
DG
4070 pthread_mutex_lock(&ua_sess->lock);
4071
b161602a
MD
4072 if (ua_sess->deleted) {
4073 pthread_mutex_unlock(&ua_sess->lock);
4074 continue;
4075 }
4076
edb67388 4077 /* Lookup channel in the ust app session */
bec39940
DG
4078 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4079 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
a7169585
MD
4080 /*
4081 * It is possible that the channel cannot be found if
4082 * the channel/event creation occurs concurrently with
4083 * an application exit.
4084 */
4085 if (!ua_chan_node) {
4086 pthread_mutex_unlock(&ua_sess->lock);
4087 continue;
4088 }
edb67388
DG
4089
4090 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4091
18eace3b
DG
4092 /* Get event node */
4093 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
39c5a3a7 4094 uevent->filter, uevent->attr.loglevel, uevent->exclusion);
18eace3b 4095 if (ua_event == NULL) {
7f79d3a1 4096 DBG3("UST app enable event %s not found for app PID %d."
852d0037 4097 "Skipping app", uevent->attr.name, app->pid);
d0b96690 4098 goto next_app;
35a9059d 4099 }
35a9059d
DG
4100
4101 ret = enable_ust_app_event(ua_sess, ua_event, app);
4102 if (ret < 0) {
d0b96690 4103 pthread_mutex_unlock(&ua_sess->lock);
7f79d3a1 4104 goto error;
48842b30 4105 }
d0b96690
DG
4106 next_app:
4107 pthread_mutex_unlock(&ua_sess->lock);
edb67388
DG
4108 }
4109
7f79d3a1 4110error:
edb67388 4111 rcu_read_unlock();
edb67388
DG
4112 return ret;
4113}
4114
4115/*
4116 * For a specific existing UST session and UST channel, creates the event for
4117 * all registered apps.
4118 */
35a9059d 4119int ust_app_create_event_glb(struct ltt_ust_session *usess,
edb67388
DG
4120 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
4121{
4122 int ret = 0;
bec39940
DG
4123 struct lttng_ht_iter iter, uiter;
4124 struct lttng_ht_node_str *ua_chan_node;
edb67388
DG
4125 struct ust_app *app;
4126 struct ust_app_session *ua_sess;
4127 struct ust_app_channel *ua_chan;
4128
d9bf3ca4 4129 DBG("UST app creating event %s for all apps for session id %" PRIu64,
a991f516 4130 uevent->attr.name, usess->id);
edb67388 4131
edb67388
DG
4132 rcu_read_lock();
4133
4134 /* For all registered applications */
852d0037 4135 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
e0c7ec2b
DG
4136 if (!app->compatible) {
4137 /*
4138 * TODO: In time, we should notify the caller of this error by
4139 * reporting that this is a version error.
4140 */
4141 continue;
4142 }
edb67388 4143 ua_sess = lookup_session_by_app(usess, app);
c4a1715b
DG
4144 if (!ua_sess) {
4145 /* The application has problem or is probably dead. */
4146 continue;
4147 }
48842b30 4148
d0b96690 4149 pthread_mutex_lock(&ua_sess->lock);
b161602a
MD
4150
4151 if (ua_sess->deleted) {
4152 pthread_mutex_unlock(&ua_sess->lock);
4153 continue;
4154 }
4155
48842b30 4156 /* Lookup channel in the ust app session */
bec39940
DG
4157 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4158 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
edb67388
DG
4159 /* If the channel is not found, there is a code flow error */
4160 assert(ua_chan_node);
4161
48842b30
DG
4162 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4163
edb67388 4164 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
d0b96690 4165 pthread_mutex_unlock(&ua_sess->lock);
edb67388 4166 if (ret < 0) {
49c336c1 4167 if (ret != -LTTNG_UST_ERR_EXIST) {
fc34caaa
DG
4168 /* Possible value at this point: -ENOMEM. If so, we stop! */
4169 break;
4170 }
4171 DBG2("UST app event %s already exist on app PID %d",
852d0037 4172 uevent->attr.name, app->pid);
5b4a0ec0 4173 continue;
48842b30 4174 }
48842b30 4175 }
5b4a0ec0 4176
48842b30
DG
4177 rcu_read_unlock();
4178
4179 return ret;
4180}
4181
5b4a0ec0
DG
4182/*
4183 * Start tracing for a specific UST session and app.
4184 */
b34cbebf 4185static
421cb601 4186int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app)
48842b30
DG
4187{
4188 int ret = 0;
48842b30 4189 struct ust_app_session *ua_sess;
48842b30 4190
852d0037 4191 DBG("Starting tracing for ust app pid %d", app->pid);
5cf5d0e7 4192
509cbaf8
MD
4193 rcu_read_lock();
4194
e0c7ec2b
DG
4195 if (!app->compatible) {
4196 goto end;
4197 }
4198
421cb601
DG
4199 ua_sess = lookup_session_by_app(usess, app);
4200 if (ua_sess == NULL) {
d42f20df
DG
4201 /* The session is in teardown process. Ignore and continue. */
4202 goto end;
421cb601 4203 }
48842b30 4204
d0b96690
DG
4205 pthread_mutex_lock(&ua_sess->lock);
4206
b161602a
MD
4207 if (ua_sess->deleted) {
4208 pthread_mutex_unlock(&ua_sess->lock);
4209 goto end;
4210 }
4211
aea829b3
DG
4212 /* Upon restart, we skip the setup, already done */
4213 if (ua_sess->started) {
8be98f9a 4214 goto skip_setup;
aea829b3 4215 }
8be98f9a 4216
a4b92340
DG
4217 /* Create directories if consumer is LOCAL and has a path defined. */
4218 if (usess->consumer->type == CONSUMER_DST_LOCAL &&
4219 strlen(usess->consumer->dst.trace_path) > 0) {
4220 ret = run_as_mkdir_recursive(usess->consumer->dst.trace_path,
7972aab2 4221 S_IRWXU | S_IRWXG, ua_sess->euid, ua_sess->egid);
a4b92340 4222 if (ret < 0) {
df5b86c8 4223 if (errno != EEXIST) {
a4b92340 4224 ERR("Trace directory creation error");
d0b96690 4225 goto error_unlock;
421cb601 4226 }
173af62f 4227 }
7753dea8 4228 }
aea829b3 4229
d65d2de8
DG
4230 /*
4231 * Create the metadata for the application. This returns gracefully if a
4232 * metadata was already set for the session.
4233 */
ad7a9107 4234 ret = create_ust_app_metadata(ua_sess, app, usess->consumer);
421cb601 4235 if (ret < 0) {
d0b96690 4236 goto error_unlock;
421cb601 4237 }
48842b30 4238
840cb59c 4239 health_code_update();
86acf0da 4240
8be98f9a 4241skip_setup:
421cb601 4242 /* This start the UST tracing */
fb45065e 4243 pthread_mutex_lock(&app->sock_lock);
852d0037 4244 ret = ustctl_start_session(app->sock, ua_sess->handle);
fb45065e 4245 pthread_mutex_unlock(&app->sock_lock);
421cb601 4246 if (ret < 0) {
ffe60014
DG
4247 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4248 ERR("Error starting tracing for app pid: %d (ret: %d)",
4249 app->pid, ret);
4250 } else {
4251 DBG("UST app start session failed. Application is dead.");
3757b385
DG
4252 /*
4253 * This is normal behavior, an application can die during the
4254 * creation process. Don't report an error so the execution can
4255 * continue normally.
4256 */
4257 pthread_mutex_unlock(&ua_sess->lock);
4258 goto end;
ffe60014 4259 }
d0b96690 4260 goto error_unlock;
421cb601 4261 }
5b4a0ec0 4262
55c3953d
DG
4263 /* Indicate that the session has been started once */
4264 ua_sess->started = 1;
4265
d0b96690
DG
4266 pthread_mutex_unlock(&ua_sess->lock);
4267
840cb59c 4268 health_code_update();
86acf0da 4269
421cb601 4270 /* Quiescent wait after starting trace */
fb45065e 4271 pthread_mutex_lock(&app->sock_lock);
ffe60014 4272 ret = ustctl_wait_quiescent(app->sock);
fb45065e 4273 pthread_mutex_unlock(&app->sock_lock);
ffe60014
DG
4274 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4275 ERR("UST app wait quiescent failed for app pid %d ret %d",
4276 app->pid, ret);
4277 }
48842b30 4278
e0c7ec2b
DG
4279end:
4280 rcu_read_unlock();
840cb59c 4281 health_code_update();
421cb601 4282 return 0;
48842b30 4283
d0b96690
DG
4284error_unlock:
4285 pthread_mutex_unlock(&ua_sess->lock);
509cbaf8 4286 rcu_read_unlock();
840cb59c 4287 health_code_update();
421cb601
DG
4288 return -1;
4289}
48842b30 4290
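/*
 * Illustrative sketch, not part of ust-app.c: the "create if missing, tolerate
 * existing" directory creation done by ust_app_start_trace() above for local
 * output. A failure caused by a concurrent creation (errno == EEXIST) is not
 * treated as an error. The helper name is hypothetical.
 */
static int example_create_trace_dir(const char *path, uid_t uid, gid_t gid)
{
	int ret;

	ret = run_as_mkdir_recursive(path, S_IRWXU | S_IRWXG, uid, gid);
	if (ret < 0 && errno != EEXIST) {
		ERR("Trace directory creation error");
		return -1;
	}
	return 0;
}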
8be98f9a
MD
4291/*
4292 * Stop tracing for a specific UST session and app.
4293 */
b34cbebf 4294static
8be98f9a
MD
4295int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
4296{
4297 int ret = 0;
4298 struct ust_app_session *ua_sess;
7972aab2 4299 struct ust_registry_session *registry;
8be98f9a 4300
852d0037 4301 DBG("Stopping tracing for ust app pid %d", app->pid);
8be98f9a
MD
4302
4303 rcu_read_lock();
4304
e0c7ec2b 4305 if (!app->compatible) {
d88aee68 4306 goto end_no_session;
e0c7ec2b
DG
4307 }
4308
8be98f9a
MD
4309 ua_sess = lookup_session_by_app(usess, app);
4310 if (ua_sess == NULL) {
d88aee68 4311 goto end_no_session;
8be98f9a
MD
4312 }
4313
d88aee68
DG
4314 pthread_mutex_lock(&ua_sess->lock);
4315
b161602a
MD
4316 if (ua_sess->deleted) {
4317 pthread_mutex_unlock(&ua_sess->lock);
4318 goto end_no_session;
4319 }
4320
9bc07046
DG
4321 /*
4322 * If started = 0, it means that stop trace has been called for a session
c45536e1
DG
4323 * that was never started. It's possible since we can have a failed start
4324 * from either the application manager thread or the command thread. Simply
4325 * indicate that this is a stop error.
9bc07046 4326 */
f9dfc3d9 4327 if (!ua_sess->started) {
c45536e1
DG
4328 goto error_rcu_unlock;
4329 }
7db205b5 4330
840cb59c 4331 health_code_update();
86acf0da 4332
9d6c7d3f 4333 /* This inhibits UST tracing */
fb45065e 4334 pthread_mutex_lock(&app->sock_lock);
852d0037 4335 ret = ustctl_stop_session(app->sock, ua_sess->handle);
fb45065e 4336 pthread_mutex_unlock(&app->sock_lock);
9d6c7d3f 4337 if (ret < 0) {
ffe60014
DG
4338 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4339 ERR("Error stopping tracing for app pid: %d (ret: %d)",
4340 app->pid, ret);
4341 } else {
4342 DBG("UST app stop session failed. Application is dead.");
3757b385
DG
4343 /*
4344 * This is normal behavior, an application can die during the
4345 * creation process. Don't report an error so the execution can
4346 * continue normally.
4347 */
4348 goto end_unlock;
ffe60014 4349 }
9d6c7d3f
DG
4350 goto error_rcu_unlock;
4351 }
4352
840cb59c 4353 health_code_update();
86acf0da 4354
9d6c7d3f 4355 /* Quiescent wait after stopping trace */
fb45065e 4356 pthread_mutex_lock(&app->sock_lock);
ffe60014 4357 ret = ustctl_wait_quiescent(app->sock);
fb45065e 4358 pthread_mutex_unlock(&app->sock_lock);
ffe60014
DG
4359 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4360 ERR("UST app wait quiescent failed for app pid %d ret %d",
4361 app->pid, ret);
4362 }
9d6c7d3f 4363
840cb59c 4364 health_code_update();
86acf0da 4365
b34cbebf
MD
4366 registry = get_session_registry(ua_sess);
4367 assert(registry);
1b532a60 4368
ce34fcd0
MD
4369 /* Push metadata for application before freeing the application. */
4370 (void) push_metadata(registry, ua_sess->consumer);
b34cbebf 4371
3757b385 4372end_unlock:
b34cbebf
MD
4373 pthread_mutex_unlock(&ua_sess->lock);
4374end_no_session:
4375 rcu_read_unlock();
4376 health_code_update();
4377 return 0;
4378
4379error_rcu_unlock:
4380 pthread_mutex_unlock(&ua_sess->lock);
4381 rcu_read_unlock();
4382 health_code_update();
4383 return -1;
4384}
4385
b34cbebf 4386static
c4b88406
MD
4387int ust_app_flush_app_session(struct ust_app *app,
4388 struct ust_app_session *ua_sess)
b34cbebf 4389{
c4b88406 4390 int ret, retval = 0;
b34cbebf 4391 struct lttng_ht_iter iter;
b34cbebf 4392 struct ust_app_channel *ua_chan;
c4b88406 4393 struct consumer_socket *socket;
b34cbebf 4394
c4b88406 4395 DBG("Flushing app session buffers for ust app pid %d", app->pid);
b34cbebf
MD
4396
4397 rcu_read_lock();
4398
4399 if (!app->compatible) {
c4b88406 4400 goto end_not_compatible;
b34cbebf
MD
4401 }
4402
4403 pthread_mutex_lock(&ua_sess->lock);
4404
b161602a
MD
4405 if (ua_sess->deleted) {
4406 goto end_deleted;
4407 }
4408
b34cbebf
MD
4409 health_code_update();
4410
9d6c7d3f 4411 /* Flushing buffers */
c4b88406
MD
4412 socket = consumer_find_socket_by_bitness(app->bits_per_long,
4413 ua_sess->consumer);
ce34fcd0
MD
4414
4415 /* Flush buffers and push metadata. */
4416 switch (ua_sess->buffer_type) {
4417 case LTTNG_BUFFER_PER_PID:
4418 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
4419 node.node) {
4420 health_code_update();
4421 assert(ua_chan->is_sent);
4422 ret = consumer_flush_channel(socket, ua_chan->key);
4423 if (ret) {
4424 ERR("Error flushing consumer channel");
4425 retval = -1;
4426 continue;
4427 }
8be98f9a 4428 }
ce34fcd0
MD
4429 break;
4430 case LTTNG_BUFFER_PER_UID:
4431 default:
4432 assert(0);
4433 break;
8be98f9a 4434 }
8be98f9a 4435
840cb59c 4436 health_code_update();
86acf0da 4437
b161602a 4438end_deleted:
d88aee68 4439 pthread_mutex_unlock(&ua_sess->lock);
ce34fcd0 4440
c4b88406
MD
4441end_not_compatible:
4442 rcu_read_unlock();
4443 health_code_update();
4444 return retval;
4445}
4446
4447/*
ce34fcd0
MD
4448 * Flush buffers for all applications for a specific UST session.
4449 * Called with UST session lock held.
c4b88406
MD
4450 */
4451static
ce34fcd0 4452int ust_app_flush_session(struct ltt_ust_session *usess)
c4b88406
MD
4453
4454{
99b1411c 4455 int ret = 0;
c4b88406 4456
ce34fcd0 4457 DBG("Flushing session buffers for all ust apps");
c4b88406
MD
4458
4459 rcu_read_lock();
4460
ce34fcd0
MD
4461 /* Flush buffers and push metadata. */
4462 switch (usess->buffer_type) {
4463 case LTTNG_BUFFER_PER_UID:
4464 {
4465 struct buffer_reg_uid *reg;
4466 struct lttng_ht_iter iter;
4467
4468 /* Flush all per UID buffers associated to that session. */
4469 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
4470 struct ust_registry_session *ust_session_reg;
4471 struct buffer_reg_channel *reg_chan;
4472 struct consumer_socket *socket;
4473
4474 /* Get consumer socket to use to push the metadata.*/
4475 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
4476 usess->consumer);
4477 if (!socket) {
4478 /* Ignore request if no consumer is found for the session. */
4479 continue;
4480 }
4481
4482 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
4483 reg_chan, node.node) {
4484 /*
4485 * The following call will print error values so the return
4486 * code is of little importance because whatever happens, we
4487 * have to try them all.
4488 */
4489 (void) consumer_flush_channel(socket, reg_chan->consumer_key);
4490 }
4491
4492 ust_session_reg = reg->registry->reg.ust;
4493 /* Push metadata. */
4494 (void) push_metadata(ust_session_reg, usess->consumer);
4495 }
ce34fcd0
MD
4496 break;
4497 }
4498 case LTTNG_BUFFER_PER_PID:
4499 {
4500 struct ust_app_session *ua_sess;
4501 struct lttng_ht_iter iter;
4502 struct ust_app *app;
4503
4504 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4505 ua_sess = lookup_session_by_app(usess, app);
4506 if (ua_sess == NULL) {
4507 continue;
4508 }
4509 (void) ust_app_flush_app_session(app, ua_sess);
4510 }
4511 break;
4512 }
4513 default:
99b1411c 4514 ret = -1;
ce34fcd0
MD
4515 assert(0);
4516 break;
c4b88406 4517 }
c4b88406 4518
7db205b5 4519 rcu_read_unlock();
840cb59c 4520 health_code_update();
c4b88406 4521 return ret;
8be98f9a
MD
4522}
4523
84cd17c6
MD
4524/*
4525 * Destroy a specific UST session in apps.
4526 */
3353de95 4527static int destroy_trace(struct ltt_ust_session *usess, struct ust_app *app)
84cd17c6 4528{
ffe60014 4529 int ret;
84cd17c6 4530 struct ust_app_session *ua_sess;
bec39940 4531 struct lttng_ht_iter iter;
d9bf3ca4 4532 struct lttng_ht_node_u64 *node;
84cd17c6 4533
852d0037 4534 DBG("Destroy tracing for ust app pid %d", app->pid);
84cd17c6
MD
4535
4536 rcu_read_lock();
4537
e0c7ec2b
DG
4538 if (!app->compatible) {
4539 goto end;
4540 }
4541
84cd17c6 4542 __lookup_session_by_app(usess, app, &iter);
d9bf3ca4 4543 node = lttng_ht_iter_get_node_u64(&iter);
84cd17c6 4544 if (node == NULL) {
d42f20df
DG
4545 /* Session is being or is deleted. */
4546 goto end;
84cd17c6
MD
4547 }
4548 ua_sess = caa_container_of(node, struct ust_app_session, node);
c4a1715b 4549
840cb59c 4550 health_code_update();
d0b96690 4551 destroy_app_session(app, ua_sess);
84cd17c6 4552
840cb59c 4553 health_code_update();
7db205b5 4554
84cd17c6 4555 /* Quiescent wait after stopping trace */
fb45065e 4556 pthread_mutex_lock(&app->sock_lock);
ffe60014 4557 ret = ustctl_wait_quiescent(app->sock);
fb45065e 4558 pthread_mutex_unlock(&app->sock_lock);
ffe60014
DG
4559 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4560 ERR("UST app wait quiescent failed for app pid %d ret %d",
4561 app->pid, ret);
4562 }
e0c7ec2b
DG
4563end:
4564 rcu_read_unlock();
840cb59c 4565 health_code_update();
84cd17c6 4566 return 0;
84cd17c6
MD
4567}
4568
5b4a0ec0
DG
4569/*
4570 * Start tracing for the UST session.
4571 */
421cb601
DG
4572int ust_app_start_trace_all(struct ltt_ust_session *usess)
4573{
4574 int ret = 0;
bec39940 4575 struct lttng_ht_iter iter;
421cb601 4576 struct ust_app *app;
48842b30 4577
421cb601
DG
4578 DBG("Starting all UST traces");
4579
4580 rcu_read_lock();
421cb601 4581
852d0037 4582 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
421cb601 4583 ret = ust_app_start_trace(usess, app);
48842b30 4584 if (ret < 0) {
5b4a0ec0
DG
4585 /* Continue to next apps even on error */
4586 continue;
48842b30 4587 }
48842b30 4588 }
5b4a0ec0 4589
48842b30
DG
4590 rcu_read_unlock();
4591
4592 return 0;
4593}
487cf67c 4594
8be98f9a
MD
4595/*
4596 * Stop tracing for the UST session.
ce34fcd0 4597 * Called with UST session lock held.
8be98f9a
MD
4598 */
4599int ust_app_stop_trace_all(struct ltt_ust_session *usess)
4600{
4601 int ret = 0;
bec39940 4602 struct lttng_ht_iter iter;
8be98f9a
MD
4603 struct ust_app *app;
4604
4605 DBG("Stopping all UST traces");
4606
4607 rcu_read_lock();
4608
b34cbebf
MD
4609 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4610 ret = ust_app_stop_trace(usess, app);
4611 if (ret < 0) {
4612 /* Continue to next apps even on error */
4613 continue;
4614 }
4615 }
4616
ce34fcd0 4617 (void) ust_app_flush_session(usess);
8be98f9a
MD
4618
4619 rcu_read_unlock();
4620
4621 return 0;
4622}
4623
84cd17c6
MD
4624/*
4625 * Destroy app UST session.
4626 */
4627int ust_app_destroy_trace_all(struct ltt_ust_session *usess)
4628{
4629 int ret = 0;
bec39940 4630 struct lttng_ht_iter iter;
84cd17c6
MD
4631 struct ust_app *app;
4632
4633 DBG("Destroy all UST traces");
4634
4635 rcu_read_lock();
4636
852d0037 4637 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3353de95 4638 ret = destroy_trace(usess, app);
84cd17c6
MD
4639 if (ret < 0) {
4640 /* Continue to next apps even on error */
4641 continue;
4642 }
4643 }
4644
4645 rcu_read_unlock();
4646
4647 return 0;
4648}
4649
a9ad0c8f
MD
4650static
4651void ust_app_global_create(struct ltt_ust_session *usess, struct ust_app *app)
487cf67c 4652{
55c54cce 4653 int ret = 0;
31746f93 4654 struct lttng_ht_iter iter, uiter;
3d8ca23b 4655 struct ust_app_session *ua_sess = NULL;
487cf67c
DG
4656 struct ust_app_channel *ua_chan;
4657 struct ust_app_event *ua_event;
727d5404 4658 struct ust_app_ctx *ua_ctx;
a9ad0c8f 4659 int is_created = 0;
1f3580c7 4660
a9ad0c8f 4661 ret = create_ust_app_session(usess, app, &ua_sess, &is_created);
3d8ca23b
DG
4662 if (ret < 0) {
4663 /* Tracer is probably gone or ENOMEM. */
487cf67c
DG
4664 goto error;
4665 }
a9ad0c8f
MD
4666 if (!is_created) {
4667 /* App session already created. */
4668 goto end;
4669 }
3d8ca23b 4670 assert(ua_sess);
487cf67c 4671
d0b96690
DG
4672 pthread_mutex_lock(&ua_sess->lock);
4673
b161602a
MD
4674 if (ua_sess->deleted) {
4675 pthread_mutex_unlock(&ua_sess->lock);
4676 goto end;
4677 }
4678
284d8f55 4679 /*
d0b96690 4680 * We can iterate safely here over all UST app sessions since the create ust
284d8f55
DG
4681 * app session above made a shadow copy of the UST global domain from the
4682 * ltt ust session.
4683 */
bec39940
DG
4684 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
4685 node.node) {
ad7a9107 4686 ret = do_create_channel(app, usess, ua_sess, ua_chan);
a7169585 4687 if (ret < 0 && ret != -ENOTCONN) {
ad7a9107 4688 /*
a7169585
MD
4689 * Stop everything. On error, the application
4690 * failed, no more file descriptors are available
4691 * or we hit ENOMEM, so stopping here is the only thing
4692 * we can do for now. The only exception is
4693 * -ENOTCONN, which indicates that the application
4694 * has exited.
ad7a9107
DG
4695 */
4696 goto error_unlock;
487cf67c
DG
4697 }
4698
31746f93
DG
4699 /*
4700 * Add context using the list so they are enabled in the same order the
4701 * user added them.
4702 */
4703 cds_list_for_each_entry(ua_ctx, &ua_chan->ctx_list, list) {
727d5404
DG
4704 ret = create_ust_channel_context(ua_chan, ua_ctx, app);
4705 if (ret < 0) {
d0b96690 4706 goto error_unlock;
727d5404
DG
4707 }
4708 }
4709
4710
284d8f55 4711 /* For each events */
bec39940
DG
4712 cds_lfht_for_each_entry(ua_chan->events->ht, &uiter.iter, ua_event,
4713 node.node) {
284d8f55
DG
4714 ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
4715 if (ret < 0) {
d0b96690 4716 goto error_unlock;
487cf67c 4717 }
36dc12cc 4718 }
487cf67c
DG
4719 }
4720
d0b96690
DG
4721 pthread_mutex_unlock(&ua_sess->lock);
4722
14fb1ebe 4723 if (usess->active) {
421cb601 4724 ret = ust_app_start_trace(usess, app);
36dc12cc 4725 if (ret < 0) {
36dc12cc
DG
4726 goto error;
4727 }
4728
852d0037 4729 DBG2("UST trace started for app pid %d", app->pid);
36dc12cc 4730 }
a9ad0c8f 4731end:
ffe60014 4732 /* Everything went well at this point. */
ffe60014
DG
4733 return;
4734
d0b96690
DG
4735error_unlock:
4736 pthread_mutex_unlock(&ua_sess->lock);
487cf67c 4737error:
ffe60014 4738 if (ua_sess) {
d0b96690 4739 destroy_app_session(app, ua_sess);
ffe60014 4740 }
487cf67c
DG
4741 return;
4742}
55cc08a6 4743
a9ad0c8f
MD
4744static
4745void ust_app_global_destroy(struct ltt_ust_session *usess, struct ust_app *app)
4746{
4747 struct ust_app_session *ua_sess;
4748
4749 ua_sess = lookup_session_by_app(usess, app);
4750 if (ua_sess == NULL) {
4751 return;
4752 }
4753 destroy_app_session(app, ua_sess);
4754}
4755
4756/*
4757 * Add channels/events from UST global domain to registered apps at sock.
4758 *
4759 * Called with session lock held.
4760 * Called with RCU read-side lock held.
4761 */
4762void ust_app_global_update(struct ltt_ust_session *usess, struct ust_app *app)
4763{
4764 assert(usess);
4765
4766 DBG2("UST app global update for app sock %d for session id %" PRIu64,
4767 app->sock, usess->id);
4768
4769 if (!app->compatible) {
4770 return;
4771 }
4772
4773 if (trace_ust_pid_tracker_lookup(usess, app->pid)) {
4774 ust_app_global_create(usess, app);
4775 } else {
4776 ust_app_global_destroy(usess, app);
4777 }
4778}
4779
4780/*
4781 * Called with session lock held.
4782 */
4783void ust_app_global_update_all(struct ltt_ust_session *usess)
4784{
4785 struct lttng_ht_iter iter;
4786 struct ust_app *app;
4787
4788 rcu_read_lock();
4789 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4790 ust_app_global_update(usess, app);
4791 }
4792 rcu_read_unlock();
4793}
4794
55cc08a6
DG
4795/*
4796 * Add context to a specific channel for global UST domain.
4797 */
4798int ust_app_add_ctx_channel_glb(struct ltt_ust_session *usess,
4799 struct ltt_ust_channel *uchan, struct ltt_ust_context *uctx)
4800{
4801 int ret = 0;
bec39940
DG
4802 struct lttng_ht_node_str *ua_chan_node;
4803 struct lttng_ht_iter iter, uiter;
55cc08a6
DG
4804 struct ust_app_channel *ua_chan = NULL;
4805 struct ust_app_session *ua_sess;
4806 struct ust_app *app;
4807
4808 rcu_read_lock();
4809
852d0037 4810 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
e0c7ec2b
DG
4811 if (!app->compatible) {
4812 /*
4813 * TODO: In time, we should notify the caller of this error by
4814 * reporting that this is a version error.
4815 */
4816 continue;
4817 }
55cc08a6
DG
4818 ua_sess = lookup_session_by_app(usess, app);
4819 if (ua_sess == NULL) {
4820 continue;
4821 }
4822
d0b96690 4823 pthread_mutex_lock(&ua_sess->lock);
b161602a
MD
4824
4825 if (ua_sess->deleted) {
4826 pthread_mutex_unlock(&ua_sess->lock);
4827 continue;
4828 }
4829
55cc08a6 4830 /* Lookup channel in the ust app session */
bec39940
DG
4831 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4832 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
55cc08a6 4833 if (ua_chan_node == NULL) {
d0b96690 4834 goto next_app;
55cc08a6
DG
4835 }
4836 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel,
4837 node);
55cc08a6
DG
4838 ret = create_ust_app_channel_context(ua_sess, ua_chan, &uctx->ctx, app);
4839 if (ret < 0) {
d0b96690 4840 goto next_app;
55cc08a6 4841 }
d0b96690
DG
4842 next_app:
4843 pthread_mutex_unlock(&ua_sess->lock);
55cc08a6
DG
4844 }
4845
55cc08a6
DG
4846 rcu_read_unlock();
4847 return ret;
4848}
4849
76d45b40
DG
4850/*
4851 * Enable event for a channel from a UST session for a specific PID.
4852 */
4853int ust_app_enable_event_pid(struct ltt_ust_session *usess,
4854 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent, pid_t pid)
4855{
4856 int ret = 0;
bec39940 4857 struct lttng_ht_iter iter;
18eace3b 4858 struct lttng_ht_node_str *ua_chan_node;
76d45b40
DG
4859 struct ust_app *app;
4860 struct ust_app_session *ua_sess;
4861 struct ust_app_channel *ua_chan;
4862 struct ust_app_event *ua_event;
4863
4864 DBG("UST app enabling event %s for PID %d", uevent->attr.name, pid);
4865
4866 rcu_read_lock();
4867
4868 app = ust_app_find_by_pid(pid);
4869 if (app == NULL) {
4870 ERR("UST app enable event per PID %d not found", pid);
4871 ret = -1;
d0b96690 4872 goto end;
76d45b40
DG
4873 }
4874
e0c7ec2b
DG
4875 if (!app->compatible) {
4876 ret = 0;
d0b96690 4877 goto end;
e0c7ec2b
DG
4878 }
4879
76d45b40 4880 ua_sess = lookup_session_by_app(usess, app);
c4a1715b
DG
4881 if (!ua_sess) {
4882 /* The application has problem or is probably dead. */
d0b96690
DG
4883 ret = 0;
4884 goto end;
c4a1715b 4885 }
76d45b40 4886
d0b96690 4887 pthread_mutex_lock(&ua_sess->lock);
b161602a
MD
4888
4889 if (ua_sess->deleted) {
4890 ret = 0;
4891 goto end_unlock;
4892 }
4893
76d45b40 4894 /* Lookup channel in the ust app session */
bec39940
DG
4895 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
4896 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
76d45b40
DG
4897 /* If the channel is not found, there is a code flow error */
4898 assert(ua_chan_node);
4899
4900 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4901
18eace3b 4902 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
39c5a3a7 4903 uevent->filter, uevent->attr.loglevel, uevent->exclusion);
18eace3b 4904 if (ua_event == NULL) {
76d45b40
DG
4905 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
4906 if (ret < 0) {
d0b96690 4907 goto end_unlock;
76d45b40
DG
4908 }
4909 } else {
76d45b40
DG
4910 ret = enable_ust_app_event(ua_sess, ua_event, app);
4911 if (ret < 0) {
d0b96690 4912 goto end_unlock;
76d45b40
DG
4913 }
4914 }
4915
d0b96690
DG
4916end_unlock:
4917 pthread_mutex_unlock(&ua_sess->lock);
4918end:
76d45b40
DG
4919 rcu_read_unlock();
4920 return ret;
4921}
7f79d3a1 4922
4466912f
DG
4923/*
4924 * Calibrate registered applications.
4925 */
4926int ust_app_calibrate_glb(struct lttng_ust_calibrate *calibrate)
4927{
4928 int ret = 0;
4929 struct lttng_ht_iter iter;
4930 struct ust_app *app;
4931
4932 rcu_read_lock();
4933
852d0037 4934 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4466912f
DG
4935 if (!app->compatible) {
4936 /*
4937 * TODO: In time, we should notify the caller of this error by
4938 * reporting that this is a version error.
4939 */
4940 continue;
4941 }
4942
840cb59c 4943 health_code_update();
86acf0da 4944
fb45065e 4945 pthread_mutex_lock(&app->sock_lock);
852d0037 4946 ret = ustctl_calibrate(app->sock, calibrate);
fb45065e 4947 pthread_mutex_unlock(&app->sock_lock);
4466912f
DG
4948 if (ret < 0) {
4949 switch (ret) {
4950 case -ENOSYS:
4951 /* Means that it's not implemented on the tracer side. */
4952 ret = 0;
4953 break;
4954 default:
4466912f 4955 DBG2("Calibrate app PID %d returned with error %d",
852d0037 4956 app->pid, ret);
4466912f
DG
4957 break;
4958 }
4959 }
4960 }
4961
4962 DBG("UST app global domain calibration finished");
4963
4964 rcu_read_unlock();
4965
840cb59c 4966 health_code_update();
86acf0da 4967
4466912f
DG
4968 return ret;
4969}
d0b96690
DG
4970
4971/*
4972 * Receive registration and populate the given msg structure.
4973 *
4974 * On success return 0 else a negative value returned by the ustctl call.
4975 */
4976int ust_app_recv_registration(int sock, struct ust_register_msg *msg)
4977{
4978 int ret;
4979 uint32_t pid, ppid, uid, gid;
4980
4981 assert(msg);
4982
4983 ret = ustctl_recv_reg_msg(sock, &msg->type, &msg->major, &msg->minor,
4984 &pid, &ppid, &uid, &gid,
4985 &msg->bits_per_long,
4986 &msg->uint8_t_alignment,
4987 &msg->uint16_t_alignment,
4988 &msg->uint32_t_alignment,
4989 &msg->uint64_t_alignment,
4990 &msg->long_alignment,
4991 &msg->byte_order,
4992 msg->name);
4993 if (ret < 0) {
4994 switch (-ret) {
4995 case EPIPE:
4996 case ECONNRESET:
4997 case LTTNG_UST_ERR_EXITING:
4998 DBG3("UST app recv reg message failed. Application died");
4999 break;
5000 case LTTNG_UST_ERR_UNSUP_MAJOR:
5001 ERR("UST app recv reg unsupported version %d.%d. Supporting %d.%d",
5002 msg->major, msg->minor, LTTNG_UST_ABI_MAJOR_VERSION,
5003 LTTNG_UST_ABI_MINOR_VERSION);
5004 break;
5005 default:
5006 ERR("UST app recv reg message failed with ret %d", ret);
5007 break;
5008 }
5009 goto error;
5010 }
5011 msg->pid = (pid_t) pid;
5012 msg->ppid = (pid_t) ppid;
5013 msg->uid = (uid_t) uid;
5014 msg->gid = (gid_t) gid;
5015
5016error:
5017 return ret;
5018}
5019
d88aee68
DG
5020/*
5021 * Return a ust app channel object using the application object and the channel
5022 * object descriptor as a key. If not found, NULL is returned. An RCU read side
5023 * lock MUST be acquired before calling this function.
5024 */
d0b96690
DG
5025static struct ust_app_channel *find_channel_by_objd(struct ust_app *app,
5026 int objd)
5027{
5028 struct lttng_ht_node_ulong *node;
5029 struct lttng_ht_iter iter;
5030 struct ust_app_channel *ua_chan = NULL;
5031
5032 assert(app);
5033
5034 lttng_ht_lookup(app->ust_objd, (void *)((unsigned long) objd), &iter);
5035 node = lttng_ht_iter_get_node_ulong(&iter);
5036 if (node == NULL) {
5037 DBG2("UST app channel find by objd %d not found", objd);
5038 goto error;
5039 }
5040
5041 ua_chan = caa_container_of(node, struct ust_app_channel, ust_objd_node);
5042
5043error:
5044 return ua_chan;
5045}
5046
d88aee68
DG
5047/*
5048 * Reply to a register channel notification from an application on the notify
5049 * socket. The channel metadata is also created.
5050 *
5051 * The session UST registry lock is acquired in this function.
5052 *
5053 * On success 0 is returned else a negative value.
5054 */
d0b96690
DG
5055static int reply_ust_register_channel(int sock, int sobjd, int cobjd,
5056 size_t nr_fields, struct ustctl_field *fields)
5057{
5058 int ret, ret_code = 0;
5059 uint32_t chan_id, reg_count;
7972aab2 5060 uint64_t chan_reg_key;
d0b96690
DG
5061 enum ustctl_channel_header type;
5062 struct ust_app *app;
5063 struct ust_app_channel *ua_chan;
5064 struct ust_app_session *ua_sess;
7972aab2 5065 struct ust_registry_session *registry;
45893984 5066 struct ust_registry_channel *chan_reg;
d0b96690
DG
5067
5068 rcu_read_lock();
5069
5070 /* Lookup application. If not found, there is a code flow error. */
5071 app = find_app_by_notify_sock(sock);
d88aee68
DG
5072 if (!app) {
5073 DBG("Application socket %d is being teardown. Abort event notify",
5074 sock);
5075 ret = 0;
d5d629b5 5076 free(fields);
d88aee68
DG
5077 goto error_rcu_unlock;
5078 }
d0b96690 5079
4950b860 5080 /* Lookup channel by UST object descriptor. */
d0b96690 5081 ua_chan = find_channel_by_objd(app, cobjd);
4950b860
MD
5082 if (!ua_chan) {
5083 DBG("Application channel is being teardown. Abort event notify");
5084 ret = 0;
d5d629b5 5085 free(fields);
4950b860
MD
5086 goto error_rcu_unlock;
5087 }
5088
d0b96690
DG
5089 assert(ua_chan->session);
5090 ua_sess = ua_chan->session;
d0b96690 5091
7972aab2
DG
5092 /* Get right session registry depending on the session buffer type. */
5093 registry = get_session_registry(ua_sess);
5094 assert(registry);
45893984 5095
7972aab2
DG
5096 /* Depending on the buffer type, a different channel key is used. */
5097 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
5098 chan_reg_key = ua_chan->tracing_channel_id;
d0b96690 5099 } else {
7972aab2 5100 chan_reg_key = ua_chan->key;
d0b96690
DG
5101 }
5102
7972aab2
DG
5103 pthread_mutex_lock(&registry->lock);
5104
5105 chan_reg = ust_registry_channel_find(registry, chan_reg_key);
5106 assert(chan_reg);
5107
5108 if (!chan_reg->register_done) {
5109 reg_count = ust_registry_get_event_count(chan_reg);
5110 if (reg_count < 31) {
5111 type = USTCTL_CHANNEL_HEADER_COMPACT;
5112 } else {
5113 type = USTCTL_CHANNEL_HEADER_LARGE;
5114 }
5115
5116 chan_reg->nr_ctx_fields = nr_fields;
5117 chan_reg->ctx_fields = fields;
5118 chan_reg->header_type = type;
d0b96690 5119 } else {
7972aab2
DG
5120 /* Get current already assigned values. */
5121 type = chan_reg->header_type;
d5d629b5
DG
5122 free(fields);
5123 /* Set to NULL so the error path does not do a double free. */
5124 fields = NULL;
d0b96690 5125 }
7972aab2
DG
5126 /* Channel id is set during the object creation. */
5127 chan_id = chan_reg->chan_id;
d0b96690
DG
5128
5129 /* Append to metadata */
7972aab2
DG
5130 if (!chan_reg->metadata_dumped) {
5131 ret_code = ust_metadata_channel_statedump(registry, chan_reg);
d0b96690
DG
5132 if (ret_code) {
5133 ERR("Error appending channel metadata (errno = %d)", ret_code);
5134 goto reply;
5135 }
5136 }
5137
5138reply:
7972aab2
DG
5139 DBG3("UST app replying to register channel key %" PRIu64
5140 " with id %u, type: %d, ret: %d", chan_reg_key, chan_id, type,
5141 ret_code);
d0b96690
DG
5142
5143 ret = ustctl_reply_register_channel(sock, chan_id, type, ret_code);
5144 if (ret < 0) {
5145 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5146 ERR("UST app reply channel failed with ret %d", ret);
5147 } else {
5148 DBG3("UST app reply channel failed. Application died");
5149 }
5150 goto error;
5151 }
5152
7972aab2
DG
5153 /* This channel registry registration is completed. */
5154 chan_reg->register_done = 1;
5155
d0b96690 5156error:
7972aab2 5157 pthread_mutex_unlock(&registry->lock);
d88aee68 5158error_rcu_unlock:
d0b96690 5159 rcu_read_unlock();
d5d629b5
DG
5160 if (ret) {
5161 free(fields);
5162 }
d0b96690
DG
5163 return ret;
5164}
5165
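/*
 * Illustrative sketch, not part of ust-app.c: the event header selection done
 * above when the first event of a channel completes its registration. Up to 30
 * already-registered events allow the compact event header; from 31 events on,
 * the large header is used. The helper name is hypothetical.
 */
static enum ustctl_channel_header example_pick_header_type(uint32_t reg_count)
{
	return (reg_count < 31) ? USTCTL_CHANNEL_HEADER_COMPACT :
			USTCTL_CHANNEL_HEADER_LARGE;
}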
/*
 * Add event to the UST channel registry. When the event is added to the
 * registry, the metadata is also created. Once done, this replies to the
 * application with the appropriate error code.
 *
 * The session UST registry lock is acquired in the function.
 *
 * On success, 0 is returned, else a negative value.
 */
static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
		char *sig, size_t nr_fields, struct ustctl_field *fields, int loglevel,
		char *model_emf_uri)
{
	int ret, ret_code;
	uint32_t event_id = 0;
	uint64_t chan_reg_key;
	struct ust_app *app;
	struct ust_app_channel *ua_chan;
	struct ust_app_session *ua_sess;
	struct ust_registry_session *registry;

	rcu_read_lock();

	/* Lookup application. If not found, there is a code flow error. */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		DBG("Application socket %d is being teardown. Abort event notify",
				sock);
		ret = 0;
		free(sig);
		free(fields);
		free(model_emf_uri);
		goto error_rcu_unlock;
	}

	/* Lookup channel by UST object descriptor. */
	ua_chan = find_channel_by_objd(app, cobjd);
	if (!ua_chan) {
		DBG("Application channel is being teardown. Abort event notify");
		ret = 0;
		free(sig);
		free(fields);
		free(model_emf_uri);
		goto error_rcu_unlock;
	}

	assert(ua_chan->session);
	ua_sess = ua_chan->session;

	registry = get_session_registry(ua_sess);
	assert(registry);

	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
		chan_reg_key = ua_chan->tracing_channel_id;
	} else {
		chan_reg_key = ua_chan->key;
	}

	pthread_mutex_lock(&registry->lock);

	/*
	 * From this point on, this call acquires ownership of the sig, fields
	 * and model_emf_uri, meaning any frees are done inside it if needed.
	 * These three variables MUST NOT be read or written after this.
	 */
	ret_code = ust_registry_create_event(registry, chan_reg_key,
			sobjd, cobjd, name, sig, nr_fields, fields, loglevel,
			model_emf_uri, ua_sess->buffer_type, &event_id,
			app);

	/*
	 * The return value is returned to ustctl so in case of an error, the
	 * application can be notified. In case of an error, it's important not to
	 * return a negative error or else the application will get closed.
	 */
	ret = ustctl_reply_register_event(sock, event_id, ret_code);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app reply event failed with ret %d", ret);
		} else {
			DBG3("UST app reply event failed. Application died");
		}
		/*
		 * No need to wipe the created event since the application socket will
		 * get closed on error, hence cleaning up everything by itself.
		 */
		goto error;
	}

	DBG3("UST registry event %s with id %" PRId32 " added successfully",
			name, event_id);

error:
	pthread_mutex_unlock(&registry->lock);
error_rcu_unlock:
	rcu_read_unlock();
	return ret;
}

/*
 * Handle application notification through the given notify socket.
 *
 * Return 0 on success or else a negative value.
 */
int ust_app_recv_notify(int sock)
{
	int ret;
	enum ustctl_notify_cmd cmd;

	DBG3("UST app receiving notify from sock %d", sock);

	ret = ustctl_recv_notify(sock, &cmd);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app recv notify failed with ret %d", ret);
		} else {
			DBG3("UST app recv notify failed. Application died");
		}
		goto error;
	}

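	/*
	 * Two notification commands are handled here: event registration and
	 * channel registration. Anything else is a protocol error.
	 */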
	switch (cmd) {
	case USTCTL_NOTIFY_CMD_EVENT:
	{
		int sobjd, cobjd, loglevel;
		char name[LTTNG_UST_SYM_NAME_LEN], *sig, *model_emf_uri;
		size_t nr_fields;
		struct ustctl_field *fields;

		DBG2("UST app ustctl register event received");

		ret = ustctl_recv_register_event(sock, &sobjd, &cobjd, name, &loglevel,
				&sig, &nr_fields, &fields, &model_emf_uri);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app recv event failed with ret %d", ret);
			} else {
				DBG3("UST app recv event failed. Application died");
			}
			goto error;
		}

		/*
		 * Add the event to the UST registry coming from the notify socket.
		 * This call will free the sig, fields and model_emf_uri if needed.
		 * This code path loses ownership of these variables and transfers it
		 * to that function.
		 */
		ret = add_event_ust_registry(sock, sobjd, cobjd, name, sig, nr_fields,
				fields, loglevel, model_emf_uri);
		if (ret < 0) {
			goto error;
		}

		break;
	}
	case USTCTL_NOTIFY_CMD_CHANNEL:
	{
		int sobjd, cobjd;
		size_t nr_fields;
		struct ustctl_field *fields;

		DBG2("UST app ustctl register channel received");

		ret = ustctl_recv_register_channel(sock, &sobjd, &cobjd, &nr_fields,
				&fields);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app recv channel failed with ret %d", ret);
			} else {
				DBG3("UST app recv channel failed. Application died");
			}
			goto error;
		}

		/*
		 * Ownership of the fields is transferred to this function call,
		 * meaning they are freed there if needed. After this, it is invalid
		 * to access or free fields.
		 */
		ret = reply_ust_register_channel(sock, sobjd, cobjd, nr_fields,
				fields);
		if (ret < 0) {
			goto error;
		}

		break;
	}
	default:
		/* Should NEVER happen. */
		assert(0);
	}

error:
	return ret;
}

/*
 * Once the notify socket hangs up, this is called. First, it tries to find the
 * corresponding application. On failure, the call_rcu to close the socket is
 * executed. If an application is found, it tries to delete it from the notify
 * socket hash table. Whatever the result, it proceeds to the call_rcu.
 *
 * Note that an object needs to be allocated here so on ENOMEM failure, the
 * call RCU is not done but the rest of the cleanup is.
 */
void ust_app_notify_sock_unregister(int sock)
{
	int err_enomem = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct ust_app_notify_sock_obj *obj;

	assert(sock >= 0);

	rcu_read_lock();

	obj = zmalloc(sizeof(*obj));
	if (!obj) {
		/*
		 * An ENOMEM is kind of uncool. If this strikes we continue the
		 * procedure but the call_rcu will not be called. In this case, we
		 * accept the fd leak rather than possibly creating an unsynchronized
		 * state between threads.
		 *
		 * TODO: The notify object should be created once the notify socket is
		 * registered and stored independently from the ust app object. The
		 * tricky part is to synchronize the teardown of the application and
		 * this notify object. Let's keep that in mind so we can avoid this
		 * kind of shenanigans with ENOMEM in the teardown path.
		 */
		err_enomem = 1;
	} else {
		obj->fd = sock;
	}

	DBG("UST app notify socket unregister %d", sock);

	/*
	 * Lookup application by notify socket. If this fails, this means that the
	 * hash table delete has already been done by the application
	 * unregistration process so we can safely close the notify socket in a
	 * call RCU.
	 */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		goto close_socket;
	}

	iter.iter.node = &app->notify_sock_n.node;

	/*
	 * Whether the deletion fails or succeeds, we have to close the socket
	 * after a grace period, so proceed to the call RCU either way. If the
	 * deletion is successful, the application is no longer visible to other
	 * threads; if it fails, the node was already deleted from the hash
	 * table, so in both cases we just have to close the socket.
	 */
	(void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);

close_socket:
	rcu_read_unlock();

	/*
	 * Close the socket after a grace period to avoid the socket being reused
	 * before the application object is freed, which could create a race
	 * between threads trying to add a unique node to the global hash table.
	 */
	if (!err_enomem) {
		call_rcu(&obj->head, close_notify_sock_rcu);
	}
}

/*
 * Destroy a ust app data structure and free its memory.
 */
void ust_app_destroy(struct ust_app *app)
{
	if (!app) {
		return;
	}

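	/* Free the application object only after an RCU grace period. */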
	call_rcu(&app->pid_n.head, delete_ust_app_rcu);
}

/*
 * Take a snapshot for a given UST session. The snapshot is sent to the given
 * output.
 *
 * Return 0 on success or else a negative value.
 */
int ust_app_snapshot_record(struct ltt_ust_session *usess,
		struct snapshot_output *output, int wait,
		uint64_t nb_packets_per_stream)
{
	int ret = 0;
	unsigned int snapshot_done = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	char pathname[PATH_MAX];

	assert(usess);
	assert(output);

	rcu_read_lock();

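	/*
	 * Per-UID buffers are snapshotted through the buffer registry, while
	 * per-PID buffers require walking every registered application.
	 */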
	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;

		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct buffer_reg_channel *reg_chan;
			struct consumer_socket *socket;

			/* Get the consumer socket to use to push the metadata. */
			socket = consumer_find_socket_by_bitness(reg->bits_per_long,
					usess->consumer);
			if (!socket) {
				ret = -EINVAL;
				goto error;
			}

			/* Add the UST default trace dir to path. */
			memset(pathname, 0, sizeof(pathname));
			ret = snprintf(pathname, sizeof(pathname),
					DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH,
					reg->uid, reg->bits_per_long);
			if (ret < 0) {
				PERROR("snprintf snapshot path");
				goto error;
			}

			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
					reg_chan, node.node) {
				ret = consumer_snapshot_channel(socket, reg_chan->consumer_key,
						output, 0, usess->uid, usess->gid, pathname, wait,
						nb_packets_per_stream);
				if (ret < 0) {
					goto error;
				}
			}
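			/* Also snapshot the metadata channel of this UID registry. */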
5509 ret = consumer_snapshot_channel(socket,
5510 reg->registry->reg.ust->metadata_key, output, 1,
d07ceecd 5511 usess->uid, usess->gid, pathname, wait, 0);
8c924c7b
MD
5512 if (ret < 0) {
5513 goto error;
5514 }
7badf927 5515 snapshot_done = 1;
af706bb7 5516 }
8c924c7b
MD
5517 break;
5518 }
5519 case LTTNG_BUFFER_PER_PID:
5520 {
5521 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5522 struct consumer_socket *socket;
5523 struct lttng_ht_iter chan_iter;
5524 struct ust_app_channel *ua_chan;
5525 struct ust_app_session *ua_sess;
5526 struct ust_registry_session *registry;
5527
5528 ua_sess = lookup_session_by_app(usess, app);
5529 if (!ua_sess) {
5530 /* Session not associated with this app. */
5531 continue;
5532 }
af706bb7 5533
8c924c7b
MD
5534 /* Get the right consumer socket for the application. */
5535 socket = consumer_find_socket_by_bitness(app->bits_per_long,
5536 output->consumer);
5537 if (!socket) {
5c786ded 5538 ret = -EINVAL;
5c786ded
JD
5539 goto error;
5540 }
5541
8c924c7b
MD
5542 /* Add the UST default trace dir to path. */
5543 memset(pathname, 0, sizeof(pathname));
5544 ret = snprintf(pathname, sizeof(pathname), DEFAULT_UST_TRACE_DIR "/%s",
5545 ua_sess->path);
6dc3064a 5546 if (ret < 0) {
8c924c7b 5547 PERROR("snprintf snapshot path");
6dc3064a
DG
5548 goto error;
5549 }
6dc3064a 5550
8c924c7b
MD
5551 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
5552 ua_chan, node.node) {
68808f4e
DG
5553 ret = consumer_snapshot_channel(socket, ua_chan->key, output,
5554 0, ua_sess->euid, ua_sess->egid, pathname, wait,
d07ceecd 5555 nb_packets_per_stream);
8c924c7b
MD
5556 if (ret < 0) {
5557 goto error;
5558 }
5559 }
5560
5561 registry = get_session_registry(ua_sess);
5562 assert(registry);
5563 ret = consumer_snapshot_channel(socket, registry->metadata_key, output,
d07ceecd 5564 1, ua_sess->euid, ua_sess->egid, pathname, wait, 0);
8c924c7b
MD
5565 if (ret < 0) {
5566 goto error;
5567 }
7badf927 5568 snapshot_done = 1;
8c924c7b
MD
5569 }
5570 break;
5571 }
5572 default:
5573 assert(0);
5574 break;
6dc3064a
DG
5575 }
5576
7badf927
DG
	if (!snapshot_done) {
		/*
		 * If no snapshot was made and we are not in the error path, this means
		 * that there are no buffers, thus no (prior) application to snapshot
		 * data from, so we simply have NO data.
		 */
		ret = -ENODATA;
	}

error:
	rcu_read_unlock();
	return ret;
}

/*
 * Return the size taken by one more packet per stream.
 */
uint64_t ust_app_get_size_one_more_packet_per_stream(struct ltt_ust_session *usess,
		uint64_t cur_nr_packets)
{
	uint64_t tot_size = 0;
	struct ust_app *app;
	struct lttng_ht_iter iter;

	assert(usess);

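	/*
	 * For every channel that has not already had all of its packets
	 * accounted for, one extra packet per stream costs subbuf_size bytes
	 * for each of its streams.
	 */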
	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;

		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct buffer_reg_channel *reg_chan;

			rcu_read_lock();
			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
					reg_chan, node.node) {
				if (cur_nr_packets >= reg_chan->num_subbuf) {
					/*
					 * Don't take the channel into account if we have
					 * already grabbed all of its packets.
					 */
					continue;
				}
				tot_size += reg_chan->subbuf_size * reg_chan->stream_count;
			}
			rcu_read_unlock();
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		rcu_read_lock();
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			struct ust_app_channel *ua_chan;
			struct ust_app_session *ua_sess;
			struct lttng_ht_iter chan_iter;

			ua_sess = lookup_session_by_app(usess, app);
			if (!ua_sess) {
				/* Session not associated with this app. */
				continue;
			}

			cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
					ua_chan, node.node) {
				if (cur_nr_packets >= ua_chan->attr.num_subbuf) {
					/*
					 * Don't take the channel into account if we have
					 * already grabbed all of its packets.
					 */
					continue;
				}
				tot_size += ua_chan->attr.subbuf_size * ua_chan->streams.count;
			}
		}
		rcu_read_unlock();
		break;
	}
	default:
		assert(0);
		break;
	}

	return tot_size;
}