Fix: don't abort metadata push on closed metadata
src/bin/lttng-sessiond/ust-app.c
/*
 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
 * Copyright (C) 2016 - Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#define _LGPL_SOURCE
#include <errno.h>
#include <inttypes.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <urcu/compiler.h>
#include <lttng/ust-error.h>
#include <signal.h>

#include <common/common.h>
#include <common/sessiond-comm/sessiond-comm.h>

#include "buffer-registry.h"
#include "fd-limit.h"
#include "health-sessiond.h"
#include "ust-app.h"
#include "ust-consumer.h"
#include "ust-ctl.h"
#include "utils.h"
#include "session.h"

static
int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess);

/* Next available channel key. Access under next_channel_key_lock. */
static uint64_t _next_channel_key;
static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;

/* Next available session ID. Access under next_session_id_lock. */
static uint64_t _next_session_id;
static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Return the incremented value of next_channel_key.
 */
static uint64_t get_next_channel_key(void)
{
	uint64_t ret;

	pthread_mutex_lock(&next_channel_key_lock);
	ret = ++_next_channel_key;
	pthread_mutex_unlock(&next_channel_key_lock);
	return ret;
}

/*
 * Return the atomically incremented value of next_session_id.
 */
static uint64_t get_next_session_id(void)
{
	uint64_t ret;

	pthread_mutex_lock(&next_session_id_lock);
	ret = ++_next_session_id;
	pthread_mutex_unlock(&next_session_id_lock);
	return ret;
}

static void copy_channel_attr_to_ustctl(
		struct ustctl_consumer_channel_attr *attr,
		struct lttng_ust_channel_attr *uattr)
{
	/* Copy channel attributes since the layout is different. */
	attr->subbuf_size = uattr->subbuf_size;
	attr->num_subbuf = uattr->num_subbuf;
	attr->overwrite = uattr->overwrite;
	attr->switch_timer_interval = uattr->switch_timer_interval;
	attr->read_timer_interval = uattr->read_timer_interval;
	attr->output = uattr->output;
}

/*
 * Match function for the hash table lookup.
 *
 * It matches an ust app event based on four attributes: the event name, the
 * filter bytecode, the loglevel and the exclusions.
 */
static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
{
	struct ust_app_event *event;
	const struct ust_app_ht_key *key;
	int ev_loglevel_value;

	assert(node);
	assert(_key);

	event = caa_container_of(node, struct ust_app_event, node.node);
	key = _key;
	ev_loglevel_value = event->attr.loglevel;

	/* Match the 4 elements of the key: name, filter, loglevel, exclusions */

	/* Event name */
	if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
		goto no_match;
	}

	/* Event loglevel. */
	if (ev_loglevel_value != key->loglevel_type) {
		if (event->attr.loglevel_type == LTTNG_UST_LOGLEVEL_ALL
				&& key->loglevel_type == 0 &&
				ev_loglevel_value == -1) {
			/*
			 * Match is accepted. This is because on event creation, the
			 * loglevel is set to -1 if the event loglevel type is ALL so 0 and
			 * -1 are accepted for this loglevel type since 0 is the one set by
			 * the API when receiving an enable event.
			 */
		} else {
			goto no_match;
		}
	}

	/* One of the filters is NULL, fail. */
	if ((key->filter && !event->filter) || (!key->filter && event->filter)) {
		goto no_match;
	}

	if (key->filter && event->filter) {
		/* Both filters exist, check length followed by the bytecode. */
		if (event->filter->len != key->filter->len ||
				memcmp(event->filter->data, key->filter->data,
					event->filter->len) != 0) {
			goto no_match;
		}
	}

	/* One of the exclusions is NULL, fail. */
	if ((key->exclusion && !event->exclusion) || (!key->exclusion && event->exclusion)) {
		goto no_match;
	}

	if (key->exclusion && event->exclusion) {
		/* Both exclusions exist, check count followed by the names. */
		if (event->exclusion->count != key->exclusion->count ||
				memcmp(event->exclusion->names, key->exclusion->names,
					event->exclusion->count * LTTNG_UST_SYM_NAME_LEN) != 0) {
			goto no_match;
		}
	}

	/* Match. */
	return 1;

no_match:
	return 0;
}

/*
 * Unique add of an ust app event in the given ht. This uses the custom
 * ht_match_ust_app_event match function and the event name as hash.
 */
static void add_unique_ust_app_event(struct ust_app_channel *ua_chan,
		struct ust_app_event *event)
{
	struct cds_lfht_node *node_ptr;
	struct ust_app_ht_key key;
	struct lttng_ht *ht;

	assert(ua_chan);
	assert(ua_chan->events);
	assert(event);

	ht = ua_chan->events;
	key.name = event->attr.name;
	key.filter = event->filter;
	key.loglevel_type = event->attr.loglevel;
	key.exclusion = event->exclusion;

	node_ptr = cds_lfht_add_unique(ht->ht,
			ht->hash_fct(event->node.key, lttng_ht_seed),
			ht_match_ust_app_event, &key, &event->node.node);
	assert(node_ptr == &event->node.node);
}

/*
 * Close the notify socket from the given RCU head object. This MUST be called
 * through a call_rcu().
 */
static void close_notify_sock_rcu(struct rcu_head *head)
{
	int ret;
	struct ust_app_notify_sock_obj *obj =
		caa_container_of(head, struct ust_app_notify_sock_obj, head);

	/* Must have a valid fd here. */
	assert(obj->fd >= 0);

	ret = close(obj->fd);
	if (ret) {
		ERR("close notify sock %d RCU", obj->fd);
	}
	lttng_fd_put(LTTNG_FD_APPS, 1);

	free(obj);
}

/*
 * Return the session registry according to the buffer type of the given
 * session.
 *
 * A registry per UID object MUST exist before calling this function or else
 * it asserts if not found. RCU read side lock must be acquired.
 */
static struct ust_registry_session *get_session_registry(
		struct ust_app_session *ua_sess)
{
	struct ust_registry_session *registry = NULL;

	assert(ua_sess);

	switch (ua_sess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
	{
		struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
		if (!reg_pid) {
			goto error;
		}
		registry = reg_pid->registry->reg.ust;
		break;
	}
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
				ua_sess->tracing_id, ua_sess->bits_per_long, ua_sess->uid);
		if (!reg_uid) {
			goto error;
		}
		registry = reg_uid->registry->reg.ust;
		break;
	}
	default:
		assert(0);
	};

error:
	return registry;
}

/*
 * Delete ust context safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx,
		struct ust_app *app)
{
	int ret;

	assert(ua_ctx);

	if (ua_ctx->obj) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_object(sock, ua_ctx->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release ctx obj handle %d failed with ret %d",
					sock, ua_ctx->obj->handle, ret);
		}
		free(ua_ctx->obj);
	}
	free(ua_ctx);
}

/*
 * Delete ust app event safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_event(int sock, struct ust_app_event *ua_event,
		struct ust_app *app)
{
	int ret;

	assert(ua_event);

	free(ua_event->filter);
	if (ua_event->exclusion != NULL) {
		free(ua_event->exclusion);
	}
	if (ua_event->obj != NULL) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_object(sock, ua_event->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release event obj failed with ret %d",
					sock, ret);
		}
		free(ua_event->obj);
	}
	free(ua_event);
}

/*
 * Release ust data object of the given stream.
 *
 * Return 0 on success or else a negative value.
 */
static int release_ust_app_stream(int sock, struct ust_app_stream *stream,
		struct ust_app *app)
{
	int ret = 0;

	assert(stream);

	if (stream->obj) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_object(sock, stream->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release stream obj failed with ret %d",
					sock, ret);
		}
		lttng_fd_put(LTTNG_FD_APPS, 2);
		free(stream->obj);
	}

	return ret;
}

/*
 * Delete ust app stream safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_stream(int sock, struct ust_app_stream *stream,
		struct ust_app *app)
{
	assert(stream);

	(void) release_ust_app_stream(sock, stream, app);
	free(stream);
}

/*
 * We need to execute ht_destroy outside of RCU read-side critical
 * section and outside of call_rcu thread, so we postpone its execution
 * using ht_cleanup_push. It is simpler than to change the semantic of
 * the many callers of delete_ust_app_session().
 */
static
void delete_ust_app_channel_rcu(struct rcu_head *head)
{
	struct ust_app_channel *ua_chan =
		caa_container_of(head, struct ust_app_channel, rcu_head);

	ht_cleanup_push(ua_chan->ctx);
	ht_cleanup_push(ua_chan->events);
	free(ua_chan);
}

/*
 * Extract the lost packet or discarded events counter when the channel is
 * being deleted and store the value in the parent channel so we can
 * access it from lttng list and at stop/destroy.
 *
 * The session list lock must be held by the caller.
 */
static
void save_per_pid_lost_discarded_counters(struct ust_app_channel *ua_chan)
{
	uint64_t discarded = 0, lost = 0;
	struct ltt_session *session;
	struct ltt_ust_channel *uchan;

	if (ua_chan->attr.type != LTTNG_UST_CHAN_PER_CPU) {
		return;
	}

	rcu_read_lock();
	session = session_find_by_id(ua_chan->session->tracing_id);
	if (!session || !session->ust_session) {
		/*
		 * Not finding the session is not an error because there are
		 * multiple ways the channels can be torn down.
		 *
		 * 1) The session daemon can initiate the destruction of the
		 *    ust app session after receiving a destroy command or
		 *    during its shutdown/teardown.
		 * 2) The application, since we are in per-pid tracing, is
		 *    unregistering and tearing down its ust app session.
		 *
		 * Both paths are protected by the session list lock which
		 * ensures that the accounting of lost packets and discarded
		 * events is done exactly once. The session is then unpublished
		 * from the session list, resulting in this condition.
		 */
		goto end;
	}

	if (ua_chan->attr.overwrite) {
		consumer_get_lost_packets(ua_chan->session->tracing_id,
				ua_chan->key, session->ust_session->consumer,
				&lost);
	} else {
		consumer_get_discarded_events(ua_chan->session->tracing_id,
				ua_chan->key, session->ust_session->consumer,
				&discarded);
	}
	uchan = trace_ust_find_channel_by_name(
			session->ust_session->domain_global.channels,
			ua_chan->name);
	if (!uchan) {
		ERR("Missing UST channel to store discarded counters");
		goto end;
	}

	uchan->per_pid_closed_app_discarded += discarded;
	uchan->per_pid_closed_app_lost += lost;

end:
	rcu_read_unlock();
}

/*
 * Delete ust app channel safely. RCU read lock must be held before calling
 * this function.
 *
 * The session list lock must be held by the caller.
 */
static
void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;
	struct ust_app_stream *stream, *stmp;
	struct ust_registry_session *registry;

	assert(ua_chan);

	DBG3("UST app deleting channel %s", ua_chan->name);

	/* Wipe stream */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		cds_list_del(&stream->list);
		delete_ust_app_stream(sock, stream, app);
	}

	/* Wipe context */
	cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
		cds_list_del(&ua_ctx->list);
		ret = lttng_ht_del(ua_chan->ctx, &iter);
		assert(!ret);
		delete_ust_app_ctx(sock, ua_ctx, app);
	}

	/* Wipe events */
	cds_lfht_for_each_entry(ua_chan->events->ht, &iter.iter, ua_event,
			node.node) {
		ret = lttng_ht_del(ua_chan->events, &iter);
		assert(!ret);
		delete_ust_app_event(sock, ua_event, app);
	}

	if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
		/* Wipe and free registry from session registry. */
		registry = get_session_registry(ua_chan->session);
		if (registry) {
			ust_registry_channel_del_free(registry, ua_chan->key);
		}
		save_per_pid_lost_discarded_counters(ua_chan);
	}

	if (ua_chan->obj != NULL) {
		/* Remove channel from application UST object descriptor. */
		iter.iter.node = &ua_chan->ust_objd_node.node;
		ret = lttng_ht_del(app->ust_objd, &iter);
		assert(!ret);
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_object(sock, ua_chan->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release channel obj failed with ret %d",
					sock, ret);
		}
		lttng_fd_put(LTTNG_FD_APPS, 1);
		free(ua_chan->obj);
	}
	call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
}

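/*
 * Serialize the "register done" command sent to an application: take the
 * per-app socket lock around ustctl_register_done() so the command socket
 * is not used concurrently by another thread.
 */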
int ust_app_register_done(struct ust_app *app)
{
	int ret;

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_register_done(app->sock);
	pthread_mutex_unlock(&app->sock_lock);
	return ret;
}

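/*
 * Release a UST object for the given application. The app pointer may be
 * NULL, in which case no socket lock is taken and an invalid socket (-1)
 * is passed down to ustctl_release_object().
 */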
int ust_app_release_object(struct ust_app *app, struct lttng_ust_object_data *data)
{
	int ret, sock;

	if (app) {
		pthread_mutex_lock(&app->sock_lock);
		sock = app->sock;
	} else {
		sock = -1;
	}
	ret = ustctl_release_object(sock, data);
	if (app) {
		pthread_mutex_unlock(&app->sock_lock);
	}
	return ret;
}

/*
 * Push metadata to consumer socket.
 *
 * RCU read-side lock must be held to guarantee existence of socket.
 * Must be called with the ust app session lock held.
 * Must be called with the registry lock held.
 *
 * On success, return the len of metadata pushed or else a negative value.
 * Returning a -EPIPE return value means we could not send the metadata,
 * but it can be caused by recoverable errors (e.g. the application has
 * terminated concurrently).
 */
ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
		struct consumer_socket *socket, int send_zero_data)
{
	int ret;
	char *metadata_str = NULL;
	size_t len, offset, new_metadata_len_sent;
	ssize_t ret_val;
	uint64_t metadata_key, metadata_version;

	assert(registry);
	assert(socket);

	metadata_key = registry->metadata_key;

	/*
	 * Means that no metadata was assigned to the session. This can
	 * happen if no start has been done previously.
	 */
	if (!metadata_key) {
		return 0;
	}

	offset = registry->metadata_len_sent;
	len = registry->metadata_len - registry->metadata_len_sent;
	new_metadata_len_sent = registry->metadata_len;
	metadata_version = registry->metadata_version;
	if (len == 0) {
		DBG3("No metadata to push for metadata key %" PRIu64,
				registry->metadata_key);
		ret_val = len;
		if (send_zero_data) {
			DBG("No metadata to push");
			goto push_data;
		}
		goto end;
	}

	/* Allocate only what we have to send. */
	metadata_str = zmalloc(len);
	if (!metadata_str) {
		PERROR("zmalloc ust app metadata string");
		ret_val = -ENOMEM;
		goto error;
	}
	/* Copy what we haven't sent out. */
	memcpy(metadata_str, registry->metadata + offset, len);

push_data:
	pthread_mutex_unlock(&registry->lock);
	/*
	 * We need to unlock the registry while we push metadata to
	 * break a circular dependency between the consumerd metadata
	 * lock and the sessiond registry lock. Indeed, pushing metadata
	 * to the consumerd awaits that it gets pushed all the way to
	 * relayd, but doing so requires grabbing the metadata lock. If
	 * a concurrent metadata request is being performed by
	 * consumerd, this can try to grab the registry lock on the
	 * sessiond while holding the metadata lock on the consumer
	 * daemon. Those push and pull schemes are performed on two
	 * different bidirectional communication sockets.
	 */
	ret = consumer_push_metadata(socket, metadata_key,
			metadata_str, len, offset, metadata_version);
	pthread_mutex_lock(&registry->lock);
	if (ret < 0) {
		/*
		 * There is an acceptable race here between the registry
		 * metadata key assignment and the creation on the
		 * consumer. The session daemon can concurrently push
		 * metadata for this registry while being created on the
		 * consumer since the metadata key of the registry is
		 * assigned *before* it is setup to avoid the consumer
		 * to ask for metadata that could possibly be not found
		 * in the session daemon.
		 *
		 * The metadata will get pushed either by the session
		 * being stopped or the consumer requesting metadata if
		 * that race is triggered.
		 */
		if (ret == -LTTCOMM_CONSUMERD_CHANNEL_FAIL) {
			ret = 0;
		} else {
			ERR("Error pushing metadata to consumer");
		}
		ret_val = ret;
		goto error_push;
	} else {
		/*
		 * Metadata may have been concurrently pushed, since
		 * we're not holding the registry lock while pushing to
		 * consumer. This is handled by the fact that we send
		 * the metadata content, size, and the offset at which
		 * that metadata belongs. This may arrive out of order
		 * on the consumer side, and the consumer is able to
		 * deal with overlapping fragments, which must be
		 * contiguous starting from offset 0. We keep the
		 * largest metadata_len_sent value of the concurrent
		 * send.
		 */
		registry->metadata_len_sent =
			max_t(size_t, registry->metadata_len_sent,
				new_metadata_len_sent);
	}
	free(metadata_str);
	return len;

end:
error:
	if (ret_val) {
		/*
		 * On error, flag the registry that the metadata is
		 * closed. We were unable to push anything and this
		 * means that either the consumer is not responding or
		 * the metadata cache has been destroyed on the
		 * consumer.
		 */
		registry->metadata_closed = 1;
	}
error_push:
	free(metadata_str);
	return ret_val;
}

/*
 * For a given application and session, push metadata to consumer.
 * The socket to use to send the metadata is retrieved from the consumer
 * output of the session.
 * RCU read-side lock must be held while calling this function,
 * therefore ensuring existence of registry. It also ensures existence
 * of socket throughout this function.
 *
 * Return 0 on success else a negative error.
 * Returning a -EPIPE return value means we could not send the metadata,
 * but it can be caused by recoverable errors (e.g. the application has
 * terminated concurrently).
 */
static int push_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret_val;
	ssize_t ret;
	struct consumer_socket *socket;

	assert(registry);
	assert(consumer);

	pthread_mutex_lock(&registry->lock);
	if (registry->metadata_closed) {
		ret_val = -EPIPE;
		goto error;
	}

	/* Get consumer socket to use to push the metadata.*/
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret_val = -1;
		goto error;
	}

	ret = ust_app_push_metadata(registry, socket, 0);
	if (ret < 0) {
		ret_val = ret;
		goto error;
	}
	pthread_mutex_unlock(&registry->lock);
	return 0;

error:
	pthread_mutex_unlock(&registry->lock);
	return ret_val;
}

/*
 * Send to the consumer a close metadata command for the given session. Once
 * done, the metadata channel is deleted and the session metadata pointer is
 * nullified. The session lock MUST be held unless the application is
 * in the destroy path.
 *
 * Return 0 on success else a negative value.
 */
static int close_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret;
	struct consumer_socket *socket;

	assert(registry);
	assert(consumer);

	rcu_read_lock();

	pthread_mutex_lock(&registry->lock);

	if (!registry->metadata_key || registry->metadata_closed) {
		ret = 0;
		goto end;
	}

	/* Get consumer socket to use to push the metadata.*/
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret = -1;
		goto error;
	}

	ret = consumer_close_metadata(socket, registry->metadata_key);
	if (ret < 0) {
		goto error;
	}

error:
	/*
	 * Metadata closed. Even on error this means that the consumer is not
	 * responding or not found so either way a second close should NOT be
	 * emitted for this registry.
	 */
	registry->metadata_closed = 1;
end:
	pthread_mutex_unlock(&registry->lock);
	rcu_read_unlock();
	return ret;
}

/*
 * We need to execute ht_destroy outside of RCU read-side critical
 * section and outside of call_rcu thread, so we postpone its execution
 * using ht_cleanup_push. It is simpler than to change the semantic of
 * the many callers of delete_ust_app_session().
 */
static
void delete_ust_app_session_rcu(struct rcu_head *head)
{
	struct ust_app_session *ua_sess =
		caa_container_of(head, struct ust_app_session, rcu_head);

	ht_cleanup_push(ua_sess->channels);
	free(ua_sess);
}

/*
 * Delete ust app session safely. RCU read lock must be held before calling
 * this function.
 *
 * The session list lock must be held by the caller.
 */
static
void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_channel *ua_chan;
	struct ust_registry_session *registry;

	assert(ua_sess);

	pthread_mutex_lock(&ua_sess->lock);

	assert(!ua_sess->deleted);
	ua_sess->deleted = true;

	registry = get_session_registry(ua_sess);
	if (registry) {
		/* Push metadata for application before freeing the application. */
		(void) push_metadata(registry, ua_sess->consumer);

		/*
		 * Don't ask to close metadata for global per UID buffers. Close
		 * metadata only on destroy trace session in this case. Also, the
		 * previous push metadata could have flagged the metadata registry
		 * to close so don't send a close command if it is already closed.
		 */
		if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
			/* And ask to close it for this session registry. */
			(void) close_metadata(registry, ua_sess->consumer);
		}
	}

	cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
			node.node) {
		ret = lttng_ht_del(ua_sess->channels, &iter);
		assert(!ret);
		delete_ust_app_channel(sock, ua_chan, app);
	}

	/* In case of per PID, the registry is kept in the session. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
		struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
		if (reg_pid) {
			buffer_reg_pid_remove(reg_pid);
			buffer_reg_pid_destroy(reg_pid);
		}
	}

	if (ua_sess->handle != -1) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_release_handle(sock, ua_sess->handle);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release session handle failed with ret %d",
					sock, ret);
		}
		/* Remove session from application UST object descriptor. */
		iter.iter.node = &ua_sess->ust_objd_node.node;
		ret = lttng_ht_del(app->ust_sessions_objd, &iter);
		assert(!ret);
	}

	pthread_mutex_unlock(&ua_sess->lock);

	consumer_output_put(ua_sess->consumer);

	call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
}

/*
 * Delete a traceable application structure from the global list. Never call
 * this function outside of a call_rcu call.
 *
 * RCU read side lock should _NOT_ be held when calling this function.
 */
static
void delete_ust_app(struct ust_app *app)
{
	int ret, sock;
	struct ust_app_session *ua_sess, *tmp_ua_sess;

	/*
	 * The session list lock must be held during this function to guarantee
	 * the existence of ua_sess.
	 */
	session_lock_list();
	/* Delete ust app sessions info */
	sock = app->sock;
	app->sock = -1;

	/* Wipe sessions */
	cds_list_for_each_entry_safe(ua_sess, tmp_ua_sess, &app->teardown_head,
			teardown_node) {
		/* Free every object in the session and the session. */
		rcu_read_lock();
		delete_ust_app_session(sock, ua_sess, app);
		rcu_read_unlock();
	}

	ht_cleanup_push(app->sessions);
	ht_cleanup_push(app->ust_sessions_objd);
	ht_cleanup_push(app->ust_objd);

	/*
	 * Wait until we have deleted the application from the sock hash table
	 * before closing this socket, otherwise an application could re-use the
	 * socket ID and race with the teardown, using the same hash table entry.
	 *
	 * It's OK to leave the close in call_rcu. We want it to stay unique for
	 * all RCU readers that could run concurrently with unregister app,
	 * therefore we _need_ to only close that socket after a grace period. So
	 * it should stay in this RCU callback.
	 *
	 * This close() is a very important step of the synchronization model so
	 * every modification to this function must be carefully reviewed.
	 */
	ret = close(sock);
	if (ret) {
		PERROR("close");
	}
	lttng_fd_put(LTTNG_FD_APPS, 1);

	DBG2("UST app pid %d deleted", app->pid);
	free(app);
	session_unlock_list();
}

/*
 * URCU intermediate call to delete an UST app.
 */
static
void delete_ust_app_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_ulong *node =
		caa_container_of(head, struct lttng_ht_node_ulong, head);
	struct ust_app *app =
		caa_container_of(node, struct ust_app, pid_n);

	DBG3("Call RCU deleting app PID %d", app->pid);
	delete_ust_app(app);
}

/*
 * Delete the session from the application ht and delete the data structure by
 * freeing every object inside and releasing them.
 *
 * The session list lock must be held by the caller.
 */
static void destroy_app_session(struct ust_app *app,
		struct ust_app_session *ua_sess)
{
	int ret;
	struct lttng_ht_iter iter;

	assert(app);
	assert(ua_sess);

	iter.iter.node = &ua_sess->node.node;
	ret = lttng_ht_del(app->sessions, &iter);
	if (ret) {
		/* Already scheduled for teardown. */
		goto end;
	}

	/* Once deleted, free the data structure. */
	delete_ust_app_session(app->sock, ua_sess, app);

end:
	return;
}

/*
 * Alloc new UST app session.
 */
static
struct ust_app_session *alloc_ust_app_session(struct ust_app *app)
{
	struct ust_app_session *ua_sess;

	/* Init most of the default values by allocating and zeroing */
	ua_sess = zmalloc(sizeof(struct ust_app_session));
	if (ua_sess == NULL) {
		PERROR("malloc");
		goto error_free;
	}

	ua_sess->handle = -1;
	ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
	ua_sess->metadata_attr.type = LTTNG_UST_CHAN_METADATA;
	pthread_mutex_init(&ua_sess->lock, NULL);

	return ua_sess;

error_free:
	return NULL;
}

/*
 * Alloc new UST app channel.
 */
static
struct ust_app_channel *alloc_ust_app_channel(char *name,
		struct ust_app_session *ua_sess,
		struct lttng_ust_channel_attr *attr)
{
	struct ust_app_channel *ua_chan;

	/* Init most of the default values by allocating and zeroing */
	ua_chan = zmalloc(sizeof(struct ust_app_channel));
	if (ua_chan == NULL) {
		PERROR("malloc");
		goto error;
	}

	/* Setup channel name */
	strncpy(ua_chan->name, name, sizeof(ua_chan->name));
	ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';

	ua_chan->enabled = 1;
	ua_chan->handle = -1;
	ua_chan->session = ua_sess;
	ua_chan->key = get_next_channel_key();
	ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
	lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);

	CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
	CDS_INIT_LIST_HEAD(&ua_chan->ctx_list);

	/* Copy attributes */
	if (attr) {
		/* Translate from lttng_ust_channel to ustctl_consumer_channel_attr. */
		ua_chan->attr.subbuf_size = attr->subbuf_size;
		ua_chan->attr.num_subbuf = attr->num_subbuf;
		ua_chan->attr.overwrite = attr->overwrite;
		ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
		ua_chan->attr.read_timer_interval = attr->read_timer_interval;
		ua_chan->attr.output = attr->output;
	}
	/* By default, the channel is a per cpu channel. */
	ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;

	DBG3("UST app channel %s allocated", ua_chan->name);

	return ua_chan;

error:
	return NULL;
}

/*
 * Allocate and initialize a UST app stream.
 *
 * Return newly allocated stream pointer or NULL on error.
 */
struct ust_app_stream *ust_app_alloc_stream(void)
{
	struct ust_app_stream *stream = NULL;

	stream = zmalloc(sizeof(*stream));
	if (stream == NULL) {
		PERROR("zmalloc ust app stream");
		goto error;
	}

	/* Zero could be a valid value for a handle so flag it to -1. */
	stream->handle = -1;

error:
	return stream;
}

/*
 * Alloc new UST app event.
 */
static
struct ust_app_event *alloc_ust_app_event(char *name,
		struct lttng_ust_event *attr)
{
	struct ust_app_event *ua_event;

	/* Init most of the default values by allocating and zeroing */
	ua_event = zmalloc(sizeof(struct ust_app_event));
	if (ua_event == NULL) {
		PERROR("malloc");
		goto error;
	}

	ua_event->enabled = 1;
	strncpy(ua_event->name, name, sizeof(ua_event->name));
	ua_event->name[sizeof(ua_event->name) - 1] = '\0';
	lttng_ht_node_init_str(&ua_event->node, ua_event->name);

	/* Copy attributes */
	if (attr) {
		memcpy(&ua_event->attr, attr, sizeof(ua_event->attr));
	}

	DBG3("UST app event %s allocated", ua_event->name);

	return ua_event;

error:
	return NULL;
}

/*
 * Alloc new UST app context.
 */
static
struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context_attr *uctx)
{
	struct ust_app_ctx *ua_ctx;

	ua_ctx = zmalloc(sizeof(struct ust_app_ctx));
	if (ua_ctx == NULL) {
		goto error;
	}

	CDS_INIT_LIST_HEAD(&ua_ctx->list);

	if (uctx) {
		memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
		if (uctx->ctx == LTTNG_UST_CONTEXT_APP_CONTEXT) {
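			/*
			 * Application contexts reference provider and context
			 * name strings; duplicate them so this ust_app_ctx owns
			 * its own copies rather than aliasing the caller's.
			 */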
			char *provider_name = NULL, *ctx_name = NULL;

			provider_name = strdup(uctx->u.app_ctx.provider_name);
			ctx_name = strdup(uctx->u.app_ctx.ctx_name);
			if (!provider_name || !ctx_name) {
				free(provider_name);
				free(ctx_name);
				goto error;
			}

			ua_ctx->ctx.u.app_ctx.provider_name = provider_name;
			ua_ctx->ctx.u.app_ctx.ctx_name = ctx_name;
		}
	}

	DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);
	return ua_ctx;
error:
	free(ua_ctx);
	return NULL;
}

/*
 * Allocate a filter and copy the given original filter.
 *
 * Return allocated filter or NULL on error.
 */
static struct lttng_filter_bytecode *copy_filter_bytecode(
		struct lttng_filter_bytecode *orig_f)
{
	struct lttng_filter_bytecode *filter = NULL;

	/* Copy filter bytecode */
	filter = zmalloc(sizeof(*filter) + orig_f->len);
	if (!filter) {
		PERROR("zmalloc alloc filter bytecode");
		goto error;
	}

	memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);

error:
	return filter;
}

/*
 * Create a liblttng-ust filter bytecode from given bytecode.
 *
 * Return allocated filter or NULL on error.
 */
static struct lttng_ust_filter_bytecode *create_ust_bytecode_from_bytecode(
		struct lttng_filter_bytecode *orig_f)
{
	struct lttng_ust_filter_bytecode *filter = NULL;

	/* Copy filter bytecode */
	filter = zmalloc(sizeof(*filter) + orig_f->len);
	if (!filter) {
		PERROR("zmalloc alloc ust filter bytecode");
		goto error;
	}

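	/*
	 * The public and UST filter bytecode structures are expected to share
	 * the same layout, which is what makes the single memcpy below valid.
	 */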
	assert(sizeof(struct lttng_filter_bytecode) ==
			sizeof(struct lttng_ust_filter_bytecode));
	memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
error:
	return filter;
}

/*
 * Find an ust_app using the sock and return it. RCU read side lock must be
 * held before calling this helper function.
 */
struct ust_app *ust_app_find_by_sock(int sock)
{
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;

	lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node == NULL) {
		DBG2("UST app find by sock %d not found", sock);
		goto error;
	}

	return caa_container_of(node, struct ust_app, sock_n);

error:
	return NULL;
}

/*
 * Find an ust_app using the notify sock and return it. RCU read side lock must
 * be held before calling this helper function.
 */
static struct ust_app *find_app_by_notify_sock(int sock)
{
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;

	lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
			&iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node == NULL) {
		DBG2("UST app find by notify sock %d not found", sock);
		goto error;
	}

	return caa_container_of(node, struct ust_app, notify_sock_n);

error:
	return NULL;
}

/*
 * Lookup for an ust app event based on the event name, filter bytecode,
 * event loglevel and exclusions.
 *
 * Return an ust_app_event object or NULL on error.
 */
static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
		char *name, struct lttng_filter_bytecode *filter,
		int loglevel_value,
		const struct lttng_event_exclusion *exclusion)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *node;
	struct ust_app_event *event = NULL;
	struct ust_app_ht_key key;

	assert(name);
	assert(ht);

	/* Setup key for event lookup. */
	key.name = name;
	key.filter = filter;
	key.loglevel_type = loglevel_value;
	/* lttng_event_exclusion and lttng_ust_event_exclusion structures are similar */
	key.exclusion = exclusion;

	/* Lookup using the event name as hash and a custom match fct. */
	cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
			ht_match_ust_app_event, &key, &iter.iter);
	node = lttng_ht_iter_get_node_str(&iter);
	if (node == NULL) {
		goto end;
	}

	event = caa_container_of(node, struct ust_app_event, node);

end:
	return event;
}

/*
 * Create the channel context on the tracer.
 *
 * Called with UST app session lock held.
 */
static
int create_ust_channel_context(struct ust_app_channel *ua_chan,
		struct ust_app_ctx *ua_ctx, struct ust_app *app)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_add_context(app->sock, &ua_ctx->ctx,
			ua_chan->obj, &ua_ctx->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app create channel context failed for app (pid: %d) "
					"with ret %d", app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app add context failed. Application is dead.");
		}
		goto error;
	}

	ua_ctx->handle = ua_ctx->obj->handle;

	DBG2("UST app context handle %d created successfully for channel %s",
			ua_ctx->handle, ua_chan->name);

error:
	health_code_update();
	return ret;
}

/*
 * Set the filter on the tracer.
 */
static
int set_ust_event_filter(struct ust_app_event *ua_event,
		struct ust_app *app)
{
	int ret;
	struct lttng_ust_filter_bytecode *ust_bytecode = NULL;

	health_code_update();

	if (!ua_event->filter) {
		ret = 0;
		goto error;
	}

	ust_bytecode = create_ust_bytecode_from_bytecode(ua_event->filter);
	if (!ust_bytecode) {
		ret = -LTTNG_ERR_NOMEM;
		goto error;
	}
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_set_filter(app->sock, ust_bytecode,
			ua_event->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app event %s filter failed for app (pid: %d) "
					"with ret %d", ua_event->attr.name, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app filter event failed. Application is dead.");
		}
		goto error;
	}

	DBG2("UST filter set successfully for event %s", ua_event->name);

error:
	health_code_update();
	free(ust_bytecode);
	return ret;
}

static
struct lttng_ust_event_exclusion *create_ust_exclusion_from_exclusion(
		struct lttng_event_exclusion *exclusion)
{
	struct lttng_ust_event_exclusion *ust_exclusion = NULL;
	size_t exclusion_alloc_size = sizeof(struct lttng_ust_event_exclusion) +
		LTTNG_UST_SYM_NAME_LEN * exclusion->count;

	ust_exclusion = zmalloc(exclusion_alloc_size);
	if (!ust_exclusion) {
		PERROR("malloc");
		goto end;
	}

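	/*
	 * Both exclusion structures are expected to share the same layout,
	 * hence the assert guarding the raw copy below.
	 */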
	assert(sizeof(struct lttng_event_exclusion) ==
			sizeof(struct lttng_ust_event_exclusion));
	memcpy(ust_exclusion, exclusion, exclusion_alloc_size);
end:
	return ust_exclusion;
}

/*
 * Set event exclusions on the tracer.
 */
static
int set_ust_event_exclusion(struct ust_app_event *ua_event,
		struct ust_app *app)
{
	int ret;
	struct lttng_ust_event_exclusion *ust_exclusion = NULL;

	health_code_update();

	if (!ua_event->exclusion || !ua_event->exclusion->count) {
		ret = 0;
		goto error;
	}

	ust_exclusion = create_ust_exclusion_from_exclusion(
			ua_event->exclusion);
	if (!ust_exclusion) {
		ret = -LTTNG_ERR_NOMEM;
		goto error;
	}
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_set_exclusion(app->sock, ust_exclusion, ua_event->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app event %s exclusions failed for app (pid: %d) "
					"with ret %d", ua_event->attr.name, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app event exclusion failed. Application is dead.");
		}
		goto error;
	}

	DBG2("UST exclusion set successfully for event %s", ua_event->name);

error:
	health_code_update();
	free(ust_exclusion);
	return ret;
}

/*
 * Disable the specified event on to UST tracer for the UST session.
 */
static int disable_ust_event(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_disable(app->sock, ua_event->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app event %s disable failed for app (pid: %d) "
					"and session handle %d with ret %d",
					ua_event->attr.name, app->pid, ua_sess->handle, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app disable event failed. Application is dead.");
		}
		goto error;
	}

	DBG2("UST app event %s disabled successfully for app (pid: %d)",
			ua_event->attr.name, app->pid);

error:
	health_code_update();
	return ret;
}

/*
 * Disable the specified channel on to UST tracer for the UST session.
 */
static int disable_ust_channel(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_disable(app->sock, ua_chan->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app channel %s disable failed for app (pid: %d) "
					"and session handle %d with ret %d",
					ua_chan->name, app->pid, ua_sess->handle, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app disable channel failed. Application is dead.");
		}
		goto error;
	}

	DBG2("UST app channel %s disabled successfully for app (pid: %d)",
			ua_chan->name, app->pid);

error:
	health_code_update();
	return ret;
}

/*
 * Enable the specified channel on to UST tracer for the UST session.
 */
static int enable_ust_channel(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_enable(app->sock, ua_chan->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app channel %s enable failed for app (pid: %d) "
					"and session handle %d with ret %d",
					ua_chan->name, app->pid, ua_sess->handle, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app enable channel failed. Application is dead.");
		}
		goto error;
	}

	ua_chan->enabled = 1;

	DBG2("UST app channel %s enabled successfully for app (pid: %d)",
			ua_chan->name, app->pid);

error:
	health_code_update();
	return ret;
}

/*
 * Enable the specified event on to UST tracer for the UST session.
 */
static int enable_ust_event(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_enable(app->sock, ua_event->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app event %s enable failed for app (pid: %d) "
					"and session handle %d with ret %d",
					ua_event->attr.name, app->pid, ua_sess->handle, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app enable event failed. Application is dead.");
		}
		goto error;
	}

	DBG2("UST app event %s enabled successfully for app (pid: %d)",
			ua_event->attr.name, app->pid);

error:
	health_code_update();
	return ret;
}

099e26bd 1593/*
7972aab2 1594 * Send channel and stream buffer to application.
4f3ab6ee 1595 *
ffe60014 1596 * Return 0 on success. On error, a negative value is returned.
4f3ab6ee 1597 */
7972aab2
DG
1598static int send_channel_pid_to_ust(struct ust_app *app,
1599 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
4f3ab6ee
DG
1600{
1601 int ret;
ffe60014 1602 struct ust_app_stream *stream, *stmp;
4f3ab6ee
DG
1603
1604 assert(app);
ffe60014 1605 assert(ua_sess);
4f3ab6ee 1606 assert(ua_chan);
4f3ab6ee 1607
840cb59c 1608 health_code_update();
4f3ab6ee 1609
7972aab2
DG
1610 DBG("UST app sending channel %s to UST app sock %d", ua_chan->name,
1611 app->sock);
86acf0da 1612
ffe60014
DG
1613 /* Send channel to the application. */
1614 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
a7169585
MD
1615 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1616 ret = -ENOTCONN; /* Caused by app exiting. */
1617 goto error;
1618 } else if (ret < 0) {
b551a063
DG
1619 goto error;
1620 }
1621
d88aee68
DG
1622 health_code_update();
1623
ffe60014
DG
1624 /* Send all streams to application. */
1625 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
1626 ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
a7169585
MD
1627 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1628 ret = -ENOTCONN; /* Caused by app exiting. */
1629 goto error;
1630 } else if (ret < 0) {
ffe60014
DG
1631 goto error;
1632 }
1633 /* We don't need the stream anymore once sent to the tracer. */
1634 cds_list_del(&stream->list);
fb45065e 1635 delete_ust_app_stream(-1, stream, app);
ffe60014 1636 }
ffe60014
DG
1637 /* Flag the channel that it is sent to the application. */
1638 ua_chan->is_sent = 1;
ffe60014 1639
b551a063 1640error:
840cb59c 1641 health_code_update();
b551a063
DG
1642 return ret;
1643}
1644
91d76f53 1645/*
5b4a0ec0 1646 * Create the specified event onto the UST tracer for a UST session.
d0b96690
DG
1647 *
1648 * Should be called with session mutex held.
91d76f53 1649 */
edb67388
DG
1650static
1651int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
1652 struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
91d76f53 1653{
5b4a0ec0 1654 int ret = 0;
284d8f55 1655
840cb59c 1656 health_code_update();
86acf0da 1657
5b4a0ec0 1658 /* Create UST event on tracer */
fb45065e 1659 pthread_mutex_lock(&app->sock_lock);
852d0037 1660 ret = ustctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
5b4a0ec0 1661 &ua_event->obj);
fb45065e 1662 pthread_mutex_unlock(&app->sock_lock);
5b4a0ec0 1663 if (ret < 0) {
ffe60014
DG
1664 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1665 ERR("Error ustctl create event %s for app pid: %d with ret %d",
1666 ua_event->attr.name, app->pid, ret);
1667 } else {
3757b385
DG
1668 /*
1669 * This is normal behavior, an application can die during the
1670 * creation process. Don't report an error so the execution can
1671 * continue normally.
1672 */
1673 ret = 0;
ffe60014
DG
1674 DBG3("UST app create event failed. Application is dead.");
1675 }
5b4a0ec0 1676 goto error;
91d76f53 1677 }
f6a9efaa 1678
5b4a0ec0 1679 ua_event->handle = ua_event->obj->handle;
284d8f55 1680
5b4a0ec0 1681 DBG2("UST app event %s created successfully for pid:%d",
852d0037 1682 ua_event->attr.name, app->pid);
f6a9efaa 1683
840cb59c 1684 health_code_update();
86acf0da 1685
025faf73
DG
1686 /* Set filter if one is present. */
1687 if (ua_event->filter) {
1688 ret = set_ust_event_filter(ua_event, app);
1689 if (ret < 0) {
1690 goto error;
1691 }
1692 }
1693
7cc9a73c
JI
1694 /* Set exclusions for the event */
1695 if (ua_event->exclusion) {
1696 ret = set_ust_event_exclusion(ua_event, app);
1697 if (ret < 0) {
1698 goto error;
1699 }
1700 }
1701
8535a6d9 1702 /* If event not enabled, disable it on the tracer */
40113787
MD
1703 if (ua_event->enabled) {
1704 /*
1705 * We now need to explicitly enable the event, since it
1706 * is now disabled at creation.
1707 */
1708 ret = enable_ust_event(app, ua_sess, ua_event);
1709 if (ret < 0) {
1710 /*
1711 * If we hit an EPERM, something is wrong with our enable call. If
1712 * we get an EEXIST, there is a problem on the tracer side since we
1713 * just created it.
1714 */
1715 switch (ret) {
1716 case -LTTNG_UST_ERR_PERM:
1717 /* Code flow problem */
1718 assert(0);
1719 case -LTTNG_UST_ERR_EXIST:
1720 /* It's OK for our use case. */
1721 ret = 0;
1722 break;
1723 default:
1724 break;
1725 }
1726 goto error;
1727 }
8535a6d9
DG
1728 }
1729
5b4a0ec0 1730error:
840cb59c 1731 health_code_update();
5b4a0ec0 1732 return ret;
91d76f53 1733}
48842b30 1734
5b4a0ec0
DG
1735/*
1736 * Copy data between a UST app event and an LTT event.
1737 */
421cb601 1738static void shadow_copy_event(struct ust_app_event *ua_event,
48842b30
DG
1739 struct ltt_ust_event *uevent)
1740{
b4ffad32
JI
1741 size_t exclusion_alloc_size;
1742
48842b30
DG
1743 strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
1744 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
1745
fc34caaa
DG
1746 ua_event->enabled = uevent->enabled;
1747
5b4a0ec0
DG
1748 /* Copy event attributes */
1749 memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));
1750
53a80697
MD
1751 /* Copy filter bytecode */
1752 if (uevent->filter) {
51755dc8 1753 ua_event->filter = copy_filter_bytecode(uevent->filter);
025faf73 1754		/* Filter might be NULL here in case of ENOMEM. */
53a80697 1755 }
b4ffad32
JI
1756
1757 /* Copy exclusion data */
1758 if (uevent->exclusion) {
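		/* The exclusion structure is followed by one symbol name per excluded pattern, hence the variable allocation size. */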
51755dc8 1759 exclusion_alloc_size = sizeof(struct lttng_event_exclusion) +
b4ffad32
JI
1760 LTTNG_UST_SYM_NAME_LEN * uevent->exclusion->count;
1761 ua_event->exclusion = zmalloc(exclusion_alloc_size);
5f8df26c
JI
1762 if (ua_event->exclusion == NULL) {
1763 PERROR("malloc");
1764 } else {
1765 memcpy(ua_event->exclusion, uevent->exclusion,
1766 exclusion_alloc_size);
b4ffad32
JI
1767 }
1768 }
48842b30
DG
1769}
1770
5b4a0ec0
DG
1771/*
1772 * Copy data between a UST app channel and an LTT channel.
1773 */
421cb601 1774static void shadow_copy_channel(struct ust_app_channel *ua_chan,
48842b30
DG
1775 struct ltt_ust_channel *uchan)
1776{
bec39940 1777 struct lttng_ht_iter iter;
48842b30 1778 struct ltt_ust_event *uevent;
55cc08a6 1779 struct ltt_ust_context *uctx;
48842b30
DG
1780 struct ust_app_event *ua_event;
1781
fc34caaa 1782 DBG2("UST app shadow copy of channel %s started", ua_chan->name);
48842b30
DG
1783
1784 strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
1785 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
ffe60014 1786
1624d5b7
JD
1787 ua_chan->tracefile_size = uchan->tracefile_size;
1788 ua_chan->tracefile_count = uchan->tracefile_count;
1789
ffe60014
DG
1790 /* Copy event attributes since the layout is different. */
1791 ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
1792 ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
1793 ua_chan->attr.overwrite = uchan->attr.overwrite;
1794 ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
1795 ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
1796 ua_chan->attr.output = uchan->attr.output;
1797 /*
1798 * Note that the attribute channel type is not set since the channel on the
1799 * tracing registry side does not have this information.
1800 */
48842b30 1801
fc34caaa 1802 ua_chan->enabled = uchan->enabled;
7972aab2 1803 ua_chan->tracing_channel_id = uchan->id;
fc34caaa 1804
31746f93 1805 cds_list_for_each_entry(uctx, &uchan->ctx_list, list) {
bdf64013
JG
1806 struct ust_app_ctx *ua_ctx = alloc_ust_app_ctx(&uctx->ctx);
1807
55cc08a6
DG
1808 if (ua_ctx == NULL) {
1809 continue;
1810 }
bec39940
DG
1811 lttng_ht_node_init_ulong(&ua_ctx->node,
1812 (unsigned long) ua_ctx->ctx.ctx);
aa3514e9 1813 lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
31746f93 1814 cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
55cc08a6 1815 }
48842b30 1816
421cb601 1817 /* Copy all events from ltt ust channel to ust app channel */
bec39940 1818 cds_lfht_for_each_entry(uchan->events->ht, &iter.iter, uevent, node.node) {
18eace3b 1819 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
39c5a3a7 1820 uevent->filter, uevent->attr.loglevel, uevent->exclusion);
18eace3b 1821 if (ua_event == NULL) {
421cb601 1822 DBG2("UST event %s not found on shadow copy channel",
48842b30 1823 uevent->attr.name);
284d8f55 1824 ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
48842b30 1825 if (ua_event == NULL) {
5b4a0ec0 1826 continue;
48842b30 1827 }
421cb601 1828 shadow_copy_event(ua_event, uevent);
d0b96690 1829 add_unique_ust_app_event(ua_chan, ua_event);
48842b30 1830 }
48842b30
DG
1831 }
1832
fc34caaa 1833 DBG3("UST app shadow copy of channel %s done", ua_chan->name);
48842b30
DG
1834}
1835
5b4a0ec0
DG
1836/*
1837 * Copy data between a UST app session and a regular LTT session.
1838 */
421cb601 1839static void shadow_copy_session(struct ust_app_session *ua_sess,
bec39940 1840 struct ltt_ust_session *usess, struct ust_app *app)
48842b30 1841{
bec39940
DG
1842 struct lttng_ht_node_str *ua_chan_node;
1843 struct lttng_ht_iter iter;
48842b30
DG
1844 struct ltt_ust_channel *uchan;
1845 struct ust_app_channel *ua_chan;
477d7741
MD
1846 time_t rawtime;
1847 struct tm *timeinfo;
1848 char datetime[16];
1849 int ret;
d7ba1388 1850 char tmp_shm_path[PATH_MAX];
477d7741
MD
1851
1852 /* Get date and time for unique app path */
1853 time(&rawtime);
1854 timeinfo = localtime(&rawtime);
1855 strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);
48842b30 1856
421cb601 1857 DBG2("Shadow copy of session handle %d", ua_sess->handle);
48842b30 1858
7972aab2
DG
1859 ua_sess->tracing_id = usess->id;
1860 ua_sess->id = get_next_session_id();
1861 ua_sess->uid = app->uid;
1862 ua_sess->gid = app->gid;
1863 ua_sess->euid = usess->uid;
1864 ua_sess->egid = usess->gid;
1865 ua_sess->buffer_type = usess->buffer_type;
1866 ua_sess->bits_per_long = app->bits_per_long;
6addfa37 1867
7972aab2 1868 /* There is only one consumer object per session possible. */
6addfa37 1869 consumer_output_get(usess->consumer);
7972aab2 1870 ua_sess->consumer = usess->consumer;
6addfa37 1871
2bba9e53 1872 ua_sess->output_traces = usess->output_traces;
ecc48a90 1873 ua_sess->live_timer_interval = usess->live_timer_interval;
84ad93e8
DG
1874 copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
1875 &usess->metadata_attr);
7972aab2
DG
1876
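	/* Build the trace output sub-path: per-PID buffers get a per-application name/pid/datetime directory, while per-UID buffers are keyed by uid and bitness. */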
1877 switch (ua_sess->buffer_type) {
1878 case LTTNG_BUFFER_PER_PID:
1879 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
dec56f6c 1880 DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
7972aab2
DG
1881 datetime);
1882 break;
1883 case LTTNG_BUFFER_PER_UID:
1884 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
1885 DEFAULT_UST_TRACE_UID_PATH, ua_sess->uid, app->bits_per_long);
1886 break;
1887 default:
1888 assert(0);
1889 goto error;
1890 }
477d7741
MD
1891 if (ret < 0) {
1892 PERROR("asprintf UST shadow copy session");
477d7741 1893 assert(0);
7972aab2 1894 goto error;
477d7741
MD
1895 }
1896
3d071855
MD
1897 strncpy(ua_sess->root_shm_path, usess->root_shm_path,
1898 sizeof(ua_sess->root_shm_path));
1899 ua_sess->root_shm_path[sizeof(ua_sess->root_shm_path) - 1] = '\0';
d7ba1388
MD
1900 strncpy(ua_sess->shm_path, usess->shm_path,
1901 sizeof(ua_sess->shm_path));
1902 ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
1903 if (ua_sess->shm_path[0]) {
1904 switch (ua_sess->buffer_type) {
1905 case LTTNG_BUFFER_PER_PID:
1906 ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
1907 DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s",
1908 app->name, app->pid, datetime);
1909 break;
1910 case LTTNG_BUFFER_PER_UID:
1911 ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
1912 DEFAULT_UST_TRACE_UID_PATH,
1913 app->uid, app->bits_per_long);
1914 break;
1915 default:
1916 assert(0);
1917 goto error;
1918 }
1919 if (ret < 0) {
1920 PERROR("sprintf UST shadow copy session");
1921 assert(0);
1922 goto error;
1923 }
1924 strncat(ua_sess->shm_path, tmp_shm_path,
1925 sizeof(ua_sess->shm_path) - strlen(ua_sess->shm_path) - 1);
1926 ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
1927 }
1928
48842b30 1929 /* Iterate over all channels in global domain. */
bec39940
DG
1930 cds_lfht_for_each_entry(usess->domain_global.channels->ht, &iter.iter,
1931 uchan, node.node) {
1932 struct lttng_ht_iter uiter;
ba767faf 1933
bec39940
DG
1934 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
1935 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
5b4a0ec0 1936 if (ua_chan_node != NULL) {
fc34caaa 1937			/* Channel already exists. Continuing. */
5b4a0ec0
DG
1938 continue;
1939 }
421cb601 1940
5b4a0ec0
DG
1941 DBG2("Channel %s not found on shadow session copy, creating it",
1942 uchan->name);
fb83fe64
JD
1943 ua_chan = alloc_ust_app_channel(uchan->name, ua_sess,
1944 &uchan->attr);
5b4a0ec0 1945 if (ua_chan == NULL) {
fc34caaa 1946			/* malloc failed. FIXME: might want to handle ENOMEM here. */
5b4a0ec0 1947 continue;
48842b30 1948 }
5b4a0ec0 1949 shadow_copy_channel(ua_chan, uchan);
ffe60014
DG
1950 /*
1951 * The concept of metadata channel does not exist on the tracing
1952 * registry side of the session daemon so this can only be a per CPU
1953 * channel and not metadata.
1954 */
1955 ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
1956
bec39940 1957 lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
48842b30 1958 }
6addfa37 1959 return;
7972aab2
DG
1960
1961error:
6addfa37 1962 consumer_output_put(ua_sess->consumer);
48842b30
DG
1963}
1964
78f0bacd
DG
1965/*
1966 * Lookup session wrapper.
1967 */
84cd17c6
MD
1968static
1969void __lookup_session_by_app(struct ltt_ust_session *usess,
bec39940 1970 struct ust_app *app, struct lttng_ht_iter *iter)
84cd17c6
MD
1971{
1972 /* Get right UST app session from app */
d9bf3ca4 1973 lttng_ht_lookup(app->sessions, &usess->id, iter);
84cd17c6
MD
1974}
1975
421cb601
DG
1976/*
1977 * Return ust app session from the app session hashtable using the UST session
a991f516 1978 * id.
421cb601 1979 */
48842b30
DG
1980static struct ust_app_session *lookup_session_by_app(
1981 struct ltt_ust_session *usess, struct ust_app *app)
1982{
bec39940 1983 struct lttng_ht_iter iter;
d9bf3ca4 1984 struct lttng_ht_node_u64 *node;
48842b30 1985
84cd17c6 1986 __lookup_session_by_app(usess, app, &iter);
d9bf3ca4 1987 node = lttng_ht_iter_get_node_u64(&iter);
48842b30
DG
1988 if (node == NULL) {
1989 goto error;
1990 }
1991
1992 return caa_container_of(node, struct ust_app_session, node);
1993
1994error:
1995 return NULL;
1996}
1997
7972aab2
DG
1998/*
1999 * Setup buffer registry per PID for the given session and application. If none
2000 * is found, a new one is created, added to the global registry and
2001 * initialized. If regp is valid, it's set with the newly created object.
2002 *
2003 * Return 0 on success or else a negative value.
2004 */
2005static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
2006 struct ust_app *app, struct buffer_reg_pid **regp)
2007{
2008 int ret = 0;
2009 struct buffer_reg_pid *reg_pid;
2010
2011 assert(ua_sess);
2012 assert(app);
2013
2014 rcu_read_lock();
2015
2016 reg_pid = buffer_reg_pid_find(ua_sess->id);
2017 if (!reg_pid) {
2018 /*
2019 * This is the create channel path meaning that if there is NO
2020 * registry available, we have to create one for this session.
2021 */
d7ba1388 2022 ret = buffer_reg_pid_create(ua_sess->id, &reg_pid,
3d071855 2023 ua_sess->root_shm_path, ua_sess->shm_path);
7972aab2
DG
2024 if (ret < 0) {
2025 goto error;
2026 }
7972aab2
DG
2027 } else {
2028 goto end;
2029 }
2030
2031 /* Initialize registry. */
2032 ret = ust_registry_session_init(&reg_pid->registry->reg.ust, app,
2033 app->bits_per_long, app->uint8_t_alignment,
2034 app->uint16_t_alignment, app->uint32_t_alignment,
af6142cf
MD
2035 app->uint64_t_alignment, app->long_alignment,
2036 app->byte_order, app->version.major,
3d071855
MD
2037 app->version.minor, reg_pid->root_shm_path,
2038 reg_pid->shm_path,
d7ba1388 2039 ua_sess->euid, ua_sess->egid);
7972aab2 2040 if (ret < 0) {
286c991a
MD
2041 /*
2042 * reg_pid->registry->reg.ust is NULL upon error, so we need to
2043 * destroy the buffer registry, because it is always expected
2044 * that if the buffer registry can be found, its ust registry is
2045 * non-NULL.
2046 */
2047 buffer_reg_pid_destroy(reg_pid);
7972aab2
DG
2048 goto error;
2049 }
2050
286c991a
MD
2051 buffer_reg_pid_add(reg_pid);
2052
7972aab2
DG
2053 DBG3("UST app buffer registry per PID created successfully");
2054
2055end:
2056 if (regp) {
2057 *regp = reg_pid;
2058 }
2059error:
2060 rcu_read_unlock();
2061 return ret;
2062}
2063
2064/*
2065 * Setup buffer registry per UID for the given session and application. If none
2066 * is found, a new one is created, added to the global registry and
2067 * initialized. If regp is valid, it's set with the newly created object.
2068 *
2069 * Return 0 on success or else a negative value.
2070 */
2071static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
d7ba1388 2072 struct ust_app_session *ua_sess,
7972aab2
DG
2073 struct ust_app *app, struct buffer_reg_uid **regp)
2074{
2075 int ret = 0;
2076 struct buffer_reg_uid *reg_uid;
2077
2078 assert(usess);
2079 assert(app);
2080
2081 rcu_read_lock();
2082
2083 reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
2084 if (!reg_uid) {
2085 /*
2086 * This is the create channel path meaning that if there is NO
2087 * registry available, we have to create one for this session.
2088 */
2089 ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
3d071855
MD
2090 LTTNG_DOMAIN_UST, &reg_uid,
2091 ua_sess->root_shm_path, ua_sess->shm_path);
7972aab2
DG
2092 if (ret < 0) {
2093 goto error;
2094 }
7972aab2
DG
2095 } else {
2096 goto end;
2097 }
2098
2099 /* Initialize registry. */
af6142cf 2100 ret = ust_registry_session_init(&reg_uid->registry->reg.ust, NULL,
7972aab2
DG
2101 app->bits_per_long, app->uint8_t_alignment,
2102 app->uint16_t_alignment, app->uint32_t_alignment,
af6142cf
MD
2103 app->uint64_t_alignment, app->long_alignment,
2104 app->byte_order, app->version.major,
3d071855
MD
2105 app->version.minor, reg_uid->root_shm_path,
2106 reg_uid->shm_path, usess->uid, usess->gid);
7972aab2 2107 if (ret < 0) {
286c991a
MD
2108 /*
2109 * reg_uid->registry->reg.ust is NULL upon error, so we need to
2110 * destroy the buffer registry, because it is always expected
2111 * that if the buffer registry can be found, its ust registry is
2112 * non-NULL.
2113 */
2114 buffer_reg_uid_destroy(reg_uid, NULL);
7972aab2
DG
2115 goto error;
2116 }
2117 /* Add node to teardown list of the session. */
2118 cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);
2119
286c991a 2120 buffer_reg_uid_add(reg_uid);
7972aab2 2121
286c991a 2122 DBG3("UST app buffer registry per UID created successfully");
7972aab2
DG
2123end:
2124 if (regp) {
2125 *regp = reg_uid;
2126 }
2127error:
2128 rcu_read_unlock();
2129 return ret;
2130}
2131
421cb601 2132/*
3d8ca23b 2133 * Create a session on the tracer side for the given app.
421cb601 2134 *
3d8ca23b
DG
2135 * On success, ua_sess_ptr is populated with the session pointer or else left
2136 * untouched. If the session was created, is_created is set to 1. On error,
2137 * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
2138 * be NULL.
2139 *
2140 * Returns 0 on success or else a negative code which is either -ENOMEM or
2141 * -ENOTCONN which is the default code if the ustctl_create_session fails.
421cb601 2142 */
3d8ca23b
DG
2143static int create_ust_app_session(struct ltt_ust_session *usess,
2144 struct ust_app *app, struct ust_app_session **ua_sess_ptr,
2145 int *is_created)
421cb601 2146{
3d8ca23b 2147 int ret, created = 0;
421cb601
DG
2148 struct ust_app_session *ua_sess;
2149
3d8ca23b
DG
2150 assert(usess);
2151 assert(app);
2152 assert(ua_sess_ptr);
2153
840cb59c 2154 health_code_update();
86acf0da 2155
421cb601
DG
2156 ua_sess = lookup_session_by_app(usess, app);
2157 if (ua_sess == NULL) {
d9bf3ca4 2158 DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
852d0037 2159 app->pid, usess->id);
d0b96690 2160 ua_sess = alloc_ust_app_session(app);
421cb601
DG
2161 if (ua_sess == NULL) {
2162			/* Only malloc can fail here, so something is really wrong. */
3d8ca23b
DG
2163 ret = -ENOMEM;
2164 goto error;
421cb601 2165 }
477d7741 2166 shadow_copy_session(ua_sess, usess, app);
3d8ca23b 2167 created = 1;
421cb601
DG
2168 }
2169
7972aab2
DG
2170 switch (usess->buffer_type) {
2171 case LTTNG_BUFFER_PER_PID:
2172 /* Init local registry. */
2173 ret = setup_buffer_reg_pid(ua_sess, app, NULL);
421cb601 2174 if (ret < 0) {
e64207cf 2175 delete_ust_app_session(-1, ua_sess, app);
7972aab2
DG
2176 goto error;
2177 }
2178 break;
2179 case LTTNG_BUFFER_PER_UID:
2180 /* Look for a global registry. If none exists, create one. */
d7ba1388 2181 ret = setup_buffer_reg_uid(usess, ua_sess, app, NULL);
7972aab2 2182 if (ret < 0) {
e64207cf 2183 delete_ust_app_session(-1, ua_sess, app);
7972aab2
DG
2184 goto error;
2185 }
2186 break;
2187 default:
2188 assert(0);
2189 ret = -EINVAL;
2190 goto error;
2191 }
2192
2193 health_code_update();
2194
2195 if (ua_sess->handle == -1) {
fb45065e 2196 pthread_mutex_lock(&app->sock_lock);
7972aab2 2197 ret = ustctl_create_session(app->sock);
fb45065e 2198 pthread_mutex_unlock(&app->sock_lock);
7972aab2
DG
2199 if (ret < 0) {
2200 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
2201 ERR("Creating session for app pid %d with ret %d",
ffe60014
DG
2202 app->pid, ret);
2203 } else {
2204 DBG("UST app creating session failed. Application is dead");
3757b385
DG
2205 /*
2206 * This is normal behavior, an application can die during the
2207 * creation process. Don't report an error so the execution can
2208 * continue normally. This will get flagged ENOTCONN and the
2209 * caller will handle it.
2210 */
2211 ret = 0;
ffe60014 2212 }
d0b96690 2213 delete_ust_app_session(-1, ua_sess, app);
3d8ca23b
DG
2214 if (ret != -ENOMEM) {
2215 /*
2216 * Tracer is probably gone or got an internal error so let's
2217				 * behave like it will soon unregister or is not usable.
2218 */
2219 ret = -ENOTCONN;
2220 }
2221 goto error;
421cb601
DG
2222 }
2223
7972aab2
DG
2224 ua_sess->handle = ret;
2225
2226 /* Add ust app session to app's HT */
d9bf3ca4
MD
2227 lttng_ht_node_init_u64(&ua_sess->node,
2228 ua_sess->tracing_id);
2229 lttng_ht_add_unique_u64(app->sessions, &ua_sess->node);
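		/* Also index the session by its ustctl object descriptor (the handle), presumably so it can be found again from tracer-side object ids. */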
10b56aef
MD
2230 lttng_ht_node_init_ulong(&ua_sess->ust_objd_node, ua_sess->handle);
2231 lttng_ht_add_unique_ulong(app->ust_sessions_objd,
2232 &ua_sess->ust_objd_node);
7972aab2
DG
2233
2234 DBG2("UST app session created successfully with handle %d", ret);
2235 }
2236
2237 *ua_sess_ptr = ua_sess;
2238 if (is_created) {
2239 *is_created = created;
2240 }
2241
2242 /* Everything went well. */
2243 ret = 0;
2244
2245error:
2246 health_code_update();
2247 return ret;
2248}
2249
6a6b2068
JG
2250/*
2251 * Match function for a hash table lookup of ust_app_ctx.
2252 *
2253 * It matches an ust app context based on the context type and, in the case
2254 * of perf counters, their name.
2255 */
2256static int ht_match_ust_app_ctx(struct cds_lfht_node *node, const void *_key)
2257{
2258 struct ust_app_ctx *ctx;
bdf64013 2259 const struct lttng_ust_context_attr *key;
6a6b2068
JG
2260
2261 assert(node);
2262 assert(_key);
2263
2264 ctx = caa_container_of(node, struct ust_app_ctx, node.node);
2265 key = _key;
2266
2267 /* Context type */
2268 if (ctx->ctx.ctx != key->ctx) {
2269 goto no_match;
2270 }
2271
bdf64013
JG
2272 switch(key->ctx) {
2273 case LTTNG_UST_CONTEXT_PERF_THREAD_COUNTER:
6a6b2068 2274 if (strncmp(key->u.perf_counter.name,
bdf64013
JG
2275 ctx->ctx.u.perf_counter.name,
2276 sizeof(key->u.perf_counter.name))) {
2277 goto no_match;
2278 }
2279 break;
2280 case LTTNG_UST_CONTEXT_APP_CONTEXT:
2281 if (strcmp(key->u.app_ctx.provider_name,
2282 ctx->ctx.u.app_ctx.provider_name) ||
2283 strcmp(key->u.app_ctx.ctx_name,
2284 ctx->ctx.u.app_ctx.ctx_name)) {
6a6b2068
JG
2285 goto no_match;
2286 }
bdf64013
JG
2287 break;
2288 default:
2289 break;
6a6b2068
JG
2290 }
2291
2292 /* Match. */
2293 return 1;
2294
2295no_match:
2296 return 0;
2297}
2298
2299/*
2300 * Lookup for an ust app context from an lttng_ust_context.
2301 *
be184a0f 2302 * Must be called while holding RCU read side lock.
6a6b2068
JG
2303 * Return an ust_app_ctx object or NULL on error.
2304 */
2305static
2306struct ust_app_ctx *find_ust_app_context(struct lttng_ht *ht,
bdf64013 2307 struct lttng_ust_context_attr *uctx)
6a6b2068
JG
2308{
2309 struct lttng_ht_iter iter;
2310 struct lttng_ht_node_ulong *node;
2311 struct ust_app_ctx *app_ctx = NULL;
2312
2313 assert(uctx);
2314 assert(ht);
2315
2316 /* Lookup using the lttng_ust_context_type and a custom match fct. */
2317 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) uctx->ctx, lttng_ht_seed),
2318 ht_match_ust_app_ctx, uctx, &iter.iter);
2319 node = lttng_ht_iter_get_node_ulong(&iter);
2320 if (!node) {
2321 goto end;
2322 }
2323
2324 app_ctx = caa_container_of(node, struct ust_app_ctx, node);
2325
2326end:
2327 return app_ctx;
2328}
2329
7972aab2
DG
2330/*
2331 * Create a context for the channel on the tracer.
2332 *
2333 * Called with UST app session lock held and a RCU read side lock.
2334 */
2335static
2336int create_ust_app_channel_context(struct ust_app_session *ua_sess,
bdf64013
JG
2337 struct ust_app_channel *ua_chan,
2338 struct lttng_ust_context_attr *uctx,
7972aab2
DG
2339 struct ust_app *app)
2340{
2341 int ret = 0;
7972aab2
DG
2342 struct ust_app_ctx *ua_ctx;
2343
2344 DBG2("UST app adding context to channel %s", ua_chan->name);
2345
6a6b2068
JG
2346 ua_ctx = find_ust_app_context(ua_chan->ctx, uctx);
2347 if (ua_ctx) {
7972aab2
DG
2348 ret = -EEXIST;
2349 goto error;
2350 }
2351
2352 ua_ctx = alloc_ust_app_ctx(uctx);
2353 if (ua_ctx == NULL) {
2354 /* malloc failed */
2355 ret = -1;
2356 goto error;
2357 }
2358
2359 lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
aa3514e9 2360 lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
31746f93 2361 cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
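	/* The context is tracked both in a hash table (for duplicate lookups) and in a list that preserves insertion order. */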
7972aab2
DG
2362
2363 ret = create_ust_channel_context(ua_chan, ua_ctx, app);
2364 if (ret < 0) {
2365 goto error;
2366 }
2367
2368error:
2369 return ret;
2370}
2371
2372/*
2373 * Enable on the tracer side a ust app event for the session and channel.
2374 *
2375 * Called with UST app session lock held.
2376 */
2377static
2378int enable_ust_app_event(struct ust_app_session *ua_sess,
2379 struct ust_app_event *ua_event, struct ust_app *app)
2380{
2381 int ret;
2382
2383 ret = enable_ust_event(app, ua_sess, ua_event);
2384 if (ret < 0) {
2385 goto error;
2386 }
2387
2388 ua_event->enabled = 1;
2389
2390error:
2391 return ret;
2392}
2393
2394/*
2395 * Disable on the tracer side a ust app event for the session and channel.
2396 */
2397static int disable_ust_app_event(struct ust_app_session *ua_sess,
2398 struct ust_app_event *ua_event, struct ust_app *app)
2399{
2400 int ret;
2401
2402 ret = disable_ust_event(app, ua_sess, ua_event);
2403 if (ret < 0) {
2404 goto error;
2405 }
2406
2407 ua_event->enabled = 0;
2408
2409error:
2410 return ret;
2411}
2412
2413/*
2414 * Lookup ust app channel for session and disable it on the tracer side.
2415 */
2416static
2417int disable_ust_app_channel(struct ust_app_session *ua_sess,
2418 struct ust_app_channel *ua_chan, struct ust_app *app)
2419{
2420 int ret;
2421
2422 ret = disable_ust_channel(app, ua_sess, ua_chan);
2423 if (ret < 0) {
2424 goto error;
2425 }
2426
2427 ua_chan->enabled = 0;
2428
2429error:
2430 return ret;
2431}
2432
2433/*
2434 * Lookup ust app channel for session and enable it on the tracer side. This
2435 * MUST be called with a RCU read side lock acquired.
2436 */
2437static int enable_ust_app_channel(struct ust_app_session *ua_sess,
2438 struct ltt_ust_channel *uchan, struct ust_app *app)
2439{
2440 int ret = 0;
2441 struct lttng_ht_iter iter;
2442 struct lttng_ht_node_str *ua_chan_node;
2443 struct ust_app_channel *ua_chan;
2444
2445 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
2446 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
2447 if (ua_chan_node == NULL) {
d9bf3ca4 2448 DBG2("Unable to find channel %s in ust session id %" PRIu64,
7972aab2
DG
2449 uchan->name, ua_sess->tracing_id);
2450 goto error;
2451 }
2452
2453 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
2454
2455 ret = enable_ust_channel(app, ua_sess, ua_chan);
2456 if (ret < 0) {
2457 goto error;
2458 }
2459
2460error:
2461 return ret;
2462}
2463
2464/*
2465 * Ask the consumer to create a channel and get it if successful.
2466 *
2467 * Return 0 on success or else a negative value.
2468 */
2469static int do_consumer_create_channel(struct ltt_ust_session *usess,
2470 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
2471 int bitness, struct ust_registry_session *registry)
2472{
2473 int ret;
2474 unsigned int nb_fd = 0;
2475 struct consumer_socket *socket;
2476
2477 assert(usess);
2478 assert(ua_sess);
2479 assert(ua_chan);
2480 assert(registry);
2481
2482 rcu_read_lock();
2483 health_code_update();
2484
2485 /* Get the right consumer socket for the application. */
2486 socket = consumer_find_socket_by_bitness(bitness, usess->consumer);
2487 if (!socket) {
2488 ret = -EINVAL;
2489 goto error;
2490 }
2491
2492 health_code_update();
2493
2494 /* Need one fd for the channel. */
2495 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
2496 if (ret < 0) {
2497 ERR("Exhausted number of available FD upon create channel");
2498 goto error;
2499 }
2500
2501 /*
2502 * Ask consumer to create channel. The consumer will return the number of
2503	 * streams we have to expect.
2504 */
2505 ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
2506 registry);
2507 if (ret < 0) {
2508 goto error_ask;
2509 }
2510
2511 /*
2512	 * Compute the number of fds needed before receiving them. It must be 2 per
2513 * stream (2 being the default value here).
2514 */
2515 nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;
2516
2517 /* Reserve the amount of file descriptor we need. */
2518 ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
2519 if (ret < 0) {
2520 ERR("Exhausted number of available FD upon create channel");
2521 goto error_fd_get_stream;
2522 }
2523
2524 health_code_update();
2525
2526 /*
2527	 * Now get the channel from the consumer. This call will populate the stream
2528 * list of that channel and set the ust objects.
2529 */
d9078d0c
DG
2530 if (usess->consumer->enabled) {
2531 ret = ust_consumer_get_channel(socket, ua_chan);
2532 if (ret < 0) {
2533 goto error_destroy;
2534 }
7972aab2
DG
2535 }
2536
2537 rcu_read_unlock();
2538 return 0;
2539
2540error_destroy:
2541 lttng_fd_put(LTTNG_FD_APPS, nb_fd);
2542error_fd_get_stream:
2543 /*
2544 * Initiate a destroy channel on the consumer since we had an error
2545 * handling it on our side. The return value is of no importance since we
2546 * already have a ret value set by the previous error that we need to
2547 * return.
2548 */
2549 (void) ust_consumer_destroy_channel(socket, ua_chan);
2550error_ask:
2551 lttng_fd_put(LTTNG_FD_APPS, 1);
2552error:
2553 health_code_update();
2554 rcu_read_unlock();
2555 return ret;
2556}
2557
2558/*
2559 * Duplicate the ust data object of the ust app stream and save it in the
2560 * buffer registry stream.
2561 *
2562 * Return 0 on success or else a negative value.
2563 */
2564static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
2565 struct ust_app_stream *stream)
2566{
2567 int ret;
2568
2569 assert(reg_stream);
2570 assert(stream);
2571
2572 /* Reserve the amount of file descriptor we need. */
2573 ret = lttng_fd_get(LTTNG_FD_APPS, 2);
2574 if (ret < 0) {
2575 ERR("Exhausted number of available FD upon duplicate stream");
2576 goto error;
2577 }
2578
2579 /* Duplicate object for stream once the original is in the registry. */
2580 ret = ustctl_duplicate_ust_object_data(&stream->obj,
2581 reg_stream->obj.ust);
2582 if (ret < 0) {
2583 ERR("Duplicate stream obj from %p to %p failed with ret %d",
2584 reg_stream->obj.ust, stream->obj, ret);
2585 lttng_fd_put(LTTNG_FD_APPS, 2);
2586 goto error;
2587 }
2588 stream->handle = stream->obj->handle;
2589
2590error:
2591 return ret;
2592}
2593
2594/*
2595 * Duplicate the ust data object of the ust app channel and save it in the
2596 * buffer registry channel.
2597 *
2598 * Return 0 on success or else a negative value.
2599 */
2600static int duplicate_channel_object(struct buffer_reg_channel *reg_chan,
2601 struct ust_app_channel *ua_chan)
2602{
2603 int ret;
2604
2605 assert(reg_chan);
2606 assert(ua_chan);
2607
2608	/* Need one fd for the channel. */
2609 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
2610 if (ret < 0) {
2611 ERR("Exhausted number of available FD upon duplicate channel");
2612 goto error_fd_get;
2613 }
2614
2615 /* Duplicate object for stream once the original is in the registry. */
2616 ret = ustctl_duplicate_ust_object_data(&ua_chan->obj, reg_chan->obj.ust);
2617 if (ret < 0) {
2618 ERR("Duplicate channel obj from %p to %p failed with ret: %d",
2619 reg_chan->obj.ust, ua_chan->obj, ret);
2620 goto error;
2621 }
2622 ua_chan->handle = ua_chan->obj->handle;
2623
2624 return 0;
2625
2626error:
2627 lttng_fd_put(LTTNG_FD_APPS, 1);
2628error_fd_get:
2629 return ret;
2630}
2631
2632/*
2633 * For a given channel buffer registry, setup all streams of the given ust
2634 * application channel.
2635 *
2636 * Return 0 on success or else a negative value.
2637 */
2638static int setup_buffer_reg_streams(struct buffer_reg_channel *reg_chan,
fb45065e
MD
2639 struct ust_app_channel *ua_chan,
2640 struct ust_app *app)
7972aab2
DG
2641{
2642 int ret = 0;
2643 struct ust_app_stream *stream, *stmp;
2644
2645 assert(reg_chan);
2646 assert(ua_chan);
2647
2648 DBG2("UST app setup buffer registry stream");
2649
2650 /* Send all streams to application. */
2651 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
2652 struct buffer_reg_stream *reg_stream;
2653
2654 ret = buffer_reg_stream_create(&reg_stream);
2655 if (ret < 0) {
2656 goto error;
2657 }
2658
2659 /*
2660 * Keep original pointer and nullify it in the stream so the delete
2661 * stream call does not release the object.
2662 */
2663 reg_stream->obj.ust = stream->obj;
2664 stream->obj = NULL;
2665 buffer_reg_stream_add(reg_stream, reg_chan);
421cb601 2666
7972aab2
DG
2667 /* We don't need the streams anymore. */
2668 cds_list_del(&stream->list);
fb45065e 2669 delete_ust_app_stream(-1, stream, app);
7972aab2 2670 }
421cb601 2671
7972aab2
DG
2672error:
2673 return ret;
2674}
2675
2676/*
2677 * Create a buffer registry channel for the given session registry and
2678 * application channel object. If regp pointer is valid, it's set with the
2679 * created object. Important, the created object is NOT added to the session
2680 * registry hash table.
2681 *
2682 * Return 0 on success else a negative value.
2683 */
2684static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
2685 struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp)
2686{
2687 int ret;
2688 struct buffer_reg_channel *reg_chan = NULL;
2689
2690 assert(reg_sess);
2691 assert(ua_chan);
2692
2693 DBG2("UST app creating buffer registry channel for %s", ua_chan->name);
2694
2695 /* Create buffer registry channel. */
2696 ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &reg_chan);
2697 if (ret < 0) {
2698 goto error_create;
421cb601 2699 }
7972aab2
DG
2700 assert(reg_chan);
2701 reg_chan->consumer_key = ua_chan->key;
8c924c7b 2702 reg_chan->subbuf_size = ua_chan->attr.subbuf_size;
d07ceecd 2703 reg_chan->num_subbuf = ua_chan->attr.num_subbuf;
421cb601 2704
7972aab2
DG
2705 /* Create and add a channel registry to session. */
2706 ret = ust_registry_channel_add(reg_sess->reg.ust,
2707 ua_chan->tracing_channel_id);
2708 if (ret < 0) {
2709 goto error;
d88aee68 2710 }
7972aab2 2711 buffer_reg_channel_add(reg_sess, reg_chan);
d88aee68 2712
7972aab2
DG
2713 if (regp) {
2714 *regp = reg_chan;
3d8ca23b 2715 }
d88aee68 2716
7972aab2 2717 return 0;
3d8ca23b
DG
2718
2719error:
7972aab2
DG
2720 /* Safe because the registry channel object was not added to any HT. */
2721 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2722error_create:
3d8ca23b 2723 return ret;
421cb601
DG
2724}
2725
55cc08a6 2726/*
7972aab2
DG
2727 * Setup buffer registry channel for the given session registry and application
2728 * channel object. If regp pointer is valid, it's set with the created object.
d0b96690 2729 *
7972aab2 2730 * Return 0 on success else a negative value.
55cc08a6 2731 */
7972aab2 2732static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
fb45065e
MD
2733 struct ust_app_channel *ua_chan, struct buffer_reg_channel *reg_chan,
2734 struct ust_app *app)
55cc08a6 2735{
7972aab2 2736 int ret;
55cc08a6 2737
7972aab2
DG
2738 assert(reg_sess);
2739 assert(reg_chan);
2740 assert(ua_chan);
2741 assert(ua_chan->obj);
55cc08a6 2742
7972aab2 2743 DBG2("UST app setup buffer registry channel for %s", ua_chan->name);
55cc08a6 2744
7972aab2 2745 /* Setup all streams for the registry. */
fb45065e 2746 ret = setup_buffer_reg_streams(reg_chan, ua_chan, app);
7972aab2 2747 if (ret < 0) {
55cc08a6
DG
2748 goto error;
2749 }
2750
7972aab2
DG
2751 reg_chan->obj.ust = ua_chan->obj;
2752 ua_chan->obj = NULL;
55cc08a6 2753
7972aab2 2754 return 0;
55cc08a6
DG
2755
2756error:
7972aab2
DG
2757 buffer_reg_channel_remove(reg_sess, reg_chan);
2758 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
55cc08a6
DG
2759 return ret;
2760}
2761
edb67388 2762/*
7972aab2 2763 * Send buffer registry channel to the application.
d0b96690 2764 *
7972aab2 2765 * Return 0 on success else a negative value.
edb67388 2766 */
7972aab2
DG
2767static int send_channel_uid_to_ust(struct buffer_reg_channel *reg_chan,
2768 struct ust_app *app, struct ust_app_session *ua_sess,
2769 struct ust_app_channel *ua_chan)
edb67388
DG
2770{
2771 int ret;
7972aab2 2772 struct buffer_reg_stream *reg_stream;
edb67388 2773
7972aab2
DG
2774 assert(reg_chan);
2775 assert(app);
2776 assert(ua_sess);
2777 assert(ua_chan);
2778
2779 DBG("UST app sending buffer registry channel to ust sock %d", app->sock);
2780
2781 ret = duplicate_channel_object(reg_chan, ua_chan);
edb67388
DG
2782 if (ret < 0) {
2783 goto error;
2784 }
2785
7972aab2
DG
2786 /* Send channel to the application. */
2787 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
a7169585
MD
2788 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
2789 ret = -ENOTCONN; /* Caused by app exiting. */
2790 goto error;
2791 } else if (ret < 0) {
7972aab2
DG
2792 goto error;
2793 }
2794
2795 health_code_update();
2796
2797 /* Send all streams to application. */
2798 pthread_mutex_lock(&reg_chan->stream_list_lock);
2799 cds_list_for_each_entry(reg_stream, &reg_chan->streams, lnode) {
2800 struct ust_app_stream stream;
2801
2802 ret = duplicate_stream_object(reg_stream, &stream);
2803 if (ret < 0) {
2804 goto error_stream_unlock;
2805 }
2806
2807 ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
2808 if (ret < 0) {
fb45065e 2809 (void) release_ust_app_stream(-1, &stream, app);
a7169585
MD
2810 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
2811 ret = -ENOTCONN; /* Caused by app exiting. */
a7169585 2812 }
7972aab2
DG
2813 goto error_stream_unlock;
2814 }
edb67388 2815
7972aab2
DG
2816 /*
2817 * The return value is not important here. This function will output an
2818 * error if needed.
2819 */
fb45065e 2820 (void) release_ust_app_stream(-1, &stream, app);
7972aab2
DG
2821 }
2822 ua_chan->is_sent = 1;
2823
2824error_stream_unlock:
2825 pthread_mutex_unlock(&reg_chan->stream_list_lock);
edb67388
DG
2826error:
2827 return ret;
2828}
2829
9730260e 2830/*
7972aab2
DG
2831 * Create and send to the application the created buffers with per UID buffers.
2832 *
2833 * Return 0 on success else a negative value.
9730260e 2834 */
7972aab2
DG
2835static int create_channel_per_uid(struct ust_app *app,
2836 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2837 struct ust_app_channel *ua_chan)
9730260e
DG
2838{
2839 int ret;
7972aab2
DG
2840 struct buffer_reg_uid *reg_uid;
2841 struct buffer_reg_channel *reg_chan;
9730260e 2842
7972aab2
DG
2843 assert(app);
2844 assert(usess);
2845 assert(ua_sess);
2846 assert(ua_chan);
2847
2848 DBG("UST app creating channel %s with per UID buffers", ua_chan->name);
2849
2850 reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
2851 /*
2852 * The session creation handles the creation of this global registry
2853	 * object. If none can be found, there is a code flow problem or a
2854 * teardown race.
2855 */
2856 assert(reg_uid);
2857
2858 reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
2859 reg_uid);
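	/* If no registry channel exists yet for this UID/bitness, this is the first application to need it: create the shared buffers below. */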
2860 if (!reg_chan) {
2861 /* Create the buffer registry channel object. */
2862 ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &reg_chan);
2863 if (ret < 0) {
f14256d6
MD
2864 ERR("Error creating the UST channel \"%s\" registry instance",
2865 ua_chan->name);
7972aab2
DG
2866 goto error;
2867 }
2868 assert(reg_chan);
2869
2870 /*
2871 * Create the buffers on the consumer side. This call populates the
2872 * ust app channel object with all streams and data object.
2873 */
2874 ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
2875 app->bits_per_long, reg_uid->registry->reg.ust);
2876 if (ret < 0) {
f14256d6
MD
2877 ERR("Error creating UST channel \"%s\" on the consumer daemon",
2878 ua_chan->name);
2879
07d2ae95
DG
2880 /*
2881 * Let's remove the previously created buffer registry channel so
2882 * it's not visible anymore in the session registry.
2883 */
2884 ust_registry_channel_del_free(reg_uid->registry->reg.ust,
2885 ua_chan->tracing_channel_id);
2886 buffer_reg_channel_remove(reg_uid->registry, reg_chan);
2887 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
7972aab2
DG
2888 goto error;
2889 }
2890
2891 /*
2892 * Setup the streams and add it to the session registry.
2893 */
fb45065e
MD
2894 ret = setup_buffer_reg_channel(reg_uid->registry,
2895 ua_chan, reg_chan, app);
7972aab2 2896 if (ret < 0) {
f14256d6
MD
2897 ERR("Error setting up UST channel \"%s\"",
2898 ua_chan->name);
7972aab2
DG
2899 goto error;
2900 }
2901
2902 }
2903
2904 /* Send buffers to the application. */
2905 ret = send_channel_uid_to_ust(reg_chan, app, ua_sess, ua_chan);
9730260e 2906 if (ret < 0) {
a7169585
MD
2907 if (ret != -ENOTCONN) {
2908 ERR("Error sending channel to application");
2909 }
9730260e
DG
2910 goto error;
2911 }
2912
9730260e
DG
2913error:
2914 return ret;
2915}
2916
78f0bacd 2917/*
7972aab2
DG
2918 * Create and send to the application the created buffers with per PID buffers.
2919 *
2920 * Return 0 on success else a negative value.
78f0bacd 2921 */
7972aab2
DG
2922static int create_channel_per_pid(struct ust_app *app,
2923 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2924 struct ust_app_channel *ua_chan)
78f0bacd 2925{
8535a6d9 2926 int ret;
7972aab2 2927 struct ust_registry_session *registry;
78f0bacd 2928
7972aab2
DG
2929 assert(app);
2930 assert(usess);
2931 assert(ua_sess);
2932 assert(ua_chan);
2933
2934 DBG("UST app creating channel %s with per PID buffers", ua_chan->name);
2935
2936 rcu_read_lock();
2937
2938 registry = get_session_registry(ua_sess);
2939 assert(registry);
2940
2941 /* Create and add a new channel registry to session. */
2942 ret = ust_registry_channel_add(registry, ua_chan->key);
78f0bacd 2943 if (ret < 0) {
f14256d6
MD
2944 ERR("Error creating the UST channel \"%s\" registry instance",
2945 ua_chan->name);
78f0bacd
DG
2946 goto error;
2947 }
2948
7972aab2
DG
2949 /* Create and get channel on the consumer side. */
2950 ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
2951 app->bits_per_long, registry);
2952 if (ret < 0) {
f14256d6
MD
2953 ERR("Error creating UST channel \"%s\" on the consumer daemon",
2954 ua_chan->name);
7972aab2
DG
2955 goto error;
2956 }
2957
2958 ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
2959 if (ret < 0) {
a7169585
MD
2960 if (ret != -ENOTCONN) {
2961 ERR("Error sending channel to application");
2962 }
7972aab2
DG
2963 goto error;
2964 }
8535a6d9 2965
78f0bacd 2966error:
7972aab2 2967 rcu_read_unlock();
78f0bacd
DG
2968 return ret;
2969}
2970
2971/*
7972aab2
DG
2972 * From an already allocated ust app channel, create the channel buffers if
2973 * need and send it to the application. This MUST be called with a RCU read
2974 * side lock acquired.
2975 *
a7169585
MD
2976 * Return 0 on success or else a negative value. Returns -ENOTCONN if
2977 * the application exited concurrently.
78f0bacd 2978 */
7972aab2
DG
2979static int do_create_channel(struct ust_app *app,
2980 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2981 struct ust_app_channel *ua_chan)
78f0bacd 2982{
7972aab2 2983 int ret;
78f0bacd 2984
7972aab2
DG
2985 assert(app);
2986 assert(usess);
2987 assert(ua_sess);
2988 assert(ua_chan);
2989
2990 /* Handle buffer type before sending the channel to the application. */
2991 switch (usess->buffer_type) {
2992 case LTTNG_BUFFER_PER_UID:
2993 {
2994 ret = create_channel_per_uid(app, usess, ua_sess, ua_chan);
2995 if (ret < 0) {
2996 goto error;
2997 }
2998 break;
2999 }
3000 case LTTNG_BUFFER_PER_PID:
3001 {
3002 ret = create_channel_per_pid(app, usess, ua_sess, ua_chan);
3003 if (ret < 0) {
3004 goto error;
3005 }
3006 break;
3007 }
3008 default:
3009 assert(0);
3010 ret = -EINVAL;
78f0bacd
DG
3011 goto error;
3012 }
3013
7972aab2
DG
3014 /* Initialize ust objd object using the received handle and add it. */
3015 lttng_ht_node_init_ulong(&ua_chan->ust_objd_node, ua_chan->handle);
3016 lttng_ht_add_unique_ulong(app->ust_objd, &ua_chan->ust_objd_node);
78f0bacd 3017
7972aab2
DG
3018 /* If channel is not enabled, disable it on the tracer */
3019 if (!ua_chan->enabled) {
3020 ret = disable_ust_channel(app, ua_sess, ua_chan);
3021 if (ret < 0) {
3022 goto error;
3023 }
78f0bacd
DG
3024 }
3025
3026error:
3027 return ret;
3028}
3029
284d8f55 3030/*
4d710ac2
DG
3031 * Create UST app channel and create it on the tracer. Set ua_chanp of the
3032 * newly created channel if not NULL.
d0b96690 3033 *
36b588ed 3034 * Called with UST app session lock and RCU read-side lock held.
7972aab2 3035 *
a7169585
MD
3036 * Return 0 on success or else a negative value. Returns -ENOTCONN if
3037 * the application exited concurrently.
284d8f55 3038 */
4d710ac2
DG
3039static int create_ust_app_channel(struct ust_app_session *ua_sess,
3040 struct ltt_ust_channel *uchan, struct ust_app *app,
7972aab2 3041 enum lttng_ust_chan_type type, struct ltt_ust_session *usess,
4d710ac2 3042 struct ust_app_channel **ua_chanp)
5b4a0ec0
DG
3043{
3044 int ret = 0;
bec39940
DG
3045 struct lttng_ht_iter iter;
3046 struct lttng_ht_node_str *ua_chan_node;
5b4a0ec0
DG
3047 struct ust_app_channel *ua_chan;
3048
3049 /* Lookup channel in the ust app session */
bec39940
DG
3050 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
3051 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
fc34caaa 3052 if (ua_chan_node != NULL) {
5b4a0ec0 3053 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
fc34caaa 3054 goto end;
5b4a0ec0
DG
3055 }
3056
d0b96690 3057 ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
fc34caaa
DG
3058 if (ua_chan == NULL) {
3059 /* Only malloc can fail here */
4d710ac2 3060 ret = -ENOMEM;
094d1690 3061 goto error_alloc;
fc34caaa
DG
3062 }
3063 shadow_copy_channel(ua_chan, uchan);
3064
ffe60014
DG
3065 /* Set channel type. */
3066 ua_chan->attr.type = type;
3067
7972aab2 3068 ret = do_create_channel(app, usess, ua_sess, ua_chan);
5b4a0ec0
DG
3069 if (ret < 0) {
3070 goto error;
3071 }
3072
fc34caaa 3073 DBG2("UST app create channel %s for PID %d completed", ua_chan->name,
852d0037 3074 app->pid);
fc34caaa 3075
d0b96690
DG
3076 /* Only add the channel if successful on the tracer side. */
3077 lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
3078
fc34caaa 3079end:
4d710ac2
DG
3080 if (ua_chanp) {
3081 *ua_chanp = ua_chan;
3082 }
3083
3084 /* Everything went well. */
3085 return 0;
5b4a0ec0
DG
3086
3087error:
d0b96690 3088 delete_ust_app_channel(ua_chan->is_sent ? app->sock : -1, ua_chan, app);
094d1690 3089error_alloc:
4d710ac2 3090 return ret;
5b4a0ec0
DG
3091}
3092
3093/*
3094 * Create UST app event and create it on the tracer side.
d0b96690
DG
3095 *
3096 * Called with ust app session mutex held.
5b4a0ec0 3097 */
edb67388
DG
3098static
3099int create_ust_app_event(struct ust_app_session *ua_sess,
3100 struct ust_app_channel *ua_chan, struct ltt_ust_event *uevent,
3101 struct ust_app *app)
284d8f55 3102{
edb67388 3103 int ret = 0;
5b4a0ec0 3104 struct ust_app_event *ua_event;
284d8f55 3105
5b4a0ec0 3106 /* Get event node */
18eace3b 3107 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
39c5a3a7 3108 uevent->filter, uevent->attr.loglevel, uevent->exclusion);
18eace3b 3109 if (ua_event != NULL) {
fc34caaa 3110 ret = -EEXIST;
edb67388
DG
3111 goto end;
3112 }
5b4a0ec0 3113
edb67388
DG
3114 /* Does not exist so create one */
3115 ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
3116 if (ua_event == NULL) {
3117		/* Only malloc can fail here, so something is really wrong. */
3118 ret = -ENOMEM;
fc34caaa 3119 goto end;
5b4a0ec0 3120 }
edb67388 3121 shadow_copy_event(ua_event, uevent);
5b4a0ec0 3122
edb67388 3123 /* Create it on the tracer side */
5b4a0ec0 3124 ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
284d8f55 3125 if (ret < 0) {
fc34caaa 3126 /* Not found previously means that it does not exist on the tracer */
76f66f63 3127 assert(ret != -LTTNG_UST_ERR_EXIST);
284d8f55
DG
3128 goto error;
3129 }
3130
d0b96690 3131 add_unique_ust_app_event(ua_chan, ua_event);
284d8f55 3132
fc34caaa 3133 DBG2("UST app create event %s for PID %d completed", ua_event->name,
852d0037 3134 app->pid);
7f79d3a1 3135
edb67388 3136end:
fc34caaa
DG
3137 return ret;
3138
5b4a0ec0 3139error:
fc34caaa 3140	/* Valid: the caller already holds the RCU read-side lock. */
fb45065e 3141 delete_ust_app_event(-1, ua_event, app);
edb67388 3142 return ret;
5b4a0ec0
DG
3143}
3144
3145/*
3146 * Create UST metadata and open it on the tracer side.
d0b96690 3147 *
7972aab2 3148 * Called with UST app session lock held and RCU read side lock.
5b4a0ec0
DG
3149 */
3150static int create_ust_app_metadata(struct ust_app_session *ua_sess,
ad7a9107 3151 struct ust_app *app, struct consumer_output *consumer)
5b4a0ec0
DG
3152{
3153 int ret = 0;
ffe60014 3154 struct ust_app_channel *metadata;
d88aee68 3155 struct consumer_socket *socket;
7972aab2 3156 struct ust_registry_session *registry;
5b4a0ec0 3157
ffe60014
DG
3158 assert(ua_sess);
3159 assert(app);
d88aee68 3160 assert(consumer);
5b4a0ec0 3161
7972aab2
DG
3162 registry = get_session_registry(ua_sess);
3163 assert(registry);
3164
ce34fcd0
MD
3165 pthread_mutex_lock(&registry->lock);
3166
1b532a60
DG
3167 /* Metadata already exists for this registry or it was closed previously */
3168 if (registry->metadata_key || registry->metadata_closed) {
7972aab2
DG
3169 ret = 0;
3170 goto error;
5b4a0ec0
DG
3171 }
3172
ffe60014 3173 /* Allocate UST metadata */
d0b96690 3174 metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, NULL);
ffe60014
DG
3175 if (!metadata) {
3176 /* malloc() failed */
3177 ret = -ENOMEM;
3178 goto error;
3179 }
5b4a0ec0 3180
ad7a9107 3181 memcpy(&metadata->attr, &ua_sess->metadata_attr, sizeof(metadata->attr));
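	/* The metadata channel reuses the attributes that were shadow-copied from the session's metadata_attr. */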
5b4a0ec0 3182
7972aab2
DG
3183 /* Need one fd for the channel. */
3184 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
3185 if (ret < 0) {
3186 ERR("Exhausted number of available FD upon create metadata");
3187 goto error;
3188 }
3189
4dc3dfc5
DG
3190 /* Get the right consumer socket for the application. */
3191 socket = consumer_find_socket_by_bitness(app->bits_per_long, consumer);
3192 if (!socket) {
3193 ret = -EINVAL;
3194 goto error_consumer;
3195 }
3196
331744e3
JD
3197 /*
3198 * Keep metadata key so we can identify it on the consumer side. Assign it
3199 * to the registry *before* we ask the consumer so we avoid the race of the
3200 * consumer requesting the metadata and the ask_channel call on our side
3201 * did not returned yet.
3202 */
3203 registry->metadata_key = metadata->key;
3204
d88aee68
DG
3205 /*
3206	 * Ask the consumer to create the metadata channel. The metadata object
3207	 * will be created by the consumer and kept there. However, the stream is
3208 * never added or monitored until we do a first push metadata to the
3209 * consumer.
3210 */
7972aab2
DG
3211 ret = ust_consumer_ask_channel(ua_sess, metadata, consumer, socket,
3212 registry);
d88aee68 3213 if (ret < 0) {
f2a444f1
DG
3214 /* Nullify the metadata key so we don't try to close it later on. */
3215 registry->metadata_key = 0;
d88aee68
DG
3216 goto error_consumer;
3217 }
3218
3219 /*
3220 * The setup command will make the metadata stream be sent to the relayd,
3221	 * if applicable, and to the thread managing the metadata. This is important
3222 * because after this point, if an error occurs, the only way the stream
3223 * can be deleted is to be monitored in the consumer.
3224 */
7972aab2 3225 ret = consumer_setup_metadata(socket, metadata->key);
ffe60014 3226 if (ret < 0) {
f2a444f1
DG
3227 /* Nullify the metadata key so we don't try to close it later on. */
3228 registry->metadata_key = 0;
d88aee68 3229 goto error_consumer;
5b4a0ec0
DG
3230 }
3231
7972aab2
DG
3232 DBG2("UST metadata with key %" PRIu64 " created for app pid %d",
3233 metadata->key, app->pid);
5b4a0ec0 3234
d88aee68 3235error_consumer:
b80f0b6c 3236 lttng_fd_put(LTTNG_FD_APPS, 1);
d88aee68 3237 delete_ust_app_channel(-1, metadata, app);
5b4a0ec0 3238error:
ce34fcd0 3239 pthread_mutex_unlock(&registry->lock);
ffe60014 3240 return ret;
5b4a0ec0
DG
3241}
3242
5b4a0ec0 3243/*
d88aee68
DG
3244 * Return ust app pointer or NULL if not found. RCU read side lock MUST be
3245 * acquired before calling this function.
5b4a0ec0
DG
3246 */
3247struct ust_app *ust_app_find_by_pid(pid_t pid)
3248{
d88aee68 3249 struct ust_app *app = NULL;
bec39940
DG
3250 struct lttng_ht_node_ulong *node;
3251 struct lttng_ht_iter iter;
5b4a0ec0 3252
bec39940
DG
3253 lttng_ht_lookup(ust_app_ht, (void *)((unsigned long) pid), &iter);
3254 node = lttng_ht_iter_get_node_ulong(&iter);
5b4a0ec0
DG
3255 if (node == NULL) {
3256 DBG2("UST app no found with pid %d", pid);
3257 goto error;
3258 }
5b4a0ec0
DG
3259
3260 DBG2("Found UST app by pid %d", pid);
3261
d88aee68 3262 app = caa_container_of(node, struct ust_app, pid_n);
5b4a0ec0
DG
3263
3264error:
d88aee68 3265 return app;
5b4a0ec0
DG
3266}
3267
d88aee68
DG
3268/*
3269 * Allocate and init a UST app object using the registration information and
3270 * the command socket. This is called when the command socket connects to the
3271 * session daemon.
3272 *
3273 * The object is returned on success or else NULL.
3274 */
d0b96690 3275struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
5b4a0ec0 3276{
d0b96690
DG
3277 struct ust_app *lta = NULL;
3278
3279 assert(msg);
3280 assert(sock >= 0);
3281
3282 DBG3("UST app creating application for socket %d", sock);
5b4a0ec0 3283
173af62f
DG
3284 if ((msg->bits_per_long == 64 &&
3285 (uatomic_read(&ust_consumerd64_fd) == -EINVAL))
3286 || (msg->bits_per_long == 32 &&
3287 (uatomic_read(&ust_consumerd32_fd) == -EINVAL))) {
f943b0fb 3288 ERR("Registration failed: application \"%s\" (pid: %d) has "
d0b96690
DG
3289 "%d-bit long, but no consumerd for this size is available.\n",
3290 msg->name, msg->pid, msg->bits_per_long);
3291 goto error;
3f2c5fcc 3292 }
d0b96690 3293
5b4a0ec0
DG
3294 lta = zmalloc(sizeof(struct ust_app));
3295 if (lta == NULL) {
3296 PERROR("malloc");
d0b96690 3297 goto error;
5b4a0ec0
DG
3298 }
3299
3300 lta->ppid = msg->ppid;
3301 lta->uid = msg->uid;
3302 lta->gid = msg->gid;
d0b96690 3303
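	/* Record the application ABI (word size, alignments, byte order) from the registration message; it is needed later when initializing the registries. */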
7753dea8 3304 lta->bits_per_long = msg->bits_per_long;
d0b96690
DG
3305 lta->uint8_t_alignment = msg->uint8_t_alignment;
3306 lta->uint16_t_alignment = msg->uint16_t_alignment;
3307 lta->uint32_t_alignment = msg->uint32_t_alignment;
3308 lta->uint64_t_alignment = msg->uint64_t_alignment;
3309 lta->long_alignment = msg->long_alignment;
3310 lta->byte_order = msg->byte_order;
3311
5b4a0ec0
DG
3312 lta->v_major = msg->major;
3313 lta->v_minor = msg->minor;
d9bf3ca4 3314 lta->sessions = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
d0b96690 3315 lta->ust_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
10b56aef 3316 lta->ust_sessions_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
d0b96690 3317 lta->notify_sock = -1;
d88aee68
DG
3318
3319 /* Copy name and make sure it's NULL terminated. */
3320 strncpy(lta->name, msg->name, sizeof(lta->name));
3321 lta->name[UST_APP_PROCNAME_LEN] = '\0';
3322
3323 /*
3324 * Before this can be called, when receiving the registration information,
3325 * the application compatibility is checked. So, at this point, the
3326 * application can work with this session daemon.
3327 */
d0b96690 3328 lta->compatible = 1;
5b4a0ec0 3329
852d0037 3330 lta->pid = msg->pid;
d0b96690 3331 lttng_ht_node_init_ulong(&lta->pid_n, (unsigned long) lta->pid);
852d0037 3332 lta->sock = sock;
fb45065e 3333 pthread_mutex_init(&lta->sock_lock, NULL);
d0b96690 3334 lttng_ht_node_init_ulong(&lta->sock_n, (unsigned long) lta->sock);
5b4a0ec0 3335
d42f20df 3336 CDS_INIT_LIST_HEAD(&lta->teardown_head);
d0b96690
DG
3337error:
3338 return lta;
3339}
3340
d88aee68
DG
3341/*
3342 * For a given application object, add it to every hash table.
3343 */
d0b96690
DG
3344void ust_app_add(struct ust_app *app)
3345{
3346 assert(app);
3347 assert(app->notify_sock >= 0);
3348
5b4a0ec0 3349 rcu_read_lock();
852d0037
DG
3350
3351 /*
3352 * On a re-registration, we want to kick out the previous registration of
3353 * that pid
3354 */
d0b96690 3355 lttng_ht_add_replace_ulong(ust_app_ht, &app->pid_n);
852d0037
DG
3356
3357 /*
3358	 * The socket _should_ be unique until _we_ call close. So, an add_unique
3359	 * on ust_app_ht_by_sock is used, which asserts if the entry was
3360 * already in the table.
3361 */
d0b96690 3362 lttng_ht_add_unique_ulong(ust_app_ht_by_sock, &app->sock_n);
852d0037 3363
d0b96690
DG
3364 /* Add application to the notify socket hash table. */
3365 lttng_ht_node_init_ulong(&app->notify_sock_n, app->notify_sock);
3366 lttng_ht_add_unique_ulong(ust_app_ht_by_notify_sock, &app->notify_sock_n);
5b4a0ec0 3367
d0b96690 3368 DBG("App registered with pid:%d ppid:%d uid:%d gid:%d sock:%d name:%s "
d88aee68
DG
3369 "notify_sock:%d (version %d.%d)", app->pid, app->ppid, app->uid,
3370 app->gid, app->sock, app->name, app->notify_sock, app->v_major,
3371 app->v_minor);
5b4a0ec0 3372
d0b96690
DG
3373 rcu_read_unlock();
3374}
3375
d88aee68
DG
3376/*
3377 * Set the application version into the object.
3378 *
3379 * Return 0 on success else a negative value either an errno code or a
3380 * LTTng-UST error code.
3381 */
d0b96690
DG
3382int ust_app_version(struct ust_app *app)
3383{
d88aee68
DG
3384 int ret;
3385
d0b96690 3386 assert(app);
d88aee68 3387
fb45065e 3388 pthread_mutex_lock(&app->sock_lock);
d88aee68 3389 ret = ustctl_tracer_version(app->sock, &app->version);
fb45065e 3390 pthread_mutex_unlock(&app->sock_lock);
d88aee68
DG
3391 if (ret < 0) {
3392 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
5368d366 3393 ERR("UST app %d version failed with ret %d", app->sock, ret);
d88aee68 3394 } else {
5368d366 3395 DBG3("UST app %d version failed. Application is dead", app->sock);
d88aee68
DG
3396 }
3397 }
3398
3399 return ret;
5b4a0ec0
DG
3400}
3401
3402/*
3403 * Unregister app by removing it from the global traceable app list and freeing
3404 * the data struct.
3405 *
3406 * The socket is already closed at this point, so there is no need to close it again.
3407 */
3408void ust_app_unregister(int sock)
3409{
3410 struct ust_app *lta;
bec39940 3411 struct lttng_ht_node_ulong *node;
c4b88406 3412 struct lttng_ht_iter ust_app_sock_iter;
bec39940 3413 struct lttng_ht_iter iter;
d42f20df 3414 struct ust_app_session *ua_sess;
525b0740 3415 int ret;
5b4a0ec0
DG
3416
3417 rcu_read_lock();
886459c6 3418
5b4a0ec0 3419 /* Get the node reference for a call_rcu */
c4b88406
MD
3420 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &ust_app_sock_iter);
3421 node = lttng_ht_iter_get_node_ulong(&ust_app_sock_iter);
d0b96690 3422 assert(node);
284d8f55 3423
852d0037 3424 lta = caa_container_of(node, struct ust_app, sock_n);
852d0037
DG
3425 DBG("PID %d unregistering with sock %d", lta->pid, sock);
3426
d88aee68 3427 /*
ce34fcd0
MD
3428 * For per-PID buffers, perform "push metadata" and flush all
3429 * application streams before removing app from hash tables,
3430 * ensuring proper behavior of data_pending check.
c4b88406 3431 * Remove sessions so they are not visible during deletion.
d88aee68 3432 */
d42f20df
DG
3433 cds_lfht_for_each_entry(lta->sessions->ht, &iter.iter, ua_sess,
3434 node.node) {
7972aab2
DG
3435 struct ust_registry_session *registry;
3436
d42f20df
DG
3437 ret = lttng_ht_del(lta->sessions, &iter);
3438 if (ret) {
3439 /* The session was already removed so scheduled for teardown. */
3440 continue;
3441 }
3442
ce34fcd0
MD
3443 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
3444 (void) ust_app_flush_app_session(lta, ua_sess);
3445 }
c4b88406 3446
d42f20df
DG
3447 /*
3448 * Add session to list for teardown. This is safe since at this point we
3449 * are the only one using this list.
3450 */
d88aee68
DG
3451 pthread_mutex_lock(&ua_sess->lock);
3452
b161602a
MD
3453 if (ua_sess->deleted) {
3454 pthread_mutex_unlock(&ua_sess->lock);
3455 continue;
3456 }
3457
d88aee68
DG
3458 /*
3459 * Normally, this is done in the delete session process which is
3460 * executed in the call rcu below. However, upon unregistration we can't
3461 * afford to wait for the grace period before pushing data or else the
3462 * data pending feature can race between the unregistration and stop
3463 * command where the data pending command is sent *before* the grace
3464 * period ended.
3465 *
3466 * The close metadata below nullifies the metadata pointer in the
3467 * session so the delete session will NOT push/close a second time.
3468 */
7972aab2 3469 registry = get_session_registry(ua_sess);
ce34fcd0 3470 if (registry) {
7972aab2
DG
3471 /* Push metadata for application before freeing the application. */
3472 (void) push_metadata(registry, ua_sess->consumer);
3473
3474 /*
3475 * Don't ask to close metadata for global per UID buffers. Close
3476 * metadata only on destroy trace session in this case. Also, the
3477 * previous push metadata could have flagged the metadata registry to
3478 * close, so don't send a close command if it is already closed.
7972aab2 3479 */
ce34fcd0 3480 if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
7972aab2
DG
3481 /* And ask to close it for this session registry. */
3482 (void) close_metadata(registry, ua_sess->consumer);
3483 }
3484 }
d42f20df 3485 cds_list_add(&ua_sess->teardown_node, &lta->teardown_head);
c4b88406 3486
d88aee68 3487 pthread_mutex_unlock(&ua_sess->lock);
d42f20df
DG
3488 }
3489
c4b88406
MD
3490 /* Remove application from socket hash table */
3491 ret = lttng_ht_del(ust_app_ht_by_sock, &ust_app_sock_iter);
3492 assert(!ret);
3493
3494 /*
3495 * Remove application from notify hash table. The thread handling the
3496 * notify socket could have deleted the node so ignore on error because
3497 * either way it's valid. The close of that socket is handled by the other
3498 * thread.
3499 */
3500 iter.iter.node = &lta->notify_sock_n.node;
3501 (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
3502
3503 /*
3504 * Ignore return value since the node might have been removed before by an
3505 * add replace during app registration because the PID can be reassigned by
3506 * the OS.
3507 */
3508 iter.iter.node = &lta->pid_n.node;
3509 ret = lttng_ht_del(ust_app_ht, &iter);
3510 if (ret) {
3511 DBG3("Unregister app by PID %d failed. This can happen on pid reuse",
3512 lta->pid);
3513 }
3514
852d0037
DG
3515 /* Free memory */
3516 call_rcu(&lta->pid_n.head, delete_ust_app_rcu);
3517
5b4a0ec0
DG
3518 rcu_read_unlock();
3519 return;
284d8f55
DG
3520}
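
/*
 * Teardown ordering performed above for a per-PID session, summarized as a
 * sketch (simplified; locking and error handling omitted):
 *
 *   ust_app_flush_app_session(lta, ua_sess);        // flush app buffers
 *   push_metadata(registry, ua_sess->consumer);     // push before grace period
 *   if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID)
 *           close_metadata(registry, ua_sess->consumer);
 *   lttng_ht_del(ust_app_ht_by_sock, ...);
 *   lttng_ht_del(ust_app_ht, ...);
 *   call_rcu(&lta->pid_n.head, delete_ust_app_rcu); // free after grace period
 *
 * Pushing metadata before removing the app from the hash tables is what
 * keeps the data_pending check coherent with respect to this unregistration.
 */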
3521
5b4a0ec0
DG
3522/*
3523 * Fill the events array with the event names of all registered apps.
3524 */
3525int ust_app_list_events(struct lttng_event **events)
421cb601 3526{
5b4a0ec0
DG
3527 int ret, handle;
3528 size_t nbmem, count = 0;
bec39940 3529 struct lttng_ht_iter iter;
5b4a0ec0 3530 struct ust_app *app;
c617c0c6 3531 struct lttng_event *tmp_event;
421cb601 3532
5b4a0ec0 3533 nbmem = UST_APP_EVENT_LIST_SIZE;
c617c0c6
MD
3534 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event));
3535 if (tmp_event == NULL) {
5b4a0ec0
DG
3536 PERROR("zmalloc ust app events");
3537 ret = -ENOMEM;
421cb601
DG
3538 goto error;
3539 }
3540
5b4a0ec0 3541 rcu_read_lock();
421cb601 3542
852d0037 3543 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
90eaa0d2 3544 struct lttng_ust_tracepoint_iter uiter;
ac3bd9c0 3545
840cb59c 3546 health_code_update();
86acf0da 3547
e0c7ec2b
DG
3548 if (!app->compatible) {
3549 /*
3550 * TODO: In time, we should notify the caller of this error by
3551 * telling them that this is a version error.
3552 */
3553 continue;
3554 }
fb45065e 3555 pthread_mutex_lock(&app->sock_lock);
852d0037 3556 handle = ustctl_tracepoint_list(app->sock);
5b4a0ec0 3557 if (handle < 0) {
ffe60014
DG
3558 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
3559 ERR("UST app list events getting handle failed for app pid %d",
3560 app->pid);
3561 }
fb45065e 3562 pthread_mutex_unlock(&app->sock_lock);
5b4a0ec0
DG
3563 continue;
3564 }
421cb601 3565
852d0037 3566 while ((ret = ustctl_tracepoint_list_get(app->sock, handle,
fb54cdbf 3567 &uiter)) != -LTTNG_UST_ERR_NOENT) {
ffe60014
DG
3568 /* Handle ustctl error. */
3569 if (ret < 0) {
fb45065e
MD
3570 int release_ret;
3571
a2ba1ab0 3572 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
ffe60014
DG
3573 ERR("UST app tp list get failed for app %d with ret %d",
3574 app->sock, ret);
3575 } else {
3576 DBG3("UST app tp list get failed. Application is dead");
3757b385
DG
3577 /*
3578 * This is normal behavior, an application can die during the
3579 * creation process. Don't report an error so the execution can
3580 * continue normally.
3581 */
3582 break;
ffe60014 3583 }
98f595d4 3584 free(tmp_event);
fb45065e 3585 release_ret = ustctl_release_handle(app->sock, handle);
68313703
JG
3586 if (release_ret < 0 &&
3587 release_ret != -LTTNG_UST_ERR_EXITING &&
3588 release_ret != -EPIPE) {
fb45065e
MD
3589 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
3590 }
3591 pthread_mutex_unlock(&app->sock_lock);
ffe60014
DG
3592 goto rcu_error;
3593 }
3594
840cb59c 3595 health_code_update();
815564d8 3596 if (count >= nbmem) {
d7b3776f 3597 /* In case the realloc fails, we free the memory */
53efb85a
MD
3598 struct lttng_event *new_tmp_event;
3599 size_t new_nbmem;
3600
3601 new_nbmem = nbmem << 1;
3602 DBG2("Reallocating event list from %zu to %zu entries",
3603 nbmem, new_nbmem);
3604 new_tmp_event = realloc(tmp_event,
3605 new_nbmem * sizeof(struct lttng_event));
3606 if (new_tmp_event == NULL) {
fb45065e
MD
3607 int release_ret;
3608
5b4a0ec0 3609 PERROR("realloc ust app events");
c617c0c6 3610 free(tmp_event);
5b4a0ec0 3611 ret = -ENOMEM;
fb45065e 3612 release_ret = ustctl_release_handle(app->sock, handle);
68313703
JG
3613 if (release_ret < 0 &&
3614 release_ret != -LTTNG_UST_ERR_EXITING &&
3615 release_ret != -EPIPE) {
fb45065e
MD
3616 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
3617 }
3618 pthread_mutex_unlock(&app->sock_lock);
5b4a0ec0
DG
3619 goto rcu_error;
3620 }
53efb85a
MD
3621 /* Zero the new memory */
3622 memset(new_tmp_event + nbmem, 0,
3623 (new_nbmem - nbmem) * sizeof(struct lttng_event));
3624 nbmem = new_nbmem;
3625 tmp_event = new_tmp_event;
5b4a0ec0 3626 }
c617c0c6
MD
3627 memcpy(tmp_event[count].name, uiter.name, LTTNG_UST_SYM_NAME_LEN);
3628 tmp_event[count].loglevel = uiter.loglevel;
3629 tmp_event[count].type = (enum lttng_event_type) LTTNG_UST_TRACEPOINT;
3630 tmp_event[count].pid = app->pid;
3631 tmp_event[count].enabled = -1;
5b4a0ec0 3632 count++;
421cb601 3633 }
fb45065e
MD
3634 ret = ustctl_release_handle(app->sock, handle);
3635 pthread_mutex_unlock(&app->sock_lock);
68313703 3636 if (ret < 0 && ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
fb45065e
MD
3637 ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
3638 }
421cb601
DG
3639 }
3640
5b4a0ec0 3641 ret = count;
c617c0c6 3642 *events = tmp_event;
421cb601 3643
5b4a0ec0 3644 DBG2("UST app list events done (%zu events)", count);
421cb601 3645
5b4a0ec0
DG
3646rcu_error:
3647 rcu_read_unlock();
421cb601 3648error:
840cb59c 3649 health_code_update();
5b4a0ec0 3650 return ret;
421cb601
DG
3651}
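
/*
 * The list above grows by doubling when it fills up; the same pattern in a
 * generic, self-contained form (hypothetical variables, not tied to this
 * function):
 *
 *   if (count >= nbmem) {
 *           size_t new_nbmem = nbmem << 1;
 *           struct lttng_event *tmp = realloc(events,
 *                           new_nbmem * sizeof(*events));
 *           if (!tmp) {
 *                   free(events);
 *                   return -ENOMEM;
 *           }
 *           memset(tmp + nbmem, 0,
 *                           (new_nbmem - nbmem) * sizeof(*events));
 *           events = tmp;
 *           nbmem = new_nbmem;
 *   }
 */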
3652
f37d259d
MD
3653/*
3654 * Fill the fields array with the event fields of all registered apps.
3655 */
3656int ust_app_list_event_fields(struct lttng_event_field **fields)
3657{
3658 int ret, handle;
3659 size_t nbmem, count = 0;
3660 struct lttng_ht_iter iter;
3661 struct ust_app *app;
c617c0c6 3662 struct lttng_event_field *tmp_event;
f37d259d
MD
3663
3664 nbmem = UST_APP_EVENT_LIST_SIZE;
c617c0c6
MD
3665 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event_field));
3666 if (tmp_event == NULL) {
f37d259d
MD
3667 PERROR("zmalloc ust app event fields");
3668 ret = -ENOMEM;
3669 goto error;
3670 }
3671
3672 rcu_read_lock();
3673
3674 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3675 struct lttng_ust_field_iter uiter;
3676
840cb59c 3677 health_code_update();
86acf0da 3678
f37d259d
MD
3679 if (!app->compatible) {
3680 /*
3681 * TODO: In time, we should notify the caller of this error by
3682 * telling them that this is a version error.
3683 */
3684 continue;
3685 }
fb45065e 3686 pthread_mutex_lock(&app->sock_lock);
f37d259d
MD
3687 handle = ustctl_tracepoint_field_list(app->sock);
3688 if (handle < 0) {
ffe60014
DG
3689 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
3690 ERR("UST app list field getting handle failed for app pid %d",
3691 app->pid);
3692 }
fb45065e 3693 pthread_mutex_unlock(&app->sock_lock);
f37d259d
MD
3694 continue;
3695 }
3696
3697 while ((ret = ustctl_tracepoint_field_list_get(app->sock, handle,
fb54cdbf 3698 &uiter)) != -LTTNG_UST_ERR_NOENT) {
ffe60014
DG
3699 /* Handle ustctl error. */
3700 if (ret < 0) {
fb45065e
MD
3701 int release_ret;
3702
a2ba1ab0 3703 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
ffe60014
DG
3704 ERR("UST app tp list field failed for app %d with ret %d",
3705 app->sock, ret);
3706 } else {
3707 DBG3("UST app tp list field failed. Application is dead");
3757b385
DG
3708 /*
3709 * This is normal behavior, an application can die during the
3710 * creation process. Don't report an error so the execution can
98f595d4 3711 * continue normally. Reset list and count for next app.
3757b385
DG
3712 */
3713 break;
ffe60014 3714 }
98f595d4 3715 free(tmp_event);
fb45065e
MD
3716 release_ret = ustctl_release_handle(app->sock, handle);
3717 pthread_mutex_unlock(&app->sock_lock);
68313703
JG
3718 if (release_ret < 0 &&
3719 release_ret != -LTTNG_UST_ERR_EXITING &&
3720 release_ret != -EPIPE) {
fb45065e
MD
3721 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
3722 }
ffe60014
DG
3723 goto rcu_error;
3724 }
3725
840cb59c 3726 health_code_update();
f37d259d 3727 if (count >= nbmem) {
d7b3776f 3728 /* In case the realloc fails, we free the memory */
53efb85a
MD
3729 struct lttng_event_field *new_tmp_event;
3730 size_t new_nbmem;
3731
3732 new_nbmem = nbmem << 1;
3733 DBG2("Reallocating event field list from %zu to %zu entries",
3734 nbmem, new_nbmem);
3735 new_tmp_event = realloc(tmp_event,
3736 new_nbmem * sizeof(struct lttng_event_field));
3737 if (new_tmp_event == NULL) {
fb45065e
MD
3738 int release_ret;
3739
f37d259d 3740 PERROR("realloc ust app event fields");
c617c0c6 3741 free(tmp_event);
f37d259d 3742 ret = -ENOMEM;
fb45065e
MD
3743 release_ret = ustctl_release_handle(app->sock, handle);
3744 pthread_mutex_unlock(&app->sock_lock);
68313703
JG
3745 if (release_ret &&
3746 release_ret != -LTTNG_UST_ERR_EXITING &&
3747 release_ret != -EPIPE) {
fb45065e
MD
3748 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
3749 }
f37d259d
MD
3750 goto rcu_error;
3751 }
53efb85a
MD
3752 /* Zero the new memory */
3753 memset(new_tmp_event + nbmem, 0,
3754 (new_nbmem - nbmem) * sizeof(struct lttng_event_field));
3755 nbmem = new_nbmem;
3756 tmp_event = new_tmp_event;
f37d259d 3757 }
f37d259d 3758
c617c0c6 3759 memcpy(tmp_event[count].field_name, uiter.field_name, LTTNG_UST_SYM_NAME_LEN);
2e84128e
DG
3760 /* Mapping between these enums matches 1 to 1. */
3761 tmp_event[count].type = (enum lttng_event_field_type) uiter.type;
c617c0c6 3762 tmp_event[count].nowrite = uiter.nowrite;
f37d259d 3763
c617c0c6
MD
3764 memcpy(tmp_event[count].event.name, uiter.event_name, LTTNG_UST_SYM_NAME_LEN);
3765 tmp_event[count].event.loglevel = uiter.loglevel;
2e84128e 3766 tmp_event[count].event.type = LTTNG_EVENT_TRACEPOINT;
c617c0c6
MD
3767 tmp_event[count].event.pid = app->pid;
3768 tmp_event[count].event.enabled = -1;
f37d259d
MD
3769 count++;
3770 }
fb45065e
MD
3771 ret = ustctl_release_handle(app->sock, handle);
3772 pthread_mutex_unlock(&app->sock_lock);
68313703
JG
3773 if (ret < 0 &&
3774 ret != -LTTNG_UST_ERR_EXITING &&
3775 ret != -EPIPE) {
fb45065e
MD
3776 ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
3777 }
f37d259d
MD
3778 }
3779
3780 ret = count;
c617c0c6 3781 *fields = tmp_event;
f37d259d
MD
3782
3783 DBG2("UST app list event fields done (%zu events)", count);
3784
3785rcu_error:
3786 rcu_read_unlock();
3787error:
840cb59c 3788 health_code_update();
f37d259d
MD
3789 return ret;
3790}
3791
5b4a0ec0
DG
3792/*
3793 * Free and clean all traceable apps of the global list.
36b588ed
MD
3794 *
3795 * Should _NOT_ be called with RCU read-side lock held.
5b4a0ec0
DG
3796 */
3797void ust_app_clean_list(void)
421cb601 3798{
5b4a0ec0 3799 int ret;
659ed79f 3800 struct ust_app *app;
bec39940 3801 struct lttng_ht_iter iter;
421cb601 3802
5b4a0ec0 3803 DBG2("UST app cleaning registered apps hash table");
421cb601 3804
5b4a0ec0 3805 rcu_read_lock();
421cb601 3806
f1b711c4
MD
3807 if (ust_app_ht) {
3808 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3809 ret = lttng_ht_del(ust_app_ht, &iter);
3810 assert(!ret);
3811 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
3812 }
421cb601
DG
3813 }
3814
852d0037 3815 /* Cleanup socket hash table */
f1b711c4
MD
3816 if (ust_app_ht_by_sock) {
3817 cds_lfht_for_each_entry(ust_app_ht_by_sock->ht, &iter.iter, app,
3818 sock_n.node) {
3819 ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
3820 assert(!ret);
3821 }
bec39940 3822 }
852d0037 3823
d88aee68 3824 /* Cleanup notify socket hash table */
f1b711c4
MD
3825 if (ust_app_ht_by_notify_sock) {
3826 cds_lfht_for_each_entry(ust_app_ht_by_notify_sock->ht, &iter.iter, app,
3827 notify_sock_n.node) {
3828 ret = lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
3829 assert(!ret);
3830 }
d88aee68 3831 }
36b588ed 3832 rcu_read_unlock();
d88aee68 3833
bec39940 3834 /* Destroy is done only when the ht is empty */
f1b711c4
MD
3835 if (ust_app_ht) {
3836 ht_cleanup_push(ust_app_ht);
3837 }
3838 if (ust_app_ht_by_sock) {
3839 ht_cleanup_push(ust_app_ht_by_sock);
3840 }
3841 if (ust_app_ht_by_notify_sock) {
3842 ht_cleanup_push(ust_app_ht_by_notify_sock);
3843 }
5b4a0ec0
DG
3844}
3845
3846/*
3847 * Init UST app hash table.
3848 */
57703f6e 3849int ust_app_ht_alloc(void)
5b4a0ec0 3850{
bec39940 3851 ust_app_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
57703f6e
MD
3852 if (!ust_app_ht) {
3853 return -1;
3854 }
852d0037 3855 ust_app_ht_by_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
57703f6e
MD
3856 if (!ust_app_ht_by_sock) {
3857 return -1;
3858 }
d0b96690 3859 ust_app_ht_by_notify_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
57703f6e
MD
3860 if (!ust_app_ht_by_notify_sock) {
3861 return -1;
3862 }
3863 return 0;
421cb601
DG
3864}
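
/*
 * Typical lifecycle of these hash tables in the session daemon, sketched
 * under the assumption of a single init/teardown path:
 *
 *   if (ust_app_ht_alloc() < 0) {
 *           // Abort daemon initialization.
 *   }
 *   // ... register/unregister applications while running ...
 *   ust_app_clean_list();   // frees every remaining app via call_rcu
 */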
3865
78f0bacd
DG
3866/*
3867 * For a specific UST session, disable the channel for all registered apps.
3868 */
35a9059d 3869int ust_app_disable_channel_glb(struct ltt_ust_session *usess,
78f0bacd
DG
3870 struct ltt_ust_channel *uchan)
3871{
3872 int ret = 0;
bec39940
DG
3873 struct lttng_ht_iter iter;
3874 struct lttng_ht_node_str *ua_chan_node;
78f0bacd
DG
3875 struct ust_app *app;
3876 struct ust_app_session *ua_sess;
8535a6d9 3877 struct ust_app_channel *ua_chan;
78f0bacd
DG
3878
3879 if (usess == NULL || uchan == NULL) {
3880 ERR("Disabling UST global channel with NULL values");
3881 ret = -1;
3882 goto error;
3883 }
3884
d9bf3ca4 3885 DBG2("UST app disabling channel %s from global domain for session id %" PRIu64,
a991f516 3886 uchan->name, usess->id);
78f0bacd
DG
3887
3888 rcu_read_lock();
3889
3890 /* For every registered applications */
852d0037 3891 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
bec39940 3892 struct lttng_ht_iter uiter;
e0c7ec2b
DG
3893 if (!app->compatible) {
3894 /*
3895 * TODO: In time, we should notify the caller of this error by
3896 * telling them that this is a version error.
3897 */
3898 continue;
3899 }
78f0bacd
DG
3900 ua_sess = lookup_session_by_app(usess, app);
3901 if (ua_sess == NULL) {
3902 continue;
3903 }
3904
8535a6d9 3905 /* Get channel */
bec39940
DG
3906 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3907 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
8535a6d9
DG
3908 /* If the session is found for the app, the channel must be there */
3909 assert(ua_chan_node);
3910
3911 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3912 /* The channel must not be already disabled */
3913 assert(ua_chan->enabled == 1);
3914
3915 /* Disable channel onto application */
3916 ret = disable_ust_app_channel(ua_sess, ua_chan, app);
78f0bacd
DG
3917 if (ret < 0) {
3918 /* XXX: We might want to report this error at some point... */
3919 continue;
3920 }
3921 }
3922
3923 rcu_read_unlock();
3924
3925error:
3926 return ret;
3927}
3928
3929/*
3930 * For a specific UST session, enable the channel for all registered apps.
3931 */
35a9059d 3932int ust_app_enable_channel_glb(struct ltt_ust_session *usess,
78f0bacd
DG
3933 struct ltt_ust_channel *uchan)
3934{
3935 int ret = 0;
bec39940 3936 struct lttng_ht_iter iter;
78f0bacd
DG
3937 struct ust_app *app;
3938 struct ust_app_session *ua_sess;
3939
3940 if (usess == NULL || uchan == NULL) {
3941 ERR("Adding UST global channel to NULL values");
3942 ret = -1;
3943 goto error;
3944 }
3945
d9bf3ca4 3946 DBG2("UST app enabling channel %s to global domain for session id %" PRIu64,
a991f516 3947 uchan->name, usess->id);
78f0bacd
DG
3948
3949 rcu_read_lock();
3950
3951 /* For every registered applications */
852d0037 3952 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
e0c7ec2b
DG
3953 if (!app->compatible) {
3954 /*
3955 * TODO: In time, we should notify the caller of this error by
3956 * telling them that this is a version error.
3957 */
3958 continue;
3959 }
78f0bacd
DG
3960 ua_sess = lookup_session_by_app(usess, app);
3961 if (ua_sess == NULL) {
3962 continue;
3963 }
3964
3965 /* Enable channel onto application */
3966 ret = enable_ust_app_channel(ua_sess, uchan, app);
3967 if (ret < 0) {
3968 /* XXX: We might want to report this error at some point... */
3969 continue;
3970 }
3971 }
3972
3973 rcu_read_unlock();
3974
3975error:
3976 return ret;
3977}
3978
b0a40d28
DG
3979/*
3980 * Disable an event in a channel and for a specific session.
3981 */
35a9059d
DG
3982int ust_app_disable_event_glb(struct ltt_ust_session *usess,
3983 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
b0a40d28
DG
3984{
3985 int ret = 0;
bec39940 3986 struct lttng_ht_iter iter, uiter;
700c5a9d 3987 struct lttng_ht_node_str *ua_chan_node;
b0a40d28
DG
3988 struct ust_app *app;
3989 struct ust_app_session *ua_sess;
3990 struct ust_app_channel *ua_chan;
3991 struct ust_app_event *ua_event;
3992
3993 DBG("UST app disabling event %s for all apps in channel "
d9bf3ca4
MD
3994 "%s for session id %" PRIu64,
3995 uevent->attr.name, uchan->name, usess->id);
b0a40d28
DG
3996
3997 rcu_read_lock();
3998
3999 /* For all registered applications */
852d0037 4000 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
e0c7ec2b
DG
4001 if (!app->compatible) {
4002 /*
4003 * TODO: In time, we should notify the caller of this error by
4004 * telling them that this is a version error.
4005 */
4006 continue;
4007 }
b0a40d28
DG
4008 ua_sess = lookup_session_by_app(usess, app);
4009 if (ua_sess == NULL) {
4010 /* Next app */
4011 continue;
4012 }
4013
4014 /* Lookup channel in the ust app session */
bec39940
DG
4015 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4016 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
b0a40d28 4017 if (ua_chan_node == NULL) {
d9bf3ca4 4018 DBG2("Channel %s not found in session id %" PRIu64 " for app pid %d."
852d0037 4019 "Skipping", uchan->name, usess->id, app->pid);
b0a40d28
DG
4020 continue;
4021 }
4022 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4023
700c5a9d
JR
4024 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
4025 uevent->filter, uevent->attr.loglevel,
4026 uevent->exclusion);
4027 if (ua_event == NULL) {
b0a40d28 4028 DBG2("Event %s not found in channel %s for app pid %d."
852d0037 4029 "Skipping", uevent->attr.name, uchan->name, app->pid);
b0a40d28
DG
4030 continue;
4031 }
b0a40d28 4032
7f79d3a1 4033 ret = disable_ust_app_event(ua_sess, ua_event, app);
b0a40d28
DG
4034 if (ret < 0) {
4035 /* XXX: Report error someday... */
4036 continue;
4037 }
4038 }
4039
4040 rcu_read_unlock();
4041
4042 return ret;
4043}
4044
421cb601 4045/*
5b4a0ec0 4046 * For a specific UST session, create the channel for all registered apps.
421cb601 4047 */
35a9059d 4048int ust_app_create_channel_glb(struct ltt_ust_session *usess,
48842b30
DG
4049 struct ltt_ust_channel *uchan)
4050{
3d8ca23b 4051 int ret = 0, created;
bec39940 4052 struct lttng_ht_iter iter;
48842b30 4053 struct ust_app *app;
3d8ca23b 4054 struct ust_app_session *ua_sess = NULL;
48842b30 4055
fc34caaa
DG
4056 /* Very wrong code flow */
4057 assert(usess);
4058 assert(uchan);
421cb601 4059
d9bf3ca4 4060 DBG2("UST app adding channel %s to UST domain for session id %" PRIu64,
a991f516 4061 uchan->name, usess->id);
48842b30
DG
4062
4063 rcu_read_lock();
421cb601 4064
5b4a0ec0 4065 /* For every registered applications */
852d0037 4066 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
e0c7ec2b
DG
4067 if (!app->compatible) {
4068 /*
4069 * TODO: In time, we should notify the caller of this error by
4070 * telling them that this is a version error.
4071 */
4072 continue;
4073 }
a9ad0c8f
MD
4074 if (!trace_ust_pid_tracker_lookup(usess, app->pid)) {
4075 /* Skip. */
4076 continue;
4077 }
4078
edb67388
DG
4079 /*
4080 * Create session on the tracer side and add it to app session HT. Note
4081 * that if the session exists, it will simply return a pointer to the ust
4082 * app session.
4083 */
3d8ca23b
DG
4084 ret = create_ust_app_session(usess, app, &ua_sess, &created);
4085 if (ret < 0) {
4086 switch (ret) {
4087 case -ENOTCONN:
4088 /*
4089 * The application's socket is not valid. Either a bad socket
4090 * or a timeout on it. We can't inform the caller that for a
4091 * specific app, the session failed so let's continue here.
4092 */
a7169585 4093 ret = 0; /* Not an error. */
3d8ca23b
DG
4094 continue;
4095 case -ENOMEM:
4096 default:
4097 goto error_rcu_unlock;
4098 }
48842b30 4099 }
3d8ca23b 4100 assert(ua_sess);
48842b30 4101
d0b96690 4102 pthread_mutex_lock(&ua_sess->lock);
b161602a
MD
4103
4104 if (ua_sess->deleted) {
4105 pthread_mutex_unlock(&ua_sess->lock);
4106 continue;
4107 }
4108
d65d2de8
DG
4109 if (!strncmp(uchan->name, DEFAULT_METADATA_NAME,
4110 sizeof(uchan->name))) {
ad7a9107
DG
4111 copy_channel_attr_to_ustctl(&ua_sess->metadata_attr, &uchan->attr);
4112 ret = 0;
d65d2de8
DG
4113 } else {
4114 /* Create channel onto application. We don't need the chan ref. */
4115 ret = create_ust_app_channel(ua_sess, uchan, app,
4116 LTTNG_UST_CHAN_PER_CPU, usess, NULL);
4117 }
d0b96690 4118 pthread_mutex_unlock(&ua_sess->lock);
3d8ca23b 4119 if (ret < 0) {
3d8ca23b
DG
4120 /* Clean up the session if we just created it. */
4121 if (created) {
d0b96690 4122 destroy_app_session(app, ua_sess);
3d8ca23b 4123 }
a7169585
MD
4124 switch (ret) {
4125 case -ENOTCONN:
4126 /*
4127 * The application's socket is not valid. Either a bad socket
4128 * or a timeout on it. We can't inform the caller that for a
4129 * specific app, the session failed so let's continue here.
4130 */
4131 ret = 0; /* Not an error. */
4132 continue;
4133 case -ENOMEM:
4134 default:
4135 goto error_rcu_unlock;
4136 }
48842b30 4137 }
48842b30 4138 }
5b4a0ec0 4139
95e047ff 4140error_rcu_unlock:
48842b30 4141 rcu_read_unlock();
3c14c33f 4142 return ret;
48842b30
DG
4143}
4144
5b4a0ec0 4145/*
edb67388 4146 * Enable event for a specific session and channel on the tracer.
5b4a0ec0 4147 */
35a9059d 4148int ust_app_enable_event_glb(struct ltt_ust_session *usess,
48842b30
DG
4149 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
4150{
4151 int ret = 0;
bec39940 4152 struct lttng_ht_iter iter, uiter;
18eace3b 4153 struct lttng_ht_node_str *ua_chan_node;
48842b30
DG
4154 struct ust_app *app;
4155 struct ust_app_session *ua_sess;
4156 struct ust_app_channel *ua_chan;
4157 struct ust_app_event *ua_event;
48842b30 4158
d9bf3ca4 4159 DBG("UST app enabling event %s for all apps for session id %" PRIu64,
a991f516 4160 uevent->attr.name, usess->id);
48842b30 4161
edb67388
DG
4162 /*
4163 * NOTE: At this point, this function is called only if the session and
4164 * channel passed are already created for all apps and enabled on the
4165 * tracer as well.
4166 */
4167
48842b30 4168 rcu_read_lock();
421cb601
DG
4169
4170 /* For all registered applications */
852d0037 4171 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
e0c7ec2b
DG
4172 if (!app->compatible) {
4173 /*
4174 * TODO: In time, we should notify the caller of this error by
4175 * telling them that this is a version error.
4176 */
4177 continue;
4178 }
edb67388 4179 ua_sess = lookup_session_by_app(usess, app);
c4a1715b
DG
4180 if (!ua_sess) {
4181 /* The application has problem or is probably dead. */
4182 continue;
4183 }
ba767faf 4184
d0b96690
DG
4185 pthread_mutex_lock(&ua_sess->lock);
4186
b161602a
MD
4187 if (ua_sess->deleted) {
4188 pthread_mutex_unlock(&ua_sess->lock);
4189 continue;
4190 }
4191
edb67388 4192 /* Lookup channel in the ust app session */
bec39940
DG
4193 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4194 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
a7169585
MD
4195 /*
4196 * It is possible that the channel cannot be found if
4197 * the channel/event creation occurs concurrently with
4198 * an application exit.
4199 */
4200 if (!ua_chan_node) {
4201 pthread_mutex_unlock(&ua_sess->lock);
4202 continue;
4203 }
edb67388
DG
4204
4205 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4206
18eace3b
DG
4207 /* Get event node */
4208 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
39c5a3a7 4209 uevent->filter, uevent->attr.loglevel, uevent->exclusion);
18eace3b 4210 if (ua_event == NULL) {
7f79d3a1 4211 DBG3("UST app enable event %s not found for app PID %d."
852d0037 4212 "Skipping app", uevent->attr.name, app->pid);
d0b96690 4213 goto next_app;
35a9059d 4214 }
35a9059d
DG
4215
4216 ret = enable_ust_app_event(ua_sess, ua_event, app);
4217 if (ret < 0) {
d0b96690 4218 pthread_mutex_unlock(&ua_sess->lock);
7f79d3a1 4219 goto error;
48842b30 4220 }
d0b96690
DG
4221 next_app:
4222 pthread_mutex_unlock(&ua_sess->lock);
edb67388
DG
4223 }
4224
7f79d3a1 4225error:
edb67388 4226 rcu_read_unlock();
edb67388
DG
4227 return ret;
4228}
4229
4230/*
4231 * For a specific existing UST session and UST channel, creates the event for
4232 * all registered apps.
4233 */
35a9059d 4234int ust_app_create_event_glb(struct ltt_ust_session *usess,
edb67388
DG
4235 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
4236{
4237 int ret = 0;
bec39940
DG
4238 struct lttng_ht_iter iter, uiter;
4239 struct lttng_ht_node_str *ua_chan_node;
edb67388
DG
4240 struct ust_app *app;
4241 struct ust_app_session *ua_sess;
4242 struct ust_app_channel *ua_chan;
4243
d9bf3ca4 4244 DBG("UST app creating event %s for all apps for session id %" PRIu64,
a991f516 4245 uevent->attr.name, usess->id);
edb67388 4246
edb67388
DG
4247 rcu_read_lock();
4248
4249 /* For all registered applications */
852d0037 4250 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
e0c7ec2b
DG
4251 if (!app->compatible) {
4252 /*
4253 * TODO: In time, we should notify the caller of this error by
4254 * telling them that this is a version error.
4255 */
4256 continue;
4257 }
edb67388 4258 ua_sess = lookup_session_by_app(usess, app);
c4a1715b
DG
4259 if (!ua_sess) {
4260 /* The application has problem or is probably dead. */
4261 continue;
4262 }
48842b30 4263
d0b96690 4264 pthread_mutex_lock(&ua_sess->lock);
b161602a
MD
4265
4266 if (ua_sess->deleted) {
4267 pthread_mutex_unlock(&ua_sess->lock);
4268 continue;
4269 }
4270
48842b30 4271 /* Lookup channel in the ust app session */
bec39940
DG
4272 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4273 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
edb67388
DG
4274 /* If the channel is not found, there is a code flow error */
4275 assert(ua_chan_node);
4276
48842b30
DG
4277 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4278
edb67388 4279 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
d0b96690 4280 pthread_mutex_unlock(&ua_sess->lock);
edb67388 4281 if (ret < 0) {
49c336c1 4282 if (ret != -LTTNG_UST_ERR_EXIST) {
fc34caaa
DG
4283 /* Possible value at this point: -ENOMEM. If so, we stop! */
4284 break;
4285 }
4286 DBG2("UST app event %s already exist on app PID %d",
852d0037 4287 uevent->attr.name, app->pid);
5b4a0ec0 4288 continue;
48842b30 4289 }
48842b30 4290 }
5b4a0ec0 4291
48842b30
DG
4292 rcu_read_unlock();
4293
4294 return ret;
4295}
4296
5b4a0ec0
DG
4297/*
4298 * Start tracing for a specific UST session and app.
4299 */
b34cbebf 4300static
421cb601 4301int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app)
48842b30
DG
4302{
4303 int ret = 0;
48842b30 4304 struct ust_app_session *ua_sess;
48842b30 4305
852d0037 4306 DBG("Starting tracing for ust app pid %d", app->pid);
5cf5d0e7 4307
509cbaf8
MD
4308 rcu_read_lock();
4309
e0c7ec2b
DG
4310 if (!app->compatible) {
4311 goto end;
4312 }
4313
421cb601
DG
4314 ua_sess = lookup_session_by_app(usess, app);
4315 if (ua_sess == NULL) {
d42f20df
DG
4316 /* The session is in teardown process. Ignore and continue. */
4317 goto end;
421cb601 4318 }
48842b30 4319
d0b96690
DG
4320 pthread_mutex_lock(&ua_sess->lock);
4321
b161602a
MD
4322 if (ua_sess->deleted) {
4323 pthread_mutex_unlock(&ua_sess->lock);
4324 goto end;
4325 }
4326
aea829b3
DG
4327 /* Upon restart, we skip the setup, already done */
4328 if (ua_sess->started) {
8be98f9a 4329 goto skip_setup;
aea829b3 4330 }
8be98f9a 4331
a4b92340
DG
4332 /* Create directories if consumer is LOCAL and has a path defined. */
4333 if (usess->consumer->type == CONSUMER_DST_LOCAL &&
4334 strlen(usess->consumer->dst.trace_path) > 0) {
4335 ret = run_as_mkdir_recursive(usess->consumer->dst.trace_path,
7972aab2 4336 S_IRWXU | S_IRWXG, ua_sess->euid, ua_sess->egid);
a4b92340 4337 if (ret < 0) {
df5b86c8 4338 if (errno != EEXIST) {
a4b92340 4339 ERR("Trace directory creation error");
d0b96690 4340 goto error_unlock;
421cb601 4341 }
173af62f 4342 }
7753dea8 4343 }
aea829b3 4344
d65d2de8
DG
4345 /*
4346 * Create the metadata for the application. This returns gracefully if a
4347 * metadata was already set for the session.
4348 */
ad7a9107 4349 ret = create_ust_app_metadata(ua_sess, app, usess->consumer);
421cb601 4350 if (ret < 0) {
d0b96690 4351 goto error_unlock;
421cb601 4352 }
48842b30 4353
840cb59c 4354 health_code_update();
86acf0da 4355
8be98f9a 4356skip_setup:
421cb601 4357 /* This start the UST tracing */
fb45065e 4358 pthread_mutex_lock(&app->sock_lock);
852d0037 4359 ret = ustctl_start_session(app->sock, ua_sess->handle);
fb45065e 4360 pthread_mutex_unlock(&app->sock_lock);
421cb601 4361 if (ret < 0) {
ffe60014
DG
4362 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4363 ERR("Error starting tracing for app pid: %d (ret: %d)",
4364 app->pid, ret);
4365 } else {
4366 DBG("UST app start session failed. Application is dead.");
3757b385
DG
4367 /*
4368 * This is normal behavior, an application can die during the
4369 * creation process. Don't report an error so the execution can
4370 * continue normally.
4371 */
4372 pthread_mutex_unlock(&ua_sess->lock);
4373 goto end;
ffe60014 4374 }
d0b96690 4375 goto error_unlock;
421cb601 4376 }
5b4a0ec0 4377
55c3953d
DG
4378 /* Indicate that the session has been started once */
4379 ua_sess->started = 1;
4380
d0b96690
DG
4381 pthread_mutex_unlock(&ua_sess->lock);
4382
840cb59c 4383 health_code_update();
86acf0da 4384
421cb601 4385 /* Quiescent wait after starting trace */
fb45065e 4386 pthread_mutex_lock(&app->sock_lock);
ffe60014 4387 ret = ustctl_wait_quiescent(app->sock);
fb45065e 4388 pthread_mutex_unlock(&app->sock_lock);
ffe60014
DG
4389 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4390 ERR("UST app wait quiescent failed for app pid %d ret %d",
4391 app->pid, ret);
4392 }
48842b30 4393
e0c7ec2b
DG
4394end:
4395 rcu_read_unlock();
840cb59c 4396 health_code_update();
421cb601 4397 return 0;
48842b30 4398
d0b96690
DG
4399error_unlock:
4400 pthread_mutex_unlock(&ua_sess->lock);
509cbaf8 4401 rcu_read_unlock();
840cb59c 4402 health_code_update();
421cb601
DG
4403 return -1;
4404}
48842b30 4405
8be98f9a
MD
4406/*
4407 * Stop tracing for a specific UST session and app.
4408 */
b34cbebf 4409static
8be98f9a
MD
4410int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
4411{
4412 int ret = 0;
4413 struct ust_app_session *ua_sess;
7972aab2 4414 struct ust_registry_session *registry;
8be98f9a 4415
852d0037 4416 DBG("Stopping tracing for ust app pid %d", app->pid);
8be98f9a
MD
4417
4418 rcu_read_lock();
4419
e0c7ec2b 4420 if (!app->compatible) {
d88aee68 4421 goto end_no_session;
e0c7ec2b
DG
4422 }
4423
8be98f9a
MD
4424 ua_sess = lookup_session_by_app(usess, app);
4425 if (ua_sess == NULL) {
d88aee68 4426 goto end_no_session;
8be98f9a
MD
4427 }
4428
d88aee68
DG
4429 pthread_mutex_lock(&ua_sess->lock);
4430
b161602a
MD
4431 if (ua_sess->deleted) {
4432 pthread_mutex_unlock(&ua_sess->lock);
4433 goto end_no_session;
4434 }
4435
9bc07046
DG
4436 /*
4437 * If started = 0, it means that stop trace has been called for a session
c45536e1
DG
4438 * that was never started. It's possible since we can have a fail start
4439 * from either the application manager thread or the command thread. Simply
4440 * indicate that this is a stop error.
9bc07046 4441 */
f9dfc3d9 4442 if (!ua_sess->started) {
c45536e1
DG
4443 goto error_rcu_unlock;
4444 }
7db205b5 4445
840cb59c 4446 health_code_update();
86acf0da 4447
9d6c7d3f 4448 /* This inhibits UST tracing */
fb45065e 4449 pthread_mutex_lock(&app->sock_lock);
852d0037 4450 ret = ustctl_stop_session(app->sock, ua_sess->handle);
fb45065e 4451 pthread_mutex_unlock(&app->sock_lock);
9d6c7d3f 4452 if (ret < 0) {
ffe60014
DG
4453 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4454 ERR("Error stopping tracing for app pid: %d (ret: %d)",
4455 app->pid, ret);
4456 } else {
4457 DBG("UST app stop session failed. Application is dead.");
3757b385
DG
4458 /*
4459 * This is normal behavior, an application can die during the
4460 * creation process. Don't report an error so the execution can
4461 * continue normally.
4462 */
4463 goto end_unlock;
ffe60014 4464 }
9d6c7d3f
DG
4465 goto error_rcu_unlock;
4466 }
4467
840cb59c 4468 health_code_update();
86acf0da 4469
9d6c7d3f 4470 /* Quiescent wait after stopping trace */
fb45065e 4471 pthread_mutex_lock(&app->sock_lock);
ffe60014 4472 ret = ustctl_wait_quiescent(app->sock);
fb45065e 4473 pthread_mutex_unlock(&app->sock_lock);
ffe60014
DG
4474 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4475 ERR("UST app wait quiescent failed for app pid %d ret %d",
4476 app->pid, ret);
4477 }
9d6c7d3f 4478
840cb59c 4479 health_code_update();
86acf0da 4480
b34cbebf
MD
4481 registry = get_session_registry(ua_sess);
4482 assert(registry);
1b532a60 4483
ce34fcd0
MD
4484 /* Push metadata for application before freeing the application. */
4485 (void) push_metadata(registry, ua_sess->consumer);
b34cbebf 4486
3757b385 4487end_unlock:
b34cbebf
MD
4488 pthread_mutex_unlock(&ua_sess->lock);
4489end_no_session:
4490 rcu_read_unlock();
4491 health_code_update();
4492 return 0;
4493
4494error_rcu_unlock:
4495 pthread_mutex_unlock(&ua_sess->lock);
4496 rcu_read_unlock();
4497 health_code_update();
4498 return -1;
4499}
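
/*
 * Per-application stop sequence performed above, summarized as a sketch
 * (only the locking taken explicitly by this function is shown):
 *
 *   pthread_mutex_lock(&app->sock_lock);
 *   ustctl_stop_session(app->sock, ua_sess->handle);
 *   pthread_mutex_unlock(&app->sock_lock);
 *
 *   pthread_mutex_lock(&app->sock_lock);
 *   ustctl_wait_quiescent(app->sock);
 *   pthread_mutex_unlock(&app->sock_lock);
 *
 *   registry = get_session_registry(ua_sess);
 *   (void) push_metadata(registry, ua_sess->consumer);
 */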
4500
b34cbebf 4501static
c4b88406
MD
4502int ust_app_flush_app_session(struct ust_app *app,
4503 struct ust_app_session *ua_sess)
b34cbebf 4504{
c4b88406 4505 int ret, retval = 0;
b34cbebf 4506 struct lttng_ht_iter iter;
b34cbebf 4507 struct ust_app_channel *ua_chan;
c4b88406 4508 struct consumer_socket *socket;
b34cbebf 4509
c4b88406 4510 DBG("Flushing app session buffers for ust app pid %d", app->pid);
b34cbebf
MD
4511
4512 rcu_read_lock();
4513
4514 if (!app->compatible) {
c4b88406 4515 goto end_not_compatible;
b34cbebf
MD
4516 }
4517
4518 pthread_mutex_lock(&ua_sess->lock);
4519
b161602a
MD
4520 if (ua_sess->deleted) {
4521 goto end_deleted;
4522 }
4523
b34cbebf
MD
4524 health_code_update();
4525
9d6c7d3f 4526 /* Flushing buffers */
c4b88406
MD
4527 socket = consumer_find_socket_by_bitness(app->bits_per_long,
4528 ua_sess->consumer);
ce34fcd0
MD
4529
4530 /* Flush buffers and push metadata. */
4531 switch (ua_sess->buffer_type) {
4532 case LTTNG_BUFFER_PER_PID:
4533 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
4534 node.node) {
4535 health_code_update();
ce34fcd0
MD
4536 ret = consumer_flush_channel(socket, ua_chan->key);
4537 if (ret) {
4538 ERR("Error flushing consumer channel");
4539 retval = -1;
4540 continue;
4541 }
8be98f9a 4542 }
ce34fcd0
MD
4543 break;
4544 case LTTNG_BUFFER_PER_UID:
4545 default:
4546 assert(0);
4547 break;
8be98f9a 4548 }
8be98f9a 4549
840cb59c 4550 health_code_update();
86acf0da 4551
b161602a 4552end_deleted:
d88aee68 4553 pthread_mutex_unlock(&ua_sess->lock);
ce34fcd0 4554
c4b88406
MD
4555end_not_compatible:
4556 rcu_read_unlock();
4557 health_code_update();
4558 return retval;
4559}
4560
4561/*
ce34fcd0
MD
4562 * Flush buffers for all applications for a specific UST session.
4563 * Called with UST session lock held.
c4b88406
MD
4564 */
4565static
ce34fcd0 4566int ust_app_flush_session(struct ltt_ust_session *usess)
c4b88406
MD
4567
4568{
99b1411c 4569 int ret = 0;
c4b88406 4570
ce34fcd0 4571 DBG("Flushing session buffers for all ust apps");
c4b88406
MD
4572
4573 rcu_read_lock();
4574
ce34fcd0
MD
4575 /* Flush buffers and push metadata. */
4576 switch (usess->buffer_type) {
4577 case LTTNG_BUFFER_PER_UID:
4578 {
4579 struct buffer_reg_uid *reg;
4580 struct lttng_ht_iter iter;
4581
4582 /* Flush all per UID buffers associated to that session. */
4583 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
4584 struct ust_registry_session *ust_session_reg;
4585 struct buffer_reg_channel *reg_chan;
4586 struct consumer_socket *socket;
4587
4588 /* Get consumer socket to use to push the metadata.*/
4589 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
4590 usess->consumer);
4591 if (!socket) {
4592 /* Ignore request if no consumer is found for the session. */
4593 continue;
4594 }
4595
4596 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
4597 reg_chan, node.node) {
4598 /*
4599 * The following call will print error values so the return
4600 * code is of little importance because whatever happens, we
4601 * have to try them all.
4602 */
4603 (void) consumer_flush_channel(socket, reg_chan->consumer_key);
4604 }
4605
4606 ust_session_reg = reg->registry->reg.ust;
4607 /* Push metadata. */
4608 (void) push_metadata(ust_session_reg, usess->consumer);
4609 }
ce34fcd0
MD
4610 break;
4611 }
4612 case LTTNG_BUFFER_PER_PID:
4613 {
4614 struct ust_app_session *ua_sess;
4615 struct lttng_ht_iter iter;
4616 struct ust_app *app;
4617
4618 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4619 ua_sess = lookup_session_by_app(usess, app);
4620 if (ua_sess == NULL) {
4621 continue;
4622 }
4623 (void) ust_app_flush_app_session(app, ua_sess);
4624 }
4625 break;
4626 }
4627 default:
99b1411c 4628 ret = -1;
ce34fcd0
MD
4629 assert(0);
4630 break;
c4b88406 4631 }
c4b88406 4632
7db205b5 4633 rcu_read_unlock();
840cb59c 4634 health_code_update();
c4b88406 4635 return ret;
8be98f9a
MD
4636}
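
/*
 * Sketch of the two flush paths dispatched above (names as used in this
 * file; consumer_flush_channel() reports its own errors):
 *
 *   per UID:  for each buffer_reg_uid of the session
 *                 for each buffer_reg_channel
 *                     consumer_flush_channel(socket, reg_chan->consumer_key);
 *                 push_metadata(reg->registry->reg.ust, usess->consumer);
 *
 *   per PID:  for each registered app with a session
 *                 ust_app_flush_app_session(app, ua_sess);
 *                 which flushes every ust_app_channel by its key.
 */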
4637
0dd01979
MD
4638static
4639int ust_app_clear_quiescent_app_session(struct ust_app *app,
4640 struct ust_app_session *ua_sess)
4641{
4642 int ret = 0;
4643 struct lttng_ht_iter iter;
4644 struct ust_app_channel *ua_chan;
4645 struct consumer_socket *socket;
4646
4647 DBG("Clearing stream quiescent state for ust app pid %d", app->pid);
4648
4649 rcu_read_lock();
4650
4651 if (!app->compatible) {
4652 goto end_not_compatible;
4653 }
4654
4655 pthread_mutex_lock(&ua_sess->lock);
4656
4657 if (ua_sess->deleted) {
4658 goto end_unlock;
4659 }
4660
4661 health_code_update();
4662
4663 socket = consumer_find_socket_by_bitness(app->bits_per_long,
4664 ua_sess->consumer);
4665 if (!socket) {
4666 ERR("Failed to find consumer (%" PRIu32 ") socket",
4667 app->bits_per_long);
4668 ret = -1;
4669 goto end_unlock;
4670 }
4671
4672 /* Clear quiescent state. */
4673 switch (ua_sess->buffer_type) {
4674 case LTTNG_BUFFER_PER_PID:
4675 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter,
4676 ua_chan, node.node) {
4677 health_code_update();
4678 ret = consumer_clear_quiescent_channel(socket,
4679 ua_chan->key);
4680 if (ret) {
4681 ERR("Error clearing quiescent state for consumer channel");
4682 ret = -1;
4683 continue;
4684 }
4685 }
4686 break;
4687 case LTTNG_BUFFER_PER_UID:
4688 default:
4689 assert(0);
4690 ret = -1;
4691 break;
4692 }
4693
4694 health_code_update();
4695
4696end_unlock:
4697 pthread_mutex_unlock(&ua_sess->lock);
4698
4699end_not_compatible:
4700 rcu_read_unlock();
4701 health_code_update();
4702 return ret;
4703}
4704
4705/*
4706 * Clear quiescent state in each stream for all applications for a
4707 * specific UST session.
4708 * Called with UST session lock held.
4709 */
4710static
4711int ust_app_clear_quiescent_session(struct ltt_ust_session *usess)
4712
4713{
4714 int ret = 0;
4715
4716 DBG("Clearing stream quiescent state for all ust apps");
4717
4718 rcu_read_lock();
4719
4720 switch (usess->buffer_type) {
4721 case LTTNG_BUFFER_PER_UID:
4722 {
4723 struct lttng_ht_iter iter;
4724 struct buffer_reg_uid *reg;
4725
4726 /*
4727 * Clear quiescent for all per UID buffers associated to
4728 * that session.
4729 */
4730 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
4731 struct consumer_socket *socket;
4732 struct buffer_reg_channel *reg_chan;
4733
4734 /* Get associated consumer socket.*/
4735 socket = consumer_find_socket_by_bitness(
4736 reg->bits_per_long, usess->consumer);
4737 if (!socket) {
4738 /*
4739 * Ignore request if no consumer is found for
4740 * the session.
4741 */
4742 continue;
4743 }
4744
4745 cds_lfht_for_each_entry(reg->registry->channels->ht,
4746 &iter.iter, reg_chan, node.node) {
4747 /*
4748 * The following call will print error values so
4749 * the return code is of little importance
4750 * because whatever happens, we have to try them
4751 * all.
4752 */
4753 (void) consumer_clear_quiescent_channel(socket,
4754 reg_chan->consumer_key);
4755 }
4756 }
4757 break;
4758 }
4759 case LTTNG_BUFFER_PER_PID:
4760 {
4761 struct ust_app_session *ua_sess;
4762 struct lttng_ht_iter iter;
4763 struct ust_app *app;
4764
4765 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app,
4766 pid_n.node) {
4767 ua_sess = lookup_session_by_app(usess, app);
4768 if (ua_sess == NULL) {
4769 continue;
4770 }
4771 (void) ust_app_clear_quiescent_app_session(app,
4772 ua_sess);
4773 }
4774 break;
4775 }
4776 default:
4777 ret = -1;
4778 assert(0);
4779 break;
4780 }
4781
4782 rcu_read_unlock();
4783 health_code_update();
4784 return ret;
4785}
4786
84cd17c6
MD
4787/*
4788 * Destroy a specific UST session in apps.
4789 */
3353de95 4790static int destroy_trace(struct ltt_ust_session *usess, struct ust_app *app)
84cd17c6 4791{
ffe60014 4792 int ret;
84cd17c6 4793 struct ust_app_session *ua_sess;
bec39940 4794 struct lttng_ht_iter iter;
d9bf3ca4 4795 struct lttng_ht_node_u64 *node;
84cd17c6 4796
852d0037 4797 DBG("Destroy tracing for ust app pid %d", app->pid);
84cd17c6
MD
4798
4799 rcu_read_lock();
4800
e0c7ec2b
DG
4801 if (!app->compatible) {
4802 goto end;
4803 }
4804
84cd17c6 4805 __lookup_session_by_app(usess, app, &iter);
d9bf3ca4 4806 node = lttng_ht_iter_get_node_u64(&iter);
84cd17c6 4807 if (node == NULL) {
d42f20df
DG
4808 /* Session is being or is deleted. */
4809 goto end;
84cd17c6
MD
4810 }
4811 ua_sess = caa_container_of(node, struct ust_app_session, node);
c4a1715b 4812
840cb59c 4813 health_code_update();
d0b96690 4814 destroy_app_session(app, ua_sess);
84cd17c6 4815
840cb59c 4816 health_code_update();
7db205b5 4817
84cd17c6 4818 /* Quiescent wait after stopping trace */
fb45065e 4819 pthread_mutex_lock(&app->sock_lock);
ffe60014 4820 ret = ustctl_wait_quiescent(app->sock);
fb45065e 4821 pthread_mutex_unlock(&app->sock_lock);
ffe60014
DG
4822 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4823 ERR("UST app wait quiescent failed for app pid %d ret %d",
4824 app->pid, ret);
4825 }
e0c7ec2b
DG
4826end:
4827 rcu_read_unlock();
840cb59c 4828 health_code_update();
84cd17c6 4829 return 0;
84cd17c6
MD
4830}
4831
5b4a0ec0
DG
4832/*
4833 * Start tracing for the UST session.
4834 */
421cb601
DG
4835int ust_app_start_trace_all(struct ltt_ust_session *usess)
4836{
4837 int ret = 0;
bec39940 4838 struct lttng_ht_iter iter;
421cb601 4839 struct ust_app *app;
48842b30 4840
421cb601
DG
4841 DBG("Starting all UST traces");
4842
4843 rcu_read_lock();
421cb601 4844
0dd01979
MD
4845 /*
4846 * In a start-stop-start use-case, we need to clear the quiescent state
4847 * of each channel set by the prior stop command, thus ensuring that a
4848 * following stop or destroy is sure to grab a timestamp_end near those
4849 * operations, even if the packet is empty.
4850 */
4851 (void) ust_app_clear_quiescent_session(usess);
4852
852d0037 4853 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
421cb601 4854 ret = ust_app_start_trace(usess, app);
48842b30 4855 if (ret < 0) {
5b4a0ec0
DG
4856 /* Continue to next apps even on error */
4857 continue;
48842b30 4858 }
48842b30 4859 }
5b4a0ec0 4860
48842b30
DG
4861 rcu_read_unlock();
4862
4863 return 0;
4864}
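
/*
 * Ordering sketch for the start-stop-start use-case mentioned above
 * (session-level calls, each iterating over all registered apps):
 *
 *   ust_app_start_trace_all(usess);   // clears quiescent state, then starts
 *   ust_app_stop_trace_all(usess);    // stops, flushes, pushes metadata
 *   ust_app_start_trace_all(usess);   // clears quiescent state again
 *
 * Clearing the quiescent state before each start is what lets a later stop
 * or destroy grab a timestamp_end even when packets are empty.
 */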
487cf67c 4865
8be98f9a
MD
4866/*
4867 * Stop tracing for the UST session.
ce34fcd0 4868 * Called with UST session lock held.
8be98f9a
MD
4869 */
4870int ust_app_stop_trace_all(struct ltt_ust_session *usess)
4871{
4872 int ret = 0;
bec39940 4873 struct lttng_ht_iter iter;
8be98f9a
MD
4874 struct ust_app *app;
4875
4876 DBG("Stopping all UST traces");
4877
4878 rcu_read_lock();
4879
b34cbebf
MD
4880 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4881 ret = ust_app_stop_trace(usess, app);
4882 if (ret < 0) {
4883 /* Continue to next apps even on error */
4884 continue;
4885 }
4886 }
4887
ce34fcd0 4888 (void) ust_app_flush_session(usess);
8be98f9a
MD
4889
4890 rcu_read_unlock();
4891
4892 return 0;
4893}
4894
84cd17c6
MD
4895/*
4896 * Destroy app UST session.
4897 */
4898int ust_app_destroy_trace_all(struct ltt_ust_session *usess)
4899{
4900 int ret = 0;
bec39940 4901 struct lttng_ht_iter iter;
84cd17c6
MD
4902 struct ust_app *app;
4903
4904 DBG("Destroy all UST traces");
4905
4906 rcu_read_lock();
4907
852d0037 4908 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3353de95 4909 ret = destroy_trace(usess, app);
84cd17c6
MD
4910 if (ret < 0) {
4911 /* Continue to next apps even on error */
4912 continue;
4913 }
4914 }
4915
4916 rcu_read_unlock();
4917
4918 return 0;
4919}
4920
a9ad0c8f
MD
4921static
4922void ust_app_global_create(struct ltt_ust_session *usess, struct ust_app *app)
487cf67c 4923{
55c54cce 4924 int ret = 0;
31746f93 4925 struct lttng_ht_iter iter, uiter;
3d8ca23b 4926 struct ust_app_session *ua_sess = NULL;
487cf67c
DG
4927 struct ust_app_channel *ua_chan;
4928 struct ust_app_event *ua_event;
727d5404 4929 struct ust_app_ctx *ua_ctx;
a9ad0c8f 4930 int is_created = 0;
1f3580c7 4931
a9ad0c8f 4932 ret = create_ust_app_session(usess, app, &ua_sess, &is_created);
3d8ca23b
DG
4933 if (ret < 0) {
4934 /* Tracer is probably gone or ENOMEM. */
487cf67c
DG
4935 goto error;
4936 }
a9ad0c8f
MD
4937 if (!is_created) {
4938 /* App session already created. */
4939 goto end;
4940 }
3d8ca23b 4941 assert(ua_sess);
487cf67c 4942
d0b96690
DG
4943 pthread_mutex_lock(&ua_sess->lock);
4944
b161602a
MD
4945 if (ua_sess->deleted) {
4946 pthread_mutex_unlock(&ua_sess->lock);
4947 goto end;
4948 }
4949
284d8f55 4950 /*
d0b96690 4951 * We can iterate safely here over all UST app session since the create ust
284d8f55
DG
4952 * app session above made a shadow copy of the UST global domain from the
4953 * ltt ust session.
4954 */
bec39940
DG
4955 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
4956 node.node) {
ad7a9107 4957 ret = do_create_channel(app, usess, ua_sess, ua_chan);
a7169585 4958 if (ret < 0 && ret != -ENOTCONN) {
ad7a9107 4959 /*
a7169585
MD
4960 * Stop everything. On error, the application
4961 * failed, no more file descriptor are available
4962 * or ENOMEM so stopping here is the only thing
4963 * we can do for now. The only exception is
4964 * -ENOTCONN, which indicates that the application
4965 * has exit.
ad7a9107
DG
4966 */
4967 goto error_unlock;
487cf67c
DG
4968 }
4969
31746f93
DG
4970 /*
4971 * Add context using the list so they are enabled in the same order the
4972 * user added them.
4973 */
4974 cds_list_for_each_entry(ua_ctx, &ua_chan->ctx_list, list) {
727d5404
DG
4975 ret = create_ust_channel_context(ua_chan, ua_ctx, app);
4976 if (ret < 0) {
d0b96690 4977 goto error_unlock;
727d5404
DG
4978 }
4979 }
4980
4981
284d8f55 4982 /* For each events */
bec39940
DG
4983 cds_lfht_for_each_entry(ua_chan->events->ht, &uiter.iter, ua_event,
4984 node.node) {
284d8f55
DG
4985 ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
4986 if (ret < 0) {
d0b96690 4987 goto error_unlock;
487cf67c 4988 }
36dc12cc 4989 }
487cf67c
DG
4990 }
4991
d0b96690
DG
4992 pthread_mutex_unlock(&ua_sess->lock);
4993
14fb1ebe 4994 if (usess->active) {
421cb601 4995 ret = ust_app_start_trace(usess, app);
36dc12cc 4996 if (ret < 0) {
36dc12cc
DG
4997 goto error;
4998 }
4999
852d0037 5000 DBG2("UST trace started for app pid %d", app->pid);
36dc12cc 5001 }
a9ad0c8f 5002end:
ffe60014 5003 /* Everything went well at this point. */
ffe60014
DG
5004 return;
5005
d0b96690
DG
5006error_unlock:
5007 pthread_mutex_unlock(&ua_sess->lock);
487cf67c 5008error:
ffe60014 5009 if (ua_sess) {
d0b96690 5010 destroy_app_session(app, ua_sess);
ffe60014 5011 }
487cf67c
DG
5012 return;
5013}
55cc08a6 5014
a9ad0c8f
MD
5015static
5016void ust_app_global_destroy(struct ltt_ust_session *usess, struct ust_app *app)
5017{
5018 struct ust_app_session *ua_sess;
5019
5020 ua_sess = lookup_session_by_app(usess, app);
5021 if (ua_sess == NULL) {
5022 return;
5023 }
5024 destroy_app_session(app, ua_sess);
5025}
5026
5027/*
5028 * Add channels/events from UST global domain to registered apps at sock.
5029 *
5030 * Called with session lock held.
5031 * Called with RCU read-side lock held.
5032 */
5033void ust_app_global_update(struct ltt_ust_session *usess, struct ust_app *app)
5034{
5035 assert(usess);
5036
5037 DBG2("UST app global update for app sock %d for session id %" PRIu64,
5038 app->sock, usess->id);
5039
5040 if (!app->compatible) {
5041 return;
5042 }
5043
5044 if (trace_ust_pid_tracker_lookup(usess, app->pid)) {
5045 ust_app_global_create(usess, app);
5046 } else {
5047 ust_app_global_destroy(usess, app);
5048 }
5049}
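
/*
 * The PID tracker acts as a filter here; simplified behaviour, assuming the
 * tracker already reflects the desired set of PIDs:
 *
 *   trace_ust_pid_tracker_lookup(usess, app->pid) != 0
 *           -> ust_app_global_create(usess, app)
 *   trace_ust_pid_tracker_lookup(usess, app->pid) == 0
 *           -> ust_app_global_destroy(usess, app)
 *
 * ust_app_global_update_all() below simply applies this to every entry of
 * ust_app_ht under the RCU read-side lock.
 */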
5050
5051/*
5052 * Called with session lock held.
5053 */
5054void ust_app_global_update_all(struct ltt_ust_session *usess)
5055{
5056 struct lttng_ht_iter iter;
5057 struct ust_app *app;
5058
5059 rcu_read_lock();
5060 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5061 ust_app_global_update(usess, app);
5062 }
5063 rcu_read_unlock();
5064}
5065
55cc08a6
DG
5066/*
5067 * Add context to a specific channel for global UST domain.
5068 */
5069int ust_app_add_ctx_channel_glb(struct ltt_ust_session *usess,
5070 struct ltt_ust_channel *uchan, struct ltt_ust_context *uctx)
5071{
5072 int ret = 0;
bec39940
DG
5073 struct lttng_ht_node_str *ua_chan_node;
5074 struct lttng_ht_iter iter, uiter;
55cc08a6
DG
5075 struct ust_app_channel *ua_chan = NULL;
5076 struct ust_app_session *ua_sess;
5077 struct ust_app *app;
5078
5079 rcu_read_lock();
5080
852d0037 5081 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
e0c7ec2b
DG
5082 if (!app->compatible) {
5083 /*
5084 * TODO: In time, we should notify the caller of this error by
5085 * telling them that this is a version error.
5086 */
5087 continue;
5088 }
55cc08a6
DG
5089 ua_sess = lookup_session_by_app(usess, app);
5090 if (ua_sess == NULL) {
5091 continue;
5092 }
5093
d0b96690 5094 pthread_mutex_lock(&ua_sess->lock);
b161602a
MD
5095
5096 if (ua_sess->deleted) {
5097 pthread_mutex_unlock(&ua_sess->lock);
5098 continue;
5099 }
5100
55cc08a6 5101 /* Lookup channel in the ust app session */
bec39940
DG
5102 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
5103 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
55cc08a6 5104 if (ua_chan_node == NULL) {
d0b96690 5105 goto next_app;
55cc08a6
DG
5106 }
5107 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel,
5108 node);
55cc08a6
DG
5109 ret = create_ust_app_channel_context(ua_sess, ua_chan, &uctx->ctx, app);
5110 if (ret < 0) {
d0b96690 5111 goto next_app;
55cc08a6 5112 }
d0b96690
DG
5113 next_app:
5114 pthread_mutex_unlock(&ua_sess->lock);
55cc08a6
DG
5115 }
5116
55cc08a6
DG
5117 rcu_read_unlock();
5118 return ret;
5119}
5120
76d45b40
DG
5121/*
5122 * Enable event for a channel from a UST session for a specific PID.
5123 */
5124int ust_app_enable_event_pid(struct ltt_ust_session *usess,
5125 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent, pid_t pid)
5126{
5127 int ret = 0;
bec39940 5128 struct lttng_ht_iter iter;
18eace3b 5129 struct lttng_ht_node_str *ua_chan_node;
76d45b40
DG
5130 struct ust_app *app;
5131 struct ust_app_session *ua_sess;
5132 struct ust_app_channel *ua_chan;
5133 struct ust_app_event *ua_event;
5134
5135 DBG("UST app enabling event %s for PID %d", uevent->attr.name, pid);
5136
5137 rcu_read_lock();
5138
5139 app = ust_app_find_by_pid(pid);
5140 if (app == NULL) {
5141 ERR("UST app enable event per PID %d not found", pid);
5142 ret = -1;
d0b96690 5143 goto end;
76d45b40
DG
5144 }
5145
e0c7ec2b
DG
5146 if (!app->compatible) {
5147 ret = 0;
d0b96690 5148 goto end;
e0c7ec2b
DG
5149 }
5150
76d45b40 5151 ua_sess = lookup_session_by_app(usess, app);
c4a1715b
DG
5152 if (!ua_sess) {
5153 /* The application has problem or is probably dead. */
d0b96690
DG
5154 ret = 0;
5155 goto end;
c4a1715b 5156 }
76d45b40 5157
d0b96690 5158 pthread_mutex_lock(&ua_sess->lock);
b161602a
MD
5159
5160 if (ua_sess->deleted) {
5161 ret = 0;
5162 goto end_unlock;
5163 }
5164
76d45b40 5165 /* Lookup channel in the ust app session */
bec39940
DG
5166 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
5167 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
76d45b40
DG
5168 /* If the channel is not found, there is a code flow error */
5169 assert(ua_chan_node);
5170
5171 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
5172
18eace3b 5173 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
39c5a3a7 5174 uevent->filter, uevent->attr.loglevel, uevent->exclusion);
18eace3b 5175 if (ua_event == NULL) {
76d45b40
DG
5176 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
5177 if (ret < 0) {
d0b96690 5178 goto end_unlock;
76d45b40
DG
5179 }
5180 } else {
76d45b40
DG
5181 ret = enable_ust_app_event(ua_sess, ua_event, app);
5182 if (ret < 0) {
d0b96690 5183 goto end_unlock;
76d45b40
DG
5184 }
5185 }
5186
d0b96690
DG
5187end_unlock:
5188 pthread_mutex_unlock(&ua_sess->lock);
5189end:
76d45b40
DG
5190 rcu_read_unlock();
5191 return ret;
5192}
7f79d3a1 5193
d0b96690
DG
5194/*
5195 * Receive registration and populate the given msg structure.
5196 *
5197 * On success return 0 else a negative value returned by the ustctl call.
5198 */
5199int ust_app_recv_registration(int sock, struct ust_register_msg *msg)
5200{
5201 int ret;
5202 uint32_t pid, ppid, uid, gid;
5203
5204 assert(msg);
5205
5206 ret = ustctl_recv_reg_msg(sock, &msg->type, &msg->major, &msg->minor,
5207 &pid, &ppid, &uid, &gid,
5208 &msg->bits_per_long,
5209 &msg->uint8_t_alignment,
5210 &msg->uint16_t_alignment,
5211 &msg->uint32_t_alignment,
5212 &msg->uint64_t_alignment,
5213 &msg->long_alignment,
5214 &msg->byte_order,
5215 msg->name);
5216 if (ret < 0) {
5217 switch (-ret) {
5218 case EPIPE:
5219 case ECONNRESET:
5220 case LTTNG_UST_ERR_EXITING:
5221 DBG3("UST app recv reg message failed. Application died");
5222 break;
5223 case LTTNG_UST_ERR_UNSUP_MAJOR:
5224 ERR("UST app recv reg unsupported version %d.%d. Supporting %d.%d",
5225 msg->major, msg->minor, LTTNG_UST_ABI_MAJOR_VERSION,
5226 LTTNG_UST_ABI_MINOR_VERSION);
5227 break;
5228 default:
5229 ERR("UST app recv reg message failed with ret %d", ret);
5230 break;
5231 }
5232 goto error;
5233 }
5234 msg->pid = (pid_t) pid;
5235 msg->ppid = (pid_t) ppid;
5236 msg->uid = (uid_t) uid;
5237 msg->gid = (gid_t) gid;
5238
5239error:
5240 return ret;
5241}
5242
10b56aef
MD
5243/*
5244 * Return a ust app session object using the application object and the
5245 * session object descriptor as a key. If not found, NULL is returned.
5246 * An RCU read side lock MUST be acquired when calling this function.
5247 */
5248static struct ust_app_session *find_session_by_objd(struct ust_app *app,
5249 int objd)
5250{
5251 struct lttng_ht_node_ulong *node;
5252 struct lttng_ht_iter iter;
5253 struct ust_app_session *ua_sess = NULL;
5254
5255 assert(app);
5256
5257 lttng_ht_lookup(app->ust_sessions_objd, (void *)((unsigned long) objd), &iter);
5258 node = lttng_ht_iter_get_node_ulong(&iter);
5259 if (node == NULL) {
5260 DBG2("UST app session find by objd %d not found", objd);
5261 goto error;
5262 }
5263
5264 ua_sess = caa_container_of(node, struct ust_app_session, ust_objd_node);
5265
5266error:
5267 return ua_sess;
5268}
5269
d88aee68
DG
5270/*
5271 * Return a ust app channel object using the application object and the channel
5272 * object descriptor as a key. If not found, NULL is returned. An RCU read side
5273 * lock MUST be acquired before calling this function.
5274 */
d0b96690
DG
5275static struct ust_app_channel *find_channel_by_objd(struct ust_app *app,
5276 int objd)
5277{
5278 struct lttng_ht_node_ulong *node;
5279 struct lttng_ht_iter iter;
5280 struct ust_app_channel *ua_chan = NULL;
5281
5282 assert(app);
5283
5284 lttng_ht_lookup(app->ust_objd, (void *)((unsigned long) objd), &iter);
5285 node = lttng_ht_iter_get_node_ulong(&iter);
5286 if (node == NULL) {
5287 DBG2("UST app channel find by objd %d not found", objd);
5288 goto error;
5289 }
5290
5291 ua_chan = caa_container_of(node, struct ust_app_channel, ust_objd_node);
5292
5293error:
5294 return ua_chan;
5295}
5296
d88aee68
DG
5297/*
5298 * Reply to a register channel notification from an application on the notify
5299 * socket. The channel metadata is also created.
5300 *
5301 * The session UST registry lock is acquired in this function.
5302 *
5303 * On success 0 is returned else a negative value.
5304 */
d0b96690
DG
5305static int reply_ust_register_channel(int sock, int sobjd, int cobjd,
5306 size_t nr_fields, struct ustctl_field *fields)
5307{
5308 int ret, ret_code = 0;
5309 uint32_t chan_id, reg_count;
7972aab2 5310 uint64_t chan_reg_key;
d0b96690
DG
5311 enum ustctl_channel_header type;
5312 struct ust_app *app;
5313 struct ust_app_channel *ua_chan;
5314 struct ust_app_session *ua_sess;
7972aab2 5315 struct ust_registry_session *registry;
45893984 5316 struct ust_registry_channel *chan_reg;
d0b96690
DG
5317
5318 rcu_read_lock();
5319
5320 /* Lookup application. If not found, there is a code flow error. */
5321 app = find_app_by_notify_sock(sock);
d88aee68
DG
5322 if (!app) {
5323 DBG("Application socket %d is being torn down. Abort event notify",
5324 sock);
5325 ret = 0;
d5d629b5 5326 free(fields);
d88aee68
DG
5327 goto error_rcu_unlock;
5328 }
d0b96690 5329
4950b860 5330 /* Lookup channel by UST object descriptor. */
d0b96690 5331 ua_chan = find_channel_by_objd(app, cobjd);
4950b860
MD
5332 if (!ua_chan) {
5333 DBG("Application channel is being torn down. Abort event notify");
5334 ret = 0;
d5d629b5 5335 free(fields);
4950b860
MD
5336 goto error_rcu_unlock;
5337 }
5338
d0b96690
DG
5339 assert(ua_chan->session);
5340 ua_sess = ua_chan->session;
d0b96690 5341
7972aab2
DG
5342 /* Get right session registry depending on the session buffer type. */
5343 registry = get_session_registry(ua_sess);
5344 assert(registry);
45893984 5345
7972aab2
DG
5346 /* Depending on the buffer type, a different channel key is used. */
5347 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
5348 chan_reg_key = ua_chan->tracing_channel_id;
d0b96690 5349 } else {
7972aab2 5350 chan_reg_key = ua_chan->key;
d0b96690
DG
5351 }
5352
7972aab2
DG
5353 pthread_mutex_lock(&registry->lock);
5354
5355 chan_reg = ust_registry_channel_find(registry, chan_reg_key);
5356 assert(chan_reg);
5357
5358 if (!chan_reg->register_done) {
5359 reg_count = ust_registry_get_event_count(chan_reg);
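/*
 * Pick the event header layout. Presumably the compact header can only
 * encode a small number of event ids (the 31 threshold suggests a 5-bit
 * id field with one reserved value); channels with more events fall
 * back to the large header.
 */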
5360 if (reg_count < 31) {
5361 type = USTCTL_CHANNEL_HEADER_COMPACT;
5362 } else {
5363 type = USTCTL_CHANNEL_HEADER_LARGE;
5364 }
5365
5366 chan_reg->nr_ctx_fields = nr_fields;
5367 chan_reg->ctx_fields = fields;
5368 chan_reg->header_type = type;
d0b96690 5369 } else {
7972aab2
DG
5370 /* Get current already assigned values. */
5371 type = chan_reg->header_type;
d5d629b5
DG
5372 free(fields);
5373 /* Set to NULL so the error path does not do a double free. */
5374 fields = NULL;
d0b96690 5375 }
7972aab2
DG
5376 /* Channel id is set during the object creation. */
5377 chan_id = chan_reg->chan_id;
d0b96690
DG
5378
5379 /* Append to metadata */
7972aab2
DG
5380 if (!chan_reg->metadata_dumped) {
5381 ret_code = ust_metadata_channel_statedump(registry, chan_reg);
d0b96690
DG
5382 if (ret_code) {
5383 ERR("Error appending channel metadata (errno = %d)", ret_code);
5384 goto reply;
5385 }
5386 }
5387
5388reply:
7972aab2
DG
5389 DBG3("UST app replying to register channel key %" PRIu64
5390 " with id %u, type: %d, ret: %d", chan_reg_key, chan_id, type,
5391 ret_code);
d0b96690
DG
5392
5393 ret = ustctl_reply_register_channel(sock, chan_id, type, ret_code);
5394 if (ret < 0) {
5395 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5396 ERR("UST app reply channel failed with ret %d", ret);
5397 } else {
5398 DBG3("UST app reply channel failed. Application died");
5399 }
5400 goto error;
5401 }
5402
7972aab2
DG
5403 /* This channel registry registration is completed. */
5404 chan_reg->register_done = 1;
5405
d0b96690 5406error:
7972aab2 5407 pthread_mutex_unlock(&registry->lock);
d88aee68 5408error_rcu_unlock:
d0b96690 5409 rcu_read_unlock();
d5d629b5
DG
5410 if (ret) {
5411 free(fields);
5412 }
d0b96690
DG
5413 return ret;
5414}
5415
d88aee68
DG
5416/*
5417 * Add event to the UST channel registry. When the event is added to the
5418 * registry, the metadata is also created. Once done, this replies to the
5419 * application with the appropriate error code.
5420 *
5421 * The session UST registry lock is acquired in the function.
5422 *
5423 * On success 0 is returned else a negative value.
5424 */
d0b96690 5425static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
2106efa0
PP
5426 char *sig, size_t nr_fields, struct ustctl_field *fields,
5427 int loglevel_value, char *model_emf_uri)
d0b96690
DG
5428{
5429 int ret, ret_code;
5430 uint32_t event_id = 0;
7972aab2 5431 uint64_t chan_reg_key;
d0b96690
DG
5432 struct ust_app *app;
5433 struct ust_app_channel *ua_chan;
5434 struct ust_app_session *ua_sess;
7972aab2 5435 struct ust_registry_session *registry;
d0b96690
DG
5436
5437 rcu_read_lock();
5438
5439 /* Lookup application. If not found, there is a code flow error. */
5440 app = find_app_by_notify_sock(sock);
d88aee68
DG
5441 if (!app) {
5442 DBG("Application socket %d is being torn down. Abort event notify",
5443 sock);
5444 ret = 0;
d5d629b5
DG
5445 free(sig);
5446 free(fields);
5447 free(model_emf_uri);
d88aee68
DG
5448 goto error_rcu_unlock;
5449 }
d0b96690 5450
4950b860 5451 /* Lookup channel by UST object descriptor. */
d0b96690 5452 ua_chan = find_channel_by_objd(app, cobjd);
4950b860
MD
5453 if (!ua_chan) {
5454 DBG("Application channel is being torn down. Abort event notify");
5455 ret = 0;
d5d629b5
DG
5456 free(sig);
5457 free(fields);
5458 free(model_emf_uri);
4950b860
MD
5459 goto error_rcu_unlock;
5460 }
5461
d0b96690
DG
5462 assert(ua_chan->session);
5463 ua_sess = ua_chan->session;
5464
7972aab2
DG
5465 registry = get_session_registry(ua_sess);
5466 assert(registry);
5467
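/*
 * Per-UID buffers share a registry, so the tracing channel id is used
 * as the key; per-PID buffers use the per-application channel key.
 */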
5468 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
5469 chan_reg_key = ua_chan->tracing_channel_id;
5470 } else {
5471 chan_reg_key = ua_chan->key;
5472 }
5473
5474 pthread_mutex_lock(&registry->lock);
d0b96690 5475
d5d629b5
DG
5476 /*
5477 * From this point on, this call acquires ownership of sig, fields and
5478 * model_emf_uri, meaning any needed free is done inside it. These
5479 * three variables MUST NOT be read or written after this point.
5480 */
7972aab2 5481 ret_code = ust_registry_create_event(registry, chan_reg_key,
2106efa0
PP
5482 sobjd, cobjd, name, sig, nr_fields, fields,
5483 loglevel_value, model_emf_uri, ua_sess->buffer_type,
5484 &event_id, app);
d0b96690
DG
5485
5486 /*
5487 * The return code is sent back through ustctl so the application can be
5488 * notified of an error. It is important not to return a negative error
5489 * from here or else the application will get closed.
5490 */
5491 ret = ustctl_reply_register_event(sock, event_id, ret_code);
5492 if (ret < 0) {
5493 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5494 ERR("UST app reply event failed with ret %d", ret);
5495 } else {
5496 DBG3("UST app reply event failed. Application died");
5497 }
5498 /*
5499 * No need to wipe the created event since the application socket will
5500 * get closed on error, hence cleaning up everything by itself.
5501 */
5502 goto error;
5503 }
5504
7972aab2
DG
5505 DBG3("UST registry event %s with id %" PRId32 " added successfully",
5506 name, event_id);
d88aee68 5507
d0b96690 5508error:
7972aab2 5509 pthread_mutex_unlock(&registry->lock);
d88aee68 5510error_rcu_unlock:
d0b96690
DG
5511 rcu_read_unlock();
5512 return ret;
5513}
5514
10b56aef
MD
5515/*
5516 * Add enum to the UST session registry. Once done, this replies to the
5517 * application with the appropriate error code.
5518 *
5519 * The session UST registry lock is acquired within this function.
5520 *
5521 * On success 0 is returned else a negative value.
5522 */
5523static int add_enum_ust_registry(int sock, int sobjd, char *name,
5524 struct ustctl_enum_entry *entries, size_t nr_entries)
5525{
5526 int ret = 0, ret_code;
5527 struct ust_app *app;
5528 struct ust_app_session *ua_sess;
5529 struct ust_registry_session *registry;
5530 uint64_t enum_id = -1ULL;
5531
5532 rcu_read_lock();
5533
5534 /* Lookup application. If not found, there is a code flow error. */
5535 app = find_app_by_notify_sock(sock);
5536 if (!app) {
5537 /* Not an error: return 0 since the application is going away. */
5538 DBG("Application socket %d is being torn down. Aborting enum registration",
5539 sock);
5540 free(entries);
5541 goto error_rcu_unlock;
5542 }
5543
5544 /* Lookup session by UST object descriptor. */
5545 ua_sess = find_session_by_objd(app, sobjd);
5546 if (!ua_sess) {
5547 /* Not an error: return 0 since the session is going away. */
5548 DBG("Application session is being torn down. Aborting enum registration.");
5549 free(entries);
5550 goto error_rcu_unlock;
5551 }
5552
5553 registry = get_session_registry(ua_sess);
5554 assert(registry);
5555
5556 pthread_mutex_lock(&registry->lock);
5557
5558 /*
5559 * From this point on, the callee acquires ownership of
5560 * entries. The variable entries MUST NOT be read or written after
5561 * this call.
5562 */
5563 ret_code = ust_registry_create_or_find_enum(registry, sobjd, name,
5564 entries, nr_entries, &enum_id);
5565 entries = NULL;
5566
5567 /*
5568 * The return code is sent back through ustctl so the application can be
5569 * notified of an error. It is important not to return a negative error
5570 * from here or else the application will get closed.
5571 */
5572 ret = ustctl_reply_register_enum(sock, enum_id, ret_code);
5573 if (ret < 0) {
5574 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5575 ERR("UST app reply enum failed with ret %d", ret);
5576 } else {
5577 DBG3("UST app reply enum failed. Application died");
5578 }
5579 /*
5580 * No need to wipe the created enum since the application socket will
5581 * get closed on error, hence cleaning up everything by itself.
5582 */
5583 goto error;
5584 }
5585
5586 DBG3("UST registry enum %s added successfully or already found", name);
5587
5588error:
5589 pthread_mutex_unlock(&registry->lock);
5590error_rcu_unlock:
5591 rcu_read_unlock();
5592 return ret;
5593}
5594
d88aee68
DG
5595/*
5596 * Handle application notification through the given notify socket.
5597 *
5598 * Return 0 on success or else a negative value.
5599 */
d0b96690
DG
5600int ust_app_recv_notify(int sock)
5601{
5602 int ret;
5603 enum ustctl_notify_cmd cmd;
5604
5605 DBG3("UST app receiving notify from sock %d", sock);
5606
5607 ret = ustctl_recv_notify(sock, &cmd);
5608 if (ret < 0) {
5609 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5610 ERR("UST app recv notify failed with ret %d", ret);
5611 } else {
5612 DBG3("UST app recv notify failed. Application died");
5613 }
5614 goto error;
5615 }
5616
5617 switch (cmd) {
5618 case USTCTL_NOTIFY_CMD_EVENT:
5619 {
2106efa0 5620 int sobjd, cobjd, loglevel_value;
d0b96690
DG
5621 char name[LTTNG_UST_SYM_NAME_LEN], *sig, *model_emf_uri;
5622 size_t nr_fields;
5623 struct ustctl_field *fields;
5624
5625 DBG2("UST app ustctl register event received");
5626
2106efa0
PP
5627 ret = ustctl_recv_register_event(sock, &sobjd, &cobjd, name,
5628 &loglevel_value, &sig, &nr_fields, &fields,
5629 &model_emf_uri);
d0b96690
DG
5630 if (ret < 0) {
5631 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5632 ERR("UST app recv event failed with ret %d", ret);
5633 } else {
5634 DBG3("UST app recv event failed. Application died");
5635 }
5636 goto error;
5637 }
5638
d5d629b5
DG
5639 /*
5640 * Add event to the UST registry coming from the notify socket. This
5641 * call will free sig, fields and model_emf_uri if needed. This code
5642 * path loses ownership of these variables and transfers it to the
5643 * called function.
5644 */
d0b96690 5645 ret = add_event_ust_registry(sock, sobjd, cobjd, name, sig, nr_fields,
2106efa0 5646 fields, loglevel_value, model_emf_uri);
d0b96690
DG
5647 if (ret < 0) {
5648 goto error;
5649 }
5650
5651 break;
5652 }
5653 case USTCTL_NOTIFY_CMD_CHANNEL:
5654 {
5655 int sobjd, cobjd;
5656 size_t nr_fields;
5657 struct ustctl_field *fields;
5658
5659 DBG2("UST app ustctl register channel received");
5660
5661 ret = ustctl_recv_register_channel(sock, &sobjd, &cobjd, &nr_fields,
5662 &fields);
5663 if (ret < 0) {
5664 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5665 ERR("UST app recv channel failed with ret %d", ret);
5666 } else {
5667 DBG3("UST app recv channel failed. Application died");
5668 }
5669 goto error;
5670 }
5671
d5d629b5
DG
5672 /*
5673 * Ownership of fields is transferred to this function call, meaning
5674 * it will be freed if needed. After this, it is invalid to access
5675 * fields or free it.
5676 */
d0b96690
DG
5677 ret = reply_ust_register_channel(sock, sobjd, cobjd, nr_fields,
5678 fields);
5679 if (ret < 0) {
5680 goto error;
5681 }
5682
5683 break;
5684 }
10b56aef
MD
5685 case USTCTL_NOTIFY_CMD_ENUM:
5686 {
5687 int sobjd;
5688 char name[LTTNG_UST_SYM_NAME_LEN];
5689 size_t nr_entries;
5690 struct ustctl_enum_entry *entries;
5691
5692 DBG2("UST app ustctl register enum received");
5693
5694 ret = ustctl_recv_register_enum(sock, &sobjd, name,
5695 &entries, &nr_entries);
5696 if (ret < 0) {
5697 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5698 ERR("UST app recv enum failed with ret %d", ret);
5699 } else {
5700 DBG3("UST app recv enum failed. Application died");
5701 }
5702 goto error;
5703 }
5704
5705 /* Callee assumes ownership of entries */
5706 ret = add_enum_ust_registry(sock, sobjd, name,
5707 entries, nr_entries);
5708 if (ret < 0) {
5709 goto error;
5710 }
5711
5712 break;
5713 }
d0b96690
DG
5714 default:
5715 /* Should NEVER happen. */
5716 assert(0);
5717 }
5718
5719error:
5720 return ret;
5721}
d88aee68
DG
5722
5723/*
5724 * Once the notify socket hangs up, this is called. First, it tries to find the
5725 * corresponding application. On failure, the call_rcu to close the socket is
5726 * executed. If an application is found, it tries to delete it from the notify
5727 * socket hash table. Whatever the result, it proceeds to the call_rcu.
5728 *
5729 * Note that an object needs to be allocated here so on ENOMEM failure, the
5730 * call RCU is not done but the rest of the cleanup is.
5731 */
5732void ust_app_notify_sock_unregister(int sock)
5733{
5734 int err_enomem = 0;
5735 struct lttng_ht_iter iter;
5736 struct ust_app *app;
5737 struct ust_app_notify_sock_obj *obj;
5738
5739 assert(sock >= 0);
5740
5741 rcu_read_lock();
5742
5743 obj = zmalloc(sizeof(*obj));
5744 if (!obj) {
5745 /*
5746 * An ENOMEM is kind of uncool. If this strikes, we continue the
5747 * procedure but the call_rcu will not be called. In this case, we
5748 * accept the fd leak rather than possibly creating an unsynchronized
5749 * state between threads.
5750 *
5751 * TODO: The notify object should be created once the notify socket is
5752 * registered and stored independently from the ust app object. The
5753 * tricky part is to synchronize the teardown of the application and
5754 * this notify object. Let's keep that in mind so we can avoid this
5755 * kind of shenanigans with ENOMEM in the teardown path.
5756 */
5757 err_enomem = 1;
5758 } else {
5759 obj->fd = sock;
5760 }
5761
5762 DBG("UST app notify socket unregister %d", sock);
5763
5764 /*
5765 * Lookup application by notify socket. If this fails, this means that the
5766 * hash table delete has already been done by the application
5767 * unregistration process so we can safely close the notify socket in a
5768 * call RCU.
5769 */
5770 app = find_app_by_notify_sock(sock);
5771 if (!app) {
5772 goto close_socket;
5773 }
5774
5775 iter.iter.node = &app->notify_sock_n.node;
5776
5777 /*
5778 * Whether we fail or succeed here, in both cases we have
5779 * to close the socket after a grace period to continue to the call RCU
5780 * here. If the deletion is successful, the application is not visible
5781 * anymore to other threads, and if it fails it means that it was already
5782 * deleted from the hash table so either way we just have to close the
5783 * socket.
5784 */
5785 (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
5786
5787close_socket:
5788 rcu_read_unlock();
5789
5790 /*
5791 * Close the socket after a grace period to avoid the socket being reused
5792 * before the application object is freed, creating a potential race between
5793 * threads trying to add a unique entry to the global hash table.
5794 */
5795 if (!err_enomem) {
5796 call_rcu(&obj->head, close_notify_sock_rcu);
5797 }
5798}
f45e313d
DG
5799
5800/*
5801 * Destroy a ust app data structure and free its memory.
5802 */
5803void ust_app_destroy(struct ust_app *app)
5804{
5805 if (!app) {
5806 return;
5807 }
5808
5809 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
5810}
6dc3064a
DG
5811
5812/*
5813 * Take a snapshot for a given UST session. The snapshot is sent to the given
5814 * output.
5815 *
5816 * Return 0 on success or else a negative value.
5817 */
5818int ust_app_snapshot_record(struct ltt_ust_session *usess,
d07ceecd
MD
5819 struct snapshot_output *output, int wait,
5820 uint64_t nb_packets_per_stream)
6dc3064a
DG
5821{
5822 int ret = 0;
5823 struct lttng_ht_iter iter;
5824 struct ust_app *app;
af706bb7 5825 char pathname[PATH_MAX];
6dc3064a
DG
5826
5827 assert(usess);
5828 assert(output);
5829
5830 rcu_read_lock();
5831
8c924c7b
MD
5832 switch (usess->buffer_type) {
5833 case LTTNG_BUFFER_PER_UID:
5834 {
5835 struct buffer_reg_uid *reg;
6dc3064a 5836
8c924c7b
MD
5837 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
5838 struct buffer_reg_channel *reg_chan;
5839 struct consumer_socket *socket;
6dc3064a 5840
8c924c7b
MD
5841 /* Get consumer socket to use to push the metadata. */
5842 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
5843 usess->consumer);
5844 if (!socket) {
5845 ret = -EINVAL;
5846 goto error;
5847 }
6dc3064a 5848
8c924c7b
MD
5849 memset(pathname, 0, sizeof(pathname));
5850 ret = snprintf(pathname, sizeof(pathname),
5851 DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH,
5852 reg->uid, reg->bits_per_long);
5853 if (ret < 0) {
5854 PERROR("snprintf snapshot path");
5855 goto error;
5856 }
5857
5858 /* Snapshot all data channels of this buffer registry. */
5859 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
5860 reg_chan, node.node) {
68808f4e
DG
5861 ret = consumer_snapshot_channel(socket, reg_chan->consumer_key,
5862 output, 0, usess->uid, usess->gid, pathname, wait,
d07ceecd 5863 nb_packets_per_stream);
8c924c7b
MD
5864 if (ret < 0) {
5865 goto error;
5866 }
5867 }
68808f4e
DG
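/* Snapshot the metadata channel of this per-UID registry. */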
5868 ret = consumer_snapshot_channel(socket,
5869 reg->registry->reg.ust->metadata_key, output, 1,
d07ceecd 5870 usess->uid, usess->gid, pathname, wait, 0);
8c924c7b
MD
5871 if (ret < 0) {
5872 goto error;
5873 }
af706bb7 5874 }
8c924c7b
MD
5875 break;
5876 }
5877 case LTTNG_BUFFER_PER_PID:
5878 {
5879 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5880 struct consumer_socket *socket;
5881 struct lttng_ht_iter chan_iter;
5882 struct ust_app_channel *ua_chan;
5883 struct ust_app_session *ua_sess;
5884 struct ust_registry_session *registry;
5885
5886 ua_sess = lookup_session_by_app(usess, app);
5887 if (!ua_sess) {
5888 /* Session not associated with this app. */
5889 continue;
5890 }
af706bb7 5891
8c924c7b
MD
5892 /* Get the right consumer socket for the application. */
5893 socket = consumer_find_socket_by_bitness(app->bits_per_long,
5894 output->consumer);
5895 if (!socket) {
5c786ded 5896 ret = -EINVAL;
5c786ded
JD
5897 goto error;
5898 }
5899
8c924c7b
MD
5900 /* Add the UST default trace dir to path. */
5901 memset(pathname, 0, sizeof(pathname));
5902 ret = snprintf(pathname, sizeof(pathname), DEFAULT_UST_TRACE_DIR "/%s",
5903 ua_sess->path);
6dc3064a 5904 if (ret < 0) {
8c924c7b 5905 PERROR("snprintf snapshot path");
6dc3064a
DG
5906 goto error;
5907 }
6dc3064a 5908
8c924c7b
MD
5909 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
5910 ua_chan, node.node) {
68808f4e
DG
5911 ret = consumer_snapshot_channel(socket, ua_chan->key, output,
5912 0, ua_sess->euid, ua_sess->egid, pathname, wait,
d07ceecd 5913 nb_packets_per_stream);
8c924c7b
MD
5914 if (ret < 0) {
5915 goto error;
5916 }
5917 }
5918
5919 registry = get_session_registry(ua_sess);
5920 assert(registry);
5921 ret = consumer_snapshot_channel(socket, registry->metadata_key, output,
d07ceecd 5922 1, ua_sess->euid, ua_sess->egid, pathname, wait, 0);
8c924c7b
MD
5923 if (ret < 0) {
5924 goto error;
5925 }
5926 }
5927 break;
5928 }
5929 default:
5930 assert(0);
5931 break;
6dc3064a
DG
5932 }
5933
5934error:
5935 rcu_read_unlock();
5936 return ret;
5937}
5c786ded
JD
5938
5939/*
d07ceecd 5940 * Return the size taken by one more packet per stream.
5c786ded 5941 */
d07ceecd
MD
5942uint64_t ust_app_get_size_one_more_packet_per_stream(struct ltt_ust_session *usess,
5943 uint64_t cur_nr_packets)
5c786ded 5944{
d07ceecd 5945 uint64_t tot_size = 0;
5c786ded
JD
5946 struct ust_app *app;
5947 struct lttng_ht_iter iter;
5948
5949 assert(usess);
5950
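/*
 * For every channel that has not already had all of its packets
 * accounted for, one extra packet per stream costs
 * subbuf_size * stream_count bytes.
 */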
5951 switch (usess->buffer_type) {
5952 case LTTNG_BUFFER_PER_UID:
5953 {
5954 struct buffer_reg_uid *reg;
5955
5956 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
5957 struct buffer_reg_channel *reg_chan;
5958
b7064eaa 5959 rcu_read_lock();
5c786ded
JD
5960 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
5961 reg_chan, node.node) {
d07ceecd
MD
5962 if (cur_nr_packets >= reg_chan->num_subbuf) {
5963 /*
5964 * Don't take the channel into account if we
5965 * have already grabbed all its packets.
5966 */
5967 continue;
5968 }
5969 tot_size += reg_chan->subbuf_size * reg_chan->stream_count;
5c786ded 5970 }
b7064eaa 5971 rcu_read_unlock();
5c786ded
JD
5972 }
5973 break;
5974 }
5975 case LTTNG_BUFFER_PER_PID:
5976 {
5977 rcu_read_lock();
5978 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5979 struct ust_app_channel *ua_chan;
5980 struct ust_app_session *ua_sess;
5981 struct lttng_ht_iter chan_iter;
5982
5983 ua_sess = lookup_session_by_app(usess, app);
5984 if (!ua_sess) {
5985 /* Session not associated with this app. */
5986 continue;
5987 }
5988
5989 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
5990 ua_chan, node.node) {
d07ceecd
MD
5991 if (cur_nr_packets >= ua_chan->attr.num_subbuf) {
5992 /*
5993 * Don't take the channel into account if we
5994 * have already grabbed all its packets.
5995 */
5996 continue;
5997 }
5998 tot_size += ua_chan->attr.subbuf_size * ua_chan->streams.count;
5c786ded
JD
5999 }
6000 }
6001 rcu_read_unlock();
6002 break;
6003 }
6004 default:
6005 assert(0);
6006 break;
6007 }
6008
d07ceecd 6009 return tot_size;
5c786ded 6010}
fb83fe64
JD
6011
6012int ust_app_uid_get_channel_runtime_stats(uint64_t ust_session_id,
6013 struct cds_list_head *buffer_reg_uid_list,
6014 struct consumer_output *consumer, uint64_t uchan_id,
6015 int overwrite, uint64_t *discarded, uint64_t *lost)
6016{
6017 int ret;
6018 uint64_t consumer_chan_key;
6019
6020 ret = buffer_reg_uid_consumer_channel_key(
6021 buffer_reg_uid_list, ust_session_id,
6022 uchan_id, &consumer_chan_key);
6023 if (ret < 0) {
6024 goto end;
6025 }
6026
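/*
 * Overwrite channels report lost (overwritten) packets; discard
 * channels report discarded events. The counter that does not apply is
 * zeroed so the caller always gets a defined value.
 */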
6027 if (overwrite) {
6028 ret = consumer_get_lost_packets(ust_session_id,
6029 consumer_chan_key, consumer, lost);
7c79cc89 6030 *discarded = 0;
fb83fe64
JD
6031 } else {
6032 ret = consumer_get_discarded_events(ust_session_id,
6033 consumer_chan_key, consumer, discarded);
7c79cc89 6034 *lost = 0;
fb83fe64
JD
6035 }
6036
6037end:
6038 return ret;
6039}
6040
6041int ust_app_pid_get_channel_runtime_stats(struct ltt_ust_session *usess,
6042 struct ltt_ust_channel *uchan,
6043 struct consumer_output *consumer, int overwrite,
6044 uint64_t *discarded, uint64_t *lost)
6045{
6046 int ret = 0;
6047 struct lttng_ht_iter iter;
6048 struct lttng_ht_node_str *ua_chan_node;
6049 struct ust_app *app;
6050 struct ust_app_session *ua_sess;
6051 struct ust_app_channel *ua_chan;
6052
6053 rcu_read_lock();
6054 /*
6055 * Iterate over every registered application; return when we
6056 * find one in the right session and channel.
6057 */
6058 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6059 struct lttng_ht_iter uiter;
6060
6061 ua_sess = lookup_session_by_app(usess, app);
6062 if (ua_sess == NULL) {
6063 continue;
6064 }
6065
6066 /* Get channel */
ee022399 6067 lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
fb83fe64
JD
6068 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
6069 /* If the session is found for the app, the channel must be there */
6070 assert(ua_chan_node);
6071
6072 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
6073
6074 if (overwrite) {
6075 ret = consumer_get_lost_packets(usess->id, ua_chan->key,
6076 consumer, lost);
7c79cc89 6077 *discarded = 0;
fb83fe64
JD
6078 goto end;
6079 } else {
6080 ret = consumer_get_discarded_events(usess->id,
6081 ua_chan->key, consumer, discarded);
7c79cc89 6082 *lost = 0;
fb83fe64
JD
6083 goto end;
6084 }
fb83fe64
JD
6085 }
6086
6087end:
6088 rcu_read_unlock();
6089 return ret;
6090}
c2561365
JD
6091
6092static
6093int ust_app_regenerate_statedump(struct ltt_ust_session *usess,
6094 struct ust_app *app)
6095{
6096 int ret = 0;
6097 struct ust_app_session *ua_sess;
6098
6099 DBG("Regenerating the statedump for ust app pid %d", app->pid);
6100
6101 rcu_read_lock();
6102
6103 ua_sess = lookup_session_by_app(usess, app);
6104 if (ua_sess == NULL) {
6105 /* The session is in the teardown process. Ignore and continue. */
6106 goto end;
6107 }
6108
6109 pthread_mutex_lock(&ua_sess->lock);
6110
6111 if (ua_sess->deleted) {
6112 goto end_unlock;
6113 }
6114
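/*
 * The application's socket lock serializes ustctl commands sent on
 * app->sock.
 */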
6115 pthread_mutex_lock(&app->sock_lock);
6116 ret = ustctl_regenerate_statedump(app->sock, ua_sess->handle);
6117 pthread_mutex_unlock(&app->sock_lock);
6118
6119end_unlock:
6120 pthread_mutex_unlock(&ua_sess->lock);
6121
6122end:
6123 rcu_read_unlock();
6124 health_code_update();
6125 return ret;
6126}
6127
6128/*
6129 * Regenerate the statedump for each app in the session.
6130 */
6131int ust_app_regenerate_statedump_all(struct ltt_ust_session *usess)
6132{
6133 int ret = 0;
6134 struct lttng_ht_iter iter;
6135 struct ust_app *app;
6136
6137 DBG("Regenerating the statedump for all UST apps");
6138
6139 rcu_read_lock();
6140
6141 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6142 if (!app->compatible) {
6143 continue;
6144 }
6145
6146 ret = ust_app_regenerate_statedump(usess, app);
6147 if (ret < 0) {
6148 /* Continue to the next app even on error */
6149 continue;
6150 }
6151 }
6152
6153 rcu_read_unlock();
6154
6155 return 0;
6156}