Fix: sessiond: bad fd used while rotating exiting app's buffers
[lttng-tools.git] / src / bin / lttng-sessiond / ust-app.c
/*
 * Copyright (C) 2011 EfficiOS Inc.
 * Copyright (C) 2016 Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * SPDX-License-Identifier: GPL-2.0-only
 *
 */

#define _LGPL_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <urcu/compiler.h>
#include <signal.h>

#include <common/bytecode/bytecode.h>
#include <common/compat/errno.h>
#include <common/common.h>
#include <common/hashtable/utils.h>
#include <lttng/event-rule/event-rule.h>
#include <lttng/event-rule/event-rule-internal.h>
#include <lttng/event-rule/user-tracepoint.h>
#include <lttng/condition/condition.h>
#include <lttng/condition/event-rule-matches-internal.h>
#include <lttng/condition/event-rule-matches.h>
#include <lttng/trigger/trigger-internal.h>
#include <common/sessiond-comm/sessiond-comm.h>

#include "buffer-registry.h"
#include "condition-internal.h"
#include "fd-limit.h"
#include "health-sessiond.h"
#include "ust-app.h"
#include "ust-consumer.h"
#include "lttng-ust-ctl.h"
#include "lttng-ust-error.h"
#include "utils.h"
#include "session.h"
#include "lttng-sessiond.h"
#include "notification-thread-commands.h"
#include "rotate.h"
#include "event.h"
#include "event-notifier-error-accounting.h"
#include "ust-field-utils.h"

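/*
 * Registered applications. ust_app_ht is keyed by PID; the companion tables
 * index the same applications by their command socket (ust_app_ht_by_sock)
 * and by their notification socket (ust_app_ht_by_notify_sock).
 */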
struct lttng_ht *ust_app_ht;
struct lttng_ht *ust_app_ht_by_sock;
struct lttng_ht *ust_app_ht_by_notify_sock;

static
int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess);

/* Next available channel key. Access under next_channel_key_lock. */
static uint64_t _next_channel_key;
static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;

/* Next available session ID. Access under next_session_id_lock. */
static uint64_t _next_session_id;
static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Return the incremented value of next_channel_key.
 */
static uint64_t get_next_channel_key(void)
{
	uint64_t ret;

	pthread_mutex_lock(&next_channel_key_lock);
	ret = ++_next_channel_key;
	pthread_mutex_unlock(&next_channel_key_lock);
	return ret;
}

/*
 * Return the atomically incremented value of next_session_id.
 */
static uint64_t get_next_session_id(void)
{
	uint64_t ret;

	pthread_mutex_lock(&next_session_id_lock);
	ret = ++_next_session_id;
	pthread_mutex_unlock(&next_session_id_lock);
	return ret;
}

static void copy_channel_attr_to_ustctl(
		struct lttng_ust_ctl_consumer_channel_attr *attr,
		struct lttng_ust_abi_channel_attr *uattr)
{
	/* Copy channel attributes since the layout is different. */
	attr->subbuf_size = uattr->subbuf_size;
	attr->num_subbuf = uattr->num_subbuf;
	attr->overwrite = uattr->overwrite;
	attr->switch_timer_interval = uattr->switch_timer_interval;
	attr->read_timer_interval = uattr->read_timer_interval;
	attr->output = uattr->output;
	attr->blocking_timeout = uattr->u.s.blocking_timeout;
}

/*
 * Match function for the hash table lookup.
 *
 * It matches an ust app event based on four attributes: the event name, the
 * filter bytecode, the loglevel and the exclusions.
 */
static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
{
	struct ust_app_event *event;
	const struct ust_app_ht_key *key;

	assert(node);
	assert(_key);

	event = caa_container_of(node, struct ust_app_event, node.node);
	key = _key;

	/* Match the 4 elements of the key: name, filter, loglevel, exclusions */

	/* Event name */
	if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
		goto no_match;
	}

	/* Event loglevel. */
	if (!loglevels_match(event->attr.loglevel_type, event->attr.loglevel,
			key->loglevel_type, key->loglevel_value,
			LTTNG_UST_ABI_LOGLEVEL_ALL)) {
		goto no_match;
	}

	/* One of the filters is NULL, fail. */
	if ((key->filter && !event->filter) || (!key->filter && event->filter)) {
		goto no_match;
	}

	if (key->filter && event->filter) {
		/* Both filters exist, check length followed by the bytecode. */
		if (event->filter->len != key->filter->len ||
				memcmp(event->filter->data, key->filter->data,
					event->filter->len) != 0) {
			goto no_match;
		}
	}

	/* One of the exclusions is NULL, fail. */
	if ((key->exclusion && !event->exclusion) || (!key->exclusion && event->exclusion)) {
		goto no_match;
	}

	if (key->exclusion && event->exclusion) {
		/* Both exclusions exist, check count followed by the names. */
		if (event->exclusion->count != key->exclusion->count ||
				memcmp(event->exclusion->names, key->exclusion->names,
					event->exclusion->count * LTTNG_UST_ABI_SYM_NAME_LEN) != 0) {
			goto no_match;
		}
	}

	/* Match. */
	return 1;

no_match:
	return 0;
}

/*
 * Unique add of an ust app event in the given ht. This uses the custom
 * ht_match_ust_app_event match function and the event name as hash.
 */
static void add_unique_ust_app_event(struct ust_app_channel *ua_chan,
		struct ust_app_event *event)
{
	struct cds_lfht_node *node_ptr;
	struct ust_app_ht_key key;
	struct lttng_ht *ht;

	assert(ua_chan);
	assert(ua_chan->events);
	assert(event);

	ht = ua_chan->events;
	key.name = event->attr.name;
	key.filter = event->filter;
	key.loglevel_type = (enum lttng_ust_abi_loglevel_type)
			event->attr.loglevel_type;
	key.loglevel_value = event->attr.loglevel;
	key.exclusion = event->exclusion;

	node_ptr = cds_lfht_add_unique(ht->ht,
			ht->hash_fct(event->node.key, lttng_ht_seed),
			ht_match_ust_app_event, &key, &event->node.node);
	assert(node_ptr == &event->node.node);
}

/*
 * Close the notify socket from the given RCU head object. This MUST be called
 * through a call_rcu().
 */
static void close_notify_sock_rcu(struct rcu_head *head)
{
	int ret;
	struct ust_app_notify_sock_obj *obj =
		caa_container_of(head, struct ust_app_notify_sock_obj, head);

	/* Must have a valid fd here. */
	assert(obj->fd >= 0);

	ret = close(obj->fd);
	if (ret) {
		ERR("close notify sock %d RCU", obj->fd);
	}
	lttng_fd_put(LTTNG_FD_APPS, 1);

	free(obj);
}

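/*
 * Illustrative usage (sketch): a caller owning a ust_app_notify_sock_obj
 * defers the close through RCU instead of invoking the callback directly,
 * e.g.:
 *
 *	call_rcu(&obj->head, close_notify_sock_rcu);
 */
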
/*
 * Return the session registry according to the buffer type of the given
 * session.
 *
 * A registry per UID object MUST exist before calling this function or else
 * this function asserts if it is not found. RCU read side lock must be
 * acquired.
 */
static struct ust_registry_session *get_session_registry(
		struct ust_app_session *ua_sess)
{
	struct ust_registry_session *registry = NULL;

	assert(ua_sess);

	switch (ua_sess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
	{
		struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
		if (!reg_pid) {
			goto error;
		}
		registry = reg_pid->registry->reg.ust;
		break;
	}
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
				ua_sess->tracing_id, ua_sess->bits_per_long,
				lttng_credentials_get_uid(&ua_sess->real_credentials));
		if (!reg_uid) {
			goto error;
		}
		registry = reg_uid->registry->reg.ust;
		break;
	}
	default:
		assert(0);
	};

error:
	return registry;
}

/*
 * Delete ust context safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx,
		struct ust_app *app)
{
	int ret;

	assert(ua_ctx);

	if (ua_ctx->obj) {
		pthread_mutex_lock(&app->sock_lock);
		ret = lttng_ust_ctl_release_object(sock, ua_ctx->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0) {
			if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
				DBG3("UST app release ctx failed. Application is dead: pid = %d, sock = %d",
						app->pid, app->sock);
			} else if (ret == -EAGAIN) {
				WARN("UST app release ctx failed. Communication time out: pid = %d, sock = %d",
						app->pid, app->sock);
			} else {
				ERR("UST app release ctx obj handle %d failed with ret %d: pid = %d, sock = %d",
						ua_ctx->obj->handle, ret,
						app->pid, app->sock);
			}
		}
		free(ua_ctx->obj);
	}
	free(ua_ctx);
}

/*
 * Delete ust app event safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_event(int sock, struct ust_app_event *ua_event,
		struct ust_app *app)
{
	int ret;

	assert(ua_event);

	free(ua_event->filter);
	if (ua_event->exclusion != NULL)
		free(ua_event->exclusion);
	if (ua_event->obj != NULL) {
		pthread_mutex_lock(&app->sock_lock);
		ret = lttng_ust_ctl_release_object(sock, ua_event->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0) {
			if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
				DBG3("UST app release event failed. Application is dead: pid = %d, sock = %d",
						app->pid, app->sock);
			} else if (ret == -EAGAIN) {
				WARN("UST app release event failed. Communication time out: pid = %d, sock = %d",
						app->pid, app->sock);
			} else {
				ERR("UST app release event obj failed with ret %d: pid = %d, sock = %d",
						ret, app->pid, app->sock);
			}
		}
		free(ua_event->obj);
	}
	free(ua_event);
}

/*
 * Delayed reclaim of a ust_app_event_notifier_rule object. This MUST be called
 * through a call_rcu().
 */
static
void free_ust_app_event_notifier_rule_rcu(struct rcu_head *head)
{
	struct ust_app_event_notifier_rule *obj = caa_container_of(
			head, struct ust_app_event_notifier_rule, rcu_head);

	free(obj);
}

/*
 * Delete ust app event notifier rule safely.
 */
static void delete_ust_app_event_notifier_rule(int sock,
		struct ust_app_event_notifier_rule *ua_event_notifier_rule,
		struct ust_app *app)
{
	int ret;

	assert(ua_event_notifier_rule);

	if (ua_event_notifier_rule->exclusion != NULL) {
		free(ua_event_notifier_rule->exclusion);
	}

	if (ua_event_notifier_rule->obj != NULL) {
		pthread_mutex_lock(&app->sock_lock);
		ret = lttng_ust_ctl_release_object(sock, ua_event_notifier_rule->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0) {
			if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
				DBG3("UST app release event notifier failed. Application is dead: pid = %d, sock = %d",
						app->pid, app->sock);
			} else if (ret == -EAGAIN) {
				WARN("UST app release event notifier failed. Communication time out: pid = %d, sock = %d",
						app->pid, app->sock);
			} else {
				ERR("UST app release event notifier failed with ret %d: pid = %d, sock = %d",
						ret, app->pid, app->sock);
			}
		}

		free(ua_event_notifier_rule->obj);
	}

	lttng_trigger_put(ua_event_notifier_rule->trigger);
	call_rcu(&ua_event_notifier_rule->rcu_head,
			free_ust_app_event_notifier_rule_rcu);
}

/*
 * Release ust data object of the given stream.
 *
 * Return 0 on success or else a negative value.
 */
static int release_ust_app_stream(int sock, struct ust_app_stream *stream,
		struct ust_app *app)
{
	int ret = 0;

	assert(stream);

	if (stream->obj) {
		pthread_mutex_lock(&app->sock_lock);
		ret = lttng_ust_ctl_release_object(sock, stream->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0) {
			if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
				DBG3("UST app release stream failed. Application is dead: pid = %d, sock = %d",
						app->pid, app->sock);
			} else if (ret == -EAGAIN) {
				WARN("UST app release stream failed. Communication time out: pid = %d, sock = %d",
						app->pid, app->sock);
			} else {
				ERR("UST app release stream obj failed with ret %d: pid = %d, sock = %d",
						ret, app->pid, app->sock);
			}
		}
		lttng_fd_put(LTTNG_FD_APPS, 2);
		free(stream->obj);
	}

	return ret;
}

/*
 * Delete ust app stream safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_stream(int sock, struct ust_app_stream *stream,
		struct ust_app *app)
{
	assert(stream);

	(void) release_ust_app_stream(sock, stream, app);
	free(stream);
}

/*
 * We need to execute ht_destroy outside of the RCU read-side critical
 * section and outside of the call_rcu thread, so we postpone its execution
 * using ht_cleanup_push. It is simpler than changing the semantics of
 * the many callers of delete_ust_app_session().
 */
static
void delete_ust_app_channel_rcu(struct rcu_head *head)
{
	struct ust_app_channel *ua_chan =
		caa_container_of(head, struct ust_app_channel, rcu_head);

	ht_cleanup_push(ua_chan->ctx);
	ht_cleanup_push(ua_chan->events);
	free(ua_chan);
}

/*
 * Extract the lost packet or discarded events counter when the channel is
 * being deleted and store the value in the parent channel so we can
 * access it from lttng list and at stop/destroy.
 *
 * The session list lock must be held by the caller.
 */
static
void save_per_pid_lost_discarded_counters(struct ust_app_channel *ua_chan)
{
	uint64_t discarded = 0, lost = 0;
	struct ltt_session *session;
	struct ltt_ust_channel *uchan;

	if (ua_chan->attr.type != LTTNG_UST_ABI_CHAN_PER_CPU) {
		return;
	}

	rcu_read_lock();
	session = session_find_by_id(ua_chan->session->tracing_id);
	if (!session || !session->ust_session) {
		/*
		 * Not finding the session is not an error because there are
		 * multiple ways the channels can be torn down.
		 *
		 * 1) The session daemon can initiate the destruction of the
		 *    ust app session after receiving a destroy command or
		 *    during its shutdown/teardown.
		 * 2) The application, since we are in per-pid tracing, is
		 *    unregistering and tearing down its ust app session.
		 *
		 * Both paths are protected by the session list lock which
		 * ensures that the accounting of lost packets and discarded
		 * events is done exactly once. The session is then unpublished
		 * from the session list, resulting in this condition.
		 */
		goto end;
	}

	if (ua_chan->attr.overwrite) {
		consumer_get_lost_packets(ua_chan->session->tracing_id,
				ua_chan->key, session->ust_session->consumer,
				&lost);
	} else {
		consumer_get_discarded_events(ua_chan->session->tracing_id,
				ua_chan->key, session->ust_session->consumer,
				&discarded);
	}
	uchan = trace_ust_find_channel_by_name(
			session->ust_session->domain_global.channels,
			ua_chan->name);
	if (!uchan) {
		ERR("Missing UST channel to store discarded counters");
		goto end;
	}

	uchan->per_pid_closed_app_discarded += discarded;
	uchan->per_pid_closed_app_lost += lost;

end:
	rcu_read_unlock();
	if (session) {
		session_put(session);
	}
}

/*
 * Delete ust app channel safely. RCU read lock must be held before calling
 * this function.
 *
 * The session list lock must be held by the caller.
 */
static
void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;
	struct ust_app_stream *stream, *stmp;
	struct ust_registry_session *registry;

	assert(ua_chan);

	DBG3("UST app deleting channel %s", ua_chan->name);

	/* Wipe stream */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		cds_list_del(&stream->list);
		delete_ust_app_stream(sock, stream, app);
	}

	/* Wipe context */
	cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
		cds_list_del(&ua_ctx->list);
		ret = lttng_ht_del(ua_chan->ctx, &iter);
		assert(!ret);
		delete_ust_app_ctx(sock, ua_ctx, app);
	}

	/* Wipe events */
	cds_lfht_for_each_entry(ua_chan->events->ht, &iter.iter, ua_event,
			node.node) {
		ret = lttng_ht_del(ua_chan->events, &iter);
		assert(!ret);
		delete_ust_app_event(sock, ua_event, app);
	}

	if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
		/* Wipe and free registry from session registry. */
		registry = get_session_registry(ua_chan->session);
		if (registry) {
			ust_registry_channel_del_free(registry, ua_chan->key,
					sock >= 0);
		}
		/*
		 * A negative socket can be used by the caller when
		 * cleaning-up a ua_chan in an error path. Skip the
		 * accounting in this case.
		 */
		if (sock >= 0) {
			save_per_pid_lost_discarded_counters(ua_chan);
		}
	}

	if (ua_chan->obj != NULL) {
		/* Remove channel from application UST object descriptor. */
		iter.iter.node = &ua_chan->ust_objd_node.node;
		ret = lttng_ht_del(app->ust_objd, &iter);
		assert(!ret);
		pthread_mutex_lock(&app->sock_lock);
		ret = lttng_ust_ctl_release_object(sock, ua_chan->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0) {
			if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
				DBG3("UST app channel %s release failed. Application is dead: pid = %d, sock = %d",
						ua_chan->name, app->pid,
						app->sock);
			} else if (ret == -EAGAIN) {
				WARN("UST app channel %s release failed. Communication time out: pid = %d, sock = %d",
						ua_chan->name, app->pid,
						app->sock);
			} else {
				ERR("UST app channel %s release failed with ret %d: pid = %d, sock = %d",
						ua_chan->name, ret, app->pid,
						app->sock);
			}
		}
		lttng_fd_put(LTTNG_FD_APPS, 1);
		free(ua_chan->obj);
	}
	call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
}

int ust_app_register_done(struct ust_app *app)
{
	int ret;

	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_register_done(app->sock);
	pthread_mutex_unlock(&app->sock_lock);
	return ret;
}

int ust_app_release_object(struct ust_app *app, struct lttng_ust_abi_object_data *data)
{
	int ret, sock;

	if (app) {
		pthread_mutex_lock(&app->sock_lock);
		sock = app->sock;
	} else {
		sock = -1;
	}
	ret = lttng_ust_ctl_release_object(sock, data);
	if (app) {
		pthread_mutex_unlock(&app->sock_lock);
	}
	return ret;
}

/*
 * Push metadata to consumer socket.
 *
 * RCU read-side lock must be held to guarantee existence of socket.
 * Must be called with the ust app session lock held.
 * Must be called with the registry lock held.
 *
 * On success, return the len of metadata pushed or else a negative value.
 * Returning a -EPIPE return value means we could not send the metadata,
 * but it can be caused by recoverable errors (e.g. the application has
 * terminated concurrently).
 */
ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
		struct consumer_socket *socket, int send_zero_data)
{
	int ret;
	char *metadata_str = NULL;
	size_t len, offset, new_metadata_len_sent;
	ssize_t ret_val;
	uint64_t metadata_key, metadata_version;

	assert(registry);
	assert(socket);

	metadata_key = registry->metadata_key;

	/*
	 * Means that no metadata was assigned to the session. This can
	 * happen if no start has been done previously.
	 */
	if (!metadata_key) {
		return 0;
	}

	offset = registry->metadata_len_sent;
	len = registry->metadata_len - registry->metadata_len_sent;
	new_metadata_len_sent = registry->metadata_len;
	metadata_version = registry->metadata_version;
	if (len == 0) {
		DBG3("No metadata to push for metadata key %" PRIu64,
				registry->metadata_key);
		ret_val = len;
		if (send_zero_data) {
			DBG("No metadata to push");
			goto push_data;
		}
		goto end;
	}

	/* Allocate only what we have to send. */
	metadata_str = zmalloc(len);
	if (!metadata_str) {
		PERROR("zmalloc ust app metadata string");
		ret_val = -ENOMEM;
		goto error;
	}
	/* Copy what we haven't sent out. */
	memcpy(metadata_str, registry->metadata + offset, len);

push_data:
	pthread_mutex_unlock(&registry->lock);
	/*
	 * We need to unlock the registry while we push metadata to
	 * break a circular dependency between the consumerd metadata
	 * lock and the sessiond registry lock. Indeed, pushing metadata
	 * to the consumerd awaits that it gets pushed all the way to
	 * relayd, but doing so requires grabbing the metadata lock. If
	 * a concurrent metadata request is being performed by
	 * consumerd, this can try to grab the registry lock on the
	 * sessiond while holding the metadata lock on the consumer
	 * daemon. Those push and pull schemes are performed on two
	 * different bidirectional communication sockets.
	 */
	ret = consumer_push_metadata(socket, metadata_key,
			metadata_str, len, offset, metadata_version);
	pthread_mutex_lock(&registry->lock);
	if (ret < 0) {
		/*
		 * There is an acceptable race here between the registry
		 * metadata key assignment and the creation on the
		 * consumer. The session daemon can concurrently push
		 * metadata for this registry while being created on the
		 * consumer since the metadata key of the registry is
		 * assigned *before* it is setup to avoid the consumer
		 * to ask for metadata that could possibly be not found
		 * in the session daemon.
		 *
		 * The metadata will get pushed either by the session
		 * being stopped or the consumer requesting metadata if
		 * that race is triggered.
		 */
		if (ret == -LTTCOMM_CONSUMERD_CHANNEL_FAIL) {
			ret = 0;
		} else {
			ERR("Error pushing metadata to consumer");
		}
		ret_val = ret;
		goto error_push;
	} else {
		/*
		 * Metadata may have been concurrently pushed, since
		 * we're not holding the registry lock while pushing to
		 * consumer. This is handled by the fact that we send
		 * the metadata content, size, and the offset at which
		 * that metadata belongs. This may arrive out of order
		 * on the consumer side, and the consumer is able to
		 * deal with overlapping fragments, as long as they are
		 * contiguous starting from offset 0. We keep the
		 * largest metadata_len_sent value of the concurrent
		 * send.
		 */
		registry->metadata_len_sent =
			max_t(size_t, registry->metadata_len_sent,
				new_metadata_len_sent);
	}
	free(metadata_str);
	return len;

end:
error:
	if (ret_val) {
		/*
		 * On error, flag the registry that the metadata is
		 * closed. We were unable to push anything and this
		 * means that either the consumer is not responding or
		 * the metadata cache has been destroyed on the
		 * consumer.
		 */
		registry->metadata_closed = 1;
	}
error_push:
	free(metadata_str);
	return ret_val;
}

/*
 * For a given application and session, push metadata to consumer.
 * Either sock or consumer is required: if sock is NULL, the default
 * socket to send the metadata is retrieved from consumer, if sock
 * is not NULL we use it to send the metadata.
 * RCU read-side lock must be held while calling this function,
 * therefore ensuring existence of registry. It also ensures existence
 * of socket throughout this function.
 *
 * Return 0 on success else a negative error.
 * Returning a -EPIPE return value means we could not send the metadata,
 * but it can be caused by recoverable errors (e.g. the application has
 * terminated concurrently).
 */
static int push_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret_val;
	ssize_t ret;
	struct consumer_socket *socket;

	assert(registry);
	assert(consumer);

	pthread_mutex_lock(&registry->lock);
	if (registry->metadata_closed) {
		ret_val = -EPIPE;
		goto error;
	}

	/* Get consumer socket to use to push the metadata. */
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret_val = -1;
		goto error;
	}

	ret = ust_app_push_metadata(registry, socket, 0);
	if (ret < 0) {
		ret_val = ret;
		goto error;
	}
	pthread_mutex_unlock(&registry->lock);
	return 0;

error:
	pthread_mutex_unlock(&registry->lock);
	return ret_val;
}

/*
 * Send to the consumer a close metadata command for the given session. Once
 * done, the metadata channel is deleted and the session metadata pointer is
 * nullified. The session lock MUST be held unless the application is
 * in the destroy path.
 *
 * Do not hold the registry lock while communicating with the consumerd, because
 * doing so causes inter-process deadlocks between consumerd and sessiond with
 * the metadata request notification.
 *
 * Return 0 on success else a negative value.
 */
static int close_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret;
	struct consumer_socket *socket;
	uint64_t metadata_key;
	bool registry_was_already_closed;

	assert(registry);
	assert(consumer);

	rcu_read_lock();

	pthread_mutex_lock(&registry->lock);
	metadata_key = registry->metadata_key;
	registry_was_already_closed = registry->metadata_closed;
	if (metadata_key != 0) {
		/*
		 * Metadata closed. Even on error this means that the consumer
		 * is not responding or not found so either way a second close
		 * should NOT be emitted for this registry.
		 */
		registry->metadata_closed = 1;
	}
	pthread_mutex_unlock(&registry->lock);

	if (metadata_key == 0 || registry_was_already_closed) {
		ret = 0;
		goto end;
	}

	/* Get consumer socket to use to push the metadata. */
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret = -1;
		goto end;
	}

	ret = consumer_close_metadata(socket, metadata_key);
	if (ret < 0) {
		goto end;
	}

end:
	rcu_read_unlock();
	return ret;
}

/*
 * We need to execute ht_destroy outside of the RCU read-side critical
 * section and outside of the call_rcu thread, so we postpone its execution
 * using ht_cleanup_push. It is simpler than changing the semantics of
 * the many callers of delete_ust_app_session().
 */
static
void delete_ust_app_session_rcu(struct rcu_head *head)
{
	struct ust_app_session *ua_sess =
		caa_container_of(head, struct ust_app_session, rcu_head);

	ht_cleanup_push(ua_sess->channels);
	free(ua_sess);
}

/*
 * Delete ust app session safely. RCU read lock must be held before calling
 * this function.
 *
 * The session list lock must be held by the caller.
 */
static
void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_channel *ua_chan;
	struct ust_registry_session *registry;

	assert(ua_sess);

	pthread_mutex_lock(&ua_sess->lock);

	assert(!ua_sess->deleted);
	ua_sess->deleted = true;

	registry = get_session_registry(ua_sess);
	/* Registry can be null on error path during initialization. */
	if (registry) {
		/* Push metadata for application before freeing the application. */
		(void) push_metadata(registry, ua_sess->consumer);

		/*
		 * Don't ask to close metadata for global per UID buffers. Close
		 * metadata only on destroy trace session in this case. Also, the
		 * previous push metadata could have flagged the metadata registry
		 * as closed, so don't send a close command if closed.
		 */
		if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
			/* And ask to close it for this session registry. */
			(void) close_metadata(registry, ua_sess->consumer);
		}
	}

	cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
			node.node) {
		ret = lttng_ht_del(ua_sess->channels, &iter);
		assert(!ret);
		delete_ust_app_channel(sock, ua_chan, app);
	}

	/* In case of per PID, the registry is kept in the session. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
		struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
		if (reg_pid) {
			/*
			 * Registry can be null on error path during
			 * initialization.
			 */
			buffer_reg_pid_remove(reg_pid);
			buffer_reg_pid_destroy(reg_pid);
		}
	}

	if (ua_sess->handle != -1) {
		pthread_mutex_lock(&app->sock_lock);
		ret = lttng_ust_ctl_release_handle(sock, ua_sess->handle);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0) {
			if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
				DBG3("UST app release session handle failed. Application is dead: pid = %d, sock = %d",
						app->pid, app->sock);
			} else if (ret == -EAGAIN) {
				WARN("UST app release session handle failed. Communication time out: pid = %d, sock = %d",
						app->pid, app->sock);
			} else {
				ERR("UST app release session handle failed with ret %d: pid = %d, sock = %d",
						ret, app->pid, app->sock);
			}
		}

		/* Remove session from application UST object descriptor. */
		iter.iter.node = &ua_sess->ust_objd_node.node;
		ret = lttng_ht_del(app->ust_sessions_objd, &iter);
		assert(!ret);
	}

	pthread_mutex_unlock(&ua_sess->lock);

	consumer_output_put(ua_sess->consumer);

	call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
}

/*
 * Delete a traceable application structure from the global list. Never call
 * this function outside of a call_rcu call.
 *
 * RCU read side lock should _NOT_ be held when calling this function.
 */
static
void delete_ust_app(struct ust_app *app)
{
	int ret, sock;
	struct ust_app_session *ua_sess, *tmp_ua_sess;
	struct lttng_ht_iter iter;
	struct ust_app_event_notifier_rule *event_notifier_rule;
	bool event_notifier_write_fd_is_open;

	/*
	 * The session list lock must be held during this function to guarantee
	 * the existence of ua_sess.
	 */
	session_lock_list();
	/* Delete ust app sessions info */
	sock = app->sock;
	app->sock = -1;

	/* Wipe sessions */
	cds_list_for_each_entry_safe(ua_sess, tmp_ua_sess, &app->teardown_head,
			teardown_node) {
		/* Free every object in the session and the session. */
		rcu_read_lock();
		delete_ust_app_session(sock, ua_sess, app);
		rcu_read_unlock();
	}

	/* Remove the event notifier rules associated with this app. */
	rcu_read_lock();
	cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht,
			&iter.iter, event_notifier_rule, node.node) {
		ret = lttng_ht_del(app->token_to_event_notifier_rule_ht, &iter);
		assert(!ret);

		delete_ust_app_event_notifier_rule(
				app->sock, event_notifier_rule, app);
	}

	rcu_read_unlock();

	ht_cleanup_push(app->sessions);
	ht_cleanup_push(app->ust_sessions_objd);
	ht_cleanup_push(app->ust_objd);
	ht_cleanup_push(app->token_to_event_notifier_rule_ht);

	/*
	 * This could be NULL if the event notifier setup failed (e.g. the app
	 * was killed or the tracer does not support this feature).
	 */
	if (app->event_notifier_group.object) {
		enum lttng_error_code ret_code;
		enum event_notifier_error_accounting_status status;

		const int event_notifier_read_fd = lttng_pipe_get_readfd(
				app->event_notifier_group.event_pipe);

		ret_code = notification_thread_command_remove_tracer_event_source(
				the_notification_thread_handle,
				event_notifier_read_fd);
		if (ret_code != LTTNG_OK) {
			ERR("Failed to remove application tracer event source from notification thread");
		}

		status = event_notifier_error_accounting_unregister_app(app);
		if (status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
			ERR("Error unregistering app from event notifier error accounting");
		}

		lttng_ust_ctl_release_object(sock, app->event_notifier_group.object);
		free(app->event_notifier_group.object);
	}

	event_notifier_write_fd_is_open = lttng_pipe_is_write_open(
			app->event_notifier_group.event_pipe);
	lttng_pipe_destroy(app->event_notifier_group.event_pipe);
	/*
	 * Release the file descriptors reserved for the event notifier pipe.
	 * The app could be destroyed before the write end of the pipe could be
	 * passed to the application (and closed). In that case, both file
	 * descriptors must be released.
	 */
	lttng_fd_put(LTTNG_FD_APPS, event_notifier_write_fd_is_open ? 2 : 1);

	/*
	 * Wait until we have deleted the application from the sock hash table
	 * before closing this socket, otherwise an application could re-use the
	 * socket ID and race with the teardown, using the same hash table entry.
	 *
	 * It's OK to leave the close in call_rcu. We want it to stay unique for
	 * all RCU readers that could run concurrently with unregister app,
	 * therefore we _need_ to only close that socket after a grace period. So
	 * it should stay in this RCU callback.
	 *
	 * This close() is a very important step of the synchronization model so
	 * every modification to this function must be carefully reviewed.
	 */
	ret = close(sock);
	if (ret) {
		PERROR("close");
	}
	lttng_fd_put(LTTNG_FD_APPS, 1);

	DBG2("UST app pid %d deleted", app->pid);
	free(app);
	session_unlock_list();
}

/*
 * URCU intermediate call to delete an UST app.
 */
static
void delete_ust_app_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_ulong *node =
		caa_container_of(head, struct lttng_ht_node_ulong, head);
	struct ust_app *app =
		caa_container_of(node, struct ust_app, pid_n);

	DBG3("Call RCU deleting app PID %d", app->pid);
	delete_ust_app(app);
}

/*
 * Delete the session from the application ht and delete the data structure by
 * freeing every object inside and releasing them.
 *
 * The session list lock must be held by the caller.
 */
static void destroy_app_session(struct ust_app *app,
		struct ust_app_session *ua_sess)
{
	int ret;
	struct lttng_ht_iter iter;

	assert(app);
	assert(ua_sess);

	iter.iter.node = &ua_sess->node.node;
	ret = lttng_ht_del(app->sessions, &iter);
	if (ret) {
		/* Already scheduled for teardown. */
		goto end;
	}

	/* Once deleted, free the data structure. */
	delete_ust_app_session(app->sock, ua_sess, app);

end:
	return;
}

/*
 * Alloc new UST app session.
 */
static
struct ust_app_session *alloc_ust_app_session(void)
{
	struct ust_app_session *ua_sess;

	/* Init most of the default value by allocating and zeroing */
	ua_sess = zmalloc(sizeof(struct ust_app_session));
	if (ua_sess == NULL) {
		PERROR("malloc");
		goto error_free;
	}

	ua_sess->handle = -1;
	ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
	ua_sess->metadata_attr.type = LTTNG_UST_ABI_CHAN_METADATA;
	pthread_mutex_init(&ua_sess->lock, NULL);

	return ua_sess;

error_free:
	return NULL;
}

/*
 * Alloc new UST app channel.
 */
static
struct ust_app_channel *alloc_ust_app_channel(const char *name,
		struct ust_app_session *ua_sess,
		struct lttng_ust_abi_channel_attr *attr)
{
	struct ust_app_channel *ua_chan;

	/* Init most of the default value by allocating and zeroing */
	ua_chan = zmalloc(sizeof(struct ust_app_channel));
	if (ua_chan == NULL) {
		PERROR("malloc");
		goto error;
	}

	/* Setup channel name */
	strncpy(ua_chan->name, name, sizeof(ua_chan->name));
	ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';

	ua_chan->enabled = 1;
	ua_chan->handle = -1;
	ua_chan->session = ua_sess;
	ua_chan->key = get_next_channel_key();
	ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
	lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);

	CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
	CDS_INIT_LIST_HEAD(&ua_chan->ctx_list);

	/* Copy attributes */
	if (attr) {
		/* Translate from lttng_ust_channel to lttng_ust_ctl_consumer_channel_attr. */
		ua_chan->attr.subbuf_size = attr->subbuf_size;
		ua_chan->attr.num_subbuf = attr->num_subbuf;
		ua_chan->attr.overwrite = attr->overwrite;
		ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
		ua_chan->attr.read_timer_interval = attr->read_timer_interval;
		ua_chan->attr.output = attr->output;
		ua_chan->attr.blocking_timeout = attr->u.s.blocking_timeout;
	}
	/* By default, the channel is a per cpu channel. */
	ua_chan->attr.type = LTTNG_UST_ABI_CHAN_PER_CPU;

	DBG3("UST app channel %s allocated", ua_chan->name);

	return ua_chan;

error:
	return NULL;
}

/*
 * Allocate and initialize a UST app stream.
 *
 * Return newly allocated stream pointer or NULL on error.
 */
struct ust_app_stream *ust_app_alloc_stream(void)
{
	struct ust_app_stream *stream = NULL;

	stream = zmalloc(sizeof(*stream));
	if (stream == NULL) {
		PERROR("zmalloc ust app stream");
		goto error;
	}

	/* Zero could be a valid value for a handle so flag it to -1. */
	stream->handle = -1;

error:
	return stream;
}

/*
 * Alloc new UST app event.
 */
static
struct ust_app_event *alloc_ust_app_event(char *name,
		struct lttng_ust_abi_event *attr)
{
	struct ust_app_event *ua_event;

	/* Init most of the default value by allocating and zeroing */
	ua_event = zmalloc(sizeof(struct ust_app_event));
	if (ua_event == NULL) {
		PERROR("Failed to allocate ust_app_event structure");
		goto error;
	}

	ua_event->enabled = 1;
	strncpy(ua_event->name, name, sizeof(ua_event->name));
	ua_event->name[sizeof(ua_event->name) - 1] = '\0';
	lttng_ht_node_init_str(&ua_event->node, ua_event->name);

	/* Copy attributes */
	if (attr) {
		memcpy(&ua_event->attr, attr, sizeof(ua_event->attr));
	}

	DBG3("UST app event %s allocated", ua_event->name);

	return ua_event;

error:
	return NULL;
}

/*
 * Allocate a new UST app event notifier rule.
 */
static struct ust_app_event_notifier_rule *alloc_ust_app_event_notifier_rule(
		struct lttng_trigger *trigger)
{
	enum lttng_event_rule_generate_exclusions_status
			generate_exclusion_status;
	enum lttng_condition_status cond_status;
	struct ust_app_event_notifier_rule *ua_event_notifier_rule;
	struct lttng_condition *condition = NULL;
	const struct lttng_event_rule *event_rule = NULL;

	ua_event_notifier_rule = zmalloc(sizeof(struct ust_app_event_notifier_rule));
	if (ua_event_notifier_rule == NULL) {
		PERROR("Failed to allocate ust_app_event_notifier_rule structure");
		goto error;
	}

	ua_event_notifier_rule->enabled = 1;
	ua_event_notifier_rule->token = lttng_trigger_get_tracer_token(trigger);
	lttng_ht_node_init_u64(&ua_event_notifier_rule->node,
			ua_event_notifier_rule->token);

	condition = lttng_trigger_get_condition(trigger);
	assert(condition);
	assert(lttng_condition_get_type(condition) ==
			LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES);

	cond_status = lttng_condition_event_rule_matches_get_rule(
			condition, &event_rule);
	assert(cond_status == LTTNG_CONDITION_STATUS_OK);
	assert(event_rule);

	ua_event_notifier_rule->error_counter_index =
			lttng_condition_event_rule_matches_get_error_counter_index(condition);
	/* Acquire the event notifier's reference to the trigger. */
	lttng_trigger_get(trigger);

	ua_event_notifier_rule->trigger = trigger;
	ua_event_notifier_rule->filter = lttng_event_rule_get_filter_bytecode(event_rule);
	generate_exclusion_status = lttng_event_rule_generate_exclusions(
			event_rule, &ua_event_notifier_rule->exclusion);
	switch (generate_exclusion_status) {
	case LTTNG_EVENT_RULE_GENERATE_EXCLUSIONS_STATUS_OK:
	case LTTNG_EVENT_RULE_GENERATE_EXCLUSIONS_STATUS_NONE:
		break;
	default:
		/* Error occurred. */
		ERR("Failed to generate exclusions from trigger while allocating an event notifier rule");
		goto error_put_trigger;
	}

	DBG3("UST app event notifier rule allocated: token = %" PRIu64,
			ua_event_notifier_rule->token);

	return ua_event_notifier_rule;

error_put_trigger:
	lttng_trigger_put(trigger);
error:
	free(ua_event_notifier_rule);
	return NULL;
}

/*
 * Alloc new UST app context.
 */
static
struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context_attr *uctx)
{
	struct ust_app_ctx *ua_ctx;

	ua_ctx = zmalloc(sizeof(struct ust_app_ctx));
	if (ua_ctx == NULL) {
		goto error;
	}

	CDS_INIT_LIST_HEAD(&ua_ctx->list);

	if (uctx) {
		memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
		if (uctx->ctx == LTTNG_UST_ABI_CONTEXT_APP_CONTEXT) {
			char *provider_name = NULL, *ctx_name = NULL;

			provider_name = strdup(uctx->u.app_ctx.provider_name);
			ctx_name = strdup(uctx->u.app_ctx.ctx_name);
			if (!provider_name || !ctx_name) {
				free(provider_name);
				free(ctx_name);
				goto error;
			}

			ua_ctx->ctx.u.app_ctx.provider_name = provider_name;
			ua_ctx->ctx.u.app_ctx.ctx_name = ctx_name;
		}
	}

	DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);
	return ua_ctx;
error:
	free(ua_ctx);
	return NULL;
}

/*
 * Create a liblttng-ust filter bytecode from given bytecode.
 *
 * Return allocated filter or NULL on error.
 */
static struct lttng_ust_abi_filter_bytecode *create_ust_filter_bytecode_from_bytecode(
		const struct lttng_bytecode *orig_f)
{
	struct lttng_ust_abi_filter_bytecode *filter = NULL;

	/* Copy filter bytecode. */
	filter = zmalloc(sizeof(*filter) + orig_f->len);
	if (!filter) {
		PERROR("Failed to allocate lttng_ust_filter_bytecode: bytecode len = %" PRIu32 " bytes", orig_f->len);
		goto error;
	}

	assert(sizeof(struct lttng_bytecode) ==
			sizeof(struct lttng_ust_abi_filter_bytecode));
	memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
error:
	return filter;
}

/*
 * Create a liblttng-ust capture bytecode from given bytecode.
 *
 * Return allocated capture bytecode or NULL on error.
 */
static struct lttng_ust_abi_capture_bytecode *
create_ust_capture_bytecode_from_bytecode(const struct lttng_bytecode *orig_f)
{
	struct lttng_ust_abi_capture_bytecode *capture = NULL;

	/* Copy capture bytecode. */
	capture = zmalloc(sizeof(*capture) + orig_f->len);
	if (!capture) {
		PERROR("Failed to allocate lttng_ust_abi_capture_bytecode: bytecode len = %" PRIu32 " bytes", orig_f->len);
		goto error;
	}

	assert(sizeof(struct lttng_bytecode) ==
			sizeof(struct lttng_ust_abi_capture_bytecode));
	memcpy(capture, orig_f, sizeof(*capture) + orig_f->len);
error:
	return capture;
}

/*
 * Find an ust_app using the sock and return it. RCU read side lock must be
 * held before calling this helper function.
 */
struct ust_app *ust_app_find_by_sock(int sock)
{
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;

	lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node == NULL) {
		DBG2("UST app find by sock %d not found", sock);
		goto error;
	}

	return caa_container_of(node, struct ust_app, sock_n);

error:
	return NULL;
}

/*
 * Find an ust_app using the notify sock and return it. RCU read side lock must
 * be held before calling this helper function.
 */
static struct ust_app *find_app_by_notify_sock(int sock)
{
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;

	lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
			&iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node == NULL) {
		DBG2("UST app find by notify sock %d not found", sock);
		goto error;
	}

	return caa_container_of(node, struct ust_app, notify_sock_n);

error:
	return NULL;
}

/*
 * Look up an ust app event based on the event name, filter bytecode,
 * loglevel and exclusions.
 *
 * Return an ust_app_event object or NULL on error.
 */
static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
		const char *name,
		const struct lttng_bytecode *filter,
		enum lttng_ust_abi_loglevel_type loglevel_type,
		int loglevel_value,
		const struct lttng_event_exclusion *exclusion)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *node;
	struct ust_app_event *event = NULL;
	struct ust_app_ht_key key;

	assert(name);
	assert(ht);

	/* Setup key for event lookup. */
	key.name = name;
	key.filter = filter;
	key.loglevel_type = loglevel_type;
	key.loglevel_value = loglevel_value;
	/* lttng_event_exclusion and lttng_ust_event_exclusion structures are similar */
	key.exclusion = exclusion;

	/* Lookup using the event name as hash and a custom match fct. */
	cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
			ht_match_ust_app_event, &key, &iter.iter);
	node = lttng_ht_iter_get_node_str(&iter);
	if (node == NULL) {
		goto end;
	}

	event = caa_container_of(node, struct ust_app_event, node);

end:
	return event;
}

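/*
 * Illustrative look-up (sketch with hypothetical values): find an event in a
 * channel's event hash table by name only, with no filter, no specific
 * loglevel and no exclusions:
 *
 *	event = find_ust_app_event(ua_chan->events, "my_event", NULL,
 *			LTTNG_UST_ABI_LOGLEVEL_ALL, -1, NULL);
 */
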
/*
 * Look-up an event notifier rule based on its token id.
 *
 * Must be called with the RCU read lock held.
 * Return an ust_app_event_notifier_rule object or NULL on error.
 */
static struct ust_app_event_notifier_rule *find_ust_app_event_notifier_rule(
		struct lttng_ht *ht, uint64_t token)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct ust_app_event_notifier_rule *event_notifier_rule = NULL;

	assert(ht);

	lttng_ht_lookup(ht, &token, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node == NULL) {
		DBG2("UST app event notifier rule token not found: token = %" PRIu64,
				token);
		goto end;
	}

	event_notifier_rule = caa_container_of(
			node, struct ust_app_event_notifier_rule, node);
end:
	return event_notifier_rule;
}

/*
 * Create the channel context on the tracer.
 *
 * Called with UST app session lock held.
 */
static
int create_ust_channel_context(struct ust_app_channel *ua_chan,
		struct ust_app_ctx *ua_ctx, struct ust_app *app)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_add_context(app->sock, &ua_ctx->ctx,
			ua_chan->obj, &ua_ctx->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			ret = 0;
			DBG3("UST app create channel context failed. Application is dead: pid = %d, sock = %d",
					app->pid, app->sock);
		} else if (ret == -EAGAIN) {
			ret = 0;
			WARN("UST app create channel context failed. Communication time out: pid = %d, sock = %d",
					app->pid, app->sock);
		} else {
			ERR("UST app create channel context failed with ret %d: pid = %d, sock = %d",
					ret, app->pid, app->sock);
		}
		goto error;
	}

	ua_ctx->handle = ua_ctx->obj->handle;

	DBG2("UST app context handle %d created successfully for channel %s",
			ua_ctx->handle, ua_chan->name);

error:
	health_code_update();
	return ret;
}

1599}
1600
53a80697
MD
1601/*
1602 * Set the filter on the tracer.
1603 */
a154c7b8 1604static int set_ust_object_filter(struct ust_app *app,
2b00d462 1605 const struct lttng_bytecode *bytecode,
fc4b93fa 1606 struct lttng_ust_abi_object_data *ust_object)
53a80697
MD
1607{
1608 int ret;
fc4b93fa 1609 struct lttng_ust_abi_filter_bytecode *ust_bytecode = NULL;
53a80697 1610
840cb59c 1611 health_code_update();
86acf0da 1612
f2eafd2d 1613 ust_bytecode = create_ust_filter_bytecode_from_bytecode(bytecode);
51755dc8
JG
1614 if (!ust_bytecode) {
1615 ret = -LTTNG_ERR_NOMEM;
1616 goto error;
1617 }
fb45065e 1618 pthread_mutex_lock(&app->sock_lock);
b623cb6a 1619 ret = lttng_ust_ctl_set_filter(app->sock, ust_bytecode,
a154c7b8 1620 ust_object);
fb45065e 1621 pthread_mutex_unlock(&app->sock_lock);
53a80697 1622 if (ret < 0) {
569744c5
JR
1623 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1624 ret = 0;
1625 DBG3("UST app set filter failed. Application is dead: pid = %d, sock = %d",
1626 app->pid, app->sock);
1627 } else if (ret == -EAGAIN) {
3757b385 1628 ret = 0;
569744c5
JR
1629 WARN("UST app set filter failed. Communication time out: pid = %d, sock = %d",
1630 app->pid, app->sock);
1631 } else {
1632 ERR("UST app set filter failed with ret %d: pid = %d, sock = %d, object = %p",
1633 ret, app->pid, app->sock, ust_object);
ffe60014 1634 }
53a80697
MD
1635 goto error;
1636 }
1637
f2eafd2d
JR
1638 DBG2("UST filter successfully set: object = %p", ust_object);
1639
1640error:
1641 health_code_update();
1642 free(ust_bytecode);
1643 return ret;
1644}
1645
1646/*
1647 * Set a capture bytecode for the passed object.
11f6ce94
JR
1648 * The sequence number enforces the ordering at runtime and on reception of
1649 * the captured payloads.
f2eafd2d
JR
1650 */
1651static int set_ust_capture(struct ust_app *app,
1652 const struct lttng_bytecode *bytecode,
11f6ce94 1653 unsigned int capture_seqnum,
3d1384a4 1654 struct lttng_ust_abi_object_data *ust_object)
f2eafd2d
JR
1655{
1656 int ret;
fc4b93fa 1657 struct lttng_ust_abi_capture_bytecode *ust_bytecode = NULL;
f2eafd2d
JR
1658
1659 health_code_update();
1660
1661 ust_bytecode = create_ust_capture_bytecode_from_bytecode(bytecode);
1662 if (!ust_bytecode) {
1663 ret = -LTTNG_ERR_NOMEM;
1664 goto error;
1665 }
1666
11f6ce94
JR
1667 /*
1668 * Set the sequence number to ensure the capture of fields is ordered.
1669 */
1670 ust_bytecode->seqnum = capture_seqnum;
1671
f2eafd2d 1672 pthread_mutex_lock(&app->sock_lock);
b623cb6a 1673 ret = lttng_ust_ctl_set_capture(app->sock, ust_bytecode,
f2eafd2d
JR
1674 ust_object);
1675 pthread_mutex_unlock(&app->sock_lock);
1676 if (ret < 0) {
569744c5 1677 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
f2eafd2d 1678 ret = 0;
569744c5
JR
1679 DBG3("UST app set capture failed. Application is dead: pid = %d, sock = %d",
1680 app->pid, app->sock);
1681 } else if (ret == -EAGAIN) {
1682 ret = 0;
1683 DBG3("UST app set capture failed. Communication timeout: pid = %d, sock = %d",
1684 app->pid, app->sock);
1685 } else {
1686 ERR("UST app event set capture failed with ret %d: pid = %d, sock = %d",
1687 ret, app->pid,
1688 app->sock);
f2eafd2d
JR
1689 }
1690
1691 goto error;
1692 }
1693
1694 DBG2("UST capture successfully set: object = %p", ust_object);
53a80697
MD
1695
1696error:
840cb59c 1697 health_code_update();
51755dc8 1698 free(ust_bytecode);
53a80697
MD
1699 return ret;
1700}
1701
51755dc8 1702static
fc4b93fa 1703struct lttng_ust_abi_event_exclusion *create_ust_exclusion_from_exclusion(
c0901ffa 1704 const struct lttng_event_exclusion *exclusion)
51755dc8 1705{
fc4b93fa
MD
1706 struct lttng_ust_abi_event_exclusion *ust_exclusion = NULL;
1707 size_t exclusion_alloc_size = sizeof(struct lttng_ust_abi_event_exclusion) +
1708 LTTNG_UST_ABI_SYM_NAME_LEN * exclusion->count;
51755dc8
JG
1709
1710 ust_exclusion = zmalloc(exclusion_alloc_size);
1711 if (!ust_exclusion) {
1712 PERROR("malloc");
1713 goto end;
1714 }
1715
1716 assert(sizeof(struct lttng_event_exclusion) ==
fc4b93fa 1717 sizeof(struct lttng_ust_abi_event_exclusion));
51755dc8
JG
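	/*
	 * Both structures are expected to share the exact same layout (the
	 * size assert above enforces it), so the exclusion count and names
	 * can be copied verbatim.
	 */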
1718 memcpy(ust_exclusion, exclusion, exclusion_alloc_size);
1719end:
1720 return ust_exclusion;
1721}
1722
7cc9a73c
JI
1723/*
1724 * Set event exclusions on the tracer.
1725 */
c0901ffa
JR
1726static int set_ust_object_exclusions(struct ust_app *app,
1727 const struct lttng_event_exclusion *exclusions,
fc4b93fa 1728 struct lttng_ust_abi_object_data *ust_object)
7cc9a73c
JI
1729{
1730 int ret;
fc4b93fa 1731 struct lttng_ust_abi_event_exclusion *ust_exclusions = NULL;
7cc9a73c 1732
c0901ffa 1733 assert(exclusions && exclusions->count > 0);
7cc9a73c 1734
c0901ffa 1735 health_code_update();
7cc9a73c 1736
c0901ffa
JR
1737 ust_exclusions = create_ust_exclusion_from_exclusion(
1738 exclusions);
1739 if (!ust_exclusions) {
51755dc8
JG
1740 ret = -LTTNG_ERR_NOMEM;
1741 goto error;
1742 }
fb45065e 1743 pthread_mutex_lock(&app->sock_lock);
b623cb6a 1744 ret = lttng_ust_ctl_set_exclusion(app->sock, ust_exclusions, ust_object);
fb45065e 1745 pthread_mutex_unlock(&app->sock_lock);
7cc9a73c 1746 if (ret < 0) {
569744c5 1747 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
7cc9a73c 1748 ret = 0;
569744c5
JR
1749 DBG3("UST app event exclusion failed. Application is dead: pid = %d, sock = %d",
1750 app->pid, app->sock);
1751 } else if (ret == -EAGAIN) {
1752 ret = 0;
1753 WARN("UST app event exclusion failed. Communication time out(pid: %d, sock = %d",
1754 app->pid, app->sock);
1755 } else {
1756 ERR("UST app event exclusions failed with ret %d: pid = %d, sock = %d, object = %p",
1757 ret, app->pid, app->sock, ust_object);
7cc9a73c
JI
1758 }
1759 goto error;
1760 }
1761
c0901ffa 1762 DBG2("UST exclusions set successfully for object %p", ust_object);
7cc9a73c
JI
1763
1764error:
1765 health_code_update();
c0901ffa 1766 free(ust_exclusions);
7cc9a73c
JI
1767 return ret;
1768}
1769
9730260e
DG
1770/*
1771  * Disable the specified event on the UST tracer for the UST session.
1772 */
e2456d0a 1773static int disable_ust_object(struct ust_app *app,
fc4b93fa 1774 struct lttng_ust_abi_object_data *object)
9730260e
DG
1775{
1776 int ret;
1777
840cb59c 1778 health_code_update();
86acf0da 1779
fb45065e 1780 pthread_mutex_lock(&app->sock_lock);
b623cb6a 1781 ret = lttng_ust_ctl_disable(app->sock, object);
fb45065e 1782 pthread_mutex_unlock(&app->sock_lock);
9730260e 1783 if (ret < 0) {
569744c5
JR
1784 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1785 ret = 0;
1786 DBG3("UST app disable object failed. Application is dead: pid = %d, sock = %d",
1787 app->pid, app->sock);
1788 } else if (ret == -EAGAIN) {
3757b385 1789 ret = 0;
569744c5
JR
1790 WARN("UST app disable object failed. Communication time out: pid = %d, sock = %d",
1791 app->pid, app->sock);
1792 } else {
1793 ERR("UST app disable object failed with ret %d: pid = %d, sock = %d, object = %p",
1794 ret, app->pid, app->sock, object);
ffe60014 1795 }
9730260e
DG
1796 goto error;
1797 }
1798
569744c5 1799 DBG2("UST app object %p disabled successfully for app: pid = %d",
e2456d0a 1800 object, app->pid);
9730260e
DG
1801
1802error:
840cb59c 1803 health_code_update();
9730260e
DG
1804 return ret;
1805}
1806
78f0bacd
DG
1807/*
1808  * Disable the specified channel on the UST tracer for the UST session.
1809 */
1810static int disable_ust_channel(struct ust_app *app,
1811 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1812{
1813 int ret;
1814
840cb59c 1815 health_code_update();
86acf0da 1816
fb45065e 1817 pthread_mutex_lock(&app->sock_lock);
b623cb6a 1818 ret = lttng_ust_ctl_disable(app->sock, ua_chan->obj);
fb45065e 1819 pthread_mutex_unlock(&app->sock_lock);
78f0bacd 1820 if (ret < 0) {
569744c5 1821 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
3757b385 1822 ret = 0;
569744c5
JR
1823 DBG3("UST app disable channel failed. Application is dead: pid = %d, sock = %d",
1824 app->pid, app->sock);
1825 } else if (ret == -EAGAIN) {
1826 ret = 0;
1827 WARN("UST app disable channel failed. Communication time out: pid = %d, sock = %d",
1828 app->pid, app->sock);
1829 } else {
1830 ERR("UST app channel %s disable failed, session handle %d, with ret %d: pid = %d, sock = %d",
1831 ua_chan->name, ua_sess->handle, ret,
1832 app->pid, app->sock);
ffe60014 1833 }
78f0bacd
DG
1834 goto error;
1835 }
1836
569744c5 1837 DBG2("UST app channel %s disabled successfully for app: pid = %d",
852d0037 1838 ua_chan->name, app->pid);
78f0bacd
DG
1839
1840error:
840cb59c 1841 health_code_update();
78f0bacd
DG
1842 return ret;
1843}
1844
1845/*
1846  * Enable the specified channel on the UST tracer for the UST session.
1847 */
1848static int enable_ust_channel(struct ust_app *app,
1849 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1850{
1851 int ret;
1852
840cb59c 1853 health_code_update();
86acf0da 1854
fb45065e 1855 pthread_mutex_lock(&app->sock_lock);
b623cb6a 1856 ret = lttng_ust_ctl_enable(app->sock, ua_chan->obj);
fb45065e 1857 pthread_mutex_unlock(&app->sock_lock);
78f0bacd 1858 if (ret < 0) {
569744c5 1859 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
3757b385 1860 ret = 0;
569744c5
JR
1861 DBG3("UST app channel %s enable failed. Application is dead: pid = %d, sock = %d",
1862 ua_chan->name, app->pid, app->sock);
1863 } else if (ret == -EAGAIN) {
1864 ret = 0;
1865 WARN("UST app channel %s enable failed. Communication time out: pid = %d, sock = %d",
1866 ua_chan->name, app->pid, app->sock);
1867 } else {
1868 ERR("UST app channel %s enable failed, session handle %d, with ret %d: pid = %d, sock = %d",
1869 ua_chan->name, ua_sess->handle, ret,
1870 app->pid, app->sock);
ffe60014 1871 }
78f0bacd
DG
1872 goto error;
1873 }
1874
1875 ua_chan->enabled = 1;
1876
569744c5 1877 DBG2("UST app channel %s enabled successfully for app: pid = %d",
852d0037 1878 ua_chan->name, app->pid);
78f0bacd
DG
1879
1880error:
840cb59c 1881 health_code_update();
78f0bacd
DG
1882 return ret;
1883}
1884
edb67388
DG
1885/*
1886  * Enable the specified event on the UST tracer for the UST session.
1887 */
3428a1b7 1888static int enable_ust_object(
fc4b93fa 1889 struct ust_app *app, struct lttng_ust_abi_object_data *ust_object)
edb67388
DG
1890{
1891 int ret;
1892
840cb59c 1893 health_code_update();
86acf0da 1894
fb45065e 1895 pthread_mutex_lock(&app->sock_lock);
b623cb6a 1896 ret = lttng_ust_ctl_enable(app->sock, ust_object);
fb45065e 1897 pthread_mutex_unlock(&app->sock_lock);
edb67388 1898 if (ret < 0) {
569744c5
JR
1899 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1900 ret = 0;
1901 DBG3("UST app enable object failed. Application is dead: pid = %d, sock = %d",
1902 app->pid, app->sock);
1903 } else if (ret == -EAGAIN) {
3757b385 1904 ret = 0;
569744c5
JR
1905 WARN("UST app enable object failed. Communication time out: pid = %d, sock = %d",
1906 app->pid, app->sock);
1907 } else {
1908 ERR("UST app enable object failed with ret %d: pid = %d, sock = %d, object = %p",
1909 ret, app->pid, app->sock, ust_object);
ffe60014 1910 }
edb67388
DG
1911 goto error;
1912 }
1913
569744c5 1914 DBG2("UST app object %p enabled successfully for app: pid = %d",
3428a1b7 1915 ust_object, app->pid);
edb67388
DG
1916
1917error:
840cb59c 1918 health_code_update();
edb67388
DG
1919 return ret;
1920}
1921
099e26bd 1922/*
7972aab2 1923  * Send the channel and its stream buffers to the application.
4f3ab6ee 1924 *
ffe60014 1925 * Return 0 on success. On error, a negative value is returned.
4f3ab6ee 1926 */
7972aab2
DG
1927static int send_channel_pid_to_ust(struct ust_app *app,
1928 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
4f3ab6ee
DG
1929{
1930 int ret;
ffe60014 1931 struct ust_app_stream *stream, *stmp;
4f3ab6ee
DG
1932
1933 assert(app);
ffe60014 1934 assert(ua_sess);
4f3ab6ee 1935 assert(ua_chan);
4f3ab6ee 1936
840cb59c 1937 health_code_update();
4f3ab6ee 1938
7972aab2
DG
1939 DBG("UST app sending channel %s to UST app sock %d", ua_chan->name,
1940 app->sock);
86acf0da 1941
ffe60014
DG
1942 /* Send channel to the application. */
1943 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
a7169585
MD
1944 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1945 ret = -ENOTCONN; /* Caused by app exiting. */
1946 goto error;
569744c5
JR
1947 } else if (ret == -EAGAIN) {
1948 /* Caused by timeout. */
1949 WARN("Communication with application %d timed out on send_channel for channel \"%s\" of session \"%" PRIu64 "\".",
1950 app->pid, ua_chan->name, ua_sess->tracing_id);
1951 /* Treat this the same way as an application that is exiting. */
1952 ret = -ENOTCONN;
1953 goto error;
a7169585 1954 } else if (ret < 0) {
b551a063
DG
1955 goto error;
1956 }
1957
d88aee68
DG
1958 health_code_update();
1959
ffe60014
DG
1960 /* Send all streams to application. */
1961 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
1962 ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
a7169585 1963 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
569744c5 1964 ret = -ENOTCONN; /* Caused by app exiting. */
a7169585 1965 goto error;
569744c5
JR
1966 } else if (ret == -EAGAIN) {
1967 /* Caused by timeout. */
1968 WARN("Communication with application %d timed out on send_stream for stream \"%s\" of channel \"%s\" of session \"%" PRIu64 "\".",
1969 app->pid, stream->name, ua_chan->name,
1970 ua_sess->tracing_id);
1971 /*
1972 * Treat this the same way as an application that is
1973 * exiting.
1974 */
1975 ret = -ENOTCONN;
a7169585 1976 } else if (ret < 0) {
ffe60014
DG
1977 goto error;
1978 }
1979 /* We don't need the stream anymore once sent to the tracer. */
1980 cds_list_del(&stream->list);
fb45065e 1981 delete_ust_app_stream(-1, stream, app);
ffe60014 1982 }
ffe60014
DG
1983 /* Flag the channel that it is sent to the application. */
1984 ua_chan->is_sent = 1;
ffe60014 1985
b551a063 1986error:
840cb59c 1987 health_code_update();
b551a063
DG
1988 return ret;
1989}
1990
91d76f53 1991/*
5b4a0ec0 1992 * Create the specified event onto the UST tracer for a UST session.
d0b96690
DG
1993 *
1994 * Should be called with session mutex held.
91d76f53 1995 */
edb67388
DG
1996static
1997int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
1998 struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
91d76f53 1999{
5b4a0ec0 2000 int ret = 0;
284d8f55 2001
840cb59c 2002 health_code_update();
86acf0da 2003
5b4a0ec0 2004 /* Create UST event on tracer */
fb45065e 2005 pthread_mutex_lock(&app->sock_lock);
b623cb6a 2006 ret = lttng_ust_ctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
5b4a0ec0 2007 &ua_event->obj);
fb45065e 2008 pthread_mutex_unlock(&app->sock_lock);
5b4a0ec0 2009 if (ret < 0) {
569744c5
JR
2010 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
2011 ret = 0;
2012 DBG3("UST app create event failed. Application is dead: pid = %d, sock = %d",
2013 app->pid, app->sock);
2014 } else if (ret == -EAGAIN) {
3757b385 2015 ret = 0;
569744c5
JR
2016 WARN("UST app create event failed. Communication time out: pid = %d, sock = %d",
2017 app->pid, app->sock);
2018 } else {
2019 ERR("UST app create event '%s' failed with ret %d: pid = %d, sock = %d",
2020 ua_event->attr.name, ret, app->pid,
2021 app->sock);
ffe60014 2022 }
5b4a0ec0 2023 goto error;
91d76f53 2024 }
f6a9efaa 2025
5b4a0ec0 2026 ua_event->handle = ua_event->obj->handle;
284d8f55 2027
569744c5 2028 DBG2("UST app event %s created successfully for pid:%d object = %p",
3428a1b7 2029 ua_event->attr.name, app->pid, ua_event->obj);
f6a9efaa 2030
840cb59c 2031 health_code_update();
86acf0da 2032
025faf73
DG
2033 /* Set filter if one is present. */
2034 if (ua_event->filter) {
a154c7b8 2035 ret = set_ust_object_filter(app, ua_event->filter, ua_event->obj);
025faf73
DG
2036 if (ret < 0) {
2037 goto error;
2038 }
2039 }
2040
7cc9a73c
JI
2041 /* Set exclusions for the event */
2042 if (ua_event->exclusion) {
c0901ffa 2043 ret = set_ust_object_exclusions(app, ua_event->exclusion, ua_event->obj);
7cc9a73c
JI
2044 if (ret < 0) {
2045 goto error;
2046 }
2047 }
2048
8535a6d9 2049 	/* Events are created disabled on the tracer; enable the event if requested. */
40113787
MD
2050 if (ua_event->enabled) {
2051 /*
2052 * We now need to explicitly enable the event, since it
2053 * is now disabled at creation.
2054 */
3428a1b7 2055 ret = enable_ust_object(app, ua_event->obj);
40113787
MD
2056 if (ret < 0) {
2057 /*
2058 * If we hit an EPERM, something is wrong with our enable call. If
2059 * we get an EEXIST, there is a problem on the tracer side since we
2060 * just created it.
2061 */
2062 switch (ret) {
2063 case -LTTNG_UST_ERR_PERM:
2064 /* Code flow problem */
2065 assert(0);
2066 case -LTTNG_UST_ERR_EXIST:
2067 /* It's OK for our use case. */
2068 ret = 0;
2069 break;
2070 default:
2071 break;
2072 }
2073 goto error;
2074 }
8535a6d9
DG
2075 }
2076
5b4a0ec0 2077error:
840cb59c 2078 health_code_update();
5b4a0ec0 2079 return ret;
91d76f53 2080}
48842b30 2081
993578ff
JR
2082static int init_ust_event_notifier_from_event_rule(
2083 const struct lttng_event_rule *rule,
fc4b93fa 2084 struct lttng_ust_abi_event_notifier *event_notifier)
993578ff
JR
2085{
2086 enum lttng_event_rule_status status;
fc4b93fa 2087 enum lttng_ust_abi_loglevel_type ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL;
993578ff
JR
2088 int loglevel = -1, ret = 0;
2089 const char *pattern;
2090
993578ff
JR
2091
2092 memset(event_notifier, 0, sizeof(*event_notifier));
2093
44760c20
JR
2094 if (lttng_event_rule_targets_agent_domain(rule)) {
2095 /*
2096 * Special event for agents
2097 * The actual meat of the event is in the filter that will be
2098 * attached later on.
2099 * Set the default values for the agent event.
2100 */
2101 pattern = event_get_default_agent_ust_name(
2102 lttng_event_rule_get_domain_type(rule));
2103 loglevel = 0;
fc4b93fa 2104 ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL;
44760c20 2105 } else {
85b05318 2106 const struct lttng_log_level_rule *log_level_rule;
993578ff 2107
45ce77e1
JR
2108 assert(lttng_event_rule_get_type(rule) ==
2109 LTTNG_EVENT_RULE_TYPE_USER_TRACEPOINT);
2110
2111 status = lttng_event_rule_user_tracepoint_get_name_pattern(rule, &pattern);
44760c20
JR
2112 if (status != LTTNG_EVENT_RULE_STATUS_OK) {
2113 /* At this point, this is a fatal error. */
2114 abort();
2115 }
993578ff 2116
45ce77e1 2117 status = lttng_event_rule_user_tracepoint_get_log_level_rule(
85b05318
JR
2118 rule, &log_level_rule);
2119 if (status == LTTNG_EVENT_RULE_STATUS_UNSET) {
fc4b93fa 2120 ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL;
85b05318
JR
2121 } else if (status == LTTNG_EVENT_RULE_STATUS_OK) {
2122 enum lttng_log_level_rule_status llr_status;
2123
2124 switch (lttng_log_level_rule_get_type(log_level_rule)) {
2125 case LTTNG_LOG_LEVEL_RULE_TYPE_EXACTLY:
2126 ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_SINGLE;
2127 llr_status = lttng_log_level_rule_exactly_get_level(
2128 log_level_rule, &loglevel);
2129 break;
2130 case LTTNG_LOG_LEVEL_RULE_TYPE_AT_LEAST_AS_SEVERE_AS:
2131 ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_RANGE;
2132 llr_status = lttng_log_level_rule_at_least_as_severe_as_get_level(
2133 log_level_rule, &loglevel);
2134 break;
2135 default:
2136 abort();
2137 }
993578ff 2138
85b05318
JR
2139 assert(llr_status == LTTNG_LOG_LEVEL_RULE_STATUS_OK);
2140 } else {
2141 /* At this point this is a fatal error. */
2142 abort();
44760c20 2143 }
993578ff
JR
2144 }
2145
fc4b93fa 2146 event_notifier->event.instrumentation = LTTNG_UST_ABI_TRACEPOINT;
993578ff 2147 ret = lttng_strncpy(event_notifier->event.name, pattern,
cf771fa9 2148 sizeof(event_notifier->event.name));
993578ff
JR
2149 if (ret) {
2150 ERR("Failed to copy event rule pattern to notifier: pattern = '%s' ",
2151 pattern);
2152 goto end;
2153 }
2154
2155 event_notifier->event.loglevel_type = ust_loglevel_type;
2156 event_notifier->event.loglevel = loglevel;
2157end:
2158 return ret;
2159}
2160
2161/*
2162 * Create the specified event notifier against the user space tracer of a
2163 * given application.
2164 */
2165static int create_ust_event_notifier(struct ust_app *app,
2166 struct ust_app_event_notifier_rule *ua_event_notifier_rule)
2167{
2168 int ret = 0;
267d66aa
JR
2169 enum lttng_condition_status condition_status;
2170 const struct lttng_condition *condition = NULL;
fc4b93fa 2171 struct lttng_ust_abi_event_notifier event_notifier;
267d66aa 2172 const struct lttng_event_rule *event_rule = NULL;
f83be61d
JR
2173 unsigned int capture_bytecode_count = 0, i;
2174 enum lttng_condition_status cond_status;
45ce77e1 2175 enum lttng_event_rule_type event_rule_type;
993578ff
JR
2176
2177 health_code_update();
2178 assert(app->event_notifier_group.object);
2179
267d66aa
JR
2180 condition = lttng_trigger_get_const_condition(
2181 ua_event_notifier_rule->trigger);
2182 assert(condition);
8dbb86b8
JR
2183 assert(lttng_condition_get_type(condition) ==
2184 LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES);
993578ff 2185
8dbb86b8 2186 condition_status = lttng_condition_event_rule_matches_get_rule(
d602bd6a 2187 condition, &event_rule);
267d66aa 2188 assert(condition_status == LTTNG_CONDITION_STATUS_OK);
d602bd6a 2189
267d66aa 2190 assert(event_rule);
45ce77e1
JR
2191
2192 event_rule_type = lttng_event_rule_get_type(event_rule);
2193 assert(event_rule_type == LTTNG_EVENT_RULE_TYPE_USER_TRACEPOINT ||
2194 event_rule_type == LTTNG_EVENT_RULE_TYPE_JUL_LOGGING ||
2195 event_rule_type ==
2196 LTTNG_EVENT_RULE_TYPE_LOG4J_LOGGING ||
2197 event_rule_type ==
2198 LTTNG_EVENT_RULE_TYPE_PYTHON_LOGGING);
267d66aa
JR
2199
2200 init_ust_event_notifier_from_event_rule(event_rule, &event_notifier);
993578ff 2201 event_notifier.event.token = ua_event_notifier_rule->token;
533a90fb 2202 event_notifier.error_counter_index = ua_event_notifier_rule->error_counter_index;
993578ff
JR
2203
2204 /* Create UST event notifier against the tracer. */
2205 pthread_mutex_lock(&app->sock_lock);
b623cb6a 2206 ret = lttng_ust_ctl_create_event_notifier(app->sock, &event_notifier,
993578ff
JR
2207 app->event_notifier_group.object,
2208 &ua_event_notifier_rule->obj);
2209 pthread_mutex_unlock(&app->sock_lock);
2210 if (ret < 0) {
569744c5
JR
2211 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
2212 ret = 0;
2213 DBG3("UST app create event notifier failed. Application is dead: pid = %d, sock = %d",
2214 app->pid, app->sock);
2215 } else if (ret == -EAGAIN) {
993578ff 2216 ret = 0;
569744c5
JR
2217 WARN("UST app create event notifier failed. Communication time out: pid = %d, sock = %d",
2218 app->pid, app->sock);
2219 } else {
2220 ERR("UST app create event notifier '%s' failed with ret %d: pid = %d, sock = %d",
2221 event_notifier.event.name, ret, app->pid,
2222 app->sock);
993578ff 2223 }
993578ff
JR
2224 goto error;
2225 }
2226
2227 ua_event_notifier_rule->handle = ua_event_notifier_rule->obj->handle;
2228
569744c5
JR
2229 DBG2("UST app event notifier %s created successfully: app = '%s': pid = %d), object = %p",
2230 event_notifier.event.name, app->name, app->pid,
993578ff
JR
2231 ua_event_notifier_rule->obj);
2232
2233 health_code_update();
2234
2235 /* Set filter if one is present. */
2236 if (ua_event_notifier_rule->filter) {
2237 ret = set_ust_object_filter(app, ua_event_notifier_rule->filter,
2238 ua_event_notifier_rule->obj);
2239 if (ret < 0) {
2240 goto error;
2241 }
2242 }
2243
2244 /* Set exclusions for the event. */
2245 if (ua_event_notifier_rule->exclusion) {
2246 ret = set_ust_object_exclusions(app,
2247 ua_event_notifier_rule->exclusion,
2248 ua_event_notifier_rule->obj);
2249 if (ret < 0) {
2250 goto error;
2251 }
2252 }
f83be61d
JR
2253
2254 /* Set the capture bytecodes. */
8dbb86b8 2255 cond_status = lttng_condition_event_rule_matches_get_capture_descriptor_count(
f83be61d
JR
2256 condition, &capture_bytecode_count);
2257 assert(cond_status == LTTNG_CONDITION_STATUS_OK);
2258
2259 for (i = 0; i < capture_bytecode_count; i++) {
2260 const struct lttng_bytecode *capture_bytecode =
8dbb86b8 2261 lttng_condition_event_rule_matches_get_capture_bytecode_at_index(
f83be61d
JR
2262 condition, i);
2263
11f6ce94 2264 ret = set_ust_capture(app, capture_bytecode, i,
f83be61d
JR
2265 ua_event_notifier_rule->obj);
2266 if (ret < 0) {
2267 goto error;
2268 }
2269 }
993578ff
JR
2270
2271 /*
2272 * We now need to explicitly enable the event, since it
2273 * is disabled at creation.
2274 */
2275 ret = enable_ust_object(app, ua_event_notifier_rule->obj);
2276 if (ret < 0) {
2277 /*
2278 * If we hit an EPERM, something is wrong with our enable call.
2279 * If we get an EEXIST, there is a problem on the tracer side
2280 * since we just created it.
2281 */
2282 switch (ret) {
2283 case -LTTNG_UST_ERR_PERM:
2284 /* Code flow problem. */
2285 abort();
2286 case -LTTNG_UST_ERR_EXIST:
2287 /* It's OK for our use case. */
2288 ret = 0;
2289 break;
2290 default:
2291 break;
2292 }
2293
2294 goto error;
2295 }
2296
2297 ua_event_notifier_rule->enabled = true;
2298
2299error:
2300 health_code_update();
2301 return ret;
2302}
2303
5b4a0ec0
DG
2304/*
2305  * Copy data between a UST app event and a LTT event.
2306 */
421cb601 2307static void shadow_copy_event(struct ust_app_event *ua_event,
48842b30
DG
2308 struct ltt_ust_event *uevent)
2309{
b4ffad32
JI
2310 size_t exclusion_alloc_size;
2311
48842b30
DG
2312 strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
2313 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
2314
fc34caaa
DG
2315 ua_event->enabled = uevent->enabled;
2316
5b4a0ec0
DG
2317 /* Copy event attributes */
2318 memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));
2319
53a80697
MD
2320 /* Copy filter bytecode */
2321 if (uevent->filter) {
2b00d462 2322 ua_event->filter = lttng_bytecode_copy(uevent->filter);
025faf73 2323 		/* Filter might be NULL here in case of ENOMEM. */
53a80697 2324 }
b4ffad32
JI
2325
2326 /* Copy exclusion data */
2327 if (uevent->exclusion) {
51755dc8 2328 exclusion_alloc_size = sizeof(struct lttng_event_exclusion) +
fc4b93fa 2329 LTTNG_UST_ABI_SYM_NAME_LEN * uevent->exclusion->count;
b4ffad32 2330 ua_event->exclusion = zmalloc(exclusion_alloc_size);
5f8df26c
JI
2331 if (ua_event->exclusion == NULL) {
2332 PERROR("malloc");
2333 } else {
2334 memcpy(ua_event->exclusion, uevent->exclusion,
2335 exclusion_alloc_size);
b4ffad32
JI
2336 }
2337 }
48842b30
DG
2338}
2339
5b4a0ec0
DG
2340/*
2341  * Copy data between a UST app channel and a LTT channel.
2342 */
421cb601 2343static void shadow_copy_channel(struct ust_app_channel *ua_chan,
48842b30
DG
2344 struct ltt_ust_channel *uchan)
2345{
fc34caaa 2346 DBG2("UST app shadow copy of channel %s started", ua_chan->name);
48842b30
DG
2347
2348 strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
2349 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
ffe60014 2350
1624d5b7
JD
2351 ua_chan->tracefile_size = uchan->tracefile_size;
2352 ua_chan->tracefile_count = uchan->tracefile_count;
2353
ffe60014
DG
2354 /* Copy event attributes since the layout is different. */
2355 ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
2356 ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
2357 ua_chan->attr.overwrite = uchan->attr.overwrite;
2358 ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
2359 ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
e9404c27 2360 ua_chan->monitor_timer_interval = uchan->monitor_timer_interval;
ffe60014 2361 ua_chan->attr.output = uchan->attr.output;
491d1539
MD
2362 ua_chan->attr.blocking_timeout = uchan->attr.u.s.blocking_timeout;
2363
ffe60014
DG
2364 /*
2365 * Note that the attribute channel type is not set since the channel on the
2366 * tracing registry side does not have this information.
2367 */
48842b30 2368
fc34caaa 2369 ua_chan->enabled = uchan->enabled;
7972aab2 2370 ua_chan->tracing_channel_id = uchan->id;
fc34caaa 2371
fc34caaa 2372 DBG3("UST app shadow copy of channel %s done", ua_chan->name);
48842b30
DG
2373}
2374
5b4a0ec0
DG
2375/*
2376 * Copy data between a UST app session and a regular LTT session.
2377 */
421cb601 2378static void shadow_copy_session(struct ust_app_session *ua_sess,
bec39940 2379 struct ltt_ust_session *usess, struct ust_app *app)
48842b30 2380{
477d7741
MD
2381 struct tm *timeinfo;
2382 char datetime[16];
2383 int ret;
d7ba1388 2384 char tmp_shm_path[PATH_MAX];
477d7741 2385
940c4592 2386 timeinfo = localtime(&app->registration_time);
477d7741 2387 strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);
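	/* The datetime string is appended to the per-PID trace and shm paths built below. */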
48842b30 2388
421cb601 2389 DBG2("Shadow copy of session handle %d", ua_sess->handle);
48842b30 2390
7972aab2
DG
2391 ua_sess->tracing_id = usess->id;
2392 ua_sess->id = get_next_session_id();
ff588497
JR
2393 LTTNG_OPTIONAL_SET(&ua_sess->real_credentials.uid, app->uid);
2394 LTTNG_OPTIONAL_SET(&ua_sess->real_credentials.gid, app->gid);
2395 LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.uid, usess->uid);
2396 LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.gid, usess->gid);
7972aab2
DG
2397 ua_sess->buffer_type = usess->buffer_type;
2398 ua_sess->bits_per_long = app->bits_per_long;
6addfa37 2399
7972aab2 2400 /* There is only one consumer object per session possible. */
6addfa37 2401 consumer_output_get(usess->consumer);
7972aab2 2402 ua_sess->consumer = usess->consumer;
6addfa37 2403
2bba9e53 2404 ua_sess->output_traces = usess->output_traces;
ecc48a90 2405 ua_sess->live_timer_interval = usess->live_timer_interval;
84ad93e8
DG
2406 copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
2407 &usess->metadata_attr);
7972aab2
DG
2408
2409 switch (ua_sess->buffer_type) {
2410 case LTTNG_BUFFER_PER_PID:
2411 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
dec56f6c 2412 DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
7972aab2
DG
2413 datetime);
2414 break;
2415 case LTTNG_BUFFER_PER_UID:
2416 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
470cc211 2417 DEFAULT_UST_TRACE_UID_PATH,
ff588497 2418 lttng_credentials_get_uid(&ua_sess->real_credentials),
470cc211 2419 app->bits_per_long);
7972aab2
DG
2420 break;
2421 default:
2422 assert(0);
2423 goto error;
2424 }
477d7741
MD
2425 if (ret < 0) {
2426 PERROR("asprintf UST shadow copy session");
477d7741 2427 assert(0);
7972aab2 2428 goto error;
477d7741
MD
2429 }
2430
3d071855
MD
2431 strncpy(ua_sess->root_shm_path, usess->root_shm_path,
2432 sizeof(ua_sess->root_shm_path));
2433 ua_sess->root_shm_path[sizeof(ua_sess->root_shm_path) - 1] = '\0';
d7ba1388
MD
2434 strncpy(ua_sess->shm_path, usess->shm_path,
2435 sizeof(ua_sess->shm_path));
2436 ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
2437 if (ua_sess->shm_path[0]) {
2438 switch (ua_sess->buffer_type) {
2439 case LTTNG_BUFFER_PER_PID:
2440 ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
5da88b0f 2441 "/" DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s",
d7ba1388
MD
2442 app->name, app->pid, datetime);
2443 break;
2444 case LTTNG_BUFFER_PER_UID:
2445 ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
5da88b0f 2446 "/" DEFAULT_UST_TRACE_UID_PATH,
d7ba1388
MD
2447 app->uid, app->bits_per_long);
2448 break;
2449 default:
2450 assert(0);
2451 goto error;
2452 }
2453 if (ret < 0) {
2454 PERROR("sprintf UST shadow copy session");
2455 assert(0);
2456 goto error;
2457 }
2458 strncat(ua_sess->shm_path, tmp_shm_path,
2459 sizeof(ua_sess->shm_path) - strlen(ua_sess->shm_path) - 1);
2460 ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
2461 }
6addfa37 2462 return;
7972aab2
DG
2463
2464error:
6addfa37 2465 consumer_output_put(ua_sess->consumer);
48842b30
DG
2466}
2467
78f0bacd
DG
2468/*
2469  * Lookup session wrapper.
2470 */
84cd17c6 2471static
fb9a95c4 2472void __lookup_session_by_app(const struct ltt_ust_session *usess,
bec39940 2473 struct ust_app *app, struct lttng_ht_iter *iter)
84cd17c6
MD
2474{
2475 /* Get right UST app session from app */
d9bf3ca4 2476 lttng_ht_lookup(app->sessions, &usess->id, iter);
84cd17c6
MD
2477}
2478
421cb601
DG
2479/*
2480 * Return ust app session from the app session hashtable using the UST session
a991f516 2481 * id.
421cb601 2482 */
48842b30 2483static struct ust_app_session *lookup_session_by_app(
fb9a95c4 2484 const struct ltt_ust_session *usess, struct ust_app *app)
48842b30 2485{
bec39940 2486 struct lttng_ht_iter iter;
d9bf3ca4 2487 struct lttng_ht_node_u64 *node;
48842b30 2488
84cd17c6 2489 __lookup_session_by_app(usess, app, &iter);
d9bf3ca4 2490 node = lttng_ht_iter_get_node_u64(&iter);
48842b30
DG
2491 if (node == NULL) {
2492 goto error;
2493 }
2494
2495 return caa_container_of(node, struct ust_app_session, node);
2496
2497error:
2498 return NULL;
2499}
2500
7972aab2
DG
2501/*
2502 * Setup buffer registry per PID for the given session and application. If none
2503 * is found, a new one is created, added to the global registry and
2504 * initialized. If regp is valid, it's set with the newly created object.
2505 *
2506 * Return 0 on success or else a negative value.
2507 */
2508static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
2509 struct ust_app *app, struct buffer_reg_pid **regp)
2510{
2511 int ret = 0;
2512 struct buffer_reg_pid *reg_pid;
2513
2514 assert(ua_sess);
2515 assert(app);
2516
2517 rcu_read_lock();
2518
2519 reg_pid = buffer_reg_pid_find(ua_sess->id);
2520 if (!reg_pid) {
2521 /*
2522 * This is the create channel path meaning that if there is NO
2523 * registry available, we have to create one for this session.
2524 */
d7ba1388 2525 ret = buffer_reg_pid_create(ua_sess->id, &reg_pid,
3d071855 2526 ua_sess->root_shm_path, ua_sess->shm_path);
7972aab2
DG
2527 if (ret < 0) {
2528 goto error;
2529 }
7972aab2
DG
2530 } else {
2531 goto end;
2532 }
2533
2534 /* Initialize registry. */
2535 ret = ust_registry_session_init(&reg_pid->registry->reg.ust, app,
2536 app->bits_per_long, app->uint8_t_alignment,
2537 app->uint16_t_alignment, app->uint32_t_alignment,
af6142cf 2538 app->uint64_t_alignment, app->long_alignment,
470cc211
JG
2539 app->byte_order, app->version.major, app->version.minor,
2540 reg_pid->root_shm_path, reg_pid->shm_path,
ff588497
JR
2541 lttng_credentials_get_uid(&ua_sess->effective_credentials),
2542 lttng_credentials_get_gid(&ua_sess->effective_credentials),
2543 ua_sess->tracing_id,
8de88061 2544 app->uid);
7972aab2 2545 if (ret < 0) {
286c991a
MD
2546 /*
2547 * reg_pid->registry->reg.ust is NULL upon error, so we need to
2548 * destroy the buffer registry, because it is always expected
2549 * that if the buffer registry can be found, its ust registry is
2550 * non-NULL.
2551 */
2552 buffer_reg_pid_destroy(reg_pid);
7972aab2
DG
2553 goto error;
2554 }
2555
286c991a
MD
2556 buffer_reg_pid_add(reg_pid);
2557
7972aab2
DG
2558 DBG3("UST app buffer registry per PID created successfully");
2559
2560end:
2561 if (regp) {
2562 *regp = reg_pid;
2563 }
2564error:
2565 rcu_read_unlock();
2566 return ret;
2567}
2568
2569/*
2570 * Setup buffer registry per UID for the given session and application. If none
2571 * is found, a new one is created, added to the global registry and
2572 * initialized. If regp is valid, it's set with the newly created object.
2573 *
2574 * Return 0 on success or else a negative value.
2575 */
2576static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
d7ba1388 2577 struct ust_app_session *ua_sess,
7972aab2
DG
2578 struct ust_app *app, struct buffer_reg_uid **regp)
2579{
2580 int ret = 0;
2581 struct buffer_reg_uid *reg_uid;
2582
2583 assert(usess);
2584 assert(app);
2585
2586 rcu_read_lock();
2587
2588 reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
2589 if (!reg_uid) {
2590 /*
2591 * This is the create channel path meaning that if there is NO
2592 * registry available, we have to create one for this session.
2593 */
2594 ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
3d071855
MD
2595 LTTNG_DOMAIN_UST, &reg_uid,
2596 ua_sess->root_shm_path, ua_sess->shm_path);
7972aab2
DG
2597 if (ret < 0) {
2598 goto error;
2599 }
7972aab2
DG
2600 } else {
2601 goto end;
2602 }
2603
2604 /* Initialize registry. */
af6142cf 2605 ret = ust_registry_session_init(&reg_uid->registry->reg.ust, NULL,
7972aab2
DG
2606 app->bits_per_long, app->uint8_t_alignment,
2607 app->uint16_t_alignment, app->uint32_t_alignment,
af6142cf
MD
2608 app->uint64_t_alignment, app->long_alignment,
2609 app->byte_order, app->version.major,
3d071855 2610 app->version.minor, reg_uid->root_shm_path,
8de88061
JR
2611 reg_uid->shm_path, usess->uid, usess->gid,
2612 ua_sess->tracing_id, app->uid);
7972aab2 2613 if (ret < 0) {
286c991a
MD
2614 /*
2615 * reg_uid->registry->reg.ust is NULL upon error, so we need to
2616 * destroy the buffer registry, because it is always expected
2617 * that if the buffer registry can be found, its ust registry is
2618 * non-NULL.
2619 */
2620 buffer_reg_uid_destroy(reg_uid, NULL);
7972aab2
DG
2621 goto error;
2622 }
2623 /* Add node to teardown list of the session. */
2624 cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);
2625
286c991a 2626 buffer_reg_uid_add(reg_uid);
7972aab2 2627
286c991a 2628 DBG3("UST app buffer registry per UID created successfully");
7972aab2
DG
2629end:
2630 if (regp) {
2631 *regp = reg_uid;
2632 }
2633error:
2634 rcu_read_unlock();
2635 return ret;
2636}
2637
421cb601 2638/*
3d8ca23b 2639 * Create a session on the tracer side for the given app.
421cb601 2640 *
3d8ca23b
DG
2641 * On success, ua_sess_ptr is populated with the session pointer or else left
2642 * untouched. If the session was created, is_created is set to 1. On error,
2643 * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
2644 * be NULL.
2645 *
2646 * Returns 0 on success or else a negative code which is either -ENOMEM or
b623cb6a 2647  * -ENOTCONN, which is the default code if lttng_ust_ctl_create_session fails.
421cb601 2648 */
03f91eaa 2649static int find_or_create_ust_app_session(struct ltt_ust_session *usess,
3d8ca23b
DG
2650 struct ust_app *app, struct ust_app_session **ua_sess_ptr,
2651 int *is_created)
421cb601 2652{
3d8ca23b 2653 int ret, created = 0;
421cb601
DG
2654 struct ust_app_session *ua_sess;
2655
3d8ca23b
DG
2656 assert(usess);
2657 assert(app);
2658 assert(ua_sess_ptr);
2659
840cb59c 2660 health_code_update();
86acf0da 2661
421cb601
DG
2662 ua_sess = lookup_session_by_app(usess, app);
2663 if (ua_sess == NULL) {
d9bf3ca4 2664 DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
852d0037 2665 app->pid, usess->id);
40bbd087 2666 ua_sess = alloc_ust_app_session();
421cb601
DG
2667 if (ua_sess == NULL) {
2668 		/* Only malloc can fail, so something is really wrong. */
3d8ca23b
DG
2669 ret = -ENOMEM;
2670 goto error;
421cb601 2671 }
477d7741 2672 shadow_copy_session(ua_sess, usess, app);
3d8ca23b 2673 created = 1;
421cb601
DG
2674 }
2675
7972aab2
DG
2676 switch (usess->buffer_type) {
2677 case LTTNG_BUFFER_PER_PID:
2678 /* Init local registry. */
2679 ret = setup_buffer_reg_pid(ua_sess, app, NULL);
421cb601 2680 if (ret < 0) {
e64207cf 2681 delete_ust_app_session(-1, ua_sess, app);
7972aab2
DG
2682 goto error;
2683 }
2684 break;
2685 case LTTNG_BUFFER_PER_UID:
2686 /* Look for a global registry. If none exists, create one. */
d7ba1388 2687 ret = setup_buffer_reg_uid(usess, ua_sess, app, NULL);
7972aab2 2688 if (ret < 0) {
e64207cf 2689 delete_ust_app_session(-1, ua_sess, app);
7972aab2
DG
2690 goto error;
2691 }
2692 break;
2693 default:
2694 assert(0);
2695 ret = -EINVAL;
2696 goto error;
2697 }
2698
2699 health_code_update();
2700
2701 if (ua_sess->handle == -1) {
fb45065e 2702 pthread_mutex_lock(&app->sock_lock);
b623cb6a 2703 ret = lttng_ust_ctl_create_session(app->sock);
fb45065e 2704 pthread_mutex_unlock(&app->sock_lock);
7972aab2 2705 if (ret < 0) {
569744c5
JR
2706 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
2707 DBG("UST app creating session failed. Application is dead: pid = %d, sock = %d",
2708 app->pid, app->sock);
3757b385 2709 ret = 0;
569744c5
JR
2710 } else if (ret == -EAGAIN) {
2711 DBG("UST app creating session failed. Communication time out: pid = %d, sock = %d",
2712 app->pid, app->sock);
2713 ret = 0;
2714 } else {
2715 ERR("UST app creating session failed with ret %d: pid = %d, sock =%d",
2716 ret, app->pid, app->sock);
ffe60014 2717 }
d0b96690 2718 delete_ust_app_session(-1, ua_sess, app);
3d8ca23b
DG
2719 if (ret != -ENOMEM) {
2720 /*
2721 * Tracer is probably gone or got an internal error so let's
2722 				 * behave as if it will soon unregister or is no longer usable.
2723 */
2724 ret = -ENOTCONN;
2725 }
2726 goto error;
421cb601
DG
2727 }
2728
7972aab2
DG
2729 ua_sess->handle = ret;
2730
2731 /* Add ust app session to app's HT */
d9bf3ca4
MD
2732 lttng_ht_node_init_u64(&ua_sess->node,
2733 ua_sess->tracing_id);
2734 lttng_ht_add_unique_u64(app->sessions, &ua_sess->node);
10b56aef
MD
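		/* Also index the session by the object descriptor handle returned by the tracer. */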
2735 lttng_ht_node_init_ulong(&ua_sess->ust_objd_node, ua_sess->handle);
2736 lttng_ht_add_unique_ulong(app->ust_sessions_objd,
2737 &ua_sess->ust_objd_node);
7972aab2
DG
2738
2739 DBG2("UST app session created successfully with handle %d", ret);
2740 }
2741
2742 *ua_sess_ptr = ua_sess;
2743 if (is_created) {
2744 *is_created = created;
2745 }
2746
2747 /* Everything went well. */
2748 ret = 0;
2749
2750error:
2751 health_code_update();
2752 return ret;
2753}
2754
6a6b2068
JG
2755/*
2756 * Match function for a hash table lookup of ust_app_ctx.
2757 *
2758 * It matches an ust app context based on the context type and, in the case
2759 * of perf counters, their name.
2760 */
2761static int ht_match_ust_app_ctx(struct cds_lfht_node *node, const void *_key)
2762{
2763 struct ust_app_ctx *ctx;
bdf64013 2764 const struct lttng_ust_context_attr *key;
6a6b2068
JG
2765
2766 assert(node);
2767 assert(_key);
2768
2769 ctx = caa_container_of(node, struct ust_app_ctx, node.node);
2770 key = _key;
2771
2772 /* Context type */
2773 if (ctx->ctx.ctx != key->ctx) {
2774 goto no_match;
2775 }
2776
bdf64013 2777 switch(key->ctx) {
fc4b93fa 2778 case LTTNG_UST_ABI_CONTEXT_PERF_THREAD_COUNTER:
6a6b2068 2779 if (strncmp(key->u.perf_counter.name,
bdf64013
JG
2780 ctx->ctx.u.perf_counter.name,
2781 sizeof(key->u.perf_counter.name))) {
2782 goto no_match;
2783 }
2784 break;
fc4b93fa 2785 case LTTNG_UST_ABI_CONTEXT_APP_CONTEXT:
bdf64013
JG
2786 if (strcmp(key->u.app_ctx.provider_name,
2787 ctx->ctx.u.app_ctx.provider_name) ||
2788 strcmp(key->u.app_ctx.ctx_name,
2789 ctx->ctx.u.app_ctx.ctx_name)) {
6a6b2068
JG
2790 goto no_match;
2791 }
bdf64013
JG
2792 break;
2793 default:
2794 break;
6a6b2068
JG
2795 }
2796
2797 /* Match. */
2798 return 1;
2799
2800no_match:
2801 return 0;
2802}
2803
2804/*
2805 * Lookup for an ust app context from an lttng_ust_context.
2806 *
be184a0f 2807 * Must be called while holding RCU read side lock.
6a6b2068
JG
2808 * Return an ust_app_ctx object or NULL on error.
2809 */
2810static
2811struct ust_app_ctx *find_ust_app_context(struct lttng_ht *ht,
bdf64013 2812 struct lttng_ust_context_attr *uctx)
6a6b2068
JG
2813{
2814 struct lttng_ht_iter iter;
2815 struct lttng_ht_node_ulong *node;
2816 struct ust_app_ctx *app_ctx = NULL;
2817
2818 assert(uctx);
2819 assert(ht);
2820
2821 /* Lookup using the lttng_ust_context_type and a custom match fct. */
2822 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) uctx->ctx, lttng_ht_seed),
2823 ht_match_ust_app_ctx, uctx, &iter.iter);
2824 node = lttng_ht_iter_get_node_ulong(&iter);
2825 if (!node) {
2826 goto end;
2827 }
2828
2829 app_ctx = caa_container_of(node, struct ust_app_ctx, node);
2830
2831end:
2832 return app_ctx;
2833}
2834
7972aab2
DG
2835/*
2836 * Create a context for the channel on the tracer.
2837 *
2838 * Called with UST app session lock held and a RCU read side lock.
2839 */
2840static
c9edf082 2841int create_ust_app_channel_context(struct ust_app_channel *ua_chan,
f3db82be 2842 struct lttng_ust_context_attr *uctx,
7972aab2
DG
2843 struct ust_app *app)
2844{
2845 int ret = 0;
7972aab2
DG
2846 struct ust_app_ctx *ua_ctx;
2847
2848 DBG2("UST app adding context to channel %s", ua_chan->name);
2849
6a6b2068
JG
2850 ua_ctx = find_ust_app_context(ua_chan->ctx, uctx);
2851 if (ua_ctx) {
7972aab2
DG
2852 ret = -EEXIST;
2853 goto error;
2854 }
2855
2856 ua_ctx = alloc_ust_app_ctx(uctx);
2857 if (ua_ctx == NULL) {
2858 /* malloc failed */
7682f304 2859 ret = -ENOMEM;
7972aab2
DG
2860 goto error;
2861 }
2862
2863 lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
aa3514e9 2864 lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
31746f93 2865 cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
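	/*
	 * From this point the context is reachable through the channel's ctx
	 * hash table (for lookups) and through ctx_list, in creation order.
	 */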
7972aab2
DG
2866
2867 ret = create_ust_channel_context(ua_chan, ua_ctx, app);
2868 if (ret < 0) {
2869 goto error;
2870 }
2871
2872error:
2873 return ret;
2874}
2875
2876/*
2877 * Enable on the tracer side a ust app event for the session and channel.
2878 *
2879 * Called with UST app session lock held.
2880 */
2881static
2882int enable_ust_app_event(struct ust_app_session *ua_sess,
2883 struct ust_app_event *ua_event, struct ust_app *app)
2884{
2885 int ret;
2886
3428a1b7 2887 ret = enable_ust_object(app, ua_event->obj);
7972aab2
DG
2888 if (ret < 0) {
2889 goto error;
2890 }
2891
2892 ua_event->enabled = 1;
2893
2894error:
2895 return ret;
2896}
2897
2898/*
2899 * Disable on the tracer side a ust app event for the session and channel.
2900 */
2901static int disable_ust_app_event(struct ust_app_session *ua_sess,
2902 struct ust_app_event *ua_event, struct ust_app *app)
2903{
2904 int ret;
2905
e2456d0a 2906 ret = disable_ust_object(app, ua_event->obj);
7972aab2
DG
2907 if (ret < 0) {
2908 goto error;
2909 }
2910
2911 ua_event->enabled = 0;
2912
2913error:
2914 return ret;
2915}
2916
2917/*
2918 * Lookup ust app channel for session and disable it on the tracer side.
2919 */
2920static
2921int disable_ust_app_channel(struct ust_app_session *ua_sess,
2922 struct ust_app_channel *ua_chan, struct ust_app *app)
2923{
2924 int ret;
2925
2926 ret = disable_ust_channel(app, ua_sess, ua_chan);
2927 if (ret < 0) {
2928 goto error;
2929 }
2930
2931 ua_chan->enabled = 0;
2932
2933error:
2934 return ret;
2935}
2936
2937/*
2938 * Lookup ust app channel for session and enable it on the tracer side. This
2939 * MUST be called with a RCU read side lock acquired.
2940 */
2941static int enable_ust_app_channel(struct ust_app_session *ua_sess,
2942 struct ltt_ust_channel *uchan, struct ust_app *app)
2943{
2944 int ret = 0;
2945 struct lttng_ht_iter iter;
2946 struct lttng_ht_node_str *ua_chan_node;
2947 struct ust_app_channel *ua_chan;
2948
2949 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
2950 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
2951 if (ua_chan_node == NULL) {
d9bf3ca4 2952 DBG2("Unable to find channel %s in ust session id %" PRIu64,
7972aab2
DG
2953 uchan->name, ua_sess->tracing_id);
2954 goto error;
2955 }
2956
2957 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
2958
2959 ret = enable_ust_channel(app, ua_sess, ua_chan);
2960 if (ret < 0) {
2961 goto error;
2962 }
2963
2964error:
2965 return ret;
2966}
2967
2968/*
2969 * Ask the consumer to create a channel and get it if successful.
2970 *
fad1ed2f
JR
2971 * Called with UST app session lock held.
2972 *
7972aab2
DG
2973 * Return 0 on success or else a negative value.
2974 */
2975static int do_consumer_create_channel(struct ltt_ust_session *usess,
2976 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
e098433c
JG
2977 int bitness, struct ust_registry_session *registry,
2978 uint64_t trace_archive_id)
7972aab2
DG
2979{
2980 int ret;
2981 unsigned int nb_fd = 0;
2982 struct consumer_socket *socket;
2983
2984 assert(usess);
2985 assert(ua_sess);
2986 assert(ua_chan);
2987 assert(registry);
2988
2989 rcu_read_lock();
2990 health_code_update();
2991
2992 /* Get the right consumer socket for the application. */
2993 socket = consumer_find_socket_by_bitness(bitness, usess->consumer);
2994 if (!socket) {
2995 ret = -EINVAL;
2996 goto error;
2997 }
2998
2999 health_code_update();
3000
3001 /* Need one fd for the channel. */
3002 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
3003 if (ret < 0) {
3004 ERR("Exhausted number of available FD upon create channel");
3005 goto error;
3006 }
3007
3008 /*
3009 * Ask consumer to create channel. The consumer will return the number of
3010 	 * streams we have to expect.
3011 */
3012 ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
d2956687 3013 registry, usess->current_trace_chunk);
7972aab2
DG
3014 if (ret < 0) {
3015 goto error_ask;
3016 }
3017
3018 /*
3019 	 * Compute the number of fds needed before receiving them. It must be 2 per
3020 * stream (2 being the default value here).
3021 */
3022 nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;
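	/* The two fds per stream are presumably the stream's shm fd and its wakeup fd. */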
3023
3024 /* Reserve the amount of file descriptor we need. */
3025 ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
3026 if (ret < 0) {
3027 ERR("Exhausted number of available FD upon create channel");
3028 goto error_fd_get_stream;
3029 }
3030
3031 health_code_update();
3032
3033 /*
db786d44 3034 * Now get the channel from the consumer. This call will populate the stream
7972aab2
DG
3035 * list of that channel and set the ust objects.
3036 */
d9078d0c
DG
3037 if (usess->consumer->enabled) {
3038 ret = ust_consumer_get_channel(socket, ua_chan);
3039 if (ret < 0) {
3040 goto error_destroy;
3041 }
7972aab2
DG
3042 }
3043
3044 rcu_read_unlock();
3045 return 0;
3046
3047error_destroy:
3048 lttng_fd_put(LTTNG_FD_APPS, nb_fd);
3049error_fd_get_stream:
3050 /*
3051 * Initiate a destroy channel on the consumer since we had an error
3052 * handling it on our side. The return value is of no importance since we
3053 * already have a ret value set by the previous error that we need to
3054 * return.
3055 */
3056 (void) ust_consumer_destroy_channel(socket, ua_chan);
3057error_ask:
3058 lttng_fd_put(LTTNG_FD_APPS, 1);
3059error:
3060 health_code_update();
3061 rcu_read_unlock();
3062 return ret;
3063}
3064
3065/*
3066 * Duplicate the ust data object of the ust app stream and save it in the
3067 * buffer registry stream.
3068 *
3069 * Return 0 on success or else a negative value.
3070 */
3071static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
3072 struct ust_app_stream *stream)
3073{
3074 int ret;
3075
3076 assert(reg_stream);
3077 assert(stream);
3078
ec472351 3079 /* Duplicating a stream requires 2 new fds. Reserve them. */
7972aab2
DG
3080 ret = lttng_fd_get(LTTNG_FD_APPS, 2);
3081 if (ret < 0) {
3082 ERR("Exhausted number of available FD upon duplicate stream");
3083 goto error;
3084 }
3085
3086 /* Duplicate object for stream once the original is in the registry. */
b623cb6a 3087 ret = lttng_ust_ctl_duplicate_ust_object_data(&stream->obj,
7972aab2
DG
3088 reg_stream->obj.ust);
3089 if (ret < 0) {
3090 ERR("Duplicate stream obj from %p to %p failed with ret %d",
3091 reg_stream->obj.ust, stream->obj, ret);
3092 lttng_fd_put(LTTNG_FD_APPS, 2);
3093 goto error;
3094 }
3095 stream->handle = stream->obj->handle;
3096
3097error:
3098 return ret;
3099}
3100
3101/*
3102  * Duplicate the ust data object of the ust app channel and save it in the
3103 * buffer registry channel.
3104 *
3105 * Return 0 on success or else a negative value.
3106 */
3273699d 3107static int duplicate_channel_object(struct buffer_reg_channel *buf_reg_chan,
7972aab2
DG
3108 struct ust_app_channel *ua_chan)
3109{
3110 int ret;
3111
3273699d 3112 assert(buf_reg_chan);
7972aab2
DG
3113 assert(ua_chan);
3114
ec472351 3115 /* Duplicating a channel requires 1 new fd. Reserve it. */
7972aab2
DG
3116 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
3117 if (ret < 0) {
3118 ERR("Exhausted number of available FD upon duplicate channel");
3119 goto error_fd_get;
3120 }
3121
3122 /* Duplicate object for stream once the original is in the registry. */
b623cb6a 3123 ret = lttng_ust_ctl_duplicate_ust_object_data(&ua_chan->obj, buf_reg_chan->obj.ust);
7972aab2
DG
3124 if (ret < 0) {
3125 ERR("Duplicate channel obj from %p to %p failed with ret: %d",
3273699d 3126 buf_reg_chan->obj.ust, ua_chan->obj, ret);
7972aab2
DG
3127 goto error;
3128 }
3129 ua_chan->handle = ua_chan->obj->handle;
3130
3131 return 0;
3132
3133error:
3134 lttng_fd_put(LTTNG_FD_APPS, 1);
3135error_fd_get:
3136 return ret;
3137}
3138
3139/*
3140 * For a given channel buffer registry, setup all streams of the given ust
3141 * application channel.
3142 *
3143 * Return 0 on success or else a negative value.
3144 */
3273699d 3145static int setup_buffer_reg_streams(struct buffer_reg_channel *buf_reg_chan,
fb45065e
MD
3146 struct ust_app_channel *ua_chan,
3147 struct ust_app *app)
7972aab2
DG
3148{
3149 int ret = 0;
3150 struct ust_app_stream *stream, *stmp;
3151
3273699d 3152 assert(buf_reg_chan);
7972aab2
DG
3153 assert(ua_chan);
3154
3155 DBG2("UST app setup buffer registry stream");
3156
3157 /* Send all streams to application. */
3158 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
3159 struct buffer_reg_stream *reg_stream;
3160
3161 ret = buffer_reg_stream_create(&reg_stream);
3162 if (ret < 0) {
3163 goto error;
3164 }
3165
3166 /*
3167 * Keep original pointer and nullify it in the stream so the delete
3168 * stream call does not release the object.
3169 */
3170 reg_stream->obj.ust = stream->obj;
3171 stream->obj = NULL;
3273699d 3172 buffer_reg_stream_add(reg_stream, buf_reg_chan);
421cb601 3173
7972aab2
DG
3174 /* We don't need the streams anymore. */
3175 cds_list_del(&stream->list);
fb45065e 3176 delete_ust_app_stream(-1, stream, app);
7972aab2 3177 }
421cb601 3178
7972aab2
DG
3179error:
3180 return ret;
3181}
3182
3183/*
3184 * Create a buffer registry channel for the given session registry and
3185 * application channel object. If regp pointer is valid, it's set with the
3186 * created object. Important, the created object is NOT added to the session
3187 * registry hash table.
3188 *
3189 * Return 0 on success else a negative value.
3190 */
3191static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
3192 struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp)
3193{
3194 int ret;
3273699d 3195 struct buffer_reg_channel *buf_reg_chan = NULL;
7972aab2
DG
3196
3197 assert(reg_sess);
3198 assert(ua_chan);
3199
3200 DBG2("UST app creating buffer registry channel for %s", ua_chan->name);
3201
3202 /* Create buffer registry channel. */
3273699d 3203 ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &buf_reg_chan);
7972aab2
DG
3204 if (ret < 0) {
3205 goto error_create;
421cb601 3206 }
3273699d
FD
3207 assert(buf_reg_chan);
3208 buf_reg_chan->consumer_key = ua_chan->key;
3209 buf_reg_chan->subbuf_size = ua_chan->attr.subbuf_size;
3210 buf_reg_chan->num_subbuf = ua_chan->attr.num_subbuf;
421cb601 3211
7972aab2
DG
3212 /* Create and add a channel registry to session. */
3213 ret = ust_registry_channel_add(reg_sess->reg.ust,
3214 ua_chan->tracing_channel_id);
3215 if (ret < 0) {
3216 goto error;
d88aee68 3217 }
3273699d 3218 buffer_reg_channel_add(reg_sess, buf_reg_chan);
d88aee68 3219
7972aab2 3220 if (regp) {
3273699d 3221 *regp = buf_reg_chan;
3d8ca23b 3222 }
d88aee68 3223
7972aab2 3224 return 0;
3d8ca23b
DG
3225
3226error:
7972aab2 3227 /* Safe because the registry channel object was not added to any HT. */
3273699d 3228 buffer_reg_channel_destroy(buf_reg_chan, LTTNG_DOMAIN_UST);
7972aab2 3229error_create:
3d8ca23b 3230 return ret;
421cb601
DG
3231}
3232
55cc08a6 3233/*
7972aab2
DG
3234 * Setup buffer registry channel for the given session registry and application
3235  * channel object. The application channel's streams and object are moved into the registry channel.
d0b96690 3236 *
7972aab2 3237 * Return 0 on success else a negative value.
55cc08a6 3238 */
7972aab2 3239static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
3273699d 3240 struct ust_app_channel *ua_chan, struct buffer_reg_channel *buf_reg_chan,
fb45065e 3241 struct ust_app *app)
55cc08a6 3242{
7972aab2 3243 int ret;
55cc08a6 3244
7972aab2 3245 assert(reg_sess);
3273699d 3246 assert(buf_reg_chan);
7972aab2
DG
3247 assert(ua_chan);
3248 assert(ua_chan->obj);
55cc08a6 3249
7972aab2 3250 DBG2("UST app setup buffer registry channel for %s", ua_chan->name);
55cc08a6 3251
7972aab2 3252 /* Setup all streams for the registry. */
3273699d 3253 ret = setup_buffer_reg_streams(buf_reg_chan, ua_chan, app);
7972aab2 3254 if (ret < 0) {
55cc08a6
DG
3255 goto error;
3256 }
3257
3273699d 3258 buf_reg_chan->obj.ust = ua_chan->obj;
7972aab2 3259 ua_chan->obj = NULL;
55cc08a6 3260
7972aab2 3261 return 0;
55cc08a6
DG
3262
3263error:
3273699d
FD
3264 buffer_reg_channel_remove(reg_sess, buf_reg_chan);
3265 buffer_reg_channel_destroy(buf_reg_chan, LTTNG_DOMAIN_UST);
55cc08a6
DG
3266 return ret;
3267}
3268
edb67388 3269/*
7972aab2 3270 * Send buffer registry channel to the application.
d0b96690 3271 *
7972aab2 3272 * Return 0 on success else a negative value.
edb67388 3273 */
3273699d 3274static int send_channel_uid_to_ust(struct buffer_reg_channel *buf_reg_chan,
7972aab2
DG
3275 struct ust_app *app, struct ust_app_session *ua_sess,
3276 struct ust_app_channel *ua_chan)
edb67388
DG
3277{
3278 int ret;
7972aab2 3279 struct buffer_reg_stream *reg_stream;
edb67388 3280
3273699d 3281 assert(buf_reg_chan);
7972aab2
DG
3282 assert(app);
3283 assert(ua_sess);
3284 assert(ua_chan);
3285
3286 DBG("UST app sending buffer registry channel to ust sock %d", app->sock);
3287
3273699d 3288 ret = duplicate_channel_object(buf_reg_chan, ua_chan);
edb67388
DG
3289 if (ret < 0) {
3290 goto error;
3291 }
3292
7972aab2
DG
3293 /* Send channel to the application. */
3294 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
a7169585
MD
3295 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
3296 ret = -ENOTCONN; /* Caused by app exiting. */
3297 goto error;
569744c5
JR
3298 } else if (ret == -EAGAIN) {
3299 /* Caused by timeout. */
3300 WARN("Communication with application %d timed out on send_channel for channel \"%s\" of session \"%" PRIu64 "\".",
3301 app->pid, ua_chan->name, ua_sess->tracing_id);
3302 /* Treat this the same way as an application that is exiting. */
3303 ret = -ENOTCONN;
3304 goto error;
a7169585 3305 } else if (ret < 0) {
7972aab2
DG
3306 goto error;
3307 }
3308
3309 health_code_update();
3310
3311 /* Send all streams to application. */
3273699d
FD
3312 pthread_mutex_lock(&buf_reg_chan->stream_list_lock);
3313 cds_list_for_each_entry(reg_stream, &buf_reg_chan->streams, lnode) {
491d2deb 3314 struct ust_app_stream stream = {};
7972aab2
DG
3315
3316 ret = duplicate_stream_object(reg_stream, &stream);
3317 if (ret < 0) {
3318 goto error_stream_unlock;
3319 }
3320
3321 ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
3322 if (ret < 0) {
a7169585
MD
3323 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
3324 ret = -ENOTCONN; /* Caused by app exiting. */
569744c5
JR
3325 } else if (ret == -EAGAIN) {
3326 /*
3327 * Caused by timeout.
3328 * Treat this the same way as an application
3329 * that is exiting.
3330 */
491d2deb
JG
3331 WARN("Communication with application %d timed out on send_stream for stream of channel \"%s\" of session \"%" PRIu64 "\".",
3332 app->pid,
569744c5
JR
3333 ua_chan->name,
3334 ua_sess->tracing_id);
3335 ret = -ENOTCONN;
a7169585 3336 }
569744c5 3337 (void) release_ust_app_stream(-1, &stream, app);
7972aab2
DG
3338 goto error_stream_unlock;
3339 }
edb67388 3340
7972aab2
DG
3341 /*
3342 * The return value is not important here. This function will output an
3343 * error if needed.
3344 */
fb45065e 3345 (void) release_ust_app_stream(-1, &stream, app);
7972aab2
DG
3346 }
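	/* Every stream was sent to the application; mark the channel as sent. */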
3347 ua_chan->is_sent = 1;
3348
3349error_stream_unlock:
3273699d 3350 pthread_mutex_unlock(&buf_reg_chan->stream_list_lock);
edb67388
DG
3351error:
3352 return ret;
3353}
3354
9730260e 3355/*
7972aab2
DG
3356 * Create the per-UID channel buffers and send them to the application.
3357 *
9acdc1d6 3358 * This MUST be called with a RCU read side lock acquired.
71e0a100 3359 * The session list lock and the session's lock must be acquired.
9acdc1d6 3360 *
7972aab2 3361 * Return 0 on success else a negative value.
9730260e 3362 */
7972aab2
DG
3363static int create_channel_per_uid(struct ust_app *app,
3364 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
3365 struct ust_app_channel *ua_chan)
9730260e
DG
3366{
3367 int ret;
7972aab2 3368 struct buffer_reg_uid *reg_uid;
3273699d 3369 struct buffer_reg_channel *buf_reg_chan;
e32d7f27 3370 struct ltt_session *session = NULL;
e098433c 3371 enum lttng_error_code notification_ret;
3273699d 3372 struct ust_registry_channel *ust_reg_chan;
9730260e 3373
7972aab2
DG
3374 assert(app);
3375 assert(usess);
3376 assert(ua_sess);
3377 assert(ua_chan);
3378
3379 DBG("UST app creating channel %s with per UID buffers", ua_chan->name);
3380
3381 reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
3382 /*
3383 * The session creation handles the creation of this global registry
3384 * object. If none can be found, there is a code flow problem or a
3385 * teardown race.
3386 */
3387 assert(reg_uid);
3388
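	/*
	 * Per-UID buffers are shared by all applications with the same UID and
	 * bitness: if the registry channel already exists, skip its creation and
	 * only send the existing buffers to this application.
	 */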
3273699d 3389 buf_reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
7972aab2 3390 reg_uid);
3273699d 3391 if (buf_reg_chan) {
2721f7ea
JG
3392 goto send_channel;
3393 }
7972aab2 3394
2721f7ea 3395 /* Create the buffer registry channel object. */
3273699d 3396 ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &buf_reg_chan);
2721f7ea
JG
3397 if (ret < 0) {
3398 ERR("Error creating the UST channel \"%s\" registry instance",
f14256d6 3399 ua_chan->name);
2721f7ea
JG
3400 goto error;
3401 }
f14256d6 3402
e098433c
JG
3403 session = session_find_by_id(ua_sess->tracing_id);
3404 assert(session);
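	/*
	 * pthread_mutex_trylock() returns non-zero when the mutex is already
	 * locked, so these asserts verify that the caller holds both the session
	 * list lock and the session's lock.
	 */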
3405 assert(pthread_mutex_trylock(&session->lock));
3406 assert(session_trylock_list());
3407
2721f7ea
JG
3408 /*
3409 * Create the buffers on the consumer side. This call populates the
3410 * ust app channel object with all streams and data object.
3411 */
3412 ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
e098433c 3413 app->bits_per_long, reg_uid->registry->reg.ust,
d2956687 3414 session->most_recent_chunk_id.value);
2721f7ea
JG
3415 if (ret < 0) {
3416 ERR("Error creating UST channel \"%s\" on the consumer daemon",
3417 ua_chan->name);
7972aab2
DG
3418
3419 /*
2721f7ea
JG
3420 * Let's remove the previously created buffer registry channel so
3421 * it's not visible anymore in the session registry.
7972aab2 3422 */
2721f7ea
JG
3423 ust_registry_channel_del_free(reg_uid->registry->reg.ust,
3424 ua_chan->tracing_channel_id, false);
3273699d
FD
3425 buffer_reg_channel_remove(reg_uid->registry, buf_reg_chan);
3426 buffer_reg_channel_destroy(buf_reg_chan, LTTNG_DOMAIN_UST);
2721f7ea 3427 goto error;
7972aab2
DG
3428 }
3429
2721f7ea
JG
3430 /*
3431 * Set up the streams and add them to the session registry.
3432 */
3433 ret = setup_buffer_reg_channel(reg_uid->registry,
3273699d 3434 ua_chan, buf_reg_chan, app);
2721f7ea
JG
3435 if (ret < 0) {
3436 ERR("Error setting up UST channel \"%s\"", ua_chan->name);
3437 goto error;
3438 }
3439
e098433c
JG
3440 /* Notify the notification subsystem of the channel's creation. */
3441 pthread_mutex_lock(&reg_uid->registry->reg.ust->lock);
3273699d 3442 ust_reg_chan = ust_registry_channel_find(reg_uid->registry->reg.ust,
e098433c 3443 ua_chan->tracing_channel_id);
3273699d
FD
3444 assert(ust_reg_chan);
3445 ust_reg_chan->consumer_key = ua_chan->key;
3446 ust_reg_chan = NULL;
e098433c 3447 pthread_mutex_unlock(&reg_uid->registry->reg.ust->lock);
e9404c27 3448
e098433c 3449 notification_ret = notification_thread_command_add_channel(
412d7227
SM
3450 the_notification_thread_handle, session->name,
3451 lttng_credentials_get_uid(
3452 &ua_sess->effective_credentials),
3453 lttng_credentials_get_gid(
3454 &ua_sess->effective_credentials),
3455 ua_chan->name, ua_chan->key, LTTNG_DOMAIN_UST,
e098433c
JG
3456 ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
3457 if (notification_ret != LTTNG_OK) {
3458 ret = - (int) notification_ret;
3459 ERR("Failed to add channel to notification thread");
3460 goto error;
e9404c27
JG
3461 }
3462
2721f7ea 3463send_channel:
66ff8e3f 3464 /* Send buffers to the application. */
3273699d 3465 ret = send_channel_uid_to_ust(buf_reg_chan, app, ua_sess, ua_chan);
66ff8e3f
JG
3466 if (ret < 0) {
3467 if (ret != -ENOTCONN) {
3468 ERR("Error sending channel to application");
3469 }
3470 goto error;
3471 }
3472
9730260e 3473error:
e32d7f27
JG
3474 if (session) {
3475 session_put(session);
3476 }
9730260e
DG
3477 return ret;
3478}
3479
78f0bacd 3480/*
7972aab2
DG
3481 * Create the per-PID channel buffers and send them to the application.
3482 *
fad1ed2f 3483 * Called with UST app session lock held.
71e0a100 3484 * The session list lock and the session's lock must be acquired.
fad1ed2f 3485 *
7972aab2 3486 * Return 0 on success else a negative value.
78f0bacd 3487 */
7972aab2
DG
3488static int create_channel_per_pid(struct ust_app *app,
3489 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
3490 struct ust_app_channel *ua_chan)
78f0bacd 3491{
8535a6d9 3492 int ret;
7972aab2 3493 struct ust_registry_session *registry;
e9404c27 3494 enum lttng_error_code cmd_ret;
e32d7f27 3495 struct ltt_session *session = NULL;
e9404c27 3496 uint64_t chan_reg_key;
3273699d 3497 struct ust_registry_channel *ust_reg_chan;
78f0bacd 3498
7972aab2
DG
3499 assert(app);
3500 assert(usess);
3501 assert(ua_sess);
3502 assert(ua_chan);
3503
3504 DBG("UST app creating channel %s with per PID buffers", ua_chan->name);
3505
3506 rcu_read_lock();
3507
3508 registry = get_session_registry(ua_sess);
fad1ed2f 3509 /* The UST app session lock is held, registry shall not be null. */
7972aab2
DG
3510 assert(registry);
3511
3512 /* Create and add a new channel registry to session. */
3513 ret = ust_registry_channel_add(registry, ua_chan->key);
78f0bacd 3514 if (ret < 0) {
f14256d6
MD
3515 ERR("Error creating the UST channel \"%s\" registry instance",
3516 ua_chan->name);
78f0bacd
DG
3517 goto error;
3518 }
3519
e098433c
JG
3520 session = session_find_by_id(ua_sess->tracing_id);
3521 assert(session);
3522
3523 assert(pthread_mutex_trylock(&session->lock));
3524 assert(session_trylock_list());
3525
7972aab2
DG
3526 /* Create and get channel on the consumer side. */
3527 ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
e098433c 3528 app->bits_per_long, registry,
d2956687 3529 session->most_recent_chunk_id.value);
7972aab2 3530 if (ret < 0) {
f14256d6
MD
3531 ERR("Error creating UST channel \"%s\" on the consumer daemon",
3532 ua_chan->name);
5b951542 3533 goto error_remove_from_registry;
7972aab2
DG
3534 }
3535
3536 ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
3537 if (ret < 0) {
a7169585
MD
3538 if (ret != -ENOTCONN) {
3539 ERR("Error sending channel to application");
3540 }
5b951542 3541 goto error_remove_from_registry;
7972aab2 3542 }
8535a6d9 3543
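	/* Notify the notification subsystem of the channel's creation. */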
e9404c27
JG
3544 chan_reg_key = ua_chan->key;
3545 pthread_mutex_lock(&registry->lock);
3273699d
FD
3546 ust_reg_chan = ust_registry_channel_find(registry, chan_reg_key);
3547 assert(ust_reg_chan);
3548 ust_reg_chan->consumer_key = ua_chan->key;
e9404c27
JG
3549 pthread_mutex_unlock(&registry->lock);
3550
3551 cmd_ret = notification_thread_command_add_channel(
412d7227
SM
3552 the_notification_thread_handle, session->name,
3553 lttng_credentials_get_uid(
3554 &ua_sess->effective_credentials),
3555 lttng_credentials_get_gid(
3556 &ua_sess->effective_credentials),
3557 ua_chan->name, ua_chan->key, LTTNG_DOMAIN_UST,
e9404c27
JG
3558 ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
3559 if (cmd_ret != LTTNG_OK) {
3560 ret = - (int) cmd_ret;
3561 ERR("Failed to add channel to notification thread");
5b951542 3562 goto error_remove_from_registry;
e9404c27
JG
3563 }
3564
5b951542
MD
3565error_remove_from_registry:
3566 if (ret) {
3567 ust_registry_channel_del_free(registry, ua_chan->key, false);
3568 }
78f0bacd 3569error:
7972aab2 3570 rcu_read_unlock();
e32d7f27
JG
3571 if (session) {
3572 session_put(session);
3573 }
78f0bacd
DG
3574 return ret;
3575}
3576
3577/*
7972aab2 3578 * From an already allocated ust app channel, create the channel buffers if
88e3c2f5 3579 * needed and send them to the application. This MUST be called with a RCU read
7972aab2
DG
3580 * side lock acquired.
3581 *
fad1ed2f
JR
3582 * Called with UST app session lock held.
3583 *
a7169585
MD
3584 * Return 0 on success or else a negative value. Returns -ENOTCONN if
3585 * the application exited concurrently.
78f0bacd 3586 */
88e3c2f5 3587static int ust_app_channel_send(struct ust_app *app,
7972aab2
DG
3588 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
3589 struct ust_app_channel *ua_chan)
78f0bacd 3590{
7972aab2 3591 int ret;
78f0bacd 3592
7972aab2
DG
3593 assert(app);
3594 assert(usess);
88e3c2f5 3595 assert(usess->active);
7972aab2
DG
3596 assert(ua_sess);
3597 assert(ua_chan);
3598
3599 /* Handle buffer type before sending the channel to the application. */
3600 switch (usess->buffer_type) {
3601 case LTTNG_BUFFER_PER_UID:
3602 {
3603 ret = create_channel_per_uid(app, usess, ua_sess, ua_chan);
3604 if (ret < 0) {
3605 goto error;
3606 }
3607 break;
3608 }
3609 case LTTNG_BUFFER_PER_PID:
3610 {
3611 ret = create_channel_per_pid(app, usess, ua_sess, ua_chan);
3612 if (ret < 0) {
3613 goto error;
3614 }
3615 break;
3616 }
3617 default:
3618 assert(0);
3619 ret = -EINVAL;
78f0bacd
DG
3620 goto error;
3621 }
3622
7972aab2
DG
3623 /* Initialize ust objd object using the received handle and add it. */
3624 lttng_ht_node_init_ulong(&ua_chan->ust_objd_node, ua_chan->handle);
3625 lttng_ht_add_unique_ulong(app->ust_objd, &ua_chan->ust_objd_node);
78f0bacd 3626
7972aab2
DG
3627 /* If channel is not enabled, disable it on the tracer */
3628 if (!ua_chan->enabled) {
3629 ret = disable_ust_channel(app, ua_sess, ua_chan);
3630 if (ret < 0) {
3631 goto error;
3632 }
78f0bacd
DG
3633 }
3634
3635error:
3636 return ret;
3637}
3638
284d8f55 3639/*
88e3c2f5 3640 * Create UST app channel and return it through ua_chanp if not NULL.
d0b96690 3641 *
36b588ed 3642 * Called with UST app session lock and RCU read-side lock held.
7972aab2 3643 *
88e3c2f5 3644 * Return 0 on success or else a negative value.
284d8f55 3645 */
88e3c2f5
JG
3646static int ust_app_channel_allocate(struct ust_app_session *ua_sess,
3647 struct ltt_ust_channel *uchan,
fc4b93fa 3648 enum lttng_ust_abi_chan_type type, struct ltt_ust_session *usess,
4d710ac2 3649 struct ust_app_channel **ua_chanp)
5b4a0ec0
DG
3650{
3651 int ret = 0;
bec39940
DG
3652 struct lttng_ht_iter iter;
3653 struct lttng_ht_node_str *ua_chan_node;
5b4a0ec0
DG
3654 struct ust_app_channel *ua_chan;
3655
3656 /* Lookup channel in the ust app session */
bec39940
DG
3657 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
3658 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
fc34caaa 3659 if (ua_chan_node != NULL) {
5b4a0ec0 3660 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
fc34caaa 3661 goto end;
5b4a0ec0
DG
3662 }
3663
d0b96690 3664 ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
fc34caaa
DG
3665 if (ua_chan == NULL) {
3666 /* Only malloc can fail here */
4d710ac2 3667 ret = -ENOMEM;
88e3c2f5 3668 goto error;
fc34caaa
DG
3669 }
3670 shadow_copy_channel(ua_chan, uchan);
3671
ffe60014
DG
3672 /* Set channel type. */
3673 ua_chan->attr.type = type;
3674
d0b96690
DG
3675 /* Only add the channel if successful on the tracer side. */
3676 lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
fc34caaa 3677end:
4d710ac2
DG
3678 if (ua_chanp) {
3679 *ua_chanp = ua_chan;
3680 }
3681
3682 /* Everything went well. */
3683 return 0;
5b4a0ec0
DG
3684
3685error:
4d710ac2 3686 return ret;
5b4a0ec0
DG
3687}
3688
3689/*
3690 * Create UST app event and create it on the tracer side.
d0b96690 3691 *
993578ff 3692 * Must be called with the RCU read side lock held.
d0b96690 3693 * Called with ust app session mutex held.
5b4a0ec0 3694 */
edb67388
DG
3695static
3696int create_ust_app_event(struct ust_app_session *ua_sess,
3697 struct ust_app_channel *ua_chan, struct ltt_ust_event *uevent,
3698 struct ust_app *app)
284d8f55 3699{
edb67388 3700 int ret = 0;
5b4a0ec0 3701 struct ust_app_event *ua_event;
284d8f55 3702
edb67388
DG
3703 ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
3704 if (ua_event == NULL) {
20533947 3705 /* Only failure mode of alloc_ust_app_event(). */
edb67388 3706 ret = -ENOMEM;
fc34caaa 3707 goto end;
5b4a0ec0 3708 }
edb67388 3709 shadow_copy_event(ua_event, uevent);
5b4a0ec0 3710
edb67388 3711 /* Create it on the tracer side */
5b4a0ec0 3712 ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
284d8f55 3713 if (ret < 0) {
e9f11505
JG
3714 /*
3715 * Not found previously means that it does not exist on the
3716 * tracer. If the application reports that the event existed,
3717 * it means there is a bug in the sessiond or lttng-ust
3718 * (or corruption, etc.)
3719 */
3720 if (ret == -LTTNG_UST_ERR_EXIST) {
3721 ERR("Tracer for application reported that an event being created already existed: "
3722 "event_name = \"%s\", pid = %d, ppid = %d, uid = %d, gid = %d",
3723 uevent->attr.name,
3724 app->pid, app->ppid, app->uid,
3725 app->gid);
3726 }
284d8f55
DG
3727 goto error;
3728 }
3729
d0b96690 3730 add_unique_ust_app_event(ua_chan, ua_event);
284d8f55 3731
569744c5
JR
3732 DBG2("UST app create event completed: app = '%s' pid = %d",
3733 app->name, app->pid);
7f79d3a1 3734
edb67388 3735end:
fc34caaa
DG
3736 return ret;
3737
5b4a0ec0 3738error:
fc34caaa 3739 /* Valid: the caller already holds the RCU read side lock. */
fb45065e 3740 delete_ust_app_event(-1, ua_event, app);
edb67388 3741 return ret;
5b4a0ec0
DG
3742}
3743
993578ff
JR
3744/*
3745 * Create UST app event notifier rule and create it on the tracer side.
3746 *
3747 * Must be called with the RCU read side lock held.
3748 * Called with ust app session mutex held.
3749 */
3750static
267d66aa
JR
3751int create_ust_app_event_notifier_rule(struct lttng_trigger *trigger,
3752 struct ust_app *app)
993578ff
JR
3753{
3754 int ret = 0;
3755 struct ust_app_event_notifier_rule *ua_event_notifier_rule;
3756
267d66aa 3757 ua_event_notifier_rule = alloc_ust_app_event_notifier_rule(trigger);
993578ff
JR
3758 if (ua_event_notifier_rule == NULL) {
3759 ret = -ENOMEM;
3760 goto end;
3761 }
3762
3763 /* Create it on the tracer side. */
3764 ret = create_ust_event_notifier(app, ua_event_notifier_rule);
3765 if (ret < 0) {
3766 /*
3767 * Not found previously means that it does not exist on the
3768 * tracer. If the application reports that the event existed,
3769 * it means there is a bug in the sessiond or lttng-ust
3770 * (or corruption, etc.)
3771 */
3772 if (ret == -LTTNG_UST_ERR_EXIST) {
3773 ERR("Tracer for application reported that an event notifier being created already exists: "
3774 "token = \"%" PRIu64 "\", pid = %d, ppid = %d, uid = %d, gid = %d",
267d66aa 3775 lttng_trigger_get_tracer_token(trigger),
993578ff
JR
3776 app->pid, app->ppid, app->uid,
3777 app->gid);
3778 }
3779 goto error;
3780 }
3781
3782 lttng_ht_add_unique_u64(app->token_to_event_notifier_rule_ht,
3783 &ua_event_notifier_rule->node);
3784
569744c5
JR
3785 DBG2("UST app create token event rule completed: app = '%s', pid = %d), token = %" PRIu64,
3786 app->name, app->pid, lttng_trigger_get_tracer_token(trigger));
993578ff 3787
533a90fb 3788 goto end;
993578ff
JR
3789
3790error:
3791 /* The RCU read side lock is already being held by the caller. */
3792 delete_ust_app_event_notifier_rule(-1, ua_event_notifier_rule, app);
533a90fb 3793end:
993578ff
JR
3794 return ret;
3795}
3796
5b4a0ec0
DG
3797/*
3798 * Create UST metadata and open it on the tracer side.
d0b96690 3799 *
7972aab2 3800 * Called with UST app session lock held and RCU read side lock.
5b4a0ec0
DG
3801 */
3802static int create_ust_app_metadata(struct ust_app_session *ua_sess,
ad7a9107 3803 struct ust_app *app, struct consumer_output *consumer)
5b4a0ec0
DG
3804{
3805 int ret = 0;
ffe60014 3806 struct ust_app_channel *metadata;
d88aee68 3807 struct consumer_socket *socket;
7972aab2 3808 struct ust_registry_session *registry;
e32d7f27 3809 struct ltt_session *session = NULL;
5b4a0ec0 3810
ffe60014
DG
3811 assert(ua_sess);
3812 assert(app);
d88aee68 3813 assert(consumer);
5b4a0ec0 3814
7972aab2 3815 registry = get_session_registry(ua_sess);
fad1ed2f 3816 /* The UST app session lock is held; the registry shall not be null. */
7972aab2
DG
3817 assert(registry);
3818
ce34fcd0
MD
3819 pthread_mutex_lock(&registry->lock);
3820
1b532a60
DG
3821 /* Metadata already exists for this registry or it was closed previously */
3822 if (registry->metadata_key || registry->metadata_closed) {
7972aab2
DG
3823 ret = 0;
3824 goto error;
5b4a0ec0
DG
3825 }
3826
ffe60014 3827 /* Allocate UST metadata */
d0b96690 3828 metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, NULL);
ffe60014
DG
3829 if (!metadata) {
3830 /* malloc() failed */
3831 ret = -ENOMEM;
3832 goto error;
3833 }
5b4a0ec0 3834
ad7a9107 3835 memcpy(&metadata->attr, &ua_sess->metadata_attr, sizeof(metadata->attr));
5b4a0ec0 3836
7972aab2
DG
3837 /* Need one fd for the channel. */
3838 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
3839 if (ret < 0) {
3840 ERR("Exhausted number of available FD upon create metadata");
3841 goto error;
3842 }
3843
4dc3dfc5
DG
3844 /* Get the right consumer socket for the application. */
3845 socket = consumer_find_socket_by_bitness(app->bits_per_long, consumer);
3846 if (!socket) {
3847 ret = -EINVAL;
3848 goto error_consumer;
3849 }
3850
331744e3
JD
3851 /*
3852 * Keep metadata key so we can identify it on the consumer side. Assign it
3853 * to the registry *before* we ask the consumer so we avoid the race where the
3854 * consumer requests the metadata before the ask_channel call on our side
3855 * has returned.
3856 */
3857 registry->metadata_key = metadata->key;
3858
e098433c
JG
3859 session = session_find_by_id(ua_sess->tracing_id);
3860 assert(session);
3861
3862 assert(pthread_mutex_trylock(&session->lock));
3863 assert(session_trylock_list());
3864
d88aee68
DG
3865 /*
3866 * Ask the consumer to create the metadata channel. The metadata object
3867 * will be created by the consumer and kept there. However, the stream is
3868 * never added or monitored until we do a first push metadata to the
3869 * consumer.
3870 */
7972aab2 3871 ret = ust_consumer_ask_channel(ua_sess, metadata, consumer, socket,
d2956687 3872 registry, session->current_trace_chunk);
d88aee68 3873 if (ret < 0) {
f2a444f1
DG
3874 /* Nullify the metadata key so we don't try to close it later on. */
3875 registry->metadata_key = 0;
d88aee68
DG
3876 goto error_consumer;
3877 }
3878
3879 /*
3880 * The setup command will make the metadata stream be sent to the relayd,
3881 * if applicable, and to the thread managing the metadata. This is important
3882 * because after this point, if an error occurs, the only way the stream
3883 * can be deleted is for it to be monitored by the consumer.
3884 */
7972aab2 3885 ret = consumer_setup_metadata(socket, metadata->key);
ffe60014 3886 if (ret < 0) {
f2a444f1
DG
3887 /* Nullify the metadata key so we don't try to close it later on. */
3888 registry->metadata_key = 0;
d88aee68 3889 goto error_consumer;
5b4a0ec0
DG
3890 }
3891
7972aab2
DG
3892 DBG2("UST metadata with key %" PRIu64 " created for app pid %d",
3893 metadata->key, app->pid);
5b4a0ec0 3894
d88aee68 3895error_consumer:
b80f0b6c 3896 lttng_fd_put(LTTNG_FD_APPS, 1);
d88aee68 3897 delete_ust_app_channel(-1, metadata, app);
5b4a0ec0 3898error:
ce34fcd0 3899 pthread_mutex_unlock(&registry->lock);
e32d7f27
JG
3900 if (session) {
3901 session_put(session);
3902 }
ffe60014 3903 return ret;
5b4a0ec0
DG
3904}
3905
5b4a0ec0 3906/*
d88aee68
DG
3907 * Return ust app pointer or NULL if not found. RCU read side lock MUST be
3908 * acquired before calling this function.
5b4a0ec0
DG
3909 */
3910struct ust_app *ust_app_find_by_pid(pid_t pid)
3911{
d88aee68 3912 struct ust_app *app = NULL;
bec39940
DG
3913 struct lttng_ht_node_ulong *node;
3914 struct lttng_ht_iter iter;
5b4a0ec0 3915
bec39940
DG
3916 lttng_ht_lookup(ust_app_ht, (void *)((unsigned long) pid), &iter);
3917 node = lttng_ht_iter_get_node_ulong(&iter);
5b4a0ec0
DG
3918 if (node == NULL) {
3919 DBG2("UST app no found with pid %d", pid);
3920 goto error;
3921 }
5b4a0ec0
DG
3922
3923 DBG2("Found UST app by pid %d", pid);
3924
d88aee68 3925 app = caa_container_of(node, struct ust_app, pid_n);
5b4a0ec0
DG
3926
3927error:
d88aee68 3928 return app;
5b4a0ec0
DG
3929}
3930
d88aee68
DG
3931/*
3932 * Allocate and init an UST app object using the registration information and
3933 * the command socket. This is called when the command socket connects to the
3934 * session daemon.
3935 *
3936 * The object is returned on success or else NULL.
3937 */
d0b96690 3938struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
5b4a0ec0 3939{
5e2abfaf 3940 int ret;
d0b96690 3941 struct ust_app *lta = NULL;
da873412 3942 struct lttng_pipe *event_notifier_event_source_pipe = NULL;
d0b96690
DG
3943
3944 assert(msg);
3945 assert(sock >= 0);
3946
3947 DBG3("UST app creating application for socket %d", sock);
5b4a0ec0 3948
173af62f 3949 if ((msg->bits_per_long == 64 &&
412d7227
SM
3950 (uatomic_read(&the_ust_consumerd64_fd) ==
3951 -EINVAL)) ||
3952 (msg->bits_per_long == 32 &&
3953 (uatomic_read(&the_ust_consumerd32_fd) ==
3954 -EINVAL))) {
f943b0fb 3955 ERR("Registration failed: application \"%s\" (pid: %d) has "
d0b96690
DG
3956 "%d-bit long, but no consumerd for this size is available.\n",
3957 msg->name, msg->pid, msg->bits_per_long);
3958 goto error;
3f2c5fcc 3959 }
d0b96690 3960
5e2abfaf
JG
3961 /*
3962 * Reserve the two file descriptors of the event source pipe. The write
3963 * end will be closed once it is passed to the application, at which
3964 * point a single 'put' will be performed.
3965 */
3966 ret = lttng_fd_get(LTTNG_FD_APPS, 2);
3967 if (ret) {
569744c5
JR
3968 ERR("Failed to reserve two file descriptors for the event source pipe while creating a new application instance: app = '%s', pid = %d",
3969 msg->name, (int) msg->pid);
5e2abfaf
JG
3970 goto error;
3971 }
3972
da873412
JR
3973 event_notifier_event_source_pipe = lttng_pipe_open(FD_CLOEXEC);
3974 if (!event_notifier_event_source_pipe) {
569744c5
JR
3975 PERROR("Failed to open application event source pipe: '%s' (pid = %d)",
3976 msg->name, msg->pid);
da873412
JR
3977 goto error;
3978 }
3979
5b4a0ec0
DG
3980 lta = zmalloc(sizeof(struct ust_app));
3981 if (lta == NULL) {
3982 PERROR("malloc");
da873412 3983 goto error_free_pipe;
5b4a0ec0
DG
3984 }
3985
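	/* The initial reference is released when the application's command socket is closed (see ust_app_unregister_by_socket()). */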
3faa1e3d
JG
3986 urcu_ref_init(&lta->ref);
3987
da873412
JR
3988 lta->event_notifier_group.event_pipe = event_notifier_event_source_pipe;
3989
5b4a0ec0
DG
3990 lta->ppid = msg->ppid;
3991 lta->uid = msg->uid;
3992 lta->gid = msg->gid;
d0b96690 3993
7753dea8 3994 lta->bits_per_long = msg->bits_per_long;
d0b96690
DG
3995 lta->uint8_t_alignment = msg->uint8_t_alignment;
3996 lta->uint16_t_alignment = msg->uint16_t_alignment;
3997 lta->uint32_t_alignment = msg->uint32_t_alignment;
3998 lta->uint64_t_alignment = msg->uint64_t_alignment;
3999 lta->long_alignment = msg->long_alignment;
4000 lta->byte_order = msg->byte_order;
4001
5b4a0ec0
DG
4002 lta->v_major = msg->major;
4003 lta->v_minor = msg->minor;
d9bf3ca4 4004 lta->sessions = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
d0b96690 4005 lta->ust_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
10b56aef 4006 lta->ust_sessions_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
d0b96690 4007 lta->notify_sock = -1;
993578ff 4008 lta->token_to_event_notifier_rule_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
d88aee68
DG
4009
4010 /* Copy name and make sure it's NULL terminated. */
4011 strncpy(lta->name, msg->name, sizeof(lta->name));
4012 lta->name[UST_APP_PROCNAME_LEN] = '\0';
4013
4014 /*
4015 * Before this can be called, when receiving the registration information,
4016 * the application compatibility is checked. So, at this point, the
4017 * application can work with this session daemon.
4018 */
d0b96690 4019 lta->compatible = 1;
5b4a0ec0 4020
852d0037 4021 lta->pid = msg->pid;
d0b96690 4022 lttng_ht_node_init_ulong(&lta->pid_n, (unsigned long) lta->pid);
852d0037 4023 lta->sock = sock;
fb45065e 4024 pthread_mutex_init(&lta->sock_lock, NULL);
d0b96690 4025 lttng_ht_node_init_ulong(&lta->sock_n, (unsigned long) lta->sock);
5b4a0ec0 4026
d42f20df 4027 CDS_INIT_LIST_HEAD(&lta->teardown_head);
d0b96690 4028 return lta;
da873412
JR
4029
4030error_free_pipe:
4031 lttng_pipe_destroy(event_notifier_event_source_pipe);
5e2abfaf 4032 lttng_fd_put(LTTNG_FD_APPS, 2);
da873412
JR
4033error:
4034 return NULL;
d0b96690
DG
4035}
4036
d88aee68
DG
4037/*
4038 * For a given application object, add it to every hash table.
4039 */
d0b96690
DG
4040void ust_app_add(struct ust_app *app)
4041{
4042 assert(app);
4043 assert(app->notify_sock >= 0);
4044
940c4592
JR
4045 app->registration_time = time(NULL);
4046
5b4a0ec0 4047 rcu_read_lock();
852d0037
DG
4048
4049 /*
4050 * On a re-registration, we want to kick out the previous registration of
4051 * that pid
4052 */
d0b96690 4053 lttng_ht_add_replace_ulong(ust_app_ht, &app->pid_n);
852d0037
DG
4054
4055 /*
4056 * The socket _should_ be unique until _we_ call close. So, an add_unique
4057 * for the ust_app_ht_by_sock is used, which will assert if the entry was
4058 * already in the table.
4059 */
d0b96690 4060 lttng_ht_add_unique_ulong(ust_app_ht_by_sock, &app->sock_n);
852d0037 4061
d0b96690
DG
4062 /* Add application to the notify socket hash table. */
4063 lttng_ht_node_init_ulong(&app->notify_sock_n, app->notify_sock);
4064 lttng_ht_add_unique_ulong(ust_app_ht_by_notify_sock, &app->notify_sock_n);
5b4a0ec0 4065
569744c5
JR
4066 DBG("App registered with pid:%d ppid:%d uid:%d gid:%d sock =%d name:%s "
4067 "notify_sock =%d (version %d.%d)", app->pid, app->ppid, app->uid,
d88aee68
DG
4068 app->gid, app->sock, app->name, app->notify_sock, app->v_major,
4069 app->v_minor);
5b4a0ec0 4070
d0b96690
DG
4071 rcu_read_unlock();
4072}
4073
d88aee68
DG
4074/*
4075 * Set the application version into the object.
4076 *
4077 * Return 0 on success else a negative value either an errno code or a
4078 * LTTng-UST error code.
4079 */
d0b96690
DG
4080int ust_app_version(struct ust_app *app)
4081{
d88aee68
DG
4082 int ret;
4083
d0b96690 4084 assert(app);
d88aee68 4085
fb45065e 4086 pthread_mutex_lock(&app->sock_lock);
b623cb6a 4087 ret = lttng_ust_ctl_tracer_version(app->sock, &app->version);
fb45065e 4088 pthread_mutex_unlock(&app->sock_lock);
d88aee68 4089 if (ret < 0) {
569744c5
JR
4090 if (ret == -LTTNG_UST_ERR_EXITING || ret == -EPIPE) {
4091 DBG3("UST app version failed. Application is dead: pid = %d, sock = %d",
4092 app->pid, app->sock);
4093 } else if (ret == -EAGAIN) {
4094 WARN("UST app version failed. Communication time out: pid = %d, sock = %d",
4095 app->pid, app->sock);
d88aee68 4096 } else {
569744c5
JR
4097 ERR("UST app version failed with ret %d: pid = %d, sock = %d",
4098 ret, app->pid, app->sock);
d88aee68
DG
4099 }
4100 }
4101
4102 return ret;
5b4a0ec0
DG
4103}
4104
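/*
 * Event notifiers and counters both require an application ABI major
 * version of at least 9.
 */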
27a3be48
MD
4105bool ust_app_supports_notifiers(const struct ust_app *app)
4106{
4107 return app->v_major >= 9;
4108}
4109
4110bool ust_app_supports_counters(const struct ust_app *app)
4111{
4112 return app->v_major >= 9;
4113}
4114
da873412
JR
4115/*
4116 * Setup the base event notifier group.
4117 *
4118 * Return 0 on success else a negative value either an errno code or a
4119 * LTTng-UST error code.
4120 */
4121int ust_app_setup_event_notifier_group(struct ust_app *app)
4122{
4123 int ret;
4124 int event_pipe_write_fd;
fc4b93fa 4125 struct lttng_ust_abi_object_data *event_notifier_group = NULL;
da873412 4126 enum lttng_error_code lttng_ret;
533a90fb 4127 enum event_notifier_error_accounting_status event_notifier_error_accounting_status;
da873412
JR
4128
4129 assert(app);
4130
27a3be48
MD
4131 if (!ust_app_supports_notifiers(app)) {
4132 ret = -ENOSYS;
4133 goto error;
4134 }
4135
da873412
JR
4136 /* Get the write side of the pipe. */
4137 event_pipe_write_fd = lttng_pipe_get_writefd(
4138 app->event_notifier_group.event_pipe);
4139
4140 pthread_mutex_lock(&app->sock_lock);
b623cb6a 4141 ret = lttng_ust_ctl_create_event_notifier_group(app->sock,
da873412
JR
4142 event_pipe_write_fd, &event_notifier_group);
4143 pthread_mutex_unlock(&app->sock_lock);
4144 if (ret < 0) {
569744c5
JR
4145 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
4146 ret = 0;
4147 DBG3("UST app create event notifier group failed. Application is dead: pid = %d, sock = %d",
4148 app->pid, app->sock);
4149 } else if (ret == -EAGAIN) {
4150 ret = 0;
4151 WARN("UST app create event notifier group failed. Communication time out: pid = %d, sock = %d",
4152 app->pid, app->sock);
da873412 4153 } else {
569744c5
JR
4154 ERR("UST app create event notifier group failed with ret %d: pid = %d, sock = %d, event_pipe_write_fd: %d",
4155 ret, app->pid, app->sock, event_pipe_write_fd);
da873412 4156 }
da873412
JR
4157 goto error;
4158 }
4159
5d4193fd
JG
4160 ret = lttng_pipe_write_close(app->event_notifier_group.event_pipe);
4161 if (ret) {
569744c5
JR
4162 ERR("Failed to close write end of the application's event source pipe: app = '%s' (pid = %d)",
4163 app->name, app->pid);
5d4193fd
JG
4164 goto error;
4165 }
4166
5e2abfaf
JG
4167 /*
4168 * Release the file descriptor that was reserved for the write-end of
4169 * the pipe.
4170 */
4171 lttng_fd_put(LTTNG_FD_APPS, 1);
4172
da873412 4173 lttng_ret = notification_thread_command_add_tracer_event_source(
412d7227
SM
4174 the_notification_thread_handle,
4175 lttng_pipe_get_readfd(
4176 app->event_notifier_group.event_pipe),
da873412
JR
4177 LTTNG_DOMAIN_UST);
4178 if (lttng_ret != LTTNG_OK) {
4179 ERR("Failed to add tracer event source to notification thread");
4180 ret = - 1;
4181 goto error;
4182 }
4183
4184 /* Assign handle only when the complete setup is valid. */
4185 app->event_notifier_group.object = event_notifier_group;
533a90fb 4186
a5a21280
FD
4187 event_notifier_error_accounting_status =
4188 event_notifier_error_accounting_register_app(app);
27a3be48
MD
4189 switch (event_notifier_error_accounting_status) {
4190 case EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK:
4191 break;
4192 case EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_UNSUPPORTED:
569744c5
JR
4193 DBG3("Failed to setup event notifier error accounting (application does not support notifier error accounting): app socket fd = %d, app name = '%s', app pid = %d",
4194 app->sock, app->name, (int) app->pid);
27a3be48
MD
4195 ret = 0;
4196 goto error_accounting;
4197 case EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_APP_DEAD:
569744c5
JR
4198 DBG3("Failed to setup event notifier error accounting (application is dead): app socket fd = %d, app name = '%s', app pid = %d",
4199 app->sock, app->name, (int) app->pid);
27a3be48
MD
4200 ret = 0;
4201 goto error_accounting;
4202 default:
533a90fb
FD
4203 ERR("Failed to setup event notifier error accounting for app");
4204 ret = -1;
cd9c532c 4205 goto error_accounting;
533a90fb
FD
4206 }
4207
da873412
JR
4208 return ret;
4209
cd9c532c
FD
4210error_accounting:
4211 lttng_ret = notification_thread_command_remove_tracer_event_source(
4212 the_notification_thread_handle,
4213 lttng_pipe_get_readfd(
4214 app->event_notifier_group.event_pipe));
4215 if (lttng_ret != LTTNG_OK) {
4216 ERR("Failed to remove application tracer event source from notification thread");
4217 }
4218
da873412 4219error:
b623cb6a 4220 lttng_ust_ctl_release_object(app->sock, app->event_notifier_group.object);
da873412 4221 free(app->event_notifier_group.object);
88631abd 4222 app->event_notifier_group.object = NULL;
da873412
JR
4223 return ret;
4224}
4225
3faa1e3d 4226static void ust_app_unregister(struct ust_app *app)
5b4a0ec0 4227{
3faa1e3d 4228 int ret;
bec39940 4229 struct lttng_ht_iter iter;
d42f20df 4230 struct ust_app_session *ua_sess;
5b4a0ec0
DG
4231
4232 rcu_read_lock();
886459c6 4233
d88aee68 4234 /*
ce34fcd0
MD
4235 * For per-PID buffers, perform "push metadata" and flush all
4236 * application streams before removing app from hash tables,
4237 * ensuring proper behavior of data_pending check.
c4b88406 4238 * Remove sessions so they are not visible during deletion.
d88aee68 4239 */
3faa1e3d 4240 cds_lfht_for_each_entry(app->sessions->ht, &iter.iter, ua_sess,
d42f20df 4241 node.node) {
7972aab2
DG
4242 struct ust_registry_session *registry;
4243
3faa1e3d 4244 ret = lttng_ht_del(app->sessions, &iter);
d42f20df
DG
4245 if (ret) {
4246 /* The session was already removed so scheduled for teardown. */
4247 continue;
4248 }
4249
ce34fcd0 4250 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
3faa1e3d 4251 (void) ust_app_flush_app_session(app, ua_sess);
ce34fcd0 4252 }
c4b88406 4253
d42f20df
DG
4254 /*
4255 * Add session to list for teardown. This is safe since at this point we
4256 * are the only one using this list.
4257 */
d88aee68
DG
4258 pthread_mutex_lock(&ua_sess->lock);
4259
b161602a
MD
4260 if (ua_sess->deleted) {
4261 pthread_mutex_unlock(&ua_sess->lock);
4262 continue;
4263 }
4264
d88aee68
DG
4265 /*
4266 * Normally, this is done in the delete session process which is
4267 * executed in the call rcu below. However, upon unregistration we can't
4268 * afford to wait for the grace period before pushing data or else the
4269 * data pending feature can race between the unregistration and stop
4270 * command where the data pending command is sent *before* the grace
4271 * period ended.
4272 *
4273 * The close metadata below nullifies the metadata pointer in the
4274 * session so the delete session will NOT push/close a second time.
4275 */
7972aab2 4276 registry = get_session_registry(ua_sess);
ce34fcd0 4277 if (registry) {
7972aab2
DG
4278 /* Push metadata for application before freeing the application. */
4279 (void) push_metadata(registry, ua_sess->consumer);
4280
4281 /*
4282 * Don't ask to close metadata for global per UID buffers. Close
1b532a60
DG
4283 * metadata only when the trace session is destroyed in this case. Also, the
4284 * previous push metadata could have flagged the metadata registry to
4285 * close, so don't send a close command if it is already closed.
7972aab2 4286 */
ce34fcd0 4287 if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
7972aab2
DG
4288 /* And ask to close it for this session registry. */
4289 (void) close_metadata(registry, ua_sess->consumer);
4290 }
4291 }
c4b88406 4292
3faa1e3d 4293 cds_list_add(&ua_sess->teardown_node, &app->teardown_head);
d88aee68 4294 pthread_mutex_unlock(&ua_sess->lock);
d42f20df
DG
4295 }
4296
c4b88406
MD
4297 /*
4298 * Remove application from notify hash table. The thread handling the
4299 * notify socket could have deleted the node so ignore on error because
c48239ca
JG
4300 * either way it's valid. The close of that socket is handled by the
4301 * apps_notify_thread.
c4b88406 4302 */
3faa1e3d 4303 iter.iter.node = &app->notify_sock_n.node;
c4b88406
MD
4304 (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
4305
3faa1e3d 4306 iter.iter.node = &app->pid_n.node;
c4b88406
MD
4307 ret = lttng_ht_del(ust_app_ht, &iter);
4308 if (ret) {
3faa1e3d 4309 WARN("Unregister app by PID %d failed", app->pid);
c4b88406
MD
4310 }
4311
3faa1e3d
JG
4312 rcu_read_unlock();
4313}
4314
4315/*
4316 * Unregister app by removing it from the global traceable app list and freeing
4317 * the data struct.
4318 *
4319 * The socket is already closed at this point, so there is no need to close it.
4320 */
4321void ust_app_unregister_by_socket(int sock)
4322{
4323 struct ust_app *app;
4324 struct lttng_ht_node_ulong *node;
4325 struct lttng_ht_iter ust_app_sock_iter;
4326 int ret;
4327
4328 rcu_read_lock();
4329
4330 /* Get the node reference for a call_rcu */
4331 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &ust_app_sock_iter);
4332 node = lttng_ht_iter_get_node_ulong(&ust_app_sock_iter);
4333 assert(node);
4334
4335 app = caa_container_of(node, struct ust_app, sock_n);
4336
4337 DBG("PID %d unregistering with sock %d", app->pid, sock);
852d0037 4338
3faa1e3d
JG
4339 /* Remove application from socket hash table */
4340 ret = lttng_ht_del(ust_app_ht_by_sock, &ust_app_sock_iter);
4341 assert(!ret);
4342
4343 /*
4344 * The socket is closed: release its reference to the application
4345 * to trigger its eventual teardown.
4346 */
4347 ust_app_put(app);
5b4a0ec0 4348 rcu_read_unlock();
284d8f55
DG
4349}
4350
5b4a0ec0
DG
4351/*
4352 * Fill the events array with the event names from all registered apps.
4353 */
4354int ust_app_list_events(struct lttng_event **events)
421cb601 4355{
5b4a0ec0
DG
4356 int ret, handle;
4357 size_t nbmem, count = 0;
bec39940 4358 struct lttng_ht_iter iter;
5b4a0ec0 4359 struct ust_app *app;
c617c0c6 4360 struct lttng_event *tmp_event;
421cb601 4361
5b4a0ec0 4362 nbmem = UST_APP_EVENT_LIST_SIZE;
c617c0c6
MD
4363 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event));
4364 if (tmp_event == NULL) {
5b4a0ec0
DG
4365 PERROR("zmalloc ust app events");
4366 ret = -ENOMEM;
421cb601
DG
4367 goto error;
4368 }
4369
5b4a0ec0 4370 rcu_read_lock();
421cb601 4371
852d0037 4372 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
fc4b93fa 4373 struct lttng_ust_abi_tracepoint_iter uiter;
ac3bd9c0 4374
840cb59c 4375 health_code_update();
86acf0da 4376
e0c7ec2b
DG
4377 if (!app->compatible) {
4378 /*
4379 * TODO: In time, we should notify the caller of this error by
4380 * telling them that this is a version error.
4381 */
4382 continue;
4383 }
fb45065e 4384 pthread_mutex_lock(&app->sock_lock);
b623cb6a 4385 handle = lttng_ust_ctl_tracepoint_list(app->sock);
5b4a0ec0 4386 if (handle < 0) {
ffe60014
DG
4387 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
4388 ERR("UST app list events getting handle failed for app pid %d",
4389 app->pid);
4390 }
fb45065e 4391 pthread_mutex_unlock(&app->sock_lock);
5b4a0ec0
DG
4392 continue;
4393 }
421cb601 4394
b623cb6a 4395 while ((ret = lttng_ust_ctl_tracepoint_list_get(app->sock, handle,
fb54cdbf 4396 &uiter)) != -LTTNG_UST_ERR_NOENT) {
ffe60014
DG
4397 /* Handle ustctl error. */
4398 if (ret < 0) {
fb45065e
MD
4399 int release_ret;
4400
a2ba1ab0 4401 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
ffe60014
DG
4402 ERR("UST app tp list get failed for app %d with ret %d",
4403 app->sock, ret);
4404 } else {
4405 DBG3("UST app tp list get failed. Application is dead");
3757b385 4406 break;
ffe60014 4407 }
98f595d4 4408 free(tmp_event);
b623cb6a 4409 release_ret = lttng_ust_ctl_release_handle(app->sock, handle);
68313703
JG
4410 if (release_ret < 0 &&
4411 release_ret != -LTTNG_UST_ERR_EXITING &&
4412 release_ret != -EPIPE) {
fb45065e
MD
4413 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
4414 }
4415 pthread_mutex_unlock(&app->sock_lock);
ffe60014
DG
4416 goto rcu_error;
4417 }
4418
840cb59c 4419 health_code_update();
815564d8 4420 if (count >= nbmem) {
d7b3776f 4421 /* In case the realloc fails, we free the memory */
53efb85a
MD
4422 struct lttng_event *new_tmp_event;
4423 size_t new_nbmem;
4424
4425 new_nbmem = nbmem << 1;
4426 DBG2("Reallocating event list from %zu to %zu entries",
4427 nbmem, new_nbmem);
4428 new_tmp_event = realloc(tmp_event,
4429 new_nbmem * sizeof(struct lttng_event));
4430 if (new_tmp_event == NULL) {
fb45065e
MD
4431 int release_ret;
4432
5b4a0ec0 4433 PERROR("realloc ust app events");
c617c0c6 4434 free(tmp_event);
5b4a0ec0 4435 ret = -ENOMEM;
b623cb6a 4436 release_ret = lttng_ust_ctl_release_handle(app->sock, handle);
68313703
JG
4437 if (release_ret < 0 &&
4438 release_ret != -LTTNG_UST_ERR_EXITING &&
4439 release_ret != -EPIPE) {
fb45065e
MD
4440 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
4441 }
4442 pthread_mutex_unlock(&app->sock_lock);
5b4a0ec0
DG
4443 goto rcu_error;
4444 }
53efb85a
MD
4445 /* Zero the new memory */
4446 memset(new_tmp_event + nbmem, 0,
4447 (new_nbmem - nbmem) * sizeof(struct lttng_event));
4448 nbmem = new_nbmem;
4449 tmp_event = new_tmp_event;
5b4a0ec0 4450 }
fc4b93fa 4451 memcpy(tmp_event[count].name, uiter.name, LTTNG_UST_ABI_SYM_NAME_LEN);
c617c0c6 4452 tmp_event[count].loglevel = uiter.loglevel;
fc4b93fa 4453 tmp_event[count].type = (enum lttng_event_type) LTTNG_UST_ABI_TRACEPOINT;
c617c0c6
MD
4454 tmp_event[count].pid = app->pid;
4455 tmp_event[count].enabled = -1;
5b4a0ec0 4456 count++;
421cb601 4457 }
b623cb6a 4458 ret = lttng_ust_ctl_release_handle(app->sock, handle);
fb45065e 4459 pthread_mutex_unlock(&app->sock_lock);
569744c5
JR
4460 if (ret < 0) {
4461 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
4462 DBG3("Error releasing app handle. Application died: pid = %d, sock = %d",
4463 app->pid, app->sock);
4464 } else if (ret == -EAGAIN) {
4465 WARN("Error releasing app handle. Communication time out: pid = %d, sock = %d",
4466 app->pid, app->sock);
4467 } else {
4468 ERR("Error releasing app handle with ret %d: pid = %d, sock = %d",
4469 ret, app->pid, app->sock);
4470 }
fb45065e 4471 }
421cb601
DG
4472 }
4473
5b4a0ec0 4474 ret = count;
c617c0c6 4475 *events = tmp_event;
421cb601 4476
5b4a0ec0 4477 DBG2("UST app list events done (%zu events)", count);
421cb601 4478
5b4a0ec0
DG
4479rcu_error:
4480 rcu_read_unlock();
421cb601 4481error:
840cb59c 4482 health_code_update();
5b4a0ec0 4483 return ret;
421cb601
DG
4484}
4485
f37d259d
MD
4486/*
4487 * Fill the fields array with the event fields from all registered apps.
4488 */
4489int ust_app_list_event_fields(struct lttng_event_field **fields)
4490{
4491 int ret, handle;
4492 size_t nbmem, count = 0;
4493 struct lttng_ht_iter iter;
4494 struct ust_app *app;
c617c0c6 4495 struct lttng_event_field *tmp_event;
f37d259d
MD
4496
4497 nbmem = UST_APP_EVENT_LIST_SIZE;
c617c0c6
MD
4498 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event_field));
4499 if (tmp_event == NULL) {
f37d259d
MD
4500 PERROR("zmalloc ust app event fields");
4501 ret = -ENOMEM;
4502 goto error;
4503 }
4504
4505 rcu_read_lock();
4506
4507 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
fc4b93fa 4508 struct lttng_ust_abi_field_iter uiter;
f37d259d 4509
840cb59c 4510 health_code_update();
86acf0da 4511
f37d259d
MD
4512 if (!app->compatible) {
4513 /*
4514 * TODO: In time, we should notify the caller of this error by
4515 * telling them that this is a version error.
4516 */
4517 continue;
4518 }
fb45065e 4519 pthread_mutex_lock(&app->sock_lock);
b623cb6a 4520 handle = lttng_ust_ctl_tracepoint_field_list(app->sock);
f37d259d 4521 if (handle < 0) {
ffe60014
DG
4522 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
4523 ERR("UST app list field getting handle failed for app pid %d",
4524 app->pid);
4525 }
fb45065e 4526 pthread_mutex_unlock(&app->sock_lock);
f37d259d
MD
4527 continue;
4528 }
4529
b623cb6a 4530 while ((ret = lttng_ust_ctl_tracepoint_field_list_get(app->sock, handle,
fb54cdbf 4531 &uiter)) != -LTTNG_UST_ERR_NOENT) {
ffe60014
DG
4532 /* Handle ustctl error. */
4533 if (ret < 0) {
fb45065e
MD
4534 int release_ret;
4535
a2ba1ab0 4536 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
ffe60014
DG
4537 ERR("UST app tp list field failed for app %d with ret %d",
4538 app->sock, ret);
4539 } else {
4540 DBG3("UST app tp list field failed. Application is dead");
3757b385 4541 break;
ffe60014 4542 }
98f595d4 4543 free(tmp_event);
b623cb6a 4544 release_ret = lttng_ust_ctl_release_handle(app->sock, handle);
fb45065e 4545 pthread_mutex_unlock(&app->sock_lock);
68313703
JG
4546 if (release_ret < 0 &&
4547 release_ret != -LTTNG_UST_ERR_EXITING &&
4548 release_ret != -EPIPE) {
fb45065e
MD
4549 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
4550 }
ffe60014
DG
4551 goto rcu_error;
4552 }
4553
840cb59c 4554 health_code_update();
f37d259d 4555 if (count >= nbmem) {
d7b3776f 4556 /* In case the realloc fails, we free the memory */
53efb85a
MD
4557 struct lttng_event_field *new_tmp_event;
4558 size_t new_nbmem;
4559
4560 new_nbmem = nbmem << 1;
4561 DBG2("Reallocating event field list from %zu to %zu entries",
4562 nbmem, new_nbmem);
4563 new_tmp_event = realloc(tmp_event,
4564 new_nbmem * sizeof(struct lttng_event_field));
4565 if (new_tmp_event == NULL) {
fb45065e
MD
4566 int release_ret;
4567
f37d259d 4568 PERROR("realloc ust app event fields");
c617c0c6 4569 free(tmp_event);
f37d259d 4570 ret = -ENOMEM;
b623cb6a 4571 release_ret = lttng_ust_ctl_release_handle(app->sock, handle);
fb45065e 4572 pthread_mutex_unlock(&app->sock_lock);
68313703
JG
4573 if (release_ret &&
4574 release_ret != -LTTNG_UST_ERR_EXITING &&
4575 release_ret != -EPIPE) {
fb45065e
MD
4576 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
4577 }
f37d259d
MD
4578 goto rcu_error;
4579 }
53efb85a
MD
4580 /* Zero the new memory */
4581 memset(new_tmp_event + nbmem, 0,
4582 (new_nbmem - nbmem) * sizeof(struct lttng_event_field));
4583 nbmem = new_nbmem;
4584 tmp_event = new_tmp_event;
f37d259d 4585 }
f37d259d 4586
fc4b93fa 4587 memcpy(tmp_event[count].field_name, uiter.field_name, LTTNG_UST_ABI_SYM_NAME_LEN);
2e84128e
DG
4588 /* Mapping between these enums matches 1 to 1. */
4589 tmp_event[count].type = (enum lttng_event_field_type) uiter.type;
c617c0c6 4590 tmp_event[count].nowrite = uiter.nowrite;
f37d259d 4591
fc4b93fa 4592 memcpy(tmp_event[count].event.name, uiter.event_name, LTTNG_UST_ABI_SYM_NAME_LEN);
c617c0c6 4593 tmp_event[count].event.loglevel = uiter.loglevel;
2e84128e 4594 tmp_event[count].event.type = LTTNG_EVENT_TRACEPOINT;
c617c0c6
MD
4595 tmp_event[count].event.pid = app->pid;
4596 tmp_event[count].event.enabled = -1;
f37d259d
MD
4597 count++;
4598 }
b623cb6a 4599 ret = lttng_ust_ctl_release_handle(app->sock, handle);
fb45065e 4600 pthread_mutex_unlock(&app->sock_lock);
68313703
JG
4601 if (ret < 0 &&
4602 ret != -LTTNG_UST_ERR_EXITING &&
4603 ret != -EPIPE) {
fb45065e
MD
4604 ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
4605 }
f37d259d
MD
4606 }
4607
4608 ret = count;
c617c0c6 4609 *fields = tmp_event;
f37d259d
MD
4610
4611 DBG2("UST app list event fields done (%zu events)", count);
4612
4613rcu_error:
4614 rcu_read_unlock();
4615error:
840cb59c 4616 health_code_update();
f37d259d
MD
4617 return ret;
4618}
4619
5b4a0ec0
DG
4620/*
4621 * Free and clean all traceable apps of the global list.
36b588ed
MD
4622 *
4623 * Should _NOT_ be called with RCU read-side lock held.
5b4a0ec0
DG
4624 */
4625void ust_app_clean_list(void)
421cb601 4626{
5b4a0ec0 4627 int ret;
659ed79f 4628 struct ust_app *app;
bec39940 4629 struct lttng_ht_iter iter;
421cb601 4630
5b4a0ec0 4631 DBG2("UST app cleaning registered apps hash table");
421cb601 4632
5b4a0ec0 4633 rcu_read_lock();
421cb601 4634
faadaa3a
JG
4635 /* Cleanup notify socket hash table */
4636 if (ust_app_ht_by_notify_sock) {
4637 cds_lfht_for_each_entry(ust_app_ht_by_notify_sock->ht, &iter.iter, app,
4638 notify_sock_n.node) {
b69a1b40
JG
4639 /*
4640 * Assert that all notifiers are gone as all triggers
4641 * are unregistered prior to this clean-up.
4642 */
4643 assert(lttng_ht_get_count(app->token_to_event_notifier_rule_ht) == 0);
faadaa3a
JG
4644 ust_app_notify_sock_unregister(app->notify_sock);
4645 }
4646 }
4647
852d0037 4648 /* Cleanup socket hash table */
f1b711c4
MD
4649 if (ust_app_ht_by_sock) {
4650 cds_lfht_for_each_entry(ust_app_ht_by_sock->ht, &iter.iter, app,
4651 sock_n.node) {
4652 ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
4653 assert(!ret);
3faa1e3d
JG
4654
4655 ust_app_put(app);
f1b711c4 4656 }
bec39940 4657 }
852d0037 4658
36b588ed 4659 rcu_read_unlock();
d88aee68 4660
bec39940 4661 /* Destroy is done only when the ht is empty */
f1b711c4
MD
4662 if (ust_app_ht) {
4663 ht_cleanup_push(ust_app_ht);
4664 }
4665 if (ust_app_ht_by_sock) {
4666 ht_cleanup_push(ust_app_ht_by_sock);
4667 }
4668 if (ust_app_ht_by_notify_sock) {
4669 ht_cleanup_push(ust_app_ht_by_notify_sock);
4670 }
5b4a0ec0
DG
4671}
4672
4673/*
4674 * Init UST app hash table.
4675 */
57703f6e 4676int ust_app_ht_alloc(void)
5b4a0ec0 4677{
bec39940 4678 ust_app_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
57703f6e
MD
4679 if (!ust_app_ht) {
4680 return -1;
4681 }
852d0037 4682 ust_app_ht_by_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
57703f6e
MD
4683 if (!ust_app_ht_by_sock) {
4684 return -1;
4685 }
d0b96690 4686 ust_app_ht_by_notify_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
57703f6e
MD
4687 if (!ust_app_ht_by_notify_sock) {
4688 return -1;
4689 }
4690 return 0;
421cb601
DG
4691}
4692
78f0bacd
DG
4693/*
4694 * For a specific UST session, disable the channel for all registered apps.
4695 */
35a9059d 4696int ust_app_disable_channel_glb(struct ltt_ust_session *usess,
78f0bacd
DG
4697 struct ltt_ust_channel *uchan)
4698{
4699 int ret = 0;
bec39940
DG
4700 struct lttng_ht_iter iter;
4701 struct lttng_ht_node_str *ua_chan_node;
78f0bacd
DG
4702 struct ust_app *app;
4703 struct ust_app_session *ua_sess;
8535a6d9 4704 struct ust_app_channel *ua_chan;
78f0bacd 4705
88e3c2f5 4706 assert(usess->active);
d9bf3ca4 4707 DBG2("UST app disabling channel %s from global domain for session id %" PRIu64,
a991f516 4708 uchan->name, usess->id);
78f0bacd
DG
4709
4710 rcu_read_lock();
4711
4712 /* For every registered applications */
852d0037 4713 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
bec39940 4714 struct lttng_ht_iter uiter;
e0c7ec2b
DG
4715 if (!app->compatible) {
4716 /*
4717 * TODO: In time, we should notify the caller of this error by
4718 * telling them that this is a version error.
4719 */
4720 continue;
4721 }
78f0bacd
DG
4722 ua_sess = lookup_session_by_app(usess, app);
4723 if (ua_sess == NULL) {
4724 continue;
4725 }
4726
8535a6d9 4727 /* Get channel */
bec39940
DG
4728 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4729 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
8535a6d9
DG
4730 /* If the session is found for the app, the channel must be there */
4731 assert(ua_chan_node);
4732
4733 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4734 /* The channel must not be already disabled */
4735 assert(ua_chan->enabled == 1);
4736
4737 /* Disable channel onto application */
4738 ret = disable_ust_app_channel(ua_sess, ua_chan, app);
78f0bacd
DG
4739 if (ret < 0) {
4740 /* XXX: We might want to report this error at some point... */
4741 continue;
4742 }
4743 }
4744
4745 rcu_read_unlock();
78f0bacd
DG
4746 return ret;
4747}
4748
4749/*
4750 * For a specific UST session, enable the channel for all registered apps.
4751 */
35a9059d 4752int ust_app_enable_channel_glb(struct ltt_ust_session *usess,
78f0bacd
DG
4753 struct ltt_ust_channel *uchan)
4754{
4755 int ret = 0;
bec39940 4756 struct lttng_ht_iter iter;
78f0bacd
DG
4757 struct ust_app *app;
4758 struct ust_app_session *ua_sess;
4759
88e3c2f5 4760 assert(usess->active);
d9bf3ca4 4761 DBG2("UST app enabling channel %s to global domain for session id %" PRIu64,
a991f516 4762 uchan->name, usess->id);
78f0bacd
DG
4763
4764 rcu_read_lock();
4765
4766 /* For every registered applications */
852d0037 4767 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
e0c7ec2b
DG
4768 if (!app->compatible) {
4769 /*
4770 * TODO: In time, we should notify the caller of this error by
4771 * telling them that this is a version error.
4772 */
4773 continue;
4774 }
78f0bacd
DG
4775 ua_sess = lookup_session_by_app(usess, app);
4776 if (ua_sess == NULL) {
4777 continue;
4778 }
4779
4780 /* Enable channel onto application */
4781 ret = enable_ust_app_channel(ua_sess, uchan, app);
4782 if (ret < 0) {
4783 /* XXX: We might want to report this error at some point... */
4784 continue;
4785 }
4786 }
4787
4788 rcu_read_unlock();
78f0bacd
DG
4789 return ret;
4790}
4791
b0a40d28
DG
4792/*
4793 * Disable an event in a channel and for a specific session.
4794 */
35a9059d
DG
4795int ust_app_disable_event_glb(struct ltt_ust_session *usess,
4796 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
b0a40d28
DG
4797{
4798 int ret = 0;
bec39940 4799 struct lttng_ht_iter iter, uiter;
700c5a9d 4800 struct lttng_ht_node_str *ua_chan_node;
b0a40d28
DG
4801 struct ust_app *app;
4802 struct ust_app_session *ua_sess;
4803 struct ust_app_channel *ua_chan;
4804 struct ust_app_event *ua_event;
4805
88e3c2f5 4806 assert(usess->active);
b0a40d28 4807 DBG("UST app disabling event %s for all apps in channel "
d9bf3ca4
MD
4808 "%s for session id %" PRIu64,
4809 uevent->attr.name, uchan->name, usess->id);
b0a40d28
DG
4810
4811 rcu_read_lock();
4812
4813 /* For all registered applications */
852d0037 4814 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
e0c7ec2b
DG
4815 if (!app->compatible) {
4816 /*
4817 * TODO: In time, we should notify the caller of this error by
4818 * telling them that this is a version error.
4819 */
4820 continue;
4821 }
b0a40d28
DG
4822 ua_sess = lookup_session_by_app(usess, app);
4823 if (ua_sess == NULL) {
4824 /* Next app */
4825 continue;
4826 }
4827
4828 /* Lookup channel in the ust app session */
bec39940
DG
4829 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4830 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
b0a40d28 4831 if (ua_chan_node == NULL) {
d9bf3ca4 4832 DBG2("Channel %s not found in session id %" PRIu64 " for app pid %d."
852d0037 4833 "Skipping", uchan->name, usess->id, app->pid);
b0a40d28
DG
4834 continue;
4835 }
4836 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4837
1083b49a
JG
4838 ua_event = find_ust_app_event(ua_chan->events,
4839 uevent->attr.name, uevent->filter,
4840 (enum lttng_ust_abi_loglevel_type)
4841 uevent->attr.loglevel_type,
4842 uevent->attr.loglevel, uevent->exclusion);
700c5a9d 4843 if (ua_event == NULL) {
b0a40d28 4844 DBG2("Event %s not found in channel %s for app pid %d."
852d0037 4845 "Skipping", uevent->attr.name, uchan->name, app->pid);
b0a40d28
DG
4846 continue;
4847 }
b0a40d28 4848
7f79d3a1 4849 ret = disable_ust_app_event(ua_sess, ua_event, app);
b0a40d28
DG
4850 if (ret < 0) {
4851 /* XXX: Report error someday... */
4852 continue;
4853 }
4854 }
4855
4856 rcu_read_unlock();
88e3c2f5
JG
4857 return ret;
4858}
4859
4860/* The ua_sess lock must be held by the caller. */
4861static
4862int ust_app_channel_create(struct ltt_ust_session *usess,
4863 struct ust_app_session *ua_sess,
4864 struct ltt_ust_channel *uchan, struct ust_app *app,
4865 struct ust_app_channel **_ua_chan)
4866{
4867 int ret = 0;
4868 struct ust_app_channel *ua_chan = NULL;
4869
4870 assert(ua_sess);
4871 ASSERT_LOCKED(ua_sess->lock);
4872
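	/*
	 * The metadata channel is never created here: only record its attributes
	 * so they are applied when the session's metadata channel is created
	 * later (see create_ust_app_metadata()).
	 */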
4873 if (!strncmp(uchan->name, DEFAULT_METADATA_NAME,
4874 sizeof(uchan->name))) {
4875 copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
4876 &uchan->attr);
4877 ret = 0;
4878 } else {
4879 struct ltt_ust_context *uctx = NULL;
4880
4881 /*
4882 * Create channel onto application and synchronize its
4883 * configuration.
4884 */
4885 ret = ust_app_channel_allocate(ua_sess, uchan,
fc4b93fa 4886 LTTNG_UST_ABI_CHAN_PER_CPU, usess,
88e3c2f5 4887 &ua_chan);
88ebf5a7
JR
4888 if (ret < 0) {
4889 goto error;
4890 }
4891
4892 ret = ust_app_channel_send(app, usess,
4893 ua_sess, ua_chan);
4894 if (ret) {
4895 goto error;
88e3c2f5
JG
4896 }
4897
4898 /* Add contexts. */
4899 cds_list_for_each_entry(uctx, &uchan->ctx_list, list) {
4900 ret = create_ust_app_channel_context(ua_chan,
4901 &uctx->ctx, app);
4902 if (ret) {
88ebf5a7 4903 goto error;
88e3c2f5
JG
4904 }
4905 }
4906 }
88ebf5a7
JR
4907
4908error:
88e3c2f5
JG
4909 if (ret < 0) {
4910 switch (ret) {
4911 case -ENOTCONN:
4912 /*
4913 * The application's socket is not valid. Either a bad socket
4914 * or a timeout on it. We can't inform the caller that for a
4915 * specific app, the session failed, so let's continue here.
4916 */
4917 ret = 0; /* Not an error. */
4918 break;
4919 case -ENOMEM:
4920 default:
4921 break;
4922 }
4923 }
88ebf5a7 4924
88e3c2f5
JG
4925 if (ret == 0 && _ua_chan) {
4926 /*
4927 * Only return the application's channel on success. Note
4928 * that the channel can still be part of the application's
4929 * channel hashtable on error.
4930 */
4931 *_ua_chan = ua_chan;
4932 }
b0a40d28
DG
4933 return ret;
4934}
4935
5b4a0ec0 4936/*
edb67388 4937 * Enable event for a specific session and channel on the tracer.
5b4a0ec0 4938 */
35a9059d 4939int ust_app_enable_event_glb(struct ltt_ust_session *usess,
48842b30
DG
4940 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
4941{
4942 int ret = 0;
bec39940 4943 struct lttng_ht_iter iter, uiter;
18eace3b 4944 struct lttng_ht_node_str *ua_chan_node;
48842b30
DG
4945 struct ust_app *app;
4946 struct ust_app_session *ua_sess;
4947 struct ust_app_channel *ua_chan;
4948 struct ust_app_event *ua_event;
48842b30 4949
88e3c2f5 4950 assert(usess->active);
d9bf3ca4 4951 DBG("UST app enabling event %s for all apps for session id %" PRIu64,
a991f516 4952 uevent->attr.name, usess->id);
48842b30 4953
edb67388
DG
4954 /*
4955 * NOTE: At this point, this function is called only if the session and
4956 * channel passed are already created for all apps and enabled on the
4957 * tracer as well.
4958 */
4959
48842b30 4960 rcu_read_lock();
421cb601
DG
4961
4962 /* For all registered applications */
852d0037 4963 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
e0c7ec2b
DG
4964 if (!app->compatible) {
4965 /*
4966 * TODO: In time, we should notify the caller of this error by
4967 * telling them that this is a version error.
4968 */
4969 continue;
4970 }
edb67388 4971 ua_sess = lookup_session_by_app(usess, app);
c4a1715b
DG
4972 if (!ua_sess) {
4973 /* The application has a problem or is probably dead. */
4974 continue;
4975 }
ba767faf 4976
d0b96690
DG
4977 pthread_mutex_lock(&ua_sess->lock);
4978
b161602a
MD
4979 if (ua_sess->deleted) {
4980 pthread_mutex_unlock(&ua_sess->lock);
4981 continue;
4982 }
4983
edb67388 4984 /* Lookup channel in the ust app session */
bec39940
DG
4985 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4986 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
a7169585
MD
4987 /*
4988 * It is possible that the channel cannot be found if
4989 * the channel/event creation occurs concurrently with
4990 * an application exit.
4991 */
4992 if (!ua_chan_node) {
4993 pthread_mutex_unlock(&ua_sess->lock);
4994 continue;
4995 }
edb67388
DG
4996
4997 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4998
18eace3b 4999 /* Get event node */
1083b49a
JG
5000 ua_event = find_ust_app_event(ua_chan->events,
5001 uevent->attr.name, uevent->filter,
5002 (enum lttng_ust_abi_loglevel_type)
5003 uevent->attr.loglevel_type,
5004 uevent->attr.loglevel, uevent->exclusion);
18eace3b 5005 if (ua_event == NULL) {
7f79d3a1 5006 DBG3("UST app enable event %s not found for app PID %d."
852d0037 5007 " Skipping app", uevent->attr.name, app->pid);
d0b96690 5008 goto next_app;
35a9059d 5009 }
35a9059d
DG
5010
5011 ret = enable_ust_app_event(ua_sess, ua_event, app);
5012 if (ret < 0) {
d0b96690 5013 pthread_mutex_unlock(&ua_sess->lock);
7f79d3a1 5014 goto error;
48842b30 5015 }
d0b96690
DG
5016 next_app:
5017 pthread_mutex_unlock(&ua_sess->lock);
edb67388
DG
5018 }
5019
7f79d3a1 5020error:
edb67388 5021 rcu_read_unlock();
edb67388
DG
5022 return ret;
5023}
5024
5025/*
5026 * For a specific existing UST session and UST channel, creates the event for
5027 * all registered apps.
5028 */
35a9059d 5029int ust_app_create_event_glb(struct ltt_ust_session *usess,
edb67388
DG
5030 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
5031{
5032 int ret = 0;
bec39940
DG
5033 struct lttng_ht_iter iter, uiter;
5034 struct lttng_ht_node_str *ua_chan_node;
edb67388
DG
5035 struct ust_app *app;
5036 struct ust_app_session *ua_sess;
5037 struct ust_app_channel *ua_chan;
5038
88e3c2f5 5039 assert(usess->active);
d9bf3ca4 5040 DBG("UST app creating event %s for all apps for session id %" PRIu64,
a991f516 5041 uevent->attr.name, usess->id);
edb67388 5042
edb67388
DG
5043 rcu_read_lock();
5044
5045 /* For all registered applications */
852d0037 5046 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
e0c7ec2b
DG
5047 if (!app->compatible) {
5048 /*
5049 * TODO: In time, we should notify the caller of this error by
5050 * telling them that this is a version error.
5051 */
5052 continue;
5053 }
edb67388 5054 ua_sess = lookup_session_by_app(usess, app);
c4a1715b
DG
5055 if (!ua_sess) {
5056 /* The application has a problem or is probably dead. */
5057 continue;
5058 }
48842b30 5059
d0b96690 5060 pthread_mutex_lock(&ua_sess->lock);
b161602a
MD
5061
5062 if (ua_sess->deleted) {
5063 pthread_mutex_unlock(&ua_sess->lock);
5064 continue;
5065 }
5066
48842b30 5067 /* Lookup channel in the ust app session */
bec39940
DG
5068 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
5069 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
edb67388
DG
5070 /* If the channel is not found, there is a code flow error */
5071 assert(ua_chan_node);
5072
48842b30
DG
5073 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
5074
edb67388 5075 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
d0b96690 5076 pthread_mutex_unlock(&ua_sess->lock);
edb67388 5077 if (ret < 0) {
49c336c1 5078 if (ret != -LTTNG_UST_ERR_EXIST) {
fc34caaa
DG
5079 /* Possible value at this point: -ENOMEM. If so, we stop! */
5080 break;
5081 }
5082 DBG2("UST app event %s already exist on app PID %d",
852d0037 5083 uevent->attr.name, app->pid);
5b4a0ec0 5084 continue;
48842b30 5085 }
48842b30 5086 }
5b4a0ec0 5087
48842b30 5088 rcu_read_unlock();
48842b30
DG
5089 return ret;
5090}
5091
5b4a0ec0
DG
5092/*
5093 * Start tracing for a specific UST session and app.
fad1ed2f
JR
5094 *
5095 * Called with UST app session lock held.
5096 *
5b4a0ec0 5097 */
b34cbebf 5098static
421cb601 5099int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app)
48842b30
DG
5100{
5101 int ret = 0;
48842b30 5102 struct ust_app_session *ua_sess;
48842b30 5103
852d0037 5104 DBG("Starting tracing for ust app pid %d", app->pid);
5cf5d0e7 5105
509cbaf8
MD
5106 rcu_read_lock();
5107
e0c7ec2b
DG
5108 if (!app->compatible) {
5109 goto end;
5110 }
5111
421cb601
DG
5112 ua_sess = lookup_session_by_app(usess, app);
5113 if (ua_sess == NULL) {
d42f20df
DG
5114 /* The session is in teardown process. Ignore and continue. */
5115 goto end;
421cb601 5116 }
48842b30 5117
d0b96690
DG
5118 pthread_mutex_lock(&ua_sess->lock);
5119
b161602a
MD
5120 if (ua_sess->deleted) {
5121 pthread_mutex_unlock(&ua_sess->lock);
5122 goto end;
5123 }
5124
b0a1c741
JR
5125 if (ua_sess->enabled) {
5126 pthread_mutex_unlock(&ua_sess->lock);
5127 goto end;
5128 }
5129
aea829b3
DG
5130 /* Upon restart, we skip the setup, already done */
5131 if (ua_sess->started) {
8be98f9a 5132 goto skip_setup;
aea829b3 5133 }
8be98f9a 5134
840cb59c 5135 health_code_update();
86acf0da 5136
8be98f9a 5137skip_setup:
a945cdc7 5138 /* This starts the UST tracing */
fb45065e 5139 pthread_mutex_lock(&app->sock_lock);
b623cb6a 5140 ret = lttng_ust_ctl_start_session(app->sock, ua_sess->handle);
fb45065e 5141 pthread_mutex_unlock(&app->sock_lock);
421cb601 5142 if (ret < 0) {
569744c5
JR
5143 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
5144 DBG3("UST app start session failed. Application is dead: pid = %d, sock = %d",
5145 app->pid, app->sock);
3757b385
DG
5146 pthread_mutex_unlock(&ua_sess->lock);
5147 goto end;
569744c5
JR
5148 } else if (ret == -EAGAIN) {
5149 WARN("UST app start session failed. Communication time out: pid = %d, sock = %d",
5150 app->pid, app->sock);
5151 pthread_mutex_unlock(&ua_sess->lock);
5152 goto end;
5153
5154 } else {
5155 ERR("UST app start session failed with ret %d: pid = %d, sock = %d",
5156 ret, app->pid, app->sock);
ffe60014 5157 }
d0b96690 5158 goto error_unlock;
421cb601 5159 }
5b4a0ec0 5160
55c3953d
DG
5161 /* Indicate that the session has been started once */
5162 ua_sess->started = 1;
b0a1c741 5163 ua_sess->enabled = 1;
55c3953d 5164
d0b96690
DG
5165 pthread_mutex_unlock(&ua_sess->lock);
5166
840cb59c 5167 health_code_update();
86acf0da 5168
421cb601 5169 /* Quiescent wait after starting trace */
fb45065e 5170 pthread_mutex_lock(&app->sock_lock);
b623cb6a 5171 ret = lttng_ust_ctl_wait_quiescent(app->sock);
fb45065e 5172 pthread_mutex_unlock(&app->sock_lock);
569744c5
JR
5173 if (ret < 0) {
5174 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
5175 DBG3("UST app wait quiescent failed. Application is dead: pid = %d, sock = %d",
5176 app->pid, app->sock);
5177 } else if (ret == -EAGAIN) {
5178 WARN("UST app wait quiescent failed. Communication time out: pid = %d, sock = %d",
5179 app->pid, app->sock);
5180 } else {
5181 ERR("UST app wait quiescent failed with ret %d: pid = %d, sock = %d",
5182 ret, app->pid, app->sock);
5183 }
ffe60014 5184 }
48842b30 5185
e0c7ec2b
DG
5186end:
5187 rcu_read_unlock();
840cb59c 5188 health_code_update();
421cb601 5189 return 0;
48842b30 5190
d0b96690
DG
5191error_unlock:
5192 pthread_mutex_unlock(&ua_sess->lock);
509cbaf8 5193 rcu_read_unlock();
840cb59c 5194 health_code_update();
421cb601
DG
5195 return -1;
5196}
48842b30 5197
8be98f9a
MD
5198/*
5199 * Stop tracing for a specific UST session and app.
5200 */
b34cbebf 5201static
8be98f9a
MD
5202int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
5203{
5204 int ret = 0;
5205 struct ust_app_session *ua_sess;
7972aab2 5206 struct ust_registry_session *registry;
8be98f9a 5207
852d0037 5208 DBG("Stopping tracing for ust app pid %d", app->pid);
8be98f9a
MD
5209
5210 rcu_read_lock();
5211
e0c7ec2b 5212 if (!app->compatible) {
d88aee68 5213 goto end_no_session;
e0c7ec2b
DG
5214 }
5215
8be98f9a
MD
5216 ua_sess = lookup_session_by_app(usess, app);
5217 if (ua_sess == NULL) {
d88aee68 5218 goto end_no_session;
8be98f9a
MD
5219 }
5220
d88aee68
DG
5221 pthread_mutex_lock(&ua_sess->lock);
5222
b161602a
MD
5223 if (ua_sess->deleted) {
5224 pthread_mutex_unlock(&ua_sess->lock);
5225 goto end_no_session;
5226 }
5227
9bc07046
DG
5228 /*
5229 * If started = 0, it means that stop trace has been called for a session
c45536e1
DG
5230 * that was never started. It's possible since we can have a failed start
5231 * from either the application manager thread or the command thread. Simply
5232 * indicate that this is a stop error.
9bc07046 5233 */
f9dfc3d9 5234 if (!ua_sess->started) {
c45536e1
DG
5235 goto error_rcu_unlock;
5236 }
7db205b5 5237
840cb59c 5238 health_code_update();
86acf0da 5239
9d6c7d3f 5240 /* This inhibits UST tracing */
fb45065e 5241 pthread_mutex_lock(&app->sock_lock);
b623cb6a 5242 ret = lttng_ust_ctl_stop_session(app->sock, ua_sess->handle);
fb45065e 5243 pthread_mutex_unlock(&app->sock_lock);
9d6c7d3f 5244 if (ret < 0) {
569744c5
JR
5245 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
5246 DBG3("UST app stop session failed. Application is dead: pid = %d, sock = %d",
5247 app->pid, app->sock);
3757b385 5248 goto end_unlock;
569744c5
JR
5249 } else if (ret == -EAGAIN) {
5250 WARN("UST app stop session failed. Communication time out: pid = %d, sock = %d",
5251 app->pid, app->sock);
5252 goto end_unlock;
5253
5254 } else {
5255 ERR("UST app stop session failed with ret %d: pid = %d, sock = %d",
5256 ret, app->pid, app->sock);
ffe60014 5257 }
9d6c7d3f
DG
5258 goto error_rcu_unlock;
5259 }
5260
840cb59c 5261 health_code_update();
b0a1c741 5262 ua_sess->enabled = 0;
86acf0da 5263
9d6c7d3f 5264 /* Quiescent wait after stopping trace */
fb45065e 5265 pthread_mutex_lock(&app->sock_lock);
b623cb6a 5266 ret = lttng_ust_ctl_wait_quiescent(app->sock);
fb45065e 5267 pthread_mutex_unlock(&app->sock_lock);
569744c5
JR
5268 if (ret < 0) {
5269 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
5270 DBG3("UST app wait quiescent failed. Application is dead: pid = %d, sock = %d",
5271 app->pid, app->sock);
5272 } else if (ret == -EAGAIN) {
5273 WARN("UST app wait quiescent failed. Communication time out: pid = %d, sock = %d",
5274 app->pid, app->sock);
5275 } else {
5276 ERR("UST app wait quiescent failed with ret %d: pid = %d, sock = %d",
5277 ret, app->pid, app->sock);
5278 }
ffe60014 5279 }
9d6c7d3f 5280
840cb59c 5281 health_code_update();
86acf0da 5282
b34cbebf 5283 registry = get_session_registry(ua_sess);
fad1ed2f
JR
5284
5285 /* The UST app session lock is held; the registry shall not be null. */
b34cbebf 5286 assert(registry);
1b532a60 5287
ce34fcd0
MD
5288 /* Push metadata for application before freeing the application. */
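	/*
	 * The return value is deliberately ignored: push_metadata() reports its
	 * own errors and a failed push does not change the outcome of the stop
	 * operation.
	 */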
5289 (void) push_metadata(registry, ua_sess->consumer);
b34cbebf 5290
3757b385 5291end_unlock:
b34cbebf
MD
5292 pthread_mutex_unlock(&ua_sess->lock);
5293end_no_session:
5294 rcu_read_unlock();
5295 health_code_update();
5296 return 0;
5297
5298error_rcu_unlock:
5299 pthread_mutex_unlock(&ua_sess->lock);
5300 rcu_read_unlock();
5301 health_code_update();
5302 return -1;
5303}
5304
b34cbebf 5305static
c4b88406
MD
5306int ust_app_flush_app_session(struct ust_app *app,
5307 struct ust_app_session *ua_sess)
b34cbebf 5308{
c4b88406 5309 int ret, retval = 0;
b34cbebf 5310 struct lttng_ht_iter iter;
b34cbebf 5311 struct ust_app_channel *ua_chan;
c4b88406 5312 struct consumer_socket *socket;
b34cbebf 5313
c4b88406 5314 DBG("Flushing app session buffers for ust app pid %d", app->pid);
b34cbebf
MD
5315
5316 rcu_read_lock();
5317
5318 if (!app->compatible) {
c4b88406 5319 goto end_not_compatible;
b34cbebf
MD
5320 }
5321
5322 pthread_mutex_lock(&ua_sess->lock);
5323
b161602a
MD
5324 if (ua_sess->deleted) {
5325 goto end_deleted;
5326 }
5327
b34cbebf
MD
5328 health_code_update();
5329
9d6c7d3f 5330 /* Flushing buffers */
c4b88406
MD
5331 socket = consumer_find_socket_by_bitness(app->bits_per_long,
5332 ua_sess->consumer);
ce34fcd0
MD
5333
5334 /* Flush buffers and push metadata. */
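	/*
	 * Only per-PID buffers are handled at the application level; per-UID
	 * buffers are shared between applications and are flushed once per
	 * registry by ust_app_flush_session(), hence the assert() on the
	 * LTTNG_BUFFER_PER_UID case below.
	 */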
5335 switch (ua_sess->buffer_type) {
5336 case LTTNG_BUFFER_PER_PID:
5337 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
5338 node.node) {
5339 health_code_update();
ce34fcd0
MD
5340 ret = consumer_flush_channel(socket, ua_chan->key);
5341 if (ret) {
5342 ERR("Error flushing consumer channel");
5343 retval = -1;
5344 continue;
5345 }
8be98f9a 5346 }
ce34fcd0
MD
5347 break;
5348 case LTTNG_BUFFER_PER_UID:
5349 default:
5350 assert(0);
5351 break;
8be98f9a 5352 }
8be98f9a 5353
840cb59c 5354 health_code_update();
86acf0da 5355
b161602a 5356end_deleted:
d88aee68 5357 pthread_mutex_unlock(&ua_sess->lock);
ce34fcd0 5358
c4b88406
MD
5359end_not_compatible:
5360 rcu_read_unlock();
5361 health_code_update();
5362 return retval;
5363}
5364
5365/*
ce34fcd0
MD
5366 * Flush buffers for all applications for a specific UST session.
5367 * Called with UST session lock held.
c4b88406
MD
5368 */
5369static
ce34fcd0 5370int ust_app_flush_session(struct ltt_ust_session *usess)
c4b88406
MD
5371
5372{
99b1411c 5373 int ret = 0;
c4b88406 5374
ce34fcd0 5375 DBG("Flushing session buffers for all ust apps");
c4b88406
MD
5376
5377 rcu_read_lock();
5378
ce34fcd0
MD
5379 /* Flush buffers and push metadata. */
5380 switch (usess->buffer_type) {
5381 case LTTNG_BUFFER_PER_UID:
5382 {
5383 struct buffer_reg_uid *reg;
5384 struct lttng_ht_iter iter;
5385
5386 /* Flush all per UID buffers associated to that session. */
5387 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
5388 struct ust_registry_session *ust_session_reg;
3273699d 5389 struct buffer_reg_channel *buf_reg_chan;
ce34fcd0
MD
5390 struct consumer_socket *socket;
5391
5392 /* Get consumer socket to use to push the metadata.*/
5393 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
5394 usess->consumer);
5395 if (!socket) {
5396 /* Ignore request if no consumer is found for the session. */
5397 continue;
5398 }
5399
5400 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
3273699d 5401 buf_reg_chan, node.node) {
ce34fcd0
MD
5402 /*
5403 * The following call will print error values so the return
5404 * code is of little importance because whatever happens, we
5405 * have to try them all.
5406 */
3273699d 5407 (void) consumer_flush_channel(socket, buf_reg_chan->consumer_key);
ce34fcd0
MD
5408 }
5409
5410 ust_session_reg = reg->registry->reg.ust;
5411 /* Push metadata. */
5412 (void) push_metadata(ust_session_reg, usess->consumer);
5413 }
ce34fcd0
MD
5414 break;
5415 }
5416 case LTTNG_BUFFER_PER_PID:
5417 {
5418 struct ust_app_session *ua_sess;
5419 struct lttng_ht_iter iter;
5420 struct ust_app *app;
5421
5422 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5423 ua_sess = lookup_session_by_app(usess, app);
5424 if (ua_sess == NULL) {
5425 continue;
5426 }
5427 (void) ust_app_flush_app_session(app, ua_sess);
5428 }
5429 break;
5430 }
5431 default:
99b1411c 5432 ret = -1;
ce34fcd0
MD
5433 assert(0);
5434 break;
c4b88406 5435 }
c4b88406 5436
7db205b5 5437 rcu_read_unlock();
840cb59c 5438 health_code_update();
c4b88406 5439 return ret;
8be98f9a
MD
5440}
5441
0dd01979
MD
5442static
5443int ust_app_clear_quiescent_app_session(struct ust_app *app,
5444 struct ust_app_session *ua_sess)
5445{
5446 int ret = 0;
5447 struct lttng_ht_iter iter;
5448 struct ust_app_channel *ua_chan;
5449 struct consumer_socket *socket;
5450
5451 DBG("Clearing stream quiescent state for ust app pid %d", app->pid);
5452
5453 rcu_read_lock();
5454
5455 if (!app->compatible) {
5456 goto end_not_compatible;
5457 }
5458
5459 pthread_mutex_lock(&ua_sess->lock);
5460
5461 if (ua_sess->deleted) {
5462 goto end_unlock;
5463 }
5464
5465 health_code_update();
5466
5467 socket = consumer_find_socket_by_bitness(app->bits_per_long,
5468 ua_sess->consumer);
5469 if (!socket) {
5470 ERR("Failed to find consumer (%" PRIu32 ") socket",
5471 app->bits_per_long);
5472 ret = -1;
5473 goto end_unlock;
5474 }
5475
5476 /* Clear quiescent state. */
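	/*
	 * As with buffer flushing, only per-PID buffers are handled here;
	 * per-UID buffers are cleared once per registry by
	 * ust_app_clear_quiescent_session().
	 */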
5477 switch (ua_sess->buffer_type) {
5478 case LTTNG_BUFFER_PER_PID:
5479 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter,
5480 ua_chan, node.node) {
5481 health_code_update();
5482 ret = consumer_clear_quiescent_channel(socket,
5483 ua_chan->key);
5484 if (ret) {
5485 ERR("Error clearing quiescent state for consumer channel");
5486 ret = -1;
5487 continue;
5488 }
5489 }
5490 break;
5491 case LTTNG_BUFFER_PER_UID:
5492 default:
5493 assert(0);
5494 ret = -1;
5495 break;
5496 }
5497
5498 health_code_update();
5499
5500end_unlock:
5501 pthread_mutex_unlock(&ua_sess->lock);
5502
5503end_not_compatible:
5504 rcu_read_unlock();
5505 health_code_update();
5506 return ret;
5507}
5508
5509/*
5510 * Clear quiescent state in each stream for all applications for a
5511 * specific UST session.
5512 * Called with UST session lock held.
5513 */
5514static
5515int ust_app_clear_quiescent_session(struct ltt_ust_session *usess)
5516
5517{
5518 int ret = 0;
5519
5520 DBG("Clearing stream quiescent state for all ust apps");
5521
5522 rcu_read_lock();
5523
5524 switch (usess->buffer_type) {
5525 case LTTNG_BUFFER_PER_UID:
5526 {
5527 struct lttng_ht_iter iter;
5528 struct buffer_reg_uid *reg;
5529
5530 /*
5531 * Clear quiescent for all per UID buffers associated to
5532 * that session.
5533 */
5534 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
5535 struct consumer_socket *socket;
3273699d 5536 struct buffer_reg_channel *buf_reg_chan;
0dd01979
MD
5537
5538 /* Get associated consumer socket.*/
5539 socket = consumer_find_socket_by_bitness(
5540 reg->bits_per_long, usess->consumer);
5541 if (!socket) {
5542 /*
5543 * Ignore request if no consumer is found for
5544 * the session.
5545 */
5546 continue;
5547 }
5548
5549 cds_lfht_for_each_entry(reg->registry->channels->ht,
3273699d 5550 &iter.iter, buf_reg_chan, node.node) {
0dd01979
MD
5551 /*
5552 * The following call will print error values so
5553 * the return code is of little importance
5554 * because whatever happens, we have to try them
5555 * all.
5556 */
5557 (void) consumer_clear_quiescent_channel(socket,
3273699d 5558 buf_reg_chan->consumer_key);
0dd01979
MD
5559 }
5560 }
5561 break;
5562 }
5563 case LTTNG_BUFFER_PER_PID:
5564 {
5565 struct ust_app_session *ua_sess;
5566 struct lttng_ht_iter iter;
5567 struct ust_app *app;
5568
5569 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app,
5570 pid_n.node) {
5571 ua_sess = lookup_session_by_app(usess, app);
5572 if (ua_sess == NULL) {
5573 continue;
5574 }
5575 (void) ust_app_clear_quiescent_app_session(app,
5576 ua_sess);
5577 }
5578 break;
5579 }
5580 default:
5581 ret = -1;
5582 assert(0);
5583 break;
5584 }
5585
5586 rcu_read_unlock();
5587 health_code_update();
5588 return ret;
5589}
5590
84cd17c6
MD
5591/*
5592 * Destroy a specific UST session in apps.
5593 */
3353de95 5594static int destroy_trace(struct ltt_ust_session *usess, struct ust_app *app)
84cd17c6 5595{
ffe60014 5596 int ret;
84cd17c6 5597 struct ust_app_session *ua_sess;
bec39940 5598 struct lttng_ht_iter iter;
d9bf3ca4 5599 struct lttng_ht_node_u64 *node;
84cd17c6 5600
852d0037 5601 DBG("Destroy tracing for ust app pid %d", app->pid);
84cd17c6
MD
5602
5603 rcu_read_lock();
5604
e0c7ec2b
DG
5605 if (!app->compatible) {
5606 goto end;
5607 }
5608
84cd17c6 5609 __lookup_session_by_app(usess, app, &iter);
d9bf3ca4 5610 node = lttng_ht_iter_get_node_u64(&iter);
84cd17c6 5611 if (node == NULL) {
d42f20df
DG
5612 /* Session is being or is deleted. */
5613 goto end;
84cd17c6
MD
5614 }
5615 ua_sess = caa_container_of(node, struct ust_app_session, node);
c4a1715b 5616
840cb59c 5617 health_code_update();
d0b96690 5618 destroy_app_session(app, ua_sess);
84cd17c6 5619
840cb59c 5620 health_code_update();
7db205b5 5621
84cd17c6 5622 /* Quiescent wait after stopping trace */
fb45065e 5623 pthread_mutex_lock(&app->sock_lock);
b623cb6a 5624 ret = lttng_ust_ctl_wait_quiescent(app->sock);
fb45065e 5625 pthread_mutex_unlock(&app->sock_lock);
569744c5
JR
5626 if (ret < 0) {
5627 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
5628 DBG3("UST app wait quiescent failed. Application is dead: pid = %d, sock = %d",
5629 app->pid, app->sock);
5630 } else if (ret == -EAGAIN) {
5631 WARN("UST app wait quiescent failed. Communication time out: pid = %d, sock = %d",
5632 app->pid, app->sock);
5633 } else {
5634 ERR("UST app wait quiescent failed with ret %d: pid = %d, sock = %d",
5635 ret, app->pid, app->sock);
5636 }
ffe60014 5637 }
e0c7ec2b
DG
5638end:
5639 rcu_read_unlock();
840cb59c 5640 health_code_update();
84cd17c6 5641 return 0;
84cd17c6
MD
5642}
5643
5b4a0ec0
DG
5644/*
5645 * Start tracing for the UST session.
5646 */
421cb601
DG
5647int ust_app_start_trace_all(struct ltt_ust_session *usess)
5648{
bec39940 5649 struct lttng_ht_iter iter;
421cb601 5650 struct ust_app *app;
48842b30 5651
421cb601
DG
5652 DBG("Starting all UST traces");
5653
bb2452c8
MD
5654 /*
5655 * Even though the start trace might fail, flag this session active so
5656 * other applications coming in are started by default.
5657 */
5658 usess->active = 1;
5659
421cb601 5660 rcu_read_lock();
421cb601 5661
0dd01979
MD
5662 /*
5663 * In a start-stop-start use-case, we need to clear the quiescent state
5664 * of each channel set by the prior stop command, thus ensuring that a
5665 * following stop or destroy is sure to grab a timestamp_end near those
5666 * operations, even if the packet is empty.
5667 */
5668 (void) ust_app_clear_quiescent_session(usess);
5669
0498a00c
MD
5670 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5671 ust_app_global_update(usess, app);
5672 }
5673
48842b30
DG
5674 rcu_read_unlock();
5675
5676 return 0;
5677}
487cf67c 5678
8be98f9a
MD
5679/*
5680 * Stop tracing for the UST session.
ce34fcd0 5681 * Called with UST session lock held.
8be98f9a
MD
5682 */
5683int ust_app_stop_trace_all(struct ltt_ust_session *usess)
5684{
5685 int ret = 0;
bec39940 5686 struct lttng_ht_iter iter;
8be98f9a
MD
5687 struct ust_app *app;
5688
5689 DBG("Stopping all UST traces");
5690
bb2452c8
MD
5691 /*
5692 * Even though the stop trace might fail, flag this session inactive so
5693 * other applications coming in are not started by default.
5694 */
5695 usess->active = 0;
5696
8be98f9a
MD
5697 rcu_read_lock();
5698
b34cbebf
MD
5699 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5700 ret = ust_app_stop_trace(usess, app);
5701 if (ret < 0) {
5702 /* Continue to next apps even on error */
5703 continue;
5704 }
5705 }
5706
ce34fcd0 5707 (void) ust_app_flush_session(usess);
8be98f9a
MD
5708
5709 rcu_read_unlock();
5710
5711 return 0;
5712}
5713
84cd17c6
MD
5714/*
5715 * Destroy app UST session.
5716 */
5717int ust_app_destroy_trace_all(struct ltt_ust_session *usess)
5718{
5719 int ret = 0;
bec39940 5720 struct lttng_ht_iter iter;
84cd17c6
MD
5721 struct ust_app *app;
5722
5723 DBG("Destroy all UST traces");
5724
5725 rcu_read_lock();
5726
852d0037 5727 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3353de95 5728 ret = destroy_trace(usess, app);
84cd17c6
MD
5729 if (ret < 0) {
5730 /* Continue to next apps even on error */
5731 continue;
5732 }
5733 }
5734
5735 rcu_read_unlock();
5736
5737 return 0;
5738}
5739
88e3c2f5 5740/* The ua_sess lock must be held by the caller. */
a9ad0c8f 5741static
88e3c2f5
JG
5742int find_or_create_ust_app_channel(
5743 struct ltt_ust_session *usess,
5744 struct ust_app_session *ua_sess,
5745 struct ust_app *app,
5746 struct ltt_ust_channel *uchan,
5747 struct ust_app_channel **ua_chan)
487cf67c 5748{
55c54cce 5749 int ret = 0;
88e3c2f5
JG
5750 struct lttng_ht_iter iter;
5751 struct lttng_ht_node_str *ua_chan_node;
5752
5753 lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &iter);
5754 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
5755 if (ua_chan_node) {
5756 *ua_chan = caa_container_of(ua_chan_node,
5757 struct ust_app_channel, node);
5758 goto end;
5759 }
5760
5761 ret = ust_app_channel_create(usess, ua_sess, uchan, app, ua_chan);
5762 if (ret) {
5763 goto end;
5764 }
5765end:
5766 return ret;
5767}
5768
5769static
5770int ust_app_channel_synchronize_event(struct ust_app_channel *ua_chan,
5771 struct ltt_ust_event *uevent, struct ust_app_session *ua_sess,
5772 struct ust_app *app)
5773{
5774 int ret = 0;
5775 struct ust_app_event *ua_event = NULL;
5776
5777 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
1083b49a
JG
5778 uevent->filter,
5779 (enum lttng_ust_abi_loglevel_type)
5780 uevent->attr.loglevel_type,
5781 uevent->attr.loglevel, uevent->exclusion);
88e3c2f5
JG
5782 if (!ua_event) {
5783 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
5784 if (ret < 0) {
5785 goto end;
5786 }
5787 } else {
5788 if (ua_event->enabled != uevent->enabled) {
5789 ret = uevent->enabled ?
5790 enable_ust_app_event(ua_sess, ua_event, app) :
5791 disable_ust_app_event(ua_sess, ua_event, app);
5792 }
5793 }
5794
5795end:
5796 return ret;
5797}
5798
993578ff
JR
5799/* Called with RCU read-side lock held. */
5800static
5801void ust_app_synchronize_event_notifier_rules(struct ust_app *app)
5802{
5803 int ret = 0;
5804 enum lttng_error_code ret_code;
5805 enum lttng_trigger_status t_status;
5806 struct lttng_ht_iter app_trigger_iter;
5807 struct lttng_triggers *triggers = NULL;
5808 struct ust_app_event_notifier_rule *event_notifier_rule;
5809 unsigned int count, i;
5810
27a3be48
MD
5811 if (!ust_app_supports_notifiers(app)) {
5812 goto end;
5813 }
5814
993578ff
JR
5815 /*
5816 * Currently, registering or unregistering a trigger with an
5817 * event rule condition causes a full synchronization of the event
5818 * notifiers.
5819 *
5820 * The first step attempts to add an event notifier for all registered
5821 * triggers that apply to the user space tracers. Then, the
5822 * application's event notifier rules are all checked against the list
5823 * of registered triggers. Any event notifier that doesn't have a
5824 * matching trigger can be assumed to have been disabled.
5825 *
5826 * All of this is inefficient, but is put in place to get the feature
5827 * rolling as it is simpler at this moment. It will be optimized Soon™
5828 * to allow the state of enabled
5829 * event notifiers to be synchronized in a piece-wise way.
5830 */
5831
5832 /* Get all triggers using uid 0 (root) */
5833 ret_code = notification_thread_command_list_triggers(
412d7227 5834 the_notification_thread_handle, 0, &triggers);
993578ff 5835 if (ret_code != LTTNG_OK) {
993578ff
JR
5836 goto end;
5837 }
5838
5839 assert(triggers);
5840
5841 t_status = lttng_triggers_get_count(triggers, &count);
5842 if (t_status != LTTNG_TRIGGER_STATUS_OK) {
993578ff
JR
5843 goto end;
5844 }
5845
5846 for (i = 0; i < count; i++) {
5847 struct lttng_condition *condition;
5848 struct lttng_event_rule *event_rule;
5849 struct lttng_trigger *trigger;
5850 const struct ust_app_event_notifier_rule *looked_up_event_notifier_rule;
5851 enum lttng_condition_status condition_status;
5852 uint64_t token;
5853
5854 trigger = lttng_triggers_borrow_mutable_at_index(triggers, i);
5855 assert(trigger);
5856
5857 token = lttng_trigger_get_tracer_token(trigger);
5858 condition = lttng_trigger_get_condition(trigger);
5859
8dbb86b8
JR
5860 if (lttng_condition_get_type(condition) !=
5861 LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES) {
993578ff
JR
5862 /* Does not apply */
5863 continue;
5864 }
5865
8dbb86b8
JR
5866 condition_status =
5867 lttng_condition_event_rule_matches_borrow_rule_mutable(
5868 condition, &event_rule);
993578ff
JR
5869 assert(condition_status == LTTNG_CONDITION_STATUS_OK);
5870
5871 if (lttng_event_rule_get_domain_type(event_rule) == LTTNG_DOMAIN_KERNEL) {
5872 /* Skip kernel related triggers. */
5873 continue;
5874 }
5875
5876 /*
5877 * Find or create the associated token event rule. The caller
5878 * holds the RCU read lock, so this is safe to call without
5879 * explicitly acquiring it here.
5880 */
5881 looked_up_event_notifier_rule = find_ust_app_event_notifier_rule(
5882 app->token_to_event_notifier_rule_ht, token);
5883 if (!looked_up_event_notifier_rule) {
267d66aa 5884 ret = create_ust_app_event_notifier_rule(trigger, app);
993578ff
JR
5885 if (ret < 0) {
5886 goto end;
5887 }
5888 }
5889 }
5890
5891 rcu_read_lock();
5892 /* Remove all unknown event sources from the app. */
5893 cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht,
5894 &app_trigger_iter.iter, event_notifier_rule,
5895 node.node) {
5896 const uint64_t app_token = event_notifier_rule->token;
5897 bool found = false;
5898
5899 /*
5900 * Check if the app event trigger still exists on the
5901 * notification side.
5902 */
5903 for (i = 0; i < count; i++) {
5904 uint64_t notification_thread_token;
5905 const struct lttng_trigger *trigger =
5906 lttng_triggers_get_at_index(
5907 triggers, i);
5908
5909 assert(trigger);
5910
5911 notification_thread_token =
5912 lttng_trigger_get_tracer_token(trigger);
5913
5914 if (notification_thread_token == app_token) {
5915 found = true;
5916 break;
5917 }
5918 }
5919
5920 if (found) {
5921 /* Still valid. */
5922 continue;
5923 }
5924
5925 /*
5926 * This trigger was unregistered, disable it on the tracer's
5927 * side.
5928 */
5929 ret = lttng_ht_del(app->token_to_event_notifier_rule_ht,
5930 &app_trigger_iter);
5931 assert(ret == 0);
5932
5933 /* Callee logs errors. */
5934 (void) disable_ust_object(app, event_notifier_rule->obj);
5935
5936 delete_ust_app_event_notifier_rule(
5937 app->sock, event_notifier_rule, app);
5938 }
5939
5940 rcu_read_unlock();
5941
5942end:
5943 lttng_triggers_destroy(triggers);
5944 return;
5945}
5946
88e3c2f5 5947/*
a84d1024 5948 * RCU read lock must be held by the caller.
88e3c2f5
JG
5949 */
5950static
a84d1024
FD
5951void ust_app_synchronize_all_channels(struct ltt_ust_session *usess,
5952 struct ust_app_session *ua_sess,
88e3c2f5
JG
5953 struct ust_app *app)
5954{
5955 int ret = 0;
5956 struct cds_lfht_iter uchan_iter;
5957 struct ltt_ust_channel *uchan;
1f3580c7 5958
a84d1024 5959 assert(usess);
3d8ca23b 5960 assert(ua_sess);
a84d1024 5961 assert(app);
ef67c072 5962
88e3c2f5
JG
5963 cds_lfht_for_each_entry(usess->domain_global.channels->ht, &uchan_iter,
5964 uchan, node.node) {
5965 struct ust_app_channel *ua_chan;
5966 struct cds_lfht_iter uevent_iter;
5967 struct ltt_ust_event *uevent;
487cf67c 5968
31746f93 5969 /*
88e3c2f5
JG
5970 * Search for a matching ust_app_channel. If none is found,
5971 * create it. Creating the channel will cause the ua_chan
5972 * structure to be allocated, the channel buffers to be
5973 * allocated (if necessary) and sent to the application, and
5974 * all enabled contexts will be added to the channel.
31746f93 5975 */
f3db82be 5976 ret = find_or_create_ust_app_channel(usess, ua_sess,
88e3c2f5
JG
5977 app, uchan, &ua_chan);
5978 if (ret) {
5979 /* Tracer is probably gone or ENOMEM. */
a84d1024 5980 goto end;
727d5404
DG
5981 }
5982
88e3c2f5
JG
5983 if (!ua_chan) {
5984 /* ua_chan will be NULL for the metadata channel */
5985 continue;
5986 }
727d5404 5987
88e3c2f5 5988 cds_lfht_for_each_entry(uchan->events->ht, &uevent_iter, uevent,
bec39940 5989 node.node) {
88e3c2f5
JG
5990 ret = ust_app_channel_synchronize_event(ua_chan,
5991 uevent, ua_sess, app);
5992 if (ret) {
a84d1024 5993 goto end;
487cf67c 5994 }
36dc12cc 5995 }
d0b96690 5996
88e3c2f5
JG
5997 if (ua_chan->enabled != uchan->enabled) {
5998 ret = uchan->enabled ?
5999 enable_ust_app_channel(ua_sess, uchan, app) :
6000 disable_ust_app_channel(ua_sess, ua_chan, app);
6001 if (ret) {
a84d1024 6002 goto end;
88e3c2f5
JG
6003 }
6004 }
36dc12cc 6005 }
a84d1024
FD
6006end:
6007 return;
6008}
6009
6010/*
6011 * The caller must ensure that the application is compatible and is tracked
6012 * by the process attribute trackers.
6013 */
6014static
6015void ust_app_synchronize(struct ltt_ust_session *usess,
6016 struct ust_app *app)
6017{
6018 int ret = 0;
6019 struct ust_app_session *ua_sess = NULL;
6020
6021 /*
6022 * The application's configuration should only be synchronized for
6023 * active sessions.
6024 */
6025 assert(usess->active);
6026
6027 ret = find_or_create_ust_app_session(usess, app, &ua_sess, NULL);
6028 if (ret < 0) {
6029 /* Tracer is probably gone or ENOMEM. */
1549157c
FD
6030 if (ua_sess) {
6031 destroy_app_session(app, ua_sess);
6032 }
6033 goto end;
a84d1024
FD
6034 }
6035 assert(ua_sess);
6036
6037 pthread_mutex_lock(&ua_sess->lock);
6038 if (ua_sess->deleted) {
1549157c 6039 goto deleted_session;
a84d1024
FD
6040 }
6041
6042 rcu_read_lock();
6043
6044 ust_app_synchronize_all_channels(usess, ua_sess, app);
ef67c072
JG
6045
6046 /*
6047 * Create the metadata for the application. This returns gracefully if a
6048 * metadata was already set for the session.
6049 *
6050 * The metadata channel must be created after the data channels as the
6051 * consumer daemon assumes this ordering. When interacting with a relay
6052 * daemon, the consumer will use this assumption to send the
6053 * "STREAMS_SENT" message to the relay daemon.
6054 */
6055 ret = create_ust_app_metadata(ua_sess, app, usess->consumer);
6056 if (ret < 0) {
1549157c
FD
6057 ERR("Metadata creation failed for app sock %d for session id %" PRIu64,
6058 app->sock, usess->id);
ef67c072
JG
6059 }
6060
88e3c2f5 6061 rcu_read_unlock();
0498a00c 6062
1549157c 6063deleted_session:
d0b96690 6064 pthread_mutex_unlock(&ua_sess->lock);
1549157c 6065end:
487cf67c
DG
6066 return;
6067}
55cc08a6 6068
a9ad0c8f
MD
6069static
6070void ust_app_global_destroy(struct ltt_ust_session *usess, struct ust_app *app)
6071{
6072 struct ust_app_session *ua_sess;
6073
6074 ua_sess = lookup_session_by_app(usess, app);
6075 if (ua_sess == NULL) {
6076 return;
6077 }
6078 destroy_app_session(app, ua_sess);
6079}
6080
6081/*
6082 * Add channels/events from UST global domain to registered apps at sock.
6083 *
6084 * Called with session lock held.
6085 * Called with RCU read-side lock held.
6086 */
6087void ust_app_global_update(struct ltt_ust_session *usess, struct ust_app *app)
6088{
6089 assert(usess);
88e3c2f5 6090 assert(usess->active);
a9ad0c8f
MD
6091
6092 DBG2("UST app global update for app sock %d for session id %" PRIu64,
6093 app->sock, usess->id);
6094
6095 if (!app->compatible) {
6096 return;
6097 }
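	/*
	 * The application is synchronized and traced only if its vPID, vUID and
	 * vGID all pass the session's process attribute trackers; otherwise any
	 * existing app session for this session is torn down.
	 */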
159b042f
JG
6098 if (trace_ust_id_tracker_lookup(LTTNG_PROCESS_ATTR_VIRTUAL_PROCESS_ID,
6099 usess, app->pid) &&
55c9e7ca 6100 trace_ust_id_tracker_lookup(
159b042f
JG
6101 LTTNG_PROCESS_ATTR_VIRTUAL_USER_ID,
6102 usess, app->uid) &&
55c9e7ca 6103 trace_ust_id_tracker_lookup(
159b042f
JG
6104 LTTNG_PROCESS_ATTR_VIRTUAL_GROUP_ID,
6105 usess, app->gid)) {
88e3c2f5
JG
6106 /*
6107 * Synchronize the application's internal tracing configuration
6108 * and start tracing.
6109 */
6110 ust_app_synchronize(usess, app);
6111 ust_app_start_trace(usess, app);
a9ad0c8f
MD
6112 } else {
6113 ust_app_global_destroy(usess, app);
6114 }
6115}
6116
993578ff
JR
6117/*
6118 * Add all event notifiers to an application.
6119 *
6120 * Called with session lock held.
6121 * Called with RCU read-side lock held.
6122 */
6123void ust_app_global_update_event_notifier_rules(struct ust_app *app)
6124{
569744c5
JR
6125 DBG2("UST application global event notifier rules update: app = '%s', pid = %d",
6126 app->name, app->pid);
993578ff 6127
27a3be48 6128 if (!app->compatible || !ust_app_supports_notifiers(app)) {
993578ff
JR
6129 return;
6130 }
6131
6132 if (app->event_notifier_group.object == NULL) {
569744c5
JR
6133 WARN("UST app global update of event notifiers for app skipped since communication handle is null: app = '%s', pid = %d",
6134 app->name, app->pid);
993578ff
JR
6135 return;
6136 }
6137
6138 ust_app_synchronize_event_notifier_rules(app);
6139}
6140
a9ad0c8f
MD
6141/*
6142 * Called with session lock held.
6143 */
6144void ust_app_global_update_all(struct ltt_ust_session *usess)
6145{
6146 struct lttng_ht_iter iter;
6147 struct ust_app *app;
6148
6149 rcu_read_lock();
6150 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6151 ust_app_global_update(usess, app);
6152 }
6153 rcu_read_unlock();
6154}
6155
993578ff
JR
6156void ust_app_global_update_all_event_notifier_rules(void)
6157{
6158 struct lttng_ht_iter iter;
6159 struct ust_app *app;
6160
6161 rcu_read_lock();
6162 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6163 ust_app_global_update_event_notifier_rules(app);
6164 }
6165
6166 rcu_read_unlock();
6167}
6168
55cc08a6
DG
6169/*
6170 * Add context to a specific channel for global UST domain.
6171 */
6172int ust_app_add_ctx_channel_glb(struct ltt_ust_session *usess,
6173 struct ltt_ust_channel *uchan, struct ltt_ust_context *uctx)
6174{
6175 int ret = 0;
bec39940
DG
6176 struct lttng_ht_node_str *ua_chan_node;
6177 struct lttng_ht_iter iter, uiter;
55cc08a6
DG
6178 struct ust_app_channel *ua_chan = NULL;
6179 struct ust_app_session *ua_sess;
6180 struct ust_app *app;
6181
88e3c2f5 6182 assert(usess->active);
0498a00c 6183
55cc08a6 6184 rcu_read_lock();
852d0037 6185 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
e0c7ec2b
DG
6186 if (!app->compatible) {
6187 /*
6188 * TODO: In time, we should notify the caller of this error by
6189 * telling them that this is a version error.
6190 */
6191 continue;
6192 }
55cc08a6
DG
6193 ua_sess = lookup_session_by_app(usess, app);
6194 if (ua_sess == NULL) {
6195 continue;
6196 }
6197
d0b96690 6198 pthread_mutex_lock(&ua_sess->lock);
b161602a
MD
6199
6200 if (ua_sess->deleted) {
6201 pthread_mutex_unlock(&ua_sess->lock);
6202 continue;
6203 }
6204
55cc08a6 6205 /* Lookup channel in the ust app session */
bec39940
DG
6206 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
6207 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
55cc08a6 6208 if (ua_chan_node == NULL) {
d0b96690 6209 goto next_app;
55cc08a6
DG
6210 }
6211 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel,
6212 node);
c9edf082 6213 ret = create_ust_app_channel_context(ua_chan, &uctx->ctx, app);
55cc08a6 6214 if (ret < 0) {
d0b96690 6215 goto next_app;
55cc08a6 6216 }
d0b96690
DG
6217 next_app:
6218 pthread_mutex_unlock(&ua_sess->lock);
55cc08a6
DG
6219 }
6220
55cc08a6 6221 rcu_read_unlock();
76d45b40
DG
6222 return ret;
6223}
7f79d3a1 6224
d0b96690
DG
6225/*
6226 * Receive registration and populate the given msg structure.
6227 *
6228 * On success return 0 else a negative value returned by the ustctl call.
6229 */
6230int ust_app_recv_registration(int sock, struct ust_register_msg *msg)
6231{
6232 int ret;
6233 uint32_t pid, ppid, uid, gid;
6234
6235 assert(msg);
6236
b623cb6a 6237 ret = lttng_ust_ctl_recv_reg_msg(sock, &msg->type, &msg->major, &msg->minor,
d0b96690
DG
6238 &pid, &ppid, &uid, &gid,
6239 &msg->bits_per_long,
6240 &msg->uint8_t_alignment,
6241 &msg->uint16_t_alignment,
6242 &msg->uint32_t_alignment,
6243 &msg->uint64_t_alignment,
6244 &msg->long_alignment,
6245 &msg->byte_order,
6246 msg->name);
6247 if (ret < 0) {
6248 switch (-ret) {
6249 case EPIPE:
6250 case ECONNRESET:
6251 case LTTNG_UST_ERR_EXITING:
6252 DBG3("UST app recv reg message failed. Application died");
6253 break;
6254 case LTTNG_UST_ERR_UNSUP_MAJOR:
6255 ERR("UST app recv reg unsupported version %d.%d. Supporting %d.%d",
6256 msg->major, msg->minor, LTTNG_UST_ABI_MAJOR_VERSION,
6257 LTTNG_UST_ABI_MINOR_VERSION);
6258 break;
6259 default:
6260 ERR("UST app recv reg message failed with ret %d", ret);
6261 break;
6262 }
6263 goto error;
6264 }
6265 msg->pid = (pid_t) pid;
6266 msg->ppid = (pid_t) ppid;
6267 msg->uid = (uid_t) uid;
6268 msg->gid = (gid_t) gid;
6269
6270error:
6271 return ret;
6272}
6273
10b56aef
MD
6274/*
6275 * Return a ust app session object using the application object and the
6276 * session object descriptor as a key. If not found, NULL is returned.
6277 * A RCU read side lock MUST be acquired when calling this function.
6278*/
6279static struct ust_app_session *find_session_by_objd(struct ust_app *app,
6280 int objd)
6281{
6282 struct lttng_ht_node_ulong *node;
6283 struct lttng_ht_iter iter;
6284 struct ust_app_session *ua_sess = NULL;
6285
6286 assert(app);
6287
6288 lttng_ht_lookup(app->ust_sessions_objd, (void *)((unsigned long) objd), &iter);
6289 node = lttng_ht_iter_get_node_ulong(&iter);
6290 if (node == NULL) {
6291 DBG2("UST app session find by objd %d not found", objd);
6292 goto error;
6293 }
6294
6295 ua_sess = caa_container_of(node, struct ust_app_session, ust_objd_node);
6296
6297error:
6298 return ua_sess;
6299}
6300
d88aee68
DG
6301/*
6302 * Return a ust app channel object using the application object and the channel
6303 * object descriptor as a key. If not found, NULL is returned. A RCU read side
6304 * lock MUST be acquired before calling this function.
6305 */
d0b96690
DG
6306static struct ust_app_channel *find_channel_by_objd(struct ust_app *app,
6307 int objd)
6308{
6309 struct lttng_ht_node_ulong *node;
6310 struct lttng_ht_iter iter;
6311 struct ust_app_channel *ua_chan = NULL;
6312
6313 assert(app);
6314
6315 lttng_ht_lookup(app->ust_objd, (void *)((unsigned long) objd), &iter);
6316 node = lttng_ht_iter_get_node_ulong(&iter);
6317 if (node == NULL) {
6318 DBG2("UST app channel find by objd %d not found", objd);
6319 goto error;
6320 }
6321
6322 ua_chan = caa_container_of(node, struct ust_app_channel, ust_objd_node);
6323
6324error:
6325 return ua_chan;
6326}
6327
fefafa36
MD
6328/*
6329 * Fixup legacy context fields for comparison:
6330 * - legacy array becomes array_nestable,
6331 * - legacy struct becomes struct_nestable,
6332 * - legacy variant becomes variant_nestable,
6333 * legacy sequences are not emitted in LTTng-UST contexts.
6334 */
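/*
 * The fixup is done in two passes: a first pass over the fields counts how
 * many entries the converted array needs (a legacy array expands to two
 * fields) and records whether any legacy type is present at all; a second
 * pass fills a newly allocated array. When no legacy type is found, the
 * original field array is returned unmodified.
 */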
6335static int ust_app_fixup_legacy_context_fields(size_t *_nr_fields,
6336 struct lttng_ust_ctl_field **_fields)
6337{
6338 struct lttng_ust_ctl_field *fields = *_fields, *new_fields = NULL;
6339 size_t nr_fields = *_nr_fields, new_nr_fields = 0, i, j;
6340 bool found = false;
6341 int ret = 0;
6342
6343 for (i = 0; i < nr_fields; i++) {
6344 const struct lttng_ust_ctl_field *field = &fields[i];
6345
6346 switch (field->type.atype) {
6347 case lttng_ust_ctl_atype_sequence:
6348 ERR("Unexpected legacy sequence context.");
6349 ret = -EINVAL;
6350 goto end;
6351 case lttng_ust_ctl_atype_array:
6352 switch (field->type.u.legacy.array.elem_type.atype) {
6353 case lttng_ust_ctl_atype_integer:
6354 break;
6355 default:
6356 ERR("Unexpected legacy array element type in context.");
6357 ret = -EINVAL;
6358 goto end;
6359 }
6360 found = true;
6361 /* One field for array_nested, one field for elem type. */
6362 new_nr_fields += 2;
6363 break;
6364
6365 case lttng_ust_ctl_atype_struct: /* Fallthrough */
6366 case lttng_ust_ctl_atype_variant:
6367 found = true;
6368 new_nr_fields++;
6369 break;
6370 default:
6371 new_nr_fields++;
6372 break;
6373 }
6374 }
6375 if (!found) {
6376 goto end;
6377 }
6378 new_fields = (struct lttng_ust_ctl_field *) zmalloc(sizeof(*new_fields) * new_nr_fields);
6379 if (!new_fields) {
6380 ret = -ENOMEM;
6381 goto end;
6382 }
6383 for (i = 0, j = 0; i < nr_fields; i++, j++) {
6384 const struct lttng_ust_ctl_field *field = &fields[i];
6385 struct lttng_ust_ctl_field *new_field = &new_fields[j];
6386
6387 switch (field->type.atype) {
6388 case lttng_ust_ctl_atype_array:
6389 /* One field for array_nested, one field for elem type. */
6390 strncpy(new_field->name, field->name, LTTNG_UST_ABI_SYM_NAME_LEN - 1);
6391 new_field->type.atype = lttng_ust_ctl_atype_array_nestable;
6392 new_field->type.u.array_nestable.length = field->type.u.legacy.array.length;
6393 new_field->type.u.array_nestable.alignment = 0;
6394 new_field = &new_fields[++j]; /* elem type */
6395 new_field->type.atype = field->type.u.legacy.array.elem_type.atype;
6396 assert(new_field->type.atype == lttng_ust_ctl_atype_integer);
6397 new_field->type.u.integer = field->type.u.legacy.array.elem_type.u.basic.integer;
6398 break;
6399 case lttng_ust_ctl_atype_struct:
6400 strncpy(new_field->name, field->name, LTTNG_UST_ABI_SYM_NAME_LEN - 1);
6401 new_field->type.atype = lttng_ust_ctl_atype_struct_nestable;
6402 new_field->type.u.struct_nestable.nr_fields = field->type.u.legacy._struct.nr_fields;
6403 new_field->type.u.struct_nestable.alignment = 0;
6404 break;
6405 case lttng_ust_ctl_atype_variant:
6406 strncpy(new_field->name, field->name, LTTNG_UST_ABI_SYM_NAME_LEN - 1);
6407 new_field->type.atype = lttng_ust_ctl_atype_variant_nestable;
6408 new_field->type.u.variant_nestable.nr_choices = field->type.u.legacy.variant.nr_choices;
6409 strncpy(new_field->type.u.variant_nestable.tag_name,
6410 field->type.u.legacy.variant.tag_name,
6411 LTTNG_UST_ABI_SYM_NAME_LEN - 1);
6412 new_field->type.u.variant_nestable.alignment = 0;
6413 break;
6414 default:
6415 *new_field = *field;
6416 break;
6417 }
6418 }
6419 free(fields);
6420 *_fields = new_fields;
6421 *_nr_fields = new_nr_fields;
6422end:
6423 return ret;
6424}
6425
d88aee68
DG
6426/*
6427 * Reply to a register channel notification from an application on the notify
6428 * socket. The channel metadata is also created.
6429 *
6430 * The session UST registry lock is acquired in this function.
6431 *
6432 * On success 0 is returned else a negative value.
6433 */
8eede835 6434static int reply_ust_register_channel(int sock, int cobjd,
b623cb6a 6435 size_t nr_fields, struct lttng_ust_ctl_field *fields)
d0b96690
DG
6436{
6437 int ret, ret_code = 0;
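	/*
	 * ret_code is the status replied to the application through
	 * lttng_ust_ctl_reply_register_channel(); ret is this function's return
	 * value and reflects local failures (application being torn down,
	 * failure to send the reply).
	 */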
294e218e 6438 uint32_t chan_id;
7972aab2 6439 uint64_t chan_reg_key;
fefafa36 6440 enum lttng_ust_ctl_channel_header type = LTTNG_UST_CTL_CHANNEL_HEADER_UNKNOWN;
d0b96690
DG
6441 struct ust_app *app;
6442 struct ust_app_channel *ua_chan;
6443 struct ust_app_session *ua_sess;
7972aab2 6444 struct ust_registry_session *registry;
3273699d 6445 struct ust_registry_channel *ust_reg_chan;
d0b96690
DG
6446
6447 rcu_read_lock();
6448
6449 /* Lookup application. If not found, there is a code flow error. */
6450 app = find_app_by_notify_sock(sock);
d88aee68 6451 if (!app) {
fad1ed2f 6452 DBG("Application socket %d is being torn down. Abort event notify",
d88aee68 6453 sock);
a5c3235f 6454 ret = -1;
d88aee68
DG
6455 goto error_rcu_unlock;
6456 }
d0b96690 6457
4950b860 6458 /* Lookup channel by UST object descriptor. */
d0b96690 6459 ua_chan = find_channel_by_objd(app, cobjd);
4950b860 6460 if (!ua_chan) {
fad1ed2f 6461 DBG("Application channel is being torn down. Abort event notify");
4950b860
MD
6462 ret = 0;
6463 goto error_rcu_unlock;
6464 }
6465
d0b96690
DG
6466 assert(ua_chan->session);
6467 ua_sess = ua_chan->session;
d0b96690 6468
7972aab2
DG
6469 /* Get right session registry depending on the session buffer type. */
6470 registry = get_session_registry(ua_sess);
fad1ed2f
JR
6471 if (!registry) {
6472 DBG("Application session is being torn down. Abort event notify");
6473 ret = 0;
6474 goto error_rcu_unlock;
6475 }
45893984 6476
7972aab2
DG
6477 /* Depending on the buffer type, a different channel key is used. */
6478 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
6479 chan_reg_key = ua_chan->tracing_channel_id;
d0b96690 6480 } else {
7972aab2 6481 chan_reg_key = ua_chan->key;
d0b96690
DG
6482 }
6483
7972aab2
DG
6484 pthread_mutex_lock(&registry->lock);
6485
3273699d
FD
6486 ust_reg_chan = ust_registry_channel_find(registry, chan_reg_key);
6487 assert(ust_reg_chan);
7972aab2 6488
28dc0326
MD
6489 /* Channel id is set during the object creation. */
6490 chan_id = ust_reg_chan->chan_id;
6491
fefafa36
MD
6492 ret = ust_app_fixup_legacy_context_fields(&nr_fields, &fields);
6493 if (ret < 0) {
6494 ERR("Failed to register application channel: error fixing up legacy context fields: pid = %d, sock = %d",
6495 app->pid, app->sock);
6496 ret_code = -EINVAL;
6497 goto reply;
6498 }
3273699d 6499 if (!ust_reg_chan->register_done) {
294e218e
MD
6500 /*
6501 * TODO: eventually use the registry event count for
6502 * this channel to better guess header type for per-pid
6503 * buffers.
6504 */
b623cb6a 6505 type = LTTNG_UST_CTL_CHANNEL_HEADER_LARGE;
3273699d
FD
6506 ust_reg_chan->nr_ctx_fields = nr_fields;
6507 ust_reg_chan->ctx_fields = fields;
fad1ed2f 6508 fields = NULL;
3273699d 6509 ust_reg_chan->header_type = type;
d0b96690 6510 } else {
7972aab2 6511 /* Get current already assigned values. */
3273699d 6512 type = ust_reg_chan->header_type;
28dc0326
MD
6513 /*
6514 * Validate that the context fields match between
6515 * registry and the newly-registering application.
6516 */
6517 if (!match_lttng_ust_ctl_field_array(ust_reg_chan->ctx_fields,
6518 ust_reg_chan->nr_ctx_fields,
6519 fields, nr_fields)) {
6520 ERR("Refusing to register application channel due to context field mismatch: pid = %d, sock = %d",
6521 app->pid, app->sock);
6522 ret_code = -EINVAL;
6523 goto reply;
6524 }
d0b96690 6525 }
d0b96690
DG
6526
6527 /* Append to metadata */
3273699d
FD
6528 if (!ust_reg_chan->metadata_dumped) {
6529 ret_code = ust_metadata_channel_statedump(registry, ust_reg_chan);
d0b96690
DG
6530 if (ret_code) {
6531 ERR("Error appending channel metadata (errno = %d)", ret_code);
6532 goto reply;
6533 }
6534 }
6535
6536reply:
7972aab2 6537 DBG3("UST app replying to register channel key %" PRIu64
569744c5 6538 " with id %u, type = %d, ret = %d", chan_reg_key, chan_id, type,
7972aab2 6539 ret_code);
d0b96690 6540
b623cb6a 6541 ret = lttng_ust_ctl_reply_register_channel(sock, chan_id, type, ret_code);
d0b96690 6542 if (ret < 0) {
569744c5
JR
6543 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
6544 DBG3("UST app reply channel failed. Application died: pid = %d, sock = %d",
6545 app->pid, app->sock);
6546 } else if (ret == -EAGAIN) {
6547 WARN("UST app reply channel failed. Communication time out: pid = %d, sock = %d",
6548 app->pid, app->sock);
d0b96690 6549 } else {
569744c5
JR
6550 ERR("UST app reply channel failed with ret %d: pid = %d, sock = %d",
6551 ret, app->pid, app->sock);
d0b96690
DG
6552 }
6553 goto error;
6554 }
6555
7972aab2 6556 /* This channel registry registration is completed. */
3273699d 6557 ust_reg_chan->register_done = 1;
7972aab2 6558
d0b96690 6559error:
7972aab2 6560 pthread_mutex_unlock(&registry->lock);
d88aee68 6561error_rcu_unlock:
d0b96690 6562 rcu_read_unlock();
fad1ed2f 6563 free(fields);
d0b96690
DG
6564 return ret;
6565}
6566
d88aee68
DG
6567/*
6568 * Add event to the UST channel registry. When the event is added to the
6569 * registry, the metadata is also created. Once done, this replies to the
6570 * application with the appropriate error code.
6571 *
6572 * The session UST registry lock is acquired in the function.
6573 *
6574 * On success 0 is returned else a negative value.
6575 */
d0b96690 6576static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
b623cb6a 6577 char *sig, size_t nr_fields, struct lttng_ust_ctl_field *fields,
2106efa0 6578 int loglevel_value, char *model_emf_uri)
d0b96690
DG
6579{
6580 int ret, ret_code;
6581 uint32_t event_id = 0;
7972aab2 6582 uint64_t chan_reg_key;
d0b96690
DG
6583 struct ust_app *app;
6584 struct ust_app_channel *ua_chan;
6585 struct ust_app_session *ua_sess;
7972aab2 6586 struct ust_registry_session *registry;
d0b96690
DG
6587
6588 rcu_read_lock();
6589
6590 /* Lookup application. If not found, there is a code flow error. */
6591 app = find_app_by_notify_sock(sock);
d88aee68 6592 if (!app) {
fad1ed2f 6593 DBG("Application socket %d is being torn down. Abort event notify",
d88aee68 6594 sock);
a5c3235f 6595 ret = -1;
d88aee68
DG
6596 goto error_rcu_unlock;
6597 }
d0b96690 6598
4950b860 6599 /* Lookup channel by UST object descriptor. */
d0b96690 6600 ua_chan = find_channel_by_objd(app, cobjd);
4950b860 6601 if (!ua_chan) {
fad1ed2f 6602 DBG("Application channel is being torn down. Abort event notify");
4950b860
MD
6603 ret = 0;
6604 goto error_rcu_unlock;
6605 }
6606
d0b96690
DG
6607 assert(ua_chan->session);
6608 ua_sess = ua_chan->session;
6609
7972aab2 6610 registry = get_session_registry(ua_sess);
fad1ed2f
JR
6611 if (!registry) {
6612 DBG("Application session is being torn down. Abort event notify");
6613 ret = 0;
6614 goto error_rcu_unlock;
6615 }
7972aab2
DG
6616
6617 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
6618 chan_reg_key = ua_chan->tracing_channel_id;
6619 } else {
6620 chan_reg_key = ua_chan->key;
6621 }
6622
6623 pthread_mutex_lock(&registry->lock);
d0b96690 6624
d5d629b5
DG
6625 /*
6626 * From this point on, this call acquires the ownership of the sig, fields
6627 * and model_emf_uri, meaning any frees are done inside it if needed. These
6628 * three variables MUST NOT be read or written after this.
6629 */
7972aab2 6630 ret_code = ust_registry_create_event(registry, chan_reg_key,
2106efa0
PP
6631 sobjd, cobjd, name, sig, nr_fields, fields,
6632 loglevel_value, model_emf_uri, ua_sess->buffer_type,
6633 &event_id, app);
fad1ed2f
JR
6634 sig = NULL;
6635 fields = NULL;
6636 model_emf_uri = NULL;
d0b96690
DG
6637
6638 /*
6639 * The return value is returned to ustctl so in case of an error, the
6640 * application can be notified. In case of an error, it's important not to
6641 * return a negative error or else the application will get closed.
6642 */
b623cb6a 6643 ret = lttng_ust_ctl_reply_register_event(sock, event_id, ret_code);
d0b96690 6644 if (ret < 0) {
569744c5
JR
6645 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
6646 DBG3("UST app reply event failed. Application died: pid = %d, sock = %d.",
6647 app->pid, app->sock);
6648 } else if (ret == -EAGAIN) {
6649 WARN("UST app reply event failed. Communication time out: pid = %d, sock = %d",
6650 app->pid, app->sock);
d0b96690 6651 } else {
569744c5
JR
6652 ERR("UST app reply event failed with ret %d: pid = %d, sock = %d",
6653 ret, app->pid, app->sock);
d0b96690
DG
6654 }
6655 /*
6656 * No need to wipe the create event since the application socket will
6657 * get close on error hence cleaning up everything by itself.
6658 */
6659 goto error;
6660 }
6661
7972aab2
DG
6662 DBG3("UST registry event %s with id %" PRId32 " added successfully",
6663 name, event_id);
d88aee68 6664
d0b96690 6665error:
7972aab2 6666 pthread_mutex_unlock(&registry->lock);
d88aee68 6667error_rcu_unlock:
d0b96690 6668 rcu_read_unlock();
fad1ed2f
JR
6669 free(sig);
6670 free(fields);
6671 free(model_emf_uri);
d0b96690
DG
6672 return ret;
6673}
6674
10b56aef
MD
6675/*
6676 * Add enum to the UST session registry. Once done, this replies to the
6677 * application with the appropriate error code.
6678 *
6679 * The session UST registry lock is acquired within this function.
6680 *
6681 * On success 0 is returned else a negative value.
6682 */
6683static int add_enum_ust_registry(int sock, int sobjd, char *name,
b623cb6a 6684 struct lttng_ust_ctl_enum_entry *entries, size_t nr_entries)
10b56aef
MD
6685{
6686 int ret = 0, ret_code;
6687 struct ust_app *app;
6688 struct ust_app_session *ua_sess;
6689 struct ust_registry_session *registry;
6690 uint64_t enum_id = -1ULL;
6691
6692 rcu_read_lock();
6693
6694 /* Lookup application. If not found, there is a code flow error. */
6695 app = find_app_by_notify_sock(sock);
6696 if (!app) {
6697 /* Return an error since this is not an error */
6698 DBG("Application socket %d is being torn down. Aborting enum registration",
6699 sock);
6700 free(entries);
a5c3235f 6701 ret = -1;
10b56aef
MD
6702 goto error_rcu_unlock;
6703 }
6704
6705 /* Lookup session by UST object descriptor. */
6706 ua_sess = find_session_by_objd(app, sobjd);
6707 if (!ua_sess) {
6708		/* Do not return an error: the session is simply being torn down. */
fad1ed2f 6709 DBG("Application session is being torn down (session not found). Aborting enum registration.");
10b56aef
MD
6710 free(entries);
6711 goto error_rcu_unlock;
6712 }
6713
6714 registry = get_session_registry(ua_sess);
fad1ed2f
JR
6715 if (!registry) {
6716 DBG("Application session is being torn down (registry not found). Aborting enum registration.");
6717 free(entries);
6718 goto error_rcu_unlock;
6719 }
10b56aef
MD
6720
6721 pthread_mutex_lock(&registry->lock);
6722
6723 /*
6724 * From this point on, the callee acquires the ownership of
6725	 * entries. The variable entries MUST NOT be read or written after
6726	 * this call.
6727 */
6728 ret_code = ust_registry_create_or_find_enum(registry, sobjd, name,
6729 entries, nr_entries, &enum_id);
6730 entries = NULL;
6731
6732 /*
6733	 * The return value is sent back to ustctl so that, in case of an error,
6734	 * the application can be notified. It is important not to return a
6735	 * negative error or else the application will get closed.
6736 */
b623cb6a 6737 ret = lttng_ust_ctl_reply_register_enum(sock, enum_id, ret_code);
10b56aef 6738 if (ret < 0) {
569744c5
JR
6739 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
6740 DBG3("UST app reply enum failed. Application died: pid = %d, sock = %d",
6741 app->pid, app->sock);
6742 } else if (ret == -EAGAIN) {
6743 WARN("UST app reply enum failed. Communication time out: pid = %d, sock = %d",
6744 app->pid, app->sock);
10b56aef 6745 } else {
569744c5
JR
6746 ERR("UST app reply enum failed with ret %d: pid = %d, sock = %d",
6747 ret, app->pid, app->sock);
10b56aef
MD
6748 }
6749 /*
6750	 * No need to wipe the created enum since the application socket will
6751	 * get closed on error, hence cleaning up everything by itself.
6752 */
6753 goto error;
6754 }
6755
6756 DBG3("UST registry enum %s added successfully or already found", name);
6757
6758error:
6759 pthread_mutex_unlock(&registry->lock);
6760error_rcu_unlock:
6761 rcu_read_unlock();
6762 return ret;
6763}
6764
d88aee68
DG
6765/*
6766 * Handle application notification through the given notify socket.
6767 *
6768 * Return 0 on success or else a negative value.
6769 */
d0b96690
DG
6770int ust_app_recv_notify(int sock)
6771{
6772 int ret;
b623cb6a 6773 enum lttng_ust_ctl_notify_cmd cmd;
d0b96690
DG
6774
6775 DBG3("UST app receiving notify from sock %d", sock);
6776
b623cb6a 6777 ret = lttng_ust_ctl_recv_notify(sock, &cmd);
d0b96690 6778 if (ret < 0) {
569744c5
JR
6779 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
6780 DBG3("UST app recv notify failed. Application died: sock = %d",
6781 sock);
6782 } else if (ret == -EAGAIN) {
6783 WARN("UST app recv notify failed. Communication time out: sock = %d",
6784 sock);
d0b96690 6785 } else {
569744c5
JR
6786 ERR("UST app recv notify failed with ret %d: sock = %d",
6787 ret, sock);
d0b96690
DG
6788 }
6789 goto error;
6790 }
6791
6792 switch (cmd) {
b623cb6a 6793 case LTTNG_UST_CTL_NOTIFY_CMD_EVENT:
d0b96690 6794 {
2106efa0 6795 int sobjd, cobjd, loglevel_value;
fc4b93fa 6796 char name[LTTNG_UST_ABI_SYM_NAME_LEN], *sig, *model_emf_uri;
d0b96690 6797 size_t nr_fields;
b623cb6a 6798 struct lttng_ust_ctl_field *fields;
d0b96690
DG
6799
6800 DBG2("UST app ustctl register event received");
6801
b623cb6a 6802 ret = lttng_ust_ctl_recv_register_event(sock, &sobjd, &cobjd, name,
2106efa0
PP
6803 &loglevel_value, &sig, &nr_fields, &fields,
6804 &model_emf_uri);
d0b96690 6805 if (ret < 0) {
569744c5
JR
6806 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
6807 DBG3("UST app recv event failed. Application died: sock = %d",
6808 sock);
6809 } else if (ret == -EAGAIN) {
6810 WARN("UST app recv event failed. Communication time out: sock = %d",
6811 sock);
d0b96690 6812 } else {
569744c5
JR
6813 ERR("UST app recv event failed with ret %d: sock = %d",
6814 ret, sock);
d0b96690
DG
6815 }
6816 goto error;
6817 }
6818
d5d629b5
DG
6819 /*
6820		 * Add event to the UST registry coming from the notify socket. This
6821		 * call will free, if needed, the sig, fields and model_emf_uri. This
6822		 * code path loses the ownership of these variables and transfers them
6823		 * to that function.
6824 */
d0b96690 6825 ret = add_event_ust_registry(sock, sobjd, cobjd, name, sig, nr_fields,
2106efa0 6826 fields, loglevel_value, model_emf_uri);
d0b96690
DG
6827 if (ret < 0) {
6828 goto error;
6829 }
6830
6831 break;
6832 }
b623cb6a 6833 case LTTNG_UST_CTL_NOTIFY_CMD_CHANNEL:
d0b96690
DG
6834 {
6835 int sobjd, cobjd;
6836 size_t nr_fields;
b623cb6a 6837 struct lttng_ust_ctl_field *fields;
d0b96690
DG
6838
6839 DBG2("UST app ustctl register channel received");
6840
b623cb6a 6841 ret = lttng_ust_ctl_recv_register_channel(sock, &sobjd, &cobjd, &nr_fields,
d0b96690
DG
6842 &fields);
6843 if (ret < 0) {
569744c5
JR
6844 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
6845 DBG3("UST app recv channel failed. Application died: sock = %d",
6846 sock);
6847 } else if (ret == -EAGAIN) {
6848 WARN("UST app recv channel failed. Communication time out: sock = %d",
6849 sock);
d0b96690 6850 } else {
569744c5
JR
6851 ERR("UST app recv channel failed with ret %d: sock = %d)",
6852 ret, sock);
d0b96690
DG
6853 }
6854 goto error;
6855 }
6856
d5d629b5
DG
6857 /*
6858		 * The ownership of the fields is transferred to this function call,
6859		 * meaning that they will be freed if needed. After this, it is invalid
6860		 * to access or free fields.
6861 */
8eede835 6862 ret = reply_ust_register_channel(sock, cobjd, nr_fields,
d0b96690
DG
6863 fields);
6864 if (ret < 0) {
6865 goto error;
6866 }
6867
6868 break;
6869 }
b623cb6a 6870 case LTTNG_UST_CTL_NOTIFY_CMD_ENUM:
10b56aef
MD
6871 {
6872 int sobjd;
fc4b93fa 6873 char name[LTTNG_UST_ABI_SYM_NAME_LEN];
10b56aef 6874 size_t nr_entries;
b623cb6a 6875 struct lttng_ust_ctl_enum_entry *entries;
10b56aef
MD
6876
6877 DBG2("UST app ustctl register enum received");
6878
b623cb6a 6879 ret = lttng_ust_ctl_recv_register_enum(sock, &sobjd, name,
10b56aef
MD
6880 &entries, &nr_entries);
6881 if (ret < 0) {
569744c5
JR
6882 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
6883 DBG3("UST app recv enum failed. Application died: sock = %d",
6884 sock);
6885 } else if (ret == -EAGAIN) {
6886 WARN("UST app recv enum failed. Communication time out: sock = %d",
6887 sock);
10b56aef 6888 } else {
569744c5
JR
6889 ERR("UST app recv enum failed with ret %d: sock = %d",
6890 ret, sock);
10b56aef
MD
6891 }
6892 goto error;
6893 }
6894
6895 /* Callee assumes ownership of entries */
6896 ret = add_enum_ust_registry(sock, sobjd, name,
6897 entries, nr_entries);
6898 if (ret < 0) {
6899 goto error;
6900 }
6901
6902 break;
6903 }
d0b96690
DG
6904 default:
6905 /* Should NEVER happen. */
6906 assert(0);
6907 }
6908
6909error:
6910 return ret;
6911}
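
/*
 * Illustrative sketch, not part of this file: how a notify-socket poll loop
 * could drive ust_app_recv_notify() and fall back to
 * ust_app_notify_sock_unregister() on hang-up or error. The function name is
 * hypothetical and the LPOLL* flags are assumed to be lttng-tools' compat
 * poll constants.
 */
static void handle_notify_socket_activity(int sock, uint32_t revents)
{
	if (revents & (LPOLLHUP | LPOLLERR)) {
		/* The application hung up; defer the close through RCU. */
		ust_app_notify_sock_unregister(sock);
		return;
	}

	if (ust_app_recv_notify(sock) < 0) {
		/* A failed exchange is treated like a hang-up. */
		ust_app_notify_sock_unregister(sock);
	}
}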
d88aee68
DG
6912
6913/*
6914 * Once the notify socket hangs up, this is called. First, it tries to find the
6915 * corresponding application. On failure, the call_rcu to close the socket is
6916 * executed. If an application is found, it tries to delete it from the notify
6917 * socket hash table. Whatever the result, it proceeds to the call_rcu.
6918 *
6919 * Note that an object needs to be allocated here so on ENOMEM failure, the
6920 * call RCU is not done but the rest of the cleanup is.
6921 */
6922void ust_app_notify_sock_unregister(int sock)
6923{
6924 int err_enomem = 0;
6925 struct lttng_ht_iter iter;
6926 struct ust_app *app;
6927 struct ust_app_notify_sock_obj *obj;
6928
6929 assert(sock >= 0);
6930
6931 rcu_read_lock();
6932
6933 obj = zmalloc(sizeof(*obj));
6934 if (!obj) {
6935 /*
6936 * An ENOMEM is kind of uncool. If this strikes we continue the
6937 * procedure but the call_rcu will not be called. In this case, we
6938 * accept the fd leak rather than possibly creating an unsynchronized
6939 * state between threads.
6940 *
6941 * TODO: The notify object should be created once the notify socket is
6942		 * registered and stored independently of the ust app object. The
6943 * tricky part is to synchronize the teardown of the application and
6944 * this notify object. Let's keep that in mind so we can avoid this
6945 * kind of shenanigans with ENOMEM in the teardown path.
6946 */
6947 err_enomem = 1;
6948 } else {
6949 obj->fd = sock;
6950 }
6951
6952 DBG("UST app notify socket unregister %d", sock);
6953
6954 /*
6955 * Lookup application by notify socket. If this fails, this means that the
6956 * hash table delete has already been done by the application
6957 * unregistration process so we can safely close the notify socket in a
6958 * call RCU.
6959 */
6960 app = find_app_by_notify_sock(sock);
6961 if (!app) {
6962 goto close_socket;
6963 }
6964
6965 iter.iter.node = &app->notify_sock_n.node;
6966
6967 /*
6968	 * Whatever happens here, whether we fail or succeed, in both cases we
6969	 * have to close the socket after a grace period and then proceed to the
6970	 * call_rcu. If the deletion is successful, the application is no longer
6971	 * visible to other threads, and if it fails, it means it was already
6972	 * deleted from the hash table, so either way we just have to close the
6973	 * socket.
6974 */
6975 (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
6976
6977close_socket:
6978 rcu_read_unlock();
6979
6980 /*
6981	 * Close the socket after a grace period to avoid the socket being reused
6982	 * before the application object is freed, which could create a race
6983	 * between threads trying to add a unique entry to the global hash table.
6984 */
6985 if (!err_enomem) {
6986 call_rcu(&obj->head, close_notify_sock_rcu);
6987 }
6988}
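
/*
 * Illustrative sketch, not this file's actual close_notify_sock_rcu(): the
 * general shape of the callback that call_rcu() invokes after the grace
 * period. It recovers the container from the rcu_head, closes the file
 * descriptor and frees the object. The struct fields match the
 * ust_app_notify_sock_obj usage above; everything else is an assumption.
 */
static void close_notify_sock_rcu_sketch(struct rcu_head *head)
{
	struct ust_app_notify_sock_obj *obj =
			caa_container_of(head, struct ust_app_notify_sock_obj, head);

	/* Safe to close: no reader can look up this socket anymore. */
	if (close(obj->fd)) {
		PERROR("close notify sock %d", obj->fd);
	}
	free(obj);
}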
f45e313d
DG
6989
6990/*
6991 * Destroy a ust app data structure and free its memory.
6992 */
3faa1e3d 6993static void ust_app_destroy(struct ust_app *app)
f45e313d
DG
6994{
6995 if (!app) {
6996 return;
6997 }
6998
6999 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
7000}
6dc3064a
DG
7001
7002/*
7003 * Take a snapshot for a given UST session. The snapshot is sent to the given
7004 * output.
7005 *
9a654598 7006 * Returns LTTNG_OK on success or a LTTNG_ERR error code.
6dc3064a 7007 */
fb9a95c4
JG
7008enum lttng_error_code ust_app_snapshot_record(
7009 const struct ltt_ust_session *usess,
348a81dc 7010 const struct consumer_output *output, int wait,
d07ceecd 7011 uint64_t nb_packets_per_stream)
6dc3064a
DG
7012{
7013 int ret = 0;
9a654598 7014 enum lttng_error_code status = LTTNG_OK;
6dc3064a
DG
7015 struct lttng_ht_iter iter;
7016 struct ust_app *app;
affce97e 7017 char *trace_path = NULL;
6dc3064a
DG
7018
7019 assert(usess);
7020 assert(output);
7021
7022 rcu_read_lock();
7023
8c924c7b
MD
7024 switch (usess->buffer_type) {
7025 case LTTNG_BUFFER_PER_UID:
7026 {
7027 struct buffer_reg_uid *reg;
6dc3064a 7028
8c924c7b 7029 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
3273699d 7030 struct buffer_reg_channel *buf_reg_chan;
8c924c7b 7031 struct consumer_socket *socket;
3b967712 7032 char pathname[PATH_MAX];
5da88b0f 7033 size_t consumer_path_offset = 0;
6dc3064a 7034
2b269489
JR
7035 if (!reg->registry->reg.ust->metadata_key) {
7036 /* Skip since no metadata is present */
7037 continue;
7038 }
7039
8c924c7b
MD
7040 /* Get consumer socket to use to push the metadata.*/
7041 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
7042 usess->consumer);
7043 if (!socket) {
9a654598 7044 status = LTTNG_ERR_INVALID;
8c924c7b
MD
7045 goto error;
7046 }
6dc3064a 7047
8c924c7b
MD
7048 memset(pathname, 0, sizeof(pathname));
7049 ret = snprintf(pathname, sizeof(pathname),
e4061179 7050 DEFAULT_UST_TRACE_UID_PATH,
8c924c7b
MD
7051 reg->uid, reg->bits_per_long);
7052 if (ret < 0) {
7053 PERROR("snprintf snapshot path");
9a654598 7054 status = LTTNG_ERR_INVALID;
8c924c7b
MD
7055 goto error;
7056 }
affce97e
JG
7057			/* Free path allocated on previous iteration. */
7058 free(trace_path);
5da88b0f
MD
7059 trace_path = setup_channel_trace_path(usess->consumer, pathname,
7060 &consumer_path_offset);
3b967712
MD
7061 if (!trace_path) {
7062 status = LTTNG_ERR_INVALID;
7063 goto error;
7064 }
f3db82be 7065 /* Add the UST default trace dir to path. */
8c924c7b 7066 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
3273699d 7067 buf_reg_chan, node.node) {
9a654598 7068 status = consumer_snapshot_channel(socket,
3273699d 7069 buf_reg_chan->consumer_key,
e098433c 7070 output, 0, usess->uid,
5da88b0f 7071 usess->gid, &trace_path[consumer_path_offset], wait,
d2956687 7072 nb_packets_per_stream);
9a654598 7073 if (status != LTTNG_OK) {
8c924c7b
MD
7074 goto error;
7075 }
7076 }
9a654598 7077 status = consumer_snapshot_channel(socket,
68808f4e 7078 reg->registry->reg.ust->metadata_key, output, 1,
5da88b0f
MD
7079 usess->uid, usess->gid, &trace_path[consumer_path_offset],
7080 wait, 0);
9a654598 7081 if (status != LTTNG_OK) {
8c924c7b
MD
7082 goto error;
7083 }
af706bb7 7084 }
8c924c7b
MD
7085 break;
7086 }
7087 case LTTNG_BUFFER_PER_PID:
7088 {
7089 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
7090 struct consumer_socket *socket;
7091 struct lttng_ht_iter chan_iter;
7092 struct ust_app_channel *ua_chan;
7093 struct ust_app_session *ua_sess;
7094 struct ust_registry_session *registry;
3b967712 7095 char pathname[PATH_MAX];
5da88b0f 7096 size_t consumer_path_offset = 0;
8c924c7b
MD
7097
7098 ua_sess = lookup_session_by_app(usess, app);
7099 if (!ua_sess) {
7100 /* Session not associated with this app. */
7101 continue;
7102 }
af706bb7 7103
8c924c7b
MD
7104 /* Get the right consumer socket for the application. */
7105 socket = consumer_find_socket_by_bitness(app->bits_per_long,
348a81dc 7106 output);
8c924c7b 7107 if (!socket) {
9a654598 7108 status = LTTNG_ERR_INVALID;
5c786ded
JD
7109 goto error;
7110 }
7111
8c924c7b
MD
7112 /* Add the UST default trace dir to path. */
7113 memset(pathname, 0, sizeof(pathname));
e4061179 7114 ret = snprintf(pathname, sizeof(pathname), "%s",
8c924c7b 7115 ua_sess->path);
6dc3064a 7116 if (ret < 0) {
9a654598 7117 status = LTTNG_ERR_INVALID;
8c924c7b 7118 PERROR("snprintf snapshot path");
6dc3064a
DG
7119 goto error;
7120 }
affce97e
JG
7121			/* Free path allocated on previous iteration. */
7122 free(trace_path);
5da88b0f
MD
7123 trace_path = setup_channel_trace_path(usess->consumer, pathname,
7124 &consumer_path_offset);
3b967712
MD
7125 if (!trace_path) {
7126 status = LTTNG_ERR_INVALID;
7127 goto error;
7128 }
f3db82be 7129 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
8c924c7b 7130 ua_chan, node.node) {
9a654598 7131 status = consumer_snapshot_channel(socket,
470cc211 7132 ua_chan->key, output, 0,
ff588497
JR
7133 lttng_credentials_get_uid(&ua_sess->effective_credentials),
7134 lttng_credentials_get_gid(&ua_sess->effective_credentials),
5da88b0f 7135 &trace_path[consumer_path_offset], wait,
d2956687 7136 nb_packets_per_stream);
9a654598
JG
7137 switch (status) {
7138 case LTTNG_OK:
7139 break;
7140 case LTTNG_ERR_CHAN_NOT_FOUND:
7141 continue;
7142 default:
8c924c7b
MD
7143 goto error;
7144 }
7145 }
7146
7147 registry = get_session_registry(ua_sess);
fad1ed2f 7148 if (!registry) {
9bbfb88c
MD
7149 DBG("Application session is being torn down. Skip application.");
7150 continue;
fad1ed2f 7151 }
9a654598 7152 status = consumer_snapshot_channel(socket,
470cc211 7153 registry->metadata_key, output, 1,
ff588497
JR
7154 lttng_credentials_get_uid(&ua_sess->effective_credentials),
7155 lttng_credentials_get_gid(&ua_sess->effective_credentials),
5da88b0f 7156 &trace_path[consumer_path_offset], wait, 0);
9a654598
JG
7157 switch (status) {
7158 case LTTNG_OK:
7159 break;
7160 case LTTNG_ERR_CHAN_NOT_FOUND:
7161 continue;
7162 default:
8c924c7b
MD
7163 goto error;
7164 }
7165 }
7166 break;
7167 }
7168 default:
7169 assert(0);
7170 break;
6dc3064a
DG
7171 }
7172
7173error:
affce97e 7174 free(trace_path);
6dc3064a 7175 rcu_read_unlock();
9a654598 7176 return status;
6dc3064a 7177}
5c786ded
JD
7178
7179/*
d07ceecd 7180 * Return the size taken by one more packet per stream.
5c786ded 7181 */
fb9a95c4
JG
7182uint64_t ust_app_get_size_one_more_packet_per_stream(
7183 const struct ltt_ust_session *usess, uint64_t cur_nr_packets)
5c786ded 7184{
d07ceecd 7185 uint64_t tot_size = 0;
5c786ded
JD
7186 struct ust_app *app;
7187 struct lttng_ht_iter iter;
7188
7189 assert(usess);
7190
7191 switch (usess->buffer_type) {
7192 case LTTNG_BUFFER_PER_UID:
7193 {
7194 struct buffer_reg_uid *reg;
7195
7196 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
3273699d 7197 struct buffer_reg_channel *buf_reg_chan;
5c786ded 7198
b7064eaa 7199 rcu_read_lock();
5c786ded 7200 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
3273699d
FD
7201 buf_reg_chan, node.node) {
7202 if (cur_nr_packets >= buf_reg_chan->num_subbuf) {
d07ceecd
MD
7203 /*
7204				 * Don't take the channel into account if we
7205				 * already grabbed all its packets.
7206 */
7207 continue;
7208 }
3273699d 7209 tot_size += buf_reg_chan->subbuf_size * buf_reg_chan->stream_count;
5c786ded 7210 }
b7064eaa 7211 rcu_read_unlock();
5c786ded
JD
7212 }
7213 break;
7214 }
7215 case LTTNG_BUFFER_PER_PID:
7216 {
7217 rcu_read_lock();
7218 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
7219 struct ust_app_channel *ua_chan;
7220 struct ust_app_session *ua_sess;
7221 struct lttng_ht_iter chan_iter;
7222
7223 ua_sess = lookup_session_by_app(usess, app);
7224 if (!ua_sess) {
7225 /* Session not associated with this app. */
7226 continue;
7227 }
7228
7229 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
7230 ua_chan, node.node) {
d07ceecd
MD
7231 if (cur_nr_packets >= ua_chan->attr.num_subbuf) {
7232 /*
7233					 * Don't take the channel into account if we
7234					 * already grabbed all its packets.
7235 */
7236 continue;
7237 }
7238 tot_size += ua_chan->attr.subbuf_size * ua_chan->streams.count;
5c786ded
JD
7239 }
7240 }
7241 rcu_read_unlock();
7242 break;
7243 }
7244 default:
7245 assert(0);
7246 break;
7247 }
7248
d07ceecd 7249 return tot_size;
5c786ded 7250}
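
/*
 * Worked example (illustrative numbers, not from this file): with 4 streams
 * per channel and 512 kiB sub-buffers, allowing one more packet per stream
 * adds 4 * 512 kiB = 2 MiB to tot_size for that channel. Channels for which
 * cur_nr_packets already reaches num_subbuf add nothing, since all of their
 * sub-buffers are already accounted for.
 */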
fb83fe64
JD
7251
7252int ust_app_uid_get_channel_runtime_stats(uint64_t ust_session_id,
7253 struct cds_list_head *buffer_reg_uid_list,
7254 struct consumer_output *consumer, uint64_t uchan_id,
7255 int overwrite, uint64_t *discarded, uint64_t *lost)
7256{
7257 int ret;
7258 uint64_t consumer_chan_key;
7259
70dd8162
MD
7260 *discarded = 0;
7261 *lost = 0;
7262
fb83fe64 7263 ret = buffer_reg_uid_consumer_channel_key(
76604852 7264 buffer_reg_uid_list, uchan_id, &consumer_chan_key);
fb83fe64 7265 if (ret < 0) {
70dd8162
MD
7266 /* Not found */
7267 ret = 0;
fb83fe64
JD
7268 goto end;
7269 }
7270
7271 if (overwrite) {
7272 ret = consumer_get_lost_packets(ust_session_id,
7273 consumer_chan_key, consumer, lost);
7274 } else {
7275 ret = consumer_get_discarded_events(ust_session_id,
7276 consumer_chan_key, consumer, discarded);
7277 }
7278
7279end:
7280 return ret;
7281}
7282
7283int ust_app_pid_get_channel_runtime_stats(struct ltt_ust_session *usess,
7284 struct ltt_ust_channel *uchan,
7285 struct consumer_output *consumer, int overwrite,
7286 uint64_t *discarded, uint64_t *lost)
7287{
7288 int ret = 0;
7289 struct lttng_ht_iter iter;
7290 struct lttng_ht_node_str *ua_chan_node;
7291 struct ust_app *app;
7292 struct ust_app_session *ua_sess;
7293 struct ust_app_channel *ua_chan;
7294
70dd8162
MD
7295 *discarded = 0;
7296 *lost = 0;
7297
fb83fe64
JD
7298 rcu_read_lock();
7299 /*
70dd8162
MD
7300	 * Iterate over every registered application. Sum counters for
7301	 * all applications containing the requested session and channel.
fb83fe64
JD
7302 */
7303 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
7304 struct lttng_ht_iter uiter;
7305
7306 ua_sess = lookup_session_by_app(usess, app);
7307 if (ua_sess == NULL) {
7308 continue;
7309 }
7310
7311 /* Get channel */
ee022399 7312 lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
fb83fe64
JD
7313 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
7314 /* If the session is found for the app, the channel must be there */
7315 assert(ua_chan_node);
7316
7317 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
7318
7319 if (overwrite) {
70dd8162
MD
7320 uint64_t _lost;
7321
fb83fe64 7322 ret = consumer_get_lost_packets(usess->id, ua_chan->key,
70dd8162
MD
7323 consumer, &_lost);
7324 if (ret < 0) {
7325 break;
7326 }
7327 (*lost) += _lost;
fb83fe64 7328 } else {
70dd8162
MD
7329 uint64_t _discarded;
7330
fb83fe64 7331 ret = consumer_get_discarded_events(usess->id,
70dd8162
MD
7332 ua_chan->key, consumer, &_discarded);
7333 if (ret < 0) {
7334 break;
7335 }
7336 (*discarded) += _discarded;
fb83fe64 7337 }
fb83fe64
JD
7338 }
7339
fb83fe64
JD
7340 rcu_read_unlock();
7341 return ret;
7342}
c2561365
JD
7343
7344static
7345int ust_app_regenerate_statedump(struct ltt_ust_session *usess,
7346 struct ust_app *app)
7347{
7348 int ret = 0;
7349 struct ust_app_session *ua_sess;
7350
7351 DBG("Regenerating the metadata for ust app pid %d", app->pid);
7352
7353 rcu_read_lock();
7354
7355 ua_sess = lookup_session_by_app(usess, app);
7356 if (ua_sess == NULL) {
7357 /* The session is in teardown process. Ignore and continue. */
7358 goto end;
7359 }
7360
7361 pthread_mutex_lock(&ua_sess->lock);
7362
7363 if (ua_sess->deleted) {
7364 goto end_unlock;
7365 }
7366
7367 pthread_mutex_lock(&app->sock_lock);
b623cb6a 7368 ret = lttng_ust_ctl_regenerate_statedump(app->sock, ua_sess->handle);
c2561365
JD
7369 pthread_mutex_unlock(&app->sock_lock);
7370
7371end_unlock:
7372 pthread_mutex_unlock(&ua_sess->lock);
7373
7374end:
7375 rcu_read_unlock();
7376 health_code_update();
7377 return ret;
7378}
7379
7380/*
7381 * Regenerate the statedump for each app in the session.
7382 */
7383int ust_app_regenerate_statedump_all(struct ltt_ust_session *usess)
7384{
7385 int ret = 0;
7386 struct lttng_ht_iter iter;
7387 struct ust_app *app;
7388
7389 DBG("Regenerating the metadata for all UST apps");
7390
7391 rcu_read_lock();
7392
7393 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
7394 if (!app->compatible) {
7395 continue;
7396 }
7397
7398 ret = ust_app_regenerate_statedump(usess, app);
7399 if (ret < 0) {
7400 /* Continue to the next app even on error */
7401 continue;
7402 }
7403 }
7404
7405 rcu_read_unlock();
7406
7407 return 0;
7408}
5c408ad8
JD
7409
7410/*
7411 * Rotate all the channels of a session.
7412 *
6f6d3b69 7413 * Return LTTNG_OK on success or else an LTTng error code.
5c408ad8 7414 */
6f6d3b69 7415enum lttng_error_code ust_app_rotate_session(struct ltt_session *session)
5c408ad8 7416{
6f6d3b69
MD
7417 int ret;
7418 enum lttng_error_code cmd_ret = LTTNG_OK;
5c408ad8 7419 struct lttng_ht_iter iter;
3faa1e3d 7420 struct ust_app *app = NULL;
5c408ad8 7421 struct ltt_ust_session *usess = session->ust_session;
5c408ad8
JD
7422
7423 assert(usess);
7424
7425 rcu_read_lock();
7426
7427 switch (usess->buffer_type) {
7428 case LTTNG_BUFFER_PER_UID:
7429 {
7430 struct buffer_reg_uid *reg;
7431
7432 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
3273699d 7433 struct buffer_reg_channel *buf_reg_chan;
5c408ad8
JD
7434 struct consumer_socket *socket;
7435
7436 /* Get consumer socket to use to push the metadata.*/
7437 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
7438 usess->consumer);
7439 if (!socket) {
6f6d3b69 7440 cmd_ret = LTTNG_ERR_INVALID;
5c408ad8
JD
7441 goto error;
7442 }
7443
5c408ad8
JD
7444 /* Rotate the data channels. */
7445 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
3273699d 7446 buf_reg_chan, node.node) {
5c408ad8 7447 ret = consumer_rotate_channel(socket,
3273699d 7448 buf_reg_chan->consumer_key,
5c408ad8 7449 usess->uid, usess->gid,
d2956687
JG
7450 usess->consumer,
7451 /* is_metadata_channel */ false);
5c408ad8 7452 if (ret < 0) {
6f6d3b69 7453 cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
5c408ad8
JD
7454 goto error;
7455 }
7456 }
7457
db786d44
JR
7458 /*
7459 * The metadata channel might not be present.
7460 *
7461 * Consumer stream allocation can be done
7462 * asynchronously and can fail on intermediary
7463			 * operations (e.g. adding a context) and lead to data
7464 * channels created with no metadata channel.
7465 */
7466 if (!reg->registry->reg.ust->metadata_key) {
7467 /* Skip since no metadata is present. */
7468 continue;
7469 }
7470
5c408ad8
JD
7471 (void) push_metadata(reg->registry->reg.ust, usess->consumer);
7472
7473 ret = consumer_rotate_channel(socket,
7474 reg->registry->reg.ust->metadata_key,
7475 usess->uid, usess->gid,
d2956687
JG
7476 usess->consumer,
7477 /* is_metadata_channel */ true);
5c408ad8 7478 if (ret < 0) {
6f6d3b69 7479 cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
5c408ad8
JD
7480 goto error;
7481 }
5c408ad8
JD
7482 }
7483 break;
7484 }
7485 case LTTNG_BUFFER_PER_PID:
7486 {
7487 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
7488 struct consumer_socket *socket;
7489 struct lttng_ht_iter chan_iter;
7490 struct ust_app_channel *ua_chan;
7491 struct ust_app_session *ua_sess;
7492 struct ust_registry_session *registry;
3faa1e3d
JG
7493 bool app_reference_taken;
7494
7495 app_reference_taken = ust_app_get(app);
7496 if (!app_reference_taken) {
7497 /* Application unregistered concurrently, skip it. */
7498 DBG("Could not get application reference as it is being torn down; skipping application");
7499 continue;
7500 }
5c408ad8
JD
7501
7502 ua_sess = lookup_session_by_app(usess, app);
7503 if (!ua_sess) {
7504 /* Session not associated with this app. */
3faa1e3d
JG
7505 ust_app_put(app);
7506 app = NULL;
5c408ad8
JD
7507 continue;
7508 }
5c408ad8
JD
7509
7510 /* Get the right consumer socket for the application. */
7511 socket = consumer_find_socket_by_bitness(app->bits_per_long,
7512 usess->consumer);
7513 if (!socket) {
6f6d3b69 7514 cmd_ret = LTTNG_ERR_INVALID;
5c408ad8
JD
7515 goto error;
7516 }
7517
3faa1e3d 7518
5c408ad8 7519 registry = get_session_registry(ua_sess);
3faa1e3d 7520 assert(registry);
5c408ad8 7521
5c408ad8
JD
7522 /* Rotate the data channels. */
7523 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
7524 ua_chan, node.node) {
470cc211
JG
7525 ret = consumer_rotate_channel(socket,
7526 ua_chan->key,
ff588497
JR
7527 lttng_credentials_get_uid(&ua_sess->effective_credentials),
7528 lttng_credentials_get_gid(&ua_sess->effective_credentials),
d2956687
JG
7529 ua_sess->consumer,
7530 /* is_metadata_channel */ false);
5c408ad8 7531 if (ret < 0) {
6f6d3b69 7532 cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
5c408ad8
JD
7533 goto error;
7534 }
7535 }
7536
7537 /* Rotate the metadata channel. */
7538 (void) push_metadata(registry, usess->consumer);
470cc211
JG
7539 ret = consumer_rotate_channel(socket,
7540 registry->metadata_key,
ff588497
JR
7541 lttng_credentials_get_uid(&ua_sess->effective_credentials),
7542 lttng_credentials_get_gid(&ua_sess->effective_credentials),
d2956687
JG
7543 ua_sess->consumer,
7544 /* is_metadata_channel */ true);
5c408ad8 7545 if (ret < 0) {
6f6d3b69 7546 cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
5c408ad8
JD
7547 goto error;
7548 }
3faa1e3d
JG
7549
7550 ust_app_put(app);
7551 app = NULL;
5c408ad8 7552 }
3faa1e3d
JG
7553
7554 app = NULL;
5c408ad8
JD
7555 break;
7556 }
7557 default:
7558 assert(0);
7559 break;
7560 }
7561
6f6d3b69 7562 cmd_ret = LTTNG_OK;
5c408ad8
JD
7563
7564error:
3faa1e3d 7565 ust_app_put(app);
5c408ad8 7566 rcu_read_unlock();
6f6d3b69 7567 return cmd_ret;
5c408ad8 7568}
d2956687
JG
7569
7570enum lttng_error_code ust_app_create_channel_subdirectories(
7571 const struct ltt_ust_session *usess)
7572{
7573 enum lttng_error_code ret = LTTNG_OK;
7574 struct lttng_ht_iter iter;
7575 enum lttng_trace_chunk_status chunk_status;
7576 char *pathname_index;
7577 int fmt_ret;
7578
7579 assert(usess->current_trace_chunk);
7580 rcu_read_lock();
7581
7582 switch (usess->buffer_type) {
7583 case LTTNG_BUFFER_PER_UID:
7584 {
7585 struct buffer_reg_uid *reg;
7586
7587 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
7588 fmt_ret = asprintf(&pathname_index,
5da88b0f 7589 DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH "/" DEFAULT_INDEX_DIR,
d2956687
JG
7590 reg->uid, reg->bits_per_long);
7591 if (fmt_ret < 0) {
7592 ERR("Failed to format channel index directory");
7593 ret = LTTNG_ERR_CREATE_DIR_FAIL;
7594 goto error;
7595 }
7596
7597 /*
7598 * Create the index subdirectory which will take care
7599 * of implicitly creating the channel's path.
7600 */
7601 chunk_status = lttng_trace_chunk_create_subdirectory(
7602 usess->current_trace_chunk,
7603 pathname_index);
7604 free(pathname_index);
7605 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
7606 ret = LTTNG_ERR_CREATE_DIR_FAIL;
7607 goto error;
7608 }
7609 }
7610 break;
7611 }
7612 case LTTNG_BUFFER_PER_PID:
7613 {
7614 struct ust_app *app;
7615
495dece5
MD
7616 /*
7617 * Create the toplevel ust/ directory in case no apps are running.
7618 */
7619 chunk_status = lttng_trace_chunk_create_subdirectory(
7620 usess->current_trace_chunk,
7621 DEFAULT_UST_TRACE_DIR);
7622 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
7623 ret = LTTNG_ERR_CREATE_DIR_FAIL;
7624 goto error;
7625 }
7626
d2956687
JG
7627 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app,
7628 pid_n.node) {
7629 struct ust_app_session *ua_sess;
7630 struct ust_registry_session *registry;
7631
7632 ua_sess = lookup_session_by_app(usess, app);
7633 if (!ua_sess) {
7634 /* Session not associated with this app. */
7635 continue;
7636 }
7637
7638 registry = get_session_registry(ua_sess);
7639 if (!registry) {
7640 DBG("Application session is being torn down. Skip application.");
7641 continue;
7642 }
7643
7644 fmt_ret = asprintf(&pathname_index,
5da88b0f 7645 DEFAULT_UST_TRACE_DIR "/%s/" DEFAULT_INDEX_DIR,
d2956687
JG
7646 ua_sess->path);
7647 if (fmt_ret < 0) {
7648 ERR("Failed to format channel index directory");
7649 ret = LTTNG_ERR_CREATE_DIR_FAIL;
7650 goto error;
7651 }
7652 /*
7653 * Create the index subdirectory which will take care
7654 * of implicitly creating the channel's path.
7655 */
7656 chunk_status = lttng_trace_chunk_create_subdirectory(
7657 usess->current_trace_chunk,
7658 pathname_index);
7659 free(pathname_index);
7660 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
7661 ret = LTTNG_ERR_CREATE_DIR_FAIL;
7662 goto error;
7663 }
7664 }
7665 break;
7666 }
7667 default:
7668 abort();
7669 }
7670
7671 ret = LTTNG_OK;
7672error:
7673 rcu_read_unlock();
7674 return ret;
7675}
4a9b9759
MD
7676
7677/*
7678 * Clear all the channels of a session.
7679 *
7680 * Return LTTNG_OK on success or else an LTTng error code.
7681 */
7682enum lttng_error_code ust_app_clear_session(struct ltt_session *session)
7683{
7684 int ret;
7685 enum lttng_error_code cmd_ret = LTTNG_OK;
7686 struct lttng_ht_iter iter;
7687 struct ust_app *app;
7688 struct ltt_ust_session *usess = session->ust_session;
7689
7690 assert(usess);
7691
7692 rcu_read_lock();
7693
7694 if (usess->active) {
7695 ERR("Expecting inactive session %s (%" PRIu64 ")", session->name, session->id);
7696 cmd_ret = LTTNG_ERR_FATAL;
7697 goto end;
7698 }
7699
7700 switch (usess->buffer_type) {
7701 case LTTNG_BUFFER_PER_UID:
7702 {
7703 struct buffer_reg_uid *reg;
7704
7705 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
3273699d 7706 struct buffer_reg_channel *buf_reg_chan;
4a9b9759
MD
7707 struct consumer_socket *socket;
7708
7709 /* Get consumer socket to use to push the metadata.*/
7710 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
7711 usess->consumer);
7712 if (!socket) {
7713 cmd_ret = LTTNG_ERR_INVALID;
7714 goto error_socket;
7715 }
7716
7717 /* Clear the data channels. */
7718 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
3273699d 7719 buf_reg_chan, node.node) {
4a9b9759 7720 ret = consumer_clear_channel(socket,
3273699d 7721 buf_reg_chan->consumer_key);
4a9b9759
MD
7722 if (ret < 0) {
7723 goto error;
7724 }
7725 }
7726
7727 (void) push_metadata(reg->registry->reg.ust, usess->consumer);
7728
7729 /*
7730 * Clear the metadata channel.
7731			 * The metadata channel is not cleared per se, but we still need to
7732			 * perform a rotation operation on it behind the scenes.
7733 */
7734 ret = consumer_clear_channel(socket,
7735 reg->registry->reg.ust->metadata_key);
7736 if (ret < 0) {
7737 goto error;
7738 }
7739 }
7740 break;
7741 }
7742 case LTTNG_BUFFER_PER_PID:
7743 {
7744 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
7745 struct consumer_socket *socket;
7746 struct lttng_ht_iter chan_iter;
7747 struct ust_app_channel *ua_chan;
7748 struct ust_app_session *ua_sess;
7749 struct ust_registry_session *registry;
7750
7751 ua_sess = lookup_session_by_app(usess, app);
7752 if (!ua_sess) {
7753 /* Session not associated with this app. */
7754 continue;
7755 }
7756
7757 /* Get the right consumer socket for the application. */
7758 socket = consumer_find_socket_by_bitness(app->bits_per_long,
7759 usess->consumer);
7760 if (!socket) {
7761 cmd_ret = LTTNG_ERR_INVALID;
7762 goto error_socket;
7763 }
7764
7765 registry = get_session_registry(ua_sess);
7766 if (!registry) {
7767 DBG("Application session is being torn down. Skip application.");
7768 continue;
7769 }
7770
7771 /* Clear the data channels. */
7772 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
7773 ua_chan, node.node) {
7774 ret = consumer_clear_channel(socket, ua_chan->key);
7775 if (ret < 0) {
7776 /* Per-PID buffer and application going away. */
7777 if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
7778 continue;
7779 }
7780 goto error;
7781 }
7782 }
7783
7784 (void) push_metadata(registry, usess->consumer);
7785
7786 /*
7787 * Clear the metadata channel.
7788			 * The metadata channel is not cleared per se, but we still need to
7789			 * perform a rotation operation on it behind the scenes.
7790 */
7791 ret = consumer_clear_channel(socket, registry->metadata_key);
7792 if (ret < 0) {
7793 /* Per-PID buffer and application going away. */
7794 if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
7795 continue;
7796 }
7797 goto error;
7798 }
7799 }
7800 break;
7801 }
7802 default:
7803 assert(0);
7804 break;
7805 }
7806
7807 cmd_ret = LTTNG_OK;
7808 goto end;
7809
7810error:
7811 switch (-ret) {
7812 case LTTCOMM_CONSUMERD_RELAYD_CLEAR_DISALLOWED:
7813 cmd_ret = LTTNG_ERR_CLEAR_RELAY_DISALLOWED;
7814 break;
7815 default:
7816 cmd_ret = LTTNG_ERR_CLEAR_FAIL_CONSUMER;
7817 }
7818
7819error_socket:
7820end:
7821 rcu_read_unlock();
7822 return cmd_ret;
7823}
04ed9e10
JG
7824
7825/*
7826 * This function skips the metadata channel as the begin/end timestamps of a
7827 * metadata packet are useless.
7828 *
7829 * Moreover, opening a packet after a "clear" will cause problems for live
7830 * sessions as it will introduce padding that was not part of the first trace
7831 * chunk. The relay daemon expects the content of the metadata stream of
7832 * successive metadata trace chunks to be strict supersets of one another.
7833 *
7834 * For example, flushing a packet at the beginning of the metadata stream of
7835 * a trace chunk resulting from a "clear" session command will cause the
7836 * size of the metadata stream of the new trace chunk to not match the size of
7837 * the metadata stream of the original chunk. This will confuse the relay
7838 * daemon as the same "offset" in a metadata stream will no longer point
7839 * to the same content.
7840 */
7841enum lttng_error_code ust_app_open_packets(struct ltt_session *session)
7842{
7843 enum lttng_error_code ret = LTTNG_OK;
7844 struct lttng_ht_iter iter;
7845 struct ltt_ust_session *usess = session->ust_session;
7846
7847 assert(usess);
7848
7849 rcu_read_lock();
7850
7851 switch (usess->buffer_type) {
7852 case LTTNG_BUFFER_PER_UID:
7853 {
7854 struct buffer_reg_uid *reg;
7855
7856 cds_list_for_each_entry (
7857 reg, &usess->buffer_reg_uid_list, lnode) {
3273699d 7858 struct buffer_reg_channel *buf_reg_chan;
04ed9e10
JG
7859 struct consumer_socket *socket;
7860
7861 socket = consumer_find_socket_by_bitness(
7862 reg->bits_per_long, usess->consumer);
7863 if (!socket) {
7864 ret = LTTNG_ERR_FATAL;
7865 goto error;
7866 }
7867
7868 cds_lfht_for_each_entry(reg->registry->channels->ht,
3273699d 7869 &iter.iter, buf_reg_chan, node.node) {
04ed9e10
JG
7870 const int open_ret =
7871 consumer_open_channel_packets(
7872 socket,
3273699d 7873 buf_reg_chan->consumer_key);
04ed9e10
JG
7874
7875 if (open_ret < 0) {
7876 ret = LTTNG_ERR_UNK;
7877 goto error;
7878 }
7879 }
7880 }
7881 break;
7882 }
7883 case LTTNG_BUFFER_PER_PID:
7884 {
7885 struct ust_app *app;
7886
7887 cds_lfht_for_each_entry (
7888 ust_app_ht->ht, &iter.iter, app, pid_n.node) {
7889 struct consumer_socket *socket;
7890 struct lttng_ht_iter chan_iter;
7891 struct ust_app_channel *ua_chan;
7892 struct ust_app_session *ua_sess;
7893 struct ust_registry_session *registry;
7894
7895 ua_sess = lookup_session_by_app(usess, app);
7896 if (!ua_sess) {
7897 /* Session not associated with this app. */
7898 continue;
7899 }
7900
7901 /* Get the right consumer socket for the application. */
7902 socket = consumer_find_socket_by_bitness(
7903 app->bits_per_long, usess->consumer);
7904 if (!socket) {
7905 ret = LTTNG_ERR_FATAL;
7906 goto error;
7907 }
7908
7909 registry = get_session_registry(ua_sess);
7910 if (!registry) {
7911 DBG("Application session is being torn down. Skip application.");
7912 continue;
7913 }
7914
7915 cds_lfht_for_each_entry(ua_sess->channels->ht,
7916 &chan_iter.iter, ua_chan, node.node) {
7917 const int open_ret =
7918 consumer_open_channel_packets(
7919 socket,
7920 ua_chan->key);
7921
7922 if (open_ret < 0) {
7923 /*
7924 * Per-PID buffer and application going
7925 * away.
7926 */
97a171e1 7927 if (open_ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
04ed9e10
JG
7928 continue;
7929 }
7930
7931 ret = LTTNG_ERR_UNK;
7932 goto error;
7933 }
7934 }
7935 }
7936 break;
7937 }
7938 default:
7939 abort();
7940 break;
7941 }
7942
7943error:
7944 rcu_read_unlock();
7945 return ret;
7946}
3faa1e3d
JG
7947
7948static void ust_app_release(struct urcu_ref *ref)
7949{
7950 struct ust_app *app = container_of(ref, struct ust_app, ref);
7951
7952 ust_app_unregister(app);
7953 ust_app_destroy(app);
7954}
7955
7956bool ust_app_get(struct ust_app *app)
7957{
7958 assert(app);
7959 return urcu_ref_get_unless_zero(&app->ref);
7960}
7961
7962void ust_app_put(struct ust_app *app)
7963{
7964 if (!app) {
7965 return;
7966 }
7967
7968 urcu_ref_put(&app->ref, ust_app_release);
7969}
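
/*
 * Illustrative usage sketch, not part of this file: how a caller is expected
 * to pair ust_app_get()/ust_app_put() around work that may block or reach out
 * to the consumer, so that the application's channels and sockets stay valid
 * even if the application exits concurrently. This mirrors the per-PID
 * rotation loop above; the "work" callback is a hypothetical placeholder.
 */
static void for_each_referenced_app(void (*work)(struct ust_app *app))
{
	struct lttng_ht_iter iter;
	struct ust_app *app;

	rcu_read_lock();
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!ust_app_get(app)) {
			/* Application is being torn down; skip it. */
			continue;
		}

		work(app);

		/* Drop the reference; this may trigger ust_app_release(). */
		ust_app_put(app);
	}
	rcu_read_unlock();
}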