163767d15cb8ef70101b9428f269b321c1c6ae57
[lttng-tools.git] / src / bin / lttng-sessiond / ust-app.c
1 /*
2 * Copyright (C) 2011 EfficiOS Inc.
3 * Copyright (C) 2016 Jérémie Galarneau <jeremie.galarneau@efficios.com>
4 *
5 * SPDX-License-Identifier: GPL-2.0-only
6 *
7 */
8
9 #define _LGPL_SOURCE
10 #include <errno.h>
11 #include <fcntl.h>
12 #include <inttypes.h>
13 #include <pthread.h>
14 #include <stdio.h>
15 #include <stdlib.h>
16 #include <string.h>
17 #include <sys/mman.h>
18 #include <sys/stat.h>
19 #include <sys/types.h>
20 #include <unistd.h>
21 #include <urcu/compiler.h>
22 #include <signal.h>
23
24 #include <common/bytecode/bytecode.h>
25 #include <common/compat/errno.h>
26 #include <common/common.h>
27 #include <common/hashtable/utils.h>
28 #include <lttng/event-rule/event-rule.h>
29 #include <lttng/event-rule/event-rule-internal.h>
30 #include <lttng/event-rule/user-tracepoint.h>
31 #include <lttng/condition/condition.h>
32 #include <lttng/condition/event-rule-matches-internal.h>
33 #include <lttng/condition/event-rule-matches.h>
34 #include <lttng/trigger/trigger-internal.h>
35 #include <common/sessiond-comm/sessiond-comm.h>
36
37 #include "buffer-registry.h"
38 #include "condition-internal.h"
39 #include "fd-limit.h"
40 #include "health-sessiond.h"
41 #include "ust-app.h"
42 #include "ust-consumer.h"
43 #include "lttng-ust-ctl.h"
44 #include "lttng-ust-error.h"
45 #include "utils.h"
46 #include "session.h"
47 #include "lttng-sessiond.h"
48 #include "notification-thread-commands.h"
49 #include "rotate.h"
50 #include "event.h"
51 #include "event-notifier-error-accounting.h"
52 #include "ust-field-utils.h"
53
/*
 * Global hash tables tracking registered UST applications. The _by_sock and
 * _by_notify_sock variants presumably index the same applications by their
 * command and notify sockets, respectively — confirm at the registration
 * sites (NOTE(review): inferred from the names only).
 */
struct lttng_ht *ust_app_ht;
struct lttng_ht *ust_app_ht_by_sock;
struct lttng_ht *ust_app_ht_by_notify_sock;

/* Forward declaration; defined later in this file. */
static
int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess);

/* Next available channel key. Access under next_channel_key_lock. */
static uint64_t _next_channel_key;
static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;

/* Next available session ID. Access under next_session_id_lock. */
static uint64_t _next_session_id;
static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;
68
69 /*
70 * Return the incremented value of next_channel_key.
71 */
72 static uint64_t get_next_channel_key(void)
73 {
74 uint64_t ret;
75
76 pthread_mutex_lock(&next_channel_key_lock);
77 ret = ++_next_channel_key;
78 pthread_mutex_unlock(&next_channel_key_lock);
79 return ret;
80 }
81
82 /*
83 * Return the atomically incremented value of next_session_id.
84 */
85 static uint64_t get_next_session_id(void)
86 {
87 uint64_t ret;
88
89 pthread_mutex_lock(&next_session_id_lock);
90 ret = ++_next_session_id;
91 pthread_mutex_unlock(&next_session_id_lock);
92 return ret;
93 }
94
95 static void copy_channel_attr_to_ustctl(
96 struct lttng_ust_ctl_consumer_channel_attr *attr,
97 struct lttng_ust_abi_channel_attr *uattr)
98 {
99 /* Copy event attributes since the layout is different. */
100 attr->subbuf_size = uattr->subbuf_size;
101 attr->num_subbuf = uattr->num_subbuf;
102 attr->overwrite = uattr->overwrite;
103 attr->switch_timer_interval = uattr->switch_timer_interval;
104 attr->read_timer_interval = uattr->read_timer_interval;
105 attr->output = uattr->output;
106 attr->blocking_timeout = uattr->u.s.blocking_timeout;
107 }
108
109 /*
110 * Match function for the hash table lookup.
111 *
112 * It matches an ust app event based on three attributes which are the event
113 * name, the filter bytecode and the loglevel.
114 */
115 static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
116 {
117 struct ust_app_event *event;
118 const struct ust_app_ht_key *key;
119
120 assert(node);
121 assert(_key);
122
123 event = caa_container_of(node, struct ust_app_event, node.node);
124 key = _key;
125
126 /* Match the 4 elements of the key: name, filter, loglevel, exclusions */
127
128 /* Event name */
129 if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
130 goto no_match;
131 }
132
133 /* Event loglevel. */
134 if (!loglevels_match(event->attr.loglevel_type, event->attr.loglevel,
135 key->loglevel_type, key->loglevel_value,
136 LTTNG_UST_ABI_LOGLEVEL_ALL)) {
137 goto no_match;
138 }
139
140 /* One of the filters is NULL, fail. */
141 if ((key->filter && !event->filter) || (!key->filter && event->filter)) {
142 goto no_match;
143 }
144
145 if (key->filter && event->filter) {
146 /* Both filters exists, check length followed by the bytecode. */
147 if (event->filter->len != key->filter->len ||
148 memcmp(event->filter->data, key->filter->data,
149 event->filter->len) != 0) {
150 goto no_match;
151 }
152 }
153
154 /* One of the exclusions is NULL, fail. */
155 if ((key->exclusion && !event->exclusion) || (!key->exclusion && event->exclusion)) {
156 goto no_match;
157 }
158
159 if (key->exclusion && event->exclusion) {
160 /* Both exclusions exists, check count followed by the names. */
161 if (event->exclusion->count != key->exclusion->count ||
162 memcmp(event->exclusion->names, key->exclusion->names,
163 event->exclusion->count * LTTNG_UST_ABI_SYM_NAME_LEN) != 0) {
164 goto no_match;
165 }
166 }
167
168
169 /* Match. */
170 return 1;
171
172 no_match:
173 return 0;
174 }
175
176 /*
177 * Unique add of an ust app event in the given ht. This uses the custom
178 * ht_match_ust_app_event match function and the event name as hash.
179 */
180 static void add_unique_ust_app_event(struct ust_app_channel *ua_chan,
181 struct ust_app_event *event)
182 {
183 struct cds_lfht_node *node_ptr;
184 struct ust_app_ht_key key;
185 struct lttng_ht *ht;
186
187 assert(ua_chan);
188 assert(ua_chan->events);
189 assert(event);
190
191 ht = ua_chan->events;
192 key.name = event->attr.name;
193 key.filter = event->filter;
194 key.loglevel_type = (enum lttng_ust_abi_loglevel_type)
195 event->attr.loglevel_type;
196 key.loglevel_value = event->attr.loglevel;
197 key.exclusion = event->exclusion;
198
199 node_ptr = cds_lfht_add_unique(ht->ht,
200 ht->hash_fct(event->node.key, lttng_ht_seed),
201 ht_match_ust_app_event, &key, &event->node.node);
202 assert(node_ptr == &event->node.node);
203 }
204
205 /*
206 * Close the notify socket from the given RCU head object. This MUST be called
207 * through a call_rcu().
208 */
209 static void close_notify_sock_rcu(struct rcu_head *head)
210 {
211 int ret;
212 struct ust_app_notify_sock_obj *obj =
213 caa_container_of(head, struct ust_app_notify_sock_obj, head);
214
215 /* Must have a valid fd here. */
216 assert(obj->fd >= 0);
217
218 ret = close(obj->fd);
219 if (ret) {
220 ERR("close notify sock %d RCU", obj->fd);
221 }
222 lttng_fd_put(LTTNG_FD_APPS, 1);
223
224 free(obj);
225 }
226
227 /*
228 * Return the session registry according to the buffer type of the given
229 * session.
230 *
231 * A registry per UID object MUST exists before calling this function or else
232 * it assert() if not found. RCU read side lock must be acquired.
233 */
234 static struct ust_registry_session *get_session_registry(
235 struct ust_app_session *ua_sess)
236 {
237 struct ust_registry_session *registry = NULL;
238
239 assert(ua_sess);
240
241 switch (ua_sess->buffer_type) {
242 case LTTNG_BUFFER_PER_PID:
243 {
244 struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
245 if (!reg_pid) {
246 goto error;
247 }
248 registry = reg_pid->registry->reg.ust;
249 break;
250 }
251 case LTTNG_BUFFER_PER_UID:
252 {
253 struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
254 ua_sess->tracing_id, ua_sess->bits_per_long,
255 lttng_credentials_get_uid(&ua_sess->real_credentials));
256 if (!reg_uid) {
257 goto error;
258 }
259 registry = reg_uid->registry->reg.ust;
260 break;
261 }
262 default:
263 assert(0);
264 };
265
266 error:
267 return registry;
268 }
269
270 /*
271 * Delete ust context safely. RCU read lock must be held before calling
272 * this function.
273 */
274 static
275 void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx,
276 struct ust_app *app)
277 {
278 int ret;
279
280 assert(ua_ctx);
281
282 if (ua_ctx->obj) {
283 pthread_mutex_lock(&app->sock_lock);
284 ret = lttng_ust_ctl_release_object(sock, ua_ctx->obj);
285 pthread_mutex_unlock(&app->sock_lock);
286 if (ret < 0) {
287 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
288 DBG3("UST app release ctx failed. Application is dead: pid = %d, sock = %d",
289 app->pid, app->sock);
290 } else if (ret == -EAGAIN) {
291 WARN("UST app release ctx failed. Communication time out: pid = %d, sock = %d",
292 app->pid, app->sock);
293 } else {
294 ERR("UST app release ctx obj handle %d failed with ret %d: pid = %d, sock = %d",
295 ua_ctx->obj->handle, ret,
296 app->pid, app->sock);
297 }
298 }
299 free(ua_ctx->obj);
300 }
301 free(ua_ctx);
302 }
303
304 /*
305 * Delete ust app event safely. RCU read lock must be held before calling
306 * this function.
307 */
308 static
309 void delete_ust_app_event(int sock, struct ust_app_event *ua_event,
310 struct ust_app *app)
311 {
312 int ret;
313
314 assert(ua_event);
315
316 free(ua_event->filter);
317 if (ua_event->exclusion != NULL)
318 free(ua_event->exclusion);
319 if (ua_event->obj != NULL) {
320 pthread_mutex_lock(&app->sock_lock);
321 ret = lttng_ust_ctl_release_object(sock, ua_event->obj);
322 pthread_mutex_unlock(&app->sock_lock);
323 if (ret < 0) {
324 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
325 DBG3("UST app release event failed. Application is dead: pid = %d, sock = %d",
326 app->pid, app->sock);
327 } else if (ret == -EAGAIN) {
328 WARN("UST app release event failed. Communication time out: pid = %d, sock = %d",
329 app->pid, app->sock);
330 } else {
331 ERR("UST app release event obj failed with ret %d: pid = %d, sock = %d",
332 ret, app->pid, app->sock);
333 }
334 }
335 free(ua_event->obj);
336 }
337 free(ua_event);
338 }
339
340 /*
341 * Delayed reclaim of a ust_app_event_notifier_rule object. This MUST be called
342 * through a call_rcu().
343 */
344 static
345 void free_ust_app_event_notifier_rule_rcu(struct rcu_head *head)
346 {
347 struct ust_app_event_notifier_rule *obj = caa_container_of(
348 head, struct ust_app_event_notifier_rule, rcu_head);
349
350 free(obj);
351 }
352
353 /*
354 * Delete ust app event notifier rule safely.
355 */
356 static void delete_ust_app_event_notifier_rule(int sock,
357 struct ust_app_event_notifier_rule *ua_event_notifier_rule,
358 struct ust_app *app)
359 {
360 int ret;
361
362 assert(ua_event_notifier_rule);
363
364 if (ua_event_notifier_rule->exclusion != NULL) {
365 free(ua_event_notifier_rule->exclusion);
366 }
367
368 if (ua_event_notifier_rule->obj != NULL) {
369 pthread_mutex_lock(&app->sock_lock);
370 ret = lttng_ust_ctl_release_object(sock, ua_event_notifier_rule->obj);
371 pthread_mutex_unlock(&app->sock_lock);
372 if (ret < 0) {
373 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
374 DBG3("UST app release event notifier failed. Application is dead: pid = %d, sock = %d",
375 app->pid, app->sock);
376 } else if (ret == -EAGAIN) {
377 WARN("UST app release event notifier failed. Communication time out: pid = %d, sock = %d",
378 app->pid, app->sock);
379 } else {
380 ERR("UST app release event notifier failed with ret %d: pid = %d, sock = %d",
381 ret, app->pid, app->sock);
382 }
383 }
384
385 free(ua_event_notifier_rule->obj);
386 }
387
388 lttng_trigger_put(ua_event_notifier_rule->trigger);
389 call_rcu(&ua_event_notifier_rule->rcu_head,
390 free_ust_app_event_notifier_rule_rcu);
391 }
392
393 /*
394 * Release ust data object of the given stream.
395 *
396 * Return 0 on success or else a negative value.
397 */
398 static int release_ust_app_stream(int sock, struct ust_app_stream *stream,
399 struct ust_app *app)
400 {
401 int ret = 0;
402
403 assert(stream);
404
405 if (stream->obj) {
406 pthread_mutex_lock(&app->sock_lock);
407 ret = lttng_ust_ctl_release_object(sock, stream->obj);
408 pthread_mutex_unlock(&app->sock_lock);
409 if (ret < 0) {
410 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
411 DBG3("UST app release stream failed. Application is dead: pid = %d, sock = %d",
412 app->pid, app->sock);
413 } else if (ret == -EAGAIN) {
414 WARN("UST app release stream failed. Communication time out: pid = %d, sock = %d",
415 app->pid, app->sock);
416 } else {
417 ERR("UST app release stream obj failed with ret %d: pid = %d, sock = %d",
418 ret, app->pid, app->sock);
419 }
420 }
421 lttng_fd_put(LTTNG_FD_APPS, 2);
422 free(stream->obj);
423 }
424
425 return ret;
426 }
427
/*
 * Delete ust app stream safely. RCU read lock must be held before calling
 * this function.
 *
 * The release is best-effort: the stream is freed whether or not the UST
 * object could be released in the application.
 */
static
void delete_ust_app_stream(int sock, struct ust_app_stream *stream,
		struct ust_app *app)
{
	assert(stream);

	(void) release_ust_app_stream(sock, stream, app);
	free(stream);
}
441
442 /*
443 * We need to execute ht_destroy outside of RCU read-side critical
444 * section and outside of call_rcu thread, so we postpone its execution
445 * using ht_cleanup_push. It is simpler than to change the semantic of
446 * the many callers of delete_ust_app_session().
447 */
448 static
449 void delete_ust_app_channel_rcu(struct rcu_head *head)
450 {
451 struct ust_app_channel *ua_chan =
452 caa_container_of(head, struct ust_app_channel, rcu_head);
453
454 ht_cleanup_push(ua_chan->ctx);
455 ht_cleanup_push(ua_chan->events);
456 free(ua_chan);
457 }
458
/*
 * Extract the lost packet or discarded events counter when the channel is
 * being deleted and store the value in the parent channel so we can
 * access it from lttng list and at stop/destroy.
 *
 * The session list lock must be held by the caller.
 */
static
void save_per_pid_lost_discarded_counters(struct ust_app_channel *ua_chan)
{
	uint64_t discarded = 0, lost = 0;
	struct ltt_session *session;
	struct ltt_ust_channel *uchan;

	/* Only per-CPU channels carry these counters. */
	if (ua_chan->attr.type != LTTNG_UST_ABI_CHAN_PER_CPU) {
		return;
	}

	rcu_read_lock();
	session = session_find_by_id(ua_chan->session->tracing_id);
	if (!session || !session->ust_session) {
		/*
		 * Not finding the session is not an error because there are
		 * multiple ways the channels can be torn down.
		 *
		 * 1) The session daemon can initiate the destruction of the
		 *    ust app session after receiving a destroy command or
		 *    during its shutdown/teardown.
		 * 2) The application, since we are in per-pid tracing, is
		 *    unregistering and tearing down its ust app session.
		 *
		 * Both paths are protected by the session list lock which
		 * ensures that the accounting of lost packets and discarded
		 * events is done exactly once. The session is then unpublished
		 * from the session list, resulting in this condition.
		 */
		goto end;
	}

	/* Overwrite-mode channels report lost packets; others discarded events. */
	if (ua_chan->attr.overwrite) {
		consumer_get_lost_packets(ua_chan->session->tracing_id,
				ua_chan->key, session->ust_session->consumer,
				&lost);
	} else {
		consumer_get_discarded_events(ua_chan->session->tracing_id,
				ua_chan->key, session->ust_session->consumer,
				&discarded);
	}
	uchan = trace_ust_find_channel_by_name(
			session->ust_session->domain_global.channels,
			ua_chan->name);
	if (!uchan) {
		ERR("Missing UST channel to store discarded counters");
		goto end;
	}

	/* Accumulate into the parent channel for later reporting. */
	uchan->per_pid_closed_app_discarded += discarded;
	uchan->per_pid_closed_app_lost += lost;

end:
	rcu_read_unlock();
	if (session) {
		/* Drop the reference taken by session_find_by_id(). */
		session_put(session);
	}
}
524
/*
 * Delete ust app channel safely. RCU read lock must be held before calling
 * this function.
 *
 * The session list lock must be held by the caller.
 *
 * Tears down, in order: the channel's streams, contexts and events, the
 * per-PID registry bookkeeping (when applicable), and finally the channel's
 * UST object in the application. The channel structure itself is reclaimed
 * after an RCU grace period.
 */
static
void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;
	struct ust_app_stream *stream, *stmp;
	struct ust_registry_session *registry;

	assert(ua_chan);

	DBG3("UST app deleting channel %s", ua_chan->name);

	/* Wipe stream */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		cds_list_del(&stream->list);
		delete_ust_app_stream(sock, stream, app);
	}

	/* Wipe context */
	cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
		cds_list_del(&ua_ctx->list);
		ret = lttng_ht_del(ua_chan->ctx, &iter);
		assert(!ret);
		delete_ust_app_ctx(sock, ua_ctx, app);
	}

	/* Wipe events */
	cds_lfht_for_each_entry(ua_chan->events->ht, &iter.iter, ua_event,
			node.node) {
		ret = lttng_ht_del(ua_chan->events, &iter);
		assert(!ret);
		delete_ust_app_event(sock, ua_event, app);
	}

	if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
		/* Wipe and free registry from session registry. */
		registry = get_session_registry(ua_chan->session);
		if (registry) {
			ust_registry_channel_del_free(registry, ua_chan->key,
				sock >= 0);
		}
		/*
		 * A negative socket can be used by the caller when
		 * cleaning-up a ua_chan in an error path. Skip the
		 * accounting in this case.
		 */
		if (sock >= 0) {
			save_per_pid_lost_discarded_counters(ua_chan);
		}
	}

	if (ua_chan->obj != NULL) {
		/* Remove channel from application UST object descriptor. */
		iter.iter.node = &ua_chan->ust_objd_node.node;
		ret = lttng_ht_del(app->ust_objd, &iter);
		assert(!ret);
		/* Release the channel object in the application. */
		pthread_mutex_lock(&app->sock_lock);
		ret = lttng_ust_ctl_release_object(sock, ua_chan->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0) {
			if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
				DBG3("UST app channel %s release failed. Application is dead: pid = %d, sock = %d",
						ua_chan->name, app->pid,
						app->sock);
			} else if (ret == -EAGAIN) {
				WARN("UST app channel %s release failed. Communication time out: pid = %d, sock = %d",
						ua_chan->name, app->pid,
						app->sock);
			} else {
				ERR("UST app channel %s release failed with ret %d: pid = %d, sock = %d",
						ua_chan->name, ret, app->pid,
						app->sock);
			}
		}
		/* Give back the fd count reserved for the channel object. */
		lttng_fd_put(LTTNG_FD_APPS, 1);
		free(ua_chan->obj);
	}
	call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
}
613
614 int ust_app_register_done(struct ust_app *app)
615 {
616 int ret;
617
618 pthread_mutex_lock(&app->sock_lock);
619 ret = lttng_ust_ctl_register_done(app->sock);
620 pthread_mutex_unlock(&app->sock_lock);
621 return ret;
622 }
623
624 int ust_app_release_object(struct ust_app *app, struct lttng_ust_abi_object_data *data)
625 {
626 int ret, sock;
627
628 if (app) {
629 pthread_mutex_lock(&app->sock_lock);
630 sock = app->sock;
631 } else {
632 sock = -1;
633 }
634 ret = lttng_ust_ctl_release_object(sock, data);
635 if (app) {
636 pthread_mutex_unlock(&app->sock_lock);
637 }
638 return ret;
639 }
640
/*
 * Push metadata to consumer socket.
 *
 * RCU read-side lock must be held to guarantee existence of socket.
 * Must be called with the ust app session lock held.
 * Must be called with the registry lock held.
 *
 * On success, return the len of metadata pushed or else a negative value.
 * Returning a -EPIPE return value means we could not send the metadata,
 * but it can be caused by recoverable errors (e.g. the application has
 * terminated concurrently).
 */
ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
		struct consumer_socket *socket, int send_zero_data)
{
	int ret;
	char *metadata_str = NULL;
	size_t len, offset, new_metadata_len_sent;
	ssize_t ret_val;
	uint64_t metadata_key, metadata_version;

	assert(registry);
	assert(socket);

	metadata_key = registry->metadata_key;

	/*
	 * Means that no metadata was assigned to the session. This can
	 * happens if no start has been done previously.
	 */
	if (!metadata_key) {
		return 0;
	}

	/* Snapshot the registry state while still holding its lock. */
	offset = registry->metadata_len_sent;
	len = registry->metadata_len - registry->metadata_len_sent;
	new_metadata_len_sent = registry->metadata_len;
	metadata_version = registry->metadata_version;
	if (len == 0) {
		DBG3("No metadata to push for metadata key %" PRIu64,
				registry->metadata_key);
		ret_val = len;
		/* An empty push may still be requested (e.g. as a flush). */
		if (send_zero_data) {
			DBG("No metadata to push");
			goto push_data;
		}
		goto end;
	}

	/* Allocate only what we have to send. */
	metadata_str = zmalloc(len);
	if (!metadata_str) {
		PERROR("zmalloc ust app metadata string");
		ret_val = -ENOMEM;
		goto error;
	}
	/* Copy what we haven't sent out. */
	memcpy(metadata_str, registry->metadata + offset, len);

push_data:
	pthread_mutex_unlock(&registry->lock);
	/*
	 * We need to unlock the registry while we push metadata to
	 * break a circular dependency between the consumerd metadata
	 * lock and the sessiond registry lock. Indeed, pushing metadata
	 * to the consumerd awaits that it gets pushed all the way to
	 * relayd, but doing so requires grabbing the metadata lock. If
	 * a concurrent metadata request is being performed by
	 * consumerd, this can try to grab the registry lock on the
	 * sessiond while holding the metadata lock on the consumer
	 * daemon. Those push and pull schemes are performed on two
	 * different bidirectional communication sockets.
	 */
	ret = consumer_push_metadata(socket, metadata_key,
			metadata_str, len, offset, metadata_version);
	pthread_mutex_lock(&registry->lock);
	if (ret < 0) {
		/*
		 * There is an acceptable race here between the registry
		 * metadata key assignment and the creation on the
		 * consumer. The session daemon can concurrently push
		 * metadata for this registry while being created on the
		 * consumer since the metadata key of the registry is
		 * assigned *before* it is setup to avoid the consumer
		 * to ask for metadata that could possibly be not found
		 * in the session daemon.
		 *
		 * The metadata will get pushed either by the session
		 * being stopped or the consumer requesting metadata if
		 * that race is triggered.
		 */
		if (ret == -LTTCOMM_CONSUMERD_CHANNEL_FAIL) {
			ret = 0;
		} else {
			ERR("Error pushing metadata to consumer");
		}
		ret_val = ret;
		goto error_push;
	} else {
		/*
		 * Metadata may have been concurrently pushed, since
		 * we're not holding the registry lock while pushing to
		 * consumer. This is handled by the fact that we send
		 * the metadata content, size, and the offset at which
		 * that metadata belongs. This may arrive out of order
		 * on the consumer side, and the consumer is able to
		 * deal with overlapping fragments. The consumer
		 * supports overlapping fragments, which must be
		 * contiguous starting from offset 0. We keep the
		 * largest metadata_len_sent value of the concurrent
		 * send.
		 */
		registry->metadata_len_sent =
			max_t(size_t, registry->metadata_len_sent,
				new_metadata_len_sent);
	}
	free(metadata_str);
	return len;

end:
error:
	if (ret_val) {
		/*
		 * On error, flag the registry that the metadata is
		 * closed. We were unable to push anything and this
		 * means that either the consumer is not responding or
		 * the metadata cache has been destroyed on the
		 * consumer.
		 */
		registry->metadata_closed = 1;
	}
error_push:
	free(metadata_str);
	return ret_val;
}
776
777 /*
778 * For a given application and session, push metadata to consumer.
779 * Either sock or consumer is required : if sock is NULL, the default
780 * socket to send the metadata is retrieved from consumer, if sock
781 * is not NULL we use it to send the metadata.
782 * RCU read-side lock must be held while calling this function,
783 * therefore ensuring existance of registry. It also ensures existance
784 * of socket throughout this function.
785 *
786 * Return 0 on success else a negative error.
787 * Returning a -EPIPE return value means we could not send the metadata,
788 * but it can be caused by recoverable errors (e.g. the application has
789 * terminated concurrently).
790 */
791 static int push_metadata(struct ust_registry_session *registry,
792 struct consumer_output *consumer)
793 {
794 int ret_val;
795 ssize_t ret;
796 struct consumer_socket *socket;
797
798 assert(registry);
799 assert(consumer);
800
801 pthread_mutex_lock(&registry->lock);
802 if (registry->metadata_closed) {
803 ret_val = -EPIPE;
804 goto error;
805 }
806
807 /* Get consumer socket to use to push the metadata.*/
808 socket = consumer_find_socket_by_bitness(registry->bits_per_long,
809 consumer);
810 if (!socket) {
811 ret_val = -1;
812 goto error;
813 }
814
815 ret = ust_app_push_metadata(registry, socket, 0);
816 if (ret < 0) {
817 ret_val = ret;
818 goto error;
819 }
820 pthread_mutex_unlock(&registry->lock);
821 return 0;
822
823 error:
824 pthread_mutex_unlock(&registry->lock);
825 return ret_val;
826 }
827
/*
 * Send to the consumer a close metadata command for the given session. Once
 * done, the metadata channel is deleted and the session metadata pointer is
 * nullified. The session lock MUST be held unless the application is
 * in the destroy path.
 *
 * Do not hold the registry lock while communicating with the consumerd, because
 * doing so causes inter-process deadlocks between consumerd and sessiond with
 * the metadata request notification.
 *
 * Return 0 on success else a negative value.
 */
static int close_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret;
	struct consumer_socket *socket;
	uint64_t metadata_key;
	bool registry_was_already_closed;

	assert(registry);
	assert(consumer);

	rcu_read_lock();

	/*
	 * Snapshot the registry state and mark it closed while holding the
	 * lock; the consumer communication below happens lock-free (see the
	 * deadlock note in the function header).
	 */
	pthread_mutex_lock(&registry->lock);
	metadata_key = registry->metadata_key;
	registry_was_already_closed = registry->metadata_closed;
	if (metadata_key != 0) {
		/*
		 * Mark closed eagerly. Even if the close command below fails,
		 * the consumer is not responding or not found so either way a
		 * second close should NOT be emitted for this registry.
		 */
		registry->metadata_closed = 1;
	}
	pthread_mutex_unlock(&registry->lock);

	/* Nothing to close: no metadata or already closed by a prior call. */
	if (metadata_key == 0 || registry_was_already_closed) {
		ret = 0;
		goto end;
	}

	/* Get consumer socket to use to push the metadata.*/
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret = -1;
		goto end;
	}

	ret = consumer_close_metadata(socket, metadata_key);
	if (ret < 0) {
		goto end;
	}

end:
	rcu_read_unlock();
	return ret;
}
888
889 /*
890 * We need to execute ht_destroy outside of RCU read-side critical
891 * section and outside of call_rcu thread, so we postpone its execution
892 * using ht_cleanup_push. It is simpler than to change the semantic of
893 * the many callers of delete_ust_app_session().
894 */
895 static
896 void delete_ust_app_session_rcu(struct rcu_head *head)
897 {
898 struct ust_app_session *ua_sess =
899 caa_container_of(head, struct ust_app_session, rcu_head);
900
901 ht_cleanup_push(ua_sess->channels);
902 free(ua_sess);
903 }
904
/*
 * Delete ust app session safely. RCU read lock must be held before calling
 * this function.
 *
 * The session list lock must be held by the caller.
 *
 * Flushes any pending metadata, tears down every channel, removes the
 * per-PID buffer registry (when applicable) and releases the session handle
 * in the application. The session structure itself is reclaimed after an
 * RCU grace period.
 */
static
void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_channel *ua_chan;
	struct ust_registry_session *registry;

	assert(ua_sess);

	pthread_mutex_lock(&ua_sess->lock);

	/* A session must only be deleted once. */
	assert(!ua_sess->deleted);
	ua_sess->deleted = true;

	registry = get_session_registry(ua_sess);
	/* Registry can be null on error path during initialization. */
	if (registry) {
		/* Push metadata for application before freeing the application. */
		(void) push_metadata(registry, ua_sess->consumer);

		/*
		 * Don't ask to close metadata for global per UID buffers. Close
		 * metadata only on destroy trace session in this case. Also, the
		 * previous push metadata could have flag the metadata registry to
		 * close so don't send a close command if closed.
		 */
		if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
			/* And ask to close it for this session registry. */
			(void) close_metadata(registry, ua_sess->consumer);
		}
	}

	/* Tear down every channel of the session. */
	cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
			node.node) {
		ret = lttng_ht_del(ua_sess->channels, &iter);
		assert(!ret);
		delete_ust_app_channel(sock, ua_chan, app);
	}

	/* In case of per PID, the registry is kept in the session. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
		struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
		if (reg_pid) {
			/*
			 * Registry can be null on error path during
			 * initialization.
			 */
			buffer_reg_pid_remove(reg_pid);
			buffer_reg_pid_destroy(reg_pid);
		}
	}

	/* Release the session handle held in the application, if any. */
	if (ua_sess->handle != -1) {
		pthread_mutex_lock(&app->sock_lock);
		ret = lttng_ust_ctl_release_handle(sock, ua_sess->handle);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0) {
			if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
				DBG3("UST app release session handle failed. Application is dead: pid = %d, sock = %d",
						app->pid, app->sock);
			} else if (ret == -EAGAIN) {
				WARN("UST app release session handle failed. Communication time out: pid = %d, sock = %d",
						app->pid, app->sock);
			} else {
				ERR("UST app release session handle failed with ret %d: pid = %d, sock = %d",
						ret, app->pid, app->sock);
			}
		}

		/* Remove session from application UST object descriptor. */
		iter.iter.node = &ua_sess->ust_objd_node.node;
		ret = lttng_ht_del(app->ust_sessions_objd, &iter);
		assert(!ret);
	}

	pthread_mutex_unlock(&ua_sess->lock);

	/* Drop the session's reference on its consumer output. */
	consumer_output_put(ua_sess->consumer);

	call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
}
994
995 /*
996 * Delete a traceable application structure from the global list. Never call
997 * this function outside of a call_rcu call.
998 *
999 * RCU read side lock should _NOT_ be held when calling this function.
1000 */
1001 static
1002 void delete_ust_app(struct ust_app *app)
1003 {
1004 int ret, sock;
1005 struct ust_app_session *ua_sess, *tmp_ua_sess;
1006 struct lttng_ht_iter iter;
1007 struct ust_app_event_notifier_rule *event_notifier_rule;
1008 bool event_notifier_write_fd_is_open;
1009
1010 /*
1011 * The session list lock must be held during this function to guarantee
1012 * the existence of ua_sess.
1013 */
1014 session_lock_list();
1015 /* Delete ust app sessions info */
1016 sock = app->sock;
1017 app->sock = -1;
1018
1019 /* Wipe sessions */
1020 cds_list_for_each_entry_safe(ua_sess, tmp_ua_sess, &app->teardown_head,
1021 teardown_node) {
1022 /* Free every object in the session and the session. */
1023 rcu_read_lock();
1024 delete_ust_app_session(sock, ua_sess, app);
1025 rcu_read_unlock();
1026 }
1027
1028 /* Remove the event notifier rules associated with this app. */
1029 rcu_read_lock();
1030 cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht,
1031 &iter.iter, event_notifier_rule, node.node) {
1032 ret = lttng_ht_del(app->token_to_event_notifier_rule_ht, &iter);
1033 assert(!ret);
1034
1035 delete_ust_app_event_notifier_rule(
1036 app->sock, event_notifier_rule, app);
1037 }
1038
1039 rcu_read_unlock();
1040
1041 ht_cleanup_push(app->sessions);
1042 ht_cleanup_push(app->ust_sessions_objd);
1043 ht_cleanup_push(app->ust_objd);
1044 ht_cleanup_push(app->token_to_event_notifier_rule_ht);
1045
1046 /*
1047 * This could be NULL if the event notifier setup failed (e.g the app
1048 * was killed or the tracer does not support this feature).
1049 */
1050 if (app->event_notifier_group.object) {
1051 enum lttng_error_code ret_code;
1052 enum event_notifier_error_accounting_status status;
1053
1054 const int event_notifier_read_fd = lttng_pipe_get_readfd(
1055 app->event_notifier_group.event_pipe);
1056
1057 ret_code = notification_thread_command_remove_tracer_event_source(
1058 the_notification_thread_handle,
1059 event_notifier_read_fd);
1060 if (ret_code != LTTNG_OK) {
1061 ERR("Failed to remove application tracer event source from notification thread");
1062 }
1063
1064 status = event_notifier_error_accounting_unregister_app(app);
1065 if (status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
1066 ERR("Error unregistering app from event notifier error accounting");
1067 }
1068
1069 lttng_ust_ctl_release_object(sock, app->event_notifier_group.object);
1070 free(app->event_notifier_group.object);
1071 }
1072
1073 event_notifier_write_fd_is_open = lttng_pipe_is_write_open(
1074 app->event_notifier_group.event_pipe);
1075 lttng_pipe_destroy(app->event_notifier_group.event_pipe);
1076 /*
1077 * Release the file descriptors reserved for the event notifier pipe.
1078 * The app could be destroyed before the write end of the pipe could be
1079 * passed to the application (and closed). In that case, both file
1080 * descriptors must be released.
1081 */
1082 lttng_fd_put(LTTNG_FD_APPS, event_notifier_write_fd_is_open ? 2 : 1);
1083
1084 /*
1085 * Wait until we have deleted the application from the sock hash table
1086 * before closing this socket, otherwise an application could re-use the
1087 * socket ID and race with the teardown, using the same hash table entry.
1088 *
1089 * It's OK to leave the close in call_rcu. We want it to stay unique for
1090 * all RCU readers that could run concurrently with unregister app,
1091 * therefore we _need_ to only close that socket after a grace period. So
1092 * it should stay in this RCU callback.
1093 *
1094 * This close() is a very important step of the synchronization model so
1095 * every modification to this function must be carefully reviewed.
1096 */
1097 ret = close(sock);
1098 if (ret) {
1099 PERROR("close");
1100 }
1101 lttng_fd_put(LTTNG_FD_APPS, 1);
1102
1103 DBG2("UST app pid %d deleted", app->pid);
1104 free(app);
1105 session_unlock_list();
1106 }
1107
1108 /*
1109 * URCU intermediate call to delete an UST app.
1110 */
1111 static
1112 void delete_ust_app_rcu(struct rcu_head *head)
1113 {
1114 struct lttng_ht_node_ulong *node =
1115 caa_container_of(head, struct lttng_ht_node_ulong, head);
1116 struct ust_app *app =
1117 caa_container_of(node, struct ust_app, pid_n);
1118
1119 DBG3("Call RCU deleting app PID %d", app->pid);
1120 delete_ust_app(app);
1121 }
1122
1123 /*
1124 * Delete the session from the application ht and delete the data structure by
1125 * freeing every object inside and releasing them.
1126 *
1127 * The session list lock must be held by the caller.
1128 */
1129 static void destroy_app_session(struct ust_app *app,
1130 struct ust_app_session *ua_sess)
1131 {
1132 int ret;
1133 struct lttng_ht_iter iter;
1134
1135 assert(app);
1136 assert(ua_sess);
1137
1138 iter.iter.node = &ua_sess->node.node;
1139 ret = lttng_ht_del(app->sessions, &iter);
1140 if (ret) {
1141 /* Already scheduled for teardown. */
1142 goto end;
1143 }
1144
1145 /* Once deleted, free the data structure. */
1146 delete_ust_app_session(app->sock, ua_sess, app);
1147
1148 end:
1149 return;
1150 }
1151
1152 /*
1153 * Alloc new UST app session.
1154 */
1155 static
1156 struct ust_app_session *alloc_ust_app_session(void)
1157 {
1158 struct ust_app_session *ua_sess;
1159
1160 /* Init most of the default value by allocating and zeroing */
1161 ua_sess = zmalloc(sizeof(struct ust_app_session));
1162 if (ua_sess == NULL) {
1163 PERROR("malloc");
1164 goto error_free;
1165 }
1166
1167 ua_sess->handle = -1;
1168 ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
1169 ua_sess->metadata_attr.type = LTTNG_UST_ABI_CHAN_METADATA;
1170 pthread_mutex_init(&ua_sess->lock, NULL);
1171
1172 return ua_sess;
1173
1174 error_free:
1175 return NULL;
1176 }
1177
1178 /*
1179 * Alloc new UST app channel.
1180 */
1181 static
1182 struct ust_app_channel *alloc_ust_app_channel(const char *name,
1183 struct ust_app_session *ua_sess,
1184 struct lttng_ust_abi_channel_attr *attr)
1185 {
1186 struct ust_app_channel *ua_chan;
1187
1188 /* Init most of the default value by allocating and zeroing */
1189 ua_chan = zmalloc(sizeof(struct ust_app_channel));
1190 if (ua_chan == NULL) {
1191 PERROR("malloc");
1192 goto error;
1193 }
1194
1195 /* Setup channel name */
1196 strncpy(ua_chan->name, name, sizeof(ua_chan->name));
1197 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
1198
1199 ua_chan->enabled = 1;
1200 ua_chan->handle = -1;
1201 ua_chan->session = ua_sess;
1202 ua_chan->key = get_next_channel_key();
1203 ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
1204 ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
1205 lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);
1206
1207 CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
1208 CDS_INIT_LIST_HEAD(&ua_chan->ctx_list);
1209
1210 /* Copy attributes */
1211 if (attr) {
1212 /* Translate from lttng_ust_channel to lttng_ust_ctl_consumer_channel_attr. */
1213 ua_chan->attr.subbuf_size = attr->subbuf_size;
1214 ua_chan->attr.num_subbuf = attr->num_subbuf;
1215 ua_chan->attr.overwrite = attr->overwrite;
1216 ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
1217 ua_chan->attr.read_timer_interval = attr->read_timer_interval;
1218 ua_chan->attr.output = attr->output;
1219 ua_chan->attr.blocking_timeout = attr->u.s.blocking_timeout;
1220 }
1221 /* By default, the channel is a per cpu channel. */
1222 ua_chan->attr.type = LTTNG_UST_ABI_CHAN_PER_CPU;
1223
1224 DBG3("UST app channel %s allocated", ua_chan->name);
1225
1226 return ua_chan;
1227
1228 error:
1229 return NULL;
1230 }
1231
1232 /*
1233 * Allocate and initialize a UST app stream.
1234 *
1235 * Return newly allocated stream pointer or NULL on error.
1236 */
1237 struct ust_app_stream *ust_app_alloc_stream(void)
1238 {
1239 struct ust_app_stream *stream = NULL;
1240
1241 stream = zmalloc(sizeof(*stream));
1242 if (stream == NULL) {
1243 PERROR("zmalloc ust app stream");
1244 goto error;
1245 }
1246
1247 /* Zero could be a valid value for a handle so flag it to -1. */
1248 stream->handle = -1;
1249
1250 error:
1251 return stream;
1252 }
1253
1254 /*
1255 * Alloc new UST app event.
1256 */
1257 static
1258 struct ust_app_event *alloc_ust_app_event(char *name,
1259 struct lttng_ust_abi_event *attr)
1260 {
1261 struct ust_app_event *ua_event;
1262
1263 /* Init most of the default value by allocating and zeroing */
1264 ua_event = zmalloc(sizeof(struct ust_app_event));
1265 if (ua_event == NULL) {
1266 PERROR("Failed to allocate ust_app_event structure");
1267 goto error;
1268 }
1269
1270 ua_event->enabled = 1;
1271 strncpy(ua_event->name, name, sizeof(ua_event->name));
1272 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
1273 lttng_ht_node_init_str(&ua_event->node, ua_event->name);
1274
1275 /* Copy attributes */
1276 if (attr) {
1277 memcpy(&ua_event->attr, attr, sizeof(ua_event->attr));
1278 }
1279
1280 DBG3("UST app event %s allocated", ua_event->name);
1281
1282 return ua_event;
1283
1284 error:
1285 return NULL;
1286 }
1287
1288 /*
1289 * Allocate a new UST app event notifier rule.
1290 */
1291 static struct ust_app_event_notifier_rule *alloc_ust_app_event_notifier_rule(
1292 struct lttng_trigger *trigger)
1293 {
1294 enum lttng_event_rule_generate_exclusions_status
1295 generate_exclusion_status;
1296 enum lttng_condition_status cond_status;
1297 struct ust_app_event_notifier_rule *ua_event_notifier_rule;
1298 struct lttng_condition *condition = NULL;
1299 const struct lttng_event_rule *event_rule = NULL;
1300
1301 ua_event_notifier_rule = zmalloc(sizeof(struct ust_app_event_notifier_rule));
1302 if (ua_event_notifier_rule == NULL) {
1303 PERROR("Failed to allocate ust_app_event_notifier_rule structure");
1304 goto error;
1305 }
1306
1307 ua_event_notifier_rule->enabled = 1;
1308 ua_event_notifier_rule->token = lttng_trigger_get_tracer_token(trigger);
1309 lttng_ht_node_init_u64(&ua_event_notifier_rule->node,
1310 ua_event_notifier_rule->token);
1311
1312 condition = lttng_trigger_get_condition(trigger);
1313 assert(condition);
1314 assert(lttng_condition_get_type(condition) ==
1315 LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES);
1316
1317 cond_status = lttng_condition_event_rule_matches_get_rule(
1318 condition, &event_rule);
1319 assert(cond_status == LTTNG_CONDITION_STATUS_OK);
1320 assert(event_rule);
1321
1322 ua_event_notifier_rule->error_counter_index =
1323 lttng_condition_event_rule_matches_get_error_counter_index(condition);
1324 /* Acquire the event notifier's reference to the trigger. */
1325 lttng_trigger_get(trigger);
1326
1327 ua_event_notifier_rule->trigger = trigger;
1328 ua_event_notifier_rule->filter = lttng_event_rule_get_filter_bytecode(event_rule);
1329 generate_exclusion_status = lttng_event_rule_generate_exclusions(
1330 event_rule, &ua_event_notifier_rule->exclusion);
1331 switch (generate_exclusion_status) {
1332 case LTTNG_EVENT_RULE_GENERATE_EXCLUSIONS_STATUS_OK:
1333 case LTTNG_EVENT_RULE_GENERATE_EXCLUSIONS_STATUS_NONE:
1334 break;
1335 default:
1336 /* Error occurred. */
1337 ERR("Failed to generate exclusions from trigger while allocating an event notifier rule");
1338 goto error_put_trigger;
1339 }
1340
1341 DBG3("UST app event notifier rule allocated: token = %" PRIu64,
1342 ua_event_notifier_rule->token);
1343
1344 return ua_event_notifier_rule;
1345
1346 error_put_trigger:
1347 lttng_trigger_put(trigger);
1348 error:
1349 free(ua_event_notifier_rule);
1350 return NULL;
1351 }
1352
1353 /*
1354 * Alloc new UST app context.
1355 */
1356 static
1357 struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context_attr *uctx)
1358 {
1359 struct ust_app_ctx *ua_ctx;
1360
1361 ua_ctx = zmalloc(sizeof(struct ust_app_ctx));
1362 if (ua_ctx == NULL) {
1363 goto error;
1364 }
1365
1366 CDS_INIT_LIST_HEAD(&ua_ctx->list);
1367
1368 if (uctx) {
1369 memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
1370 if (uctx->ctx == LTTNG_UST_ABI_CONTEXT_APP_CONTEXT) {
1371 char *provider_name = NULL, *ctx_name = NULL;
1372
1373 provider_name = strdup(uctx->u.app_ctx.provider_name);
1374 ctx_name = strdup(uctx->u.app_ctx.ctx_name);
1375 if (!provider_name || !ctx_name) {
1376 free(provider_name);
1377 free(ctx_name);
1378 goto error;
1379 }
1380
1381 ua_ctx->ctx.u.app_ctx.provider_name = provider_name;
1382 ua_ctx->ctx.u.app_ctx.ctx_name = ctx_name;
1383 }
1384 }
1385
1386 DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);
1387 return ua_ctx;
1388 error:
1389 free(ua_ctx);
1390 return NULL;
1391 }
1392
1393 /*
1394 * Create a liblttng-ust filter bytecode from given bytecode.
1395 *
1396 * Return allocated filter or NULL on error.
1397 */
1398 static struct lttng_ust_abi_filter_bytecode *create_ust_filter_bytecode_from_bytecode(
1399 const struct lttng_bytecode *orig_f)
1400 {
1401 struct lttng_ust_abi_filter_bytecode *filter = NULL;
1402
1403 /* Copy filter bytecode. */
1404 filter = zmalloc(sizeof(*filter) + orig_f->len);
1405 if (!filter) {
1406 PERROR("Failed to allocate lttng_ust_filter_bytecode: bytecode len = %" PRIu32 " bytes", orig_f->len);
1407 goto error;
1408 }
1409
1410 assert(sizeof(struct lttng_bytecode) ==
1411 sizeof(struct lttng_ust_abi_filter_bytecode));
1412 memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
1413 error:
1414 return filter;
1415 }
1416
1417 /*
1418 * Create a liblttng-ust capture bytecode from given bytecode.
1419 *
1420 * Return allocated filter or NULL on error.
1421 */
1422 static struct lttng_ust_abi_capture_bytecode *
1423 create_ust_capture_bytecode_from_bytecode(const struct lttng_bytecode *orig_f)
1424 {
1425 struct lttng_ust_abi_capture_bytecode *capture = NULL;
1426
1427 /* Copy capture bytecode. */
1428 capture = zmalloc(sizeof(*capture) + orig_f->len);
1429 if (!capture) {
1430 PERROR("Failed to allocate lttng_ust_abi_capture_bytecode: bytecode len = %" PRIu32 " bytes", orig_f->len);
1431 goto error;
1432 }
1433
1434 assert(sizeof(struct lttng_bytecode) ==
1435 sizeof(struct lttng_ust_abi_capture_bytecode));
1436 memcpy(capture, orig_f, sizeof(*capture) + orig_f->len);
1437 error:
1438 return capture;
1439 }
1440
1441 /*
1442 * Find an ust_app using the sock and return it. RCU read side lock must be
1443 * held before calling this helper function.
1444 */
1445 struct ust_app *ust_app_find_by_sock(int sock)
1446 {
1447 struct lttng_ht_node_ulong *node;
1448 struct lttng_ht_iter iter;
1449
1450 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
1451 node = lttng_ht_iter_get_node_ulong(&iter);
1452 if (node == NULL) {
1453 DBG2("UST app find by sock %d not found", sock);
1454 goto error;
1455 }
1456
1457 return caa_container_of(node, struct ust_app, sock_n);
1458
1459 error:
1460 return NULL;
1461 }
1462
1463 /*
1464 * Find an ust_app using the notify sock and return it. RCU read side lock must
1465 * be held before calling this helper function.
1466 */
1467 static struct ust_app *find_app_by_notify_sock(int sock)
1468 {
1469 struct lttng_ht_node_ulong *node;
1470 struct lttng_ht_iter iter;
1471
1472 lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
1473 &iter);
1474 node = lttng_ht_iter_get_node_ulong(&iter);
1475 if (node == NULL) {
1476 DBG2("UST app find by notify sock %d not found", sock);
1477 goto error;
1478 }
1479
1480 return caa_container_of(node, struct ust_app, notify_sock_n);
1481
1482 error:
1483 return NULL;
1484 }
1485
1486 /*
1487 * Lookup for an ust app event based on event name, filter bytecode and the
1488 * event loglevel.
1489 *
1490 * Return an ust_app_event object or NULL on error.
1491 */
1492 static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
1493 const char *name,
1494 const struct lttng_bytecode *filter,
1495 enum lttng_ust_abi_loglevel_type loglevel_type,
1496 int loglevel_value,
1497 const struct lttng_event_exclusion *exclusion)
1498 {
1499 struct lttng_ht_iter iter;
1500 struct lttng_ht_node_str *node;
1501 struct ust_app_event *event = NULL;
1502 struct ust_app_ht_key key;
1503
1504 assert(name);
1505 assert(ht);
1506
1507 /* Setup key for event lookup. */
1508 key.name = name;
1509 key.filter = filter;
1510 key.loglevel_type = loglevel_type;
1511 key.loglevel_value = loglevel_value;
1512 /* lttng_event_exclusion and lttng_ust_event_exclusion structures are similar */
1513 key.exclusion = exclusion;
1514
1515 /* Lookup using the event name as hash and a custom match fct. */
1516 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
1517 ht_match_ust_app_event, &key, &iter.iter);
1518 node = lttng_ht_iter_get_node_str(&iter);
1519 if (node == NULL) {
1520 goto end;
1521 }
1522
1523 event = caa_container_of(node, struct ust_app_event, node);
1524
1525 end:
1526 return event;
1527 }
1528
1529 /*
1530 * Look-up an event notifier rule based on its token id.
1531 *
1532 * Must be called with the RCU read lock held.
1533 * Return an ust_app_event_notifier_rule object or NULL on error.
1534 */
1535 static struct ust_app_event_notifier_rule *find_ust_app_event_notifier_rule(
1536 struct lttng_ht *ht, uint64_t token)
1537 {
1538 struct lttng_ht_iter iter;
1539 struct lttng_ht_node_u64 *node;
1540 struct ust_app_event_notifier_rule *event_notifier_rule = NULL;
1541
1542 assert(ht);
1543
1544 lttng_ht_lookup(ht, &token, &iter);
1545 node = lttng_ht_iter_get_node_u64(&iter);
1546 if (node == NULL) {
1547 DBG2("UST app event notifier rule token not found: token = %" PRIu64,
1548 token);
1549 goto end;
1550 }
1551
1552 event_notifier_rule = caa_container_of(
1553 node, struct ust_app_event_notifier_rule, node);
1554 end:
1555 return event_notifier_rule;
1556 }
1557
1558 /*
1559 * Create the channel context on the tracer.
1560 *
1561 * Called with UST app session lock held.
1562 */
1563 static
1564 int create_ust_channel_context(struct ust_app_channel *ua_chan,
1565 struct ust_app_ctx *ua_ctx, struct ust_app *app)
1566 {
1567 int ret;
1568
1569 health_code_update();
1570
1571 pthread_mutex_lock(&app->sock_lock);
1572 ret = lttng_ust_ctl_add_context(app->sock, &ua_ctx->ctx,
1573 ua_chan->obj, &ua_ctx->obj);
1574 pthread_mutex_unlock(&app->sock_lock);
1575 if (ret < 0) {
1576 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1577 ret = 0;
1578 DBG3("UST app create channel context failed. Application is dead: pid = %d, sock = %d",
1579 app->pid, app->sock);
1580 } else if (ret == -EAGAIN) {
1581 ret = 0;
1582 WARN("UST app create channel context failed. Communication time out: pid = %d, sock = %d",
1583 app->pid, app->sock);
1584 } else {
1585 ERR("UST app create channel context failed with ret %d: pid = %d, sock = %d",
1586 ret, app->pid, app->sock);
1587 }
1588 goto error;
1589 }
1590
1591 ua_ctx->handle = ua_ctx->obj->handle;
1592
1593 DBG2("UST app context handle %d created successfully for channel %s",
1594 ua_ctx->handle, ua_chan->name);
1595
1596 error:
1597 health_code_update();
1598 return ret;
1599 }
1600
1601 /*
1602 * Set the filter on the tracer.
1603 */
1604 static int set_ust_object_filter(struct ust_app *app,
1605 const struct lttng_bytecode *bytecode,
1606 struct lttng_ust_abi_object_data *ust_object)
1607 {
1608 int ret;
1609 struct lttng_ust_abi_filter_bytecode *ust_bytecode = NULL;
1610
1611 health_code_update();
1612
1613 ust_bytecode = create_ust_filter_bytecode_from_bytecode(bytecode);
1614 if (!ust_bytecode) {
1615 ret = -LTTNG_ERR_NOMEM;
1616 goto error;
1617 }
1618 pthread_mutex_lock(&app->sock_lock);
1619 ret = lttng_ust_ctl_set_filter(app->sock, ust_bytecode,
1620 ust_object);
1621 pthread_mutex_unlock(&app->sock_lock);
1622 if (ret < 0) {
1623 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1624 ret = 0;
1625 DBG3("UST app set filter failed. Application is dead: pid = %d, sock = %d",
1626 app->pid, app->sock);
1627 } else if (ret == -EAGAIN) {
1628 ret = 0;
1629 WARN("UST app set filter failed. Communication time out: pid = %d, sock = %d",
1630 app->pid, app->sock);
1631 } else {
1632 ERR("UST app set filter failed with ret %d: pid = %d, sock = %d, object = %p",
1633 ret, app->pid, app->sock, ust_object);
1634 }
1635 goto error;
1636 }
1637
1638 DBG2("UST filter successfully set: object = %p", ust_object);
1639
1640 error:
1641 health_code_update();
1642 free(ust_bytecode);
1643 return ret;
1644 }
1645
1646 /*
1647 * Set a capture bytecode for the passed object.
1648 * The sequence number enforces the ordering at runtime and on reception of
1649 * the captured payloads.
1650 */
1651 static int set_ust_capture(struct ust_app *app,
1652 const struct lttng_bytecode *bytecode,
1653 unsigned int capture_seqnum,
1654 struct lttng_ust_abi_object_data *ust_object)
1655 {
1656 int ret;
1657 struct lttng_ust_abi_capture_bytecode *ust_bytecode = NULL;
1658
1659 health_code_update();
1660
1661 ust_bytecode = create_ust_capture_bytecode_from_bytecode(bytecode);
1662 if (!ust_bytecode) {
1663 ret = -LTTNG_ERR_NOMEM;
1664 goto error;
1665 }
1666
1667 /*
1668 * Set the sequence number to ensure the capture of fields is ordered.
1669 */
1670 ust_bytecode->seqnum = capture_seqnum;
1671
1672 pthread_mutex_lock(&app->sock_lock);
1673 ret = lttng_ust_ctl_set_capture(app->sock, ust_bytecode,
1674 ust_object);
1675 pthread_mutex_unlock(&app->sock_lock);
1676 if (ret < 0) {
1677 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1678 ret = 0;
1679 DBG3("UST app set capture failed. Application is dead: pid = %d, sock = %d",
1680 app->pid, app->sock);
1681 } else if (ret == -EAGAIN) {
1682 ret = 0;
1683 DBG3("UST app set capture failed. Communication timeout: pid = %d, sock = %d",
1684 app->pid, app->sock);
1685 } else {
1686 ERR("UST app event set capture failed with ret %d: pid = %d, sock = %d",
1687 ret, app->pid,
1688 app->sock);
1689 }
1690
1691 goto error;
1692 }
1693
1694 DBG2("UST capture successfully set: object = %p", ust_object);
1695
1696 error:
1697 health_code_update();
1698 free(ust_bytecode);
1699 return ret;
1700 }
1701
1702 static
1703 struct lttng_ust_abi_event_exclusion *create_ust_exclusion_from_exclusion(
1704 const struct lttng_event_exclusion *exclusion)
1705 {
1706 struct lttng_ust_abi_event_exclusion *ust_exclusion = NULL;
1707 size_t exclusion_alloc_size = sizeof(struct lttng_ust_abi_event_exclusion) +
1708 LTTNG_UST_ABI_SYM_NAME_LEN * exclusion->count;
1709
1710 ust_exclusion = zmalloc(exclusion_alloc_size);
1711 if (!ust_exclusion) {
1712 PERROR("malloc");
1713 goto end;
1714 }
1715
1716 assert(sizeof(struct lttng_event_exclusion) ==
1717 sizeof(struct lttng_ust_abi_event_exclusion));
1718 memcpy(ust_exclusion, exclusion, exclusion_alloc_size);
1719 end:
1720 return ust_exclusion;
1721 }
1722
1723 /*
1724 * Set event exclusions on the tracer.
1725 */
1726 static int set_ust_object_exclusions(struct ust_app *app,
1727 const struct lttng_event_exclusion *exclusions,
1728 struct lttng_ust_abi_object_data *ust_object)
1729 {
1730 int ret;
1731 struct lttng_ust_abi_event_exclusion *ust_exclusions = NULL;
1732
1733 assert(exclusions && exclusions->count > 0);
1734
1735 health_code_update();
1736
1737 ust_exclusions = create_ust_exclusion_from_exclusion(
1738 exclusions);
1739 if (!ust_exclusions) {
1740 ret = -LTTNG_ERR_NOMEM;
1741 goto error;
1742 }
1743 pthread_mutex_lock(&app->sock_lock);
1744 ret = lttng_ust_ctl_set_exclusion(app->sock, ust_exclusions, ust_object);
1745 pthread_mutex_unlock(&app->sock_lock);
1746 if (ret < 0) {
1747 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1748 ret = 0;
1749 DBG3("UST app event exclusion failed. Application is dead: pid = %d, sock = %d",
1750 app->pid, app->sock);
1751 } else if (ret == -EAGAIN) {
1752 ret = 0;
1753 WARN("UST app event exclusion failed. Communication time out(pid: %d, sock = %d",
1754 app->pid, app->sock);
1755 } else {
1756 ERR("UST app event exclusions failed with ret %d: pid = %d, sock = %d, object = %p",
1757 ret, app->pid, app->sock, ust_object);
1758 }
1759 goto error;
1760 }
1761
1762 DBG2("UST exclusions set successfully for object %p", ust_object);
1763
1764 error:
1765 health_code_update();
1766 free(ust_exclusions);
1767 return ret;
1768 }
1769
1770 /*
1771 * Disable the specified event on to UST tracer for the UST session.
1772 */
1773 static int disable_ust_object(struct ust_app *app,
1774 struct lttng_ust_abi_object_data *object)
1775 {
1776 int ret;
1777
1778 health_code_update();
1779
1780 pthread_mutex_lock(&app->sock_lock);
1781 ret = lttng_ust_ctl_disable(app->sock, object);
1782 pthread_mutex_unlock(&app->sock_lock);
1783 if (ret < 0) {
1784 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1785 ret = 0;
1786 DBG3("UST app disable object failed. Application is dead: pid = %d, sock = %d",
1787 app->pid, app->sock);
1788 } else if (ret == -EAGAIN) {
1789 ret = 0;
1790 WARN("UST app disable object failed. Communication time out: pid = %d, sock = %d",
1791 app->pid, app->sock);
1792 } else {
1793 ERR("UST app disable object failed with ret %d: pid = %d, sock = %d, object = %p",
1794 ret, app->pid, app->sock, object);
1795 }
1796 goto error;
1797 }
1798
1799 DBG2("UST app object %p disabled successfully for app: pid = %d",
1800 object, app->pid);
1801
1802 error:
1803 health_code_update();
1804 return ret;
1805 }
1806
1807 /*
1808 * Disable the specified channel on to UST tracer for the UST session.
1809 */
1810 static int disable_ust_channel(struct ust_app *app,
1811 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1812 {
1813 int ret;
1814
1815 health_code_update();
1816
1817 pthread_mutex_lock(&app->sock_lock);
1818 ret = lttng_ust_ctl_disable(app->sock, ua_chan->obj);
1819 pthread_mutex_unlock(&app->sock_lock);
1820 if (ret < 0) {
1821 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1822 ret = 0;
1823 DBG3("UST app disable channel failed. Application is dead: pid = %d, sock = %d",
1824 app->pid, app->sock);
1825 } else if (ret == -EAGAIN) {
1826 ret = 0;
1827 WARN("UST app disable channel failed. Communication time out: pid = %d, sock = %d",
1828 app->pid, app->sock);
1829 } else {
1830 ERR("UST app channel %s disable failed, session handle %d, with ret %d: pid = %d, sock = %d",
1831 ua_chan->name, ua_sess->handle, ret,
1832 app->pid, app->sock);
1833 }
1834 goto error;
1835 }
1836
1837 DBG2("UST app channel %s disabled successfully for app: pid = %d",
1838 ua_chan->name, app->pid);
1839
1840 error:
1841 health_code_update();
1842 return ret;
1843 }
1844
1845 /*
1846 * Enable the specified channel on to UST tracer for the UST session.
1847 */
1848 static int enable_ust_channel(struct ust_app *app,
1849 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1850 {
1851 int ret;
1852
1853 health_code_update();
1854
1855 pthread_mutex_lock(&app->sock_lock);
1856 ret = lttng_ust_ctl_enable(app->sock, ua_chan->obj);
1857 pthread_mutex_unlock(&app->sock_lock);
1858 if (ret < 0) {
1859 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1860 ret = 0;
1861 DBG3("UST app channel %s enable failed. Application is dead: pid = %d, sock = %d",
1862 ua_chan->name, app->pid, app->sock);
1863 } else if (ret == -EAGAIN) {
1864 ret = 0;
1865 WARN("UST app channel %s enable failed. Communication time out: pid = %d, sock = %d",
1866 ua_chan->name, app->pid, app->sock);
1867 } else {
1868 ERR("UST app channel %s enable failed, session handle %d, with ret %d: pid = %d, sock = %d",
1869 ua_chan->name, ua_sess->handle, ret,
1870 app->pid, app->sock);
1871 }
1872 goto error;
1873 }
1874
1875 ua_chan->enabled = 1;
1876
1877 DBG2("UST app channel %s enabled successfully for app: pid = %d",
1878 ua_chan->name, app->pid);
1879
1880 error:
1881 health_code_update();
1882 return ret;
1883 }
1884
1885 /*
1886 * Enable the specified event on to UST tracer for the UST session.
1887 */
1888 static int enable_ust_object(
1889 struct ust_app *app, struct lttng_ust_abi_object_data *ust_object)
1890 {
1891 int ret;
1892
1893 health_code_update();
1894
1895 pthread_mutex_lock(&app->sock_lock);
1896 ret = lttng_ust_ctl_enable(app->sock, ust_object);
1897 pthread_mutex_unlock(&app->sock_lock);
1898 if (ret < 0) {
1899 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1900 ret = 0;
1901 DBG3("UST app enable object failed. Application is dead: pid = %d, sock = %d",
1902 app->pid, app->sock);
1903 } else if (ret == -EAGAIN) {
1904 ret = 0;
1905 WARN("UST app enable object failed. Communication time out: pid = %d, sock = %d",
1906 app->pid, app->sock);
1907 } else {
1908 ERR("UST app enable object failed with ret %d: pid = %d, sock = %d, object = %p",
1909 ret, app->pid, app->sock, ust_object);
1910 }
1911 goto error;
1912 }
1913
1914 DBG2("UST app object %p enabled successfully for app: pid = %d",
1915 ust_object, app->pid);
1916
1917 error:
1918 health_code_update();
1919 return ret;
1920 }
1921
1922 /*
1923 * Send channel and stream buffer to application.
1924 *
1925 * Return 0 on success. On error, a negative value is returned.
1926 */
1927 static int send_channel_pid_to_ust(struct ust_app *app,
1928 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1929 {
1930 int ret;
1931 struct ust_app_stream *stream, *stmp;
1932
1933 assert(app);
1934 assert(ua_sess);
1935 assert(ua_chan);
1936
1937 health_code_update();
1938
1939 DBG("UST app sending channel %s to UST app sock %d", ua_chan->name,
1940 app->sock);
1941
1942 /* Send channel to the application. */
1943 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
1944 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1945 ret = -ENOTCONN; /* Caused by app exiting. */
1946 goto error;
1947 } else if (ret == -EAGAIN) {
1948 /* Caused by timeout. */
1949 WARN("Communication with application %d timed out on send_channel for channel \"%s\" of session \"%" PRIu64 "\".",
1950 app->pid, ua_chan->name, ua_sess->tracing_id);
1951 /* Treat this the same way as an application that is exiting. */
1952 ret = -ENOTCONN;
1953 goto error;
1954 } else if (ret < 0) {
1955 goto error;
1956 }
1957
1958 health_code_update();
1959
1960 /* Send all streams to application. */
1961 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
1962 ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
1963 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1964 ret = -ENOTCONN; /* Caused by app exiting. */
1965 goto error;
1966 } else if (ret == -EAGAIN) {
1967 /* Caused by timeout. */
1968 WARN("Communication with application %d timed out on send_stream for stream \"%s\" of channel \"%s\" of session \"%" PRIu64 "\".",
1969 app->pid, stream->name, ua_chan->name,
1970 ua_sess->tracing_id);
1971 /*
1972 * Treat this the same way as an application that is
1973 * exiting.
1974 */
1975 ret = -ENOTCONN;
1976 } else if (ret < 0) {
1977 goto error;
1978 }
1979 /* We don't need the stream anymore once sent to the tracer. */
1980 cds_list_del(&stream->list);
1981 delete_ust_app_stream(-1, stream, app);
1982 }
1983 /* Flag the channel that it is sent to the application. */
1984 ua_chan->is_sent = 1;
1985
1986 error:
1987 health_code_update();
1988 return ret;
1989 }
1990
1991 /*
1992 * Create the specified event onto the UST tracer for a UST session.
1993 *
1994 * Should be called with session mutex held.
1995 */
1996 static
1997 int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
1998 struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
1999 {
2000 int ret = 0;
2001
2002 health_code_update();
2003
2004 /* Create UST event on tracer */
2005 pthread_mutex_lock(&app->sock_lock);
2006 ret = lttng_ust_ctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
2007 &ua_event->obj);
2008 pthread_mutex_unlock(&app->sock_lock);
2009 if (ret < 0) {
2010 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
2011 ret = 0;
2012 DBG3("UST app create event failed. Application is dead: pid = %d, sock = %d",
2013 app->pid, app->sock);
2014 } else if (ret == -EAGAIN) {
2015 ret = 0;
2016 WARN("UST app create event failed. Communication time out: pid = %d, sock = %d",
2017 app->pid, app->sock);
2018 } else {
2019 ERR("UST app create event '%s' failed with ret %d: pid = %d, sock = %d",
2020 ua_event->attr.name, ret, app->pid,
2021 app->sock);
2022 }
2023 goto error;
2024 }
2025
2026 ua_event->handle = ua_event->obj->handle;
2027
2028 DBG2("UST app event %s created successfully for pid:%d object = %p",
2029 ua_event->attr.name, app->pid, ua_event->obj);
2030
2031 health_code_update();
2032
2033 /* Set filter if one is present. */
2034 if (ua_event->filter) {
2035 ret = set_ust_object_filter(app, ua_event->filter, ua_event->obj);
2036 if (ret < 0) {
2037 goto error;
2038 }
2039 }
2040
2041 /* Set exclusions for the event */
2042 if (ua_event->exclusion) {
2043 ret = set_ust_object_exclusions(app, ua_event->exclusion, ua_event->obj);
2044 if (ret < 0) {
2045 goto error;
2046 }
2047 }
2048
2049 /* If event not enabled, disable it on the tracer */
2050 if (ua_event->enabled) {
2051 /*
2052 * We now need to explicitly enable the event, since it
2053 * is now disabled at creation.
2054 */
2055 ret = enable_ust_object(app, ua_event->obj);
2056 if (ret < 0) {
2057 /*
2058 * If we hit an EPERM, something is wrong with our enable call. If
2059 * we get an EEXIST, there is a problem on the tracer side since we
2060 * just created it.
2061 */
2062 switch (ret) {
2063 case -LTTNG_UST_ERR_PERM:
2064 /* Code flow problem */
2065 assert(0);
2066 case -LTTNG_UST_ERR_EXIST:
2067 /* It's OK for our use case. */
2068 ret = 0;
2069 break;
2070 default:
2071 break;
2072 }
2073 goto error;
2074 }
2075 }
2076
2077 error:
2078 health_code_update();
2079 return ret;
2080 }
2081
/*
 * Translate an lttng_event_rule into the lttng_ust_abi_event_notifier
 * representation expected by the UST tracer ABI.
 *
 * Only the name pattern, log level type, and log level are derived here;
 * filter and exclusion bytecodes are attached by the caller afterwards.
 *
 * Returns 0 on success, or the negative value returned by lttng_strncpy()
 * when the pattern does not fit in the fixed-size ABI name buffer.
 * Aborts on a malformed event rule, which indicates an internal logic error.
 */
static int init_ust_event_notifier_from_event_rule(
		const struct lttng_event_rule *rule,
		struct lttng_ust_abi_event_notifier *event_notifier)
{
	enum lttng_event_rule_status status;
	enum lttng_ust_abi_loglevel_type ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL;
	int loglevel = -1, ret = 0;
	const char *pattern;


	memset(event_notifier, 0, sizeof(*event_notifier));

	if (lttng_event_rule_targets_agent_domain(rule)) {
		/*
		 * Special event for agents
		 * The actual meat of the event is in the filter that will be
		 * attached later on.
		 * Set the default values for the agent event.
		 */
		pattern = event_get_default_agent_ust_name(
				lttng_event_rule_get_domain_type(rule));
		loglevel = 0;
		ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL;
	} else {
		const struct lttng_log_level_rule *log_level_rule;

		/* Only user tracepoint rules can reach the non-agent path. */
		assert(lttng_event_rule_get_type(rule) ==
				LTTNG_EVENT_RULE_TYPE_USER_TRACEPOINT);

		status = lttng_event_rule_user_tracepoint_get_name_pattern(rule, &pattern);
		if (status != LTTNG_EVENT_RULE_STATUS_OK) {
			/* At this point, this is a fatal error. */
			abort();
		}

		status = lttng_event_rule_user_tracepoint_get_log_level_rule(
				rule, &log_level_rule);
		if (status == LTTNG_EVENT_RULE_STATUS_UNSET) {
			/* No log level rule set: match all log levels. */
			ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL;
		} else if (status == LTTNG_EVENT_RULE_STATUS_OK) {
			enum lttng_log_level_rule_status llr_status;

			/* Map the log level rule type to its UST ABI equivalent. */
			switch (lttng_log_level_rule_get_type(log_level_rule)) {
			case LTTNG_LOG_LEVEL_RULE_TYPE_EXACTLY:
				ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_SINGLE;
				llr_status = lttng_log_level_rule_exactly_get_level(
						log_level_rule, &loglevel);
				break;
			case LTTNG_LOG_LEVEL_RULE_TYPE_AT_LEAST_AS_SEVERE_AS:
				ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_RANGE;
				llr_status = lttng_log_level_rule_at_least_as_severe_as_get_level(
						log_level_rule, &loglevel);
				break;
			default:
				abort();
			}

			assert(llr_status == LTTNG_LOG_LEVEL_RULE_STATUS_OK);
		} else {
			/* At this point this is a fatal error. */
			abort();
		}
	}

	event_notifier->event.instrumentation = LTTNG_UST_ABI_TRACEPOINT;
	ret = lttng_strncpy(event_notifier->event.name, pattern,
			sizeof(event_notifier->event.name));
	if (ret) {
		ERR("Failed to copy event rule pattern to notifier: pattern = '%s' ",
				pattern);
		goto end;
	}

	event_notifier->event.loglevel_type = ust_loglevel_type;
	event_notifier->event.loglevel = loglevel;
end:
	return ret;
}
2160
2161 /*
2162 * Create the specified event notifier against the user space tracer of a
2163 * given application.
2164 */
2165 static int create_ust_event_notifier(struct ust_app *app,
2166 struct ust_app_event_notifier_rule *ua_event_notifier_rule)
2167 {
2168 int ret = 0;
2169 enum lttng_condition_status condition_status;
2170 const struct lttng_condition *condition = NULL;
2171 struct lttng_ust_abi_event_notifier event_notifier;
2172 const struct lttng_event_rule *event_rule = NULL;
2173 unsigned int capture_bytecode_count = 0, i;
2174 enum lttng_condition_status cond_status;
2175 enum lttng_event_rule_type event_rule_type;
2176
2177 health_code_update();
2178 assert(app->event_notifier_group.object);
2179
2180 condition = lttng_trigger_get_const_condition(
2181 ua_event_notifier_rule->trigger);
2182 assert(condition);
2183 assert(lttng_condition_get_type(condition) ==
2184 LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES);
2185
2186 condition_status = lttng_condition_event_rule_matches_get_rule(
2187 condition, &event_rule);
2188 assert(condition_status == LTTNG_CONDITION_STATUS_OK);
2189
2190 assert(event_rule);
2191
2192 event_rule_type = lttng_event_rule_get_type(event_rule);
2193 assert(event_rule_type == LTTNG_EVENT_RULE_TYPE_USER_TRACEPOINT ||
2194 event_rule_type == LTTNG_EVENT_RULE_TYPE_JUL_LOGGING ||
2195 event_rule_type ==
2196 LTTNG_EVENT_RULE_TYPE_LOG4J_LOGGING ||
2197 event_rule_type ==
2198 LTTNG_EVENT_RULE_TYPE_PYTHON_LOGGING);
2199
2200 init_ust_event_notifier_from_event_rule(event_rule, &event_notifier);
2201 event_notifier.event.token = ua_event_notifier_rule->token;
2202 event_notifier.error_counter_index = ua_event_notifier_rule->error_counter_index;
2203
2204 /* Create UST event notifier against the tracer. */
2205 pthread_mutex_lock(&app->sock_lock);
2206 ret = lttng_ust_ctl_create_event_notifier(app->sock, &event_notifier,
2207 app->event_notifier_group.object,
2208 &ua_event_notifier_rule->obj);
2209 pthread_mutex_unlock(&app->sock_lock);
2210 if (ret < 0) {
2211 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
2212 ret = 0;
2213 DBG3("UST app create event notifier failed. Application is dead: pid = %d, sock = %d",
2214 app->pid, app->sock);
2215 } else if (ret == -EAGAIN) {
2216 ret = 0;
2217 WARN("UST app create event notifier failed. Communication time out: pid = %d, sock = %d",
2218 app->pid, app->sock);
2219 } else {
2220 ERR("UST app create event notifier '%s' failed with ret %d: pid = %d, sock = %d",
2221 event_notifier.event.name, ret, app->pid,
2222 app->sock);
2223 }
2224 goto error;
2225 }
2226
2227 ua_event_notifier_rule->handle = ua_event_notifier_rule->obj->handle;
2228
2229 DBG2("UST app event notifier %s created successfully: app = '%s': pid = %d), object = %p",
2230 event_notifier.event.name, app->name, app->pid,
2231 ua_event_notifier_rule->obj);
2232
2233 health_code_update();
2234
2235 /* Set filter if one is present. */
2236 if (ua_event_notifier_rule->filter) {
2237 ret = set_ust_object_filter(app, ua_event_notifier_rule->filter,
2238 ua_event_notifier_rule->obj);
2239 if (ret < 0) {
2240 goto error;
2241 }
2242 }
2243
2244 /* Set exclusions for the event. */
2245 if (ua_event_notifier_rule->exclusion) {
2246 ret = set_ust_object_exclusions(app,
2247 ua_event_notifier_rule->exclusion,
2248 ua_event_notifier_rule->obj);
2249 if (ret < 0) {
2250 goto error;
2251 }
2252 }
2253
2254 /* Set the capture bytecodes. */
2255 cond_status = lttng_condition_event_rule_matches_get_capture_descriptor_count(
2256 condition, &capture_bytecode_count);
2257 assert(cond_status == LTTNG_CONDITION_STATUS_OK);
2258
2259 for (i = 0; i < capture_bytecode_count; i++) {
2260 const struct lttng_bytecode *capture_bytecode =
2261 lttng_condition_event_rule_matches_get_capture_bytecode_at_index(
2262 condition, i);
2263
2264 ret = set_ust_capture(app, capture_bytecode, i,
2265 ua_event_notifier_rule->obj);
2266 if (ret < 0) {
2267 goto error;
2268 }
2269 }
2270
2271 /*
2272 * We now need to explicitly enable the event, since it
2273 * is disabled at creation.
2274 */
2275 ret = enable_ust_object(app, ua_event_notifier_rule->obj);
2276 if (ret < 0) {
2277 /*
2278 * If we hit an EPERM, something is wrong with our enable call.
2279 * If we get an EEXIST, there is a problem on the tracer side
2280 * since we just created it.
2281 */
2282 switch (ret) {
2283 case -LTTNG_UST_ERR_PERM:
2284 /* Code flow problem. */
2285 abort();
2286 case -LTTNG_UST_ERR_EXIST:
2287 /* It's OK for our use case. */
2288 ret = 0;
2289 break;
2290 default:
2291 break;
2292 }
2293
2294 goto error;
2295 }
2296
2297 ua_event_notifier_rule->enabled = true;
2298
2299 error:
2300 health_code_update();
2301 return ret;
2302 }
2303
/*
 * Copy data between an UST app event and a LTT event.
 *
 * On out-of-memory, the filter and/or exclusion copies may be left NULL;
 * the copy is best-effort and no error is reported to the caller.
 */
static void shadow_copy_event(struct ust_app_event *ua_event,
		struct ltt_ust_event *uevent)
{
	size_t exclusion_alloc_size;

	strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
	/* Force NUL-termination; strncpy does not guarantee it. */
	ua_event->name[sizeof(ua_event->name) - 1] = '\0';

	ua_event->enabled = uevent->enabled;

	/* Copy event attributes */
	memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));

	/* Copy filter bytecode */
	if (uevent->filter) {
		ua_event->filter = lttng_bytecode_copy(uevent->filter);
		/* Filter might be NULL here in case of ENOMEM. */
	}

	/* Copy exclusion data */
	if (uevent->exclusion) {
		/* Header plus one symbol name slot per excluded name. */
		exclusion_alloc_size = sizeof(struct lttng_event_exclusion) +
				LTTNG_UST_ABI_SYM_NAME_LEN * uevent->exclusion->count;
		ua_event->exclusion = zmalloc(exclusion_alloc_size);
		if (ua_event->exclusion == NULL) {
			/* Event is copied without exclusions on allocation failure. */
			PERROR("malloc");
		} else {
			memcpy(ua_event->exclusion, uevent->exclusion,
					exclusion_alloc_size);
		}
	}
}
2339
2340 /*
2341 * Copy data between an UST app channel and a LTT channel.
2342 */
2343 static void shadow_copy_channel(struct ust_app_channel *ua_chan,
2344 struct ltt_ust_channel *uchan)
2345 {
2346 DBG2("UST app shadow copy of channel %s started", ua_chan->name);
2347
2348 strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
2349 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
2350
2351 ua_chan->tracefile_size = uchan->tracefile_size;
2352 ua_chan->tracefile_count = uchan->tracefile_count;
2353
2354 /* Copy event attributes since the layout is different. */
2355 ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
2356 ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
2357 ua_chan->attr.overwrite = uchan->attr.overwrite;
2358 ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
2359 ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
2360 ua_chan->monitor_timer_interval = uchan->monitor_timer_interval;
2361 ua_chan->attr.output = uchan->attr.output;
2362 ua_chan->attr.blocking_timeout = uchan->attr.u.s.blocking_timeout;
2363
2364 /*
2365 * Note that the attribute channel type is not set since the channel on the
2366 * tracing registry side does not have this information.
2367 */
2368
2369 ua_chan->enabled = uchan->enabled;
2370 ua_chan->tracing_channel_id = uchan->id;
2371
2372 DBG3("UST app shadow copy of channel %s done", ua_chan->name);
2373 }
2374
/*
 * Copy data between a UST app session and a regular LTT session.
 *
 * Takes a reference on the session's consumer output; on error the
 * reference is dropped again before returning.
 */
static void shadow_copy_session(struct ust_app_session *ua_sess,
		struct ltt_ust_session *usess, struct ust_app *app)
{
	struct tm *timeinfo;
	/* 16 bytes fit "YYYYMMDD-HHMMSS" plus the NUL terminator. */
	char datetime[16];
	int ret;
	char tmp_shm_path[PATH_MAX];

	/* Timestamp the per-PID path with the app's registration time. */
	timeinfo = localtime(&app->registration_time);
	strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);

	DBG2("Shadow copy of session handle %d", ua_sess->handle);

	ua_sess->tracing_id = usess->id;
	ua_sess->id = get_next_session_id();
	LTTNG_OPTIONAL_SET(&ua_sess->real_credentials.uid, app->uid);
	LTTNG_OPTIONAL_SET(&ua_sess->real_credentials.gid, app->gid);
	LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.uid, usess->uid);
	LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.gid, usess->gid);
	ua_sess->buffer_type = usess->buffer_type;
	ua_sess->bits_per_long = app->bits_per_long;

	/* There is only one consumer object per session possible. */
	consumer_output_get(usess->consumer);
	ua_sess->consumer = usess->consumer;

	ua_sess->output_traces = usess->output_traces;
	ua_sess->live_timer_interval = usess->live_timer_interval;
	copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
			&usess->metadata_attr);

	/* Build the trace output sub-path according to the buffer scheme. */
	switch (ua_sess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
				DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
				datetime);
		break;
	case LTTNG_BUFFER_PER_UID:
		ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
				DEFAULT_UST_TRACE_UID_PATH,
				lttng_credentials_get_uid(&ua_sess->real_credentials),
				app->bits_per_long);
		break;
	default:
		assert(0);
		goto error;
	}
	if (ret < 0) {
		/* NOTE(review): message says "asprintf" but snprintf is used above. */
		PERROR("asprintf UST shadow copy session");
		assert(0);
		goto error;
	}

	/* Copy the shared memory paths, forcing NUL-termination. */
	strncpy(ua_sess->root_shm_path, usess->root_shm_path,
			sizeof(ua_sess->root_shm_path));
	ua_sess->root_shm_path[sizeof(ua_sess->root_shm_path) - 1] = '\0';
	strncpy(ua_sess->shm_path, usess->shm_path,
			sizeof(ua_sess->shm_path));
	ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
	if (ua_sess->shm_path[0]) {
		/* An explicit shm path is set: append the same sub-path to it. */
		switch (ua_sess->buffer_type) {
		case LTTNG_BUFFER_PER_PID:
			ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
					"/" DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s",
					app->name, app->pid, datetime);
			break;
		case LTTNG_BUFFER_PER_UID:
			ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
					"/" DEFAULT_UST_TRACE_UID_PATH,
					app->uid, app->bits_per_long);
			break;
		default:
			assert(0);
			goto error;
		}
		if (ret < 0) {
			PERROR("sprintf UST shadow copy session");
			assert(0);
			goto error;
		}
		strncat(ua_sess->shm_path, tmp_shm_path,
				sizeof(ua_sess->shm_path) - strlen(ua_sess->shm_path) - 1);
		ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
	}
	return;

error:
	/* Drop the consumer reference taken above. */
	consumer_output_put(ua_sess->consumer);
}
2467
/*
 * Lookup session wrapper.
 */
static
void __lookup_session_by_app(const struct ltt_ust_session *usess,
		struct ust_app *app, struct lttng_ht_iter *iter)
{
	/* Get right UST app session from app; keyed by the tracing session id. */
	lttng_ht_lookup(app->sessions, &usess->id, iter);
}
2478
2479 /*
2480 * Return ust app session from the app session hashtable using the UST session
2481 * id.
2482 */
2483 static struct ust_app_session *lookup_session_by_app(
2484 const struct ltt_ust_session *usess, struct ust_app *app)
2485 {
2486 struct lttng_ht_iter iter;
2487 struct lttng_ht_node_u64 *node;
2488
2489 __lookup_session_by_app(usess, app, &iter);
2490 node = lttng_ht_iter_get_node_u64(&iter);
2491 if (node == NULL) {
2492 goto error;
2493 }
2494
2495 return caa_container_of(node, struct ust_app_session, node);
2496
2497 error:
2498 return NULL;
2499 }
2500
/*
 * Setup buffer registry per PID for the given session and application. If none
 * is found, a new one is created, added to the global registry and
 * initialized. If regp is valid, it's set with the newly created object.
 *
 * Holds the RCU read-side lock for the duration of the lookup/creation.
 *
 * Return 0 on success or else a negative value.
 */
static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
		struct ust_app *app, struct buffer_reg_pid **regp)
{
	int ret = 0;
	struct buffer_reg_pid *reg_pid;

	assert(ua_sess);
	assert(app);

	rcu_read_lock();

	reg_pid = buffer_reg_pid_find(ua_sess->id);
	if (!reg_pid) {
		/*
		 * This is the create channel path meaning that if there is NO
		 * registry available, we have to create one for this session.
		 */
		ret = buffer_reg_pid_create(ua_sess->id, &reg_pid,
				ua_sess->root_shm_path, ua_sess->shm_path);
		if (ret < 0) {
			goto error;
		}
	} else {
		/* Registry already exists; just report it through regp. */
		goto end;
	}

	/* Initialize registry. */
	ret = ust_registry_session_init(&reg_pid->registry->reg.ust, app,
			app->bits_per_long, app->uint8_t_alignment,
			app->uint16_t_alignment, app->uint32_t_alignment,
			app->uint64_t_alignment, app->long_alignment,
			app->byte_order, app->version.major, app->version.minor,
			reg_pid->root_shm_path, reg_pid->shm_path,
			lttng_credentials_get_uid(&ua_sess->effective_credentials),
			lttng_credentials_get_gid(&ua_sess->effective_credentials),
			ua_sess->tracing_id,
			app->uid);
	if (ret < 0) {
		/*
		 * reg_pid->registry->reg.ust is NULL upon error, so we need to
		 * destroy the buffer registry, because it is always expected
		 * that if the buffer registry can be found, its ust registry is
		 * non-NULL.
		 */
		buffer_reg_pid_destroy(reg_pid);
		goto error;
	}

	buffer_reg_pid_add(reg_pid);

	DBG3("UST app buffer registry per PID created successfully");

end:
	if (regp) {
		*regp = reg_pid;
	}
error:
	rcu_read_unlock();
	return ret;
}
2568
/*
 * Setup buffer registry per UID for the given session and application. If none
 * is found, a new one is created, added to the global registry and
 * initialized. If regp is valid, it's set with the newly created object.
 *
 * A per-UID registry is keyed by (session id, bitness, uid). Holds the RCU
 * read-side lock for the duration of the lookup/creation.
 *
 * Return 0 on success or else a negative value.
 */
static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
		struct ust_app_session *ua_sess,
		struct ust_app *app, struct buffer_reg_uid **regp)
{
	int ret = 0;
	struct buffer_reg_uid *reg_uid;

	assert(usess);
	assert(app);

	rcu_read_lock();

	reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
	if (!reg_uid) {
		/*
		 * This is the create channel path meaning that if there is NO
		 * registry available, we have to create one for this session.
		 */
		ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
				LTTNG_DOMAIN_UST, &reg_uid,
				ua_sess->root_shm_path, ua_sess->shm_path);
		if (ret < 0) {
			goto error;
		}
	} else {
		/* Registry already exists; just report it through regp. */
		goto end;
	}

	/* Initialize registry. Per-UID, so no app is passed (NULL). */
	ret = ust_registry_session_init(&reg_uid->registry->reg.ust, NULL,
			app->bits_per_long, app->uint8_t_alignment,
			app->uint16_t_alignment, app->uint32_t_alignment,
			app->uint64_t_alignment, app->long_alignment,
			app->byte_order, app->version.major,
			app->version.minor, reg_uid->root_shm_path,
			reg_uid->shm_path, usess->uid, usess->gid,
			ua_sess->tracing_id, app->uid);
	if (ret < 0) {
		/*
		 * reg_uid->registry->reg.ust is NULL upon error, so we need to
		 * destroy the buffer registry, because it is always expected
		 * that if the buffer registry can be found, its ust registry is
		 * non-NULL.
		 */
		buffer_reg_uid_destroy(reg_uid, NULL);
		goto error;
	}
	/* Add node to teardown list of the session. */
	cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);

	buffer_reg_uid_add(reg_uid);

	DBG3("UST app buffer registry per UID created successfully");
end:
	if (regp) {
		*regp = reg_uid;
	}
error:
	rcu_read_unlock();
	return ret;
}
2637
/*
 * Create a session on the tracer side for the given app.
 *
 * On success, ua_sess_ptr is populated with the session pointer or else left
 * untouched. If the session was created, is_created is set to 1. On error,
 * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
 * be NULL.
 *
 * Returns 0 on success or else a negative code which is either -ENOMEM or
 * -ENOTCONN which is the default code if the lttng_ust_ctl_create_session fails.
 */
static int find_or_create_ust_app_session(struct ltt_ust_session *usess,
		struct ust_app *app, struct ust_app_session **ua_sess_ptr,
		int *is_created)
{
	int ret, created = 0;
	struct ust_app_session *ua_sess;

	assert(usess);
	assert(app);
	assert(ua_sess_ptr);

	health_code_update();

	/* Reuse the app's existing session for this tracing session if any. */
	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
				app->pid, usess->id);
		ua_sess = alloc_ust_app_session();
		if (ua_sess == NULL) {
			/* Only malloc can failed so something is really wrong */
			ret = -ENOMEM;
			goto error;
		}
		shadow_copy_session(ua_sess, usess, app);
		created = 1;
	}

	/* Ensure the matching buffer registry (per-PID or per-UID) exists. */
	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		/* Init local registry. */
		ret = setup_buffer_reg_pid(ua_sess, app, NULL);
		if (ret < 0) {
			delete_ust_app_session(-1, ua_sess, app);
			goto error;
		}
		break;
	case LTTNG_BUFFER_PER_UID:
		/* Look for a global registry. If none exists, create one. */
		ret = setup_buffer_reg_uid(usess, ua_sess, app, NULL);
		if (ret < 0) {
			delete_ust_app_session(-1, ua_sess, app);
			goto error;
		}
		break;
	default:
		assert(0);
		ret = -EINVAL;
		goto error;
	}

	health_code_update();

	/* Handle of -1 means the session was never created on the tracer. */
	if (ua_sess->handle == -1) {
		pthread_mutex_lock(&app->sock_lock);
		ret = lttng_ust_ctl_create_session(app->sock);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0) {
			if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
				DBG("UST app creating session failed. Application is dead: pid = %d, sock = %d",
						app->pid, app->sock);
				ret = 0;
			} else if (ret == -EAGAIN) {
				DBG("UST app creating session failed. Communication time out: pid = %d, sock = %d",
						app->pid, app->sock);
				ret = 0;
			} else {
				ERR("UST app creating session failed with ret %d: pid = %d, sock =%d",
						ret, app->pid, app->sock);
			}
			delete_ust_app_session(-1, ua_sess, app);
			if (ret != -ENOMEM) {
				/*
				 * Tracer is probably gone or got an internal error so let's
				 * behave like it will soon unregister or not usable.
				 */
				ret = -ENOTCONN;
			}
			goto error;
		}

		ua_sess->handle = ret;

		/* Add ust app session to app's HT */
		lttng_ht_node_init_u64(&ua_sess->node,
				ua_sess->tracing_id);
		lttng_ht_add_unique_u64(app->sessions, &ua_sess->node);
		lttng_ht_node_init_ulong(&ua_sess->ust_objd_node, ua_sess->handle);
		lttng_ht_add_unique_ulong(app->ust_sessions_objd,
				&ua_sess->ust_objd_node);

		DBG2("UST app session created successfully with handle %d", ret);
	}

	*ua_sess_ptr = ua_sess;
	if (is_created) {
		*is_created = created;
	}

	/* Everything went well. */
	ret = 0;

error:
	health_code_update();
	return ret;
}
2754
2755 /*
2756 * Match function for a hash table lookup of ust_app_ctx.
2757 *
2758 * It matches an ust app context based on the context type and, in the case
2759 * of perf counters, their name.
2760 */
2761 static int ht_match_ust_app_ctx(struct cds_lfht_node *node, const void *_key)
2762 {
2763 struct ust_app_ctx *ctx;
2764 const struct lttng_ust_context_attr *key;
2765
2766 assert(node);
2767 assert(_key);
2768
2769 ctx = caa_container_of(node, struct ust_app_ctx, node.node);
2770 key = _key;
2771
2772 /* Context type */
2773 if (ctx->ctx.ctx != key->ctx) {
2774 goto no_match;
2775 }
2776
2777 switch(key->ctx) {
2778 case LTTNG_UST_ABI_CONTEXT_PERF_THREAD_COUNTER:
2779 if (strncmp(key->u.perf_counter.name,
2780 ctx->ctx.u.perf_counter.name,
2781 sizeof(key->u.perf_counter.name))) {
2782 goto no_match;
2783 }
2784 break;
2785 case LTTNG_UST_ABI_CONTEXT_APP_CONTEXT:
2786 if (strcmp(key->u.app_ctx.provider_name,
2787 ctx->ctx.u.app_ctx.provider_name) ||
2788 strcmp(key->u.app_ctx.ctx_name,
2789 ctx->ctx.u.app_ctx.ctx_name)) {
2790 goto no_match;
2791 }
2792 break;
2793 default:
2794 break;
2795 }
2796
2797 /* Match. */
2798 return 1;
2799
2800 no_match:
2801 return 0;
2802 }
2803
2804 /*
2805 * Lookup for an ust app context from an lttng_ust_context.
2806 *
2807 * Must be called while holding RCU read side lock.
2808 * Return an ust_app_ctx object or NULL on error.
2809 */
2810 static
2811 struct ust_app_ctx *find_ust_app_context(struct lttng_ht *ht,
2812 struct lttng_ust_context_attr *uctx)
2813 {
2814 struct lttng_ht_iter iter;
2815 struct lttng_ht_node_ulong *node;
2816 struct ust_app_ctx *app_ctx = NULL;
2817
2818 assert(uctx);
2819 assert(ht);
2820
2821 /* Lookup using the lttng_ust_context_type and a custom match fct. */
2822 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) uctx->ctx, lttng_ht_seed),
2823 ht_match_ust_app_ctx, uctx, &iter.iter);
2824 node = lttng_ht_iter_get_node_ulong(&iter);
2825 if (!node) {
2826 goto end;
2827 }
2828
2829 app_ctx = caa_container_of(node, struct ust_app_ctx, node);
2830
2831 end:
2832 return app_ctx;
2833 }
2834
/*
 * Create a context for the channel on the tracer.
 *
 * Called with UST app session lock held and a RCU read side lock.
 *
 * Return 0 on success, -EEXIST if the context already exists on the channel,
 * -ENOMEM on allocation failure, or the negative value returned by
 * create_ust_channel_context().
 */
static
int create_ust_app_channel_context(struct ust_app_channel *ua_chan,
		struct lttng_ust_context_attr *uctx,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_ctx *ua_ctx;

	DBG2("UST app adding context to channel %s", ua_chan->name);

	/* Reject duplicate contexts on the same channel. */
	ua_ctx = find_ust_app_context(ua_chan->ctx, uctx);
	if (ua_ctx) {
		ret = -EEXIST;
		goto error;
	}

	ua_ctx = alloc_ust_app_ctx(uctx);
	if (ua_ctx == NULL) {
		/* malloc failed */
		ret = -ENOMEM;
		goto error;
	}

	/* Track the context in the channel's hash table and ordered list. */
	lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
	lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
	cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);

	/*
	 * NOTE(review): if the tracer-side creation below fails, the context
	 * remains registered in ua_chan->ctx/ctx_list — confirm this is the
	 * intended bookkeeping.
	 */
	ret = create_ust_channel_context(ua_chan, ua_ctx, app);
	if (ret < 0) {
		goto error;
	}

error:
	return ret;
}
2875
2876 /*
2877 * Enable on the tracer side a ust app event for the session and channel.
2878 *
2879 * Called with UST app session lock held.
2880 */
2881 static
2882 int enable_ust_app_event(struct ust_app_session *ua_sess,
2883 struct ust_app_event *ua_event, struct ust_app *app)
2884 {
2885 int ret;
2886
2887 ret = enable_ust_object(app, ua_event->obj);
2888 if (ret < 0) {
2889 goto error;
2890 }
2891
2892 ua_event->enabled = 1;
2893
2894 error:
2895 return ret;
2896 }
2897
2898 /*
2899 * Disable on the tracer side a ust app event for the session and channel.
2900 */
2901 static int disable_ust_app_event(struct ust_app_session *ua_sess,
2902 struct ust_app_event *ua_event, struct ust_app *app)
2903 {
2904 int ret;
2905
2906 ret = disable_ust_object(app, ua_event->obj);
2907 if (ret < 0) {
2908 goto error;
2909 }
2910
2911 ua_event->enabled = 0;
2912
2913 error:
2914 return ret;
2915 }
2916
2917 /*
2918 * Lookup ust app channel for session and disable it on the tracer side.
2919 */
2920 static
2921 int disable_ust_app_channel(struct ust_app_session *ua_sess,
2922 struct ust_app_channel *ua_chan, struct ust_app *app)
2923 {
2924 int ret;
2925
2926 ret = disable_ust_channel(app, ua_sess, ua_chan);
2927 if (ret < 0) {
2928 goto error;
2929 }
2930
2931 ua_chan->enabled = 0;
2932
2933 error:
2934 return ret;
2935 }
2936
2937 /*
2938 * Lookup ust app channel for session and enable it on the tracer side. This
2939 * MUST be called with a RCU read side lock acquired.
2940 */
2941 static int enable_ust_app_channel(struct ust_app_session *ua_sess,
2942 struct ltt_ust_channel *uchan, struct ust_app *app)
2943 {
2944 int ret = 0;
2945 struct lttng_ht_iter iter;
2946 struct lttng_ht_node_str *ua_chan_node;
2947 struct ust_app_channel *ua_chan;
2948
2949 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
2950 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
2951 if (ua_chan_node == NULL) {
2952 DBG2("Unable to find channel %s in ust session id %" PRIu64,
2953 uchan->name, ua_sess->tracing_id);
2954 goto error;
2955 }
2956
2957 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
2958
2959 ret = enable_ust_channel(app, ua_sess, ua_chan);
2960 if (ret < 0) {
2961 goto error;
2962 }
2963
2964 error:
2965 return ret;
2966 }
2967
/*
 * Ask the consumer to create a channel and get it if successful.
 *
 * Called with UST app session lock held.
 *
 * File descriptor accounting: 1 fd is reserved for the channel itself, then
 * 2 per expected stream; on error, every reserved fd is released on the
 * matching cleanup label.
 *
 * Return 0 on success or else a negative value.
 */
static int do_consumer_create_channel(struct ltt_ust_session *usess,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
		int bitness, struct ust_registry_session *registry,
		uint64_t trace_archive_id)
{
	int ret;
	unsigned int nb_fd = 0;
	struct consumer_socket *socket;

	assert(usess);
	assert(ua_sess);
	assert(ua_chan);
	assert(registry);

	rcu_read_lock();
	health_code_update();

	/* Get the right consumer socket for the application. */
	socket = consumer_find_socket_by_bitness(bitness, usess->consumer);
	if (!socket) {
		ret = -EINVAL;
		goto error;
	}

	health_code_update();

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create channel");
		goto error;
	}

	/*
	 * Ask consumer to create channel. The consumer will return the number of
	 * stream we have to expect.
	 */
	ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
			registry, usess->current_trace_chunk);
	if (ret < 0) {
		goto error_ask;
	}

	/*
	 * Compute the number of fd needed before receiving them. It must be 2 per
	 * stream (2 being the default value here).
	 */
	nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;

	/* Reserve the amount of file descriptor we need. */
	ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create channel");
		goto error_fd_get_stream;
	}

	health_code_update();

	/*
	 * Now get the channel from the consumer. This call will populate the stream
	 * list of that channel and set the ust objects.
	 */
	if (usess->consumer->enabled) {
		ret = ust_consumer_get_channel(socket, ua_chan);
		if (ret < 0) {
			goto error_destroy;
		}
	}

	rcu_read_unlock();
	return 0;

error_destroy:
	/* Release the per-stream fds reserved above. */
	lttng_fd_put(LTTNG_FD_APPS, nb_fd);
error_fd_get_stream:
	/*
	 * Initiate a destroy channel on the consumer since we had an error
	 * handling it on our side. The return value is of no importance since we
	 * already have a ret value set by the previous error that we need to
	 * return.
	 */
	(void) ust_consumer_destroy_channel(socket, ua_chan);
error_ask:
	/* Release the single channel fd. */
	lttng_fd_put(LTTNG_FD_APPS, 1);
error:
	health_code_update();
	rcu_read_unlock();
	return ret;
}
3064
3065 /*
3066 * Duplicate the ust data object of the ust app stream and save it in the
3067 * buffer registry stream.
3068 *
3069 * Return 0 on success or else a negative value.
3070 */
3071 static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
3072 struct ust_app_stream *stream)
3073 {
3074 int ret;
3075
3076 assert(reg_stream);
3077 assert(stream);
3078
3079 /* Duplicating a stream requires 2 new fds. Reserve them. */
3080 ret = lttng_fd_get(LTTNG_FD_APPS, 2);
3081 if (ret < 0) {
3082 ERR("Exhausted number of available FD upon duplicate stream");
3083 goto error;
3084 }
3085
3086 /* Duplicate object for stream once the original is in the registry. */
3087 ret = lttng_ust_ctl_duplicate_ust_object_data(&stream->obj,
3088 reg_stream->obj.ust);
3089 if (ret < 0) {
3090 ERR("Duplicate stream obj from %p to %p failed with ret %d",
3091 reg_stream->obj.ust, stream->obj, ret);
3092 lttng_fd_put(LTTNG_FD_APPS, 2);
3093 goto error;
3094 }
3095 stream->handle = stream->obj->handle;
3096
3097 error:
3098 return ret;
3099 }
3100
3101 /*
3102 * Duplicate the ust data object of the ust app. channel and save it in the
3103 * buffer registry channel.
3104 *
3105 * Return 0 on success or else a negative value.
3106 */
3107 static int duplicate_channel_object(struct buffer_reg_channel *buf_reg_chan,
3108 struct ust_app_channel *ua_chan)
3109 {
3110 int ret;
3111
3112 assert(buf_reg_chan);
3113 assert(ua_chan);
3114
3115 /* Duplicating a channel requires 1 new fd. Reserve it. */
3116 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
3117 if (ret < 0) {
3118 ERR("Exhausted number of available FD upon duplicate channel");
3119 goto error_fd_get;
3120 }
3121
3122 /* Duplicate object for stream once the original is in the registry. */
3123 ret = lttng_ust_ctl_duplicate_ust_object_data(&ua_chan->obj, buf_reg_chan->obj.ust);
3124 if (ret < 0) {
3125 ERR("Duplicate channel obj from %p to %p failed with ret: %d",
3126 buf_reg_chan->obj.ust, ua_chan->obj, ret);
3127 goto error;
3128 }
3129 ua_chan->handle = ua_chan->obj->handle;
3130
3131 return 0;
3132
3133 error:
3134 lttng_fd_put(LTTNG_FD_APPS, 1);
3135 error_fd_get:
3136 return ret;
3137 }
3138
3139 /*
3140 * For a given channel buffer registry, setup all streams of the given ust
3141 * application channel.
3142 *
3143 * Return 0 on success or else a negative value.
3144 */
3145 static int setup_buffer_reg_streams(struct buffer_reg_channel *buf_reg_chan,
3146 struct ust_app_channel *ua_chan,
3147 struct ust_app *app)
3148 {
3149 int ret = 0;
3150 struct ust_app_stream *stream, *stmp;
3151
3152 assert(buf_reg_chan);
3153 assert(ua_chan);
3154
3155 DBG2("UST app setup buffer registry stream");
3156
3157 /* Send all streams to application. */
3158 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
3159 struct buffer_reg_stream *reg_stream;
3160
3161 ret = buffer_reg_stream_create(&reg_stream);
3162 if (ret < 0) {
3163 goto error;
3164 }
3165
3166 /*
3167 * Keep original pointer and nullify it in the stream so the delete
3168 * stream call does not release the object.
3169 */
3170 reg_stream->obj.ust = stream->obj;
3171 stream->obj = NULL;
3172 buffer_reg_stream_add(reg_stream, buf_reg_chan);
3173
3174 /* We don't need the streams anymore. */
3175 cds_list_del(&stream->list);
3176 delete_ust_app_stream(-1, stream, app);
3177 }
3178
3179 error:
3180 return ret;
3181 }
3182
/*
 * Create a buffer registry channel for the given session registry and
 * application channel object. If regp pointer is valid, it's set with the
 * created object. Important, the created object is NOT added to the session
 * registry hash table.
 *
 * Return 0 on success else a negative value.
 */
static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
		struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp)
{
	int ret;
	struct buffer_reg_channel *buf_reg_chan = NULL;

	assert(reg_sess);
	assert(ua_chan);

	DBG2("UST app creating buffer registry channel for %s", ua_chan->name);

	/* Create buffer registry channel. */
	ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &buf_reg_chan);
	if (ret < 0) {
		goto error_create;
	}
	assert(buf_reg_chan);
	/* Mirror the consumer key and buffer geometry of the app channel. */
	buf_reg_chan->consumer_key = ua_chan->key;
	buf_reg_chan->subbuf_size = ua_chan->attr.subbuf_size;
	buf_reg_chan->num_subbuf = ua_chan->attr.num_subbuf;

	/* Create and add a channel registry to session. */
	ret = ust_registry_channel_add(reg_sess->reg.ust,
			ua_chan->tracing_channel_id);
	if (ret < 0) {
		goto error;
	}
	buffer_reg_channel_add(reg_sess, buf_reg_chan);

	/* Hand the created object back to the caller, if requested. */
	if (regp) {
		*regp = buf_reg_chan;
	}

	return 0;

error:
	/* Safe because the registry channel object was not added to any HT. */
	buffer_reg_channel_destroy(buf_reg_chan, LTTNG_DOMAIN_UST);
error_create:
	return ret;
}
3232
3233 /*
3234 * Setup buffer registry channel for the given session registry and application
3235 * channel object. If regp pointer is valid, it's set with the created object.
3236 *
3237 * Return 0 on success else a negative value.
3238 */
3239 static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
3240 struct ust_app_channel *ua_chan, struct buffer_reg_channel *buf_reg_chan,
3241 struct ust_app *app)
3242 {
3243 int ret;
3244
3245 assert(reg_sess);
3246 assert(buf_reg_chan);
3247 assert(ua_chan);
3248 assert(ua_chan->obj);
3249
3250 DBG2("UST app setup buffer registry channel for %s", ua_chan->name);
3251
3252 /* Setup all streams for the registry. */
3253 ret = setup_buffer_reg_streams(buf_reg_chan, ua_chan, app);
3254 if (ret < 0) {
3255 goto error;
3256 }
3257
3258 buf_reg_chan->obj.ust = ua_chan->obj;
3259 ua_chan->obj = NULL;
3260
3261 return 0;
3262
3263 error:
3264 buffer_reg_channel_remove(reg_sess, buf_reg_chan);
3265 buffer_reg_channel_destroy(buf_reg_chan, LTTNG_DOMAIN_UST);
3266 return ret;
3267 }
3268
/*
 * Send buffer registry channel to the application.
 *
 * The channel object is duplicated from the registry, sent to the app, then
 * each registry stream is duplicated and sent in turn.
 *
 * Return 0 on success else a negative value. Returns -ENOTCONN when the
 * application exited or timed out during the exchange.
 */
static int send_channel_uid_to_ust(struct buffer_reg_channel *buf_reg_chan,
		struct ust_app *app, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct buffer_reg_stream *reg_stream;

	assert(buf_reg_chan);
	assert(app);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app sending buffer registry channel to ust sock %d", app->sock);

	/* Duplicate the registry's channel object into the app channel. */
	ret = duplicate_channel_object(buf_reg_chan, ua_chan);
	if (ret < 0) {
		goto error;
	}

	/* Send channel to the application. */
	ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
	if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
		ret = -ENOTCONN; /* Caused by app exiting. */
		goto error;
	} else if (ret == -EAGAIN) {
		/* Caused by timeout. */
		WARN("Communication with application %d timed out on send_channel for channel \"%s\" of session \"%" PRIu64 "\".",
				app->pid, ua_chan->name, ua_sess->tracing_id);
		/* Treat this the same way as an application that is exiting. */
		ret = -ENOTCONN;
		goto error;
	} else if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Send all streams to application. */
	pthread_mutex_lock(&buf_reg_chan->stream_list_lock);
	cds_list_for_each_entry(reg_stream, &buf_reg_chan->streams, lnode) {
		/* Transient per-iteration app stream; released before the next loop. */
		struct ust_app_stream stream = {};

		ret = duplicate_stream_object(reg_stream, &stream);
		if (ret < 0) {
			goto error_stream_unlock;
		}

		ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
		if (ret < 0) {
			/* Map app-death and timeout errors to -ENOTCONN, as above. */
			if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
				ret = -ENOTCONN; /* Caused by app exiting. */
			} else if (ret == -EAGAIN) {
				/*
				 * Caused by timeout.
				 * Treat this the same way as an application
				 * that is exiting.
				 */
				WARN("Communication with application %d timed out on send_stream for stream of channel \"%s\" of session \"%" PRIu64 "\".",
						app->pid,
						ua_chan->name,
						ua_sess->tracing_id);
				ret = -ENOTCONN;
			}
			(void) release_ust_app_stream(-1, &stream, app);
			goto error_stream_unlock;
		}

		/*
		 * The return value is not important here. This function will output an
		 * error if needed.
		 */
		(void) release_ust_app_stream(-1, &stream, app);
	}
	ua_chan->is_sent = 1;

error_stream_unlock:
	pthread_mutex_unlock(&buf_reg_chan->stream_list_lock);
error:
	return ret;
}
3354
/*
 * Create and send to the application the created buffers with per UID buffers.
 *
 * This MUST be called with a RCU read side lock acquired.
 * The session list lock and the session's lock must be acquired.
 *
 * Return 0 on success else a negative value.
 */
static int create_channel_per_uid(struct ust_app *app,
		struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct buffer_reg_uid *reg_uid;
	struct buffer_reg_channel *buf_reg_chan;
	struct ltt_session *session = NULL;
	enum lttng_error_code notification_ret;
	struct ust_registry_channel *ust_reg_chan;

	assert(app);
	assert(usess);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app creating channel %s with per UID buffers", ua_chan->name);

	reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
	/*
	 * The session creation handles the creation of this global registry
	 * object. If none can be found, there is a code flow problem or a
	 * teardown race.
	 */
	assert(reg_uid);

	buf_reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
			reg_uid);
	if (buf_reg_chan) {
		/* Per-UID buffers already exist; only send them to this app. */
		goto send_channel;
	}

	/* Create the buffer registry channel object. */
	ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &buf_reg_chan);
	if (ret < 0) {
		ERR("Error creating the UST channel \"%s\" registry instance",
			ua_chan->name);
		goto error;
	}

	session = session_find_by_id(ua_sess->tracing_id);
	assert(session);
	/* Both locks must already be held by the caller (see header comment). */
	assert(pthread_mutex_trylock(&session->lock));
	assert(session_trylock_list());

	/*
	 * Create the buffers on the consumer side. This call populates the
	 * ust app channel object with all streams and data object.
	 */
	ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
			app->bits_per_long, reg_uid->registry->reg.ust,
			session->most_recent_chunk_id.value);
	if (ret < 0) {
		ERR("Error creating UST channel \"%s\" on the consumer daemon",
			ua_chan->name);

		/*
		 * Let's remove the previously created buffer registry channel so
		 * it's not visible anymore in the session registry.
		 */
		ust_registry_channel_del_free(reg_uid->registry->reg.ust,
				ua_chan->tracing_channel_id, false);
		buffer_reg_channel_remove(reg_uid->registry, buf_reg_chan);
		buffer_reg_channel_destroy(buf_reg_chan, LTTNG_DOMAIN_UST);
		goto error;
	}

	/*
	 * Setup the streams and add it to the session registry.
	 */
	ret = setup_buffer_reg_channel(reg_uid->registry,
			ua_chan, buf_reg_chan, app);
	if (ret < 0) {
		ERR("Error setting up UST channel \"%s\"", ua_chan->name);
		goto error;
	}

	/* Notify the notification subsystem of the channel's creation. */
	pthread_mutex_lock(&reg_uid->registry->reg.ust->lock);
	ust_reg_chan = ust_registry_channel_find(reg_uid->registry->reg.ust,
			ua_chan->tracing_channel_id);
	assert(ust_reg_chan);
	ust_reg_chan->consumer_key = ua_chan->key;
	ust_reg_chan = NULL;
	pthread_mutex_unlock(&reg_uid->registry->reg.ust->lock);

	notification_ret = notification_thread_command_add_channel(
			the_notification_thread_handle, session->name,
			lttng_credentials_get_uid(
					&ua_sess->effective_credentials),
			lttng_credentials_get_gid(
					&ua_sess->effective_credentials),
			ua_chan->name, ua_chan->key, LTTNG_DOMAIN_UST,
			ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
	if (notification_ret != LTTNG_OK) {
		ret = - (int) notification_ret;
		ERR("Failed to add channel to notification thread");
		goto error;
	}

send_channel:
	/* Send buffers to the application. */
	ret = send_channel_uid_to_ust(buf_reg_chan, app, ua_sess, ua_chan);
	if (ret < 0) {
		if (ret != -ENOTCONN) {
			/* -ENOTCONN means the app exited; not worth an error log. */
			ERR("Error sending channel to application");
		}
		goto error;
	}

error:
	if (session) {
		session_put(session);
	}
	return ret;
}
3479
/*
 * Create and send to the application the created buffers with per PID buffers.
 *
 * Called with UST app session lock held.
 * The session list lock and the session's lock must be acquired.
 *
 * Return 0 on success else a negative value.
 */
static int create_channel_per_pid(struct ust_app *app,
		struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct ust_registry_session *registry;
	enum lttng_error_code cmd_ret;
	struct ltt_session *session = NULL;
	uint64_t chan_reg_key;
	struct ust_registry_channel *ust_reg_chan;

	assert(app);
	assert(usess);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app creating channel %s with per PID buffers", ua_chan->name);

	rcu_read_lock();

	registry = get_session_registry(ua_sess);
	/* The UST app session lock is held, registry shall not be null. */
	assert(registry);

	/* Create and add a new channel registry to session. */
	ret = ust_registry_channel_add(registry, ua_chan->key);
	if (ret < 0) {
		ERR("Error creating the UST channel \"%s\" registry instance",
			ua_chan->name);
		goto error;
	}

	session = session_find_by_id(ua_sess->tracing_id);
	assert(session);

	/* Both locks must already be held by the caller (see header comment). */
	assert(pthread_mutex_trylock(&session->lock));
	assert(session_trylock_list());

	/* Create and get channel on the consumer side. */
	ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
			app->bits_per_long, registry,
			session->most_recent_chunk_id.value);
	if (ret < 0) {
		ERR("Error creating UST channel \"%s\" on the consumer daemon",
			ua_chan->name);
		goto error_remove_from_registry;
	}

	ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
	if (ret < 0) {
		if (ret != -ENOTCONN) {
			/* -ENOTCONN means the app exited; not worth an error log. */
			ERR("Error sending channel to application");
		}
		goto error_remove_from_registry;
	}

	/* Publish the consumer key in the session registry channel. */
	chan_reg_key = ua_chan->key;
	pthread_mutex_lock(&registry->lock);
	ust_reg_chan = ust_registry_channel_find(registry, chan_reg_key);
	assert(ust_reg_chan);
	ust_reg_chan->consumer_key = ua_chan->key;
	pthread_mutex_unlock(&registry->lock);

	/* Notify the notification subsystem of the channel's creation. */
	cmd_ret = notification_thread_command_add_channel(
			the_notification_thread_handle, session->name,
			lttng_credentials_get_uid(
					&ua_sess->effective_credentials),
			lttng_credentials_get_gid(
					&ua_sess->effective_credentials),
			ua_chan->name, ua_chan->key, LTTNG_DOMAIN_UST,
			ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
	if (cmd_ret != LTTNG_OK) {
		ret = - (int) cmd_ret;
		ERR("Failed to add channel to notification thread");
		goto error_remove_from_registry;
	}

error_remove_from_registry:
	/* Also reached on success; only roll back when ret is non-zero. */
	if (ret) {
		ust_registry_channel_del_free(registry, ua_chan->key, false);
	}
error:
	rcu_read_unlock();
	if (session) {
		session_put(session);
	}
	return ret;
}
3576
3577 /*
3578 * From an already allocated ust app channel, create the channel buffers if
3579 * needed and send them to the application. This MUST be called with a RCU read
3580 * side lock acquired.
3581 *
3582 * Called with UST app session lock held.
3583 *
3584 * Return 0 on success or else a negative value. Returns -ENOTCONN if
3585 * the application exited concurrently.
3586 */
3587 static int ust_app_channel_send(struct ust_app *app,
3588 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
3589 struct ust_app_channel *ua_chan)
3590 {
3591 int ret;
3592
3593 assert(app);
3594 assert(usess);
3595 assert(usess->active);
3596 assert(ua_sess);
3597 assert(ua_chan);
3598
3599 /* Handle buffer type before sending the channel to the application. */
3600 switch (usess->buffer_type) {
3601 case LTTNG_BUFFER_PER_UID:
3602 {
3603 ret = create_channel_per_uid(app, usess, ua_sess, ua_chan);
3604 if (ret < 0) {
3605 goto error;
3606 }
3607 break;
3608 }
3609 case LTTNG_BUFFER_PER_PID:
3610 {
3611 ret = create_channel_per_pid(app, usess, ua_sess, ua_chan);
3612 if (ret < 0) {
3613 goto error;
3614 }
3615 break;
3616 }
3617 default:
3618 assert(0);
3619 ret = -EINVAL;
3620 goto error;
3621 }
3622
3623 /* Initialize ust objd object using the received handle and add it. */
3624 lttng_ht_node_init_ulong(&ua_chan->ust_objd_node, ua_chan->handle);
3625 lttng_ht_add_unique_ulong(app->ust_objd, &ua_chan->ust_objd_node);
3626
3627 /* If channel is not enabled, disable it on the tracer */
3628 if (!ua_chan->enabled) {
3629 ret = disable_ust_channel(app, ua_sess, ua_chan);
3630 if (ret < 0) {
3631 goto error;
3632 }
3633 }
3634
3635 error:
3636 return ret;
3637 }
3638
3639 /*
3640 * Create UST app channel and return it through ua_chanp if not NULL.
3641 *
3642 * Called with UST app session lock and RCU read-side lock held.
3643 *
3644 * Return 0 on success or else a negative value.
3645 */
3646 static int ust_app_channel_allocate(struct ust_app_session *ua_sess,
3647 struct ltt_ust_channel *uchan,
3648 enum lttng_ust_abi_chan_type type, struct ltt_ust_session *usess,
3649 struct ust_app_channel **ua_chanp)
3650 {
3651 int ret = 0;
3652 struct lttng_ht_iter iter;
3653 struct lttng_ht_node_str *ua_chan_node;
3654 struct ust_app_channel *ua_chan;
3655
3656 /* Lookup channel in the ust app session */
3657 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
3658 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
3659 if (ua_chan_node != NULL) {
3660 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3661 goto end;
3662 }
3663
3664 ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
3665 if (ua_chan == NULL) {
3666 /* Only malloc can fail here */
3667 ret = -ENOMEM;
3668 goto error;
3669 }
3670 shadow_copy_channel(ua_chan, uchan);
3671
3672 /* Set channel type. */
3673 ua_chan->attr.type = type;
3674
3675 /* Only add the channel if successful on the tracer side. */
3676 lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
3677 end:
3678 if (ua_chanp) {
3679 *ua_chanp = ua_chan;
3680 }
3681
3682 /* Everything went well. */
3683 return 0;
3684
3685 error:
3686 return ret;
3687 }
3688
/*
 * Create UST app event and create it on the tracer side.
 *
 * Must be called with the RCU read side lock held.
 * Called with ust app session mutex held.
 *
 * Return 0 on success else a negative value.
 */
static
int create_ust_app_event(struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan, struct ltt_ust_event *uevent,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_event *ua_event;

	ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
	if (ua_event == NULL) {
		/* Only failure mode of alloc_ust_app_event(). */
		ret = -ENOMEM;
		goto end;
	}
	shadow_copy_event(ua_event, uevent);

	/* Create it on the tracer side */
	ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
	if (ret < 0) {
		/*
		 * Not found previously means that it does not exist on the
		 * tracer. If the application reports that the event existed,
		 * it means there is a bug in the sessiond or lttng-ust
		 * (or corruption, etc.)
		 */
		if (ret == -LTTNG_UST_ERR_EXIST) {
			ERR("Tracer for application reported that an event being created already existed: "
					"event_name = \"%s\", pid = %d, ppid = %d, uid = %d, gid = %d",
					uevent->attr.name,
					app->pid, app->ppid, app->uid,
					app->gid);
		}
		goto error;
	}

	/* Only register the event locally once the tracer accepted it. */
	add_unique_ust_app_event(ua_chan, ua_event);

	DBG2("UST app create event completed: app = '%s' pid = %d",
			app->name, app->pid);

end:
	return ret;

error:
	/* Valid. Calling here is already in a read side lock */
	delete_ust_app_event(-1, ua_event, app);
	return ret;
}
3743
/*
 * Create UST app event notifier rule and create it on the tracer side.
 *
 * Must be called with the RCU read side lock held.
 * Called with ust app session mutex held.
 *
 * Return 0 on success else a negative value.
 */
static
int create_ust_app_event_notifier_rule(struct lttng_trigger *trigger,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_event_notifier_rule *ua_event_notifier_rule;

	ua_event_notifier_rule = alloc_ust_app_event_notifier_rule(trigger);
	if (ua_event_notifier_rule == NULL) {
		ret = -ENOMEM;
		goto end;
	}

	/* Create it on the tracer side. */
	ret = create_ust_event_notifier(app, ua_event_notifier_rule);
	if (ret < 0) {
		/*
		 * Not found previously means that it does not exist on the
		 * tracer. If the application reports that the event existed,
		 * it means there is a bug in the sessiond or lttng-ust
		 * (or corruption, etc.)
		 */
		if (ret == -LTTNG_UST_ERR_EXIST) {
			ERR("Tracer for application reported that an event notifier being created already exists: "
					"token = \"%" PRIu64 "\", pid = %d, ppid = %d, uid = %d, gid = %d",
					lttng_trigger_get_tracer_token(trigger),
					app->pid, app->ppid, app->uid,
					app->gid);
		}
		goto error;
	}

	/* Index the rule by its trigger token once the tracer accepted it. */
	lttng_ht_add_unique_u64(app->token_to_event_notifier_rule_ht,
			&ua_event_notifier_rule->node);

	DBG2("UST app create token event rule completed: app = '%s', pid = %d), token = %" PRIu64,
			app->name, app->pid, lttng_trigger_get_tracer_token(trigger));

	goto end;

error:
	/* The RCU read side lock is already being held by the caller. */
	delete_ust_app_event_notifier_rule(-1, ua_event_notifier_rule, app);
end:
	return ret;
}
3796
3797 /*
3798 * Create UST metadata and open it on the tracer side.
3799 *
3800 * Called with UST app session lock held and RCU read side lock.
3801 */
3802 static int create_ust_app_metadata(struct ust_app_session *ua_sess,
3803 struct ust_app *app, struct consumer_output *consumer)
3804 {
3805 int ret = 0;
3806 struct ust_app_channel *metadata;
3807 struct consumer_socket *socket;
3808 struct ust_registry_session *registry;
3809 struct ltt_session *session = NULL;
3810
3811 assert(ua_sess);
3812 assert(app);
3813 assert(consumer);
3814
3815 registry = get_session_registry(ua_sess);
3816 /* The UST app session is held registry shall not be null. */
3817 assert(registry);
3818
3819 pthread_mutex_lock(&registry->lock);
3820
3821 /* Metadata already exists for this registry or it was closed previously */
3822 if (registry->metadata_key || registry->metadata_closed) {
3823 ret = 0;
3824 goto error;
3825 }
3826
3827 /* Allocate UST metadata */
3828 metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, NULL);
3829 if (!metadata) {
3830 /* malloc() failed */
3831 ret = -ENOMEM;
3832 goto error;
3833 }
3834
3835 memcpy(&metadata->attr, &ua_sess->metadata_attr, sizeof(metadata->attr));
3836
3837 /* Need one fd for the channel. */
3838 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
3839 if (ret < 0) {
3840 ERR("Exhausted number of available FD upon create metadata");
3841 goto error;
3842 }
3843
3844 /* Get the right consumer socket for the application. */
3845 socket = consumer_find_socket_by_bitness(app->bits_per_long, consumer);
3846 if (!socket) {
3847 ret = -EINVAL;
3848 goto error_consumer;
3849 }
3850
3851 /*
3852 * Keep metadata key so we can identify it on the consumer side. Assign it
3853 * to the registry *before* we ask the consumer so we avoid the race of the
3854 * consumer requesting the metadata and the ask_channel call on our side
3855 * did not returned yet.
3856 */
3857 registry->metadata_key = metadata->key;
3858
3859 session = session_find_by_id(ua_sess->tracing_id);
3860 assert(session);
3861
3862 assert(pthread_mutex_trylock(&session->lock));
3863 assert(session_trylock_list());
3864
3865 /*
3866 * Ask the metadata channel creation to the consumer. The metadata object
3867 * will be created by the consumer and kept their. However, the stream is
3868 * never added or monitored until we do a first push metadata to the
3869 * consumer.
3870 */
3871 ret = ust_consumer_ask_channel(ua_sess, metadata, consumer, socket,
3872 registry, session->current_trace_chunk);
3873 if (ret < 0) {
3874 /* Nullify the metadata key so we don't try to close it later on. */
3875 registry->metadata_key = 0;
3876 goto error_consumer;
3877 }
3878
3879 /*
3880 * The setup command will make the metadata stream be sent to the relayd,
3881 * if applicable, and the thread managing the metadatas. This is important
3882 * because after this point, if an error occurs, the only way the stream
3883 * can be deleted is to be monitored in the consumer.
3884 */
3885 ret = consumer_setup_metadata(socket, metadata->key);
3886 if (ret < 0) {
3887 /* Nullify the metadata key so we don't try to close it later on. */
3888 registry->metadata_key = 0;
3889 goto error_consumer;
3890 }
3891
3892 DBG2("UST metadata with key %" PRIu64 " created for app pid %d",
3893 metadata->key, app->pid);
3894
3895 error_consumer:
3896 lttng_fd_put(LTTNG_FD_APPS, 1);
3897 delete_ust_app_channel(-1, metadata, app);
3898 error:
3899 pthread_mutex_unlock(&registry->lock);
3900 if (session) {
3901 session_put(session);
3902 }
3903 return ret;
3904 }
3905
3906 /*
3907 * Return ust app pointer or NULL if not found. RCU read side lock MUST be
3908 * acquired before calling this function.
3909 */
3910 struct ust_app *ust_app_find_by_pid(pid_t pid)
3911 {
3912 struct ust_app *app = NULL;
3913 struct lttng_ht_node_ulong *node;
3914 struct lttng_ht_iter iter;
3915
3916 lttng_ht_lookup(ust_app_ht, (void *)((unsigned long) pid), &iter);
3917 node = lttng_ht_iter_get_node_ulong(&iter);
3918 if (node == NULL) {
3919 DBG2("UST app no found with pid %d", pid);
3920 goto error;
3921 }
3922
3923 DBG2("Found UST app by pid %d", pid);
3924
3925 app = caa_container_of(node, struct ust_app, pid_n);
3926
3927 error:
3928 return app;
3929 }
3930
3931 /*
3932 * Allocate and init an UST app object using the registration information and
3933 * the command socket. This is called when the command socket connects to the
3934 * session daemon.
3935 *
3936 * The object is returned on success or else NULL.
3937 */
3938 struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
3939 {
3940 int ret;
3941 struct ust_app *lta = NULL;
3942 struct lttng_pipe *event_notifier_event_source_pipe = NULL;
3943
3944 assert(msg);
3945 assert(sock >= 0);
3946
3947 DBG3("UST app creating application for socket %d", sock);
3948
3949 if ((msg->bits_per_long == 64 &&
3950 (uatomic_read(&the_ust_consumerd64_fd) ==
3951 -EINVAL)) ||
3952 (msg->bits_per_long == 32 &&
3953 (uatomic_read(&the_ust_consumerd32_fd) ==
3954 -EINVAL))) {
3955 ERR("Registration failed: application \"%s\" (pid: %d) has "
3956 "%d-bit long, but no consumerd for this size is available.\n",
3957 msg->name, msg->pid, msg->bits_per_long);
3958 goto error;
3959 }
3960
3961 /*
3962 * Reserve the two file descriptors of the event source pipe. The write
3963 * end will be closed once it is passed to the application, at which
3964 * point a single 'put' will be performed.
3965 */
3966 ret = lttng_fd_get(LTTNG_FD_APPS, 2);
3967 if (ret) {
3968 ERR("Failed to reserve two file descriptors for the event source pipe while creating a new application instance: app = '%s', pid = %d",
3969 msg->name, (int) msg->pid);
3970 goto error;
3971 }
3972
3973 event_notifier_event_source_pipe = lttng_pipe_open(FD_CLOEXEC);
3974 if (!event_notifier_event_source_pipe) {
3975 PERROR("Failed to open application event source pipe: '%s' (pid = %d)",
3976 msg->name, msg->pid);
3977 goto error;
3978 }
3979
3980 lta = zmalloc(sizeof(struct ust_app));
3981 if (lta == NULL) {
3982 PERROR("malloc");
3983 goto error_free_pipe;
3984 }
3985
3986 lta->event_notifier_group.event_pipe = event_notifier_event_source_pipe;
3987
3988 lta->ppid = msg->ppid;
3989 lta->uid = msg->uid;
3990 lta->gid = msg->gid;
3991
3992 lta->bits_per_long = msg->bits_per_long;
3993 lta->uint8_t_alignment = msg->uint8_t_alignment;
3994 lta->uint16_t_alignment = msg->uint16_t_alignment;
3995 lta->uint32_t_alignment = msg->uint32_t_alignment;
3996 lta->uint64_t_alignment = msg->uint64_t_alignment;
3997 lta->long_alignment = msg->long_alignment;
3998 lta->byte_order = msg->byte_order;
3999
4000 lta->v_major = msg->major;
4001 lta->v_minor = msg->minor;
4002 lta->sessions = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
4003 lta->ust_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
4004 lta->ust_sessions_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
4005 lta->notify_sock = -1;
4006 lta->token_to_event_notifier_rule_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
4007
4008 /* Copy name and make sure it's NULL terminated. */
4009 strncpy(lta->name, msg->name, sizeof(lta->name));
4010 lta->name[UST_APP_PROCNAME_LEN] = '\0';
4011
4012 /*
4013 * Before this can be called, when receiving the registration information,
4014 * the application compatibility is checked. So, at this point, the
4015 * application can work with this session daemon.
4016 */
4017 lta->compatible = 1;
4018
4019 lta->pid = msg->pid;
4020 lttng_ht_node_init_ulong(&lta->pid_n, (unsigned long) lta->pid);
4021 lta->sock = sock;
4022 pthread_mutex_init(&lta->sock_lock, NULL);
4023 lttng_ht_node_init_ulong(&lta->sock_n, (unsigned long) lta->sock);
4024
4025 CDS_INIT_LIST_HEAD(&lta->teardown_head);
4026 return lta;
4027
4028 error_free_pipe:
4029 lttng_pipe_destroy(event_notifier_event_source_pipe);
4030 lttng_fd_put(LTTNG_FD_APPS, 2);
4031 error:
4032 return NULL;
4033 }
4034
/*
 * For a given application object, add it to every hash table.
 *
 * Makes the application reachable by pid, command socket and notify socket
 * lookups. The notify socket must have been set beforehand.
 */
void ust_app_add(struct ust_app *app)
{
	assert(app);
	assert(app->notify_sock >= 0);

	/* Record the moment of registration. */
	app->registration_time = time(NULL);

	rcu_read_lock();

	/*
	 * On a re-registration, we want to kick out the previous registration of
	 * that pid
	 */
	lttng_ht_add_replace_ulong(ust_app_ht, &app->pid_n);

	/*
	 * The socket _should_ be unique until _we_ call close. So, a add_unique
	 * for the ust_app_ht_by_sock is used which asserts fail if the entry was
	 * already in the table.
	 */
	lttng_ht_add_unique_ulong(ust_app_ht_by_sock, &app->sock_n);

	/* Add application to the notify socket hash table. */
	lttng_ht_node_init_ulong(&app->notify_sock_n, app->notify_sock);
	lttng_ht_add_unique_ulong(ust_app_ht_by_notify_sock, &app->notify_sock_n);

	DBG("App registered with pid:%d ppid:%d uid:%d gid:%d sock =%d name:%s "
			"notify_sock =%d (version %d.%d)", app->pid, app->ppid, app->uid,
			app->gid, app->sock, app->name, app->notify_sock, app->v_major,
			app->v_minor);

	rcu_read_unlock();
}
4071
4072 /*
4073 * Set the application version into the object.
4074 *
4075 * Return 0 on success else a negative value either an errno code or a
4076 * LTTng-UST error code.
4077 */
4078 int ust_app_version(struct ust_app *app)
4079 {
4080 int ret;
4081
4082 assert(app);
4083
4084 pthread_mutex_lock(&app->sock_lock);
4085 ret = lttng_ust_ctl_tracer_version(app->sock, &app->version);
4086 pthread_mutex_unlock(&app->sock_lock);
4087 if (ret < 0) {
4088 if (ret == -LTTNG_UST_ERR_EXITING || ret == -EPIPE) {
4089 DBG3("UST app version failed. Application is dead: pid = %d, sock = %d",
4090 app->pid, app->sock);
4091 } else if (ret == -EAGAIN) {
4092 WARN("UST app version failed. Communication time out: pid = %d, sock = %d",
4093 app->pid, app->sock);
4094 } else {
4095 ERR("UST app version failed with ret %d: pid = %d, sock = %d",
4096 ret, app->pid, app->sock);
4097 }
4098 }
4099
4100 return ret;
4101 }
4102
4103 bool ust_app_supports_notifiers(const struct ust_app *app)
4104 {
4105 return app->v_major >= 9;
4106 }
4107
4108 bool ust_app_supports_counters(const struct ust_app *app)
4109 {
4110 return app->v_major >= 9;
4111 }
4112
4113 /*
4114 * Setup the base event notifier group.
4115 *
4116 * Return 0 on success else a negative value either an errno code or a
4117 * LTTng-UST error code.
4118 */
4119 int ust_app_setup_event_notifier_group(struct ust_app *app)
4120 {
4121 int ret;
4122 int event_pipe_write_fd;
4123 struct lttng_ust_abi_object_data *event_notifier_group = NULL;
4124 enum lttng_error_code lttng_ret;
4125 enum event_notifier_error_accounting_status event_notifier_error_accounting_status;
4126
4127 assert(app);
4128
4129 if (!ust_app_supports_notifiers(app)) {
4130 ret = -ENOSYS;
4131 goto error;
4132 }
4133
4134 /* Get the write side of the pipe. */
4135 event_pipe_write_fd = lttng_pipe_get_writefd(
4136 app->event_notifier_group.event_pipe);
4137
4138 pthread_mutex_lock(&app->sock_lock);
4139 ret = lttng_ust_ctl_create_event_notifier_group(app->sock,
4140 event_pipe_write_fd, &event_notifier_group);
4141 pthread_mutex_unlock(&app->sock_lock);
4142 if (ret < 0) {
4143 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
4144 ret = 0;
4145 DBG3("UST app create event notifier group failed. Application is dead: pid = %d, sock = %d",
4146 app->pid, app->sock);
4147 } else if (ret == -EAGAIN) {
4148 ret = 0;
4149 WARN("UST app create event notifier group failed. Communication time out: pid = %d, sock = %d",
4150 app->pid, app->sock);
4151 } else {
4152 ERR("UST app create event notifier group failed with ret %d: pid = %d, sock = %d, event_pipe_write_fd: %d",
4153 ret, app->pid, app->sock, event_pipe_write_fd);
4154 }
4155 goto error;
4156 }
4157
4158 ret = lttng_pipe_write_close(app->event_notifier_group.event_pipe);
4159 if (ret) {
4160 ERR("Failed to close write end of the application's event source pipe: app = '%s' (pid = %d)",
4161 app->name, app->pid);
4162 goto error;
4163 }
4164
4165 /*
4166 * Release the file descriptor that was reserved for the write-end of
4167 * the pipe.
4168 */
4169 lttng_fd_put(LTTNG_FD_APPS, 1);
4170
4171 lttng_ret = notification_thread_command_add_tracer_event_source(
4172 the_notification_thread_handle,
4173 lttng_pipe_get_readfd(
4174 app->event_notifier_group.event_pipe),
4175 LTTNG_DOMAIN_UST);
4176 if (lttng_ret != LTTNG_OK) {
4177 ERR("Failed to add tracer event source to notification thread");
4178 ret = - 1;
4179 goto error;
4180 }
4181
4182 /* Assign handle only when the complete setup is valid. */
4183 app->event_notifier_group.object = event_notifier_group;
4184
4185 event_notifier_error_accounting_status =
4186 event_notifier_error_accounting_register_app(app);
4187 switch (event_notifier_error_accounting_status) {
4188 case EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK:
4189 break;
4190 case EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_UNSUPPORTED:
4191 DBG3("Failed to setup event notifier error accounting (application does not support notifier error accounting): app socket fd = %d, app name = '%s', app pid = %d",
4192 app->sock, app->name, (int) app->pid);
4193 ret = 0;
4194 goto error_accounting;
4195 case EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_APP_DEAD:
4196 DBG3("Failed to setup event notifier error accounting (application is dead): app socket fd = %d, app name = '%s', app pid = %d",
4197 app->sock, app->name, (int) app->pid);
4198 ret = 0;
4199 goto error_accounting;
4200 default:
4201 ERR("Failed to setup event notifier error accounting for app");
4202 ret = -1;
4203 goto error_accounting;
4204 }
4205
4206 return ret;
4207
4208 error_accounting:
4209 lttng_ret = notification_thread_command_remove_tracer_event_source(
4210 the_notification_thread_handle,
4211 lttng_pipe_get_readfd(
4212 app->event_notifier_group.event_pipe));
4213 if (lttng_ret != LTTNG_OK) {
4214 ERR("Failed to remove application tracer event source from notification thread");
4215 }
4216
4217 error:
4218 lttng_ust_ctl_release_object(app->sock, app->event_notifier_group.object);
4219 free(app->event_notifier_group.object);
4220 app->event_notifier_group.object = NULL;
4221 return ret;
4222 }
4223
/*
 * Unregister app by removing it from the global traceable app list and freeing
 * the data struct.
 *
 * The socket is already closed at this point so no close to sock.
 *
 * For per-PID sessions, metadata is pushed and streams are flushed before the
 * app disappears from the hash tables so that a concurrent "data pending"
 * check observes a consistent state. The actual teardown of the ust_app
 * structure is deferred to an RCU callback.
 */
void ust_app_unregister(int sock)
{
	struct ust_app *lta;
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter ust_app_sock_iter;
	struct lttng_ht_iter iter;
	struct ust_app_session *ua_sess;
	int ret;

	rcu_read_lock();

	/* Get the node reference for a call_rcu */
	lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &ust_app_sock_iter);
	node = lttng_ht_iter_get_node_ulong(&ust_app_sock_iter);
	assert(node);

	lta = caa_container_of(node, struct ust_app, sock_n);
	DBG("PID %d unregistering with sock %d", lta->pid, sock);

	/*
	 * For per-PID buffers, perform "push metadata" and flush all
	 * application streams before removing app from hash tables,
	 * ensuring proper behavior of data_pending check.
	 * Remove sessions so they are not visible during deletion.
	 */
	cds_lfht_for_each_entry(lta->sessions->ht, &iter.iter, ua_sess,
			node.node) {
		struct ust_registry_session *registry;

		ret = lttng_ht_del(lta->sessions, &iter);
		if (ret) {
			/* The session was already removed so scheduled for teardown. */
			continue;
		}

		if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
			(void) ust_app_flush_app_session(lta, ua_sess);
		}

		/*
		 * Add session to list for teardown. This is safe since at this point we
		 * are the only one using this list.
		 */
		pthread_mutex_lock(&ua_sess->lock);

		if (ua_sess->deleted) {
			/* Already being torn down by another path; skip it. */
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		/*
		 * Normally, this is done in the delete session process which is
		 * executed in the call rcu below. However, upon registration we can't
		 * afford to wait for the grace period before pushing data or else the
		 * data pending feature can race between the unregistration and stop
		 * command where the data pending command is sent *before* the grace
		 * period ended.
		 *
		 * The close metadata below nullifies the metadata pointer in the
		 * session so the delete session will NOT push/close a second time.
		 */
		registry = get_session_registry(ua_sess);
		if (registry) {
			/* Push metadata for application before freeing the application. */
			(void) push_metadata(registry, ua_sess->consumer);

			/*
			 * Don't ask to close metadata for global per UID buffers. Close
			 * metadata only on destroy trace session in this case. Also, the
			 * previous push metadata could have flag the metadata registry to
			 * close so don't send a close command if closed.
			 */
			if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
				/* And ask to close it for this session registry. */
				(void) close_metadata(registry, ua_sess->consumer);
			}
		}
		cds_list_add(&ua_sess->teardown_node, &lta->teardown_head);

		pthread_mutex_unlock(&ua_sess->lock);
	}

	/* Remove application from the socket hash table. */
	ret = lttng_ht_del(ust_app_ht_by_sock, &ust_app_sock_iter);
	assert(!ret);

	/*
	 * Remove application from notify hash table. The thread handling the
	 * notify socket could have deleted the node so ignore on error because
	 * either way it's valid. The close of that socket is handled by the
	 * apps_notify_thread.
	 */
	iter.iter.node = &lta->notify_sock_n.node;
	(void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);

	/*
	 * Ignore return value since the node might have been removed before by an
	 * add replace during app registration because the PID can be reassigned by
	 * the OS.
	 */
	iter.iter.node = &lta->pid_n.node;
	ret = lttng_ht_del(ust_app_ht, &iter);
	if (ret) {
		DBG3("Unregister app by PID %d failed. This can happen on pid reuse",
				lta->pid);
	}

	/* Free memory after the RCU grace period. */
	call_rcu(&lta->pid_n.head, delete_ust_app_rcu);

	rcu_read_unlock();
	return;
}
4343
/*
 * Fill events array with all events name of all registered apps.
 *
 * On success, returns the number of events and stores in '*events' an array
 * of that size, allocated here and owned by the caller. On error, a negative
 * value is returned and '*events' is left untouched.
 */
int ust_app_list_events(struct lttng_event **events)
{
	int ret, handle;
	size_t nbmem, count = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct lttng_event *tmp_event;

	/* Start with a default capacity; the array is doubled on demand below. */
	nbmem = UST_APP_EVENT_LIST_SIZE;
	tmp_event = zmalloc(nbmem * sizeof(struct lttng_event));
	if (tmp_event == NULL) {
		PERROR("zmalloc ust app events");
		ret = -ENOMEM;
		goto error;
	}

	rcu_read_lock();

	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		struct lttng_ust_abi_tracepoint_iter uiter;

		health_code_update();

		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		/* The socket lock is held for the whole listing transaction. */
		pthread_mutex_lock(&app->sock_lock);
		handle = lttng_ust_ctl_tracepoint_list(app->sock);
		if (handle < 0) {
			if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app list events getting handle failed for app pid %d",
						app->pid);
			}
			pthread_mutex_unlock(&app->sock_lock);
			continue;
		}

		/* Iterate until the tracer signals the end of the list. */
		while ((ret = lttng_ust_ctl_tracepoint_list_get(app->sock, handle,
				&uiter)) != -LTTNG_UST_ERR_NOENT) {
			/* Handle ustctl error. */
			if (ret < 0) {
				int release_ret;

				if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
					ERR("UST app tp list get failed for app %d with ret %d",
							app->sock, ret);
				} else {
					/* A dead application is not an error; go to the next app. */
					DBG3("UST app tp list get failed. Application is dead");
					break;
				}
				free(tmp_event);
				release_ret = lttng_ust_ctl_release_handle(app->sock, handle);
				if (release_ret < 0 &&
						release_ret != -LTTNG_UST_ERR_EXITING &&
						release_ret != -EPIPE) {
					ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
				}
				pthread_mutex_unlock(&app->sock_lock);
				goto rcu_error;
			}

			health_code_update();
			if (count >= nbmem) {
				/* In case the realloc fails, we free the memory */
				struct lttng_event *new_tmp_event;
				size_t new_nbmem;

				new_nbmem = nbmem << 1;
				DBG2("Reallocating event list from %zu to %zu entries",
						nbmem, new_nbmem);
				new_tmp_event = realloc(tmp_event,
						new_nbmem * sizeof(struct lttng_event));
				if (new_tmp_event == NULL) {
					int release_ret;

					PERROR("realloc ust app events");
					free(tmp_event);
					ret = -ENOMEM;
					release_ret = lttng_ust_ctl_release_handle(app->sock, handle);
					if (release_ret < 0 &&
							release_ret != -LTTNG_UST_ERR_EXITING &&
							release_ret != -EPIPE) {
						ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
					}
					pthread_mutex_unlock(&app->sock_lock);
					goto rcu_error;
				}
				/* Zero the new memory */
				memset(new_tmp_event + nbmem, 0,
						(new_nbmem - nbmem) * sizeof(struct lttng_event));
				nbmem = new_nbmem;
				tmp_event = new_tmp_event;
			}
			memcpy(tmp_event[count].name, uiter.name, LTTNG_UST_ABI_SYM_NAME_LEN);
			tmp_event[count].loglevel = uiter.loglevel;
			tmp_event[count].type = (enum lttng_event_type) LTTNG_UST_ABI_TRACEPOINT;
			tmp_event[count].pid = app->pid;
			tmp_event[count].enabled = -1;
			count++;
		}
		ret = lttng_ust_ctl_release_handle(app->sock, handle);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0) {
			if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
				DBG3("Error releasing app handle. Application died: pid = %d, sock = %d",
						app->pid, app->sock);
			} else if (ret == -EAGAIN) {
				WARN("Error releasing app handle. Communication time out: pid = %d, sock = %d",
						app->pid, app->sock);
			} else {
				ERR("Error releasing app handle with ret %d: pid = %d, sock = %d",
						ret, app->pid, app->sock);
			}
		}
	}

	ret = count;
	*events = tmp_event;

	DBG2("UST app list events done (%zu events)", count);

rcu_error:
	rcu_read_unlock();
error:
	health_code_update();
	return ret;
}
4478
4479 /*
4480 * Fill events array with all events name of all registered apps.
4481 */
4482 int ust_app_list_event_fields(struct lttng_event_field **fields)
4483 {
4484 int ret, handle;
4485 size_t nbmem, count = 0;
4486 struct lttng_ht_iter iter;
4487 struct ust_app *app;
4488 struct lttng_event_field *tmp_event;
4489
4490 nbmem = UST_APP_EVENT_LIST_SIZE;
4491 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event_field));
4492 if (tmp_event == NULL) {
4493 PERROR("zmalloc ust app event fields");
4494 ret = -ENOMEM;
4495 goto error;
4496 }
4497
4498 rcu_read_lock();
4499
4500 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4501 struct lttng_ust_abi_field_iter uiter;
4502
4503 health_code_update();
4504
4505 if (!app->compatible) {
4506 /*
4507 * TODO: In time, we should notice the caller of this error by
4508 * telling him that this is a version error.
4509 */
4510 continue;
4511 }
4512 pthread_mutex_lock(&app->sock_lock);
4513 handle = lttng_ust_ctl_tracepoint_field_list(app->sock);
4514 if (handle < 0) {
4515 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
4516 ERR("UST app list field getting handle failed for app pid %d",
4517 app->pid);
4518 }
4519 pthread_mutex_unlock(&app->sock_lock);
4520 continue;
4521 }
4522
4523 while ((ret = lttng_ust_ctl_tracepoint_field_list_get(app->sock, handle,
4524 &uiter)) != -LTTNG_UST_ERR_NOENT) {
4525 /* Handle ustctl error. */
4526 if (ret < 0) {
4527 int release_ret;
4528
4529 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
4530 ERR("UST app tp list field failed for app %d with ret %d",
4531 app->sock, ret);
4532 } else {
4533 DBG3("UST app tp list field failed. Application is dead");
4534 break;
4535 }
4536 free(tmp_event);
4537 release_ret = lttng_ust_ctl_release_handle(app->sock, handle);
4538 pthread_mutex_unlock(&app->sock_lock);
4539 if (release_ret < 0 &&
4540 release_ret != -LTTNG_UST_ERR_EXITING &&
4541 release_ret != -EPIPE) {
4542 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
4543 }
4544 goto rcu_error;
4545 }
4546
4547 health_code_update();
4548 if (count >= nbmem) {
4549 /* In case the realloc fails, we free the memory */
4550 struct lttng_event_field *new_tmp_event;
4551 size_t new_nbmem;
4552
4553 new_nbmem = nbmem << 1;
4554 DBG2("Reallocating event field list from %zu to %zu entries",
4555 nbmem, new_nbmem);
4556 new_tmp_event = realloc(tmp_event,
4557 new_nbmem * sizeof(struct lttng_event_field));
4558 if (new_tmp_event == NULL) {
4559 int release_ret;
4560
4561 PERROR("realloc ust app event fields");
4562 free(tmp_event);
4563 ret = -ENOMEM;
4564 release_ret = lttng_ust_ctl_release_handle(app->sock, handle);
4565 pthread_mutex_unlock(&app->sock_lock);
4566 if (release_ret &&
4567 release_ret != -LTTNG_UST_ERR_EXITING &&
4568 release_ret != -EPIPE) {
4569 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
4570 }
4571 goto rcu_error;
4572 }
4573 /* Zero the new memory */
4574 memset(new_tmp_event + nbmem, 0,
4575 (new_nbmem - nbmem) * sizeof(struct lttng_event_field));
4576 nbmem = new_nbmem;
4577 tmp_event = new_tmp_event;
4578 }
4579
4580 memcpy(tmp_event[count].field_name, uiter.field_name, LTTNG_UST_ABI_SYM_NAME_LEN);
4581 /* Mapping between these enums matches 1 to 1. */
4582 tmp_event[count].type = (enum lttng_event_field_type) uiter.type;
4583 tmp_event[count].nowrite = uiter.nowrite;
4584
4585 memcpy(tmp_event[count].event.name, uiter.event_name, LTTNG_UST_ABI_SYM_NAME_LEN);
4586 tmp_event[count].event.loglevel = uiter.loglevel;
4587 tmp_event[count].event.type = LTTNG_EVENT_TRACEPOINT;
4588 tmp_event[count].event.pid = app->pid;
4589 tmp_event[count].event.enabled = -1;
4590 count++;
4591 }
4592 ret = lttng_ust_ctl_release_handle(app->sock, handle);
4593 pthread_mutex_unlock(&app->sock_lock);
4594 if (ret < 0 &&
4595 ret != -LTTNG_UST_ERR_EXITING &&
4596 ret != -EPIPE) {
4597 ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
4598 }
4599 }
4600
4601 ret = count;
4602 *fields = tmp_event;
4603
4604 DBG2("UST app list event fields done (%zu events)", count);
4605
4606 rcu_error:
4607 rcu_read_unlock();
4608 error:
4609 health_code_update();
4610 return ret;
4611 }
4612
4613 /*
4614 * Free and clean all traceable apps of the global list.
4615 *
4616 * Should _NOT_ be called with RCU read-side lock held.
4617 */
4618 void ust_app_clean_list(void)
4619 {
4620 int ret;
4621 struct ust_app *app;
4622 struct lttng_ht_iter iter;
4623
4624 DBG2("UST app cleaning registered apps hash table");
4625
4626 rcu_read_lock();
4627
4628 /* Cleanup notify socket hash table */
4629 if (ust_app_ht_by_notify_sock) {
4630 cds_lfht_for_each_entry(ust_app_ht_by_notify_sock->ht, &iter.iter, app,
4631 notify_sock_n.node) {
4632 /*
4633 * Assert that all notifiers are gone as all triggers
4634 * are unregistered prior to this clean-up.
4635 */
4636 assert(lttng_ht_get_count(app->token_to_event_notifier_rule_ht) == 0);
4637
4638 ust_app_notify_sock_unregister(app->notify_sock);
4639 }
4640 }
4641
4642 if (ust_app_ht) {
4643 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4644 ret = lttng_ht_del(ust_app_ht, &iter);
4645 assert(!ret);
4646 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
4647 }
4648 }
4649
4650 /* Cleanup socket hash table */
4651 if (ust_app_ht_by_sock) {
4652 cds_lfht_for_each_entry(ust_app_ht_by_sock->ht, &iter.iter, app,
4653 sock_n.node) {
4654 ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
4655 assert(!ret);
4656 }
4657 }
4658
4659 rcu_read_unlock();
4660
4661 /* Destroy is done only when the ht is empty */
4662 if (ust_app_ht) {
4663 ht_cleanup_push(ust_app_ht);
4664 }
4665 if (ust_app_ht_by_sock) {
4666 ht_cleanup_push(ust_app_ht_by_sock);
4667 }
4668 if (ust_app_ht_by_notify_sock) {
4669 ht_cleanup_push(ust_app_ht_by_notify_sock);
4670 }
4671 }
4672
4673 /*
4674 * Init UST app hash table.
4675 */
4676 int ust_app_ht_alloc(void)
4677 {
4678 ust_app_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
4679 if (!ust_app_ht) {
4680 return -1;
4681 }
4682 ust_app_ht_by_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
4683 if (!ust_app_ht_by_sock) {
4684 return -1;
4685 }
4686 ust_app_ht_by_notify_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
4687 if (!ust_app_ht_by_notify_sock) {
4688 return -1;
4689 }
4690 return 0;
4691 }
4692
4693 /*
4694 * For a specific UST session, disable the channel for all registered apps.
4695 */
4696 int ust_app_disable_channel_glb(struct ltt_ust_session *usess,
4697 struct ltt_ust_channel *uchan)
4698 {
4699 int ret = 0;
4700 struct lttng_ht_iter iter;
4701 struct lttng_ht_node_str *ua_chan_node;
4702 struct ust_app *app;
4703 struct ust_app_session *ua_sess;
4704 struct ust_app_channel *ua_chan;
4705
4706 assert(usess->active);
4707 DBG2("UST app disabling channel %s from global domain for session id %" PRIu64,
4708 uchan->name, usess->id);
4709
4710 rcu_read_lock();
4711
4712 /* For every registered applications */
4713 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4714 struct lttng_ht_iter uiter;
4715 if (!app->compatible) {
4716 /*
4717 * TODO: In time, we should notice the caller of this error by
4718 * telling him that this is a version error.
4719 */
4720 continue;
4721 }
4722 ua_sess = lookup_session_by_app(usess, app);
4723 if (ua_sess == NULL) {
4724 continue;
4725 }
4726
4727 /* Get channel */
4728 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4729 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
4730 /* If the session if found for the app, the channel must be there */
4731 assert(ua_chan_node);
4732
4733 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4734 /* The channel must not be already disabled */
4735 assert(ua_chan->enabled == 1);
4736
4737 /* Disable channel onto application */
4738 ret = disable_ust_app_channel(ua_sess, ua_chan, app);
4739 if (ret < 0) {
4740 /* XXX: We might want to report this error at some point... */
4741 continue;
4742 }
4743 }
4744
4745 rcu_read_unlock();
4746 return ret;
4747 }
4748
4749 /*
4750 * For a specific UST session, enable the channel for all registered apps.
4751 */
4752 int ust_app_enable_channel_glb(struct ltt_ust_session *usess,
4753 struct ltt_ust_channel *uchan)
4754 {
4755 int ret = 0;
4756 struct lttng_ht_iter iter;
4757 struct ust_app *app;
4758 struct ust_app_session *ua_sess;
4759
4760 assert(usess->active);
4761 DBG2("UST app enabling channel %s to global domain for session id %" PRIu64,
4762 uchan->name, usess->id);
4763
4764 rcu_read_lock();
4765
4766 /* For every registered applications */
4767 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4768 if (!app->compatible) {
4769 /*
4770 * TODO: In time, we should notice the caller of this error by
4771 * telling him that this is a version error.
4772 */
4773 continue;
4774 }
4775 ua_sess = lookup_session_by_app(usess, app);
4776 if (ua_sess == NULL) {
4777 continue;
4778 }
4779
4780 /* Enable channel onto application */
4781 ret = enable_ust_app_channel(ua_sess, uchan, app);
4782 if (ret < 0) {
4783 /* XXX: We might want to report this error at some point... */
4784 continue;
4785 }
4786 }
4787
4788 rcu_read_unlock();
4789 return ret;
4790 }
4791
4792 /*
4793 * Disable an event in a channel and for a specific session.
4794 */
4795 int ust_app_disable_event_glb(struct ltt_ust_session *usess,
4796 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
4797 {
4798 int ret = 0;
4799 struct lttng_ht_iter iter, uiter;
4800 struct lttng_ht_node_str *ua_chan_node;
4801 struct ust_app *app;
4802 struct ust_app_session *ua_sess;
4803 struct ust_app_channel *ua_chan;
4804 struct ust_app_event *ua_event;
4805
4806 assert(usess->active);
4807 DBG("UST app disabling event %s for all apps in channel "
4808 "%s for session id %" PRIu64,
4809 uevent->attr.name, uchan->name, usess->id);
4810
4811 rcu_read_lock();
4812
4813 /* For all registered applications */
4814 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4815 if (!app->compatible) {
4816 /*
4817 * TODO: In time, we should notice the caller of this error by
4818 * telling him that this is a version error.
4819 */
4820 continue;
4821 }
4822 ua_sess = lookup_session_by_app(usess, app);
4823 if (ua_sess == NULL) {
4824 /* Next app */
4825 continue;
4826 }
4827
4828 /* Lookup channel in the ust app session */
4829 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4830 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
4831 if (ua_chan_node == NULL) {
4832 DBG2("Channel %s not found in session id %" PRIu64 " for app pid %d."
4833 "Skipping", uchan->name, usess->id, app->pid);
4834 continue;
4835 }
4836 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4837
4838 ua_event = find_ust_app_event(ua_chan->events,
4839 uevent->attr.name, uevent->filter,
4840 (enum lttng_ust_abi_loglevel_type)
4841 uevent->attr.loglevel_type,
4842 uevent->attr.loglevel, uevent->exclusion);
4843 if (ua_event == NULL) {
4844 DBG2("Event %s not found in channel %s for app pid %d."
4845 "Skipping", uevent->attr.name, uchan->name, app->pid);
4846 continue;
4847 }
4848
4849 ret = disable_ust_app_event(ua_sess, ua_event, app);
4850 if (ret < 0) {
4851 /* XXX: Report error someday... */
4852 continue;
4853 }
4854 }
4855
4856 rcu_read_unlock();
4857 return ret;
4858 }
4859
/*
 * Create (or, for the metadata channel, configure) a channel on a single
 * application and synchronize its configuration, including its contexts.
 *
 * On success, '*_ua_chan' (if non-NULL) is set to the application's channel;
 * for the metadata channel it is left NULL. Returns 0 on success or when the
 * application is unreachable (-ENOTCONN is absorbed), a negative value on
 * other errors.
 *
 * The ua_sess lock must be held by the caller.
 */
static
int ust_app_channel_create(struct ltt_ust_session *usess,
		struct ust_app_session *ua_sess,
		struct ltt_ust_channel *uchan, struct ust_app *app,
		struct ust_app_channel **_ua_chan)
{
	int ret = 0;
	struct ust_app_channel *ua_chan = NULL;

	assert(ua_sess);
	ASSERT_LOCKED(ua_sess->lock);

	/*
	 * The metadata channel is not created here; only its attributes are
	 * copied onto the session's metadata attributes.
	 */
	if (!strncmp(uchan->name, DEFAULT_METADATA_NAME,
				sizeof(uchan->name))) {
		copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
			&uchan->attr);
		ret = 0;
	} else {
		struct ltt_ust_context *uctx = NULL;

		/*
		 * Create channel onto application and synchronize its
		 * configuration.
		 */
		ret = ust_app_channel_allocate(ua_sess, uchan,
			LTTNG_UST_ABI_CHAN_PER_CPU, usess,
			&ua_chan);
		if (ret < 0) {
			goto error;
		}

		ret = ust_app_channel_send(app, usess,
			ua_sess, ua_chan);
		if (ret) {
			goto error;
		}

		/* Add contexts. */
		cds_list_for_each_entry(uctx, &uchan->ctx_list, list) {
			ret = create_ust_app_channel_context(ua_chan,
				&uctx->ctx, app);
			if (ret) {
				goto error;
			}
		}
	}

	/* NOTE: control also falls through to this label on success. */
error:
	if (ret < 0) {
		switch (ret) {
		case -ENOTCONN:
			/*
			 * The application's socket is not valid. Either a bad socket
			 * or a timeout on it. We can't inform the caller that for a
			 * specific app, the session failed so lets continue here.
			 */
			ret = 0;	/* Not an error. */
			break;
		case -ENOMEM:
		default:
			break;
		}
	}

	if (ret == 0 && _ua_chan) {
		/*
		 * Only return the application's channel on success. Note
		 * that the channel can still be part of the application's
		 * channel hashtable on error.
		 */
		*_ua_chan = ua_chan;
	}
	return ret;
}
4935
/*
 * Enable event for a specific session and channel on the tracer.
 *
 * Returns 0 on success; a negative value if enabling the event failed on
 * an application. Apps for which the session, channel or event cannot be
 * found are skipped silently.
 */
int ust_app_enable_event_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
{
	int ret = 0;
	struct lttng_ht_iter iter, uiter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;
	struct ust_app_event *ua_event;

	assert(usess->active);
	DBG("UST app enabling event %s for all apps for session id %" PRIu64,
			uevent->attr.name, usess->id);

	/*
	 * NOTE: At this point, this function is called only if the session and
	 * channel passed are already created for all apps. and enabled on the
	 * tracer also.
	 */

	rcu_read_lock();

	/* For all registered applications */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (!ua_sess) {
			/* The application has problem or is probably dead. */
			continue;
		}

		pthread_mutex_lock(&ua_sess->lock);

		if (ua_sess->deleted) {
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		/* Lookup channel in the ust app session */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		/*
		 * It is possible that the channel cannot be found if
		 * the channel/event creation occurs concurrently with
		 * an application exit.
		 */
		if (!ua_chan_node) {
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

		/* Get event node */
		ua_event = find_ust_app_event(ua_chan->events,
				uevent->attr.name, uevent->filter,
				(enum lttng_ust_abi_loglevel_type)
				uevent->attr.loglevel_type,
				uevent->attr.loglevel, uevent->exclusion);
		if (ua_event == NULL) {
			DBG3("UST app enable event %s not found for app PID %d."
					"Skipping app", uevent->attr.name, app->pid);
			goto next_app;
		}

		ret = enable_ust_app_event(ua_sess, ua_event, app);
		if (ret < 0) {
			/* Abort the whole iteration on a hard enable failure. */
			pthread_mutex_unlock(&ua_sess->lock);
			goto error;
		}
	next_app:
		pthread_mutex_unlock(&ua_sess->lock);
	}

error:
	rcu_read_unlock();
	return ret;
}
5024
/*
 * For a specific existing UST session and UST channel, creates the event for
 * all registered apps.
 *
 * Returns 0 on success. "Event already exists" (-LTTNG_UST_ERR_EXIST) is
 * tolerated per-app; any other creation error (e.g. -ENOMEM) stops the
 * iteration and is returned.
 */
int ust_app_create_event_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
{
	int ret = 0;
	struct lttng_ht_iter iter, uiter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;

	assert(usess->active);
	DBG("UST app creating event %s for all apps for session id %" PRIu64,
			uevent->attr.name, usess->id);

	rcu_read_lock();

	/* For all registered applications */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (!ua_sess) {
			/* The application has problem or is probably dead. */
			continue;
		}

		pthread_mutex_lock(&ua_sess->lock);

		if (ua_sess->deleted) {
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		/* Lookup channel in the ust app session */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		/* If the channel is not found, there is a code flow error */
		assert(ua_chan_node);

		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

		ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
		pthread_mutex_unlock(&ua_sess->lock);
		if (ret < 0) {
			if (ret != -LTTNG_UST_ERR_EXIST) {
				/* Possible value at this point: -ENOMEM. If so, we stop! */
				break;
			}
			DBG2("UST app event %s already exist on app PID %d",
					uevent->attr.name, app->pid);
			continue;
		}
	}

	rcu_read_unlock();
	return ret;
}
5091
/*
 * Start tracing for a specific UST session and app.
 *
 * Called with UST app session lock held.
 *
 * NOTE(review): this function itself takes ua_sess->lock, so the lock
 * referred to above is presumably the ltt_ust_session/session-list lock
 * held by callers — confirm against call sites.
 *
 * Returns 0 on success (including when the app is incompatible, has no
 * session, is already started, or died mid-command), -1 on a hard failure
 * of the start command.
 */
static
int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app)
{
	int ret = 0;
	struct ust_app_session *ua_sess;

	DBG("Starting tracing for ust app pid %d", app->pid);

	rcu_read_lock();

	if (!app->compatible) {
		goto end;
	}

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		/* The session is in teardown process. Ignore and continue. */
		goto end;
	}

	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		pthread_mutex_unlock(&ua_sess->lock);
		goto end;
	}

	if (ua_sess->enabled) {
		/* Already running; nothing to do. */
		pthread_mutex_unlock(&ua_sess->lock);
		goto end;
	}

	/* Upon restart, we skip the setup, already done */
	if (ua_sess->started) {
		goto skip_setup;
	}

	health_code_update();

skip_setup:
	/* This starts the UST tracing */
	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_start_session(app->sock, ua_sess->handle);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			/* A dead application is not an error; report success. */
			DBG3("UST app start session failed. Application is dead: pid = %d, sock = %d",
					app->pid, app->sock);
			pthread_mutex_unlock(&ua_sess->lock);
			goto end;
		} else if (ret == -EAGAIN) {
			WARN("UST app start session failed. Communication time out: pid = %d, sock = %d",
					app->pid, app->sock);
			pthread_mutex_unlock(&ua_sess->lock);
			goto end;

		} else {
			ERR("UST app start session failed with ret %d: pid = %d, sock = %d",
					ret, app->pid, app->sock);
		}
		goto error_unlock;
	}

	/* Indicate that the session has been started once */
	ua_sess->started = 1;
	ua_sess->enabled = 1;

	pthread_mutex_unlock(&ua_sess->lock);

	health_code_update();

	/* Quiescent wait after starting trace */
	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_wait_quiescent(app->sock);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		/* Wait-quiescent errors are only logged; the start succeeded. */
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			DBG3("UST app wait quiescent failed. Application is dead: pid = %d, sock = %d",
					app->pid, app->sock);
		} else if (ret == -EAGAIN) {
			WARN("UST app wait quiescent failed. Communication time out: pid = %d, sock = %d",
					app->pid, app->sock);
		} else {
			ERR("UST app wait quiescent failed with ret %d: pid %d, sock = %d",
					ret, app->pid, app->sock);
		}
	}

end:
	rcu_read_unlock();
	health_code_update();
	return 0;

error_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
	rcu_read_unlock();
	health_code_update();
	return -1;
}
5197
/*
 * Stop tracing for a specific UST session and app.
 *
 * Returns 0 on success (including when the application is dead or has no
 * session for `usess`) and -1 on error.
 */
static
int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
{
	int ret = 0;
	struct ust_app_session *ua_sess;
	struct ust_registry_session *registry;

	DBG("Stopping tracing for ust app pid %d", app->pid);

	rcu_read_lock();

	/* Silently skip applications with an incompatible tracer ABI. */
	if (!app->compatible) {
		goto end_no_session;
	}

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		goto end_no_session;
	}

	pthread_mutex_lock(&ua_sess->lock);

	/* The session may have been marked as deleted before the lock was taken. */
	if (ua_sess->deleted) {
		pthread_mutex_unlock(&ua_sess->lock);
		goto end_no_session;
	}

	/*
	 * If started = 0, it means that stop trace has been called for a session
	 * that was never started. It's possible since we can have a fail start
	 * from either the application manager thread or the command thread. Simply
	 * indicate that this is a stop error.
	 */
	if (!ua_sess->started) {
		goto error_rcu_unlock;
	}

	health_code_update();

	/* This inhibits UST tracing */
	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_stop_session(app->sock, ua_sess->handle);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			/* A dead application is not a caller-visible error. */
			DBG3("UST app stop session failed. Application is dead: pid = %d, sock = %d",
					app->pid, app->sock);
			goto end_unlock;
		} else if (ret == -EAGAIN) {
			/* Communication timeout is treated as best-effort success. */
			WARN("UST app stop session failed. Communication time out: pid = %d, sock = %d",
					app->pid, app->sock);
			goto end_unlock;

		} else {
			ERR("UST app stop session failed with ret %d: pid = %d, sock = %d",
					ret, app->pid, app->sock);
		}
		goto error_rcu_unlock;
	}

	health_code_update();
	ua_sess->enabled = 0;

	/* Quiescent wait after stopping trace */
	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_wait_quiescent(app->sock);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		/* Failure to quiesce is only logged; the stop itself succeeded. */
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			DBG3("UST app wait quiescent failed. Application is dead: pid= %d, sock = %d)",
					app->pid, app->sock);
		} else if (ret == -EAGAIN) {
			WARN("UST app wait quiescent failed. Communication time out: pid= %d, sock = %d)",
					app->pid, app->sock);
		} else {
			ERR("UST app wait quiescent failed with ret %d: pid= %d, sock = %d)",
					ret, app->pid, app->sock);
		}
	}

	health_code_update();

	registry = get_session_registry(ua_sess);

	/* The UST app session is held; the registry shall not be null. */
	assert(registry);

	/* Push metadata for application before freeing the application. */
	(void) push_metadata(registry, ua_sess->consumer);

end_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
end_no_session:
	rcu_read_unlock();
	health_code_update();
	return 0;

error_rcu_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
	rcu_read_unlock();
	health_code_update();
	return -1;
}
5304
5305 static
5306 int ust_app_flush_app_session(struct ust_app *app,
5307 struct ust_app_session *ua_sess)
5308 {
5309 int ret, retval = 0;
5310 struct lttng_ht_iter iter;
5311 struct ust_app_channel *ua_chan;
5312 struct consumer_socket *socket;
5313
5314 DBG("Flushing app session buffers for ust app pid %d", app->pid);
5315
5316 rcu_read_lock();
5317
5318 if (!app->compatible) {
5319 goto end_not_compatible;
5320 }
5321
5322 pthread_mutex_lock(&ua_sess->lock);
5323
5324 if (ua_sess->deleted) {
5325 goto end_deleted;
5326 }
5327
5328 health_code_update();
5329
5330 /* Flushing buffers */
5331 socket = consumer_find_socket_by_bitness(app->bits_per_long,
5332 ua_sess->consumer);
5333
5334 /* Flush buffers and push metadata. */
5335 switch (ua_sess->buffer_type) {
5336 case LTTNG_BUFFER_PER_PID:
5337 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
5338 node.node) {
5339 health_code_update();
5340 ret = consumer_flush_channel(socket, ua_chan->key);
5341 if (ret) {
5342 ERR("Error flushing consumer channel");
5343 retval = -1;
5344 continue;
5345 }
5346 }
5347 break;
5348 case LTTNG_BUFFER_PER_UID:
5349 default:
5350 assert(0);
5351 break;
5352 }
5353
5354 health_code_update();
5355
5356 end_deleted:
5357 pthread_mutex_unlock(&ua_sess->lock);
5358
5359 end_not_compatible:
5360 rcu_read_unlock();
5361 health_code_update();
5362 return retval;
5363 }
5364
/*
 * Flush buffers for all applications for a specific UST session.
 * Called with UST session lock held.
 *
 * Returns 0 on success, -1 if the session's buffer type is unknown.
 */
static
int ust_app_flush_session(struct ltt_ust_session *usess)

{
	int ret = 0;

	DBG("Flushing session buffers for all ust apps");

	rcu_read_lock();

	/* Flush buffers and push metadata. */
	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;
		struct lttng_ht_iter iter;

		/* Flush all per UID buffers associated to that session. */
		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct ust_registry_session *ust_session_reg;
			struct buffer_reg_channel *buf_reg_chan;
			struct consumer_socket *socket;

			/* Get consumer socket to use to push the metadata.*/
			socket = consumer_find_socket_by_bitness(reg->bits_per_long,
					usess->consumer);
			if (!socket) {
				/* Ignore request if no consumer is found for the session. */
				continue;
			}

			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
					buf_reg_chan, node.node) {
				/*
				 * The following call will print error values so the return
				 * code is of little importance because whatever happens, we
				 * have to try them all.
				 */
				(void) consumer_flush_channel(socket, buf_reg_chan->consumer_key);
			}

			ust_session_reg = reg->registry->reg.ust;
			/* Push metadata. */
			(void) push_metadata(ust_session_reg, usess->consumer);
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		struct ust_app_session *ua_sess;
		struct lttng_ht_iter iter;
		struct ust_app *app;

		/* Per-PID buffers are flushed one application at a time. */
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			ua_sess = lookup_session_by_app(usess, app);
			if (ua_sess == NULL) {
				continue;
			}
			/* Errors are logged by the callee; keep iterating. */
			(void) ust_app_flush_app_session(app, ua_sess);
		}
		break;
	}
	default:
		ret = -1;
		assert(0);
		break;
	}

	rcu_read_unlock();
	health_code_update();
	return ret;
}
5441
/*
 * Clear the quiescent state of every channel of a per-PID application
 * session on the consumer side.
 *
 * Returns 0 on success, -1 on error (missing consumer socket, channel
 * clear failure, or unexpected buffer type).
 */
static
int ust_app_clear_quiescent_app_session(struct ust_app *app,
		struct ust_app_session *ua_sess)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct ust_app_channel *ua_chan;
	struct consumer_socket *socket;

	DBG("Clearing stream quiescent state for ust app pid %d", app->pid);

	rcu_read_lock();

	/* Silently skip applications with an incompatible tracer ABI. */
	if (!app->compatible) {
		goto end_not_compatible;
	}

	pthread_mutex_lock(&ua_sess->lock);

	/* Nothing to do on a session marked for deletion. */
	if (ua_sess->deleted) {
		goto end_unlock;
	}

	health_code_update();

	socket = consumer_find_socket_by_bitness(app->bits_per_long,
			ua_sess->consumer);
	if (!socket) {
		ERR("Failed to find consumer (%" PRIu32 ") socket",
				app->bits_per_long);
		ret = -1;
		goto end_unlock;
	}

	/* Clear quiescent state. */
	switch (ua_sess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter,
				ua_chan, node.node) {
			health_code_update();
			ret = consumer_clear_quiescent_channel(socket,
					ua_chan->key);
			if (ret) {
				/* Keep processing the remaining channels on error. */
				ERR("Error clearing quiescent state for consumer channel");
				ret = -1;
				continue;
			}
		}
		break;
	case LTTNG_BUFFER_PER_UID:
	default:
		/* Per-UID sessions are handled via the buffer registry by the caller. */
		assert(0);
		ret = -1;
		break;
	}

	health_code_update();

end_unlock:
	pthread_mutex_unlock(&ua_sess->lock);

end_not_compatible:
	rcu_read_unlock();
	health_code_update();
	return ret;
}
5508
/*
 * Clear quiescent state in each stream for all applications for a
 * specific UST session.
 * Called with UST session lock held.
 *
 * Returns 0 on success, -1 if the session's buffer type is unknown.
 */
static
int ust_app_clear_quiescent_session(struct ltt_ust_session *usess)

{
	int ret = 0;

	DBG("Clearing stream quiescent state for all ust apps");

	rcu_read_lock();

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct lttng_ht_iter iter;
		struct buffer_reg_uid *reg;

		/*
		 * Clear quiescent for all per UID buffers associated to
		 * that session.
		 */
		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct consumer_socket *socket;
			struct buffer_reg_channel *buf_reg_chan;

			/* Get associated consumer socket.*/
			socket = consumer_find_socket_by_bitness(
					reg->bits_per_long, usess->consumer);
			if (!socket) {
				/*
				 * Ignore request if no consumer is found for
				 * the session.
				 */
				continue;
			}

			cds_lfht_for_each_entry(reg->registry->channels->ht,
					&iter.iter, buf_reg_chan, node.node) {
				/*
				 * The following call will print error values so
				 * the return code is of little importance
				 * because whatever happens, we have to try them
				 * all.
				 */
				(void) consumer_clear_quiescent_channel(socket,
						buf_reg_chan->consumer_key);
			}
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		struct ust_app_session *ua_sess;
		struct lttng_ht_iter iter;
		struct ust_app *app;

		/* Per-PID buffers: clear each application's session in turn. */
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app,
				pid_n.node) {
			ua_sess = lookup_session_by_app(usess, app);
			if (ua_sess == NULL) {
				continue;
			}
			/* Errors are logged by the callee; keep iterating. */
			(void) ust_app_clear_quiescent_app_session(app,
					ua_sess);
		}
		break;
	}
	default:
		ret = -1;
		assert(0);
		break;
	}

	rcu_read_unlock();
	health_code_update();
	return ret;
}
5590
5591 /*
5592 * Destroy a specific UST session in apps.
5593 */
5594 static int destroy_trace(struct ltt_ust_session *usess, struct ust_app *app)
5595 {
5596 int ret;
5597 struct ust_app_session *ua_sess;
5598 struct lttng_ht_iter iter;
5599 struct lttng_ht_node_u64 *node;
5600
5601 DBG("Destroy tracing for ust app pid %d", app->pid);
5602
5603 rcu_read_lock();
5604
5605 if (!app->compatible) {
5606 goto end;
5607 }
5608
5609 __lookup_session_by_app(usess, app, &iter);
5610 node = lttng_ht_iter_get_node_u64(&iter);
5611 if (node == NULL) {
5612 /* Session is being or is deleted. */
5613 goto end;
5614 }
5615 ua_sess = caa_container_of(node, struct ust_app_session, node);
5616
5617 health_code_update();
5618 destroy_app_session(app, ua_sess);
5619
5620 health_code_update();
5621
5622 /* Quiescent wait after stopping trace */
5623 pthread_mutex_lock(&app->sock_lock);
5624 ret = lttng_ust_ctl_wait_quiescent(app->sock);
5625 pthread_mutex_unlock(&app->sock_lock);
5626 if (ret < 0) {
5627 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
5628 DBG3("UST app wait quiescent failed. Application is dead: pid= %d, sock = %d)",
5629 app->pid, app->sock);
5630 } else if (ret == -EAGAIN) {
5631 WARN("UST app wait quiescent failed. Communication time out: pid= %d, sock = %d)",
5632 app->pid, app->sock);
5633 } else {
5634 ERR("UST app wait quiescent failed with ret %d: pid= %d, sock = %d)",
5635 ret, app->pid, app->sock);
5636 }
5637 }
5638 end:
5639 rcu_read_unlock();
5640 health_code_update();
5641 return 0;
5642 }
5643
5644 /*
5645 * Start tracing for the UST session.
5646 */
5647 int ust_app_start_trace_all(struct ltt_ust_session *usess)
5648 {
5649 struct lttng_ht_iter iter;
5650 struct ust_app *app;
5651
5652 DBG("Starting all UST traces");
5653
5654 /*
5655 * Even though the start trace might fail, flag this session active so
5656 * other application coming in are started by default.
5657 */
5658 usess->active = 1;
5659
5660 rcu_read_lock();
5661
5662 /*
5663 * In a start-stop-start use-case, we need to clear the quiescent state
5664 * of each channel set by the prior stop command, thus ensuring that a
5665 * following stop or destroy is sure to grab a timestamp_end near those
5666 * operations, even if the packet is empty.
5667 */
5668 (void) ust_app_clear_quiescent_session(usess);
5669
5670 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5671 ust_app_global_update(usess, app);
5672 }
5673
5674 rcu_read_unlock();
5675
5676 return 0;
5677 }
5678
5679 /*
5680 * Start tracing for the UST session.
5681 * Called with UST session lock held.
5682 */
5683 int ust_app_stop_trace_all(struct ltt_ust_session *usess)
5684 {
5685 int ret = 0;
5686 struct lttng_ht_iter iter;
5687 struct ust_app *app;
5688
5689 DBG("Stopping all UST traces");
5690
5691 /*
5692 * Even though the stop trace might fail, flag this session inactive so
5693 * other application coming in are not started by default.
5694 */
5695 usess->active = 0;
5696
5697 rcu_read_lock();
5698
5699 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5700 ret = ust_app_stop_trace(usess, app);
5701 if (ret < 0) {
5702 /* Continue to next apps even on error */
5703 continue;
5704 }
5705 }
5706
5707 (void) ust_app_flush_session(usess);
5708
5709 rcu_read_unlock();
5710
5711 return 0;
5712 }
5713
5714 /*
5715 * Destroy app UST session.
5716 */
5717 int ust_app_destroy_trace_all(struct ltt_ust_session *usess)
5718 {
5719 int ret = 0;
5720 struct lttng_ht_iter iter;
5721 struct ust_app *app;
5722
5723 DBG("Destroy all UST traces");
5724
5725 rcu_read_lock();
5726
5727 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5728 ret = destroy_trace(usess, app);
5729 if (ret < 0) {
5730 /* Continue to next apps even on error */
5731 continue;
5732 }
5733 }
5734
5735 rcu_read_unlock();
5736
5737 return 0;
5738 }
5739
5740 /* The ua_sess lock must be held by the caller. */
5741 static
5742 int find_or_create_ust_app_channel(
5743 struct ltt_ust_session *usess,
5744 struct ust_app_session *ua_sess,
5745 struct ust_app *app,
5746 struct ltt_ust_channel *uchan,
5747 struct ust_app_channel **ua_chan)
5748 {
5749 int ret = 0;
5750 struct lttng_ht_iter iter;
5751 struct lttng_ht_node_str *ua_chan_node;
5752
5753 lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &iter);
5754 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
5755 if (ua_chan_node) {
5756 *ua_chan = caa_container_of(ua_chan_node,
5757 struct ust_app_channel, node);
5758 goto end;
5759 }
5760
5761 ret = ust_app_channel_create(usess, ua_sess, uchan, app, ua_chan);
5762 if (ret) {
5763 goto end;
5764 }
5765 end:
5766 return ret;
5767 }
5768
5769 static
5770 int ust_app_channel_synchronize_event(struct ust_app_channel *ua_chan,
5771 struct ltt_ust_event *uevent, struct ust_app_session *ua_sess,
5772 struct ust_app *app)
5773 {
5774 int ret = 0;
5775 struct ust_app_event *ua_event = NULL;
5776
5777 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
5778 uevent->filter,
5779 (enum lttng_ust_abi_loglevel_type)
5780 uevent->attr.loglevel_type,
5781 uevent->attr.loglevel, uevent->exclusion);
5782 if (!ua_event) {
5783 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
5784 if (ret < 0) {
5785 goto end;
5786 }
5787 } else {
5788 if (ua_event->enabled != uevent->enabled) {
5789 ret = uevent->enabled ?
5790 enable_ust_app_event(ua_sess, ua_event, app) :
5791 disable_ust_app_event(ua_sess, ua_event, app);
5792 }
5793 }
5794
5795 end:
5796 return ret;
5797 }
5798
/*
 * Synchronize the application's event notifier rules with the triggers
 * currently registered on the notification thread side: first create any
 * missing rule, then remove rules whose trigger no longer exists.
 *
 * Called with RCU read-side lock held.
 */
static
void ust_app_synchronize_event_notifier_rules(struct ust_app *app)
{
	int ret = 0;
	enum lttng_error_code ret_code;
	enum lttng_trigger_status t_status;
	struct lttng_ht_iter app_trigger_iter;
	struct lttng_triggers *triggers = NULL;
	struct ust_app_event_notifier_rule *event_notifier_rule;
	unsigned int count, i;

	if (!ust_app_supports_notifiers(app)) {
		goto end;
	}

	/*
	 * Currently, registering or unregistering a trigger with an
	 * event rule condition causes a full synchronization of the event
	 * notifiers.
	 *
	 * The first step attempts to add an event notifier for all registered
	 * triggers that apply to the user space tracers. Then, the
	 * application's event notifiers rules are all checked against the list
	 * of registered triggers. Any event notifier that doesn't have a
	 * matching trigger can be assumed to have been disabled.
	 *
	 * All of this is inefficient, but is put in place to get the feature
	 * rolling as it is simpler at this moment. It will be optimized Soon™
	 * to allow the state of enabled
	 * event notifiers to be synchronized in a piece-wise way.
	 */

	/* Get all triggers using uid 0 (root) */
	ret_code = notification_thread_command_list_triggers(
			the_notification_thread_handle, 0, &triggers);
	if (ret_code != LTTNG_OK) {
		goto end;
	}

	assert(triggers);

	t_status = lttng_triggers_get_count(triggers, &count);
	if (t_status != LTTNG_TRIGGER_STATUS_OK) {
		goto end;
	}

	/* Step 1: create any event notifier rule missing on the app side. */
	for (i = 0; i < count; i++) {
		struct lttng_condition *condition;
		struct lttng_event_rule *event_rule;
		struct lttng_trigger *trigger;
		const struct ust_app_event_notifier_rule *looked_up_event_notifier_rule;
		enum lttng_condition_status condition_status;
		uint64_t token;

		trigger = lttng_triggers_borrow_mutable_at_index(triggers, i);
		assert(trigger);

		token = lttng_trigger_get_tracer_token(trigger);
		condition = lttng_trigger_get_condition(trigger);

		if (lttng_condition_get_type(condition) !=
				LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES) {
			/* Does not apply */
			continue;
		}

		condition_status =
				lttng_condition_event_rule_matches_borrow_rule_mutable(
						condition, &event_rule);
		assert(condition_status == LTTNG_CONDITION_STATUS_OK);

		if (lttng_event_rule_get_domain_type(event_rule) == LTTNG_DOMAIN_KERNEL) {
			/* Skip kernel related triggers. */
			continue;
		}

		/*
		 * Find or create the associated token event rule. The caller
		 * holds the RCU read lock, so this is safe to call without
		 * explicitly acquiring it here.
		 */
		looked_up_event_notifier_rule = find_ust_app_event_notifier_rule(
				app->token_to_event_notifier_rule_ht, token);
		if (!looked_up_event_notifier_rule) {
			ret = create_ust_app_event_notifier_rule(trigger, app);
			if (ret < 0) {
				goto end;
			}
		}
	}

	/* Step 2: drop app rules whose trigger has been unregistered. */
	rcu_read_lock();
	/* Remove all unknown event sources from the app. */
	cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht,
			&app_trigger_iter.iter, event_notifier_rule,
			node.node) {
		const uint64_t app_token = event_notifier_rule->token;
		bool found = false;

		/*
		 * Check if the app event trigger still exists on the
		 * notification side.
		 */
		for (i = 0; i < count; i++) {
			uint64_t notification_thread_token;
			const struct lttng_trigger *trigger =
					lttng_triggers_get_at_index(
							triggers, i);

			assert(trigger);

			notification_thread_token =
					lttng_trigger_get_tracer_token(trigger);

			if (notification_thread_token == app_token) {
				found = true;
				break;
			}
		}

		if (found) {
			/* Still valid. */
			continue;
		}

		/*
		 * This trigger was unregistered, disable it on the tracer's
		 * side.
		 */
		ret = lttng_ht_del(app->token_to_event_notifier_rule_ht,
				&app_trigger_iter);
		assert(ret == 0);

		/* Callee logs errors. */
		(void) disable_ust_object(app, event_notifier_rule->obj);

		delete_ust_app_event_notifier_rule(
				app->sock, event_notifier_rule, app);
	}

	rcu_read_unlock();

end:
	/* NOTE: triggers may still be NULL here when listing failed or notifiers are unsupported. */
	lttng_triggers_destroy(triggers);
	return;
}
5946
/*
 * Create or update the application-side channels and events so that they
 * match the session's global UST domain configuration.
 *
 * RCU read lock must be held by the caller.
 */
static
void ust_app_synchronize_all_channels(struct ltt_ust_session *usess,
		struct ust_app_session *ua_sess,
		struct ust_app *app)
{
	int ret = 0;
	struct cds_lfht_iter uchan_iter;
	struct ltt_ust_channel *uchan;

	assert(usess);
	assert(ua_sess);
	assert(app);

	cds_lfht_for_each_entry(usess->domain_global.channels->ht, &uchan_iter,
			uchan, node.node) {
		struct ust_app_channel *ua_chan;
		struct cds_lfht_iter uevent_iter;
		struct ltt_ust_event *uevent;

		/*
		 * Search for a matching ust_app_channel. If none is found,
		 * create it. Creating the channel will cause the ua_chan
		 * structure to be allocated, the channel buffers to be
		 * allocated (if necessary) and sent to the application, and
		 * all enabled contexts will be added to the channel.
		 */
		ret = find_or_create_ust_app_channel(usess, ua_sess,
			app, uchan, &ua_chan);
		if (ret) {
			/* Tracer is probably gone or ENOMEM. */
			goto end;
		}

		if (!ua_chan) {
			/* ua_chan will be NULL for the metadata channel */
			continue;
		}

		/* Mirror each session event onto the application channel. */
		cds_lfht_for_each_entry(uchan->events->ht, &uevent_iter, uevent,
				node.node) {
			ret = ust_app_channel_synchronize_event(ua_chan,
				uevent, ua_sess, app);
			if (ret) {
				goto end;
			}
		}

		/* Reconcile the enabled state of the channel itself. */
		if (ua_chan->enabled != uchan->enabled) {
			ret = uchan->enabled ?
				enable_ust_app_channel(ua_sess, uchan, app) :
				disable_ust_app_channel(ua_sess, ua_chan, app);
			if (ret) {
				goto end;
			}
		}
	}
end:
	return;
}
6009
/*
 * Synchronize one application's tracing configuration (session, channels,
 * events, metadata) with the given UST session.
 *
 * The caller must ensure that the application is compatible and is tracked
 * by the process attribute trackers.
 */
static
void ust_app_synchronize(struct ltt_ust_session *usess,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_session *ua_sess = NULL;

	/*
	 * The application's configuration should only be synchronized for
	 * active sessions.
	 */
	assert(usess->active);

	ret = find_or_create_ust_app_session(usess, app, &ua_sess, NULL);
	if (ret < 0) {
		/* Tracer is probably gone or ENOMEM. */
		if (ua_sess) {
			/* Tear down the partially-created app session. */
			destroy_app_session(app, ua_sess);
		}
		goto end;
	}
	assert(ua_sess);

	pthread_mutex_lock(&ua_sess->lock);
	if (ua_sess->deleted) {
		/* Nothing to synchronize on a session marked for deletion. */
		goto deleted_session;
	}

	rcu_read_lock();

	ust_app_synchronize_all_channels(usess, ua_sess, app);

	/*
	 * Create the metadata for the application. This returns gracefully if a
	 * metadata was already set for the session.
	 *
	 * The metadata channel must be created after the data channels as the
	 * consumer daemon assumes this ordering. When interacting with a relay
	 * daemon, the consumer will use this assumption to send the
	 * "STREAMS_SENT" message to the relay daemon.
	 */
	ret = create_ust_app_metadata(ua_sess, app, usess->consumer);
	if (ret < 0) {
		ERR("Metadata creation failed for app sock %d for session id %" PRIu64,
				app->sock, usess->id);
	}

	rcu_read_unlock();

deleted_session:
	pthread_mutex_unlock(&ua_sess->lock);
end:
	return;
}
6068
6069 static
6070 void ust_app_global_destroy(struct ltt_ust_session *usess, struct ust_app *app)
6071 {
6072 struct ust_app_session *ua_sess;
6073
6074 ua_sess = lookup_session_by_app(usess, app);
6075 if (ua_sess == NULL) {
6076 return;
6077 }
6078 destroy_app_session(app, ua_sess);
6079 }
6080
6081 /*
6082 * Add channels/events from UST global domain to registered apps at sock.
6083 *
6084 * Called with session lock held.
6085 * Called with RCU read-side lock held.
6086 */
6087 void ust_app_global_update(struct ltt_ust_session *usess, struct ust_app *app)
6088 {
6089 assert(usess);
6090 assert(usess->active);
6091
6092 DBG2("UST app global update for app sock %d for session id %" PRIu64,
6093 app->sock, usess->id);
6094
6095 if (!app->compatible) {
6096 return;
6097 }
6098 if (trace_ust_id_tracker_lookup(LTTNG_PROCESS_ATTR_VIRTUAL_PROCESS_ID,
6099 usess, app->pid) &&
6100 trace_ust_id_tracker_lookup(
6101 LTTNG_PROCESS_ATTR_VIRTUAL_USER_ID,
6102 usess, app->uid) &&
6103 trace_ust_id_tracker_lookup(
6104 LTTNG_PROCESS_ATTR_VIRTUAL_GROUP_ID,
6105 usess, app->gid)) {
6106 /*
6107 * Synchronize the application's internal tracing configuration
6108 * and start tracing.
6109 */
6110 ust_app_synchronize(usess, app);
6111 ust_app_start_trace(usess, app);
6112 } else {
6113 ust_app_global_destroy(usess, app);
6114 }
6115 }
6116
6117 /*
6118 * Add all event notifiers to an application.
6119 *
6120 * Called with session lock held.
6121 * Called with RCU read-side lock held.
6122 */
6123 void ust_app_global_update_event_notifier_rules(struct ust_app *app)
6124 {
6125 DBG2("UST application global event notifier rules update: app = '%s', pid = %d)",
6126 app->name, app->pid);
6127
6128 if (!app->compatible || !ust_app_supports_notifiers(app)) {
6129 return;
6130 }
6131
6132 if (app->event_notifier_group.object == NULL) {
6133 WARN("UST app global update of event notifiers for app skipped since communication handle is null: app = '%s' pid = %d)",
6134 app->name, app->pid);
6135 return;
6136 }
6137
6138 ust_app_synchronize_event_notifier_rules(app);
6139 }
6140
6141 /*
6142 * Called with session lock held.
6143 */
6144 void ust_app_global_update_all(struct ltt_ust_session *usess)
6145 {
6146 struct lttng_ht_iter iter;
6147 struct ust_app *app;
6148
6149 rcu_read_lock();
6150 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6151 ust_app_global_update(usess, app);
6152 }
6153 rcu_read_unlock();
6154 }
6155
6156 void ust_app_global_update_all_event_notifier_rules(void)
6157 {
6158 struct lttng_ht_iter iter;
6159 struct ust_app *app;
6160
6161 rcu_read_lock();
6162 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6163 ust_app_global_update_event_notifier_rules(app);
6164 }
6165
6166 rcu_read_unlock();
6167 }
6168
/*
 * Add context to a specific channel for global UST domain.
 *
 * Returns 0 on success, or the return value of the last failed context
 * creation; individual per-application failures do not stop the iteration.
 */
int ust_app_add_ctx_channel_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_context *uctx)
{
	int ret = 0;
	struct lttng_ht_node_str *ua_chan_node;
	struct lttng_ht_iter iter, uiter;
	struct ust_app_channel *ua_chan = NULL;
	struct ust_app_session *ua_sess;
	struct ust_app *app;

	assert(usess->active);

	rcu_read_lock();
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (ua_sess == NULL) {
			continue;
		}

		pthread_mutex_lock(&ua_sess->lock);

		/* Session may have been marked as deleted before locking. */
		if (ua_sess->deleted) {
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		/* Lookup channel in the ust app session */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		if (ua_chan_node == NULL) {
			/* Channel does not exist on this application; skip it. */
			goto next_app;
		}
		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel,
				node);
		ret = create_ust_app_channel_context(ua_chan, &uctx->ctx, app);
		if (ret < 0) {
			/* Keep iterating; 'ret' retains the last error. */
			goto next_app;
		}
	next_app:
		pthread_mutex_unlock(&ua_sess->lock);
	}

	rcu_read_unlock();
	return ret;
}
6224
6225 /*
6226 * Receive registration and populate the given msg structure.
6227 *
6228 * On success return 0 else a negative value returned by the ustctl call.
6229 */
6230 int ust_app_recv_registration(int sock, struct ust_register_msg *msg)
6231 {
6232 int ret;
6233 uint32_t pid, ppid, uid, gid;
6234
6235 assert(msg);
6236
6237 ret = lttng_ust_ctl_recv_reg_msg(sock, &msg->type, &msg->major, &msg->minor,
6238 &pid, &ppid, &uid, &gid,
6239 &msg->bits_per_long,
6240 &msg->uint8_t_alignment,
6241 &msg->uint16_t_alignment,
6242 &msg->uint32_t_alignment,
6243 &msg->uint64_t_alignment,
6244 &msg->long_alignment,
6245 &msg->byte_order,
6246 msg->name);
6247 if (ret < 0) {
6248 switch (-ret) {
6249 case EPIPE:
6250 case ECONNRESET:
6251 case LTTNG_UST_ERR_EXITING:
6252 DBG3("UST app recv reg message failed. Application died");
6253 break;
6254 case LTTNG_UST_ERR_UNSUP_MAJOR:
6255 ERR("UST app recv reg unsupported version %d.%d. Supporting %d.%d",
6256 msg->major, msg->minor, LTTNG_UST_ABI_MAJOR_VERSION,
6257 LTTNG_UST_ABI_MINOR_VERSION);
6258 break;
6259 default:
6260 ERR("UST app recv reg message failed with ret %d", ret);
6261 break;
6262 }
6263 goto error;
6264 }
6265 msg->pid = (pid_t) pid;
6266 msg->ppid = (pid_t) ppid;
6267 msg->uid = (uid_t) uid;
6268 msg->gid = (gid_t) gid;
6269
6270 error:
6271 return ret;
6272 }
6273
6274 /*
6275 * Return a ust app session object using the application object and the
6276 * session object descriptor has a key. If not found, NULL is returned.
6277 * A RCU read side lock MUST be acquired when calling this function.
6278 */
6279 static struct ust_app_session *find_session_by_objd(struct ust_app *app,
6280 int objd)
6281 {
6282 struct lttng_ht_node_ulong *node;
6283 struct lttng_ht_iter iter;
6284 struct ust_app_session *ua_sess = NULL;
6285
6286 assert(app);
6287
6288 lttng_ht_lookup(app->ust_sessions_objd, (void *)((unsigned long) objd), &iter);
6289 node = lttng_ht_iter_get_node_ulong(&iter);
6290 if (node == NULL) {
6291 DBG2("UST app session find by objd %d not found", objd);
6292 goto error;
6293 }
6294
6295 ua_sess = caa_container_of(node, struct ust_app_session, ust_objd_node);
6296
6297 error:
6298 return ua_sess;
6299 }
6300
6301 /*
6302 * Return a ust app channel object using the application object and the channel
6303 * object descriptor has a key. If not found, NULL is returned. A RCU read side
6304 * lock MUST be acquired before calling this function.
6305 */
6306 static struct ust_app_channel *find_channel_by_objd(struct ust_app *app,
6307 int objd)
6308 {
6309 struct lttng_ht_node_ulong *node;
6310 struct lttng_ht_iter iter;
6311 struct ust_app_channel *ua_chan = NULL;
6312
6313 assert(app);
6314
6315 lttng_ht_lookup(app->ust_objd, (void *)((unsigned long) objd), &iter);
6316 node = lttng_ht_iter_get_node_ulong(&iter);
6317 if (node == NULL) {
6318 DBG2("UST app channel find by objd %d not found", objd);
6319 goto error;
6320 }
6321
6322 ua_chan = caa_container_of(node, struct ust_app_channel, ust_objd_node);
6323
6324 error:
6325 return ua_chan;
6326 }
6327
/*
 * Fixup legacy context fields for comparison:
 * - legacy array becomes array_nestable,
 * - legacy struct becomes struct_nestable,
 * - legacy variant becomes variant_nestable,
 * legacy sequences are not emitted in LTTng-UST contexts.
 *
 * On success, if any legacy type was present, *_fields is replaced by a
 * newly-allocated translated array (the original array is freed) and
 * *_nr_fields is updated accordingly; otherwise both are left untouched.
 *
 * Returns 0 on success, -EINVAL on an unexpected legacy type, -ENOMEM on
 * allocation failure. On error the caller keeps ownership of *_fields.
 */
static int ust_app_fixup_legacy_context_fields(size_t *_nr_fields,
		struct lttng_ust_ctl_field **_fields)
{
	struct lttng_ust_ctl_field *fields = *_fields, *new_fields = NULL;
	size_t nr_fields = *_nr_fields, new_nr_fields = 0, i, j;
	bool found = false;
	int ret = 0;

	/* First pass: validate legacy types and compute the new field count. */
	for (i = 0; i < nr_fields; i++) {
		const struct lttng_ust_ctl_field *field = &fields[i];

		switch (field->type.atype) {
		case lttng_ust_ctl_atype_sequence:
			ERR("Unexpected legacy sequence context.");
			ret = -EINVAL;
			goto end;
		case lttng_ust_ctl_atype_array:
			/* Only integer elements are expected in legacy context arrays. */
			switch (field->type.u.legacy.array.elem_type.atype) {
			case lttng_ust_ctl_atype_integer:
				break;
			default:
				ERR("Unexpected legacy array element type in context.");
				ret = -EINVAL;
				goto end;
			}
			found = true;
			/* One field for array_nested, one field for elem type. */
			new_nr_fields += 2;
			break;

		case lttng_ust_ctl_atype_struct:	/* Fallthrough */
		case lttng_ust_ctl_atype_variant:
			found = true;
			new_nr_fields++;
			break;
		default:
			new_nr_fields++;
			break;
		}
	}
	if (!found) {
		/* No legacy type present: leave the caller's array untouched. */
		goto end;
	}
	/* zmalloc zero-fills, so untouched members (e.g. names) are initialized. */
	new_fields = (struct lttng_ust_ctl_field *) zmalloc(sizeof(*new_fields) * new_nr_fields);
	if (!new_fields) {
		ret = -ENOMEM;
		goto end;
	}
	/* Second pass: translate every legacy field into its nestable form. */
	for (i = 0, j = 0; i < nr_fields; i++, j++) {
		const struct lttng_ust_ctl_field *field = &fields[i];
		struct lttng_ust_ctl_field *new_field = &new_fields[j];

		switch (field->type.atype) {
		case lttng_ust_ctl_atype_array:
			/* One field for array_nested, one field for elem type. */
			strncpy(new_field->name, field->name, LTTNG_UST_ABI_SYM_NAME_LEN - 1);
			new_field->type.atype = lttng_ust_ctl_atype_array_nestable;
			new_field->type.u.array_nestable.length = field->type.u.legacy.array.length;
			new_field->type.u.array_nestable.alignment = 0;
			new_field = &new_fields[++j];	/* elem type */
			new_field->type.atype = field->type.u.legacy.array.elem_type.atype;
			/* Guaranteed by the validation done in the first pass. */
			assert(new_field->type.atype == lttng_ust_ctl_atype_integer);
			new_field->type.u.integer = field->type.u.legacy.array.elem_type.u.basic.integer;
			break;
		case lttng_ust_ctl_atype_struct:
			strncpy(new_field->name, field->name, LTTNG_UST_ABI_SYM_NAME_LEN - 1);
			new_field->type.atype = lttng_ust_ctl_atype_struct_nestable;
			new_field->type.u.struct_nestable.nr_fields = field->type.u.legacy._struct.nr_fields;
			new_field->type.u.struct_nestable.alignment = 0;
			break;
		case lttng_ust_ctl_atype_variant:
			strncpy(new_field->name, field->name, LTTNG_UST_ABI_SYM_NAME_LEN - 1);
			new_field->type.atype = lttng_ust_ctl_atype_variant_nestable;
			new_field->type.u.variant_nestable.nr_choices = field->type.u.legacy.variant.nr_choices;
			strncpy(new_field->type.u.variant_nestable.tag_name,
				field->type.u.legacy.variant.tag_name,
				LTTNG_UST_ABI_SYM_NAME_LEN - 1);
			new_field->type.u.variant_nestable.alignment = 0;
			break;
		default:
			/* Non-legacy field: copy verbatim. */
			*new_field = *field;
			break;
		}
	}
	/* Transfer ownership of the translated array to the caller. */
	free(fields);
	*_fields = new_fields;
	*_nr_fields = new_nr_fields;
end:
	return ret;
}
6425
/*
 * Reply to a register channel notification from an application on the notify
 * socket. The channel metadata is also created.
 *
 * The session UST registry lock is acquired in this function.
 *
 * This function takes ownership of `fields`: it is either handed over to the
 * registry (first registration) or freed before returning.
 *
 * On success 0 is returned else a negative value.
 */
static int reply_ust_register_channel(int sock, int cobjd,
		size_t nr_fields, struct lttng_ust_ctl_field *fields)
{
	int ret, ret_code = 0;
	uint32_t chan_id;
	uint64_t chan_reg_key;
	enum lttng_ust_ctl_channel_header type = LTTNG_UST_CTL_CHANNEL_HEADER_UNKNOWN;
	struct ust_app *app;
	struct ust_app_channel *ua_chan;
	struct ust_app_session *ua_sess;
	struct ust_registry_session *registry;
	struct ust_registry_channel *ust_reg_chan;

	rcu_read_lock();

	/* Lookup application. If not found, there is a code flow error. */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		DBG("Application socket %d is being torn down. Abort event notify",
				sock);
		ret = -1;
		goto error_rcu_unlock;
	}

	/* Lookup channel by UST object descriptor. */
	ua_chan = find_channel_by_objd(app, cobjd);
	if (!ua_chan) {
		/* Not an error: the channel is simply going away. */
		DBG("Application channel is being torn down. Abort event notify");
		ret = 0;
		goto error_rcu_unlock;
	}

	assert(ua_chan->session);
	ua_sess = ua_chan->session;

	/* Get right session registry depending on the session buffer type. */
	registry = get_session_registry(ua_sess);
	if (!registry) {
		DBG("Application session is being torn down. Abort event notify");
		ret = 0;
		goto error_rcu_unlock;
	};

	/* Depending on the buffer type, a different channel key is used. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
		chan_reg_key = ua_chan->tracing_channel_id;
	} else {
		chan_reg_key = ua_chan->key;
	}

	pthread_mutex_lock(&registry->lock);

	ust_reg_chan = ust_registry_channel_find(registry, chan_reg_key);
	assert(ust_reg_chan);

	/* Channel id is set during the object creation. */
	chan_id = ust_reg_chan->chan_id;

	/* On error, `fields` is left intact and freed on the way out. */
	ret = ust_app_fixup_legacy_context_fields(&nr_fields, &fields);
	if (ret < 0) {
		ERR("Registering application channel due to legacy context fields fixup error: pid = %d, sock = %d",
				app->pid, app->sock);
		ret_code = -EINVAL;
		goto reply;
	}
	if (!ust_reg_chan->register_done) {
		/*
		 * TODO: eventually use the registry event count for
		 * this channel to better guess header type for per-pid
		 * buffers.
		 */
		type = LTTNG_UST_CTL_CHANNEL_HEADER_LARGE;
		ust_reg_chan->nr_ctx_fields = nr_fields;
		/* Ownership of `fields` is transferred to the registry. */
		ust_reg_chan->ctx_fields = fields;
		fields = NULL;
		ust_reg_chan->header_type = type;
	} else {
		/* Get current already assigned values. */
		type = ust_reg_chan->header_type;
		/*
		 * Validate that the context fields match between
		 * registry and newcoming application.
		 */
		if (!match_lttng_ust_ctl_field_array(ust_reg_chan->ctx_fields,
				ust_reg_chan->nr_ctx_fields,
				fields, nr_fields)) {
			ERR("Registering application channel due to context field mismatch: pid = %d, sock = %d",
					app->pid, app->sock);
			ret_code = -EINVAL;
			goto reply;
		}
	}

	/* Append to metadata */
	if (!ust_reg_chan->metadata_dumped) {
		ret_code = ust_metadata_channel_statedump(registry, ust_reg_chan);
		if (ret_code) {
			ERR("Error appending channel metadata (errno = %d)", ret_code);
			goto reply;
		}
	}

reply:
	/* Always reply to the app, forwarding ret_code on failure. */
	DBG3("UST app replying to register channel key %" PRIu64
			" with id %u, type = %d, ret = %d", chan_reg_key, chan_id, type,
			ret_code);

	ret = lttng_ust_ctl_reply_register_channel(sock, chan_id, type, ret_code);
	if (ret < 0) {
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			DBG3("UST app reply channel failed. Application died: pid = %d, sock = %d",
					app->pid, app->sock);
		} else if (ret == -EAGAIN) {
			WARN("UST app reply channel failed. Communication time out: pid = %d, sock = %d",
					app->pid, app->sock);
		} else {
			ERR("UST app reply channel failed with ret %d: pid = %d, sock = %d",
					ret, app->pid, app->sock);
		}
		goto error;
	}

	/* This channel registry registration is completed. */
	ust_reg_chan->register_done = 1;

error:
	pthread_mutex_unlock(&registry->lock);
error_rcu_unlock:
	rcu_read_unlock();
	/* No-op when ownership was transferred to the registry (fields == NULL). */
	free(fields);
	return ret;
}
6566
/*
 * Add event to the UST channel registry. When the event is added to the
 * registry, the metadata is also created. Once done, this replies to the
 * application with the appropriate error code.
 *
 * The session UST registry lock is acquired in the function.
 *
 * This function takes ownership of `sig`, `fields` and `model_emf_uri`; they
 * are either consumed by the registry or freed before returning.
 *
 * On success 0 is returned else a negative value.
 */
static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
		char *sig, size_t nr_fields, struct lttng_ust_ctl_field *fields,
		int loglevel_value, char *model_emf_uri)
{
	int ret, ret_code;
	uint32_t event_id = 0;
	uint64_t chan_reg_key;
	struct ust_app *app;
	struct ust_app_channel *ua_chan;
	struct ust_app_session *ua_sess;
	struct ust_registry_session *registry;

	rcu_read_lock();

	/* Lookup application. If not found, there is a code flow error. */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		DBG("Application socket %d is being torn down. Abort event notify",
				sock);
		ret = -1;
		goto error_rcu_unlock;
	}

	/* Lookup channel by UST object descriptor. */
	ua_chan = find_channel_by_objd(app, cobjd);
	if (!ua_chan) {
		/* Not an error: the channel is simply going away. */
		DBG("Application channel is being torn down. Abort event notify");
		ret = 0;
		goto error_rcu_unlock;
	}

	assert(ua_chan->session);
	ua_sess = ua_chan->session;

	registry = get_session_registry(ua_sess);
	if (!registry) {
		DBG("Application session is being torn down. Abort event notify");
		ret = 0;
		goto error_rcu_unlock;
	}

	/* Depending on the buffer type, a different channel key is used. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
		chan_reg_key = ua_chan->tracing_channel_id;
	} else {
		chan_reg_key = ua_chan->key;
	}

	pthread_mutex_lock(&registry->lock);

	/*
	 * From this point on, this call acquires the ownership of the sig, fields
	 * and model_emf_uri meaning any free are done inside it if needed. These
	 * three variables MUST NOT be read/write after this.
	 */
	ret_code = ust_registry_create_event(registry, chan_reg_key,
			sobjd, cobjd, name, sig, nr_fields, fields,
			loglevel_value, model_emf_uri, ua_sess->buffer_type,
			&event_id, app);
	sig = NULL;
	fields = NULL;
	model_emf_uri = NULL;

	/*
	 * The return value is returned to ustctl so in case of an error, the
	 * application can be notified. In case of an error, it's important not to
	 * return a negative error or else the application will get closed.
	 */
	ret = lttng_ust_ctl_reply_register_event(sock, event_id, ret_code);
	if (ret < 0) {
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			DBG3("UST app reply event failed. Application died: pid = %d, sock = %d.",
					app->pid, app->sock);
		} else if (ret == -EAGAIN) {
			WARN("UST app reply event failed. Communication time out: pid = %d, sock = %d",
					app->pid, app->sock);
		} else {
			ERR("UST app reply event failed with ret %d: pid = %d, sock = %d",
					ret, app->pid, app->sock);
		}
		/*
		 * No need to wipe the create event since the application socket will
		 * get close on error hence cleaning up everything by itself.
		 */
		goto error;
	}

	DBG3("UST registry event %s with id %" PRId32 " added successfully",
			name, event_id);

error:
	pthread_mutex_unlock(&registry->lock);
error_rcu_unlock:
	rcu_read_unlock();
	/* All no-ops once ownership was transferred (pointers NULLed above). */
	free(sig);
	free(fields);
	free(model_emf_uri);
	return ret;
}
6674
/*
 * Add enum to the UST session registry. Once done, this replies to the
 * application with the appropriate error code.
 *
 * The session UST registry lock is acquired within this function.
 *
 * This function takes ownership of `entries`; on the early-exit paths it is
 * freed here, otherwise it is consumed by the registry.
 *
 * On success 0 is returned else a negative value.
 */
static int add_enum_ust_registry(int sock, int sobjd, char *name,
		struct lttng_ust_ctl_enum_entry *entries, size_t nr_entries)
{
	int ret = 0, ret_code;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_registry_session *registry;
	uint64_t enum_id = -1ULL;

	rcu_read_lock();

	/* Lookup application. If not found, there is a code flow error. */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		/* The notify socket is going away: stop servicing it. */
		DBG("Application socket %d is being torn down. Aborting enum registration",
				sock);
		free(entries);
		ret = -1;
		goto error_rcu_unlock;
	}

	/* Lookup session by UST object descriptor. */
	ua_sess = find_session_by_objd(app, sobjd);
	if (!ua_sess) {
		/*
		 * Session teardown in progress; not a fatal condition.
		 * NOTE(review): ret stays 0 here whereas the missing-app path
		 * above returns -1 — confirm this asymmetry is intended.
		 */
		DBG("Application session is being torn down (session not found). Aborting enum registration.");
		free(entries);
		goto error_rcu_unlock;
	}

	registry = get_session_registry(ua_sess);
	if (!registry) {
		/* Registry already gone; same non-fatal teardown case as above. */
		DBG("Application session is being torn down (registry not found). Aborting enum registration.");
		free(entries);
		goto error_rcu_unlock;
	}

	pthread_mutex_lock(&registry->lock);

	/*
	 * From this point on, the callee acquires the ownership of
	 * entries. The variable entries MUST NOT be read/written after
	 * call.
	 */
	ret_code = ust_registry_create_or_find_enum(registry, sobjd, name,
			entries, nr_entries, &enum_id);
	entries = NULL;

	/*
	 * The return value is returned to ustctl so in case of an error, the
	 * application can be notified. In case of an error, it's important not to
	 * return a negative error or else the application will get closed.
	 */
	ret = lttng_ust_ctl_reply_register_enum(sock, enum_id, ret_code);
	if (ret < 0) {
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			DBG3("UST app reply enum failed. Application died: pid = %d, sock = %d",
					app->pid, app->sock);
		} else if (ret == -EAGAIN) {
			WARN("UST app reply enum failed. Communication time out: pid = %d, sock = %d",
					app->pid, app->sock);
		} else {
			ERR("UST app reply enum failed with ret %d: pid = %d, sock = %d",
					ret, app->pid, app->sock);
		}
		/*
		 * No need to wipe the create enum since the application socket will
		 * get close on error hence cleaning up everything by itself.
		 */
		goto error;
	}

	DBG3("UST registry enum %s added successfully or already found", name);

error:
	pthread_mutex_unlock(&registry->lock);
error_rcu_unlock:
	rcu_read_unlock();
	return ret;
}
6764
/*
 * Handle application notification through the given notify socket.
 *
 * Receives one command from the socket and dispatches it to the matching
 * registry helper (event, channel or enum registration). Buffers received
 * from lttng_ust_ctl are owned by the helpers, which free them as needed.
 *
 * Return 0 on success or else a negative value.
 */
int ust_app_recv_notify(int sock)
{
	int ret;
	enum lttng_ust_ctl_notify_cmd cmd;

	DBG3("UST app receiving notify from sock %d", sock);

	ret = lttng_ust_ctl_recv_notify(sock, &cmd);
	if (ret < 0) {
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			DBG3("UST app recv notify failed. Application died: sock = %d",
					sock);
		} else if (ret == -EAGAIN) {
			WARN("UST app recv notify failed. Communication time out: sock = %d",
					sock);
		} else {
			ERR("UST app recv notify failed with ret %d: sock = %d",
					ret, sock);
		}
		goto error;
	}

	switch (cmd) {
	case LTTNG_UST_CTL_NOTIFY_CMD_EVENT:
	{
		int sobjd, cobjd, loglevel_value;
		char name[LTTNG_UST_ABI_SYM_NAME_LEN], *sig, *model_emf_uri;
		size_t nr_fields;
		struct lttng_ust_ctl_field *fields;

		DBG2("UST app ustctl register event received");

		/* sig, fields and model_emf_uri are allocated by this call. */
		ret = lttng_ust_ctl_recv_register_event(sock, &sobjd, &cobjd, name,
				&loglevel_value, &sig, &nr_fields, &fields,
				&model_emf_uri);
		if (ret < 0) {
			if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
				DBG3("UST app recv event failed. Application died: sock = %d",
						sock);
			} else if (ret == -EAGAIN) {
				WARN("UST app recv event failed. Communication time out: sock = %d",
						sock);
			} else {
				ERR("UST app recv event failed with ret %d: sock = %d",
						ret, sock);
			}
			goto error;
		}

		/*
		 * Add event to the UST registry coming from the notify socket. This
		 * call will free if needed the sig, fields and model_emf_uri. This
		 * code path loses the ownsership of these variables and transfer them
		 * to the this function.
		 */
		ret = add_event_ust_registry(sock, sobjd, cobjd, name, sig, nr_fields,
				fields, loglevel_value, model_emf_uri);
		if (ret < 0) {
			goto error;
		}

		break;
	}
	case LTTNG_UST_CTL_NOTIFY_CMD_CHANNEL:
	{
		int sobjd, cobjd;
		size_t nr_fields;
		struct lttng_ust_ctl_field *fields;

		DBG2("UST app ustctl register channel received");

		/* fields is allocated by this call. */
		ret = lttng_ust_ctl_recv_register_channel(sock, &sobjd, &cobjd, &nr_fields,
				&fields);
		if (ret < 0) {
			if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
				DBG3("UST app recv channel failed. Application died: sock = %d",
						sock);
			} else if (ret == -EAGAIN) {
				WARN("UST app recv channel failed. Communication time out: sock = %d",
						sock);
			} else {
				ERR("UST app recv channel failed with ret %d: sock = %d)",
						ret, sock);
			}
			goto error;
		}

		/*
		 * The fields ownership are transfered to this function call meaning
		 * that if needed it will be freed. After this, it's invalid to access
		 * fields or clean it up.
		 */
		ret = reply_ust_register_channel(sock, cobjd, nr_fields,
				fields);
		if (ret < 0) {
			goto error;
		}

		break;
	}
	case LTTNG_UST_CTL_NOTIFY_CMD_ENUM:
	{
		int sobjd;
		char name[LTTNG_UST_ABI_SYM_NAME_LEN];
		size_t nr_entries;
		struct lttng_ust_ctl_enum_entry *entries;

		DBG2("UST app ustctl register enum received");

		/* entries is allocated by this call. */
		ret = lttng_ust_ctl_recv_register_enum(sock, &sobjd, name,
				&entries, &nr_entries);
		if (ret < 0) {
			if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
				DBG3("UST app recv enum failed. Application died: sock = %d",
						sock);
			} else if (ret == -EAGAIN) {
				WARN("UST app recv enum failed. Communication time out: sock = %d",
						sock);
			} else {
				ERR("UST app recv enum failed with ret %d: sock = %d",
						ret, sock);
			}
			goto error;
		}

		/* Callee assumes ownership of entries */
		ret = add_enum_ust_registry(sock, sobjd, name,
				entries, nr_entries);
		if (ret < 0) {
			goto error;
		}

		break;
	}
	default:
		/* Should NEVER happen. */
		assert(0);
	}

error:
	return ret;
}
6912
/*
 * Once the notify socket hangs up, this is called. First, it tries to find the
 * corresponding application. On failure, the call_rcu to close the socket is
 * executed. If an application is found, it tries to delete it from the notify
 * socket hash table. Whatever the result, it proceeds to the call_rcu.
 *
 * Note that an object needs to be allocated here so on ENOMEM failure, the
 * call RCU is not done but the rest of the cleanup is.
 */
void ust_app_notify_sock_unregister(int sock)
{
	int err_enomem = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct ust_app_notify_sock_obj *obj;

	assert(sock >= 0);

	rcu_read_lock();

	/* Carrier for the fd handed to the deferred close callback. */
	obj = zmalloc(sizeof(*obj));
	if (!obj) {
		/*
		 * An ENOMEM is kind of uncool. If this strikes we continue the
		 * procedure but the call_rcu will not be called. In this case, we
		 * accept the fd leak rather than possibly creating an unsynchronized
		 * state between threads.
		 *
		 * TODO: The notify object should be created once the notify socket is
		 * registered and stored independently from the ust app object. The
		 * tricky part is to synchronize the teardown of the application and
		 * this notify object. Let's keep that in mind so we can avoid this
		 * kind of shenanigans with ENOMEM in the teardown path.
		 */
		err_enomem = 1;
	} else {
		obj->fd = sock;
	}

	DBG("UST app notify socket unregister %d", sock);

	/*
	 * Lookup application by notify socket. If this fails, this means that the
	 * hash table delete has already been done by the application
	 * unregistration process so we can safely close the notify socket in a
	 * call RCU.
	 */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		goto close_socket;
	}

	iter.iter.node = &app->notify_sock_n.node;

	/*
	 * Whatever happens here either we fail or succeed, in both cases we have
	 * to close the socket after a grace period to continue to the call RCU
	 * here. If the deletion is successful, the application is not visible
	 * anymore by other threads and is it fails it means that it was already
	 * deleted from the hash table so either way we just have to close the
	 * socket.
	 */
	(void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);

close_socket:
	rcu_read_unlock();

	/*
	 * Close socket after a grace period to avoid for the socket to be reused
	 * before the application object is freed creating potential race between
	 * threads trying to add unique in the global hash table.
	 */
	if (!err_enomem) {
		call_rcu(&obj->head, close_notify_sock_rcu);
	}
}
6989
6990 /*
6991 * Destroy a ust app data structure and free its memory.
6992 */
6993 void ust_app_destroy(struct ust_app *app)
6994 {
6995 if (!app) {
6996 return;
6997 }
6998
6999 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
7000 }
7001
/*
 * Take a snapshot for a given UST session. The snapshot is sent to the given
 * output.
 *
 * Per-UID buffers are snapshotted through the per-UID buffer registries;
 * per-PID buffers are snapshotted by iterating over every registered app.
 * In both cases the data channels are recorded first, then the metadata
 * channel.
 *
 * Returns LTTNG_OK on success or a LTTNG_ERR error code.
 */
enum lttng_error_code ust_app_snapshot_record(
		const struct ltt_ust_session *usess,
		const struct consumer_output *output, int wait,
		uint64_t nb_packets_per_stream)
{
	int ret = 0;
	enum lttng_error_code status = LTTNG_OK;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	char *trace_path = NULL;

	assert(usess);
	assert(output);

	rcu_read_lock();

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;

		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct buffer_reg_channel *buf_reg_chan;
			struct consumer_socket *socket;
			char pathname[PATH_MAX];
			size_t consumer_path_offset = 0;

			if (!reg->registry->reg.ust->metadata_key) {
				/* Skip since no metadata is present */
				continue;
			}

			/* Get consumer socket to use to push the metadata.*/
			socket = consumer_find_socket_by_bitness(reg->bits_per_long,
					usess->consumer);
			if (!socket) {
				status = LTTNG_ERR_INVALID;
				goto error;
			}

			memset(pathname, 0, sizeof(pathname));
			ret = snprintf(pathname, sizeof(pathname),
					DEFAULT_UST_TRACE_UID_PATH,
					reg->uid, reg->bits_per_long);
			if (ret < 0) {
				PERROR("snprintf snapshot path");
				status = LTTNG_ERR_INVALID;
				goto error;
			}
			/* Free path allowed on previous iteration. */
			free(trace_path);
			trace_path = setup_channel_trace_path(usess->consumer, pathname,
					&consumer_path_offset);
			if (!trace_path) {
				status = LTTNG_ERR_INVALID;
				goto error;
			}
			/* Add the UST default trace dir to path. */
			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
					buf_reg_chan, node.node) {
				/* Snapshot every data channel of this registry. */
				status = consumer_snapshot_channel(socket,
						buf_reg_chan->consumer_key,
						output, 0, usess->uid,
						usess->gid, &trace_path[consumer_path_offset], wait,
						nb_packets_per_stream);
				if (status != LTTNG_OK) {
					goto error;
				}
			}
			/* Then the metadata channel (is_metadata_channel = 1). */
			status = consumer_snapshot_channel(socket,
					reg->registry->reg.ust->metadata_key, output, 1,
					usess->uid, usess->gid, &trace_path[consumer_path_offset],
					wait, 0);
			if (status != LTTNG_OK) {
				goto error;
			}
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			struct consumer_socket *socket;
			struct lttng_ht_iter chan_iter;
			struct ust_app_channel *ua_chan;
			struct ust_app_session *ua_sess;
			struct ust_registry_session *registry;
			char pathname[PATH_MAX];
			size_t consumer_path_offset = 0;

			ua_sess = lookup_session_by_app(usess, app);
			if (!ua_sess) {
				/* Session not associated with this app. */
				continue;
			}

			/* Get the right consumer socket for the application. */
			socket = consumer_find_socket_by_bitness(app->bits_per_long,
					output);
			if (!socket) {
				status = LTTNG_ERR_INVALID;
				goto error;
			}

			/* Add the UST default trace dir to path. */
			memset(pathname, 0, sizeof(pathname));
			ret = snprintf(pathname, sizeof(pathname), "%s",
					ua_sess->path);
			if (ret < 0) {
				status = LTTNG_ERR_INVALID;
				PERROR("snprintf snapshot path");
				goto error;
			}
			/* Free path allowed on previous iteration. */
			free(trace_path);
			trace_path = setup_channel_trace_path(usess->consumer, pathname,
					&consumer_path_offset);
			if (!trace_path) {
				status = LTTNG_ERR_INVALID;
				goto error;
			}
			cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
					ua_chan, node.node) {
				status = consumer_snapshot_channel(socket,
						ua_chan->key, output, 0,
						lttng_credentials_get_uid(&ua_sess->effective_credentials),
						lttng_credentials_get_gid(&ua_sess->effective_credentials),
						&trace_path[consumer_path_offset], wait,
						nb_packets_per_stream);
				switch (status) {
				case LTTNG_OK:
					break;
				case LTTNG_ERR_CHAN_NOT_FOUND:
					/* Channel vanished under us: tolerate and move on. */
					continue;
				default:
					goto error;
				}
			}

			registry = get_session_registry(ua_sess);
			if (!registry) {
				DBG("Application session is being torn down. Skip application.");
				continue;
			}
			status = consumer_snapshot_channel(socket,
					registry->metadata_key, output, 1,
					lttng_credentials_get_uid(&ua_sess->effective_credentials),
					lttng_credentials_get_gid(&ua_sess->effective_credentials),
					&trace_path[consumer_path_offset], wait, 0);
			switch (status) {
			case LTTNG_OK:
				break;
			case LTTNG_ERR_CHAN_NOT_FOUND:
				continue;
			default:
				goto error;
			}
		}
		break;
	}
	default:
		assert(0);
		break;
	}

error:
	free(trace_path);
	rcu_read_unlock();
	return status;
}
7178
/*
 * Return the size taken by one more packet per stream.
 *
 * For each channel (per-UID registry channels or per-PID app channels), adds
 * subbuf_size * stream_count unless cur_nr_packets already covers all of the
 * channel's sub-buffers.
 */
uint64_t ust_app_get_size_one_more_packet_per_stream(
		const struct ltt_ust_session *usess, uint64_t cur_nr_packets)
{
	uint64_t tot_size = 0;
	struct ust_app *app;
	struct lttng_ht_iter iter;

	assert(usess);

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;

		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct buffer_reg_channel *buf_reg_chan;

			rcu_read_lock();
			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
					buf_reg_chan, node.node) {
				if (cur_nr_packets >= buf_reg_chan->num_subbuf) {
					/*
					 * Don't take channel into account if we
					 * already grab all its packets.
					 */
					continue;
				}
				tot_size += buf_reg_chan->subbuf_size * buf_reg_chan->stream_count;
			}
			rcu_read_unlock();
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		rcu_read_lock();
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			struct ust_app_channel *ua_chan;
			struct ust_app_session *ua_sess;
			struct lttng_ht_iter chan_iter;

			ua_sess = lookup_session_by_app(usess, app);
			if (!ua_sess) {
				/* Session not associated with this app. */
				continue;
			}

			cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
					ua_chan, node.node) {
				if (cur_nr_packets >= ua_chan->attr.num_subbuf) {
					/*
					 * Don't take channel into account if we
					 * already grab all its packets.
					 */
					continue;
				}
				tot_size += ua_chan->attr.subbuf_size * ua_chan->streams.count;
			}
		}
		rcu_read_unlock();
		break;
	}
	default:
		assert(0);
		break;
	}

	return tot_size;
}
7251
7252 int ust_app_uid_get_channel_runtime_stats(uint64_t ust_session_id,
7253 struct cds_list_head *buffer_reg_uid_list,
7254 struct consumer_output *consumer, uint64_t uchan_id,
7255 int overwrite, uint64_t *discarded, uint64_t *lost)
7256 {
7257 int ret;
7258 uint64_t consumer_chan_key;
7259
7260 *discarded = 0;
7261 *lost = 0;
7262
7263 ret = buffer_reg_uid_consumer_channel_key(
7264 buffer_reg_uid_list, uchan_id, &consumer_chan_key);
7265 if (ret < 0) {
7266 /* Not found */
7267 ret = 0;
7268 goto end;
7269 }
7270
7271 if (overwrite) {
7272 ret = consumer_get_lost_packets(ust_session_id,
7273 consumer_chan_key, consumer, lost);
7274 } else {
7275 ret = consumer_get_discarded_events(ust_session_id,
7276 consumer_chan_key, consumer, discarded);
7277 }
7278
7279 end:
7280 return ret;
7281 }
7282
/*
 * Sum the per-PID runtime statistics (lost packets when `overwrite` is set,
 * discarded events otherwise) of channel `uchan` across every registered
 * application tracing session `usess`.
 *
 * *discarded and *lost are always zeroed first. Returns 0 on success or the
 * first negative consumer error encountered (partial sums may have been
 * accumulated at that point).
 */
int ust_app_pid_get_channel_runtime_stats(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan,
		struct consumer_output *consumer, int overwrite,
		uint64_t *discarded, uint64_t *lost)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;

	*discarded = 0;
	*lost = 0;

	rcu_read_lock();
	/*
	 * Iterate over every registered applications. Sum counters for
	 * all applications containing requested session and channel.
	 */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		struct lttng_ht_iter uiter;

		ua_sess = lookup_session_by_app(usess, app);
		if (ua_sess == NULL) {
			/* Session not associated with this app. */
			continue;
		}

		/* Get channel */
		lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		/* If the session is found for the app, the channel must be there */
		assert(ua_chan_node);

		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

		if (overwrite) {
			uint64_t _lost;

			ret = consumer_get_lost_packets(usess->id, ua_chan->key,
					consumer, &_lost);
			if (ret < 0) {
				break;
			}
			(*lost) += _lost;
		} else {
			uint64_t _discarded;

			ret = consumer_get_discarded_events(usess->id,
					ua_chan->key, consumer, &_discarded);
			if (ret < 0) {
				break;
			}
			(*discarded) += _discarded;
		}
	}

	rcu_read_unlock();
	return ret;
}
7343
/*
 * Ask a single application to regenerate its statedump for session `usess`.
 *
 * Lock order: ua_sess->lock is taken before app->sock_lock. A missing or
 * deleted session is silently ignored (teardown in progress).
 *
 * Returns 0 on success or the negative lttng_ust_ctl error.
 */
static
int ust_app_regenerate_statedump(struct ltt_ust_session *usess,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_session *ua_sess;

	DBG("Regenerating the metadata for ust app pid %d", app->pid);

	rcu_read_lock();

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		/* The session is in teardown process. Ignore and continue. */
		goto end;
	}

	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		/* Session marked for deletion; nothing to do. */
		goto end_unlock;
	}

	/* The app socket is shared; serialize the command on it. */
	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_regenerate_statedump(app->sock, ua_sess->handle);
	pthread_mutex_unlock(&app->sock_lock);

end_unlock:
	pthread_mutex_unlock(&ua_sess->lock);

end:
	rcu_read_unlock();
	health_code_update();
	return ret;
}
7379
7380 /*
7381 * Regenerate the statedump for each app in the session.
7382 */
7383 int ust_app_regenerate_statedump_all(struct ltt_ust_session *usess)
7384 {
7385 int ret = 0;
7386 struct lttng_ht_iter iter;
7387 struct ust_app *app;
7388
7389 DBG("Regenerating the metadata for all UST apps");
7390
7391 rcu_read_lock();
7392
7393 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
7394 if (!app->compatible) {
7395 continue;
7396 }
7397
7398 ret = ust_app_regenerate_statedump(usess, app);
7399 if (ret < 0) {
7400 /* Continue to the next app even on error */
7401 continue;
7402 }
7403 }
7404
7405 rcu_read_unlock();
7406
7407 return 0;
7408 }
7409
/*
 * Rotate all the channels of a session.
 *
 * For per-UID buffers, the rotation is driven through each UID buffer
 * registry; for per-PID buffers, through every registered application's
 * session. In both modes the data channels are rotated first, then the
 * metadata is pushed and the metadata channel is rotated last.
 *
 * Return LTTNG_OK on success or else an LTTng error code.
 */
enum lttng_error_code ust_app_rotate_session(struct ltt_session *session)
{
	int ret;
	enum lttng_error_code cmd_ret = LTTNG_OK;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct ltt_ust_session *usess = session->ust_session;

	assert(usess);

	rcu_read_lock();

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;

		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct buffer_reg_channel *buf_reg_chan;
			struct consumer_socket *socket;

			/* Get consumer socket to use to push the metadata.*/
			socket = consumer_find_socket_by_bitness(reg->bits_per_long,
					usess->consumer);
			if (!socket) {
				cmd_ret = LTTNG_ERR_INVALID;
				goto error;
			}

			/* Rotate the data channels. */
			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
					buf_reg_chan, node.node) {
				ret = consumer_rotate_channel(socket,
						buf_reg_chan->consumer_key,
						usess->uid, usess->gid,
						usess->consumer,
						/* is_metadata_channel */ false);
				if (ret < 0) {
					cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
					goto error;
				}
			}

			/*
			 * The metadata channel might not be present.
			 *
			 * Consumer stream allocation can be done
			 * asynchronously and can fail on intermediary
			 * operations (i.e add context) and lead to data
			 * channels created with no metadata channel.
			 */
			if (!reg->registry->reg.ust->metadata_key) {
				/* Skip since no metadata is present. */
				continue;
			}

			/* Best-effort: flush pending metadata before rotating it. */
			(void) push_metadata(reg->registry->reg.ust, usess->consumer);

			ret = consumer_rotate_channel(socket,
					reg->registry->reg.ust->metadata_key,
					usess->uid, usess->gid,
					usess->consumer,
					/* is_metadata_channel */ true);
			if (ret < 0) {
				cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
				goto error;
			}
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			struct consumer_socket *socket;
			struct lttng_ht_iter chan_iter;
			struct ust_app_channel *ua_chan;
			struct ust_app_session *ua_sess;
			struct ust_registry_session *registry;

			ua_sess = lookup_session_by_app(usess, app);
			if (!ua_sess) {
				/* Session not associated with this app. */
				continue;
			}

			/* Get the right consumer socket for the application. */
			socket = consumer_find_socket_by_bitness(app->bits_per_long,
					usess->consumer);
			if (!socket) {
				cmd_ret = LTTNG_ERR_INVALID;
				goto error;
			}

			registry = get_session_registry(ua_sess);
			if (!registry) {
				DBG("Application session is being torn down. Skip application.");
				continue;
			}

			/* Rotate the data channels. */
			cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
					ua_chan, node.node) {
				ret = consumer_rotate_channel(socket,
						ua_chan->key,
						lttng_credentials_get_uid(&ua_sess->effective_credentials),
						lttng_credentials_get_gid(&ua_sess->effective_credentials),
						ua_sess->consumer,
						/* is_metadata_channel */ false);
				if (ret < 0) {
					/* Per-PID buffer and application going away. */
					if (ret == -LTTNG_ERR_CHAN_NOT_FOUND)
						continue;
					cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
					goto error;
				}
			}

			/* Rotate the metadata channel. */
			(void) push_metadata(registry, usess->consumer);
			ret = consumer_rotate_channel(socket,
					registry->metadata_key,
					lttng_credentials_get_uid(&ua_sess->effective_credentials),
					lttng_credentials_get_gid(&ua_sess->effective_credentials),
					ua_sess->consumer,
					/* is_metadata_channel */ true);
			if (ret < 0) {
				/* Per-PID buffer and application going away. */
				if (ret == -LTTNG_ERR_CHAN_NOT_FOUND)
					continue;
				cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
				goto error;
			}
		}
		break;
	}
	default:
		/* Unknown buffer type; session state is corrupt. */
		assert(0);
		break;
	}

	cmd_ret = LTTNG_OK;

error:
	rcu_read_unlock();
	return cmd_ret;
}
7561
/*
 * Create, inside the session's current trace chunk, the directory
 * hierarchy that will hold the channel data and index files.
 *
 * Creating the deepest (index) subdirectory implicitly creates every
 * intermediate channel path component. In per-PID mode, the top-level
 * ust/ directory is created first so it exists even when no application
 * is currently registered.
 *
 * Return LTTNG_OK on success or else an LTTng error code.
 */
enum lttng_error_code ust_app_create_channel_subdirectories(
		const struct ltt_ust_session *usess)
{
	enum lttng_error_code ret = LTTNG_OK;
	struct lttng_ht_iter iter;
	enum lttng_trace_chunk_status chunk_status;
	char *pathname_index;
	int fmt_ret;

	assert(usess->current_trace_chunk);
	rcu_read_lock();

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;

		/* One directory per (uid, bitness) buffer registry. */
		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			fmt_ret = asprintf(&pathname_index,
				       DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH "/" DEFAULT_INDEX_DIR,
				       reg->uid, reg->bits_per_long);
			if (fmt_ret < 0) {
				ERR("Failed to format channel index directory");
				ret = LTTNG_ERR_CREATE_DIR_FAIL;
				goto error;
			}

			/*
			 * Create the index subdirectory which will take care
			 * of implicitly creating the channel's path.
			 */
			chunk_status = lttng_trace_chunk_create_subdirectory(
					usess->current_trace_chunk,
					pathname_index);
			free(pathname_index);
			if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
				ret = LTTNG_ERR_CREATE_DIR_FAIL;
				goto error;
			}
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		struct ust_app *app;

		/*
		 * Create the toplevel ust/ directory in case no apps are running.
		 */
		chunk_status = lttng_trace_chunk_create_subdirectory(
				usess->current_trace_chunk,
				DEFAULT_UST_TRACE_DIR);
		if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
			ret = LTTNG_ERR_CREATE_DIR_FAIL;
			goto error;
		}

		/* One directory per registered application session. */
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app,
				pid_n.node) {
			struct ust_app_session *ua_sess;
			struct ust_registry_session *registry;

			ua_sess = lookup_session_by_app(usess, app);
			if (!ua_sess) {
				/* Session not associated with this app. */
				continue;
			}

			registry = get_session_registry(ua_sess);
			if (!registry) {
				DBG("Application session is being torn down. Skip application.");
				continue;
			}

			fmt_ret = asprintf(&pathname_index,
					DEFAULT_UST_TRACE_DIR "/%s/" DEFAULT_INDEX_DIR,
					ua_sess->path);
			if (fmt_ret < 0) {
				ERR("Failed to format channel index directory");
				ret = LTTNG_ERR_CREATE_DIR_FAIL;
				goto error;
			}
			/*
			 * Create the index subdirectory which will take care
			 * of implicitly creating the channel's path.
			 */
			chunk_status = lttng_trace_chunk_create_subdirectory(
					usess->current_trace_chunk,
					pathname_index);
			free(pathname_index);
			if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
				ret = LTTNG_ERR_CREATE_DIR_FAIL;
				goto error;
			}
		}
		break;
	}
	default:
		abort();
	}

	ret = LTTNG_OK;
error:
	rcu_read_unlock();
	return ret;
}
7668
/*
 * Clear all the channels of a session.
 *
 * The session must be inactive. Data channels are cleared first, then the
 * pending metadata is pushed and a clear is issued on the metadata channel
 * (which, behind the scenes, is a rotation rather than a true clear).
 *
 * Consumer errors are translated at the 'error' label:
 * LTTCOMM_CONSUMERD_RELAYD_CLEAR_DISALLOWED maps to
 * LTTNG_ERR_CLEAR_RELAY_DISALLOWED, anything else to
 * LTTNG_ERR_CLEAR_FAIL_CONSUMER.
 *
 * Return LTTNG_OK on success or else an LTTng error code.
 */
enum lttng_error_code ust_app_clear_session(struct ltt_session *session)
{
	int ret;
	enum lttng_error_code cmd_ret = LTTNG_OK;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct ltt_ust_session *usess = session->ust_session;

	assert(usess);

	rcu_read_lock();

	if (usess->active) {
		ERR("Expecting inactive session %s (%" PRIu64 ")", session->name, session->id);
		cmd_ret = LTTNG_ERR_FATAL;
		goto end;
	}

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;

		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct buffer_reg_channel *buf_reg_chan;
			struct consumer_socket *socket;

			/* Get consumer socket to use to push the metadata.*/
			socket = consumer_find_socket_by_bitness(reg->bits_per_long,
					usess->consumer);
			if (!socket) {
				cmd_ret = LTTNG_ERR_INVALID;
				goto error_socket;
			}

			/* Clear the data channels. */
			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
					buf_reg_chan, node.node) {
				ret = consumer_clear_channel(socket,
						buf_reg_chan->consumer_key);
				if (ret < 0) {
					goto error;
				}
			}

			/* Best-effort: flush pending metadata first. */
			(void) push_metadata(reg->registry->reg.ust, usess->consumer);

			/*
			 * Clear the metadata channel.
			 * Metadata channel is not cleared per se but we still need to
			 * perform a rotation operation on it behind the scene.
			 */
			ret = consumer_clear_channel(socket,
					reg->registry->reg.ust->metadata_key);
			if (ret < 0) {
				goto error;
			}
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			struct consumer_socket *socket;
			struct lttng_ht_iter chan_iter;
			struct ust_app_channel *ua_chan;
			struct ust_app_session *ua_sess;
			struct ust_registry_session *registry;

			ua_sess = lookup_session_by_app(usess, app);
			if (!ua_sess) {
				/* Session not associated with this app. */
				continue;
			}

			/* Get the right consumer socket for the application. */
			socket = consumer_find_socket_by_bitness(app->bits_per_long,
					usess->consumer);
			if (!socket) {
				cmd_ret = LTTNG_ERR_INVALID;
				goto error_socket;
			}

			registry = get_session_registry(ua_sess);
			if (!registry) {
				DBG("Application session is being torn down. Skip application.");
				continue;
			}

			/* Clear the data channels. */
			cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
					ua_chan, node.node) {
				ret = consumer_clear_channel(socket, ua_chan->key);
				if (ret < 0) {
					/* Per-PID buffer and application going away. */
					if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
						continue;
					}
					goto error;
				}
			}

			(void) push_metadata(registry, usess->consumer);

			/*
			 * Clear the metadata channel.
			 * Metadata channel is not cleared per se but we still need to
			 * perform rotation operation on it behind the scene.
			 */
			ret = consumer_clear_channel(socket, registry->metadata_key);
			if (ret < 0) {
				/* Per-PID buffer and application going away. */
				if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
					continue;
				}
				goto error;
			}
		}
		break;
	}
	default:
		/* Unknown buffer type; session state is corrupt. */
		assert(0);
		break;
	}

	cmd_ret = LTTNG_OK;
	goto end;

error:
	/* Map the consumer error code to a user-facing LTTng error. */
	switch (-ret) {
	case LTTCOMM_CONSUMERD_RELAYD_CLEAR_DISALLOWED:
		cmd_ret = LTTNG_ERR_CLEAR_RELAY_DISALLOWED;
		break;
	default:
		cmd_ret = LTTNG_ERR_CLEAR_FAIL_CONSUMER;
	}

error_socket:
end:
	rcu_read_unlock();
	return cmd_ret;
}
7816
/*
 * This function skips the metadata channel as the begin/end timestamps of a
 * metadata packet are useless.
 *
 * Moreover, opening a packet after a "clear" will cause problems for live
 * sessions as it will introduce padding that was not part of the first trace
 * chunk. The relay daemon expects the content of the metadata stream of
 * successive metadata trace chunks to be strict supersets of one another.
 *
 * For example, flushing a packet at the beginning of the metadata stream of
 * a trace chunk resulting from a "clear" session command will cause the
 * size of the metadata stream of the new trace chunk to not match the size of
 * the metadata stream of the original chunk. This will confuse the relay
 * daemon as the same "offset" in a metadata stream will no longer point
 * to the same content.
 */
enum lttng_error_code ust_app_open_packets(struct ltt_session *session)
{
	enum lttng_error_code ret = LTTNG_OK;
	struct lttng_ht_iter iter;
	struct ltt_ust_session *usess = session->ust_session;

	assert(usess);

	rcu_read_lock();

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;

		/* Open a packet in every data channel of every UID registry. */
		cds_list_for_each_entry (
				reg, &usess->buffer_reg_uid_list, lnode) {
			struct buffer_reg_channel *buf_reg_chan;
			struct consumer_socket *socket;

			socket = consumer_find_socket_by_bitness(
					reg->bits_per_long, usess->consumer);
			if (!socket) {
				ret = LTTNG_ERR_FATAL;
				goto error;
			}

			cds_lfht_for_each_entry(reg->registry->channels->ht,
					&iter.iter, buf_reg_chan, node.node) {
				const int open_ret =
						consumer_open_channel_packets(
								socket,
								buf_reg_chan->consumer_key);

				if (open_ret < 0) {
					ret = LTTNG_ERR_UNK;
					goto error;
				}
			}
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		struct ust_app *app;

		/* Open a packet in every data channel of every app session. */
		cds_lfht_for_each_entry (
				ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			struct consumer_socket *socket;
			struct lttng_ht_iter chan_iter;
			struct ust_app_channel *ua_chan;
			struct ust_app_session *ua_sess;
			struct ust_registry_session *registry;

			ua_sess = lookup_session_by_app(usess, app);
			if (!ua_sess) {
				/* Session not associated with this app. */
				continue;
			}

			/* Get the right consumer socket for the application. */
			socket = consumer_find_socket_by_bitness(
					app->bits_per_long, usess->consumer);
			if (!socket) {
				ret = LTTNG_ERR_FATAL;
				goto error;
			}

			registry = get_session_registry(ua_sess);
			if (!registry) {
				DBG("Application session is being torn down. Skip application.");
				continue;
			}

			cds_lfht_for_each_entry(ua_sess->channels->ht,
					&chan_iter.iter, ua_chan, node.node) {
				const int open_ret =
						consumer_open_channel_packets(
								socket,
								ua_chan->key);

				if (open_ret < 0) {
					/*
					 * Per-PID buffer and application going
					 * away.
					 */
					if (open_ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
						continue;
					}

					ret = LTTNG_ERR_UNK;
					goto error;
				}
			}
		}
		break;
	}
	default:
		abort();
		break;
	}

error:
	rcu_read_unlock();
	return ret;
}
This page took 0.278357 seconds and 3 git commands to generate.