Remove ht-cleanup thread
[lttng-tools.git] / src / bin / lttng-sessiond / ust-app.cpp
1 /*
2 * Copyright (C) 2011 EfficiOS Inc.
3 * Copyright (C) 2016 Jérémie Galarneau <jeremie.galarneau@efficios.com>
4 *
5 * SPDX-License-Identifier: GPL-2.0-only
6 *
7 */
8
9 #define _LGPL_SOURCE
10 #include <errno.h>
11 #include <fcntl.h>
12 #include <inttypes.h>
13 #include <pthread.h>
14 #include <stdio.h>
15 #include <stdlib.h>
16 #include <string.h>
17 #include <sys/mman.h>
18 #include <sys/stat.h>
19 #include <sys/types.h>
20 #include <unistd.h>
21 #include <urcu/compiler.h>
22 #include <signal.h>
23
24 #include <common/bytecode/bytecode.h>
25 #include <common/compat/errno.h>
26 #include <common/common.h>
27 #include <common/hashtable/utils.h>
28 #include <lttng/event-rule/event-rule.h>
29 #include <lttng/event-rule/event-rule-internal.h>
30 #include <lttng/event-rule/user-tracepoint.h>
31 #include <lttng/condition/condition.h>
32 #include <lttng/condition/event-rule-matches-internal.h>
33 #include <lttng/condition/event-rule-matches.h>
34 #include <lttng/trigger/trigger-internal.h>
35 #include <common/sessiond-comm/sessiond-comm.h>
36
37 #include "buffer-registry.h"
38 #include "condition-internal.h"
39 #include "fd-limit.h"
40 #include "health-sessiond.h"
41 #include "ust-app.h"
42 #include "ust-consumer.h"
43 #include "lttng-ust-ctl.h"
44 #include "lttng-ust-error.h"
45 #include "utils.h"
46 #include "session.h"
47 #include "lttng-sessiond.h"
48 #include "notification-thread-commands.h"
49 #include "rotate.h"
50 #include "event.h"
51 #include "event-notifier-error-accounting.h"
52 #include "ust-field-utils.h"
53
54 struct lttng_ht *ust_app_ht;
55 struct lttng_ht *ust_app_ht_by_sock;
56 struct lttng_ht *ust_app_ht_by_notify_sock;
57
58 static
59 int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess);
60
61 /* Next available channel key. Access under next_channel_key_lock. */
62 static uint64_t _next_channel_key;
63 static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;
64
65 /* Next available session ID. Access under next_session_id_lock. */
66 static uint64_t _next_session_id;
67 static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;
68
69 /*
70 * Return the incremented value of next_channel_key.
71 */
72 static uint64_t get_next_channel_key(void)
73 {
74 uint64_t ret;
75
76 pthread_mutex_lock(&next_channel_key_lock);
77 ret = ++_next_channel_key;
78 pthread_mutex_unlock(&next_channel_key_lock);
79 return ret;
80 }
81
82 /*
83 * Return the atomically incremented value of next_session_id.
84 */
85 static uint64_t get_next_session_id(void)
86 {
87 uint64_t ret;
88
89 pthread_mutex_lock(&next_session_id_lock);
90 ret = ++_next_session_id;
91 pthread_mutex_unlock(&next_session_id_lock);
92 return ret;
93 }
94
/*
 * Copy a UST ABI channel attribute structure into the ust-ctl consumer
 * channel attribute structure. Only fields common to both layouts are
 * copied; a field-by-field copy is required since the two structs differ.
 */
static void copy_channel_attr_to_ustctl(
		struct lttng_ust_ctl_consumer_channel_attr *attr,
		struct lttng_ust_abi_channel_attr *uattr)
{
	/* Copy channel attributes since the layout is different. */
	attr->subbuf_size = uattr->subbuf_size;
	attr->num_subbuf = uattr->num_subbuf;
	attr->overwrite = uattr->overwrite;
	attr->switch_timer_interval = uattr->switch_timer_interval;
	attr->read_timer_interval = uattr->read_timer_interval;
	attr->output = (lttng_ust_abi_output) uattr->output;
	attr->blocking_timeout = uattr->u.s.blocking_timeout;
}
108
/*
 * Match function for the hash table lookup.
 *
 * It matches an ust app event against a struct ust_app_ht_key on four
 * attributes: event name, filter bytecode, loglevel and exclusions.
 * Returns 1 on match, 0 otherwise.
 */
static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
{
	struct ust_app_event *event;
	const struct ust_app_ht_key *key;
	int ev_loglevel_value;

	LTTNG_ASSERT(node);
	LTTNG_ASSERT(_key);

	/* Resolve the hash table node back to its enclosing event object. */
	event = caa_container_of(node, struct ust_app_event, node.node);
	key = (ust_app_ht_key *) _key;
	ev_loglevel_value = event->attr.loglevel;

	/* Match the 4 elements of the key: name, filter, loglevel, exclusions */

	/* Event name */
	if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
		goto no_match;
	}

	/* Event loglevel. */
	if (ev_loglevel_value != key->loglevel_type) {
		if (event->attr.loglevel_type == LTTNG_UST_ABI_LOGLEVEL_ALL
				&& key->loglevel_type == 0 &&
				ev_loglevel_value == -1) {
			/*
			 * Match is accepted. This is because on event creation, the
			 * loglevel is set to -1 if the event loglevel type is ALL so 0 and
			 * -1 are accepted for this loglevel type since 0 is the one set by
			 * the API when receiving an enable event.
			 */
		} else {
			goto no_match;
		}
	}

	/* One of the filters is NULL, fail. */
	if ((key->filter && !event->filter) || (!key->filter && event->filter)) {
		goto no_match;
	}

	if (key->filter && event->filter) {
		/* Both filters exists, check length followed by the bytecode. */
		if (event->filter->len != key->filter->len ||
				memcmp(event->filter->data, key->filter->data,
					event->filter->len) != 0) {
			goto no_match;
		}
	}

	/* One of the exclusions is NULL, fail. */
	if ((key->exclusion && !event->exclusion) || (!key->exclusion && event->exclusion)) {
		goto no_match;
	}

	if (key->exclusion && event->exclusion) {
		/* Both exclusions exists, check count followed by the names. */
		if (event->exclusion->count != key->exclusion->count ||
				memcmp(event->exclusion->names, key->exclusion->names,
					event->exclusion->count * LTTNG_UST_ABI_SYM_NAME_LEN) != 0) {
			goto no_match;
		}
	}


	/* Match. */
	return 1;

no_match:
	return 0;
}
186
/*
 * Unique add of an ust app event in the given ht. This uses the custom
 * ht_match_ust_app_event match function and the event name as hash.
 *
 * The caller must guarantee the event is not already present: the assert
 * below fires if cds_lfht_add_unique() finds an existing matching node.
 */
static void add_unique_ust_app_event(struct ust_app_channel *ua_chan,
		struct ust_app_event *event)
{
	struct cds_lfht_node *node_ptr;
	struct ust_app_ht_key key;
	struct lttng_ht *ht;

	LTTNG_ASSERT(ua_chan);
	LTTNG_ASSERT(ua_chan->events);
	LTTNG_ASSERT(event);

	ht = ua_chan->events;
	/* Build the 4-part lookup key matching ht_match_ust_app_event(). */
	key.name = event->attr.name;
	key.filter = event->filter;
	key.loglevel_type = (lttng_ust_abi_loglevel_type) event->attr.loglevel;
	key.exclusion = event->exclusion;

	node_ptr = cds_lfht_add_unique(ht->ht,
			ht->hash_fct(event->node.key, lttng_ht_seed),
			ht_match_ust_app_event, &key, &event->node.node);
	/* add_unique returns the existing node on duplicate; must be ours. */
	LTTNG_ASSERT(node_ptr == &event->node.node);
}
213
/*
 * Close the notify socket from the given RCU head object. This MUST be called
 * through a call_rcu().
 *
 * Closes the fd, releases its LTTNG_FD_APPS accounting slot and frees the
 * container object once all RCU readers are done with it.
 */
static void close_notify_sock_rcu(struct rcu_head *head)
{
	int ret;
	struct ust_app_notify_sock_obj *obj =
		caa_container_of(head, struct ust_app_notify_sock_obj, head);

	/* Must have a valid fd here. */
	LTTNG_ASSERT(obj->fd >= 0);

	ret = close(obj->fd);
	if (ret) {
		ERR("close notify sock %d RCU", obj->fd);
	}
	/* Return the fd slot reserved for this notify socket. */
	lttng_fd_put(LTTNG_FD_APPS, 1);

	free(obj);
}
235
/*
 * Return the session registry according to the buffer type of the given
 * session.
 *
 * A registry per UID object MUST exists before calling this function or else
 * it LTTNG_ASSERT() if not found. RCU read side lock must be acquired.
 *
 * Returns NULL when the corresponding buffer registry cannot be found
 * (possible on error paths during initialization).
 */
static struct ust_registry_session *get_session_registry(
		struct ust_app_session *ua_sess)
{
	struct ust_registry_session *registry = NULL;

	LTTNG_ASSERT(ua_sess);

	switch (ua_sess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
	{
		/* Per-PID registries are keyed on the ust app session id. */
		struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
		if (!reg_pid) {
			goto error;
		}
		registry = reg_pid->registry->reg.ust;
		break;
	}
	case LTTNG_BUFFER_PER_UID:
	{
		/* Per-UID registries are keyed on (session, bitness, uid). */
		struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
				ua_sess->tracing_id, ua_sess->bits_per_long,
				lttng_credentials_get_uid(&ua_sess->real_credentials));
		if (!reg_uid) {
			goto error;
		}
		registry = reg_uid->registry->reg.ust;
		break;
	}
	default:
		abort();
	};

error:
	return registry;
}
278
/*
 * Delete ust context safely. RCU read lock must be held before calling
 * this function.
 *
 * Releases the tracer-side object (if any) over the app socket, then frees
 * the context. A dead or unresponsive application is logged but not fatal.
 */
static
void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx,
		struct ust_app *app)
{
	int ret;

	LTTNG_ASSERT(ua_ctx);

	if (ua_ctx->obj) {
		/* Serialize ust-ctl commands on the per-app socket. */
		pthread_mutex_lock(&app->sock_lock);
		ret = lttng_ust_ctl_release_object(sock, ua_ctx->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0) {
			if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
				DBG3("UST app release ctx failed. Application is dead: pid = %d, sock = %d",
						app->pid, app->sock);
			} else if (ret == -EAGAIN) {
				WARN("UST app release ctx failed. Communication time out: pid = %d, sock = %d",
						app->pid, app->sock);
			} else {
				ERR("UST app release ctx obj handle %d failed with ret %d: pid = %d, sock = %d",
						ua_ctx->obj->handle, ret,
						app->pid, app->sock);
			}
		}
		free(ua_ctx->obj);
	}
	free(ua_ctx);
}
312
313 /*
314 * Delete ust app event safely. RCU read lock must be held before calling
315 * this function.
316 */
317 static
318 void delete_ust_app_event(int sock, struct ust_app_event *ua_event,
319 struct ust_app *app)
320 {
321 int ret;
322
323 LTTNG_ASSERT(ua_event);
324
325 free(ua_event->filter);
326 if (ua_event->exclusion != NULL)
327 free(ua_event->exclusion);
328 if (ua_event->obj != NULL) {
329 pthread_mutex_lock(&app->sock_lock);
330 ret = lttng_ust_ctl_release_object(sock, ua_event->obj);
331 pthread_mutex_unlock(&app->sock_lock);
332 if (ret < 0) {
333 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
334 DBG3("UST app release event failed. Application is dead: pid = %d, sock = %d",
335 app->pid, app->sock);
336 } else if (ret == -EAGAIN) {
337 WARN("UST app release event failed. Communication time out: pid = %d, sock = %d",
338 app->pid, app->sock);
339 } else {
340 ERR("UST app release event obj failed with ret %d: pid = %d, sock = %d",
341 ret, app->pid, app->sock);
342 }
343 }
344 free(ua_event->obj);
345 }
346 free(ua_event);
347 }
348
349 /*
350 * Delayed reclaim of a ust_app_event_notifier_rule object. This MUST be called
351 * through a call_rcu().
352 */
353 static
354 void free_ust_app_event_notifier_rule_rcu(struct rcu_head *head)
355 {
356 struct ust_app_event_notifier_rule *obj = caa_container_of(
357 head, struct ust_app_event_notifier_rule, rcu_head);
358
359 free(obj);
360 }
361
/*
 * Delete ust app event notifier rule safely.
 *
 * Releases the tracer-side object (if any) over the app socket, drops the
 * reference held on the trigger, and defers the final free of the rule
 * through call_rcu() so concurrent RCU readers stay safe.
 */
static void delete_ust_app_event_notifier_rule(int sock,
		struct ust_app_event_notifier_rule *ua_event_notifier_rule,
		struct ust_app *app)
{
	int ret;

	LTTNG_ASSERT(ua_event_notifier_rule);

	if (ua_event_notifier_rule->exclusion != NULL) {
		free(ua_event_notifier_rule->exclusion);
	}

	if (ua_event_notifier_rule->obj != NULL) {
		/* Serialize ust-ctl commands on the per-app socket. */
		pthread_mutex_lock(&app->sock_lock);
		ret = lttng_ust_ctl_release_object(sock, ua_event_notifier_rule->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0) {
			if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
				DBG3("UST app release event notifier failed. Application is dead: pid = %d, sock = %d",
						app->pid, app->sock);
			} else if (ret == -EAGAIN) {
				WARN("UST app release event notifier failed. Communication time out: pid = %d, sock = %d",
						app->pid, app->sock);
			} else {
				ERR("UST app release event notifier failed with ret %d: pid = %d, sock = %d",
						ret, app->pid, app->sock);
			}
		}

		free(ua_event_notifier_rule->obj);
	}

	/* Drop the reference taken on the trigger when the rule was created. */
	lttng_trigger_put(ua_event_notifier_rule->trigger);
	call_rcu(&ua_event_notifier_rule->rcu_head,
			free_ust_app_event_notifier_rule_rcu);
}
401
/*
 * Release ust data object of the given stream.
 *
 * Return 0 on success or else a negative value.
 */
static int release_ust_app_stream(int sock, struct ust_app_stream *stream,
		struct ust_app *app)
{
	int ret = 0;

	LTTNG_ASSERT(stream);

	if (stream->obj) {
		/* Serialize ust-ctl commands on the per-app socket. */
		pthread_mutex_lock(&app->sock_lock);
		ret = lttng_ust_ctl_release_object(sock, stream->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0) {
			if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
				DBG3("UST app release stream failed. Application is dead: pid = %d, sock = %d",
						app->pid, app->sock);
			} else if (ret == -EAGAIN) {
				WARN("UST app release stream failed. Communication time out: pid = %d, sock = %d",
						app->pid, app->sock);
			} else {
				ERR("UST app release stream obj failed with ret %d: pid = %d, sock = %d",
						ret, app->pid, app->sock);
			}
		}
		/*
		 * A stream reserves two fd slots; presumably the shm fd and
		 * its wakeup fd — NOTE(review): confirm against stream setup.
		 */
		lttng_fd_put(LTTNG_FD_APPS, 2);
		free(stream->obj);
	}

	return ret;
}
436
/*
 * Delete ust app stream safely. RCU read lock must be held before calling
 * this function.
 *
 * Best-effort release of the tracer-side object (errors are logged by the
 * callee), then frees the stream itself.
 */
static
void delete_ust_app_stream(int sock, struct ust_app_stream *stream,
		struct ust_app *app)
{
	LTTNG_ASSERT(stream);

	(void) release_ust_app_stream(sock, stream, app);
	free(stream);
}
450
451 static
452 void delete_ust_app_channel_rcu(struct rcu_head *head)
453 {
454 struct ust_app_channel *ua_chan =
455 caa_container_of(head, struct ust_app_channel, rcu_head);
456
457 lttng_ht_destroy(ua_chan->ctx);
458 lttng_ht_destroy(ua_chan->events);
459 free(ua_chan);
460 }
461
/*
 * Extract the lost packet or discarded events counter when the channel is
 * being deleted and store the value in the parent channel so we can
 * access it from lttng list and at stop/destroy.
 *
 * The session list lock must be held by the caller.
 */
static
void save_per_pid_lost_discarded_counters(struct ust_app_channel *ua_chan)
{
	uint64_t discarded = 0, lost = 0;
	struct ltt_session *session;
	struct ltt_ust_channel *uchan;

	/* Only per-CPU channels carry these counters. */
	if (ua_chan->attr.type != LTTNG_UST_ABI_CHAN_PER_CPU) {
		return;
	}

	rcu_read_lock();
	session = session_find_by_id(ua_chan->session->tracing_id);
	if (!session || !session->ust_session) {
		/*
		 * Not finding the session is not an error because there are
		 * multiple ways the channels can be torn down.
		 *
		 * 1) The session daemon can initiate the destruction of the
		 *    ust app session after receiving a destroy command or
		 *    during its shutdown/teardown.
		 * 2) The application, since we are in per-pid tracing, is
		 *    unregistering and tearing down its ust app session.
		 *
		 * Both paths are protected by the session list lock which
		 * ensures that the accounting of lost packets and discarded
		 * events is done exactly once. The session is then unpublished
		 * from the session list, resulting in this condition.
		 */
		goto end;
	}

	/* Overwrite mode loses packets; discard mode discards events. */
	if (ua_chan->attr.overwrite) {
		consumer_get_lost_packets(ua_chan->session->tracing_id,
				ua_chan->key, session->ust_session->consumer,
				&lost);
	} else {
		consumer_get_discarded_events(ua_chan->session->tracing_id,
				ua_chan->key, session->ust_session->consumer,
				&discarded);
	}
	uchan = trace_ust_find_channel_by_name(
			session->ust_session->domain_global.channels,
			ua_chan->name);
	if (!uchan) {
		ERR("Missing UST channel to store discarded counters");
		goto end;
	}

	/* Accumulate into the parent channel for later reporting. */
	uchan->per_pid_closed_app_discarded += discarded;
	uchan->per_pid_closed_app_lost += lost;

end:
	rcu_read_unlock();
	if (session) {
		session_put(session);
	}
}
527
/*
 * Delete ust app channel safely. RCU read lock must be held before calling
 * this function.
 *
 * The session list lock must be held by the caller.
 *
 * Tears down, in order: the channel's streams, contexts and events, then
 * (per-PID only) the registry entry and lost/discarded accounting, and
 * finally the tracer-side channel object. The channel memory itself is
 * reclaimed through call_rcu().
 */
static
void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;
	struct ust_app_stream *stream, *stmp;
	struct ust_registry_session *registry;

	LTTNG_ASSERT(ua_chan);

	DBG3("UST app deleting channel %s", ua_chan->name);

	/* Wipe stream */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		cds_list_del(&stream->list);
		delete_ust_app_stream(sock, stream, app);
	}

	/* Wipe context */
	cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
		cds_list_del(&ua_ctx->list);
		ret = lttng_ht_del(ua_chan->ctx, &iter);
		LTTNG_ASSERT(!ret);
		delete_ust_app_ctx(sock, ua_ctx, app);
	}

	/* Wipe events */
	cds_lfht_for_each_entry(ua_chan->events->ht, &iter.iter, ua_event,
			node.node) {
		ret = lttng_ht_del(ua_chan->events, &iter);
		LTTNG_ASSERT(!ret);
		delete_ust_app_event(sock, ua_event, app);
	}

	if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
		/* Wipe and free registry from session registry. */
		registry = get_session_registry(ua_chan->session);
		if (registry) {
			ust_registry_channel_del_free(registry, ua_chan->key,
				sock >= 0);
		}
		/*
		 * A negative socket can be used by the caller when
		 * cleaning-up a ua_chan in an error path. Skip the
		 * accounting in this case.
		 */
		if (sock >= 0) {
			save_per_pid_lost_discarded_counters(ua_chan);
		}
	}

	if (ua_chan->obj != NULL) {
		/* Remove channel from application UST object descriptor. */
		iter.iter.node = &ua_chan->ust_objd_node.node;
		ret = lttng_ht_del(app->ust_objd, &iter);
		LTTNG_ASSERT(!ret);
		/* Serialize ust-ctl commands on the per-app socket. */
		pthread_mutex_lock(&app->sock_lock);
		ret = lttng_ust_ctl_release_object(sock, ua_chan->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0) {
			if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
				DBG3("UST app channel %s release failed. Application is dead: pid = %d, sock = %d",
						ua_chan->name, app->pid,
						app->sock);
			} else if (ret == -EAGAIN) {
				WARN("UST app channel %s release failed. Communication time out: pid = %d, sock = %d",
						ua_chan->name, app->pid,
						app->sock);
			} else {
				ERR("UST app channel %s release failed with ret %d: pid = %d, sock = %d",
						ua_chan->name, ret, app->pid,
						app->sock);
			}
		}
		/* Return the fd slot reserved for the channel object. */
		lttng_fd_put(LTTNG_FD_APPS, 1);
		free(ua_chan->obj);
	}
	/* Defer final free until all RCU readers are done with the channel. */
	call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
}
616
/*
 * Send the "register done" command to the given application over its
 * command socket, serialized on the per-app socket lock.
 *
 * Returns the lttng_ust_ctl_register_done() result (0 on success, negative
 * on error).
 */
int ust_app_register_done(struct ust_app *app)
{
	int ret;

	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_register_done(app->sock);
	pthread_mutex_unlock(&app->sock_lock);
	return ret;
}
626
/*
 * Release a tracer-side object on behalf of an application.
 *
 * When app is NULL, no socket communication is wanted: a socket of -1 is
 * passed so lttng_ust_ctl_release_object() only reclaims local resources.
 * Note that the socket lock is deliberately held across the release call
 * to serialize ust-ctl commands on the app socket.
 *
 * Returns the lttng_ust_ctl_release_object() result.
 */
int ust_app_release_object(struct ust_app *app, struct lttng_ust_abi_object_data *data)
{
	int ret, sock;

	if (app) {
		pthread_mutex_lock(&app->sock_lock);
		sock = app->sock;
	} else {
		sock = -1;
	}
	ret = lttng_ust_ctl_release_object(sock, data);
	if (app) {
		pthread_mutex_unlock(&app->sock_lock);
	}
	return ret;
}
643
/*
 * Push metadata to consumer socket.
 *
 * RCU read-side lock must be held to guarantee existence of socket.
 * Must be called with the ust app session lock held.
 * Must be called with the registry lock held.
 *
 * On success, return the len of metadata pushed or else a negative value.
 * Returning a -EPIPE return value means we could not send the metadata,
 * but it can be caused by recoverable errors (e.g. the application has
 * terminated concurrently).
 *
 * NOTE: the registry lock is temporarily RELEASED around the consumer push
 * (see comment at push_data below) and re-acquired before returning, so
 * registry state may change concurrently during the push.
 */
ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
		struct consumer_socket *socket, int send_zero_data)
{
	int ret;
	char *metadata_str = NULL;
	size_t len, offset, new_metadata_len_sent;
	ssize_t ret_val;
	uint64_t metadata_key, metadata_version;

	LTTNG_ASSERT(registry);
	LTTNG_ASSERT(socket);

	metadata_key = registry->metadata_key;

	/*
	 * Means that no metadata was assigned to the session. This can
	 * happens if no start has been done previously.
	 */
	if (!metadata_key) {
		return 0;
	}

	/* Snapshot the registry state while the lock is still held. */
	offset = registry->metadata_len_sent;
	len = registry->metadata_len - registry->metadata_len_sent;
	new_metadata_len_sent = registry->metadata_len;
	metadata_version = registry->metadata_version;
	if (len == 0) {
		DBG3("No metadata to push for metadata key %" PRIu64,
				registry->metadata_key);
		ret_val = len;
		if (send_zero_data) {
			DBG("No metadata to push");
			goto push_data;
		}
		goto end;
	}

	/* Allocate only what we have to send. */
	metadata_str = (char *) zmalloc(len);
	if (!metadata_str) {
		PERROR("zmalloc ust app metadata string");
		ret_val = -ENOMEM;
		goto error;
	}
	/* Copy what we haven't sent out. */
	memcpy(metadata_str, registry->metadata + offset, len);

push_data:
	pthread_mutex_unlock(&registry->lock);
	/*
	 * We need to unlock the registry while we push metadata to
	 * break a circular dependency between the consumerd metadata
	 * lock and the sessiond registry lock. Indeed, pushing metadata
	 * to the consumerd awaits that it gets pushed all the way to
	 * relayd, but doing so requires grabbing the metadata lock. If
	 * a concurrent metadata request is being performed by
	 * consumerd, this can try to grab the registry lock on the
	 * sessiond while holding the metadata lock on the consumer
	 * daemon. Those push and pull schemes are performed on two
	 * different bidirectionnal communication sockets.
	 */
	ret = consumer_push_metadata(socket, metadata_key,
			metadata_str, len, offset, metadata_version);
	pthread_mutex_lock(&registry->lock);
	if (ret < 0) {
		/*
		 * There is an acceptable race here between the registry
		 * metadata key assignment and the creation on the
		 * consumer. The session daemon can concurrently push
		 * metadata for this registry while being created on the
		 * consumer since the metadata key of the registry is
		 * assigned *before* it is setup to avoid the consumer
		 * to ask for metadata that could possibly be not found
		 * in the session daemon.
		 *
		 * The metadata will get pushed either by the session
		 * being stopped or the consumer requesting metadata if
		 * that race is triggered.
		 */
		if (ret == -LTTCOMM_CONSUMERD_CHANNEL_FAIL) {
			ret = 0;
		} else {
			ERR("Error pushing metadata to consumer");
		}
		ret_val = ret;
		goto error_push;
	} else {
		/*
		 * Metadata may have been concurrently pushed, since
		 * we're not holding the registry lock while pushing to
		 * consumer. This is handled by the fact that we send
		 * the metadata content, size, and the offset at which
		 * that metadata belongs. This may arrive out of order
		 * on the consumer side, and the consumer is able to
		 * deal with overlapping fragments. The consumer
		 * supports overlapping fragments, which must be
		 * contiguous starting from offset 0. We keep the
		 * largest metadata_len_sent value of the concurrent
		 * send.
		 */
		registry->metadata_len_sent =
			std::max(registry->metadata_len_sent,
					new_metadata_len_sent);
	}
	free(metadata_str);
	return len;

end:
error:
	if (ret_val) {
		/*
		 * On error, flag the registry that the metadata is
		 * closed. We were unable to push anything and this
		 * means that either the consumer is not responding or
		 * the metadata cache has been destroyed on the
		 * consumer.
		 */
		registry->metadata_closed = 1;
	}
error_push:
	free(metadata_str);
	return ret_val;
}
779
/*
 * For a given application and session, push metadata to consumer.
 * Either sock or consumer is required : if sock is NULL, the default
 * socket to send the metadata is retrieved from consumer, if sock
 * is not NULL we use it to send the metadata.
 * RCU read-side lock must be held while calling this function,
 * therefore ensuring existence of registry. It also ensures existence
 * of socket throughout this function.
 *
 * Return 0 on success else a negative error.
 * Returning a -EPIPE return value means we could not send the metadata,
 * but it can be caused by recoverable errors (e.g. the application has
 * terminated concurrently).
 */
static int push_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret_val;
	ssize_t ret;
	struct consumer_socket *socket;

	LTTNG_ASSERT(registry);
	LTTNG_ASSERT(consumer);

	pthread_mutex_lock(&registry->lock);
	/* A closed registry means the metadata channel is already gone. */
	if (registry->metadata_closed) {
		ret_val = -EPIPE;
		goto error;
	}

	/* Get consumer socket to use to push the metadata.*/
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret_val = -1;
		goto error;
	}

	/* ust_app_push_metadata() requires the registry lock to be held. */
	ret = ust_app_push_metadata(registry, socket, 0);
	if (ret < 0) {
		ret_val = ret;
		goto error;
	}
	pthread_mutex_unlock(&registry->lock);
	return 0;

error:
	pthread_mutex_unlock(&registry->lock);
	return ret_val;
}
830
/*
 * Send to the consumer a close metadata command for the given session. Once
 * done, the metadata channel is deleted and the session metadata pointer is
 * nullified. The session lock MUST be held unless the application is
 * in the destroy path.
 *
 * Do not hold the registry lock while communicating with the consumerd, because
 * doing so causes inter-process deadlocks between consumerd and sessiond with
 * the metadata request notification.
 *
 * Return 0 on success else a negative value.
 */
static int close_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret;
	struct consumer_socket *socket;
	uint64_t metadata_key;
	bool registry_was_already_closed;

	LTTNG_ASSERT(registry);
	LTTNG_ASSERT(consumer);

	rcu_read_lock();

	/*
	 * Snapshot the registry state and flag it closed under the lock; the
	 * actual consumer communication happens after the lock is dropped
	 * (see the deadlock note in the function header).
	 */
	pthread_mutex_lock(&registry->lock);
	metadata_key = registry->metadata_key;
	registry_was_already_closed = registry->metadata_closed;
	if (metadata_key != 0) {
		/*
		 * Metadata closed. Even on error this means that the consumer
		 * is not responding or not found so either way a second close
		 * should NOT be emit for this registry.
		 */
		registry->metadata_closed = 1;
	}
	pthread_mutex_unlock(&registry->lock);

	/* Nothing to close, or another caller already closed it. */
	if (metadata_key == 0 || registry_was_already_closed) {
		ret = 0;
		goto end;
	}

	/* Get consumer socket to use to push the metadata.*/
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret = -1;
		goto end;
	}

	ret = consumer_close_metadata(socket, metadata_key);
	if (ret < 0) {
		goto end;
	}

end:
	rcu_read_unlock();
	return ret;
}
891
892 static
893 void delete_ust_app_session_rcu(struct rcu_head *head)
894 {
895 struct ust_app_session *ua_sess =
896 caa_container_of(head, struct ust_app_session, rcu_head);
897
898 lttng_ht_destroy(ua_sess->channels);
899 free(ua_sess);
900 }
901
/*
 * Delete ust app session safely. RCU read lock must be held before calling
 * this function.
 *
 * The session list lock must be held by the caller.
 *
 * Teardown order: flush remaining metadata to the consumer, delete every
 * channel, drop the per-PID buffer registry (if applicable), release the
 * tracer-side session handle, then defer the final free through call_rcu().
 */
static
void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_channel *ua_chan;
	struct ust_registry_session *registry;

	LTTNG_ASSERT(ua_sess);

	pthread_mutex_lock(&ua_sess->lock);

	/* A session must only be deleted once. */
	LTTNG_ASSERT(!ua_sess->deleted);
	ua_sess->deleted = true;

	registry = get_session_registry(ua_sess);
	/* Registry can be null on error path during initialization. */
	if (registry) {
		/* Push metadata for application before freeing the application. */
		(void) push_metadata(registry, ua_sess->consumer);

		/*
		 * Don't ask to close metadata for global per UID buffers. Close
		 * metadata only on destroy trace session in this case. Also, the
		 * previous push metadata could have flag the metadata registry to
		 * close so don't send a close command if closed.
		 */
		if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
			/* And ask to close it for this session registry. */
			(void) close_metadata(registry, ua_sess->consumer);
		}
	}

	/* Wipe every channel of the session. */
	cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
			node.node) {
		ret = lttng_ht_del(ua_sess->channels, &iter);
		LTTNG_ASSERT(!ret);
		delete_ust_app_channel(sock, ua_chan, app);
	}

	/* In case of per PID, the registry is kept in the session. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
		struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
		if (reg_pid) {
			/*
			 * Registry can be null on error path during
			 * initialization.
			 */
			buffer_reg_pid_remove(reg_pid);
			buffer_reg_pid_destroy(reg_pid);
		}
	}

	if (ua_sess->handle != -1) {
		/* Serialize ust-ctl commands on the per-app socket. */
		pthread_mutex_lock(&app->sock_lock);
		ret = lttng_ust_ctl_release_handle(sock, ua_sess->handle);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0) {
			if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
				DBG3("UST app release session handle failed. Application is dead: pid = %d, sock = %d",
						app->pid, app->sock);
			} else if (ret == -EAGAIN) {
				WARN("UST app release session handle failed. Communication time out: pid = %d, sock = %d",
						app->pid, app->sock);
			} else {
				ERR("UST app release session handle failed with ret %d: pid = %d, sock = %d",
						ret, app->pid, app->sock);
			}
		}

		/* Remove session from application UST object descriptor. */
		iter.iter.node = &ua_sess->ust_objd_node.node;
		ret = lttng_ht_del(app->ust_sessions_objd, &iter);
		LTTNG_ASSERT(!ret);
	}

	pthread_mutex_unlock(&ua_sess->lock);

	/* Drop the reference held on the consumer output. */
	consumer_output_put(ua_sess->consumer);

	/* Defer final free until all RCU readers are done with the session. */
	call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
}
991
/*
 * Delete a traceable application structure from the global list. Never call
 * this function outside of a call_rcu call.
 *
 * Tears down, in order: every session queued on the app's teardown list,
 * the event notifier rules, the app-local hash tables, the event notifier
 * group object and pipe, and finally the command socket. The socket close
 * is part of the RCU synchronization model (see comment below).
 */
static
void delete_ust_app(struct ust_app *app)
{
	int ret, sock;
	struct ust_app_session *ua_sess, *tmp_ua_sess;
	struct lttng_ht_iter iter;
	struct ust_app_event_notifier_rule *event_notifier_rule;
	bool event_notifier_write_fd_is_open;

	/*
	 * The session list lock must be held during this function to guarantee
	 * the existence of ua_sess.
	 */
	session_lock_list();
	/* Delete ust app sessions info */
	sock = app->sock;
	/*
	 * Invalidate the socket field right away; the local 'sock' copy is
	 * the only handle used for the remaining teardown communication.
	 */
	app->sock = -1;

	/* Wipe sessions */
	cds_list_for_each_entry_safe(ua_sess, tmp_ua_sess, &app->teardown_head,
			teardown_node) {
		/* Free every object in the session and the session. */
		rcu_read_lock();
		delete_ust_app_session(sock, ua_sess, app);
		rcu_read_unlock();
	}

	/* Remove the event notifier rules associated with this app. */
	rcu_read_lock();
	cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht,
			&iter.iter, event_notifier_rule, node.node) {
		ret = lttng_ht_del(app->token_to_event_notifier_rule_ht, &iter);
		LTTNG_ASSERT(!ret);

		/*
		 * NOTE(review): app->sock is -1 at this point (set above), so
		 * the rule teardown runs without a live socket, unlike the
		 * session teardown which uses the saved 'sock' — confirm this
		 * is intentional.
		 */
		delete_ust_app_event_notifier_rule(
				app->sock, event_notifier_rule, app);
	}

	rcu_read_unlock();

	lttng_ht_destroy(app->sessions);
	lttng_ht_destroy(app->ust_sessions_objd);
	lttng_ht_destroy(app->ust_objd);
	lttng_ht_destroy(app->token_to_event_notifier_rule_ht);

	/*
	 * This could be NULL if the event notifier setup failed (e.g the app
	 * was killed or the tracer does not support this feature).
	 */
	if (app->event_notifier_group.object) {
		enum lttng_error_code ret_code;
		enum event_notifier_error_accounting_status status;

		const int event_notifier_read_fd = lttng_pipe_get_readfd(
				app->event_notifier_group.event_pipe);

		ret_code = notification_thread_command_remove_tracer_event_source(
				the_notification_thread_handle,
				event_notifier_read_fd);
		if (ret_code != LTTNG_OK) {
			ERR("Failed to remove application tracer event source from notification thread");
		}

		status = event_notifier_error_accounting_unregister_app(app);
		if (status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
			ERR("Error unregistering app from event notifier error accounting");
		}

		lttng_ust_ctl_release_object(sock, app->event_notifier_group.object);
		free(app->event_notifier_group.object);
	}

	event_notifier_write_fd_is_open = lttng_pipe_is_write_open(
			app->event_notifier_group.event_pipe);
	lttng_pipe_destroy(app->event_notifier_group.event_pipe);
	/*
	 * Release the file descriptors reserved for the event notifier pipe.
	 * The app could be destroyed before the write end of the pipe could be
	 * passed to the application (and closed). In that case, both file
	 * descriptors must be released.
	 */
	lttng_fd_put(LTTNG_FD_APPS, event_notifier_write_fd_is_open ? 2 : 1);

	/*
	 * Wait until we have deleted the application from the sock hash table
	 * before closing this socket, otherwise an application could re-use the
	 * socket ID and race with the teardown, using the same hash table entry.
	 *
	 * It's OK to leave the close in call_rcu. We want it to stay unique for
	 * all RCU readers that could run concurrently with unregister app,
	 * therefore we _need_ to only close that socket after a grace period. So
	 * it should stay in this RCU callback.
	 *
	 * This close() is a very important step of the synchronization model so
	 * every modification to this function must be carefully reviewed.
	 */
	ret = close(sock);
	if (ret) {
		PERROR("close");
	}
	lttng_fd_put(LTTNG_FD_APPS, 1);

	DBG2("UST app pid %d deleted", app->pid);
	free(app);
	session_unlock_list();
}
1102
1103 /*
1104 * URCU intermediate call to delete an UST app.
1105 */
1106 static
1107 void delete_ust_app_rcu(struct rcu_head *head)
1108 {
1109 struct lttng_ht_node_ulong *node =
1110 caa_container_of(head, struct lttng_ht_node_ulong, head);
1111 struct ust_app *app =
1112 caa_container_of(node, struct ust_app, pid_n);
1113
1114 DBG3("Call RCU deleting app PID %d", app->pid);
1115 delete_ust_app(app);
1116 }
1117
1118 /*
1119 * Delete the session from the application ht and delete the data structure by
1120 * freeing every object inside and releasing them.
1121 *
1122 * The session list lock must be held by the caller.
1123 */
1124 static void destroy_app_session(struct ust_app *app,
1125 struct ust_app_session *ua_sess)
1126 {
1127 int ret;
1128 struct lttng_ht_iter iter;
1129
1130 LTTNG_ASSERT(app);
1131 LTTNG_ASSERT(ua_sess);
1132
1133 iter.iter.node = &ua_sess->node.node;
1134 ret = lttng_ht_del(app->sessions, &iter);
1135 if (ret) {
1136 /* Already scheduled for teardown. */
1137 goto end;
1138 }
1139
1140 /* Once deleted, free the data structure. */
1141 delete_ust_app_session(app->sock, ua_sess, app);
1142
1143 end:
1144 return;
1145 }
1146
1147 /*
1148 * Alloc new UST app session.
1149 */
1150 static
1151 struct ust_app_session *alloc_ust_app_session(void)
1152 {
1153 struct ust_app_session *ua_sess;
1154
1155 /* Init most of the default value by allocating and zeroing */
1156 ua_sess = (ust_app_session *) zmalloc(sizeof(struct ust_app_session));
1157 if (ua_sess == NULL) {
1158 PERROR("malloc");
1159 goto error_free;
1160 }
1161
1162 ua_sess->handle = -1;
1163 ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
1164 ua_sess->metadata_attr.type = LTTNG_UST_ABI_CHAN_METADATA;
1165 pthread_mutex_init(&ua_sess->lock, NULL);
1166
1167 return ua_sess;
1168
1169 error_free:
1170 return NULL;
1171 }
1172
1173 /*
1174 * Alloc new UST app channel.
1175 */
1176 static
1177 struct ust_app_channel *alloc_ust_app_channel(const char *name,
1178 struct ust_app_session *ua_sess,
1179 struct lttng_ust_abi_channel_attr *attr)
1180 {
1181 struct ust_app_channel *ua_chan;
1182
1183 /* Init most of the default value by allocating and zeroing */
1184 ua_chan = (ust_app_channel *) zmalloc(sizeof(struct ust_app_channel));
1185 if (ua_chan == NULL) {
1186 PERROR("malloc");
1187 goto error;
1188 }
1189
1190 /* Setup channel name */
1191 strncpy(ua_chan->name, name, sizeof(ua_chan->name));
1192 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
1193
1194 ua_chan->enabled = 1;
1195 ua_chan->handle = -1;
1196 ua_chan->session = ua_sess;
1197 ua_chan->key = get_next_channel_key();
1198 ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
1199 ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
1200 lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);
1201
1202 CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
1203 CDS_INIT_LIST_HEAD(&ua_chan->ctx_list);
1204
1205 /* Copy attributes */
1206 if (attr) {
1207 /* Translate from lttng_ust_channel to lttng_ust_ctl_consumer_channel_attr. */
1208 ua_chan->attr.subbuf_size = attr->subbuf_size;
1209 ua_chan->attr.num_subbuf = attr->num_subbuf;
1210 ua_chan->attr.overwrite = attr->overwrite;
1211 ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
1212 ua_chan->attr.read_timer_interval = attr->read_timer_interval;
1213 ua_chan->attr.output = (lttng_ust_abi_output) attr->output;
1214 ua_chan->attr.blocking_timeout = attr->u.s.blocking_timeout;
1215 }
1216 /* By default, the channel is a per cpu channel. */
1217 ua_chan->attr.type = LTTNG_UST_ABI_CHAN_PER_CPU;
1218
1219 DBG3("UST app channel %s allocated", ua_chan->name);
1220
1221 return ua_chan;
1222
1223 error:
1224 return NULL;
1225 }
1226
1227 /*
1228 * Allocate and initialize a UST app stream.
1229 *
1230 * Return newly allocated stream pointer or NULL on error.
1231 */
1232 struct ust_app_stream *ust_app_alloc_stream(void)
1233 {
1234 struct ust_app_stream *stream = NULL;
1235
1236 stream = (ust_app_stream *) zmalloc(sizeof(*stream));
1237 if (stream == NULL) {
1238 PERROR("zmalloc ust app stream");
1239 goto error;
1240 }
1241
1242 /* Zero could be a valid value for a handle so flag it to -1. */
1243 stream->handle = -1;
1244
1245 error:
1246 return stream;
1247 }
1248
1249 /*
1250 * Alloc new UST app event.
1251 */
1252 static
1253 struct ust_app_event *alloc_ust_app_event(char *name,
1254 struct lttng_ust_abi_event *attr)
1255 {
1256 struct ust_app_event *ua_event;
1257
1258 /* Init most of the default value by allocating and zeroing */
1259 ua_event = (ust_app_event *) zmalloc(sizeof(struct ust_app_event));
1260 if (ua_event == NULL) {
1261 PERROR("Failed to allocate ust_app_event structure");
1262 goto error;
1263 }
1264
1265 ua_event->enabled = 1;
1266 strncpy(ua_event->name, name, sizeof(ua_event->name));
1267 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
1268 lttng_ht_node_init_str(&ua_event->node, ua_event->name);
1269
1270 /* Copy attributes */
1271 if (attr) {
1272 memcpy(&ua_event->attr, attr, sizeof(ua_event->attr));
1273 }
1274
1275 DBG3("UST app event %s allocated", ua_event->name);
1276
1277 return ua_event;
1278
1279 error:
1280 return NULL;
1281 }
1282
/*
 * Allocate a new UST app event notifier rule.
 *
 * Builds the per-app representation of 'trigger': the rule keys its hash
 * table node on the trigger's tracer token, takes a reference on the
 * trigger, and derives the filter bytecode and exclusions from the
 * trigger's event-rule-matches condition.
 *
 * Returns the new rule, or NULL on allocation/exclusion-generation failure
 * (in which case no trigger reference is retained).
 */
static struct ust_app_event_notifier_rule *alloc_ust_app_event_notifier_rule(
		struct lttng_trigger *trigger)
{
	enum lttng_event_rule_generate_exclusions_status
			generate_exclusion_status;
	enum lttng_condition_status cond_status;
	struct ust_app_event_notifier_rule *ua_event_notifier_rule;
	struct lttng_condition *condition = NULL;
	const struct lttng_event_rule *event_rule = NULL;

	ua_event_notifier_rule = (ust_app_event_notifier_rule *) zmalloc(sizeof(struct ust_app_event_notifier_rule));
	if (ua_event_notifier_rule == NULL) {
		PERROR("Failed to allocate ust_app_event_notifier_rule structure");
		goto error;
	}

	ua_event_notifier_rule->enabled = 1;
	/* The tracer token is the hash table key for this rule. */
	ua_event_notifier_rule->token = lttng_trigger_get_tracer_token(trigger);
	lttng_ht_node_init_u64(&ua_event_notifier_rule->node,
			ua_event_notifier_rule->token);

	/* Only event-rule-matches conditions are expected here. */
	condition = lttng_trigger_get_condition(trigger);
	LTTNG_ASSERT(condition);
	LTTNG_ASSERT(lttng_condition_get_type(condition) ==
			LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES);

	cond_status = lttng_condition_event_rule_matches_get_rule(
			condition, &event_rule);
	LTTNG_ASSERT(cond_status == LTTNG_CONDITION_STATUS_OK);
	LTTNG_ASSERT(event_rule);

	ua_event_notifier_rule->error_counter_index =
			lttng_condition_event_rule_matches_get_error_counter_index(condition);
	/* Acquire the event notifier's reference to the trigger. */
	lttng_trigger_get(trigger);

	ua_event_notifier_rule->trigger = trigger;
	ua_event_notifier_rule->filter = lttng_event_rule_get_filter_bytecode(event_rule);
	generate_exclusion_status = lttng_event_rule_generate_exclusions(
			event_rule, &ua_event_notifier_rule->exclusion);
	switch (generate_exclusion_status) {
	case LTTNG_EVENT_RULE_GENERATE_EXCLUSIONS_STATUS_OK:
	case LTTNG_EVENT_RULE_GENERATE_EXCLUSIONS_STATUS_NONE:
		break;
	default:
		/* Error occurred. */
		ERR("Failed to generate exclusions from trigger while allocating an event notifier rule");
		/* Drop the trigger reference taken above before bailing out. */
		goto error_put_trigger;
	}

	DBG3("UST app event notifier rule allocated: token = %" PRIu64,
			ua_event_notifier_rule->token);

	return ua_event_notifier_rule;

error_put_trigger:
	lttng_trigger_put(trigger);
error:
	free(ua_event_notifier_rule);
	return NULL;
}
1347
1348 /*
1349 * Alloc new UST app context.
1350 */
1351 static
1352 struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context_attr *uctx)
1353 {
1354 struct ust_app_ctx *ua_ctx;
1355
1356 ua_ctx = (ust_app_ctx *) zmalloc(sizeof(struct ust_app_ctx));
1357 if (ua_ctx == NULL) {
1358 goto error;
1359 }
1360
1361 CDS_INIT_LIST_HEAD(&ua_ctx->list);
1362
1363 if (uctx) {
1364 memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
1365 if (uctx->ctx == LTTNG_UST_ABI_CONTEXT_APP_CONTEXT) {
1366 char *provider_name = NULL, *ctx_name = NULL;
1367
1368 provider_name = strdup(uctx->u.app_ctx.provider_name);
1369 ctx_name = strdup(uctx->u.app_ctx.ctx_name);
1370 if (!provider_name || !ctx_name) {
1371 free(provider_name);
1372 free(ctx_name);
1373 goto error;
1374 }
1375
1376 ua_ctx->ctx.u.app_ctx.provider_name = provider_name;
1377 ua_ctx->ctx.u.app_ctx.ctx_name = ctx_name;
1378 }
1379 }
1380
1381 DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);
1382 return ua_ctx;
1383 error:
1384 free(ua_ctx);
1385 return NULL;
1386 }
1387
1388 /*
1389 * Create a liblttng-ust filter bytecode from given bytecode.
1390 *
1391 * Return allocated filter or NULL on error.
1392 */
1393 static struct lttng_ust_abi_filter_bytecode *create_ust_filter_bytecode_from_bytecode(
1394 const struct lttng_bytecode *orig_f)
1395 {
1396 struct lttng_ust_abi_filter_bytecode *filter = NULL;
1397
1398 /* Copy filter bytecode. */
1399 filter = (lttng_ust_abi_filter_bytecode *) zmalloc(sizeof(*filter) + orig_f->len);
1400 if (!filter) {
1401 PERROR("Failed to allocate lttng_ust_filter_bytecode: bytecode len = %" PRIu32 " bytes", orig_f->len);
1402 goto error;
1403 }
1404
1405 LTTNG_ASSERT(sizeof(struct lttng_bytecode) ==
1406 sizeof(struct lttng_ust_abi_filter_bytecode));
1407 memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
1408 error:
1409 return filter;
1410 }
1411
1412 /*
1413 * Create a liblttng-ust capture bytecode from given bytecode.
1414 *
1415 * Return allocated filter or NULL on error.
1416 */
1417 static struct lttng_ust_abi_capture_bytecode *
1418 create_ust_capture_bytecode_from_bytecode(const struct lttng_bytecode *orig_f)
1419 {
1420 struct lttng_ust_abi_capture_bytecode *capture = NULL;
1421
1422 /* Copy capture bytecode. */
1423 capture = (lttng_ust_abi_capture_bytecode *) zmalloc(sizeof(*capture) + orig_f->len);
1424 if (!capture) {
1425 PERROR("Failed to allocate lttng_ust_abi_capture_bytecode: bytecode len = %" PRIu32 " bytes", orig_f->len);
1426 goto error;
1427 }
1428
1429 LTTNG_ASSERT(sizeof(struct lttng_bytecode) ==
1430 sizeof(struct lttng_ust_abi_capture_bytecode));
1431 memcpy(capture, orig_f, sizeof(*capture) + orig_f->len);
1432 error:
1433 return capture;
1434 }
1435
1436 /*
1437 * Find an ust_app using the sock and return it. RCU read side lock must be
1438 * held before calling this helper function.
1439 */
1440 struct ust_app *ust_app_find_by_sock(int sock)
1441 {
1442 struct lttng_ht_node_ulong *node;
1443 struct lttng_ht_iter iter;
1444
1445 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
1446 node = lttng_ht_iter_get_node_ulong(&iter);
1447 if (node == NULL) {
1448 DBG2("UST app find by sock %d not found", sock);
1449 goto error;
1450 }
1451
1452 return caa_container_of(node, struct ust_app, sock_n);
1453
1454 error:
1455 return NULL;
1456 }
1457
1458 /*
1459 * Find an ust_app using the notify sock and return it. RCU read side lock must
1460 * be held before calling this helper function.
1461 */
1462 static struct ust_app *find_app_by_notify_sock(int sock)
1463 {
1464 struct lttng_ht_node_ulong *node;
1465 struct lttng_ht_iter iter;
1466
1467 lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
1468 &iter);
1469 node = lttng_ht_iter_get_node_ulong(&iter);
1470 if (node == NULL) {
1471 DBG2("UST app find by notify sock %d not found", sock);
1472 goto error;
1473 }
1474
1475 return caa_container_of(node, struct ust_app, notify_sock_n);
1476
1477 error:
1478 return NULL;
1479 }
1480
/*
 * Lookup for an ust app event based on event name, filter bytecode and the
 * event loglevel.
 *
 * The hash is computed on the event name alone; the custom match function
 * (ht_match_ust_app_event) then compares the full key (name, filter,
 * loglevel and exclusions) against candidate nodes.
 *
 * Return an ust_app_event object or NULL on error.
 */
static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
		const char *name, const struct lttng_bytecode *filter,
		int loglevel_value,
		const struct lttng_event_exclusion *exclusion)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *node;
	struct ust_app_event *event = NULL;
	struct ust_app_ht_key key;

	LTTNG_ASSERT(name);
	LTTNG_ASSERT(ht);

	/* Setup key for event lookup. */
	key.name = name;
	key.filter = filter;
	key.loglevel_type = (lttng_ust_abi_loglevel_type) loglevel_value;
	/* lttng_event_exclusion and lttng_ust_event_exclusion structures are similar */
	key.exclusion = exclusion;

	/* Lookup using the event name as hash and a custom match fct. */
	cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
			ht_match_ust_app_event, &key, &iter.iter);
	node = lttng_ht_iter_get_node_str(&iter);
	if (node == NULL) {
		goto end;
	}

	event = caa_container_of(node, struct ust_app_event, node);

end:
	return event;
}
1520
1521 /*
1522 * Look-up an event notifier rule based on its token id.
1523 *
1524 * Must be called with the RCU read lock held.
1525 * Return an ust_app_event_notifier_rule object or NULL on error.
1526 */
1527 static struct ust_app_event_notifier_rule *find_ust_app_event_notifier_rule(
1528 struct lttng_ht *ht, uint64_t token)
1529 {
1530 struct lttng_ht_iter iter;
1531 struct lttng_ht_node_u64 *node;
1532 struct ust_app_event_notifier_rule *event_notifier_rule = NULL;
1533
1534 LTTNG_ASSERT(ht);
1535
1536 lttng_ht_lookup(ht, &token, &iter);
1537 node = lttng_ht_iter_get_node_u64(&iter);
1538 if (node == NULL) {
1539 DBG2("UST app event notifier rule token not found: token = %" PRIu64,
1540 token);
1541 goto end;
1542 }
1543
1544 event_notifier_rule = caa_container_of(
1545 node, struct ust_app_event_notifier_rule, node);
1546 end:
1547 return event_notifier_rule;
1548 }
1549
/*
 * Create the channel context on the tracer.
 *
 * Called with UST app session lock held.
 *
 * Returns 0 on success (including when the application died or the
 * communication timed out, which are logged but not treated as fatal),
 * or a negative UST error code otherwise. On success, ua_ctx->handle is
 * set from the tracer-provided object.
 */
static
int create_ust_channel_context(struct ust_app_channel *ua_chan,
		struct ust_app_ctx *ua_ctx, struct ust_app *app)
{
	int ret;

	health_code_update();

	/* The app socket is shared; serialize the command with sock_lock. */
	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_add_context(app->sock, &ua_ctx->ctx,
			ua_chan->obj, &ua_ctx->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			/* App teardown in progress; not an error for us. */
			ret = 0;
			DBG3("UST app create channel context failed. Application is dead: pid = %d, sock = %d",
					app->pid, app->sock);
		} else if (ret == -EAGAIN) {
			/* Communication timed out; treated as non-fatal. */
			ret = 0;
			WARN("UST app create channel context failed. Communication time out: pid = %d, sock = %d",
					app->pid, app->sock);
		} else {
			ERR("UST app create channel context failed with ret %d: pid = %d, sock = %d",
					ret, app->pid, app->sock);
		}
		goto error;
	}

	ua_ctx->handle = ua_ctx->obj->handle;

	DBG2("UST app context handle %d created successfully for channel %s",
			ua_ctx->handle, ua_chan->name);

error:
	health_code_update();
	return ret;
}
1592
1593 /*
1594 * Set the filter on the tracer.
1595 */
1596 static int set_ust_object_filter(struct ust_app *app,
1597 const struct lttng_bytecode *bytecode,
1598 struct lttng_ust_abi_object_data *ust_object)
1599 {
1600 int ret;
1601 struct lttng_ust_abi_filter_bytecode *ust_bytecode = NULL;
1602
1603 health_code_update();
1604
1605 ust_bytecode = create_ust_filter_bytecode_from_bytecode(bytecode);
1606 if (!ust_bytecode) {
1607 ret = -LTTNG_ERR_NOMEM;
1608 goto error;
1609 }
1610 pthread_mutex_lock(&app->sock_lock);
1611 ret = lttng_ust_ctl_set_filter(app->sock, ust_bytecode,
1612 ust_object);
1613 pthread_mutex_unlock(&app->sock_lock);
1614 if (ret < 0) {
1615 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1616 ret = 0;
1617 DBG3("UST app set filter failed. Application is dead: pid = %d, sock = %d",
1618 app->pid, app->sock);
1619 } else if (ret == -EAGAIN) {
1620 ret = 0;
1621 WARN("UST app set filter failed. Communication time out: pid = %d, sock = %d",
1622 app->pid, app->sock);
1623 } else {
1624 ERR("UST app set filter failed with ret %d: pid = %d, sock = %d, object = %p",
1625 ret, app->pid, app->sock, ust_object);
1626 }
1627 goto error;
1628 }
1629
1630 DBG2("UST filter successfully set: object = %p", ust_object);
1631
1632 error:
1633 health_code_update();
1634 free(ust_bytecode);
1635 return ret;
1636 }
1637
1638 /*
1639 * Set a capture bytecode for the passed object.
1640 * The sequence number enforces the ordering at runtime and on reception of
1641 * the captured payloads.
1642 */
1643 static int set_ust_capture(struct ust_app *app,
1644 const struct lttng_bytecode *bytecode,
1645 unsigned int capture_seqnum,
1646 struct lttng_ust_abi_object_data *ust_object)
1647 {
1648 int ret;
1649 struct lttng_ust_abi_capture_bytecode *ust_bytecode = NULL;
1650
1651 health_code_update();
1652
1653 ust_bytecode = create_ust_capture_bytecode_from_bytecode(bytecode);
1654 if (!ust_bytecode) {
1655 ret = -LTTNG_ERR_NOMEM;
1656 goto error;
1657 }
1658
1659 /*
1660 * Set the sequence number to ensure the capture of fields is ordered.
1661 */
1662 ust_bytecode->seqnum = capture_seqnum;
1663
1664 pthread_mutex_lock(&app->sock_lock);
1665 ret = lttng_ust_ctl_set_capture(app->sock, ust_bytecode,
1666 ust_object);
1667 pthread_mutex_unlock(&app->sock_lock);
1668 if (ret < 0) {
1669 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1670 ret = 0;
1671 DBG3("UST app set capture failed. Application is dead: pid = %d, sock = %d",
1672 app->pid, app->sock);
1673 } else if (ret == -EAGAIN) {
1674 ret = 0;
1675 DBG3("UST app set capture failed. Communication timeout: pid = %d, sock = %d",
1676 app->pid, app->sock);
1677 } else {
1678 ERR("UST app event set capture failed with ret %d: pid = %d, sock = %d",
1679 ret, app->pid,
1680 app->sock);
1681 }
1682
1683 goto error;
1684 }
1685
1686 DBG2("UST capture successfully set: object = %p", ust_object);
1687
1688 error:
1689 health_code_update();
1690 free(ust_bytecode);
1691 return ret;
1692 }
1693
1694 static
1695 struct lttng_ust_abi_event_exclusion *create_ust_exclusion_from_exclusion(
1696 const struct lttng_event_exclusion *exclusion)
1697 {
1698 struct lttng_ust_abi_event_exclusion *ust_exclusion = NULL;
1699 size_t exclusion_alloc_size = sizeof(struct lttng_ust_abi_event_exclusion) +
1700 LTTNG_UST_ABI_SYM_NAME_LEN * exclusion->count;
1701
1702 ust_exclusion = (lttng_ust_abi_event_exclusion *) zmalloc(exclusion_alloc_size);
1703 if (!ust_exclusion) {
1704 PERROR("malloc");
1705 goto end;
1706 }
1707
1708 LTTNG_ASSERT(sizeof(struct lttng_event_exclusion) ==
1709 sizeof(struct lttng_ust_abi_event_exclusion));
1710 memcpy(ust_exclusion, exclusion, exclusion_alloc_size);
1711 end:
1712 return ust_exclusion;
1713 }
1714
1715 /*
1716 * Set event exclusions on the tracer.
1717 */
1718 static int set_ust_object_exclusions(struct ust_app *app,
1719 const struct lttng_event_exclusion *exclusions,
1720 struct lttng_ust_abi_object_data *ust_object)
1721 {
1722 int ret;
1723 struct lttng_ust_abi_event_exclusion *ust_exclusions = NULL;
1724
1725 LTTNG_ASSERT(exclusions && exclusions->count > 0);
1726
1727 health_code_update();
1728
1729 ust_exclusions = create_ust_exclusion_from_exclusion(
1730 exclusions);
1731 if (!ust_exclusions) {
1732 ret = -LTTNG_ERR_NOMEM;
1733 goto error;
1734 }
1735 pthread_mutex_lock(&app->sock_lock);
1736 ret = lttng_ust_ctl_set_exclusion(app->sock, ust_exclusions, ust_object);
1737 pthread_mutex_unlock(&app->sock_lock);
1738 if (ret < 0) {
1739 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1740 ret = 0;
1741 DBG3("UST app event exclusion failed. Application is dead: pid = %d, sock = %d",
1742 app->pid, app->sock);
1743 } else if (ret == -EAGAIN) {
1744 ret = 0;
1745 WARN("UST app event exclusion failed. Communication time out(pid: %d, sock = %d",
1746 app->pid, app->sock);
1747 } else {
1748 ERR("UST app event exclusions failed with ret %d: pid = %d, sock = %d, object = %p",
1749 ret, app->pid, app->sock, ust_object);
1750 }
1751 goto error;
1752 }
1753
1754 DBG2("UST exclusions set successfully for object %p", ust_object);
1755
1756 error:
1757 health_code_update();
1758 free(ust_exclusions);
1759 return ret;
1760 }
1761
1762 /*
1763 * Disable the specified event on to UST tracer for the UST session.
1764 */
1765 static int disable_ust_object(struct ust_app *app,
1766 struct lttng_ust_abi_object_data *object)
1767 {
1768 int ret;
1769
1770 health_code_update();
1771
1772 pthread_mutex_lock(&app->sock_lock);
1773 ret = lttng_ust_ctl_disable(app->sock, object);
1774 pthread_mutex_unlock(&app->sock_lock);
1775 if (ret < 0) {
1776 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1777 ret = 0;
1778 DBG3("UST app disable object failed. Application is dead: pid = %d, sock = %d",
1779 app->pid, app->sock);
1780 } else if (ret == -EAGAIN) {
1781 ret = 0;
1782 WARN("UST app disable object failed. Communication time out: pid = %d, sock = %d",
1783 app->pid, app->sock);
1784 } else {
1785 ERR("UST app disable object failed with ret %d: pid = %d, sock = %d, object = %p",
1786 ret, app->pid, app->sock, object);
1787 }
1788 goto error;
1789 }
1790
1791 DBG2("UST app object %p disabled successfully for app: pid = %d",
1792 object, app->pid);
1793
1794 error:
1795 health_code_update();
1796 return ret;
1797 }
1798
1799 /*
1800 * Disable the specified channel on to UST tracer for the UST session.
1801 */
1802 static int disable_ust_channel(struct ust_app *app,
1803 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1804 {
1805 int ret;
1806
1807 health_code_update();
1808
1809 pthread_mutex_lock(&app->sock_lock);
1810 ret = lttng_ust_ctl_disable(app->sock, ua_chan->obj);
1811 pthread_mutex_unlock(&app->sock_lock);
1812 if (ret < 0) {
1813 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1814 ret = 0;
1815 DBG3("UST app disable channel failed. Application is dead: pid = %d, sock = %d",
1816 app->pid, app->sock);
1817 } else if (ret == -EAGAIN) {
1818 ret = 0;
1819 WARN("UST app disable channel failed. Communication time out: pid = %d, sock = %d",
1820 app->pid, app->sock);
1821 } else {
1822 ERR("UST app channel %s disable failed, session handle %d, with ret %d: pid = %d, sock = %d",
1823 ua_chan->name, ua_sess->handle, ret,
1824 app->pid, app->sock);
1825 }
1826 goto error;
1827 }
1828
1829 DBG2("UST app channel %s disabled successfully for app: pid = %d",
1830 ua_chan->name, app->pid);
1831
1832 error:
1833 health_code_update();
1834 return ret;
1835 }
1836
1837 /*
1838 * Enable the specified channel on to UST tracer for the UST session.
1839 */
1840 static int enable_ust_channel(struct ust_app *app,
1841 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1842 {
1843 int ret;
1844
1845 health_code_update();
1846
1847 pthread_mutex_lock(&app->sock_lock);
1848 ret = lttng_ust_ctl_enable(app->sock, ua_chan->obj);
1849 pthread_mutex_unlock(&app->sock_lock);
1850 if (ret < 0) {
1851 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1852 ret = 0;
1853 DBG3("UST app channel %s enable failed. Application is dead: pid = %d, sock = %d",
1854 ua_chan->name, app->pid, app->sock);
1855 } else if (ret == -EAGAIN) {
1856 ret = 0;
1857 WARN("UST app channel %s enable failed. Communication time out: pid = %d, sock = %d",
1858 ua_chan->name, app->pid, app->sock);
1859 } else {
1860 ERR("UST app channel %s enable failed, session handle %d, with ret %d: pid = %d, sock = %d",
1861 ua_chan->name, ua_sess->handle, ret,
1862 app->pid, app->sock);
1863 }
1864 goto error;
1865 }
1866
1867 ua_chan->enabled = 1;
1868
1869 DBG2("UST app channel %s enabled successfully for app: pid = %d",
1870 ua_chan->name, app->pid);
1871
1872 error:
1873 health_code_update();
1874 return ret;
1875 }
1876
1877 /*
1878 * Enable the specified event on to UST tracer for the UST session.
1879 */
1880 static int enable_ust_object(
1881 struct ust_app *app, struct lttng_ust_abi_object_data *ust_object)
1882 {
1883 int ret;
1884
1885 health_code_update();
1886
1887 pthread_mutex_lock(&app->sock_lock);
1888 ret = lttng_ust_ctl_enable(app->sock, ust_object);
1889 pthread_mutex_unlock(&app->sock_lock);
1890 if (ret < 0) {
1891 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1892 ret = 0;
1893 DBG3("UST app enable object failed. Application is dead: pid = %d, sock = %d",
1894 app->pid, app->sock);
1895 } else if (ret == -EAGAIN) {
1896 ret = 0;
1897 WARN("UST app enable object failed. Communication time out: pid = %d, sock = %d",
1898 app->pid, app->sock);
1899 } else {
1900 ERR("UST app enable object failed with ret %d: pid = %d, sock = %d, object = %p",
1901 ret, app->pid, app->sock, ust_object);
1902 }
1903 goto error;
1904 }
1905
1906 DBG2("UST app object %p enabled successfully for app: pid = %d",
1907 ust_object, app->pid);
1908
1909 error:
1910 health_code_update();
1911 return ret;
1912 }
1913
1914 /*
1915 * Send channel and stream buffer to application.
1916 *
1917 * Return 0 on success. On error, a negative value is returned.
1918 */
1919 static int send_channel_pid_to_ust(struct ust_app *app,
1920 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1921 {
1922 int ret;
1923 struct ust_app_stream *stream, *stmp;
1924
1925 LTTNG_ASSERT(app);
1926 LTTNG_ASSERT(ua_sess);
1927 LTTNG_ASSERT(ua_chan);
1928
1929 health_code_update();
1930
1931 DBG("UST app sending channel %s to UST app sock %d", ua_chan->name,
1932 app->sock);
1933
1934 /* Send channel to the application. */
1935 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
1936 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1937 ret = -ENOTCONN; /* Caused by app exiting. */
1938 goto error;
1939 } else if (ret == -EAGAIN) {
1940 /* Caused by timeout. */
1941 WARN("Communication with application %d timed out on send_channel for channel \"%s\" of session \"%" PRIu64 "\".",
1942 app->pid, ua_chan->name, ua_sess->tracing_id);
1943 /* Treat this the same way as an application that is exiting. */
1944 ret = -ENOTCONN;
1945 goto error;
1946 } else if (ret < 0) {
1947 goto error;
1948 }
1949
1950 health_code_update();
1951
1952 /* Send all streams to application. */
1953 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
1954 ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
1955 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1956 ret = -ENOTCONN; /* Caused by app exiting. */
1957 goto error;
1958 } else if (ret == -EAGAIN) {
1959 /* Caused by timeout. */
1960 WARN("Communication with application %d timed out on send_stream for stream \"%s\" of channel \"%s\" of session \"%" PRIu64 "\".",
1961 app->pid, stream->name, ua_chan->name,
1962 ua_sess->tracing_id);
1963 /*
1964 * Treat this the same way as an application that is
1965 * exiting.
1966 */
1967 ret = -ENOTCONN;
1968 } else if (ret < 0) {
1969 goto error;
1970 }
1971 /* We don't need the stream anymore once sent to the tracer. */
1972 cds_list_del(&stream->list);
1973 delete_ust_app_stream(-1, stream, app);
1974 }
1975 /* Flag the channel that it is sent to the application. */
1976 ua_chan->is_sent = 1;
1977
1978 error:
1979 health_code_update();
1980 return ret;
1981 }
1982
/*
 * Create the specified event onto the UST tracer for a UST session.
 *
 * Should be called with session mutex held.
 *
 * After creation, the event's filter and exclusions (when present) are
 * pushed to the tracer, and the event is explicitly enabled if it was
 * flagged enabled (events are created disabled on the tracer side).
 * Returns 0 on success (including when the application is dead or timed
 * out), a negative error code otherwise.
 */
static
int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
{
	int ret = 0;

	health_code_update();

	/* Create UST event on tracer */
	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
			&ua_event->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			/* App teardown in progress; not an error for us. */
			ret = 0;
			DBG3("UST app create event failed. Application is dead: pid = %d, sock = %d",
					app->pid, app->sock);
		} else if (ret == -EAGAIN) {
			/* Communication timed out; treated as non-fatal. */
			ret = 0;
			WARN("UST app create event failed. Communication time out: pid = %d, sock = %d",
					app->pid, app->sock);
		} else {
			ERR("UST app create event '%s' failed with ret %d: pid = %d, sock = %d",
					ua_event->attr.name, ret, app->pid,
					app->sock);
		}
		goto error;
	}

	ua_event->handle = ua_event->obj->handle;

	DBG2("UST app event %s created successfully for pid:%d object = %p",
			ua_event->attr.name, app->pid, ua_event->obj);

	health_code_update();

	/* Set filter if one is present. */
	if (ua_event->filter) {
		ret = set_ust_object_filter(app, ua_event->filter, ua_event->obj);
		if (ret < 0) {
			goto error;
		}
	}

	/* Set exclusions for the event */
	if (ua_event->exclusion) {
		ret = set_ust_object_exclusions(app, ua_event->exclusion, ua_event->obj);
		if (ret < 0) {
			goto error;
		}
	}

	/*
	 * If the event is flagged enabled, explicitly enable it on the
	 * tracer, since events are created disabled. (Previous comment here
	 * said the opposite of what the code does.)
	 */
	if (ua_event->enabled) {
		/*
		 * We now need to explicitly enable the event, since it
		 * is now disabled at creation.
		 */
		ret = enable_ust_object(app, ua_event->obj);
		if (ret < 0) {
			/*
			 * If we hit an EPERM, something is wrong with our enable call. If
			 * we get an EEXIST, there is a problem on the tracer side since we
			 * just created it.
			 */
			switch (ret) {
			case -LTTNG_UST_ERR_PERM:
				/* Code flow problem */
				abort();
			case -LTTNG_UST_ERR_EXIST:
				/* It's OK for our use case. */
				ret = 0;
				break;
			default:
				break;
			}
			goto error;
		}
	}

error:
	health_code_update();
	return ret;
}
2073
2074 static int init_ust_event_notifier_from_event_rule(
2075 const struct lttng_event_rule *rule,
2076 struct lttng_ust_abi_event_notifier *event_notifier)
2077 {
2078 enum lttng_event_rule_status status;
2079 enum lttng_ust_abi_loglevel_type ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL;
2080 int loglevel = -1, ret = 0;
2081 const char *pattern;
2082
2083
2084 memset(event_notifier, 0, sizeof(*event_notifier));
2085
2086 if (lttng_event_rule_targets_agent_domain(rule)) {
2087 /*
2088 * Special event for agents
2089 * The actual meat of the event is in the filter that will be
2090 * attached later on.
2091 * Set the default values for the agent event.
2092 */
2093 pattern = event_get_default_agent_ust_name(
2094 lttng_event_rule_get_domain_type(rule));
2095 loglevel = 0;
2096 ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL;
2097 } else {
2098 const struct lttng_log_level_rule *log_level_rule;
2099
2100 LTTNG_ASSERT(lttng_event_rule_get_type(rule) ==
2101 LTTNG_EVENT_RULE_TYPE_USER_TRACEPOINT);
2102
2103 status = lttng_event_rule_user_tracepoint_get_name_pattern(rule, &pattern);
2104 if (status != LTTNG_EVENT_RULE_STATUS_OK) {
2105 /* At this point, this is a fatal error. */
2106 abort();
2107 }
2108
2109 status = lttng_event_rule_user_tracepoint_get_log_level_rule(
2110 rule, &log_level_rule);
2111 if (status == LTTNG_EVENT_RULE_STATUS_UNSET) {
2112 ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL;
2113 } else if (status == LTTNG_EVENT_RULE_STATUS_OK) {
2114 enum lttng_log_level_rule_status llr_status;
2115
2116 switch (lttng_log_level_rule_get_type(log_level_rule)) {
2117 case LTTNG_LOG_LEVEL_RULE_TYPE_EXACTLY:
2118 ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_SINGLE;
2119 llr_status = lttng_log_level_rule_exactly_get_level(
2120 log_level_rule, &loglevel);
2121 break;
2122 case LTTNG_LOG_LEVEL_RULE_TYPE_AT_LEAST_AS_SEVERE_AS:
2123 ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_RANGE;
2124 llr_status = lttng_log_level_rule_at_least_as_severe_as_get_level(
2125 log_level_rule, &loglevel);
2126 break;
2127 default:
2128 abort();
2129 }
2130
2131 LTTNG_ASSERT(llr_status == LTTNG_LOG_LEVEL_RULE_STATUS_OK);
2132 } else {
2133 /* At this point this is a fatal error. */
2134 abort();
2135 }
2136 }
2137
2138 event_notifier->event.instrumentation = LTTNG_UST_ABI_TRACEPOINT;
2139 ret = lttng_strncpy(event_notifier->event.name, pattern,
2140 LTTNG_UST_ABI_SYM_NAME_LEN - 1);
2141 if (ret) {
2142 ERR("Failed to copy event rule pattern to notifier: pattern = '%s' ",
2143 pattern);
2144 goto end;
2145 }
2146
2147 event_notifier->event.loglevel_type = ust_loglevel_type;
2148 event_notifier->event.loglevel = loglevel;
2149 end:
2150 return ret;
2151 }
2152
2153 /*
2154 * Create the specified event notifier against the user space tracer of a
2155 * given application.
2156 */
2157 static int create_ust_event_notifier(struct ust_app *app,
2158 struct ust_app_event_notifier_rule *ua_event_notifier_rule)
2159 {
2160 int ret = 0;
2161 enum lttng_condition_status condition_status;
2162 const struct lttng_condition *condition = NULL;
2163 struct lttng_ust_abi_event_notifier event_notifier;
2164 const struct lttng_event_rule *event_rule = NULL;
2165 unsigned int capture_bytecode_count = 0, i;
2166 enum lttng_condition_status cond_status;
2167 enum lttng_event_rule_type event_rule_type;
2168
2169 health_code_update();
2170 LTTNG_ASSERT(app->event_notifier_group.object);
2171
2172 condition = lttng_trigger_get_const_condition(
2173 ua_event_notifier_rule->trigger);
2174 LTTNG_ASSERT(condition);
2175 LTTNG_ASSERT(lttng_condition_get_type(condition) ==
2176 LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES);
2177
2178 condition_status = lttng_condition_event_rule_matches_get_rule(
2179 condition, &event_rule);
2180 LTTNG_ASSERT(condition_status == LTTNG_CONDITION_STATUS_OK);
2181
2182 LTTNG_ASSERT(event_rule);
2183
2184 event_rule_type = lttng_event_rule_get_type(event_rule);
2185 LTTNG_ASSERT(event_rule_type == LTTNG_EVENT_RULE_TYPE_USER_TRACEPOINT ||
2186 event_rule_type == LTTNG_EVENT_RULE_TYPE_JUL_LOGGING ||
2187 event_rule_type ==
2188 LTTNG_EVENT_RULE_TYPE_LOG4J_LOGGING ||
2189 event_rule_type ==
2190 LTTNG_EVENT_RULE_TYPE_PYTHON_LOGGING);
2191
2192 init_ust_event_notifier_from_event_rule(event_rule, &event_notifier);
2193 event_notifier.event.token = ua_event_notifier_rule->token;
2194 event_notifier.error_counter_index = ua_event_notifier_rule->error_counter_index;
2195
2196 /* Create UST event notifier against the tracer. */
2197 pthread_mutex_lock(&app->sock_lock);
2198 ret = lttng_ust_ctl_create_event_notifier(app->sock, &event_notifier,
2199 app->event_notifier_group.object,
2200 &ua_event_notifier_rule->obj);
2201 pthread_mutex_unlock(&app->sock_lock);
2202 if (ret < 0) {
2203 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
2204 ret = 0;
2205 DBG3("UST app create event notifier failed. Application is dead: pid = %d, sock = %d",
2206 app->pid, app->sock);
2207 } else if (ret == -EAGAIN) {
2208 ret = 0;
2209 WARN("UST app create event notifier failed. Communication time out: pid = %d, sock = %d",
2210 app->pid, app->sock);
2211 } else {
2212 ERR("UST app create event notifier '%s' failed with ret %d: pid = %d, sock = %d",
2213 event_notifier.event.name, ret, app->pid,
2214 app->sock);
2215 }
2216 goto error;
2217 }
2218
2219 ua_event_notifier_rule->handle = ua_event_notifier_rule->obj->handle;
2220
2221 DBG2("UST app event notifier %s created successfully: app = '%s': pid = %d, object = %p",
2222 event_notifier.event.name, app->name, app->pid,
2223 ua_event_notifier_rule->obj);
2224
2225 health_code_update();
2226
2227 /* Set filter if one is present. */
2228 if (ua_event_notifier_rule->filter) {
2229 ret = set_ust_object_filter(app, ua_event_notifier_rule->filter,
2230 ua_event_notifier_rule->obj);
2231 if (ret < 0) {
2232 goto error;
2233 }
2234 }
2235
2236 /* Set exclusions for the event. */
2237 if (ua_event_notifier_rule->exclusion) {
2238 ret = set_ust_object_exclusions(app,
2239 ua_event_notifier_rule->exclusion,
2240 ua_event_notifier_rule->obj);
2241 if (ret < 0) {
2242 goto error;
2243 }
2244 }
2245
2246 /* Set the capture bytecodes. */
2247 cond_status = lttng_condition_event_rule_matches_get_capture_descriptor_count(
2248 condition, &capture_bytecode_count);
2249 LTTNG_ASSERT(cond_status == LTTNG_CONDITION_STATUS_OK);
2250
2251 for (i = 0; i < capture_bytecode_count; i++) {
2252 const struct lttng_bytecode *capture_bytecode =
2253 lttng_condition_event_rule_matches_get_capture_bytecode_at_index(
2254 condition, i);
2255
2256 ret = set_ust_capture(app, capture_bytecode, i,
2257 ua_event_notifier_rule->obj);
2258 if (ret < 0) {
2259 goto error;
2260 }
2261 }
2262
2263 /*
2264 * We now need to explicitly enable the event, since it
2265 * is disabled at creation.
2266 */
2267 ret = enable_ust_object(app, ua_event_notifier_rule->obj);
2268 if (ret < 0) {
2269 /*
2270 * If we hit an EPERM, something is wrong with our enable call.
2271 * If we get an EEXIST, there is a problem on the tracer side
2272 * since we just created it.
2273 */
2274 switch (ret) {
2275 case -LTTNG_UST_ERR_PERM:
2276 /* Code flow problem. */
2277 abort();
2278 case -LTTNG_UST_ERR_EXIST:
2279 /* It's OK for our use case. */
2280 ret = 0;
2281 break;
2282 default:
2283 break;
2284 }
2285
2286 goto error;
2287 }
2288
2289 ua_event_notifier_rule->enabled = true;
2290
2291 error:
2292 health_code_update();
2293 return ret;
2294 }
2295
2296 /*
2297 * Copy data between an UST app event and a LTT event.
2298 */
2299 static void shadow_copy_event(struct ust_app_event *ua_event,
2300 struct ltt_ust_event *uevent)
2301 {
2302 size_t exclusion_alloc_size;
2303
2304 strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
2305 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
2306
2307 ua_event->enabled = uevent->enabled;
2308
2309 /* Copy event attributes */
2310 memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));
2311
2312 /* Copy filter bytecode */
2313 if (uevent->filter) {
2314 ua_event->filter = lttng_bytecode_copy(uevent->filter);
2315 /* Filter might be NULL here in case of ENONEM. */
2316 }
2317
2318 /* Copy exclusion data */
2319 if (uevent->exclusion) {
2320 exclusion_alloc_size = sizeof(struct lttng_event_exclusion) +
2321 LTTNG_UST_ABI_SYM_NAME_LEN * uevent->exclusion->count;
2322 ua_event->exclusion = (lttng_event_exclusion *) zmalloc(exclusion_alloc_size);
2323 if (ua_event->exclusion == NULL) {
2324 PERROR("malloc");
2325 } else {
2326 memcpy(ua_event->exclusion, uevent->exclusion,
2327 exclusion_alloc_size);
2328 }
2329 }
2330 }
2331
2332 /*
2333 * Copy data between an UST app channel and a LTT channel.
2334 */
2335 static void shadow_copy_channel(struct ust_app_channel *ua_chan,
2336 struct ltt_ust_channel *uchan)
2337 {
2338 DBG2("UST app shadow copy of channel %s started", ua_chan->name);
2339
2340 strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
2341 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
2342
2343 ua_chan->tracefile_size = uchan->tracefile_size;
2344 ua_chan->tracefile_count = uchan->tracefile_count;
2345
2346 /* Copy event attributes since the layout is different. */
2347 ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
2348 ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
2349 ua_chan->attr.overwrite = uchan->attr.overwrite;
2350 ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
2351 ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
2352 ua_chan->monitor_timer_interval = uchan->monitor_timer_interval;
2353 ua_chan->attr.output = (lttng_ust_abi_output) uchan->attr.output;
2354 ua_chan->attr.blocking_timeout = uchan->attr.u.s.blocking_timeout;
2355
2356 /*
2357 * Note that the attribute channel type is not set since the channel on the
2358 * tracing registry side does not have this information.
2359 */
2360
2361 ua_chan->enabled = uchan->enabled;
2362 ua_chan->tracing_channel_id = uchan->id;
2363
2364 DBG3("UST app shadow copy of channel %s done", ua_chan->name);
2365 }
2366
/*
 * Copy data between a UST app session and a regular LTT session.
 *
 * Populates ua_sess (ids, credentials, buffer type, consumer output,
 * trace output path, shm paths) from usess and app. Takes a reference on
 * usess->consumer, which is dropped again on the error path.
 */
static void shadow_copy_session(struct ust_app_session *ua_sess,
		struct ltt_ust_session *usess, struct ust_app *app)
{
	struct tm *timeinfo;
	char datetime[16];
	int ret;
	char tmp_shm_path[PATH_MAX];

	/* App registration timestamp; used to build unique per-PID paths. */
	timeinfo = localtime(&app->registration_time);
	strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);

	DBG2("Shadow copy of session handle %d", ua_sess->handle);

	ua_sess->tracing_id = usess->id;
	ua_sess->id = get_next_session_id();
	/* Real credentials come from the app, effective ones from the session. */
	LTTNG_OPTIONAL_SET(&ua_sess->real_credentials.uid, app->uid);
	LTTNG_OPTIONAL_SET(&ua_sess->real_credentials.gid, app->gid);
	LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.uid, usess->uid);
	LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.gid, usess->gid);
	ua_sess->buffer_type = usess->buffer_type;
	ua_sess->bits_per_long = app->bits_per_long;

	/* There is only one consumer object per session possible. */
	consumer_output_get(usess->consumer);
	ua_sess->consumer = usess->consumer;

	ua_sess->output_traces = usess->output_traces;
	ua_sess->live_timer_interval = usess->live_timer_interval;
	copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
			&usess->metadata_attr);

	/* Build the trace output path according to the buffering scheme. */
	switch (ua_sess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
				DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
				datetime);
		break;
	case LTTNG_BUFFER_PER_UID:
		ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
				DEFAULT_UST_TRACE_UID_PATH,
				lttng_credentials_get_uid(&ua_sess->real_credentials),
				app->bits_per_long);
		break;
	default:
		/* Unknown buffer type is a code flow problem. */
		abort();
		goto error;
	}
	if (ret < 0) {
		/* NOTE(review): message says asprintf but snprintf is used above. */
		PERROR("asprintf UST shadow copy session");
		abort();
		goto error;
	}

	/* Copy shm paths, forcing NULL-termination. */
	strncpy(ua_sess->root_shm_path, usess->root_shm_path,
			sizeof(ua_sess->root_shm_path));
	ua_sess->root_shm_path[sizeof(ua_sess->root_shm_path) - 1] = '\0';
	strncpy(ua_sess->shm_path, usess->shm_path,
			sizeof(ua_sess->shm_path));
	ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
	if (ua_sess->shm_path[0]) {
		/* Append a per-PID or per-UID suffix to the shm path. */
		switch (ua_sess->buffer_type) {
		case LTTNG_BUFFER_PER_PID:
			ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
					"/" DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s",
					app->name, app->pid, datetime);
			break;
		case LTTNG_BUFFER_PER_UID:
			ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
					"/" DEFAULT_UST_TRACE_UID_PATH,
					app->uid, app->bits_per_long);
			break;
		default:
			abort();
			goto error;
		}
		if (ret < 0) {
			PERROR("sprintf UST shadow copy session");
			abort();
			goto error;
		}
		/* Bounded concatenation; result stays NULL-terminated. */
		strncat(ua_sess->shm_path, tmp_shm_path,
				sizeof(ua_sess->shm_path) - strlen(ua_sess->shm_path) - 1);
		ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
	}
	return;

error:
	/* Drop the consumer reference taken above. */
	consumer_output_put(ua_sess->consumer);
}
2459
/*
 * Lookup session wrapper.
 *
 * Fills iter with the result of looking up usess->id in the app's
 * session hash table; callers extract the node from iter.
 */
static
void __lookup_session_by_app(const struct ltt_ust_session *usess,
		struct ust_app *app, struct lttng_ht_iter *iter)
{
	/* Get right UST app session from app */
	lttng_ht_lookup(app->sessions, &usess->id, iter);
}
2470
2471 /*
2472 * Return ust app session from the app session hashtable using the UST session
2473 * id.
2474 */
2475 static struct ust_app_session *lookup_session_by_app(
2476 const struct ltt_ust_session *usess, struct ust_app *app)
2477 {
2478 struct lttng_ht_iter iter;
2479 struct lttng_ht_node_u64 *node;
2480
2481 __lookup_session_by_app(usess, app, &iter);
2482 node = lttng_ht_iter_get_node_u64(&iter);
2483 if (node == NULL) {
2484 goto error;
2485 }
2486
2487 return caa_container_of(node, struct ust_app_session, node);
2488
2489 error:
2490 return NULL;
2491 }
2492
/*
 * Setup buffer registry per PID for the given session and application. If none
 * is found, a new one is created, added to the global registry and
 * initialized. If regp is valid, it's set with the newly created object.
 *
 * Return 0 on success or else a negative value.
 */
static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
		struct ust_app *app, struct buffer_reg_pid **regp)
{
	int ret = 0;
	struct buffer_reg_pid *reg_pid;

	LTTNG_ASSERT(ua_sess);
	LTTNG_ASSERT(app);

	/* The buffer registry hash tables are RCU-protected. */
	rcu_read_lock();

	reg_pid = buffer_reg_pid_find(ua_sess->id);
	if (!reg_pid) {
		/*
		 * This is the create channel path meaning that if there is NO
		 * registry available, we have to create one for this session.
		 */
		ret = buffer_reg_pid_create(ua_sess->id, &reg_pid,
				ua_sess->root_shm_path, ua_sess->shm_path);
		if (ret < 0) {
			goto error;
		}
	} else {
		/* Registry already exists; just report it back via regp. */
		goto end;
	}

	/* Initialize registry with the app's ABI description. */
	ret = ust_registry_session_init(&reg_pid->registry->reg.ust, app,
			app->bits_per_long, app->uint8_t_alignment,
			app->uint16_t_alignment, app->uint32_t_alignment,
			app->uint64_t_alignment, app->long_alignment,
			app->byte_order, app->version.major, app->version.minor,
			reg_pid->root_shm_path, reg_pid->shm_path,
			lttng_credentials_get_uid(&ua_sess->effective_credentials),
			lttng_credentials_get_gid(&ua_sess->effective_credentials),
			ua_sess->tracing_id,
			app->uid);
	if (ret < 0) {
		/*
		 * reg_pid->registry->reg.ust is NULL upon error, so we need to
		 * destroy the buffer registry, because it is always expected
		 * that if the buffer registry can be found, its ust registry is
		 * non-NULL.
		 */
		buffer_reg_pid_destroy(reg_pid);
		goto error;
	}

	/* Publish the new registry in the global per-PID registry. */
	buffer_reg_pid_add(reg_pid);

	DBG3("UST app buffer registry per PID created successfully");

end:
	if (regp) {
		*regp = reg_pid;
	}
error:
	rcu_read_unlock();
	return ret;
}
2560
/*
 * Setup buffer registry per UID for the given session and application. If none
 * is found, a new one is created, added to the global registry and
 * initialized. If regp is valid, it's set with the newly created object.
 *
 * Return 0 on success or else a negative value.
 */
static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
		struct ust_app_session *ua_sess,
		struct ust_app *app, struct buffer_reg_uid **regp)
{
	int ret = 0;
	struct buffer_reg_uid *reg_uid;

	LTTNG_ASSERT(usess);
	LTTNG_ASSERT(app);

	/* The buffer registry hash tables are RCU-protected. */
	rcu_read_lock();

	/* Per-UID registries are keyed by session id, bitness and uid. */
	reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
	if (!reg_uid) {
		/*
		 * This is the create channel path meaning that if there is NO
		 * registry available, we have to create one for this session.
		 */
		ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
				LTTNG_DOMAIN_UST, &reg_uid,
				ua_sess->root_shm_path, ua_sess->shm_path);
		if (ret < 0) {
			goto error;
		}
	} else {
		/* Registry already exists; just report it back via regp. */
		goto end;
	}

	/* Initialize registry with the app's ABI description. */
	ret = ust_registry_session_init(&reg_uid->registry->reg.ust, NULL,
			app->bits_per_long, app->uint8_t_alignment,
			app->uint16_t_alignment, app->uint32_t_alignment,
			app->uint64_t_alignment, app->long_alignment,
			app->byte_order, app->version.major,
			app->version.minor, reg_uid->root_shm_path,
			reg_uid->shm_path, usess->uid, usess->gid,
			ua_sess->tracing_id, app->uid);
	if (ret < 0) {
		/*
		 * reg_uid->registry->reg.ust is NULL upon error, so we need to
		 * destroy the buffer registry, because it is always expected
		 * that if the buffer registry can be found, its ust registry is
		 * non-NULL.
		 */
		buffer_reg_uid_destroy(reg_uid, NULL);
		goto error;
	}
	/* Add node to teardown list of the session. */
	cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);

	/* Publish the new registry in the global per-UID registry. */
	buffer_reg_uid_add(reg_uid);

	DBG3("UST app buffer registry per UID created successfully");
end:
	if (regp) {
		*regp = reg_uid;
	}
error:
	rcu_read_unlock();
	return ret;
}
2629
/*
 * Create a session on the tracer side for the given app.
 *
 * On success, ua_sess_ptr is populated with the session pointer or else left
 * untouched. If the session was created, is_created is set to 1. On error,
 * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
 * be NULL.
 *
 * Returns 0 on success or else a negative code which is either -ENOMEM or
 * -ENOTCONN which is the default code if the lttng_ust_ctl_create_session fails.
 */
static int find_or_create_ust_app_session(struct ltt_ust_session *usess,
		struct ust_app *app, struct ust_app_session **ua_sess_ptr,
		int *is_created)
{
	int ret, created = 0;
	struct ust_app_session *ua_sess;

	LTTNG_ASSERT(usess);
	LTTNG_ASSERT(app);
	LTTNG_ASSERT(ua_sess_ptr);

	health_code_update();

	/* Reuse an existing app session for this tracing session, if any. */
	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
				app->pid, usess->id);
		ua_sess = alloc_ust_app_session();
		if (ua_sess == NULL) {
			/* Only malloc can fail, so something is really wrong. */
			ret = -ENOMEM;
			goto error;
		}
		shadow_copy_session(ua_sess, usess, app);
		created = 1;
	}

	/* Make sure the matching buffer registry exists (idempotent). */
	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		/* Init local registry. */
		ret = setup_buffer_reg_pid(ua_sess, app, NULL);
		if (ret < 0) {
			delete_ust_app_session(-1, ua_sess, app);
			goto error;
		}
		break;
	case LTTNG_BUFFER_PER_UID:
		/* Look for a global registry. If none exists, create one. */
		ret = setup_buffer_reg_uid(usess, ua_sess, app, NULL);
		if (ret < 0) {
			delete_ust_app_session(-1, ua_sess, app);
			goto error;
		}
		break;
	default:
		abort();
		ret = -EINVAL;
		goto error;
	}

	health_code_update();

	/* A handle of -1 means no tracer-side session exists yet. */
	if (ua_sess->handle == -1) {
		/* Serialize tracer communication on the app socket. */
		pthread_mutex_lock(&app->sock_lock);
		ret = lttng_ust_ctl_create_session(app->sock);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0) {
			if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
				DBG("UST app creating session failed. Application is dead: pid = %d, sock = %d",
						app->pid, app->sock);
				ret = 0;
			} else if (ret == -EAGAIN) {
				DBG("UST app creating session failed. Communication time out: pid = %d, sock = %d",
						app->pid, app->sock);
				ret = 0;
			} else {
				ERR("UST app creating session failed with ret %d: pid = %d, sock =%d",
						ret, app->pid, app->sock);
			}
			delete_ust_app_session(-1, ua_sess, app);
			if (ret != -ENOMEM) {
				/*
				 * Tracer is probably gone or got an internal error so let's
				 * behave like it will soon unregister or not usable.
				 */
				ret = -ENOTCONN;
			}
			goto error;
		}

		/* On success, the return value is the tracer session handle. */
		ua_sess->handle = ret;

		/* Add ust app session to app's HT */
		lttng_ht_node_init_u64(&ua_sess->node,
				ua_sess->tracing_id);
		lttng_ht_add_unique_u64(app->sessions, &ua_sess->node);
		lttng_ht_node_init_ulong(&ua_sess->ust_objd_node, ua_sess->handle);
		lttng_ht_add_unique_ulong(app->ust_sessions_objd,
				&ua_sess->ust_objd_node);

		DBG2("UST app session created successfully with handle %d", ret);
	}

	*ua_sess_ptr = ua_sess;
	if (is_created) {
		*is_created = created;
	}

	/* Everything went well. */
	ret = 0;

error:
	health_code_update();
	return ret;
}
2746
2747 /*
2748 * Match function for a hash table lookup of ust_app_ctx.
2749 *
2750 * It matches an ust app context based on the context type and, in the case
2751 * of perf counters, their name.
2752 */
2753 static int ht_match_ust_app_ctx(struct cds_lfht_node *node, const void *_key)
2754 {
2755 struct ust_app_ctx *ctx;
2756 const struct lttng_ust_context_attr *key;
2757
2758 LTTNG_ASSERT(node);
2759 LTTNG_ASSERT(_key);
2760
2761 ctx = caa_container_of(node, struct ust_app_ctx, node.node);
2762 key = (lttng_ust_context_attr *) _key;
2763
2764 /* Context type */
2765 if (ctx->ctx.ctx != key->ctx) {
2766 goto no_match;
2767 }
2768
2769 switch(key->ctx) {
2770 case LTTNG_UST_ABI_CONTEXT_PERF_THREAD_COUNTER:
2771 if (strncmp(key->u.perf_counter.name,
2772 ctx->ctx.u.perf_counter.name,
2773 sizeof(key->u.perf_counter.name))) {
2774 goto no_match;
2775 }
2776 break;
2777 case LTTNG_UST_ABI_CONTEXT_APP_CONTEXT:
2778 if (strcmp(key->u.app_ctx.provider_name,
2779 ctx->ctx.u.app_ctx.provider_name) ||
2780 strcmp(key->u.app_ctx.ctx_name,
2781 ctx->ctx.u.app_ctx.ctx_name)) {
2782 goto no_match;
2783 }
2784 break;
2785 default:
2786 break;
2787 }
2788
2789 /* Match. */
2790 return 1;
2791
2792 no_match:
2793 return 0;
2794 }
2795
2796 /*
2797 * Lookup for an ust app context from an lttng_ust_context.
2798 *
2799 * Must be called while holding RCU read side lock.
2800 * Return an ust_app_ctx object or NULL on error.
2801 */
2802 static
2803 struct ust_app_ctx *find_ust_app_context(struct lttng_ht *ht,
2804 struct lttng_ust_context_attr *uctx)
2805 {
2806 struct lttng_ht_iter iter;
2807 struct lttng_ht_node_ulong *node;
2808 struct ust_app_ctx *app_ctx = NULL;
2809
2810 LTTNG_ASSERT(uctx);
2811 LTTNG_ASSERT(ht);
2812
2813 /* Lookup using the lttng_ust_context_type and a custom match fct. */
2814 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) uctx->ctx, lttng_ht_seed),
2815 ht_match_ust_app_ctx, uctx, &iter.iter);
2816 node = lttng_ht_iter_get_node_ulong(&iter);
2817 if (!node) {
2818 goto end;
2819 }
2820
2821 app_ctx = caa_container_of(node, struct ust_app_ctx, node);
2822
2823 end:
2824 return app_ctx;
2825 }
2826
2827 /*
2828 * Create a context for the channel on the tracer.
2829 *
2830 * Called with UST app session lock held and a RCU read side lock.
2831 */
2832 static
2833 int create_ust_app_channel_context(struct ust_app_channel *ua_chan,
2834 struct lttng_ust_context_attr *uctx,
2835 struct ust_app *app)
2836 {
2837 int ret = 0;
2838 struct ust_app_ctx *ua_ctx;
2839
2840 DBG2("UST app adding context to channel %s", ua_chan->name);
2841
2842 ua_ctx = find_ust_app_context(ua_chan->ctx, uctx);
2843 if (ua_ctx) {
2844 ret = -EEXIST;
2845 goto error;
2846 }
2847
2848 ua_ctx = alloc_ust_app_ctx(uctx);
2849 if (ua_ctx == NULL) {
2850 /* malloc failed */
2851 ret = -ENOMEM;
2852 goto error;
2853 }
2854
2855 lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
2856 lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
2857 cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
2858
2859 ret = create_ust_channel_context(ua_chan, ua_ctx, app);
2860 if (ret < 0) {
2861 goto error;
2862 }
2863
2864 error:
2865 return ret;
2866 }
2867
2868 /*
2869 * Enable on the tracer side a ust app event for the session and channel.
2870 *
2871 * Called with UST app session lock held.
2872 */
2873 static
2874 int enable_ust_app_event(struct ust_app_session *ua_sess,
2875 struct ust_app_event *ua_event, struct ust_app *app)
2876 {
2877 int ret;
2878
2879 ret = enable_ust_object(app, ua_event->obj);
2880 if (ret < 0) {
2881 goto error;
2882 }
2883
2884 ua_event->enabled = 1;
2885
2886 error:
2887 return ret;
2888 }
2889
2890 /*
2891 * Disable on the tracer side a ust app event for the session and channel.
2892 */
2893 static int disable_ust_app_event(struct ust_app_session *ua_sess,
2894 struct ust_app_event *ua_event, struct ust_app *app)
2895 {
2896 int ret;
2897
2898 ret = disable_ust_object(app, ua_event->obj);
2899 if (ret < 0) {
2900 goto error;
2901 }
2902
2903 ua_event->enabled = 0;
2904
2905 error:
2906 return ret;
2907 }
2908
2909 /*
2910 * Lookup ust app channel for session and disable it on the tracer side.
2911 */
2912 static
2913 int disable_ust_app_channel(struct ust_app_session *ua_sess,
2914 struct ust_app_channel *ua_chan, struct ust_app *app)
2915 {
2916 int ret;
2917
2918 ret = disable_ust_channel(app, ua_sess, ua_chan);
2919 if (ret < 0) {
2920 goto error;
2921 }
2922
2923 ua_chan->enabled = 0;
2924
2925 error:
2926 return ret;
2927 }
2928
2929 /*
2930 * Lookup ust app channel for session and enable it on the tracer side. This
2931 * MUST be called with a RCU read side lock acquired.
2932 */
2933 static int enable_ust_app_channel(struct ust_app_session *ua_sess,
2934 struct ltt_ust_channel *uchan, struct ust_app *app)
2935 {
2936 int ret = 0;
2937 struct lttng_ht_iter iter;
2938 struct lttng_ht_node_str *ua_chan_node;
2939 struct ust_app_channel *ua_chan;
2940
2941 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
2942 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
2943 if (ua_chan_node == NULL) {
2944 DBG2("Unable to find channel %s in ust session id %" PRIu64,
2945 uchan->name, ua_sess->tracing_id);
2946 goto error;
2947 }
2948
2949 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
2950
2951 ret = enable_ust_channel(app, ua_sess, ua_chan);
2952 if (ret < 0) {
2953 goto error;
2954 }
2955
2956 error:
2957 return ret;
2958 }
2959
/*
 * Ask the consumer to create a channel and get it if successful.
 *
 * Called with UST app session lock held.
 *
 * Return 0 on success or else a negative value.
 */
static int do_consumer_create_channel(struct ltt_ust_session *usess,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
		int bitness, struct ust_registry_session *registry,
		uint64_t trace_archive_id)
{
	/* NOTE(review): trace_archive_id is not referenced in this body. */
	int ret;
	unsigned int nb_fd = 0;
	struct consumer_socket *socket;

	LTTNG_ASSERT(usess);
	LTTNG_ASSERT(ua_sess);
	LTTNG_ASSERT(ua_chan);
	LTTNG_ASSERT(registry);

	rcu_read_lock();
	health_code_update();

	/* Get the right consumer socket for the application. */
	socket = consumer_find_socket_by_bitness(bitness, usess->consumer);
	if (!socket) {
		ret = -EINVAL;
		goto error;
	}

	health_code_update();

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create channel");
		goto error;
	}

	/*
	 * Ask consumer to create channel. The consumer will return the number of
	 * stream we have to expect.
	 */
	ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
			registry, usess->current_trace_chunk);
	if (ret < 0) {
		goto error_ask;
	}

	/*
	 * Compute the number of fd needed before receiving them. It must be 2 per
	 * stream (2 being the default value here).
	 */
	nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;

	/* Reserve the amount of file descriptor we need. */
	ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create channel");
		goto error_fd_get_stream;
	}

	health_code_update();

	/*
	 * Now get the channel from the consumer. This call will populate the stream
	 * list of that channel and set the ust objects.
	 */
	if (usess->consumer->enabled) {
		ret = ust_consumer_get_channel(socket, ua_chan);
		if (ret < 0) {
			goto error_destroy;
		}
	}

	rcu_read_unlock();
	return 0;

	/* Error unwind: release fds in the reverse order they were reserved. */
error_destroy:
	lttng_fd_put(LTTNG_FD_APPS, nb_fd);
error_fd_get_stream:
	/*
	 * Initiate a destroy channel on the consumer since we had an error
	 * handling it on our side. The return value is of no importance since we
	 * already have a ret value set by the previous error that we need to
	 * return.
	 */
	(void) ust_consumer_destroy_channel(socket, ua_chan);
error_ask:
	lttng_fd_put(LTTNG_FD_APPS, 1);
error:
	health_code_update();
	rcu_read_unlock();
	return ret;
}
3056
3057 /*
3058 * Duplicate the ust data object of the ust app stream and save it in the
3059 * buffer registry stream.
3060 *
3061 * Return 0 on success or else a negative value.
3062 */
3063 static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
3064 struct ust_app_stream *stream)
3065 {
3066 int ret;
3067
3068 LTTNG_ASSERT(reg_stream);
3069 LTTNG_ASSERT(stream);
3070
3071 /* Duplicating a stream requires 2 new fds. Reserve them. */
3072 ret = lttng_fd_get(LTTNG_FD_APPS, 2);
3073 if (ret < 0) {
3074 ERR("Exhausted number of available FD upon duplicate stream");
3075 goto error;
3076 }
3077
3078 /* Duplicate object for stream once the original is in the registry. */
3079 ret = lttng_ust_ctl_duplicate_ust_object_data(&stream->obj,
3080 reg_stream->obj.ust);
3081 if (ret < 0) {
3082 ERR("Duplicate stream obj from %p to %p failed with ret %d",
3083 reg_stream->obj.ust, stream->obj, ret);
3084 lttng_fd_put(LTTNG_FD_APPS, 2);
3085 goto error;
3086 }
3087 stream->handle = stream->obj->handle;
3088
3089 error:
3090 return ret;
3091 }
3092
3093 /*
3094 * Duplicate the ust data object of the ust app. channel and save it in the
3095 * buffer registry channel.
3096 *
3097 * Return 0 on success or else a negative value.
3098 */
3099 static int duplicate_channel_object(struct buffer_reg_channel *buf_reg_chan,
3100 struct ust_app_channel *ua_chan)
3101 {
3102 int ret;
3103
3104 LTTNG_ASSERT(buf_reg_chan);
3105 LTTNG_ASSERT(ua_chan);
3106
3107 /* Duplicating a channel requires 1 new fd. Reserve it. */
3108 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
3109 if (ret < 0) {
3110 ERR("Exhausted number of available FD upon duplicate channel");
3111 goto error_fd_get;
3112 }
3113
3114 /* Duplicate object for stream once the original is in the registry. */
3115 ret = lttng_ust_ctl_duplicate_ust_object_data(&ua_chan->obj, buf_reg_chan->obj.ust);
3116 if (ret < 0) {
3117 ERR("Duplicate channel obj from %p to %p failed with ret: %d",
3118 buf_reg_chan->obj.ust, ua_chan->obj, ret);
3119 goto error;
3120 }
3121 ua_chan->handle = ua_chan->obj->handle;
3122
3123 return 0;
3124
3125 error:
3126 lttng_fd_put(LTTNG_FD_APPS, 1);
3127 error_fd_get:
3128 return ret;
3129 }
3130
3131 /*
3132 * For a given channel buffer registry, setup all streams of the given ust
3133 * application channel.
3134 *
3135 * Return 0 on success or else a negative value.
3136 */
3137 static int setup_buffer_reg_streams(struct buffer_reg_channel *buf_reg_chan,
3138 struct ust_app_channel *ua_chan,
3139 struct ust_app *app)
3140 {
3141 int ret = 0;
3142 struct ust_app_stream *stream, *stmp;
3143
3144 LTTNG_ASSERT(buf_reg_chan);
3145 LTTNG_ASSERT(ua_chan);
3146
3147 DBG2("UST app setup buffer registry stream");
3148
3149 /* Send all streams to application. */
3150 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
3151 struct buffer_reg_stream *reg_stream;
3152
3153 ret = buffer_reg_stream_create(&reg_stream);
3154 if (ret < 0) {
3155 goto error;
3156 }
3157
3158 /*
3159 * Keep original pointer and nullify it in the stream so the delete
3160 * stream call does not release the object.
3161 */
3162 reg_stream->obj.ust = stream->obj;
3163 stream->obj = NULL;
3164 buffer_reg_stream_add(reg_stream, buf_reg_chan);
3165
3166 /* We don't need the streams anymore. */
3167 cds_list_del(&stream->list);
3168 delete_ust_app_stream(-1, stream, app);
3169 }
3170
3171 error:
3172 return ret;
3173 }
3174
3175 /*
3176 * Create a buffer registry channel for the given session registry and
3177 * application channel object. If regp pointer is valid, it's set with the
3178 * created object. Important, the created object is NOT added to the session
3179 * registry hash table.
3180 *
3181 * Return 0 on success else a negative value.
3182 */
3183 static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
3184 struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp)
3185 {
3186 int ret;
3187 struct buffer_reg_channel *buf_reg_chan = NULL;
3188
3189 LTTNG_ASSERT(reg_sess);
3190 LTTNG_ASSERT(ua_chan);
3191
3192 DBG2("UST app creating buffer registry channel for %s", ua_chan->name);
3193
3194 /* Create buffer registry channel. */
3195 ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &buf_reg_chan);
3196 if (ret < 0) {
3197 goto error_create;
3198 }
3199 LTTNG_ASSERT(buf_reg_chan);
3200 buf_reg_chan->consumer_key = ua_chan->key;
3201 buf_reg_chan->subbuf_size = ua_chan->attr.subbuf_size;
3202 buf_reg_chan->num_subbuf = ua_chan->attr.num_subbuf;
3203
3204 /* Create and add a channel registry to session. */
3205 ret = ust_registry_channel_add(reg_sess->reg.ust,
3206 ua_chan->tracing_channel_id);
3207 if (ret < 0) {
3208 goto error;
3209 }
3210 buffer_reg_channel_add(reg_sess, buf_reg_chan);
3211
3212 if (regp) {
3213 *regp = buf_reg_chan;
3214 }
3215
3216 return 0;
3217
3218 error:
3219 /* Safe because the registry channel object was not added to any HT. */
3220 buffer_reg_channel_destroy(buf_reg_chan, LTTNG_DOMAIN_UST);
3221 error_create:
3222 return ret;
3223 }
3224
3225 /*
3226 * Setup buffer registry channel for the given session registry and application
3227 * channel object. If regp pointer is valid, it's set with the created object.
3228 *
3229 * Return 0 on success else a negative value.
3230 */
3231 static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
3232 struct ust_app_channel *ua_chan, struct buffer_reg_channel *buf_reg_chan,
3233 struct ust_app *app)
3234 {
3235 int ret;
3236
3237 LTTNG_ASSERT(reg_sess);
3238 LTTNG_ASSERT(buf_reg_chan);
3239 LTTNG_ASSERT(ua_chan);
3240 LTTNG_ASSERT(ua_chan->obj);
3241
3242 DBG2("UST app setup buffer registry channel for %s", ua_chan->name);
3243
3244 /* Setup all streams for the registry. */
3245 ret = setup_buffer_reg_streams(buf_reg_chan, ua_chan, app);
3246 if (ret < 0) {
3247 goto error;
3248 }
3249
3250 buf_reg_chan->obj.ust = ua_chan->obj;
3251 ua_chan->obj = NULL;
3252
3253 return 0;
3254
3255 error:
3256 buffer_reg_channel_remove(reg_sess, buf_reg_chan);
3257 buffer_reg_channel_destroy(buf_reg_chan, LTTNG_DOMAIN_UST);
3258 return ret;
3259 }
3260
/*
 * Send buffer registry channel to the application.
 *
 * The channel's ust object is duplicated from the registry into the app
 * channel, then each registry stream is duplicated and sent in turn while
 * the registry's stream list lock is held.
 *
 * Return 0 on success else a negative value. -ENOTCONN indicates the
 * application exited (or the communication timed out) concurrently.
 */
static int send_channel_uid_to_ust(struct buffer_reg_channel *buf_reg_chan,
		struct ust_app *app, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct buffer_reg_stream *reg_stream;

	LTTNG_ASSERT(buf_reg_chan);
	LTTNG_ASSERT(app);
	LTTNG_ASSERT(ua_sess);
	LTTNG_ASSERT(ua_chan);

	DBG("UST app sending buffer registry channel to ust sock %d", app->sock);

	/* Clone the registry channel's ust object into the app channel. */
	ret = duplicate_channel_object(buf_reg_chan, ua_chan);
	if (ret < 0) {
		goto error;
	}

	/* Send channel to the application. */
	ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
	if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
		ret = -ENOTCONN; /* Caused by app exiting. */
		goto error;
	} else if (ret == -EAGAIN) {
		/* Caused by timeout. */
		WARN("Communication with application %d timed out on send_channel for channel \"%s\" of session \"%" PRIu64 "\".",
				app->pid, ua_chan->name, ua_sess->tracing_id);
		/* Treat this the same way as an application that is exiting. */
		ret = -ENOTCONN;
		goto error;
	} else if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Send all streams to application. */
	pthread_mutex_lock(&buf_reg_chan->stream_list_lock);
	cds_list_for_each_entry(reg_stream, &buf_reg_chan->streams, lnode) {
		/*
		 * Temporary stack stream: duplicate_stream_object() only fills
		 * 'obj' and 'handle'.
		 * NOTE(review): 'stream.name' is never initialized here yet is
		 * printed in the timeout warning below — confirm intended.
		 */
		struct ust_app_stream stream;

		ret = duplicate_stream_object(reg_stream, &stream);
		if (ret < 0) {
			goto error_stream_unlock;
		}

		ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
		if (ret < 0) {
			if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
				ret = -ENOTCONN; /* Caused by app exiting. */
			} else if (ret == -EAGAIN) {
				/*
				 * Caused by timeout.
				 * Treat this the same way as an application
				 * that is exiting.
				 */
				WARN("Communication with application %d timed out on send_stream for stream \"%s\" of channel \"%s\" of session \"%" PRIu64 "\".",
						app->pid, stream.name,
						ua_chan->name,
						ua_sess->tracing_id);
				ret = -ENOTCONN;
			}
			(void) release_ust_app_stream(-1, &stream, app);
			goto error_stream_unlock;
		}

		/*
		 * The return value is not important here. This function will output an
		 * error if needed.
		 */
		(void) release_ust_app_stream(-1, &stream, app);
	}
	ua_chan->is_sent = 1;

error_stream_unlock:
	pthread_mutex_unlock(&buf_reg_chan->stream_list_lock);
error:
	return ret;
}
3346
/*
 * Create and send to the application the created buffers with per UID buffers.
 *
 * The per-UID buffer registry is created lazily: the first application of a
 * given (session, bitness, uid) triple creates the registry channel and the
 * consumer-side buffers; subsequent applications only get the buffers sent.
 *
 * This MUST be called with a RCU read side lock acquired.
 * The session list lock and the session's lock must be acquired.
 *
 * Return 0 on success else a negative value (-ENOTCONN if the application
 * exited concurrently).
 */
static int create_channel_per_uid(struct ust_app *app,
		struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct buffer_reg_uid *reg_uid;
	struct buffer_reg_channel *buf_reg_chan;
	struct ltt_session *session = NULL;
	enum lttng_error_code notification_ret;
	struct ust_registry_channel *ust_reg_chan;

	LTTNG_ASSERT(app);
	LTTNG_ASSERT(usess);
	LTTNG_ASSERT(ua_sess);
	LTTNG_ASSERT(ua_chan);

	DBG("UST app creating channel %s with per UID buffers", ua_chan->name);

	reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
	/*
	 * The session creation handles the creation of this global registry
	 * object. If none can be find, there is a code flow problem or a
	 * teardown race.
	 */
	LTTNG_ASSERT(reg_uid);

	buf_reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
			reg_uid);
	if (buf_reg_chan) {
		/* Buffers already exist; only send them to this application. */
		goto send_channel;
	}

	/* Create the buffer registry channel object. */
	ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &buf_reg_chan);
	if (ret < 0) {
		ERR("Error creating the UST channel \"%s\" registry instance",
				ua_chan->name);
		goto error;
	}

	session = session_find_by_id(ua_sess->tracing_id);
	LTTNG_ASSERT(session);
	/* Both locks must already be held by the caller (see header comment). */
	LTTNG_ASSERT(pthread_mutex_trylock(&session->lock));
	LTTNG_ASSERT(session_trylock_list());

	/*
	 * Create the buffers on the consumer side. This call populates the
	 * ust app channel object with all streams and data object.
	 */
	ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
			app->bits_per_long, reg_uid->registry->reg.ust,
			session->most_recent_chunk_id.value);
	if (ret < 0) {
		ERR("Error creating UST channel \"%s\" on the consumer daemon",
				ua_chan->name);

		/*
		 * Let's remove the previously created buffer registry channel so
		 * it's not visible anymore in the session registry.
		 */
		ust_registry_channel_del_free(reg_uid->registry->reg.ust,
				ua_chan->tracing_channel_id, false);
		buffer_reg_channel_remove(reg_uid->registry, buf_reg_chan);
		buffer_reg_channel_destroy(buf_reg_chan, LTTNG_DOMAIN_UST);
		goto error;
	}

	/*
	 * Setup the streams and add it to the session registry.
	 */
	ret = setup_buffer_reg_channel(reg_uid->registry,
			ua_chan, buf_reg_chan, app);
	if (ret < 0) {
		ERR("Error setting up UST channel \"%s\"", ua_chan->name);
		goto error;
	}

	/* Notify the notification subsystem of the channel's creation. */
	pthread_mutex_lock(&reg_uid->registry->reg.ust->lock);
	ust_reg_chan = ust_registry_channel_find(reg_uid->registry->reg.ust,
			ua_chan->tracing_channel_id);
	LTTNG_ASSERT(ust_reg_chan);
	ust_reg_chan->consumer_key = ua_chan->key;
	ust_reg_chan = NULL;
	pthread_mutex_unlock(&reg_uid->registry->reg.ust->lock);

	notification_ret = notification_thread_command_add_channel(
			the_notification_thread_handle, session->name,
			lttng_credentials_get_uid(
					&ua_sess->effective_credentials),
			lttng_credentials_get_gid(
					&ua_sess->effective_credentials),
			ua_chan->name, ua_chan->key, LTTNG_DOMAIN_UST,
			ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
	if (notification_ret != LTTNG_OK) {
		ret = - (int) notification_ret;
		ERR("Failed to add channel to notification thread");
		goto error;
	}

send_channel:
	/* Send buffers to the application. */
	ret = send_channel_uid_to_ust(buf_reg_chan, app, ua_sess, ua_chan);
	if (ret < 0) {
		if (ret != -ENOTCONN) {
			ERR("Error sending channel to application");
		}
		goto error;
	}

error:
	/* Drop the reference taken by session_find_by_id(), if any. */
	if (session) {
		session_put(session);
	}
	return ret;
}
3471
/*
 * Create and send to the application the created buffers with per PID buffers.
 *
 * Unlike the per-UID case, the registry channel, consumer buffers and
 * notification-thread registration all happen for every application.
 * On any failure after the registry channel was added, it is removed again
 * under the error_remove_from_registry label.
 *
 * Called with UST app session lock held.
 * The session list lock and the session's lock must be acquired.
 *
 * Return 0 on success else a negative value.
 */
static int create_channel_per_pid(struct ust_app *app,
		struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct ust_registry_session *registry;
	enum lttng_error_code cmd_ret;
	struct ltt_session *session = NULL;
	uint64_t chan_reg_key;
	struct ust_registry_channel *ust_reg_chan;

	LTTNG_ASSERT(app);
	LTTNG_ASSERT(usess);
	LTTNG_ASSERT(ua_sess);
	LTTNG_ASSERT(ua_chan);

	DBG("UST app creating channel %s with per PID buffers", ua_chan->name);

	rcu_read_lock();

	registry = get_session_registry(ua_sess);
	/* The UST app session lock is held, registry shall not be null. */
	LTTNG_ASSERT(registry);

	/* Create and add a new channel registry to session. */
	ret = ust_registry_channel_add(registry, ua_chan->key);
	if (ret < 0) {
		ERR("Error creating the UST channel \"%s\" registry instance",
				ua_chan->name);
		goto error;
	}

	session = session_find_by_id(ua_sess->tracing_id);
	LTTNG_ASSERT(session);

	/* Both locks must already be held by the caller (see header comment). */
	LTTNG_ASSERT(pthread_mutex_trylock(&session->lock));
	LTTNG_ASSERT(session_trylock_list());

	/* Create and get channel on the consumer side. */
	ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
			app->bits_per_long, registry,
			session->most_recent_chunk_id.value);
	if (ret < 0) {
		ERR("Error creating UST channel \"%s\" on the consumer daemon",
				ua_chan->name);
		goto error_remove_from_registry;
	}

	ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
	if (ret < 0) {
		if (ret != -ENOTCONN) {
			ERR("Error sending channel to application");
		}
		goto error_remove_from_registry;
	}

	/* Publish the consumer key in the registry channel. */
	chan_reg_key = ua_chan->key;
	pthread_mutex_lock(&registry->lock);
	ust_reg_chan = ust_registry_channel_find(registry, chan_reg_key);
	LTTNG_ASSERT(ust_reg_chan);
	ust_reg_chan->consumer_key = ua_chan->key;
	pthread_mutex_unlock(&registry->lock);

	cmd_ret = notification_thread_command_add_channel(
			the_notification_thread_handle, session->name,
			lttng_credentials_get_uid(
					&ua_sess->effective_credentials),
			lttng_credentials_get_gid(
					&ua_sess->effective_credentials),
			ua_chan->name, ua_chan->key, LTTNG_DOMAIN_UST,
			ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
	if (cmd_ret != LTTNG_OK) {
		ret = - (int) cmd_ret;
		ERR("Failed to add channel to notification thread");
		goto error_remove_from_registry;
	}

error_remove_from_registry:
	/* Undo the registry channel addition on any failure above. */
	if (ret) {
		ust_registry_channel_del_free(registry, ua_chan->key, false);
	}
error:
	rcu_read_unlock();
	/* Drop the reference taken by session_find_by_id(), if any. */
	if (session) {
		session_put(session);
	}
	return ret;
}
3568
3569 /*
3570 * From an already allocated ust app channel, create the channel buffers if
3571 * needed and send them to the application. This MUST be called with a RCU read
3572 * side lock acquired.
3573 *
3574 * Called with UST app session lock held.
3575 *
3576 * Return 0 on success or else a negative value. Returns -ENOTCONN if
3577 * the application exited concurrently.
3578 */
3579 static int ust_app_channel_send(struct ust_app *app,
3580 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
3581 struct ust_app_channel *ua_chan)
3582 {
3583 int ret;
3584
3585 LTTNG_ASSERT(app);
3586 LTTNG_ASSERT(usess);
3587 LTTNG_ASSERT(usess->active);
3588 LTTNG_ASSERT(ua_sess);
3589 LTTNG_ASSERT(ua_chan);
3590
3591 /* Handle buffer type before sending the channel to the application. */
3592 switch (usess->buffer_type) {
3593 case LTTNG_BUFFER_PER_UID:
3594 {
3595 ret = create_channel_per_uid(app, usess, ua_sess, ua_chan);
3596 if (ret < 0) {
3597 goto error;
3598 }
3599 break;
3600 }
3601 case LTTNG_BUFFER_PER_PID:
3602 {
3603 ret = create_channel_per_pid(app, usess, ua_sess, ua_chan);
3604 if (ret < 0) {
3605 goto error;
3606 }
3607 break;
3608 }
3609 default:
3610 abort();
3611 ret = -EINVAL;
3612 goto error;
3613 }
3614
3615 /* Initialize ust objd object using the received handle and add it. */
3616 lttng_ht_node_init_ulong(&ua_chan->ust_objd_node, ua_chan->handle);
3617 lttng_ht_add_unique_ulong(app->ust_objd, &ua_chan->ust_objd_node);
3618
3619 /* If channel is not enabled, disable it on the tracer */
3620 if (!ua_chan->enabled) {
3621 ret = disable_ust_channel(app, ua_sess, ua_chan);
3622 if (ret < 0) {
3623 goto error;
3624 }
3625 }
3626
3627 error:
3628 return ret;
3629 }
3630
3631 /*
3632 * Create UST app channel and return it through ua_chanp if not NULL.
3633 *
3634 * Called with UST app session lock and RCU read-side lock held.
3635 *
3636 * Return 0 on success or else a negative value.
3637 */
3638 static int ust_app_channel_allocate(struct ust_app_session *ua_sess,
3639 struct ltt_ust_channel *uchan,
3640 enum lttng_ust_abi_chan_type type, struct ltt_ust_session *usess,
3641 struct ust_app_channel **ua_chanp)
3642 {
3643 int ret = 0;
3644 struct lttng_ht_iter iter;
3645 struct lttng_ht_node_str *ua_chan_node;
3646 struct ust_app_channel *ua_chan;
3647
3648 /* Lookup channel in the ust app session */
3649 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
3650 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
3651 if (ua_chan_node != NULL) {
3652 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3653 goto end;
3654 }
3655
3656 ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
3657 if (ua_chan == NULL) {
3658 /* Only malloc can fail here */
3659 ret = -ENOMEM;
3660 goto error;
3661 }
3662 shadow_copy_channel(ua_chan, uchan);
3663
3664 /* Set channel type. */
3665 ua_chan->attr.type = type;
3666
3667 /* Only add the channel if successful on the tracer side. */
3668 lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
3669 end:
3670 if (ua_chanp) {
3671 *ua_chanp = ua_chan;
3672 }
3673
3674 /* Everything went well. */
3675 return 0;
3676
3677 error:
3678 return ret;
3679 }
3680
/*
 * Create UST app event and create it on the tracer side.
 *
 * On tracer-side failure, the freshly allocated ua_event is deleted before
 * returning; it is only added to the channel's event table on success.
 *
 * Must be called with the RCU read side lock held.
 * Called with ust app session mutex held.
 *
 * Return 0 on success or else a negative value.
 */
static
int create_ust_app_event(struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan, struct ltt_ust_event *uevent,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_event *ua_event;

	ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
	if (ua_event == NULL) {
		/* Only failure mode of alloc_ust_app_event(). */
		ret = -ENOMEM;
		goto end;
	}
	shadow_copy_event(ua_event, uevent);

	/* Create it on the tracer side */
	ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
	if (ret < 0) {
		/*
		 * Not found previously means that it does not exist on the
		 * tracer. If the application reports that the event existed,
		 * it means there is a bug in the sessiond or lttng-ust
		 * (or corruption, etc.)
		 */
		if (ret == -LTTNG_UST_ERR_EXIST) {
			ERR("Tracer for application reported that an event being created already existed: "
					"event_name = \"%s\", pid = %d, ppid = %d, uid = %d, gid = %d",
					uevent->attr.name,
					app->pid, app->ppid, app->uid,
					app->gid);
		}
		goto error;
	}

	add_unique_ust_app_event(ua_chan, ua_event);

	DBG2("UST app create event completed: app = '%s' pid = %d",
			app->name, app->pid);

end:
	return ret;

error:
	/* Valid. Calling here is already in a read side lock */
	delete_ust_app_event(-1, ua_event, app);
	return ret;
}
3735
/*
 * Create UST app event notifier rule and create it on the tracer side.
 *
 * On tracer-side failure, the freshly allocated rule is deleted; it is only
 * added to the app's token-to-rule hash table on success.
 *
 * Must be called with the RCU read side lock held.
 * Called with ust app session mutex held.
 *
 * Return 0 on success or else a negative value.
 */
static
int create_ust_app_event_notifier_rule(struct lttng_trigger *trigger,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_event_notifier_rule *ua_event_notifier_rule;

	ua_event_notifier_rule = alloc_ust_app_event_notifier_rule(trigger);
	if (ua_event_notifier_rule == NULL) {
		ret = -ENOMEM;
		goto end;
	}

	/* Create it on the tracer side. */
	ret = create_ust_event_notifier(app, ua_event_notifier_rule);
	if (ret < 0) {
		/*
		 * Not found previously means that it does not exist on the
		 * tracer. If the application reports that the event existed,
		 * it means there is a bug in the sessiond or lttng-ust
		 * (or corruption, etc.)
		 */
		if (ret == -LTTNG_UST_ERR_EXIST) {
			ERR("Tracer for application reported that an event notifier being created already exists: "
					"token = \"%" PRIu64 "\", pid = %d, ppid = %d, uid = %d, gid = %d",
					lttng_trigger_get_tracer_token(trigger),
					app->pid, app->ppid, app->uid,
					app->gid);
		}
		goto error;
	}

	/* Index the rule by its trigger's tracer token. */
	lttng_ht_add_unique_u64(app->token_to_event_notifier_rule_ht,
			&ua_event_notifier_rule->node);

	DBG2("UST app create token event rule completed: app = '%s', pid = %d, token = %" PRIu64,
			app->name, app->pid, lttng_trigger_get_tracer_token(trigger));

	goto end;

error:
	/* The RCU read side lock is already being held by the caller. */
	delete_ust_app_event_notifier_rule(-1, ua_event_notifier_rule, app);
end:
	return ret;
}
3788
/*
 * Create UST metadata and open it on the tracer side.
 *
 * The metadata channel object is only used to drive the consumer-side
 * creation; it is deleted unconditionally before returning (success or
 * failure) since, once set up, the consumer owns and monitors the metadata
 * stream. The registry keeps only the metadata key.
 *
 * Called with UST app session lock held and RCU read side lock.
 *
 * Return 0 on success or else a negative value.
 */
static int create_ust_app_metadata(struct ust_app_session *ua_sess,
		struct ust_app *app, struct consumer_output *consumer)
{
	int ret = 0;
	struct ust_app_channel *metadata;
	struct consumer_socket *socket;
	struct ust_registry_session *registry;
	struct ltt_session *session = NULL;

	LTTNG_ASSERT(ua_sess);
	LTTNG_ASSERT(app);
	LTTNG_ASSERT(consumer);

	registry = get_session_registry(ua_sess);
	/* The UST app session is held registry shall not be null. */
	LTTNG_ASSERT(registry);

	pthread_mutex_lock(&registry->lock);

	/* Metadata already exists for this registry or it was closed previously */
	if (registry->metadata_key || registry->metadata_closed) {
		ret = 0;
		goto error;
	}

	/* Allocate UST metadata */
	metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, NULL);
	if (!metadata) {
		/* malloc() failed */
		ret = -ENOMEM;
		goto error;
	}

	memcpy(&metadata->attr, &ua_sess->metadata_attr, sizeof(metadata->attr));

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create metadata");
		goto error;
	}

	/* Get the right consumer socket for the application. */
	socket = consumer_find_socket_by_bitness(app->bits_per_long, consumer);
	if (!socket) {
		ret = -EINVAL;
		goto error_consumer;
	}

	/*
	 * Keep metadata key so we can identify it on the consumer side. Assign it
	 * to the registry *before* we ask the consumer so we avoid the race of the
	 * consumer requesting the metadata and the ask_channel call on our side
	 * did not returned yet.
	 */
	registry->metadata_key = metadata->key;

	session = session_find_by_id(ua_sess->tracing_id);
	LTTNG_ASSERT(session);

	/* Both locks must already be held by the caller. */
	LTTNG_ASSERT(pthread_mutex_trylock(&session->lock));
	LTTNG_ASSERT(session_trylock_list());

	/*
	 * Ask the metadata channel creation to the consumer. The metadata object
	 * will be created by the consumer and kept their. However, the stream is
	 * never added or monitored until we do a first push metadata to the
	 * consumer.
	 */
	ret = ust_consumer_ask_channel(ua_sess, metadata, consumer, socket,
			registry, session->current_trace_chunk);
	if (ret < 0) {
		/* Nullify the metadata key so we don't try to close it later on. */
		registry->metadata_key = 0;
		goto error_consumer;
	}

	/*
	 * The setup command will make the metadata stream be sent to the relayd,
	 * if applicable, and the thread managing the metadatas. This is important
	 * because after this point, if an error occurs, the only way the stream
	 * can be deleted is to be monitored in the consumer.
	 */
	ret = consumer_setup_metadata(socket, metadata->key);
	if (ret < 0) {
		/* Nullify the metadata key so we don't try to close it later on. */
		registry->metadata_key = 0;
		goto error_consumer;
	}

	DBG2("UST metadata with key %" PRIu64 " created for app pid %d",
			metadata->key, app->pid);

error_consumer:
	/* The local metadata object is always released; the consumer owns it. */
	lttng_fd_put(LTTNG_FD_APPS, 1);
	delete_ust_app_channel(-1, metadata, app);
error:
	pthread_mutex_unlock(&registry->lock);
	/* Drop the reference taken by session_find_by_id(), if any. */
	if (session) {
		session_put(session);
	}
	return ret;
}
3897
3898 /*
3899 * Return ust app pointer or NULL if not found. RCU read side lock MUST be
3900 * acquired before calling this function.
3901 */
3902 struct ust_app *ust_app_find_by_pid(pid_t pid)
3903 {
3904 struct ust_app *app = NULL;
3905 struct lttng_ht_node_ulong *node;
3906 struct lttng_ht_iter iter;
3907
3908 lttng_ht_lookup(ust_app_ht, (void *)((unsigned long) pid), &iter);
3909 node = lttng_ht_iter_get_node_ulong(&iter);
3910 if (node == NULL) {
3911 DBG2("UST app no found with pid %d", pid);
3912 goto error;
3913 }
3914
3915 DBG2("Found UST app by pid %d", pid);
3916
3917 app = caa_container_of(node, struct ust_app, pid_n);
3918
3919 error:
3920 return app;
3921 }
3922
3923 /*
3924 * Allocate and init an UST app object using the registration information and
3925 * the command socket. This is called when the command socket connects to the
3926 * session daemon.
3927 *
3928 * The object is returned on success or else NULL.
3929 */
3930 struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
3931 {
3932 int ret;
3933 struct ust_app *lta = NULL;
3934 struct lttng_pipe *event_notifier_event_source_pipe = NULL;
3935
3936 LTTNG_ASSERT(msg);
3937 LTTNG_ASSERT(sock >= 0);
3938
3939 DBG3("UST app creating application for socket %d", sock);
3940
3941 if ((msg->bits_per_long == 64 &&
3942 (uatomic_read(&the_ust_consumerd64_fd) ==
3943 -EINVAL)) ||
3944 (msg->bits_per_long == 32 &&
3945 (uatomic_read(&the_ust_consumerd32_fd) ==
3946 -EINVAL))) {
3947 ERR("Registration failed: application \"%s\" (pid: %d) has "
3948 "%d-bit long, but no consumerd for this size is available.\n",
3949 msg->name, msg->pid, msg->bits_per_long);
3950 goto error;
3951 }
3952
3953 /*
3954 * Reserve the two file descriptors of the event source pipe. The write
3955 * end will be closed once it is passed to the application, at which
3956 * point a single 'put' will be performed.
3957 */
3958 ret = lttng_fd_get(LTTNG_FD_APPS, 2);
3959 if (ret) {
3960 ERR("Failed to reserve two file descriptors for the event source pipe while creating a new application instance: app = '%s', pid = %d",
3961 msg->name, (int) msg->pid);
3962 goto error;
3963 }
3964
3965 event_notifier_event_source_pipe = lttng_pipe_open(FD_CLOEXEC);
3966 if (!event_notifier_event_source_pipe) {
3967 PERROR("Failed to open application event source pipe: '%s' (pid = %d)",
3968 msg->name, msg->pid);
3969 goto error;
3970 }
3971
3972 lta = (ust_app *) zmalloc(sizeof(struct ust_app));
3973 if (lta == NULL) {
3974 PERROR("malloc");
3975 goto error_free_pipe;
3976 }
3977
3978 lta->event_notifier_group.event_pipe = event_notifier_event_source_pipe;
3979
3980 lta->ppid = msg->ppid;
3981 lta->uid = msg->uid;
3982 lta->gid = msg->gid;
3983
3984 lta->bits_per_long = msg->bits_per_long;
3985 lta->uint8_t_alignment = msg->uint8_t_alignment;
3986 lta->uint16_t_alignment = msg->uint16_t_alignment;
3987 lta->uint32_t_alignment = msg->uint32_t_alignment;
3988 lta->uint64_t_alignment = msg->uint64_t_alignment;
3989 lta->long_alignment = msg->long_alignment;
3990 lta->byte_order = msg->byte_order;
3991
3992 lta->v_major = msg->major;
3993 lta->v_minor = msg->minor;
3994 lta->sessions = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
3995 lta->ust_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3996 lta->ust_sessions_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3997 lta->notify_sock = -1;
3998 lta->token_to_event_notifier_rule_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
3999
4000 /* Copy name and make sure it's NULL terminated. */
4001 strncpy(lta->name, msg->name, sizeof(lta->name));
4002 lta->name[UST_APP_PROCNAME_LEN] = '\0';
4003
4004 /*
4005 * Before this can be called, when receiving the registration information,
4006 * the application compatibility is checked. So, at this point, the
4007 * application can work with this session daemon.
4008 */
4009 lta->compatible = 1;
4010
4011 lta->pid = msg->pid;
4012 lttng_ht_node_init_ulong(&lta->pid_n, (unsigned long) lta->pid);
4013 lta->sock = sock;
4014 pthread_mutex_init(&lta->sock_lock, NULL);
4015 lttng_ht_node_init_ulong(&lta->sock_n, (unsigned long) lta->sock);
4016
4017 CDS_INIT_LIST_HEAD(&lta->teardown_head);
4018 return lta;
4019
4020 error_free_pipe:
4021 lttng_pipe_destroy(event_notifier_event_source_pipe);
4022 lttng_fd_put(LTTNG_FD_APPS, 2);
4023 error:
4024 return NULL;
4025 }
4026
/*
 * For a given application object, add it to every hash table.
 *
 * The app must already have a valid notify socket (set after the notify
 * socket registration completes).
 */
void ust_app_add(struct ust_app *app)
{
	LTTNG_ASSERT(app);
	LTTNG_ASSERT(app->notify_sock >= 0);

	app->registration_time = time(NULL);

	rcu_read_lock();

	/*
	 * On a re-registration, we want to kick out the previous registration of
	 * that pid
	 */
	lttng_ht_add_replace_ulong(ust_app_ht, &app->pid_n);

	/*
	 * The socket _should_ be unique until _we_ call close. So, a add_unique
	 * for the ust_app_ht_by_sock is used which asserts fail if the entry was
	 * already in the table.
	 */
	lttng_ht_add_unique_ulong(ust_app_ht_by_sock, &app->sock_n);

	/* Add application to the notify socket hash table. */
	lttng_ht_node_init_ulong(&app->notify_sock_n, app->notify_sock);
	lttng_ht_add_unique_ulong(ust_app_ht_by_notify_sock, &app->notify_sock_n);

	DBG("App registered with pid:%d ppid:%d uid:%d gid:%d sock =%d name:%s "
			"notify_sock =%d (version %d.%d)", app->pid, app->ppid, app->uid,
			app->gid, app->sock, app->name, app->notify_sock, app->v_major,
			app->v_minor);

	rcu_read_unlock();
}
4063
4064 /*
4065 * Set the application version into the object.
4066 *
4067 * Return 0 on success else a negative value either an errno code or a
4068 * LTTng-UST error code.
4069 */
4070 int ust_app_version(struct ust_app *app)
4071 {
4072 int ret;
4073
4074 LTTNG_ASSERT(app);
4075
4076 pthread_mutex_lock(&app->sock_lock);
4077 ret = lttng_ust_ctl_tracer_version(app->sock, &app->version);
4078 pthread_mutex_unlock(&app->sock_lock);
4079 if (ret < 0) {
4080 if (ret == -LTTNG_UST_ERR_EXITING || ret == -EPIPE) {
4081 DBG3("UST app version failed. Application is dead: pid = %d, sock = %d",
4082 app->pid, app->sock);
4083 } else if (ret == -EAGAIN) {
4084 WARN("UST app version failed. Communication time out: pid = %d, sock = %d",
4085 app->pid, app->sock);
4086 } else {
4087 ERR("UST app version failed with ret %d: pid = %d, sock = %d",
4088 ret, app->pid, app->sock);
4089 }
4090 }
4091
4092 return ret;
4093 }
4094
4095 bool ust_app_supports_notifiers(const struct ust_app *app)
4096 {
4097 return app->v_major >= 9;
4098 }
4099
4100 bool ust_app_supports_counters(const struct ust_app *app)
4101 {
4102 return app->v_major >= 9;
4103 }
4104
4105 /*
4106 * Setup the base event notifier group.
4107 *
4108 * Return 0 on success else a negative value either an errno code or a
4109 * LTTng-UST error code.
4110 */
4111 int ust_app_setup_event_notifier_group(struct ust_app *app)
4112 {
4113 int ret;
4114 int event_pipe_write_fd;
4115 struct lttng_ust_abi_object_data *event_notifier_group = NULL;
4116 enum lttng_error_code lttng_ret;
4117 enum event_notifier_error_accounting_status event_notifier_error_accounting_status;
4118
4119 LTTNG_ASSERT(app);
4120
4121 if (!ust_app_supports_notifiers(app)) {
4122 ret = -ENOSYS;
4123 goto error;
4124 }
4125
4126 /* Get the write side of the pipe. */
4127 event_pipe_write_fd = lttng_pipe_get_writefd(
4128 app->event_notifier_group.event_pipe);
4129
4130 pthread_mutex_lock(&app->sock_lock);
4131 ret = lttng_ust_ctl_create_event_notifier_group(app->sock,
4132 event_pipe_write_fd, &event_notifier_group);
4133 pthread_mutex_unlock(&app->sock_lock);
4134 if (ret < 0) {
4135 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
4136 ret = 0;
4137 DBG3("UST app create event notifier group failed. Application is dead: pid = %d, sock = %d",
4138 app->pid, app->sock);
4139 } else if (ret == -EAGAIN) {
4140 ret = 0;
4141 WARN("UST app create event notifier group failed. Communication time out: pid = %d, sock = %d",
4142 app->pid, app->sock);
4143 } else {
4144 ERR("UST app create event notifier group failed with ret %d: pid = %d, sock = %d, event_pipe_write_fd: %d",
4145 ret, app->pid, app->sock, event_pipe_write_fd);
4146 }
4147 goto error;
4148 }
4149
4150 ret = lttng_pipe_write_close(app->event_notifier_group.event_pipe);
4151 if (ret) {
4152 ERR("Failed to close write end of the application's event source pipe: app = '%s' (pid = %d)",
4153 app->name, app->pid);
4154 goto error;
4155 }
4156
4157 /*
4158 * Release the file descriptor that was reserved for the write-end of
4159 * the pipe.
4160 */
4161 lttng_fd_put(LTTNG_FD_APPS, 1);
4162
4163 lttng_ret = notification_thread_command_add_tracer_event_source(
4164 the_notification_thread_handle,
4165 lttng_pipe_get_readfd(
4166 app->event_notifier_group.event_pipe),
4167 LTTNG_DOMAIN_UST);
4168 if (lttng_ret != LTTNG_OK) {
4169 ERR("Failed to add tracer event source to notification thread");
4170 ret = - 1;
4171 goto error;
4172 }
4173
4174 /* Assign handle only when the complete setup is valid. */
4175 app->event_notifier_group.object = event_notifier_group;
4176
4177 event_notifier_error_accounting_status =
4178 event_notifier_error_accounting_register_app(app);
4179 switch (event_notifier_error_accounting_status) {
4180 case EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK:
4181 break;
4182 case EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_UNSUPPORTED:
4183 DBG3("Failed to setup event notifier error accounting (application does not support notifier error accounting): app socket fd = %d, app name = '%s', app pid = %d",
4184 app->sock, app->name, (int) app->pid);
4185 ret = 0;
4186 goto error_accounting;
4187 case EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_APP_DEAD:
4188 DBG3("Failed to setup event notifier error accounting (application is dead): app socket fd = %d, app name = '%s', app pid = %d",
4189 app->sock, app->name, (int) app->pid);
4190 ret = 0;
4191 goto error_accounting;
4192 default:
4193 ERR("Failed to setup event notifier error accounting for app");
4194 ret = -1;
4195 goto error_accounting;
4196 }
4197
4198 return ret;
4199
4200 error_accounting:
4201 lttng_ret = notification_thread_command_remove_tracer_event_source(
4202 the_notification_thread_handle,
4203 lttng_pipe_get_readfd(
4204 app->event_notifier_group.event_pipe));
4205 if (lttng_ret != LTTNG_OK) {
4206 ERR("Failed to remove application tracer event source from notification thread");
4207 }
4208
4209 error:
4210 lttng_ust_ctl_release_object(app->sock, app->event_notifier_group.object);
4211 free(app->event_notifier_group.object);
4212 app->event_notifier_group.object = NULL;
4213 return ret;
4214 }
4215
4216 /*
4217 * Unregister app by removing it from the global traceable app list and freeing
4218 * the data struct.
4219 *
4220 * The socket is already closed at this point so no close to sock.
4221 */
4222 void ust_app_unregister(int sock)
4223 {
4224 struct ust_app *lta;
4225 struct lttng_ht_node_ulong *node;
4226 struct lttng_ht_iter ust_app_sock_iter;
4227 struct lttng_ht_iter iter;
4228 struct ust_app_session *ua_sess;
4229 int ret;
4230
4231 rcu_read_lock();
4232
4233 /* Get the node reference for a call_rcu */
4234 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &ust_app_sock_iter);
4235 node = lttng_ht_iter_get_node_ulong(&ust_app_sock_iter);
4236 LTTNG_ASSERT(node);
4237
4238 lta = caa_container_of(node, struct ust_app, sock_n);
4239 DBG("PID %d unregistering with sock %d", lta->pid, sock);
4240
4241 /*
4242 * For per-PID buffers, perform "push metadata" and flush all
4243 * application streams before removing app from hash tables,
4244 * ensuring proper behavior of data_pending check.
4245 * Remove sessions so they are not visible during deletion.
4246 */
4247 cds_lfht_for_each_entry(lta->sessions->ht, &iter.iter, ua_sess,
4248 node.node) {
4249 struct ust_registry_session *registry;
4250
4251 ret = lttng_ht_del(lta->sessions, &iter);
4252 if (ret) {
4253 /* The session was already removed so scheduled for teardown. */
4254 continue;
4255 }
4256
4257 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
4258 (void) ust_app_flush_app_session(lta, ua_sess);
4259 }
4260
4261 /*
4262 * Add session to list for teardown. This is safe since at this point we
4263 * are the only one using this list.
4264 */
4265 pthread_mutex_lock(&ua_sess->lock);
4266
4267 if (ua_sess->deleted) {
4268 pthread_mutex_unlock(&ua_sess->lock);
4269 continue;
4270 }
4271
4272 /*
4273 * Normally, this is done in the delete session process which is
4274 * executed in the call rcu below. However, upon registration we can't
4275 * afford to wait for the grace period before pushing data or else the
4276 * data pending feature can race between the unregistration and stop
4277 * command where the data pending command is sent *before* the grace
4278 * period ended.
4279 *
4280 * The close metadata below nullifies the metadata pointer in the
4281 * session so the delete session will NOT push/close a second time.
4282 */
4283 registry = get_session_registry(ua_sess);
4284 if (registry) {
4285 /* Push metadata for application before freeing the application. */
4286 (void) push_metadata(registry, ua_sess->consumer);
4287
4288 /*
4289 * Don't ask to close metadata for global per UID buffers. Close
4290 * metadata only on destroy trace session in this case. Also, the
4291 * previous push metadata could have flag the metadata registry to
4292 * close so don't send a close command if closed.
4293 */
4294 if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
4295 /* And ask to close it for this session registry. */
4296 (void) close_metadata(registry, ua_sess->consumer);
4297 }
4298 }
4299 cds_list_add(&ua_sess->teardown_node, &lta->teardown_head);
4300
4301 pthread_mutex_unlock(&ua_sess->lock);
4302 }
4303
4304 /* Remove application from PID hash table */
4305 ret = lttng_ht_del(ust_app_ht_by_sock, &ust_app_sock_iter);
4306 LTTNG_ASSERT(!ret);
4307
4308 /*
4309 * Remove application from notify hash table. The thread handling the
4310 * notify socket could have deleted the node so ignore on error because
4311 * either way it's valid. The close of that socket is handled by the
4312 * apps_notify_thread.
4313 */
4314 iter.iter.node = &lta->notify_sock_n.node;
4315 (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
4316
4317 /*
4318 * Ignore return value since the node might have been removed before by an
4319 * add replace during app registration because the PID can be reassigned by
4320 * the OS.
4321 */
4322 iter.iter.node = &lta->pid_n.node;
4323 ret = lttng_ht_del(ust_app_ht, &iter);
4324 if (ret) {
4325 DBG3("Unregister app by PID %d failed. This can happen on pid reuse",
4326 lta->pid);
4327 }
4328
4329 /* Free memory */
4330 call_rcu(&lta->pid_n.head, delete_ust_app_rcu);
4331
4332 rcu_read_unlock();
4333 return;
4334 }
4335
4336 /*
4337 * Fill events array with all events name of all registered apps.
4338 */
4339 int ust_app_list_events(struct lttng_event **events)
4340 {
4341 int ret, handle;
4342 size_t nbmem, count = 0;
4343 struct lttng_ht_iter iter;
4344 struct ust_app *app;
4345 struct lttng_event *tmp_event;
4346
4347 nbmem = UST_APP_EVENT_LIST_SIZE;
4348 tmp_event = (lttng_event *) zmalloc(nbmem * sizeof(struct lttng_event));
4349 if (tmp_event == NULL) {
4350 PERROR("zmalloc ust app events");
4351 ret = -ENOMEM;
4352 goto error;
4353 }
4354
4355 rcu_read_lock();
4356
4357 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4358 struct lttng_ust_abi_tracepoint_iter uiter;
4359
4360 health_code_update();
4361
4362 if (!app->compatible) {
4363 /*
4364 * TODO: In time, we should notice the caller of this error by
4365 * telling him that this is a version error.
4366 */
4367 continue;
4368 }
4369 pthread_mutex_lock(&app->sock_lock);
4370 handle = lttng_ust_ctl_tracepoint_list(app->sock);
4371 if (handle < 0) {
4372 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
4373 ERR("UST app list events getting handle failed for app pid %d",
4374 app->pid);
4375 }
4376 pthread_mutex_unlock(&app->sock_lock);
4377 continue;
4378 }
4379
4380 while ((ret = lttng_ust_ctl_tracepoint_list_get(app->sock, handle,
4381 &uiter)) != -LTTNG_UST_ERR_NOENT) {
4382 /* Handle ustctl error. */
4383 if (ret < 0) {
4384 int release_ret;
4385
4386 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
4387 ERR("UST app tp list get failed for app %d with ret %d",
4388 app->sock, ret);
4389 } else {
4390 DBG3("UST app tp list get failed. Application is dead");
4391 break;
4392 }
4393 free(tmp_event);
4394 release_ret = lttng_ust_ctl_release_handle(app->sock, handle);
4395 if (release_ret < 0 &&
4396 release_ret != -LTTNG_UST_ERR_EXITING &&
4397 release_ret != -EPIPE) {
4398 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
4399 }
4400 pthread_mutex_unlock(&app->sock_lock);
4401 goto rcu_error;
4402 }
4403
4404 health_code_update();
4405 if (count >= nbmem) {
4406 /* In case the realloc fails, we free the memory */
4407 struct lttng_event *new_tmp_event;
4408 size_t new_nbmem;
4409
4410 new_nbmem = nbmem << 1;
4411 DBG2("Reallocating event list from %zu to %zu entries",
4412 nbmem, new_nbmem);
4413 new_tmp_event = (lttng_event *) realloc(tmp_event,
4414 new_nbmem * sizeof(struct lttng_event));
4415 if (new_tmp_event == NULL) {
4416 int release_ret;
4417
4418 PERROR("realloc ust app events");
4419 free(tmp_event);
4420 ret = -ENOMEM;
4421 release_ret = lttng_ust_ctl_release_handle(app->sock, handle);
4422 if (release_ret < 0 &&
4423 release_ret != -LTTNG_UST_ERR_EXITING &&
4424 release_ret != -EPIPE) {
4425 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
4426 }
4427 pthread_mutex_unlock(&app->sock_lock);
4428 goto rcu_error;
4429 }
4430 /* Zero the new memory */
4431 memset(new_tmp_event + nbmem, 0,
4432 (new_nbmem - nbmem) * sizeof(struct lttng_event));
4433 nbmem = new_nbmem;
4434 tmp_event = new_tmp_event;
4435 }
4436 memcpy(tmp_event[count].name, uiter.name, LTTNG_UST_ABI_SYM_NAME_LEN);
4437 tmp_event[count].loglevel = uiter.loglevel;
4438 tmp_event[count].type = (enum lttng_event_type) LTTNG_UST_ABI_TRACEPOINT;
4439 tmp_event[count].pid = app->pid;
4440 tmp_event[count].enabled = -1;
4441 count++;
4442 }
4443 ret = lttng_ust_ctl_release_handle(app->sock, handle);
4444 pthread_mutex_unlock(&app->sock_lock);
4445 if (ret < 0) {
4446 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
4447 DBG3("Error releasing app handle. Application died: pid = %d, sock = %d",
4448 app->pid, app->sock);
4449 } else if (ret == -EAGAIN) {
4450 WARN("Error releasing app handle. Communication time out: pid = %d, sock = %d",
4451 app->pid, app->sock);
4452 } else {
4453 ERR("Error releasing app handle with ret %d: pid = %d, sock = %d",
4454 ret, app->pid, app->sock);
4455 }
4456 }
4457 }
4458
4459 ret = count;
4460 *events = tmp_event;
4461
4462 DBG2("UST app list events done (%zu events)", count);
4463
4464 rcu_error:
4465 rcu_read_unlock();
4466 error:
4467 health_code_update();
4468 return ret;
4469 }
4470
4471 /*
4472 * Fill events array with all events name of all registered apps.
4473 */
4474 int ust_app_list_event_fields(struct lttng_event_field **fields)
4475 {
4476 int ret, handle;
4477 size_t nbmem, count = 0;
4478 struct lttng_ht_iter iter;
4479 struct ust_app *app;
4480 struct lttng_event_field *tmp_event;
4481
4482 nbmem = UST_APP_EVENT_LIST_SIZE;
4483 tmp_event = (lttng_event_field *) zmalloc(nbmem * sizeof(struct lttng_event_field));
4484 if (tmp_event == NULL) {
4485 PERROR("zmalloc ust app event fields");
4486 ret = -ENOMEM;
4487 goto error;
4488 }
4489
4490 rcu_read_lock();
4491
4492 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4493 struct lttng_ust_abi_field_iter uiter;
4494
4495 health_code_update();
4496
4497 if (!app->compatible) {
4498 /*
4499 * TODO: In time, we should notice the caller of this error by
4500 * telling him that this is a version error.
4501 */
4502 continue;
4503 }
4504 pthread_mutex_lock(&app->sock_lock);
4505 handle = lttng_ust_ctl_tracepoint_field_list(app->sock);
4506 if (handle < 0) {
4507 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
4508 ERR("UST app list field getting handle failed for app pid %d",
4509 app->pid);
4510 }
4511 pthread_mutex_unlock(&app->sock_lock);
4512 continue;
4513 }
4514
4515 while ((ret = lttng_ust_ctl_tracepoint_field_list_get(app->sock, handle,
4516 &uiter)) != -LTTNG_UST_ERR_NOENT) {
4517 /* Handle ustctl error. */
4518 if (ret < 0) {
4519 int release_ret;
4520
4521 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
4522 ERR("UST app tp list field failed for app %d with ret %d",
4523 app->sock, ret);
4524 } else {
4525 DBG3("UST app tp list field failed. Application is dead");
4526 break;
4527 }
4528 free(tmp_event);
4529 release_ret = lttng_ust_ctl_release_handle(app->sock, handle);
4530 pthread_mutex_unlock(&app->sock_lock);
4531 if (release_ret < 0 &&
4532 release_ret != -LTTNG_UST_ERR_EXITING &&
4533 release_ret != -EPIPE) {
4534 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
4535 }
4536 goto rcu_error;
4537 }
4538
4539 health_code_update();
4540 if (count >= nbmem) {
4541 /* In case the realloc fails, we free the memory */
4542 struct lttng_event_field *new_tmp_event;
4543 size_t new_nbmem;
4544
4545 new_nbmem = nbmem << 1;
4546 DBG2("Reallocating event field list from %zu to %zu entries",
4547 nbmem, new_nbmem);
4548 new_tmp_event = (lttng_event_field *) realloc(tmp_event,
4549 new_nbmem * sizeof(struct lttng_event_field));
4550 if (new_tmp_event == NULL) {
4551 int release_ret;
4552
4553 PERROR("realloc ust app event fields");
4554 free(tmp_event);
4555 ret = -ENOMEM;
4556 release_ret = lttng_ust_ctl_release_handle(app->sock, handle);
4557 pthread_mutex_unlock(&app->sock_lock);
4558 if (release_ret &&
4559 release_ret != -LTTNG_UST_ERR_EXITING &&
4560 release_ret != -EPIPE) {
4561 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
4562 }
4563 goto rcu_error;
4564 }
4565 /* Zero the new memory */
4566 memset(new_tmp_event + nbmem, 0,
4567 (new_nbmem - nbmem) * sizeof(struct lttng_event_field));
4568 nbmem = new_nbmem;
4569 tmp_event = new_tmp_event;
4570 }
4571
4572 memcpy(tmp_event[count].field_name, uiter.field_name, LTTNG_UST_ABI_SYM_NAME_LEN);
4573 /* Mapping between these enums matches 1 to 1. */
4574 tmp_event[count].type = (enum lttng_event_field_type) uiter.type;
4575 tmp_event[count].nowrite = uiter.nowrite;
4576
4577 memcpy(tmp_event[count].event.name, uiter.event_name, LTTNG_UST_ABI_SYM_NAME_LEN);
4578 tmp_event[count].event.loglevel = uiter.loglevel;
4579 tmp_event[count].event.type = LTTNG_EVENT_TRACEPOINT;
4580 tmp_event[count].event.pid = app->pid;
4581 tmp_event[count].event.enabled = -1;
4582 count++;
4583 }
4584 ret = lttng_ust_ctl_release_handle(app->sock, handle);
4585 pthread_mutex_unlock(&app->sock_lock);
4586 if (ret < 0 &&
4587 ret != -LTTNG_UST_ERR_EXITING &&
4588 ret != -EPIPE) {
4589 ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
4590 }
4591 }
4592
4593 ret = count;
4594 *fields = tmp_event;
4595
4596 DBG2("UST app list event fields done (%zu events)", count);
4597
4598 rcu_error:
4599 rcu_read_unlock();
4600 error:
4601 health_code_update();
4602 return ret;
4603 }
4604
4605 /*
4606 * Free and clean all traceable apps of the global list.
4607 */
4608 void ust_app_clean_list(void)
4609 {
4610 int ret;
4611 struct ust_app *app;
4612 struct lttng_ht_iter iter;
4613
4614 DBG2("UST app cleaning registered apps hash table");
4615
4616 rcu_read_lock();
4617
4618 /* Cleanup notify socket hash table */
4619 if (ust_app_ht_by_notify_sock) {
4620 cds_lfht_for_each_entry(ust_app_ht_by_notify_sock->ht, &iter.iter, app,
4621 notify_sock_n.node) {
4622 /*
4623 * Assert that all notifiers are gone as all triggers
4624 * are unregistered prior to this clean-up.
4625 */
4626 LTTNG_ASSERT(lttng_ht_get_count(app->token_to_event_notifier_rule_ht) == 0);
4627
4628 ust_app_notify_sock_unregister(app->notify_sock);
4629 }
4630 }
4631
4632 if (ust_app_ht) {
4633 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4634 ret = lttng_ht_del(ust_app_ht, &iter);
4635 LTTNG_ASSERT(!ret);
4636 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
4637 }
4638 }
4639
4640 /* Cleanup socket hash table */
4641 if (ust_app_ht_by_sock) {
4642 cds_lfht_for_each_entry(ust_app_ht_by_sock->ht, &iter.iter, app,
4643 sock_n.node) {
4644 ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
4645 LTTNG_ASSERT(!ret);
4646 }
4647 }
4648
4649 rcu_read_unlock();
4650
4651 /* Destroy is done only when the ht is empty */
4652 if (ust_app_ht) {
4653 lttng_ht_destroy(ust_app_ht);
4654 }
4655 if (ust_app_ht_by_sock) {
4656 lttng_ht_destroy(ust_app_ht_by_sock);
4657 }
4658 if (ust_app_ht_by_notify_sock) {
4659 lttng_ht_destroy(ust_app_ht_by_notify_sock);
4660 }
4661 }
4662
4663 /*
4664 * Init UST app hash table.
4665 */
4666 int ust_app_ht_alloc(void)
4667 {
4668 ust_app_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
4669 if (!ust_app_ht) {
4670 return -1;
4671 }
4672 ust_app_ht_by_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
4673 if (!ust_app_ht_by_sock) {
4674 return -1;
4675 }
4676 ust_app_ht_by_notify_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
4677 if (!ust_app_ht_by_notify_sock) {
4678 return -1;
4679 }
4680 return 0;
4681 }
4682
4683 /*
4684 * For a specific UST session, disable the channel for all registered apps.
4685 */
4686 int ust_app_disable_channel_glb(struct ltt_ust_session *usess,
4687 struct ltt_ust_channel *uchan)
4688 {
4689 int ret = 0;
4690 struct lttng_ht_iter iter;
4691 struct lttng_ht_node_str *ua_chan_node;
4692 struct ust_app *app;
4693 struct ust_app_session *ua_sess;
4694 struct ust_app_channel *ua_chan;
4695
4696 LTTNG_ASSERT(usess->active);
4697 DBG2("UST app disabling channel %s from global domain for session id %" PRIu64,
4698 uchan->name, usess->id);
4699
4700 rcu_read_lock();
4701
4702 /* For every registered applications */
4703 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4704 struct lttng_ht_iter uiter;
4705 if (!app->compatible) {
4706 /*
4707 * TODO: In time, we should notice the caller of this error by
4708 * telling him that this is a version error.
4709 */
4710 continue;
4711 }
4712 ua_sess = lookup_session_by_app(usess, app);
4713 if (ua_sess == NULL) {
4714 continue;
4715 }
4716
4717 /* Get channel */
4718 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4719 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
4720 /* If the session if found for the app, the channel must be there */
4721 LTTNG_ASSERT(ua_chan_node);
4722
4723 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4724 /* The channel must not be already disabled */
4725 LTTNG_ASSERT(ua_chan->enabled == 1);
4726
4727 /* Disable channel onto application */
4728 ret = disable_ust_app_channel(ua_sess, ua_chan, app);
4729 if (ret < 0) {
4730 /* XXX: We might want to report this error at some point... */
4731 continue;
4732 }
4733 }
4734
4735 rcu_read_unlock();
4736 return ret;
4737 }
4738
4739 /*
4740 * For a specific UST session, enable the channel for all registered apps.
4741 */
4742 int ust_app_enable_channel_glb(struct ltt_ust_session *usess,
4743 struct ltt_ust_channel *uchan)
4744 {
4745 int ret = 0;
4746 struct lttng_ht_iter iter;
4747 struct ust_app *app;
4748 struct ust_app_session *ua_sess;
4749
4750 LTTNG_ASSERT(usess->active);
4751 DBG2("UST app enabling channel %s to global domain for session id %" PRIu64,
4752 uchan->name, usess->id);
4753
4754 rcu_read_lock();
4755
4756 /* For every registered applications */
4757 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4758 if (!app->compatible) {
4759 /*
4760 * TODO: In time, we should notice the caller of this error by
4761 * telling him that this is a version error.
4762 */
4763 continue;
4764 }
4765 ua_sess = lookup_session_by_app(usess, app);
4766 if (ua_sess == NULL) {
4767 continue;
4768 }
4769
4770 /* Enable channel onto application */
4771 ret = enable_ust_app_channel(ua_sess, uchan, app);
4772 if (ret < 0) {
4773 /* XXX: We might want to report this error at some point... */
4774 continue;
4775 }
4776 }
4777
4778 rcu_read_unlock();
4779 return ret;
4780 }
4781
4782 /*
4783 * Disable an event in a channel and for a specific session.
4784 */
4785 int ust_app_disable_event_glb(struct ltt_ust_session *usess,
4786 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
4787 {
4788 int ret = 0;
4789 struct lttng_ht_iter iter, uiter;
4790 struct lttng_ht_node_str *ua_chan_node;
4791 struct ust_app *app;
4792 struct ust_app_session *ua_sess;
4793 struct ust_app_channel *ua_chan;
4794 struct ust_app_event *ua_event;
4795
4796 LTTNG_ASSERT(usess->active);
4797 DBG("UST app disabling event %s for all apps in channel "
4798 "%s for session id %" PRIu64,
4799 uevent->attr.name, uchan->name, usess->id);
4800
4801 rcu_read_lock();
4802
4803 /* For all registered applications */
4804 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4805 if (!app->compatible) {
4806 /*
4807 * TODO: In time, we should notice the caller of this error by
4808 * telling him that this is a version error.
4809 */
4810 continue;
4811 }
4812 ua_sess = lookup_session_by_app(usess, app);
4813 if (ua_sess == NULL) {
4814 /* Next app */
4815 continue;
4816 }
4817
4818 /* Lookup channel in the ust app session */
4819 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4820 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
4821 if (ua_chan_node == NULL) {
4822 DBG2("Channel %s not found in session id %" PRIu64 " for app pid %d."
4823 "Skipping", uchan->name, usess->id, app->pid);
4824 continue;
4825 }
4826 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4827
4828 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
4829 uevent->filter, uevent->attr.loglevel,
4830 uevent->exclusion);
4831 if (ua_event == NULL) {
4832 DBG2("Event %s not found in channel %s for app pid %d."
4833 "Skipping", uevent->attr.name, uchan->name, app->pid);
4834 continue;
4835 }
4836
4837 ret = disable_ust_app_event(ua_sess, ua_event, app);
4838 if (ret < 0) {
4839 /* XXX: Report error someday... */
4840 continue;
4841 }
4842 }
4843
4844 rcu_read_unlock();
4845 return ret;
4846 }
4847
/*
 * Create (or, for the metadata channel, configure) a channel on one
 * application and synchronize it: send it to the app and apply its contexts.
 *
 * The ua_sess lock must be held by the caller.
 *
 * Returns 0 on success (a dead/unreachable app is not an error), a negative
 * value on failure. On success, *_ua_chan (if non-NULL) is set to the
 * created application channel; it is left untouched for the metadata case.
 */
static
int ust_app_channel_create(struct ltt_ust_session *usess,
		struct ust_app_session *ua_sess,
		struct ltt_ust_channel *uchan, struct ust_app *app,
		struct ust_app_channel **_ua_chan)
{
	int ret = 0;
	struct ust_app_channel *ua_chan = NULL;

	LTTNG_ASSERT(ua_sess);
	ASSERT_LOCKED(ua_sess->lock);

	if (!strncmp(uchan->name, DEFAULT_METADATA_NAME,
			sizeof(uchan->name))) {
		/* Metadata channel: only record its attributes on the session. */
		copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
				&uchan->attr);
		ret = 0;
	} else {
		struct ltt_ust_context *uctx = NULL;

		/*
		 * Create channel onto application and synchronize its
		 * configuration.
		 */
		ret = ust_app_channel_allocate(ua_sess, uchan,
				LTTNG_UST_ABI_CHAN_PER_CPU, usess,
				&ua_chan);
		if (ret < 0) {
			goto error;
		}

		ret = ust_app_channel_send(app, usess,
				ua_sess, ua_chan);
		if (ret) {
			goto error;
		}

		/* Add contexts. */
		cds_list_for_each_entry(uctx, &uchan->ctx_list, list) {
			ret = create_ust_app_channel_context(ua_chan,
					&uctx->ctx, app);
			if (ret) {
				goto error;
			}
		}
	}

error:
	if (ret < 0) {
		switch (ret) {
		case -ENOTCONN:
			/*
			 * The application's socket is not valid. Either a bad socket
			 * or a timeout on it. We can't inform the caller that for a
			 * specific app, the session failed so lets continue here.
			 */
			ret = 0; /* Not an error. */
			break;
		case -ENOMEM:
		default:
			break;
		}
	}

	if (ret == 0 && _ua_chan) {
		/*
		 * Only return the application's channel on success. Note
		 * that the channel can still be part of the application's
		 * channel hashtable on error.
		 */
		*_ua_chan = ua_chan;
	}
	return ret;
}
4923
4924 /*
4925 * Enable event for a specific session and channel on the tracer.
4926 */
4927 int ust_app_enable_event_glb(struct ltt_ust_session *usess,
4928 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
4929 {
4930 int ret = 0;
4931 struct lttng_ht_iter iter, uiter;
4932 struct lttng_ht_node_str *ua_chan_node;
4933 struct ust_app *app;
4934 struct ust_app_session *ua_sess;
4935 struct ust_app_channel *ua_chan;
4936 struct ust_app_event *ua_event;
4937
4938 LTTNG_ASSERT(usess->active);
4939 DBG("UST app enabling event %s for all apps for session id %" PRIu64,
4940 uevent->attr.name, usess->id);
4941
4942 /*
4943 * NOTE: At this point, this function is called only if the session and
4944 * channel passed are already created for all apps. and enabled on the
4945 * tracer also.
4946 */
4947
4948 rcu_read_lock();
4949
4950 /* For all registered applications */
4951 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4952 if (!app->compatible) {
4953 /*
4954 * TODO: In time, we should notice the caller of this error by
4955 * telling him that this is a version error.
4956 */
4957 continue;
4958 }
4959 ua_sess = lookup_session_by_app(usess, app);
4960 if (!ua_sess) {
4961 /* The application has problem or is probably dead. */
4962 continue;
4963 }
4964
4965 pthread_mutex_lock(&ua_sess->lock);
4966
4967 if (ua_sess->deleted) {
4968 pthread_mutex_unlock(&ua_sess->lock);
4969 continue;
4970 }
4971
4972 /* Lookup channel in the ust app session */
4973 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4974 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
4975 /*
4976 * It is possible that the channel cannot be found is
4977 * the channel/event creation occurs concurrently with
4978 * an application exit.
4979 */
4980 if (!ua_chan_node) {
4981 pthread_mutex_unlock(&ua_sess->lock);
4982 continue;
4983 }
4984
4985 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4986
4987 /* Get event node */
4988 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
4989 uevent->filter, uevent->attr.loglevel, uevent->exclusion);
4990 if (ua_event == NULL) {
4991 DBG3("UST app enable event %s not found for app PID %d."
4992 "Skipping app", uevent->attr.name, app->pid);
4993 goto next_app;
4994 }
4995
4996 ret = enable_ust_app_event(ua_sess, ua_event, app);
4997 if (ret < 0) {
4998 pthread_mutex_unlock(&ua_sess->lock);
4999 goto error;
5000 }
5001 next_app:
5002 pthread_mutex_unlock(&ua_sess->lock);
5003 }
5004
5005 error:
5006 rcu_read_unlock();
5007 return ret;
5008 }
5009
5010 /*
5011 * For a specific existing UST session and UST channel, creates the event for
5012 * all registered apps.
5013 */
5014 int ust_app_create_event_glb(struct ltt_ust_session *usess,
5015 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
5016 {
5017 int ret = 0;
5018 struct lttng_ht_iter iter, uiter;
5019 struct lttng_ht_node_str *ua_chan_node;
5020 struct ust_app *app;
5021 struct ust_app_session *ua_sess;
5022 struct ust_app_channel *ua_chan;
5023
5024 LTTNG_ASSERT(usess->active);
5025 DBG("UST app creating event %s for all apps for session id %" PRIu64,
5026 uevent->attr.name, usess->id);
5027
5028 rcu_read_lock();
5029
5030 /* For all registered applications */
5031 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5032 if (!app->compatible) {
5033 /*
5034 * TODO: In time, we should notice the caller of this error by
5035 * telling him that this is a version error.
5036 */
5037 continue;
5038 }
5039 ua_sess = lookup_session_by_app(usess, app);
5040 if (!ua_sess) {
5041 /* The application has problem or is probably dead. */
5042 continue;
5043 }
5044
5045 pthread_mutex_lock(&ua_sess->lock);
5046
5047 if (ua_sess->deleted) {
5048 pthread_mutex_unlock(&ua_sess->lock);
5049 continue;
5050 }
5051
5052 /* Lookup channel in the ust app session */
5053 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
5054 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
5055 /* If the channel is not found, there is a code flow error */
5056 LTTNG_ASSERT(ua_chan_node);
5057
5058 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
5059
5060 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
5061 pthread_mutex_unlock(&ua_sess->lock);
5062 if (ret < 0) {
5063 if (ret != -LTTNG_UST_ERR_EXIST) {
5064 /* Possible value at this point: -ENOMEM. If so, we stop! */
5065 break;
5066 }
5067 DBG2("UST app event %s already exist on app PID %d",
5068 uevent->attr.name, app->pid);
5069 continue;
5070 }
5071 }
5072
5073 rcu_read_unlock();
5074 return ret;
5075 }
5076
5077 /*
5078 * Start tracing for a specific UST session and app.
5079 *
5080 * Called with UST app session lock held.
5081 *
5082 */
5083 static
5084 int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app)
5085 {
5086 int ret = 0;
5087 struct ust_app_session *ua_sess;
5088
5089 DBG("Starting tracing for ust app pid %d", app->pid);
5090
5091 rcu_read_lock();
5092
5093 if (!app->compatible) {
5094 goto end;
5095 }
5096
5097 ua_sess = lookup_session_by_app(usess, app);
5098 if (ua_sess == NULL) {
5099 /* The session is in teardown process. Ignore and continue. */
5100 goto end;
5101 }
5102
5103 pthread_mutex_lock(&ua_sess->lock);
5104
5105 if (ua_sess->deleted) {
5106 pthread_mutex_unlock(&ua_sess->lock);
5107 goto end;
5108 }
5109
5110 if (ua_sess->enabled) {
5111 pthread_mutex_unlock(&ua_sess->lock);
5112 goto end;
5113 }
5114
5115 /* Upon restart, we skip the setup, already done */
5116 if (ua_sess->started) {
5117 goto skip_setup;
5118 }
5119
5120 health_code_update();
5121
5122 skip_setup:
5123 /* This starts the UST tracing */
5124 pthread_mutex_lock(&app->sock_lock);
5125 ret = lttng_ust_ctl_start_session(app->sock, ua_sess->handle);
5126 pthread_mutex_unlock(&app->sock_lock);
5127 if (ret < 0) {
5128 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
5129 DBG3("UST app start session failed. Application is dead: pid = %d, sock = %d",
5130 app->pid, app->sock);
5131 pthread_mutex_unlock(&ua_sess->lock);
5132 goto end;
5133 } else if (ret == -EAGAIN) {
5134 WARN("UST app start session failed. Communication time out: pid = %d, sock = %d",
5135 app->pid, app->sock);
5136 pthread_mutex_unlock(&ua_sess->lock);
5137 goto end;
5138
5139 } else {
5140 ERR("UST app start session failed with ret %d: pid = %d, sock = %d",
5141 ret, app->pid, app->sock);
5142 }
5143 goto error_unlock;
5144 }
5145
5146 /* Indicate that the session has been started once */
5147 ua_sess->started = 1;
5148 ua_sess->enabled = 1;
5149
5150 pthread_mutex_unlock(&ua_sess->lock);
5151
5152 health_code_update();
5153
5154 /* Quiescent wait after starting trace */
5155 pthread_mutex_lock(&app->sock_lock);
5156 ret = lttng_ust_ctl_wait_quiescent(app->sock);
5157 pthread_mutex_unlock(&app->sock_lock);
5158 if (ret < 0) {
5159 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
5160 DBG3("UST app wait quiescent failed. Application is dead: pid = %d, sock = %d",
5161 app->pid, app->sock);
5162 } else if (ret == -EAGAIN) {
5163 WARN("UST app wait quiescent failed. Communication time out: pid = %d, sock = %d",
5164 app->pid, app->sock);
5165 } else {
5166 ERR("UST app wait quiescent failed with ret %d: pid %d, sock = %d",
5167 ret, app->pid, app->sock);
5168 }
5169 }
5170
5171 end:
5172 rcu_read_unlock();
5173 health_code_update();
5174 return 0;
5175
5176 error_unlock:
5177 pthread_mutex_unlock(&ua_sess->lock);
5178 rcu_read_unlock();
5179 health_code_update();
5180 return -1;
5181 }
5182
/*
 * Stop tracing for a specific UST session and app.
 *
 * Asks the tracer to stop the session, waits for the buffers to reach a
 * quiescent state, then pushes any pending metadata to the consumer.
 *
 * Returns 0 on success or when there is nothing to do (incompatible app,
 * no app session, deleted session, or dead/unreachable application).
 * Returns -1 on error, including a stop requested for a session that was
 * never started.
 */
static
int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
{
	int ret = 0;
	struct ust_app_session *ua_sess;
	struct ust_registry_session *registry;

	DBG("Stopping tracing for ust app pid %d", app->pid);

	rcu_read_lock();

	if (!app->compatible) {
		goto end_no_session;
	}

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		goto end_no_session;
	}

	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		pthread_mutex_unlock(&ua_sess->lock);
		goto end_no_session;
	}

	/*
	 * If started = 0, it means that stop trace has been called for a session
	 * that was never started. It's possible since we can have a fail start
	 * from either the application manager thread or the command thread. Simply
	 * indicate that this is a stop error.
	 */
	if (!ua_sess->started) {
		goto error_rcu_unlock;
	}

	health_code_update();

	/* This inhibits UST tracing */
	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_stop_session(app->sock, ua_sess->handle);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			/* Dead app: not an error from the session daemon's view. */
			DBG3("UST app stop session failed. Application is dead: pid = %d, sock = %d",
					app->pid, app->sock);
			goto end_unlock;
		} else if (ret == -EAGAIN) {
			WARN("UST app stop session failed. Communication time out: pid = %d, sock = %d",
					app->pid, app->sock);
			goto end_unlock;

		} else {
			ERR("UST app stop session failed with ret %d: pid = %d, sock = %d",
					ret, app->pid, app->sock);
		}
		goto error_rcu_unlock;
	}

	health_code_update();
	ua_sess->enabled = 0;

	/* Quiescent wait after stopping trace */
	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_wait_quiescent(app->sock);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		/* Best-effort: failures are only logged. */
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			DBG3("UST app wait quiescent failed. Application is dead: pid= %d, sock = %d",
					app->pid, app->sock);
		} else if (ret == -EAGAIN) {
			WARN("UST app wait quiescent failed. Communication time out: pid= %d, sock = %d",
					app->pid, app->sock);
		} else {
			ERR("UST app wait quiescent failed with ret %d: pid= %d, sock = %d",
					ret, app->pid, app->sock);
		}
	}

	health_code_update();

	registry = get_session_registry(ua_sess);

	/* The UST app session is held registry shall not be null. */
	LTTNG_ASSERT(registry);

	/* Push metadata for application before freeing the application. */
	(void) push_metadata(registry, ua_sess->consumer);

end_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
end_no_session:
	rcu_read_unlock();
	health_code_update();
	return 0;

error_rcu_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
	rcu_read_unlock();
	health_code_update();
	return -1;
}
5289
5290 static
5291 int ust_app_flush_app_session(struct ust_app *app,
5292 struct ust_app_session *ua_sess)
5293 {
5294 int ret, retval = 0;
5295 struct lttng_ht_iter iter;
5296 struct ust_app_channel *ua_chan;
5297 struct consumer_socket *socket;
5298
5299 DBG("Flushing app session buffers for ust app pid %d", app->pid);
5300
5301 rcu_read_lock();
5302
5303 if (!app->compatible) {
5304 goto end_not_compatible;
5305 }
5306
5307 pthread_mutex_lock(&ua_sess->lock);
5308
5309 if (ua_sess->deleted) {
5310 goto end_deleted;
5311 }
5312
5313 health_code_update();
5314
5315 /* Flushing buffers */
5316 socket = consumer_find_socket_by_bitness(app->bits_per_long,
5317 ua_sess->consumer);
5318
5319 /* Flush buffers and push metadata. */
5320 switch (ua_sess->buffer_type) {
5321 case LTTNG_BUFFER_PER_PID:
5322 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
5323 node.node) {
5324 health_code_update();
5325 ret = consumer_flush_channel(socket, ua_chan->key);
5326 if (ret) {
5327 ERR("Error flushing consumer channel");
5328 retval = -1;
5329 continue;
5330 }
5331 }
5332 break;
5333 case LTTNG_BUFFER_PER_UID:
5334 default:
5335 abort();
5336 break;
5337 }
5338
5339 health_code_update();
5340
5341 end_deleted:
5342 pthread_mutex_unlock(&ua_sess->lock);
5343
5344 end_not_compatible:
5345 rcu_read_unlock();
5346 health_code_update();
5347 return retval;
5348 }
5349
/*
 * Flush buffers for all applications for a specific UST session.
 * Called with UST session lock held.
 *
 * Per-UID buffers are flushed through the buffer registry (one flush per
 * registered channel, followed by a metadata push); per-PID buffers are
 * flushed app by app via ust_app_flush_app_session().
 *
 * Returns 0 on success; -1 is only set for an unknown buffer type, which
 * aborts anyway.
 */
static
int ust_app_flush_session(struct ltt_ust_session *usess)

{
	int ret = 0;

	DBG("Flushing session buffers for all ust apps");

	rcu_read_lock();

	/* Flush buffers and push metadata. */
	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;
		struct lttng_ht_iter iter;

		/* Flush all per UID buffers associated to that session. */
		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct ust_registry_session *ust_session_reg;
			struct buffer_reg_channel *buf_reg_chan;
			struct consumer_socket *socket;

			/* Get consumer socket to use to push the metadata.*/
			socket = consumer_find_socket_by_bitness(reg->bits_per_long,
					usess->consumer);
			if (!socket) {
				/* Ignore request if no consumer is found for the session. */
				continue;
			}

			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
					buf_reg_chan, node.node) {
				/*
				 * The following call will print error values so the return
				 * code is of little importance because whatever happens, we
				 * have to try them all.
				 */
				(void) consumer_flush_channel(socket, buf_reg_chan->consumer_key);
			}

			ust_session_reg = reg->registry->reg.ust;
			/* Push metadata. */
			(void) push_metadata(ust_session_reg, usess->consumer);
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		struct ust_app_session *ua_sess;
		struct lttng_ht_iter iter;
		struct ust_app *app;

		/* Flush each registered app that has a session for 'usess'. */
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			ua_sess = lookup_session_by_app(usess, app);
			if (ua_sess == NULL) {
				continue;
			}
			(void) ust_app_flush_app_session(app, ua_sess);
		}
		break;
	}
	default:
		ret = -1;
		abort();
		break;
	}

	rcu_read_unlock();
	health_code_update();
	return ret;
}
5426
/*
 * Clear the quiescent state of every channel stream of one application's
 * session (per-PID buffers only). Ensures a following stop/destroy will
 * grab a fresh timestamp_end even for empty packets.
 *
 * Returns 0 on success, -1 if no consumer socket matches the app's
 * bitness or if at least one channel failed.
 */
static
int ust_app_clear_quiescent_app_session(struct ust_app *app,
		struct ust_app_session *ua_sess)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct ust_app_channel *ua_chan;
	struct consumer_socket *socket;

	DBG("Clearing stream quiescent state for ust app pid %d", app->pid);

	rcu_read_lock();

	if (!app->compatible) {
		goto end_not_compatible;
	}

	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		goto end_unlock;
	}

	health_code_update();

	socket = consumer_find_socket_by_bitness(app->bits_per_long,
			ua_sess->consumer);
	if (!socket) {
		ERR("Failed to find consumer (%" PRIu32 ") socket",
				app->bits_per_long);
		ret = -1;
		goto end_unlock;
	}

	/* Clear quiescent state. */
	switch (ua_sess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter,
				ua_chan, node.node) {
			health_code_update();
			ret = consumer_clear_quiescent_channel(socket,
					ua_chan->key);
			if (ret) {
				/* Keep going; try every channel. */
				ERR("Error clearing quiescent state for consumer channel");
				ret = -1;
				continue;
			}
		}
		break;
	case LTTNG_BUFFER_PER_UID:
	default:
		/* Per-UID sessions are handled through the buffer registry. */
		abort();
		ret = -1;
		break;
	}

	health_code_update();

end_unlock:
	pthread_mutex_unlock(&ua_sess->lock);

end_not_compatible:
	rcu_read_unlock();
	health_code_update();
	return ret;
}
5493
/*
 * Clear quiescent state in each stream for all applications for a
 * specific UST session.
 * Called with UST session lock held.
 *
 * Per-UID buffers are handled directly through the buffer registry;
 * per-PID buffers are handled app by app via
 * ust_app_clear_quiescent_app_session().
 *
 * Returns 0 on success; -1 is only set for an unknown buffer type, which
 * aborts anyway.
 */
static
int ust_app_clear_quiescent_session(struct ltt_ust_session *usess)

{
	int ret = 0;

	DBG("Clearing stream quiescent state for all ust apps");

	rcu_read_lock();

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct lttng_ht_iter iter;
		struct buffer_reg_uid *reg;

		/*
		 * Clear quiescent for all per UID buffers associated to
		 * that session.
		 */
		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct consumer_socket *socket;
			struct buffer_reg_channel *buf_reg_chan;

			/* Get associated consumer socket.*/
			socket = consumer_find_socket_by_bitness(
					reg->bits_per_long, usess->consumer);
			if (!socket) {
				/*
				 * Ignore request if no consumer is found for
				 * the session.
				 */
				continue;
			}

			cds_lfht_for_each_entry(reg->registry->channels->ht,
					&iter.iter, buf_reg_chan, node.node) {
				/*
				 * The following call will print error values so
				 * the return code is of little importance
				 * because whatever happens, we have to try them
				 * all.
				 */
				(void) consumer_clear_quiescent_channel(socket,
						buf_reg_chan->consumer_key);
			}
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		struct ust_app_session *ua_sess;
		struct lttng_ht_iter iter;
		struct ust_app *app;

		/* Handle each registered app that has a session for 'usess'. */
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app,
				pid_n.node) {
			ua_sess = lookup_session_by_app(usess, app);
			if (ua_sess == NULL) {
				continue;
			}
			(void) ust_app_clear_quiescent_app_session(app,
					ua_sess);
		}
		break;
	}
	default:
		ret = -1;
		abort();
		break;
	}

	rcu_read_unlock();
	health_code_update();
	return ret;
}
5575
5576 /*
5577 * Destroy a specific UST session in apps.
5578 */
5579 static int destroy_trace(struct ltt_ust_session *usess, struct ust_app *app)
5580 {
5581 int ret;
5582 struct ust_app_session *ua_sess;
5583 struct lttng_ht_iter iter;
5584 struct lttng_ht_node_u64 *node;
5585
5586 DBG("Destroy tracing for ust app pid %d", app->pid);
5587
5588 rcu_read_lock();
5589
5590 if (!app->compatible) {
5591 goto end;
5592 }
5593
5594 __lookup_session_by_app(usess, app, &iter);
5595 node = lttng_ht_iter_get_node_u64(&iter);
5596 if (node == NULL) {
5597 /* Session is being or is deleted. */
5598 goto end;
5599 }
5600 ua_sess = caa_container_of(node, struct ust_app_session, node);
5601
5602 health_code_update();
5603 destroy_app_session(app, ua_sess);
5604
5605 health_code_update();
5606
5607 /* Quiescent wait after stopping trace */
5608 pthread_mutex_lock(&app->sock_lock);
5609 ret = lttng_ust_ctl_wait_quiescent(app->sock);
5610 pthread_mutex_unlock(&app->sock_lock);
5611 if (ret < 0) {
5612 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
5613 DBG3("UST app wait quiescent failed. Application is dead: pid= %d, sock = %d",
5614 app->pid, app->sock);
5615 } else if (ret == -EAGAIN) {
5616 WARN("UST app wait quiescent failed. Communication time out: pid= %d, sock = %d",
5617 app->pid, app->sock);
5618 } else {
5619 ERR("UST app wait quiescent failed with ret %d: pid= %d, sock = %d",
5620 ret, app->pid, app->sock);
5621 }
5622 }
5623 end:
5624 rcu_read_unlock();
5625 health_code_update();
5626 return 0;
5627 }
5628
5629 /*
5630 * Start tracing for the UST session.
5631 */
5632 int ust_app_start_trace_all(struct ltt_ust_session *usess)
5633 {
5634 struct lttng_ht_iter iter;
5635 struct ust_app *app;
5636
5637 DBG("Starting all UST traces");
5638
5639 /*
5640 * Even though the start trace might fail, flag this session active so
5641 * other application coming in are started by default.
5642 */
5643 usess->active = 1;
5644
5645 rcu_read_lock();
5646
5647 /*
5648 * In a start-stop-start use-case, we need to clear the quiescent state
5649 * of each channel set by the prior stop command, thus ensuring that a
5650 * following stop or destroy is sure to grab a timestamp_end near those
5651 * operations, even if the packet is empty.
5652 */
5653 (void) ust_app_clear_quiescent_session(usess);
5654
5655 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5656 ust_app_global_update(usess, app);
5657 }
5658
5659 rcu_read_unlock();
5660
5661 return 0;
5662 }
5663
/*
 * Stop tracing for the UST session.
 * Called with UST session lock held.
 *
 * Stops each registered application's trace, then flushes the session's
 * buffers. Always returns 0: per-app failures are tolerated, and the
 * session is flagged inactive regardless.
 */
int ust_app_stop_trace_all(struct ltt_ust_session *usess)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;

	DBG("Stopping all UST traces");

	/*
	 * Even though the stop trace might fail, flag this session inactive so
	 * other application coming in are not started by default.
	 */
	usess->active = 0;

	rcu_read_lock();

	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		ret = ust_app_stop_trace(usess, app);
		if (ret < 0) {
			/* Continue to next apps even on error */
			continue;
		}
	}

	/* Best-effort flush; errors are logged by the callee. */
	(void) ust_app_flush_session(usess);

	rcu_read_unlock();

	return 0;
}
5698
5699 /*
5700 * Destroy app UST session.
5701 */
5702 int ust_app_destroy_trace_all(struct ltt_ust_session *usess)
5703 {
5704 int ret = 0;
5705 struct lttng_ht_iter iter;
5706 struct ust_app *app;
5707
5708 DBG("Destroy all UST traces");
5709
5710 rcu_read_lock();
5711
5712 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5713 ret = destroy_trace(usess, app);
5714 if (ret < 0) {
5715 /* Continue to next apps even on error */
5716 continue;
5717 }
5718 }
5719
5720 rcu_read_unlock();
5721
5722 return 0;
5723 }
5724
5725 /* The ua_sess lock must be held by the caller. */
5726 static
5727 int find_or_create_ust_app_channel(
5728 struct ltt_ust_session *usess,
5729 struct ust_app_session *ua_sess,
5730 struct ust_app *app,
5731 struct ltt_ust_channel *uchan,
5732 struct ust_app_channel **ua_chan)
5733 {
5734 int ret = 0;
5735 struct lttng_ht_iter iter;
5736 struct lttng_ht_node_str *ua_chan_node;
5737
5738 lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &iter);
5739 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
5740 if (ua_chan_node) {
5741 *ua_chan = caa_container_of(ua_chan_node,
5742 struct ust_app_channel, node);
5743 goto end;
5744 }
5745
5746 ret = ust_app_channel_create(usess, ua_sess, uchan, app, ua_chan);
5747 if (ret) {
5748 goto end;
5749 }
5750 end:
5751 return ret;
5752 }
5753
5754 static
5755 int ust_app_channel_synchronize_event(struct ust_app_channel *ua_chan,
5756 struct ltt_ust_event *uevent, struct ust_app_session *ua_sess,
5757 struct ust_app *app)
5758 {
5759 int ret = 0;
5760 struct ust_app_event *ua_event = NULL;
5761
5762 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
5763 uevent->filter, uevent->attr.loglevel, uevent->exclusion);
5764 if (!ua_event) {
5765 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
5766 if (ret < 0) {
5767 goto end;
5768 }
5769 } else {
5770 if (ua_event->enabled != uevent->enabled) {
5771 ret = uevent->enabled ?
5772 enable_ust_app_event(ua_sess, ua_event, app) :
5773 disable_ust_app_event(ua_sess, ua_event, app);
5774 }
5775 }
5776
5777 end:
5778 return ret;
5779 }
5780
/*
 * Synchronize the application's event notifier rules with the set of
 * registered triggers.
 *
 * Called with RCU read-side lock held.
 *
 * Step 1 creates an event notifier for every registered user space
 * trigger the app does not know yet; step 2 removes from the app any
 * notifier whose trigger no longer exists on the notification side.
 */
static
void ust_app_synchronize_event_notifier_rules(struct ust_app *app)
{
	int ret = 0;
	enum lttng_error_code ret_code;
	enum lttng_trigger_status t_status;
	struct lttng_ht_iter app_trigger_iter;
	struct lttng_triggers *triggers = NULL;
	struct ust_app_event_notifier_rule *event_notifier_rule;
	unsigned int count, i;

	if (!ust_app_supports_notifiers(app)) {
		goto end;
	}

	/*
	 * Currently, registering or unregistering a trigger with an
	 * event rule condition causes a full synchronization of the event
	 * notifiers.
	 *
	 * The first step attempts to add an event notifier for all registered
	 * triggers that apply to the user space tracers. Then, the
	 * application's event notifiers rules are all checked against the list
	 * of registered triggers. Any event notifier that doesn't have a
	 * matching trigger can be assumed to have been disabled.
	 *
	 * All of this is inefficient, but is put in place to get the feature
	 * rolling as it is simpler at this moment. It will be optimized Soon™
	 * to allow the state of enabled
	 * event notifiers to be synchronized in a piece-wise way.
	 */

	/* Get all triggers using uid 0 (root) */
	ret_code = notification_thread_command_list_triggers(
			the_notification_thread_handle, 0, &triggers);
	if (ret_code != LTTNG_OK) {
		goto end;
	}

	LTTNG_ASSERT(triggers);

	t_status = lttng_triggers_get_count(triggers, &count);
	if (t_status != LTTNG_TRIGGER_STATUS_OK) {
		goto end;
	}

	/* Step 1: create notifiers for triggers unknown to the app. */
	for (i = 0; i < count; i++) {
		struct lttng_condition *condition;
		struct lttng_event_rule *event_rule;
		struct lttng_trigger *trigger;
		const struct ust_app_event_notifier_rule *looked_up_event_notifier_rule;
		enum lttng_condition_status condition_status;
		uint64_t token;

		trigger = lttng_triggers_borrow_mutable_at_index(triggers, i);
		LTTNG_ASSERT(trigger);

		token = lttng_trigger_get_tracer_token(trigger);
		condition = lttng_trigger_get_condition(trigger);

		if (lttng_condition_get_type(condition) !=
				LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES) {
			/* Does not apply */
			continue;
		}

		condition_status =
				lttng_condition_event_rule_matches_borrow_rule_mutable(
					condition, &event_rule);
		LTTNG_ASSERT(condition_status == LTTNG_CONDITION_STATUS_OK);

		if (lttng_event_rule_get_domain_type(event_rule) == LTTNG_DOMAIN_KERNEL) {
			/* Skip kernel related triggers. */
			continue;
		}

		/*
		 * Find or create the associated token event rule. The caller
		 * holds the RCU read lock, so this is safe to call without
		 * explicitly acquiring it here.
		 */
		looked_up_event_notifier_rule = find_ust_app_event_notifier_rule(
				app->token_to_event_notifier_rule_ht, token);
		if (!looked_up_event_notifier_rule) {
			ret = create_ust_app_event_notifier_rule(trigger, app);
			if (ret < 0) {
				goto end;
			}
		}
	}

	rcu_read_lock();
	/* Remove all unknown event sources from the app. */
	cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht,
			&app_trigger_iter.iter, event_notifier_rule,
			node.node) {
		const uint64_t app_token = event_notifier_rule->token;
		bool found = false;

		/*
		 * Check if the app event trigger still exists on the
		 * notification side.
		 */
		for (i = 0; i < count; i++) {
			uint64_t notification_thread_token;
			const struct lttng_trigger *trigger =
					lttng_triggers_get_at_index(
							triggers, i);

			LTTNG_ASSERT(trigger);

			notification_thread_token =
					lttng_trigger_get_tracer_token(trigger);

			if (notification_thread_token == app_token) {
				found = true;
				break;
			}
		}

		if (found) {
			/* Still valid. */
			continue;
		}

		/*
		 * This trigger was unregistered, disable it on the tracer's
		 * side.
		 */
		ret = lttng_ht_del(app->token_to_event_notifier_rule_ht,
				&app_trigger_iter);
		LTTNG_ASSERT(ret == 0);

		/* Callee logs errors. */
		(void) disable_ust_object(app, event_notifier_rule->obj);

		delete_ust_app_event_notifier_rule(
				app->sock, event_notifier_rule, app);
	}

	rcu_read_unlock();

end:
	lttng_triggers_destroy(triggers);
	return;
}
5928
/*
 * Bring every channel (and the events of each channel) of the app
 * session in sync with the session-level configuration: create missing
 * channels/events and align their enabled state.
 *
 * RCU read lock must be held by the caller.
 * NOTE(review): the ua_sess lock is presumably held too, as required by
 * find_or_create_ust_app_channel() — confirm against callers.
 */
static
void ust_app_synchronize_all_channels(struct ltt_ust_session *usess,
		struct ust_app_session *ua_sess,
		struct ust_app *app)
{
	int ret = 0;
	struct cds_lfht_iter uchan_iter;
	struct ltt_ust_channel *uchan;

	LTTNG_ASSERT(usess);
	LTTNG_ASSERT(ua_sess);
	LTTNG_ASSERT(app);

	cds_lfht_for_each_entry(usess->domain_global.channels->ht, &uchan_iter,
			uchan, node.node) {
		struct ust_app_channel *ua_chan;
		struct cds_lfht_iter uevent_iter;
		struct ltt_ust_event *uevent;

		/*
		 * Search for a matching ust_app_channel. If none is found,
		 * create it. Creating the channel will cause the ua_chan
		 * structure to be allocated, the channel buffers to be
		 * allocated (if necessary) and sent to the application, and
		 * all enabled contexts will be added to the channel.
		 */
		ret = find_or_create_ust_app_channel(usess, ua_sess,
			app, uchan, &ua_chan);
		if (ret) {
			/* Tracer is probably gone or ENOMEM. */
			goto end;
		}

		if (!ua_chan) {
			/* ua_chan will be NULL for the metadata channel */
			continue;
		}

		/* Sync every event of this channel (create or toggle). */
		cds_lfht_for_each_entry(uchan->events->ht, &uevent_iter, uevent,
				node.node) {
			ret = ust_app_channel_synchronize_event(ua_chan,
				uevent, ua_sess, app);
			if (ret) {
				goto end;
			}
		}

		/* Align the channel's enabled state with the session's. */
		if (ua_chan->enabled != uchan->enabled) {
			ret = uchan->enabled ?
				enable_ust_app_channel(ua_sess, uchan, app) :
				disable_ust_app_channel(ua_sess, ua_chan, app);
			if (ret) {
				goto end;
			}
		}
	}
end:
	return;
}
5991
/*
 * Synchronize one application's tracing configuration with the session:
 * find or create the app session, sync all channels/events, then create
 * the metadata channel.
 *
 * The caller must ensure that the application is compatible and is tracked
 * by the process attribute trackers.
 */
static
void ust_app_synchronize(struct ltt_ust_session *usess,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_session *ua_sess = NULL;

	/*
	 * The application's configuration should only be synchronized for
	 * active sessions.
	 */
	LTTNG_ASSERT(usess->active);

	ret = find_or_create_ust_app_session(usess, app, &ua_sess, NULL);
	if (ret < 0) {
		/* Tracer is probably gone or ENOMEM. */
		if (ua_sess) {
			/* Tear down any partially-created session. */
			destroy_app_session(app, ua_sess);
		}
		goto end;
	}
	LTTNG_ASSERT(ua_sess);

	pthread_mutex_lock(&ua_sess->lock);
	if (ua_sess->deleted) {
		/* Concurrent teardown: nothing to synchronize. */
		goto deleted_session;
	}

	rcu_read_lock();

	ust_app_synchronize_all_channels(usess, ua_sess, app);

	/*
	 * Create the metadata for the application. This returns gracefully if a
	 * metadata was already set for the session.
	 *
	 * The metadata channel must be created after the data channels as the
	 * consumer daemon assumes this ordering. When interacting with a relay
	 * daemon, the consumer will use this assumption to send the
	 * "STREAMS_SENT" message to the relay daemon.
	 */
	ret = create_ust_app_metadata(ua_sess, app, usess->consumer);
	if (ret < 0) {
		ERR("Metadata creation failed for app sock %d for session id %" PRIu64,
				app->sock, usess->id);
	}

	rcu_read_unlock();

deleted_session:
	pthread_mutex_unlock(&ua_sess->lock);
end:
	return;
}
6050
6051 static
6052 void ust_app_global_destroy(struct ltt_ust_session *usess, struct ust_app *app)
6053 {
6054 struct ust_app_session *ua_sess;
6055
6056 ua_sess = lookup_session_by_app(usess, app);
6057 if (ua_sess == NULL) {
6058 return;
6059 }
6060 destroy_app_session(app, ua_sess);
6061 }
6062
6063 /*
6064 * Add channels/events from UST global domain to registered apps at sock.
6065 *
6066 * Called with session lock held.
6067 * Called with RCU read-side lock held.
6068 */
6069 void ust_app_global_update(struct ltt_ust_session *usess, struct ust_app *app)
6070 {
6071 LTTNG_ASSERT(usess);
6072 LTTNG_ASSERT(usess->active);
6073
6074 DBG2("UST app global update for app sock %d for session id %" PRIu64,
6075 app->sock, usess->id);
6076
6077 if (!app->compatible) {
6078 return;
6079 }
6080 if (trace_ust_id_tracker_lookup(LTTNG_PROCESS_ATTR_VIRTUAL_PROCESS_ID,
6081 usess, app->pid) &&
6082 trace_ust_id_tracker_lookup(
6083 LTTNG_PROCESS_ATTR_VIRTUAL_USER_ID,
6084 usess, app->uid) &&
6085 trace_ust_id_tracker_lookup(
6086 LTTNG_PROCESS_ATTR_VIRTUAL_GROUP_ID,
6087 usess, app->gid)) {
6088 /*
6089 * Synchronize the application's internal tracing configuration
6090 * and start tracing.
6091 */
6092 ust_app_synchronize(usess, app);
6093 ust_app_start_trace(usess, app);
6094 } else {
6095 ust_app_global_destroy(usess, app);
6096 }
6097 }
6098
6099 /*
6100 * Add all event notifiers to an application.
6101 *
6102 * Called with session lock held.
6103 * Called with RCU read-side lock held.
6104 */
6105 void ust_app_global_update_event_notifier_rules(struct ust_app *app)
6106 {
6107 DBG2("UST application global event notifier rules update: app = '%s', pid = %d",
6108 app->name, app->pid);
6109
6110 if (!app->compatible || !ust_app_supports_notifiers(app)) {
6111 return;
6112 }
6113
6114 if (app->event_notifier_group.object == NULL) {
6115 WARN("UST app global update of event notifiers for app skipped since communication handle is null: app = '%s', pid = %d",
6116 app->name, app->pid);
6117 return;
6118 }
6119
6120 ust_app_synchronize_event_notifier_rules(app);
6121 }
6122
6123 /*
6124 * Called with session lock held.
6125 */
6126 void ust_app_global_update_all(struct ltt_ust_session *usess)
6127 {
6128 struct lttng_ht_iter iter;
6129 struct ust_app *app;
6130
6131 rcu_read_lock();
6132 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6133 ust_app_global_update(usess, app);
6134 }
6135 rcu_read_unlock();
6136 }
6137
6138 void ust_app_global_update_all_event_notifier_rules(void)
6139 {
6140 struct lttng_ht_iter iter;
6141 struct ust_app *app;
6142
6143 rcu_read_lock();
6144 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6145 ust_app_global_update_event_notifier_rules(app);
6146 }
6147
6148 rcu_read_unlock();
6149 }
6150
/*
 * Add context to a specific channel for global UST domain.
 *
 * Walks every compatible registered app that has a session for 'usess'
 * and adds the context to the app's channel matching 'uchan->name'.
 * Apps without the channel are skipped silently.
 *
 * Returns the result of the last attempted context creation (0 if none
 * was attempted).
 */
int ust_app_add_ctx_channel_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_context *uctx)
{
	int ret = 0;
	struct lttng_ht_node_str *ua_chan_node;
	struct lttng_ht_iter iter, uiter;
	struct ust_app_channel *ua_chan = NULL;
	struct ust_app_session *ua_sess;
	struct ust_app *app;

	LTTNG_ASSERT(usess->active);

	rcu_read_lock();
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (ua_sess == NULL) {
			continue;
		}

		pthread_mutex_lock(&ua_sess->lock);

		if (ua_sess->deleted) {
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		/* Lookup channel in the ust app session */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		if (ua_chan_node == NULL) {
			/* App has no such channel; move on. */
			goto next_app;
		}
		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel,
				node);
		ret = create_ust_app_channel_context(ua_chan, &uctx->ctx, app);
		if (ret < 0) {
			goto next_app;
		}
	next_app:
		pthread_mutex_unlock(&ua_sess->lock);
	}

	rcu_read_unlock();
	return ret;
}
6206
/*
 * Receive registration and populate the given msg structure.
 *
 * Reads the application's registration message from 'sock' (type,
 * ABI version, pid/ppid/uid/gid, type sizes/alignments, byte order and
 * procname) into 'msg'.
 *
 * On success return 0 else a negative value returned by the ustctl call.
 */
int ust_app_recv_registration(int sock, struct ust_register_msg *msg)
{
	int ret;
	uint32_t pid, ppid, uid, gid;

	LTTNG_ASSERT(msg);

	ret = lttng_ust_ctl_recv_reg_msg(sock, &msg->type, &msg->major, &msg->minor,
			&pid, &ppid, &uid, &gid,
			&msg->bits_per_long,
			&msg->uint8_t_alignment,
			&msg->uint16_t_alignment,
			&msg->uint32_t_alignment,
			&msg->uint64_t_alignment,
			&msg->long_alignment,
			&msg->byte_order,
			msg->name);
	if (ret < 0) {
		switch (-ret) {
		case EPIPE:
		case ECONNRESET:
		case LTTNG_UST_ERR_EXITING:
			DBG3("UST app recv reg message failed. Application died");
			break;
		case LTTNG_UST_ERR_UNSUP_MAJOR:
			ERR("UST app recv reg unsupported version %d.%d. Supporting %d.%d",
					msg->major, msg->minor, LTTNG_UST_ABI_MAJOR_VERSION,
					LTTNG_UST_ABI_MINOR_VERSION);
			break;
		default:
			ERR("UST app recv reg message failed with ret %d", ret);
			break;
		}
		goto error;
	}
	/* Convert from the wire's fixed-width ids to the system types. */
	msg->pid = (pid_t) pid;
	msg->ppid = (pid_t) ppid;
	msg->uid = (uid_t) uid;
	msg->gid = (gid_t) gid;

error:
	return ret;
}
6255
6256 /*
6257 * Return a ust app session object using the application object and the
6258 * session object descriptor has a key. If not found, NULL is returned.
6259 * A RCU read side lock MUST be acquired when calling this function.
6260 */
6261 static struct ust_app_session *find_session_by_objd(struct ust_app *app,
6262 int objd)
6263 {
6264 struct lttng_ht_node_ulong *node;
6265 struct lttng_ht_iter iter;
6266 struct ust_app_session *ua_sess = NULL;
6267
6268 LTTNG_ASSERT(app);
6269
6270 lttng_ht_lookup(app->ust_sessions_objd, (void *)((unsigned long) objd), &iter);
6271 node = lttng_ht_iter_get_node_ulong(&iter);
6272 if (node == NULL) {
6273 DBG2("UST app session find by objd %d not found", objd);
6274 goto error;
6275 }
6276
6277 ua_sess = caa_container_of(node, struct ust_app_session, ust_objd_node);
6278
6279 error:
6280 return ua_sess;
6281 }
6282
6283 /*
6284 * Return a ust app channel object using the application object and the channel
6285 * object descriptor has a key. If not found, NULL is returned. A RCU read side
6286 * lock MUST be acquired before calling this function.
6287 */
6288 static struct ust_app_channel *find_channel_by_objd(struct ust_app *app,
6289 int objd)
6290 {
6291 struct lttng_ht_node_ulong *node;
6292 struct lttng_ht_iter iter;
6293 struct ust_app_channel *ua_chan = NULL;
6294
6295 LTTNG_ASSERT(app);
6296
6297 lttng_ht_lookup(app->ust_objd, (void *)((unsigned long) objd), &iter);
6298 node = lttng_ht_iter_get_node_ulong(&iter);
6299 if (node == NULL) {
6300 DBG2("UST app channel find by objd %d not found", objd);
6301 goto error;
6302 }
6303
6304 ua_chan = caa_container_of(node, struct ust_app_channel, ust_objd_node);
6305
6306 error:
6307 return ua_chan;
6308 }
6309
6310 /*
6311 * Fixup legacy context fields for comparison:
6312 * - legacy array becomes array_nestable,
6313 * - legacy struct becomes struct_nestable,
6314 * - legacy variant becomes variant_nestable,
6315 * legacy sequences are not emitted in LTTng-UST contexts.
6316 */
6317 static int ust_app_fixup_legacy_context_fields(size_t *_nr_fields,
6318 struct lttng_ust_ctl_field **_fields)
6319 {
6320 struct lttng_ust_ctl_field *fields = *_fields, *new_fields = NULL;
6321 size_t nr_fields = *_nr_fields, new_nr_fields = 0, i, j;
6322 bool found = false;
6323 int ret = 0;
6324
6325 for (i = 0; i < nr_fields; i++) {
6326 const struct lttng_ust_ctl_field *field = &fields[i];
6327
6328 switch (field->type.atype) {
6329 case lttng_ust_ctl_atype_sequence:
6330 ERR("Unexpected legacy sequence context.");
6331 ret = -EINVAL;
6332 goto end;
6333 case lttng_ust_ctl_atype_array:
6334 switch (field->type.u.legacy.array.elem_type.atype) {
6335 case lttng_ust_ctl_atype_integer:
6336 break;
6337 default:
6338 ERR("Unexpected legacy array element type in context.");
6339 ret = -EINVAL;
6340 goto end;
6341 }
6342 found = true;
6343 /* One field for array_nested, one field for elem type. */
6344 new_nr_fields += 2;
6345 break;
6346
6347 case lttng_ust_ctl_atype_struct: /* Fallthrough */
6348 case lttng_ust_ctl_atype_variant:
6349 found = true;
6350 new_nr_fields++;
6351 break;
6352 default:
6353 new_nr_fields++;
6354 break;
6355 }
6356 }
6357 if (!found) {
6358 goto end;
6359 }
6360 new_fields = (struct lttng_ust_ctl_field *) zmalloc(sizeof(*new_fields) * new_nr_fields);
6361 if (!new_fields) {
6362 ret = -ENOMEM;
6363 goto end;
6364 }
6365 for (i = 0, j = 0; i < nr_fields; i++, j++) {
6366 const struct lttng_ust_ctl_field *field = &fields[i];
6367 struct lttng_ust_ctl_field *new_field = &new_fields[j];
6368
6369 switch (field->type.atype) {
6370 case lttng_ust_ctl_atype_array:
6371 /* One field for array_nested, one field for elem type. */
6372 strncpy(new_field->name, field->name, LTTNG_UST_ABI_SYM_NAME_LEN - 1);
6373 new_field->type.atype = lttng_ust_ctl_atype_array_nestable;
6374 new_field->type.u.array_nestable.length = field->type.u.legacy.array.length;
6375 new_field->type.u.array_nestable.alignment = 0;
6376 new_field = &new_fields[++j]; /* elem type */
6377 new_field->type.atype = field->type.u.legacy.array.elem_type.atype;
6378 assert(new_field->type.atype == lttng_ust_ctl_atype_integer);
6379 new_field->type.u.integer = field->type.u.legacy.array.elem_type.u.basic.integer;
6380 break;
6381 case lttng_ust_ctl_atype_struct:
6382 strncpy(new_field->name, field->name, LTTNG_UST_ABI_SYM_NAME_LEN - 1);
6383 new_field->type.atype = lttng_ust_ctl_atype_struct_nestable;
6384 new_field->type.u.struct_nestable.nr_fields = field->type.u.legacy._struct.nr_fields;
6385 new_field->type.u.struct_nestable.alignment = 0;
6386 break;
6387 case lttng_ust_ctl_atype_variant:
6388 strncpy(new_field->name, field->name, LTTNG_UST_ABI_SYM_NAME_LEN - 1);
6389 new_field->type.atype = lttng_ust_ctl_atype_variant_nestable;
6390 new_field->type.u.variant_nestable.nr_choices = field->type.u.legacy.variant.nr_choices;
6391 strncpy(new_field->type.u.variant_nestable.tag_name,
6392 field->type.u.legacy.variant.tag_name,
6393 LTTNG_UST_ABI_SYM_NAME_LEN - 1);
6394 new_field->type.u.variant_nestable.alignment = 0;
6395 break;
6396 default:
6397 *new_field = *field;
6398 break;
6399 }
6400 }
6401 free(fields);
6402 *_fields = new_fields;
6403 *_nr_fields = new_nr_fields;
6404 end:
6405 return ret;
6406 }
6407
/*
 * Reply to a register channel notification from an application on the notify
 * socket. The channel metadata is also created.
 *
 * The session UST registry lock is acquired in this function.
 *
 * This function takes ownership of 'fields': the array is either handed
 * over to the registry channel or freed on exit.
 *
 * On success 0 is returned else a negative value.
 */
static int reply_ust_register_channel(int sock, int cobjd,
		size_t nr_fields, struct lttng_ust_ctl_field *fields)
{
	int ret, ret_code = 0;
	uint32_t chan_id;
	uint64_t chan_reg_key;
	enum lttng_ust_ctl_channel_header type = LTTNG_UST_CTL_CHANNEL_HEADER_UNKNOWN;
	struct ust_app *app;
	struct ust_app_channel *ua_chan;
	struct ust_app_session *ua_sess;
	struct ust_registry_session *registry;
	struct ust_registry_channel *ust_reg_chan;

	rcu_read_lock();

	/* Lookup application. If not found, there is a code flow error. */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		DBG("Application socket %d is being torn down. Abort event notify",
				sock);
		ret = -1;
		goto error_rcu_unlock;
	}

	/* Lookup channel by UST object descriptor. */
	ua_chan = find_channel_by_objd(app, cobjd);
	if (!ua_chan) {
		/* Channel torn down concurrently: not an error (ret = 0). */
		DBG("Application channel is being torn down. Abort event notify");
		ret = 0;
		goto error_rcu_unlock;
	}

	LTTNG_ASSERT(ua_chan->session);
	ua_sess = ua_chan->session;

	/* Get right session registry depending on the session buffer type. */
	registry = get_session_registry(ua_sess);
	if (!registry) {
		DBG("Application session is being torn down. Abort event notify");
		ret = 0;
		goto error_rcu_unlock;
	}; /* NOTE(review): stray semicolon, harmless. */

	/* Depending on the buffer type, a different channel key is used. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
		/* Per-UID buffers share one registry channel across apps. */
		chan_reg_key = ua_chan->tracing_channel_id;
	} else {
		chan_reg_key = ua_chan->key;
	}

	pthread_mutex_lock(&registry->lock);

	ust_reg_chan = ust_registry_channel_find(registry, chan_reg_key);
	LTTNG_ASSERT(ust_reg_chan);

	/* Channel id is set during the object creation. */
	chan_id = ust_reg_chan->chan_id;

	ret = ust_app_fixup_legacy_context_fields(&nr_fields, &fields);
	if (ret < 0) {
		ERR("Registering application channel due to legacy context fields fixup error: pid = %d, sock = %d",
				app->pid, app->sock);
		ret_code = -EINVAL;
		/* Still reply so the app is notified of the failure. */
		goto reply;
	}
	if (!ust_reg_chan->register_done) {
		/*
		 * TODO: eventually use the registry event count for
		 * this channel to better guess header type for per-pid
		 * buffers.
		 */
		type = LTTNG_UST_CTL_CHANNEL_HEADER_LARGE;
		/* Ownership of 'fields' is transferred to the registry. */
		ust_reg_chan->nr_ctx_fields = nr_fields;
		ust_reg_chan->ctx_fields = fields;
		fields = NULL;
		ust_reg_chan->header_type = type;
	} else {
		/* Get current already assigned values. */
		type = ust_reg_chan->header_type;
		/*
		 * Validate that the context fields match between
		 * registry and newcoming application.
		 */
		if (!match_lttng_ust_ctl_field_array(ust_reg_chan->ctx_fields,
				ust_reg_chan->nr_ctx_fields,
				fields, nr_fields)) {
			ERR("Registering application channel due to context field mismatch: pid = %d, sock = %d",
					app->pid, app->sock);
			ret_code = -EINVAL;
			goto reply;
		}
	}

	/* Append to metadata */
	if (!ust_reg_chan->metadata_dumped) {
		ret_code = ust_metadata_channel_statedump(registry, ust_reg_chan);
		if (ret_code) {
			ERR("Error appending channel metadata (errno = %d)", ret_code);
			goto reply;
		}
	}

reply:
	DBG3("UST app replying to register channel key %" PRIu64
			" with id %u, type = %d, ret = %d", chan_reg_key, chan_id, type,
			ret_code);

	/* Forward ret_code (possibly an error) to the application. */
	ret = lttng_ust_ctl_reply_register_channel(sock, chan_id, type, ret_code);
	if (ret < 0) {
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			DBG3("UST app reply channel failed. Application died: pid = %d, sock = %d",
					app->pid, app->sock);
		} else if (ret == -EAGAIN) {
			WARN("UST app reply channel failed. Communication time out: pid = %d, sock = %d",
					app->pid, app->sock);
		} else {
			ERR("UST app reply channel failed with ret %d: pid = %d, sock = %d",
					ret, app->pid, app->sock);
		}
		goto error;
	}

	/* This channel registry registration is completed. */
	ust_reg_chan->register_done = 1;

error:
	pthread_mutex_unlock(&registry->lock);
error_rcu_unlock:
	rcu_read_unlock();
	/* No-op if ownership was transferred to the registry above. */
	free(fields);
	return ret;
}
6548
/*
 * Add event to the UST channel registry. When the event is added to the
 * registry, the metadata is also created. Once done, this replies to the
 * application with the appropriate error code.
 *
 * The session UST registry lock is acquired in the function.
 *
 * Ownership of 'sig', 'fields' and 'model_emf_uri' is taken by this
 * function: they are either consumed by the registry or freed on exit.
 *
 * On success 0 is returned else a negative value.
 */
static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
		char *sig, size_t nr_fields, struct lttng_ust_ctl_field *fields,
		int loglevel_value, char *model_emf_uri)
{
	int ret, ret_code;
	uint32_t event_id = 0;
	uint64_t chan_reg_key;
	struct ust_app *app;
	struct ust_app_channel *ua_chan;
	struct ust_app_session *ua_sess;
	struct ust_registry_session *registry;

	rcu_read_lock();

	/* Lookup application. If not found, there is a code flow error. */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		DBG("Application socket %d is being torn down. Abort event notify",
				sock);
		ret = -1;
		goto error_rcu_unlock;
	}

	/* Lookup channel by UST object descriptor. */
	ua_chan = find_channel_by_objd(app, cobjd);
	if (!ua_chan) {
		/* Channel torn down concurrently: not an error (ret = 0). */
		DBG("Application channel is being torn down. Abort event notify");
		ret = 0;
		goto error_rcu_unlock;
	}

	LTTNG_ASSERT(ua_chan->session);
	ua_sess = ua_chan->session;

	registry = get_session_registry(ua_sess);
	if (!registry) {
		DBG("Application session is being torn down. Abort event notify");
		ret = 0;
		goto error_rcu_unlock;
	}

	/* Per-UID buffers use the shared tracing channel id as key. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
		chan_reg_key = ua_chan->tracing_channel_id;
	} else {
		chan_reg_key = ua_chan->key;
	}

	pthread_mutex_lock(&registry->lock);

	/*
	 * From this point on, this call acquires the ownership of the sig, fields
	 * and model_emf_uri meaning any free are done inside it if needed. These
	 * three variables MUST NOT be read/write after this.
	 */
	ret_code = ust_registry_create_event(registry, chan_reg_key,
			sobjd, cobjd, name, sig, nr_fields, fields,
			loglevel_value, model_emf_uri, ua_sess->buffer_type,
			&event_id, app);
	sig = NULL;
	fields = NULL;
	model_emf_uri = NULL;

	/*
	 * The return value is returned to ustctl so in case of an error, the
	 * application can be notified. In case of an error, it's important not to
	 * return a negative error or else the application will get closed.
	 */
	ret = lttng_ust_ctl_reply_register_event(sock, event_id, ret_code);
	if (ret < 0) {
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			DBG3("UST app reply event failed. Application died: pid = %d, sock = %d.",
					app->pid, app->sock);
		} else if (ret == -EAGAIN) {
			WARN("UST app reply event failed. Communication time out: pid = %d, sock = %d",
					app->pid, app->sock);
		} else {
			ERR("UST app reply event failed with ret %d: pid = %d, sock = %d",
					ret, app->pid, app->sock);
		}
		/*
		 * No need to wipe the create event since the application socket will
		 * get close on error hence cleaning up everything by itself.
		 */
		goto error;
	}

	DBG3("UST registry event %s with id %" PRId32 " added successfully",
			name, event_id);

error:
	pthread_mutex_unlock(&registry->lock);
error_rcu_unlock:
	rcu_read_unlock();
	/* No-ops when ownership was transferred to the registry above. */
	free(sig);
	free(fields);
	free(model_emf_uri);
	return ret;
}
6656
/*
 * Add enum to the UST session registry. Once done, this replies to the
 * application with the appropriate error code.
 *
 * The session UST registry lock is acquired within this function.
 *
 * Ownership of 'entries' is taken by this function: it is either consumed
 * by the registry or freed before returning.
 *
 * On success 0 is returned else a negative value.
 */
static int add_enum_ust_registry(int sock, int sobjd, char *name,
		struct lttng_ust_ctl_enum_entry *entries, size_t nr_entries)
{
	int ret = 0, ret_code;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_registry_session *registry;
	uint64_t enum_id = -1ULL;

	rcu_read_lock();

	/* Lookup application. If not found, there is a code flow error. */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		/* App is being torn down: stop processing this notify socket. */
		DBG("Application socket %d is being torn down. Aborting enum registration",
				sock);
		free(entries);
		ret = -1;
		goto error_rcu_unlock;
	}

	/* Lookup session by UST object descriptor. */
	ua_sess = find_session_by_objd(app, sobjd);
	if (!ua_sess) {
		/* Concurrent teardown: not an error, ret stays 0. */
		DBG("Application session is being torn down (session not found). Aborting enum registration.");
		free(entries);
		goto error_rcu_unlock;
	}

	registry = get_session_registry(ua_sess);
	if (!registry) {
		/* Concurrent teardown: not an error, ret stays 0. */
		DBG("Application session is being torn down (registry not found). Aborting enum registration.");
		free(entries);
		goto error_rcu_unlock;
	}

	pthread_mutex_lock(&registry->lock);

	/*
	 * From this point on, the callee acquires the ownership of
	 * entries. The variable entries MUST NOT be read/written after
	 * call.
	 */
	ret_code = ust_registry_create_or_find_enum(registry, sobjd, name,
			entries, nr_entries, &enum_id);
	entries = NULL;

	/*
	 * The return value is returned to ustctl so in case of an error, the
	 * application can be notified. In case of an error, it's important not to
	 * return a negative error or else the application will get closed.
	 */
	ret = lttng_ust_ctl_reply_register_enum(sock, enum_id, ret_code);
	if (ret < 0) {
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			DBG3("UST app reply enum failed. Application died: pid = %d, sock = %d",
					app->pid, app->sock);
		} else if (ret == -EAGAIN) {
			WARN("UST app reply enum failed. Communication time out: pid = %d, sock = %d",
					app->pid, app->sock);
		} else {
			ERR("UST app reply enum failed with ret %d: pid = %d, sock = %d",
					ret, app->pid, app->sock);
		}
		/*
		 * No need to wipe the create enum since the application socket will
		 * get close on error hence cleaning up everything by itself.
		 */
		goto error;
	}

	DBG3("UST registry enum %s added successfully or already found", name);

error:
	pthread_mutex_unlock(&registry->lock);
error_rcu_unlock:
	rcu_read_unlock();
	return ret;
}
6746
/*
 * Handle application notification through the given notify socket.
 *
 * Receives one notify command (event, channel or enum registration) and
 * dispatches it to the matching registry helper, which also replies to the
 * application.
 *
 * Return 0 on success or else a negative value.
 */
int ust_app_recv_notify(int sock)
{
	int ret;
	enum lttng_ust_ctl_notify_cmd cmd;

	DBG3("UST app receiving notify from sock %d", sock);

	ret = lttng_ust_ctl_recv_notify(sock, &cmd);
	if (ret < 0) {
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			DBG3("UST app recv notify failed. Application died: sock = %d",
					sock);
		} else if (ret == -EAGAIN) {
			WARN("UST app recv notify failed. Communication time out: sock = %d",
					sock);
		} else {
			ERR("UST app recv notify failed with ret %d: sock = %d",
					ret, sock);
		}
		goto error;
	}

	switch (cmd) {
	case LTTNG_UST_CTL_NOTIFY_CMD_EVENT:
	{
		int sobjd, cobjd, loglevel_value;
		char name[LTTNG_UST_ABI_SYM_NAME_LEN], *sig, *model_emf_uri;
		size_t nr_fields;
		struct lttng_ust_ctl_field *fields;

		DBG2("UST app ustctl register event received");

		/* On success, sig/fields/model_emf_uri are heap-allocated. */
		ret = lttng_ust_ctl_recv_register_event(sock, &sobjd, &cobjd, name,
				&loglevel_value, &sig, &nr_fields, &fields,
				&model_emf_uri);
		if (ret < 0) {
			if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
				DBG3("UST app recv event failed. Application died: sock = %d",
						sock);
			} else if (ret == -EAGAIN) {
				WARN("UST app recv event failed. Communication time out: sock = %d",
						sock);
			} else {
				ERR("UST app recv event failed with ret %d: sock = %d",
						ret, sock);
			}
			goto error;
		}

		/*
		 * Add event to the UST registry coming from the notify socket. This
		 * call will free if needed the sig, fields and model_emf_uri. This
		 * code path loses the ownership of these variables and transfers
		 * them to the called function.
		 */
		ret = add_event_ust_registry(sock, sobjd, cobjd, name, sig, nr_fields,
				fields, loglevel_value, model_emf_uri);
		if (ret < 0) {
			goto error;
		}

		break;
	}
	case LTTNG_UST_CTL_NOTIFY_CMD_CHANNEL:
	{
		int sobjd, cobjd;
		size_t nr_fields;
		struct lttng_ust_ctl_field *fields;

		DBG2("UST app ustctl register channel received");

		/* On success, fields is heap-allocated. */
		ret = lttng_ust_ctl_recv_register_channel(sock, &sobjd, &cobjd, &nr_fields,
				&fields);
		if (ret < 0) {
			if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
				DBG3("UST app recv channel failed. Application died: sock = %d",
						sock);
			} else if (ret == -EAGAIN) {
				WARN("UST app recv channel failed. Communication time out: sock = %d",
						sock);
			} else {
				ERR("UST app recv channel failed with ret %d: sock = %d",
						ret, sock);
			}
			goto error;
		}

		/*
		 * The fields ownership are transfered to this function call meaning
		 * that if needed it will be freed. After this, it's invalid to access
		 * fields or clean it up.
		 */
		ret = reply_ust_register_channel(sock, cobjd, nr_fields,
				fields);
		if (ret < 0) {
			goto error;
		}

		break;
	}
	case LTTNG_UST_CTL_NOTIFY_CMD_ENUM:
	{
		int sobjd;
		char name[LTTNG_UST_ABI_SYM_NAME_LEN];
		size_t nr_entries;
		struct lttng_ust_ctl_enum_entry *entries;

		DBG2("UST app ustctl register enum received");

		/* On success, entries is heap-allocated. */
		ret = lttng_ust_ctl_recv_register_enum(sock, &sobjd, name,
				&entries, &nr_entries);
		if (ret < 0) {
			if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
				DBG3("UST app recv enum failed. Application died: sock = %d",
						sock);
			} else if (ret == -EAGAIN) {
				WARN("UST app recv enum failed. Communication time out: sock = %d",
						sock);
			} else {
				ERR("UST app recv enum failed with ret %d: sock = %d",
						ret, sock);
			}
			goto error;
		}

		/* Callee assumes ownership of entries */
		ret = add_enum_ust_registry(sock, sobjd, name,
				entries, nr_entries);
		if (ret < 0) {
			goto error;
		}

		break;
	}
	default:
		/* Should NEVER happen. */
		abort();
	}

error:
	return ret;
}
6894
/*
 * Once the notify socket hangs up, this is called. First, it tries to find the
 * corresponding application. On failure, the call_rcu to close the socket is
 * executed. If an application is found, it tries to delete it from the notify
 * socket hash table. Whatever the result, it proceeds to the call_rcu.
 *
 * Note that an object needs to be allocated here so on ENOMEM failure, the
 * call RCU is not done but the rest of the cleanup is.
 */
void ust_app_notify_sock_unregister(int sock)
{
	int err_enomem = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct ust_app_notify_sock_obj *obj;

	LTTNG_ASSERT(sock >= 0);

	rcu_read_lock();

	/* Holder passed to call_rcu so the fd can be closed after a grace period. */
	obj = (ust_app_notify_sock_obj *) zmalloc(sizeof(*obj));
	if (!obj) {
		/*
		 * An ENOMEM is kind of uncool. If this strikes we continue the
		 * procedure but the call_rcu will not be called. In this case, we
		 * accept the fd leak rather than possibly creating an unsynchronized
		 * state between threads.
		 *
		 * TODO: The notify object should be created once the notify socket is
		 * registered and stored independently from the ust app object. The
		 * tricky part is to synchronize the teardown of the application and
		 * this notify object. Let's keep that in mind so we can avoid this
		 * kind of shenanigans with ENOMEM in the teardown path.
		 */
		err_enomem = 1;
	} else {
		obj->fd = sock;
	}

	DBG("UST app notify socket unregister %d", sock);

	/*
	 * Lookup application by notify socket. If this fails, this means that the
	 * hash table delete has already been done by the application
	 * unregistration process so we can safely close the notify socket in a
	 * call RCU.
	 */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		goto close_socket;
	}

	iter.iter.node = &app->notify_sock_n.node;

	/*
	 * Whatever happens here either we fail or succeed, in both cases we have
	 * to close the socket after a grace period to continue to the call RCU
	 * here. If the deletion is successful, the application is not visible
	 * anymore by other threads and if it fails it means that it was already
	 * deleted from the hash table so either way we just have to close the
	 * socket.
	 */
	(void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);

close_socket:
	rcu_read_unlock();

	/*
	 * Close socket after a grace period to avoid for the socket to be reused
	 * before the application object is freed creating potential race between
	 * threads trying to add unique in the global hash table.
	 */
	if (!err_enomem) {
		call_rcu(&obj->head, close_notify_sock_rcu);
	}
}
6971
6972 /*
6973 * Destroy a ust app data structure and free its memory.
6974 */
6975 void ust_app_destroy(struct ust_app *app)
6976 {
6977 if (!app) {
6978 return;
6979 }
6980
6981 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
6982 }
6983
/*
 * Take a snapshot for a given UST session. The snapshot is sent to the given
 * output.
 *
 * 'wait' is forwarded to the consumer to indicate whether to wait for
 * snapshot completion; 'nb_packets_per_stream' bounds the captured data per
 * stream (0 means no limit).
 *
 * Returns LTTNG_OK on success or a LTTNG_ERR error code.
 */
enum lttng_error_code ust_app_snapshot_record(
		const struct ltt_ust_session *usess,
		const struct consumer_output *output, int wait,
		uint64_t nb_packets_per_stream)
{
	int ret = 0;
	enum lttng_error_code status = LTTNG_OK;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	char *trace_path = NULL;

	LTTNG_ASSERT(usess);
	LTTNG_ASSERT(output);

	rcu_read_lock();

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;

		/* One registry per traced UID/bitness combination. */
		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct buffer_reg_channel *buf_reg_chan;
			struct consumer_socket *socket;
			char pathname[PATH_MAX];
			size_t consumer_path_offset = 0;

			if (!reg->registry->reg.ust->metadata_key) {
				/* Skip since no metadata is present */
				continue;
			}

			/* Get consumer socket to use to push the metadata.*/
			socket = consumer_find_socket_by_bitness(reg->bits_per_long,
					usess->consumer);
			if (!socket) {
				status = LTTNG_ERR_INVALID;
				goto error;
			}

			memset(pathname, 0, sizeof(pathname));
			ret = snprintf(pathname, sizeof(pathname),
					DEFAULT_UST_TRACE_UID_PATH,
					reg->uid, reg->bits_per_long);
			if (ret < 0) {
				PERROR("snprintf snapshot path");
				status = LTTNG_ERR_INVALID;
				goto error;
			}
			/* Free path allowed on previous iteration. */
			free(trace_path);
			trace_path = setup_channel_trace_path(usess->consumer, pathname,
					&consumer_path_offset);
			if (!trace_path) {
				status = LTTNG_ERR_INVALID;
				goto error;
			}
			/* Add the UST default trace dir to path. */
			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
					buf_reg_chan, node.node) {
				/* Snapshot every data channel of this registry. */
				status = consumer_snapshot_channel(socket,
						buf_reg_chan->consumer_key,
						output, 0, usess->uid,
						usess->gid, &trace_path[consumer_path_offset], wait,
						nb_packets_per_stream);
				if (status != LTTNG_OK) {
					goto error;
				}
			}
			/* Then the metadata channel (metadata flag = 1). */
			status = consumer_snapshot_channel(socket,
					reg->registry->reg.ust->metadata_key, output, 1,
					usess->uid, usess->gid, &trace_path[consumer_path_offset],
					wait, 0);
			if (status != LTTNG_OK) {
				goto error;
			}
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		/* Walk every registered application attached to this session. */
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			struct consumer_socket *socket;
			struct lttng_ht_iter chan_iter;
			struct ust_app_channel *ua_chan;
			struct ust_app_session *ua_sess;
			struct ust_registry_session *registry;
			char pathname[PATH_MAX];
			size_t consumer_path_offset = 0;

			ua_sess = lookup_session_by_app(usess, app);
			if (!ua_sess) {
				/* Session not associated with this app. */
				continue;
			}

			/* Get the right consumer socket for the application. */
			socket = consumer_find_socket_by_bitness(app->bits_per_long,
					output);
			if (!socket) {
				status = LTTNG_ERR_INVALID;
				goto error;
			}

			/* Add the UST default trace dir to path. */
			memset(pathname, 0, sizeof(pathname));
			ret = snprintf(pathname, sizeof(pathname), "%s",
					ua_sess->path);
			if (ret < 0) {
				status = LTTNG_ERR_INVALID;
				PERROR("snprintf snapshot path");
				goto error;
			}
			/* Free path allowed on previous iteration. */
			free(trace_path);
			trace_path = setup_channel_trace_path(usess->consumer, pathname,
					&consumer_path_offset);
			if (!trace_path) {
				status = LTTNG_ERR_INVALID;
				goto error;
			}
			cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
					ua_chan, node.node) {
				status = consumer_snapshot_channel(socket,
						ua_chan->key, output, 0,
						lttng_credentials_get_uid(&ua_sess->effective_credentials),
						lttng_credentials_get_gid(&ua_sess->effective_credentials),
						&trace_path[consumer_path_offset], wait,
						nb_packets_per_stream);
				switch (status) {
				case LTTNG_OK:
					break;
				case LTTNG_ERR_CHAN_NOT_FOUND:
					/* App may have exited concurrently; not fatal. */
					continue;
				default:
					goto error;
				}
			}

			registry = get_session_registry(ua_sess);
			if (!registry) {
				DBG("Application session is being torn down. Skip application.");
				continue;
			}
			status = consumer_snapshot_channel(socket,
					registry->metadata_key, output, 1,
					lttng_credentials_get_uid(&ua_sess->effective_credentials),
					lttng_credentials_get_gid(&ua_sess->effective_credentials),
					&trace_path[consumer_path_offset], wait, 0);
			switch (status) {
			case LTTNG_OK:
				break;
			case LTTNG_ERR_CHAN_NOT_FOUND:
				/* App may have exited concurrently; not fatal. */
				continue;
			default:
				goto error;
			}
		}
		break;
	}
	default:
		abort();
		break;
	}

error:
	free(trace_path);
	rcu_read_unlock();
	return status;
}
7160
/*
 * Return the size taken by one more packet per stream.
 *
 * Sums, over every channel of the session, subbuf_size * stream_count for
 * channels whose sub-buffer count is not already fully covered by
 * 'cur_nr_packets'. Used to size snapshot captures.
 */
uint64_t ust_app_get_size_one_more_packet_per_stream(
		const struct ltt_ust_session *usess, uint64_t cur_nr_packets)
{
	uint64_t tot_size = 0;
	struct ust_app *app;
	struct lttng_ht_iter iter;

	LTTNG_ASSERT(usess);

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;

		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct buffer_reg_channel *buf_reg_chan;

			rcu_read_lock();
			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
					buf_reg_chan, node.node) {
				if (cur_nr_packets >= buf_reg_chan->num_subbuf) {
					/*
					 * Don't take channel into account if we
					 * already grab all its packets.
					 */
					continue;
				}
				tot_size += buf_reg_chan->subbuf_size * buf_reg_chan->stream_count;
			}
			rcu_read_unlock();
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		rcu_read_lock();
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			struct ust_app_channel *ua_chan;
			struct ust_app_session *ua_sess;
			struct lttng_ht_iter chan_iter;

			ua_sess = lookup_session_by_app(usess, app);
			if (!ua_sess) {
				/* Session not associated with this app. */
				continue;
			}

			cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
					ua_chan, node.node) {
				if (cur_nr_packets >= ua_chan->attr.num_subbuf) {
					/*
					 * Don't take channel into account if we
					 * already grab all its packets.
					 */
					continue;
				}
				tot_size += ua_chan->attr.subbuf_size * ua_chan->streams.count;
			}
		}
		rcu_read_unlock();
		break;
	}
	default:
		abort();
		break;
	}

	return tot_size;
}
7233
7234 int ust_app_uid_get_channel_runtime_stats(uint64_t ust_session_id,
7235 struct cds_list_head *buffer_reg_uid_list,
7236 struct consumer_output *consumer, uint64_t uchan_id,
7237 int overwrite, uint64_t *discarded, uint64_t *lost)
7238 {
7239 int ret;
7240 uint64_t consumer_chan_key;
7241
7242 *discarded = 0;
7243 *lost = 0;
7244
7245 ret = buffer_reg_uid_consumer_channel_key(
7246 buffer_reg_uid_list, uchan_id, &consumer_chan_key);
7247 if (ret < 0) {
7248 /* Not found */
7249 ret = 0;
7250 goto end;
7251 }
7252
7253 if (overwrite) {
7254 ret = consumer_get_lost_packets(ust_session_id,
7255 consumer_chan_key, consumer, lost);
7256 } else {
7257 ret = consumer_get_discarded_events(ust_session_id,
7258 consumer_chan_key, consumer, discarded);
7259 }
7260
7261 end:
7262 return ret;
7263 }
7264
/*
 * Fetch the runtime statistics of a per-PID channel: lost packets when in
 * overwrite mode, discarded events otherwise. The counters are summed over
 * every registered application that has this session and channel. Both
 * output counters are zeroed first. Returns 0 on success or a negative
 * value on consumer communication error.
 */
int ust_app_pid_get_channel_runtime_stats(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan,
		struct consumer_output *consumer, int overwrite,
		uint64_t *discarded, uint64_t *lost)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;

	*discarded = 0;
	*lost = 0;

	rcu_read_lock();
	/*
	 * Iterate over every registered applications. Sum counters for
	 * all applications containing requested session and channel.
	 */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		struct lttng_ht_iter uiter;

		ua_sess = lookup_session_by_app(usess, app);
		if (ua_sess == NULL) {
			/* App not part of this session. */
			continue;
		}

		/* Get channel */
		lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		/* If the session is found for the app, the channel must be there */
		LTTNG_ASSERT(ua_chan_node);

		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

		if (overwrite) {
			uint64_t _lost;

			ret = consumer_get_lost_packets(usess->id, ua_chan->key,
					consumer, &_lost);
			if (ret < 0) {
				/* Stop on first consumer error. */
				break;
			}
			(*lost) += _lost;
		} else {
			uint64_t _discarded;

			ret = consumer_get_discarded_events(usess->id,
					ua_chan->key, consumer, &_discarded);
			if (ret < 0) {
				/* Stop on first consumer error. */
				break;
			}
			(*discarded) += _discarded;
		}
	}

	rcu_read_unlock();
	return ret;
}
7325
/*
 * Ask a single application to regenerate its statedump for the given
 * session. Returns 0 on success (including when the session/app pairing is
 * being torn down) else a negative lttng-ust-ctl error code.
 */
static
int ust_app_regenerate_statedump(struct ltt_ust_session *usess,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_session *ua_sess;

	DBG("Regenerating the metadata for ust app pid %d", app->pid);

	rcu_read_lock();

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		/* The session is in teardown process. Ignore and continue. */
		goto end;
	}

	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		/* Session marked for deletion while acquiring the lock. */
		goto end_unlock;
	}

	/* Serialize accesses to the application's command socket. */
	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_regenerate_statedump(app->sock, ua_sess->handle);
	pthread_mutex_unlock(&app->sock_lock);

end_unlock:
	pthread_mutex_unlock(&ua_sess->lock);

end:
	rcu_read_unlock();
	health_code_update();
	return ret;
}
7361
7362 /*
7363 * Regenerate the statedump for each app in the session.
7364 */
7365 int ust_app_regenerate_statedump_all(struct ltt_ust_session *usess)
7366 {
7367 int ret = 0;
7368 struct lttng_ht_iter iter;
7369 struct ust_app *app;
7370
7371 DBG("Regenerating the metadata for all UST apps");
7372
7373 rcu_read_lock();
7374
7375 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
7376 if (!app->compatible) {
7377 continue;
7378 }
7379
7380 ret = ust_app_regenerate_statedump(usess, app);
7381 if (ret < 0) {
7382 /* Continue to the next app even on error */
7383 continue;
7384 }
7385 }
7386
7387 rcu_read_unlock();
7388
7389 return 0;
7390 }
7391
/*
 * Rotate all the channels of a session.
 *
 * The traversal depends on the session's buffer ownership model:
 *  - per-UID: walk each uid buffer registry, rotating every data channel,
 *    then (if one exists) push the metadata and rotate the metadata channel;
 *  - per-PID: walk every registered application's session for `usess` and do
 *    the same per application.
 *
 * Return LTTNG_OK on success or else an LTTng error code.
 */
enum lttng_error_code ust_app_rotate_session(struct ltt_session *session)
{
	int ret;
	enum lttng_error_code cmd_ret = LTTNG_OK;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct ltt_ust_session *usess = session->ust_session;

	LTTNG_ASSERT(usess);

	rcu_read_lock();

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;

		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct buffer_reg_channel *buf_reg_chan;
			struct consumer_socket *socket;

			/* Get consumer socket to use to push the metadata.*/
			socket = consumer_find_socket_by_bitness(reg->bits_per_long,
					usess->consumer);
			if (!socket) {
				cmd_ret = LTTNG_ERR_INVALID;
				goto error;
			}

			/* Rotate the data channels. */
			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
					buf_reg_chan, node.node) {
				ret = consumer_rotate_channel(socket,
						buf_reg_chan->consumer_key,
						usess->uid, usess->gid,
						usess->consumer,
						/* is_metadata_channel */ false);
				if (ret < 0) {
					cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
					goto error;
				}
			}

			/*
			 * The metadata channel might not be present.
			 *
			 * Consumer stream allocation can be done
			 * asynchronously and can fail on intermediary
			 * operations (i.e add context) and lead to data
			 * channels created with no metadata channel.
			 */
			if (!reg->registry->reg.ust->metadata_key) {
				/* Skip since no metadata is present. */
				continue;
			}

			/*
			 * Push any pending metadata before rotating so the new
			 * chunk's metadata stream is complete; best-effort.
			 */
			(void) push_metadata(reg->registry->reg.ust, usess->consumer);

			ret = consumer_rotate_channel(socket,
					reg->registry->reg.ust->metadata_key,
					usess->uid, usess->gid,
					usess->consumer,
					/* is_metadata_channel */ true);
			if (ret < 0) {
				cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
				goto error;
			}
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			struct consumer_socket *socket;
			struct lttng_ht_iter chan_iter;
			struct ust_app_channel *ua_chan;
			struct ust_app_session *ua_sess;
			struct ust_registry_session *registry;

			ua_sess = lookup_session_by_app(usess, app);
			if (!ua_sess) {
				/* Session not associated with this app. */
				continue;
			}

			/* Get the right consumer socket for the application. */
			socket = consumer_find_socket_by_bitness(app->bits_per_long,
					usess->consumer);
			if (!socket) {
				cmd_ret = LTTNG_ERR_INVALID;
				goto error;
			}

			registry = get_session_registry(ua_sess);
			if (!registry) {
				DBG("Application session is being torn down. Skip application.");
				continue;
			}

			/* Rotate the data channels. */
			cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
					ua_chan, node.node) {
				ret = consumer_rotate_channel(socket,
						ua_chan->key,
						lttng_credentials_get_uid(&ua_sess->effective_credentials),
						lttng_credentials_get_gid(&ua_sess->effective_credentials),
						ua_sess->consumer,
						/* is_metadata_channel */ false);
				if (ret < 0) {
					/* Per-PID buffer and application going away. */
					if (ret == -LTTNG_ERR_CHAN_NOT_FOUND)
						continue;
					cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
					goto error;
				}
			}

			/* Rotate the metadata channel. */
			(void) push_metadata(registry, usess->consumer);
			ret = consumer_rotate_channel(socket,
					registry->metadata_key,
					lttng_credentials_get_uid(&ua_sess->effective_credentials),
					lttng_credentials_get_gid(&ua_sess->effective_credentials),
					ua_sess->consumer,
					/* is_metadata_channel */ true);
			if (ret < 0) {
				/* Per-PID buffer and application going away. */
				if (ret == -LTTNG_ERR_CHAN_NOT_FOUND)
					continue;
				cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
				goto error;
			}
		}
		break;
	}
	default:
		abort();
		break;
	}

	cmd_ret = LTTNG_OK;

error:
	rcu_read_unlock();
	return cmd_ret;
}
7543
/*
 * Create, in the session's current trace chunk, the per-channel directory
 * hierarchy used by the UST domain.
 *
 * Creating the deepest component (the index directory) implicitly creates
 * every parent directory, including the channel's own path. In per-UID mode
 * one subtree is created per uid buffer registry; in per-PID mode one is
 * created per application session (plus the top-level ust/ directory so the
 * hierarchy exists even when no app is running).
 *
 * Return LTTNG_OK on success or else an LTTng error code.
 */
enum lttng_error_code ust_app_create_channel_subdirectories(
		const struct ltt_ust_session *usess)
{
	enum lttng_error_code ret = LTTNG_OK;
	struct lttng_ht_iter iter;
	enum lttng_trace_chunk_status chunk_status;
	char *pathname_index;
	int fmt_ret;

	LTTNG_ASSERT(usess->current_trace_chunk);
	rcu_read_lock();

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;

		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			/* e.g. ust/uid/<uid>/<bitness>/index */
			fmt_ret = asprintf(&pathname_index,
				       DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH "/" DEFAULT_INDEX_DIR,
				       reg->uid, reg->bits_per_long);
			if (fmt_ret < 0) {
				ERR("Failed to format channel index directory");
				ret = LTTNG_ERR_CREATE_DIR_FAIL;
				goto error;
			}

			/*
			 * Create the index subdirectory which will take care
			 * of implicitly creating the channel's path.
			 */
			chunk_status = lttng_trace_chunk_create_subdirectory(
					usess->current_trace_chunk,
					pathname_index);
			free(pathname_index);
			if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
				ret = LTTNG_ERR_CREATE_DIR_FAIL;
				goto error;
			}
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		struct ust_app *app;

		/*
		 * Create the toplevel ust/ directory in case no apps are running.
		 */
		chunk_status = lttng_trace_chunk_create_subdirectory(
				usess->current_trace_chunk,
				DEFAULT_UST_TRACE_DIR);
		if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
			ret = LTTNG_ERR_CREATE_DIR_FAIL;
			goto error;
		}

		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app,
				pid_n.node) {
			struct ust_app_session *ua_sess;
			struct ust_registry_session *registry;

			ua_sess = lookup_session_by_app(usess, app);
			if (!ua_sess) {
				/* Session not associated with this app. */
				continue;
			}

			registry = get_session_registry(ua_sess);
			if (!registry) {
				DBG("Application session is being torn down. Skip application.");
				continue;
			}

			/* e.g. ust/<app path>/index */
			fmt_ret = asprintf(&pathname_index,
					DEFAULT_UST_TRACE_DIR "/%s/" DEFAULT_INDEX_DIR,
					ua_sess->path);
			if (fmt_ret < 0) {
				ERR("Failed to format channel index directory");
				ret = LTTNG_ERR_CREATE_DIR_FAIL;
				goto error;
			}
			/*
			 * Create the index subdirectory which will take care
			 * of implicitly creating the channel's path.
			 */
			chunk_status = lttng_trace_chunk_create_subdirectory(
					usess->current_trace_chunk,
					pathname_index);
			free(pathname_index);
			if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
				ret = LTTNG_ERR_CREATE_DIR_FAIL;
				goto error;
			}
		}
		break;
	}
	default:
		abort();
	}

	ret = LTTNG_OK;
error:
	rcu_read_unlock();
	return ret;
}
7650
/*
 * Clear all the channels of a session.
 *
 * The session must be inactive (stopped); this is asserted at runtime and
 * reported as LTTNG_ERR_FATAL otherwise. Data channels are cleared through
 * the consumer; the metadata channel is not truly cleared but a rotation-like
 * operation is performed on it behind the scene. Consumer-side errors are
 * mapped to LTTng error codes in the error: label below.
 *
 * Return LTTNG_OK on success or else an LTTng error code.
 */
enum lttng_error_code ust_app_clear_session(struct ltt_session *session)
{
	int ret;
	enum lttng_error_code cmd_ret = LTTNG_OK;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct ltt_ust_session *usess = session->ust_session;

	LTTNG_ASSERT(usess);

	rcu_read_lock();

	if (usess->active) {
		ERR("Expecting inactive session %s (%" PRIu64 ")", session->name, session->id);
		cmd_ret = LTTNG_ERR_FATAL;
		goto end;
	}

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;

		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct buffer_reg_channel *buf_reg_chan;
			struct consumer_socket *socket;

			/* Get consumer socket to use to push the metadata.*/
			socket = consumer_find_socket_by_bitness(reg->bits_per_long,
					usess->consumer);
			if (!socket) {
				cmd_ret = LTTNG_ERR_INVALID;
				goto error_socket;
			}

			/* Clear the data channels. */
			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
					buf_reg_chan, node.node) {
				ret = consumer_clear_channel(socket,
						buf_reg_chan->consumer_key);
				if (ret < 0) {
					goto error;
				}
			}

			/* Flush pending metadata first; best-effort. */
			(void) push_metadata(reg->registry->reg.ust, usess->consumer);

			/*
			 * Clear the metadata channel.
			 * Metadata channel is not cleared per se but we still need to
			 * perform a rotation operation on it behind the scene.
			 */
			ret = consumer_clear_channel(socket,
					reg->registry->reg.ust->metadata_key);
			if (ret < 0) {
				goto error;
			}
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			struct consumer_socket *socket;
			struct lttng_ht_iter chan_iter;
			struct ust_app_channel *ua_chan;
			struct ust_app_session *ua_sess;
			struct ust_registry_session *registry;

			ua_sess = lookup_session_by_app(usess, app);
			if (!ua_sess) {
				/* Session not associated with this app. */
				continue;
			}

			/* Get the right consumer socket for the application. */
			socket = consumer_find_socket_by_bitness(app->bits_per_long,
					usess->consumer);
			if (!socket) {
				cmd_ret = LTTNG_ERR_INVALID;
				goto error_socket;
			}

			registry = get_session_registry(ua_sess);
			if (!registry) {
				DBG("Application session is being torn down. Skip application.");
				continue;
			}

			/* Clear the data channels. */
			cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
					ua_chan, node.node) {
				ret = consumer_clear_channel(socket, ua_chan->key);
				if (ret < 0) {
					/* Per-PID buffer and application going away. */
					if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
						continue;
					}
					goto error;
				}
			}

			/* Flush pending metadata first; best-effort. */
			(void) push_metadata(registry, usess->consumer);

			/*
			 * Clear the metadata channel.
			 * Metadata channel is not cleared per se but we still need to
			 * perform rotation operation on it behind the scene.
			 */
			ret = consumer_clear_channel(socket, registry->metadata_key);
			if (ret < 0) {
				/* Per-PID buffer and application going away. */
				if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
					continue;
				}
				goto error;
			}
		}
		break;
	}
	default:
		abort();
		break;
	}

	cmd_ret = LTTNG_OK;
	goto end;

error:
	/* Map the consumer error (negative ret) to an LTTng error code. */
	switch (-ret) {
	case LTTCOMM_CONSUMERD_RELAYD_CLEAR_DISALLOWED:
		cmd_ret = LTTNG_ERR_CLEAR_RELAY_DISALLOWED;
		break;
	default:
		cmd_ret = LTTNG_ERR_CLEAR_FAIL_CONSUMER;
	}

error_socket:
end:
	rcu_read_unlock();
	return cmd_ret;
}
7798
/*
 * Open a packet in every stream of every data channel of the session.
 *
 * This function skips the metadata channel as the begin/end timestamps of a
 * metadata packet are useless.
 *
 * Moreover, opening a packet after a "clear" will cause problems for live
 * sessions as it will introduce padding that was not part of the first trace
 * chunk. The relay daemon expects the content of the metadata stream of
 * successive metadata trace chunks to be strict supersets of one another.
 *
 * For example, flushing a packet at the beginning of the metadata stream of
 * a trace chunk resulting from a "clear" session command will cause the
 * size of the metadata stream of the new trace chunk to not match the size of
 * the metadata stream of the original chunk. This will confuse the relay
 * daemon as the same "offset" in a metadata stream will no longer point
 * to the same content.
 *
 * Return LTTNG_OK on success or else an LTTng error code.
 */
enum lttng_error_code ust_app_open_packets(struct ltt_session *session)
{
	enum lttng_error_code ret = LTTNG_OK;
	struct lttng_ht_iter iter;
	struct ltt_ust_session *usess = session->ust_session;

	LTTNG_ASSERT(usess);

	rcu_read_lock();

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;

		/* One channel registry per uid buffer registry. */
		cds_list_for_each_entry (
				reg, &usess->buffer_reg_uid_list, lnode) {
			struct buffer_reg_channel *buf_reg_chan;
			struct consumer_socket *socket;

			socket = consumer_find_socket_by_bitness(
					reg->bits_per_long, usess->consumer);
			if (!socket) {
				ret = LTTNG_ERR_FATAL;
				goto error;
			}

			cds_lfht_for_each_entry(reg->registry->channels->ht,
					&iter.iter, buf_reg_chan, node.node) {
				const int open_ret =
						consumer_open_channel_packets(
							socket,
							buf_reg_chan->consumer_key);

				if (open_ret < 0) {
					ret = LTTNG_ERR_UNK;
					goto error;
				}
			}
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		struct ust_app *app;

		/* Walk every registered app tracing this session. */
		cds_lfht_for_each_entry (
				ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			struct consumer_socket *socket;
			struct lttng_ht_iter chan_iter;
			struct ust_app_channel *ua_chan;
			struct ust_app_session *ua_sess;
			struct ust_registry_session *registry;

			ua_sess = lookup_session_by_app(usess, app);
			if (!ua_sess) {
				/* Session not associated with this app. */
				continue;
			}

			/* Get the right consumer socket for the application. */
			socket = consumer_find_socket_by_bitness(
					app->bits_per_long, usess->consumer);
			if (!socket) {
				ret = LTTNG_ERR_FATAL;
				goto error;
			}

			registry = get_session_registry(ua_sess);
			if (!registry) {
				DBG("Application session is being torn down. Skip application.");
				continue;
			}

			cds_lfht_for_each_entry(ua_sess->channels->ht,
					&chan_iter.iter, ua_chan, node.node) {
				const int open_ret =
						consumer_open_channel_packets(
							socket,
							ua_chan->key);

				if (open_ret < 0) {
					/*
					 * Per-PID buffer and application going
					 * away.
					 */
					if (open_ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
						continue;
					}

					ret = LTTNG_ERR_UNK;
					goto error;
				}
			}
		}
		break;
	}
	default:
		abort();
		break;
	}

error:
	rcu_read_unlock();
	return ret;
}
This page took 0.219611 seconds and 4 git commands to generate.