Cleanup: liblttng-ust: change `int` flag to `bool`
[lttng-ust.git] / liblttng-ust / lttng-events.c
1 /*
2 * lttng-events.c
3 *
4 * Holds LTTng per-session event registry.
5 *
6 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; only
11 * version 2.1 of the License.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23 #define _GNU_SOURCE
24 #define _LGPL_SOURCE
25 #include <stdio.h>
26 #include <urcu/list.h>
27 #include <urcu/hlist.h>
28 #include <pthread.h>
29 #include <errno.h>
30 #include <sys/shm.h>
31 #include <sys/ipc.h>
32 #include <stdint.h>
33 #include <stddef.h>
34 #include <inttypes.h>
35 #include <time.h>
36 #include <stdbool.h>
37 #include <lttng/ust-endian.h>
38 #include "clock.h"
39
40 #include <urcu-bp.h>
41 #include <urcu/compiler.h>
42 #include <urcu/uatomic.h>
43 #include <urcu/arch.h>
44
45 #include <lttng/tracepoint.h>
46 #include <lttng/ust-events.h>
47
48 #include <usterr-signal-safe.h>
49 #include <helper.h>
50 #include <lttng/ust-ctl.h>
51 #include <ust-comm.h>
52 #include <lttng/ust-dynamic-type.h>
53 #include <lttng/ust-context-provider.h>
54 #include "error.h"
55 #include "compat.h"
56 #include "lttng-ust-uuid.h"
57
58 #include "tracepoint-internal.h"
59 #include "string-utils.h"
60 #include "lttng-tracer.h"
61 #include "lttng-tracer-core.h"
62 #include "lttng-ust-statedump.h"
63 #include "wait.h"
64 #include "../libringbuffer/shm.h"
65 #include "jhash.h"
66
67 /*
68 * All operations within this file are called by the communication
69 * thread, under ust_lock protection.
70 */
71
72 static CDS_LIST_HEAD(sessions);
73
74 struct cds_list_head *_lttng_get_sessions(void)
75 {
76 return &sessions;
77 }
78
79 static void _lttng_event_destroy(struct lttng_event *event);
80 static void _lttng_enum_destroy(struct lttng_enum *_enum);
81
82 static
83 void lttng_session_lazy_sync_enablers(struct lttng_session *session);
84 static
85 void lttng_session_sync_enablers(struct lttng_session *session);
86 static
87 void lttng_enabler_destroy(struct lttng_enabler *enabler);
88
89 /*
90 * Called with ust lock held.
91 */
92 int lttng_session_active(void)
93 {
94 struct lttng_session *iter;
95
96 cds_list_for_each_entry(iter, &sessions, node) {
97 if (iter->active)
98 return 1;
99 }
100 return 0;
101 }
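/*
 * Editor's sketch (not part of the original file): the locking pattern
 * implied by "Called with ust lock held" above, mirroring
 * lttng_handle_pending_statedump() further down. A nonzero return from
 * ust_lock() means the operation must be skipped, but ust_unlock()
 * still has to be called.
 */
#if 0	/* illustrative only */
static void example_query_active_sessions(void)
{
	if (ust_lock())
		goto end;
	if (lttng_session_active())
		DBG("at least one tracing session is currently active");
end:
	ust_unlock();
}
#endif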
102
103 static
104 int lttng_loglevel_match(int loglevel,
105 unsigned int has_loglevel,
106 enum lttng_ust_loglevel_type req_type,
107 int req_loglevel)
108 {
109 if (!has_loglevel)
110 loglevel = TRACE_DEFAULT;
111 switch (req_type) {
112 case LTTNG_UST_LOGLEVEL_RANGE:
113 if (loglevel <= req_loglevel
114 || (req_loglevel == -1 && loglevel <= TRACE_DEBUG))
115 return 1;
116 else
117 return 0;
118 case LTTNG_UST_LOGLEVEL_SINGLE:
119 if (loglevel == req_loglevel
120 || (req_loglevel == -1 && loglevel <= TRACE_DEBUG))
121 return 1;
122 else
123 return 0;
124 case LTTNG_UST_LOGLEVEL_ALL:
125 default:
126 if (loglevel <= TRACE_DEBUG)
127 return 1;
128 else
129 return 0;
130 }
131 }
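/*
 * Editor's worked example (sketch, not part of the original file).
 * Loglevels follow the syslog convention: a lower numerical value means
 * a higher severity. Assuming TRACE_WARNING < TRACE_INFO < TRACE_DEBUG,
 * a RANGE request at TRACE_INFO matches a probe declared at
 * TRACE_WARNING but not one declared at TRACE_DEBUG, while a SINGLE
 * request only matches the exact level.
 */
#if 0	/* illustrative only */
assert(lttng_loglevel_match(TRACE_WARNING, 1,
		LTTNG_UST_LOGLEVEL_RANGE, TRACE_INFO) == 1);
assert(lttng_loglevel_match(TRACE_DEBUG, 1,
		LTTNG_UST_LOGLEVEL_RANGE, TRACE_INFO) == 0);
assert(lttng_loglevel_match(TRACE_WARNING, 1,
		LTTNG_UST_LOGLEVEL_SINGLE, TRACE_INFO) == 0);
#endif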
132
133 void synchronize_trace(void)
134 {
135 synchronize_rcu();
136 }
137
138 struct lttng_session *lttng_session_create(void)
139 {
140 struct lttng_session *session;
141 int i;
142
143 session = zmalloc(sizeof(struct lttng_session));
144 if (!session)
145 return NULL;
146 if (lttng_session_context_init(&session->ctx)) {
147 free(session);
148 return NULL;
149 }
150 CDS_INIT_LIST_HEAD(&session->chan_head);
151 CDS_INIT_LIST_HEAD(&session->events_head);
152 CDS_INIT_LIST_HEAD(&session->enums_head);
153 CDS_INIT_LIST_HEAD(&session->enablers_head);
154 for (i = 0; i < LTTNG_UST_EVENT_HT_SIZE; i++)
155 CDS_INIT_HLIST_HEAD(&session->events_ht.table[i]);
156 for (i = 0; i < LTTNG_UST_ENUM_HT_SIZE; i++)
157 CDS_INIT_HLIST_HEAD(&session->enums_ht.table[i]);
158 cds_list_add(&session->node, &sessions);
159 return session;
160 }
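/*
 * Editor's sketch (not part of the original file): the session
 * lifecycle implemented in this file. In practice these calls are
 * driven by the session daemon through the communication thread, with
 * the UST lock held; channel/enabler creation, the notify socket and
 * error handling are elided.
 */
#if 0	/* illustrative only */
static void example_session_lifecycle(void)
{
	struct lttng_session *session;

	session = lttng_session_create();
	if (!session)
		return;
	/* ... channels and enablers are created via sessiond commands ... */
	(void) lttng_session_enable(session);	/* sync enablers, register channels, queue statedump */
	(void) lttng_session_disable(session);
	lttng_session_destroy(session);
}
#endif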
161
162 /*
163 * Only used internally at session destruction.
164 */
165 static
166 void _lttng_channel_unmap(struct lttng_channel *lttng_chan)
167 {
168 struct channel *chan;
169 struct lttng_ust_shm_handle *handle;
170
171 cds_list_del(&lttng_chan->node);
172 lttng_destroy_context(lttng_chan->ctx);
173 chan = lttng_chan->chan;
174 handle = lttng_chan->handle;
175 /*
176 * note: lttng_chan is private data contained within handle. It
177 * will be freed along with the handle.
178 */
179 channel_destroy(chan, handle, 0);
180 }
181
182 static
183 void register_event(struct lttng_event *event)
184 {
185 int ret;
186 const struct lttng_event_desc *desc;
187
188 assert(event->registered == 0);
189 desc = event->desc;
190 ret = __tracepoint_probe_register_queue_release(desc->name,
191 desc->probe_callback,
192 event, desc->signature);
193 WARN_ON_ONCE(ret);
194 if (!ret)
195 event->registered = 1;
196 }
197
198 static
199 void unregister_event(struct lttng_event *event)
200 {
201 int ret;
202 const struct lttng_event_desc *desc;
203
204 assert(event->registered == 1);
205 desc = event->desc;
206 ret = __tracepoint_probe_unregister_queue_release(desc->name,
207 desc->probe_callback,
208 event);
209 WARN_ON_ONCE(ret);
210 if (!ret)
211 event->registered = 0;
212 }
213
214 /*
215 * Only used internally at session destruction.
216 */
217 static
218 void _lttng_event_unregister(struct lttng_event *event)
219 {
220 if (event->registered)
221 unregister_event(event);
222 }
223
224 void lttng_session_destroy(struct lttng_session *session)
225 {
226 struct lttng_channel *chan, *tmpchan;
227 struct lttng_event *event, *tmpevent;
228 struct lttng_enum *_enum, *tmp_enum;
229 struct lttng_enabler *enabler, *tmpenabler;
230
231 CMM_ACCESS_ONCE(session->active) = 0;
232 cds_list_for_each_entry(event, &session->events_head, node) {
233 _lttng_event_unregister(event);
234 }
235 synchronize_trace(); /* Wait for in-flight events to complete */
236 __tracepoint_probe_prune_release_queue();
237 cds_list_for_each_entry_safe(enabler, tmpenabler,
238 &session->enablers_head, node)
239 lttng_enabler_destroy(enabler);
240 cds_list_for_each_entry_safe(event, tmpevent,
241 &session->events_head, node)
242 _lttng_event_destroy(event);
243 cds_list_for_each_entry_safe(_enum, tmp_enum,
244 &session->enums_head, node)
245 _lttng_enum_destroy(_enum);
246 cds_list_for_each_entry_safe(chan, tmpchan, &session->chan_head, node)
247 _lttng_channel_unmap(chan);
248 cds_list_del(&session->node);
249 lttng_destroy_context(session->ctx);
250 free(session);
251 }
252
253 static
254 int lttng_enum_create(const struct lttng_enum_desc *desc,
255 struct lttng_session *session)
256 {
257 const char *enum_name = desc->name;
258 struct lttng_enum *_enum;
259 struct cds_hlist_head *head;
260 int ret = 0;
261 size_t name_len = strlen(enum_name);
262 uint32_t hash;
263 int notify_socket;
264
265 /* Check if this enum is already registered for this session. */
266 hash = jhash(enum_name, name_len, 0);
267 head = &session->enums_ht.table[hash & (LTTNG_UST_ENUM_HT_SIZE - 1)];
268
269 _enum = lttng_ust_enum_get_from_desc(session, desc);
270 if (_enum) {
271 ret = -EEXIST;
272 goto exist;
273 }
274
275 notify_socket = lttng_get_notify_socket(session->owner);
276 if (notify_socket < 0) {
277 ret = notify_socket;
278 goto socket_error;
279 }
280
281 _enum = zmalloc(sizeof(*_enum));
282 if (!_enum) {
283 ret = -ENOMEM;
284 goto cache_error;
285 }
286 _enum->session = session;
287 _enum->desc = desc;
288
289 ret = ustcomm_register_enum(notify_socket,
290 session->objd,
291 enum_name,
292 desc->nr_entries,
293 desc->entries,
294 &_enum->id);
295 if (ret < 0) {
296 DBG("Error (%d) registering enumeration to sessiond", ret);
297 goto sessiond_register_error;
298 }
299 cds_list_add(&_enum->node, &session->enums_head);
300 cds_hlist_add_head(&_enum->hlist, head);
301 return 0;
302
303 sessiond_register_error:
304 free(_enum);
305 cache_error:
306 socket_error:
307 exist:
308 return ret;
309 }
310
311 static
312 int lttng_create_enum_check(const struct lttng_type *type,
313 struct lttng_session *session)
314 {
315 switch (type->atype) {
316 case atype_enum:
317 {
318 const struct lttng_enum_desc *enum_desc;
319 int ret;
320
321 enum_desc = type->u.basic.enumeration.desc;
322 ret = lttng_enum_create(enum_desc, session);
323 if (ret && ret != -EEXIST) {
324 DBG("Unable to create enum error: (%d)", ret);
325 return ret;
326 }
327 break;
328 }
329 case atype_dynamic:
330 {
331 const struct lttng_event_field *tag_field_generic;
332 const struct lttng_enum_desc *enum_desc;
333 int ret;
334
335 tag_field_generic = lttng_ust_dynamic_type_tag_field();
336 enum_desc = tag_field_generic->type.u.basic.enumeration.desc;
337 ret = lttng_enum_create(enum_desc, session);
338 if (ret && ret != -EEXIST) {
339 DBG("Unable to create enum error: (%d)", ret);
340 return ret;
341 }
342 break;
343 }
344 default:
345 /* TODO: nested types when they become supported. */
346 break;
347 }
348 return 0;
349 }
350
351 static
352 int lttng_create_all_event_enums(size_t nr_fields,
353 const struct lttng_event_field *event_fields,
354 struct lttng_session *session)
355 {
356 size_t i;
357 int ret;
358
359 /* For each field, ensure enum is part of the session. */
360 for (i = 0; i < nr_fields; i++) {
361 const struct lttng_type *type = &event_fields[i].type;
362
363 ret = lttng_create_enum_check(type, session);
364 if (ret)
365 return ret;
366 }
367 return 0;
368 }
369
370 static
371 int lttng_create_all_ctx_enums(size_t nr_fields,
372 const struct lttng_ctx_field *ctx_fields,
373 struct lttng_session *session)
374 {
375 size_t i;
376 int ret;
377
378 /* For each field, ensure enum is part of the session. */
379 for (i = 0; i < nr_fields; i++) {
380 const struct lttng_type *type = &ctx_fields[i].event_field.type;
381
382 ret = lttng_create_enum_check(type, session);
383 if (ret)
384 return ret;
385 }
386 return 0;
387 }
388
389 /*
390 * Ensure that a state-dump will be performed for this session at the end
391 * of the current handle_message().
392 */
393 int lttng_session_statedump(struct lttng_session *session)
394 {
395 session->statedump_pending = 1;
396 lttng_ust_sockinfo_session_enabled(session->owner);
397 return 0;
398 }
399
400 int lttng_session_enable(struct lttng_session *session)
401 {
402 int ret = 0;
403 struct lttng_channel *chan;
404 int notify_socket;
405
406 if (session->active) {
407 ret = -EBUSY;
408 goto end;
409 }
410
411 notify_socket = lttng_get_notify_socket(session->owner);
412 if (notify_socket < 0)
413 return notify_socket;
414
415 /* Set transient enabler state to "enabled" */
416 session->tstate = 1;
417
418 /* We need to sync enablers with session before activation. */
419 lttng_session_sync_enablers(session);
420
421 /*
422 * Snapshot the number of events per channel to know the type of header
423 * we need to use.
424 */
425 cds_list_for_each_entry(chan, &session->chan_head, node) {
426 const struct lttng_ctx *ctx;
427 const struct lttng_ctx_field *fields = NULL;
428 size_t nr_fields = 0;
429 uint32_t chan_id;
430
431 /* Don't change the header type on session stop/restart. */
432 if (chan->header_type)
433 continue;
434 ctx = chan->ctx;
435 if (ctx) {
436 nr_fields = ctx->nr_fields;
437 fields = ctx->fields;
438 ret = lttng_create_all_ctx_enums(nr_fields, fields,
439 session);
440 if (ret < 0) {
441 DBG("Error (%d) adding enum to session", ret);
442 return ret;
443 }
444 }
445 ret = ustcomm_register_channel(notify_socket,
446 session,
447 session->objd,
448 chan->objd,
449 nr_fields,
450 fields,
451 &chan_id,
452 &chan->header_type);
453 if (ret) {
454 DBG("Error (%d) registering channel to sessiond", ret);
455 return ret;
456 }
457 if (chan_id != chan->id) {
458 DBG("Error: channel registration id (%u) does not match id assigned at creation (%u)",
459 chan_id, chan->id);
460 return -EINVAL;
461 }
462 }
463
464 /* Set atomically the state to "active" */
465 CMM_ACCESS_ONCE(session->active) = 1;
466 CMM_ACCESS_ONCE(session->been_active) = 1;
467
468 ret = lttng_session_statedump(session);
469 if (ret)
470 return ret;
471 end:
472 return ret;
473 }
474
475 int lttng_session_disable(struct lttng_session *session)
476 {
477 int ret = 0;
478
479 if (!session->active) {
480 ret = -EBUSY;
481 goto end;
482 }
483 /* Set atomically the state to "inactive" */
484 CMM_ACCESS_ONCE(session->active) = 0;
485
486 /* Set transient enabler state to "disabled" */
487 session->tstate = 0;
488 lttng_session_sync_enablers(session);
489 end:
490 return ret;
491 }
492
493 int lttng_channel_enable(struct lttng_channel *channel)
494 {
495 int ret = 0;
496
497 if (channel->enabled) {
498 ret = -EBUSY;
499 goto end;
500 }
501 /* Set transient enabler state to "enabled" */
502 channel->tstate = 1;
503 lttng_session_sync_enablers(channel->session);
504 /* Set atomically the state to "enabled" */
505 CMM_ACCESS_ONCE(channel->enabled) = 1;
506 end:
507 return ret;
508 }
509
510 int lttng_channel_disable(struct lttng_channel *channel)
511 {
512 int ret = 0;
513
514 if (!channel->enabled) {
515 ret = -EBUSY;
516 goto end;
517 }
518 /* Set atomically the state to "disabled" */
519 CMM_ACCESS_ONCE(channel->enabled) = 0;
520 /* Set transient enabler state to "disabled" */
521 channel->tstate = 0;
522 lttng_session_sync_enablers(channel->session);
523 end:
524 return ret;
525 }
526
527 /*
528 * Supports event creation while tracing session is active.
529 */
530 static
531 int lttng_event_create(const struct lttng_event_desc *desc,
532 struct lttng_channel *chan)
533 {
534 const char *event_name = desc->name;
535 struct lttng_event *event;
536 struct lttng_session *session = chan->session;
537 struct cds_hlist_head *head;
538 int ret = 0;
539 size_t name_len = strlen(event_name);
540 uint32_t hash;
541 int notify_socket, loglevel;
542 const char *uri;
543
544 hash = jhash(event_name, name_len, 0);
545 head = &chan->session->events_ht.table[hash & (LTTNG_UST_EVENT_HT_SIZE - 1)];
546
547 notify_socket = lttng_get_notify_socket(session->owner);
548 if (notify_socket < 0) {
549 ret = notify_socket;
550 goto socket_error;
551 }
552
553 ret = lttng_create_all_event_enums(desc->nr_fields, desc->fields,
554 session);
555 if (ret < 0) {
556 DBG("Error (%d) adding enum to session", ret);
557 goto create_enum_error;
558 }
559
560 /*
561 * Check if the loglevel matches. Refuse to connect the event if not.
562 */
563 event = zmalloc(sizeof(struct lttng_event));
564 if (!event) {
565 ret = -ENOMEM;
566 goto cache_error;
567 }
568 event->chan = chan;
569
570 /* Event will be enabled by enabler sync. */
571 event->enabled = 0;
572 event->registered = 0;
573 CDS_INIT_LIST_HEAD(&event->bytecode_runtime_head);
574 CDS_INIT_LIST_HEAD(&event->enablers_ref_head);
575 event->desc = desc;
576
577 if (desc->loglevel)
578 loglevel = *(*event->desc->loglevel);
579 else
580 loglevel = TRACE_DEFAULT;
581 if (desc->u.ext.model_emf_uri)
582 uri = *(desc->u.ext.model_emf_uri);
583 else
584 uri = NULL;
585
586 /* Fetch event ID from sessiond */
587 ret = ustcomm_register_event(notify_socket,
588 session,
589 session->objd,
590 chan->objd,
591 event_name,
592 loglevel,
593 desc->signature,
594 desc->nr_fields,
595 desc->fields,
596 uri,
597 &event->id);
598 if (ret < 0) {
599 DBG("Error (%d) registering event to sessiond", ret);
600 goto sessiond_register_error;
601 }
602
603 cds_list_add(&event->node, &chan->session->events_head);
604 cds_hlist_add_head(&event->hlist, head);
605 return 0;
606
607 sessiond_register_error:
608 free(event);
609 cache_error:
610 create_enum_error:
611 socket_error:
612 return ret;
613 }
614
615 static
616 int lttng_desc_match_star_glob_enabler(const struct lttng_event_desc *desc,
617 struct lttng_enabler *enabler)
618 {
619 int loglevel = 0;
620 unsigned int has_loglevel = 0;
621
622 assert(enabler->type == LTTNG_ENABLER_STAR_GLOB);
623 if (!strutils_star_glob_match(enabler->event_param.name, SIZE_MAX,
624 desc->name, SIZE_MAX))
625 return 0;
626 if (desc->loglevel) {
627 loglevel = *(*desc->loglevel);
628 has_loglevel = 1;
629 }
630 if (!lttng_loglevel_match(loglevel,
631 has_loglevel,
632 enabler->event_param.loglevel_type,
633 enabler->event_param.loglevel))
634 return 0;
635 return 1;
636 }
637
638 static
639 int lttng_desc_match_event_enabler(const struct lttng_event_desc *desc,
640 struct lttng_enabler *enabler)
641 {
642 int loglevel = 0;
643 unsigned int has_loglevel = 0;
644
645 assert(enabler->type == LTTNG_ENABLER_EVENT);
646 if (strcmp(desc->name, enabler->event_param.name))
647 return 0;
648 if (desc->loglevel) {
649 loglevel = *(*desc->loglevel);
650 has_loglevel = 1;
651 }
652 if (!lttng_loglevel_match(loglevel,
653 has_loglevel,
654 enabler->event_param.loglevel_type,
655 enabler->event_param.loglevel))
656 return 0;
657 return 1;
658 }
659
660 static
661 int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
662 struct lttng_enabler *enabler)
663 {
664 switch (enabler->type) {
665 case LTTNG_ENABLER_STAR_GLOB:
666 {
667 struct lttng_ust_excluder_node *excluder;
668
669 if (!lttng_desc_match_star_glob_enabler(desc, enabler)) {
670 return 0;
671 }
672
673 /*
674 * If the matched event name also matches an excluder,
675 * return 'does not match'.
676 */
677 cds_list_for_each_entry(excluder, &enabler->excluder_head, node) {
678 int count;
679
680 for (count = 0; count < excluder->excluder.count; count++) {
681 int len;
682 char *excluder_name;
683
684 excluder_name = (char *) (excluder->excluder.names)
685 + count * LTTNG_UST_SYM_NAME_LEN;
686 len = strnlen(excluder_name, LTTNG_UST_SYM_NAME_LEN);
687 if (len > 0 && strutils_star_glob_match(excluder_name, len, desc->name, SIZE_MAX))
688 return 0;
689 }
690 }
691 return 1;
692 }
693 case LTTNG_ENABLER_EVENT:
694 return lttng_desc_match_event_enabler(desc, enabler);
695 default:
696 return -EINVAL;
697 }
698 }
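/*
 * Editor's example (sketch, not part of the original file): with a
 * star-glob enabler pattern "myprovider:*" and an excluder entry
 * "myprovider:debug*", an event named "myprovider:request" matches the
 * enabler, while "myprovider:debug_state" is rejected by the excluder.
 * The provider and event names are hypothetical.
 */
#if 0	/* illustrative only */
assert(strutils_star_glob_match("myprovider:*", SIZE_MAX,
		"myprovider:request", SIZE_MAX));
assert(strutils_star_glob_match("myprovider:debug*", SIZE_MAX,
		"myprovider:debug_state", SIZE_MAX));
#endif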
699
700 static
701 int lttng_event_match_enabler(struct lttng_event *event,
702 struct lttng_enabler *enabler)
703 {
704 if (lttng_desc_match_enabler(event->desc, enabler)
705 && event->chan == enabler->chan)
706 return 1;
707 else
708 return 0;
709 }
710
711 static
712 struct lttng_enabler_ref * lttng_event_enabler_ref(struct lttng_event *event,
713 struct lttng_enabler *enabler)
714 {
715 struct lttng_enabler_ref *enabler_ref;
716
717 cds_list_for_each_entry(enabler_ref,
718 &event->enablers_ref_head, node) {
719 if (enabler_ref->ref == enabler)
720 return enabler_ref;
721 }
722 return NULL;
723 }
724
725 /*
726 * Create a struct lttng_event if it is missing from the session and its
727 * probe is present in the list of tracepoint probes.
728 */
729 static
730 void lttng_create_event_if_missing(struct lttng_enabler *enabler)
731 {
732 struct lttng_session *session = enabler->chan->session;
733 struct lttng_probe_desc *probe_desc;
734 const struct lttng_event_desc *desc;
735 struct lttng_event *event;
736 int i;
737 struct cds_list_head *probe_list;
738
739 probe_list = lttng_get_probe_list_head();
740 /*
741 * For each probe event, if we find that a probe event matches
742 * our enabler, create an associated lttng_event if not
743 * already present.
744 */
745 cds_list_for_each_entry(probe_desc, probe_list, head) {
746 for (i = 0; i < probe_desc->nr_events; i++) {
747 int ret;
748 bool found = false;
749 struct cds_hlist_head *head;
750 struct cds_hlist_node *node;
751 const char *event_name;
752 size_t name_len;
753 uint32_t hash;
754
755 desc = probe_desc->event_desc[i];
756 if (!lttng_desc_match_enabler(desc, enabler))
757 continue;
758 event_name = desc->name;
759 name_len = strlen(event_name);
760
761 /*
762 * Check if already created.
763 */
764 hash = jhash(event_name, name_len, 0);
765 head = &session->events_ht.table[hash & (LTTNG_UST_EVENT_HT_SIZE - 1)];
766 cds_hlist_for_each_entry(event, node, head, hlist) {
767 if (event->desc == desc
768 && event->chan == enabler->chan) {
769 found = true;
770 break;
771 }
772 }
773 if (found)
774 continue;
775
776 /*
777 * We need to create an event for this
778 * event probe.
779 */
780 ret = lttng_event_create(probe_desc->event_desc[i],
781 enabler->chan);
782 if (ret) {
783 DBG("Unable to create event %s, error %d\n",
784 probe_desc->event_desc[i]->name, ret);
785 }
786 }
787 }
788 }
789
790 /*
791 * Iterate over all the UST sessions to unregister and destroy all probes from
792 * the probe provider descriptor received as argument. Must be called with the
793 * ust_lock held.
794 */
795 void lttng_probe_provider_unregister_events(struct lttng_probe_desc *provider_desc)
796 {
797 struct cds_hlist_node *node, *tmp_node;
798 struct cds_list_head *sessionsp;
799 struct lttng_session *session;
800 struct cds_hlist_head *head;
801 struct lttng_event *event;
802 unsigned int i, j;
803
804 /* Get handle on list of sessions. */
805 sessionsp = _lttng_get_sessions();
806
807 /*
808 * Iterate over all events in the probe provider descriptor and all sessions
809 * to queue the unregistration of the events.
810 */
811 for (i = 0; i < provider_desc->nr_events; i++) {
812 const struct lttng_event_desc *event_desc;
813 const char *event_name;
814 size_t name_len;
815 uint32_t hash;
816
817 event_desc = provider_desc->event_desc[i];
818 event_name = event_desc->name;
819 name_len = strlen(event_name);
820 hash = jhash(event_name, name_len, 0);
821
822 /* Iterate over all sessions to find the current event description. */
823 cds_list_for_each_entry(session, sessionsp, node) {
824 /*
825 * Get the list of events in the hashtable bucket and iterate to
826 * find the event matching this descriptor.
827 */
828 head = &session->events_ht.table[hash & (LTTNG_UST_EVENT_HT_SIZE - 1)];
829 cds_hlist_for_each_entry(event, node, head, hlist) {
830 if (event_desc == event->desc) {
831 /* Queue the unregistration of this event. */
832 _lttng_event_unregister(event);
833 break;
834 }
835 }
836 }
837 }
838
839 /* Wait for grace period. */
840 synchronize_trace();
841 /* Prune the unregistration queue. */
842 __tracepoint_probe_prune_release_queue();
843
844 /*
845 * It is now safe to destroy the events and remove them from the event list
846 * and hashtables.
847 */
848 for (i = 0; i < provider_desc->nr_events; i++) {
849 const struct lttng_event_desc *event_desc;
850 const char *event_name;
851 size_t name_len;
852 uint32_t hash;
853
854 event_desc = provider_desc->event_desc[i];
855 event_name = event_desc->name;
856 name_len = strlen(event_name);
857 hash = jhash(event_name, name_len, 0);
858
859 /* Iterate over all sessions to find the current event description. */
860 cds_list_for_each_entry(session, sessionsp, node) {
861 /*
862 * Get the list of events in the hashtable bucket and iterate to
863 * find the event matching this descriptor.
864 */
865 head = &session->events_ht.table[hash & (LTTNG_UST_EVENT_HT_SIZE - 1)];
866 cds_hlist_for_each_entry_safe(event, node, tmp_node, head, hlist) {
867 if (event_desc == event->desc) {
868 /* Destroy enums of the current event. */
869 for (j = 0; j < event->desc->nr_fields; j++) {
870 const struct lttng_enum_desc *enum_desc;
871 const struct lttng_event_field *field;
872 struct lttng_enum *curr_enum;
873
874 field = &(event->desc->fields[j]);
875 if (field->type.atype != atype_enum) {
876 continue;
877 }
878
879 enum_desc = field->type.u.basic.enumeration.desc;
880 curr_enum = lttng_ust_enum_get_from_desc(session, enum_desc);
881 if (curr_enum) {
882 _lttng_enum_destroy(curr_enum);
883 }
884 }
885
886 /* Destroy event. */
887 _lttng_event_destroy(event);
888 break;
889 }
890 }
891 }
892 }
893 }
894
895 /*
896 * Create events associated with an enabler (if not already present),
897 * and add backward reference from the event to the enabler.
898 */
899 static
900 int lttng_enabler_ref_events(struct lttng_enabler *enabler)
901 {
902 struct lttng_session *session = enabler->chan->session;
903 struct lttng_event *event;
904
905 /* First ensure that probe events are created for this enabler. */
906 lttng_create_event_if_missing(enabler);
907
908 /* For each event matching enabler in session event list. */
909 cds_list_for_each_entry(event, &session->events_head, node) {
910 struct lttng_enabler_ref *enabler_ref;
911
912 if (!lttng_event_match_enabler(event, enabler))
913 continue;
914
915 enabler_ref = lttng_event_enabler_ref(event, enabler);
916 if (!enabler_ref) {
917 /*
918 * If no backward ref, create it.
919 * Add backward ref from event to enabler.
920 */
921 enabler_ref = zmalloc(sizeof(*enabler_ref));
922 if (!enabler_ref)
923 return -ENOMEM;
924 enabler_ref->ref = enabler;
925 cds_list_add(&enabler_ref->node,
926 &event->enablers_ref_head);
927 }
928
929 /*
930 * Link filter bytecodes if not linked yet.
931 */
932 lttng_enabler_event_link_bytecode(event, enabler);
933
934 /* TODO: merge event context. */
935 }
936 return 0;
937 }
938
939 /*
940 * Called at library load: connect newly registered probes to all
941 * matching enablers in all sessions.
942 * Called with session mutex held.
943 */
944 int lttng_fix_pending_events(void)
945 {
946 struct lttng_session *session;
947
948 cds_list_for_each_entry(session, &sessions, node) {
949 lttng_session_lazy_sync_enablers(session);
950 }
951 return 0;
952 }
953
954 /*
955 * For each session of the owner thread, execute pending statedump.
956 * Only dump state for the sessions owned by the caller thread, because
957 * we don't keep ust_lock across the entire iteration.
958 */
959 void lttng_handle_pending_statedump(void *owner)
960 {
961 struct lttng_session *session;
962
963 /* Execute state dump */
964 do_lttng_ust_statedump(owner);
965
966 /* Clear pending state dump */
967 if (ust_lock()) {
968 goto end;
969 }
970 cds_list_for_each_entry(session, &sessions, node) {
971 if (session->owner != owner)
972 continue;
973 if (!session->statedump_pending)
974 continue;
975 session->statedump_pending = 0;
976 }
977 end:
978 ust_unlock();
979 return;
980 }
981
982 /*
983 * Only used internally at session destruction.
984 */
985 static
986 void _lttng_event_destroy(struct lttng_event *event)
987 {
988 struct lttng_enabler_ref *enabler_ref, *tmp_enabler_ref;
989
990 /* Remove from event list. */
991 cds_list_del(&event->node);
992 /* Remove from event hash table. */
993 cds_hlist_del(&event->hlist);
994
995 lttng_destroy_context(event->ctx);
996 lttng_free_event_filter_runtime(event);
997 /* Free event enabler refs */
998 cds_list_for_each_entry_safe(enabler_ref, tmp_enabler_ref,
999 &event->enablers_ref_head, node)
1000 free(enabler_ref);
1001 free(event);
1002 }
1003
1004 static
1005 void _lttng_enum_destroy(struct lttng_enum *_enum)
1006 {
1007 cds_list_del(&_enum->node);
1008 cds_hlist_del(&_enum->hlist);
1009 free(_enum);
1010 }
1011
1012 void lttng_ust_events_exit(void)
1013 {
1014 struct lttng_session *session, *tmpsession;
1015
1016 cds_list_for_each_entry_safe(session, tmpsession, &sessions, node)
1017 lttng_session_destroy(session);
1018 }
1019
1020 /*
1021 * Enabler management.
1022 */
1023 struct lttng_enabler *lttng_enabler_create(enum lttng_enabler_type type,
1024 struct lttng_ust_event *event_param,
1025 struct lttng_channel *chan)
1026 {
1027 struct lttng_enabler *enabler;
1028
1029 enabler = zmalloc(sizeof(*enabler));
1030 if (!enabler)
1031 return NULL;
1032 enabler->type = type;
1033 CDS_INIT_LIST_HEAD(&enabler->filter_bytecode_head);
1034 CDS_INIT_LIST_HEAD(&enabler->excluder_head);
1035 memcpy(&enabler->event_param, event_param,
1036 sizeof(enabler->event_param));
1037 enabler->chan = chan;
1038 /* ctx left NULL */
1039 enabler->enabled = 0;
1040 cds_list_add(&enabler->node, &enabler->chan->session->enablers_head);
1041 lttng_session_lazy_sync_enablers(enabler->chan->session);
1042 return enabler;
1043 }
1044
1045 int lttng_enabler_enable(struct lttng_enabler *enabler)
1046 {
1047 enabler->enabled = 1;
1048 lttng_session_lazy_sync_enablers(enabler->chan->session);
1049 return 0;
1050 }
1051
1052 int lttng_enabler_disable(struct lttng_enabler *enabler)
1053 {
1054 enabler->enabled = 0;
1055 lttng_session_lazy_sync_enablers(enabler->chan->session);
1056 return 0;
1057 }
1058
1059 int lttng_enabler_attach_bytecode(struct lttng_enabler *enabler,
1060 struct lttng_ust_filter_bytecode_node *bytecode)
1061 {
1062 bytecode->enabler = enabler;
1063 cds_list_add_tail(&bytecode->node, &enabler->filter_bytecode_head);
1064 lttng_session_lazy_sync_enablers(enabler->chan->session);
1065 return 0;
1066 }
1067
1068 int lttng_enabler_attach_exclusion(struct lttng_enabler *enabler,
1069 struct lttng_ust_excluder_node *excluder)
1070 {
1071 excluder->enabler = enabler;
1072 cds_list_add_tail(&excluder->node, &enabler->excluder_head);
1073 lttng_session_lazy_sync_enablers(enabler->chan->session);
1074 return 0;
1075 }
1076
1077 int lttng_attach_context(struct lttng_ust_context *context_param,
1078 union ust_args *uargs,
1079 struct lttng_ctx **ctx, struct lttng_session *session)
1080 {
1081 /*
1082 * We cannot attach a context after trace has been started for a
1083 * session because the metadata does not allow expressing this
1084 * information outside of the original channel scope.
1085 */
1086 if (session->been_active)
1087 return -EPERM;
1088
1089 switch (context_param->ctx) {
1090 case LTTNG_UST_CONTEXT_PTHREAD_ID:
1091 return lttng_add_pthread_id_to_ctx(ctx);
1092 case LTTNG_UST_CONTEXT_PERF_THREAD_COUNTER:
1093 {
1094 struct lttng_ust_perf_counter_ctx *perf_ctx_param;
1095
1096 perf_ctx_param = &context_param->u.perf_counter;
1097 return lttng_add_perf_counter_to_ctx(
1098 perf_ctx_param->type,
1099 perf_ctx_param->config,
1100 perf_ctx_param->name,
1101 ctx);
1102 }
1103 case LTTNG_UST_CONTEXT_VTID:
1104 return lttng_add_vtid_to_ctx(ctx);
1105 case LTTNG_UST_CONTEXT_VPID:
1106 return lttng_add_vpid_to_ctx(ctx);
1107 case LTTNG_UST_CONTEXT_PROCNAME:
1108 return lttng_add_procname_to_ctx(ctx);
1109 case LTTNG_UST_CONTEXT_IP:
1110 return lttng_add_ip_to_ctx(ctx);
1111 case LTTNG_UST_CONTEXT_CPU_ID:
1112 return lttng_add_cpu_id_to_ctx(ctx);
1113 case LTTNG_UST_CONTEXT_APP_CONTEXT:
1114 return lttng_ust_add_app_context_to_ctx_rcu(uargs->app_context.ctxname,
1115 ctx);
1116 case LTTNG_UST_CONTEXT_CGROUP_NS:
1117 return lttng_add_cgroup_ns_to_ctx(ctx);
1118 case LTTNG_UST_CONTEXT_IPC_NS:
1119 return lttng_add_ipc_ns_to_ctx(ctx);
1120 case LTTNG_UST_CONTEXT_MNT_NS:
1121 return lttng_add_mnt_ns_to_ctx(ctx);
1122 case LTTNG_UST_CONTEXT_NET_NS:
1123 return lttng_add_net_ns_to_ctx(ctx);
1124 case LTTNG_UST_CONTEXT_PID_NS:
1125 return lttng_add_pid_ns_to_ctx(ctx);
1126 case LTTNG_UST_CONTEXT_USER_NS:
1127 return lttng_add_user_ns_to_ctx(ctx);
1128 case LTTNG_UST_CONTEXT_UTS_NS:
1129 return lttng_add_uts_ns_to_ctx(ctx);
1130 case LTTNG_UST_CONTEXT_VUID:
1131 return lttng_add_vuid_to_ctx(ctx);
1132 case LTTNG_UST_CONTEXT_VEUID:
1133 return lttng_add_veuid_to_ctx(ctx);
1134 case LTTNG_UST_CONTEXT_VSUID:
1135 return lttng_add_vsuid_to_ctx(ctx);
1136 case LTTNG_UST_CONTEXT_VGID:
1137 return lttng_add_vgid_to_ctx(ctx);
1138 case LTTNG_UST_CONTEXT_VEGID:
1139 return lttng_add_vegid_to_ctx(ctx);
1140 case LTTNG_UST_CONTEXT_VSGID:
1141 return lttng_add_vsgid_to_ctx(ctx);
1142 default:
1143 return -EINVAL;
1144 }
1145 }
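/*
 * Editor's sketch (not part of the original file): attaching a simple
 * per-event-record context (vtid) to a context list. The uargs argument
 * is only dereferenced for LTTNG_UST_CONTEXT_APP_CONTEXT, so NULL is
 * acceptable here; "chan" and "session" are hypothetical variables from
 * the caller.
 */
#if 0	/* illustrative only */
struct lttng_ust_context ctx_param = {
	.ctx = LTTNG_UST_CONTEXT_VTID,
};
int ret = lttng_attach_context(&ctx_param, NULL, &chan->ctx, session);
if (ret)
	DBG("Error (%d) attaching vtid context", ret);
#endif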
1146
1147 int lttng_enabler_attach_context(struct lttng_enabler *enabler,
1148 struct lttng_ust_context *context_param)
1149 {
1150 #if 0 // disabled for now.
1151 struct lttng_session *session = enabler->chan->session;
1152 int ret;
1153
1154 ret = lttng_attach_context(context_param, &enabler->ctx,
1155 session);
1156 if (ret)
1157 return ret;
1158 lttng_session_lazy_sync_enablers(enabler->chan->session);
1159 #endif
1160 return -ENOSYS;
1161 }
1162
1163 static
1164 void lttng_enabler_destroy(struct lttng_enabler *enabler)
1165 {
1166 struct lttng_ust_filter_bytecode_node *filter_node, *tmp_filter_node;
1167 struct lttng_ust_excluder_node *excluder_node, *tmp_excluder_node;
1168
1169 /* Destroy filter bytecode */
1170 cds_list_for_each_entry_safe(filter_node, tmp_filter_node,
1171 &enabler->filter_bytecode_head, node) {
1172 free(filter_node);
1173 }
1174
1175 /* Destroy excluders */
1176 cds_list_for_each_entry_safe(excluder_node, tmp_excluder_node,
1177 &enabler->excluder_head, node) {
1178 free(excluder_node);
1179 }
1180
1181 /* Destroy contexts */
1182 lttng_destroy_context(enabler->ctx);
1183
1184 cds_list_del(&enabler->node);
1185 free(enabler);
1186 }
1187
1188 /*
1189 * lttng_session_sync_enablers should be called just before starting a
1190 * session.
1191 */
1192 static
1193 void lttng_session_sync_enablers(struct lttng_session *session)
1194 {
1195 struct lttng_enabler *enabler;
1196 struct lttng_event *event;
1197
1198 cds_list_for_each_entry(enabler, &session->enablers_head, node)
1199 lttng_enabler_ref_events(enabler);
1200 /*
1201 * For each event, if at least one of its enablers is enabled,
1202 * and its channel and session transient states are enabled, we
1203 * enable the event, else we disable it.
1204 */
1205 cds_list_for_each_entry(event, &session->events_head, node) {
1206 struct lttng_enabler_ref *enabler_ref;
1207 struct lttng_bytecode_runtime *runtime;
1208 int enabled = 0, has_enablers_without_bytecode = 0;
1209
1210 /* Enable events */
1211 cds_list_for_each_entry(enabler_ref,
1212 &event->enablers_ref_head, node) {
1213 if (enabler_ref->ref->enabled) {
1214 enabled = 1;
1215 break;
1216 }
1217 }
1218 /*
1219 * Enabled state is based on union of enablers, with
1220 * intersection of session and channel transient enable
1221 * states.
1222 */
1223 enabled = enabled && session->tstate && event->chan->tstate;
1224
1225 CMM_STORE_SHARED(event->enabled, enabled);
1226 /*
1227 * Sync tracepoint registration with event enabled
1228 * state.
1229 */
1230 if (enabled) {
1231 if (!event->registered)
1232 register_event(event);
1233 } else {
1234 if (event->registered)
1235 unregister_event(event);
1236 }
1237
1238 /* Check if the event has any enabled enabler without filter bytecode. */
1239 cds_list_for_each_entry(enabler_ref,
1240 &event->enablers_ref_head, node) {
1241 if (enabler_ref->ref->enabled
1242 && cds_list_empty(&enabler_ref->ref->filter_bytecode_head)) {
1243 has_enablers_without_bytecode = 1;
1244 break;
1245 }
1246 }
1247 event->has_enablers_without_bytecode =
1248 has_enablers_without_bytecode;
1249
1250 /* Enable filters */
1251 cds_list_for_each_entry(runtime,
1252 &event->bytecode_runtime_head, node) {
1253 lttng_filter_sync_state(runtime);
1254 }
1255 }
1256 __tracepoint_probe_prune_release_queue();
1257 }
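/*
 * Editor's note (sketch, not part of the original file): the enabled
 * state computed above is the union (OR) over the event's enabler
 * references, intersected (AND) with the session and channel transient
 * states, conceptually:
 *
 *	enabled = (at least one enabler_ref->ref->enabled)
 *			&& session->tstate && event->chan->tstate;
 *
 * An event with an enabled enabler in an enabled channel therefore
 * stays disabled (and unregistered from its tracepoint) while the
 * session is stopped, and is registered on the next sync after
 * lttng_session_enable() sets the session transient state.
 */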
1258
1259 /*
1260 * Apply enablers to session events, adding events to session if need
1261 * be. It is required after each modification applied to an active
1262 * session, and right before session "start".
1263 * "lazy" sync means we only sync if required.
1264 */
1265 static
1266 void lttng_session_lazy_sync_enablers(struct lttng_session *session)
1267 {
1268 /* We can skip if session is not active */
1269 if (!session->active)
1270 return;
1271 lttng_session_sync_enablers(session);
1272 }
1273
1274 /*
1275 * Update all sessions with the given app context.
1276 * Called with ust lock held.
1277 * This is invoked when an application context gets loaded/unloaded. It
1278 * ensures the context callbacks are in sync with the application
1279 * context (either app context callbacks, or dummy callbacks).
1280 */
1281 void lttng_ust_context_set_session_provider(const char *name,
1282 size_t (*get_size)(struct lttng_ctx_field *field, size_t offset),
1283 void (*record)(struct lttng_ctx_field *field,
1284 struct lttng_ust_lib_ring_buffer_ctx *ctx,
1285 struct lttng_channel *chan),
1286 void (*get_value)(struct lttng_ctx_field *field,
1287 struct lttng_ctx_value *value))
1288 {
1289 struct lttng_session *session;
1290
1291 cds_list_for_each_entry(session, &sessions, node) {
1292 struct lttng_channel *chan;
1293 struct lttng_event *event;
1294 int ret;
1295
1296 ret = lttng_ust_context_set_provider_rcu(&session->ctx,
1297 name, get_size, record, get_value);
1298 if (ret)
1299 abort();
1300 cds_list_for_each_entry(chan, &session->chan_head, node) {
1301 ret = lttng_ust_context_set_provider_rcu(&chan->ctx,
1302 name, get_size, record, get_value);
1303 if (ret)
1304 abort();
1305 }
1306 cds_list_for_each_entry(event, &session->events_head, node) {
1307 ret = lttng_ust_context_set_provider_rcu(&event->ctx,
1308 name, get_size, record, get_value);
1309 if (ret)
1310 abort();
1311 }
1312 }
1313 }