Fix: baddr deadlock with lttng-ust destructor
[lttng-ust.git] / liblttng-ust / lttng-events.c
/*
 * lttng-events.c
 *
 * Holds LTTng per-session event registry.
 *
 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _GNU_SOURCE
#include <stdio.h>
#include <urcu/list.h>
#include <urcu/hlist.h>
#include <pthread.h>
#include <errno.h>
#include <sys/shm.h>
#include <sys/ipc.h>
#include <stdint.h>
#include <stddef.h>
#include <inttypes.h>
#include <time.h>
#include <lttng/ust-endian.h>
#include "clock.h"

#include <urcu-bp.h>
#include <urcu/compiler.h>
#include <urcu/uatomic.h>
#include <urcu/arch.h>

#include <lttng/tracepoint.h>
#include <lttng/ust-events.h>

#include <usterr-signal-safe.h>
#include <helper.h>
#include <lttng/ust-ctl.h>
#include <ust-comm.h>
#include "error.h"
#include "compat.h"
#include "lttng-ust-uuid.h"

#include "tracepoint-internal.h"
#include "lttng-tracer.h"
#include "lttng-tracer-core.h"
#include "lttng-ust-baddr.h"
#include "wait.h"
#include "../libringbuffer/shm.h"
#include "jhash.h"

/*
 * All operations within this file are called by the communication
 * thread, under ust_lock protection.
 */

static CDS_LIST_HEAD(sessions);

struct cds_list_head *_lttng_get_sessions(void)
{
	return &sessions;
}

static void _lttng_event_destroy(struct lttng_event *event);

static
void lttng_session_lazy_sync_enablers(struct lttng_session *session);
static
void lttng_session_sync_enablers(struct lttng_session *session);
static
void lttng_enabler_destroy(struct lttng_enabler *enabler);

/*
 * Called with ust lock held.
 */
int lttng_session_active(void)
{
	struct lttng_session *iter;

	cds_list_for_each_entry(iter, &sessions, node) {
		if (iter->active)
			return 1;
	}
	return 0;
}

static
int lttng_loglevel_match(int loglevel,
		unsigned int has_loglevel,
		enum lttng_ust_loglevel_type req_type,
		int req_loglevel)
{
	if (req_type == LTTNG_UST_LOGLEVEL_ALL)
		return 1;
	if (!has_loglevel)
		loglevel = TRACE_DEFAULT;
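	/*
	 * LTTng-UST loglevels are ordered with lower numerical values
	 * being more severe. A RANGE request therefore matches events
	 * at the requested severity or more severe, and a requested
	 * loglevel of -1 matches everything.
	 */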
	switch (req_type) {
	case LTTNG_UST_LOGLEVEL_RANGE:
		if (loglevel <= req_loglevel || req_loglevel == -1)
			return 1;
		else
			return 0;
	case LTTNG_UST_LOGLEVEL_SINGLE:
		if (loglevel == req_loglevel || req_loglevel == -1)
			return 1;
		else
			return 0;
	case LTTNG_UST_LOGLEVEL_ALL:
	default:
		return 1;
	}
}

void synchronize_trace(void)
{
	synchronize_rcu();
}

struct lttng_session *lttng_session_create(void)
{
	struct lttng_session *session;
	int i;

	session = zmalloc(sizeof(struct lttng_session));
	if (!session)
		return NULL;
	CDS_INIT_LIST_HEAD(&session->chan_head);
	CDS_INIT_LIST_HEAD(&session->events_head);
	CDS_INIT_LIST_HEAD(&session->enablers_head);
	for (i = 0; i < LTTNG_UST_EVENT_HT_SIZE; i++)
		CDS_INIT_HLIST_HEAD(&session->events_ht.table[i]);
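	/* Publish the new session on the global list (ust_lock held by caller). */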
	cds_list_add(&session->node, &sessions);
	return session;
}

/*
 * Only used internally at session destruction.
 */
static
void _lttng_channel_unmap(struct lttng_channel *lttng_chan)
{
	struct channel *chan;
	struct lttng_ust_shm_handle *handle;

	cds_list_del(&lttng_chan->node);
	lttng_destroy_context(lttng_chan->ctx);
	chan = lttng_chan->chan;
	handle = lttng_chan->handle;
	/*
	 * note: lttng_chan is private data contained within handle. It
	 * will be freed along with the handle.
	 */
	channel_destroy(chan, handle, 0);
}

static
void register_event(struct lttng_event *event)
{
	int ret;
	const struct lttng_event_desc *desc;

	assert(event->registered == 0);
	desc = event->desc;
	ret = __tracepoint_probe_register(desc->name,
			desc->probe_callback,
			event, desc->signature);
	WARN_ON_ONCE(ret);
	if (!ret)
		event->registered = 1;
}

static
void unregister_event(struct lttng_event *event)
{
	int ret;
	const struct lttng_event_desc *desc;

	assert(event->registered == 1);
	desc = event->desc;
	ret = __tracepoint_probe_unregister(desc->name,
			desc->probe_callback,
			event);
	WARN_ON_ONCE(ret);
	if (!ret)
		event->registered = 0;
}

/*
 * Only used internally at session destruction.
 */
static
void _lttng_event_unregister(struct lttng_event *event)
{
	if (event->registered)
		unregister_event(event);
}

void lttng_session_destroy(struct lttng_session *session)
{
	struct lttng_channel *chan, *tmpchan;
	struct lttng_event *event, *tmpevent;
	struct lttng_enabler *enabler, *tmpenabler;

	CMM_ACCESS_ONCE(session->active) = 0;
	cds_list_for_each_entry(event, &session->events_head, node) {
		_lttng_event_unregister(event);
	}
	synchronize_trace();	/* Wait for in-flight events to complete */
	cds_list_for_each_entry_safe(enabler, tmpenabler,
			&session->enablers_head, node)
		lttng_enabler_destroy(enabler);
	cds_list_for_each_entry_safe(event, tmpevent,
			&session->events_head, node)
		_lttng_event_destroy(event);
	cds_list_for_each_entry_safe(chan, tmpchan, &session->chan_head, node)
		_lttng_channel_unmap(chan);
	cds_list_del(&session->node);
	free(session);
}

int lttng_session_enable(struct lttng_session *session)
{
	int ret = 0;
	struct lttng_channel *chan;
	int notify_socket;

	if (session->active) {
		ret = -EBUSY;
		goto end;
	}

	notify_socket = lttng_get_notify_socket(session->owner);
	if (notify_socket < 0)
		return notify_socket;

	/* Set transient enabler state to "enabled" */
	session->tstate = 1;
	/* We need to sync enablers with session before activation. */
	lttng_session_sync_enablers(session);

	/*
	 * Snapshot the number of events per channel to know the type of header
	 * we need to use.
	 */
	cds_list_for_each_entry(chan, &session->chan_head, node) {
		const struct lttng_ctx *ctx;
		const struct lttng_ctx_field *fields = NULL;
		size_t nr_fields = 0;
		uint32_t chan_id;

		/* Don't change the header type on session stop/restart. */
		if (chan->header_type)
			continue;
		ctx = chan->ctx;
		if (ctx) {
			nr_fields = ctx->nr_fields;
			fields = ctx->fields;
		}
		ret = ustcomm_register_channel(notify_socket,
				session->objd,
				chan->objd,
				nr_fields,
				fields,
				&chan_id,
				&chan->header_type);
		if (ret) {
			DBG("Error (%d) registering channel to sessiond", ret);
			return ret;
		}
		if (chan_id != chan->id) {
			DBG("Error: channel registration id (%u) does not match id assigned at creation (%u)",
				chan_id, chan->id);
			return -EINVAL;
		}
	}

	/* Set atomically the state to "active" */
	CMM_ACCESS_ONCE(session->active) = 1;
	CMM_ACCESS_ONCE(session->been_active) = 1;

	session->statedump_pending = 1;
	lttng_ust_sockinfo_session_enabled(session->owner);
end:
	return ret;
}

int lttng_session_disable(struct lttng_session *session)
{
	int ret = 0;

	if (!session->active) {
		ret = -EBUSY;
		goto end;
	}
	/* Set atomically the state to "inactive" */
	CMM_ACCESS_ONCE(session->active) = 0;

	/* Set transient enabler state to "disabled" */
	session->tstate = 0;
	lttng_session_sync_enablers(session);
end:
	return ret;
}

int lttng_channel_enable(struct lttng_channel *channel)
{
	int ret = 0;

	if (channel->enabled) {
		ret = -EBUSY;
		goto end;
	}
	/* Set transient enabler state to "enabled" */
	channel->tstate = 1;
	lttng_session_sync_enablers(channel->session);
	/* Set atomically the state to "enabled" */
	CMM_ACCESS_ONCE(channel->enabled) = 1;
end:
	return ret;
}

int lttng_channel_disable(struct lttng_channel *channel)
{
	int ret = 0;

	if (!channel->enabled) {
		ret = -EBUSY;
		goto end;
	}
	/* Set atomically the state to "disabled" */
	CMM_ACCESS_ONCE(channel->enabled) = 0;
	/* Set transient enabler state to "disabled" */
	channel->tstate = 0;
	lttng_session_sync_enablers(channel->session);
end:
	return ret;
}

/*
 * Supports event creation while tracing session is active.
 */
static
int lttng_event_create(const struct lttng_event_desc *desc,
		struct lttng_channel *chan)
{
	const char *event_name = desc->name;
	struct lttng_event *event;
	struct lttng_session *session = chan->session;
	struct cds_hlist_head *head;
	struct cds_hlist_node *node;
	int ret = 0;
	size_t name_len = strlen(event_name);
	uint32_t hash;
	int notify_socket, loglevel;
	const char *uri;

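	/*
	 * Look up the per-session event hash table bucket for this name.
	 * The table size is a power of two, so masking the jhash value
	 * with (size - 1) selects the bucket. Creating the same event
	 * twice in the same channel is refused.
	 */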
	hash = jhash(event_name, name_len, 0);
	head = &chan->session->events_ht.table[hash & (LTTNG_UST_EVENT_HT_SIZE - 1)];
	cds_hlist_for_each_entry(event, node, head, hlist) {
		assert(event->desc);
		if (!strncmp(event->desc->name, desc->name,
					LTTNG_UST_SYM_NAME_LEN - 1)
				&& chan == event->chan) {
			ret = -EEXIST;
			goto exist;
		}
	}

	notify_socket = lttng_get_notify_socket(session->owner);
	if (notify_socket < 0) {
		ret = notify_socket;
		goto socket_error;
	}

	/*
	 * Check whether the loglevel matches. Refuse to connect the event if not.
	 */
	event = zmalloc(sizeof(struct lttng_event));
	if (!event) {
		ret = -ENOMEM;
		goto cache_error;
	}
	event->chan = chan;

	/* Event will be enabled by enabler sync. */
	event->enabled = 0;
	event->registered = 0;
	CDS_INIT_LIST_HEAD(&event->bytecode_runtime_head);
	CDS_INIT_LIST_HEAD(&event->enablers_ref_head);
	event->desc = desc;

	if (desc->loglevel)
		loglevel = *(*event->desc->loglevel);
	else
		loglevel = TRACE_DEFAULT;
	if (desc->u.ext.model_emf_uri)
		uri = *(desc->u.ext.model_emf_uri);
	else
		uri = NULL;

	/* Fetch event ID from sessiond */
	ret = ustcomm_register_event(notify_socket,
			session->objd,
			chan->objd,
			event_name,
			loglevel,
			desc->signature,
			desc->nr_fields,
			desc->fields,
			uri,
			&event->id);
	if (ret < 0) {
		DBG("Error (%d) registering event to sessiond", ret);
		goto sessiond_register_error;
	}

	/* Populate lttng_event structure before tracepoint registration. */
	cmm_smp_wmb();
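	/*
	 * The write barrier above orders the initialization of the event
	 * against its insertion on the session lists below, so that a
	 * concurrent reader traversing those lists never observes a
	 * partially initialized event.
	 */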
	cds_list_add(&event->node, &chan->session->events_head);
	cds_hlist_add_head(&event->hlist, head);
	return 0;

sessiond_register_error:
	free(event);
cache_error:
socket_error:
exist:
	return ret;
}

static
int lttng_desc_match_wildcard_enabler(const struct lttng_event_desc *desc,
		struct lttng_enabler *enabler)
{
	int loglevel = 0;
	unsigned int has_loglevel = 0;

	assert(enabler->type == LTTNG_ENABLER_WILDCARD);
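	/*
	 * A wildcard enabler name always ends with '*'. For example
	 * (illustrative names only), an enabler created for "my_provider:*"
	 * matches any event descriptor whose name starts with "my_provider:".
	 */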
	/* Compare excluding final '*' */
	if (strncmp(desc->name, enabler->event_param.name,
			strlen(enabler->event_param.name) - 1))
		return 0;
	if (desc->loglevel) {
		loglevel = *(*desc->loglevel);
		has_loglevel = 1;
	}
	if (!lttng_loglevel_match(loglevel,
			has_loglevel,
			enabler->event_param.loglevel_type,
			enabler->event_param.loglevel))
		return 0;
	return 1;
}

static
int lttng_desc_match_event_enabler(const struct lttng_event_desc *desc,
		struct lttng_enabler *enabler)
{
	int loglevel = 0;
	unsigned int has_loglevel = 0;

	assert(enabler->type == LTTNG_ENABLER_EVENT);
	if (strcmp(desc->name, enabler->event_param.name))
		return 0;
	if (desc->loglevel) {
		loglevel = *(*desc->loglevel);
		has_loglevel = 1;
	}
	if (!lttng_loglevel_match(loglevel,
			has_loglevel,
			enabler->event_param.loglevel_type,
			enabler->event_param.loglevel))
		return 0;
	return 1;
}

static
int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
		struct lttng_enabler *enabler)
{
	struct lttng_ust_excluder_node *excluder;

	/* If the event matches an excluder, report "does not match". */
	cds_list_for_each_entry(excluder, &enabler->excluder_head, node) {
		int count;

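		/*
		 * Each excluder holds "count" names packed back to back,
		 * LTTNG_UST_SYM_NAME_LEN bytes apart. A name ending in '*'
		 * is treated as a prefix match; otherwise the full name is
		 * compared.
		 */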
		for (count = 0; count < excluder->excluder.count; count++) {
			int found, len;
			char *excluder_name;

			excluder_name = (char *) (excluder->excluder.names)
					+ count * LTTNG_UST_SYM_NAME_LEN;
			len = strnlen(excluder_name, LTTNG_UST_SYM_NAME_LEN);
			if (len > 0 && excluder_name[len - 1] == '*') {
				found = !strncmp(desc->name, excluder_name,
						len - 1);
			} else {
				found = !strncmp(desc->name, excluder_name,
						LTTNG_UST_SYM_NAME_LEN - 1);
			}
			if (found) {
				return 0;
			}
		}
	}
	switch (enabler->type) {
	case LTTNG_ENABLER_WILDCARD:
		return lttng_desc_match_wildcard_enabler(desc, enabler);
	case LTTNG_ENABLER_EVENT:
		return lttng_desc_match_event_enabler(desc, enabler);
	default:
		return -EINVAL;
	}
}

static
int lttng_event_match_enabler(struct lttng_event *event,
		struct lttng_enabler *enabler)
{
	if (lttng_desc_match_enabler(event->desc, enabler)
			&& event->chan == enabler->chan)
		return 1;
	else
		return 0;
}

static
struct lttng_enabler_ref * lttng_event_enabler_ref(struct lttng_event *event,
		struct lttng_enabler *enabler)
{
	struct lttng_enabler_ref *enabler_ref;

	cds_list_for_each_entry(enabler_ref,
			&event->enablers_ref_head, node) {
		if (enabler_ref->ref == enabler)
			return enabler_ref;
	}
	return NULL;
}

/*
 * Create struct lttng_event if it is missing and present in the list of
 * tracepoint probes.
 */
static
void lttng_create_event_if_missing(struct lttng_enabler *enabler)
{
	struct lttng_session *session = enabler->chan->session;
	struct lttng_probe_desc *probe_desc;
	const struct lttng_event_desc *desc;
	struct lttng_event *event;
	int i;
	struct cds_list_head *probe_list;

	probe_list = lttng_get_probe_list_head();
	/*
	 * For each probe event, if we find that a probe event matches
	 * our enabler, create an associated lttng_event if not
	 * already present.
	 */
	cds_list_for_each_entry(probe_desc, probe_list, head) {
		for (i = 0; i < probe_desc->nr_events; i++) {
			int found = 0, ret;
			struct cds_hlist_head *head;
			struct cds_hlist_node *node;
			const char *event_name;
			size_t name_len;
			uint32_t hash;

			desc = probe_desc->event_desc[i];
			if (!lttng_desc_match_enabler(desc, enabler))
				continue;
			event_name = desc->name;
			name_len = strlen(event_name);

			/*
			 * Check if already created.
			 */
			hash = jhash(event_name, name_len, 0);
			head = &session->events_ht.table[hash & (LTTNG_UST_EVENT_HT_SIZE - 1)];
			cds_hlist_for_each_entry(event, node, head, hlist) {
				if (event->desc == desc
						&& event->chan == enabler->chan)
					found = 1;
			}
			if (found)
				continue;

			/*
			 * We need to create an event for this
			 * event probe.
			 */
			ret = lttng_event_create(probe_desc->event_desc[i],
					enabler->chan);
			if (ret) {
				DBG("Unable to create event %s, error %d\n",
					probe_desc->event_desc[i]->name, ret);
			}
		}
	}
}

/*
 * Create events associated with an enabler (if not already present),
 * and add backward reference from the event to the enabler.
 */
static
int lttng_enabler_ref_events(struct lttng_enabler *enabler)
{
	struct lttng_session *session = enabler->chan->session;
	struct lttng_event *event;

	/* First ensure that probe events are created for this enabler. */
	lttng_create_event_if_missing(enabler);

	/* For each event matching enabler in session event list. */
	cds_list_for_each_entry(event, &session->events_head, node) {
		struct lttng_enabler_ref *enabler_ref;

		if (!lttng_event_match_enabler(event, enabler))
			continue;

		enabler_ref = lttng_event_enabler_ref(event, enabler);
		if (!enabler_ref) {
			/*
			 * If no backward ref, create it.
			 * Add backward ref from event to enabler.
			 */
			enabler_ref = zmalloc(sizeof(*enabler_ref));
			if (!enabler_ref)
				return -ENOMEM;
			enabler_ref->ref = enabler;
			cds_list_add(&enabler_ref->node,
					&event->enablers_ref_head);
		}

		/*
		 * Link filter bytecodes if not linked yet.
		 */
		lttng_enabler_event_link_bytecode(event, enabler);

		/* TODO: merge event context. */
	}
	return 0;
}

/*
 * Called at library load: connect the probe on all enablers matching
 * this event.
 * Called with session mutex held.
 */
int lttng_fix_pending_events(void)
{
	struct lttng_session *session;

	cds_list_for_each_entry(session, &sessions, node) {
		lttng_session_lazy_sync_enablers(session);
	}
	return 0;
}

/*
 * For each session of the owner thread, execute pending statedump.
 * Only dump state for the sessions owned by the caller thread, because
 * we don't keep ust_lock across the entire iteration.
 */
void lttng_handle_pending_statedump(void *owner)
{
	struct lttng_session *session;

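	/*
	 * The base-address state dump below runs before this function
	 * takes ust_lock; only the per-session statedump_pending flags
	 * are cleared under the lock afterwards.
	 */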
	/* Execute state dump */
	lttng_ust_baddr_statedump(owner);

	/* Clear pending state dump */
	if (ust_lock()) {
		goto end;
	}
	cds_list_for_each_entry(session, &sessions, node) {
		if (session->owner != owner)
			continue;
		if (!session->statedump_pending)
			continue;
		session->statedump_pending = 0;
	}
end:
	ust_unlock();
	return;
}

/*
 * Only used internally at session destruction.
 */
static
void _lttng_event_destroy(struct lttng_event *event)
{
	struct lttng_enabler_ref *enabler_ref, *tmp_enabler_ref;

	cds_list_del(&event->node);
	lttng_destroy_context(event->ctx);
	lttng_free_event_filter_runtime(event);
	/* Free event enabler refs */
	cds_list_for_each_entry_safe(enabler_ref, tmp_enabler_ref,
			&event->enablers_ref_head, node)
		free(enabler_ref);
	free(event);
}

void lttng_ust_events_exit(void)
{
	struct lttng_session *session, *tmpsession;

	cds_list_for_each_entry_safe(session, tmpsession, &sessions, node)
		lttng_session_destroy(session);
}

/*
 * Enabler management.
 */
struct lttng_enabler *lttng_enabler_create(enum lttng_enabler_type type,
		struct lttng_ust_event *event_param,
		struct lttng_channel *chan)
{
	struct lttng_enabler *enabler;

	enabler = zmalloc(sizeof(*enabler));
	if (!enabler)
		return NULL;
	enabler->type = type;
	CDS_INIT_LIST_HEAD(&enabler->filter_bytecode_head);
	CDS_INIT_LIST_HEAD(&enabler->excluder_head);
	memcpy(&enabler->event_param, event_param,
		sizeof(enabler->event_param));
	enabler->chan = chan;
	/* ctx left NULL */
	enabler->enabled = 1;
	cds_list_add(&enabler->node, &enabler->chan->session->enablers_head);
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	return enabler;
}

int lttng_enabler_enable(struct lttng_enabler *enabler)
{
	enabler->enabled = 1;
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	return 0;
}

int lttng_enabler_disable(struct lttng_enabler *enabler)
{
	enabler->enabled = 0;
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	return 0;
}

int lttng_enabler_attach_bytecode(struct lttng_enabler *enabler,
		struct lttng_ust_filter_bytecode_node *bytecode)
{
	bytecode->enabler = enabler;
	cds_list_add_tail(&bytecode->node, &enabler->filter_bytecode_head);
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	return 0;
}

int lttng_enabler_attach_exclusion(struct lttng_enabler *enabler,
		struct lttng_ust_excluder_node *excluder)
{
	excluder->enabler = enabler;
	cds_list_add_tail(&excluder->node, &enabler->excluder_head);
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	return 0;
}

int lttng_attach_context(struct lttng_ust_context *context_param,
		struct lttng_ctx **ctx, struct lttng_session *session)
{
	/*
	 * We cannot attach a context after trace has been started for a
	 * session because the metadata does not allow expressing this
	 * information outside of the original channel scope.
	 */
	if (session->been_active)
		return -EPERM;

	switch (context_param->ctx) {
	case LTTNG_UST_CONTEXT_PTHREAD_ID:
		return lttng_add_pthread_id_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_VTID:
		return lttng_add_vtid_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_VPID:
		return lttng_add_vpid_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_PROCNAME:
		return lttng_add_procname_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_IP:
		return lttng_add_ip_to_ctx(ctx);
	default:
		return -EINVAL;
	}
}

int lttng_enabler_attach_context(struct lttng_enabler *enabler,
		struct lttng_ust_context *context_param)
{
#if 0	// disabled for now.
	struct lttng_session *session = enabler->chan->session;
	int ret;

	ret = lttng_attach_context(context_param, &enabler->ctx,
			session);
	if (ret)
		return ret;
	lttng_session_lazy_sync_enablers(enabler->chan->session);
#endif
	return -ENOSYS;
}

static
void lttng_enabler_destroy(struct lttng_enabler *enabler)
{
	struct lttng_ust_filter_bytecode_node *filter_node, *tmp_filter_node;
	struct lttng_ust_excluder_node *excluder_node, *tmp_excluder_node;

	/* Destroy filter bytecode */
	cds_list_for_each_entry_safe(filter_node, tmp_filter_node,
			&enabler->filter_bytecode_head, node) {
		free(filter_node);
	}

	/* Destroy excluders */
	cds_list_for_each_entry_safe(excluder_node, tmp_excluder_node,
			&enabler->excluder_head, node) {
		free(excluder_node);
	}

	/* Destroy contexts */
	lttng_destroy_context(enabler->ctx);

	cds_list_del(&enabler->node);
	free(enabler);
}

/*
 * lttng_session_sync_enablers should be called just before starting a
 * session.
 */
static
void lttng_session_sync_enablers(struct lttng_session *session)
{
	struct lttng_enabler *enabler;
	struct lttng_event *event;

	cds_list_for_each_entry(enabler, &session->enablers_head, node)
		lttng_enabler_ref_events(enabler);
	/*
	 * For each event, if at least one of its enablers is enabled,
	 * and its channel and session transient states are enabled, we
	 * enable the event, else we disable it.
	 */
	cds_list_for_each_entry(event, &session->events_head, node) {
		struct lttng_enabler_ref *enabler_ref;
		struct lttng_bytecode_runtime *runtime;
		int enabled = 0, has_enablers_without_bytecode = 0;

		/* Enable events */
		cds_list_for_each_entry(enabler_ref,
				&event->enablers_ref_head, node) {
			if (enabler_ref->ref->enabled) {
				enabled = 1;
				break;
			}
		}
		/*
		 * Enabled state is based on the union of enablers, with
		 * the intersection of session and channel transient enable
		 * states.
		 */
		enabled = enabled && session->tstate && event->chan->tstate;

		CMM_STORE_SHARED(event->enabled, enabled);
		/*
		 * Sync tracepoint registration with event enabled
		 * state.
		 */
		if (enabled) {
			if (!event->registered)
				register_event(event);
		} else {
			if (event->registered)
				unregister_event(event);
		}

		/* Check whether the event has any enabled enabler without filter bytecode. */
		cds_list_for_each_entry(enabler_ref,
				&event->enablers_ref_head, node) {
			if (enabler_ref->ref->enabled
					&& cds_list_empty(&enabler_ref->ref->filter_bytecode_head)) {
				has_enablers_without_bytecode = 1;
				break;
			}
		}
		event->has_enablers_without_bytecode =
			has_enablers_without_bytecode;
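		/*
		 * This flag lets the filter evaluation path record the
		 * event even when none of the attached filter bytecodes
		 * match, since at least one enabled enabler requested the
		 * event without any filter.
		 */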

		/* Enable filters */
		cds_list_for_each_entry(runtime,
				&event->bytecode_runtime_head, node) {
			lttng_filter_sync_state(runtime);
		}
	}
}

/*
 * Apply enablers to session events, adding events to session if need
 * be. It is required after each modification applied to an active
 * session, and right before session "start".
 * "lazy" sync means we only sync if required.
 */
static
void lttng_session_lazy_sync_enablers(struct lttng_session *session)
{
	/* We can skip if session is not active */
	if (!session->active)
		return;
	lttng_session_sync_enablers(session);
}