[lttng-ust.git] / liblttng-ust / lttng-events.c
/*
 * lttng-events.c
 *
 * Holds LTTng per-session event registry.
 *
 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _GNU_SOURCE
#include <stdio.h>
#include <urcu/list.h>
#include <urcu/hlist.h>
#include <pthread.h>
#include <errno.h>
#include <sys/shm.h>
#include <sys/ipc.h>
#include <stdint.h>
#include <stddef.h>
#include <inttypes.h>
#include <time.h>
#include <lttng/ust-endian.h>
#include "clock.h"

#include <urcu-bp.h>
#include <urcu/compiler.h>
#include <urcu/uatomic.h>
#include <urcu/arch.h>

#include <lttng/tracepoint.h>
#include <lttng/ust-events.h>

#include <usterr-signal-safe.h>
#include <helper.h>
#include <lttng/ust-ctl.h>
#include <ust-comm.h>
#include "error.h"
#include "compat.h"
#include "lttng-ust-uuid.h"

#include "tracepoint-internal.h"
#include "lttng-tracer.h"
#include "lttng-tracer-core.h"
#include "lttng-ust-baddr.h"
#include "wait.h"
#include "../libringbuffer/shm.h"
#include "jhash.h"

/*
 * All operations within this file are called by the communication
 * thread, under ust_lock protection.
 */

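/*
 * Locking sketch (it mirrors the pattern used by
 * lttng_handle_pending_statedump() below): a caller brackets registry
 * operations as
 *
 *	if (ust_lock())
 *		goto end;
 *	... operate on sessions/events ...
 * end:
 *	ust_unlock();
 *
 * where a nonzero return from ust_lock() means the operation should be
 * skipped (e.g. during process teardown); the lock is still held in
 * that case and must still be released with ust_unlock().
 */
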
static CDS_LIST_HEAD(sessions);

struct cds_list_head *_lttng_get_sessions(void)
{
	return &sessions;
}

static void _lttng_event_destroy(struct lttng_event *event);

static
void lttng_session_lazy_sync_enablers(struct lttng_session *session);
static
void lttng_session_sync_enablers(struct lttng_session *session);
static
void lttng_enabler_destroy(struct lttng_enabler *enabler);

/*
 * Called with ust lock held.
 */
int lttng_session_active(void)
{
	struct lttng_session *iter;

	cds_list_for_each_entry(iter, &sessions, node) {
		if (iter->active)
			return 1;
	}
	return 0;
}

static
int lttng_loglevel_match(int loglevel,
		unsigned int has_loglevel,
		enum lttng_ust_loglevel_type req_type,
		int req_loglevel)
{
	if (!has_loglevel)
		loglevel = TRACE_DEFAULT;
	switch (req_type) {
	case LTTNG_UST_LOGLEVEL_RANGE:
		if (loglevel <= req_loglevel || req_loglevel == -1)
			return 1;
		else
			return 0;
	case LTTNG_UST_LOGLEVEL_SINGLE:
		if (loglevel == req_loglevel || req_loglevel == -1)
			return 1;
		else
			return 0;
	case LTTNG_UST_LOGLEVEL_ALL:
	default:
		return 1;
	}
}

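/*
 * Note on loglevel matching (assuming the usual LTTng-UST loglevel
 * numbering, where lower values are more severe: TRACE_EMERG == 0 down
 * to TRACE_DEBUG == 14):
 *
 *	lttng_loglevel_match(TRACE_ERR, 1,
 *			LTTNG_UST_LOGLEVEL_RANGE, TRACE_WARNING)
 *
 * returns 1 because TRACE_ERR (3) <= TRACE_WARNING (4), i.e. a RANGE
 * request matches every event at least as severe as the requested
 * level. A req_loglevel of -1 matches all loglevels for both RANGE
 * and SINGLE requests.
 */
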
void synchronize_trace(void)
{
	synchronize_rcu();
}

struct lttng_session *lttng_session_create(void)
{
	struct lttng_session *session;
	int i;

	session = zmalloc(sizeof(struct lttng_session));
	if (!session)
		return NULL;
	CDS_INIT_LIST_HEAD(&session->chan_head);
	CDS_INIT_LIST_HEAD(&session->events_head);
	CDS_INIT_LIST_HEAD(&session->enablers_head);
	for (i = 0; i < LTTNG_UST_EVENT_HT_SIZE; i++)
		CDS_INIT_HLIST_HEAD(&session->events_ht.table[i]);
	cds_list_add(&session->node, &sessions);
	return session;
}

/*
 * Only used internally at session destruction.
 */
static
void _lttng_channel_unmap(struct lttng_channel *lttng_chan)
{
	struct channel *chan;
	struct lttng_ust_shm_handle *handle;

	cds_list_del(&lttng_chan->node);
	lttng_destroy_context(lttng_chan->ctx);
	chan = lttng_chan->chan;
	handle = lttng_chan->handle;
	/*
	 * note: lttng_chan is private data contained within handle. It
	 * will be freed along with the handle.
	 */
	channel_destroy(chan, handle, 0);
}

static
void register_event(struct lttng_event *event)
{
	int ret;
	const struct lttng_event_desc *desc;

	assert(event->registered == 0);
	desc = event->desc;
	ret = __tracepoint_probe_register(desc->name,
			desc->probe_callback,
			event, desc->signature);
	WARN_ON_ONCE(ret);
	if (!ret)
		event->registered = 1;
}

static
void unregister_event(struct lttng_event *event)
{
	int ret;
	const struct lttng_event_desc *desc;

	assert(event->registered == 1);
	desc = event->desc;
	ret = __tracepoint_probe_unregister(desc->name,
			desc->probe_callback,
			event);
	WARN_ON_ONCE(ret);
	if (!ret)
		event->registered = 0;
}

/*
 * Only used internally at session destruction.
 */
static
void _lttng_event_unregister(struct lttng_event *event)
{
	if (event->registered)
		unregister_event(event);
}

void lttng_session_destroy(struct lttng_session *session)
{
	struct lttng_channel *chan, *tmpchan;
	struct lttng_event *event, *tmpevent;
	struct lttng_enabler *enabler, *tmpenabler;

	CMM_ACCESS_ONCE(session->active) = 0;
	cds_list_for_each_entry(event, &session->events_head, node) {
		_lttng_event_unregister(event);
	}
	synchronize_trace();	/* Wait for in-flight events to complete */
	cds_list_for_each_entry_safe(enabler, tmpenabler,
			&session->enablers_head, node)
		lttng_enabler_destroy(enabler);
	cds_list_for_each_entry_safe(event, tmpevent,
			&session->events_head, node)
		_lttng_event_destroy(event);
	cds_list_for_each_entry_safe(chan, tmpchan, &session->chan_head, node)
		_lttng_channel_unmap(chan);
	cds_list_del(&session->node);
	free(session);
}

int lttng_session_enable(struct lttng_session *session)
{
	int ret = 0;
	struct lttng_channel *chan;
	int notify_socket;

	if (session->active) {
		ret = -EBUSY;
		goto end;
	}

	notify_socket = lttng_get_notify_socket(session->owner);
	if (notify_socket < 0)
		return notify_socket;

	/* Set transient enabler state to "enabled" */
	session->tstate = 1;
	/* We need to sync enablers with session before activation. */
	lttng_session_sync_enablers(session);

	/*
	 * Snapshot the number of events per channel to know the type of header
	 * we need to use.
	 */
	cds_list_for_each_entry(chan, &session->chan_head, node) {
		const struct lttng_ctx *ctx;
		const struct lttng_ctx_field *fields = NULL;
		size_t nr_fields = 0;
		uint32_t chan_id;

		/* Don't change the header type on session stop/restart. */
		if (chan->header_type)
			continue;
		ctx = chan->ctx;
		if (ctx) {
			nr_fields = ctx->nr_fields;
			fields = ctx->fields;
		}
		ret = ustcomm_register_channel(notify_socket,
				session->objd,
				chan->objd,
				nr_fields,
				fields,
				&chan_id,
				&chan->header_type);
		if (ret) {
			DBG("Error (%d) registering channel to sessiond", ret);
			return ret;
		}
		if (chan_id != chan->id) {
			DBG("Error: channel registration id (%u) does not match id assigned at creation (%u)",
				chan_id, chan->id);
			return -EINVAL;
		}
	}

	/* Set atomically the state to "active" */
	CMM_ACCESS_ONCE(session->active) = 1;
	CMM_ACCESS_ONCE(session->been_active) = 1;

	session->statedump_pending = 1;
	lttng_ust_sockinfo_session_enabled(session->owner);
end:
	return ret;
}

int lttng_session_disable(struct lttng_session *session)
{
	int ret = 0;

	if (!session->active) {
		ret = -EBUSY;
		goto end;
	}
	/* Set atomically the state to "inactive" */
	CMM_ACCESS_ONCE(session->active) = 0;

	/* Set transient enabler state to "disabled" */
	session->tstate = 0;
	lttng_session_sync_enablers(session);
end:
	return ret;
}

int lttng_channel_enable(struct lttng_channel *channel)
{
	int ret = 0;

	if (channel->enabled) {
		ret = -EBUSY;
		goto end;
	}
	/* Set transient enabler state to "enabled" */
	channel->tstate = 1;
	lttng_session_sync_enablers(channel->session);
	/* Set atomically the state to "enabled" */
	CMM_ACCESS_ONCE(channel->enabled) = 1;
end:
	return ret;
}

int lttng_channel_disable(struct lttng_channel *channel)
{
	int ret = 0;

	if (!channel->enabled) {
		ret = -EBUSY;
		goto end;
	}
	/* Set atomically the state to "disabled" */
	CMM_ACCESS_ONCE(channel->enabled) = 0;
	/* Set transient enabler state to "disabled" */
	channel->tstate = 0;
	lttng_session_sync_enablers(channel->session);
end:
	return ret;
}

/*
 * Supports event creation while tracing session is active.
 */
static
int lttng_event_create(const struct lttng_event_desc *desc,
		struct lttng_channel *chan)
{
	const char *event_name = desc->name;
	struct lttng_event *event;
	struct lttng_session *session = chan->session;
	struct cds_hlist_head *head;
	struct cds_hlist_node *node;
	int ret = 0;
	size_t name_len = strlen(event_name);
	uint32_t hash;
	int notify_socket, loglevel;
	const char *uri;

	hash = jhash(event_name, name_len, 0);
	head = &chan->session->events_ht.table[hash & (LTTNG_UST_EVENT_HT_SIZE - 1)];
	cds_hlist_for_each_entry(event, node, head, hlist) {
		assert(event->desc);
		if (!strncmp(event->desc->name, desc->name,
					LTTNG_UST_SYM_NAME_LEN - 1)
				&& chan == event->chan) {
			ret = -EEXIST;
			goto exist;
		}
	}

	notify_socket = lttng_get_notify_socket(session->owner);
	if (notify_socket < 0) {
		ret = notify_socket;
		goto socket_error;
	}

	/*
	 * Check if the loglevel matches. Refuse to connect the event if not.
	 */
	event = zmalloc(sizeof(struct lttng_event));
	if (!event) {
		ret = -ENOMEM;
		goto cache_error;
	}
	event->chan = chan;

	/* Event will be enabled by enabler sync. */
	event->enabled = 0;
	event->registered = 0;
	CDS_INIT_LIST_HEAD(&event->bytecode_runtime_head);
	CDS_INIT_LIST_HEAD(&event->enablers_ref_head);
	event->desc = desc;

	if (desc->loglevel)
		loglevel = *(*event->desc->loglevel);
	else
		loglevel = TRACE_DEFAULT;
	if (desc->u.ext.model_emf_uri)
		uri = *(desc->u.ext.model_emf_uri);
	else
		uri = NULL;

	/* Fetch event ID from sessiond */
	ret = ustcomm_register_event(notify_socket,
			session->objd,
			chan->objd,
			event_name,
			loglevel,
			desc->signature,
			desc->nr_fields,
			desc->fields,
			uri,
			&event->id);
	if (ret < 0) {
		DBG("Error (%d) registering event to sessiond", ret);
		goto sessiond_register_error;
	}

	/* Populate lttng_event structure before tracepoint registration. */
	cmm_smp_wmb();
	cds_list_add(&event->node, &chan->session->events_head);
	cds_hlist_add_head(&event->hlist, head);
	return 0;

sessiond_register_error:
	free(event);
cache_error:
socket_error:
exist:
	return ret;
}

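/*
 * Event lookup in the registry above works as follows: events are
 * hashed by name into the per-session events_ht table,
 *
 *	hash = jhash(event_name, strlen(event_name), 0);
 *	head = &session->events_ht.table[hash & (LTTNG_UST_EVENT_HT_SIZE - 1)];
 *
 * so LTTNG_UST_EVENT_HT_SIZE must be a power of two for the mask to
 * act as a modulo. Uniqueness is enforced per (event name, channel)
 * pair, not per name alone.
 */
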
static
int lttng_desc_match_wildcard_enabler(const struct lttng_event_desc *desc,
		struct lttng_enabler *enabler)
{
	int loglevel = 0;
	unsigned int has_loglevel = 0;

	assert(enabler->type == LTTNG_ENABLER_WILDCARD);
	/* Compare excluding final '*' */
	if (strncmp(desc->name, enabler->event_param.name,
			strlen(enabler->event_param.name) - 1))
		return 0;
	if (desc->loglevel) {
		loglevel = *(*desc->loglevel);
		has_loglevel = 1;
	}
	if (!lttng_loglevel_match(loglevel,
			has_loglevel,
			enabler->event_param.loglevel_type,
			enabler->event_param.loglevel))
		return 0;
	return 1;
}

static
int lttng_desc_match_event_enabler(const struct lttng_event_desc *desc,
		struct lttng_enabler *enabler)
{
	int loglevel = 0;
	unsigned int has_loglevel = 0;

	assert(enabler->type == LTTNG_ENABLER_EVENT);
	if (strcmp(desc->name, enabler->event_param.name))
		return 0;
	if (desc->loglevel) {
		loglevel = *(*desc->loglevel);
		has_loglevel = 1;
	}
	if (!lttng_loglevel_match(loglevel,
			has_loglevel,
			enabler->event_param.loglevel_type,
			enabler->event_param.loglevel))
		return 0;
	return 1;
}

static
int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
		struct lttng_enabler *enabler)
{
	struct lttng_ust_excluder_node *excluder;

	/* If the event matches an excluder, return 'does not match'. */
	cds_list_for_each_entry(excluder, &enabler->excluder_head, node) {
		int count;

		for (count = 0; count < excluder->excluder.count; count++) {
			int found, len;
			char *excluder_name;

			excluder_name = (char *) (excluder->excluder.names)
					+ count * LTTNG_UST_SYM_NAME_LEN;
			len = strnlen(excluder_name, LTTNG_UST_SYM_NAME_LEN);
			if (len > 0 && excluder_name[len - 1] == '*') {
				found = !strncmp(desc->name, excluder_name,
						len - 1);
			} else {
				found = !strncmp(desc->name, excluder_name,
						LTTNG_UST_SYM_NAME_LEN - 1);
			}
			if (found) {
				return 0;
			}
		}
	}
	switch (enabler->type) {
	case LTTNG_ENABLER_WILDCARD:
		return lttng_desc_match_wildcard_enabler(desc, enabler);
	case LTTNG_ENABLER_EVENT:
		return lttng_desc_match_event_enabler(desc, enabler);
	default:
		return -EINVAL;
	}
}

static
int lttng_event_match_enabler(struct lttng_event *event,
		struct lttng_enabler *enabler)
{
	if (lttng_desc_match_enabler(event->desc, enabler)
			&& event->chan == enabler->chan)
		return 1;
	else
		return 0;
}

static
struct lttng_enabler_ref *lttng_event_enabler_ref(struct lttng_event *event,
		struct lttng_enabler *enabler)
{
	struct lttng_enabler_ref *enabler_ref;

	cds_list_for_each_entry(enabler_ref,
			&event->enablers_ref_head, node) {
		if (enabler_ref->ref == enabler)
			return enabler_ref;
	}
	return NULL;
}

/*
 * Create struct lttng_event if it is missing and present in the list of
 * tracepoint probes.
 */
static
void lttng_create_event_if_missing(struct lttng_enabler *enabler)
{
	struct lttng_session *session = enabler->chan->session;
	struct lttng_probe_desc *probe_desc;
	const struct lttng_event_desc *desc;
	struct lttng_event *event;
	int i;
	struct cds_list_head *probe_list;

	probe_list = lttng_get_probe_list_head();
	/*
	 * For each probe event, if we find that a probe event matches
	 * our enabler, create an associated lttng_event if not
	 * already present.
	 */
	cds_list_for_each_entry(probe_desc, probe_list, head) {
		for (i = 0; i < probe_desc->nr_events; i++) {
			int found = 0, ret;
			struct cds_hlist_head *head;
			struct cds_hlist_node *node;
			const char *event_name;
			size_t name_len;
			uint32_t hash;

			desc = probe_desc->event_desc[i];
			if (!lttng_desc_match_enabler(desc, enabler))
				continue;
			event_name = desc->name;
			name_len = strlen(event_name);

			/*
			 * Check if already created.
			 */
			hash = jhash(event_name, name_len, 0);
			head = &session->events_ht.table[hash & (LTTNG_UST_EVENT_HT_SIZE - 1)];
			cds_hlist_for_each_entry(event, node, head, hlist) {
				if (event->desc == desc
						&& event->chan == enabler->chan)
					found = 1;
			}
			if (found)
				continue;

			/*
			 * We need to create an event for this
			 * event probe.
			 */
			ret = lttng_event_create(probe_desc->event_desc[i],
					enabler->chan);
			if (ret) {
				DBG("Unable to create event %s, error %d\n",
					probe_desc->event_desc[i]->name, ret);
			}
		}
	}
}

/*
 * Create events associated with an enabler (if not already present),
 * and add backward reference from the event to the enabler.
 */
static
int lttng_enabler_ref_events(struct lttng_enabler *enabler)
{
	struct lttng_session *session = enabler->chan->session;
	struct lttng_event *event;

	/* First ensure that probe events are created for this enabler. */
	lttng_create_event_if_missing(enabler);

	/* For each event matching enabler in session event list. */
	cds_list_for_each_entry(event, &session->events_head, node) {
		struct lttng_enabler_ref *enabler_ref;

		if (!lttng_event_match_enabler(event, enabler))
			continue;

		enabler_ref = lttng_event_enabler_ref(event, enabler);
		if (!enabler_ref) {
			/*
			 * If no backward ref, create it.
			 * Add backward ref from event to enabler.
			 */
			enabler_ref = zmalloc(sizeof(*enabler_ref));
			if (!enabler_ref)
				return -ENOMEM;
			enabler_ref->ref = enabler;
			cds_list_add(&enabler_ref->node,
					&event->enablers_ref_head);
		}

		/*
		 * Link filter bytecodes if not linked yet.
		 */
		lttng_enabler_event_link_bytecode(event, enabler);

		/* TODO: merge event context. */
	}
	return 0;
}

/*
 * Called at library load: connect the probe on all enablers matching
 * this event.
 * Called with session mutex held.
 */
int lttng_fix_pending_events(void)
{
	struct lttng_session *session;

	cds_list_for_each_entry(session, &sessions, node) {
		lttng_session_lazy_sync_enablers(session);
	}
	return 0;
}

/*
 * For each session of the owner thread, execute pending statedump.
 * Only dump state for the sessions owned by the caller thread, because
 * we don't keep ust_lock across the entire iteration.
 */
void lttng_handle_pending_statedump(void *owner)
{
	struct lttng_session *session;

	/* Execute state dump */
	lttng_ust_baddr_statedump(owner);

	/* Clear pending state dump */
	if (ust_lock()) {
		goto end;
	}
	cds_list_for_each_entry(session, &sessions, node) {
		if (session->owner != owner)
			continue;
		if (!session->statedump_pending)
			continue;
		session->statedump_pending = 0;
	}
end:
	ust_unlock();
	return;
}

/*
 * Only used internally at session destruction.
 */
static
void _lttng_event_destroy(struct lttng_event *event)
{
	struct lttng_enabler_ref *enabler_ref, *tmp_enabler_ref;

	cds_list_del(&event->node);
	lttng_destroy_context(event->ctx);
	lttng_free_event_filter_runtime(event);
	/* Free event enabler refs */
	cds_list_for_each_entry_safe(enabler_ref, tmp_enabler_ref,
			&event->enablers_ref_head, node)
		free(enabler_ref);
	free(event);
}

void lttng_ust_events_exit(void)
{
	struct lttng_session *session, *tmpsession;

	cds_list_for_each_entry_safe(session, tmpsession, &sessions, node)
		lttng_session_destroy(session);
}

/*
 * Enabler management.
 */
struct lttng_enabler *lttng_enabler_create(enum lttng_enabler_type type,
		struct lttng_ust_event *event_param,
		struct lttng_channel *chan)
{
	struct lttng_enabler *enabler;

	enabler = zmalloc(sizeof(*enabler));
	if (!enabler)
		return NULL;
	enabler->type = type;
	CDS_INIT_LIST_HEAD(&enabler->filter_bytecode_head);
	CDS_INIT_LIST_HEAD(&enabler->excluder_head);
	memcpy(&enabler->event_param, event_param,
		sizeof(enabler->event_param));
	enabler->chan = chan;
	/* ctx left NULL */
	enabler->enabled = 1;
	cds_list_add(&enabler->node, &enabler->chan->session->enablers_head);
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	return enabler;
}

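/*
 * Minimal usage sketch, assuming event_param has been received from
 * the session daemon by a command handler (the variable names below
 * are illustrative, not part of this file):
 *
 *	struct lttng_enabler *enabler;
 *
 *	enabler = lttng_enabler_create(LTTNG_ENABLER_WILDCARD,
 *			&event_param, chan);
 *	if (!enabler)
 *		return -ENOMEM;
 *
 * Matching events are then created and enabled lazily: the sync only
 * runs once the session is (or becomes) active, see
 * lttng_session_lazy_sync_enablers().
 */
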
int lttng_enabler_enable(struct lttng_enabler *enabler)
{
	enabler->enabled = 1;
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	return 0;
}

int lttng_enabler_disable(struct lttng_enabler *enabler)
{
	enabler->enabled = 0;
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	return 0;
}

int lttng_enabler_attach_bytecode(struct lttng_enabler *enabler,
		struct lttng_ust_filter_bytecode_node *bytecode)
{
	bytecode->enabler = enabler;
	cds_list_add_tail(&bytecode->node, &enabler->filter_bytecode_head);
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	return 0;
}

int lttng_enabler_attach_exclusion(struct lttng_enabler *enabler,
		struct lttng_ust_excluder_node *excluder)
{
	excluder->enabler = enabler;
	cds_list_add_tail(&excluder->node, &enabler->excluder_head);
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	return 0;
}

int lttng_attach_context(struct lttng_ust_context *context_param,
		struct lttng_ctx **ctx, struct lttng_session *session)
{
	/*
	 * We cannot attach a context after trace has been started for a
	 * session because the metadata does not allow expressing this
	 * information outside of the original channel scope.
	 */
	if (session->been_active)
		return -EPERM;

	switch (context_param->ctx) {
	case LTTNG_UST_CONTEXT_PTHREAD_ID:
		return lttng_add_pthread_id_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_VTID:
		return lttng_add_vtid_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_VPID:
		return lttng_add_vpid_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_PROCNAME:
		return lttng_add_procname_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_IP:
		return lttng_add_ip_to_ctx(ctx);
	default:
		return -EINVAL;
	}
}

int lttng_enabler_attach_context(struct lttng_enabler *enabler,
		struct lttng_ust_context *context_param)
{
#if 0	// disabled for now.
	struct lttng_session *session = enabler->chan->session;
	int ret;

	ret = lttng_attach_context(context_param, &enabler->ctx,
			session);
	if (ret)
		return ret;
	lttng_session_lazy_sync_enablers(enabler->chan->session);
#endif
	return -ENOSYS;
}

static
void lttng_enabler_destroy(struct lttng_enabler *enabler)
{
	struct lttng_ust_filter_bytecode_node *filter_node, *tmp_filter_node;
	struct lttng_ust_excluder_node *excluder_node, *tmp_excluder_node;

	/* Destroy filter bytecode */
	cds_list_for_each_entry_safe(filter_node, tmp_filter_node,
			&enabler->filter_bytecode_head, node) {
		free(filter_node);
	}

	/* Destroy excluders */
	cds_list_for_each_entry_safe(excluder_node, tmp_excluder_node,
			&enabler->excluder_head, node) {
		free(excluder_node);
	}

	/* Destroy contexts */
	lttng_destroy_context(enabler->ctx);

	cds_list_del(&enabler->node);
	free(enabler);
}

/*
 * lttng_session_sync_enablers should be called just before starting a
 * session.
 */
static
void lttng_session_sync_enablers(struct lttng_session *session)
{
	struct lttng_enabler *enabler;
	struct lttng_event *event;

	cds_list_for_each_entry(enabler, &session->enablers_head, node)
		lttng_enabler_ref_events(enabler);
	/*
	 * For each event, if at least one of its enablers is enabled,
	 * and its channel and session transient states are enabled, we
	 * enable the event, else we disable it.
	 */
	cds_list_for_each_entry(event, &session->events_head, node) {
		struct lttng_enabler_ref *enabler_ref;
		struct lttng_bytecode_runtime *runtime;
		int enabled = 0, has_enablers_without_bytecode = 0;

		/* Enable events */
		cds_list_for_each_entry(enabler_ref,
				&event->enablers_ref_head, node) {
			if (enabler_ref->ref->enabled) {
				enabled = 1;
				break;
			}
		}
		/*
		 * Enabled state is based on union of enablers, with
		 * intersection of session and channel transient enable
		 * states.
		 */
		enabled = enabled && session->tstate && event->chan->tstate;

		CMM_STORE_SHARED(event->enabled, enabled);
		/*
		 * Sync tracepoint registration with event enabled
		 * state.
		 */
		if (enabled) {
			if (!event->registered)
				register_event(event);
		} else {
			if (event->registered)
				unregister_event(event);
		}

		/* Check whether the event has enabled enablers without filter bytecode. */
		cds_list_for_each_entry(enabler_ref,
				&event->enablers_ref_head, node) {
			if (enabler_ref->ref->enabled
					&& cds_list_empty(&enabler_ref->ref->filter_bytecode_head)) {
				has_enablers_without_bytecode = 1;
				break;
			}
		}
		event->has_enablers_without_bytecode =
			has_enablers_without_bytecode;

		/* Enable filters */
		cds_list_for_each_entry(runtime,
				&event->bytecode_runtime_head, node) {
			lttng_filter_sync_state(runtime);
		}
	}
}

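/*
 * In other words, after a sync the effective state of each event is
 *
 *	event->enabled = (at least one referring enabler is enabled)
 *			 && session->tstate && event->chan->tstate;
 *
 * and the tracepoint probe is registered if and only if the event ends
 * up enabled.
 */
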
/*
 * Apply enablers to session events, adding events to session if need
 * be. It is required after each modification applied to an active
 * session, and right before session "start".
 * "lazy" sync means we only sync if required.
 */
static
void lttng_session_lazy_sync_enablers(struct lttng_session *session)
{
	/* We can skip if session is not active */
	if (!session->active)
		return;
	lttng_session_sync_enablers(session);
}