Fix: filter attach vs event enable race
[lttng-ust.git] / liblttng-ust / lttng-events.c
1 /*
2 * lttng-events.c
3 *
4 * Holds LTTng per-session event registry.
5 *
6 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; only
11 * version 2.1 of the License.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23 #define _GNU_SOURCE
24 #include <stdio.h>
25 #include <urcu/list.h>
26 #include <urcu/hlist.h>
27 #include <pthread.h>
28 #include <errno.h>
29 #include <sys/shm.h>
30 #include <sys/ipc.h>
31 #include <stdint.h>
32 #include <stddef.h>
33 #include <inttypes.h>
34 #include <time.h>
35 #include <lttng/ust-endian.h>
36 #include "clock.h"
37
38 #include <urcu-bp.h>
39 #include <urcu/compiler.h>
40 #include <urcu/uatomic.h>
41 #include <urcu/arch.h>
42
43 #include <lttng/tracepoint.h>
44 #include <lttng/ust-events.h>
45
46 #include <usterr-signal-safe.h>
47 #include <helper.h>
48 #include <lttng/ust-ctl.h>
49 #include <ust-comm.h>
50 #include "error.h"
51 #include "compat.h"
52 #include "lttng-ust-uuid.h"
53
54 #include "tracepoint-internal.h"
55 #include "lttng-tracer.h"
56 #include "lttng-tracer-core.h"
57 #include "lttng-ust-baddr.h"
58 #include "wait.h"
59 #include "../libringbuffer/shm.h"
60 #include "jhash.h"
61
62 /*
63 * All operations within this file are called by the communication
64 * thread, under ust_lock protection.
65 */
66
67 static CDS_LIST_HEAD(sessions);
68
/*
 * Expose the global session list head for other liblttng-ust
 * compilation units. Caller must hold ust_lock (see header comment:
 * all operations in this file run under ust_lock protection).
 */
struct cds_list_head *_lttng_get_sessions(void)
{
	return &sessions;
}
73
74 static void _lttng_event_destroy(struct lttng_event *event);
75
76 static
77 void lttng_session_lazy_sync_enablers(struct lttng_session *session);
78 static
79 void lttng_session_sync_enablers(struct lttng_session *session);
80 static
81 void lttng_enabler_destroy(struct lttng_enabler *enabler);
82
83 /*
84 * Called with ust lock held.
85 */
86 int lttng_session_active(void)
87 {
88 struct lttng_session *iter;
89
90 cds_list_for_each_entry(iter, &sessions, node) {
91 if (iter->active)
92 return 1;
93 }
94 return 0;
95 }
96
97 static
98 int lttng_loglevel_match(int loglevel,
99 unsigned int has_loglevel,
100 enum lttng_ust_loglevel_type req_type,
101 int req_loglevel)
102 {
103 if (!has_loglevel)
104 loglevel = TRACE_DEFAULT;
105 switch (req_type) {
106 case LTTNG_UST_LOGLEVEL_RANGE:
107 if (loglevel <= req_loglevel || req_loglevel == -1)
108 return 1;
109 else
110 return 0;
111 case LTTNG_UST_LOGLEVEL_SINGLE:
112 if (loglevel == req_loglevel || req_loglevel == -1)
113 return 1;
114 else
115 return 0;
116 case LTTNG_UST_LOGLEVEL_ALL:
117 default:
118 return 1;
119 }
120 }
121
/*
 * Wait for an RCU grace period so that all in-flight probe callbacks
 * (RCU read-side critical sections) complete before callers reclaim
 * event/channel data.
 */
void synchronize_trace(void)
{
	synchronize_rcu();
}
126
127 struct lttng_session *lttng_session_create(void)
128 {
129 struct lttng_session *session;
130 int i;
131
132 session = zmalloc(sizeof(struct lttng_session));
133 if (!session)
134 return NULL;
135 CDS_INIT_LIST_HEAD(&session->chan_head);
136 CDS_INIT_LIST_HEAD(&session->events_head);
137 CDS_INIT_LIST_HEAD(&session->enablers_head);
138 for (i = 0; i < LTTNG_UST_EVENT_HT_SIZE; i++)
139 CDS_INIT_HLIST_HEAD(&session->events_ht.table[i]);
140 cds_list_add(&session->node, &sessions);
141 return session;
142 }
143
/*
 * Only used internally at session destruction.
 * Unlink the channel from its session, destroy its context, and tear
 * down the underlying ring buffer channel and shared-memory handle.
 */
static
void _lttng_channel_unmap(struct lttng_channel *lttng_chan)
{
	struct channel *chan;
	struct lttng_ust_shm_handle *handle;

	cds_list_del(&lttng_chan->node);
	lttng_destroy_context(lttng_chan->ctx);
	chan = lttng_chan->chan;
	handle = lttng_chan->handle;
	/*
	 * note: lttng_chan is private data contained within handle. It
	 * will be freed along with the handle. Do not touch lttng_chan
	 * after channel_destroy().
	 */
	channel_destroy(chan, handle, 0);
}
163
164 static
165 void register_event(struct lttng_event *event)
166 {
167 int ret;
168 const struct lttng_event_desc *desc;
169
170 assert(event->registered == 0);
171 desc = event->desc;
172 ret = __tracepoint_probe_register(desc->name,
173 desc->probe_callback,
174 event, desc->signature);
175 WARN_ON_ONCE(ret);
176 if (!ret)
177 event->registered = 1;
178 }
179
180 static
181 void unregister_event(struct lttng_event *event)
182 {
183 int ret;
184 const struct lttng_event_desc *desc;
185
186 assert(event->registered == 1);
187 desc = event->desc;
188 ret = __tracepoint_probe_unregister(desc->name,
189 desc->probe_callback,
190 event);
191 WARN_ON_ONCE(ret);
192 if (!ret)
193 event->registered = 0;
194 }
195
196 /*
197 * Only used internally at session destruction.
198 */
199 static
200 void _lttng_event_unregister(struct lttng_event *event)
201 {
202 if (event->registered)
203 unregister_event(event);
204 }
205
/*
 * Tear down a session: deactivate it, unregister every probe, wait for
 * in-flight probe callers to drain, then free enablers, events,
 * channels and finally the session itself. Order matters: no
 * event/channel data may be reclaimed before synchronize_trace().
 */
void lttng_session_destroy(struct lttng_session *session)
{
	struct lttng_channel *chan, *tmpchan;
	struct lttng_event *event, *tmpevent;
	struct lttng_enabler *enabler, *tmpenabler;

	/* Stop tracing before disconnecting probes. */
	CMM_ACCESS_ONCE(session->active) = 0;
	cds_list_for_each_entry(event, &session->events_head, node) {
		_lttng_event_unregister(event);
	}
	synchronize_trace(); /* Wait for in-flight events to complete */
	/* From here on, no probe caller can reference session data. */
	cds_list_for_each_entry_safe(enabler, tmpenabler,
			&session->enablers_head, node)
		lttng_enabler_destroy(enabler);
	cds_list_for_each_entry_safe(event, tmpevent,
			&session->events_head, node)
		_lttng_event_destroy(event);
	cds_list_for_each_entry_safe(chan, tmpchan, &session->chan_head, node)
		_lttng_channel_unmap(chan);
	cds_list_del(&session->node);
	free(session);
}
228
229 int lttng_session_enable(struct lttng_session *session)
230 {
231 int ret = 0;
232 struct lttng_channel *chan;
233 int notify_socket;
234
235 if (session->active) {
236 ret = -EBUSY;
237 goto end;
238 }
239
240 notify_socket = lttng_get_notify_socket(session->owner);
241 if (notify_socket < 0)
242 return notify_socket;
243
244 /* Set transient enabler state to "enabled" */
245 session->tstate = 1;
246
247 /*
248 * Snapshot the number of events per channel to know the type of header
249 * we need to use.
250 */
251 cds_list_for_each_entry(chan, &session->chan_head, node) {
252 const struct lttng_ctx *ctx;
253 const struct lttng_ctx_field *fields = NULL;
254 size_t nr_fields = 0;
255 uint32_t chan_id;
256
257 /* don't change it if session stop/restart */
258 if (chan->header_type)
259 continue;
260 ctx = chan->ctx;
261 if (ctx) {
262 nr_fields = ctx->nr_fields;
263 fields = ctx->fields;
264 }
265 ret = ustcomm_register_channel(notify_socket,
266 session->objd,
267 chan->objd,
268 nr_fields,
269 fields,
270 &chan_id,
271 &chan->header_type);
272 if (ret) {
273 DBG("Error (%d) registering channel to sessiond", ret);
274 return ret;
275 }
276 if (chan_id != chan->id) {
277 DBG("Error: channel registration id (%u) does not match id assigned at creation (%u)",
278 chan_id, chan->id);
279 return -EINVAL;
280 }
281 }
282
283 /* We need to sync enablers with session before activation. */
284 lttng_session_sync_enablers(session);
285
286 /* Set atomically the state to "active" */
287 CMM_ACCESS_ONCE(session->active) = 1;
288 CMM_ACCESS_ONCE(session->been_active) = 1;
289
290 session->statedump_pending = 1;
291 lttng_ust_sockinfo_session_enabled(session->owner);
292 end:
293 return ret;
294 }
295
296 int lttng_session_disable(struct lttng_session *session)
297 {
298 int ret = 0;
299
300 if (!session->active) {
301 ret = -EBUSY;
302 goto end;
303 }
304 /* Set atomically the state to "inactive" */
305 CMM_ACCESS_ONCE(session->active) = 0;
306
307 /* Set transient enabler state to "disabled" */
308 session->tstate = 0;
309 lttng_session_sync_enablers(session);
310 end:
311 return ret;
312 }
313
314 int lttng_channel_enable(struct lttng_channel *channel)
315 {
316 int ret = 0;
317
318 if (channel->enabled) {
319 ret = -EBUSY;
320 goto end;
321 }
322 /* Set transient enabler state to "enabled" */
323 channel->tstate = 1;
324 lttng_session_sync_enablers(channel->session);
325 /* Set atomically the state to "enabled" */
326 CMM_ACCESS_ONCE(channel->enabled) = 1;
327 end:
328 return ret;
329 }
330
/*
 * Disable a channel. Returns -EBUSY if already disabled. Mirror image
 * of lttng_channel_enable(): the enabled flag is cleared first, then
 * enablers are re-synced under the disabled transient state.
 */
int lttng_channel_disable(struct lttng_channel *channel)
{
	int ret = 0;

	if (!channel->enabled) {
		ret = -EBUSY;
		goto end;
	}
	/* Set atomically the state to "disabled" */
	CMM_ACCESS_ONCE(channel->enabled) = 0;
	/* Set transient enabler state to "disabled" */
	channel->tstate = 0;
	lttng_session_sync_enablers(channel->session);
end:
	return ret;
}
347
/*
 * Supports event creation while tracing session is active.
 *
 * Create a struct lttng_event for the given probe descriptor in the
 * given channel: check for duplicates in the per-session hash table,
 * register the event with the session daemon to obtain its event id,
 * then insert it in the session event list and hash table. The event
 * starts disabled; enabler sync enables it later.
 *
 * Returns 0 on success, -EEXIST if the (name, channel) pair already
 * exists, or a negative error code.
 */
static
int lttng_event_create(const struct lttng_event_desc *desc,
		struct lttng_channel *chan)
{
	const char *event_name = desc->name;
	struct lttng_event *event;
	struct lttng_session *session = chan->session;
	struct cds_hlist_head *head;
	struct cds_hlist_node *node;
	int ret = 0;
	size_t name_len = strlen(event_name);
	uint32_t hash;
	int notify_socket, loglevel;
	const char *uri;

	/* Duplicate check: same name (bounded compare) in same channel. */
	hash = jhash(event_name, name_len, 0);
	head = &chan->session->events_ht.table[hash & (LTTNG_UST_EVENT_HT_SIZE - 1)];
	cds_hlist_for_each_entry(event, node, head, hlist) {
		assert(event->desc);
		if (!strncmp(event->desc->name, desc->name,
					LTTNG_UST_SYM_NAME_LEN - 1)
				&& chan == event->chan) {
			ret = -EEXIST;
			goto exist;
		}
	}

	notify_socket = lttng_get_notify_socket(session->owner);
	if (notify_socket < 0) {
		ret = notify_socket;
		goto socket_error;
	}

	/*
	 * Check if loglevel match. Refuse to connect event if not.
	 */
	event = zmalloc(sizeof(struct lttng_event));
	if (!event) {
		ret = -ENOMEM;
		goto cache_error;
	}
	event->chan = chan;

	/* Event will be enabled by enabler sync. */
	event->enabled = 0;
	event->registered = 0;
	CDS_INIT_LIST_HEAD(&event->bytecode_runtime_head);
	CDS_INIT_LIST_HEAD(&event->enablers_ref_head);
	event->desc = desc;

	/* Loglevel and model-emf-uri are optional descriptor fields. */
	if (desc->loglevel)
		loglevel = *(*event->desc->loglevel);
	else
		loglevel = TRACE_DEFAULT;
	if (desc->u.ext.model_emf_uri)
		uri = *(desc->u.ext.model_emf_uri);
	else
		uri = NULL;

	/* Fetch event ID from sessiond */
	ret = ustcomm_register_event(notify_socket,
			session->objd,
			chan->objd,
			event_name,
			loglevel,
			desc->signature,
			desc->nr_fields,
			desc->fields,
			uri,
			&event->id);
	if (ret < 0) {
		DBG("Error (%d) registering event to sessiond", ret);
		goto sessiond_register_error;
	}

	/* Populate lttng_event structure before tracepoint registration. */
	cmm_smp_wmb();
	cds_list_add(&event->node, &chan->session->events_head);
	cds_hlist_add_head(&event->hlist, head);
	return 0;

sessiond_register_error:
	free(event);
cache_error:
socket_error:
exist:
	return ret;
}
439
440 static
441 int lttng_desc_match_wildcard_enabler(const struct lttng_event_desc *desc,
442 struct lttng_enabler *enabler)
443 {
444 int loglevel = 0;
445 unsigned int has_loglevel = 0;
446
447 assert(enabler->type == LTTNG_ENABLER_WILDCARD);
448 /* Compare excluding final '*' */
449 if (strncmp(desc->name, enabler->event_param.name,
450 strlen(enabler->event_param.name) - 1))
451 return 0;
452 if (desc->loglevel) {
453 loglevel = *(*desc->loglevel);
454 has_loglevel = 1;
455 }
456 if (!lttng_loglevel_match(loglevel,
457 has_loglevel,
458 enabler->event_param.loglevel_type,
459 enabler->event_param.loglevel))
460 return 0;
461 return 1;
462 }
463
464 static
465 int lttng_desc_match_event_enabler(const struct lttng_event_desc *desc,
466 struct lttng_enabler *enabler)
467 {
468 int loglevel = 0;
469 unsigned int has_loglevel = 0;
470
471 assert(enabler->type == LTTNG_ENABLER_EVENT);
472 if (strcmp(desc->name, enabler->event_param.name))
473 return 0;
474 if (desc->loglevel) {
475 loglevel = *(*desc->loglevel);
476 has_loglevel = 1;
477 }
478 if (!lttng_loglevel_match(loglevel,
479 has_loglevel,
480 enabler->event_param.loglevel_type,
481 enabler->event_param.loglevel))
482 return 0;
483 return 1;
484 }
485
/*
 * Return 1 if the event descriptor matches the enabler, 0 if it does
 * not (including when excluded), or -EINVAL for an unknown enabler
 * type. Exclusions are checked first: any excluder name that matches
 * (with trailing-'*' treated as a prefix pattern) vetoes the match.
 */
static
int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
		struct lttng_enabler *enabler)
{
	struct lttng_ust_excluder_node *excluder;

	/* If event matches with an excluder, return 'does not match' */
	cds_list_for_each_entry(excluder, &enabler->excluder_head, node) {
		int count;

		for (count = 0; count < excluder->excluder.count; count++) {
			int found, len;
			char *excluder_name;

			/* Names are packed as fixed-size slots. */
			excluder_name = (char *) (excluder->excluder.names)
					+ count * LTTNG_UST_SYM_NAME_LEN;
			len = strnlen(excluder_name, LTTNG_UST_SYM_NAME_LEN);
			if (len > 0 && excluder_name[len - 1] == '*') {
				/* Trailing '*': prefix comparison. */
				found = !strncmp(desc->name, excluder_name,
						len - 1);
			} else {
				/* Exact (bounded) comparison. */
				found = !strncmp(desc->name, excluder_name,
						LTTNG_UST_SYM_NAME_LEN - 1);
			}
			if (found) {
				return 0;
			}
		}
	}
	/* Not excluded: dispatch on the enabler's match type. */
	switch (enabler->type) {
	case LTTNG_ENABLER_WILDCARD:
		return lttng_desc_match_wildcard_enabler(desc, enabler);
	case LTTNG_ENABLER_EVENT:
		return lttng_desc_match_event_enabler(desc, enabler);
	default:
		return -EINVAL;
	}
}
524
525 static
526 int lttng_event_match_enabler(struct lttng_event *event,
527 struct lttng_enabler *enabler)
528 {
529 if (lttng_desc_match_enabler(event->desc, enabler)
530 && event->chan == enabler->chan)
531 return 1;
532 else
533 return 0;
534 }
535
536 static
537 struct lttng_enabler_ref * lttng_event_enabler_ref(struct lttng_event *event,
538 struct lttng_enabler *enabler)
539 {
540 struct lttng_enabler_ref *enabler_ref;
541
542 cds_list_for_each_entry(enabler_ref,
543 &event->enablers_ref_head, node) {
544 if (enabler_ref->ref == enabler)
545 return enabler_ref;
546 }
547 return NULL;
548 }
549
/*
 * Create struct lttng_event if it is missing and present in the list of
 * tracepoint probes.
 *
 * Walk every registered probe descriptor; for each one matching the
 * enabler, create the corresponding event in the enabler's channel
 * unless it already exists. Creation failures are logged and skipped,
 * not propagated.
 */
static
void lttng_create_event_if_missing(struct lttng_enabler *enabler)
{
	struct lttng_session *session = enabler->chan->session;
	struct lttng_probe_desc *probe_desc;
	const struct lttng_event_desc *desc;
	struct lttng_event *event;
	int i;
	struct cds_list_head *probe_list;

	probe_list = lttng_get_probe_list_head();
	/*
	 * For each probe event, if we find that a probe event matches
	 * our enabler, create an associated lttng_event if not
	 * already present.
	 */
	cds_list_for_each_entry(probe_desc, probe_list, head) {
		for (i = 0; i < probe_desc->nr_events; i++) {
			int found = 0, ret;
			struct cds_hlist_head *head;
			struct cds_hlist_node *node;
			const char *event_name;
			size_t name_len;
			uint32_t hash;

			desc = probe_desc->event_desc[i];
			if (!lttng_desc_match_enabler(desc, enabler))
				continue;
			event_name = desc->name;
			name_len = strlen(event_name);

			/*
			 * Check if already created: same descriptor in the
			 * same channel, via the session's event hash table.
			 */
			hash = jhash(event_name, name_len, 0);
			head = &session->events_ht.table[hash & (LTTNG_UST_EVENT_HT_SIZE - 1)];
			cds_hlist_for_each_entry(event, node, head, hlist) {
				if (event->desc == desc
						&& event->chan == enabler->chan)
					found = 1;
			}
			if (found)
				continue;

			/*
			 * We need to create an event for this
			 * event probe. Failure is logged but does not
			 * abort the scan of remaining probes.
			 */
			ret = lttng_event_create(probe_desc->event_desc[i],
					enabler->chan);
			if (ret) {
				DBG("Unable to create event %s, error %d\n",
					probe_desc->event_desc[i]->name, ret);
			}
		}
	}
}
611
/*
 * Create events associated with an enabler (if not already present),
 * and add backward reference from the event to the enabler.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 * NOTE(review): on -ENOMEM, refs created earlier in the loop remain in
 * place (partial progress); callers appear to tolerate this since a
 * later sync re-runs the same idempotent pass — confirm.
 */
static
int lttng_enabler_ref_events(struct lttng_enabler *enabler)
{
	struct lttng_session *session = enabler->chan->session;
	struct lttng_event *event;

	/* First ensure that probe events are created for this enabler. */
	lttng_create_event_if_missing(enabler);

	/* For each event matching enabler in session event list. */
	cds_list_for_each_entry(event, &session->events_head, node) {
		struct lttng_enabler_ref *enabler_ref;

		if (!lttng_event_match_enabler(event, enabler))
			continue;

		enabler_ref = lttng_event_enabler_ref(event, enabler);
		if (!enabler_ref) {
			/*
			 * If no backward ref, create it.
			 * Add backward ref from event to enabler.
			 */
			enabler_ref = zmalloc(sizeof(*enabler_ref));
			if (!enabler_ref)
				return -ENOMEM;
			enabler_ref->ref = enabler;
			cds_list_add(&enabler_ref->node,
				&event->enablers_ref_head);
		}

		/*
		 * Link filter bytecodes if not linked yet.
		 */
		lttng_enabler_event_link_bytecode(event, enabler);

		/* TODO: merge event context. */
	}
	return 0;
}
655
656 /*
657 * Called at library load: connect the probe on all enablers matching
658 * this event.
659 * Called with session mutex held.
660 */
661 int lttng_fix_pending_events(void)
662 {
663 struct lttng_session *session;
664
665 cds_list_for_each_entry(session, &sessions, node) {
666 lttng_session_lazy_sync_enablers(session);
667 }
668 return 0;
669 }
670
/*
 * For each session of the owner thread, execute pending statedump.
 * Only dump state for the sessions owned by the caller thread, because
 * we don't keep ust_lock across the entire iteration.
 */
void lttng_handle_pending_statedump(void *owner)
{
	struct lttng_session *session;

	/* Execute state dump */
	lttng_ust_baddr_statedump(owner);

	/*
	 * Clear pending state dump. NOTE(review): a nonzero return from
	 * ust_lock() appears to signal "abort work" (the lock is still
	 * released via the end label) — confirm against ust_lock's
	 * contract in lttng-tracer-core.h.
	 */
	if (ust_lock()) {
		goto end;
	}
	cds_list_for_each_entry(session, &sessions, node) {
		if (session->owner != owner)
			continue;
		if (!session->statedump_pending)
			continue;
		session->statedump_pending = 0;
	}
end:
	ust_unlock();
	return;
}
698
/*
 * Only used internally at session destruction.
 * Unlink the event and free its context, filter runtime and enabler
 * back-references. Caller must have unregistered the probe and waited
 * for a grace period first (see lttng_session_destroy).
 */
static
void _lttng_event_destroy(struct lttng_event *event)
{
	struct lttng_enabler_ref *enabler_ref, *tmp_enabler_ref;

	cds_list_del(&event->node);
	lttng_destroy_context(event->ctx);
	lttng_free_event_filter_runtime(event);
	/* Free event enabler refs */
	cds_list_for_each_entry_safe(enabler_ref, tmp_enabler_ref,
			&event->enablers_ref_head, node)
		free(enabler_ref);
	free(event);
}
716
717 void lttng_ust_events_exit(void)
718 {
719 struct lttng_session *session, *tmpsession;
720
721 cds_list_for_each_entry_safe(session, tmpsession, &sessions, node)
722 lttng_session_destroy(session);
723 }
724
/*
 * Enabler management.
 *
 * Allocate a new enabler of the given match type (event or wildcard)
 * for the given channel, add it to the session's enabler list, and
 * lazily sync so it takes effect if the session is already active.
 * Returns NULL on allocation failure.
 */
struct lttng_enabler *lttng_enabler_create(enum lttng_enabler_type type,
		struct lttng_ust_event *event_param,
		struct lttng_channel *chan)
{
	struct lttng_enabler *enabler;

	enabler = zmalloc(sizeof(*enabler));
	if (!enabler)
		return NULL;
	enabler->type = type;
	CDS_INIT_LIST_HEAD(&enabler->filter_bytecode_head);
	CDS_INIT_LIST_HEAD(&enabler->excluder_head);
	memcpy(&enabler->event_param, event_param,
		sizeof(enabler->event_param));
	enabler->chan = chan;
	/* ctx left NULL */
	/*
	 * The "disable" event create comm field has been added to fix a
	 * race between event creation (of a started trace) and enabling
	 * filtering. New session daemon always set the "disable" field
	 * to 1, and are aware that they need to explicitly enable the
	 * event. Older session daemon (within same ABI) leave it at 0,
	 * and therefore we need to enable it here, keeping the original
	 * racy behavior.
	 */
	enabler->enabled = !event_param->disabled;
	cds_list_add(&enabler->node, &enabler->chan->session->enablers_head);
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	return enabler;
}
758
/*
 * Mark the enabler enabled and lazily re-sync its session so matching
 * events get enabled if the session is active. Always returns 0.
 */
int lttng_enabler_enable(struct lttng_enabler *enabler)
{
	enabler->enabled = 1;
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	return 0;
}
765
/*
 * Mark the enabler disabled and lazily re-sync its session so matching
 * events get disabled if no other enabler keeps them on. Always
 * returns 0.
 */
int lttng_enabler_disable(struct lttng_enabler *enabler)
{
	enabler->enabled = 0;
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	return 0;
}
772
/*
 * Attach a filter bytecode node to the enabler (ownership transfers to
 * the enabler; freed in lttng_enabler_destroy) and lazily re-sync so
 * the filter is linked to matching events. Always returns 0.
 */
int lttng_enabler_attach_bytecode(struct lttng_enabler *enabler,
		struct lttng_ust_filter_bytecode_node *bytecode)
{
	bytecode->enabler = enabler;
	cds_list_add_tail(&bytecode->node, &enabler->filter_bytecode_head);
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	return 0;
}
781
/*
 * Attach an exclusion node to the enabler (ownership transfers to the
 * enabler; freed in lttng_enabler_destroy) and lazily re-sync so
 * excluded events are filtered out. Always returns 0.
 */
int lttng_enabler_attach_exclusion(struct lttng_enabler *enabler,
		struct lttng_ust_excluder_node *excluder)
{
	excluder->enabler = enabler;
	cds_list_add_tail(&excluder->node, &enabler->excluder_head);
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	return 0;
}
790
/*
 * Append the requested context field to *ctx. Returns 0 on success,
 * -EPERM if the session has ever been active, -EINVAL for an unknown
 * context type.
 */
int lttng_attach_context(struct lttng_ust_context *context_param,
		struct lttng_ctx **ctx, struct lttng_session *session)
{
	/*
	 * We cannot attach a context after trace has been started for a
	 * session because the metadata does not allow expressing this
	 * information outside of the original channel scope.
	 */
	if (session->been_active)
		return -EPERM;

	switch (context_param->ctx) {
	case LTTNG_UST_CONTEXT_PTHREAD_ID:
		return lttng_add_pthread_id_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_VTID:
		return lttng_add_vtid_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_VPID:
		return lttng_add_vpid_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_PROCNAME:
		return lttng_add_procname_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_IP:
		return lttng_add_ip_to_ctx(ctx);
	default:
		return -EINVAL;
	}
}
817
/*
 * Per-enabler context attachment is not implemented: the real logic is
 * compiled out below and -ENOSYS is always returned. Kept so the
 * command-handling ABI stays in place.
 */
int lttng_enabler_attach_context(struct lttng_enabler *enabler,
		struct lttng_ust_context *context_param)
{
#if 0	// disabled for now.
	struct lttng_session *session = enabler->chan->session;
	int ret;

	ret = lttng_attach_context(context_param, &enabler->ctx,
			session);
	if (ret)
		return ret;
	lttng_session_lazy_sync_enablers(enabler->chan->session);
#endif
	return -ENOSYS;
}
833
834 static
835 void lttng_enabler_destroy(struct lttng_enabler *enabler)
836 {
837 struct lttng_ust_filter_bytecode_node *filter_node, *tmp_filter_node;
838 struct lttng_ust_excluder_node *excluder_node, *tmp_excluder_node;
839
840 /* Destroy filter bytecode */
841 cds_list_for_each_entry_safe(filter_node, tmp_filter_node,
842 &enabler->filter_bytecode_head, node) {
843 free(filter_node);
844 }
845
846 /* Destroy excluders */
847 cds_list_for_each_entry_safe(excluder_node, tmp_excluder_node,
848 &enabler->excluder_head, node) {
849 free(excluder_node);
850 }
851
852 /* Destroy contexts */
853 lttng_destroy_context(enabler->ctx);
854
855 cds_list_del(&enabler->node);
856 free(enabler);
857 }
858
/*
 * lttng_session_sync_enablers should be called just before starting a
 * session.
 *
 * Two passes: (1) materialize events and backward refs for every
 * enabler; (2) recompute each event's enabled state, keep tracepoint
 * registration in sync with it, and refresh filter state.
 */
static
void lttng_session_sync_enablers(struct lttng_session *session)
{
	struct lttng_enabler *enabler;
	struct lttng_event *event;

	cds_list_for_each_entry(enabler, &session->enablers_head, node)
		lttng_enabler_ref_events(enabler);
	/*
	 * For each event, if at least one of its enablers is enabled,
	 * and its channel and session transient states are enabled, we
	 * enable the event, else we disable it.
	 */
	cds_list_for_each_entry(event, &session->events_head, node) {
		struct lttng_enabler_ref *enabler_ref;
		struct lttng_bytecode_runtime *runtime;
		int enabled = 0, has_enablers_without_bytecode = 0;

		/* Enable events: any single enabled enabler suffices. */
		cds_list_for_each_entry(enabler_ref,
				&event->enablers_ref_head, node) {
			if (enabler_ref->ref->enabled) {
				enabled = 1;
				break;
			}
		}
		/*
		 * Enabled state is based on union of enablers, with
		 * intersection of session and channel transient enable
		 * states.
		 */
		enabled = enabled && session->tstate && event->chan->tstate;

		CMM_STORE_SHARED(event->enabled, enabled);
		/*
		 * Sync tracepoint registration with event enabled
		 * state.
		 */
		if (enabled) {
			if (!event->registered)
				register_event(event);
		} else {
			if (event->registered)
				unregister_event(event);
		}

		/* Check if has enablers without bytecode enabled */
		cds_list_for_each_entry(enabler_ref,
				&event->enablers_ref_head, node) {
			if (enabler_ref->ref->enabled
					&& cds_list_empty(&enabler_ref->ref->filter_bytecode_head)) {
				has_enablers_without_bytecode = 1;
				break;
			}
		}
		event->has_enablers_without_bytecode =
			has_enablers_without_bytecode;

		/* Enable filters */
		cds_list_for_each_entry(runtime,
				&event->bytecode_runtime_head, node) {
			lttng_filter_sync_state(runtime);
		}
	}
}
928
929 /*
930 * Apply enablers to session events, adding events to session if need
931 * be. It is required after each modification applied to an active
932 * session, and right before session "start".
933 * "lazy" sync means we only sync if required.
934 */
935 static
936 void lttng_session_lazy_sync_enablers(struct lttng_session *session)
937 {
938 /* We can skip if session is not active */
939 if (!session->active)
940 return;
941 lttng_session_sync_enablers(session);
942 }
This page took 0.052701 seconds and 4 git commands to generate.