Fix: filter attach vs event enable race
[lttng-ust.git] / liblttng-ust / lttng-events.c
/*
 * lttng-events.c
 *
 * Holds LTTng per-session event registry.
 *
 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _GNU_SOURCE
#include <stdio.h>
#include <urcu/list.h>
#include <urcu/hlist.h>
#include <pthread.h>
#include <errno.h>
#include <sys/shm.h>
#include <sys/ipc.h>
#include <stdint.h>
#include <stddef.h>
#include <inttypes.h>
#include <time.h>
#include <lttng/ust-endian.h>
#include "clock.h"

#include <urcu-bp.h>
#include <urcu/compiler.h>
#include <urcu/uatomic.h>
#include <urcu/arch.h>

#include <lttng/tracepoint.h>
#include <lttng/ust-events.h>

#include <usterr-signal-safe.h>
#include <helper.h>
#include <lttng/ust-ctl.h>
#include <ust-comm.h>
#include "error.h"
#include "compat.h"
#include "lttng-ust-uuid.h"

#include "tracepoint-internal.h"
#include "lttng-tracer.h"
#include "lttng-tracer-core.h"
#include "lttng-ust-baddr.h"
#include "wait.h"
#include "../libringbuffer/shm.h"
#include "jhash.h"

/*
 * All operations within this file are called by the communication
 * thread, under ust_lock protection.
 */

static CDS_LIST_HEAD(sessions);

struct cds_list_head *_lttng_get_sessions(void)
{
	return &sessions;
}

static void _lttng_event_destroy(struct lttng_event *event);

static
void lttng_session_lazy_sync_enablers(struct lttng_session *session);
static
void lttng_session_sync_enablers(struct lttng_session *session);
static
void lttng_enabler_destroy(struct lttng_enabler *enabler);

/*
 * Called with ust lock held.
 */
int lttng_session_active(void)
{
	struct lttng_session *iter;

	cds_list_for_each_entry(iter, &sessions, node) {
		if (iter->active)
			return 1;
	}
	return 0;
}

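/*
 * Match an event loglevel against the loglevel constraint carried by an
 * enabler. A requested loglevel of -1 matches any loglevel, for both
 * the RANGE and SINGLE request types.
 */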
static
int lttng_loglevel_match(int loglevel,
		unsigned int has_loglevel,
		enum lttng_ust_loglevel_type req_type,
		int req_loglevel)
{
	if (!has_loglevel)
		loglevel = TRACE_DEFAULT;
	switch (req_type) {
	case LTTNG_UST_LOGLEVEL_RANGE:
		if (loglevel <= req_loglevel || req_loglevel == -1)
			return 1;
		else
			return 0;
	case LTTNG_UST_LOGLEVEL_SINGLE:
		if (loglevel == req_loglevel || req_loglevel == -1)
			return 1;
		else
			return 0;
	case LTTNG_UST_LOGLEVEL_ALL:
	default:
		return 1;
	}
}

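/*
 * Wait for a grace period of the RCU flavour used here (liburcu-bp),
 * so that in-flight probe callbacks, which run under the RCU read-side
 * lock, have completed before tracer objects are reclaimed.
 */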
void synchronize_trace(void)
{
	synchronize_rcu();
}

struct lttng_session *lttng_session_create(void)
{
	struct lttng_session *session;
	int i;

	session = zmalloc(sizeof(struct lttng_session));
	if (!session)
		return NULL;
	CDS_INIT_LIST_HEAD(&session->chan_head);
	CDS_INIT_LIST_HEAD(&session->events_head);
	CDS_INIT_LIST_HEAD(&session->enablers_head);
	for (i = 0; i < LTTNG_UST_EVENT_HT_SIZE; i++)
		CDS_INIT_HLIST_HEAD(&session->events_ht.table[i]);
	cds_list_add(&session->node, &sessions);
	return session;
}

/*
 * Only used internally at session destruction.
 */
static
void _lttng_channel_unmap(struct lttng_channel *lttng_chan)
{
	struct channel *chan;
	struct lttng_ust_shm_handle *handle;

	cds_list_del(&lttng_chan->node);
	lttng_destroy_context(lttng_chan->ctx);
	chan = lttng_chan->chan;
	handle = lttng_chan->handle;
	/*
	 * note: lttng_chan is private data contained within handle. It
	 * will be freed along with the handle.
	 */
	channel_destroy(chan, handle, 0);
}

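/*
 * Connect the event to its tracepoint: the probe callback described by
 * event->desc is registered on the tracepoint, with the event as
 * private data. Called under ust_lock protection (see comment at the
 * top of this file).
 */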
static
void register_event(struct lttng_event *event)
{
	int ret;
	const struct lttng_event_desc *desc;

	assert(event->registered == 0);
	desc = event->desc;
	ret = __tracepoint_probe_register(desc->name,
			desc->probe_callback,
			event, desc->signature);
	WARN_ON_ONCE(ret);
	if (!ret)
		event->registered = 1;
}

static
void unregister_event(struct lttng_event *event)
{
	int ret;
	const struct lttng_event_desc *desc;

	assert(event->registered == 1);
	desc = event->desc;
	ret = __tracepoint_probe_unregister(desc->name,
			desc->probe_callback,
			event);
	WARN_ON_ONCE(ret);
	if (!ret)
		event->registered = 0;
}

/*
 * Only used internally at session destruction.
 */
static
void _lttng_event_unregister(struct lttng_event *event)
{
	if (event->registered)
		unregister_event(event);
}

void lttng_session_destroy(struct lttng_session *session)
{
	struct lttng_channel *chan, *tmpchan;
	struct lttng_event *event, *tmpevent;
	struct lttng_enabler *enabler, *tmpenabler;

	CMM_ACCESS_ONCE(session->active) = 0;
	cds_list_for_each_entry(event, &session->events_head, node) {
		_lttng_event_unregister(event);
	}
	synchronize_trace();	/* Wait for in-flight events to complete */
	cds_list_for_each_entry_safe(enabler, tmpenabler,
			&session->enablers_head, node)
		lttng_enabler_destroy(enabler);
	cds_list_for_each_entry_safe(event, tmpevent,
			&session->events_head, node)
		_lttng_event_destroy(event);
	cds_list_for_each_entry_safe(chan, tmpchan, &session->chan_head, node)
		_lttng_channel_unmap(chan);
	cds_list_del(&session->node);
	free(session);
}

int lttng_session_enable(struct lttng_session *session)
{
	int ret = 0;
	struct lttng_channel *chan;
	int notify_socket;

	if (session->active) {
		ret = -EBUSY;
		goto end;
	}

	notify_socket = lttng_get_notify_socket(session->owner);
	if (notify_socket < 0)
		return notify_socket;

	/* Set transient enabler state to "enabled" */
	session->tstate = 1;

	/*
	 * Snapshot the number of events per channel to know the type of header
	 * we need to use.
	 */
	cds_list_for_each_entry(chan, &session->chan_head, node) {
		const struct lttng_ctx *ctx;
		const struct lttng_ctx_field *fields = NULL;
		size_t nr_fields = 0;
		uint32_t chan_id;

		/* don't change it if session stop/restart */
		if (chan->header_type)
			continue;
		ctx = chan->ctx;
		if (ctx) {
			nr_fields = ctx->nr_fields;
			fields = ctx->fields;
		}
		ret = ustcomm_register_channel(notify_socket,
				session->objd,
				chan->objd,
				nr_fields,
				fields,
				&chan_id,
				&chan->header_type);
		if (ret) {
			DBG("Error (%d) registering channel to sessiond", ret);
			return ret;
		}
		if (chan_id != chan->id) {
			DBG("Error: channel registration id (%u) does not match id assigned at creation (%u)",
				chan_id, chan->id);
			return -EINVAL;
		}
	}

	/* We need to sync enablers with session before activation. */
	lttng_session_sync_enablers(session);

	/* Set atomically the state to "active" */
	CMM_ACCESS_ONCE(session->active) = 1;
	CMM_ACCESS_ONCE(session->been_active) = 1;

	session->statedump_pending = 1;
	lttng_ust_sockinfo_session_enabled(session->owner);
end:
	return ret;
}

int lttng_session_disable(struct lttng_session *session)
{
	int ret = 0;

	if (!session->active) {
		ret = -EBUSY;
		goto end;
	}
	/* Set atomically the state to "inactive" */
	CMM_ACCESS_ONCE(session->active) = 0;

	/* Set transient enabler state to "disabled" */
	session->tstate = 0;
	lttng_session_sync_enablers(session);
end:
	return ret;
}

int lttng_channel_enable(struct lttng_channel *channel)
{
	int ret = 0;

	if (channel->enabled) {
		ret = -EBUSY;
		goto end;
	}
	/* Set transient enabler state to "enabled" */
	channel->tstate = 1;
	lttng_session_sync_enablers(channel->session);
	/* Set atomically the state to "enabled" */
	CMM_ACCESS_ONCE(channel->enabled) = 1;
end:
	return ret;
}

int lttng_channel_disable(struct lttng_channel *channel)
{
	int ret = 0;

	if (!channel->enabled) {
		ret = -EBUSY;
		goto end;
	}
	/* Set atomically the state to "disabled" */
	CMM_ACCESS_ONCE(channel->enabled) = 0;
	/* Set transient enabler state to "disabled" */
	channel->tstate = 0;
	lttng_session_sync_enablers(channel->session);
end:
	return ret;
}

/*
 * Supports event creation while tracing session is active.
 */
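/*
 * Duplicates are detected through the session's event hash table, keyed
 * by a jhash of the event name; the event ID itself is assigned by the
 * session daemon over the notify socket.
 */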
static
int lttng_event_create(const struct lttng_event_desc *desc,
		struct lttng_channel *chan)
{
	const char *event_name = desc->name;
	struct lttng_event *event;
	struct lttng_session *session = chan->session;
	struct cds_hlist_head *head;
	struct cds_hlist_node *node;
	int ret = 0;
	size_t name_len = strlen(event_name);
	uint32_t hash;
	int notify_socket, loglevel;
	const char *uri;

	hash = jhash(event_name, name_len, 0);
	head = &chan->session->events_ht.table[hash & (LTTNG_UST_EVENT_HT_SIZE - 1)];
	cds_hlist_for_each_entry(event, node, head, hlist) {
		assert(event->desc);
		if (!strncmp(event->desc->name, desc->name,
				LTTNG_UST_SYM_NAME_LEN - 1)
				&& chan == event->chan) {
			ret = -EEXIST;
			goto exist;
		}
	}

	notify_socket = lttng_get_notify_socket(session->owner);
	if (notify_socket < 0) {
		ret = notify_socket;
		goto socket_error;
	}

	/*
	 * Check if the loglevel matches. Refuse to connect the event if not.
	 */
	event = zmalloc(sizeof(struct lttng_event));
	if (!event) {
		ret = -ENOMEM;
		goto cache_error;
	}
	event->chan = chan;

	/* Event will be enabled by enabler sync. */
	event->enabled = 0;
	event->registered = 0;
	CDS_INIT_LIST_HEAD(&event->bytecode_runtime_head);
	CDS_INIT_LIST_HEAD(&event->enablers_ref_head);
	event->desc = desc;

	if (desc->loglevel)
		loglevel = *(*event->desc->loglevel);
	else
		loglevel = TRACE_DEFAULT;
	if (desc->u.ext.model_emf_uri)
		uri = *(desc->u.ext.model_emf_uri);
	else
		uri = NULL;

	/* Fetch event ID from sessiond */
	ret = ustcomm_register_event(notify_socket,
		session->objd,
		chan->objd,
		event_name,
		loglevel,
		desc->signature,
		desc->nr_fields,
		desc->fields,
		uri,
		&event->id);
	if (ret < 0) {
		DBG("Error (%d) registering event to sessiond", ret);
		goto sessiond_register_error;
	}

	/* Populate lttng_event structure before tracepoint registration. */
	cmm_smp_wmb();
	cds_list_add(&event->node, &chan->session->events_head);
	cds_hlist_add_head(&event->hlist, head);
	return 0;

sessiond_register_error:
	free(event);
cache_error:
socket_error:
exist:
	return ret;
}

static
int lttng_desc_match_wildcard_enabler(const struct lttng_event_desc *desc,
		struct lttng_enabler *enabler)
{
	int loglevel = 0;
	unsigned int has_loglevel = 0;

	assert(enabler->type == LTTNG_ENABLER_WILDCARD);
	/* Compare excluding final '*' */
	if (strncmp(desc->name, enabler->event_param.name,
			strlen(enabler->event_param.name) - 1))
		return 0;
	if (desc->loglevel) {
		loglevel = *(*desc->loglevel);
		has_loglevel = 1;
	}
	if (!lttng_loglevel_match(loglevel,
			has_loglevel,
			enabler->event_param.loglevel_type,
			enabler->event_param.loglevel))
		return 0;
	return 1;
}

static
int lttng_desc_match_event_enabler(const struct lttng_event_desc *desc,
		struct lttng_enabler *enabler)
{
	int loglevel = 0;
	unsigned int has_loglevel = 0;

	assert(enabler->type == LTTNG_ENABLER_EVENT);
	if (strcmp(desc->name, enabler->event_param.name))
		return 0;
	if (desc->loglevel) {
		loglevel = *(*desc->loglevel);
		has_loglevel = 1;
	}
	if (!lttng_loglevel_match(loglevel,
			has_loglevel,
			enabler->event_param.loglevel_type,
			enabler->event_param.loglevel))
		return 0;
	return 1;
}

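/*
 * Check whether an event description matches an enabler: the name must
 * not match any attached excluder (a trailing '*' in an excluder name
 * acts as a prefix wildcard), and must then satisfy the enabler's name
 * and loglevel constraints, exact or wildcard depending on the enabler
 * type.
 */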
static
int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
		struct lttng_enabler *enabler)
{
	struct lttng_ust_excluder_node *excluder;

	/* If event matches with an excluder, return 'does not match' */
	cds_list_for_each_entry(excluder, &enabler->excluder_head, node) {
		int count;

		for (count = 0; count < excluder->excluder.count; count++) {
			int found, len;
			char *excluder_name;

			excluder_name = (char *) (excluder->excluder.names)
					+ count * LTTNG_UST_SYM_NAME_LEN;
			len = strnlen(excluder_name, LTTNG_UST_SYM_NAME_LEN);
			if (len > 0 && excluder_name[len - 1] == '*') {
				found = !strncmp(desc->name, excluder_name,
						len - 1);
			} else {
				found = !strncmp(desc->name, excluder_name,
						LTTNG_UST_SYM_NAME_LEN - 1);
			}
			if (found) {
				return 0;
			}
		}
	}
	switch (enabler->type) {
	case LTTNG_ENABLER_WILDCARD:
		return lttng_desc_match_wildcard_enabler(desc, enabler);
	case LTTNG_ENABLER_EVENT:
		return lttng_desc_match_event_enabler(desc, enabler);
	default:
		return -EINVAL;
	}
}

static
int lttng_event_match_enabler(struct lttng_event *event,
		struct lttng_enabler *enabler)
{
	if (lttng_desc_match_enabler(event->desc, enabler)
			&& event->chan == enabler->chan)
		return 1;
	else
		return 0;
}

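/*
 * Return the backward reference linking this event to the given
 * enabler, or NULL if this enabler has not been associated with the
 * event yet.
 */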
static
struct lttng_enabler_ref *lttng_event_enabler_ref(struct lttng_event *event,
		struct lttng_enabler *enabler)
{
	struct lttng_enabler_ref *enabler_ref;

	cds_list_for_each_entry(enabler_ref,
			&event->enablers_ref_head, node) {
		if (enabler_ref->ref == enabler)
			return enabler_ref;
	}
	return NULL;
}

/*
 * Create struct lttng_event if it is missing and present in the list of
 * tracepoint probes.
 */
static
void lttng_create_event_if_missing(struct lttng_enabler *enabler)
{
	struct lttng_session *session = enabler->chan->session;
	struct lttng_probe_desc *probe_desc;
	const struct lttng_event_desc *desc;
	struct lttng_event *event;
	int i;
	struct cds_list_head *probe_list;

	probe_list = lttng_get_probe_list_head();
	/*
	 * For each probe event, if we find that a probe event matches
	 * our enabler, create an associated lttng_event if not
	 * already present.
	 */
	cds_list_for_each_entry(probe_desc, probe_list, head) {
		for (i = 0; i < probe_desc->nr_events; i++) {
			int found = 0, ret;
			struct cds_hlist_head *head;
			struct cds_hlist_node *node;
			const char *event_name;
			size_t name_len;
			uint32_t hash;

			desc = probe_desc->event_desc[i];
			if (!lttng_desc_match_enabler(desc, enabler))
				continue;
			event_name = desc->name;
			name_len = strlen(event_name);

			/*
			 * Check if already created.
			 */
			hash = jhash(event_name, name_len, 0);
			head = &session->events_ht.table[hash & (LTTNG_UST_EVENT_HT_SIZE - 1)];
			cds_hlist_for_each_entry(event, node, head, hlist) {
				if (event->desc == desc
						&& event->chan == enabler->chan)
					found = 1;
			}
			if (found)
				continue;

			/*
			 * We need to create an event for this
			 * event probe.
			 */
			ret = lttng_event_create(probe_desc->event_desc[i],
					enabler->chan);
			if (ret) {
				DBG("Unable to create event %s, error %d\n",
					probe_desc->event_desc[i]->name, ret);
			}
		}
	}
}

/*
 * Create events associated with an enabler (if not already present),
 * and add backward reference from the event to the enabler.
 */
static
int lttng_enabler_ref_events(struct lttng_enabler *enabler)
{
	struct lttng_session *session = enabler->chan->session;
	struct lttng_event *event;

	/* First ensure that probe events are created for this enabler. */
	lttng_create_event_if_missing(enabler);

	/* For each event matching enabler in session event list. */
	cds_list_for_each_entry(event, &session->events_head, node) {
		struct lttng_enabler_ref *enabler_ref;

		if (!lttng_event_match_enabler(event, enabler))
			continue;

		enabler_ref = lttng_event_enabler_ref(event, enabler);
		if (!enabler_ref) {
			/*
			 * If no backward ref, create it.
			 * Add backward ref from event to enabler.
			 */
			enabler_ref = zmalloc(sizeof(*enabler_ref));
			if (!enabler_ref)
				return -ENOMEM;
			enabler_ref->ref = enabler;
			cds_list_add(&enabler_ref->node,
				&event->enablers_ref_head);
		}

		/*
		 * Link filter bytecodes if not linked yet.
		 */
		lttng_enabler_event_link_bytecode(event, enabler);

		/* TODO: merge event context. */
	}
	return 0;
}

/*
 * Called at library load: connect the probe on all enablers matching
 * this event.
 * Called with session mutex held.
 */
int lttng_fix_pending_events(void)
{
	struct lttng_session *session;

	cds_list_for_each_entry(session, &sessions, node) {
		lttng_session_lazy_sync_enablers(session);
	}
	return 0;
}

/*
 * For each session of the owner thread, execute pending statedump.
 * Only dump state for the sessions owned by the caller thread, because
 * we don't keep ust_lock across the entire iteration.
 */
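/*
 * Note: a nonzero return from ust_lock() means the critical section
 * must be skipped; in that case we fall through to the unlock path
 * without touching the per-session statedump_pending flags.
 */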
void lttng_handle_pending_statedump(void *owner)
{
	struct lttng_session *session;

	/* Execute state dump */
	lttng_ust_baddr_statedump(owner);

	/* Clear pending state dump */
	if (ust_lock()) {
		goto end;
	}
	cds_list_for_each_entry(session, &sessions, node) {
		if (session->owner != owner)
			continue;
		if (!session->statedump_pending)
			continue;
		session->statedump_pending = 0;
	}
end:
	ust_unlock();
	return;
}

/*
 * Only used internally at session destruction.
 */
static
void _lttng_event_destroy(struct lttng_event *event)
{
	struct lttng_enabler_ref *enabler_ref, *tmp_enabler_ref;

	cds_list_del(&event->node);
	lttng_destroy_context(event->ctx);
	lttng_free_event_filter_runtime(event);
	/* Free event enabler refs */
	cds_list_for_each_entry_safe(enabler_ref, tmp_enabler_ref,
			&event->enablers_ref_head, node)
		free(enabler_ref);
	free(event);
}

void lttng_ust_events_exit(void)
{
	struct lttng_session *session, *tmpsession;

	cds_list_for_each_entry_safe(session, tmpsession, &sessions, node)
		lttng_session_destroy(session);
}

/*
 * Enabler management.
 */
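/*
 * An enabler describes an event matching request coming from the
 * session daemon: an exact name or trailing-'*' wildcard, an optional
 * loglevel constraint, and optional filter bytecode and excluders.
 * Events matching an enabler are created and enabled lazily by the
 * sync functions below.
 */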
struct lttng_enabler *lttng_enabler_create(enum lttng_enabler_type type,
		struct lttng_ust_event *event_param,
		struct lttng_channel *chan)
{
	struct lttng_enabler *enabler;

	enabler = zmalloc(sizeof(*enabler));
	if (!enabler)
		return NULL;
	enabler->type = type;
	CDS_INIT_LIST_HEAD(&enabler->filter_bytecode_head);
	CDS_INIT_LIST_HEAD(&enabler->excluder_head);
	memcpy(&enabler->event_param, event_param,
		sizeof(enabler->event_param));
	enabler->chan = chan;
	/* ctx left NULL */
	/*
	 * The "disable" event create comm field has been added to fix a
	 * race between event creation (of a started trace) and enabling
	 * filtering. New session daemons always set the "disable" field
	 * to 1, and are aware that they need to explicitly enable the
	 * event. Older session daemons (within the same ABI) leave it at
	 * 0, and therefore we need to enable it here, keeping the
	 * original racy behavior.
	 */
	enabler->enabled = !event_param->disabled;
	cds_list_add(&enabler->node, &enabler->chan->session->enablers_head);
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	return enabler;
}

int lttng_enabler_enable(struct lttng_enabler *enabler)
{
	enabler->enabled = 1;
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	return 0;
}

int lttng_enabler_disable(struct lttng_enabler *enabler)
{
	enabler->enabled = 0;
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	return 0;
}

int lttng_enabler_attach_bytecode(struct lttng_enabler *enabler,
		struct lttng_ust_filter_bytecode_node *bytecode)
{
	bytecode->enabler = enabler;
	cds_list_add_tail(&bytecode->node, &enabler->filter_bytecode_head);
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	return 0;
}

int lttng_enabler_attach_exclusion(struct lttng_enabler *enabler,
		struct lttng_ust_excluder_node *excluder)
{
	excluder->enabler = enabler;
	cds_list_add_tail(&excluder->node, &enabler->excluder_head);
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	return 0;
}

int lttng_attach_context(struct lttng_ust_context *context_param,
		struct lttng_ctx **ctx, struct lttng_session *session)
{
	/*
	 * We cannot attach a context after trace has been started for a
	 * session because the metadata does not allow expressing this
	 * information outside of the original channel scope.
	 */
	if (session->been_active)
		return -EPERM;

	switch (context_param->ctx) {
	case LTTNG_UST_CONTEXT_PTHREAD_ID:
		return lttng_add_pthread_id_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_VTID:
		return lttng_add_vtid_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_VPID:
		return lttng_add_vpid_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_PROCNAME:
		return lttng_add_procname_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_IP:
		return lttng_add_ip_to_ctx(ctx);
	default:
		return -EINVAL;
	}
}

int lttng_enabler_attach_context(struct lttng_enabler *enabler,
		struct lttng_ust_context *context_param)
{
#if 0 // disabled for now.
	struct lttng_session *session = enabler->chan->session;
	int ret;

	ret = lttng_attach_context(context_param, &enabler->ctx,
			session);
	if (ret)
		return ret;
	lttng_session_lazy_sync_enablers(enabler->chan->session);
#endif
	return -ENOSYS;
}

static
void lttng_enabler_destroy(struct lttng_enabler *enabler)
{
	struct lttng_ust_filter_bytecode_node *filter_node, *tmp_filter_node;
	struct lttng_ust_excluder_node *excluder_node, *tmp_excluder_node;

	/* Destroy filter bytecode */
	cds_list_for_each_entry_safe(filter_node, tmp_filter_node,
			&enabler->filter_bytecode_head, node) {
		free(filter_node);
	}

	/* Destroy excluders */
	cds_list_for_each_entry_safe(excluder_node, tmp_excluder_node,
			&enabler->excluder_head, node) {
		free(excluder_node);
	}

	/* Destroy contexts */
	lttng_destroy_context(enabler->ctx);

	cds_list_del(&enabler->node);
	free(enabler);
}

/*
 * lttng_session_sync_enablers should be called just before starting a
 * session.
 */
static
void lttng_session_sync_enablers(struct lttng_session *session)
{
	struct lttng_enabler *enabler;
	struct lttng_event *event;

	cds_list_for_each_entry(enabler, &session->enablers_head, node)
		lttng_enabler_ref_events(enabler);
	/*
	 * For each event, if at least one of its enablers is enabled,
	 * and its channel and session transient states are enabled, we
	 * enable the event, else we disable it.
	 */
	cds_list_for_each_entry(event, &session->events_head, node) {
		struct lttng_enabler_ref *enabler_ref;
		struct lttng_bytecode_runtime *runtime;
		int enabled = 0, has_enablers_without_bytecode = 0;

		/* Enable events */
		cds_list_for_each_entry(enabler_ref,
				&event->enablers_ref_head, node) {
			if (enabler_ref->ref->enabled) {
				enabled = 1;
				break;
			}
		}
		/*
		 * Enabled state is based on union of enablers, with
		 * intersection of session and channel transient enable
		 * states.
		 */
		enabled = enabled && session->tstate && event->chan->tstate;

		CMM_STORE_SHARED(event->enabled, enabled);
		/*
		 * Sync tracepoint registration with event enabled
		 * state.
		 */
		if (enabled) {
			if (!event->registered)
				register_event(event);
		} else {
			if (event->registered)
				unregister_event(event);
		}

		/* Check if has enablers without bytecode enabled */
		cds_list_for_each_entry(enabler_ref,
				&event->enablers_ref_head, node) {
			if (enabler_ref->ref->enabled
					&& cds_list_empty(&enabler_ref->ref->filter_bytecode_head)) {
				has_enablers_without_bytecode = 1;
				break;
			}
		}
		event->has_enablers_without_bytecode =
			has_enablers_without_bytecode;

		/* Enable filters */
		cds_list_for_each_entry(runtime,
				&event->bytecode_runtime_head, node) {
			lttng_filter_sync_state(runtime);
		}
	}
}

/*
 * Apply enablers to session events, adding events to session if need
 * be. It is required after each modification applied to an active
 * session, and right before session "start".
 * "lazy" sync means we only sync if required.
 */
static
void lttng_session_lazy_sync_enablers(struct lttng_session *session)
{
	/* We can skip if session is not active */
	if (!session->active)
		return;
	lttng_session_sync_enablers(session);
}