bytecode: generalize `struct lttng_filter_bytecode_node`
[lttng-modules.git] / src / lttng-events.c
1 /* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
2 *
3 * lttng-events.c
4 *
5 * Holds LTTng per-session event registry.
6 *
7 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 */
9
10 /*
11 * This page_alloc.h wrapper needs to be included before gfpflags.h because it
12 * overrides a function with a define.
13 */
14 #include "wrapper/page_alloc.h"
15
16 #include <linux/module.h>
17 #include <linux/mutex.h>
18 #include <linux/sched.h>
19 #include <linux/slab.h>
20 #include <linux/jiffies.h>
21 #include <linux/utsname.h>
22 #include <linux/err.h>
23 #include <linux/seq_file.h>
24 #include <linux/file.h>
25 #include <linux/anon_inodes.h>
26 #include <wrapper/file.h>
27 #include <linux/uaccess.h>
28 #include <linux/vmalloc.h>
29 #include <linux/dmi.h>
30
31 #include <wrapper/uuid.h>
32 #include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_mappings() */
33 #include <wrapper/random.h>
34 #include <wrapper/tracepoint.h>
35 #include <wrapper/list.h>
36 #include <wrapper/types.h>
37 #include <lttng/kernel-version.h>
38 #include <lttng/events.h>
39 #include <lttng/tracer.h>
40 #include <lttng/event-notifier-notification.h>
41 #include <lttng/abi-old.h>
42 #include <lttng/endian.h>
43 #include <lttng/string-utils.h>
44 #include <lttng/utils.h>
45 #include <ringbuffer/backend.h>
46 #include <ringbuffer/frontend.h>
47 #include <wrapper/time.h>
48
/* Initial size (bytes) of each session's metadata cache buffer. */
#define METADATA_CACHE_DEFAULT_SIZE 4096

/* All tracing sessions; protected by sessions_mutex. */
static LIST_HEAD(sessions);
/* All event notifier groups; protected by sessions_mutex. */
static LIST_HEAD(event_notifier_groups);
/* Registered ring-buffer transports; protected by sessions_mutex. */
static LIST_HEAD(lttng_transport_list);
/*
 * Protect the sessions and metadata caches.
 */
static DEFINE_MUTEX(sessions_mutex);
/* Slab caches for struct lttng_event and struct lttng_event_notifier. */
static struct kmem_cache *event_cache;
static struct kmem_cache *event_notifier_cache;

/* Enabler synchronization helpers (defined later in this file). */
static void lttng_session_lazy_sync_event_enablers(struct lttng_session *session);
static void lttng_session_sync_event_enablers(struct lttng_session *session);
static void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler);
static void lttng_event_notifier_enabler_destroy(struct lttng_event_notifier_enabler *event_notifier_enabler);
static void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group);

/* Object teardown / unregistration helpers (defined later in this file). */
static void _lttng_event_destroy(struct lttng_event *event);
static void _lttng_event_notifier_destroy(struct lttng_event_notifier *event_notifier);
static void _lttng_channel_destroy(struct lttng_channel *chan);
static int _lttng_event_unregister(struct lttng_event *event);
static int _lttng_event_notifier_unregister(struct lttng_event_notifier *event_notifier);
/* CTF metadata statedump helpers (defined later in this file). */
static
int _lttng_event_metadata_statedump(struct lttng_session *session,
				  struct lttng_channel *chan,
				  struct lttng_event *event);
static
int _lttng_session_metadata_statedump(struct lttng_session *session);
static
void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream);
static
int _lttng_type_statedump(struct lttng_session *session,
		const struct lttng_type *type,
		size_t nesting);
static
int _lttng_field_statedump(struct lttng_session *session,
		const struct lttng_event_field *field,
		size_t nesting);
88
/*
 * Wait for a grace period so that no probe is still executing.
 *
 * On kernels >= 5.1 a single synchronize_rcu() is used; older kernels
 * use synchronize_sched(). On PREEMPT_RT(_FULL) configurations an
 * additional synchronize_rcu() is issued as well.
 */
void synchronize_trace(void)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,1,0))
	synchronize_rcu();
#else
	synchronize_sched();
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
#ifdef CONFIG_PREEMPT_RT_FULL
	synchronize_rcu();
#endif
#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
	/* Pre-3.4 kernels spell the RT config option CONFIG_PREEMPT_RT. */
#ifdef CONFIG_PREEMPT_RT
	synchronize_rcu();
#endif
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
}
107
/* Take the global sessions mutex (see sessions_mutex above). */
void lttng_lock_sessions(void)
{
	mutex_lock(&sessions_mutex);
}
112
/* Release the global sessions mutex. */
void lttng_unlock_sessions(void)
{
	mutex_unlock(&sessions_mutex);
}
117
118 static struct lttng_transport *lttng_transport_find(const char *name)
119 {
120 struct lttng_transport *transport;
121
122 list_for_each_entry(transport, &lttng_transport_list, node) {
123 if (!strcmp(transport->name, name))
124 return transport;
125 }
126 return NULL;
127 }
128
129 /*
130 * Called with sessions lock held.
131 */
132 int lttng_session_active(void)
133 {
134 struct lttng_session *iter;
135
136 list_for_each_entry(iter, &sessions, list) {
137 if (iter->active)
138 return 1;
139 }
140 return 0;
141 }
142
/*
 * Allocate and register a new tracing session.
 *
 * Builds the session object, its metadata cache (initial buffer of
 * METADATA_CACHE_DEFAULT_SIZE bytes), the per-session event hash table
 * and the six per-ID trackers, then links the session on the global
 * sessions list. Returns the new session, or NULL on allocation failure.
 */
struct lttng_session *lttng_session_create(void)
{
	struct lttng_session *session;
	struct lttng_metadata_cache *metadata_cache;
	int i;

	mutex_lock(&sessions_mutex);
	session = lttng_kvzalloc(sizeof(struct lttng_session), GFP_KERNEL);
	if (!session)
		goto err;
	INIT_LIST_HEAD(&session->chan);
	INIT_LIST_HEAD(&session->events);
	lttng_guid_gen(&session->uuid);

	metadata_cache = kzalloc(sizeof(struct lttng_metadata_cache),
			GFP_KERNEL);
	if (!metadata_cache)
		goto err_free_session;
	metadata_cache->data = vzalloc(METADATA_CACHE_DEFAULT_SIZE);
	if (!metadata_cache->data)
		goto err_free_cache;
	metadata_cache->cache_alloc = METADATA_CACHE_DEFAULT_SIZE;
	/* Cache is shared with metadata streams; refcounted via kref. */
	kref_init(&metadata_cache->refcount);
	mutex_init(&metadata_cache->lock);
	session->metadata_cache = metadata_cache;
	INIT_LIST_HEAD(&metadata_cache->metadata_stream);
	/* The cache keeps its own copy of the session UUID. */
	memcpy(&metadata_cache->uuid, &session->uuid,
		sizeof(metadata_cache->uuid));
	INIT_LIST_HEAD(&session->enablers_head);
	for (i = 0; i < LTTNG_EVENT_HT_SIZE; i++)
		INIT_HLIST_HEAD(&session->events_ht.table[i]);
	list_add(&session->list, &sessions);
	/* Wire back-pointers and type tags for each ID tracker. */
	session->pid_tracker.session = session;
	session->pid_tracker.tracker_type = TRACKER_PID;
	session->vpid_tracker.session = session;
	session->vpid_tracker.tracker_type = TRACKER_VPID;
	session->uid_tracker.session = session;
	session->uid_tracker.tracker_type = TRACKER_UID;
	session->vuid_tracker.session = session;
	session->vuid_tracker.tracker_type = TRACKER_VUID;
	session->gid_tracker.session = session;
	session->gid_tracker.tracker_type = TRACKER_GID;
	session->vgid_tracker.session = session;
	session->vgid_tracker.tracker_type = TRACKER_VGID;
	mutex_unlock(&sessions_mutex);
	return session;

err_free_cache:
	kfree(metadata_cache);
err_free_session:
	lttng_kvfree(session);
err:
	mutex_unlock(&sessions_mutex);
	return NULL;
}
198
/*
 * Allocate and register a new event notifier group.
 *
 * Pins the "relay-event-notifier" transport module and creates the ring
 * buffer channel used to carry event notifier notifications. Returns the
 * new group, or NULL if the transport is missing, its module cannot be
 * pinned, or an allocation/channel creation fails.
 */
struct lttng_event_notifier_group *lttng_event_notifier_group_create(void)
{
	struct lttng_transport *transport = NULL;
	struct lttng_event_notifier_group *event_notifier_group;
	const char *transport_name = "relay-event-notifier";
	size_t subbuf_size = 4096;	//TODO: make configurable
	size_t num_subbuf = 16;		//TODO: make configurable
	unsigned int switch_timer_interval = 0;
	unsigned int read_timer_interval = 0;
	int i;

	mutex_lock(&sessions_mutex);

	transport = lttng_transport_find(transport_name);
	if (!transport) {
		printk(KERN_WARNING "LTTng: transport %s not found\n",
		       transport_name);
		goto notransport;
	}
	/* Hold a reference on the transport module for the group's lifetime. */
	if (!try_module_get(transport->owner)) {
		printk(KERN_WARNING "LTTng: Can't lock transport %s module.\n",
		       transport_name);
		goto notransport;
	}

	event_notifier_group = lttng_kvzalloc(sizeof(struct lttng_event_notifier_group),
				       GFP_KERNEL);
	if (!event_notifier_group)
		goto nomem;

	/*
	 * Initialize the ring buffer used to store event notifier
	 * notifications.
	 */
	event_notifier_group->ops = &transport->ops;
	event_notifier_group->chan = transport->ops.channel_create(
			transport_name, event_notifier_group, NULL,
			subbuf_size, num_subbuf, switch_timer_interval,
			read_timer_interval);
	if (!event_notifier_group->chan)
		goto create_error;

	event_notifier_group->transport = transport;

	INIT_LIST_HEAD(&event_notifier_group->enablers_head);
	INIT_LIST_HEAD(&event_notifier_group->event_notifiers_head);
	for (i = 0; i < LTTNG_EVENT_NOTIFIER_HT_SIZE; i++)
		INIT_HLIST_HEAD(&event_notifier_group->event_notifiers_ht.table[i]);

	list_add(&event_notifier_group->node, &event_notifier_groups);

	mutex_unlock(&sessions_mutex);

	return event_notifier_group;

create_error:
	lttng_kvfree(event_notifier_group);
nomem:
	if (transport)
		module_put(transport->owner);
notransport:
	mutex_unlock(&sessions_mutex);
	return NULL;
}
263
264 void metadata_cache_destroy(struct kref *kref)
265 {
266 struct lttng_metadata_cache *cache =
267 container_of(kref, struct lttng_metadata_cache, refcount);
268 vfree(cache->data);
269 kfree(cache);
270 }
271
/*
 * Tear down a tracing session.
 *
 * Deactivates the session, unregisters all events and syscall hooks,
 * waits for in-flight probes with synchronize_trace(), then destroys
 * enablers, events and channels, hangs up the metadata streams,
 * releases the ID trackers, drops the metadata cache reference and
 * finally frees the session. The teardown order matters: probes must be
 * quiesced before their backing objects are freed.
 */
void lttng_session_destroy(struct lttng_session *session)
{
	struct lttng_channel *chan, *tmpchan;
	struct lttng_event *event, *tmpevent;
	struct lttng_metadata_stream *metadata_stream;
	struct lttng_event_enabler *event_enabler, *tmp_event_enabler;
	int ret;

	mutex_lock(&sessions_mutex);
	WRITE_ONCE(session->active, 0);
	list_for_each_entry(chan, &session->chan, list) {
		ret = lttng_syscalls_unregister_event(chan);
		WARN_ON(ret);
	}
	list_for_each_entry(event, &session->events, list) {
		ret = _lttng_event_unregister(event);
		WARN_ON(ret);
	}
	synchronize_trace();	/* Wait for in-flight events to complete */
	list_for_each_entry(chan, &session->chan, list) {
		ret = lttng_syscalls_destroy_event(chan);
		WARN_ON(ret);
	}
	list_for_each_entry_safe(event_enabler, tmp_event_enabler,
			&session->enablers_head, node)
		lttng_event_enabler_destroy(event_enabler);
	list_for_each_entry_safe(event, tmpevent, &session->events, list)
		_lttng_event_destroy(event);
	list_for_each_entry_safe(chan, tmpchan, &session->chan, list) {
		/* Metadata channels are destroyed through their own release path. */
		BUG_ON(chan->channel_type == METADATA_CHANNEL);
		_lttng_channel_destroy(chan);
	}
	mutex_lock(&session->metadata_cache->lock);
	list_for_each_entry(metadata_stream, &session->metadata_cache->metadata_stream, list)
		_lttng_metadata_channel_hangup(metadata_stream);
	mutex_unlock(&session->metadata_cache->lock);
	lttng_id_tracker_destroy(&session->pid_tracker, false);
	lttng_id_tracker_destroy(&session->vpid_tracker, false);
	lttng_id_tracker_destroy(&session->uid_tracker, false);
	lttng_id_tracker_destroy(&session->vuid_tracker, false);
	lttng_id_tracker_destroy(&session->gid_tracker, false);
	lttng_id_tracker_destroy(&session->vgid_tracker, false);
	/* Streams may still hold references; free happens on last kref_put. */
	kref_put(&session->metadata_cache->refcount, metadata_cache_destroy);
	list_del(&session->list);
	mutex_unlock(&sessions_mutex);
	lttng_kvfree(session);
}
319
/*
 * Tear down an event notifier group.
 *
 * Unregisters syscall hooks and all event notifiers, waits for in-flight
 * probes with synchronize_trace(), flushes pending wakeup irq_work, then
 * destroys enablers and notifiers, releases the ring buffer channel and
 * the transport module reference, and frees the group. NULL is a no-op.
 */
void lttng_event_notifier_group_destroy(
		struct lttng_event_notifier_group *event_notifier_group)
{
	struct lttng_event_notifier_enabler *event_notifier_enabler, *tmp_event_notifier_enabler;
	struct lttng_event_notifier *event_notifier, *tmpevent_notifier;
	int ret;

	if (!event_notifier_group)
		return;

	mutex_lock(&sessions_mutex);

	ret = lttng_syscalls_unregister_event_notifier(event_notifier_group);
	WARN_ON(ret);

	list_for_each_entry_safe(event_notifier, tmpevent_notifier,
			&event_notifier_group->event_notifiers_head, list) {
		ret = _lttng_event_notifier_unregister(event_notifier);
		WARN_ON(ret);
	}

	/* Wait for in-flight event notifier to complete */
	synchronize_trace();

	irq_work_sync(&event_notifier_group->wakeup_pending);

	kfree(event_notifier_group->sc_filter);

	list_for_each_entry_safe(event_notifier_enabler, tmp_event_notifier_enabler,
			&event_notifier_group->enablers_head, node)
		lttng_event_notifier_enabler_destroy(event_notifier_enabler);

	list_for_each_entry_safe(event_notifier, tmpevent_notifier,
			&event_notifier_group->event_notifiers_head, list)
		_lttng_event_notifier_destroy(event_notifier);

	event_notifier_group->ops->channel_destroy(event_notifier_group->chan);
	module_put(event_notifier_group->transport->owner);
	list_del(&event_notifier_group->node);

	mutex_unlock(&sessions_mutex);
	lttng_kvfree(event_notifier_group);
}
363
364 int lttng_session_statedump(struct lttng_session *session)
365 {
366 int ret;
367
368 mutex_lock(&sessions_mutex);
369 ret = lttng_statedump_start(session);
370 mutex_unlock(&sessions_mutex);
371 return ret;
372 }
373
/*
 * Activate a tracing session.
 *
 * Syncs enablers, snapshots each channel's event-header type, clears
 * stream quiescent state, marks the session active, then emits the
 * metadata statedump and starts the kernel statedump. Returns -EBUSY if
 * the session is already active; on statedump failure the session is
 * deactivated again and the error returned.
 */
int lttng_session_enable(struct lttng_session *session)
{
	int ret = 0;
	struct lttng_channel *chan;

	mutex_lock(&sessions_mutex);
	if (session->active) {
		ret = -EBUSY;
		goto end;
	}

	/* Set transient enabler state to "enabled" */
	session->tstate = 1;

	/* We need to sync enablers with session before activation. */
	lttng_session_sync_event_enablers(session);

	/*
	 * Snapshot the number of events per channel to know the type of header
	 * we need to use.
	 */
	list_for_each_entry(chan, &session->chan, list) {
		if (chan->header_type)
			continue;		/* don't change it if session stop/restart */
		/* Fewer than 31 event IDs fit the compact event header. */
		if (chan->free_event_id < 31)
			chan->header_type = 1;	/* compact */
		else
			chan->header_type = 2;	/* large */
	}

	/* Clear each stream's quiescent state. */
	list_for_each_entry(chan, &session->chan, list) {
		if (chan->channel_type != METADATA_CHANNEL)
			lib_ring_buffer_clear_quiescent_channel(chan->chan);
	}

	WRITE_ONCE(session->active, 1);
	WRITE_ONCE(session->been_active, 1);
	ret = _lttng_session_metadata_statedump(session);
	if (ret) {
		WRITE_ONCE(session->active, 0);
		goto end;
	}
	ret = lttng_statedump_start(session);
	if (ret)
		WRITE_ONCE(session->active, 0);
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
424
/*
 * Deactivate a tracing session.
 *
 * Marks the session inactive, syncs enablers to the disabled state and
 * sets each data stream quiescent. Returns -EBUSY if the session is not
 * currently active.
 */
int lttng_session_disable(struct lttng_session *session)
{
	int ret = 0;
	struct lttng_channel *chan;

	mutex_lock(&sessions_mutex);
	if (!session->active) {
		ret = -EBUSY;
		goto end;
	}
	WRITE_ONCE(session->active, 0);

	/* Set transient enabler state to "disabled" */
	session->tstate = 0;
	lttng_session_sync_event_enablers(session);

	/* Set each stream's quiescent state. */
	list_for_each_entry(chan, &session->chan, list) {
		if (chan->channel_type != METADATA_CHANNEL)
			lib_ring_buffer_set_quiescent_channel(chan->chan);
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
450
/*
 * Regenerate the session's metadata from scratch.
 *
 * Zeroes the metadata cache, bumps its version, resets the in/out
 * counters of every metadata stream and clears all metadata_dumped
 * flags, then re-emits the full metadata statedump. Requires an active
 * session; returns -EBUSY otherwise.
 */
int lttng_session_metadata_regenerate(struct lttng_session *session)
{
	int ret = 0;
	struct lttng_channel *chan;
	struct lttng_event *event;
	struct lttng_metadata_cache *cache = session->metadata_cache;
	struct lttng_metadata_stream *stream;

	mutex_lock(&sessions_mutex);
	if (!session->active) {
		ret = -EBUSY;
		goto end;
	}

	mutex_lock(&cache->lock);
	memset(cache->data, 0, cache->cache_alloc);
	cache->metadata_written = 0;
	cache->version++;
	list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list) {
		stream->metadata_out = 0;
		stream->metadata_in = 0;
	}
	mutex_unlock(&cache->lock);

	/* Force a full re-dump of session, channel and event metadata. */
	session->metadata_dumped = 0;
	list_for_each_entry(chan, &session->chan, list) {
		chan->metadata_dumped = 0;
	}

	list_for_each_entry(event, &session->events, list) {
		event->metadata_dumped = 0;
	}

	ret = _lttng_session_metadata_statedump(session);

end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
490
/*
 * Enable a channel.
 *
 * Returns -EPERM for metadata channels (cannot be toggled) and -EEXIST
 * if the channel is already enabled. Syncs enablers before publishing
 * the enabled state.
 */
int lttng_channel_enable(struct lttng_channel *channel)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (channel->channel_type == METADATA_CHANNEL) {
		ret = -EPERM;
		goto end;
	}
	if (channel->enabled) {
		ret = -EEXIST;
		goto end;
	}
	/* Set transient enabler state to "enabled" */
	channel->tstate = 1;
	lttng_session_sync_event_enablers(channel->session);
	/* Set atomically the state to "enabled" */
	WRITE_ONCE(channel->enabled, 1);
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
513
/*
 * Disable a channel.
 *
 * Returns -EPERM for metadata channels (cannot be toggled) and -EEXIST
 * if the channel is already disabled. The enabled flag is cleared
 * before the enabler sync so no event fires through a disabled channel.
 */
int lttng_channel_disable(struct lttng_channel *channel)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (channel->channel_type == METADATA_CHANNEL) {
		ret = -EPERM;
		goto end;
	}
	if (!channel->enabled) {
		ret = -EEXIST;
		goto end;
	}
	/* Set atomically the state to "disabled" */
	WRITE_ONCE(channel->enabled, 0);
	/* Set transient enabler state to "disabled" */
	channel->tstate = 0;
	lttng_session_sync_event_enablers(channel->session);
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
536
/*
 * Enable a single event.
 *
 * Only kprobe, uprobe and "noop" events can be toggled directly;
 * tracepoint and syscall events are driven by enablers and return
 * -EINVAL here. Kretprobe events delegate to
 * lttng_kretprobes_event_enable_state() (two linked events). Returns
 * -EPERM for metadata-channel events and -EEXIST if already enabled.
 */
int lttng_event_enable(struct lttng_event *event)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (event->chan->channel_type == METADATA_CHANNEL) {
		ret = -EPERM;
		goto end;
	}
	if (event->enabled) {
		ret = -EEXIST;
		goto end;
	}
	switch (event->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
	case LTTNG_KERNEL_SYSCALL:
		ret = -EINVAL;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
	case LTTNG_KERNEL_NOOP:
		WRITE_ONCE(event->enabled, 1);
		break;
	case LTTNG_KERNEL_KRETPROBE:
		ret = lttng_kretprobes_event_enable_state(event, 1);
		break;
	case LTTNG_KERNEL_FUNCTION:	/* Fall-through. */
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
572
/*
 * Disable a single event. Mirror of lttng_event_enable(): same
 * instrumentation-type restrictions, same -EPERM/-EEXIST/-EINVAL error
 * conditions, with the enabled state cleared instead of set.
 */
int lttng_event_disable(struct lttng_event *event)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (event->chan->channel_type == METADATA_CHANNEL) {
		ret = -EPERM;
		goto end;
	}
	if (!event->enabled) {
		ret = -EEXIST;
		goto end;
	}
	switch (event->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
	case LTTNG_KERNEL_SYSCALL:
		ret = -EINVAL;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
	case LTTNG_KERNEL_NOOP:
		WRITE_ONCE(event->enabled, 0);
		break;
	case LTTNG_KERNEL_KRETPROBE:
		ret = lttng_kretprobes_event_enable_state(event, 0);
		break;
	case LTTNG_KERNEL_FUNCTION:	/* Fall-through. */
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
608
/*
 * Enable a single event notifier.
 *
 * Only kprobe and uprobe notifiers can be toggled directly; tracepoint
 * and syscall notifiers are driven by enablers (-EINVAL), and the
 * remaining instrumentation types are not valid for notifiers. Returns
 * -EEXIST if already enabled.
 */
int lttng_event_notifier_enable(struct lttng_event_notifier *event_notifier)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (event_notifier->enabled) {
		ret = -EEXIST;
		goto end;
	}
	switch (event_notifier->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
	case LTTNG_KERNEL_SYSCALL:
		ret = -EINVAL;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
		WRITE_ONCE(event_notifier->enabled, 1);
		break;
	case LTTNG_KERNEL_FUNCTION:
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_KRETPROBE:
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
638
/*
 * Disable a single event notifier. Mirror of
 * lttng_event_notifier_enable(): same instrumentation-type
 * restrictions, with the enabled state cleared instead of set.
 */
int lttng_event_notifier_disable(struct lttng_event_notifier *event_notifier)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (!event_notifier->enabled) {
		ret = -EEXIST;
		goto end;
	}
	switch (event_notifier->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
	case LTTNG_KERNEL_SYSCALL:
		ret = -EINVAL;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
		WRITE_ONCE(event_notifier->enabled, 0);
		break;
	case LTTNG_KERNEL_FUNCTION:
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_KRETPROBE:
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
668
/*
 * Create a channel in a session.
 *
 * Refuses to add non-metadata channels once the session has been
 * active. Pins the transport module, allocates the channel, creates
 * the underlying ring buffer channel and links the channel on the
 * session's list. Returns NULL on any failure (refused, transport not
 * found/pinnable, allocation or ring buffer creation error).
 */
struct lttng_channel *lttng_channel_create(struct lttng_session *session,
				       const char *transport_name,
				       void *buf_addr,
				       size_t subbuf_size, size_t num_subbuf,
				       unsigned int switch_timer_interval,
				       unsigned int read_timer_interval,
				       enum channel_type channel_type)
{
	struct lttng_channel *chan;
	struct lttng_transport *transport = NULL;

	mutex_lock(&sessions_mutex);
	if (session->been_active && channel_type != METADATA_CHANNEL)
		goto active;	/* Refuse to add channel to active session */
	transport = lttng_transport_find(transport_name);
	if (!transport) {
		printk(KERN_WARNING "LTTng: transport %s not found\n",
		       transport_name);
		goto notransport;
	}
	if (!try_module_get(transport->owner)) {
		printk(KERN_WARNING "LTTng: Can't lock transport module.\n");
		goto notransport;
	}
	chan = kzalloc(sizeof(struct lttng_channel), GFP_KERNEL);
	if (!chan)
		goto nomem;
	chan->session = session;
	chan->id = session->free_chan_id++;
	chan->ops = &transport->ops;
	/*
	 * Note: the channel creation op already writes into the packet
	 * headers. Therefore the "chan" information used as input
	 * should be already accessible.
	 */
	chan->chan = transport->ops.channel_create(transport_name,
			chan, buf_addr, subbuf_size, num_subbuf,
			switch_timer_interval, read_timer_interval);
	if (!chan->chan)
		goto create_error;
	chan->tstate = 1;
	chan->enabled = 1;
	chan->transport = transport;
	chan->channel_type = channel_type;
	list_add(&chan->list, &session->chan);
	mutex_unlock(&sessions_mutex);
	return chan;

create_error:
	kfree(chan);
nomem:
	if (transport)
		module_put(transport->owner);
notransport:
active:
	mutex_unlock(&sessions_mutex);
	return NULL;
}
727
/*
 * Only used internally at session destruction for per-cpu channels, and
 * when metadata channel is released.
 * Needs to be called with sessions mutex held.
 *
 * Destroys the ring buffer channel, drops the transport module
 * reference, unlinks the channel, frees its context and the channel
 * itself.
 */
static
void _lttng_channel_destroy(struct lttng_channel *chan)
{
	chan->ops->channel_destroy(chan->chan);
	module_put(chan->transport->owner);
	list_del(&chan->list);
	lttng_destroy_context(chan->ctx);
	kfree(chan);
}
742
/*
 * Destroy a metadata channel. Exported release path for metadata
 * channels only (BUG otherwise); takes sessions_mutex to protect the
 * metadata cache during teardown.
 */
void lttng_metadata_channel_destroy(struct lttng_channel *chan)
{
	BUG_ON(chan->channel_type != METADATA_CHANNEL);

	/* Protect the metadata cache with the sessions_mutex. */
	mutex_lock(&sessions_mutex);
	_lttng_channel_destroy(chan);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_metadata_channel_destroy);
753
/*
 * Mark a metadata stream finalized and wake up any reader blocked on
 * it, so user space sees the hangup.
 */
static
void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream)
{
	stream->finalized = 1;
	wake_up_interruptible(&stream->read_wait);
}
760
/*
 * Supports event creation while tracing session is active.
 * Needs to be called with sessions mutex held.
 *
 * Creates an event of the given instrumentation type in a channel:
 * checks for duplicates in the session event hash table, allocates the
 * event, performs the type-specific registration (tracepoint lookup,
 * kprobe/kretprobe/uprobe registration, syscall entry/exit and ABI
 * selection), emits its metadata and links it on the session. Returns
 * the new event or an ERR_PTR on failure.
 */
struct lttng_event *_lttng_event_create(struct lttng_channel *chan,
				struct lttng_kernel_event *event_param,
				void *filter,
				const struct lttng_event_desc *event_desc,
				enum lttng_kernel_instrumentation itype)
{
	struct lttng_session *session = chan->session;
	struct lttng_event *event;
	const char *event_name;
	struct hlist_head *head;
	int ret;

	/* Event IDs are allocated sequentially; -1U means the space is exhausted. */
	if (chan->free_event_id == -1U) {
		ret = -EMFILE;
		goto full;
	}

	/* The name source depends on the instrumentation type. */
	switch (itype) {
	case LTTNG_KERNEL_TRACEPOINT:
		event_name = event_desc->name;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
	case LTTNG_KERNEL_KRETPROBE:
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_SYSCALL:
		event_name = event_param->name;
		break;
	case LTTNG_KERNEL_FUNCTION:	/* Fall-through. */
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
		goto type_error;
	}

	/* Refuse duplicate (name, channel) pairs within the session. */
	head = utils_borrow_hash_table_bucket(session->events_ht.table,
		LTTNG_EVENT_HT_SIZE, event_name);
	lttng_hlist_for_each_entry(event, head, hlist) {
		WARN_ON_ONCE(!event->desc);
		if (!strncmp(event->desc->name, event_name,
					LTTNG_KERNEL_SYM_NAME_LEN - 1)
				&& chan == event->chan) {
			ret = -EEXIST;
			goto exist;
		}
	}

	event = kmem_cache_zalloc(event_cache, GFP_KERNEL);
	if (!event) {
		ret = -ENOMEM;
		goto cache_error;
	}
	event->chan = chan;
	event->filter = filter;
	event->id = chan->free_event_id++;
	event->instrumentation = itype;
	event->evtype = LTTNG_TYPE_EVENT;
	INIT_LIST_HEAD(&event->filter_bytecode_runtime_head);
	INIT_LIST_HEAD(&event->enablers_ref_head);

	switch (itype) {
	case LTTNG_KERNEL_TRACEPOINT:
		/* Event will be enabled by enabler sync. */
		event->enabled = 0;
		event->registered = 0;
		event->desc = lttng_event_desc_get(event_name);
		if (!event->desc) {
			ret = -ENOENT;
			goto register_error;
		}
		/* Populate lttng_event structure before event registration. */
		smp_wmb();
		break;
	case LTTNG_KERNEL_KPROBE:
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->registered = 1;
		/*
		 * Populate lttng_event structure before event
		 * registration.
		 */
		smp_wmb();
		ret = lttng_kprobes_register_event(event_name,
				event_param->u.kprobe.symbol_name,
				event_param->u.kprobe.offset,
				event_param->u.kprobe.addr,
				event);
		if (ret) {
			ret = -EINVAL;
			goto register_error;
		}
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		break;
	case LTTNG_KERNEL_KRETPROBE:
	{
		struct lttng_event *event_return;

		/* kretprobe defines 2 events */
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->registered = 1;
		event_return =
			kmem_cache_zalloc(event_cache, GFP_KERNEL);
		if (!event_return) {
			ret = -ENOMEM;
			goto register_error;
		}
		event_return->chan = chan;
		event_return->filter = filter;
		event_return->id = chan->free_event_id++;
		event_return->enabled = 0;
		event_return->registered = 1;
		event_return->instrumentation = itype;
		/*
		 * Populate lttng_event structure before kretprobe registration.
		 */
		smp_wmb();
		ret = lttng_kretprobes_register(event_name,
				event_param->u.kretprobe.symbol_name,
				event_param->u.kretprobe.offset,
				event_param->u.kretprobe.addr,
				event, event_return);
		if (ret) {
			kmem_cache_free(event_cache, event_return);
			ret = -EINVAL;
			goto register_error;
		}
		/* Take 2 refs on the module: one per event. */
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		ret = _lttng_event_metadata_statedump(chan->session, chan,
						    event_return);
		WARN_ON_ONCE(ret > 0);
		if (ret) {
			kmem_cache_free(event_cache, event_return);
			module_put(event->desc->owner);
			module_put(event->desc->owner);
			goto statedump_error;
		}
		list_add(&event_return->list, &chan->session->events);
		break;
	}
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_SYSCALL:
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->registered = 0;
		event->desc = event_desc;
		/* A concrete entry or exit point must be selected. */
		switch (event_param->u.syscall.entryexit) {
		case LTTNG_KERNEL_SYSCALL_ENTRYEXIT:
			ret = -EINVAL;
			goto register_error;
		case LTTNG_KERNEL_SYSCALL_ENTRY:
			event->u.syscall.entryexit = LTTNG_SYSCALL_ENTRY;
			break;
		case LTTNG_KERNEL_SYSCALL_EXIT:
			event->u.syscall.entryexit = LTTNG_SYSCALL_EXIT;
			break;
		}
		/* A concrete syscall ABI must be selected. */
		switch (event_param->u.syscall.abi) {
		case LTTNG_KERNEL_SYSCALL_ABI_ALL:
			ret = -EINVAL;
			goto register_error;
		case LTTNG_KERNEL_SYSCALL_ABI_NATIVE:
			event->u.syscall.abi = LTTNG_SYSCALL_ABI_NATIVE;
			break;
		case LTTNG_KERNEL_SYSCALL_ABI_COMPAT:
			event->u.syscall.abi = LTTNG_SYSCALL_ABI_COMPAT;
			break;
		}
		if (!event->desc) {
			ret = -EINVAL;
			goto register_error;
		}
		break;
	case LTTNG_KERNEL_UPROBE:
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->registered = 1;

		/*
		 * Populate lttng_event structure before event
		 * registration.
		 */
		smp_wmb();

		ret = lttng_uprobes_register_event(event_param->name,
				event_param->u.uprobe.fd,
				event);
		if (ret)
			goto register_error;
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		break;
	case LTTNG_KERNEL_FUNCTION:	/* Fall-through */
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
		goto register_error;
	}
	ret = _lttng_event_metadata_statedump(chan->session, chan, event);
	WARN_ON_ONCE(ret > 0);
	if (ret) {
		goto statedump_error;
	}
	hlist_add_head(&event->hlist, head);
	list_add(&event->list, &chan->session->events);
	return event;

statedump_error:
	/* If a statedump error occurs, events will not be readable. */
register_error:
	kmem_cache_free(event_cache, event);
cache_error:
exist:
type_error:
full:
	return ERR_PTR(ret);
}
999
1000 struct lttng_event_notifier *_lttng_event_notifier_create(
1001 const struct lttng_event_desc *event_desc,
1002 uint64_t token, struct lttng_event_notifier_group *event_notifier_group,
1003 struct lttng_kernel_event_notifier *event_notifier_param,
1004 void *filter, enum lttng_kernel_instrumentation itype)
1005 {
1006 struct lttng_event_notifier *event_notifier;
1007 const char *event_name;
1008 struct hlist_head *head;
1009 int ret;
1010
1011 switch (itype) {
1012 case LTTNG_KERNEL_TRACEPOINT:
1013 event_name = event_desc->name;
1014 break;
1015 case LTTNG_KERNEL_KPROBE:
1016 case LTTNG_KERNEL_UPROBE:
1017 case LTTNG_KERNEL_SYSCALL:
1018 event_name = event_notifier_param->event.name;
1019 break;
1020 case LTTNG_KERNEL_KRETPROBE:
1021 case LTTNG_KERNEL_FUNCTION:
1022 case LTTNG_KERNEL_NOOP:
1023 default:
1024 WARN_ON_ONCE(1);
1025 ret = -EINVAL;
1026 goto type_error;
1027 }
1028
1029 head = utils_borrow_hash_table_bucket(event_notifier_group->event_notifiers_ht.table,
1030 LTTNG_EVENT_NOTIFIER_HT_SIZE, event_name);
1031 lttng_hlist_for_each_entry(event_notifier, head, hlist) {
1032 WARN_ON_ONCE(!event_notifier->desc);
1033 if (!strncmp(event_notifier->desc->name, event_name,
1034 LTTNG_KERNEL_SYM_NAME_LEN - 1)
1035 && event_notifier_group == event_notifier->group
1036 && token == event_notifier->user_token) {
1037 ret = -EEXIST;
1038 goto exist;
1039 }
1040 }
1041
1042 event_notifier = kmem_cache_zalloc(event_notifier_cache, GFP_KERNEL);
1043 if (!event_notifier) {
1044 ret = -ENOMEM;
1045 goto cache_error;
1046 }
1047
1048 event_notifier->group = event_notifier_group;
1049 event_notifier->user_token = token;
1050 event_notifier->filter = filter;
1051 event_notifier->instrumentation = itype;
1052 event_notifier->evtype = LTTNG_TYPE_EVENT;
1053 event_notifier->send_notification = lttng_event_notifier_notification_send;
1054 INIT_LIST_HEAD(&event_notifier->filter_bytecode_runtime_head);
1055 INIT_LIST_HEAD(&event_notifier->enablers_ref_head);
1056
1057 switch (itype) {
1058 case LTTNG_KERNEL_TRACEPOINT:
1059 /* Event will be enabled by enabler sync. */
1060 event_notifier->enabled = 0;
1061 event_notifier->registered = 0;
1062 event_notifier->desc = lttng_event_desc_get(event_name);
1063 if (!event_notifier->desc) {
1064 ret = -ENOENT;
1065 goto register_error;
1066 }
1067 /* Populate lttng_event_notifier structure before event registration. */
1068 smp_wmb();
1069 break;
1070 case LTTNG_KERNEL_KPROBE:
1071 /*
1072 * Needs to be explicitly enabled after creation, since
1073 * we may want to apply filters.
1074 */
1075 event_notifier->enabled = 0;
1076 event_notifier->registered = 1;
1077 /*
1078 * Populate lttng_event_notifier structure before event
1079 * registration.
1080 */
1081 smp_wmb();
1082 ret = lttng_kprobes_register_event_notifier(
1083 event_notifier_param->event.u.kprobe.symbol_name,
1084 event_notifier_param->event.u.kprobe.offset,
1085 event_notifier_param->event.u.kprobe.addr,
1086 event_notifier);
1087 if (ret) {
1088 ret = -EINVAL;
1089 goto register_error;
1090 }
1091 ret = try_module_get(event_notifier->desc->owner);
1092 WARN_ON_ONCE(!ret);
1093 break;
1094 case LTTNG_KERNEL_NOOP:
1095 case LTTNG_KERNEL_SYSCALL:
1096 /*
1097 * Needs to be explicitly enabled after creation, since
1098 * we may want to apply filters.
1099 */
1100 event_notifier->enabled = 0;
1101 event_notifier->registered = 0;
1102 event_notifier->desc = event_desc;
1103 switch (event_notifier_param->event.u.syscall.entryexit) {
1104 case LTTNG_KERNEL_SYSCALL_ENTRYEXIT:
1105 ret = -EINVAL;
1106 goto register_error;
1107 case LTTNG_KERNEL_SYSCALL_ENTRY:
1108 event_notifier->u.syscall.entryexit = LTTNG_SYSCALL_ENTRY;
1109 break;
1110 case LTTNG_KERNEL_SYSCALL_EXIT:
1111 event_notifier->u.syscall.entryexit = LTTNG_SYSCALL_EXIT;
1112 break;
1113 }
1114 switch (event_notifier_param->event.u.syscall.abi) {
1115 case LTTNG_KERNEL_SYSCALL_ABI_ALL:
1116 ret = -EINVAL;
1117 goto register_error;
1118 case LTTNG_KERNEL_SYSCALL_ABI_NATIVE:
1119 event_notifier->u.syscall.abi = LTTNG_SYSCALL_ABI_NATIVE;
1120 break;
1121 case LTTNG_KERNEL_SYSCALL_ABI_COMPAT:
1122 event_notifier->u.syscall.abi = LTTNG_SYSCALL_ABI_COMPAT;
1123 break;
1124 }
1125
1126 if (!event_notifier->desc) {
1127 ret = -EINVAL;
1128 goto register_error;
1129 }
1130 break;
1131 case LTTNG_KERNEL_UPROBE:
1132 /*
1133 * Needs to be explicitly enabled after creation, since
1134 * we may want to apply filters.
1135 */
1136 event_notifier->enabled = 0;
1137 event_notifier->registered = 1;
1138
1139 /*
1140 * Populate lttng_event_notifier structure before
1141 * event_notifier registration.
1142 */
1143 smp_wmb();
1144
1145 ret = lttng_uprobes_register_event_notifier(
1146 event_notifier_param->event.name,
1147 event_notifier_param->event.u.uprobe.fd,
1148 event_notifier);
1149 if (ret)
1150 goto register_error;
1151 ret = try_module_get(event_notifier->desc->owner);
1152 WARN_ON_ONCE(!ret);
1153 break;
1154 case LTTNG_KERNEL_KRETPROBE:
1155 case LTTNG_KERNEL_FUNCTION:
1156 default:
1157 WARN_ON_ONCE(1);
1158 ret = -EINVAL;
1159 goto register_error;
1160 }
1161
1162 list_add(&event_notifier->list, &event_notifier_group->event_notifiers_head);
1163 hlist_add_head(&event_notifier->hlist, head);
1164 return event_notifier;
1165
1166 register_error:
1167 kmem_cache_free(event_notifier_cache, event_notifier);
1168 cache_error:
1169 exist:
1170 type_error:
1171 return ERR_PTR(ret);
1172 }
1173
1174 struct lttng_event *lttng_event_create(struct lttng_channel *chan,
1175 struct lttng_kernel_event *event_param,
1176 void *filter,
1177 const struct lttng_event_desc *event_desc,
1178 enum lttng_kernel_instrumentation itype)
1179 {
1180 struct lttng_event *event;
1181
1182 mutex_lock(&sessions_mutex);
1183 event = _lttng_event_create(chan, event_param, filter, event_desc,
1184 itype);
1185 mutex_unlock(&sessions_mutex);
1186 return event;
1187 }
1188
1189 struct lttng_event_notifier *lttng_event_notifier_create(
1190 const struct lttng_event_desc *event_desc,
1191 uint64_t id, struct lttng_event_notifier_group *event_notifier_group,
1192 struct lttng_kernel_event_notifier *event_notifier_param,
1193 void *filter, enum lttng_kernel_instrumentation itype)
1194 {
1195 struct lttng_event_notifier *event_notifier;
1196
1197 mutex_lock(&sessions_mutex);
1198 event_notifier = _lttng_event_notifier_create(event_desc, id,
1199 event_notifier_group, event_notifier_param, filter, itype);
1200 mutex_unlock(&sessions_mutex);
1201 return event_notifier;
1202 }
1203
/*
 * Only used for tracepoints for now.
 *
 * Connect an event to its instrumentation source and mark it registered
 * on success. Idempotent: returns immediately if already registered.
 */
static
void register_event(struct lttng_event *event)
{
	const struct lttng_event_desc *desc;
	int ret = -EINVAL;

	if (event->registered)
		return;

	desc = event->desc;
	switch (event->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
		/* Hook the probe callback onto the kernel tracepoint. */
		ret = lttng_wrapper_tracepoint_probe_register(desc->kname,
						  desc->probe_callback,
						  event);
		break;
	case LTTNG_KERNEL_SYSCALL:
		ret = lttng_syscall_filter_enable_event(event->chan, event);
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
	case LTTNG_KERNEL_KRETPROBE:
	case LTTNG_KERNEL_NOOP:
		/* No registration to perform here for these types. */
		ret = 0;
		break;
	case LTTNG_KERNEL_FUNCTION:	/* Fall-through */
	default:
		WARN_ON_ONCE(1);
	}
	/* Only flag as registered when the registration succeeded. */
	if (!ret)
		event->registered = 1;
}
1237
/*
 * Only used internally at session destruction.
 *
 * Disconnect an event from its instrumentation source. Returns 0 on
 * success (and clears the registered flag), a negative error otherwise.
 */
int _lttng_event_unregister(struct lttng_event *event)
{
	/* NOTE(review): desc is assigned but unused below. */
	const struct lttng_event_desc *desc;
	int ret = -EINVAL;

	if (!event->registered)
		return 0;

	desc = event->desc;
	switch (event->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
		ret = lttng_wrapper_tracepoint_probe_unregister(event->desc->kname,
						  event->desc->probe_callback,
						  event);
		break;
	case LTTNG_KERNEL_KPROBE:
		lttng_kprobes_unregister_event(event);
		ret = 0;
		break;
	case LTTNG_KERNEL_KRETPROBE:
		lttng_kretprobes_unregister(event);
		ret = 0;
		break;
	case LTTNG_KERNEL_SYSCALL:
		ret = lttng_syscall_filter_disable_event(event->chan, event);
		break;
	case LTTNG_KERNEL_NOOP:
		ret = 0;
		break;
	case LTTNG_KERNEL_UPROBE:
		lttng_uprobes_unregister_event(event);
		ret = 0;
		break;
	case LTTNG_KERNEL_FUNCTION:	/* Fall-through */
	default:
		WARN_ON_ONCE(1);
	}
	/* Only clear the flag when the unregistration succeeded. */
	if (!ret)
		event->registered = 0;
	return ret;
}
1282
/*
 * Only used for tracepoints for now.
 *
 * Connect an event notifier to its instrumentation source and mark it
 * registered on success. Idempotent: returns if already registered.
 */
static
void register_event_notifier(struct lttng_event_notifier *event_notifier)
{
	const struct lttng_event_desc *desc;
	int ret = -EINVAL;

	if (event_notifier->registered)
		return;

	desc = event_notifier->desc;
	switch (event_notifier->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
		/* Hook the notifier callback onto the kernel tracepoint. */
		ret = lttng_wrapper_tracepoint_probe_register(desc->kname,
						  desc->event_notifier_callback,
						  event_notifier);
		break;
	case LTTNG_KERNEL_SYSCALL:
		ret = lttng_syscall_filter_enable_event_notifier(event_notifier);
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
		/* Registered at event notifier creation time. */
		ret = 0;
		break;
	case LTTNG_KERNEL_KRETPROBE:
	case LTTNG_KERNEL_FUNCTION:
	/*
	 * NOTE(review): NOOP notifiers can be created by
	 * _lttng_event_notifier_create() but are rejected here --
	 * confirm they are never explicitly registered.
	 */
	case LTTNG_KERNEL_NOOP:
	default:
		WARN_ON_ONCE(1);
	}
	if (!ret)
		event_notifier->registered = 1;
}
1316
/*
 * Disconnect an event notifier from its instrumentation source.
 * Returns 0 on success (and clears the registered flag), a negative
 * error otherwise.
 */
static
int _lttng_event_notifier_unregister(
		struct lttng_event_notifier *event_notifier)
{
	/* NOTE(review): desc is assigned but unused below. */
	const struct lttng_event_desc *desc;
	int ret = -EINVAL;

	if (!event_notifier->registered)
		return 0;

	desc = event_notifier->desc;
	switch (event_notifier->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
		ret = lttng_wrapper_tracepoint_probe_unregister(event_notifier->desc->kname,
						  event_notifier->desc->event_notifier_callback,
						  event_notifier);
		break;
	case LTTNG_KERNEL_KPROBE:
		lttng_kprobes_unregister_event_notifier(event_notifier);
		ret = 0;
		break;
	case LTTNG_KERNEL_UPROBE:
		lttng_uprobes_unregister_event_notifier(event_notifier);
		ret = 0;
		break;
	case LTTNG_KERNEL_SYSCALL:
		ret = lttng_syscall_filter_disable_event_notifier(event_notifier);
		break;
	case LTTNG_KERNEL_KRETPROBE:
	case LTTNG_KERNEL_FUNCTION:
	case LTTNG_KERNEL_NOOP:
	default:
		WARN_ON_ONCE(1);
	}
	if (!ret)
		event_notifier->registered = 0;
	return ret;
}
1355
/*
 * Only used internally at session destruction.
 *
 * Release instrumentation-specific resources (descriptor reference or
 * probe module reference plus private data), unlink the event from its
 * session list, destroy its context and free it.
 */
static
void _lttng_event_destroy(struct lttng_event *event)
{
	switch (event->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
		/* Drop the descriptor reference taken at creation. */
		lttng_event_desc_put(event->desc);
		break;
	case LTTNG_KERNEL_KPROBE:
		module_put(event->desc->owner);
		lttng_kprobes_destroy_event_private(event);
		break;
	case LTTNG_KERNEL_KRETPROBE:
		module_put(event->desc->owner);
		lttng_kretprobes_destroy_private(event);
		break;
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_SYSCALL:
		/* Nothing instrumentation-specific to release. */
		break;
	case LTTNG_KERNEL_UPROBE:
		module_put(event->desc->owner);
		lttng_uprobes_destroy_event_private(event);
		break;
	case LTTNG_KERNEL_FUNCTION:	/* Fall-through */
	default:
		WARN_ON_ONCE(1);
	}
	list_del(&event->list);
	lttng_destroy_context(event->ctx);
	kmem_cache_free(event_cache, event);
}
1389
/*
 * Only used internally at session destruction.
 *
 * Release instrumentation-specific resources of an event notifier,
 * unlink it from its group list and free it.
 */
static
void _lttng_event_notifier_destroy(struct lttng_event_notifier *event_notifier)
{
	switch (event_notifier->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
		/* Drop the descriptor reference taken at creation. */
		lttng_event_desc_put(event_notifier->desc);
		break;
	case LTTNG_KERNEL_KPROBE:
		module_put(event_notifier->desc->owner);
		lttng_kprobes_destroy_event_notifier_private(event_notifier);
		break;
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_SYSCALL:
		/* Nothing instrumentation-specific to release. */
		break;
	case LTTNG_KERNEL_UPROBE:
		module_put(event_notifier->desc->owner);
		lttng_uprobes_destroy_event_notifier_private(event_notifier);
		break;
	case LTTNG_KERNEL_KRETPROBE:
	case LTTNG_KERNEL_FUNCTION:
	default:
		WARN_ON_ONCE(1);
	}
	list_del(&event_notifier->list);
	kmem_cache_free(event_notifier_cache, event_notifier);
}
1419
1420 struct lttng_id_tracker *get_tracker(struct lttng_session *session,
1421 enum tracker_type tracker_type)
1422 {
1423 switch (tracker_type) {
1424 case TRACKER_PID:
1425 return &session->pid_tracker;
1426 case TRACKER_VPID:
1427 return &session->vpid_tracker;
1428 case TRACKER_UID:
1429 return &session->uid_tracker;
1430 case TRACKER_VUID:
1431 return &session->vuid_tracker;
1432 case TRACKER_GID:
1433 return &session->gid_tracker;
1434 case TRACKER_VGID:
1435 return &session->vgid_tracker;
1436 default:
1437 WARN_ON_ONCE(1);
1438 return NULL;
1439 }
1440 }
1441
1442 int lttng_session_track_id(struct lttng_session *session,
1443 enum tracker_type tracker_type, int id)
1444 {
1445 struct lttng_id_tracker *tracker;
1446 int ret;
1447
1448 tracker = get_tracker(session, tracker_type);
1449 if (!tracker)
1450 return -EINVAL;
1451 if (id < -1)
1452 return -EINVAL;
1453 mutex_lock(&sessions_mutex);
1454 if (id == -1) {
1455 /* track all ids: destroy tracker. */
1456 lttng_id_tracker_destroy(tracker, true);
1457 ret = 0;
1458 } else {
1459 ret = lttng_id_tracker_add(tracker, id);
1460 }
1461 mutex_unlock(&sessions_mutex);
1462 return ret;
1463 }
1464
1465 int lttng_session_untrack_id(struct lttng_session *session,
1466 enum tracker_type tracker_type, int id)
1467 {
1468 struct lttng_id_tracker *tracker;
1469 int ret;
1470
1471 tracker = get_tracker(session, tracker_type);
1472 if (!tracker)
1473 return -EINVAL;
1474 if (id < -1)
1475 return -EINVAL;
1476 mutex_lock(&sessions_mutex);
1477 if (id == -1) {
1478 /* untrack all ids: replace by empty tracker. */
1479 ret = lttng_id_tracker_empty_set(tracker);
1480 } else {
1481 ret = lttng_id_tracker_del(tracker, id);
1482 }
1483 mutex_unlock(&sessions_mutex);
1484 return ret;
1485 }
1486
/*
 * seq_file start operation for the tracker id listing.
 * Takes the sessions mutex (released in id_list_stop()) and walks the
 * tracker hash table until the entry at position *pos is reached.
 */
static
void *id_list_start(struct seq_file *m, loff_t *pos)
{
	struct lttng_id_tracker *id_tracker = m->private;
	struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
	struct lttng_id_hash_node *e;
	int iter = 0, i;

	mutex_lock(&sessions_mutex);
	if (id_tracker_p) {
		for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
			struct hlist_head *head = &id_tracker_p->id_hash[i];

			lttng_hlist_for_each_entry(e, head, hlist) {
				if (iter++ >= *pos)
					return e;
			}
		}
	} else {
		/* ID tracker disabled. */
		/*
		 * NOTE(review): id_tracker_p is NULL in this branch, so the
		 * "empty tracker" marker returned below is NULL, which
		 * seq_file also treats as end-of-iteration -- confirm this
		 * is the intended behavior for a disabled tracker.
		 */
		if (iter >= *pos && iter == 0) {
			return id_tracker_p; /* empty tracker */
		}
		iter++;
	}
	/* End of list */
	return NULL;
}
1515
/*
 * seq_file next operation. Called with sessions_mutex held.
 * Advances *ppos, then rescans the hash table from the beginning up to
 * the new position.
 */
static
void *id_list_next(struct seq_file *m, void *p, loff_t *ppos)
{
	struct lttng_id_tracker *id_tracker = m->private;
	struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
	struct lttng_id_hash_node *e;
	int iter = 0, i;

	(*ppos)++;
	if (id_tracker_p) {
		for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
			struct hlist_head *head = &id_tracker_p->id_hash[i];

			lttng_hlist_for_each_entry(e, head, hlist) {
				if (iter++ >= *ppos)
					return e;
			}
		}
	} else {
		/* ID tracker disabled. */
		/*
		 * NOTE(review): *ppos was just incremented, so with
		 * iter == 0 this condition never holds -- confirm the
		 * disabled tracker is meant to yield its single entry
		 * from id_list_start() only.
		 */
		if (iter >= *ppos && iter == 0)
			return p; /* empty tracker */
		iter++;
	}

	/* End of list */
	return NULL;
}
1545
/*
 * seq_file stop operation: releases the sessions mutex taken in
 * id_list_start().
 */
static
void id_list_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&sessions_mutex);
}
1551
/*
 * seq_file show operation: print one tracked id in the session listing
 * format. When p is the tracker pointer itself (disabled tracker
 * marker from id_list_start()), print -1 instead of a tracked id.
 */
static
int id_list_show(struct seq_file *m, void *p)
{
	struct lttng_id_tracker *id_tracker = m->private;
	struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
	int id;

	if (p == id_tracker_p) {
		/* Tracker disabled. */
		id = -1;
	} else {
		const struct lttng_id_hash_node *e = p;

		id = lttng_id_tracker_get_node_id(e);
	}
	/* Output field name depends on the tracker type. */
	switch (id_tracker->tracker_type) {
	case TRACKER_PID:
		seq_printf(m,	"process { pid = %d; };\n", id);
		break;
	case TRACKER_VPID:
		seq_printf(m,	"process { vpid = %d; };\n", id);
		break;
	case TRACKER_UID:
		seq_printf(m,	"user { uid = %d; };\n", id);
		break;
	case TRACKER_VUID:
		seq_printf(m,	"user { vuid = %d; };\n", id);
		break;
	case TRACKER_GID:
		seq_printf(m,	"group { gid = %d; };\n", id);
		break;
	case TRACKER_VGID:
		seq_printf(m,	"group { vgid = %d; };\n", id);
		break;
	default:
		seq_printf(m,	"UNKNOWN { field = %d };\n", id);
	}
	return 0;
}
1591
/* seq_file operations backing the tracker ids listing file. */
static
const struct seq_operations lttng_tracker_ids_list_seq_ops = {
	.start = id_list_start,
	.next = id_list_next,
	.stop = id_list_stop,
	.show = id_list_show,
};
1599
/* Open a tracker ids listing file as a seq_file. */
static
int lttng_tracker_ids_list_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &lttng_tracker_ids_list_seq_ops);
}
1605
/*
 * Release a tracker ids listing file: tear down the seq_file and drop
 * the session file reference taken in lttng_session_list_tracker_ids().
 */
static
int lttng_tracker_ids_list_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	struct lttng_id_tracker *id_tracker = m->private;
	int ret;

	WARN_ON_ONCE(!id_tracker);
	ret = seq_release(inode, file);
	if (!ret)
		fput(id_tracker->session->file);
	return ret;
}
1619
/* File operations for the anonymous tracker ids listing file. */
const struct file_operations lttng_tracker_ids_list_fops = {
	.owner = THIS_MODULE,
	.open = lttng_tracker_ids_list_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = lttng_tracker_ids_list_release,
};
1627
/*
 * Create an anonymous seq_file-backed file listing the ids tracked by
 * the given tracker type, pin the session file for the listing's
 * lifetime, and install the new file descriptor.
 * Returns the fd number on success, a negative error code on failure.
 */
int lttng_session_list_tracker_ids(struct lttng_session *session,
		enum tracker_type tracker_type)
{
	struct file *tracker_ids_list_file;
	struct seq_file *m;
	int file_fd, ret;

	file_fd = lttng_get_unused_fd();
	if (file_fd < 0) {
		ret = file_fd;
		goto fd_error;
	}

	tracker_ids_list_file = anon_inode_getfile("[lttng_tracker_ids_list]",
					  &lttng_tracker_ids_list_fops,
					  NULL, O_RDWR);
	if (IS_ERR(tracker_ids_list_file)) {
		ret = PTR_ERR(tracker_ids_list_file);
		goto file_error;
	}
	/* Keep the session alive as long as the listing file is open. */
	if (!atomic_long_add_unless(&session->file->f_count, 1, LONG_MAX)) {
		ret = -EOVERFLOW;
		goto refcount_error;
	}
	ret = lttng_tracker_ids_list_fops.open(NULL, tracker_ids_list_file);
	if (ret < 0)
		goto open_error;
	m = tracker_ids_list_file->private_data;

	m->private = get_tracker(session, tracker_type);
	BUG_ON(!m->private);
	fd_install(file_fd, tracker_ids_list_file);

	return file_fd;

open_error:
	/* Undo the session file refcount bump taken above. */
	atomic_long_dec(&session->file->f_count);
refcount_error:
	fput(tracker_ids_list_file);
file_error:
	put_unused_fd(file_fd);
fd_error:
	return ret;
}
1672
1673 /*
1674 * Enabler management.
1675 */
1676 static
1677 int lttng_match_enabler_star_glob(const char *desc_name,
1678 const char *pattern)
1679 {
1680 if (!strutils_star_glob_match(pattern, LTTNG_SIZE_MAX,
1681 desc_name, LTTNG_SIZE_MAX))
1682 return 0;
1683 return 1;
1684 }
1685
/*
 * Exact name match of an enabler name against an event name.
 * Returns 1 on match, 0 otherwise.
 */
static
int lttng_match_enabler_name(const char *desc_name,
		const char *name)
{
	return strcmp(desc_name, name) == 0;
}
1694
/*
 * Check whether an event descriptor matches an enabler.
 * Returns 1 on match, 0 on mismatch, negative error code on an
 * unexpected enabler configuration.
 */
int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
		struct lttng_enabler *enabler)
{
	const char *desc_name, *enabler_name;
	bool compat = false, entry = false;

	enabler_name = enabler->event_param.name;
	switch (enabler->event_param.instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
		desc_name = desc->name;
		/* Match by glob pattern or by exact name. */
		switch (enabler->format_type) {
		case LTTNG_ENABLER_FORMAT_STAR_GLOB:
			return lttng_match_enabler_star_glob(desc_name, enabler_name);
		case LTTNG_ENABLER_FORMAT_NAME:
			return lttng_match_enabler_name(desc_name, enabler_name);
		default:
			return -EINVAL;
		}
		break;
	case LTTNG_KERNEL_SYSCALL:
		desc_name = desc->name;
		/*
		 * Strip the "compat_" and "syscall_entry_"/"syscall_exit_"
		 * prefixes from the descriptor name, recording ABI and
		 * entry/exit direction for filtering below.
		 */
		if (!strncmp(desc_name, "compat_", strlen("compat_"))) {
			desc_name += strlen("compat_");
			compat = true;
		}
		if (!strncmp(desc_name, "syscall_exit_",
				strlen("syscall_exit_"))) {
			desc_name += strlen("syscall_exit_");
		} else if (!strncmp(desc_name, "syscall_entry_",
				strlen("syscall_entry_"))) {
			desc_name += strlen("syscall_entry_");
			entry = true;
		} else {
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
		/* Reject descriptors on the wrong side of entry/exit. */
		switch (enabler->event_param.u.syscall.entryexit) {
		case LTTNG_KERNEL_SYSCALL_ENTRYEXIT:
			break;
		case LTTNG_KERNEL_SYSCALL_ENTRY:
			if (!entry)
				return 0;
			break;
		case LTTNG_KERNEL_SYSCALL_EXIT:
			if (entry)
				return 0;
			break;
		default:
			return -EINVAL;
		}
		/* Reject descriptors of the wrong ABI. */
		switch (enabler->event_param.u.syscall.abi) {
		case LTTNG_KERNEL_SYSCALL_ABI_ALL:
			break;
		case LTTNG_KERNEL_SYSCALL_ABI_NATIVE:
			if (compat)
				return 0;
			break;
		case LTTNG_KERNEL_SYSCALL_ABI_COMPAT:
			if (!compat)
				return 0;
			break;
		default:
			return -EINVAL;
		}
		/* Finally match the stripped name against the enabler. */
		switch (enabler->event_param.u.syscall.match) {
		case LTTNG_KERNEL_SYSCALL_MATCH_NAME:
			switch (enabler->format_type) {
			case LTTNG_ENABLER_FORMAT_STAR_GLOB:
				return lttng_match_enabler_star_glob(desc_name, enabler_name);
			case LTTNG_ENABLER_FORMAT_NAME:
				return lttng_match_enabler_name(desc_name, enabler_name);
			default:
				return -EINVAL;
			}
			break;
		case LTTNG_KERNEL_SYSCALL_MATCH_NR:
			return -EINVAL;	/* Not implemented. */
		default:
			return -EINVAL;
		}
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}
}
1781
1782 static
1783 int lttng_event_enabler_match_event(struct lttng_event_enabler *event_enabler,
1784 struct lttng_event *event)
1785 {
1786 struct lttng_enabler *base_enabler = lttng_event_enabler_as_enabler(
1787 event_enabler);
1788
1789 if (base_enabler->event_param.instrumentation != event->instrumentation)
1790 return 0;
1791 if (lttng_desc_match_enabler(event->desc, base_enabler)
1792 && event->chan == event_enabler->chan)
1793 return 1;
1794 else
1795 return 0;
1796 }
1797
1798 static
1799 int lttng_event_notifier_enabler_match_event_notifier(struct lttng_event_notifier_enabler *event_notifier_enabler,
1800 struct lttng_event_notifier *event_notifier)
1801 {
1802 struct lttng_enabler *base_enabler = lttng_event_notifier_enabler_as_enabler(
1803 event_notifier_enabler);
1804
1805 if (base_enabler->event_param.instrumentation != event_notifier->instrumentation)
1806 return 0;
1807 if (lttng_desc_match_enabler(event_notifier->desc, base_enabler)
1808 && event_notifier->group == event_notifier_enabler->group
1809 && event_notifier->user_token == event_notifier_enabler->base.user_token)
1810 return 1;
1811 else
1812 return 0;
1813 }
1814
1815 static
1816 struct lttng_enabler_ref *lttng_enabler_ref(
1817 struct list_head *enablers_ref_list,
1818 struct lttng_enabler *enabler)
1819 {
1820 struct lttng_enabler_ref *enabler_ref;
1821
1822 list_for_each_entry(enabler_ref, enablers_ref_list, node) {
1823 if (enabler_ref->ref == enabler)
1824 return enabler_ref;
1825 }
1826 return NULL;
1827 }
1828
/*
 * Create an event for each tracepoint probe matching the enabler,
 * unless one with the same descriptor already exists on the channel.
 * Should be called with sessions mutex held.
 */
static
void lttng_create_tracepoint_event_if_missing(struct lttng_event_enabler *event_enabler)
{
	struct lttng_session *session = event_enabler->chan->session;
	struct lttng_probe_desc *probe_desc;
	const struct lttng_event_desc *desc;
	int i;
	struct list_head *probe_list;

	probe_list = lttng_get_probe_list_head();
	/*
	 * For each probe event, if we find that a probe event matches
	 * our enabler, create an associated lttng_event if not
	 * already present.
	 */
	list_for_each_entry(probe_desc, probe_list, head) {
		for (i = 0; i < probe_desc->nr_events; i++) {
			int found = 0;
			struct hlist_head *head;
			struct lttng_event *event;

			desc = probe_desc->event_desc[i];
			if (!lttng_desc_match_enabler(desc,
					lttng_event_enabler_as_enabler(event_enabler)))
				continue;

			/*
			 * Check if already created.
			 */
			head = utils_borrow_hash_table_bucket(
				session->events_ht.table, LTTNG_EVENT_HT_SIZE,
				desc->name);
			lttng_hlist_for_each_entry(event, head, hlist) {
				if (event->desc == desc
						&& event->chan == event_enabler->chan)
					found = 1;
			}
			if (found)
				continue;

			/*
			 * We need to create an event for this
			 * event probe.
			 */
			event = _lttng_event_create(event_enabler->chan,
					NULL, NULL, desc,
					LTTNG_KERNEL_TRACEPOINT);
			/*
			 * NOTE(review): the notifier counterpart checks
			 * IS_ERR() on _lttng_event_notifier_create() --
			 * confirm _lttng_event_create() reports errors with
			 * NULL rather than ERR_PTR().
			 */
			if (!event) {
				printk(KERN_INFO "LTTng: Unable to create event %s\n",
					probe_desc->event_desc[i]->name);
			}
		}
	}
}
1883
1884 static
1885 void lttng_create_tracepoint_event_notifier_if_missing(struct lttng_event_notifier_enabler *event_notifier_enabler)
1886 {
1887 struct lttng_event_notifier_group *event_notifier_group = event_notifier_enabler->group;
1888 struct lttng_probe_desc *probe_desc;
1889 const struct lttng_event_desc *desc;
1890 int i;
1891 struct list_head *probe_list;
1892
1893 probe_list = lttng_get_probe_list_head();
1894 /*
1895 * For each probe event, if we find that a probe event matches
1896 * our enabler, create an associated lttng_event_notifier if not
1897 * already present.
1898 */
1899 list_for_each_entry(probe_desc, probe_list, head) {
1900 for (i = 0; i < probe_desc->nr_events; i++) {
1901 int found = 0;
1902 struct hlist_head *head;
1903 struct lttng_event_notifier *event_notifier;
1904
1905 desc = probe_desc->event_desc[i];
1906 if (!lttng_desc_match_enabler(desc,
1907 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)))
1908 continue;
1909
1910 /*
1911 * Check if already created.
1912 */
1913 head = utils_borrow_hash_table_bucket(
1914 event_notifier_group->event_notifiers_ht.table,
1915 LTTNG_EVENT_NOTIFIER_HT_SIZE, desc->name);
1916 lttng_hlist_for_each_entry(event_notifier, head, hlist) {
1917 if (event_notifier->desc == desc
1918 && event_notifier->user_token == event_notifier_enabler->base.user_token)
1919 found = 1;
1920 }
1921 if (found)
1922 continue;
1923
1924 /*
1925 * We need to create a event_notifier for this event probe.
1926 */
1927 event_notifier = _lttng_event_notifier_create(desc,
1928 event_notifier_enabler->base.user_token,
1929 event_notifier_group, NULL, NULL,
1930 LTTNG_KERNEL_TRACEPOINT);
1931 if (IS_ERR(event_notifier)) {
1932 printk(KERN_INFO "Unable to create event_notifier %s\n",
1933 probe_desc->event_desc[i]->name);
1934 }
1935 }
1936 }
1937 }
1938
1939 static
1940 void lttng_create_syscall_event_if_missing(struct lttng_event_enabler *event_enabler)
1941 {
1942 int ret;
1943
1944 ret = lttng_syscalls_register_event(event_enabler->chan, NULL);
1945 WARN_ON_ONCE(ret);
1946 }
1947
/*
 * Register syscall instrumentation for the notifier enabler and create
 * the matching event notifiers; failures are unexpected and only
 * warned about.
 */
static
void lttng_create_syscall_event_notifier_if_missing(struct lttng_event_notifier_enabler *event_notifier_enabler)
{
	int ret;

	ret = lttng_syscalls_register_event_notifier(event_notifier_enabler, NULL);
	WARN_ON_ONCE(ret);
	/* "syscals" typo matches the function's declaration; keep as-is. */
	ret = lttng_syscals_create_matching_event_notifiers(event_notifier_enabler, NULL);
	WARN_ON_ONCE(ret);
}
1958
1959 /*
1960 * Create struct lttng_event if it is missing and present in the list of
1961 * tracepoint probes.
1962 * Should be called with sessions mutex held.
1963 */
1964 static
1965 void lttng_create_event_if_missing(struct lttng_event_enabler *event_enabler)
1966 {
1967 switch (event_enabler->base.event_param.instrumentation) {
1968 case LTTNG_KERNEL_TRACEPOINT:
1969 lttng_create_tracepoint_event_if_missing(event_enabler);
1970 break;
1971 case LTTNG_KERNEL_SYSCALL:
1972 lttng_create_syscall_event_if_missing(event_enabler);
1973 break;
1974 default:
1975 WARN_ON_ONCE(1);
1976 break;
1977 }
1978 }
1979
/*
 * Create events associated with an event_enabler (if not already present),
 * and add backward reference from the event to the enabler.
 * Should be called with sessions mutex held.
 * Returns 0 on success, -ENOMEM if a back-reference allocation fails.
 */
static
int lttng_event_enabler_ref_events(struct lttng_event_enabler *event_enabler)
{
	struct lttng_channel *chan = event_enabler->chan;
	struct lttng_session *session = event_enabler->chan->session;
	struct lttng_enabler *base_enabler = lttng_event_enabler_as_enabler(event_enabler);
	struct lttng_event *event;

	/*
	 * A "*" name-match syscall enabler covering all entry/exit events
	 * on all ABIs toggles the channel-wide syscall_all flag.
	 */
	if (base_enabler->event_param.instrumentation == LTTNG_KERNEL_SYSCALL &&
			base_enabler->event_param.u.syscall.entryexit == LTTNG_KERNEL_SYSCALL_ENTRYEXIT &&
			base_enabler->event_param.u.syscall.abi == LTTNG_KERNEL_SYSCALL_ABI_ALL &&
			base_enabler->event_param.u.syscall.match == LTTNG_KERNEL_SYSCALL_MATCH_NAME &&
			!strcmp(base_enabler->event_param.name, "*")) {
		if (base_enabler->enabled)
			WRITE_ONCE(chan->syscall_all, 1);
		else
			WRITE_ONCE(chan->syscall_all, 0);
	}

	/* First ensure that probe events are created for this enabler. */
	lttng_create_event_if_missing(event_enabler);

	/* For each event matching event_enabler in session event list. */
	list_for_each_entry(event, &session->events, list) {
		struct lttng_enabler_ref *enabler_ref;

		if (!lttng_event_enabler_match_event(event_enabler, event))
			continue;
		enabler_ref = lttng_enabler_ref(&event->enablers_ref_head,
			lttng_event_enabler_as_enabler(event_enabler));
		if (!enabler_ref) {
			/*
			 * If no backward ref, create it.
			 * Add backward ref from event to event_enabler.
			 */
			enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
			if (!enabler_ref)
				return -ENOMEM;
			enabler_ref->ref = lttng_event_enabler_as_enabler(event_enabler);
			list_add(&enabler_ref->node,
				&event->enablers_ref_head);
		}

		/*
		 * Link filter bytecodes if not linked yet.
		 */
		lttng_enabler_link_bytecode(event->desc,
			lttng_static_ctx,
			&event->filter_bytecode_runtime_head,
			lttng_event_enabler_as_enabler(event_enabler));

		/* TODO: merge event context. */
	}
	return 0;
}
2040
2041 /*
2042 * Create struct lttng_event_notifier if it is missing and present in the list of
2043 * tracepoint probes.
2044 * Should be called with sessions mutex held.
2045 */
2046 static
2047 void lttng_create_event_notifier_if_missing(struct lttng_event_notifier_enabler *event_notifier_enabler)
2048 {
2049 switch (event_notifier_enabler->base.event_param.instrumentation) {
2050 case LTTNG_KERNEL_TRACEPOINT:
2051 lttng_create_tracepoint_event_notifier_if_missing(event_notifier_enabler);
2052 break;
2053 case LTTNG_KERNEL_SYSCALL:
2054 lttng_create_syscall_event_notifier_if_missing(event_notifier_enabler);
2055 break;
2056 default:
2057 WARN_ON_ONCE(1);
2058 break;
2059 }
2060 }
2061
/*
 * Create event_notifiers associated with an event_notifier enabler (if
 * not already present), and add backward references from the notifiers
 * to the enabler.
 * Returns 0 on success, -ENOMEM if a back-reference allocation fails.
 */
static
int lttng_event_notifier_enabler_ref_event_notifiers(
		struct lttng_event_notifier_enabler *event_notifier_enabler)
{
	struct lttng_event_notifier_group *event_notifier_group = event_notifier_enabler->group;
	struct lttng_enabler *base_enabler = lttng_event_notifier_enabler_as_enabler(event_notifier_enabler);
	struct lttng_event_notifier *event_notifier;

	/*
	 * A "*" name-match syscall enabler covering all ABIs toggles the
	 * group-wide syscall entry/exit flags according to its
	 * entry/exit selection.
	 */
	if (base_enabler->event_param.instrumentation == LTTNG_KERNEL_SYSCALL &&
			base_enabler->event_param.u.syscall.abi == LTTNG_KERNEL_SYSCALL_ABI_ALL &&
			base_enabler->event_param.u.syscall.match == LTTNG_KERNEL_SYSCALL_MATCH_NAME &&
			!strcmp(base_enabler->event_param.name, "*")) {

		int enabled = base_enabler->enabled;
		enum lttng_kernel_syscall_entryexit entryexit = base_enabler->event_param.u.syscall.entryexit;

		if (entryexit == LTTNG_KERNEL_SYSCALL_ENTRY || entryexit == LTTNG_KERNEL_SYSCALL_ENTRYEXIT)
			WRITE_ONCE(event_notifier_group->syscall_all_entry, enabled);

		if (entryexit == LTTNG_KERNEL_SYSCALL_EXIT || entryexit == LTTNG_KERNEL_SYSCALL_ENTRYEXIT)
			WRITE_ONCE(event_notifier_group->syscall_all_exit, enabled);

	}

	/* First ensure that probe event_notifiers are created for this enabler. */
	lttng_create_event_notifier_if_missing(event_notifier_enabler);

	/* Link the created event_notifier with its associated enabler. */
	list_for_each_entry(event_notifier, &event_notifier_group->event_notifiers_head, list) {
		struct lttng_enabler_ref *enabler_ref;

		if (!lttng_event_notifier_enabler_match_event_notifier(event_notifier_enabler, event_notifier))
			continue;

		enabler_ref = lttng_enabler_ref(&event_notifier->enablers_ref_head,
			lttng_event_notifier_enabler_as_enabler(event_notifier_enabler));
		if (!enabler_ref) {
			/*
			 * If no backward ref, create it.
			 * Add backward ref from event_notifier to enabler.
			 */
			enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
			if (!enabler_ref)
				return -ENOMEM;

			enabler_ref->ref = lttng_event_notifier_enabler_as_enabler(
				event_notifier_enabler);
			list_add(&enabler_ref->node,
				&event_notifier->enablers_ref_head);
		}

		/*
		 * Link filter bytecodes if not linked yet.
		 */
		lttng_enabler_link_bytecode(event_notifier->desc,
			lttng_static_ctx, &event_notifier->filter_bytecode_runtime_head,
			lttng_event_notifier_enabler_as_enabler(event_notifier_enabler));
	}
	return 0;
}
2125
/*
 * Called at module load: connect the probe on all enablers matching
 * this event.
 * Called with sessions lock held.
 * Always returns 0.
 */
int lttng_fix_pending_events(void)
{
	struct lttng_session *session;

	/* Re-run lazy enabler synchronization on every session. */
	list_for_each_entry(session, &sessions, list)
		lttng_session_lazy_sync_event_enablers(session);
	return 0;
}
2139
2140 static bool lttng_event_notifier_group_has_active_event_notifiers(
2141 struct lttng_event_notifier_group *event_notifier_group)
2142 {
2143 struct lttng_event_notifier_enabler *event_notifier_enabler;
2144
2145 list_for_each_entry(event_notifier_enabler, &event_notifier_group->enablers_head,
2146 node) {
2147 if (event_notifier_enabler->base.enabled)
2148 return true;
2149 }
2150 return false;
2151 }
2152
2153 bool lttng_event_notifier_active(void)
2154 {
2155 struct lttng_event_notifier_group *event_notifier_group;
2156
2157 list_for_each_entry(event_notifier_group, &event_notifier_groups, node) {
2158 if (lttng_event_notifier_group_has_active_event_notifiers(event_notifier_group))
2159 return true;
2160 }
2161 return false;
2162 }
2163
/*
 * Synchronize the enabler state of every event notifier group.
 * Always returns 0.
 */
int lttng_fix_pending_event_notifiers(void)
{
	struct lttng_event_notifier_group *event_notifier_group;

	list_for_each_entry(event_notifier_group, &event_notifier_groups, node)
		lttng_event_notifier_group_sync_enablers(event_notifier_group);
	return 0;
}
2172
2173 struct lttng_event_enabler *lttng_event_enabler_create(
2174 enum lttng_enabler_format_type format_type,
2175 struct lttng_kernel_event *event_param,
2176 struct lttng_channel *chan)
2177 {
2178 struct lttng_event_enabler *event_enabler;
2179
2180 event_enabler = kzalloc(sizeof(*event_enabler), GFP_KERNEL);
2181 if (!event_enabler)
2182 return NULL;
2183 event_enabler->base.format_type = format_type;
2184 INIT_LIST_HEAD(&event_enabler->base.filter_bytecode_head);
2185 memcpy(&event_enabler->base.event_param, event_param,
2186 sizeof(event_enabler->base.event_param));
2187 event_enabler->chan = chan;
2188 /* ctx left NULL */
2189 event_enabler->base.enabled = 0;
2190 event_enabler->base.evtype = LTTNG_TYPE_ENABLER;
2191 mutex_lock(&sessions_mutex);
2192 list_add(&event_enabler->node, &event_enabler->chan->session->enablers_head);
2193 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
2194 mutex_unlock(&sessions_mutex);
2195 return event_enabler;
2196 }
2197
2198 int lttng_event_enabler_enable(struct lttng_event_enabler *event_enabler)
2199 {
2200 mutex_lock(&sessions_mutex);
2201 lttng_event_enabler_as_enabler(event_enabler)->enabled = 1;
2202 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
2203 mutex_unlock(&sessions_mutex);
2204 return 0;
2205 }
2206
2207 int lttng_event_enabler_disable(struct lttng_event_enabler *event_enabler)
2208 {
2209 mutex_lock(&sessions_mutex);
2210 lttng_event_enabler_as_enabler(event_enabler)->enabled = 0;
2211 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
2212 mutex_unlock(&sessions_mutex);
2213 return 0;
2214 }
2215
2216 static
2217 int lttng_enabler_attach_filter_bytecode(struct lttng_enabler *enabler,
2218 struct lttng_kernel_filter_bytecode __user *bytecode)
2219 {
2220 struct lttng_bytecode_node *bytecode_node;
2221 uint32_t bytecode_len;
2222 int ret;
2223
2224 ret = get_user(bytecode_len, &bytecode->len);
2225 if (ret)
2226 return ret;
2227 bytecode_node = kzalloc(sizeof(*bytecode_node) + bytecode_len,
2228 GFP_KERNEL);
2229 if (!bytecode_node)
2230 return -ENOMEM;
2231 ret = copy_from_user(&bytecode_node->bc, bytecode,
2232 sizeof(*bytecode) + bytecode_len);
2233 if (ret)
2234 goto error_free;
2235
2236 bytecode_node->type = LTTNG_BYTECODE_NODE_TYPE_FILTER;
2237 bytecode_node->enabler = enabler;
2238 /* Enforce length based on allocated size */
2239 bytecode_node->bc.len = bytecode_len;
2240 list_add_tail(&bytecode_node->node, &enabler->filter_bytecode_head);
2241
2242 return 0;
2243
2244 error_free:
2245 kfree(bytecode_node);
2246 return ret;
2247 }
2248
2249 int lttng_event_enabler_attach_filter_bytecode(struct lttng_event_enabler *event_enabler,
2250 struct lttng_kernel_filter_bytecode __user *bytecode)
2251 {
2252 int ret;
2253 ret = lttng_enabler_attach_filter_bytecode(
2254 lttng_event_enabler_as_enabler(event_enabler), bytecode);
2255 if (ret)
2256 goto error;
2257
2258 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
2259 return 0;
2260
2261 error:
2262 return ret;
2263 }
2264
2265 int lttng_event_add_callsite(struct lttng_event *event,
2266 struct lttng_kernel_event_callsite __user *callsite)
2267 {
2268
2269 switch (event->instrumentation) {
2270 case LTTNG_KERNEL_UPROBE:
2271 return lttng_uprobes_event_add_callsite(event, callsite);
2272 default:
2273 return -EINVAL;
2274 }
2275 }
2276
2277 int lttng_event_enabler_attach_context(struct lttng_event_enabler *event_enabler,
2278 struct lttng_kernel_context *context_param)
2279 {
2280 return -ENOSYS;
2281 }
2282
2283 static
2284 void lttng_enabler_destroy(struct lttng_enabler *enabler)
2285 {
2286 struct lttng_bytecode_node *filter_node, *tmp_filter_node;
2287
2288 /* Destroy filter bytecode */
2289 list_for_each_entry_safe(filter_node, tmp_filter_node,
2290 &enabler->filter_bytecode_head, node) {
2291 kfree(filter_node);
2292 }
2293 }
2294
2295 static
2296 void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler)
2297 {
2298 lttng_enabler_destroy(lttng_event_enabler_as_enabler(event_enabler));
2299
2300 /* Destroy contexts */
2301 lttng_destroy_context(event_enabler->ctx);
2302
2303 list_del(&event_enabler->node);
2304 kfree(event_enabler);
2305 }
2306
2307 struct lttng_event_notifier_enabler *lttng_event_notifier_enabler_create(
2308 struct lttng_event_notifier_group *event_notifier_group,
2309 enum lttng_enabler_format_type format_type,
2310 struct lttng_kernel_event_notifier *event_notifier_param)
2311 {
2312 struct lttng_event_notifier_enabler *event_notifier_enabler;
2313
2314 event_notifier_enabler = kzalloc(sizeof(*event_notifier_enabler), GFP_KERNEL);
2315 if (!event_notifier_enabler)
2316 return NULL;
2317
2318 event_notifier_enabler->base.format_type = format_type;
2319 INIT_LIST_HEAD(&event_notifier_enabler->base.filter_bytecode_head);
2320
2321 memcpy(&event_notifier_enabler->base.event_param, &event_notifier_param->event,
2322 sizeof(event_notifier_enabler->base.event_param));
2323 event_notifier_enabler->base.evtype = LTTNG_TYPE_ENABLER;
2324
2325 event_notifier_enabler->base.enabled = 0;
2326 event_notifier_enabler->base.user_token = event_notifier_param->event.token;
2327 event_notifier_enabler->group = event_notifier_group;
2328
2329 mutex_lock(&sessions_mutex);
2330 list_add(&event_notifier_enabler->node, &event_notifier_enabler->group->enablers_head);
2331 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2332
2333 mutex_unlock(&sessions_mutex);
2334
2335 return event_notifier_enabler;
2336 }
2337
2338 int lttng_event_notifier_enabler_enable(
2339 struct lttng_event_notifier_enabler *event_notifier_enabler)
2340 {
2341 mutex_lock(&sessions_mutex);
2342 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 1;
2343 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2344 mutex_unlock(&sessions_mutex);
2345 return 0;
2346 }
2347
2348 int lttng_event_notifier_enabler_disable(
2349 struct lttng_event_notifier_enabler *event_notifier_enabler)
2350 {
2351 mutex_lock(&sessions_mutex);
2352 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 0;
2353 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2354 mutex_unlock(&sessions_mutex);
2355 return 0;
2356 }
2357
2358 int lttng_event_notifier_enabler_attach_filter_bytecode(
2359 struct lttng_event_notifier_enabler *event_notifier_enabler,
2360 struct lttng_kernel_filter_bytecode __user *bytecode)
2361 {
2362 int ret;
2363
2364 ret = lttng_enabler_attach_filter_bytecode(
2365 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler),
2366 bytecode);
2367 if (ret)
2368 goto error;
2369
2370 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2371 return 0;
2372
2373 error:
2374 return ret;
2375 }
2376
2377 int lttng_event_notifier_add_callsite(struct lttng_event_notifier *event_notifier,
2378 struct lttng_kernel_event_callsite __user *callsite)
2379 {
2380
2381 switch (event_notifier->instrumentation) {
2382 case LTTNG_KERNEL_UPROBE:
2383 return lttng_uprobes_event_notifier_add_callsite(event_notifier,
2384 callsite);
2385 default:
2386 return -EINVAL;
2387 }
2388 }
2389
2390 int lttng_event_notifier_enabler_attach_context(
2391 struct lttng_event_notifier_enabler *event_notifier_enabler,
2392 struct lttng_kernel_context *context_param)
2393 {
2394 return -ENOSYS;
2395 }
2396
2397 static
2398 void lttng_event_notifier_enabler_destroy(
2399 struct lttng_event_notifier_enabler *event_notifier_enabler)
2400 {
2401 if (!event_notifier_enabler) {
2402 return;
2403 }
2404
2405 list_del(&event_notifier_enabler->node);
2406
2407 lttng_enabler_destroy(lttng_event_notifier_enabler_as_enabler(event_notifier_enabler));
2408 kfree(event_notifier_enabler);
2409 }
2410
2411 /*
2412 * lttng_session_sync_event_enablers should be called just before starting a
2413 * session.
2414 * Should be called with sessions mutex held.
2415 */
2416 static
2417 void lttng_session_sync_event_enablers(struct lttng_session *session)
2418 {
2419 struct lttng_event_enabler *event_enabler;
2420 struct lttng_event *event;
2421
2422 list_for_each_entry(event_enabler, &session->enablers_head, node)
2423 lttng_event_enabler_ref_events(event_enabler);
2424 /*
2425 * For each event, if at least one of its enablers is enabled,
2426 * and its channel and session transient states are enabled, we
2427 * enable the event, else we disable it.
2428 */
2429 list_for_each_entry(event, &session->events, list) {
2430 struct lttng_enabler_ref *enabler_ref;
2431 struct lttng_bytecode_runtime *runtime;
2432 int enabled = 0, has_enablers_without_bytecode = 0;
2433
2434 switch (event->instrumentation) {
2435 case LTTNG_KERNEL_TRACEPOINT:
2436 case LTTNG_KERNEL_SYSCALL:
2437 /* Enable events */
2438 list_for_each_entry(enabler_ref,
2439 &event->enablers_ref_head, node) {
2440 if (enabler_ref->ref->enabled) {
2441 enabled = 1;
2442 break;
2443 }
2444 }
2445 break;
2446 default:
2447 /* Not handled with lazy sync. */
2448 continue;
2449 }
2450 /*
2451 * Enabled state is based on union of enablers, with
2452 * intesection of session and channel transient enable
2453 * states.
2454 */
2455 enabled = enabled && session->tstate && event->chan->tstate;
2456
2457 WRITE_ONCE(event->enabled, enabled);
2458 /*
2459 * Sync tracepoint registration with event enabled
2460 * state.
2461 */
2462 if (enabled) {
2463 register_event(event);
2464 } else {
2465 _lttng_event_unregister(event);
2466 }
2467
2468 /* Check if has enablers without bytecode enabled */
2469 list_for_each_entry(enabler_ref,
2470 &event->enablers_ref_head, node) {
2471 if (enabler_ref->ref->enabled
2472 && list_empty(&enabler_ref->ref->filter_bytecode_head)) {
2473 has_enablers_without_bytecode = 1;
2474 break;
2475 }
2476 }
2477 event->has_enablers_without_bytecode =
2478 has_enablers_without_bytecode;
2479
2480 /* Enable filters */
2481 list_for_each_entry(runtime,
2482 &event->filter_bytecode_runtime_head, node)
2483 lttng_filter_sync_state(runtime);
2484 }
2485 }
2486
2487 /*
2488 * Apply enablers to session events, adding events to session if need
2489 * be. It is required after each modification applied to an active
2490 * session, and right before session "start".
2491 * "lazy" sync means we only sync if required.
2492 * Should be called with sessions mutex held.
2493 */
2494 static
2495 void lttng_session_lazy_sync_event_enablers(struct lttng_session *session)
2496 {
2497 /* We can skip if session is not active */
2498 if (!session->active)
2499 return;
2500 lttng_session_sync_event_enablers(session);
2501 }
2502
/*
 * Synchronize the enablers of an event notifier group with the group's
 * event notifiers: create/reference notifiers, update their enabled
 * state, and sync their filter runtimes.
 * Should be called with sessions mutex held.
 */
static
void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group)
{
	struct lttng_event_notifier_enabler *event_notifier_enabler;
	struct lttng_event_notifier *event_notifier;

	/* First pass: let each enabler reference the notifiers it matches. */
	list_for_each_entry(event_notifier_enabler, &event_notifier_group->enablers_head, node)
		lttng_event_notifier_enabler_ref_event_notifiers(event_notifier_enabler);

	/*
	 * For each event_notifier, if at least one of its enablers is enabled,
	 * we enable the event_notifier, else we disable it.
	 */
	list_for_each_entry(event_notifier, &event_notifier_group->event_notifiers_head, list) {
		struct lttng_enabler_ref *enabler_ref;
		struct lttng_bytecode_runtime *runtime;
		int enabled = 0, has_enablers_without_bytecode = 0;

		switch (event_notifier->instrumentation) {
		case LTTNG_KERNEL_TRACEPOINT:
		case LTTNG_KERNEL_SYSCALL:
			/* Enabled if any backward-referenced enabler is enabled. */
			list_for_each_entry(enabler_ref,
					&event_notifier->enablers_ref_head, node) {
				if (enabler_ref->ref->enabled) {
					enabled = 1;
					break;
				}
			}
			break;
		default:
			/* Not handled with sync. */
			continue;
		}

		WRITE_ONCE(event_notifier->enabled, enabled);
		/*
		 * Sync tracepoint registration with event_notifier enabled
		 * state.
		 */
		if (enabled) {
			if (!event_notifier->registered)
				register_event_notifier(event_notifier);
		} else {
			if (event_notifier->registered)
				_lttng_event_notifier_unregister(event_notifier);
		}

		/* Check if has enablers without bytecode enabled */
		list_for_each_entry(enabler_ref,
				&event_notifier->enablers_ref_head, node) {
			if (enabler_ref->ref->enabled
					&& list_empty(&enabler_ref->ref->filter_bytecode_head)) {
				has_enablers_without_bytecode = 1;
				break;
			}
		}
		event_notifier->has_enablers_without_bytecode =
			has_enablers_without_bytecode;

		/* Propagate enabler state to each linked filter runtime. */
		list_for_each_entry(runtime,
				&event_notifier->filter_bytecode_runtime_head, node)
			lttng_filter_sync_state(runtime);
	}
}
2569
2570 /*
2571 * Serialize at most one packet worth of metadata into a metadata
2572 * channel.
2573 * We grab the metadata cache mutex to get exclusive access to our metadata
2574 * buffer and to the metadata cache. Exclusive access to the metadata buffer
2575 * allows us to do racy operations such as looking for remaining space left in
2576 * packet and write, since mutual exclusion protects us from concurrent writes.
2577 * Mutual exclusion on the metadata cache allow us to read the cache content
2578 * without racing against reallocation of the cache by updates.
2579 * Returns the number of bytes written in the channel, 0 if no data
2580 * was written and a negative value on error.
2581 */
2582 int lttng_metadata_output_channel(struct lttng_metadata_stream *stream,
2583 struct channel *chan, bool *coherent)
2584 {
2585 struct lib_ring_buffer_ctx ctx;
2586 int ret = 0;
2587 size_t len, reserve_len;
2588
2589 /*
2590 * Ensure we support mutiple get_next / put sequences followed by
2591 * put_next. The metadata cache lock protects reading the metadata
2592 * cache. It can indeed be read concurrently by "get_next_subbuf" and
2593 * "flush" operations on the buffer invoked by different processes.
2594 * Moreover, since the metadata cache memory can be reallocated, we
2595 * need to have exclusive access against updates even though we only
2596 * read it.
2597 */
2598 mutex_lock(&stream->metadata_cache->lock);
2599 WARN_ON(stream->metadata_in < stream->metadata_out);
2600 if (stream->metadata_in != stream->metadata_out)
2601 goto end;
2602
2603 /* Metadata regenerated, change the version. */
2604 if (stream->metadata_cache->version != stream->version)
2605 stream->version = stream->metadata_cache->version;
2606
2607 len = stream->metadata_cache->metadata_written -
2608 stream->metadata_in;
2609 if (!len)
2610 goto end;
2611 reserve_len = min_t(size_t,
2612 stream->transport->ops.packet_avail_size(chan),
2613 len);
2614 lib_ring_buffer_ctx_init(&ctx, chan, NULL, reserve_len,
2615 sizeof(char), -1);
2616 /*
2617 * If reservation failed, return an error to the caller.
2618 */
2619 ret = stream->transport->ops.event_reserve(&ctx, 0);
2620 if (ret != 0) {
2621 printk(KERN_WARNING "LTTng: Metadata event reservation failed\n");
2622 stream->coherent = false;
2623 goto end;
2624 }
2625 stream->transport->ops.event_write(&ctx,
2626 stream->metadata_cache->data + stream->metadata_in,
2627 reserve_len);
2628 stream->transport->ops.event_commit(&ctx);
2629 stream->metadata_in += reserve_len;
2630 if (reserve_len < len)
2631 stream->coherent = false;
2632 else
2633 stream->coherent = true;
2634 ret = reserve_len;
2635
2636 end:
2637 if (coherent)
2638 *coherent = stream->coherent;
2639 mutex_unlock(&stream->metadata_cache->lock);
2640 return ret;
2641 }
2642
/*
 * Begin a metadata transaction: the first (outermost) producer takes the
 * metadata cache lock; nested begin calls only increment the producing
 * refcount. Paired with lttng_metadata_end().
 */
static
void lttng_metadata_begin(struct lttng_session *session)
{
	if (atomic_inc_return(&session->metadata_cache->producing) == 1)
		mutex_lock(&session->metadata_cache->lock);
}
2649
/*
 * End a metadata transaction: when the last nested producer finishes,
 * wake up readers blocked on the metadata streams and release the cache
 * lock taken by lttng_metadata_begin().
 */
static
void lttng_metadata_end(struct lttng_session *session)
{
	/* Unbalanced end without a matching begin is a bug. */
	WARN_ON_ONCE(!atomic_read(&session->metadata_cache->producing));
	if (atomic_dec_return(&session->metadata_cache->producing) == 0) {
		struct lttng_metadata_stream *stream;

		/* New metadata content is available: wake up waiting readers. */
		list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list)
			wake_up_interruptible(&stream->read_wait);
		mutex_unlock(&session->metadata_cache->lock);
	}
}
2662
2663 /*
2664 * Write the metadata to the metadata cache.
2665 * Must be called with sessions_mutex held.
2666 * The metadata cache lock protects us from concurrent read access from
2667 * thread outputting metadata content to ring buffer.
2668 * The content of the printf is printed as a single atomic metadata
2669 * transaction.
2670 */
2671 int lttng_metadata_printf(struct lttng_session *session,
2672 const char *fmt, ...)
2673 {
2674 char *str;
2675 size_t len;
2676 va_list ap;
2677
2678 WARN_ON_ONCE(!LTTNG_READ_ONCE(session->active));
2679
2680 va_start(ap, fmt);
2681 str = kvasprintf(GFP_KERNEL, fmt, ap);
2682 va_end(ap);
2683 if (!str)
2684 return -ENOMEM;
2685
2686 len = strlen(str);
2687 WARN_ON_ONCE(!atomic_read(&session->metadata_cache->producing));
2688 if (session->metadata_cache->metadata_written + len >
2689 session->metadata_cache->cache_alloc) {
2690 char *tmp_cache_realloc;
2691 unsigned int tmp_cache_alloc_size;
2692
2693 tmp_cache_alloc_size = max_t(unsigned int,
2694 session->metadata_cache->cache_alloc + len,
2695 session->metadata_cache->cache_alloc << 1);
2696 tmp_cache_realloc = vzalloc(tmp_cache_alloc_size);
2697 if (!tmp_cache_realloc)
2698 goto err;
2699 if (session->metadata_cache->data) {
2700 memcpy(tmp_cache_realloc,
2701 session->metadata_cache->data,
2702 session->metadata_cache->cache_alloc);
2703 vfree(session->metadata_cache->data);
2704 }
2705
2706 session->metadata_cache->cache_alloc = tmp_cache_alloc_size;
2707 session->metadata_cache->data = tmp_cache_realloc;
2708 }
2709 memcpy(session->metadata_cache->data +
2710 session->metadata_cache->metadata_written,
2711 str, len);
2712 session->metadata_cache->metadata_written += len;
2713 kfree(str);
2714
2715 return 0;
2716
2717 err:
2718 kfree(str);
2719 return -ENOMEM;
2720 }
2721
/*
 * Emit `nesting` tab characters to indent the current metadata line.
 * Returns 0 on success, or the first error from lttng_metadata_printf().
 */
static
int print_tabs(struct lttng_session *session, size_t nesting)
{
	size_t i;

	for (i = 0; i < nesting; i++) {
		int ret;

		ret = lttng_metadata_printf(session, "	");
		if (ret) {
			return ret;
		}
	}
	return 0;
}
2737
2738 static
2739 int lttng_field_name_statedump(struct lttng_session *session,
2740 const struct lttng_event_field *field,
2741 size_t nesting)
2742 {
2743 return lttng_metadata_printf(session, " _%s;\n", field->name);
2744 }
2745
2746 static
2747 int _lttng_integer_type_statedump(struct lttng_session *session,
2748 const struct lttng_type *type,
2749 size_t nesting)
2750 {
2751 int ret;
2752
2753 WARN_ON_ONCE(type->atype != atype_integer);
2754 ret = print_tabs(session, nesting);
2755 if (ret)
2756 return ret;
2757 ret = lttng_metadata_printf(session,
2758 "integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s }",
2759 type->u.integer.size,
2760 type->u.integer.alignment,
2761 type->u.integer.signedness,
2762 (type->u.integer.encoding == lttng_encode_none)
2763 ? "none"
2764 : (type->u.integer.encoding == lttng_encode_UTF8)
2765 ? "UTF8"
2766 : "ASCII",
2767 type->u.integer.base,
2768 #if __BYTE_ORDER == __BIG_ENDIAN
2769 type->u.integer.reverse_byte_order ? " byte_order = le;" : ""
2770 #else
2771 type->u.integer.reverse_byte_order ? " byte_order = be;" : ""
2772 #endif
2773 );
2774 return ret;
2775 }
2776
2777 /*
2778 * Must be called with sessions_mutex held.
2779 */
2780 static
2781 int _lttng_struct_type_statedump(struct lttng_session *session,
2782 const struct lttng_type *type,
2783 size_t nesting)
2784 {
2785 int ret;
2786 uint32_t i, nr_fields;
2787 unsigned int alignment;
2788
2789 WARN_ON_ONCE(type->atype != atype_struct_nestable);
2790
2791 ret = print_tabs(session, nesting);
2792 if (ret)
2793 return ret;
2794 ret = lttng_metadata_printf(session,
2795 "struct {\n");
2796 if (ret)
2797 return ret;
2798 nr_fields = type->u.struct_nestable.nr_fields;
2799 for (i = 0; i < nr_fields; i++) {
2800 const struct lttng_event_field *iter_field;
2801
2802 iter_field = &type->u.struct_nestable.fields[i];
2803 ret = _lttng_field_statedump(session, iter_field, nesting + 1);
2804 if (ret)
2805 return ret;
2806 }
2807 ret = print_tabs(session, nesting);
2808 if (ret)
2809 return ret;
2810 alignment = type->u.struct_nestable.alignment;
2811 if (alignment) {
2812 ret = lttng_metadata_printf(session,
2813 "} align(%u)",
2814 alignment);
2815 } else {
2816 ret = lttng_metadata_printf(session,
2817 "}");
2818 }
2819 return ret;
2820 }
2821
2822 /*
2823 * Must be called with sessions_mutex held.
2824 */
2825 static
2826 int _lttng_struct_field_statedump(struct lttng_session *session,
2827 const struct lttng_event_field *field,
2828 size_t nesting)
2829 {
2830 int ret;
2831
2832 ret = _lttng_struct_type_statedump(session,
2833 &field->type, nesting);
2834 if (ret)
2835 return ret;
2836 return lttng_field_name_statedump(session, field, nesting);
2837 }
2838
2839 /*
2840 * Must be called with sessions_mutex held.
2841 */
2842 static
2843 int _lttng_variant_type_statedump(struct lttng_session *session,
2844 const struct lttng_type *type,
2845 size_t nesting)
2846 {
2847 int ret;
2848 uint32_t i, nr_choices;
2849
2850 WARN_ON_ONCE(type->atype != atype_variant_nestable);
2851 /*
2852 * CTF 1.8 does not allow expressing nonzero variant alignment in a nestable way.
2853 */
2854 if (type->u.variant_nestable.alignment != 0)
2855 return -EINVAL;
2856 ret = print_tabs(session, nesting);
2857 if (ret)
2858 return ret;
2859 ret = lttng_metadata_printf(session,
2860 "variant <_%s> {\n",
2861 type->u.variant_nestable.tag_name);
2862 if (ret)
2863 return ret;
2864 nr_choices = type->u.variant_nestable.nr_choices;
2865 for (i = 0; i < nr_choices; i++) {
2866 const struct lttng_event_field *iter_field;
2867
2868 iter_field = &type->u.variant_nestable.choices[i];
2869 ret = _lttng_field_statedump(session, iter_field, nesting + 1);
2870 if (ret)
2871 return ret;
2872 }
2873 ret = print_tabs(session, nesting);
2874 if (ret)
2875 return ret;
2876 ret = lttng_metadata_printf(session,
2877 "}");
2878 return ret;
2879 }
2880
2881 /*
2882 * Must be called with sessions_mutex held.
2883 */
2884 static
2885 int _lttng_variant_field_statedump(struct lttng_session *session,
2886 const struct lttng_event_field *field,
2887 size_t nesting)
2888 {
2889 int ret;
2890
2891 ret = _lttng_variant_type_statedump(session,
2892 &field->type, nesting);
2893 if (ret)
2894 return ret;
2895 return lttng_field_name_statedump(session, field, nesting);
2896 }
2897
2898 /*
2899 * Must be called with sessions_mutex held.
2900 */
2901 static
2902 int _lttng_array_field_statedump(struct lttng_session *session,
2903 const struct lttng_event_field *field,
2904 size_t nesting)
2905 {
2906 int ret;
2907 const struct lttng_type *elem_type;
2908
2909 WARN_ON_ONCE(field->type.atype != atype_array_nestable);
2910
2911 if (field->type.u.array_nestable.alignment) {
2912 ret = print_tabs(session, nesting);
2913 if (ret)
2914 return ret;
2915 ret = lttng_metadata_printf(session,
2916 "struct { } align(%u) _%s_padding;\n",
2917 field->type.u.array_nestable.alignment * CHAR_BIT,
2918 field->name);
2919 if (ret)
2920 return ret;
2921 }
2922 /*
2923 * Nested compound types: Only array of structures and variants are
2924 * currently supported.
2925 */
2926 elem_type = field->type.u.array_nestable.elem_type;
2927 switch (elem_type->atype) {
2928 case atype_integer:
2929 case atype_struct_nestable:
2930 case atype_variant_nestable:
2931 ret = _lttng_type_statedump(session, elem_type, nesting);
2932 if (ret)
2933 return ret;
2934 break;
2935
2936 default:
2937 return -EINVAL;
2938 }
2939 ret = lttng_metadata_printf(session,
2940 " _%s[%u];\n",
2941 field->name,
2942 field->type.u.array_nestable.length);
2943 return ret;
2944 }
2945
2946 /*
2947 * Must be called with sessions_mutex held.
2948 */
2949 static
2950 int _lttng_sequence_field_statedump(struct lttng_session *session,
2951 const struct lttng_event_field *field,
2952 size_t nesting)
2953 {
2954 int ret;
2955 const char *length_name;
2956 const struct lttng_type *elem_type;
2957
2958 WARN_ON_ONCE(field->type.atype != atype_sequence_nestable);
2959
2960 length_name = field->type.u.sequence_nestable.length_name;
2961
2962 if (field->type.u.sequence_nestable.alignment) {
2963 ret = print_tabs(session, nesting);
2964 if (ret)
2965 return ret;
2966 ret = lttng_metadata_printf(session,
2967 "struct { } align(%u) _%s_padding;\n",
2968 field->type.u.sequence_nestable.alignment * CHAR_BIT,
2969 field->name);
2970 if (ret)
2971 return ret;
2972 }
2973
2974 /*
2975 * Nested compound types: Only array of structures and variants are
2976 * currently supported.
2977 */
2978 elem_type = field->type.u.sequence_nestable.elem_type;
2979 switch (elem_type->atype) {
2980 case atype_integer:
2981 case atype_struct_nestable:
2982 case atype_variant_nestable:
2983 ret = _lttng_type_statedump(session, elem_type, nesting);
2984 if (ret)
2985 return ret;
2986 break;
2987
2988 default:
2989 return -EINVAL;
2990 }
2991 ret = lttng_metadata_printf(session,
2992 " _%s[ _%s ];\n",
2993 field->name,
2994 field->type.u.sequence_nestable.length_name);
2995 return ret;
2996 }
2997
2998 /*
2999 * Must be called with sessions_mutex held.
3000 */
3001 static
3002 int _lttng_enum_type_statedump(struct lttng_session *session,
3003 const struct lttng_type *type,
3004 size_t nesting)
3005 {
3006 const struct lttng_enum_desc *enum_desc;
3007 const struct lttng_type *container_type;
3008 int ret;
3009 unsigned int i, nr_entries;
3010
3011 container_type = type->u.enum_nestable.container_type;
3012 if (container_type->atype != atype_integer) {
3013 ret = -EINVAL;
3014 goto end;
3015 }
3016 enum_desc = type->u.enum_nestable.desc;
3017 nr_entries = enum_desc->nr_entries;
3018
3019 ret = print_tabs(session, nesting);
3020 if (ret)
3021 goto end;
3022 ret = lttng_metadata_printf(session, "enum : ");
3023 if (ret)
3024 goto end;
3025 ret = _lttng_integer_type_statedump(session, container_type, 0);
3026 if (ret)
3027 goto end;
3028 ret = lttng_metadata_printf(session, " {\n");
3029 if (ret)
3030 goto end;
3031 /* Dump all entries */
3032 for (i = 0; i < nr_entries; i++) {
3033 const struct lttng_enum_entry *entry = &enum_desc->entries[i];
3034 int j, len;
3035
3036 ret = print_tabs(session, nesting + 1);
3037 if (ret)
3038 goto end;
3039 ret = lttng_metadata_printf(session,
3040 "\"");
3041 if (ret)
3042 goto end;
3043 len = strlen(entry->string);
3044 /* Escape the character '"' */
3045 for (j = 0; j < len; j++) {
3046 char c = entry->string[j];
3047
3048 switch (c) {
3049 case '"':
3050 ret = lttng_metadata_printf(session,
3051 "\\\"");
3052 break;
3053 case '\\':
3054 ret = lttng_metadata_printf(session,
3055 "\\\\");
3056 break;
3057 default:
3058 ret = lttng_metadata_printf(session,
3059 "%c", c);
3060 break;
3061 }
3062 if (ret)
3063 goto end;
3064 }
3065 ret = lttng_metadata_printf(session, "\"");
3066 if (ret)
3067 goto end;
3068
3069 if (entry->options.is_auto) {
3070 ret = lttng_metadata_printf(session, ",\n");
3071 if (ret)
3072 goto end;
3073 } else {
3074 ret = lttng_metadata_printf(session,
3075 " = ");
3076 if (ret)
3077 goto end;
3078 if (entry->start.signedness)
3079 ret = lttng_metadata_printf(session,
3080 "%lld", (long long) entry->start.value);
3081 else
3082 ret = lttng_metadata_printf(session,
3083 "%llu", entry->start.value);
3084 if (ret)
3085 goto end;
3086 if (entry->start.signedness == entry->end.signedness &&
3087 entry->start.value
3088 == entry->end.value) {
3089 ret = lttng_metadata_printf(session,
3090 ",\n");
3091 } else {
3092 if (entry->end.signedness) {
3093 ret = lttng_metadata_printf(session,
3094 " ... %lld,\n",
3095 (long long) entry->end.value);
3096 } else {
3097 ret = lttng_metadata_printf(session,
3098 " ... %llu,\n",
3099 entry->end.value);
3100 }
3101 }
3102 if (ret)
3103 goto end;
3104 }
3105 }
3106 ret = print_tabs(session, nesting);
3107 if (ret)
3108 goto end;
3109 ret = lttng_metadata_printf(session, "}");
3110 end:
3111 return ret;
3112 }
3113
3114 /*
3115 * Must be called with sessions_mutex held.
3116 */
3117 static
3118 int _lttng_enum_field_statedump(struct lttng_session *session,
3119 const struct lttng_event_field *field,
3120 size_t nesting)
3121 {
3122 int ret;
3123
3124 ret = _lttng_enum_type_statedump(session, &field->type, nesting);
3125 if (ret)
3126 return ret;
3127 return lttng_field_name_statedump(session, field, nesting);
3128 }
3129
3130 static
3131 int _lttng_integer_field_statedump(struct lttng_session *session,
3132 const struct lttng_event_field *field,
3133 size_t nesting)
3134 {
3135 int ret;
3136
3137 ret = _lttng_integer_type_statedump(session, &field->type, nesting);
3138 if (ret)
3139 return ret;
3140 return lttng_field_name_statedump(session, field, nesting);
3141 }
3142
3143 static
3144 int _lttng_string_type_statedump(struct lttng_session *session,
3145 const struct lttng_type *type,
3146 size_t nesting)
3147 {
3148 int ret;
3149
3150 WARN_ON_ONCE(type->atype != atype_string);
3151 /* Default encoding is UTF8 */
3152 ret = print_tabs(session, nesting);
3153 if (ret)
3154 return ret;
3155 ret = lttng_metadata_printf(session,
3156 "string%s",
3157 type->u.string.encoding == lttng_encode_ASCII ?
3158 " { encoding = ASCII; }" : "");
3159 return ret;
3160 }
3161
3162 static
3163 int _lttng_string_field_statedump(struct lttng_session *session,
3164 const struct lttng_event_field *field,
3165 size_t nesting)
3166 {
3167 int ret;
3168
3169 WARN_ON_ONCE(field->type.atype != atype_string);
3170 ret = _lttng_string_type_statedump(session, &field->type, nesting);
3171 if (ret)
3172 return ret;
3173 return lttng_field_name_statedump(session, field, nesting);
3174 }
3175
3176 /*
3177 * Must be called with sessions_mutex held.
3178 */
3179 static
3180 int _lttng_type_statedump(struct lttng_session *session,
3181 const struct lttng_type *type,
3182 size_t nesting)
3183 {
3184 int ret = 0;
3185
3186 switch (type->atype) {
3187 case atype_integer:
3188 ret = _lttng_integer_type_statedump(session, type, nesting);
3189 break;
3190 case atype_enum_nestable:
3191 ret = _lttng_enum_type_statedump(session, type, nesting);
3192 break;
3193 case atype_string:
3194 ret = _lttng_string_type_statedump(session, type, nesting);
3195 break;
3196 case atype_struct_nestable:
3197 ret = _lttng_struct_type_statedump(session, type, nesting);
3198 break;
3199 case atype_variant_nestable:
3200 ret = _lttng_variant_type_statedump(session, type, nesting);
3201 break;
3202
3203 /* Nested arrays and sequences are not supported yet. */
3204 case atype_array_nestable:
3205 case atype_sequence_nestable:
3206 default:
3207 WARN_ON_ONCE(1);
3208 return -EINVAL;
3209 }
3210 return ret;
3211 }
3212
3213 /*
3214 * Must be called with sessions_mutex held.
3215 */
3216 static
3217 int _lttng_field_statedump(struct lttng_session *session,
3218 const struct lttng_event_field *field,
3219 size_t nesting)
3220 {
3221 int ret = 0;
3222
3223 switch (field->type.atype) {
3224 case atype_integer:
3225 ret = _lttng_integer_field_statedump(session, field, nesting);
3226 break;
3227 case atype_enum_nestable:
3228 ret = _lttng_enum_field_statedump(session, field, nesting);
3229 break;
3230 case atype_string:
3231 ret = _lttng_string_field_statedump(session, field, nesting);
3232 break;
3233 case atype_struct_nestable:
3234 ret = _lttng_struct_field_statedump(session, field, nesting);
3235 break;
3236 case atype_array_nestable:
3237 ret = _lttng_array_field_statedump(session, field, nesting);
3238 break;
3239 case atype_sequence_nestable:
3240 ret = _lttng_sequence_field_statedump(session, field, nesting);
3241 break;
3242 case atype_variant_nestable:
3243 ret = _lttng_variant_field_statedump(session, field, nesting);
3244 break;
3245
3246 default:
3247 WARN_ON_ONCE(1);
3248 return -EINVAL;
3249 }
3250 return ret;
3251 }
3252
3253 static
3254 int _lttng_context_metadata_statedump(struct lttng_session *session,
3255 struct lttng_ctx *ctx)
3256 {
3257 int ret = 0;
3258 int i;
3259
3260 if (!ctx)
3261 return 0;
3262 for (i = 0; i < ctx->nr_fields; i++) {
3263 const struct lttng_ctx_field *field = &ctx->fields[i];
3264
3265 ret = _lttng_field_statedump(session, &field->event_field, 2);
3266 if (ret)
3267 return ret;
3268 }
3269 return ret;
3270 }
3271
3272 static
3273 int _lttng_fields_metadata_statedump(struct lttng_session *session,
3274 struct lttng_event *event)
3275 {
3276 const struct lttng_event_desc *desc = event->desc;
3277 int ret = 0;
3278 int i;
3279
3280 for (i = 0; i < desc->nr_fields; i++) {
3281 const struct lttng_event_field *field = &desc->fields[i];
3282
3283 ret = _lttng_field_statedump(session, field, 2);
3284 if (ret)
3285 return ret;
3286 }
3287 return ret;
3288 }
3289
/*
 * Must be called with sessions_mutex held.
 * The entire event metadata is printed as a single atomic metadata
 * transaction.
 */
static
int _lttng_event_metadata_statedump(struct lttng_session *session,
				  struct lttng_channel *chan,
				  struct lttng_event *event)
{
	int ret = 0;

	/* Each event is described at most once, and only for active sessions. */
	if (event->metadata_dumped || !LTTNG_READ_ONCE(session->active))
		return 0;
	/* The metadata channel does not describe its own events. */
	if (chan->channel_type == METADATA_CHANNEL)
		return 0;

	/*
	 * Open a metadata transaction so the whole event declaration is
	 * made visible to readers atomically.
	 */
	lttng_metadata_begin(session);

	/* Event header: name, per-channel event id, owning stream id. */
	ret = lttng_metadata_printf(session,
		"event {\n"
		"	name = \"%s\";\n"
		"	id = %u;\n"
		"	stream_id = %u;\n",
		event->desc->name,
		event->id,
		event->chan->id);
	if (ret)
		goto end;

	/* Optional per-event context struct (only opened if a context exists). */
	if (event->ctx) {
		ret = lttng_metadata_printf(session,
			"	context := struct {\n");
		if (ret)
			goto end;
	}
	/* No-op when event->ctx is NULL. */
	ret = _lttng_context_metadata_statedump(session, event->ctx);
	if (ret)
		goto end;
	if (event->ctx) {
		ret = lttng_metadata_printf(session,
			"	};\n");
		if (ret)
			goto end;
	}

	/* Payload fields, always emitted (possibly an empty struct). */
	ret = lttng_metadata_printf(session,
		"	fields := struct {\n"
		);
	if (ret)
		goto end;

	ret = _lttng_fields_metadata_statedump(session, event);
	if (ret)
		goto end;

	/*
	 * LTTng space reservation can only reserve multiples of the
	 * byte size.
	 */
	ret = lttng_metadata_printf(session,
		"	};\n"
		"};\n\n");
	if (ret)
		goto end;

	/* Only mark dumped on full success so a failed dump is retried. */
	event->metadata_dumped = 1;
end:
	lttng_metadata_end(session);
	return ret;

}
3362
3363 /*
3364 * Must be called with sessions_mutex held.
3365 * The entire channel metadata is printed as a single atomic metadata
3366 * transaction.
3367 */
3368 static
3369 int _lttng_channel_metadata_statedump(struct lttng_session *session,
3370 struct lttng_channel *chan)
3371 {
3372 int ret = 0;
3373
3374 if (chan->metadata_dumped || !LTTNG_READ_ONCE(session->active))
3375 return 0;
3376
3377 if (chan->channel_type == METADATA_CHANNEL)
3378 return 0;
3379
3380 lttng_metadata_begin(session);
3381
3382 WARN_ON_ONCE(!chan->header_type);
3383 ret = lttng_metadata_printf(session,
3384 "stream {\n"
3385 " id = %u;\n"
3386 " event.header := %s;\n"
3387 " packet.context := struct packet_context;\n",
3388 chan->id,
3389 chan->header_type == 1 ? "struct event_header_compact" :
3390 "struct event_header_large");
3391 if (ret)
3392 goto end;
3393
3394 if (chan->ctx) {
3395 ret = lttng_metadata_printf(session,
3396 " event.context := struct {\n");
3397 if (ret)
3398 goto end;
3399 }
3400 ret = _lttng_context_metadata_statedump(session, chan->ctx);
3401 if (ret)
3402 goto end;
3403 if (chan->ctx) {
3404 ret = lttng_metadata_printf(session,
3405 " };\n");
3406 if (ret)
3407 goto end;
3408 }
3409
3410 ret = lttng_metadata_printf(session,
3411 "};\n\n");
3412
3413 chan->metadata_dumped = 1;
3414 end:
3415 lttng_metadata_end(session);
3416 return ret;
3417 }
3418
/*
 * Must be called with sessions_mutex held.
 *
 * Declare the CTF "struct packet_context" type referenced as
 * packet.context by every stream declaration in this trace.
 * timestamp_begin/timestamp_end use the trace-clock-mapped integer
 * type declared earlier in the session metadata.
 */
static
int _lttng_stream_packet_context_declare(struct lttng_session *session)
{
	return lttng_metadata_printf(session,
		"struct packet_context {\n"
		"	uint64_clock_monotonic_t timestamp_begin;\n"
		"	uint64_clock_monotonic_t timestamp_end;\n"
		"	uint64_t content_size;\n"
		"	uint64_t packet_size;\n"
		"	uint64_t packet_seq_num;\n"
		"	unsigned long events_discarded;\n"
		"	uint32_t cpu_id;\n"
		"};\n\n"
		);
}
3437
/*
 * Compact header:
 * id: range: 0 - 30.
 * id 31 is reserved to indicate an extended header.
 *
 * Large header:
 * id: range: 0 - 65534.
 * id 65535 is reserved to indicate an extended header.
 *
 * Must be called with sessions_mutex held.
 *
 * Declares both event header variants; each stream picks one via
 * chan->header_type (see _lttng_channel_metadata_statedump). The
 * "extended" variant carries a full 32-bit id and 64-bit timestamp
 * when the compact ranges overflow.
 */
static
int _lttng_event_header_declare(struct lttng_session *session)
{
	/* Alignment of each header struct is expressed in bits. */
	return lttng_metadata_printf(session,
	"struct event_header_compact {\n"
	"	enum : uint5_t { compact = 0 ... 30, extended = 31 } id;\n"
	"	variant <id> {\n"
	"		struct {\n"
	"			uint27_clock_monotonic_t timestamp;\n"
	"		} compact;\n"
	"		struct {\n"
	"			uint32_t id;\n"
	"			uint64_clock_monotonic_t timestamp;\n"
	"		} extended;\n"
	"	} v;\n"
	"} align(%u);\n"
	"\n"
	"struct event_header_large {\n"
	"	enum : uint16_t { compact = 0 ... 65534, extended = 65535 } id;\n"
	"	variant <id> {\n"
	"		struct {\n"
	"			uint32_clock_monotonic_t timestamp;\n"
	"		} compact;\n"
	"		struct {\n"
	"			uint32_t id;\n"
	"			uint64_clock_monotonic_t timestamp;\n"
	"		} extended;\n"
	"	} v;\n"
	"} align(%u);\n\n",
	lttng_alignof(uint32_t) * CHAR_BIT,
	lttng_alignof(uint16_t) * CHAR_BIT
	);
}
3482
/*
 * Approximation of NTP time of day to clock monotonic correlation,
 * taken at start of trace.
 * Yes, this is only an approximation. Yes, we can (and will) do better
 * in future versions.
 * This function may return a negative offset. It may happen if the
 * system sets the REALTIME clock to 0 after boot.
 *
 * Use 64bit timespec on kernels that have it, this makes 32bit arch
 * y2038 compliant.
 *
 * Returns the REALTIME clock expressed in trace clock ticks, minus the
 * trace clock value at the (approximate) same instant.
 */
static
int64_t measure_clock_offset(void)
{
	uint64_t monotonic_avg, monotonic[2], realtime;
	uint64_t tcf = trace_clock_freq();
	int64_t offset;
	unsigned long flags;
#ifdef LTTNG_KERNEL_HAS_TIMESPEC64
	struct timespec64 rts = { 0, 0 };
#else
	struct timespec rts = { 0, 0 };
#endif

	/* Disable interrupts to increase correlation precision. */
	local_irq_save(flags);
	/* Bracket the REALTIME read between two trace clock reads. */
	monotonic[0] = trace_clock_read64();
#ifdef LTTNG_KERNEL_HAS_TIMESPEC64
	ktime_get_real_ts64(&rts);
#else
	getnstimeofday(&rts);
#endif
	monotonic[1] = trace_clock_read64();
	local_irq_restore(flags);

	/* Correlate against the midpoint of the two trace clock samples. */
	monotonic_avg = (monotonic[0] + monotonic[1]) >> 1;
	/* Convert REALTIME seconds to trace clock ticks. */
	realtime = (uint64_t) rts.tv_sec * tcf;
	if (tcf == NSEC_PER_SEC) {
		/* Trace clock counts nanoseconds: add the ns part directly. */
		realtime += rts.tv_nsec;
	} else {
		/* Scale the nanosecond remainder to the trace clock frequency. */
		uint64_t n = rts.tv_nsec * tcf;

		do_div(n, NSEC_PER_SEC);
		realtime += n;
	}
	offset = (int64_t) realtime - monotonic_avg;
	return offset;
}
3531
3532 static
3533 int print_escaped_ctf_string(struct lttng_session *session, const char *string)
3534 {
3535 int ret = 0;
3536 size_t i;
3537 char cur;
3538
3539 i = 0;
3540 cur = string[i];
3541 while (cur != '\0') {
3542 switch (cur) {
3543 case '\n':
3544 ret = lttng_metadata_printf(session, "%s", "\\n");
3545 break;
3546 case '\\':
3547 case '"':
3548 ret = lttng_metadata_printf(session, "%c", '\\');
3549 if (ret)
3550 goto error;
3551 /* We still print the current char */
3552 /* Fallthrough */
3553 default:
3554 ret = lttng_metadata_printf(session, "%c", cur);
3555 break;
3556 }
3557
3558 if (ret)
3559 goto error;
3560
3561 cur = string[++i];
3562 }
3563 error:
3564 return ret;
3565 }
3566
/*
 * Emit one `field = "value";` assignment into the environment section
 * of the session metadata, escaping the value so it remains a valid
 * CTF string literal.
 */
static
int print_metadata_escaped_field(struct lttng_session *session, const char *field,
		const char *field_value)
{
	int ret;

	/* Open the assignment and the quoted value. */
	ret = lttng_metadata_printf(session, "	%s = \"", field);
	if (ret)
		return ret;

	/* Value may contain quotes/backslashes/newlines: escape them. */
	ret = print_escaped_ctf_string(session, field_value);
	if (ret)
		return ret;

	/* Close the quote and terminate the statement. */
	return lttng_metadata_printf(session, "\";\n");
}
3586
/*
 * Output metadata into this session's metadata buffers.
 * Must be called with sessions_mutex held.
 *
 * Emits, in order: the trace declaration (type aliases, packet header,
 * UUID, byte order), the environment section, the clock description and
 * clock-mapped integer aliases, the packet context and event header
 * types, then one stream declaration per channel and one event
 * declaration per event. The session-wide part is emitted only once
 * (guarded by session->metadata_dumped); the per-channel and per-event
 * parts keep their own dumped flags so later additions are picked up.
 */
static
int _lttng_session_metadata_statedump(struct lttng_session *session)
{
	unsigned char *uuid_c = session->uuid.b;
	unsigned char uuid_s[37], clock_uuid_s[BOOT_ID_LEN];
	const char *product_uuid;
	struct lttng_channel *chan;
	struct lttng_event *event;
	int ret = 0;

	if (!LTTNG_READ_ONCE(session->active))
		return 0;

	/* Group the whole statedump into one metadata transaction. */
	lttng_metadata_begin(session);

	/* Session-wide preamble is emitted only once. */
	if (session->metadata_dumped)
		goto skip_session;

	/* Format the 16-byte trace UUID as the canonical 36-char string. */
	snprintf(uuid_s, sizeof(uuid_s),
		"%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
		uuid_c[0], uuid_c[1], uuid_c[2], uuid_c[3],
		uuid_c[4], uuid_c[5], uuid_c[6], uuid_c[7],
		uuid_c[8], uuid_c[9], uuid_c[10], uuid_c[11],
		uuid_c[12], uuid_c[13], uuid_c[14], uuid_c[15]);

	/* Base integer type aliases and the trace declaration itself. */
	ret = lttng_metadata_printf(session,
		"typealias integer { size = 8; align = %u; signed = false; } := uint8_t;\n"
		"typealias integer { size = 16; align = %u; signed = false; } := uint16_t;\n"
		"typealias integer { size = 32; align = %u; signed = false; } := uint32_t;\n"
		"typealias integer { size = 64; align = %u; signed = false; } := uint64_t;\n"
		"typealias integer { size = %u; align = %u; signed = false; } := unsigned long;\n"
		"typealias integer { size = 5; align = 1; signed = false; } := uint5_t;\n"
		"typealias integer { size = 27; align = 1; signed = false; } := uint27_t;\n"
		"\n"
		"trace {\n"
		"	major = %u;\n"
		"	minor = %u;\n"
		"	uuid = \"%s\";\n"
		"	byte_order = %s;\n"
		"	packet.header := struct {\n"
		"		uint32_t magic;\n"
		"		uint8_t  uuid[16];\n"
		"		uint32_t stream_id;\n"
		"		uint64_t stream_instance_id;\n"
		"	};\n"
		"};\n\n",
		lttng_alignof(uint8_t) * CHAR_BIT,
		lttng_alignof(uint16_t) * CHAR_BIT,
		lttng_alignof(uint32_t) * CHAR_BIT,
		lttng_alignof(uint64_t) * CHAR_BIT,
		sizeof(unsigned long) * CHAR_BIT,
		lttng_alignof(unsigned long) * CHAR_BIT,
		CTF_SPEC_MAJOR,
		CTF_SPEC_MINOR,
		uuid_s,
#if __BYTE_ORDER == __BIG_ENDIAN
		"be"
#else
		"le"
#endif
		);
	if (ret)
		goto end;

	/* Environment section: host and tracer identification. */
	ret = lttng_metadata_printf(session,
		"env {\n"
		"	hostname = \"%s\";\n"
		"	domain = \"kernel\";\n"
		"	sysname = \"%s\";\n"
		"	kernel_release = \"%s\";\n"
		"	kernel_version = \"%s\";\n"
		"	tracer_name = \"lttng-modules\";\n"
		"	tracer_major = %d;\n"
		"	tracer_minor = %d;\n"
		"	tracer_patchlevel = %d;\n"
		"	trace_buffering_scheme = \"global\";\n",
		current->nsproxy->uts_ns->name.nodename,
		utsname()->sysname,
		utsname()->release,
		utsname()->version,
		LTTNG_MODULES_MAJOR_VERSION,
		LTTNG_MODULES_MINOR_VERSION,
		LTTNG_MODULES_PATCHLEVEL_VERSION
	);
	if (ret)
		goto end;

	/* User-provided strings go through the CTF string escaper. */
	ret = print_metadata_escaped_field(session, "trace_name", session->name);
	if (ret)
		goto end;
	ret = print_metadata_escaped_field(session, "trace_creation_datetime",
			session->creation_time);
	if (ret)
		goto end;

	/* Add the product UUID to the 'env' section */
	product_uuid = dmi_get_system_info(DMI_PRODUCT_UUID);
	if (product_uuid) {
		ret = lttng_metadata_printf(session,
				"	product_uuid = \"%s\";\n",
				product_uuid
				);
		if (ret)
			goto end;
	}

	/* Close the 'env' section */
	ret = lttng_metadata_printf(session, "};\n\n");
	if (ret)
		goto end;

	/* Clock description used to interpret all timestamps in the trace. */
	ret = lttng_metadata_printf(session,
		"clock {\n"
		"	name = \"%s\";\n",
		trace_clock_name()
	);
	if (ret)
		goto end;

	/* Clock UUID is optional: only emitted when available. */
	if (!trace_clock_uuid(clock_uuid_s)) {
		ret = lttng_metadata_printf(session,
			"	uuid = \"%s\";\n",
			clock_uuid_s
		);
		if (ret)
			goto end;
	}

	ret = lttng_metadata_printf(session,
		"	description = \"%s\";\n"
		"	freq = %llu; /* Frequency, in Hz */\n"
		"	/* clock value offset from Epoch is: offset * (1/freq) */\n"
		"	offset = %lld;\n"
		"};\n\n",
		trace_clock_description(),
		(unsigned long long) trace_clock_freq(),
		(long long) measure_clock_offset()
	);
	if (ret)
		goto end;

	/* Integer aliases mapped on the clock, used by headers/contexts. */
	ret = lttng_metadata_printf(session,
		"typealias integer {\n"
		"	size = 27; align = 1; signed = false;\n"
		"	map = clock.%s.value;\n"
		"} := uint27_clock_monotonic_t;\n"
		"\n"
		"typealias integer {\n"
		"	size = 32; align = %u; signed = false;\n"
		"	map = clock.%s.value;\n"
		"} := uint32_clock_monotonic_t;\n"
		"\n"
		"typealias integer {\n"
		"	size = 64; align = %u; signed = false;\n"
		"	map = clock.%s.value;\n"
		"} := uint64_clock_monotonic_t;\n\n",
		trace_clock_name(),
		lttng_alignof(uint32_t) * CHAR_BIT,
		trace_clock_name(),
		lttng_alignof(uint64_t) * CHAR_BIT,
		trace_clock_name()
	);
	if (ret)
		goto end;

	ret = _lttng_stream_packet_context_declare(session);
	if (ret)
		goto end;

	ret = _lttng_event_header_declare(session);
	if (ret)
		goto end;

skip_session:
	/* Per-channel/per-event dumps run every time; each keeps its own flag. */
	list_for_each_entry(chan, &session->chan, list) {
		ret = _lttng_channel_metadata_statedump(session, chan);
		if (ret)
			goto end;
	}

	list_for_each_entry(event, &session->events, list) {
		ret = _lttng_event_metadata_statedump(session, event->chan, event);
		if (ret)
			goto end;
	}
	session->metadata_dumped = 1;
end:
	lttng_metadata_end(session);
	return ret;
}
3781
/**
 * lttng_transport_register - LTT transport registration
 * @transport: transport structure
 *
 * Registers a transport which can be used as output to extract the data out of
 * LTTng. The module calling this registration function must ensure that no
 * trap-inducing code will be executed by the transport functions. E.g.
 * vmalloc_sync_mappings() must be called between a vmalloc and the moment the memory
 * is made visible to the transport function. This registration acts as a
 * vmalloc_sync_mappings. Therefore, only if the module allocates virtual memory
 * after its registration must it synchronize the TLBs.
 */
void lttng_transport_register(struct lttng_transport *transport)
{
	/*
	 * Make sure no page fault can be triggered by the module about to be
	 * registered. We deal with this here so we don't have to call
	 * vmalloc_sync_mappings() in each module's init.
	 *
	 * Note: this must happen before the transport is published on the
	 * list below, per the contract described above.
	 */
	wrapper_vmalloc_sync_mappings();

	/* sessions_mutex protects lttng_transport_list (see file header). */
	mutex_lock(&sessions_mutex);
	list_add_tail(&transport->node, &lttng_transport_list);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_transport_register);
3808
/**
 * lttng_transport_unregister - LTT transport unregistration
 * @transport: transport structure
 *
 * Removes the transport from the global transport list under
 * sessions_mutex. The caller must guarantee the transport is no longer
 * in use by any channel before unregistering it.
 */
void lttng_transport_unregister(struct lttng_transport *transport)
{
	mutex_lock(&sessions_mutex);
	list_del(&transport->node);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_transport_unregister);
3820
3821 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
3822
/*
 * Dynamically-allocated CPU hotplug state ids for the prepare (BP) and
 * online (AP) stages, filled in by lttng_init_cpu_hotplug().
 */
enum cpuhp_state lttng_hp_prepare;
enum cpuhp_state lttng_hp_online;
3825
3826 static int lttng_hotplug_prepare(unsigned int cpu, struct hlist_node *node)
3827 {
3828 struct lttng_cpuhp_node *lttng_node;
3829
3830 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
3831 switch (lttng_node->component) {
3832 case LTTNG_RING_BUFFER_FRONTEND:
3833 return 0;
3834 case LTTNG_RING_BUFFER_BACKEND:
3835 return lttng_cpuhp_rb_backend_prepare(cpu, lttng_node);
3836 case LTTNG_RING_BUFFER_ITER:
3837 return 0;
3838 case LTTNG_CONTEXT_PERF_COUNTERS:
3839 return 0;
3840 default:
3841 return -EINVAL;
3842 }
3843 }
3844
3845 static int lttng_hotplug_dead(unsigned int cpu, struct hlist_node *node)
3846 {
3847 struct lttng_cpuhp_node *lttng_node;
3848
3849 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
3850 switch (lttng_node->component) {
3851 case LTTNG_RING_BUFFER_FRONTEND:
3852 return lttng_cpuhp_rb_frontend_dead(cpu, lttng_node);
3853 case LTTNG_RING_BUFFER_BACKEND:
3854 return 0;
3855 case LTTNG_RING_BUFFER_ITER:
3856 return 0;
3857 case LTTNG_CONTEXT_PERF_COUNTERS:
3858 return lttng_cpuhp_perf_counter_dead(cpu, lttng_node);
3859 default:
3860 return -EINVAL;
3861 }
3862 }
3863
3864 static int lttng_hotplug_online(unsigned int cpu, struct hlist_node *node)
3865 {
3866 struct lttng_cpuhp_node *lttng_node;
3867
3868 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
3869 switch (lttng_node->component) {
3870 case LTTNG_RING_BUFFER_FRONTEND:
3871 return lttng_cpuhp_rb_frontend_online(cpu, lttng_node);
3872 case LTTNG_RING_BUFFER_BACKEND:
3873 return 0;
3874 case LTTNG_RING_BUFFER_ITER:
3875 return lttng_cpuhp_rb_iter_online(cpu, lttng_node);
3876 case LTTNG_CONTEXT_PERF_COUNTERS:
3877 return lttng_cpuhp_perf_counter_online(cpu, lttng_node);
3878 default:
3879 return -EINVAL;
3880 }
3881 }
3882
3883 static int lttng_hotplug_offline(unsigned int cpu, struct hlist_node *node)
3884 {
3885 struct lttng_cpuhp_node *lttng_node;
3886
3887 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
3888 switch (lttng_node->component) {
3889 case LTTNG_RING_BUFFER_FRONTEND:
3890 return lttng_cpuhp_rb_frontend_offline(cpu, lttng_node);
3891 case LTTNG_RING_BUFFER_BACKEND:
3892 return 0;
3893 case LTTNG_RING_BUFFER_ITER:
3894 return 0;
3895 case LTTNG_CONTEXT_PERF_COUNTERS:
3896 return 0;
3897 default:
3898 return -EINVAL;
3899 }
3900 }
3901
3902 static int __init lttng_init_cpu_hotplug(void)
3903 {
3904 int ret;
3905
3906 ret = cpuhp_setup_state_multi(CPUHP_BP_PREPARE_DYN, "lttng:prepare",
3907 lttng_hotplug_prepare,
3908 lttng_hotplug_dead);
3909 if (ret < 0) {
3910 return ret;
3911 }
3912 lttng_hp_prepare = ret;
3913 lttng_rb_set_hp_prepare(ret);
3914
3915 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "lttng:online",
3916 lttng_hotplug_online,
3917 lttng_hotplug_offline);
3918 if (ret < 0) {
3919 cpuhp_remove_multi_state(lttng_hp_prepare);
3920 lttng_hp_prepare = 0;
3921 return ret;
3922 }
3923 lttng_hp_online = ret;
3924 lttng_rb_set_hp_online(ret);
3925
3926 return 0;
3927 }
3928
/*
 * Tear down the CPU hotplug states in reverse registration order:
 * the online (AP) stage first, then the prepare (BP) stage. The ring
 * buffer layer is told first so it stops using the state ids.
 */
static void __exit lttng_exit_cpu_hotplug(void)
{
	lttng_rb_set_hp_online(0);
	cpuhp_remove_multi_state(lttng_hp_online);
	lttng_rb_set_hp_prepare(0);
	cpuhp_remove_multi_state(lttng_hp_prepare);
}
3936
3937 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
/* Pre-4.10 kernels: no multi-instance hotplug API, nothing to set up. */
static int lttng_init_cpu_hotplug(void)
{
	return 0;
}
/* Pre-4.10 kernels: nothing was registered, so nothing to tear down. */
static void lttng_exit_cpu_hotplug(void)
{
}
3945 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
3946
3947
/*
 * Module init: bring up the tracer subsystems in dependency order
 * (wrappers, probes, contexts, tracepoints, kmem caches, ABI, logger,
 * CPU hotplug), unwinding in reverse order via the goto chain on
 * failure. Early wrapper/probe/context failures return directly since
 * those steps need no teardown at that point.
 */
static int __init lttng_events_init(void)
{
	int ret;

	ret = wrapper_lttng_fixup_sig(THIS_MODULE);
	if (ret)
		return ret;
	ret = wrapper_get_pfnblock_flags_mask_init();
	if (ret)
		return ret;
	ret = wrapper_get_pageblock_flags_mask_init();
	if (ret)
		return ret;
	ret = lttng_probes_init();
	if (ret)
		return ret;
	ret = lttng_context_init();
	if (ret)
		return ret;
	ret = lttng_tracepoint_init();
	if (ret)
		goto error_tp;
	/* Slab caches for the frequently-allocated event objects. */
	event_cache = KMEM_CACHE(lttng_event, 0);
	if (!event_cache) {
		ret = -ENOMEM;
		goto error_kmem_event;
	}
	event_notifier_cache = KMEM_CACHE(lttng_event_notifier, 0);
	if (!event_notifier_cache) {
		ret = -ENOMEM;
		goto error_kmem_event_notifier;
	}
	ret = lttng_abi_init();
	if (ret)
		goto error_abi;
	ret = lttng_logger_init();
	if (ret)
		goto error_logger;
	ret = lttng_init_cpu_hotplug();
	if (ret)
		goto error_hotplug;
	/* The #ifdef blocks select optional extra-version printk arguments. */
	printk(KERN_NOTICE "LTTng: Loaded modules v%s.%s.%s%s (%s)%s%s\n",
		__stringify(LTTNG_MODULES_MAJOR_VERSION),
		__stringify(LTTNG_MODULES_MINOR_VERSION),
		__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
		LTTNG_MODULES_EXTRAVERSION,
		LTTNG_VERSION_NAME,
#ifdef LTTNG_EXTRA_VERSION_GIT
		LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
#else
		"",
#endif
#ifdef LTTNG_EXTRA_VERSION_NAME
		LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
#else
		"");
#endif
	return 0;

	/* Error unwind: reverse order of the successful init steps above. */
error_hotplug:
	lttng_logger_exit();
error_logger:
	lttng_abi_exit();
error_abi:
	kmem_cache_destroy(event_notifier_cache);
error_kmem_event_notifier:
	kmem_cache_destroy(event_cache);
error_kmem_event:
	lttng_tracepoint_exit();
error_tp:
	lttng_context_exit();
	printk(KERN_NOTICE "LTTng: Failed to load modules v%s.%s.%s%s (%s)%s%s\n",
		__stringify(LTTNG_MODULES_MAJOR_VERSION),
		__stringify(LTTNG_MODULES_MINOR_VERSION),
		__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
		LTTNG_MODULES_EXTRAVERSION,
		LTTNG_VERSION_NAME,
#ifdef LTTNG_EXTRA_VERSION_GIT
		LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
#else
		"",
#endif
#ifdef LTTNG_EXTRA_VERSION_NAME
		LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
#else
		"");
#endif
	return ret;
}
4037
4038 module_init(lttng_events_init);
4039
/*
 * Module exit: tear down subsystems in reverse init order, destroying
 * any session still on the global list before freeing the kmem caches
 * its events were allocated from.
 */
static void __exit lttng_events_exit(void)
{
	struct lttng_session *session, *tmpsession;

	lttng_exit_cpu_hotplug();
	lttng_logger_exit();
	lttng_abi_exit();
	/* _safe variant: lttng_session_destroy unlinks the session. */
	list_for_each_entry_safe(session, tmpsession, &sessions, list)
		lttng_session_destroy(session);
	kmem_cache_destroy(event_cache);
	kmem_cache_destroy(event_notifier_cache);
	lttng_tracepoint_exit();
	lttng_context_exit();
	/* The #ifdef blocks select optional extra-version printk arguments. */
	printk(KERN_NOTICE "LTTng: Unloaded modules v%s.%s.%s%s (%s)%s%s\n",
		__stringify(LTTNG_MODULES_MAJOR_VERSION),
		__stringify(LTTNG_MODULES_MINOR_VERSION),
		__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
		LTTNG_MODULES_EXTRAVERSION,
		LTTNG_VERSION_NAME,
#ifdef LTTNG_EXTRA_VERSION_GIT
		LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
#else
		"",
#endif
#ifdef LTTNG_EXTRA_VERSION_NAME
		LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
#else
		"");
#endif
}
4070
4071 module_exit(lttng_events_exit);
4072
4073 #include <generated/patches.h>
4074 #ifdef LTTNG_EXTRA_VERSION_GIT
4075 MODULE_INFO(extra_version_git, LTTNG_EXTRA_VERSION_GIT);
4076 #endif
4077 #ifdef LTTNG_EXTRA_VERSION_NAME
4078 MODULE_INFO(extra_version_name, LTTNG_EXTRA_VERSION_NAME);
4079 #endif
4080 MODULE_LICENSE("GPL and additional rights");
4081 MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
4082 MODULE_DESCRIPTION("LTTng tracer");
4083 MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
4084 __stringify(LTTNG_MODULES_MINOR_VERSION) "."
4085 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
4086 LTTNG_MODULES_EXTRAVERSION);
This page took 0.172593 seconds and 4 git commands to generate.