1 /* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
2 *
3 * lttng-events.c
4 *
5 * Holds LTTng per-session event registry.
6 *
7 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 */
9
10 /*
11 * This page_alloc.h wrapper needs to be included before gfpflags.h because it
12 * overrides a function with a define.
13 */
14 #include "wrapper/page_alloc.h"
15
16 #include <linux/module.h>
17 #include <linux/mutex.h>
18 #include <linux/sched.h>
19 #include <linux/slab.h>
20 #include <linux/jiffies.h>
21 #include <linux/utsname.h>
22 #include <linux/err.h>
23 #include <linux/seq_file.h>
24 #include <linux/file.h>
25 #include <linux/anon_inodes.h>
26 #include <wrapper/file.h>
27 #include <linux/uaccess.h>
28 #include <linux/vmalloc.h>
29 #include <linux/dmi.h>
30
31 #include <wrapper/uuid.h>
32 #include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_mappings() */
33 #include <wrapper/random.h>
34 #include <wrapper/tracepoint.h>
35 #include <wrapper/list.h>
36 #include <wrapper/types.h>
37 #include <lttng/kernel-version.h>
38 #include <lttng/events.h>
39 #include <lttng/tracer.h>
40 #include <lttng/abi-old.h>
41 #include <lttng/endian.h>
42 #include <lttng/string-utils.h>
43 #include <lttng/utils.h>
44 #include <ringbuffer/backend.h>
45 #include <ringbuffer/frontend.h>
46 #include <wrapper/time.h>
47
48 #define METADATA_CACHE_DEFAULT_SIZE 4096
49
50 static LIST_HEAD(sessions);
51 static LIST_HEAD(event_notifier_groups);
52 static LIST_HEAD(lttng_transport_list);
53 /*
54 * Protect the sessions and metadata caches.
55 */
56 static DEFINE_MUTEX(sessions_mutex);
57 static struct kmem_cache *event_cache;
58
59 static void lttng_session_lazy_sync_event_enablers(struct lttng_session *session);
60 static void lttng_session_sync_event_enablers(struct lttng_session *session);
61 static void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler);
62
63 static void _lttng_event_destroy(struct lttng_event *event);
64 static void _lttng_channel_destroy(struct lttng_channel *chan);
65 static int _lttng_event_unregister(struct lttng_event *event);
66 static
67 int _lttng_event_metadata_statedump(struct lttng_session *session,
68 struct lttng_channel *chan,
69 struct lttng_event *event);
70 static
71 int _lttng_session_metadata_statedump(struct lttng_session *session);
72 static
73 void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream);
74 static
75 int _lttng_type_statedump(struct lttng_session *session,
76 const struct lttng_type *type,
77 size_t nesting);
78 static
79 int _lttng_field_statedump(struct lttng_session *session,
80 const struct lttng_event_field *field,
81 size_t nesting);
82
83 void synchronize_trace(void)
84 {
85 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,1,0))
86 synchronize_rcu();
87 #else
88 synchronize_sched();
89 #endif
90
91 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
92 #ifdef CONFIG_PREEMPT_RT_FULL
93 synchronize_rcu();
94 #endif
95 #else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
96 #ifdef CONFIG_PREEMPT_RT
97 synchronize_rcu();
98 #endif
99 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
100 }
101
102 void lttng_lock_sessions(void)
103 {
104 mutex_lock(&sessions_mutex);
105 }
106
107 void lttng_unlock_sessions(void)
108 {
109 mutex_unlock(&sessions_mutex);
110 }
111
112 static struct lttng_transport *lttng_transport_find(const char *name)
113 {
114 struct lttng_transport *transport;
115
116 list_for_each_entry(transport, &lttng_transport_list, node) {
117 if (!strcmp(transport->name, name))
118 return transport;
119 }
120 return NULL;
121 }
122
123 /*
124 * Called with sessions lock held.
125 */
126 int lttng_session_active(void)
127 {
128 struct lttng_session *iter;
129
130 list_for_each_entry(iter, &sessions, list) {
131 if (iter->active)
132 return 1;
133 }
134 return 0;
135 }
136
137 struct lttng_session *lttng_session_create(void)
138 {
139 struct lttng_session *session;
140 struct lttng_metadata_cache *metadata_cache;
141 int i;
142
143 mutex_lock(&sessions_mutex);
144 session = lttng_kvzalloc(sizeof(struct lttng_session), GFP_KERNEL);
145 if (!session)
146 goto err;
147 INIT_LIST_HEAD(&session->chan);
148 INIT_LIST_HEAD(&session->events);
149 lttng_guid_gen(&session->uuid);
150
151 metadata_cache = kzalloc(sizeof(struct lttng_metadata_cache),
152 GFP_KERNEL);
153 if (!metadata_cache)
154 goto err_free_session;
155 metadata_cache->data = vzalloc(METADATA_CACHE_DEFAULT_SIZE);
156 if (!metadata_cache->data)
157 goto err_free_cache;
158 metadata_cache->cache_alloc = METADATA_CACHE_DEFAULT_SIZE;
159 kref_init(&metadata_cache->refcount);
160 mutex_init(&metadata_cache->lock);
161 session->metadata_cache = metadata_cache;
162 INIT_LIST_HEAD(&metadata_cache->metadata_stream);
163 memcpy(&metadata_cache->uuid, &session->uuid,
164 sizeof(metadata_cache->uuid));
165 INIT_LIST_HEAD(&session->enablers_head);
166 for (i = 0; i < LTTNG_EVENT_HT_SIZE; i++)
167 INIT_HLIST_HEAD(&session->events_ht.table[i]);
168 list_add(&session->list, &sessions);
169 session->pid_tracker.session = session;
170 session->pid_tracker.tracker_type = TRACKER_PID;
171 session->vpid_tracker.session = session;
172 session->vpid_tracker.tracker_type = TRACKER_VPID;
173 session->uid_tracker.session = session;
174 session->uid_tracker.tracker_type = TRACKER_UID;
175 session->vuid_tracker.session = session;
176 session->vuid_tracker.tracker_type = TRACKER_VUID;
177 session->gid_tracker.session = session;
178 session->gid_tracker.tracker_type = TRACKER_GID;
179 session->vgid_tracker.session = session;
180 session->vgid_tracker.tracker_type = TRACKER_VGID;
181 mutex_unlock(&sessions_mutex);
182 return session;
183
184 err_free_cache:
185 kfree(metadata_cache);
186 err_free_session:
187 lttng_kvfree(session);
188 err:
189 mutex_unlock(&sessions_mutex);
190 return NULL;
191 }
192
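/*
 * Illustrative sketch (assumption, not upstream documentation): typical
 * ordering of the session entry points defined in this file. The transport
 * name, buffer geometry and channel type below are hypothetical placeholders,
 * shown only to make the call ordering concrete.
 *
 *	struct lttng_session *session;
 *	struct lttng_channel *chan;
 *	int ret;
 *
 *	session = lttng_session_create();
 *	if (!session)
 *		return -ENOMEM;
 *	chan = lttng_channel_create(session, "relay-discard", NULL,
 *			4096, 16, 0, 0, PER_CPU_CHANNEL);
 *	if (!chan) {
 *		lttng_session_destroy(session);
 *		return -EINVAL;
 *	}
 *	ret = lttng_session_enable(session);	-- start tracing
 *	...
 *	ret = lttng_session_disable(session);	-- stop tracing
 *	lttng_session_destroy(session);		-- teardown, takes sessions_mutex
 */
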
193 struct lttng_event_notifier_group *lttng_event_notifier_group_create(void)
194 {
195 struct lttng_transport *transport = NULL;
196 struct lttng_event_notifier_group *event_notifier_group;
197 const char *transport_name = "relay-event-notifier";
198 size_t subbuf_size = 4096; //TODO
199 size_t num_subbuf = 16; //TODO
200 unsigned int switch_timer_interval = 0;
201 unsigned int read_timer_interval = 0;
202
203 mutex_lock(&sessions_mutex);
204
205 transport = lttng_transport_find(transport_name);
206 if (!transport) {
207 printk(KERN_WARNING "LTTng: transport %s not found\n",
208 transport_name);
209 goto notransport;
210 }
211 if (!try_module_get(transport->owner)) {
212 printk(KERN_WARNING "LTTng: Can't lock transport %s module.\n",
213 transport_name);
214 goto notransport;
215 }
216
217 event_notifier_group = lttng_kvzalloc(sizeof(struct lttng_event_notifier_group),
218 GFP_KERNEL);
219 if (!event_notifier_group)
220 goto nomem;
221
222 /*
223 * Initialize the ring buffer used to store event notifier
224 * notifications.
225 */
226 event_notifier_group->ops = &transport->ops;
227 event_notifier_group->chan = transport->ops.channel_create(
228 transport_name, event_notifier_group, NULL,
229 subbuf_size, num_subbuf, switch_timer_interval,
230 read_timer_interval);
231 if (!event_notifier_group->chan)
232 goto create_error;
233
234 event_notifier_group->transport = transport;
235 list_add(&event_notifier_group->node, &event_notifier_groups);
236
237 mutex_unlock(&sessions_mutex);
238
239 return event_notifier_group;
240
241 create_error:
242 lttng_kvfree(event_notifier_group);
243 nomem:
244 if (transport)
245 module_put(transport->owner);
246 notransport:
247 mutex_unlock(&sessions_mutex);
248 return NULL;
249 }
250
251 void metadata_cache_destroy(struct kref *kref)
252 {
253 struct lttng_metadata_cache *cache =
254 container_of(kref, struct lttng_metadata_cache, refcount);
255 vfree(cache->data);
256 kfree(cache);
257 }
258
259 void lttng_session_destroy(struct lttng_session *session)
260 {
261 struct lttng_channel *chan, *tmpchan;
262 struct lttng_event *event, *tmpevent;
263 struct lttng_metadata_stream *metadata_stream;
264 struct lttng_event_enabler *event_enabler, *tmp_event_enabler;
265 int ret;
266
267 mutex_lock(&sessions_mutex);
268 WRITE_ONCE(session->active, 0);
269 list_for_each_entry(chan, &session->chan, list) {
270 ret = lttng_syscalls_unregister(chan);
271 WARN_ON(ret);
272 }
273 list_for_each_entry(event, &session->events, list) {
274 ret = _lttng_event_unregister(event);
275 WARN_ON(ret);
276 }
277 synchronize_trace(); /* Wait for in-flight events to complete */
278 list_for_each_entry(chan, &session->chan, list) {
279 ret = lttng_syscalls_destroy(chan);
280 WARN_ON(ret);
281 }
282 list_for_each_entry_safe(event_enabler, tmp_event_enabler,
283 &session->enablers_head, node)
284 lttng_event_enabler_destroy(event_enabler);
285 list_for_each_entry_safe(event, tmpevent, &session->events, list)
286 _lttng_event_destroy(event);
287 list_for_each_entry_safe(chan, tmpchan, &session->chan, list) {
288 BUG_ON(chan->channel_type == METADATA_CHANNEL);
289 _lttng_channel_destroy(chan);
290 }
291 mutex_lock(&session->metadata_cache->lock);
292 list_for_each_entry(metadata_stream, &session->metadata_cache->metadata_stream, list)
293 _lttng_metadata_channel_hangup(metadata_stream);
294 mutex_unlock(&session->metadata_cache->lock);
295 lttng_id_tracker_destroy(&session->pid_tracker, false);
296 lttng_id_tracker_destroy(&session->vpid_tracker, false);
297 lttng_id_tracker_destroy(&session->uid_tracker, false);
298 lttng_id_tracker_destroy(&session->vuid_tracker, false);
299 lttng_id_tracker_destroy(&session->gid_tracker, false);
300 lttng_id_tracker_destroy(&session->vgid_tracker, false);
301 kref_put(&session->metadata_cache->refcount, metadata_cache_destroy);
302 list_del(&session->list);
303 mutex_unlock(&sessions_mutex);
304 lttng_kvfree(session);
305 }
306
307 void lttng_event_notifier_group_destroy(struct lttng_event_notifier_group *event_notifier_group)
308 {
309 if (!event_notifier_group)
310 return;
311
312 mutex_lock(&sessions_mutex);
313 event_notifier_group->ops->channel_destroy(event_notifier_group->chan);
314 module_put(event_notifier_group->transport->owner);
315 list_del(&event_notifier_group->node);
316 mutex_unlock(&sessions_mutex);
317 lttng_kvfree(event_notifier_group);
318 }
319
320 int lttng_session_statedump(struct lttng_session *session)
321 {
322 int ret;
323
324 mutex_lock(&sessions_mutex);
325 ret = lttng_statedump_start(session);
326 mutex_unlock(&sessions_mutex);
327 return ret;
328 }
329
330 int lttng_session_enable(struct lttng_session *session)
331 {
332 int ret = 0;
333 struct lttng_channel *chan;
334
335 mutex_lock(&sessions_mutex);
336 if (session->active) {
337 ret = -EBUSY;
338 goto end;
339 }
340
341 /* Set transient enabler state to "enabled" */
342 session->tstate = 1;
343
344 /* We need to sync enablers with session before activation. */
345 lttng_session_sync_event_enablers(session);
346
347 /*
348 * Snapshot the number of events per channel to know the type of header
349 * we need to use.
350 */
351 list_for_each_entry(chan, &session->chan, list) {
352 if (chan->header_type)
353 continue; /* don't change it if session stop/restart */
354 if (chan->free_event_id < 31)
355 chan->header_type = 1; /* compact */
356 else
357 chan->header_type = 2; /* large */
358 }
359
360 /* Clear each stream's quiescent state. */
361 list_for_each_entry(chan, &session->chan, list) {
362 if (chan->channel_type != METADATA_CHANNEL)
363 lib_ring_buffer_clear_quiescent_channel(chan->chan);
364 }
365
366 WRITE_ONCE(session->active, 1);
367 WRITE_ONCE(session->been_active, 1);
368 ret = _lttng_session_metadata_statedump(session);
369 if (ret) {
370 WRITE_ONCE(session->active, 0);
371 goto end;
372 }
373 ret = lttng_statedump_start(session);
374 if (ret)
375 WRITE_ONCE(session->active, 0);
376 end:
377 mutex_unlock(&sessions_mutex);
378 return ret;
379 }
380
381 int lttng_session_disable(struct lttng_session *session)
382 {
383 int ret = 0;
384 struct lttng_channel *chan;
385
386 mutex_lock(&sessions_mutex);
387 if (!session->active) {
388 ret = -EBUSY;
389 goto end;
390 }
391 WRITE_ONCE(session->active, 0);
392
393 /* Set transient enabler state to "disabled" */
394 session->tstate = 0;
395 lttng_session_sync_event_enablers(session);
396
397 /* Set each stream's quiescent state. */
398 list_for_each_entry(chan, &session->chan, list) {
399 if (chan->channel_type != METADATA_CHANNEL)
400 lib_ring_buffer_set_quiescent_channel(chan->chan);
401 }
402 end:
403 mutex_unlock(&sessions_mutex);
404 return ret;
405 }
406
407 int lttng_session_metadata_regenerate(struct lttng_session *session)
408 {
409 int ret = 0;
410 struct lttng_channel *chan;
411 struct lttng_event *event;
412 struct lttng_metadata_cache *cache = session->metadata_cache;
413 struct lttng_metadata_stream *stream;
414
415 mutex_lock(&sessions_mutex);
416 if (!session->active) {
417 ret = -EBUSY;
418 goto end;
419 }
420
421 mutex_lock(&cache->lock);
422 memset(cache->data, 0, cache->cache_alloc);
423 cache->metadata_written = 0;
424 cache->version++;
425 list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list) {
426 stream->metadata_out = 0;
427 stream->metadata_in = 0;
428 }
429 mutex_unlock(&cache->lock);
430
431 session->metadata_dumped = 0;
432 list_for_each_entry(chan, &session->chan, list) {
433 chan->metadata_dumped = 0;
434 }
435
436 list_for_each_entry(event, &session->events, list) {
437 event->metadata_dumped = 0;
438 }
439
440 ret = _lttng_session_metadata_statedump(session);
441
442 end:
443 mutex_unlock(&sessions_mutex);
444 return ret;
445 }
446
447 int lttng_channel_enable(struct lttng_channel *channel)
448 {
449 int ret = 0;
450
451 mutex_lock(&sessions_mutex);
452 if (channel->channel_type == METADATA_CHANNEL) {
453 ret = -EPERM;
454 goto end;
455 }
456 if (channel->enabled) {
457 ret = -EEXIST;
458 goto end;
459 }
460 /* Set transient enabler state to "enabled" */
461 channel->tstate = 1;
462 lttng_session_sync_event_enablers(channel->session);
463 /* Set atomically the state to "enabled" */
464 WRITE_ONCE(channel->enabled, 1);
465 end:
466 mutex_unlock(&sessions_mutex);
467 return ret;
468 }
469
470 int lttng_channel_disable(struct lttng_channel *channel)
471 {
472 int ret = 0;
473
474 mutex_lock(&sessions_mutex);
475 if (channel->channel_type == METADATA_CHANNEL) {
476 ret = -EPERM;
477 goto end;
478 }
479 if (!channel->enabled) {
480 ret = -EEXIST;
481 goto end;
482 }
483 /* Set atomically the state to "disabled" */
484 WRITE_ONCE(channel->enabled, 0);
485 /* Set transient enabler state to "enabled" */
486 channel->tstate = 0;
487 lttng_session_sync_event_enablers(channel->session);
488 end:
489 mutex_unlock(&sessions_mutex);
490 return ret;
491 }
492
493 int lttng_event_enable(struct lttng_event *event)
494 {
495 int ret = 0;
496
497 mutex_lock(&sessions_mutex);
498 if (event->chan->channel_type == METADATA_CHANNEL) {
499 ret = -EPERM;
500 goto end;
501 }
502 if (event->enabled) {
503 ret = -EEXIST;
504 goto end;
505 }
506 switch (event->instrumentation) {
507 case LTTNG_KERNEL_TRACEPOINT:
508 case LTTNG_KERNEL_SYSCALL:
509 ret = -EINVAL;
510 break;
511 case LTTNG_KERNEL_KPROBE:
512 case LTTNG_KERNEL_UPROBE:
513 case LTTNG_KERNEL_NOOP:
514 WRITE_ONCE(event->enabled, 1);
515 break;
516 case LTTNG_KERNEL_KRETPROBE:
517 ret = lttng_kretprobes_event_enable_state(event, 1);
518 break;
519 case LTTNG_KERNEL_FUNCTION: /* Fall-through. */
520 default:
521 WARN_ON_ONCE(1);
522 ret = -EINVAL;
523 }
524 end:
525 mutex_unlock(&sessions_mutex);
526 return ret;
527 }
528
529 int lttng_event_disable(struct lttng_event *event)
530 {
531 int ret = 0;
532
533 mutex_lock(&sessions_mutex);
534 if (event->chan->channel_type == METADATA_CHANNEL) {
535 ret = -EPERM;
536 goto end;
537 }
538 if (!event->enabled) {
539 ret = -EEXIST;
540 goto end;
541 }
542 switch (event->instrumentation) {
543 case LTTNG_KERNEL_TRACEPOINT:
544 case LTTNG_KERNEL_SYSCALL:
545 ret = -EINVAL;
546 break;
547 case LTTNG_KERNEL_KPROBE:
548 case LTTNG_KERNEL_UPROBE:
549 case LTTNG_KERNEL_NOOP:
550 WRITE_ONCE(event->enabled, 0);
551 break;
552 case LTTNG_KERNEL_KRETPROBE:
553 ret = lttng_kretprobes_event_enable_state(event, 0);
554 break;
555 case LTTNG_KERNEL_FUNCTION: /* Fall-through. */
556 default:
557 WARN_ON_ONCE(1);
558 ret = -EINVAL;
559 }
560 end:
561 mutex_unlock(&sessions_mutex);
562 return ret;
563 }
564
565 struct lttng_channel *lttng_channel_create(struct lttng_session *session,
566 const char *transport_name,
567 void *buf_addr,
568 size_t subbuf_size, size_t num_subbuf,
569 unsigned int switch_timer_interval,
570 unsigned int read_timer_interval,
571 enum channel_type channel_type)
572 {
573 struct lttng_channel *chan;
574 struct lttng_transport *transport = NULL;
575
576 mutex_lock(&sessions_mutex);
577 if (session->been_active && channel_type != METADATA_CHANNEL)
578 goto active; /* Refuse to add channel to active session */
579 transport = lttng_transport_find(transport_name);
580 if (!transport) {
581 printk(KERN_WARNING "LTTng: transport %s not found\n",
582 transport_name);
583 goto notransport;
584 }
585 if (!try_module_get(transport->owner)) {
586 printk(KERN_WARNING "LTTng: Can't lock transport module.\n");
587 goto notransport;
588 }
589 chan = kzalloc(sizeof(struct lttng_channel), GFP_KERNEL);
590 if (!chan)
591 goto nomem;
592 chan->session = session;
593 chan->id = session->free_chan_id++;
594 chan->ops = &transport->ops;
595 /*
596 * Note: the channel creation op already writes into the packet
597 * headers. Therefore the "chan" information used as input
598 * should be already accessible.
599 */
600 chan->chan = transport->ops.channel_create(transport_name,
601 chan, buf_addr, subbuf_size, num_subbuf,
602 switch_timer_interval, read_timer_interval);
603 if (!chan->chan)
604 goto create_error;
605 chan->tstate = 1;
606 chan->enabled = 1;
607 chan->transport = transport;
608 chan->channel_type = channel_type;
609 list_add(&chan->list, &session->chan);
610 mutex_unlock(&sessions_mutex);
611 return chan;
612
613 create_error:
614 kfree(chan);
615 nomem:
616 if (transport)
617 module_put(transport->owner);
618 notransport:
619 active:
620 mutex_unlock(&sessions_mutex);
621 return NULL;
622 }
623
624 /*
625 * Only used internally at session destruction for per-cpu channels, and
626 * when metadata channel is released.
627 * Needs to be called with sessions mutex held.
628 */
629 static
630 void _lttng_channel_destroy(struct lttng_channel *chan)
631 {
632 chan->ops->channel_destroy(chan->chan);
633 module_put(chan->transport->owner);
634 list_del(&chan->list);
635 lttng_destroy_context(chan->ctx);
636 kfree(chan);
637 }
638
639 void lttng_metadata_channel_destroy(struct lttng_channel *chan)
640 {
641 BUG_ON(chan->channel_type != METADATA_CHANNEL);
642
643 /* Protect the metadata cache with the sessions_mutex. */
644 mutex_lock(&sessions_mutex);
645 _lttng_channel_destroy(chan);
646 mutex_unlock(&sessions_mutex);
647 }
648 EXPORT_SYMBOL_GPL(lttng_metadata_channel_destroy);
649
650 static
651 void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream)
652 {
653 stream->finalized = 1;
654 wake_up_interruptible(&stream->read_wait);
655 }
656
657 /*
658 * Supports event creation while tracing session is active.
659 * Needs to be called with sessions mutex held.
660 */
661 struct lttng_event *_lttng_event_create(struct lttng_channel *chan,
662 struct lttng_kernel_event *event_param,
663 void *filter,
664 const struct lttng_event_desc *event_desc,
665 enum lttng_kernel_instrumentation itype)
666 {
667 struct lttng_session *session = chan->session;
668 struct lttng_event *event;
669 const char *event_name;
670 struct hlist_head *head;
671 int ret;
672
673 if (chan->free_event_id == -1U) {
674 ret = -EMFILE;
675 goto full;
676 }
677
678 switch (itype) {
679 case LTTNG_KERNEL_TRACEPOINT:
680 event_name = event_desc->name;
681 break;
682 case LTTNG_KERNEL_KPROBE:
683 case LTTNG_KERNEL_UPROBE:
684 case LTTNG_KERNEL_KRETPROBE:
685 case LTTNG_KERNEL_NOOP:
686 case LTTNG_KERNEL_SYSCALL:
687 event_name = event_param->name;
688 break;
689 case LTTNG_KERNEL_FUNCTION: /* Fall-through. */
690 default:
691 WARN_ON_ONCE(1);
692 ret = -EINVAL;
693 goto type_error;
694 }
695
696 head = utils_borrow_hash_table_bucket(session->events_ht.table,
697 LTTNG_EVENT_HT_SIZE, event_name);
698 lttng_hlist_for_each_entry(event, head, hlist) {
699 WARN_ON_ONCE(!event->desc);
700 if (!strncmp(event->desc->name, event_name,
701 LTTNG_KERNEL_SYM_NAME_LEN - 1)
702 && chan == event->chan) {
703 ret = -EEXIST;
704 goto exist;
705 }
706 }
707
708 event = kmem_cache_zalloc(event_cache, GFP_KERNEL);
709 if (!event) {
710 ret = -ENOMEM;
711 goto cache_error;
712 }
713 event->chan = chan;
714 event->filter = filter;
715 event->id = chan->free_event_id++;
716 event->instrumentation = itype;
717 event->evtype = LTTNG_TYPE_EVENT;
718 INIT_LIST_HEAD(&event->bytecode_runtime_head);
719 INIT_LIST_HEAD(&event->enablers_ref_head);
720
721 switch (itype) {
722 case LTTNG_KERNEL_TRACEPOINT:
723 /* Event will be enabled by enabler sync. */
724 event->enabled = 0;
725 event->registered = 0;
726 event->desc = lttng_event_desc_get(event_name);
727 if (!event->desc) {
728 ret = -ENOENT;
729 goto register_error;
730 }
731 /* Populate lttng_event structure before event registration. */
732 smp_wmb();
733 break;
734 case LTTNG_KERNEL_KPROBE:
735 /*
736 * Needs to be explicitly enabled after creation, since
737 * we may want to apply filters.
738 */
739 event->enabled = 0;
740 event->registered = 1;
741 /*
742 * Populate lttng_event structure before event
743 * registration.
744 */
745 smp_wmb();
746 ret = lttng_kprobes_register(event_name,
747 event_param->u.kprobe.symbol_name,
748 event_param->u.kprobe.offset,
749 event_param->u.kprobe.addr,
750 event);
751 if (ret) {
752 ret = -EINVAL;
753 goto register_error;
754 }
755 ret = try_module_get(event->desc->owner);
756 WARN_ON_ONCE(!ret);
757 break;
758 case LTTNG_KERNEL_KRETPROBE:
759 {
760 struct lttng_event *event_return;
761
762 /* kretprobe defines 2 events */
763 /*
764 * Needs to be explicitly enabled after creation, since
765 * we may want to apply filters.
766 */
767 event->enabled = 0;
768 event->registered = 1;
769 event_return =
770 kmem_cache_zalloc(event_cache, GFP_KERNEL);
771 if (!event_return) {
772 ret = -ENOMEM;
773 goto register_error;
774 }
775 event_return->chan = chan;
776 event_return->filter = filter;
777 event_return->id = chan->free_event_id++;
778 event_return->enabled = 0;
779 event_return->registered = 1;
780 event_return->instrumentation = itype;
781 /*
782 * Populate lttng_event structure before kretprobe registration.
783 */
784 smp_wmb();
785 ret = lttng_kretprobes_register(event_name,
786 event_param->u.kretprobe.symbol_name,
787 event_param->u.kretprobe.offset,
788 event_param->u.kretprobe.addr,
789 event, event_return);
790 if (ret) {
791 kmem_cache_free(event_cache, event_return);
792 ret = -EINVAL;
793 goto register_error;
794 }
795 /* Take 2 refs on the module: one per event. */
796 ret = try_module_get(event->desc->owner);
797 WARN_ON_ONCE(!ret);
798 ret = try_module_get(event->desc->owner);
799 WARN_ON_ONCE(!ret);
800 ret = _lttng_event_metadata_statedump(chan->session, chan,
801 event_return);
802 WARN_ON_ONCE(ret > 0);
803 if (ret) {
804 kmem_cache_free(event_cache, event_return);
805 module_put(event->desc->owner);
806 module_put(event->desc->owner);
807 goto statedump_error;
808 }
809 list_add(&event_return->list, &chan->session->events);
810 break;
811 }
812 case LTTNG_KERNEL_NOOP:
813 case LTTNG_KERNEL_SYSCALL:
814 /*
815 * Needs to be explicitly enabled after creation, since
816 * we may want to apply filters.
817 */
818 event->enabled = 0;
819 event->registered = 0;
820 event->desc = event_desc;
821 switch (event_param->u.syscall.entryexit) {
822 case LTTNG_KERNEL_SYSCALL_ENTRYEXIT:
823 ret = -EINVAL;
824 goto register_error;
825 case LTTNG_KERNEL_SYSCALL_ENTRY:
826 event->u.syscall.entryexit = LTTNG_SYSCALL_ENTRY;
827 break;
828 case LTTNG_KERNEL_SYSCALL_EXIT:
829 event->u.syscall.entryexit = LTTNG_SYSCALL_EXIT;
830 break;
831 }
832 switch (event_param->u.syscall.abi) {
833 case LTTNG_KERNEL_SYSCALL_ABI_ALL:
834 ret = -EINVAL;
835 goto register_error;
836 case LTTNG_KERNEL_SYSCALL_ABI_NATIVE:
837 event->u.syscall.abi = LTTNG_SYSCALL_ABI_NATIVE;
838 break;
839 case LTTNG_KERNEL_SYSCALL_ABI_COMPAT:
840 event->u.syscall.abi = LTTNG_SYSCALL_ABI_COMPAT;
841 break;
842 }
843 if (!event->desc) {
844 ret = -EINVAL;
845 goto register_error;
846 }
847 break;
848 case LTTNG_KERNEL_UPROBE:
849 /*
850 * Needs to be explicitly enabled after creation, since
851 * we may want to apply filters.
852 */
853 event->enabled = 0;
854 event->registered = 1;
855
856 /*
857 * Populate lttng_event structure before event
858 * registration.
859 */
860 smp_wmb();
861
862 ret = lttng_uprobes_register(event_param->name,
863 event_param->u.uprobe.fd,
864 event);
865 if (ret)
866 goto register_error;
867 ret = try_module_get(event->desc->owner);
868 WARN_ON_ONCE(!ret);
869 break;
870 case LTTNG_KERNEL_FUNCTION: /* Fall-through */
871 default:
872 WARN_ON_ONCE(1);
873 ret = -EINVAL;
874 goto register_error;
875 }
876 ret = _lttng_event_metadata_statedump(chan->session, chan, event);
877 WARN_ON_ONCE(ret > 0);
878 if (ret) {
879 goto statedump_error;
880 }
881 hlist_add_head(&event->hlist, head);
882 list_add(&event->list, &chan->session->events);
883 return event;
884
885 statedump_error:
886 /* If a statedump error occurs, events will not be readable. */
887 register_error:
888 kmem_cache_free(event_cache, event);
889 cache_error:
890 exist:
891 type_error:
892 full:
893 return ERR_PTR(ret);
894 }
895
896 struct lttng_event *lttng_event_create(struct lttng_channel *chan,
897 struct lttng_kernel_event *event_param,
898 void *filter,
899 const struct lttng_event_desc *event_desc,
900 enum lttng_kernel_instrumentation itype)
901 {
902 struct lttng_event *event;
903
904 mutex_lock(&sessions_mutex);
905 event = _lttng_event_create(chan, event_param, filter, event_desc,
906 itype);
907 mutex_unlock(&sessions_mutex);
908 return event;
909 }
910
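/*
 * Illustrative sketch (assumption): creating and arming a kprobe event on an
 * existing channel "chan" with the helpers above. The event and symbol names
 * in the event_param are hypothetical placeholders.
 *
 *	struct lttng_kernel_event ev_param;
 *	struct lttng_event *event;
 *	int ret;
 *
 *	memset(&ev_param, 0, sizeof(ev_param));
 *	strscpy(ev_param.name, "my_kprobe_event", sizeof(ev_param.name));
 *	ev_param.instrumentation = LTTNG_KERNEL_KPROBE;
 *	strscpy(ev_param.u.kprobe.symbol_name, "do_sys_open",
 *		sizeof(ev_param.u.kprobe.symbol_name));
 *
 *	event = lttng_event_create(chan, &ev_param, NULL, NULL,
 *			LTTNG_KERNEL_KPROBE);
 *	if (IS_ERR(event))
 *		return PTR_ERR(event);
 *	ret = lttng_event_enable(event);	-- kprobe events start disabled
 */
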
911 /* Only used for tracepoints for now. */
912 static
913 void register_event(struct lttng_event *event)
914 {
915 const struct lttng_event_desc *desc;
916 int ret = -EINVAL;
917
918 if (event->registered)
919 return;
920
921 desc = event->desc;
922 switch (event->instrumentation) {
923 case LTTNG_KERNEL_TRACEPOINT:
924 ret = lttng_wrapper_tracepoint_probe_register(desc->kname,
925 desc->probe_callback,
926 event);
927 break;
928 case LTTNG_KERNEL_SYSCALL:
929 ret = lttng_syscall_filter_enable(event->chan, event);
930 break;
931 case LTTNG_KERNEL_KPROBE:
932 case LTTNG_KERNEL_UPROBE:
933 case LTTNG_KERNEL_KRETPROBE:
934 case LTTNG_KERNEL_NOOP:
935 ret = 0;
936 break;
937 case LTTNG_KERNEL_FUNCTION: /* Fall-through */
938 default:
939 WARN_ON_ONCE(1);
940 }
941 if (!ret)
942 event->registered = 1;
943 }
944
945 /*
946 * Only used internally at session destruction.
947 */
948 int _lttng_event_unregister(struct lttng_event *event)
949 {
950 const struct lttng_event_desc *desc;
951 int ret = -EINVAL;
952
953 if (!event->registered)
954 return 0;
955
956 desc = event->desc;
957 switch (event->instrumentation) {
958 case LTTNG_KERNEL_TRACEPOINT:
959 ret = lttng_wrapper_tracepoint_probe_unregister(event->desc->kname,
960 event->desc->probe_callback,
961 event);
962 break;
963 case LTTNG_KERNEL_KPROBE:
964 lttng_kprobes_unregister(event);
965 ret = 0;
966 break;
967 case LTTNG_KERNEL_KRETPROBE:
968 lttng_kretprobes_unregister(event);
969 ret = 0;
970 break;
971 case LTTNG_KERNEL_SYSCALL:
972 ret = lttng_syscall_filter_disable(event->chan, event);
973 break;
974 case LTTNG_KERNEL_NOOP:
975 ret = 0;
976 break;
977 case LTTNG_KERNEL_UPROBE:
978 lttng_uprobes_unregister(event);
979 ret = 0;
980 break;
981 case LTTNG_KERNEL_FUNCTION: /* Fall-through */
982 default:
983 WARN_ON_ONCE(1);
984 }
985 if (!ret)
986 event->registered = 0;
987 return ret;
988 }
989
990 /*
991 * Only used internally at session destruction.
992 */
993 static
994 void _lttng_event_destroy(struct lttng_event *event)
995 {
996 switch (event->instrumentation) {
997 case LTTNG_KERNEL_TRACEPOINT:
998 lttng_event_desc_put(event->desc);
999 break;
1000 case LTTNG_KERNEL_KPROBE:
1001 module_put(event->desc->owner);
1002 lttng_kprobes_destroy_private(event);
1003 break;
1004 case LTTNG_KERNEL_KRETPROBE:
1005 module_put(event->desc->owner);
1006 lttng_kretprobes_destroy_private(event);
1007 break;
1008 case LTTNG_KERNEL_NOOP:
1009 case LTTNG_KERNEL_SYSCALL:
1010 break;
1011 case LTTNG_KERNEL_UPROBE:
1012 module_put(event->desc->owner);
1013 lttng_uprobes_destroy_private(event);
1014 break;
1015 case LTTNG_KERNEL_FUNCTION: /* Fall-through */
1016 default:
1017 WARN_ON_ONCE(1);
1018 }
1019 list_del(&event->list);
1020 lttng_destroy_context(event->ctx);
1021 kmem_cache_free(event_cache, event);
1022 }
1023
1024 struct lttng_id_tracker *get_tracker(struct lttng_session *session,
1025 enum tracker_type tracker_type)
1026 {
1027 switch (tracker_type) {
1028 case TRACKER_PID:
1029 return &session->pid_tracker;
1030 case TRACKER_VPID:
1031 return &session->vpid_tracker;
1032 case TRACKER_UID:
1033 return &session->uid_tracker;
1034 case TRACKER_VUID:
1035 return &session->vuid_tracker;
1036 case TRACKER_GID:
1037 return &session->gid_tracker;
1038 case TRACKER_VGID:
1039 return &session->vgid_tracker;
1040 default:
1041 WARN_ON_ONCE(1);
1042 return NULL;
1043 }
1044 }
1045
1046 int lttng_session_track_id(struct lttng_session *session,
1047 enum tracker_type tracker_type, int id)
1048 {
1049 struct lttng_id_tracker *tracker;
1050 int ret;
1051
1052 tracker = get_tracker(session, tracker_type);
1053 if (!tracker)
1054 return -EINVAL;
1055 if (id < -1)
1056 return -EINVAL;
1057 mutex_lock(&sessions_mutex);
1058 if (id == -1) {
1059 /* track all ids: destroy tracker. */
1060 lttng_id_tracker_destroy(tracker, true);
1061 ret = 0;
1062 } else {
1063 ret = lttng_id_tracker_add(tracker, id);
1064 }
1065 mutex_unlock(&sessions_mutex);
1066 return ret;
1067 }
1068
1069 int lttng_session_untrack_id(struct lttng_session *session,
1070 enum tracker_type tracker_type, int id)
1071 {
1072 struct lttng_id_tracker *tracker;
1073 int ret;
1074
1075 tracker = get_tracker(session, tracker_type);
1076 if (!tracker)
1077 return -EINVAL;
1078 if (id < -1)
1079 return -EINVAL;
1080 mutex_lock(&sessions_mutex);
1081 if (id == -1) {
1082 /* untrack all ids: replace by empty tracker. */
1083 ret = lttng_id_tracker_empty_set(tracker);
1084 } else {
1085 ret = lttng_id_tracker_del(tracker, id);
1086 }
1087 mutex_unlock(&sessions_mutex);
1088 return ret;
1089 }
1090
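/*
 * Illustrative sketch (assumption): the id argument of the two helpers above
 * follows the convention visible in their bodies: a non-negative id adds or
 * removes that single id, while id == -1 switches the whole tracker to
 * "track everything" (track) or to an empty set (untrack).
 *
 *	ret = lttng_session_track_id(session, TRACKER_PID, 1234);  -- track pid 1234
 *	ret = lttng_session_untrack_id(session, TRACKER_PID, -1);  -- track no pid at all
 *	ret = lttng_session_track_id(session, TRACKER_PID, -1);    -- back to tracking all pids
 */
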
1091 static
1092 void *id_list_start(struct seq_file *m, loff_t *pos)
1093 {
1094 struct lttng_id_tracker *id_tracker = m->private;
1095 struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
1096 struct lttng_id_hash_node *e;
1097 int iter = 0, i;
1098
1099 mutex_lock(&sessions_mutex);
1100 if (id_tracker_p) {
1101 for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
1102 struct hlist_head *head = &id_tracker_p->id_hash[i];
1103
1104 lttng_hlist_for_each_entry(e, head, hlist) {
1105 if (iter++ >= *pos)
1106 return e;
1107 }
1108 }
1109 } else {
1110 /* ID tracker disabled. */
1111 if (iter >= *pos && iter == 0) {
1112 return id_tracker_p; /* empty tracker */
1113 }
1114 iter++;
1115 }
1116 /* End of list */
1117 return NULL;
1118 }
1119
1120 /* Called with sessions_mutex held. */
1121 static
1122 void *id_list_next(struct seq_file *m, void *p, loff_t *ppos)
1123 {
1124 struct lttng_id_tracker *id_tracker = m->private;
1125 struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
1126 struct lttng_id_hash_node *e;
1127 int iter = 0, i;
1128
1129 (*ppos)++;
1130 if (id_tracker_p) {
1131 for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
1132 struct hlist_head *head = &id_tracker_p->id_hash[i];
1133
1134 lttng_hlist_for_each_entry(e, head, hlist) {
1135 if (iter++ >= *ppos)
1136 return e;
1137 }
1138 }
1139 } else {
1140 /* ID tracker disabled. */
1141 if (iter >= *ppos && iter == 0)
1142 return p; /* empty tracker */
1143 iter++;
1144 }
1145
1146 /* End of list */
1147 return NULL;
1148 }
1149
1150 static
1151 void id_list_stop(struct seq_file *m, void *p)
1152 {
1153 mutex_unlock(&sessions_mutex);
1154 }
1155
1156 static
1157 int id_list_show(struct seq_file *m, void *p)
1158 {
1159 struct lttng_id_tracker *id_tracker = m->private;
1160 struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
1161 int id;
1162
1163 if (p == id_tracker_p) {
1164 /* Tracker disabled. */
1165 id = -1;
1166 } else {
1167 const struct lttng_id_hash_node *e = p;
1168
1169 id = lttng_id_tracker_get_node_id(e);
1170 }
1171 switch (id_tracker->tracker_type) {
1172 case TRACKER_PID:
1173 seq_printf(m, "process { pid = %d; };\n", id);
1174 break;
1175 case TRACKER_VPID:
1176 seq_printf(m, "process { vpid = %d; };\n", id);
1177 break;
1178 case TRACKER_UID:
1179 seq_printf(m, "user { uid = %d; };\n", id);
1180 break;
1181 case TRACKER_VUID:
1182 seq_printf(m, "user { vuid = %d; };\n", id);
1183 break;
1184 case TRACKER_GID:
1185 seq_printf(m, "group { gid = %d; };\n", id);
1186 break;
1187 case TRACKER_VGID:
1188 seq_printf(m, "group { vgid = %d; };\n", id);
1189 break;
1190 default:
1191 seq_printf(m, "UNKNOWN { field = %d };\n", id);
1192 }
1193 return 0;
1194 }
1195
1196 static
1197 const struct seq_operations lttng_tracker_ids_list_seq_ops = {
1198 .start = id_list_start,
1199 .next = id_list_next,
1200 .stop = id_list_stop,
1201 .show = id_list_show,
1202 };
1203
1204 static
1205 int lttng_tracker_ids_list_open(struct inode *inode, struct file *file)
1206 {
1207 return seq_open(file, &lttng_tracker_ids_list_seq_ops);
1208 }
1209
1210 static
1211 int lttng_tracker_ids_list_release(struct inode *inode, struct file *file)
1212 {
1213 struct seq_file *m = file->private_data;
1214 struct lttng_id_tracker *id_tracker = m->private;
1215 int ret;
1216
1217 WARN_ON_ONCE(!id_tracker);
1218 ret = seq_release(inode, file);
1219 if (!ret)
1220 fput(id_tracker->session->file);
1221 return ret;
1222 }
1223
1224 const struct file_operations lttng_tracker_ids_list_fops = {
1225 .owner = THIS_MODULE,
1226 .open = lttng_tracker_ids_list_open,
1227 .read = seq_read,
1228 .llseek = seq_lseek,
1229 .release = lttng_tracker_ids_list_release,
1230 };
1231
1232 int lttng_session_list_tracker_ids(struct lttng_session *session,
1233 enum tracker_type tracker_type)
1234 {
1235 struct file *tracker_ids_list_file;
1236 struct seq_file *m;
1237 int file_fd, ret;
1238
1239 file_fd = lttng_get_unused_fd();
1240 if (file_fd < 0) {
1241 ret = file_fd;
1242 goto fd_error;
1243 }
1244
1245 tracker_ids_list_file = anon_inode_getfile("[lttng_tracker_ids_list]",
1246 &lttng_tracker_ids_list_fops,
1247 NULL, O_RDWR);
1248 if (IS_ERR(tracker_ids_list_file)) {
1249 ret = PTR_ERR(tracker_ids_list_file);
1250 goto file_error;
1251 }
1252 if (!atomic_long_add_unless(&session->file->f_count, 1, LONG_MAX)) {
1253 ret = -EOVERFLOW;
1254 goto refcount_error;
1255 }
1256 ret = lttng_tracker_ids_list_fops.open(NULL, tracker_ids_list_file);
1257 if (ret < 0)
1258 goto open_error;
1259 m = tracker_ids_list_file->private_data;
1260
1261 m->private = get_tracker(session, tracker_type);
1262 BUG_ON(!m->private);
1263 fd_install(file_fd, tracker_ids_list_file);
1264
1265 return file_fd;
1266
1267 open_error:
1268 atomic_long_dec(&session->file->f_count);
1269 refcount_error:
1270 fput(tracker_ids_list_file);
1271 file_error:
1272 put_unused_fd(file_fd);
1273 fd_error:
1274 return ret;
1275 }
1276
1277 /*
1278 * Enabler management.
1279 */
1280 static
1281 int lttng_match_enabler_star_glob(const char *desc_name,
1282 const char *pattern)
1283 {
1284 if (!strutils_star_glob_match(pattern, LTTNG_SIZE_MAX,
1285 desc_name, LTTNG_SIZE_MAX))
1286 return 0;
1287 return 1;
1288 }
1289
1290 static
1291 int lttng_match_enabler_name(const char *desc_name,
1292 const char *name)
1293 {
1294 if (strcmp(desc_name, name))
1295 return 0;
1296 return 1;
1297 }
1298
1299 static
1300 int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
1301 struct lttng_enabler *enabler)
1302 {
1303 const char *desc_name, *enabler_name;
1304 bool compat = false, entry = false;
1305
1306 enabler_name = enabler->event_param.name;
1307 switch (enabler->event_param.instrumentation) {
1308 case LTTNG_KERNEL_TRACEPOINT:
1309 desc_name = desc->name;
1310 switch (enabler->format_type) {
1311 case LTTNG_ENABLER_FORMAT_STAR_GLOB:
1312 return lttng_match_enabler_star_glob(desc_name, enabler_name);
1313 case LTTNG_ENABLER_FORMAT_NAME:
1314 return lttng_match_enabler_name(desc_name, enabler_name);
1315 default:
1316 return -EINVAL;
1317 }
1318 break;
1319 case LTTNG_KERNEL_SYSCALL:
1320 desc_name = desc->name;
1321 if (!strncmp(desc_name, "compat_", strlen("compat_"))) {
1322 desc_name += strlen("compat_");
1323 compat = true;
1324 }
1325 if (!strncmp(desc_name, "syscall_exit_",
1326 strlen("syscall_exit_"))) {
1327 desc_name += strlen("syscall_exit_");
1328 } else if (!strncmp(desc_name, "syscall_entry_",
1329 strlen("syscall_entry_"))) {
1330 desc_name += strlen("syscall_entry_");
1331 entry = true;
1332 } else {
1333 WARN_ON_ONCE(1);
1334 return -EINVAL;
1335 }
1336 switch (enabler->event_param.u.syscall.entryexit) {
1337 case LTTNG_KERNEL_SYSCALL_ENTRYEXIT:
1338 break;
1339 case LTTNG_KERNEL_SYSCALL_ENTRY:
1340 if (!entry)
1341 return 0;
1342 break;
1343 case LTTNG_KERNEL_SYSCALL_EXIT:
1344 if (entry)
1345 return 0;
1346 break;
1347 default:
1348 return -EINVAL;
1349 }
1350 switch (enabler->event_param.u.syscall.abi) {
1351 case LTTNG_KERNEL_SYSCALL_ABI_ALL:
1352 break;
1353 case LTTNG_KERNEL_SYSCALL_ABI_NATIVE:
1354 if (compat)
1355 return 0;
1356 break;
1357 case LTTNG_KERNEL_SYSCALL_ABI_COMPAT:
1358 if (!compat)
1359 return 0;
1360 break;
1361 default:
1362 return -EINVAL;
1363 }
1364 switch (enabler->event_param.u.syscall.match) {
1365 case LTTNG_SYSCALL_MATCH_NAME:
1366 switch (enabler->format_type) {
1367 case LTTNG_ENABLER_FORMAT_STAR_GLOB:
1368 return lttng_match_enabler_star_glob(desc_name, enabler_name);
1369 case LTTNG_ENABLER_FORMAT_NAME:
1370 return lttng_match_enabler_name(desc_name, enabler_name);
1371 default:
1372 return -EINVAL;
1373 }
1374 break;
1375 case LTTNG_SYSCALL_MATCH_NR:
1376 return -EINVAL; /* Not implemented. */
1377 default:
1378 return -EINVAL;
1379 }
1380 break;
1381 default:
1382 WARN_ON_ONCE(1);
1383 return -EINVAL;
1384 }
1385 }
1386
1387 static
1388 int lttng_event_enabler_match_event(struct lttng_event_enabler *event_enabler,
1389 struct lttng_event *event)
1390 {
1391 struct lttng_enabler *base_enabler = lttng_event_enabler_as_enabler(
1392 event_enabler);
1393
1394 if (base_enabler->event_param.instrumentation != event->instrumentation)
1395 return 0;
1396 if (lttng_desc_match_enabler(event->desc, base_enabler)
1397 && event->chan == event_enabler->chan)
1398 return 1;
1399 else
1400 return 0;
1401 }
1402
1403 static
1404 struct lttng_enabler_ref *lttng_enabler_ref(
1405 struct list_head *enablers_ref_list,
1406 struct lttng_enabler *enabler)
1407 {
1408 struct lttng_enabler_ref *enabler_ref;
1409
1410 list_for_each_entry(enabler_ref, enablers_ref_list, node) {
1411 if (enabler_ref->ref == enabler)
1412 return enabler_ref;
1413 }
1414 return NULL;
1415 }
1416
1417 static
1418 void lttng_create_tracepoint_event_if_missing(struct lttng_event_enabler *event_enabler)
1419 {
1420 struct lttng_session *session = event_enabler->chan->session;
1421 struct lttng_probe_desc *probe_desc;
1422 const struct lttng_event_desc *desc;
1423 int i;
1424 struct list_head *probe_list;
1425
1426 probe_list = lttng_get_probe_list_head();
1427 /*
1428 * For each probe event, if we find that a probe event matches
1429 * our enabler, create an associated lttng_event if not
1430 * already present.
1431 */
1432 list_for_each_entry(probe_desc, probe_list, head) {
1433 for (i = 0; i < probe_desc->nr_events; i++) {
1434 int found = 0;
1435 struct hlist_head *head;
1436 struct lttng_event *event;
1437
1438 desc = probe_desc->event_desc[i];
1439 if (!lttng_desc_match_enabler(desc,
1440 lttng_event_enabler_as_enabler(event_enabler)))
1441 continue;
1442
1443 /*
1444 * Check if already created.
1445 */
1446 head = utils_borrow_hash_table_bucket(
1447 session->events_ht.table, LTTNG_EVENT_HT_SIZE,
1448 desc->name);
1449 lttng_hlist_for_each_entry(event, head, hlist) {
1450 if (event->desc == desc
1451 && event->chan == event_enabler->chan)
1452 found = 1;
1453 }
1454 if (found)
1455 continue;
1456
1457 /*
1458 * We need to create an event for this
1459 * event probe.
1460 */
1461 event = _lttng_event_create(event_enabler->chan,
1462 NULL, NULL, desc,
1463 LTTNG_KERNEL_TRACEPOINT);
1464 if (!event) {
1465 printk(KERN_INFO "LTTng: Unable to create event %s\n",
1466 probe_desc->event_desc[i]->name);
1467 }
1468 }
1469 }
1470 }
1471
1472 static
1473 void lttng_create_syscall_event_if_missing(struct lttng_event_enabler *event_enabler)
1474 {
1475 int ret;
1476
1477 ret = lttng_syscalls_register(event_enabler->chan, NULL);
1478 WARN_ON_ONCE(ret);
1479 }
1480
1481 /*
1482 * Create struct lttng_event if it is missing and present in the list of
1483 * tracepoint probes.
1484 * Should be called with sessions mutex held.
1485 */
1486 static
1487 void lttng_create_event_if_missing(struct lttng_event_enabler *event_enabler)
1488 {
1489 switch (event_enabler->base.event_param.instrumentation) {
1490 case LTTNG_KERNEL_TRACEPOINT:
1491 lttng_create_tracepoint_event_if_missing(event_enabler);
1492 break;
1493 case LTTNG_KERNEL_SYSCALL:
1494 lttng_create_syscall_event_if_missing(event_enabler);
1495 break;
1496 default:
1497 WARN_ON_ONCE(1);
1498 break;
1499 }
1500 }
1501
1502 /*
1503 * Create events associated with an event_enabler (if not already present),
1504 * and add backward reference from the event to the enabler.
1505 * Should be called with sessions mutex held.
1506 */
1507 static
1508 int lttng_event_enabler_ref_events(struct lttng_event_enabler *event_enabler)
1509 {
1510 struct lttng_channel *chan = event_enabler->chan;
1511 struct lttng_session *session = event_enabler->chan->session;
1512 struct lttng_enabler *base_enabler = lttng_event_enabler_as_enabler(event_enabler);
1513 struct lttng_event *event;
1514
1515 if (base_enabler->event_param.instrumentation == LTTNG_KERNEL_SYSCALL &&
1516 base_enabler->event_param.u.syscall.entryexit == LTTNG_KERNEL_SYSCALL_ENTRYEXIT &&
1517 base_enabler->event_param.u.syscall.abi == LTTNG_KERNEL_SYSCALL_ABI_ALL &&
1518 base_enabler->event_param.u.syscall.match == LTTNG_SYSCALL_MATCH_NAME &&
1519 !strcmp(base_enabler->event_param.name, "*")) {
1520 if (base_enabler->enabled)
1521 WRITE_ONCE(chan->syscall_all, 1);
1522 else
1523 WRITE_ONCE(chan->syscall_all, 0);
1524 }
1525
1526 /* First ensure that probe events are created for this enabler. */
1527 lttng_create_event_if_missing(event_enabler);
1528
1529 /* For each event matching event_enabler in session event list. */
1530 list_for_each_entry(event, &session->events, list) {
1531 struct lttng_enabler_ref *enabler_ref;
1532
1533 if (!lttng_event_enabler_match_event(event_enabler, event))
1534 continue;
1535 enabler_ref = lttng_enabler_ref(&event->enablers_ref_head,
1536 lttng_event_enabler_as_enabler(event_enabler));
1537 if (!enabler_ref) {
1538 /*
1539 * If no backward ref, create it.
1540 * Add backward ref from event to event_enabler.
1541 */
1542 enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
1543 if (!enabler_ref)
1544 return -ENOMEM;
1545 enabler_ref->ref = lttng_event_enabler_as_enabler(event_enabler);
1546 list_add(&enabler_ref->node,
1547 &event->enablers_ref_head);
1548 }
1549
1550 /*
1551 * Link filter bytecodes if not linked yet.
1552 */
1553 lttng_enabler_link_bytecode(event->desc,
1554 lttng_static_ctx,
1555 &event->bytecode_runtime_head,
1556 lttng_event_enabler_as_enabler(event_enabler));
1557
1558 /* TODO: merge event context. */
1559 }
1560 return 0;
1561 }
1562
1563 /*
1564 * Called at module load: connect the probe on all enablers matching
1565 * this event.
1566 * Called with sessions lock held.
1567 */
1568 int lttng_fix_pending_events(void)
1569 {
1570 struct lttng_session *session;
1571
1572 list_for_each_entry(session, &sessions, list)
1573 lttng_session_lazy_sync_event_enablers(session);
1574 return 0;
1575 }
1576
1577 struct lttng_event_enabler *lttng_event_enabler_create(
1578 enum lttng_enabler_format_type format_type,
1579 struct lttng_kernel_event *event_param,
1580 struct lttng_channel *chan)
1581 {
1582 struct lttng_event_enabler *event_enabler;
1583
1584 event_enabler = kzalloc(sizeof(*event_enabler), GFP_KERNEL);
1585 if (!event_enabler)
1586 return NULL;
1587 event_enabler->base.format_type = format_type;
1588 INIT_LIST_HEAD(&event_enabler->base.filter_bytecode_head);
1589 memcpy(&event_enabler->base.event_param, event_param,
1590 sizeof(event_enabler->base.event_param));
1591 event_enabler->chan = chan;
1592 /* ctx left NULL */
1593 event_enabler->base.enabled = 0;
1594 event_enabler->base.evtype = LTTNG_TYPE_ENABLER;
1595 mutex_lock(&sessions_mutex);
1596 list_add(&event_enabler->node, &event_enabler->chan->session->enablers_head);
1597 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
1598 mutex_unlock(&sessions_mutex);
1599 return event_enabler;
1600 }
1601
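/*
 * Illustrative sketch (assumption): enabling every tracepoint matching a
 * star-glob pattern through an enabler rather than one lttng_event at a time.
 * The pattern and the channel "chan" are hypothetical examples.
 *
 *	struct lttng_kernel_event ev_param;
 *	struct lttng_event_enabler *enabler;
 *	int ret;
 *
 *	memset(&ev_param, 0, sizeof(ev_param));
 *	strscpy(ev_param.name, "sched_*", sizeof(ev_param.name));
 *	ev_param.instrumentation = LTTNG_KERNEL_TRACEPOINT;
 *
 *	enabler = lttng_event_enabler_create(LTTNG_ENABLER_FORMAT_STAR_GLOB,
 *			&ev_param, chan);
 *	if (!enabler)
 *		return -ENOMEM;
 *	ret = lttng_event_enabler_enable(enabler);
 *	-- matching events are created and armed when the enablers are
 *	-- synced with the session (see lttng_session_sync_event_enablers)
 */
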
1602 int lttng_event_enabler_enable(struct lttng_event_enabler *event_enabler)
1603 {
1604 mutex_lock(&sessions_mutex);
1605 lttng_event_enabler_as_enabler(event_enabler)->enabled = 1;
1606 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
1607 mutex_unlock(&sessions_mutex);
1608 return 0;
1609 }
1610
1611 int lttng_event_enabler_disable(struct lttng_event_enabler *event_enabler)
1612 {
1613 mutex_lock(&sessions_mutex);
1614 lttng_event_enabler_as_enabler(event_enabler)->enabled = 0;
1615 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
1616 mutex_unlock(&sessions_mutex);
1617 return 0;
1618 }
1619
1620 static
1621 int lttng_enabler_attach_bytecode(struct lttng_enabler *enabler,
1622 struct lttng_kernel_filter_bytecode __user *bytecode)
1623 {
1624 struct lttng_filter_bytecode_node *bytecode_node;
1625 uint32_t bytecode_len;
1626 int ret;
1627
1628 ret = get_user(bytecode_len, &bytecode->len);
1629 if (ret)
1630 return ret;
1631 bytecode_node = kzalloc(sizeof(*bytecode_node) + bytecode_len,
1632 GFP_KERNEL);
1633 if (!bytecode_node)
1634 return -ENOMEM;
1635 ret = copy_from_user(&bytecode_node->bc, bytecode,
1636 sizeof(*bytecode) + bytecode_len);
1637 if (ret)
1638 goto error_free;
1639
1640 bytecode_node->enabler = enabler;
1641 /* Enforce length based on allocated size */
1642 bytecode_node->bc.len = bytecode_len;
1643 list_add_tail(&bytecode_node->node, &enabler->filter_bytecode_head);
1644
1645 return 0;
1646
1647 error_free:
1648 kfree(bytecode_node);
1649 return ret;
1650 }
1651
1652 int lttng_event_enabler_attach_bytecode(struct lttng_event_enabler *event_enabler,
1653 struct lttng_kernel_filter_bytecode __user *bytecode)
1654 {
1655 int ret;
1656 ret = lttng_enabler_attach_bytecode(
1657 lttng_event_enabler_as_enabler(event_enabler), bytecode);
1658 if (ret)
1659 goto error;
1660
1661 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
1662 return 0;
1663
1664 error:
1665 return ret;
1666 }
1667
1668 int lttng_event_add_callsite(struct lttng_event *event,
1669 struct lttng_kernel_event_callsite __user *callsite)
1670 {
1671
1672 switch (event->instrumentation) {
1673 case LTTNG_KERNEL_UPROBE:
1674 return lttng_uprobes_add_callsite(event, callsite);
1675 default:
1676 return -EINVAL;
1677 }
1678 }
1679
1680 int lttng_event_enabler_attach_context(struct lttng_event_enabler *event_enabler,
1681 struct lttng_kernel_context *context_param)
1682 {
1683 return -ENOSYS;
1684 }
1685
1686 static
1687 void lttng_enabler_destroy(struct lttng_enabler *enabler)
1688 {
1689 struct lttng_filter_bytecode_node *filter_node, *tmp_filter_node;
1690
1691 /* Destroy filter bytecode */
1692 list_for_each_entry_safe(filter_node, tmp_filter_node,
1693 &enabler->filter_bytecode_head, node) {
1694 kfree(filter_node);
1695 }
1696 }
1697
1698 static
1699 void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler)
1700 {
1701 lttng_enabler_destroy(lttng_event_enabler_as_enabler(event_enabler));
1702
1703 /* Destroy contexts */
1704 lttng_destroy_context(event_enabler->ctx);
1705
1706 list_del(&event_enabler->node);
1707 kfree(event_enabler);
1708 }
1709
1710 /*
1711 * lttng_session_sync_event_enablers should be called just before starting a
1712 * session.
1713 * Should be called with sessions mutex held.
1714 */
1715 static
1716 void lttng_session_sync_event_enablers(struct lttng_session *session)
1717 {
1718 struct lttng_event_enabler *event_enabler;
1719 struct lttng_event *event;
1720
1721 list_for_each_entry(event_enabler, &session->enablers_head, node)
1722 lttng_event_enabler_ref_events(event_enabler);
1723 /*
1724 * For each event, if at least one of its enablers is enabled,
1725 * and its channel and session transient states are enabled, we
1726 * enable the event, else we disable it.
1727 */
1728 list_for_each_entry(event, &session->events, list) {
1729 struct lttng_enabler_ref *enabler_ref;
1730 struct lttng_bytecode_runtime *runtime;
1731 int enabled = 0, has_enablers_without_bytecode = 0;
1732
1733 switch (event->instrumentation) {
1734 case LTTNG_KERNEL_TRACEPOINT:
1735 case LTTNG_KERNEL_SYSCALL:
1736 /* Enable events */
1737 list_for_each_entry(enabler_ref,
1738 &event->enablers_ref_head, node) {
1739 if (enabler_ref->ref->enabled) {
1740 enabled = 1;
1741 break;
1742 }
1743 }
1744 break;
1745 default:
1746 /* Not handled with lazy sync. */
1747 continue;
1748 }
1749 /*
1750 * Enabled state is based on union of enablers, with
1751 * intersection of session and channel transient enable
1752 * states.
1753 */
1754 enabled = enabled && session->tstate && event->chan->tstate;
1755
1756 WRITE_ONCE(event->enabled, enabled);
1757 /*
1758 * Sync tracepoint registration with event enabled
1759 * state.
1760 */
1761 if (enabled) {
1762 register_event(event);
1763 } else {
1764 _lttng_event_unregister(event);
1765 }
1766
1767 /* Check if has enablers without bytecode enabled */
1768 list_for_each_entry(enabler_ref,
1769 &event->enablers_ref_head, node) {
1770 if (enabler_ref->ref->enabled
1771 && list_empty(&enabler_ref->ref->filter_bytecode_head)) {
1772 has_enablers_without_bytecode = 1;
1773 break;
1774 }
1775 }
1776 event->has_enablers_without_bytecode =
1777 has_enablers_without_bytecode;
1778
1779 /* Enable filters */
1780 list_for_each_entry(runtime,
1781 &event->bytecode_runtime_head, node)
1782 lttng_filter_sync_state(runtime);
1783 }
1784 }
1785
1786 /*
1787 * Apply enablers to session events, adding events to session if need
1788 * be. It is required after each modification applied to an active
1789 * session, and right before session "start".
1790 * "lazy" sync means we only sync if required.
1791 * Should be called with sessions mutex held.
1792 */
1793 static
1794 void lttng_session_lazy_sync_event_enablers(struct lttng_session *session)
1795 {
1796 /* We can skip if session is not active */
1797 if (!session->active)
1798 return;
1799 lttng_session_sync_event_enablers(session);
1800 }
1801
1802 /*
1803 * Serialize at most one packet worth of metadata into a metadata
1804 * channel.
1805 * We grab the metadata cache mutex to get exclusive access to our metadata
1806 * buffer and to the metadata cache. Exclusive access to the metadata buffer
1807 * allows us to do racy operations such as looking for remaining space left in
1808 * packet and write, since mutual exclusion protects us from concurrent writes.
1809 * Mutual exclusion on the metadata cache allows us to read the cache content
1810 * without racing against reallocation of the cache by updates.
1811 * Returns the number of bytes written in the channel, 0 if no data
1812 * was written and a negative value on error.
1813 */
1814 int lttng_metadata_output_channel(struct lttng_metadata_stream *stream,
1815 struct channel *chan, bool *coherent)
1816 {
1817 struct lib_ring_buffer_ctx ctx;
1818 int ret = 0;
1819 size_t len, reserve_len;
1820
1821 /*
1822 * Ensure we support multiple get_next / put sequences followed by
1823 * put_next. The metadata cache lock protects reading the metadata
1824 * cache. It can indeed be read concurrently by "get_next_subbuf" and
1825 * "flush" operations on the buffer invoked by different processes.
1826 * Moreover, since the metadata cache memory can be reallocated, we
1827 * need to have exclusive access against updates even though we only
1828 * read it.
1829 */
1830 mutex_lock(&stream->metadata_cache->lock);
1831 WARN_ON(stream->metadata_in < stream->metadata_out);
1832 if (stream->metadata_in != stream->metadata_out)
1833 goto end;
1834
1835 /* Metadata regenerated, change the version. */
1836 if (stream->metadata_cache->version != stream->version)
1837 stream->version = stream->metadata_cache->version;
1838
1839 len = stream->metadata_cache->metadata_written -
1840 stream->metadata_in;
1841 if (!len)
1842 goto end;
1843 reserve_len = min_t(size_t,
1844 stream->transport->ops.packet_avail_size(chan),
1845 len);
1846 lib_ring_buffer_ctx_init(&ctx, chan, NULL, reserve_len,
1847 sizeof(char), -1);
1848 /*
1849 * If reservation failed, return an error to the caller.
1850 */
1851 ret = stream->transport->ops.event_reserve(&ctx, 0);
1852 if (ret != 0) {
1853 printk(KERN_WARNING "LTTng: Metadata event reservation failed\n");
1854 stream->coherent = false;
1855 goto end;
1856 }
1857 stream->transport->ops.event_write(&ctx,
1858 stream->metadata_cache->data + stream->metadata_in,
1859 reserve_len);
1860 stream->transport->ops.event_commit(&ctx);
1861 stream->metadata_in += reserve_len;
1862 if (reserve_len < len)
1863 stream->coherent = false;
1864 else
1865 stream->coherent = true;
1866 ret = reserve_len;
1867
1868 end:
1869 if (coherent)
1870 *coherent = stream->coherent;
1871 mutex_unlock(&stream->metadata_cache->lock);
1872 return ret;
1873 }
1874
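/*
 * Illustrative sketch (assumption): how a metadata reader can drain the cache
 * into its ring buffer channel using the return-value contract documented
 * above (bytes written, 0 when the cache is fully consumed, negative on
 * error).
 *
 *	int ret;
 *
 *	for (;;) {
 *		ret = lttng_metadata_output_channel(stream, chan, NULL);
 *		if (ret < 0)
 *			return ret;	-- reservation or transport error
 *		if (ret == 0)
 *			break;		-- no metadata left to copy
 *	}
 */
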
1875 static
1876 void lttng_metadata_begin(struct lttng_session *session)
1877 {
1878 if (atomic_inc_return(&session->metadata_cache->producing) == 1)
1879 mutex_lock(&session->metadata_cache->lock);
1880 }
1881
1882 static
1883 void lttng_metadata_end(struct lttng_session *session)
1884 {
1885 WARN_ON_ONCE(!atomic_read(&session->metadata_cache->producing));
1886 if (atomic_dec_return(&session->metadata_cache->producing) == 0) {
1887 struct lttng_metadata_stream *stream;
1888
1889 list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list)
1890 wake_up_interruptible(&stream->read_wait);
1891 mutex_unlock(&session->metadata_cache->lock);
1892 }
1893 }
1894
1895 /*
1896 * Write the metadata to the metadata cache.
1897 * Must be called with sessions_mutex held.
1898 * The metadata cache lock protects us from concurrent read access from
1899 * thread outputting metadata content to ring buffer.
1900 * The content of the printf is printed as a single atomic metadata
1901 * transaction.
1902 */
1903 int lttng_metadata_printf(struct lttng_session *session,
1904 const char *fmt, ...)
1905 {
1906 char *str;
1907 size_t len;
1908 va_list ap;
1909
1910 WARN_ON_ONCE(!LTTNG_READ_ONCE(session->active));
1911
1912 va_start(ap, fmt);
1913 str = kvasprintf(GFP_KERNEL, fmt, ap);
1914 va_end(ap);
1915 if (!str)
1916 return -ENOMEM;
1917
1918 len = strlen(str);
1919 WARN_ON_ONCE(!atomic_read(&session->metadata_cache->producing));
1920 if (session->metadata_cache->metadata_written + len >
1921 session->metadata_cache->cache_alloc) {
1922 char *tmp_cache_realloc;
1923 unsigned int tmp_cache_alloc_size;
1924
1925 tmp_cache_alloc_size = max_t(unsigned int,
1926 session->metadata_cache->cache_alloc + len,
1927 session->metadata_cache->cache_alloc << 1);
1928 tmp_cache_realloc = vzalloc(tmp_cache_alloc_size);
1929 if (!tmp_cache_realloc)
1930 goto err;
1931 if (session->metadata_cache->data) {
1932 memcpy(tmp_cache_realloc,
1933 session->metadata_cache->data,
1934 session->metadata_cache->cache_alloc);
1935 vfree(session->metadata_cache->data);
1936 }
1937
1938 session->metadata_cache->cache_alloc = tmp_cache_alloc_size;
1939 session->metadata_cache->data = tmp_cache_realloc;
1940 }
1941 memcpy(session->metadata_cache->data +
1942 session->metadata_cache->metadata_written,
1943 str, len);
1944 session->metadata_cache->metadata_written += len;
1945 kfree(str);
1946
1947 return 0;
1948
1949 err:
1950 kfree(str);
1951 return -ENOMEM;
1952 }
1953
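/*
 * Illustrative sketch (assumption): producers in this file emit metadata by
 * bracketing one or more lttng_metadata_printf() calls between
 * lttng_metadata_begin() and lttng_metadata_end(), with sessions_mutex held,
 * so the whole sequence reaches the cache as one coherent transaction. The
 * field declaration below is a hypothetical example.
 *
 *	int ret;
 *
 *	lttng_metadata_begin(session);
 *	ret = lttng_metadata_printf(session,
 *		"	integer { size = 32; } _%s;\n", "my_field");
 *	lttng_metadata_end(session);
 *	if (ret)
 *		return ret;
 */
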
1954 static
1955 int print_tabs(struct lttng_session *session, size_t nesting)
1956 {
1957 size_t i;
1958
1959 for (i = 0; i < nesting; i++) {
1960 int ret;
1961
1962 ret = lttng_metadata_printf(session, " ");
1963 if (ret) {
1964 return ret;
1965 }
1966 }
1967 return 0;
1968 }
1969
1970 static
1971 int lttng_field_name_statedump(struct lttng_session *session,
1972 const struct lttng_event_field *field,
1973 size_t nesting)
1974 {
1975 return lttng_metadata_printf(session, " _%s;\n", field->name);
1976 }
1977
1978 static
1979 int _lttng_integer_type_statedump(struct lttng_session *session,
1980 const struct lttng_type *type,
1981 size_t nesting)
1982 {
1983 int ret;
1984
1985 WARN_ON_ONCE(type->atype != atype_integer);
1986 ret = print_tabs(session, nesting);
1987 if (ret)
1988 return ret;
1989 ret = lttng_metadata_printf(session,
1990 "integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s }",
1991 type->u.integer.size,
1992 type->u.integer.alignment,
1993 type->u.integer.signedness,
1994 (type->u.integer.encoding == lttng_encode_none)
1995 ? "none"
1996 : (type->u.integer.encoding == lttng_encode_UTF8)
1997 ? "UTF8"
1998 : "ASCII",
1999 type->u.integer.base,
2000 #if __BYTE_ORDER == __BIG_ENDIAN
2001 type->u.integer.reverse_byte_order ? " byte_order = le;" : ""
2002 #else
2003 type->u.integer.reverse_byte_order ? " byte_order = be;" : ""
2004 #endif
2005 );
2006 return ret;
2007 }
2008
2009 /*
2010 * Must be called with sessions_mutex held.
2011 */
2012 static
2013 int _lttng_struct_type_statedump(struct lttng_session *session,
2014 const struct lttng_type *type,
2015 size_t nesting)
2016 {
2017 int ret;
2018 uint32_t i, nr_fields;
2019 unsigned int alignment;
2020
2021 WARN_ON_ONCE(type->atype != atype_struct_nestable);
2022
2023 ret = print_tabs(session, nesting);
2024 if (ret)
2025 return ret;
2026 ret = lttng_metadata_printf(session,
2027 "struct {\n");
2028 if (ret)
2029 return ret;
2030 nr_fields = type->u.struct_nestable.nr_fields;
2031 for (i = 0; i < nr_fields; i++) {
2032 const struct lttng_event_field *iter_field;
2033
2034 iter_field = &type->u.struct_nestable.fields[i];
2035 ret = _lttng_field_statedump(session, iter_field, nesting + 1);
2036 if (ret)
2037 return ret;
2038 }
2039 ret = print_tabs(session, nesting);
2040 if (ret)
2041 return ret;
2042 alignment = type->u.struct_nestable.alignment;
2043 if (alignment) {
2044 ret = lttng_metadata_printf(session,
2045 "} align(%u)",
2046 alignment);
2047 } else {
2048 ret = lttng_metadata_printf(session,
2049 "}");
2050 }
2051 return ret;
2052 }
2053
2054 /*
2055 * Must be called with sessions_mutex held.
2056 */
2057 static
2058 int _lttng_struct_field_statedump(struct lttng_session *session,
2059 const struct lttng_event_field *field,
2060 size_t nesting)
2061 {
2062 int ret;
2063
2064 ret = _lttng_struct_type_statedump(session,
2065 &field->type, nesting);
2066 if (ret)
2067 return ret;
2068 return lttng_field_name_statedump(session, field, nesting);
2069 }
2070
2071 /*
2072 * Must be called with sessions_mutex held.
2073 */
2074 static
2075 int _lttng_variant_type_statedump(struct lttng_session *session,
2076 const struct lttng_type *type,
2077 size_t nesting)
2078 {
2079 int ret;
2080 uint32_t i, nr_choices;
2081
2082 WARN_ON_ONCE(type->atype != atype_variant_nestable);
2083 /*
2084 * CTF 1.8 does not allow expressing nonzero variant alignment in a nestable way.
2085 */
2086 if (type->u.variant_nestable.alignment != 0)
2087 return -EINVAL;
2088 ret = print_tabs(session, nesting);
2089 if (ret)
2090 return ret;
2091 ret = lttng_metadata_printf(session,
2092 "variant <_%s> {\n",
2093 type->u.variant_nestable.tag_name);
2094 if (ret)
2095 return ret;
2096 nr_choices = type->u.variant_nestable.nr_choices;
2097 for (i = 0; i < nr_choices; i++) {
2098 const struct lttng_event_field *iter_field;
2099
2100 iter_field = &type->u.variant_nestable.choices[i];
2101 ret = _lttng_field_statedump(session, iter_field, nesting + 1);
2102 if (ret)
2103 return ret;
2104 }
2105 ret = print_tabs(session, nesting);
2106 if (ret)
2107 return ret;
2108 ret = lttng_metadata_printf(session,
2109 "}");
2110 return ret;
2111 }
2112
2113 /*
2114 * Must be called with sessions_mutex held.
2115 */
2116 static
2117 int _lttng_variant_field_statedump(struct lttng_session *session,
2118 const struct lttng_event_field *field,
2119 size_t nesting)
2120 {
2121 int ret;
2122
2123 ret = _lttng_variant_type_statedump(session,
2124 &field->type, nesting);
2125 if (ret)
2126 return ret;
2127 return lttng_field_name_statedump(session, field, nesting);
2128 }
2129
2130 /*
2131 * Must be called with sessions_mutex held.
2132 */
2133 static
2134 int _lttng_array_field_statedump(struct lttng_session *session,
2135 const struct lttng_event_field *field,
2136 size_t nesting)
2137 {
2138 int ret;
2139 const struct lttng_type *elem_type;
2140
2141 WARN_ON_ONCE(field->type.atype != atype_array_nestable);
2142
2143 if (field->type.u.array_nestable.alignment) {
2144 ret = print_tabs(session, nesting);
2145 if (ret)
2146 return ret;
2147 ret = lttng_metadata_printf(session,
2148 "struct { } align(%u) _%s_padding;\n",
2149 field->type.u.array_nestable.alignment * CHAR_BIT,
2150 field->name);
2151 if (ret)
2152 return ret;
2153 }
2154 /*
2155  * Nested compound types: only arrays of structures and variants are
2156 * currently supported.
2157 */
2158 elem_type = field->type.u.array_nestable.elem_type;
2159 switch (elem_type->atype) {
2160 case atype_integer:
2161 case atype_struct_nestable:
2162 case atype_variant_nestable:
2163 ret = _lttng_type_statedump(session, elem_type, nesting);
2164 if (ret)
2165 return ret;
2166 break;
2167
2168 default:
2169 return -EINVAL;
2170 }
2171 ret = lttng_metadata_printf(session,
2172 " _%s[%u];\n",
2173 field->name,
2174 field->type.u.array_nestable.length);
2175 return ret;
2176 }
2177
2178 /*
2179 * Must be called with sessions_mutex held.
2180 */
2181 static
2182 int _lttng_sequence_field_statedump(struct lttng_session *session,
2183 const struct lttng_event_field *field,
2184 size_t nesting)
2185 {
2186 int ret;
2187 const char *length_name;
2188 const struct lttng_type *elem_type;
2189
2190 WARN_ON_ONCE(field->type.atype != atype_sequence_nestable);
2191
2192 length_name = field->type.u.sequence_nestable.length_name;
2193
2194 if (field->type.u.sequence_nestable.alignment) {
2195 ret = print_tabs(session, nesting);
2196 if (ret)
2197 return ret;
2198 ret = lttng_metadata_printf(session,
2199 "struct { } align(%u) _%s_padding;\n",
2200 field->type.u.sequence_nestable.alignment * CHAR_BIT,
2201 field->name);
2202 if (ret)
2203 return ret;
2204 }
2205
2206 /*
2207  * Nested compound types: only sequences of structures and variants are
2208 * currently supported.
2209 */
2210 elem_type = field->type.u.sequence_nestable.elem_type;
2211 switch (elem_type->atype) {
2212 case atype_integer:
2213 case atype_struct_nestable:
2214 case atype_variant_nestable:
2215 ret = _lttng_type_statedump(session, elem_type, nesting);
2216 if (ret)
2217 return ret;
2218 break;
2219
2220 default:
2221 return -EINVAL;
2222 }
2223 ret = lttng_metadata_printf(session,
2224 " _%s[ _%s ];\n",
2225 field->name,
2226 field->type.u.sequence_nestable.length_name);
2227 return ret;
2228 }
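
/*
 * For illustration (hypothetical field): a sequence field "data" of
 * 8-bit unsigned integers, aligned on 8 bytes and using a length field
 * named "len", would be dumped roughly as:
 *
 *	struct { } align(64) _data_padding;
 *	integer { size = 8; align = 8; signed = 0; encoding = none; base = 10; } _data[ _len ];
 */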
2229
2230 /*
2231 * Must be called with sessions_mutex held.
2232 */
2233 static
2234 int _lttng_enum_type_statedump(struct lttng_session *session,
2235 const struct lttng_type *type,
2236 size_t nesting)
2237 {
2238 const struct lttng_enum_desc *enum_desc;
2239 const struct lttng_type *container_type;
2240 int ret;
2241 unsigned int i, nr_entries;
2242
2243 container_type = type->u.enum_nestable.container_type;
2244 if (container_type->atype != atype_integer) {
2245 ret = -EINVAL;
2246 goto end;
2247 }
2248 enum_desc = type->u.enum_nestable.desc;
2249 nr_entries = enum_desc->nr_entries;
2250
2251 ret = print_tabs(session, nesting);
2252 if (ret)
2253 goto end;
2254 ret = lttng_metadata_printf(session, "enum : ");
2255 if (ret)
2256 goto end;
2257 ret = _lttng_integer_type_statedump(session, container_type, 0);
2258 if (ret)
2259 goto end;
2260 ret = lttng_metadata_printf(session, " {\n");
2261 if (ret)
2262 goto end;
2263 /* Dump all entries */
2264 for (i = 0; i < nr_entries; i++) {
2265 const struct lttng_enum_entry *entry = &enum_desc->entries[i];
2266 int j, len;
2267
2268 ret = print_tabs(session, nesting + 1);
2269 if (ret)
2270 goto end;
2271 ret = lttng_metadata_printf(session,
2272 "\"");
2273 if (ret)
2274 goto end;
2275 len = strlen(entry->string);
2276 /* Escape the character '"' */
2277 for (j = 0; j < len; j++) {
2278 char c = entry->string[j];
2279
2280 switch (c) {
2281 case '"':
2282 ret = lttng_metadata_printf(session,
2283 "\\\"");
2284 break;
2285 case '\\':
2286 ret = lttng_metadata_printf(session,
2287 "\\\\");
2288 break;
2289 default:
2290 ret = lttng_metadata_printf(session,
2291 "%c", c);
2292 break;
2293 }
2294 if (ret)
2295 goto end;
2296 }
2297 ret = lttng_metadata_printf(session, "\"");
2298 if (ret)
2299 goto end;
2300
2301 if (entry->options.is_auto) {
2302 ret = lttng_metadata_printf(session, ",\n");
2303 if (ret)
2304 goto end;
2305 } else {
2306 ret = lttng_metadata_printf(session,
2307 " = ");
2308 if (ret)
2309 goto end;
2310 if (entry->start.signedness)
2311 ret = lttng_metadata_printf(session,
2312 "%lld", (long long) entry->start.value);
2313 else
2314 ret = lttng_metadata_printf(session,
2315 "%llu", entry->start.value);
2316 if (ret)
2317 goto end;
2318 if (entry->start.signedness == entry->end.signedness &&
2319 entry->start.value
2320 == entry->end.value) {
2321 ret = lttng_metadata_printf(session,
2322 ",\n");
2323 } else {
2324 if (entry->end.signedness) {
2325 ret = lttng_metadata_printf(session,
2326 " ... %lld,\n",
2327 (long long) entry->end.value);
2328 } else {
2329 ret = lttng_metadata_printf(session,
2330 " ... %llu,\n",
2331 entry->end.value);
2332 }
2333 }
2334 if (ret)
2335 goto end;
2336 }
2337 }
2338 ret = print_tabs(session, nesting);
2339 if (ret)
2340 goto end;
2341 ret = lttng_metadata_printf(session, "}");
2342 end:
2343 return ret;
2344 }
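
/*
 * For illustration (hypothetical enumeration): an enumeration with a
 * 32-bit unsigned container and entries { "ZERO" = 0, "LOW" = 1..5,
 * "AUTO" (auto-assigned) } would be dumped roughly as:
 *
 *	enum : integer { size = 32; align = 32; signed = 0; encoding = none; base = 10; } {
 *		"ZERO" = 0,
 *		"LOW" = 1 ... 5,
 *		"AUTO",
 *	}
 */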
2345
2346 /*
2347 * Must be called with sessions_mutex held.
2348 */
2349 static
2350 int _lttng_enum_field_statedump(struct lttng_session *session,
2351 const struct lttng_event_field *field,
2352 size_t nesting)
2353 {
2354 int ret;
2355
2356 ret = _lttng_enum_type_statedump(session, &field->type, nesting);
2357 if (ret)
2358 return ret;
2359 return lttng_field_name_statedump(session, field, nesting);
2360 }
2361
2362 static
2363 int _lttng_integer_field_statedump(struct lttng_session *session,
2364 const struct lttng_event_field *field,
2365 size_t nesting)
2366 {
2367 int ret;
2368
2369 ret = _lttng_integer_type_statedump(session, &field->type, nesting);
2370 if (ret)
2371 return ret;
2372 return lttng_field_name_statedump(session, field, nesting);
2373 }
2374
2375 static
2376 int _lttng_string_type_statedump(struct lttng_session *session,
2377 const struct lttng_type *type,
2378 size_t nesting)
2379 {
2380 int ret;
2381
2382 WARN_ON_ONCE(type->atype != atype_string);
2383 /* Default encoding is UTF8 */
2384 ret = print_tabs(session, nesting);
2385 if (ret)
2386 return ret;
2387 ret = lttng_metadata_printf(session,
2388 "string%s",
2389 type->u.string.encoding == lttng_encode_ASCII ?
2390 " { encoding = ASCII; }" : "");
2391 return ret;
2392 }
2393
2394 static
2395 int _lttng_string_field_statedump(struct lttng_session *session,
2396 const struct lttng_event_field *field,
2397 size_t nesting)
2398 {
2399 int ret;
2400
2401 WARN_ON_ONCE(field->type.atype != atype_string);
2402 ret = _lttng_string_type_statedump(session, &field->type, nesting);
2403 if (ret)
2404 return ret;
2405 return lttng_field_name_statedump(session, field, nesting);
2406 }
2407
2408 /*
2409 * Must be called with sessions_mutex held.
2410 */
2411 static
2412 int _lttng_type_statedump(struct lttng_session *session,
2413 const struct lttng_type *type,
2414 size_t nesting)
2415 {
2416 int ret = 0;
2417
2418 switch (type->atype) {
2419 case atype_integer:
2420 ret = _lttng_integer_type_statedump(session, type, nesting);
2421 break;
2422 case atype_enum_nestable:
2423 ret = _lttng_enum_type_statedump(session, type, nesting);
2424 break;
2425 case atype_string:
2426 ret = _lttng_string_type_statedump(session, type, nesting);
2427 break;
2428 case atype_struct_nestable:
2429 ret = _lttng_struct_type_statedump(session, type, nesting);
2430 break;
2431 case atype_variant_nestable:
2432 ret = _lttng_variant_type_statedump(session, type, nesting);
2433 break;
2434
2435 /* Nested arrays and sequences are not supported yet. */
2436 case atype_array_nestable:
2437 case atype_sequence_nestable:
2438 default:
2439 WARN_ON_ONCE(1);
2440 return -EINVAL;
2441 }
2442 return ret;
2443 }
2444
2445 /*
2446 * Must be called with sessions_mutex held.
2447 */
2448 static
2449 int _lttng_field_statedump(struct lttng_session *session,
2450 const struct lttng_event_field *field,
2451 size_t nesting)
2452 {
2453 int ret = 0;
2454
2455 switch (field->type.atype) {
2456 case atype_integer:
2457 ret = _lttng_integer_field_statedump(session, field, nesting);
2458 break;
2459 case atype_enum_nestable:
2460 ret = _lttng_enum_field_statedump(session, field, nesting);
2461 break;
2462 case atype_string:
2463 ret = _lttng_string_field_statedump(session, field, nesting);
2464 break;
2465 case atype_struct_nestable:
2466 ret = _lttng_struct_field_statedump(session, field, nesting);
2467 break;
2468 case atype_array_nestable:
2469 ret = _lttng_array_field_statedump(session, field, nesting);
2470 break;
2471 case atype_sequence_nestable:
2472 ret = _lttng_sequence_field_statedump(session, field, nesting);
2473 break;
2474 case atype_variant_nestable:
2475 ret = _lttng_variant_field_statedump(session, field, nesting);
2476 break;
2477
2478 default:
2479 WARN_ON_ONCE(1);
2480 return -EINVAL;
2481 }
2482 return ret;
2483 }
2484
2485 static
2486 int _lttng_context_metadata_statedump(struct lttng_session *session,
2487 struct lttng_ctx *ctx)
2488 {
2489 int ret = 0;
2490 int i;
2491
2492 if (!ctx)
2493 return 0;
2494 for (i = 0; i < ctx->nr_fields; i++) {
2495 const struct lttng_ctx_field *field = &ctx->fields[i];
2496
2497 ret = _lttng_field_statedump(session, &field->event_field, 2);
2498 if (ret)
2499 return ret;
2500 }
2501 return ret;
2502 }
2503
2504 static
2505 int _lttng_fields_metadata_statedump(struct lttng_session *session,
2506 struct lttng_event *event)
2507 {
2508 const struct lttng_event_desc *desc = event->desc;
2509 int ret = 0;
2510 int i;
2511
2512 for (i = 0; i < desc->nr_fields; i++) {
2513 const struct lttng_event_field *field = &desc->fields[i];
2514
2515 ret = _lttng_field_statedump(session, field, 2);
2516 if (ret)
2517 return ret;
2518 }
2519 return ret;
2520 }
2521
2522 /*
2523 * Must be called with sessions_mutex held.
2524 * The entire event metadata is printed as a single atomic metadata
2525 * transaction.
2526 */
2527 static
2528 int _lttng_event_metadata_statedump(struct lttng_session *session,
2529 struct lttng_channel *chan,
2530 struct lttng_event *event)
2531 {
2532 int ret = 0;
2533
2534 if (event->metadata_dumped || !LTTNG_READ_ONCE(session->active))
2535 return 0;
2536 if (chan->channel_type == METADATA_CHANNEL)
2537 return 0;
2538
2539 lttng_metadata_begin(session);
2540
2541 ret = lttng_metadata_printf(session,
2542 "event {\n"
2543 " name = \"%s\";\n"
2544 " id = %u;\n"
2545 " stream_id = %u;\n",
2546 event->desc->name,
2547 event->id,
2548 event->chan->id);
2549 if (ret)
2550 goto end;
2551
2552 if (event->ctx) {
2553 ret = lttng_metadata_printf(session,
2554 " context := struct {\n");
2555 if (ret)
2556 goto end;
2557 }
2558 ret = _lttng_context_metadata_statedump(session, event->ctx);
2559 if (ret)
2560 goto end;
2561 if (event->ctx) {
2562 ret = lttng_metadata_printf(session,
2563 " };\n");
2564 if (ret)
2565 goto end;
2566 }
2567
2568 ret = lttng_metadata_printf(session,
2569 " fields := struct {\n"
2570 );
2571 if (ret)
2572 goto end;
2573
2574 ret = _lttng_fields_metadata_statedump(session, event);
2575 if (ret)
2576 goto end;
2577
2578 /*
2579 * LTTng space reservation can only reserve multiples of the
2580 * byte size.
2581 */
2582 ret = lttng_metadata_printf(session,
2583 " };\n"
2584 "};\n\n");
2585 if (ret)
2586 goto end;
2587
2588 event->metadata_dumped = 1;
2589 end:
2590 lttng_metadata_end(session);
2591 return ret;
2592
2593 }
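
/*
 * For illustration, the transaction above produces a CTF event
 * declaration shaped like the following (event and field names are
 * hypothetical):
 *
 *	event {
 *		name = "sched_switch";
 *		id = 0;
 *		stream_id = 0;
 *		fields := struct {
 *			integer { size = 32; align = 32; signed = 1; encoding = none; base = 10; } _prev_tid;
 *		};
 *	};
 *
 * A "context := struct { ... };" block is emitted before the payload
 * fields only when the event has context fields attached.
 */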
2594
2595 /*
2596 * Must be called with sessions_mutex held.
2597 * The entire channel metadata is printed as a single atomic metadata
2598 * transaction.
2599 */
2600 static
2601 int _lttng_channel_metadata_statedump(struct lttng_session *session,
2602 struct lttng_channel *chan)
2603 {
2604 int ret = 0;
2605
2606 if (chan->metadata_dumped || !LTTNG_READ_ONCE(session->active))
2607 return 0;
2608
2609 if (chan->channel_type == METADATA_CHANNEL)
2610 return 0;
2611
2612 lttng_metadata_begin(session);
2613
2614 WARN_ON_ONCE(!chan->header_type);
2615 ret = lttng_metadata_printf(session,
2616 "stream {\n"
2617 " id = %u;\n"
2618 " event.header := %s;\n"
2619 " packet.context := struct packet_context;\n",
2620 chan->id,
2621 chan->header_type == 1 ? "struct event_header_compact" :
2622 "struct event_header_large");
2623 if (ret)
2624 goto end;
2625
2626 if (chan->ctx) {
2627 ret = lttng_metadata_printf(session,
2628 " event.context := struct {\n");
2629 if (ret)
2630 goto end;
2631 }
2632 ret = _lttng_context_metadata_statedump(session, chan->ctx);
2633 if (ret)
2634 goto end;
2635 if (chan->ctx) {
2636 ret = lttng_metadata_printf(session,
2637 " };\n");
2638 if (ret)
2639 goto end;
2640 }
2641
2642 ret = lttng_metadata_printf(session,
2643 "};\n\n");
2644
2645 chan->metadata_dumped = 1;
2646 end:
2647 lttng_metadata_end(session);
2648 return ret;
2649 }
2650
2651 /*
2652 * Must be called with sessions_mutex held.
2653 */
2654 static
2655 int _lttng_stream_packet_context_declare(struct lttng_session *session)
2656 {
2657 return lttng_metadata_printf(session,
2658 "struct packet_context {\n"
2659 " uint64_clock_monotonic_t timestamp_begin;\n"
2660 " uint64_clock_monotonic_t timestamp_end;\n"
2661 " uint64_t content_size;\n"
2662 " uint64_t packet_size;\n"
2663 " uint64_t packet_seq_num;\n"
2664 " unsigned long events_discarded;\n"
2665 " uint32_t cpu_id;\n"
2666 "};\n\n"
2667 );
2668 }
2669
2670 /*
2671 * Compact header:
2672 * id: range: 0 - 30.
2673 * id 31 is reserved to indicate an extended header.
2674 *
2675 * Large header:
2676 * id: range: 0 - 65534.
2677 * id 65535 is reserved to indicate an extended header.
2678 *
2679 * Must be called with sessions_mutex held.
2680 */
2681 static
2682 int _lttng_event_header_declare(struct lttng_session *session)
2683 {
2684 return lttng_metadata_printf(session,
2685 "struct event_header_compact {\n"
2686 " enum : uint5_t { compact = 0 ... 30, extended = 31 } id;\n"
2687 " variant <id> {\n"
2688 " struct {\n"
2689 " uint27_clock_monotonic_t timestamp;\n"
2690 " } compact;\n"
2691 " struct {\n"
2692 " uint32_t id;\n"
2693 " uint64_clock_monotonic_t timestamp;\n"
2694 " } extended;\n"
2695 " } v;\n"
2696 "} align(%u);\n"
2697 "\n"
2698 "struct event_header_large {\n"
2699 " enum : uint16_t { compact = 0 ... 65534, extended = 65535 } id;\n"
2700 " variant <id> {\n"
2701 " struct {\n"
2702 " uint32_clock_monotonic_t timestamp;\n"
2703 " } compact;\n"
2704 " struct {\n"
2705 " uint32_t id;\n"
2706 " uint64_clock_monotonic_t timestamp;\n"
2707 " } extended;\n"
2708 " } v;\n"
2709 "} align(%u);\n\n",
2710 lttng_alignof(uint32_t) * CHAR_BIT,
2711 lttng_alignof(uint16_t) * CHAR_BIT
2712 );
2713 }
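
/*
 * Size sketch of the two headers declared above: the compact form packs
 * a 5-bit id (values 0 - 30) and a 27-bit timestamp into 32 bits, so
 * most events pay 4 bytes of header (plus alignment); writing id 31
 * selects the extended variant, which carries a 32-bit id and a 64-bit
 * timestamp.  The large form uses a 16-bit id (0 - 65534) with a 32-bit
 * timestamp, and id 65535 likewise switches to the extended variant.
 */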
2714
2715 /*
2716 * Approximation of NTP time of day to clock monotonic correlation,
2717 * taken at start of trace.
2718 * Yes, this is only an approximation. Yes, we can (and will) do better
2719 * in future versions.
2720 * This function may return a negative offset. It may happen if the
2721 * system sets the REALTIME clock to 0 after boot.
2722 *
2723  * Use the 64-bit timespec on kernels that have it; this makes 32-bit
2724  * architectures y2038 compliant.
2725 */
2726 static
2727 int64_t measure_clock_offset(void)
2728 {
2729 uint64_t monotonic_avg, monotonic[2], realtime;
2730 uint64_t tcf = trace_clock_freq();
2731 int64_t offset;
2732 unsigned long flags;
2733 #ifdef LTTNG_KERNEL_HAS_TIMESPEC64
2734 struct timespec64 rts = { 0, 0 };
2735 #else
2736 struct timespec rts = { 0, 0 };
2737 #endif
2738
2739 /* Disable interrupts to increase correlation precision. */
2740 local_irq_save(flags);
2741 monotonic[0] = trace_clock_read64();
2742 #ifdef LTTNG_KERNEL_HAS_TIMESPEC64
2743 ktime_get_real_ts64(&rts);
2744 #else
2745 getnstimeofday(&rts);
2746 #endif
2747 monotonic[1] = trace_clock_read64();
2748 local_irq_restore(flags);
2749
2750 monotonic_avg = (monotonic[0] + monotonic[1]) >> 1;
2751 realtime = (uint64_t) rts.tv_sec * tcf;
2752 if (tcf == NSEC_PER_SEC) {
2753 realtime += rts.tv_nsec;
2754 } else {
2755 uint64_t n = rts.tv_nsec * tcf;
2756
2757 do_div(n, NSEC_PER_SEC);
2758 realtime += n;
2759 }
2760 offset = (int64_t) realtime - monotonic_avg;
2761 return offset;
2762 }
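
/*
 * In trace clock units, the computation above amounts to (sketch):
 *
 *	monotonic_avg = (monotonic[0] + monotonic[1]) / 2
 *	realtime      = tv_sec * freq + (tv_nsec * freq) / NSEC_PER_SEC
 *	offset        = realtime - monotonic_avg
 *
 * For example, with freq = 1 GHz, a realtime reading of 100.5 s and a
 * monotonic average of 25e9 ticks, the reported offset is
 * 100.5e9 - 25e9 = 75.5e9 ticks.
 */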
2763
2764 static
2765 int print_escaped_ctf_string(struct lttng_session *session, const char *string)
2766 {
2767 int ret = 0;
2768 size_t i;
2769 char cur;
2770
2771 i = 0;
2772 cur = string[i];
2773 while (cur != '\0') {
2774 switch (cur) {
2775 case '\n':
2776 ret = lttng_metadata_printf(session, "%s", "\\n");
2777 break;
2778 case '\\':
2779 case '"':
2780 ret = lttng_metadata_printf(session, "%c", '\\');
2781 if (ret)
2782 goto error;
2783 /* We still print the current char */
2784 /* Fallthrough */
2785 default:
2786 ret = lttng_metadata_printf(session, "%c", cur);
2787 break;
2788 }
2789
2790 if (ret)
2791 goto error;
2792
2793 cur = string[++i];
2794 }
2795 error:
2796 return ret;
2797 }
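
/*
 * For illustration, the escaping above turns the input string
 *
 *	say "hi" \o/
 *
 * into
 *
 *	say \"hi\" \\o/
 *
 * and a literal newline character is emitted as the two characters "\n".
 */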
2798
2799 static
2800 int print_metadata_escaped_field(struct lttng_session *session, const char *field,
2801 const char *field_value)
2802 {
2803 int ret;
2804
2805 ret = lttng_metadata_printf(session, " %s = \"", field);
2806 if (ret)
2807 goto error;
2808
2809 ret = print_escaped_ctf_string(session, field_value);
2810 if (ret)
2811 goto error;
2812
2813 ret = lttng_metadata_printf(session, "\";\n");
2814
2815 error:
2816 return ret;
2817 }
2818
2819 /*
2820 * Output metadata into this session's metadata buffers.
2821 * Must be called with sessions_mutex held.
2822 */
2823 static
2824 int _lttng_session_metadata_statedump(struct lttng_session *session)
2825 {
2826 unsigned char *uuid_c = session->uuid.b;
2827 unsigned char uuid_s[37], clock_uuid_s[BOOT_ID_LEN];
2828 const char *product_uuid;
2829 struct lttng_channel *chan;
2830 struct lttng_event *event;
2831 int ret = 0;
2832
2833 if (!LTTNG_READ_ONCE(session->active))
2834 return 0;
2835
2836 lttng_metadata_begin(session);
2837
2838 if (session->metadata_dumped)
2839 goto skip_session;
2840
2841 snprintf(uuid_s, sizeof(uuid_s),
2842 "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
2843 uuid_c[0], uuid_c[1], uuid_c[2], uuid_c[3],
2844 uuid_c[4], uuid_c[5], uuid_c[6], uuid_c[7],
2845 uuid_c[8], uuid_c[9], uuid_c[10], uuid_c[11],
2846 uuid_c[12], uuid_c[13], uuid_c[14], uuid_c[15]);
2847
2848 ret = lttng_metadata_printf(session,
2849 "typealias integer { size = 8; align = %u; signed = false; } := uint8_t;\n"
2850 "typealias integer { size = 16; align = %u; signed = false; } := uint16_t;\n"
2851 "typealias integer { size = 32; align = %u; signed = false; } := uint32_t;\n"
2852 "typealias integer { size = 64; align = %u; signed = false; } := uint64_t;\n"
2853 "typealias integer { size = %u; align = %u; signed = false; } := unsigned long;\n"
2854 "typealias integer { size = 5; align = 1; signed = false; } := uint5_t;\n"
2855 "typealias integer { size = 27; align = 1; signed = false; } := uint27_t;\n"
2856 "\n"
2857 "trace {\n"
2858 " major = %u;\n"
2859 " minor = %u;\n"
2860 " uuid = \"%s\";\n"
2861 " byte_order = %s;\n"
2862 " packet.header := struct {\n"
2863 " uint32_t magic;\n"
2864 " uint8_t uuid[16];\n"
2865 " uint32_t stream_id;\n"
2866 " uint64_t stream_instance_id;\n"
2867 " };\n"
2868 "};\n\n",
2869 lttng_alignof(uint8_t) * CHAR_BIT,
2870 lttng_alignof(uint16_t) * CHAR_BIT,
2871 lttng_alignof(uint32_t) * CHAR_BIT,
2872 lttng_alignof(uint64_t) * CHAR_BIT,
2873 sizeof(unsigned long) * CHAR_BIT,
2874 lttng_alignof(unsigned long) * CHAR_BIT,
2875 CTF_SPEC_MAJOR,
2876 CTF_SPEC_MINOR,
2877 uuid_s,
2878 #if __BYTE_ORDER == __BIG_ENDIAN
2879 "be"
2880 #else
2881 "le"
2882 #endif
2883 );
2884 if (ret)
2885 goto end;
2886
2887 ret = lttng_metadata_printf(session,
2888 "env {\n"
2889 " hostname = \"%s\";\n"
2890 " domain = \"kernel\";\n"
2891 " sysname = \"%s\";\n"
2892 " kernel_release = \"%s\";\n"
2893 " kernel_version = \"%s\";\n"
2894 " tracer_name = \"lttng-modules\";\n"
2895 " tracer_major = %d;\n"
2896 " tracer_minor = %d;\n"
2897 " tracer_patchlevel = %d;\n"
2898 " trace_buffering_scheme = \"global\";\n",
2899 current->nsproxy->uts_ns->name.nodename,
2900 utsname()->sysname,
2901 utsname()->release,
2902 utsname()->version,
2903 LTTNG_MODULES_MAJOR_VERSION,
2904 LTTNG_MODULES_MINOR_VERSION,
2905 LTTNG_MODULES_PATCHLEVEL_VERSION
2906 );
2907 if (ret)
2908 goto end;
2909
2910 ret = print_metadata_escaped_field(session, "trace_name", session->name);
2911 if (ret)
2912 goto end;
2913 ret = print_metadata_escaped_field(session, "trace_creation_datetime",
2914 session->creation_time);
2915 if (ret)
2916 goto end;
2917
2918 /* Add the product UUID to the 'env' section */
2919 product_uuid = dmi_get_system_info(DMI_PRODUCT_UUID);
2920 if (product_uuid) {
2921 ret = lttng_metadata_printf(session,
2922 " product_uuid = \"%s\";\n",
2923 product_uuid
2924 );
2925 if (ret)
2926 goto end;
2927 }
2928
2929 /* Close the 'env' section */
2930 ret = lttng_metadata_printf(session, "};\n\n");
2931 if (ret)
2932 goto end;
2933
2934 ret = lttng_metadata_printf(session,
2935 "clock {\n"
2936 " name = \"%s\";\n",
2937 trace_clock_name()
2938 );
2939 if (ret)
2940 goto end;
2941
2942 if (!trace_clock_uuid(clock_uuid_s)) {
2943 ret = lttng_metadata_printf(session,
2944 " uuid = \"%s\";\n",
2945 clock_uuid_s
2946 );
2947 if (ret)
2948 goto end;
2949 }
2950
2951 ret = lttng_metadata_printf(session,
2952 " description = \"%s\";\n"
2953 " freq = %llu; /* Frequency, in Hz */\n"
2954 " /* clock value offset from Epoch is: offset * (1/freq) */\n"
2955 " offset = %lld;\n"
2956 "};\n\n",
2957 trace_clock_description(),
2958 (unsigned long long) trace_clock_freq(),
2959 (long long) measure_clock_offset()
2960 );
2961 if (ret)
2962 goto end;
2963
2964 ret = lttng_metadata_printf(session,
2965 "typealias integer {\n"
2966 " size = 27; align = 1; signed = false;\n"
2967 " map = clock.%s.value;\n"
2968 "} := uint27_clock_monotonic_t;\n"
2969 "\n"
2970 "typealias integer {\n"
2971 " size = 32; align = %u; signed = false;\n"
2972 " map = clock.%s.value;\n"
2973 "} := uint32_clock_monotonic_t;\n"
2974 "\n"
2975 "typealias integer {\n"
2976 " size = 64; align = %u; signed = false;\n"
2977 " map = clock.%s.value;\n"
2978 "} := uint64_clock_monotonic_t;\n\n",
2979 trace_clock_name(),
2980 lttng_alignof(uint32_t) * CHAR_BIT,
2981 trace_clock_name(),
2982 lttng_alignof(uint64_t) * CHAR_BIT,
2983 trace_clock_name()
2984 );
2985 if (ret)
2986 goto end;
2987
2988 ret = _lttng_stream_packet_context_declare(session);
2989 if (ret)
2990 goto end;
2991
2992 ret = _lttng_event_header_declare(session);
2993 if (ret)
2994 goto end;
2995
2996 skip_session:
2997 list_for_each_entry(chan, &session->chan, list) {
2998 ret = _lttng_channel_metadata_statedump(session, chan);
2999 if (ret)
3000 goto end;
3001 }
3002
3003 list_for_each_entry(event, &session->events, list) {
3004 ret = _lttng_event_metadata_statedump(session, event->chan, event);
3005 if (ret)
3006 goto end;
3007 }
3008 session->metadata_dumped = 1;
3009 end:
3010 lttng_metadata_end(session);
3011 return ret;
3012 }
3013
3014 /**
3015 * lttng_transport_register - LTT transport registration
3016 * @transport: transport structure
3017 *
3018  * Registers a transport which can be used as an output to extract data out
3019  * of LTTng. The module calling this registration function must ensure that
3020  * no trap-inducing code will be executed by the transport functions. E.g.
3021  * vmalloc_sync_mappings() must be called between a vmalloc and the moment
3022  * the memory is made visible to the transport function. This registration
3023  * acts as a vmalloc_sync_mappings. Therefore, only a module that allocates
3024  * virtual memory after its registration needs to synchronize the TLBs itself.
3025 */
3026 void lttng_transport_register(struct lttng_transport *transport)
3027 {
3028 /*
3029 * Make sure no page fault can be triggered by the module about to be
3030 * registered. We deal with this here so we don't have to call
3031 * vmalloc_sync_mappings() in each module's init.
3032 */
3033 wrapper_vmalloc_sync_mappings();
3034
3035 mutex_lock(&sessions_mutex);
3036 list_add_tail(&transport->node, &lttng_transport_list);
3037 mutex_unlock(&sessions_mutex);
3038 }
3039 EXPORT_SYMBOL_GPL(lttng_transport_register);
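
/*
 * Registration sketch for a ring-buffer client module (illustrative
 * only; it assumes the usual lttng_transport layout with .name, .owner
 * and .ops, and the client names below are hypothetical):
 *
 *	static struct lttng_transport example_transport = {
 *		.name = "relay-example",
 *		.owner = THIS_MODULE,
 *		.ops = { ... },
 *	};
 *
 *	static int __init example_client_init(void)
 *	{
 *		lttng_transport_register(&example_transport);
 *		return 0;
 *	}
 *
 *	static void __exit example_client_exit(void)
 *	{
 *		lttng_transport_unregister(&example_transport);
 *	}
 */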
3040
3041 /**
3042 * lttng_transport_unregister - LTT transport unregistration
3043 * @transport: transport structure
3044 */
3045 void lttng_transport_unregister(struct lttng_transport *transport)
3046 {
3047 mutex_lock(&sessions_mutex);
3048 list_del(&transport->node);
3049 mutex_unlock(&sessions_mutex);
3050 }
3051 EXPORT_SYMBOL_GPL(lttng_transport_unregister);
3052
3053 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
3054
3055 enum cpuhp_state lttng_hp_prepare;
3056 enum cpuhp_state lttng_hp_online;
3057
3058 static int lttng_hotplug_prepare(unsigned int cpu, struct hlist_node *node)
3059 {
3060 struct lttng_cpuhp_node *lttng_node;
3061
3062 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
3063 switch (lttng_node->component) {
3064 case LTTNG_RING_BUFFER_FRONTEND:
3065 return 0;
3066 case LTTNG_RING_BUFFER_BACKEND:
3067 return lttng_cpuhp_rb_backend_prepare(cpu, lttng_node);
3068 case LTTNG_RING_BUFFER_ITER:
3069 return 0;
3070 case LTTNG_CONTEXT_PERF_COUNTERS:
3071 return 0;
3072 default:
3073 return -EINVAL;
3074 }
3075 }
3076
3077 static int lttng_hotplug_dead(unsigned int cpu, struct hlist_node *node)
3078 {
3079 struct lttng_cpuhp_node *lttng_node;
3080
3081 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
3082 switch (lttng_node->component) {
3083 case LTTNG_RING_BUFFER_FRONTEND:
3084 return lttng_cpuhp_rb_frontend_dead(cpu, lttng_node);
3085 case LTTNG_RING_BUFFER_BACKEND:
3086 return 0;
3087 case LTTNG_RING_BUFFER_ITER:
3088 return 0;
3089 case LTTNG_CONTEXT_PERF_COUNTERS:
3090 return lttng_cpuhp_perf_counter_dead(cpu, lttng_node);
3091 default:
3092 return -EINVAL;
3093 }
3094 }
3095
3096 static int lttng_hotplug_online(unsigned int cpu, struct hlist_node *node)
3097 {
3098 struct lttng_cpuhp_node *lttng_node;
3099
3100 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
3101 switch (lttng_node->component) {
3102 case LTTNG_RING_BUFFER_FRONTEND:
3103 return lttng_cpuhp_rb_frontend_online(cpu, lttng_node);
3104 case LTTNG_RING_BUFFER_BACKEND:
3105 return 0;
3106 case LTTNG_RING_BUFFER_ITER:
3107 return lttng_cpuhp_rb_iter_online(cpu, lttng_node);
3108 case LTTNG_CONTEXT_PERF_COUNTERS:
3109 return lttng_cpuhp_perf_counter_online(cpu, lttng_node);
3110 default:
3111 return -EINVAL;
3112 }
3113 }
3114
3115 static int lttng_hotplug_offline(unsigned int cpu, struct hlist_node *node)
3116 {
3117 struct lttng_cpuhp_node *lttng_node;
3118
3119 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
3120 switch (lttng_node->component) {
3121 case LTTNG_RING_BUFFER_FRONTEND:
3122 return lttng_cpuhp_rb_frontend_offline(cpu, lttng_node);
3123 case LTTNG_RING_BUFFER_BACKEND:
3124 return 0;
3125 case LTTNG_RING_BUFFER_ITER:
3126 return 0;
3127 case LTTNG_CONTEXT_PERF_COUNTERS:
3128 return 0;
3129 default:
3130 return -EINVAL;
3131 }
3132 }
3133
3134 static int __init lttng_init_cpu_hotplug(void)
3135 {
3136 int ret;
3137
3138 ret = cpuhp_setup_state_multi(CPUHP_BP_PREPARE_DYN, "lttng:prepare",
3139 lttng_hotplug_prepare,
3140 lttng_hotplug_dead);
3141 if (ret < 0) {
3142 return ret;
3143 }
3144 lttng_hp_prepare = ret;
3145 lttng_rb_set_hp_prepare(ret);
3146
3147 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "lttng:online",
3148 lttng_hotplug_online,
3149 lttng_hotplug_offline);
3150 if (ret < 0) {
3151 cpuhp_remove_multi_state(lttng_hp_prepare);
3152 lttng_hp_prepare = 0;
3153 return ret;
3154 }
3155 lttng_hp_online = ret;
3156 lttng_rb_set_hp_online(ret);
3157
3158 return 0;
3159 }
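
/*
 * Sketch of how a per-CPU component attaches to the dynamic states set
 * up above (assumes the lttng_cpuhp_node embedding used by the
 * callbacks; the "chanb" variable is hypothetical):
 *
 *	chanb->cpuhp_prepare.component = LTTNG_RING_BUFFER_BACKEND;
 *	ret = cpuhp_state_add_instance(lttng_hp_prepare,
 *			&chanb->cpuhp_prepare.node);
 *	...
 *	cpuhp_state_remove_instance(lttng_hp_prepare,
 *			&chanb->cpuhp_prepare.node);
 */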
3160
3161 static void __exit lttng_exit_cpu_hotplug(void)
3162 {
3163 lttng_rb_set_hp_online(0);
3164 cpuhp_remove_multi_state(lttng_hp_online);
3165 lttng_rb_set_hp_prepare(0);
3166 cpuhp_remove_multi_state(lttng_hp_prepare);
3167 }
3168
3169 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
3170 static int lttng_init_cpu_hotplug(void)
3171 {
3172 return 0;
3173 }
3174 static void lttng_exit_cpu_hotplug(void)
3175 {
3176 }
3177 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
3178
3179
3180 static int __init lttng_events_init(void)
3181 {
3182 int ret;
3183
3184 ret = wrapper_lttng_fixup_sig(THIS_MODULE);
3185 if (ret)
3186 return ret;
3187 ret = wrapper_get_pfnblock_flags_mask_init();
3188 if (ret)
3189 return ret;
3190 ret = wrapper_get_pageblock_flags_mask_init();
3191 if (ret)
3192 return ret;
3193 ret = lttng_probes_init();
3194 if (ret)
3195 return ret;
3196 ret = lttng_context_init();
3197 if (ret)
3198 return ret;
3199 ret = lttng_tracepoint_init();
3200 if (ret)
3201 goto error_tp;
3202 event_cache = KMEM_CACHE(lttng_event, 0);
3203 if (!event_cache) {
3204 ret = -ENOMEM;
3205 goto error_kmem;
3206 }
3207 ret = lttng_abi_init();
3208 if (ret)
3209 goto error_abi;
3210 ret = lttng_logger_init();
3211 if (ret)
3212 goto error_logger;
3213 ret = lttng_init_cpu_hotplug();
3214 if (ret)
3215 goto error_hotplug;
3216 printk(KERN_NOTICE "LTTng: Loaded modules v%s.%s.%s%s (%s)%s%s\n",
3217 __stringify(LTTNG_MODULES_MAJOR_VERSION),
3218 __stringify(LTTNG_MODULES_MINOR_VERSION),
3219 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
3220 LTTNG_MODULES_EXTRAVERSION,
3221 LTTNG_VERSION_NAME,
3222 #ifdef LTTNG_EXTRA_VERSION_GIT
3223 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
3224 #else
3225 "",
3226 #endif
3227 #ifdef LTTNG_EXTRA_VERSION_NAME
3228 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
3229 #else
3230 "");
3231 #endif
3232 return 0;
3233
3234 error_hotplug:
3235 lttng_logger_exit();
3236 error_logger:
3237 lttng_abi_exit();
3238 error_abi:
3239 kmem_cache_destroy(event_cache);
3240 error_kmem:
3241 lttng_tracepoint_exit();
3242 error_tp:
3243 lttng_context_exit();
3244 printk(KERN_NOTICE "LTTng: Failed to load modules v%s.%s.%s%s (%s)%s%s\n",
3245 __stringify(LTTNG_MODULES_MAJOR_VERSION),
3246 __stringify(LTTNG_MODULES_MINOR_VERSION),
3247 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
3248 LTTNG_MODULES_EXTRAVERSION,
3249 LTTNG_VERSION_NAME,
3250 #ifdef LTTNG_EXTRA_VERSION_GIT
3251 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
3252 #else
3253 "",
3254 #endif
3255 #ifdef LTTNG_EXTRA_VERSION_NAME
3256 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
3257 #else
3258 "");
3259 #endif
3260 return ret;
3261 }
3262
3263 module_init(lttng_events_init);
3264
3265 static void __exit lttng_events_exit(void)
3266 {
3267 struct lttng_session *session, *tmpsession;
3268
3269 lttng_exit_cpu_hotplug();
3270 lttng_logger_exit();
3271 lttng_abi_exit();
3272 list_for_each_entry_safe(session, tmpsession, &sessions, list)
3273 lttng_session_destroy(session);
3274 kmem_cache_destroy(event_cache);
3275 lttng_tracepoint_exit();
3276 lttng_context_exit();
3277 printk(KERN_NOTICE "LTTng: Unloaded modules v%s.%s.%s%s (%s)%s%s\n",
3278 __stringify(LTTNG_MODULES_MAJOR_VERSION),
3279 __stringify(LTTNG_MODULES_MINOR_VERSION),
3280 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
3281 LTTNG_MODULES_EXTRAVERSION,
3282 LTTNG_VERSION_NAME,
3283 #ifdef LTTNG_EXTRA_VERSION_GIT
3284 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
3285 #else
3286 "",
3287 #endif
3288 #ifdef LTTNG_EXTRA_VERSION_NAME
3289 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
3290 #else
3291 "");
3292 #endif
3293 }
3294
3295 module_exit(lttng_events_exit);
3296
3297 #include <generated/patches.h>
3298 #ifdef LTTNG_EXTRA_VERSION_GIT
3299 MODULE_INFO(extra_version_git, LTTNG_EXTRA_VERSION_GIT);
3300 #endif
3301 #ifdef LTTNG_EXTRA_VERSION_NAME
3302 MODULE_INFO(extra_version_name, LTTNG_EXTRA_VERSION_NAME);
3303 #endif
3304 MODULE_LICENSE("GPL and additional rights");
3305 MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
3306 MODULE_DESCRIPTION("LTTng tracer");
3307 MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
3308 __stringify(LTTNG_MODULES_MINOR_VERSION) "."
3309 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
3310 LTTNG_MODULES_EXTRAVERSION);