Implement capturing payload on event notifier
[lttng-modules.git] / src / lttng-events.c
1 /* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
2 *
3 * lttng-events.c
4 *
5 * Holds LTTng per-session event registry.
6 *
7 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 */
9
10 /*
11 * This page_alloc.h wrapper needs to be included before gfpflags.h because it
12 * overrides a function with a define.
13 */
14 #include "wrapper/page_alloc.h"
15
16 #include <linux/module.h>
17 #include <linux/mutex.h>
18 #include <linux/sched.h>
19 #include <linux/slab.h>
20 #include <linux/jiffies.h>
21 #include <linux/utsname.h>
22 #include <linux/err.h>
23 #include <linux/seq_file.h>
24 #include <linux/file.h>
25 #include <linux/anon_inodes.h>
26 #include <wrapper/file.h>
27 #include <linux/uaccess.h>
28 #include <linux/vmalloc.h>
29 #include <linux/dmi.h>
30
31 #include <wrapper/uuid.h>
32 #include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_mappings() */
33 #include <wrapper/random.h>
34 #include <wrapper/tracepoint.h>
35 #include <wrapper/list.h>
36 #include <wrapper/types.h>
37 #include <lttng/kernel-version.h>
38 #include <lttng/events.h>
39 #include <lttng/lttng-bytecode.h>
40 #include <lttng/tracer.h>
41 #include <lttng/event-notifier-notification.h>
42 #include <lttng/abi-old.h>
43 #include <lttng/endian.h>
44 #include <lttng/string-utils.h>
45 #include <lttng/utils.h>
46 #include <ringbuffer/backend.h>
47 #include <ringbuffer/frontend.h>
48 #include <wrapper/time.h>
49
50 #define METADATA_CACHE_DEFAULT_SIZE 4096
51
52 static LIST_HEAD(sessions);
53 static LIST_HEAD(event_notifier_groups);
54 static LIST_HEAD(lttng_transport_list);
55 /*
56 * Protect the sessions and metadata caches.
57 */
58 static DEFINE_MUTEX(sessions_mutex);
59 static struct kmem_cache *event_cache;
60 static struct kmem_cache *event_notifier_cache;
61
/* Forward declarations of file-local helpers. */
static void lttng_session_lazy_sync_event_enablers(struct lttng_session *session);
static void lttng_session_sync_event_enablers(struct lttng_session *session);
static void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler);
static void lttng_event_notifier_enabler_destroy(struct lttng_event_notifier_enabler *event_notifier_enabler);
static void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group);

static void _lttng_event_destroy(struct lttng_event *event);
static void _lttng_event_notifier_destroy(struct lttng_event_notifier *event_notifier);
static void _lttng_channel_destroy(struct lttng_channel *chan);
static int _lttng_event_unregister(struct lttng_event *event);
static int _lttng_event_notifier_unregister(struct lttng_event_notifier *event_notifier);
static
int _lttng_event_metadata_statedump(struct lttng_session *session,
				  struct lttng_channel *chan,
				  struct lttng_event *event);
static
int _lttng_session_metadata_statedump(struct lttng_session *session);
static
void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream);
static
int _lttng_type_statedump(struct lttng_session *session,
		const struct lttng_type *type,
		size_t nesting);
static
int _lttng_field_statedump(struct lttng_session *session,
		const struct lttng_event_field *field,
		size_t nesting);
89
/*
 * Wait for all currently-executing tracer probes to finish.
 *
 * On kernels >= 5.1 (and on PREEMPT_RT configurations) probes are
 * protected by RCU, so synchronize_rcu() suffices; older kernels use
 * synchronize_sched() for preempt-disabled probe sections.  The extra
 * synchronize_rcu() under the RT config symbols covers RT kernels where
 * the scheduler-based grace period is not sufficient (the config symbol
 * name changed across kernel versions, hence the two #ifdef variants).
 */
void synchronize_trace(void)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,1,0))
	synchronize_rcu();
#else
	synchronize_sched();
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
#ifdef CONFIG_PREEMPT_RT_FULL
	synchronize_rcu();
#endif
#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
#ifdef CONFIG_PREEMPT_RT
	synchronize_rcu();
#endif
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
}
108
/* Acquire the global sessions mutex (exported locking helper). */
void lttng_lock_sessions(void)
{
	mutex_lock(&sessions_mutex);
}
113
/* Release the global sessions mutex (exported locking helper). */
void lttng_unlock_sessions(void)
{
	mutex_unlock(&sessions_mutex);
}
118
119 static struct lttng_transport *lttng_transport_find(const char *name)
120 {
121 struct lttng_transport *transport;
122
123 list_for_each_entry(transport, &lttng_transport_list, node) {
124 if (!strcmp(transport->name, name))
125 return transport;
126 }
127 return NULL;
128 }
129
130 /*
131 * Called with sessions lock held.
132 */
133 int lttng_session_active(void)
134 {
135 struct lttng_session *iter;
136
137 list_for_each_entry(iter, &sessions, list) {
138 if (iter->active)
139 return 1;
140 }
141 return 0;
142 }
143
/*
 * Create a tracing session and add it to the global sessions list.
 *
 * Allocates the session and its metadata cache, initializes the
 * per-session event hash table and the id trackers (pid/uid/gid and
 * their namespaced variants).  Returns the new session, or NULL on
 * allocation failure.
 */
struct lttng_session *lttng_session_create(void)
{
	struct lttng_session *session;
	struct lttng_metadata_cache *metadata_cache;
	int i;

	mutex_lock(&sessions_mutex);
	session = lttng_kvzalloc(sizeof(struct lttng_session), GFP_KERNEL);
	if (!session)
		goto err;
	INIT_LIST_HEAD(&session->chan);
	INIT_LIST_HEAD(&session->events);
	lttng_guid_gen(&session->uuid);

	metadata_cache = kzalloc(sizeof(struct lttng_metadata_cache),
			GFP_KERNEL);
	if (!metadata_cache)
		goto err_free_session;
	metadata_cache->data = vzalloc(METADATA_CACHE_DEFAULT_SIZE);
	if (!metadata_cache->data)
		goto err_free_cache;
	metadata_cache->cache_alloc = METADATA_CACHE_DEFAULT_SIZE;
	kref_init(&metadata_cache->refcount);
	mutex_init(&metadata_cache->lock);
	session->metadata_cache = metadata_cache;
	INIT_LIST_HEAD(&metadata_cache->metadata_stream);
	/* The cache keeps its own copy of the session UUID. */
	memcpy(&metadata_cache->uuid, &session->uuid,
		sizeof(metadata_cache->uuid));
	INIT_LIST_HEAD(&session->enablers_head);
	for (i = 0; i < LTTNG_EVENT_HT_SIZE; i++)
		INIT_HLIST_HEAD(&session->events_ht.table[i]);
	list_add(&session->list, &sessions);
	/* Each tracker carries a back-pointer to its owning session. */
	session->pid_tracker.session = session;
	session->pid_tracker.tracker_type = TRACKER_PID;
	session->vpid_tracker.session = session;
	session->vpid_tracker.tracker_type = TRACKER_VPID;
	session->uid_tracker.session = session;
	session->uid_tracker.tracker_type = TRACKER_UID;
	session->vuid_tracker.session = session;
	session->vuid_tracker.tracker_type = TRACKER_VUID;
	session->gid_tracker.session = session;
	session->gid_tracker.tracker_type = TRACKER_GID;
	session->vgid_tracker.session = session;
	session->vgid_tracker.tracker_type = TRACKER_VGID;
	mutex_unlock(&sessions_mutex);
	return session;

err_free_cache:
	kfree(metadata_cache);
err_free_session:
	lttng_kvfree(session);
err:
	mutex_unlock(&sessions_mutex);
	return NULL;
}
199
/*
 * Create an event notifier group, backed by a ring buffer channel used
 * to carry event notifier notifications to user space.
 *
 * Takes a reference on the transport module for the lifetime of the
 * group.  Returns the new group, or NULL on error (transport missing,
 * module unavailable, allocation or channel creation failure).
 */
struct lttng_event_notifier_group *lttng_event_notifier_group_create(void)
{
	struct lttng_transport *transport = NULL;
	struct lttng_event_notifier_group *event_notifier_group;
	const char *transport_name = "relay-event-notifier";
	size_t subbuf_size = 4096;	//TODO
	size_t num_subbuf = 16;		//TODO
	unsigned int switch_timer_interval = 0;
	unsigned int read_timer_interval = 0;
	int i;

	mutex_lock(&sessions_mutex);

	transport = lttng_transport_find(transport_name);
	if (!transport) {
		printk(KERN_WARNING "LTTng: transport %s not found\n",
		       transport_name);
		goto notransport;
	}
	if (!try_module_get(transport->owner)) {
		printk(KERN_WARNING "LTTng: Can't lock transport %s module.\n",
		       transport_name);
		goto notransport;
	}

	event_notifier_group = lttng_kvzalloc(sizeof(struct lttng_event_notifier_group),
			GFP_KERNEL);
	if (!event_notifier_group)
		goto nomem;

	/*
	 * Initialize the ring buffer used to store event notifier
	 * notifications.
	 */
	event_notifier_group->ops = &transport->ops;
	event_notifier_group->chan = transport->ops.channel_create(
			transport_name, event_notifier_group, NULL,
			subbuf_size, num_subbuf, switch_timer_interval,
			read_timer_interval);
	if (!event_notifier_group->chan)
		goto create_error;

	event_notifier_group->transport = transport;

	INIT_LIST_HEAD(&event_notifier_group->enablers_head);
	INIT_LIST_HEAD(&event_notifier_group->event_notifiers_head);
	for (i = 0; i < LTTNG_EVENT_NOTIFIER_HT_SIZE; i++)
		INIT_HLIST_HEAD(&event_notifier_group->event_notifiers_ht.table[i]);

	list_add(&event_notifier_group->node, &event_notifier_groups);

	mutex_unlock(&sessions_mutex);

	return event_notifier_group;

create_error:
	lttng_kvfree(event_notifier_group);
nomem:
	if (transport)
		module_put(transport->owner);
notransport:
	mutex_unlock(&sessions_mutex);
	return NULL;
}
264
265 void metadata_cache_destroy(struct kref *kref)
266 {
267 struct lttng_metadata_cache *cache =
268 container_of(kref, struct lttng_metadata_cache, refcount);
269 vfree(cache->data);
270 kfree(cache);
271 }
272
/*
 * Tear down a tracing session.
 *
 * Ordering matters: events are unregistered first, then we wait for
 * in-flight probes with synchronize_trace() before freeing any event
 * or channel state.  Metadata streams are hung up so blocked readers
 * wake, and the metadata cache reference is dropped last before the
 * session itself is freed.
 */
void lttng_session_destroy(struct lttng_session *session)
{
	struct lttng_channel *chan, *tmpchan;
	struct lttng_event *event, *tmpevent;
	struct lttng_metadata_stream *metadata_stream;
	struct lttng_event_enabler *event_enabler, *tmp_event_enabler;
	int ret;

	mutex_lock(&sessions_mutex);
	WRITE_ONCE(session->active, 0);
	list_for_each_entry(chan, &session->chan, list) {
		ret = lttng_syscalls_unregister_event(chan);
		WARN_ON(ret);
	}
	list_for_each_entry(event, &session->events, list) {
		ret = _lttng_event_unregister(event);
		WARN_ON(ret);
	}
	synchronize_trace();	/* Wait for in-flight events to complete */
	list_for_each_entry(chan, &session->chan, list) {
		ret = lttng_syscalls_destroy_event(chan);
		WARN_ON(ret);
	}
	list_for_each_entry_safe(event_enabler, tmp_event_enabler,
			&session->enablers_head, node)
		lttng_event_enabler_destroy(event_enabler);
	list_for_each_entry_safe(event, tmpevent, &session->events, list)
		_lttng_event_destroy(event);
	list_for_each_entry_safe(chan, tmpchan, &session->chan, list) {
		/* Metadata channels are destroyed via their own release path. */
		BUG_ON(chan->channel_type == METADATA_CHANNEL);
		_lttng_channel_destroy(chan);
	}
	mutex_lock(&session->metadata_cache->lock);
	list_for_each_entry(metadata_stream, &session->metadata_cache->metadata_stream, list)
		_lttng_metadata_channel_hangup(metadata_stream);
	mutex_unlock(&session->metadata_cache->lock);
	lttng_id_tracker_destroy(&session->pid_tracker, false);
	lttng_id_tracker_destroy(&session->vpid_tracker, false);
	lttng_id_tracker_destroy(&session->uid_tracker, false);
	lttng_id_tracker_destroy(&session->vuid_tracker, false);
	lttng_id_tracker_destroy(&session->gid_tracker, false);
	lttng_id_tracker_destroy(&session->vgid_tracker, false);
	kref_put(&session->metadata_cache->refcount, metadata_cache_destroy);
	list_del(&session->list);
	mutex_unlock(&sessions_mutex);
	lttng_kvfree(session);
}
320
/*
 * Tear down an event notifier group.
 *
 * Unregisters all notifiers, waits for in-flight probes, flushes the
 * pending wakeup irq_work, then destroys enablers, notifiers and the
 * notification channel, and drops the transport module reference.
 * NULL is tolerated as a no-op.
 */
void lttng_event_notifier_group_destroy(
		struct lttng_event_notifier_group *event_notifier_group)
{
	struct lttng_event_notifier_enabler *event_notifier_enabler, *tmp_event_notifier_enabler;
	struct lttng_event_notifier *event_notifier, *tmpevent_notifier;
	int ret;

	if (!event_notifier_group)
		return;

	mutex_lock(&sessions_mutex);

	ret = lttng_syscalls_unregister_event_notifier(event_notifier_group);
	WARN_ON(ret);

	list_for_each_entry_safe(event_notifier, tmpevent_notifier,
			&event_notifier_group->event_notifiers_head, list) {
		ret = _lttng_event_notifier_unregister(event_notifier);
		WARN_ON(ret);
	}

	/* Wait for in-flight event notifier to complete */
	synchronize_trace();

	irq_work_sync(&event_notifier_group->wakeup_pending);

	kfree(event_notifier_group->sc_filter);

	list_for_each_entry_safe(event_notifier_enabler, tmp_event_notifier_enabler,
			&event_notifier_group->enablers_head, node)
		lttng_event_notifier_enabler_destroy(event_notifier_enabler);

	list_for_each_entry_safe(event_notifier, tmpevent_notifier,
			&event_notifier_group->event_notifiers_head, list)
		_lttng_event_notifier_destroy(event_notifier);

	event_notifier_group->ops->channel_destroy(event_notifier_group->chan);
	module_put(event_notifier_group->transport->owner);
	list_del(&event_notifier_group->node);

	mutex_unlock(&sessions_mutex);
	lttng_kvfree(event_notifier_group);
}
364
/*
 * Trigger a state dump for the given session, under the sessions mutex.
 * Returns the result of lttng_statedump_start().
 */
int lttng_session_statedump(struct lttng_session *session)
{
	int ret;

	mutex_lock(&sessions_mutex);
	ret = lttng_statedump_start(session);
	mutex_unlock(&sessions_mutex);
	return ret;
}
374
/*
 * Activate a tracing session.
 *
 * Syncs enablers, freezes each channel's header type based on its event
 * count, clears stream quiescent state, then marks the session active
 * and performs the metadata and state dumps.  On dump failure the
 * session is deactivated again.  Returns 0 on success, -EBUSY if the
 * session is already active, or a dump error code.
 */
int lttng_session_enable(struct lttng_session *session)
{
	int ret = 0;
	struct lttng_channel *chan;

	mutex_lock(&sessions_mutex);
	if (session->active) {
		ret = -EBUSY;
		goto end;
	}

	/* Set transient enabler state to "enabled" */
	session->tstate = 1;

	/* We need to sync enablers with session before activation. */
	lttng_session_sync_event_enablers(session);

	/*
	 * Snapshot the number of events per channel to know the type of header
	 * we need to use.
	 */
	list_for_each_entry(chan, &session->chan, list) {
		if (chan->header_type)
			continue;		/* don't change it if session stop/restart */
		/* Up to 30 event ids fit the compact event header. */
		if (chan->free_event_id < 31)
			chan->header_type = 1;	/* compact */
		else
			chan->header_type = 2;	/* large */
	}

	/* Clear each stream's quiescent state. */
	list_for_each_entry(chan, &session->chan, list) {
		if (chan->channel_type != METADATA_CHANNEL)
			lib_ring_buffer_clear_quiescent_channel(chan->chan);
	}

	WRITE_ONCE(session->active, 1);
	WRITE_ONCE(session->been_active, 1);
	ret = _lttng_session_metadata_statedump(session);
	if (ret) {
		WRITE_ONCE(session->active, 0);
		goto end;
	}
	ret = lttng_statedump_start(session);
	if (ret)
		WRITE_ONCE(session->active, 0);
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
425
/*
 * Deactivate a tracing session.
 *
 * Marks the session inactive, syncs enablers with the "disabled"
 * transient state, and sets each data stream quiescent.  Returns 0 on
 * success or -EBUSY if the session is not currently active.
 */
int lttng_session_disable(struct lttng_session *session)
{
	int ret = 0;
	struct lttng_channel *chan;

	mutex_lock(&sessions_mutex);
	if (!session->active) {
		ret = -EBUSY;
		goto end;
	}
	WRITE_ONCE(session->active, 0);

	/* Set transient enabler state to "disabled" */
	session->tstate = 0;
	lttng_session_sync_event_enablers(session);

	/* Set each stream's quiescent state. */
	list_for_each_entry(chan, &session->chan, list) {
		if (chan->channel_type != METADATA_CHANNEL)
			lib_ring_buffer_set_quiescent_channel(chan->chan);
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
451
/*
 * Regenerate the session's metadata from scratch.
 *
 * Clears the metadata cache, bumps its version, resets per-stream
 * in/out counters and all "metadata dumped" flags, then re-runs the
 * metadata state dump.  Returns -EBUSY if the session is not active.
 */
int lttng_session_metadata_regenerate(struct lttng_session *session)
{
	int ret = 0;
	struct lttng_channel *chan;
	struct lttng_event *event;
	struct lttng_metadata_cache *cache = session->metadata_cache;
	struct lttng_metadata_stream *stream;

	mutex_lock(&sessions_mutex);
	if (!session->active) {
		ret = -EBUSY;
		goto end;
	}

	mutex_lock(&cache->lock);
	memset(cache->data, 0, cache->cache_alloc);
	cache->metadata_written = 0;
	cache->version++;
	list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list) {
		stream->metadata_out = 0;
		stream->metadata_in = 0;
	}
	mutex_unlock(&cache->lock);

	session->metadata_dumped = 0;
	list_for_each_entry(chan, &session->chan, list) {
		chan->metadata_dumped = 0;
	}

	list_for_each_entry(event, &session->events, list) {
		event->metadata_dumped = 0;
	}

	ret = _lttng_session_metadata_statedump(session);

end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
491
492 int lttng_channel_enable(struct lttng_channel *channel)
493 {
494 int ret = 0;
495
496 mutex_lock(&sessions_mutex);
497 if (channel->channel_type == METADATA_CHANNEL) {
498 ret = -EPERM;
499 goto end;
500 }
501 if (channel->enabled) {
502 ret = -EEXIST;
503 goto end;
504 }
505 /* Set transient enabler state to "enabled" */
506 channel->tstate = 1;
507 lttng_session_sync_event_enablers(channel->session);
508 /* Set atomically the state to "enabled" */
509 WRITE_ONCE(channel->enabled, 1);
510 end:
511 mutex_unlock(&sessions_mutex);
512 return ret;
513 }
514
515 int lttng_channel_disable(struct lttng_channel *channel)
516 {
517 int ret = 0;
518
519 mutex_lock(&sessions_mutex);
520 if (channel->channel_type == METADATA_CHANNEL) {
521 ret = -EPERM;
522 goto end;
523 }
524 if (!channel->enabled) {
525 ret = -EEXIST;
526 goto end;
527 }
528 /* Set atomically the state to "disabled" */
529 WRITE_ONCE(channel->enabled, 0);
530 /* Set transient enabler state to "enabled" */
531 channel->tstate = 0;
532 lttng_session_sync_event_enablers(channel->session);
533 end:
534 mutex_unlock(&sessions_mutex);
535 return ret;
536 }
537
/*
 * Enable a single event.
 *
 * Tracepoint and syscall events return -EINVAL here: their enabled
 * state is driven through the enabler sync mechanism, not directly.
 * Returns -EPERM for events on the metadata channel and -EEXIST if the
 * event is already enabled.
 */
int lttng_event_enable(struct lttng_event *event)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (event->chan->channel_type == METADATA_CHANNEL) {
		ret = -EPERM;
		goto end;
	}
	if (event->enabled) {
		ret = -EEXIST;
		goto end;
	}
	switch (event->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
	case LTTNG_KERNEL_SYSCALL:
		ret = -EINVAL;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
	case LTTNG_KERNEL_NOOP:
		WRITE_ONCE(event->enabled, 1);
		break;
	case LTTNG_KERNEL_KRETPROBE:
		/* kretprobes manage the paired entry/return events together. */
		ret = lttng_kretprobes_event_enable_state(event, 1);
		break;
	case LTTNG_KERNEL_FUNCTION:	/* Fall-through. */
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
573
/*
 * Disable a single event.
 *
 * Mirror of lttng_event_enable(): tracepoint and syscall events return
 * -EINVAL (state managed via enabler sync), metadata-channel events
 * return -EPERM, already-disabled events return -EEXIST.
 */
int lttng_event_disable(struct lttng_event *event)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (event->chan->channel_type == METADATA_CHANNEL) {
		ret = -EPERM;
		goto end;
	}
	if (!event->enabled) {
		ret = -EEXIST;
		goto end;
	}
	switch (event->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
	case LTTNG_KERNEL_SYSCALL:
		ret = -EINVAL;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
	case LTTNG_KERNEL_NOOP:
		WRITE_ONCE(event->enabled, 0);
		break;
	case LTTNG_KERNEL_KRETPROBE:
		/* kretprobes manage the paired entry/return events together. */
		ret = lttng_kretprobes_event_enable_state(event, 0);
		break;
	case LTTNG_KERNEL_FUNCTION:	/* Fall-through. */
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
609
/*
 * Enable a single event notifier.
 *
 * Only kprobe and uprobe notifiers can be toggled directly; tracepoint
 * and syscall notifiers are driven through enabler sync and return
 * -EINVAL.  Returns -EEXIST if already enabled.
 */
int lttng_event_notifier_enable(struct lttng_event_notifier *event_notifier)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (event_notifier->enabled) {
		ret = -EEXIST;
		goto end;
	}
	switch (event_notifier->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
	case LTTNG_KERNEL_SYSCALL:
		ret = -EINVAL;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
		WRITE_ONCE(event_notifier->enabled, 1);
		break;
	case LTTNG_KERNEL_FUNCTION:
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_KRETPROBE:
	default:
		/* Unsupported instrumentation types for notifiers. */
		WARN_ON_ONCE(1);
		ret = -EINVAL;
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
639
/*
 * Disable a single event notifier.
 *
 * Mirror of lttng_event_notifier_enable(): only kprobe and uprobe
 * notifiers toggle directly; returns -EEXIST if already disabled.
 */
int lttng_event_notifier_disable(struct lttng_event_notifier *event_notifier)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (!event_notifier->enabled) {
		ret = -EEXIST;
		goto end;
	}
	switch (event_notifier->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
	case LTTNG_KERNEL_SYSCALL:
		ret = -EINVAL;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
		WRITE_ONCE(event_notifier->enabled, 0);
		break;
	case LTTNG_KERNEL_FUNCTION:
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_KRETPROBE:
	default:
		/* Unsupported instrumentation types for notifiers. */
		WARN_ON_ONCE(1);
		ret = -EINVAL;
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
669
/*
 * Create a channel within a session.
 *
 * Refuses to add a non-metadata channel to a session that has already
 * been activated.  Takes a reference on the transport module, creates
 * the underlying ring buffer channel, and links the channel into the
 * session.  Returns the new channel, or NULL on any failure.
 */
struct lttng_channel *lttng_channel_create(struct lttng_session *session,
				       const char *transport_name,
				       void *buf_addr,
				       size_t subbuf_size, size_t num_subbuf,
				       unsigned int switch_timer_interval,
				       unsigned int read_timer_interval,
				       enum channel_type channel_type)
{
	struct lttng_channel *chan;
	struct lttng_transport *transport = NULL;

	mutex_lock(&sessions_mutex);
	if (session->been_active && channel_type != METADATA_CHANNEL)
		goto active;	/* Refuse to add channel to active session */
	transport = lttng_transport_find(transport_name);
	if (!transport) {
		printk(KERN_WARNING "LTTng: transport %s not found\n",
		       transport_name);
		goto notransport;
	}
	if (!try_module_get(transport->owner)) {
		printk(KERN_WARNING "LTTng: Can't lock transport module.\n");
		goto notransport;
	}
	chan = kzalloc(sizeof(struct lttng_channel), GFP_KERNEL);
	if (!chan)
		goto nomem;
	chan->session = session;
	chan->id = session->free_chan_id++;
	chan->ops = &transport->ops;
	/*
	 * Note: the channel creation op already writes into the packet
	 * headers. Therefore the "chan" information used as input
	 * should be already accessible.
	 */
	chan->chan = transport->ops.channel_create(transport_name,
			chan, buf_addr, subbuf_size, num_subbuf,
			switch_timer_interval, read_timer_interval);
	if (!chan->chan)
		goto create_error;
	chan->tstate = 1;
	chan->enabled = 1;
	chan->transport = transport;
	chan->channel_type = channel_type;
	list_add(&chan->list, &session->chan);
	mutex_unlock(&sessions_mutex);
	return chan;

create_error:
	kfree(chan);
nomem:
	if (transport)
		module_put(transport->owner);
notransport:
active:
	mutex_unlock(&sessions_mutex);
	return NULL;
}
728
/*
 * Only used internally at session destruction for per-cpu channels, and
 * when metadata channel is released.
 * Needs to be called with sessions mutex held.
 */
static
void _lttng_channel_destroy(struct lttng_channel *chan)
{
	/* Destroy ring buffer backing before releasing the transport module. */
	chan->ops->channel_destroy(chan->chan);
	module_put(chan->transport->owner);
	list_del(&chan->list);
	lttng_destroy_context(chan->ctx);
	kfree(chan);
}
743
/*
 * Destroy a metadata channel; exported entry point for the metadata
 * channel release path (non-metadata channels are destroyed by
 * lttng_session_destroy()).
 */
void lttng_metadata_channel_destroy(struct lttng_channel *chan)
{
	BUG_ON(chan->channel_type != METADATA_CHANNEL);

	/* Protect the metadata cache with the sessions_mutex. */
	mutex_lock(&sessions_mutex);
	_lttng_channel_destroy(chan);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_metadata_channel_destroy);
754
/*
 * Mark a metadata stream finalized and wake any reader blocked on it,
 * so user space can notice the hangup.
 */
static
void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream)
{
	stream->finalized = 1;
	wake_up_interruptible(&stream->read_wait);
}
761
/*
 * Supports event creation while tracing session is active.
 * Needs to be called with sessions mutex held.
 *
 * Creates an event of the given instrumentation type in a channel,
 * registers the underlying probe where applicable, emits the event's
 * metadata, and links the event into the session's hash table and
 * event list.  Returns the new event or an ERR_PTR on failure
 * (-EMFILE when the channel's event id space is exhausted, -EEXIST on
 * duplicate, -ENOMEM, -ENOENT, -EINVAL).
 */
struct lttng_event *_lttng_event_create(struct lttng_channel *chan,
				struct lttng_kernel_event *event_param,
				void *filter,
				const struct lttng_event_desc *event_desc,
				enum lttng_kernel_instrumentation itype)
{
	struct lttng_session *session = chan->session;
	struct lttng_event *event;
	const char *event_name;
	struct hlist_head *head;
	int ret;

	/* Event id space for this channel exhausted. */
	if (chan->free_event_id == -1U) {
		ret = -EMFILE;
		goto full;
	}

	/* Pick the event name source depending on instrumentation type. */
	switch (itype) {
	case LTTNG_KERNEL_TRACEPOINT:
		event_name = event_desc->name;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
	case LTTNG_KERNEL_KRETPROBE:
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_SYSCALL:
		event_name = event_param->name;
		break;
	case LTTNG_KERNEL_FUNCTION:	/* Fall-through. */
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
		goto type_error;
	}

	/* Refuse duplicate (name, channel) pairs. */
	head = utils_borrow_hash_table_bucket(session->events_ht.table,
		LTTNG_EVENT_HT_SIZE, event_name);
	lttng_hlist_for_each_entry(event, head, hlist) {
		WARN_ON_ONCE(!event->desc);
		if (!strncmp(event->desc->name, event_name,
					LTTNG_KERNEL_SYM_NAME_LEN - 1)
				&& chan == event->chan) {
			ret = -EEXIST;
			goto exist;
		}
	}

	event = kmem_cache_zalloc(event_cache, GFP_KERNEL);
	if (!event) {
		ret = -ENOMEM;
		goto cache_error;
	}
	event->chan = chan;
	event->filter = filter;
	event->id = chan->free_event_id++;
	event->instrumentation = itype;
	event->evtype = LTTNG_TYPE_EVENT;
	INIT_LIST_HEAD(&event->filter_bytecode_runtime_head);
	INIT_LIST_HEAD(&event->enablers_ref_head);

	switch (itype) {
	case LTTNG_KERNEL_TRACEPOINT:
		/* Event will be enabled by enabler sync. */
		event->enabled = 0;
		event->registered = 0;
		event->desc = lttng_event_desc_get(event_name);
		if (!event->desc) {
			ret = -ENOENT;
			goto register_error;
		}
		/* Populate lttng_event structure before event registration. */
		smp_wmb();
		break;
	case LTTNG_KERNEL_KPROBE:
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->registered = 1;
		/*
		 * Populate lttng_event structure before event
		 * registration.
		 */
		smp_wmb();
		ret = lttng_kprobes_register_event(event_name,
				event_param->u.kprobe.symbol_name,
				event_param->u.kprobe.offset,
				event_param->u.kprobe.addr,
				event);
		if (ret) {
			ret = -EINVAL;
			goto register_error;
		}
		/* Hold the probe provider module while the event exists. */
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		break;
	case LTTNG_KERNEL_KRETPROBE:
	{
		struct lttng_event *event_return;

		/* kretprobe defines 2 events */
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->registered = 1;
		event_return =
			kmem_cache_zalloc(event_cache, GFP_KERNEL);
		if (!event_return) {
			ret = -ENOMEM;
			goto register_error;
		}
		event_return->chan = chan;
		event_return->filter = filter;
		event_return->id = chan->free_event_id++;
		event_return->enabled = 0;
		event_return->registered = 1;
		event_return->instrumentation = itype;
		/*
		 * Populate lttng_event structure before kretprobe registration.
		 */
		smp_wmb();
		ret = lttng_kretprobes_register(event_name,
				event_param->u.kretprobe.symbol_name,
				event_param->u.kretprobe.offset,
				event_param->u.kretprobe.addr,
				event, event_return);
		if (ret) {
			kmem_cache_free(event_cache, event_return);
			ret = -EINVAL;
			goto register_error;
		}
		/* Take 2 refs on the module: one per event. */
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		ret = _lttng_event_metadata_statedump(chan->session, chan,
						    event_return);
		WARN_ON_ONCE(ret > 0);
		if (ret) {
			kmem_cache_free(event_cache, event_return);
			module_put(event->desc->owner);
			module_put(event->desc->owner);
			goto statedump_error;
		}
		list_add(&event_return->list, &chan->session->events);
		break;
	}
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_SYSCALL:
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->registered = 0;
		event->desc = event_desc;
		/* An event targets exactly one of entry or exit. */
		switch (event_param->u.syscall.entryexit) {
		case LTTNG_KERNEL_SYSCALL_ENTRYEXIT:
			ret = -EINVAL;
			goto register_error;
		case LTTNG_KERNEL_SYSCALL_ENTRY:
			event->u.syscall.entryexit = LTTNG_SYSCALL_ENTRY;
			break;
		case LTTNG_KERNEL_SYSCALL_EXIT:
			event->u.syscall.entryexit = LTTNG_SYSCALL_EXIT;
			break;
		}
		/* An event targets exactly one syscall ABI. */
		switch (event_param->u.syscall.abi) {
		case LTTNG_KERNEL_SYSCALL_ABI_ALL:
			ret = -EINVAL;
			goto register_error;
		case LTTNG_KERNEL_SYSCALL_ABI_NATIVE:
			event->u.syscall.abi = LTTNG_SYSCALL_ABI_NATIVE;
			break;
		case LTTNG_KERNEL_SYSCALL_ABI_COMPAT:
			event->u.syscall.abi = LTTNG_SYSCALL_ABI_COMPAT;
			break;
		}
		if (!event->desc) {
			ret = -EINVAL;
			goto register_error;
		}
		break;
	case LTTNG_KERNEL_UPROBE:
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->registered = 1;

		/*
		 * Populate lttng_event structure before event
		 * registration.
		 */
		smp_wmb();

		ret = lttng_uprobes_register_event(event_param->name,
				event_param->u.uprobe.fd,
				event);
		if (ret)
			goto register_error;
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		break;
	case LTTNG_KERNEL_FUNCTION:	/* Fall-through */
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
		goto register_error;
	}
	ret = _lttng_event_metadata_statedump(chan->session, chan, event);
	WARN_ON_ONCE(ret > 0);
	if (ret) {
		goto statedump_error;
	}
	hlist_add_head(&event->hlist, head);
	list_add(&event->list, &chan->session->events);
	return event;

statedump_error:
	/* If a statedump error occurs, events will not be readable. */
register_error:
	kmem_cache_free(event_cache, event);
cache_error:
exist:
type_error:
full:
	return ERR_PTR(ret);
}
1000
1001 struct lttng_event_notifier *_lttng_event_notifier_create(
1002 const struct lttng_event_desc *event_desc,
1003 uint64_t token, struct lttng_event_notifier_group *event_notifier_group,
1004 struct lttng_kernel_event_notifier *event_notifier_param,
1005 void *filter, enum lttng_kernel_instrumentation itype)
1006 {
1007 struct lttng_event_notifier *event_notifier;
1008 const char *event_name;
1009 struct hlist_head *head;
1010 int ret;
1011
1012 switch (itype) {
1013 case LTTNG_KERNEL_TRACEPOINT:
1014 event_name = event_desc->name;
1015 break;
1016 case LTTNG_KERNEL_KPROBE:
1017 case LTTNG_KERNEL_UPROBE:
1018 case LTTNG_KERNEL_SYSCALL:
1019 event_name = event_notifier_param->event.name;
1020 break;
1021 case LTTNG_KERNEL_KRETPROBE:
1022 case LTTNG_KERNEL_FUNCTION:
1023 case LTTNG_KERNEL_NOOP:
1024 default:
1025 WARN_ON_ONCE(1);
1026 ret = -EINVAL;
1027 goto type_error;
1028 }
1029
1030 head = utils_borrow_hash_table_bucket(event_notifier_group->event_notifiers_ht.table,
1031 LTTNG_EVENT_NOTIFIER_HT_SIZE, event_name);
1032 lttng_hlist_for_each_entry(event_notifier, head, hlist) {
1033 WARN_ON_ONCE(!event_notifier->desc);
1034 if (!strncmp(event_notifier->desc->name, event_name,
1035 LTTNG_KERNEL_SYM_NAME_LEN - 1)
1036 && event_notifier_group == event_notifier->group
1037 && token == event_notifier->user_token) {
1038 ret = -EEXIST;
1039 goto exist;
1040 }
1041 }
1042
1043 event_notifier = kmem_cache_zalloc(event_notifier_cache, GFP_KERNEL);
1044 if (!event_notifier) {
1045 ret = -ENOMEM;
1046 goto cache_error;
1047 }
1048
1049 event_notifier->group = event_notifier_group;
1050 event_notifier->user_token = token;
1051 event_notifier->num_captures = 0;
1052 event_notifier->filter = filter;
1053 event_notifier->instrumentation = itype;
1054 event_notifier->evtype = LTTNG_TYPE_EVENT;
1055 event_notifier->send_notification = lttng_event_notifier_notification_send;
1056 INIT_LIST_HEAD(&event_notifier->filter_bytecode_runtime_head);
1057 INIT_LIST_HEAD(&event_notifier->capture_bytecode_runtime_head);
1058 INIT_LIST_HEAD(&event_notifier->enablers_ref_head);
1059
1060 switch (itype) {
1061 case LTTNG_KERNEL_TRACEPOINT:
1062 /* Event will be enabled by enabler sync. */
1063 event_notifier->enabled = 0;
1064 event_notifier->registered = 0;
1065 event_notifier->desc = lttng_event_desc_get(event_name);
1066 if (!event_notifier->desc) {
1067 ret = -ENOENT;
1068 goto register_error;
1069 }
1070 /* Populate lttng_event_notifier structure before event registration. */
1071 smp_wmb();
1072 break;
1073 case LTTNG_KERNEL_KPROBE:
1074 /*
1075 * Needs to be explicitly enabled after creation, since
1076 * we may want to apply filters.
1077 */
1078 event_notifier->enabled = 0;
1079 event_notifier->registered = 1;
1080 /*
1081 * Populate lttng_event_notifier structure before event
1082 * registration.
1083 */
1084 smp_wmb();
1085 ret = lttng_kprobes_register_event_notifier(
1086 event_notifier_param->event.u.kprobe.symbol_name,
1087 event_notifier_param->event.u.kprobe.offset,
1088 event_notifier_param->event.u.kprobe.addr,
1089 event_notifier);
1090 if (ret) {
1091 ret = -EINVAL;
1092 goto register_error;
1093 }
1094 ret = try_module_get(event_notifier->desc->owner);
1095 WARN_ON_ONCE(!ret);
1096 break;
1097 case LTTNG_KERNEL_NOOP:
1098 case LTTNG_KERNEL_SYSCALL:
1099 /*
1100 * Needs to be explicitly enabled after creation, since
1101 * we may want to apply filters.
1102 */
1103 event_notifier->enabled = 0;
1104 event_notifier->registered = 0;
1105 event_notifier->desc = event_desc;
1106 switch (event_notifier_param->event.u.syscall.entryexit) {
1107 case LTTNG_KERNEL_SYSCALL_ENTRYEXIT:
1108 ret = -EINVAL;
1109 goto register_error;
1110 case LTTNG_KERNEL_SYSCALL_ENTRY:
1111 event_notifier->u.syscall.entryexit = LTTNG_SYSCALL_ENTRY;
1112 break;
1113 case LTTNG_KERNEL_SYSCALL_EXIT:
1114 event_notifier->u.syscall.entryexit = LTTNG_SYSCALL_EXIT;
1115 break;
1116 }
1117 switch (event_notifier_param->event.u.syscall.abi) {
1118 case LTTNG_KERNEL_SYSCALL_ABI_ALL:
1119 ret = -EINVAL;
1120 goto register_error;
1121 case LTTNG_KERNEL_SYSCALL_ABI_NATIVE:
1122 event_notifier->u.syscall.abi = LTTNG_SYSCALL_ABI_NATIVE;
1123 break;
1124 case LTTNG_KERNEL_SYSCALL_ABI_COMPAT:
1125 event_notifier->u.syscall.abi = LTTNG_SYSCALL_ABI_COMPAT;
1126 break;
1127 }
1128
1129 if (!event_notifier->desc) {
1130 ret = -EINVAL;
1131 goto register_error;
1132 }
1133 break;
1134 case LTTNG_KERNEL_UPROBE:
1135 /*
1136 * Needs to be explicitly enabled after creation, since
1137 * we may want to apply filters.
1138 */
1139 event_notifier->enabled = 0;
1140 event_notifier->registered = 1;
1141
1142 /*
1143 * Populate lttng_event_notifier structure before
1144 * event_notifier registration.
1145 */
1146 smp_wmb();
1147
1148 ret = lttng_uprobes_register_event_notifier(
1149 event_notifier_param->event.name,
1150 event_notifier_param->event.u.uprobe.fd,
1151 event_notifier);
1152 if (ret)
1153 goto register_error;
1154 ret = try_module_get(event_notifier->desc->owner);
1155 WARN_ON_ONCE(!ret);
1156 break;
1157 case LTTNG_KERNEL_KRETPROBE:
1158 case LTTNG_KERNEL_FUNCTION:
1159 default:
1160 WARN_ON_ONCE(1);
1161 ret = -EINVAL;
1162 goto register_error;
1163 }
1164
1165 list_add(&event_notifier->list, &event_notifier_group->event_notifiers_head);
1166 hlist_add_head(&event_notifier->hlist, head);
1167 return event_notifier;
1168
1169 register_error:
1170 kmem_cache_free(event_notifier_cache, event_notifier);
1171 cache_error:
1172 exist:
1173 type_error:
1174 return ERR_PTR(ret);
1175 }
1176
1177 struct lttng_event *lttng_event_create(struct lttng_channel *chan,
1178 struct lttng_kernel_event *event_param,
1179 void *filter,
1180 const struct lttng_event_desc *event_desc,
1181 enum lttng_kernel_instrumentation itype)
1182 {
1183 struct lttng_event *event;
1184
1185 mutex_lock(&sessions_mutex);
1186 event = _lttng_event_create(chan, event_param, filter, event_desc,
1187 itype);
1188 mutex_unlock(&sessions_mutex);
1189 return event;
1190 }
1191
1192 struct lttng_event_notifier *lttng_event_notifier_create(
1193 const struct lttng_event_desc *event_desc,
1194 uint64_t id, struct lttng_event_notifier_group *event_notifier_group,
1195 struct lttng_kernel_event_notifier *event_notifier_param,
1196 void *filter, enum lttng_kernel_instrumentation itype)
1197 {
1198 struct lttng_event_notifier *event_notifier;
1199
1200 mutex_lock(&sessions_mutex);
1201 event_notifier = _lttng_event_notifier_create(event_desc, id,
1202 event_notifier_group, event_notifier_param, filter, itype);
1203 mutex_unlock(&sessions_mutex);
1204 return event_notifier;
1205 }
1206
1207 /* Only used for tracepoints for now. */
1208 static
1209 void register_event(struct lttng_event *event)
1210 {
1211 const struct lttng_event_desc *desc;
1212 int ret = -EINVAL;
1213
1214 if (event->registered)
1215 return;
1216
1217 desc = event->desc;
1218 switch (event->instrumentation) {
1219 case LTTNG_KERNEL_TRACEPOINT:
1220 ret = lttng_wrapper_tracepoint_probe_register(desc->kname,
1221 desc->probe_callback,
1222 event);
1223 break;
1224 case LTTNG_KERNEL_SYSCALL:
1225 ret = lttng_syscall_filter_enable_event(event->chan, event);
1226 break;
1227 case LTTNG_KERNEL_KPROBE:
1228 case LTTNG_KERNEL_UPROBE:
1229 case LTTNG_KERNEL_KRETPROBE:
1230 case LTTNG_KERNEL_NOOP:
1231 ret = 0;
1232 break;
1233 case LTTNG_KERNEL_FUNCTION: /* Fall-through */
1234 default:
1235 WARN_ON_ONCE(1);
1236 }
1237 if (!ret)
1238 event->registered = 1;
1239 }
1240
1241 /*
1242 * Only used internally at session destruction.
1243 */
1244 int _lttng_event_unregister(struct lttng_event *event)
1245 {
1246 const struct lttng_event_desc *desc;
1247 int ret = -EINVAL;
1248
1249 if (!event->registered)
1250 return 0;
1251
1252 desc = event->desc;
1253 switch (event->instrumentation) {
1254 case LTTNG_KERNEL_TRACEPOINT:
1255 ret = lttng_wrapper_tracepoint_probe_unregister(event->desc->kname,
1256 event->desc->probe_callback,
1257 event);
1258 break;
1259 case LTTNG_KERNEL_KPROBE:
1260 lttng_kprobes_unregister_event(event);
1261 ret = 0;
1262 break;
1263 case LTTNG_KERNEL_KRETPROBE:
1264 lttng_kretprobes_unregister(event);
1265 ret = 0;
1266 break;
1267 case LTTNG_KERNEL_SYSCALL:
1268 ret = lttng_syscall_filter_disable_event(event->chan, event);
1269 break;
1270 case LTTNG_KERNEL_NOOP:
1271 ret = 0;
1272 break;
1273 case LTTNG_KERNEL_UPROBE:
1274 lttng_uprobes_unregister_event(event);
1275 ret = 0;
1276 break;
1277 case LTTNG_KERNEL_FUNCTION: /* Fall-through */
1278 default:
1279 WARN_ON_ONCE(1);
1280 }
1281 if (!ret)
1282 event->registered = 0;
1283 return ret;
1284 }
1285
1286 /* Only used for tracepoints for now. */
1287 static
1288 void register_event_notifier(struct lttng_event_notifier *event_notifier)
1289 {
1290 const struct lttng_event_desc *desc;
1291 int ret = -EINVAL;
1292
1293 if (event_notifier->registered)
1294 return;
1295
1296 desc = event_notifier->desc;
1297 switch (event_notifier->instrumentation) {
1298 case LTTNG_KERNEL_TRACEPOINT:
1299 ret = lttng_wrapper_tracepoint_probe_register(desc->kname,
1300 desc->event_notifier_callback,
1301 event_notifier);
1302 break;
1303 case LTTNG_KERNEL_SYSCALL:
1304 ret = lttng_syscall_filter_enable_event_notifier(event_notifier);
1305 break;
1306 case LTTNG_KERNEL_KPROBE:
1307 case LTTNG_KERNEL_UPROBE:
1308 ret = 0;
1309 break;
1310 case LTTNG_KERNEL_KRETPROBE:
1311 case LTTNG_KERNEL_FUNCTION:
1312 case LTTNG_KERNEL_NOOP:
1313 default:
1314 WARN_ON_ONCE(1);
1315 }
1316 if (!ret)
1317 event_notifier->registered = 1;
1318 }
1319
1320 static
1321 int _lttng_event_notifier_unregister(
1322 struct lttng_event_notifier *event_notifier)
1323 {
1324 const struct lttng_event_desc *desc;
1325 int ret = -EINVAL;
1326
1327 if (!event_notifier->registered)
1328 return 0;
1329
1330 desc = event_notifier->desc;
1331 switch (event_notifier->instrumentation) {
1332 case LTTNG_KERNEL_TRACEPOINT:
1333 ret = lttng_wrapper_tracepoint_probe_unregister(event_notifier->desc->kname,
1334 event_notifier->desc->event_notifier_callback,
1335 event_notifier);
1336 break;
1337 case LTTNG_KERNEL_KPROBE:
1338 lttng_kprobes_unregister_event_notifier(event_notifier);
1339 ret = 0;
1340 break;
1341 case LTTNG_KERNEL_UPROBE:
1342 lttng_uprobes_unregister_event_notifier(event_notifier);
1343 ret = 0;
1344 break;
1345 case LTTNG_KERNEL_SYSCALL:
1346 ret = lttng_syscall_filter_disable_event_notifier(event_notifier);
1347 break;
1348 case LTTNG_KERNEL_KRETPROBE:
1349 case LTTNG_KERNEL_FUNCTION:
1350 case LTTNG_KERNEL_NOOP:
1351 default:
1352 WARN_ON_ONCE(1);
1353 }
1354 if (!ret)
1355 event_notifier->registered = 0;
1356 return ret;
1357 }
1358
1359 /*
1360 * Only used internally at session destruction.
1361 */
1362 static
1363 void _lttng_event_destroy(struct lttng_event *event)
1364 {
1365 switch (event->instrumentation) {
1366 case LTTNG_KERNEL_TRACEPOINT:
1367 lttng_event_desc_put(event->desc);
1368 break;
1369 case LTTNG_KERNEL_KPROBE:
1370 module_put(event->desc->owner);
1371 lttng_kprobes_destroy_event_private(event);
1372 break;
1373 case LTTNG_KERNEL_KRETPROBE:
1374 module_put(event->desc->owner);
1375 lttng_kretprobes_destroy_private(event);
1376 break;
1377 case LTTNG_KERNEL_NOOP:
1378 case LTTNG_KERNEL_SYSCALL:
1379 break;
1380 case LTTNG_KERNEL_UPROBE:
1381 module_put(event->desc->owner);
1382 lttng_uprobes_destroy_event_private(event);
1383 break;
1384 case LTTNG_KERNEL_FUNCTION: /* Fall-through */
1385 default:
1386 WARN_ON_ONCE(1);
1387 }
1388 list_del(&event->list);
1389 lttng_destroy_context(event->ctx);
1390 kmem_cache_free(event_cache, event);
1391 }
1392
1393 /*
1394 * Only used internally at session destruction.
1395 */
1396 static
1397 void _lttng_event_notifier_destroy(struct lttng_event_notifier *event_notifier)
1398 {
1399 switch (event_notifier->instrumentation) {
1400 case LTTNG_KERNEL_TRACEPOINT:
1401 lttng_event_desc_put(event_notifier->desc);
1402 break;
1403 case LTTNG_KERNEL_KPROBE:
1404 module_put(event_notifier->desc->owner);
1405 lttng_kprobes_destroy_event_notifier_private(event_notifier);
1406 break;
1407 case LTTNG_KERNEL_NOOP:
1408 case LTTNG_KERNEL_SYSCALL:
1409 break;
1410 case LTTNG_KERNEL_UPROBE:
1411 module_put(event_notifier->desc->owner);
1412 lttng_uprobes_destroy_event_notifier_private(event_notifier);
1413 break;
1414 case LTTNG_KERNEL_KRETPROBE:
1415 case LTTNG_KERNEL_FUNCTION:
1416 default:
1417 WARN_ON_ONCE(1);
1418 }
1419 list_del(&event_notifier->list);
1420 kmem_cache_free(event_notifier_cache, event_notifier);
1421 }
1422
1423 struct lttng_id_tracker *get_tracker(struct lttng_session *session,
1424 enum tracker_type tracker_type)
1425 {
1426 switch (tracker_type) {
1427 case TRACKER_PID:
1428 return &session->pid_tracker;
1429 case TRACKER_VPID:
1430 return &session->vpid_tracker;
1431 case TRACKER_UID:
1432 return &session->uid_tracker;
1433 case TRACKER_VUID:
1434 return &session->vuid_tracker;
1435 case TRACKER_GID:
1436 return &session->gid_tracker;
1437 case TRACKER_VGID:
1438 return &session->vgid_tracker;
1439 default:
1440 WARN_ON_ONCE(1);
1441 return NULL;
1442 }
1443 }
1444
1445 int lttng_session_track_id(struct lttng_session *session,
1446 enum tracker_type tracker_type, int id)
1447 {
1448 struct lttng_id_tracker *tracker;
1449 int ret;
1450
1451 tracker = get_tracker(session, tracker_type);
1452 if (!tracker)
1453 return -EINVAL;
1454 if (id < -1)
1455 return -EINVAL;
1456 mutex_lock(&sessions_mutex);
1457 if (id == -1) {
1458 /* track all ids: destroy tracker. */
1459 lttng_id_tracker_destroy(tracker, true);
1460 ret = 0;
1461 } else {
1462 ret = lttng_id_tracker_add(tracker, id);
1463 }
1464 mutex_unlock(&sessions_mutex);
1465 return ret;
1466 }
1467
1468 int lttng_session_untrack_id(struct lttng_session *session,
1469 enum tracker_type tracker_type, int id)
1470 {
1471 struct lttng_id_tracker *tracker;
1472 int ret;
1473
1474 tracker = get_tracker(session, tracker_type);
1475 if (!tracker)
1476 return -EINVAL;
1477 if (id < -1)
1478 return -EINVAL;
1479 mutex_lock(&sessions_mutex);
1480 if (id == -1) {
1481 /* untrack all ids: replace by empty tracker. */
1482 ret = lttng_id_tracker_empty_set(tracker);
1483 } else {
1484 ret = lttng_id_tracker_del(tracker, id);
1485 }
1486 mutex_unlock(&sessions_mutex);
1487 return ret;
1488 }
1489
/*
 * seq_file .start callback: take the sessions mutex (released in
 * id_list_stop) and return the element at offset *pos by scanning the
 * tracker hash table from the beginning.
 */
static
void *id_list_start(struct seq_file *m, loff_t *pos)
{
	struct lttng_id_tracker *id_tracker = m->private;
	struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
	struct lttng_id_hash_node *e;
	int iter = 0, i;

	mutex_lock(&sessions_mutex);
	if (id_tracker_p) {
		/* Linear scan until the running counter reaches *pos. */
		for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
			struct hlist_head *head = &id_tracker_p->id_hash[i];

			lttng_hlist_for_each_entry(e, head, hlist) {
				if (iter++ >= *pos)
					return e;
			}
		}
	} else {
		/*
		 * ID tracker disabled: expose a single pseudo-element
		 * meaning "track everything".
		 * NOTE(review): id_tracker_p is NULL here, so this
		 * returns NULL, which seq_file treats as end-of-list —
		 * confirm against id_list_show()'s p == id_tracker_p
		 * sentinel convention.
		 */
		if (iter >= *pos && iter == 0) {
			return id_tracker_p;	/* empty tracker */
		}
		iter++;
	}
	/* End of list */
	return NULL;
}
1518
/* Called with sessions_mutex held. */
/*
 * seq_file .next callback: advance *ppos and rescan the hash table
 * from the start until the new offset is reached (linear rescan per
 * step; the listing is small).
 */
static
void *id_list_next(struct seq_file *m, void *p, loff_t *ppos)
{
	struct lttng_id_tracker *id_tracker = m->private;
	struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
	struct lttng_id_hash_node *e;
	int iter = 0, i;

	(*ppos)++;
	if (id_tracker_p) {
		for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
			struct hlist_head *head = &id_tracker_p->id_hash[i];

			lttng_hlist_for_each_entry(e, head, hlist) {
				if (iter++ >= *ppos)
					return e;
			}
		}
	} else {
		/*
		 * ID tracker disabled: the single pseudo-element was
		 * already returned by id_list_start(), so this branch
		 * always falls through to end-of-list.
		 */
		if (iter >= *ppos && iter == 0)
			return p;	/* empty tracker */
		iter++;
	}

	/* End of list */
	return NULL;
}
1548
/* seq_file .stop callback: release the mutex taken by id_list_start(). */
static
void id_list_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&sessions_mutex);
}
1554
1555 static
1556 int id_list_show(struct seq_file *m, void *p)
1557 {
1558 struct lttng_id_tracker *id_tracker = m->private;
1559 struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
1560 int id;
1561
1562 if (p == id_tracker_p) {
1563 /* Tracker disabled. */
1564 id = -1;
1565 } else {
1566 const struct lttng_id_hash_node *e = p;
1567
1568 id = lttng_id_tracker_get_node_id(e);
1569 }
1570 switch (id_tracker->tracker_type) {
1571 case TRACKER_PID:
1572 seq_printf(m, "process { pid = %d; };\n", id);
1573 break;
1574 case TRACKER_VPID:
1575 seq_printf(m, "process { vpid = %d; };\n", id);
1576 break;
1577 case TRACKER_UID:
1578 seq_printf(m, "user { uid = %d; };\n", id);
1579 break;
1580 case TRACKER_VUID:
1581 seq_printf(m, "user { vuid = %d; };\n", id);
1582 break;
1583 case TRACKER_GID:
1584 seq_printf(m, "group { gid = %d; };\n", id);
1585 break;
1586 case TRACKER_VGID:
1587 seq_printf(m, "group { vgid = %d; };\n", id);
1588 break;
1589 default:
1590 seq_printf(m, "UNKNOWN { field = %d };\n", id);
1591 }
1592 return 0;
1593 }
1594
/* seq_file operations backing the tracker-ids listing file. */
static
const struct seq_operations lttng_tracker_ids_list_seq_ops = {
	.start = id_list_start,
	.next = id_list_next,
	.stop = id_list_stop,
	.show = id_list_show,
};
1602
/* Open handler: attach the tracker-ids seq_file iterator to @file. */
static
int lttng_tracker_ids_list_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &lttng_tracker_ids_list_seq_ops);
}
1608
1609 static
1610 int lttng_tracker_ids_list_release(struct inode *inode, struct file *file)
1611 {
1612 struct seq_file *m = file->private_data;
1613 struct lttng_id_tracker *id_tracker = m->private;
1614 int ret;
1615
1616 WARN_ON_ONCE(!id_tracker);
1617 ret = seq_release(inode, file);
1618 if (!ret)
1619 fput(id_tracker->session->file);
1620 return ret;
1621 }
1622
/* File operations for the anonymous tracker-ids listing file. */
const struct file_operations lttng_tracker_ids_list_fops = {
	.owner = THIS_MODULE,
	.open = lttng_tracker_ids_list_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = lttng_tracker_ids_list_release,
};
1630
/*
 * Create an anonymous file listing the ids tracked by @tracker_type in
 * @session and return a new file descriptor for it, or a negative
 * error code. The listing file pins the session file (extra f_count
 * reference) until it is released.
 */
int lttng_session_list_tracker_ids(struct lttng_session *session,
		enum tracker_type tracker_type)
{
	struct file *tracker_ids_list_file;
	struct seq_file *m;
	int file_fd, ret;

	file_fd = lttng_get_unused_fd();
	if (file_fd < 0) {
		ret = file_fd;
		goto fd_error;
	}

	tracker_ids_list_file = anon_inode_getfile("[lttng_tracker_ids_list]",
					  &lttng_tracker_ids_list_fops,
					  NULL, O_RDWR);
	if (IS_ERR(tracker_ids_list_file)) {
		ret = PTR_ERR(tracker_ids_list_file);
		goto file_error;
	}
	/* Pin the session file so it outlives the listing file. */
	if (!atomic_long_add_unless(&session->file->f_count, 1, LONG_MAX)) {
		ret = -EOVERFLOW;
		goto refcount_error;
	}
	/* Attach the seq_file iterator by hand (no inode needed). */
	ret = lttng_tracker_ids_list_fops.open(NULL, tracker_ids_list_file);
	if (ret < 0)
		goto open_error;
	m = tracker_ids_list_file->private_data;

	m->private = get_tracker(session, tracker_type);
	BUG_ON(!m->private);
	/* Publish the fd only once everything is set up. */
	fd_install(file_fd, tracker_ids_list_file);

	return file_fd;

	/* Unwind in reverse acquisition order. */
open_error:
	atomic_long_dec(&session->file->f_count);
refcount_error:
	fput(tracker_ids_list_file);
file_error:
	put_unused_fd(file_fd);
fd_error:
	return ret;
}
1675
1676 /*
1677 * Enabler management.
1678 */
1679 static
1680 int lttng_match_enabler_star_glob(const char *desc_name,
1681 const char *pattern)
1682 {
1683 if (!strutils_star_glob_match(pattern, LTTNG_SIZE_MAX,
1684 desc_name, LTTNG_SIZE_MAX))
1685 return 0;
1686 return 1;
1687 }
1688
/* Exact name match of @desc_name against @name: 1 on match, else 0. */
static
int lttng_match_enabler_name(const char *desc_name,
		const char *name)
{
	return strcmp(desc_name, name) == 0;
}
1697
/*
 * Check whether the probe descriptor @desc matches @enabler.
 * Returns 1 on match, 0 on mismatch, negative error code on invalid
 * enabler parameters.
 */
int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
		struct lttng_enabler *enabler)
{
	const char *desc_name, *enabler_name;
	bool compat = false, entry = false;

	enabler_name = enabler->event_param.name;
	switch (enabler->event_param.instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
		/* Tracepoints match directly on the descriptor name. */
		desc_name = desc->name;
		switch (enabler->format_type) {
		case LTTNG_ENABLER_FORMAT_STAR_GLOB:
			return lttng_match_enabler_star_glob(desc_name, enabler_name);
		case LTTNG_ENABLER_FORMAT_NAME:
			return lttng_match_enabler_name(desc_name, enabler_name);
		default:
			return -EINVAL;
		}
		break;
	case LTTNG_KERNEL_SYSCALL:
		desc_name = desc->name;
		/*
		 * Syscall descriptor names are built as
		 * [compat_]syscall_{entry,exit}_<name>; strip the
		 * prefixes and remember which variant this is.
		 */
		if (!strncmp(desc_name, "compat_", strlen("compat_"))) {
			desc_name += strlen("compat_");
			compat = true;
		}
		if (!strncmp(desc_name, "syscall_exit_",
				strlen("syscall_exit_"))) {
			desc_name += strlen("syscall_exit_");
		} else if (!strncmp(desc_name, "syscall_entry_",
				strlen("syscall_entry_"))) {
			desc_name += strlen("syscall_entry_");
			entry = true;
		} else {
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
		/* Filter on the entry/exit direction requested by the enabler. */
		switch (enabler->event_param.u.syscall.entryexit) {
		case LTTNG_KERNEL_SYSCALL_ENTRYEXIT:
			break;
		case LTTNG_KERNEL_SYSCALL_ENTRY:
			if (!entry)
				return 0;
			break;
		case LTTNG_KERNEL_SYSCALL_EXIT:
			if (entry)
				return 0;
			break;
		default:
			return -EINVAL;
		}
		/* Filter on the native/compat ABI. */
		switch (enabler->event_param.u.syscall.abi) {
		case LTTNG_KERNEL_SYSCALL_ABI_ALL:
			break;
		case LTTNG_KERNEL_SYSCALL_ABI_NATIVE:
			if (compat)
				return 0;
			break;
		case LTTNG_KERNEL_SYSCALL_ABI_COMPAT:
			if (!compat)
				return 0;
			break;
		default:
			return -EINVAL;
		}
		/* Finally match on the stripped syscall name. */
		switch (enabler->event_param.u.syscall.match) {
		case LTTNG_KERNEL_SYSCALL_MATCH_NAME:
			switch (enabler->format_type) {
			case LTTNG_ENABLER_FORMAT_STAR_GLOB:
				return lttng_match_enabler_star_glob(desc_name, enabler_name);
			case LTTNG_ENABLER_FORMAT_NAME:
				return lttng_match_enabler_name(desc_name, enabler_name);
			default:
				return -EINVAL;
			}
			break;
		case LTTNG_KERNEL_SYSCALL_MATCH_NR:
			return -EINVAL;	/* Not implemented. */
		default:
			return -EINVAL;
		}
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}
}
1784
1785 static
1786 int lttng_event_enabler_match_event(struct lttng_event_enabler *event_enabler,
1787 struct lttng_event *event)
1788 {
1789 struct lttng_enabler *base_enabler = lttng_event_enabler_as_enabler(
1790 event_enabler);
1791
1792 if (base_enabler->event_param.instrumentation != event->instrumentation)
1793 return 0;
1794 if (lttng_desc_match_enabler(event->desc, base_enabler)
1795 && event->chan == event_enabler->chan)
1796 return 1;
1797 else
1798 return 0;
1799 }
1800
1801 static
1802 int lttng_event_notifier_enabler_match_event_notifier(struct lttng_event_notifier_enabler *event_notifier_enabler,
1803 struct lttng_event_notifier *event_notifier)
1804 {
1805 struct lttng_enabler *base_enabler = lttng_event_notifier_enabler_as_enabler(
1806 event_notifier_enabler);
1807
1808 if (base_enabler->event_param.instrumentation != event_notifier->instrumentation)
1809 return 0;
1810 if (lttng_desc_match_enabler(event_notifier->desc, base_enabler)
1811 && event_notifier->group == event_notifier_enabler->group
1812 && event_notifier->user_token == event_notifier_enabler->base.user_token)
1813 return 1;
1814 else
1815 return 0;
1816 }
1817
1818 static
1819 struct lttng_enabler_ref *lttng_enabler_ref(
1820 struct list_head *enablers_ref_list,
1821 struct lttng_enabler *enabler)
1822 {
1823 struct lttng_enabler_ref *enabler_ref;
1824
1825 list_for_each_entry(enabler_ref, enablers_ref_list, node) {
1826 if (enabler_ref->ref == enabler)
1827 return enabler_ref;
1828 }
1829 return NULL;
1830 }
1831
/*
 * Walk the registered probe descriptors and create an lttng_event for
 * every descriptor matching @event_enabler that does not already have
 * one on the enabler's channel. Called with sessions mutex held (via
 * enabler sync).
 */
static
void lttng_create_tracepoint_event_if_missing(struct lttng_event_enabler *event_enabler)
{
	struct lttng_session *session = event_enabler->chan->session;
	struct lttng_probe_desc *probe_desc;
	const struct lttng_event_desc *desc;
	int i;
	struct list_head *probe_list;

	probe_list = lttng_get_probe_list_head();
	/*
	 * For each probe event, if we find that a probe event matches
	 * our enabler, create an associated lttng_event if not
	 * already present.
	 */
	list_for_each_entry(probe_desc, probe_list, head) {
		for (i = 0; i < probe_desc->nr_events; i++) {
			int found = 0;
			struct hlist_head *head;
			struct lttng_event *event;

			desc = probe_desc->event_desc[i];
			if (!lttng_desc_match_enabler(desc,
					lttng_event_enabler_as_enabler(event_enabler)))
				continue;

			/*
			 * Check if already created.
			 */
			head = utils_borrow_hash_table_bucket(
				session->events_ht.table, LTTNG_EVENT_HT_SIZE,
				desc->name);
			lttng_hlist_for_each_entry(event, head, hlist) {
				if (event->desc == desc
						&& event->chan == event_enabler->chan)
					found = 1;
			}
			if (found)
				continue;

			/*
			 * We need to create an event for this
			 * event probe.
			 */
			event = _lttng_event_create(event_enabler->chan,
					NULL, NULL, desc,
					LTTNG_KERNEL_TRACEPOINT);
			/*
			 * NOTE(review): failure is detected with !event
			 * here, while the event_notifier path checks
			 * IS_ERR() — confirm _lttng_event_create()'s
			 * error convention is NULL, not ERR_PTR.
			 */
			if (!event) {
				printk(KERN_INFO "LTTng: Unable to create event %s\n",
					probe_desc->event_desc[i]->name);
			}
		}
	}
}
1886
1887 static
1888 void lttng_create_tracepoint_event_notifier_if_missing(struct lttng_event_notifier_enabler *event_notifier_enabler)
1889 {
1890 struct lttng_event_notifier_group *event_notifier_group = event_notifier_enabler->group;
1891 struct lttng_probe_desc *probe_desc;
1892 const struct lttng_event_desc *desc;
1893 int i;
1894 struct list_head *probe_list;
1895
1896 probe_list = lttng_get_probe_list_head();
1897 /*
1898 * For each probe event, if we find that a probe event matches
1899 * our enabler, create an associated lttng_event_notifier if not
1900 * already present.
1901 */
1902 list_for_each_entry(probe_desc, probe_list, head) {
1903 for (i = 0; i < probe_desc->nr_events; i++) {
1904 int found = 0;
1905 struct hlist_head *head;
1906 struct lttng_event_notifier *event_notifier;
1907
1908 desc = probe_desc->event_desc[i];
1909 if (!lttng_desc_match_enabler(desc,
1910 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)))
1911 continue;
1912
1913 /*
1914 * Check if already created.
1915 */
1916 head = utils_borrow_hash_table_bucket(
1917 event_notifier_group->event_notifiers_ht.table,
1918 LTTNG_EVENT_NOTIFIER_HT_SIZE, desc->name);
1919 lttng_hlist_for_each_entry(event_notifier, head, hlist) {
1920 if (event_notifier->desc == desc
1921 && event_notifier->user_token == event_notifier_enabler->base.user_token)
1922 found = 1;
1923 }
1924 if (found)
1925 continue;
1926
1927 /*
1928 * We need to create a event_notifier for this event probe.
1929 */
1930 event_notifier = _lttng_event_notifier_create(desc,
1931 event_notifier_enabler->base.user_token,
1932 event_notifier_group, NULL, NULL,
1933 LTTNG_KERNEL_TRACEPOINT);
1934 if (IS_ERR(event_notifier)) {
1935 printk(KERN_INFO "Unable to create event_notifier %s\n",
1936 probe_desc->event_desc[i]->name);
1937 }
1938 }
1939 }
1940 }
1941
1942 static
1943 void lttng_create_syscall_event_if_missing(struct lttng_event_enabler *event_enabler)
1944 {
1945 int ret;
1946
1947 ret = lttng_syscalls_register_event(event_enabler->chan, NULL);
1948 WARN_ON_ONCE(ret);
1949 }
1950
1951 static
1952 void lttng_create_syscall_event_notifier_if_missing(struct lttng_event_notifier_enabler *event_notifier_enabler)
1953 {
1954 int ret;
1955
1956 ret = lttng_syscalls_register_event_notifier(event_notifier_enabler, NULL);
1957 WARN_ON_ONCE(ret);
1958 ret = lttng_syscals_create_matching_event_notifiers(event_notifier_enabler, NULL);
1959 WARN_ON_ONCE(ret);
1960 }
1961
1962 /*
1963 * Create struct lttng_event if it is missing and present in the list of
1964 * tracepoint probes.
1965 * Should be called with sessions mutex held.
1966 */
1967 static
1968 void lttng_create_event_if_missing(struct lttng_event_enabler *event_enabler)
1969 {
1970 switch (event_enabler->base.event_param.instrumentation) {
1971 case LTTNG_KERNEL_TRACEPOINT:
1972 lttng_create_tracepoint_event_if_missing(event_enabler);
1973 break;
1974 case LTTNG_KERNEL_SYSCALL:
1975 lttng_create_syscall_event_if_missing(event_enabler);
1976 break;
1977 default:
1978 WARN_ON_ONCE(1);
1979 break;
1980 }
1981 }
1982
/*
 * Create events associated with an event_enabler (if not already present),
 * and add backward reference from the event to the enabler.
 * Should be called with sessions mutex held.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static
int lttng_event_enabler_ref_events(struct lttng_event_enabler *event_enabler)
{
	struct lttng_channel *chan = event_enabler->chan;
	struct lttng_session *session = event_enabler->chan->session;
	struct lttng_enabler *base_enabler = lttng_event_enabler_as_enabler(event_enabler);
	struct lttng_event *event;

	/*
	 * The catch-all syscall enabler ("*", entry+exit, all ABIs,
	 * name match) toggles the channel-wide syscall_all flag instead
	 * of matching individual syscall events.
	 */
	if (base_enabler->event_param.instrumentation == LTTNG_KERNEL_SYSCALL &&
			base_enabler->event_param.u.syscall.entryexit == LTTNG_KERNEL_SYSCALL_ENTRYEXIT &&
			base_enabler->event_param.u.syscall.abi == LTTNG_KERNEL_SYSCALL_ABI_ALL &&
			base_enabler->event_param.u.syscall.match == LTTNG_KERNEL_SYSCALL_MATCH_NAME &&
			!strcmp(base_enabler->event_param.name, "*")) {
		if (base_enabler->enabled)
			WRITE_ONCE(chan->syscall_all, 1);
		else
			WRITE_ONCE(chan->syscall_all, 0);
	}

	/* First ensure that probe events are created for this enabler. */
	lttng_create_event_if_missing(event_enabler);

	/* For each event matching event_enabler in session event list. */
	list_for_each_entry(event, &session->events, list) {
		struct lttng_enabler_ref *enabler_ref;

		if (!lttng_event_enabler_match_event(event_enabler, event))
			continue;
		enabler_ref = lttng_enabler_ref(&event->enablers_ref_head,
			lttng_event_enabler_as_enabler(event_enabler));
		if (!enabler_ref) {
			/*
			 * If no backward ref, create it.
			 * Add backward ref from event to event_enabler.
			 */
			enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
			if (!enabler_ref)
				return -ENOMEM;
			enabler_ref->ref = lttng_event_enabler_as_enabler(event_enabler);
			list_add(&enabler_ref->node,
				&event->enablers_ref_head);
		}

		/*
		 * Link filter bytecodes if not linked yet.
		 */
		lttng_enabler_link_bytecode(event->desc,
			lttng_static_ctx,
			&event->filter_bytecode_runtime_head,
			&lttng_event_enabler_as_enabler(event_enabler)->filter_bytecode_head);

		/* TODO: merge event context. */
	}
	return 0;
}
2043
2044 /*
2045 * Create struct lttng_event_notifier if it is missing and present in the list of
2046 * tracepoint probes.
2047 * Should be called with sessions mutex held.
2048 */
2049 static
2050 void lttng_create_event_notifier_if_missing(struct lttng_event_notifier_enabler *event_notifier_enabler)
2051 {
2052 switch (event_notifier_enabler->base.event_param.instrumentation) {
2053 case LTTNG_KERNEL_TRACEPOINT:
2054 lttng_create_tracepoint_event_notifier_if_missing(event_notifier_enabler);
2055 break;
2056 case LTTNG_KERNEL_SYSCALL:
2057 lttng_create_syscall_event_notifier_if_missing(event_notifier_enabler);
2058 break;
2059 default:
2060 WARN_ON_ONCE(1);
2061 break;
2062 }
2063 }
2064
/*
 * Create event_notifiers associated with a event_notifier enabler (if not already present),
 * add a backward reference from each matching notifier to the enabler,
 * and link its filter and capture bytecodes.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static
int lttng_event_notifier_enabler_ref_event_notifiers(
		struct lttng_event_notifier_enabler *event_notifier_enabler)
{
	struct lttng_event_notifier_group *event_notifier_group = event_notifier_enabler->group;
	struct lttng_enabler *base_enabler = lttng_event_notifier_enabler_as_enabler(event_notifier_enabler);
	struct lttng_event_notifier *event_notifier;

	/*
	 * The catch-all syscall enabler ("*", all ABIs, name match)
	 * toggles the group-wide syscall entry/exit flags instead of
	 * matching individual syscall notifiers.
	 */
	if (base_enabler->event_param.instrumentation == LTTNG_KERNEL_SYSCALL &&
			base_enabler->event_param.u.syscall.abi == LTTNG_KERNEL_SYSCALL_ABI_ALL &&
			base_enabler->event_param.u.syscall.match == LTTNG_KERNEL_SYSCALL_MATCH_NAME &&
			!strcmp(base_enabler->event_param.name, "*")) {

		int enabled = base_enabler->enabled;
		enum lttng_kernel_syscall_entryexit entryexit = base_enabler->event_param.u.syscall.entryexit;

		if (entryexit == LTTNG_KERNEL_SYSCALL_ENTRY || entryexit == LTTNG_KERNEL_SYSCALL_ENTRYEXIT)
			WRITE_ONCE(event_notifier_group->syscall_all_entry, enabled);

		if (entryexit == LTTNG_KERNEL_SYSCALL_EXIT || entryexit == LTTNG_KERNEL_SYSCALL_ENTRYEXIT)
			WRITE_ONCE(event_notifier_group->syscall_all_exit, enabled);

	}

	/* First ensure that probe event_notifiers are created for this enabler. */
	lttng_create_event_notifier_if_missing(event_notifier_enabler);

	/* Link the created event_notifier with its associated enabler. */
	list_for_each_entry(event_notifier, &event_notifier_group->event_notifiers_head, list) {
		struct lttng_enabler_ref *enabler_ref;

		if (!lttng_event_notifier_enabler_match_event_notifier(event_notifier_enabler, event_notifier))
			continue;

		enabler_ref = lttng_enabler_ref(&event_notifier->enablers_ref_head,
			lttng_event_notifier_enabler_as_enabler(event_notifier_enabler));
		if (!enabler_ref) {
			/*
			 * If no backward ref, create it.
			 * Add backward ref from event_notifier to enabler.
			 */
			enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
			if (!enabler_ref)
				return -ENOMEM;

			enabler_ref->ref = lttng_event_notifier_enabler_as_enabler(
				event_notifier_enabler);
			list_add(&enabler_ref->node,
				&event_notifier->enablers_ref_head);
		}

		/*
		 * Link filter bytecodes if not linked yet.
		 */
		lttng_enabler_link_bytecode(event_notifier->desc,
			lttng_static_ctx, &event_notifier->filter_bytecode_runtime_head,
			&lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->filter_bytecode_head);

		/* Link capture bytecodes if not linked yet. */
		lttng_enabler_link_bytecode(event_notifier->desc,
			lttng_static_ctx, &event_notifier->capture_bytecode_runtime_head,
			&event_notifier_enabler->capture_bytecode_head);

		/* Propagate the enabler's capture count to the notifier. */
		event_notifier->num_captures = event_notifier_enabler->num_captures;
	}
	return 0;
}
2135
/*
 * Called at module load: connect the probe on all enablers matching
 * this event.
 * Called with sessions lock held.
 * Always returns 0.
 */
int lttng_fix_pending_events(void)
{
	struct lttng_session *session;

	/* Resync every session's enablers against the probe list. */
	list_for_each_entry(session, &sessions, list)
		lttng_session_lazy_sync_event_enablers(session);
	return 0;
}
2149
2150 static bool lttng_event_notifier_group_has_active_event_notifiers(
2151 struct lttng_event_notifier_group *event_notifier_group)
2152 {
2153 struct lttng_event_notifier_enabler *event_notifier_enabler;
2154
2155 list_for_each_entry(event_notifier_enabler, &event_notifier_group->enablers_head,
2156 node) {
2157 if (event_notifier_enabler->base.enabled)
2158 return true;
2159 }
2160 return false;
2161 }
2162
2163 bool lttng_event_notifier_active(void)
2164 {
2165 struct lttng_event_notifier_group *event_notifier_group;
2166
2167 list_for_each_entry(event_notifier_group, &event_notifier_groups, node) {
2168 if (lttng_event_notifier_group_has_active_event_notifiers(event_notifier_group))
2169 return true;
2170 }
2171 return false;
2172 }
2173
2174 int lttng_fix_pending_event_notifiers(void)
2175 {
2176 struct lttng_event_notifier_group *event_notifier_group;
2177
2178 list_for_each_entry(event_notifier_group, &event_notifier_groups, node)
2179 lttng_event_notifier_group_sync_enablers(event_notifier_group);
2180 return 0;
2181 }
2182
2183 struct lttng_event_enabler *lttng_event_enabler_create(
2184 enum lttng_enabler_format_type format_type,
2185 struct lttng_kernel_event *event_param,
2186 struct lttng_channel *chan)
2187 {
2188 struct lttng_event_enabler *event_enabler;
2189
2190 event_enabler = kzalloc(sizeof(*event_enabler), GFP_KERNEL);
2191 if (!event_enabler)
2192 return NULL;
2193 event_enabler->base.format_type = format_type;
2194 INIT_LIST_HEAD(&event_enabler->base.filter_bytecode_head);
2195 memcpy(&event_enabler->base.event_param, event_param,
2196 sizeof(event_enabler->base.event_param));
2197 event_enabler->chan = chan;
2198 /* ctx left NULL */
2199 event_enabler->base.enabled = 0;
2200 event_enabler->base.evtype = LTTNG_TYPE_ENABLER;
2201 mutex_lock(&sessions_mutex);
2202 list_add(&event_enabler->node, &event_enabler->chan->session->enablers_head);
2203 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
2204 mutex_unlock(&sessions_mutex);
2205 return event_enabler;
2206 }
2207
2208 int lttng_event_enabler_enable(struct lttng_event_enabler *event_enabler)
2209 {
2210 mutex_lock(&sessions_mutex);
2211 lttng_event_enabler_as_enabler(event_enabler)->enabled = 1;
2212 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
2213 mutex_unlock(&sessions_mutex);
2214 return 0;
2215 }
2216
2217 int lttng_event_enabler_disable(struct lttng_event_enabler *event_enabler)
2218 {
2219 mutex_lock(&sessions_mutex);
2220 lttng_event_enabler_as_enabler(event_enabler)->enabled = 0;
2221 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
2222 mutex_unlock(&sessions_mutex);
2223 return 0;
2224 }
2225
2226 static
2227 int lttng_enabler_attach_filter_bytecode(struct lttng_enabler *enabler,
2228 struct lttng_kernel_filter_bytecode __user *bytecode)
2229 {
2230 struct lttng_bytecode_node *bytecode_node;
2231 uint32_t bytecode_len;
2232 int ret;
2233
2234 ret = get_user(bytecode_len, &bytecode->len);
2235 if (ret)
2236 return ret;
2237 bytecode_node = lttng_kvzalloc(sizeof(*bytecode_node) + bytecode_len,
2238 GFP_KERNEL);
2239 if (!bytecode_node)
2240 return -ENOMEM;
2241 ret = copy_from_user(&bytecode_node->bc, bytecode,
2242 sizeof(*bytecode) + bytecode_len);
2243 if (ret)
2244 goto error_free;
2245
2246 bytecode_node->type = LTTNG_BYTECODE_NODE_TYPE_FILTER;
2247 bytecode_node->enabler = enabler;
2248 /* Enforce length based on allocated size */
2249 bytecode_node->bc.len = bytecode_len;
2250 list_add_tail(&bytecode_node->node, &enabler->filter_bytecode_head);
2251
2252 return 0;
2253
2254 error_free:
2255 lttng_kvfree(bytecode_node);
2256 return ret;
2257 }
2258
2259 int lttng_event_enabler_attach_filter_bytecode(struct lttng_event_enabler *event_enabler,
2260 struct lttng_kernel_filter_bytecode __user *bytecode)
2261 {
2262 int ret;
2263 ret = lttng_enabler_attach_filter_bytecode(
2264 lttng_event_enabler_as_enabler(event_enabler), bytecode);
2265 if (ret)
2266 goto error;
2267
2268 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
2269 return 0;
2270
2271 error:
2272 return ret;
2273 }
2274
2275 int lttng_event_add_callsite(struct lttng_event *event,
2276 struct lttng_kernel_event_callsite __user *callsite)
2277 {
2278
2279 switch (event->instrumentation) {
2280 case LTTNG_KERNEL_UPROBE:
2281 return lttng_uprobes_event_add_callsite(event, callsite);
2282 default:
2283 return -EINVAL;
2284 }
2285 }
2286
/*
 * Attaching a context to an event enabler is not implemented:
 * unconditionally report -ENOSYS to the caller.
 */
int lttng_event_enabler_attach_context(struct lttng_event_enabler *event_enabler,
		struct lttng_kernel_context *context_param)
{
	return -ENOSYS;
}
2292
2293 static
2294 void lttng_enabler_destroy(struct lttng_enabler *enabler)
2295 {
2296 struct lttng_bytecode_node *filter_node, *tmp_filter_node;
2297
2298 /* Destroy filter bytecode */
2299 list_for_each_entry_safe(filter_node, tmp_filter_node,
2300 &enabler->filter_bytecode_head, node) {
2301 lttng_kvfree(filter_node);
2302 }
2303 }
2304
/*
 * Tear down an event enabler: release its base resources (filter
 * bytecode list), destroy its contexts, unlink it from the session's
 * enabler list and free it.
 */
static
void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler)
{
	lttng_enabler_destroy(lttng_event_enabler_as_enabler(event_enabler));

	/* Destroy contexts */
	lttng_destroy_context(event_enabler->ctx);

	list_del(&event_enabler->node);
	kfree(event_enabler);
}
2316
2317 struct lttng_event_notifier_enabler *lttng_event_notifier_enabler_create(
2318 struct lttng_event_notifier_group *event_notifier_group,
2319 enum lttng_enabler_format_type format_type,
2320 struct lttng_kernel_event_notifier *event_notifier_param)
2321 {
2322 struct lttng_event_notifier_enabler *event_notifier_enabler;
2323
2324 event_notifier_enabler = kzalloc(sizeof(*event_notifier_enabler), GFP_KERNEL);
2325 if (!event_notifier_enabler)
2326 return NULL;
2327
2328 event_notifier_enabler->base.format_type = format_type;
2329 INIT_LIST_HEAD(&event_notifier_enabler->base.filter_bytecode_head);
2330 INIT_LIST_HEAD(&event_notifier_enabler->capture_bytecode_head);
2331
2332 event_notifier_enabler->num_captures = 0;
2333
2334 memcpy(&event_notifier_enabler->base.event_param, &event_notifier_param->event,
2335 sizeof(event_notifier_enabler->base.event_param));
2336 event_notifier_enabler->base.evtype = LTTNG_TYPE_ENABLER;
2337
2338 event_notifier_enabler->base.enabled = 0;
2339 event_notifier_enabler->base.user_token = event_notifier_param->event.token;
2340 event_notifier_enabler->group = event_notifier_group;
2341
2342 mutex_lock(&sessions_mutex);
2343 list_add(&event_notifier_enabler->node, &event_notifier_enabler->group->enablers_head);
2344 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2345
2346 mutex_unlock(&sessions_mutex);
2347
2348 return event_notifier_enabler;
2349 }
2350
2351 int lttng_event_notifier_enabler_enable(
2352 struct lttng_event_notifier_enabler *event_notifier_enabler)
2353 {
2354 mutex_lock(&sessions_mutex);
2355 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 1;
2356 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2357 mutex_unlock(&sessions_mutex);
2358 return 0;
2359 }
2360
2361 int lttng_event_notifier_enabler_disable(
2362 struct lttng_event_notifier_enabler *event_notifier_enabler)
2363 {
2364 mutex_lock(&sessions_mutex);
2365 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 0;
2366 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2367 mutex_unlock(&sessions_mutex);
2368 return 0;
2369 }
2370
2371 int lttng_event_notifier_enabler_attach_filter_bytecode(
2372 struct lttng_event_notifier_enabler *event_notifier_enabler,
2373 struct lttng_kernel_filter_bytecode __user *bytecode)
2374 {
2375 int ret;
2376
2377 ret = lttng_enabler_attach_filter_bytecode(
2378 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler),
2379 bytecode);
2380 if (ret)
2381 goto error;
2382
2383 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2384 return 0;
2385
2386 error:
2387 return ret;
2388 }
2389
2390 int lttng_event_notifier_enabler_attach_capture_bytecode(
2391 struct lttng_event_notifier_enabler *event_notifier_enabler,
2392 struct lttng_kernel_capture_bytecode __user *bytecode)
2393 {
2394 struct lttng_bytecode_node *bytecode_node;
2395 struct lttng_enabler *enabler =
2396 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler);
2397 uint32_t bytecode_len;
2398 int ret;
2399
2400 ret = get_user(bytecode_len, &bytecode->len);
2401 if (ret)
2402 return ret;
2403
2404 bytecode_node = lttng_kvzalloc(sizeof(*bytecode_node) + bytecode_len,
2405 GFP_KERNEL);
2406 if (!bytecode_node)
2407 return -ENOMEM;
2408
2409 ret = copy_from_user(&bytecode_node->bc, bytecode,
2410 sizeof(*bytecode) + bytecode_len);
2411 if (ret)
2412 goto error_free;
2413
2414 bytecode_node->type = LTTNG_BYTECODE_NODE_TYPE_CAPTURE;
2415 bytecode_node->enabler = enabler;
2416
2417 /* Enforce length based on allocated size */
2418 bytecode_node->bc.len = bytecode_len;
2419 list_add_tail(&bytecode_node->node, &event_notifier_enabler->capture_bytecode_head);
2420
2421 event_notifier_enabler->num_captures++;
2422
2423 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2424 goto end;
2425
2426 error_free:
2427 lttng_kvfree(bytecode_node);
2428 end:
2429 return ret;
2430 }
2431
2432 int lttng_event_notifier_add_callsite(struct lttng_event_notifier *event_notifier,
2433 struct lttng_kernel_event_callsite __user *callsite)
2434 {
2435
2436 switch (event_notifier->instrumentation) {
2437 case LTTNG_KERNEL_UPROBE:
2438 return lttng_uprobes_event_notifier_add_callsite(event_notifier,
2439 callsite);
2440 default:
2441 return -EINVAL;
2442 }
2443 }
2444
/*
 * Attaching a context to an event notifier enabler is not implemented:
 * unconditionally report -ENOSYS to the caller.
 */
int lttng_event_notifier_enabler_attach_context(
		struct lttng_event_notifier_enabler *event_notifier_enabler,
		struct lttng_kernel_context *context_param)
{
	return -ENOSYS;
}
2451
2452 static
2453 void lttng_event_notifier_enabler_destroy(
2454 struct lttng_event_notifier_enabler *event_notifier_enabler)
2455 {
2456 if (!event_notifier_enabler) {
2457 return;
2458 }
2459
2460 list_del(&event_notifier_enabler->node);
2461
2462 lttng_enabler_destroy(lttng_event_notifier_enabler_as_enabler(event_notifier_enabler));
2463 kfree(event_notifier_enabler);
2464 }
2465
2466 /*
2467 * lttng_session_sync_event_enablers should be called just before starting a
2468 * session.
2469 * Should be called with sessions mutex held.
2470 */
2471 static
2472 void lttng_session_sync_event_enablers(struct lttng_session *session)
2473 {
2474 struct lttng_event_enabler *event_enabler;
2475 struct lttng_event *event;
2476
2477 list_for_each_entry(event_enabler, &session->enablers_head, node)
2478 lttng_event_enabler_ref_events(event_enabler);
2479 /*
2480 * For each event, if at least one of its enablers is enabled,
2481 * and its channel and session transient states are enabled, we
2482 * enable the event, else we disable it.
2483 */
2484 list_for_each_entry(event, &session->events, list) {
2485 struct lttng_enabler_ref *enabler_ref;
2486 struct lttng_bytecode_runtime *runtime;
2487 int enabled = 0, has_enablers_without_bytecode = 0;
2488
2489 switch (event->instrumentation) {
2490 case LTTNG_KERNEL_TRACEPOINT:
2491 case LTTNG_KERNEL_SYSCALL:
2492 /* Enable events */
2493 list_for_each_entry(enabler_ref,
2494 &event->enablers_ref_head, node) {
2495 if (enabler_ref->ref->enabled) {
2496 enabled = 1;
2497 break;
2498 }
2499 }
2500 break;
2501 default:
2502 /* Not handled with lazy sync. */
2503 continue;
2504 }
2505 /*
2506 * Enabled state is based on union of enablers, with
2507 * intesection of session and channel transient enable
2508 * states.
2509 */
2510 enabled = enabled && session->tstate && event->chan->tstate;
2511
2512 WRITE_ONCE(event->enabled, enabled);
2513 /*
2514 * Sync tracepoint registration with event enabled
2515 * state.
2516 */
2517 if (enabled) {
2518 register_event(event);
2519 } else {
2520 _lttng_event_unregister(event);
2521 }
2522
2523 /* Check if has enablers without bytecode enabled */
2524 list_for_each_entry(enabler_ref,
2525 &event->enablers_ref_head, node) {
2526 if (enabler_ref->ref->enabled
2527 && list_empty(&enabler_ref->ref->filter_bytecode_head)) {
2528 has_enablers_without_bytecode = 1;
2529 break;
2530 }
2531 }
2532 event->has_enablers_without_bytecode =
2533 has_enablers_without_bytecode;
2534
2535 /* Enable filters */
2536 list_for_each_entry(runtime,
2537 &event->filter_bytecode_runtime_head, node)
2538 lttng_bytecode_filter_sync_state(runtime);
2539 }
2540 }
2541
2542 /*
2543 * Apply enablers to session events, adding events to session if need
2544 * be. It is required after each modification applied to an active
2545 * session, and right before session "start".
2546 * "lazy" sync means we only sync if required.
2547 * Should be called with sessions mutex held.
2548 */
2549 static
2550 void lttng_session_lazy_sync_event_enablers(struct lttng_session *session)
2551 {
2552 /* We can skip if session is not active */
2553 if (!session->active)
2554 return;
2555 lttng_session_sync_event_enablers(session);
2556 }
2557
/*
 * Sync the state of all event notifiers of a group against their
 * enablers: registration, enabled state, filter and capture bytecode
 * runtimes. Should be called with sessions mutex held.
 */
static
void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group)
{
	struct lttng_event_notifier_enabler *event_notifier_enabler;
	struct lttng_event_notifier *event_notifier;

	/* First pass: let each enabler create/reference matching event notifiers. */
	list_for_each_entry(event_notifier_enabler, &event_notifier_group->enablers_head, node)
		lttng_event_notifier_enabler_ref_event_notifiers(event_notifier_enabler);

	/*
	 * For each event_notifier, if at least one of its enablers is enabled,
	 * we enable the event_notifier, else we disable it.
	 */
	list_for_each_entry(event_notifier, &event_notifier_group->event_notifiers_head, list) {
		struct lttng_enabler_ref *enabler_ref;
		struct lttng_bytecode_runtime *runtime;
		int enabled = 0, has_enablers_without_bytecode = 0;

		switch (event_notifier->instrumentation) {
		case LTTNG_KERNEL_TRACEPOINT:
		case LTTNG_KERNEL_SYSCALL:
			/* Enable event_notifiers */
			list_for_each_entry(enabler_ref,
					&event_notifier->enablers_ref_head, node) {
				if (enabler_ref->ref->enabled) {
					enabled = 1;
					break;
				}
			}
			break;
		default:
			/* Not handled with sync. */
			continue;
		}

		WRITE_ONCE(event_notifier->enabled, enabled);
		/*
		 * Sync tracepoint registration with event_notifier enabled
		 * state.
		 */
		if (enabled) {
			if (!event_notifier->registered)
				register_event_notifier(event_notifier);
		} else {
			if (event_notifier->registered)
				_lttng_event_notifier_unregister(event_notifier);
		}

		/* Check if has enablers without bytecode enabled */
		list_for_each_entry(enabler_ref,
				&event_notifier->enablers_ref_head, node) {
			if (enabler_ref->ref->enabled
					&& list_empty(&enabler_ref->ref->filter_bytecode_head)) {
				has_enablers_without_bytecode = 1;
				break;
			}
		}
		event_notifier->has_enablers_without_bytecode =
			has_enablers_without_bytecode;

		/* Enable filters */
		list_for_each_entry(runtime,
				&event_notifier->filter_bytecode_runtime_head, node)
			lttng_bytecode_filter_sync_state(runtime);

		/* Enable captures */
		list_for_each_entry(runtime,
				&event_notifier->capture_bytecode_runtime_head, node)
			lttng_bytecode_capture_sync_state(runtime);
	}
}
2629
2630 /*
2631 * Serialize at most one packet worth of metadata into a metadata
2632 * channel.
2633 * We grab the metadata cache mutex to get exclusive access to our metadata
2634 * buffer and to the metadata cache. Exclusive access to the metadata buffer
2635 * allows us to do racy operations such as looking for remaining space left in
2636 * packet and write, since mutual exclusion protects us from concurrent writes.
2637 * Mutual exclusion on the metadata cache allow us to read the cache content
2638 * without racing against reallocation of the cache by updates.
2639 * Returns the number of bytes written in the channel, 0 if no data
2640 * was written and a negative value on error.
2641 */
2642 int lttng_metadata_output_channel(struct lttng_metadata_stream *stream,
2643 struct channel *chan, bool *coherent)
2644 {
2645 struct lib_ring_buffer_ctx ctx;
2646 int ret = 0;
2647 size_t len, reserve_len;
2648
2649 /*
2650 * Ensure we support mutiple get_next / put sequences followed by
2651 * put_next. The metadata cache lock protects reading the metadata
2652 * cache. It can indeed be read concurrently by "get_next_subbuf" and
2653 * "flush" operations on the buffer invoked by different processes.
2654 * Moreover, since the metadata cache memory can be reallocated, we
2655 * need to have exclusive access against updates even though we only
2656 * read it.
2657 */
2658 mutex_lock(&stream->metadata_cache->lock);
2659 WARN_ON(stream->metadata_in < stream->metadata_out);
2660 if (stream->metadata_in != stream->metadata_out)
2661 goto end;
2662
2663 /* Metadata regenerated, change the version. */
2664 if (stream->metadata_cache->version != stream->version)
2665 stream->version = stream->metadata_cache->version;
2666
2667 len = stream->metadata_cache->metadata_written -
2668 stream->metadata_in;
2669 if (!len)
2670 goto end;
2671 reserve_len = min_t(size_t,
2672 stream->transport->ops.packet_avail_size(chan),
2673 len);
2674 lib_ring_buffer_ctx_init(&ctx, chan, NULL, reserve_len,
2675 sizeof(char), -1);
2676 /*
2677 * If reservation failed, return an error to the caller.
2678 */
2679 ret = stream->transport->ops.event_reserve(&ctx, 0);
2680 if (ret != 0) {
2681 printk(KERN_WARNING "LTTng: Metadata event reservation failed\n");
2682 stream->coherent = false;
2683 goto end;
2684 }
2685 stream->transport->ops.event_write(&ctx,
2686 stream->metadata_cache->data + stream->metadata_in,
2687 reserve_len);
2688 stream->transport->ops.event_commit(&ctx);
2689 stream->metadata_in += reserve_len;
2690 if (reserve_len < len)
2691 stream->coherent = false;
2692 else
2693 stream->coherent = true;
2694 ret = reserve_len;
2695
2696 end:
2697 if (coherent)
2698 *coherent = stream->coherent;
2699 mutex_unlock(&stream->metadata_cache->lock);
2700 return ret;
2701 }
2702
/*
 * Open a metadata transaction: the first (outermost) nesting level
 * takes the metadata cache lock; deeper nested calls only increment
 * the "producing" refcount. Paired with lttng_metadata_end().
 */
static
void lttng_metadata_begin(struct lttng_session *session)
{
	if (atomic_inc_return(&session->metadata_cache->producing) == 1)
		mutex_lock(&session->metadata_cache->lock);
}
2709
/*
 * Close a metadata transaction: when the outermost nesting level is
 * reached, wake up readers of every metadata stream and release the
 * metadata cache lock taken by lttng_metadata_begin().
 */
static
void lttng_metadata_end(struct lttng_session *session)
{
	WARN_ON_ONCE(!atomic_read(&session->metadata_cache->producing));
	if (atomic_dec_return(&session->metadata_cache->producing) == 0) {
		struct lttng_metadata_stream *stream;

		/* Notify consumers that new metadata content is available. */
		list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list)
			wake_up_interruptible(&stream->read_wait);
		mutex_unlock(&session->metadata_cache->lock);
	}
}
2722
2723 /*
2724 * Write the metadata to the metadata cache.
2725 * Must be called with sessions_mutex held.
2726 * The metadata cache lock protects us from concurrent read access from
2727 * thread outputting metadata content to ring buffer.
2728 * The content of the printf is printed as a single atomic metadata
2729 * transaction.
2730 */
2731 int lttng_metadata_printf(struct lttng_session *session,
2732 const char *fmt, ...)
2733 {
2734 char *str;
2735 size_t len;
2736 va_list ap;
2737
2738 WARN_ON_ONCE(!LTTNG_READ_ONCE(session->active));
2739
2740 va_start(ap, fmt);
2741 str = kvasprintf(GFP_KERNEL, fmt, ap);
2742 va_end(ap);
2743 if (!str)
2744 return -ENOMEM;
2745
2746 len = strlen(str);
2747 WARN_ON_ONCE(!atomic_read(&session->metadata_cache->producing));
2748 if (session->metadata_cache->metadata_written + len >
2749 session->metadata_cache->cache_alloc) {
2750 char *tmp_cache_realloc;
2751 unsigned int tmp_cache_alloc_size;
2752
2753 tmp_cache_alloc_size = max_t(unsigned int,
2754 session->metadata_cache->cache_alloc + len,
2755 session->metadata_cache->cache_alloc << 1);
2756 tmp_cache_realloc = vzalloc(tmp_cache_alloc_size);
2757 if (!tmp_cache_realloc)
2758 goto err;
2759 if (session->metadata_cache->data) {
2760 memcpy(tmp_cache_realloc,
2761 session->metadata_cache->data,
2762 session->metadata_cache->cache_alloc);
2763 vfree(session->metadata_cache->data);
2764 }
2765
2766 session->metadata_cache->cache_alloc = tmp_cache_alloc_size;
2767 session->metadata_cache->data = tmp_cache_realloc;
2768 }
2769 memcpy(session->metadata_cache->data +
2770 session->metadata_cache->metadata_written,
2771 str, len);
2772 session->metadata_cache->metadata_written += len;
2773 kfree(str);
2774
2775 return 0;
2776
2777 err:
2778 kfree(str);
2779 return -ENOMEM;
2780 }
2781
2782 static
2783 int print_tabs(struct lttng_session *session, size_t nesting)
2784 {
2785 size_t i;
2786
2787 for (i = 0; i < nesting; i++) {
2788 int ret;
2789
2790 ret = lttng_metadata_printf(session, " ");
2791 if (ret) {
2792 return ret;
2793 }
2794 }
2795 return 0;
2796 }
2797
/*
 * Emit the field name terminator after its type description.
 * Field names are prefixed with "_" in the CTF metadata to avoid
 * clashing with reserved keywords.
 */
static
int lttng_field_name_statedump(struct lttng_session *session,
		const struct lttng_event_field *field,
		size_t nesting)
{
	return lttng_metadata_printf(session, " _%s;\n", field->name);
}
2805
/*
 * Statedump the CTF description of an integer type.
 * Must be called with sessions_mutex held.
 */
static
int _lttng_integer_type_statedump(struct lttng_session *session,
		const struct lttng_type *type,
		size_t nesting)
{
	int ret;

	WARN_ON_ONCE(type->atype != atype_integer);
	ret = print_tabs(session, nesting);
	if (ret)
		return ret;
	/*
	 * The byte_order attribute is only emitted when the integer's byte
	 * order differs from the native (trace) byte order.
	 */
	ret = lttng_metadata_printf(session,
		"integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s }",
		type->u.integer.size,
		type->u.integer.alignment,
		type->u.integer.signedness,
		(type->u.integer.encoding == lttng_encode_none)
			? "none"
			: (type->u.integer.encoding == lttng_encode_UTF8)
				? "UTF8"
				: "ASCII",
		type->u.integer.base,
#if __BYTE_ORDER == __BIG_ENDIAN
		type->u.integer.reverse_byte_order ? " byte_order = le;" : ""
#else
		type->u.integer.reverse_byte_order ? " byte_order = be;" : ""
#endif
	);
	return ret;
}
2836
2837 /*
2838 * Must be called with sessions_mutex held.
2839 */
2840 static
2841 int _lttng_struct_type_statedump(struct lttng_session *session,
2842 const struct lttng_type *type,
2843 size_t nesting)
2844 {
2845 int ret;
2846 uint32_t i, nr_fields;
2847 unsigned int alignment;
2848
2849 WARN_ON_ONCE(type->atype != atype_struct_nestable);
2850
2851 ret = print_tabs(session, nesting);
2852 if (ret)
2853 return ret;
2854 ret = lttng_metadata_printf(session,
2855 "struct {\n");
2856 if (ret)
2857 return ret;
2858 nr_fields = type->u.struct_nestable.nr_fields;
2859 for (i = 0; i < nr_fields; i++) {
2860 const struct lttng_event_field *iter_field;
2861
2862 iter_field = &type->u.struct_nestable.fields[i];
2863 ret = _lttng_field_statedump(session, iter_field, nesting + 1);
2864 if (ret)
2865 return ret;
2866 }
2867 ret = print_tabs(session, nesting);
2868 if (ret)
2869 return ret;
2870 alignment = type->u.struct_nestable.alignment;
2871 if (alignment) {
2872 ret = lttng_metadata_printf(session,
2873 "} align(%u)",
2874 alignment);
2875 } else {
2876 ret = lttng_metadata_printf(session,
2877 "}");
2878 }
2879 return ret;
2880 }
2881
2882 /*
2883 * Must be called with sessions_mutex held.
2884 */
2885 static
2886 int _lttng_struct_field_statedump(struct lttng_session *session,
2887 const struct lttng_event_field *field,
2888 size_t nesting)
2889 {
2890 int ret;
2891
2892 ret = _lttng_struct_type_statedump(session,
2893 &field->type, nesting);
2894 if (ret)
2895 return ret;
2896 return lttng_field_name_statedump(session, field, nesting);
2897 }
2898
2899 /*
2900 * Must be called with sessions_mutex held.
2901 */
2902 static
2903 int _lttng_variant_type_statedump(struct lttng_session *session,
2904 const struct lttng_type *type,
2905 size_t nesting)
2906 {
2907 int ret;
2908 uint32_t i, nr_choices;
2909
2910 WARN_ON_ONCE(type->atype != atype_variant_nestable);
2911 /*
2912 * CTF 1.8 does not allow expressing nonzero variant alignment in a nestable way.
2913 */
2914 if (type->u.variant_nestable.alignment != 0)
2915 return -EINVAL;
2916 ret = print_tabs(session, nesting);
2917 if (ret)
2918 return ret;
2919 ret = lttng_metadata_printf(session,
2920 "variant <_%s> {\n",
2921 type->u.variant_nestable.tag_name);
2922 if (ret)
2923 return ret;
2924 nr_choices = type->u.variant_nestable.nr_choices;
2925 for (i = 0; i < nr_choices; i++) {
2926 const struct lttng_event_field *iter_field;
2927
2928 iter_field = &type->u.variant_nestable.choices[i];
2929 ret = _lttng_field_statedump(session, iter_field, nesting + 1);
2930 if (ret)
2931 return ret;
2932 }
2933 ret = print_tabs(session, nesting);
2934 if (ret)
2935 return ret;
2936 ret = lttng_metadata_printf(session,
2937 "}");
2938 return ret;
2939 }
2940
2941 /*
2942 * Must be called with sessions_mutex held.
2943 */
2944 static
2945 int _lttng_variant_field_statedump(struct lttng_session *session,
2946 const struct lttng_event_field *field,
2947 size_t nesting)
2948 {
2949 int ret;
2950
2951 ret = _lttng_variant_type_statedump(session,
2952 &field->type, nesting);
2953 if (ret)
2954 return ret;
2955 return lttng_field_name_statedump(session, field, nesting);
2956 }
2957
2958 /*
2959 * Must be called with sessions_mutex held.
2960 */
2961 static
2962 int _lttng_array_field_statedump(struct lttng_session *session,
2963 const struct lttng_event_field *field,
2964 size_t nesting)
2965 {
2966 int ret;
2967 const struct lttng_type *elem_type;
2968
2969 WARN_ON_ONCE(field->type.atype != atype_array_nestable);
2970
2971 if (field->type.u.array_nestable.alignment) {
2972 ret = print_tabs(session, nesting);
2973 if (ret)
2974 return ret;
2975 ret = lttng_metadata_printf(session,
2976 "struct { } align(%u) _%s_padding;\n",
2977 field->type.u.array_nestable.alignment * CHAR_BIT,
2978 field->name);
2979 if (ret)
2980 return ret;
2981 }
2982 /*
2983 * Nested compound types: Only array of structures and variants are
2984 * currently supported.
2985 */
2986 elem_type = field->type.u.array_nestable.elem_type;
2987 switch (elem_type->atype) {
2988 case atype_integer:
2989 case atype_struct_nestable:
2990 case atype_variant_nestable:
2991 ret = _lttng_type_statedump(session, elem_type, nesting);
2992 if (ret)
2993 return ret;
2994 break;
2995
2996 default:
2997 return -EINVAL;
2998 }
2999 ret = lttng_metadata_printf(session,
3000 " _%s[%u];\n",
3001 field->name,
3002 field->type.u.array_nestable.length);
3003 return ret;
3004 }
3005
3006 /*
3007 * Must be called with sessions_mutex held.
3008 */
3009 static
3010 int _lttng_sequence_field_statedump(struct lttng_session *session,
3011 const struct lttng_event_field *field,
3012 size_t nesting)
3013 {
3014 int ret;
3015 const char *length_name;
3016 const struct lttng_type *elem_type;
3017
3018 WARN_ON_ONCE(field->type.atype != atype_sequence_nestable);
3019
3020 length_name = field->type.u.sequence_nestable.length_name;
3021
3022 if (field->type.u.sequence_nestable.alignment) {
3023 ret = print_tabs(session, nesting);
3024 if (ret)
3025 return ret;
3026 ret = lttng_metadata_printf(session,
3027 "struct { } align(%u) _%s_padding;\n",
3028 field->type.u.sequence_nestable.alignment * CHAR_BIT,
3029 field->name);
3030 if (ret)
3031 return ret;
3032 }
3033
3034 /*
3035 * Nested compound types: Only array of structures and variants are
3036 * currently supported.
3037 */
3038 elem_type = field->type.u.sequence_nestable.elem_type;
3039 switch (elem_type->atype) {
3040 case atype_integer:
3041 case atype_struct_nestable:
3042 case atype_variant_nestable:
3043 ret = _lttng_type_statedump(session, elem_type, nesting);
3044 if (ret)
3045 return ret;
3046 break;
3047
3048 default:
3049 return -EINVAL;
3050 }
3051 ret = lttng_metadata_printf(session,
3052 " _%s[ _%s ];\n",
3053 field->name,
3054 field->type.u.sequence_nestable.length_name);
3055 return ret;
3056 }
3057
3058 /*
3059 * Must be called with sessions_mutex held.
3060 */
3061 static
3062 int _lttng_enum_type_statedump(struct lttng_session *session,
3063 const struct lttng_type *type,
3064 size_t nesting)
3065 {
3066 const struct lttng_enum_desc *enum_desc;
3067 const struct lttng_type *container_type;
3068 int ret;
3069 unsigned int i, nr_entries;
3070
3071 container_type = type->u.enum_nestable.container_type;
3072 if (container_type->atype != atype_integer) {
3073 ret = -EINVAL;
3074 goto end;
3075 }
3076 enum_desc = type->u.enum_nestable.desc;
3077 nr_entries = enum_desc->nr_entries;
3078
3079 ret = print_tabs(session, nesting);
3080 if (ret)
3081 goto end;
3082 ret = lttng_metadata_printf(session, "enum : ");
3083 if (ret)
3084 goto end;
3085 ret = _lttng_integer_type_statedump(session, container_type, 0);
3086 if (ret)
3087 goto end;
3088 ret = lttng_metadata_printf(session, " {\n");
3089 if (ret)
3090 goto end;
3091 /* Dump all entries */
3092 for (i = 0; i < nr_entries; i++) {
3093 const struct lttng_enum_entry *entry = &enum_desc->entries[i];
3094 int j, len;
3095
3096 ret = print_tabs(session, nesting + 1);
3097 if (ret)
3098 goto end;
3099 ret = lttng_metadata_printf(session,
3100 "\"");
3101 if (ret)
3102 goto end;
3103 len = strlen(entry->string);
3104 /* Escape the character '"' */
3105 for (j = 0; j < len; j++) {
3106 char c = entry->string[j];
3107
3108 switch (c) {
3109 case '"':
3110 ret = lttng_metadata_printf(session,
3111 "\\\"");
3112 break;
3113 case '\\':
3114 ret = lttng_metadata_printf(session,
3115 "\\\\");
3116 break;
3117 default:
3118 ret = lttng_metadata_printf(session,
3119 "%c", c);
3120 break;
3121 }
3122 if (ret)
3123 goto end;
3124 }
3125 ret = lttng_metadata_printf(session, "\"");
3126 if (ret)
3127 goto end;
3128
3129 if (entry->options.is_auto) {
3130 ret = lttng_metadata_printf(session, ",\n");
3131 if (ret)
3132 goto end;
3133 } else {
3134 ret = lttng_metadata_printf(session,
3135 " = ");
3136 if (ret)
3137 goto end;
3138 if (entry->start.signedness)
3139 ret = lttng_metadata_printf(session,
3140 "%lld", (long long) entry->start.value);
3141 else
3142 ret = lttng_metadata_printf(session,
3143 "%llu", entry->start.value);
3144 if (ret)
3145 goto end;
3146 if (entry->start.signedness == entry->end.signedness &&
3147 entry->start.value
3148 == entry->end.value) {
3149 ret = lttng_metadata_printf(session,
3150 ",\n");
3151 } else {
3152 if (entry->end.signedness) {
3153 ret = lttng_metadata_printf(session,
3154 " ... %lld,\n",
3155 (long long) entry->end.value);
3156 } else {
3157 ret = lttng_metadata_printf(session,
3158 " ... %llu,\n",
3159 entry->end.value);
3160 }
3161 }
3162 if (ret)
3163 goto end;
3164 }
3165 }
3166 ret = print_tabs(session, nesting);
3167 if (ret)
3168 goto end;
3169 ret = lttng_metadata_printf(session, "}");
3170 end:
3171 return ret;
3172 }
3173
3174 /*
3175 * Must be called with sessions_mutex held.
3176 */
3177 static
3178 int _lttng_enum_field_statedump(struct lttng_session *session,
3179 const struct lttng_event_field *field,
3180 size_t nesting)
3181 {
3182 int ret;
3183
3184 ret = _lttng_enum_type_statedump(session, &field->type, nesting);
3185 if (ret)
3186 return ret;
3187 return lttng_field_name_statedump(session, field, nesting);
3188 }
3189
3190 static
3191 int _lttng_integer_field_statedump(struct lttng_session *session,
3192 const struct lttng_event_field *field,
3193 size_t nesting)
3194 {
3195 int ret;
3196
3197 ret = _lttng_integer_type_statedump(session, &field->type, nesting);
3198 if (ret)
3199 return ret;
3200 return lttng_field_name_statedump(session, field, nesting);
3201 }
3202
3203 static
3204 int _lttng_string_type_statedump(struct lttng_session *session,
3205 const struct lttng_type *type,
3206 size_t nesting)
3207 {
3208 int ret;
3209
3210 WARN_ON_ONCE(type->atype != atype_string);
3211 /* Default encoding is UTF8 */
3212 ret = print_tabs(session, nesting);
3213 if (ret)
3214 return ret;
3215 ret = lttng_metadata_printf(session,
3216 "string%s",
3217 type->u.string.encoding == lttng_encode_ASCII ?
3218 " { encoding = ASCII; }" : "");
3219 return ret;
3220 }
3221
3222 static
3223 int _lttng_string_field_statedump(struct lttng_session *session,
3224 const struct lttng_event_field *field,
3225 size_t nesting)
3226 {
3227 int ret;
3228
3229 WARN_ON_ONCE(field->type.atype != atype_string);
3230 ret = _lttng_string_type_statedump(session, &field->type, nesting);
3231 if (ret)
3232 return ret;
3233 return lttng_field_name_statedump(session, field, nesting);
3234 }
3235
3236 /*
3237 * Must be called with sessions_mutex held.
3238 */
3239 static
3240 int _lttng_type_statedump(struct lttng_session *session,
3241 const struct lttng_type *type,
3242 size_t nesting)
3243 {
3244 int ret = 0;
3245
3246 switch (type->atype) {
3247 case atype_integer:
3248 ret = _lttng_integer_type_statedump(session, type, nesting);
3249 break;
3250 case atype_enum_nestable:
3251 ret = _lttng_enum_type_statedump(session, type, nesting);
3252 break;
3253 case atype_string:
3254 ret = _lttng_string_type_statedump(session, type, nesting);
3255 break;
3256 case atype_struct_nestable:
3257 ret = _lttng_struct_type_statedump(session, type, nesting);
3258 break;
3259 case atype_variant_nestable:
3260 ret = _lttng_variant_type_statedump(session, type, nesting);
3261 break;
3262
3263 /* Nested arrays and sequences are not supported yet. */
3264 case atype_array_nestable:
3265 case atype_sequence_nestable:
3266 default:
3267 WARN_ON_ONCE(1);
3268 return -EINVAL;
3269 }
3270 return ret;
3271 }
3272
3273 /*
3274 * Must be called with sessions_mutex held.
3275 */
3276 static
3277 int _lttng_field_statedump(struct lttng_session *session,
3278 const struct lttng_event_field *field,
3279 size_t nesting)
3280 {
3281 int ret = 0;
3282
3283 switch (field->type.atype) {
3284 case atype_integer:
3285 ret = _lttng_integer_field_statedump(session, field, nesting);
3286 break;
3287 case atype_enum_nestable:
3288 ret = _lttng_enum_field_statedump(session, field, nesting);
3289 break;
3290 case atype_string:
3291 ret = _lttng_string_field_statedump(session, field, nesting);
3292 break;
3293 case atype_struct_nestable:
3294 ret = _lttng_struct_field_statedump(session, field, nesting);
3295 break;
3296 case atype_array_nestable:
3297 ret = _lttng_array_field_statedump(session, field, nesting);
3298 break;
3299 case atype_sequence_nestable:
3300 ret = _lttng_sequence_field_statedump(session, field, nesting);
3301 break;
3302 case atype_variant_nestable:
3303 ret = _lttng_variant_field_statedump(session, field, nesting);
3304 break;
3305
3306 default:
3307 WARN_ON_ONCE(1);
3308 return -EINVAL;
3309 }
3310 return ret;
3311 }
3312
3313 static
3314 int _lttng_context_metadata_statedump(struct lttng_session *session,
3315 struct lttng_ctx *ctx)
3316 {
3317 int ret = 0;
3318 int i;
3319
3320 if (!ctx)
3321 return 0;
3322 for (i = 0; i < ctx->nr_fields; i++) {
3323 const struct lttng_ctx_field *field = &ctx->fields[i];
3324
3325 ret = _lttng_field_statedump(session, &field->event_field, 2);
3326 if (ret)
3327 return ret;
3328 }
3329 return ret;
3330 }
3331
3332 static
3333 int _lttng_fields_metadata_statedump(struct lttng_session *session,
3334 struct lttng_event *event)
3335 {
3336 const struct lttng_event_desc *desc = event->desc;
3337 int ret = 0;
3338 int i;
3339
3340 for (i = 0; i < desc->nr_fields; i++) {
3341 const struct lttng_event_field *field = &desc->fields[i];
3342
3343 ret = _lttng_field_statedump(session, field, 2);
3344 if (ret)
3345 return ret;
3346 }
3347 return ret;
3348 }
3349
3350 /*
3351 * Must be called with sessions_mutex held.
3352 * The entire event metadata is printed as a single atomic metadata
3353 * transaction.
3354 */
static
int _lttng_event_metadata_statedump(struct lttng_session *session,
		struct lttng_channel *chan,
		struct lttng_event *event)
{
	int ret = 0;

	/* Skip if already described, or if the session is not active. */
	if (event->metadata_dumped || !LTTNG_READ_ONCE(session->active))
		return 0;
	/* The metadata channel's own events are not described in the metadata. */
	if (chan->channel_type == METADATA_CHANNEL)
		return 0;

	/* Open an atomic metadata transaction (closed by lttng_metadata_end). */
	lttng_metadata_begin(session);

	ret = lttng_metadata_printf(session,
		"event {\n"
		"	name = \"%s\";\n"
		"	id = %u;\n"
		"	stream_id = %u;\n",
		event->desc->name,
		event->id,
		event->chan->id);
	if (ret)
		goto end;

	/* Event context struct is only emitted when a context is attached. */
	if (event->ctx) {
		ret = lttng_metadata_printf(session,
			"	context := struct {\n");
		if (ret)
			goto end;
	}
	ret = _lttng_context_metadata_statedump(session, event->ctx);
	if (ret)
		goto end;
	if (event->ctx) {
		ret = lttng_metadata_printf(session,
			"	};\n");
		if (ret)
			goto end;
	}

	ret = lttng_metadata_printf(session,
		"	fields := struct {\n"
		);
	if (ret)
		goto end;

	ret = _lttng_fields_metadata_statedump(session, event);
	if (ret)
		goto end;

	/*
	 * LTTng space reservation can only reserve multiples of the
	 * byte size.
	 */
	ret = lttng_metadata_printf(session,
		"	};\n"
		"};\n\n");
	if (ret)
		goto end;

	/* Only mark as dumped once the whole description was emitted. */
	event->metadata_dumped = 1;
end:
	lttng_metadata_end(session);
	return ret;

}
3422
3423 /*
3424 * Must be called with sessions_mutex held.
3425 * The entire channel metadata is printed as a single atomic metadata
3426 * transaction.
3427 */
3428 static
3429 int _lttng_channel_metadata_statedump(struct lttng_session *session,
3430 struct lttng_channel *chan)
3431 {
3432 int ret = 0;
3433
3434 if (chan->metadata_dumped || !LTTNG_READ_ONCE(session->active))
3435 return 0;
3436
3437 if (chan->channel_type == METADATA_CHANNEL)
3438 return 0;
3439
3440 lttng_metadata_begin(session);
3441
3442 WARN_ON_ONCE(!chan->header_type);
3443 ret = lttng_metadata_printf(session,
3444 "stream {\n"
3445 " id = %u;\n"
3446 " event.header := %s;\n"
3447 " packet.context := struct packet_context;\n",
3448 chan->id,
3449 chan->header_type == 1 ? "struct event_header_compact" :
3450 "struct event_header_large");
3451 if (ret)
3452 goto end;
3453
3454 if (chan->ctx) {
3455 ret = lttng_metadata_printf(session,
3456 " event.context := struct {\n");
3457 if (ret)
3458 goto end;
3459 }
3460 ret = _lttng_context_metadata_statedump(session, chan->ctx);
3461 if (ret)
3462 goto end;
3463 if (chan->ctx) {
3464 ret = lttng_metadata_printf(session,
3465 " };\n");
3466 if (ret)
3467 goto end;
3468 }
3469
3470 ret = lttng_metadata_printf(session,
3471 "};\n\n");
3472
3473 chan->metadata_dumped = 1;
3474 end:
3475 lttng_metadata_end(session);
3476 return ret;
3477 }
3478
3479 /*
3480 * Must be called with sessions_mutex held.
3481 */
static
int _lttng_stream_packet_context_declare(struct lttng_session *session)
{
	/*
	 * Declare the CTF packet context layout shared by all data streams:
	 * begin/end timestamps, content/packet sizes, sequence number,
	 * discarded event count and originating CPU.
	 */
	return lttng_metadata_printf(session,
		"struct packet_context {\n"
		"	uint64_clock_monotonic_t timestamp_begin;\n"
		"	uint64_clock_monotonic_t timestamp_end;\n"
		"	uint64_t content_size;\n"
		"	uint64_t packet_size;\n"
		"	uint64_t packet_seq_num;\n"
		"	unsigned long events_discarded;\n"
		"	uint32_t cpu_id;\n"
		"};\n\n"
		);
}
3497
3498 /*
3499 * Compact header:
3500 * id: range: 0 - 30.
3501 * id 31 is reserved to indicate an extended header.
3502 *
3503 * Large header:
3504 * id: range: 0 - 65534.
3505 * id 65535 is reserved to indicate an extended header.
3506 *
3507 * Must be called with sessions_mutex held.
3508 */
static
int _lttng_event_header_declare(struct lttng_session *session)
{
	/*
	 * Declare both event header variants. The compact header uses a
	 * 5-bit id with a 27-bit timestamp; the large header a 16-bit id
	 * with a 32-bit timestamp. The reserved id (31 resp. 65535) selects
	 * the extended form carrying a full 32-bit id and 64-bit timestamp.
	 */
	return lttng_metadata_printf(session,
	"struct event_header_compact {\n"
	"	enum : uint5_t { compact = 0 ... 30, extended = 31 } id;\n"
	"	variant <id> {\n"
	"		struct {\n"
	"			uint27_clock_monotonic_t timestamp;\n"
	"		} compact;\n"
	"		struct {\n"
	"			uint32_t id;\n"
	"			uint64_clock_monotonic_t timestamp;\n"
	"		} extended;\n"
	"	} v;\n"
	"} align(%u);\n"
	"\n"
	"struct event_header_large {\n"
	"	enum : uint16_t { compact = 0 ... 65534, extended = 65535 } id;\n"
	"	variant <id> {\n"
	"		struct {\n"
	"			uint32_clock_monotonic_t timestamp;\n"
	"		} compact;\n"
	"		struct {\n"
	"			uint32_t id;\n"
	"			uint64_clock_monotonic_t timestamp;\n"
	"		} extended;\n"
	"	} v;\n"
	"} align(%u);\n\n",
	lttng_alignof(uint32_t) * CHAR_BIT,
	lttng_alignof(uint16_t) * CHAR_BIT
	);
}
3542
3543 /*
3544 * Approximation of NTP time of day to clock monotonic correlation,
3545 * taken at start of trace.
3546 * Yes, this is only an approximation. Yes, we can (and will) do better
3547 * in future versions.
3548 * This function may return a negative offset. It may happen if the
3549 * system sets the REALTIME clock to 0 after boot.
3550 *
3551 * Use 64bit timespec on kernels that have it, this makes 32bit arch
3552 * y2038 compliant.
3553 */
static
int64_t measure_clock_offset(void)
{
	uint64_t monotonic_avg, monotonic[2], realtime;
	uint64_t tcf = trace_clock_freq();
	int64_t offset;
	unsigned long flags;
#ifdef LTTNG_KERNEL_HAS_TIMESPEC64
	struct timespec64 rts = { 0, 0 };
#else
	struct timespec rts = { 0, 0 };
#endif

	/* Disable interrupts to increase correlation precision. */
	local_irq_save(flags);
	/* Bracket the realtime read with two trace clock reads. */
	monotonic[0] = trace_clock_read64();
#ifdef LTTNG_KERNEL_HAS_TIMESPEC64
	ktime_get_real_ts64(&rts);
#else
	getnstimeofday(&rts);
#endif
	monotonic[1] = trace_clock_read64();
	local_irq_restore(flags);

	/* Approximate the trace clock value at the realtime sample point. */
	monotonic_avg = (monotonic[0] + monotonic[1]) >> 1;
	/* Convert realtime seconds to trace clock ticks. */
	realtime = (uint64_t) rts.tv_sec * tcf;
	if (tcf == NSEC_PER_SEC) {
		/* Fast path: trace clock is in nanoseconds. */
		realtime += rts.tv_nsec;
	} else {
		/* Scale nanoseconds to the trace clock frequency. */
		uint64_t n = rts.tv_nsec * tcf;

		do_div(n, NSEC_PER_SEC);
		realtime += n;
	}
	/* May be negative, e.g. if REALTIME was set to 0 after boot. */
	offset = (int64_t) realtime - monotonic_avg;
	return offset;
}
3591
3592 static
3593 int print_escaped_ctf_string(struct lttng_session *session, const char *string)
3594 {
3595 int ret = 0;
3596 size_t i;
3597 char cur;
3598
3599 i = 0;
3600 cur = string[i];
3601 while (cur != '\0') {
3602 switch (cur) {
3603 case '\n':
3604 ret = lttng_metadata_printf(session, "%s", "\\n");
3605 break;
3606 case '\\':
3607 case '"':
3608 ret = lttng_metadata_printf(session, "%c", '\\');
3609 if (ret)
3610 goto error;
3611 /* We still print the current char */
3612 /* Fallthrough */
3613 default:
3614 ret = lttng_metadata_printf(session, "%c", cur);
3615 break;
3616 }
3617
3618 if (ret)
3619 goto error;
3620
3621 cur = string[++i];
3622 }
3623 error:
3624 return ret;
3625 }
3626
static
int print_metadata_escaped_field(struct lttng_session *session, const char *field,
		const char *field_value)
{
	int ret;

	/* Emit: <tab>field = "<escaped value>";\n */
	ret = lttng_metadata_printf(session, "	%s = \"", field);
	if (ret)
		return ret;
	ret = print_escaped_ctf_string(session, field_value);
	if (ret)
		return ret;
	return lttng_metadata_printf(session, "\";\n");
}
3646
3647 /*
3648 * Output metadata into this session's metadata buffers.
3649 * Must be called with sessions_mutex held.
3650 */
3651 static
3652 int _lttng_session_metadata_statedump(struct lttng_session *session)
3653 {
3654 unsigned char *uuid_c = session->uuid.b;
3655 unsigned char uuid_s[37], clock_uuid_s[BOOT_ID_LEN];
3656 const char *product_uuid;
3657 struct lttng_channel *chan;
3658 struct lttng_event *event;
3659 int ret = 0;
3660
3661 if (!LTTNG_READ_ONCE(session->active))
3662 return 0;
3663
3664 lttng_metadata_begin(session);
3665
3666 if (session->metadata_dumped)
3667 goto skip_session;
3668
3669 snprintf(uuid_s, sizeof(uuid_s),
3670 "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
3671 uuid_c[0], uuid_c[1], uuid_c[2], uuid_c[3],
3672 uuid_c[4], uuid_c[5], uuid_c[6], uuid_c[7],
3673 uuid_c[8], uuid_c[9], uuid_c[10], uuid_c[11],
3674 uuid_c[12], uuid_c[13], uuid_c[14], uuid_c[15]);
3675
3676 ret = lttng_metadata_printf(session,
3677 "typealias integer { size = 8; align = %u; signed = false; } := uint8_t;\n"
3678 "typealias integer { size = 16; align = %u; signed = false; } := uint16_t;\n"
3679 "typealias integer { size = 32; align = %u; signed = false; } := uint32_t;\n"
3680 "typealias integer { size = 64; align = %u; signed = false; } := uint64_t;\n"
3681 "typealias integer { size = %u; align = %u; signed = false; } := unsigned long;\n"
3682 "typealias integer { size = 5; align = 1; signed = false; } := uint5_t;\n"
3683 "typealias integer { size = 27; align = 1; signed = false; } := uint27_t;\n"
3684 "\n"
3685 "trace {\n"
3686 " major = %u;\n"
3687 " minor = %u;\n"
3688 " uuid = \"%s\";\n"
3689 " byte_order = %s;\n"
3690 " packet.header := struct {\n"
3691 " uint32_t magic;\n"
3692 " uint8_t uuid[16];\n"
3693 " uint32_t stream_id;\n"
3694 " uint64_t stream_instance_id;\n"
3695 " };\n"
3696 "};\n\n",
3697 lttng_alignof(uint8_t) * CHAR_BIT,
3698 lttng_alignof(uint16_t) * CHAR_BIT,
3699 lttng_alignof(uint32_t) * CHAR_BIT,
3700 lttng_alignof(uint64_t) * CHAR_BIT,
3701 sizeof(unsigned long) * CHAR_BIT,
3702 lttng_alignof(unsigned long) * CHAR_BIT,
3703 CTF_SPEC_MAJOR,
3704 CTF_SPEC_MINOR,
3705 uuid_s,
3706 #if __BYTE_ORDER == __BIG_ENDIAN
3707 "be"
3708 #else
3709 "le"
3710 #endif
3711 );
3712 if (ret)
3713 goto end;
3714
3715 ret = lttng_metadata_printf(session,
3716 "env {\n"
3717 " hostname = \"%s\";\n"
3718 " domain = \"kernel\";\n"
3719 " sysname = \"%s\";\n"
3720 " kernel_release = \"%s\";\n"
3721 " kernel_version = \"%s\";\n"
3722 " tracer_name = \"lttng-modules\";\n"
3723 " tracer_major = %d;\n"
3724 " tracer_minor = %d;\n"
3725 " tracer_patchlevel = %d;\n"
3726 " trace_buffering_scheme = \"global\";\n",
3727 current->nsproxy->uts_ns->name.nodename,
3728 utsname()->sysname,
3729 utsname()->release,
3730 utsname()->version,
3731 LTTNG_MODULES_MAJOR_VERSION,
3732 LTTNG_MODULES_MINOR_VERSION,
3733 LTTNG_MODULES_PATCHLEVEL_VERSION
3734 );
3735 if (ret)
3736 goto end;
3737
3738 ret = print_metadata_escaped_field(session, "trace_name", session->name);
3739 if (ret)
3740 goto end;
3741 ret = print_metadata_escaped_field(session, "trace_creation_datetime",
3742 session->creation_time);
3743 if (ret)
3744 goto end;
3745
3746 /* Add the product UUID to the 'env' section */
3747 product_uuid = dmi_get_system_info(DMI_PRODUCT_UUID);
3748 if (product_uuid) {
3749 ret = lttng_metadata_printf(session,
3750 " product_uuid = \"%s\";\n",
3751 product_uuid
3752 );
3753 if (ret)
3754 goto end;
3755 }
3756
3757 /* Close the 'env' section */
3758 ret = lttng_metadata_printf(session, "};\n\n");
3759 if (ret)
3760 goto end;
3761
3762 ret = lttng_metadata_printf(session,
3763 "clock {\n"
3764 " name = \"%s\";\n",
3765 trace_clock_name()
3766 );
3767 if (ret)
3768 goto end;
3769
3770 if (!trace_clock_uuid(clock_uuid_s)) {
3771 ret = lttng_metadata_printf(session,
3772 " uuid = \"%s\";\n",
3773 clock_uuid_s
3774 );
3775 if (ret)
3776 goto end;
3777 }
3778
3779 ret = lttng_metadata_printf(session,
3780 " description = \"%s\";\n"
3781 " freq = %llu; /* Frequency, in Hz */\n"
3782 " /* clock value offset from Epoch is: offset * (1/freq) */\n"
3783 " offset = %lld;\n"
3784 "};\n\n",
3785 trace_clock_description(),
3786 (unsigned long long) trace_clock_freq(),
3787 (long long) measure_clock_offset()
3788 );
3789 if (ret)
3790 goto end;
3791
3792 ret = lttng_metadata_printf(session,
3793 "typealias integer {\n"
3794 " size = 27; align = 1; signed = false;\n"
3795 " map = clock.%s.value;\n"
3796 "} := uint27_clock_monotonic_t;\n"
3797 "\n"
3798 "typealias integer {\n"
3799 " size = 32; align = %u; signed = false;\n"
3800 " map = clock.%s.value;\n"
3801 "} := uint32_clock_monotonic_t;\n"
3802 "\n"
3803 "typealias integer {\n"
3804 " size = 64; align = %u; signed = false;\n"
3805 " map = clock.%s.value;\n"
3806 "} := uint64_clock_monotonic_t;\n\n",
3807 trace_clock_name(),
3808 lttng_alignof(uint32_t) * CHAR_BIT,
3809 trace_clock_name(),
3810 lttng_alignof(uint64_t) * CHAR_BIT,
3811 trace_clock_name()
3812 );
3813 if (ret)
3814 goto end;
3815
3816 ret = _lttng_stream_packet_context_declare(session);
3817 if (ret)
3818 goto end;
3819
3820 ret = _lttng_event_header_declare(session);
3821 if (ret)
3822 goto end;
3823
3824 skip_session:
3825 list_for_each_entry(chan, &session->chan, list) {
3826 ret = _lttng_channel_metadata_statedump(session, chan);
3827 if (ret)
3828 goto end;
3829 }
3830
3831 list_for_each_entry(event, &session->events, list) {
3832 ret = _lttng_event_metadata_statedump(session, event->chan, event);
3833 if (ret)
3834 goto end;
3835 }
3836 session->metadata_dumped = 1;
3837 end:
3838 lttng_metadata_end(session);
3839 return ret;
3840 }
3841
3842 /**
3843 * lttng_transport_register - LTT transport registration
3844 * @transport: transport structure
3845 *
3846 * Registers a transport which can be used as output to extract the data out of
3847 * LTTng. The module calling this registration function must ensure that no
3848 * trap-inducing code will be executed by the transport functions. E.g.
3849 * vmalloc_sync_mappings() must be called between a vmalloc and the moment the memory
3850 * is made visible to the transport function. This registration acts as a
3851 * vmalloc_sync_mappings. Therefore, only if the module allocates virtual memory
3852 * after its registration must it synchronize the TLBs.
3853 */
void lttng_transport_register(struct lttng_transport *transport)
{
	/*
	 * Make sure no page fault can be triggered by the module about to be
	 * registered. We deal with this here so we don't have to call
	 * vmalloc_sync_mappings() in each module's init.
	 */
	wrapper_vmalloc_sync_mappings();

	/* The transport list is protected by the sessions mutex. */
	mutex_lock(&sessions_mutex);
	list_add_tail(&transport->node, &lttng_transport_list);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_transport_register);
3868
3869 /**
3870 * lttng_transport_unregister - LTT transport unregistration
3871 * @transport: transport structure
3872 */
void lttng_transport_unregister(struct lttng_transport *transport)
{
	/* Remove the transport from the global list, under the sessions mutex. */
	mutex_lock(&sessions_mutex);
	list_del(&transport->node);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_transport_unregister);
3880
3881 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
3882
3883 enum cpuhp_state lttng_hp_prepare;
3884 enum cpuhp_state lttng_hp_online;
3885
3886 static int lttng_hotplug_prepare(unsigned int cpu, struct hlist_node *node)
3887 {
3888 struct lttng_cpuhp_node *lttng_node;
3889
3890 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
3891 switch (lttng_node->component) {
3892 case LTTNG_RING_BUFFER_FRONTEND:
3893 return 0;
3894 case LTTNG_RING_BUFFER_BACKEND:
3895 return lttng_cpuhp_rb_backend_prepare(cpu, lttng_node);
3896 case LTTNG_RING_BUFFER_ITER:
3897 return 0;
3898 case LTTNG_CONTEXT_PERF_COUNTERS:
3899 return 0;
3900 default:
3901 return -EINVAL;
3902 }
3903 }
3904
3905 static int lttng_hotplug_dead(unsigned int cpu, struct hlist_node *node)
3906 {
3907 struct lttng_cpuhp_node *lttng_node;
3908
3909 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
3910 switch (lttng_node->component) {
3911 case LTTNG_RING_BUFFER_FRONTEND:
3912 return lttng_cpuhp_rb_frontend_dead(cpu, lttng_node);
3913 case LTTNG_RING_BUFFER_BACKEND:
3914 return 0;
3915 case LTTNG_RING_BUFFER_ITER:
3916 return 0;
3917 case LTTNG_CONTEXT_PERF_COUNTERS:
3918 return lttng_cpuhp_perf_counter_dead(cpu, lttng_node);
3919 default:
3920 return -EINVAL;
3921 }
3922 }
3923
3924 static int lttng_hotplug_online(unsigned int cpu, struct hlist_node *node)
3925 {
3926 struct lttng_cpuhp_node *lttng_node;
3927
3928 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
3929 switch (lttng_node->component) {
3930 case LTTNG_RING_BUFFER_FRONTEND:
3931 return lttng_cpuhp_rb_frontend_online(cpu, lttng_node);
3932 case LTTNG_RING_BUFFER_BACKEND:
3933 return 0;
3934 case LTTNG_RING_BUFFER_ITER:
3935 return lttng_cpuhp_rb_iter_online(cpu, lttng_node);
3936 case LTTNG_CONTEXT_PERF_COUNTERS:
3937 return lttng_cpuhp_perf_counter_online(cpu, lttng_node);
3938 default:
3939 return -EINVAL;
3940 }
3941 }
3942
3943 static int lttng_hotplug_offline(unsigned int cpu, struct hlist_node *node)
3944 {
3945 struct lttng_cpuhp_node *lttng_node;
3946
3947 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
3948 switch (lttng_node->component) {
3949 case LTTNG_RING_BUFFER_FRONTEND:
3950 return lttng_cpuhp_rb_frontend_offline(cpu, lttng_node);
3951 case LTTNG_RING_BUFFER_BACKEND:
3952 return 0;
3953 case LTTNG_RING_BUFFER_ITER:
3954 return 0;
3955 case LTTNG_CONTEXT_PERF_COUNTERS:
3956 return 0;
3957 default:
3958 return -EINVAL;
3959 }
3960 }
3961
static int __init lttng_init_cpu_hotplug(void)
{
	int ret;

	/*
	 * Register the "prepare" multi-instance hotplug state.
	 * cpuhp_setup_state_multi() returns the dynamically allocated state
	 * number on success, or a negative errno.
	 */
	ret = cpuhp_setup_state_multi(CPUHP_BP_PREPARE_DYN, "lttng:prepare",
			lttng_hotplug_prepare,
			lttng_hotplug_dead);
	if (ret < 0) {
		return ret;
	}
	lttng_hp_prepare = ret;
	/* Publish the state number to the ring buffer subsystem. */
	lttng_rb_set_hp_prepare(ret);

	/* Register the "online" (on-CPU) multi-instance hotplug state. */
	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "lttng:online",
			lttng_hotplug_online,
			lttng_hotplug_offline);
	if (ret < 0) {
		/* Roll back the prepare state registered above. */
		cpuhp_remove_multi_state(lttng_hp_prepare);
		lttng_hp_prepare = 0;
		return ret;
	}
	lttng_hp_online = ret;
	lttng_rb_set_hp_online(ret);

	return 0;
}
3988
static void __exit lttng_exit_cpu_hotplug(void)
{
	/* Tear down in reverse registration order: online first, then prepare. */
	lttng_rb_set_hp_online(0);
	cpuhp_remove_multi_state(lttng_hp_online);
	lttng_rb_set_hp_prepare(0);
	cpuhp_remove_multi_state(lttng_hp_prepare);
}
3996
3997 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
/* Pre-4.10 kernels: no cpuhp state machine, nothing to set up. */
static int lttng_init_cpu_hotplug(void)
{
	return 0;
}
/* Pre-4.10 kernels: nothing to tear down. */
static void lttng_exit_cpu_hotplug(void)
{
}
4005 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
4006
4007
static int __init lttng_events_init(void)
{
	int ret;

	/* Early init steps without later cleanup requirements. */
	ret = wrapper_lttng_fixup_sig(THIS_MODULE);
	if (ret)
		return ret;
	ret = wrapper_get_pfnblock_flags_mask_init();
	if (ret)
		return ret;
	ret = wrapper_get_pageblock_flags_mask_init();
	if (ret)
		return ret;
	ret = lttng_probes_init();
	if (ret)
		return ret;
	ret = lttng_context_init();
	if (ret)
		return ret;
	ret = lttng_tracepoint_init();
	if (ret)
		goto error_tp;
	/* Slab caches for event and event notifier objects. */
	event_cache = KMEM_CACHE(lttng_event, 0);
	if (!event_cache) {
		ret = -ENOMEM;
		goto error_kmem_event;
	}
	event_notifier_cache = KMEM_CACHE(lttng_event_notifier, 0);
	if (!event_notifier_cache) {
		ret = -ENOMEM;
		goto error_kmem_event_notifier;
	}
	ret = lttng_abi_init();
	if (ret)
		goto error_abi;
	ret = lttng_logger_init();
	if (ret)
		goto error_logger;
	ret = lttng_init_cpu_hotplug();
	if (ret)
		goto error_hotplug;
	printk(KERN_NOTICE "LTTng: Loaded modules v%s.%s.%s%s (%s)%s%s\n",
		__stringify(LTTNG_MODULES_MAJOR_VERSION),
		__stringify(LTTNG_MODULES_MINOR_VERSION),
		__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
		LTTNG_MODULES_EXTRAVERSION,
		LTTNG_VERSION_NAME,
#ifdef LTTNG_EXTRA_VERSION_GIT
		LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
#else
		"",
#endif
#ifdef LTTNG_EXTRA_VERSION_NAME
		LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
#else
		"");
#endif
	return 0;

	/* Unwind in strict reverse order of the successful init steps above. */
error_hotplug:
	lttng_logger_exit();
error_logger:
	lttng_abi_exit();
error_abi:
	kmem_cache_destroy(event_notifier_cache);
error_kmem_event_notifier:
	kmem_cache_destroy(event_cache);
error_kmem_event:
	lttng_tracepoint_exit();
error_tp:
	lttng_context_exit();
	printk(KERN_NOTICE "LTTng: Failed to load modules v%s.%s.%s%s (%s)%s%s\n",
		__stringify(LTTNG_MODULES_MAJOR_VERSION),
		__stringify(LTTNG_MODULES_MINOR_VERSION),
		__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
		LTTNG_MODULES_EXTRAVERSION,
		LTTNG_VERSION_NAME,
#ifdef LTTNG_EXTRA_VERSION_GIT
		LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
#else
		"",
#endif
#ifdef LTTNG_EXTRA_VERSION_NAME
		LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
#else
		"");
#endif
	return ret;
}
4097
4098 module_init(lttng_events_init);
4099
static void __exit lttng_events_exit(void)
{
	struct lttng_session *session, *tmpsession;

	/* Tear down in reverse order of lttng_events_init(). */
	lttng_exit_cpu_hotplug();
	lttng_logger_exit();
	lttng_abi_exit();
	/* Destroy any sessions still registered at unload time. */
	list_for_each_entry_safe(session, tmpsession, &sessions, list)
		lttng_session_destroy(session);
	kmem_cache_destroy(event_cache);
	kmem_cache_destroy(event_notifier_cache);
	lttng_tracepoint_exit();
	lttng_context_exit();
	printk(KERN_NOTICE "LTTng: Unloaded modules v%s.%s.%s%s (%s)%s%s\n",
		__stringify(LTTNG_MODULES_MAJOR_VERSION),
		__stringify(LTTNG_MODULES_MINOR_VERSION),
		__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
		LTTNG_MODULES_EXTRAVERSION,
		LTTNG_VERSION_NAME,
#ifdef LTTNG_EXTRA_VERSION_GIT
		LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
#else
		"",
#endif
#ifdef LTTNG_EXTRA_VERSION_NAME
		LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
#else
		"");
#endif
}
4130
4131 module_exit(lttng_events_exit);
4132
4133 #include <generated/patches.h>
4134 #ifdef LTTNG_EXTRA_VERSION_GIT
4135 MODULE_INFO(extra_version_git, LTTNG_EXTRA_VERSION_GIT);
4136 #endif
4137 #ifdef LTTNG_EXTRA_VERSION_NAME
4138 MODULE_INFO(extra_version_name, LTTNG_EXTRA_VERSION_NAME);
4139 #endif
4140 MODULE_LICENSE("GPL and additional rights");
4141 MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
4142 MODULE_DESCRIPTION("LTTng tracer");
4143 MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
4144 __stringify(LTTNG_MODULES_MINOR_VERSION) "."
4145 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
4146 LTTNG_MODULES_EXTRAVERSION);
This page took 0.18801 seconds and 4 git commands to generate.