Implement event notifier send notification
lttng-modules.git: src/lttng-events.c
1 /* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
2 *
3 * lttng-events.c
4 *
5 * Holds LTTng per-session event registry.
6 *
7 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 */
9
10 /*
11 * This page_alloc.h wrapper needs to be included before gfpflags.h because it
12 * overrides a function with a define.
13 */
14 #include "wrapper/page_alloc.h"
15
16 #include <linux/module.h>
17 #include <linux/mutex.h>
18 #include <linux/sched.h>
19 #include <linux/slab.h>
20 #include <linux/jiffies.h>
21 #include <linux/utsname.h>
22 #include <linux/err.h>
23 #include <linux/seq_file.h>
24 #include <linux/file.h>
25 #include <linux/anon_inodes.h>
26 #include <wrapper/file.h>
27 #include <linux/uaccess.h>
28 #include <linux/vmalloc.h>
29 #include <linux/dmi.h>
30
31 #include <wrapper/uuid.h>
32 #include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_mappings() */
33 #include <wrapper/random.h>
34 #include <wrapper/tracepoint.h>
35 #include <wrapper/list.h>
36 #include <wrapper/types.h>
37 #include <lttng/kernel-version.h>
38 #include <lttng/events.h>
39 #include <lttng/tracer.h>
40 #include <lttng/event-notifier-notification.h>
41 #include <lttng/abi-old.h>
42 #include <lttng/endian.h>
43 #include <lttng/string-utils.h>
44 #include <lttng/utils.h>
45 #include <ringbuffer/backend.h>
46 #include <ringbuffer/frontend.h>
47 #include <wrapper/time.h>
48
49 #define METADATA_CACHE_DEFAULT_SIZE 4096
50
51 static LIST_HEAD(sessions);
52 static LIST_HEAD(event_notifier_groups);
53 static LIST_HEAD(lttng_transport_list);
54 /*
55 * Protect the sessions and metadata caches.
56 */
57 static DEFINE_MUTEX(sessions_mutex);
58 static struct kmem_cache *event_cache;
59 static struct kmem_cache *event_notifier_cache;
60
61 static void lttng_session_lazy_sync_event_enablers(struct lttng_session *session);
62 static void lttng_session_sync_event_enablers(struct lttng_session *session);
63 static void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler);
64 static void lttng_event_notifier_enabler_destroy(struct lttng_event_notifier_enabler *event_notifier_enabler);
65
66 static void _lttng_event_destroy(struct lttng_event *event);
67 static void _lttng_event_notifier_destroy(struct lttng_event_notifier *event_notifier);
68 static void _lttng_channel_destroy(struct lttng_channel *chan);
69 static int _lttng_event_unregister(struct lttng_event *event);
70 static int _lttng_event_notifier_unregister(struct lttng_event_notifier *event_notifier);
71 static
72 int _lttng_event_metadata_statedump(struct lttng_session *session,
73 struct lttng_channel *chan,
74 struct lttng_event *event);
75 static
76 int _lttng_session_metadata_statedump(struct lttng_session *session);
77 static
78 void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream);
79 static
80 int _lttng_type_statedump(struct lttng_session *session,
81 const struct lttng_type *type,
82 size_t nesting);
83 static
84 int _lttng_field_statedump(struct lttng_session *session,
85 const struct lttng_event_field *field,
86 size_t nesting);
87
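/*
 * Wait for a grace period covering all tracepoint probe callers.
 * Kernels >= 5.1 have folded synchronize_sched() into synchronize_rcu(),
 * so a single synchronize_rcu() call suffices; older kernels use
 * synchronize_sched(). Kernels carrying the RT patchset
 * (CONFIG_PREEMPT_RT_FULL or CONFIG_PREEMPT_RT, depending on the kernel
 * version) additionally issue a synchronize_rcu().
 */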
88 void synchronize_trace(void)
89 {
90 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,1,0))
91 synchronize_rcu();
92 #else
93 synchronize_sched();
94 #endif
95
96 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
97 #ifdef CONFIG_PREEMPT_RT_FULL
98 synchronize_rcu();
99 #endif
100 #else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
101 #ifdef CONFIG_PREEMPT_RT
102 synchronize_rcu();
103 #endif
104 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
105 }
106
107 void lttng_lock_sessions(void)
108 {
109 mutex_lock(&sessions_mutex);
110 }
111
112 void lttng_unlock_sessions(void)
113 {
114 mutex_unlock(&sessions_mutex);
115 }
116
117 static struct lttng_transport *lttng_transport_find(const char *name)
118 {
119 struct lttng_transport *transport;
120
121 list_for_each_entry(transport, &lttng_transport_list, node) {
122 if (!strcmp(transport->name, name))
123 return transport;
124 }
125 return NULL;
126 }
127
128 /*
129 * Called with sessions lock held.
130 */
131 int lttng_session_active(void)
132 {
133 struct lttng_session *iter;
134
135 list_for_each_entry(iter, &sessions, list) {
136 if (iter->active)
137 return 1;
138 }
139 return 0;
140 }
141
142 struct lttng_session *lttng_session_create(void)
143 {
144 struct lttng_session *session;
145 struct lttng_metadata_cache *metadata_cache;
146 int i;
147
148 mutex_lock(&sessions_mutex);
149 session = lttng_kvzalloc(sizeof(struct lttng_session), GFP_KERNEL);
150 if (!session)
151 goto err;
152 INIT_LIST_HEAD(&session->chan);
153 INIT_LIST_HEAD(&session->events);
154 lttng_guid_gen(&session->uuid);
155
156 metadata_cache = kzalloc(sizeof(struct lttng_metadata_cache),
157 GFP_KERNEL);
158 if (!metadata_cache)
159 goto err_free_session;
160 metadata_cache->data = vzalloc(METADATA_CACHE_DEFAULT_SIZE);
161 if (!metadata_cache->data)
162 goto err_free_cache;
163 metadata_cache->cache_alloc = METADATA_CACHE_DEFAULT_SIZE;
164 kref_init(&metadata_cache->refcount);
165 mutex_init(&metadata_cache->lock);
166 session->metadata_cache = metadata_cache;
167 INIT_LIST_HEAD(&metadata_cache->metadata_stream);
168 memcpy(&metadata_cache->uuid, &session->uuid,
169 sizeof(metadata_cache->uuid));
170 INIT_LIST_HEAD(&session->enablers_head);
171 for (i = 0; i < LTTNG_EVENT_HT_SIZE; i++)
172 INIT_HLIST_HEAD(&session->events_ht.table[i]);
173 list_add(&session->list, &sessions);
174 session->pid_tracker.session = session;
175 session->pid_tracker.tracker_type = TRACKER_PID;
176 session->vpid_tracker.session = session;
177 session->vpid_tracker.tracker_type = TRACKER_VPID;
178 session->uid_tracker.session = session;
179 session->uid_tracker.tracker_type = TRACKER_UID;
180 session->vuid_tracker.session = session;
181 session->vuid_tracker.tracker_type = TRACKER_VUID;
182 session->gid_tracker.session = session;
183 session->gid_tracker.tracker_type = TRACKER_GID;
184 session->vgid_tracker.session = session;
185 session->vgid_tracker.tracker_type = TRACKER_VGID;
186 mutex_unlock(&sessions_mutex);
187 return session;
188
189 err_free_cache:
190 kfree(metadata_cache);
191 err_free_session:
192 lttng_kvfree(session);
193 err:
194 mutex_unlock(&sessions_mutex);
195 return NULL;
196 }
197
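/*
 * An event notifier group owns the ring buffer channel through which
 * event notifier notifications are sent to user space. The channel is
 * created with the "relay-event-notifier" transport, whose module is
 * pinned for the lifetime of the group; the sub-buffer geometry is
 * hard-coded for now (see the TODOs below). The group also holds the
 * enabler list, the event notifier list and the notifier hash table.
 */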
198 struct lttng_event_notifier_group *lttng_event_notifier_group_create(void)
199 {
200 struct lttng_transport *transport = NULL;
201 struct lttng_event_notifier_group *event_notifier_group;
202 const char *transport_name = "relay-event-notifier";
203 size_t subbuf_size = 4096; //TODO
204 size_t num_subbuf = 16; //TODO
205 unsigned int switch_timer_interval = 0;
206 unsigned int read_timer_interval = 0;
207 int i;
208
209 mutex_lock(&sessions_mutex);
210
211 transport = lttng_transport_find(transport_name);
212 if (!transport) {
213 printk(KERN_WARNING "LTTng: transport %s not found\n",
214 transport_name);
215 goto notransport;
216 }
217 if (!try_module_get(transport->owner)) {
218 printk(KERN_WARNING "LTTng: Can't lock transport %s module.\n",
219 transport_name);
220 goto notransport;
221 }
222
223 event_notifier_group = lttng_kvzalloc(sizeof(struct lttng_event_notifier_group),
224 GFP_KERNEL);
225 if (!event_notifier_group)
226 goto nomem;
227
228 /*
229 * Initialize the ring buffer used to store event notifier
230 * notifications.
231 */
232 event_notifier_group->ops = &transport->ops;
233 event_notifier_group->chan = transport->ops.channel_create(
234 transport_name, event_notifier_group, NULL,
235 subbuf_size, num_subbuf, switch_timer_interval,
236 read_timer_interval);
237 if (!event_notifier_group->chan)
238 goto create_error;
239
240 event_notifier_group->transport = transport;
241
242 INIT_LIST_HEAD(&event_notifier_group->enablers_head);
243 INIT_LIST_HEAD(&event_notifier_group->event_notifiers_head);
244 for (i = 0; i < LTTNG_EVENT_NOTIFIER_HT_SIZE; i++)
245 INIT_HLIST_HEAD(&event_notifier_group->event_notifiers_ht.table[i]);
246
247 list_add(&event_notifier_group->node, &event_notifier_groups);
248
249 mutex_unlock(&sessions_mutex);
250
251 return event_notifier_group;
252
253 create_error:
254 lttng_kvfree(event_notifier_group);
255 nomem:
256 if (transport)
257 module_put(transport->owner);
258 notransport:
259 mutex_unlock(&sessions_mutex);
260 return NULL;
261 }
262
263 void metadata_cache_destroy(struct kref *kref)
264 {
265 struct lttng_metadata_cache *cache =
266 container_of(kref, struct lttng_metadata_cache, refcount);
267 vfree(cache->data);
268 kfree(cache);
269 }
270
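/*
 * Session teardown order: mark the session inactive, unregister the
 * syscall probes and every event, wait for in-flight probes with
 * synchronize_trace(), then destroy the syscall tables, enablers,
 * events and channels. Metadata channels are destroyed separately when
 * their file is released (hence the BUG_ON below); their streams are
 * only hung up here. The metadata cache is refcounted and released
 * with kref_put().
 */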
271 void lttng_session_destroy(struct lttng_session *session)
272 {
273 struct lttng_channel *chan, *tmpchan;
274 struct lttng_event *event, *tmpevent;
275 struct lttng_metadata_stream *metadata_stream;
276 struct lttng_event_enabler *event_enabler, *tmp_event_enabler;
277 int ret;
278
279 mutex_lock(&sessions_mutex);
280 WRITE_ONCE(session->active, 0);
281 list_for_each_entry(chan, &session->chan, list) {
282 ret = lttng_syscalls_unregister(chan);
283 WARN_ON(ret);
284 }
285 list_for_each_entry(event, &session->events, list) {
286 ret = _lttng_event_unregister(event);
287 WARN_ON(ret);
288 }
289 synchronize_trace(); /* Wait for in-flight events to complete */
290 list_for_each_entry(chan, &session->chan, list) {
291 ret = lttng_syscalls_destroy(chan);
292 WARN_ON(ret);
293 }
294 list_for_each_entry_safe(event_enabler, tmp_event_enabler,
295 &session->enablers_head, node)
296 lttng_event_enabler_destroy(event_enabler);
297 list_for_each_entry_safe(event, tmpevent, &session->events, list)
298 _lttng_event_destroy(event);
299 list_for_each_entry_safe(chan, tmpchan, &session->chan, list) {
300 BUG_ON(chan->channel_type == METADATA_CHANNEL);
301 _lttng_channel_destroy(chan);
302 }
303 mutex_lock(&session->metadata_cache->lock);
304 list_for_each_entry(metadata_stream, &session->metadata_cache->metadata_stream, list)
305 _lttng_metadata_channel_hangup(metadata_stream);
306 mutex_unlock(&session->metadata_cache->lock);
307 lttng_id_tracker_destroy(&session->pid_tracker, false);
308 lttng_id_tracker_destroy(&session->vpid_tracker, false);
309 lttng_id_tracker_destroy(&session->uid_tracker, false);
310 lttng_id_tracker_destroy(&session->vuid_tracker, false);
311 lttng_id_tracker_destroy(&session->gid_tracker, false);
312 lttng_id_tracker_destroy(&session->vgid_tracker, false);
313 kref_put(&session->metadata_cache->refcount, metadata_cache_destroy);
314 list_del(&session->list);
315 mutex_unlock(&sessions_mutex);
316 lttng_kvfree(session);
317 }
318
319 void lttng_event_notifier_group_destroy(
320 struct lttng_event_notifier_group *event_notifier_group)
321 {
322 struct lttng_event_notifier_enabler *event_notifier_enabler, *tmp_event_notifier_enabler;
323 struct lttng_event_notifier *event_notifier, *tmpevent_notifier;
324 int ret;
325
326 if (!event_notifier_group)
327 return;
328
329 mutex_lock(&sessions_mutex);
330
331 list_for_each_entry_safe(event_notifier, tmpevent_notifier,
332 &event_notifier_group->event_notifiers_head, list) {
333 ret = _lttng_event_notifier_unregister(event_notifier);
334 WARN_ON(ret);
335 }
336
337 irq_work_sync(&event_notifier_group->wakeup_pending);
338
339 list_for_each_entry_safe(event_notifier_enabler, tmp_event_notifier_enabler,
340 &event_notifier_group->enablers_head, node)
341 lttng_event_notifier_enabler_destroy(event_notifier_enabler);
342
343 list_for_each_entry_safe(event_notifier, tmpevent_notifier,
344 &event_notifier_group->event_notifiers_head, list)
345 _lttng_event_notifier_destroy(event_notifier);
346
347 event_notifier_group->ops->channel_destroy(event_notifier_group->chan);
348 module_put(event_notifier_group->transport->owner);
349 list_del(&event_notifier_group->node);
350
351 mutex_unlock(&sessions_mutex);
352 lttng_kvfree(event_notifier_group);
353 }
354
355 int lttng_session_statedump(struct lttng_session *session)
356 {
357 int ret;
358
359 mutex_lock(&sessions_mutex);
360 ret = lttng_statedump_start(session);
361 mutex_unlock(&sessions_mutex);
362 return ret;
363 }
364
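/*
 * Enabling a session: sync the enablers so that all matching events
 * exist and are in the correct state, then pick the event header type
 * for each channel (channels with fewer than 31 events use the compact
 * event header, others the large one), clear the quiescent state of
 * the per-cpu channels, write the metadata, and start the statedump.
 * Any failure after activation flips the session back to inactive.
 */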
365 int lttng_session_enable(struct lttng_session *session)
366 {
367 int ret = 0;
368 struct lttng_channel *chan;
369
370 mutex_lock(&sessions_mutex);
371 if (session->active) {
372 ret = -EBUSY;
373 goto end;
374 }
375
376 /* Set transient enabler state to "enabled" */
377 session->tstate = 1;
378
379 /* We need to sync enablers with session before activation. */
380 lttng_session_sync_event_enablers(session);
381
382 /*
383 * Snapshot the number of events per channel to know the type of header
384 * we need to use.
385 */
386 list_for_each_entry(chan, &session->chan, list) {
387 if (chan->header_type)
388 continue; /* don't change it across session stop/restart */
389 if (chan->free_event_id < 31)
390 chan->header_type = 1; /* compact */
391 else
392 chan->header_type = 2; /* large */
393 }
394
395 /* Clear each stream's quiescent state. */
396 list_for_each_entry(chan, &session->chan, list) {
397 if (chan->channel_type != METADATA_CHANNEL)
398 lib_ring_buffer_clear_quiescent_channel(chan->chan);
399 }
400
401 WRITE_ONCE(session->active, 1);
402 WRITE_ONCE(session->been_active, 1);
403 ret = _lttng_session_metadata_statedump(session);
404 if (ret) {
405 WRITE_ONCE(session->active, 0);
406 goto end;
407 }
408 ret = lttng_statedump_start(session);
409 if (ret)
410 WRITE_ONCE(session->active, 0);
411 end:
412 mutex_unlock(&sessions_mutex);
413 return ret;
414 }
415
416 int lttng_session_disable(struct lttng_session *session)
417 {
418 int ret = 0;
419 struct lttng_channel *chan;
420
421 mutex_lock(&sessions_mutex);
422 if (!session->active) {
423 ret = -EBUSY;
424 goto end;
425 }
426 WRITE_ONCE(session->active, 0);
427
428 /* Set transient enabler state to "disabled" */
429 session->tstate = 0;
430 lttng_session_sync_event_enablers(session);
431
432 /* Set each stream's quiescent state. */
433 list_for_each_entry(chan, &session->chan, list) {
434 if (chan->channel_type != METADATA_CHANNEL)
435 lib_ring_buffer_set_quiescent_channel(chan->chan);
436 }
437 end:
438 mutex_unlock(&sessions_mutex);
439 return ret;
440 }
441
442 int lttng_session_metadata_regenerate(struct lttng_session *session)
443 {
444 int ret = 0;
445 struct lttng_channel *chan;
446 struct lttng_event *event;
447 struct lttng_metadata_cache *cache = session->metadata_cache;
448 struct lttng_metadata_stream *stream;
449
450 mutex_lock(&sessions_mutex);
451 if (!session->active) {
452 ret = -EBUSY;
453 goto end;
454 }
455
456 mutex_lock(&cache->lock);
457 memset(cache->data, 0, cache->cache_alloc);
458 cache->metadata_written = 0;
459 cache->version++;
460 list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list) {
461 stream->metadata_out = 0;
462 stream->metadata_in = 0;
463 }
464 mutex_unlock(&cache->lock);
465
466 session->metadata_dumped = 0;
467 list_for_each_entry(chan, &session->chan, list) {
468 chan->metadata_dumped = 0;
469 }
470
471 list_for_each_entry(event, &session->events, list) {
472 event->metadata_dumped = 0;
473 }
474
475 ret = _lttng_session_metadata_statedump(session);
476
477 end:
478 mutex_unlock(&sessions_mutex);
479 return ret;
480 }
481
482 int lttng_channel_enable(struct lttng_channel *channel)
483 {
484 int ret = 0;
485
486 mutex_lock(&sessions_mutex);
487 if (channel->channel_type == METADATA_CHANNEL) {
488 ret = -EPERM;
489 goto end;
490 }
491 if (channel->enabled) {
492 ret = -EEXIST;
493 goto end;
494 }
495 /* Set transient enabler state to "enabled" */
496 channel->tstate = 1;
497 lttng_session_sync_event_enablers(channel->session);
499 /* Atomically set the state to "enabled" */
499 WRITE_ONCE(channel->enabled, 1);
500 end:
501 mutex_unlock(&sessions_mutex);
502 return ret;
503 }
504
505 int lttng_channel_disable(struct lttng_channel *channel)
506 {
507 int ret = 0;
508
509 mutex_lock(&sessions_mutex);
510 if (channel->channel_type == METADATA_CHANNEL) {
511 ret = -EPERM;
512 goto end;
513 }
514 if (!channel->enabled) {
515 ret = -EEXIST;
516 goto end;
517 }
518 /* Atomically set the state to "disabled" */
519 WRITE_ONCE(channel->enabled, 0);
520 /* Set transient enabler state to "disabled" */
521 channel->tstate = 0;
522 lttng_session_sync_event_enablers(channel->session);
523 end:
524 mutex_unlock(&sessions_mutex);
525 return ret;
526 }
527
528 int lttng_event_enable(struct lttng_event *event)
529 {
530 int ret = 0;
531
532 mutex_lock(&sessions_mutex);
533 if (event->chan->channel_type == METADATA_CHANNEL) {
534 ret = -EPERM;
535 goto end;
536 }
537 if (event->enabled) {
538 ret = -EEXIST;
539 goto end;
540 }
541 switch (event->instrumentation) {
542 case LTTNG_KERNEL_TRACEPOINT:
543 case LTTNG_KERNEL_SYSCALL:
544 ret = -EINVAL;
545 break;
546 case LTTNG_KERNEL_KPROBE:
547 case LTTNG_KERNEL_UPROBE:
548 case LTTNG_KERNEL_NOOP:
549 WRITE_ONCE(event->enabled, 1);
550 break;
551 case LTTNG_KERNEL_KRETPROBE:
552 ret = lttng_kretprobes_event_enable_state(event, 1);
553 break;
554 case LTTNG_KERNEL_FUNCTION: /* Fall-through. */
555 default:
556 WARN_ON_ONCE(1);
557 ret = -EINVAL;
558 }
559 end:
560 mutex_unlock(&sessions_mutex);
561 return ret;
562 }
563
564 int lttng_event_disable(struct lttng_event *event)
565 {
566 int ret = 0;
567
568 mutex_lock(&sessions_mutex);
569 if (event->chan->channel_type == METADATA_CHANNEL) {
570 ret = -EPERM;
571 goto end;
572 }
573 if (!event->enabled) {
574 ret = -EEXIST;
575 goto end;
576 }
577 switch (event->instrumentation) {
578 case LTTNG_KERNEL_TRACEPOINT:
579 case LTTNG_KERNEL_SYSCALL:
580 ret = -EINVAL;
581 break;
582 case LTTNG_KERNEL_KPROBE:
583 case LTTNG_KERNEL_UPROBE:
584 case LTTNG_KERNEL_NOOP:
585 WRITE_ONCE(event->enabled, 0);
586 break;
587 case LTTNG_KERNEL_KRETPROBE:
588 ret = lttng_kretprobes_event_enable_state(event, 0);
589 break;
590 case LTTNG_KERNEL_FUNCTION: /* Fall-through. */
591 default:
592 WARN_ON_ONCE(1);
593 ret = -EINVAL;
594 }
595 end:
596 mutex_unlock(&sessions_mutex);
597 return ret;
598 }
599
600 int lttng_event_notifier_enable(struct lttng_event_notifier *event_notifier)
601 {
602 int ret = 0;
603
604 mutex_lock(&sessions_mutex);
605 if (event_notifier->enabled) {
606 ret = -EEXIST;
607 goto end;
608 }
609 switch (event_notifier->instrumentation) {
610 case LTTNG_KERNEL_TRACEPOINT:
611 case LTTNG_KERNEL_SYSCALL:
612 case LTTNG_KERNEL_KPROBE:
613 case LTTNG_KERNEL_FUNCTION:
614 case LTTNG_KERNEL_UPROBE:
615 case LTTNG_KERNEL_NOOP:
616 case LTTNG_KERNEL_KRETPROBE:
617 default:
618 WARN_ON_ONCE(1);
619 ret = -EINVAL;
620 }
621 end:
622 mutex_unlock(&sessions_mutex);
623 return ret;
624 }
625
626 int lttng_event_notifier_disable(struct lttng_event_notifier *event_notifier)
627 {
628 int ret = 0;
629
630 mutex_lock(&sessions_mutex);
631 if (!event_notifier->enabled) {
632 ret = -EEXIST;
633 goto end;
634 }
635 switch (event_notifier->instrumentation) {
636 case LTTNG_KERNEL_TRACEPOINT:
637 case LTTNG_KERNEL_SYSCALL:
638 case LTTNG_KERNEL_KPROBE:
639 case LTTNG_KERNEL_FUNCTION:
640 case LTTNG_KERNEL_UPROBE:
641 case LTTNG_KERNEL_NOOP:
642 case LTTNG_KERNEL_KRETPROBE:
643 default:
644 WARN_ON_ONCE(1);
645 ret = -EINVAL;
646 }
647 end:
648 mutex_unlock(&sessions_mutex);
649 return ret;
650 }
651
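/*
 * Create a channel backed by the given transport. Non-metadata
 * channels cannot be added to a session that has already been active
 * (been_active check below). The transport module is pinned with
 * try_module_get() until the channel is destroyed.
 */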
652 struct lttng_channel *lttng_channel_create(struct lttng_session *session,
653 const char *transport_name,
654 void *buf_addr,
655 size_t subbuf_size, size_t num_subbuf,
656 unsigned int switch_timer_interval,
657 unsigned int read_timer_interval,
658 enum channel_type channel_type)
659 {
660 struct lttng_channel *chan;
661 struct lttng_transport *transport = NULL;
662
663 mutex_lock(&sessions_mutex);
664 if (session->been_active && channel_type != METADATA_CHANNEL)
665 goto active; /* Refuse to add channel to active session */
666 transport = lttng_transport_find(transport_name);
667 if (!transport) {
668 printk(KERN_WARNING "LTTng: transport %s not found\n",
669 transport_name);
670 goto notransport;
671 }
672 if (!try_module_get(transport->owner)) {
673 printk(KERN_WARNING "LTTng: Can't lock transport module.\n");
674 goto notransport;
675 }
676 chan = kzalloc(sizeof(struct lttng_channel), GFP_KERNEL);
677 if (!chan)
678 goto nomem;
679 chan->session = session;
680 chan->id = session->free_chan_id++;
681 chan->ops = &transport->ops;
682 /*
683 * Note: the channel creation op already writes into the packet
684 * headers. Therefore the "chan" information used as input
685 * should be already accessible.
686 */
687 chan->chan = transport->ops.channel_create(transport_name,
688 chan, buf_addr, subbuf_size, num_subbuf,
689 switch_timer_interval, read_timer_interval);
690 if (!chan->chan)
691 goto create_error;
692 chan->tstate = 1;
693 chan->enabled = 1;
694 chan->transport = transport;
695 chan->channel_type = channel_type;
696 list_add(&chan->list, &session->chan);
697 mutex_unlock(&sessions_mutex);
698 return chan;
699
700 create_error:
701 kfree(chan);
702 nomem:
703 if (transport)
704 module_put(transport->owner);
705 notransport:
706 active:
707 mutex_unlock(&sessions_mutex);
708 return NULL;
709 }
710
711 /*
712 * Only used internally at session destruction for per-cpu channels, and
713 * when metadata channel is released.
714 * Needs to be called with sessions mutex held.
715 */
716 static
717 void _lttng_channel_destroy(struct lttng_channel *chan)
718 {
719 chan->ops->channel_destroy(chan->chan);
720 module_put(chan->transport->owner);
721 list_del(&chan->list);
722 lttng_destroy_context(chan->ctx);
723 kfree(chan);
724 }
725
726 void lttng_metadata_channel_destroy(struct lttng_channel *chan)
727 {
728 BUG_ON(chan->channel_type != METADATA_CHANNEL);
729
730 /* Protect the metadata cache with the sessions_mutex. */
731 mutex_lock(&sessions_mutex);
732 _lttng_channel_destroy(chan);
733 mutex_unlock(&sessions_mutex);
734 }
735 EXPORT_SYMBOL_GPL(lttng_metadata_channel_destroy);
736
737 static
738 void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream)
739 {
740 stream->finalized = 1;
741 wake_up_interruptible(&stream->read_wait);
742 }
743
744 /*
745 * Supports event creation while tracing session is active.
746 * Needs to be called with sessions mutex held.
747 */
748 struct lttng_event *_lttng_event_create(struct lttng_channel *chan,
749 struct lttng_kernel_event *event_param,
750 void *filter,
751 const struct lttng_event_desc *event_desc,
752 enum lttng_kernel_instrumentation itype)
753 {
754 struct lttng_session *session = chan->session;
755 struct lttng_event *event;
756 const char *event_name;
757 struct hlist_head *head;
758 int ret;
759
760 if (chan->free_event_id == -1U) {
761 ret = -EMFILE;
762 goto full;
763 }
764
765 switch (itype) {
766 case LTTNG_KERNEL_TRACEPOINT:
767 event_name = event_desc->name;
768 break;
769 case LTTNG_KERNEL_KPROBE:
770 case LTTNG_KERNEL_UPROBE:
771 case LTTNG_KERNEL_KRETPROBE:
772 case LTTNG_KERNEL_NOOP:
773 case LTTNG_KERNEL_SYSCALL:
774 event_name = event_param->name;
775 break;
776 case LTTNG_KERNEL_FUNCTION: /* Fall-through. */
777 default:
778 WARN_ON_ONCE(1);
779 ret = -EINVAL;
780 goto type_error;
781 }
782
783 head = utils_borrow_hash_table_bucket(session->events_ht.table,
784 LTTNG_EVENT_HT_SIZE, event_name);
785 lttng_hlist_for_each_entry(event, head, hlist) {
786 WARN_ON_ONCE(!event->desc);
787 if (!strncmp(event->desc->name, event_name,
788 LTTNG_KERNEL_SYM_NAME_LEN - 1)
789 && chan == event->chan) {
790 ret = -EEXIST;
791 goto exist;
792 }
793 }
794
795 event = kmem_cache_zalloc(event_cache, GFP_KERNEL);
796 if (!event) {
797 ret = -ENOMEM;
798 goto cache_error;
799 }
800 event->chan = chan;
801 event->filter = filter;
802 event->id = chan->free_event_id++;
803 event->instrumentation = itype;
804 event->evtype = LTTNG_TYPE_EVENT;
805 INIT_LIST_HEAD(&event->bytecode_runtime_head);
806 INIT_LIST_HEAD(&event->enablers_ref_head);
807
808 switch (itype) {
809 case LTTNG_KERNEL_TRACEPOINT:
810 /* Event will be enabled by enabler sync. */
811 event->enabled = 0;
812 event->registered = 0;
813 event->desc = lttng_event_desc_get(event_name);
814 if (!event->desc) {
815 ret = -ENOENT;
816 goto register_error;
817 }
818 /* Populate lttng_event structure before event registration. */
819 smp_wmb();
820 break;
821 case LTTNG_KERNEL_KPROBE:
822 /*
823 * Needs to be explicitly enabled after creation, since
824 * we may want to apply filters.
825 */
826 event->enabled = 0;
827 event->registered = 1;
828 /*
829 * Populate lttng_event structure before event
830 * registration.
831 */
832 smp_wmb();
833 ret = lttng_kprobes_register(event_name,
834 event_param->u.kprobe.symbol_name,
835 event_param->u.kprobe.offset,
836 event_param->u.kprobe.addr,
837 event);
838 if (ret) {
839 ret = -EINVAL;
840 goto register_error;
841 }
842 ret = try_module_get(event->desc->owner);
843 WARN_ON_ONCE(!ret);
844 break;
845 case LTTNG_KERNEL_KRETPROBE:
846 {
847 struct lttng_event *event_return;
848
849 /* kretprobe defines 2 events */
850 /*
851 * Needs to be explicitly enabled after creation, since
852 * we may want to apply filters.
853 */
854 event->enabled = 0;
855 event->registered = 1;
856 event_return =
857 kmem_cache_zalloc(event_cache, GFP_KERNEL);
858 if (!event_return) {
859 ret = -ENOMEM;
860 goto register_error;
861 }
862 event_return->chan = chan;
863 event_return->filter = filter;
864 event_return->id = chan->free_event_id++;
865 event_return->enabled = 0;
866 event_return->registered = 1;
867 event_return->instrumentation = itype;
868 /*
869 * Populate lttng_event structure before kretprobe registration.
870 */
871 smp_wmb();
872 ret = lttng_kretprobes_register(event_name,
873 event_param->u.kretprobe.symbol_name,
874 event_param->u.kretprobe.offset,
875 event_param->u.kretprobe.addr,
876 event, event_return);
877 if (ret) {
878 kmem_cache_free(event_cache, event_return);
879 ret = -EINVAL;
880 goto register_error;
881 }
882 /* Take 2 refs on the module: one per event. */
883 ret = try_module_get(event->desc->owner);
884 WARN_ON_ONCE(!ret);
885 ret = try_module_get(event->desc->owner);
886 WARN_ON_ONCE(!ret);
887 ret = _lttng_event_metadata_statedump(chan->session, chan,
888 event_return);
889 WARN_ON_ONCE(ret > 0);
890 if (ret) {
891 kmem_cache_free(event_cache, event_return);
892 module_put(event->desc->owner);
893 module_put(event->desc->owner);
894 goto statedump_error;
895 }
896 list_add(&event_return->list, &chan->session->events);
897 break;
898 }
899 case LTTNG_KERNEL_NOOP:
900 case LTTNG_KERNEL_SYSCALL:
901 /*
902 * Needs to be explicitly enabled after creation, since
903 * we may want to apply filters.
904 */
905 event->enabled = 0;
906 event->registered = 0;
907 event->desc = event_desc;
908 switch (event_param->u.syscall.entryexit) {
909 case LTTNG_KERNEL_SYSCALL_ENTRYEXIT:
910 ret = -EINVAL;
911 goto register_error;
912 case LTTNG_KERNEL_SYSCALL_ENTRY:
913 event->u.syscall.entryexit = LTTNG_SYSCALL_ENTRY;
914 break;
915 case LTTNG_KERNEL_SYSCALL_EXIT:
916 event->u.syscall.entryexit = LTTNG_SYSCALL_EXIT;
917 break;
918 }
919 switch (event_param->u.syscall.abi) {
920 case LTTNG_KERNEL_SYSCALL_ABI_ALL:
921 ret = -EINVAL;
922 goto register_error;
923 case LTTNG_KERNEL_SYSCALL_ABI_NATIVE:
924 event->u.syscall.abi = LTTNG_SYSCALL_ABI_NATIVE;
925 break;
926 case LTTNG_KERNEL_SYSCALL_ABI_COMPAT:
927 event->u.syscall.abi = LTTNG_SYSCALL_ABI_COMPAT;
928 break;
929 }
930 if (!event->desc) {
931 ret = -EINVAL;
932 goto register_error;
933 }
934 break;
935 case LTTNG_KERNEL_UPROBE:
936 /*
937 * Needs to be explicitly enabled after creation, since
938 * we may want to apply filters.
939 */
940 event->enabled = 0;
941 event->registered = 1;
942
943 /*
944 * Populate lttng_event structure before event
945 * registration.
946 */
947 smp_wmb();
948
949 ret = lttng_uprobes_register(event_param->name,
950 event_param->u.uprobe.fd,
951 event);
952 if (ret)
953 goto register_error;
954 ret = try_module_get(event->desc->owner);
955 WARN_ON_ONCE(!ret);
956 break;
957 case LTTNG_KERNEL_FUNCTION: /* Fall-through */
958 default:
959 WARN_ON_ONCE(1);
960 ret = -EINVAL;
961 goto register_error;
962 }
963 ret = _lttng_event_metadata_statedump(chan->session, chan, event);
964 WARN_ON_ONCE(ret > 0);
965 if (ret) {
966 goto statedump_error;
967 }
968 hlist_add_head(&event->hlist, head);
969 list_add(&event->list, &chan->session->events);
970 return event;
971
972 statedump_error:
973 /* If a statedump error occurs, events will not be readable. */
974 register_error:
975 kmem_cache_free(event_cache, event);
976 cache_error:
977 exist:
978 type_error:
979 full:
980 return ERR_PTR(ret);
981 }
982
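/*
 * Create an event notifier within a group. The user-provided token
 * identifies the notifier to user space; a given (name, token) pair may
 * only exist once per group. The notification path is wired to
 * lttng_event_notifier_notification_send(). At this stage no
 * instrumentation type is actually hooked up yet: the first switch
 * below rejects every type with -EINVAL.
 */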
983 struct lttng_event_notifier *_lttng_event_notifier_create(
984 const struct lttng_event_desc *event_desc,
985 uint64_t token, struct lttng_event_notifier_group *event_notifier_group,
986 struct lttng_kernel_event_notifier *event_notifier_param,
987 void *filter, enum lttng_kernel_instrumentation itype)
988 {
989 struct lttng_event_notifier *event_notifier;
990 const char *event_name;
991 struct hlist_head *head;
992 int ret;
993
994 switch (itype) {
995 case LTTNG_KERNEL_TRACEPOINT:
996 case LTTNG_KERNEL_KPROBE:
997 case LTTNG_KERNEL_UPROBE:
998 case LTTNG_KERNEL_KRETPROBE:
999 case LTTNG_KERNEL_FUNCTION:
1000 case LTTNG_KERNEL_NOOP:
1001 case LTTNG_KERNEL_SYSCALL:
1002 default:
1003 WARN_ON_ONCE(1);
1004 ret = -EINVAL;
1005 goto type_error;
1006 }
1007
1008 head = utils_borrow_hash_table_bucket(event_notifier_group->event_notifiers_ht.table,
1009 LTTNG_EVENT_NOTIFIER_HT_SIZE, event_name);
1010 lttng_hlist_for_each_entry(event_notifier, head, hlist) {
1011 WARN_ON_ONCE(!event_notifier->desc);
1012 if (!strncmp(event_notifier->desc->name, event_name,
1013 LTTNG_KERNEL_SYM_NAME_LEN - 1)
1014 && event_notifier_group == event_notifier->group
1015 && token == event_notifier->user_token) {
1016 ret = -EEXIST;
1017 goto exist;
1018 }
1019 }
1020
1021 event_notifier = kmem_cache_zalloc(event_notifier_cache, GFP_KERNEL);
1022 if (!event_notifier) {
1023 ret = -ENOMEM;
1024 goto cache_error;
1025 }
1026
1027 event_notifier->group = event_notifier_group;
1028 event_notifier->user_token = token;
1029 event_notifier->filter = filter;
1030 event_notifier->instrumentation = itype;
1031 event_notifier->evtype = LTTNG_TYPE_EVENT;
1032 event_notifier->send_notification = lttng_event_notifier_notification_send;
1033 INIT_LIST_HEAD(&event_notifier->bytecode_runtime_head);
1034 INIT_LIST_HEAD(&event_notifier->enablers_ref_head);
1035
1036 switch (itype) {
1037 case LTTNG_KERNEL_TRACEPOINT:
1038 /* Event will be enabled by enabler sync. */
1039 event_notifier->enabled = 0;
1040 event_notifier->registered = 0;
1041 event_notifier->desc = lttng_event_desc_get(event_name);
1042 if (!event_notifier->desc) {
1043 ret = -ENOENT;
1044 goto register_error;
1045 }
1046 /* Populate lttng_event_notifier structure before event registration. */
1047 smp_wmb();
1048 break;
1049 case LTTNG_KERNEL_KPROBE:
1050 case LTTNG_KERNEL_UPROBE:
1051 case LTTNG_KERNEL_KRETPROBE:
1052 case LTTNG_KERNEL_FUNCTION:
1053 case LTTNG_KERNEL_NOOP:
1054 case LTTNG_KERNEL_SYSCALL:
1055 default:
1056 WARN_ON_ONCE(1);
1057 ret = -EINVAL;
1058 goto register_error;
1059 }
1060
1061 list_add(&event_notifier->list, &event_notifier_group->event_notifiers_head);
1062 hlist_add_head(&event_notifier->hlist, head);
1063 return event_notifier;
1064
1065 register_error:
1066 kmem_cache_free(event_notifier_cache, event_notifier);
1067 cache_error:
1068 exist:
1069 type_error:
1070 return ERR_PTR(ret);
1071 }
1072
1073 struct lttng_event *lttng_event_create(struct lttng_channel *chan,
1074 struct lttng_kernel_event *event_param,
1075 void *filter,
1076 const struct lttng_event_desc *event_desc,
1077 enum lttng_kernel_instrumentation itype)
1078 {
1079 struct lttng_event *event;
1080
1081 mutex_lock(&sessions_mutex);
1082 event = _lttng_event_create(chan, event_param, filter, event_desc,
1083 itype);
1084 mutex_unlock(&sessions_mutex);
1085 return event;
1086 }
1087
1088 struct lttng_event_notifier *lttng_event_notifier_create(
1089 const struct lttng_event_desc *event_desc,
1090 uint64_t id, struct lttng_event_notifier_group *event_notifier_group,
1091 struct lttng_kernel_event_notifier *event_notifier_param,
1092 void *filter, enum lttng_kernel_instrumentation itype)
1093 {
1094 struct lttng_event_notifier *event_notifier;
1095
1096 mutex_lock(&sessions_mutex);
1097 event_notifier = _lttng_event_notifier_create(event_desc, id,
1098 event_notifier_group, event_notifier_param, filter, itype);
1099 mutex_unlock(&sessions_mutex);
1100 return event_notifier;
1101 }
1102
1103 /* Only used for tracepoints for now. */
1104 static
1105 void register_event(struct lttng_event *event)
1106 {
1107 const struct lttng_event_desc *desc;
1108 int ret = -EINVAL;
1109
1110 if (event->registered)
1111 return;
1112
1113 desc = event->desc;
1114 switch (event->instrumentation) {
1115 case LTTNG_KERNEL_TRACEPOINT:
1116 ret = lttng_wrapper_tracepoint_probe_register(desc->kname,
1117 desc->probe_callback,
1118 event);
1119 break;
1120 case LTTNG_KERNEL_SYSCALL:
1121 ret = lttng_syscall_filter_enable(event->chan, event);
1122 break;
1123 case LTTNG_KERNEL_KPROBE:
1124 case LTTNG_KERNEL_UPROBE:
1125 case LTTNG_KERNEL_KRETPROBE:
1126 case LTTNG_KERNEL_NOOP:
1127 ret = 0;
1128 break;
1129 case LTTNG_KERNEL_FUNCTION: /* Fall-through */
1130 default:
1131 WARN_ON_ONCE(1);
1132 }
1133 if (!ret)
1134 event->registered = 1;
1135 }
1136
1137 /*
1138 * Only used internally at session destruction.
1139 */
1140 int _lttng_event_unregister(struct lttng_event *event)
1141 {
1142 const struct lttng_event_desc *desc;
1143 int ret = -EINVAL;
1144
1145 if (!event->registered)
1146 return 0;
1147
1148 desc = event->desc;
1149 switch (event->instrumentation) {
1150 case LTTNG_KERNEL_TRACEPOINT:
1151 ret = lttng_wrapper_tracepoint_probe_unregister(event->desc->kname,
1152 event->desc->probe_callback,
1153 event);
1154 break;
1155 case LTTNG_KERNEL_KPROBE:
1156 lttng_kprobes_unregister(event);
1157 ret = 0;
1158 break;
1159 case LTTNG_KERNEL_KRETPROBE:
1160 lttng_kretprobes_unregister(event);
1161 ret = 0;
1162 break;
1163 case LTTNG_KERNEL_SYSCALL:
1164 ret = lttng_syscall_filter_disable(event->chan, event);
1165 break;
1166 case LTTNG_KERNEL_NOOP:
1167 ret = 0;
1168 break;
1169 case LTTNG_KERNEL_UPROBE:
1170 lttng_uprobes_unregister(event);
1171 ret = 0;
1172 break;
1173 case LTTNG_KERNEL_FUNCTION: /* Fall-through */
1174 default:
1175 WARN_ON_ONCE(1);
1176 }
1177 if (!ret)
1178 event->registered = 0;
1179 return ret;
1180 }
1181
1182 /* Only used for tracepoints for now. */
1183 static
1184 void __always_unused register_event_notifier(
1185 struct lttng_event_notifier *event_notifier)
1186 {
1187 const struct lttng_event_desc *desc;
1188 int ret = -EINVAL;
1189
1190 if (event_notifier->registered)
1191 return;
1192
1193 desc = event_notifier->desc;
1194 switch (event_notifier->instrumentation) {
1195 case LTTNG_KERNEL_TRACEPOINT:
1196 case LTTNG_KERNEL_SYSCALL:
1197 case LTTNG_KERNEL_KPROBE:
1198 case LTTNG_KERNEL_UPROBE:
1199 case LTTNG_KERNEL_KRETPROBE:
1200 case LTTNG_KERNEL_FUNCTION:
1201 case LTTNG_KERNEL_NOOP:
1202 default:
1203 WARN_ON_ONCE(1);
1204 }
1205 if (!ret)
1206 event_notifier->registered = 1;
1207 }
1208
1209 static
1210 int _lttng_event_notifier_unregister(
1211 struct lttng_event_notifier *event_notifier)
1212 {
1213 const struct lttng_event_desc *desc;
1214 int ret = -EINVAL;
1215
1216 if (!event_notifier->registered)
1217 return 0;
1218
1219 desc = event_notifier->desc;
1220 switch (event_notifier->instrumentation) {
1221 case LTTNG_KERNEL_TRACEPOINT:
1222 case LTTNG_KERNEL_KPROBE:
1223 case LTTNG_KERNEL_KRETPROBE:
1224 case LTTNG_KERNEL_FUNCTION:
1225 case LTTNG_KERNEL_SYSCALL:
1226 case LTTNG_KERNEL_NOOP:
1227 case LTTNG_KERNEL_UPROBE:
1228 default:
1229 WARN_ON_ONCE(1);
1230 }
1231 if (!ret)
1232 event_notifier->registered = 0;
1233 return ret;
1234 }
1235
1236 /*
1237 * Only used internally at session destruction.
1238 */
1239 static
1240 void _lttng_event_destroy(struct lttng_event *event)
1241 {
1242 switch (event->instrumentation) {
1243 case LTTNG_KERNEL_TRACEPOINT:
1244 lttng_event_desc_put(event->desc);
1245 break;
1246 case LTTNG_KERNEL_KPROBE:
1247 module_put(event->desc->owner);
1248 lttng_kprobes_destroy_private(event);
1249 break;
1250 case LTTNG_KERNEL_KRETPROBE:
1251 module_put(event->desc->owner);
1252 lttng_kretprobes_destroy_private(event);
1253 break;
1254 case LTTNG_KERNEL_NOOP:
1255 case LTTNG_KERNEL_SYSCALL:
1256 break;
1257 case LTTNG_KERNEL_UPROBE:
1258 module_put(event->desc->owner);
1259 lttng_uprobes_destroy_private(event);
1260 break;
1261 case LTTNG_KERNEL_FUNCTION: /* Fall-through */
1262 default:
1263 WARN_ON_ONCE(1);
1264 }
1265 list_del(&event->list);
1266 lttng_destroy_context(event->ctx);
1267 kmem_cache_free(event_cache, event);
1268 }
1269
1270 /*
1271 * Only used internally at session destruction.
1272 */
1273 static
1274 void _lttng_event_notifier_destroy(struct lttng_event_notifier *event_notifier)
1275 {
1276 switch (event_notifier->instrumentation) {
1277 case LTTNG_KERNEL_TRACEPOINT:
1278 case LTTNG_KERNEL_KPROBE:
1279 case LTTNG_KERNEL_KRETPROBE:
1280 case LTTNG_KERNEL_FUNCTION:
1281 case LTTNG_KERNEL_NOOP:
1282 case LTTNG_KERNEL_SYSCALL:
1283 case LTTNG_KERNEL_UPROBE:
1284 default:
1285 WARN_ON_ONCE(1);
1286 }
1287 list_del(&event_notifier->list);
1288 kmem_cache_free(event_notifier_cache, event_notifier);
1289 }
1290
1291 struct lttng_id_tracker *get_tracker(struct lttng_session *session,
1292 enum tracker_type tracker_type)
1293 {
1294 switch (tracker_type) {
1295 case TRACKER_PID:
1296 return &session->pid_tracker;
1297 case TRACKER_VPID:
1298 return &session->vpid_tracker;
1299 case TRACKER_UID:
1300 return &session->uid_tracker;
1301 case TRACKER_VUID:
1302 return &session->vuid_tracker;
1303 case TRACKER_GID:
1304 return &session->gid_tracker;
1305 case TRACKER_VGID:
1306 return &session->vgid_tracker;
1307 default:
1308 WARN_ON_ONCE(1);
1309 return NULL;
1310 }
1311 }
1312
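/*
 * ID trackers (pid, vpid, uid, vuid, gid, vgid). Tracking id -1 means
 * "track all": the tracker is destroyed so no filtering applies.
 * Untracking id -1 installs an empty tracker, so nothing is tracked.
 * Both operations take the sessions mutex.
 */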
1313 int lttng_session_track_id(struct lttng_session *session,
1314 enum tracker_type tracker_type, int id)
1315 {
1316 struct lttng_id_tracker *tracker;
1317 int ret;
1318
1319 tracker = get_tracker(session, tracker_type);
1320 if (!tracker)
1321 return -EINVAL;
1322 if (id < -1)
1323 return -EINVAL;
1324 mutex_lock(&sessions_mutex);
1325 if (id == -1) {
1326 /* track all ids: destroy tracker. */
1327 lttng_id_tracker_destroy(tracker, true);
1328 ret = 0;
1329 } else {
1330 ret = lttng_id_tracker_add(tracker, id);
1331 }
1332 mutex_unlock(&sessions_mutex);
1333 return ret;
1334 }
1335
1336 int lttng_session_untrack_id(struct lttng_session *session,
1337 enum tracker_type tracker_type, int id)
1338 {
1339 struct lttng_id_tracker *tracker;
1340 int ret;
1341
1342 tracker = get_tracker(session, tracker_type);
1343 if (!tracker)
1344 return -EINVAL;
1345 if (id < -1)
1346 return -EINVAL;
1347 mutex_lock(&sessions_mutex);
1348 if (id == -1) {
1349 /* untrack all ids: replace by empty tracker. */
1350 ret = lttng_id_tracker_empty_set(tracker);
1351 } else {
1352 ret = lttng_id_tracker_del(tracker, id);
1353 }
1354 mutex_unlock(&sessions_mutex);
1355 return ret;
1356 }
1357
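/*
 * seq_file iterator over a tracker's hash table, backing the file
 * descriptor returned by lttng_session_list_tracker_ids(). A disabled
 * (track-all) tracker is reported as a single wildcard entry with
 * id -1. The sessions mutex is held from ->start to ->stop.
 */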
1358 static
1359 void *id_list_start(struct seq_file *m, loff_t *pos)
1360 {
1361 struct lttng_id_tracker *id_tracker = m->private;
1362 struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
1363 struct lttng_id_hash_node *e;
1364 int iter = 0, i;
1365
1366 mutex_lock(&sessions_mutex);
1367 if (id_tracker_p) {
1368 for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
1369 struct hlist_head *head = &id_tracker_p->id_hash[i];
1370
1371 lttng_hlist_for_each_entry(e, head, hlist) {
1372 if (iter++ >= *pos)
1373 return e;
1374 }
1375 }
1376 } else {
1377 /* ID tracker disabled. */
1378 if (iter >= *pos && iter == 0) {
1379 return id_tracker_p; /* empty tracker */
1380 }
1381 iter++;
1382 }
1383 /* End of list */
1384 return NULL;
1385 }
1386
1387 /* Called with sessions_mutex held. */
1388 static
1389 void *id_list_next(struct seq_file *m, void *p, loff_t *ppos)
1390 {
1391 struct lttng_id_tracker *id_tracker = m->private;
1392 struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
1393 struct lttng_id_hash_node *e;
1394 int iter = 0, i;
1395
1396 (*ppos)++;
1397 if (id_tracker_p) {
1398 for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
1399 struct hlist_head *head = &id_tracker_p->id_hash[i];
1400
1401 lttng_hlist_for_each_entry(e, head, hlist) {
1402 if (iter++ >= *ppos)
1403 return e;
1404 }
1405 }
1406 } else {
1407 /* ID tracker disabled. */
1408 if (iter >= *ppos && iter == 0)
1409 return p; /* empty tracker */
1410 iter++;
1411 }
1412
1413 /* End of list */
1414 return NULL;
1415 }
1416
1417 static
1418 void id_list_stop(struct seq_file *m, void *p)
1419 {
1420 mutex_unlock(&sessions_mutex);
1421 }
1422
1423 static
1424 int id_list_show(struct seq_file *m, void *p)
1425 {
1426 struct lttng_id_tracker *id_tracker = m->private;
1427 struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
1428 int id;
1429
1430 if (p == id_tracker_p) {
1431 /* Tracker disabled. */
1432 id = -1;
1433 } else {
1434 const struct lttng_id_hash_node *e = p;
1435
1436 id = lttng_id_tracker_get_node_id(e);
1437 }
1438 switch (id_tracker->tracker_type) {
1439 case TRACKER_PID:
1440 seq_printf(m, "process { pid = %d; };\n", id);
1441 break;
1442 case TRACKER_VPID:
1443 seq_printf(m, "process { vpid = %d; };\n", id);
1444 break;
1445 case TRACKER_UID:
1446 seq_printf(m, "user { uid = %d; };\n", id);
1447 break;
1448 case TRACKER_VUID:
1449 seq_printf(m, "user { vuid = %d; };\n", id);
1450 break;
1451 case TRACKER_GID:
1452 seq_printf(m, "group { gid = %d; };\n", id);
1453 break;
1454 case TRACKER_VGID:
1455 seq_printf(m, "group { vgid = %d; };\n", id);
1456 break;
1457 default:
1458 seq_printf(m, "UNKNOWN { field = %d };\n", id);
1459 }
1460 return 0;
1461 }
1462
1463 static
1464 const struct seq_operations lttng_tracker_ids_list_seq_ops = {
1465 .start = id_list_start,
1466 .next = id_list_next,
1467 .stop = id_list_stop,
1468 .show = id_list_show,
1469 };
1470
1471 static
1472 int lttng_tracker_ids_list_open(struct inode *inode, struct file *file)
1473 {
1474 return seq_open(file, &lttng_tracker_ids_list_seq_ops);
1475 }
1476
1477 static
1478 int lttng_tracker_ids_list_release(struct inode *inode, struct file *file)
1479 {
1480 struct seq_file *m = file->private_data;
1481 struct lttng_id_tracker *id_tracker = m->private;
1482 int ret;
1483
1484 WARN_ON_ONCE(!id_tracker);
1485 ret = seq_release(inode, file);
1486 if (!ret)
1487 fput(id_tracker->session->file);
1488 return ret;
1489 }
1490
1491 const struct file_operations lttng_tracker_ids_list_fops = {
1492 .owner = THIS_MODULE,
1493 .open = lttng_tracker_ids_list_open,
1494 .read = seq_read,
1495 .llseek = seq_lseek,
1496 .release = lttng_tracker_ids_list_release,
1497 };
1498
1499 int lttng_session_list_tracker_ids(struct lttng_session *session,
1500 enum tracker_type tracker_type)
1501 {
1502 struct file *tracker_ids_list_file;
1503 struct seq_file *m;
1504 int file_fd, ret;
1505
1506 file_fd = lttng_get_unused_fd();
1507 if (file_fd < 0) {
1508 ret = file_fd;
1509 goto fd_error;
1510 }
1511
1512 tracker_ids_list_file = anon_inode_getfile("[lttng_tracker_ids_list]",
1513 &lttng_tracker_ids_list_fops,
1514 NULL, O_RDWR);
1515 if (IS_ERR(tracker_ids_list_file)) {
1516 ret = PTR_ERR(tracker_ids_list_file);
1517 goto file_error;
1518 }
1519 if (!atomic_long_add_unless(&session->file->f_count, 1, LONG_MAX)) {
1520 ret = -EOVERFLOW;
1521 goto refcount_error;
1522 }
1523 ret = lttng_tracker_ids_list_fops.open(NULL, tracker_ids_list_file);
1524 if (ret < 0)
1525 goto open_error;
1526 m = tracker_ids_list_file->private_data;
1527
1528 m->private = get_tracker(session, tracker_type);
1529 BUG_ON(!m->private);
1530 fd_install(file_fd, tracker_ids_list_file);
1531
1532 return file_fd;
1533
1534 open_error:
1535 atomic_long_dec(&session->file->f_count);
1536 refcount_error:
1537 fput(tracker_ids_list_file);
1538 file_error:
1539 put_unused_fd(file_fd);
1540 fd_error:
1541 return ret;
1542 }
1543
1544 /*
1545 * Enabler management.
1546 */
1547 static
1548 int lttng_match_enabler_star_glob(const char *desc_name,
1549 const char *pattern)
1550 {
1551 if (!strutils_star_glob_match(pattern, LTTNG_SIZE_MAX,
1552 desc_name, LTTNG_SIZE_MAX))
1553 return 0;
1554 return 1;
1555 }
1556
1557 static
1558 int lttng_match_enabler_name(const char *desc_name,
1559 const char *name)
1560 {
1561 if (strcmp(desc_name, name))
1562 return 0;
1563 return 1;
1564 }
1565
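/*
 * Decide whether an event description matches an enabler. Tracepoint
 * enablers match on the event name, either literally or as a star
 * glob. Syscall enablers first strip the "compat_" prefix and the
 * "syscall_entry_"/"syscall_exit_" prefixes from the description name,
 * check the enabler's entry/exit and ABI constraints, then match the
 * remaining name (star glob or literal).
 */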
1566 static
1567 int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
1568 struct lttng_enabler *enabler)
1569 {
1570 const char *desc_name, *enabler_name;
1571 bool compat = false, entry = false;
1572
1573 enabler_name = enabler->event_param.name;
1574 switch (enabler->event_param.instrumentation) {
1575 case LTTNG_KERNEL_TRACEPOINT:
1576 desc_name = desc->name;
1577 switch (enabler->format_type) {
1578 case LTTNG_ENABLER_FORMAT_STAR_GLOB:
1579 return lttng_match_enabler_star_glob(desc_name, enabler_name);
1580 case LTTNG_ENABLER_FORMAT_NAME:
1581 return lttng_match_enabler_name(desc_name, enabler_name);
1582 default:
1583 return -EINVAL;
1584 }
1585 break;
1586 case LTTNG_KERNEL_SYSCALL:
1587 desc_name = desc->name;
1588 if (!strncmp(desc_name, "compat_", strlen("compat_"))) {
1589 desc_name += strlen("compat_");
1590 compat = true;
1591 }
1592 if (!strncmp(desc_name, "syscall_exit_",
1593 strlen("syscall_exit_"))) {
1594 desc_name += strlen("syscall_exit_");
1595 } else if (!strncmp(desc_name, "syscall_entry_",
1596 strlen("syscall_entry_"))) {
1597 desc_name += strlen("syscall_entry_");
1598 entry = true;
1599 } else {
1600 WARN_ON_ONCE(1);
1601 return -EINVAL;
1602 }
1603 switch (enabler->event_param.u.syscall.entryexit) {
1604 case LTTNG_KERNEL_SYSCALL_ENTRYEXIT:
1605 break;
1606 case LTTNG_KERNEL_SYSCALL_ENTRY:
1607 if (!entry)
1608 return 0;
1609 break;
1610 case LTTNG_KERNEL_SYSCALL_EXIT:
1611 if (entry)
1612 return 0;
1613 break;
1614 default:
1615 return -EINVAL;
1616 }
1617 switch (enabler->event_param.u.syscall.abi) {
1618 case LTTNG_KERNEL_SYSCALL_ABI_ALL:
1619 break;
1620 case LTTNG_KERNEL_SYSCALL_ABI_NATIVE:
1621 if (compat)
1622 return 0;
1623 break;
1624 case LTTNG_KERNEL_SYSCALL_ABI_COMPAT:
1625 if (!compat)
1626 return 0;
1627 break;
1628 default:
1629 return -EINVAL;
1630 }
1631 switch (enabler->event_param.u.syscall.match) {
1632 case LTTNG_SYSCALL_MATCH_NAME:
1633 switch (enabler->format_type) {
1634 case LTTNG_ENABLER_FORMAT_STAR_GLOB:
1635 return lttng_match_enabler_star_glob(desc_name, enabler_name);
1636 case LTTNG_ENABLER_FORMAT_NAME:
1637 return lttng_match_enabler_name(desc_name, enabler_name);
1638 default:
1639 return -EINVAL;
1640 }
1641 break;
1642 case LTTNG_SYSCALL_MATCH_NR:
1643 return -EINVAL; /* Not implemented. */
1644 default:
1645 return -EINVAL;
1646 }
1647 break;
1648 default:
1649 WARN_ON_ONCE(1);
1650 return -EINVAL;
1651 }
1652 }
1653
1654 static
1655 int lttng_event_enabler_match_event(struct lttng_event_enabler *event_enabler,
1656 struct lttng_event *event)
1657 {
1658 struct lttng_enabler *base_enabler = lttng_event_enabler_as_enabler(
1659 event_enabler);
1660
1661 if (base_enabler->event_param.instrumentation != event->instrumentation)
1662 return 0;
1663 if (lttng_desc_match_enabler(event->desc, base_enabler)
1664 && event->chan == event_enabler->chan)
1665 return 1;
1666 else
1667 return 0;
1668 }
1669
1670 static
1671 struct lttng_enabler_ref *lttng_enabler_ref(
1672 struct list_head *enablers_ref_list,
1673 struct lttng_enabler *enabler)
1674 {
1675 struct lttng_enabler_ref *enabler_ref;
1676
1677 list_for_each_entry(enabler_ref, enablers_ref_list, node) {
1678 if (enabler_ref->ref == enabler)
1679 return enabler_ref;
1680 }
1681 return NULL;
1682 }
1683
1684 static
1685 void lttng_create_tracepoint_event_if_missing(struct lttng_event_enabler *event_enabler)
1686 {
1687 struct lttng_session *session = event_enabler->chan->session;
1688 struct lttng_probe_desc *probe_desc;
1689 const struct lttng_event_desc *desc;
1690 int i;
1691 struct list_head *probe_list;
1692
1693 probe_list = lttng_get_probe_list_head();
1694 /*
1695 * For each probe event, if we find that a probe event matches
1696 * our enabler, create an associated lttng_event if not
1697 * already present.
1698 */
1699 list_for_each_entry(probe_desc, probe_list, head) {
1700 for (i = 0; i < probe_desc->nr_events; i++) {
1701 int found = 0;
1702 struct hlist_head *head;
1703 struct lttng_event *event;
1704
1705 desc = probe_desc->event_desc[i];
1706 if (!lttng_desc_match_enabler(desc,
1707 lttng_event_enabler_as_enabler(event_enabler)))
1708 continue;
1709
1710 /*
1711 * Check if already created.
1712 */
1713 head = utils_borrow_hash_table_bucket(
1714 session->events_ht.table, LTTNG_EVENT_HT_SIZE,
1715 desc->name);
1716 lttng_hlist_for_each_entry(event, head, hlist) {
1717 if (event->desc == desc
1718 && event->chan == event_enabler->chan)
1719 found = 1;
1720 }
1721 if (found)
1722 continue;
1723
1724 /*
1725 * We need to create an event for this
1726 * event probe.
1727 */
1728 event = _lttng_event_create(event_enabler->chan,
1729 NULL, NULL, desc,
1730 LTTNG_KERNEL_TRACEPOINT);
1731 if (IS_ERR(event)) {
1732 printk(KERN_INFO "LTTng: Unable to create event %s\n",
1733 probe_desc->event_desc[i]->name);
1734 }
1735 }
1736 }
1737 }
1738
1739 static
1740 void lttng_create_syscall_event_if_missing(struct lttng_event_enabler *event_enabler)
1741 {
1742 int ret;
1743
1744 ret = lttng_syscalls_register(event_enabler->chan, NULL);
1745 WARN_ON_ONCE(ret);
1746 }
1747
1748 /*
1749 * Create struct lttng_event if it is missing and present in the list of
1750 * tracepoint probes.
1751 * Should be called with sessions mutex held.
1752 */
1753 static
1754 void lttng_create_event_if_missing(struct lttng_event_enabler *event_enabler)
1755 {
1756 switch (event_enabler->base.event_param.instrumentation) {
1757 case LTTNG_KERNEL_TRACEPOINT:
1758 lttng_create_tracepoint_event_if_missing(event_enabler);
1759 break;
1760 case LTTNG_KERNEL_SYSCALL:
1761 lttng_create_syscall_event_if_missing(event_enabler);
1762 break;
1763 default:
1764 WARN_ON_ONCE(1);
1765 break;
1766 }
1767 }
1768
1769 /*
1770 * Create events associated with an event_enabler (if not already present),
1771 * and add backward reference from the event to the enabler.
1772 * Should be called with sessions mutex held.
1773 */
1774 static
1775 int lttng_event_enabler_ref_events(struct lttng_event_enabler *event_enabler)
1776 {
1777 struct lttng_channel *chan = event_enabler->chan;
1778 struct lttng_session *session = event_enabler->chan->session;
1779 struct lttng_enabler *base_enabler = lttng_event_enabler_as_enabler(event_enabler);
1780 struct lttng_event *event;
1781
1782 if (base_enabler->event_param.instrumentation == LTTNG_KERNEL_SYSCALL &&
1783 base_enabler->event_param.u.syscall.entryexit == LTTNG_KERNEL_SYSCALL_ENTRYEXIT &&
1784 base_enabler->event_param.u.syscall.abi == LTTNG_KERNEL_SYSCALL_ABI_ALL &&
1785 base_enabler->event_param.u.syscall.match == LTTNG_SYSCALL_MATCH_NAME &&
1786 !strcmp(base_enabler->event_param.name, "*")) {
1787 if (base_enabler->enabled)
1788 WRITE_ONCE(chan->syscall_all, 1);
1789 else
1790 WRITE_ONCE(chan->syscall_all, 0);
1791 }
1792
1793 /* First ensure that probe events are created for this enabler. */
1794 lttng_create_event_if_missing(event_enabler);
1795
1796 /* For each event matching event_enabler in session event list. */
1797 list_for_each_entry(event, &session->events, list) {
1798 struct lttng_enabler_ref *enabler_ref;
1799
1800 if (!lttng_event_enabler_match_event(event_enabler, event))
1801 continue;
1802 enabler_ref = lttng_enabler_ref(&event->enablers_ref_head,
1803 lttng_event_enabler_as_enabler(event_enabler));
1804 if (!enabler_ref) {
1805 /*
1806 * If no backward ref, create it.
1807 * Add backward ref from event to event_enabler.
1808 */
1809 enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
1810 if (!enabler_ref)
1811 return -ENOMEM;
1812 enabler_ref->ref = lttng_event_enabler_as_enabler(event_enabler);
1813 list_add(&enabler_ref->node,
1814 &event->enablers_ref_head);
1815 }
1816
1817 /*
1818 * Link filter bytecodes if not linked yet.
1819 */
1820 lttng_enabler_link_bytecode(event->desc,
1821 lttng_static_ctx,
1822 &event->bytecode_runtime_head,
1823 lttng_event_enabler_as_enabler(event_enabler));
1824
1825 /* TODO: merge event context. */
1826 }
1827 return 0;
1828 }
1829
1830 /*
1831 * Called at module load: connect the probe on all enablers matching
1832 * this event.
1833 * Called with sessions lock held.
1834 */
1835 int lttng_fix_pending_events(void)
1836 {
1837 struct lttng_session *session;
1838
1839 list_for_each_entry(session, &sessions, list)
1840 lttng_session_lazy_sync_event_enablers(session);
1841 return 0;
1842 }
1843
1844 struct lttng_event_enabler *lttng_event_enabler_create(
1845 enum lttng_enabler_format_type format_type,
1846 struct lttng_kernel_event *event_param,
1847 struct lttng_channel *chan)
1848 {
1849 struct lttng_event_enabler *event_enabler;
1850
1851 event_enabler = kzalloc(sizeof(*event_enabler), GFP_KERNEL);
1852 if (!event_enabler)
1853 return NULL;
1854 event_enabler->base.format_type = format_type;
1855 INIT_LIST_HEAD(&event_enabler->base.filter_bytecode_head);
1856 memcpy(&event_enabler->base.event_param, event_param,
1857 sizeof(event_enabler->base.event_param));
1858 event_enabler->chan = chan;
1859 /* ctx left NULL */
1860 event_enabler->base.enabled = 0;
1861 event_enabler->base.evtype = LTTNG_TYPE_ENABLER;
1862 mutex_lock(&sessions_mutex);
1863 list_add(&event_enabler->node, &event_enabler->chan->session->enablers_head);
1864 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
1865 mutex_unlock(&sessions_mutex);
1866 return event_enabler;
1867 }
1868
1869 int lttng_event_enabler_enable(struct lttng_event_enabler *event_enabler)
1870 {
1871 mutex_lock(&sessions_mutex);
1872 lttng_event_enabler_as_enabler(event_enabler)->enabled = 1;
1873 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
1874 mutex_unlock(&sessions_mutex);
1875 return 0;
1876 }
1877
1878 int lttng_event_enabler_disable(struct lttng_event_enabler *event_enabler)
1879 {
1880 mutex_lock(&sessions_mutex);
1881 lttng_event_enabler_as_enabler(event_enabler)->enabled = 0;
1882 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
1883 mutex_unlock(&sessions_mutex);
1884 return 0;
1885 }
1886
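/*
 * Copy a filter bytecode program from user space and append it to the
 * enabler's bytecode list. The bytecode is only linked against
 * matching events later, when enablers are synchronized (see the
 * lttng_enabler_link_bytecode() call in lttng_event_enabler_ref_events()).
 */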
1887 static
1888 int lttng_enabler_attach_bytecode(struct lttng_enabler *enabler,
1889 struct lttng_kernel_filter_bytecode __user *bytecode)
1890 {
1891 struct lttng_filter_bytecode_node *bytecode_node;
1892 uint32_t bytecode_len;
1893 int ret;
1894
1895 ret = get_user(bytecode_len, &bytecode->len);
1896 if (ret)
1897 return ret;
1898 bytecode_node = kzalloc(sizeof(*bytecode_node) + bytecode_len,
1899 GFP_KERNEL);
1900 if (!bytecode_node)
1901 return -ENOMEM;
1902 ret = copy_from_user(&bytecode_node->bc, bytecode,
1903 sizeof(*bytecode) + bytecode_len);
1904 if (ret)
1905 goto error_free;
1906
1907 bytecode_node->enabler = enabler;
1908 /* Enforce length based on allocated size */
1909 bytecode_node->bc.len = bytecode_len;
1910 list_add_tail(&bytecode_node->node, &enabler->filter_bytecode_head);
1911
1912 return 0;
1913
1914 error_free:
1915 kfree(bytecode_node);
1916 return ret;
1917 }
1918
1919 int lttng_event_enabler_attach_bytecode(struct lttng_event_enabler *event_enabler,
1920 struct lttng_kernel_filter_bytecode __user *bytecode)
1921 {
1922 int ret;
1923 ret = lttng_enabler_attach_bytecode(
1924 lttng_event_enabler_as_enabler(event_enabler), bytecode);
1925 if (ret)
1926 goto error;
1927
1928 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
1929 return 0;
1930
1931 error:
1932 return ret;
1933 }
1934
1935 int lttng_event_add_callsite(struct lttng_event *event,
1936 struct lttng_kernel_event_callsite __user *callsite)
1937 {
1938
1939 switch (event->instrumentation) {
1940 case LTTNG_KERNEL_UPROBE:
1941 return lttng_uprobes_add_callsite(event, callsite);
1942 default:
1943 return -EINVAL;
1944 }
1945 }
1946
1947 int lttng_event_enabler_attach_context(struct lttng_event_enabler *event_enabler,
1948 struct lttng_kernel_context *context_param)
1949 {
1950 return -ENOSYS;
1951 }
1952
1953 static
1954 void lttng_enabler_destroy(struct lttng_enabler *enabler)
1955 {
1956 struct lttng_filter_bytecode_node *filter_node, *tmp_filter_node;
1957
1958 /* Destroy filter bytecode */
1959 list_for_each_entry_safe(filter_node, tmp_filter_node,
1960 &enabler->filter_bytecode_head, node) {
1961 kfree(filter_node);
1962 }
1963 }
1964
1965 static
1966 void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler)
1967 {
1968 lttng_enabler_destroy(lttng_event_enabler_as_enabler(event_enabler));
1969
1970 /* Destroy contexts */
1971 lttng_destroy_context(event_enabler->ctx);
1972
1973 list_del(&event_enabler->node);
1974 kfree(event_enabler);
1975 }
1976
1977 struct lttng_event_notifier_enabler *lttng_event_notifier_enabler_create(
1978 struct lttng_event_notifier_group *event_notifier_group,
1979 enum lttng_enabler_format_type format_type,
1980 struct lttng_kernel_event_notifier *event_notifier_param)
1981 {
1982 struct lttng_event_notifier_enabler *event_notifier_enabler;
1983
1984 event_notifier_enabler = kzalloc(sizeof(*event_notifier_enabler), GFP_KERNEL);
1985 if (!event_notifier_enabler)
1986 return NULL;
1987
1988 event_notifier_enabler->base.format_type = format_type;
1989 INIT_LIST_HEAD(&event_notifier_enabler->base.filter_bytecode_head);
1990
1991 memcpy(&event_notifier_enabler->base.event_param.name, event_notifier_param->event.name,
1992 sizeof(event_notifier_enabler->base.event_param.name));
1993 event_notifier_enabler->base.event_param.instrumentation = event_notifier_param->event.instrumentation;
1994 event_notifier_enabler->base.evtype = LTTNG_TYPE_ENABLER;
1995
1996 event_notifier_enabler->base.enabled = 0;
1997 event_notifier_enabler->base.user_token = event_notifier_param->event.token;
1998 event_notifier_enabler->group = event_notifier_group;
1999
2000 mutex_lock(&sessions_mutex);
2001 list_add(&event_notifier_enabler->node, &event_notifier_enabler->group->enablers_head);
2002
2003 mutex_unlock(&sessions_mutex);
2004
2005 return event_notifier_enabler;
2006 }
2007
2008 int lttng_event_notifier_enabler_enable(
2009 struct lttng_event_notifier_enabler *event_notifier_enabler)
2010 {
2011 mutex_lock(&sessions_mutex);
2012 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 1;
2013 mutex_unlock(&sessions_mutex);
2014 return 0;
2015 }
2016
2017 int lttng_event_notifier_enabler_disable(
2018 struct lttng_event_notifier_enabler *event_notifier_enabler)
2019 {
2020 mutex_lock(&sessions_mutex);
2021 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 0;
2022 mutex_unlock(&sessions_mutex);
2023 return 0;
2024 }
2025
2026 int lttng_event_notifier_enabler_attach_bytecode(
2027 struct lttng_event_notifier_enabler *event_notifier_enabler,
2028 struct lttng_kernel_filter_bytecode __user *bytecode)
2029 {
2030 int ret;
2031
2032 ret = lttng_enabler_attach_bytecode(
2033 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler),
2034 bytecode);
2035 if (ret)
2036 goto error;
2037
2038 return 0;
2039
2040 error:
2041 return ret;
2042 }
2043
2044 int lttng_event_notifier_enabler_attach_context(
2045 struct lttng_event_notifier_enabler *event_notifier_enabler,
2046 struct lttng_kernel_context *context_param)
2047 {
2048 return -ENOSYS;
2049 }
2050
2051 static
2052 void lttng_event_notifier_enabler_destroy(
2053 struct lttng_event_notifier_enabler *event_notifier_enabler)
2054 {
2055 if (!event_notifier_enabler) {
2056 return;
2057 }
2058
2059 list_del(&event_notifier_enabler->node);
2060
2061 lttng_enabler_destroy(lttng_event_notifier_enabler_as_enabler(event_notifier_enabler));
2062 kfree(event_notifier_enabler);
2063 }
2064
2065 /*
2066 * lttng_session_sync_event_enablers should be called just before starting a
2067 * session.
2068 * Should be called with sessions mutex held.
2069 */
2070 static
2071 void lttng_session_sync_event_enablers(struct lttng_session *session)
2072 {
2073 struct lttng_event_enabler *event_enabler;
2074 struct lttng_event *event;
2075
2076 list_for_each_entry(event_enabler, &session->enablers_head, node)
2077 lttng_event_enabler_ref_events(event_enabler);
2078 /*
2079 * For each event, if at least one of its enablers is enabled,
2080 * and its channel and session transient states are enabled, we
2081 * enable the event, else we disable it.
2082 */
2083 list_for_each_entry(event, &session->events, list) {
2084 struct lttng_enabler_ref *enabler_ref;
2085 struct lttng_bytecode_runtime *runtime;
2086 int enabled = 0, has_enablers_without_bytecode = 0;
2087
2088 switch (event->instrumentation) {
2089 case LTTNG_KERNEL_TRACEPOINT:
2090 case LTTNG_KERNEL_SYSCALL:
2091 /* Enable events */
2092 list_for_each_entry(enabler_ref,
2093 &event->enablers_ref_head, node) {
2094 if (enabler_ref->ref->enabled) {
2095 enabled = 1;
2096 break;
2097 }
2098 }
2099 break;
2100 default:
2101 /* Not handled with lazy sync. */
2102 continue;
2103 }
2104 /*
2105 * Enabled state is based on union of enablers, with
2106 * intersection of session and channel transient enable
2107 * states.
2108 */
2109 enabled = enabled && session->tstate && event->chan->tstate;
2110
2111 WRITE_ONCE(event->enabled, enabled);
2112 /*
2113 * Sync tracepoint registration with event enabled
2114 * state.
2115 */
2116 if (enabled) {
2117 register_event(event);
2118 } else {
2119 _lttng_event_unregister(event);
2120 }
2121
2122 /* Check if the event has enablers without bytecode enabled */
2123 list_for_each_entry(enabler_ref,
2124 &event->enablers_ref_head, node) {
2125 if (enabler_ref->ref->enabled
2126 && list_empty(&enabler_ref->ref->filter_bytecode_head)) {
2127 has_enablers_without_bytecode = 1;
2128 break;
2129 }
2130 }
2131 event->has_enablers_without_bytecode =
2132 has_enablers_without_bytecode;
2133
2134 /* Enable filters */
2135 list_for_each_entry(runtime,
2136 &event->bytecode_runtime_head, node)
2137 lttng_filter_sync_state(runtime);
2138 }
2139 }
2140
2141 /*
2142 * Apply enablers to session events, adding events to session if need
2143 * be. It is required after each modification applied to an active
2144 * session, and right before session "start".
2145 * "lazy" sync means we only sync if required.
2146 * Should be called with sessions mutex held.
2147 */
2148 static
2149 void lttng_session_lazy_sync_event_enablers(struct lttng_session *session)
2150 {
2151 /* We can skip if session is not active */
2152 if (!session->active)
2153 return;
2154 lttng_session_sync_event_enablers(session);
2155 }
2156
2157 /*
2158 * Serialize at most one packet worth of metadata into a metadata
2159 * channel.
2160 * We grab the metadata cache mutex to get exclusive access to our metadata
2161 * buffer and to the metadata cache. Exclusive access to the metadata buffer
2162 * allows us to do racy operations such as checking the remaining space left in the
2163 * packet and writing to it, since mutual exclusion protects us from concurrent writes.
2164 * Mutual exclusion on the metadata cache allows us to read the cache content
2165 * without racing against reallocation of the cache by updates.
2166 * Returns the number of bytes written in the channel, 0 if no data
2167 * was written and a negative value on error.
2168 */
2169 int lttng_metadata_output_channel(struct lttng_metadata_stream *stream,
2170 struct channel *chan, bool *coherent)
2171 {
2172 struct lib_ring_buffer_ctx ctx;
2173 int ret = 0;
2174 size_t len, reserve_len;
2175
2176 /*
2177 * Ensure we support multiple get_next / put sequences followed by
2178 * put_next. The metadata cache lock protects reading the metadata
2179 * cache. It can indeed be read concurrently by "get_next_subbuf" and
2180 * "flush" operations on the buffer invoked by different processes.
2181 * Moreover, since the metadata cache memory can be reallocated, we
2182 * need to have exclusive access against updates even though we only
2183 * read it.
2184 */
2185 mutex_lock(&stream->metadata_cache->lock);
2186 WARN_ON(stream->metadata_in < stream->metadata_out);
2187 if (stream->metadata_in != stream->metadata_out)
2188 goto end;
2189
2190 /* Metadata regenerated, change the version. */
2191 if (stream->metadata_cache->version != stream->version)
2192 stream->version = stream->metadata_cache->version;
2193
2194 len = stream->metadata_cache->metadata_written -
2195 stream->metadata_in;
2196 if (!len)
2197 goto end;
2198 reserve_len = min_t(size_t,
2199 stream->transport->ops.packet_avail_size(chan),
2200 len);
2201 lib_ring_buffer_ctx_init(&ctx, chan, NULL, reserve_len,
2202 sizeof(char), -1);
2203 /*
2204 * If reservation failed, return an error to the caller.
2205 */
2206 ret = stream->transport->ops.event_reserve(&ctx, 0);
2207 if (ret != 0) {
2208 printk(KERN_WARNING "LTTng: Metadata event reservation failed\n");
2209 stream->coherent = false;
2210 goto end;
2211 }
2212 stream->transport->ops.event_write(&ctx,
2213 stream->metadata_cache->data + stream->metadata_in,
2214 reserve_len);
2215 stream->transport->ops.event_commit(&ctx);
2216 stream->metadata_in += reserve_len;
2217 if (reserve_len < len)
2218 stream->coherent = false;
2219 else
2220 stream->coherent = true;
2221 ret = reserve_len;
2222
2223 end:
2224 if (coherent)
2225 *coherent = stream->coherent;
2226 mutex_unlock(&stream->metadata_cache->lock);
2227 return ret;
2228 }
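/*
 * Hypothetical caller sketch (for illustration only, not an upstream call
 * site): drain the metadata cache into a metadata channel until no bytes
 * are produced or an error occurs.
 */
static inline int example_drain_metadata_cache(struct lttng_metadata_stream *stream,
		struct channel *chan)
{
	bool coherent;
	int ret;

	do {
		ret = lttng_metadata_output_channel(stream, chan, &coherent);
	} while (ret > 0);	/* > 0: bytes were written, keep going. */
	return ret;		/* 0: cache fully consumed, < 0: error. */
}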
2229
2230 static
2231 void lttng_metadata_begin(struct lttng_session *session)
2232 {
2233 if (atomic_inc_return(&session->metadata_cache->producing) == 1)
2234 mutex_lock(&session->metadata_cache->lock);
2235 }
2236
2237 static
2238 void lttng_metadata_end(struct lttng_session *session)
2239 {
2240 WARN_ON_ONCE(!atomic_read(&session->metadata_cache->producing));
2241 if (atomic_dec_return(&session->metadata_cache->producing) == 0) {
2242 struct lttng_metadata_stream *stream;
2243
2244 list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list)
2245 wake_up_interruptible(&stream->read_wait);
2246 mutex_unlock(&session->metadata_cache->lock);
2247 }
2248 }
2249
2250 /*
2251 * Write the metadata to the metadata cache.
2252 * Must be called with sessions_mutex held.
2253 * The metadata cache lock protects us from concurrent read access from
2254 * a thread outputting metadata content to the ring buffer.
2255 * The content of the printf is printed as a single atomic metadata
2256 * transaction.
2257 */
2258 int lttng_metadata_printf(struct lttng_session *session,
2259 const char *fmt, ...)
2260 {
2261 char *str;
2262 size_t len;
2263 va_list ap;
2264
2265 WARN_ON_ONCE(!LTTNG_READ_ONCE(session->active));
2266
2267 va_start(ap, fmt);
2268 str = kvasprintf(GFP_KERNEL, fmt, ap);
2269 va_end(ap);
2270 if (!str)
2271 return -ENOMEM;
2272
2273 len = strlen(str);
2274 WARN_ON_ONCE(!atomic_read(&session->metadata_cache->producing));
2275 if (session->metadata_cache->metadata_written + len >
2276 session->metadata_cache->cache_alloc) {
2277 char *tmp_cache_realloc;
2278 unsigned int tmp_cache_alloc_size;
2279
2280 tmp_cache_alloc_size = max_t(unsigned int,
2281 session->metadata_cache->cache_alloc + len,
2282 session->metadata_cache->cache_alloc << 1);
2283 tmp_cache_realloc = vzalloc(tmp_cache_alloc_size);
2284 if (!tmp_cache_realloc)
2285 goto err;
2286 if (session->metadata_cache->data) {
2287 memcpy(tmp_cache_realloc,
2288 session->metadata_cache->data,
2289 session->metadata_cache->cache_alloc);
2290 vfree(session->metadata_cache->data);
2291 }
2292
2293 session->metadata_cache->cache_alloc = tmp_cache_alloc_size;
2294 session->metadata_cache->data = tmp_cache_realloc;
2295 }
2296 memcpy(session->metadata_cache->data +
2297 session->metadata_cache->metadata_written,
2298 str, len);
2299 session->metadata_cache->metadata_written += len;
2300 kfree(str);
2301
2302 return 0;
2303
2304 err:
2305 kfree(str);
2306 return -ENOMEM;
2307 }
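/*
 * Usage sketch (illustrative, not an upstream call site): metadata writers
 * bracket a group of lttng_metadata_printf() calls with
 * lttng_metadata_begin()/lttng_metadata_end() so the whole group is
 * published to readers as one atomic transaction, as the statedump
 * functions below do.
 */
static inline int example_emit_metadata_comment(struct lttng_session *session)
{
	int ret;

	lttng_metadata_begin(session);
	ret = lttng_metadata_printf(session, "/* example comment */\n");
	lttng_metadata_end(session);
	return ret;
}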
2308
2309 static
2310 int print_tabs(struct lttng_session *session, size_t nesting)
2311 {
2312 size_t i;
2313
2314 for (i = 0; i < nesting; i++) {
2315 int ret;
2316
2317 ret = lttng_metadata_printf(session, " ");
2318 if (ret) {
2319 return ret;
2320 }
2321 }
2322 return 0;
2323 }
2324
2325 static
2326 int lttng_field_name_statedump(struct lttng_session *session,
2327 const struct lttng_event_field *field,
2328 size_t nesting)
2329 {
2330 return lttng_metadata_printf(session, " _%s;\n", field->name);
2331 }
2332
2333 static
2334 int _lttng_integer_type_statedump(struct lttng_session *session,
2335 const struct lttng_type *type,
2336 size_t nesting)
2337 {
2338 int ret;
2339
2340 WARN_ON_ONCE(type->atype != atype_integer);
2341 ret = print_tabs(session, nesting);
2342 if (ret)
2343 return ret;
2344 ret = lttng_metadata_printf(session,
2345 "integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s }",
2346 type->u.integer.size,
2347 type->u.integer.alignment,
2348 type->u.integer.signedness,
2349 (type->u.integer.encoding == lttng_encode_none)
2350 ? "none"
2351 : (type->u.integer.encoding == lttng_encode_UTF8)
2352 ? "UTF8"
2353 : "ASCII",
2354 type->u.integer.base,
2355 #if __BYTE_ORDER == __BIG_ENDIAN
2356 type->u.integer.reverse_byte_order ? " byte_order = le;" : ""
2357 #else
2358 type->u.integer.reverse_byte_order ? " byte_order = be;" : ""
2359 #endif
2360 );
2361 return ret;
2362 }
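/*
 * Example of the metadata text emitted by the function above for a
 * native-endian, unsigned 32-bit integer (actual values depend on the type
 * description):
 *
 *   integer { size = 32; align = 32; signed = 0; encoding = none; base = 10; }
 */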
2363
2364 /*
2365 * Must be called with sessions_mutex held.
2366 */
2367 static
2368 int _lttng_struct_type_statedump(struct lttng_session *session,
2369 const struct lttng_type *type,
2370 size_t nesting)
2371 {
2372 int ret;
2373 uint32_t i, nr_fields;
2374 unsigned int alignment;
2375
2376 WARN_ON_ONCE(type->atype != atype_struct_nestable);
2377
2378 ret = print_tabs(session, nesting);
2379 if (ret)
2380 return ret;
2381 ret = lttng_metadata_printf(session,
2382 "struct {\n");
2383 if (ret)
2384 return ret;
2385 nr_fields = type->u.struct_nestable.nr_fields;
2386 for (i = 0; i < nr_fields; i++) {
2387 const struct lttng_event_field *iter_field;
2388
2389 iter_field = &type->u.struct_nestable.fields[i];
2390 ret = _lttng_field_statedump(session, iter_field, nesting + 1);
2391 if (ret)
2392 return ret;
2393 }
2394 ret = print_tabs(session, nesting);
2395 if (ret)
2396 return ret;
2397 alignment = type->u.struct_nestable.alignment;
2398 if (alignment) {
2399 ret = lttng_metadata_printf(session,
2400 "} align(%u)",
2401 alignment);
2402 } else {
2403 ret = lttng_metadata_printf(session,
2404 "}");
2405 }
2406 return ret;
2407 }
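/*
 * Example of the metadata emitted by the function above for a two-field
 * structure with an explicit 8-bit alignment (illustrative field names):
 *
 *   struct {
 *           integer { ... } _cpu;
 *           integer { ... } _count;
 *   } align(8)
 */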
2408
2409 /*
2410 * Must be called with sessions_mutex held.
2411 */
2412 static
2413 int _lttng_struct_field_statedump(struct lttng_session *session,
2414 const struct lttng_event_field *field,
2415 size_t nesting)
2416 {
2417 int ret;
2418
2419 ret = _lttng_struct_type_statedump(session,
2420 &field->type, nesting);
2421 if (ret)
2422 return ret;
2423 return lttng_field_name_statedump(session, field, nesting);
2424 }
2425
2426 /*
2427 * Must be called with sessions_mutex held.
2428 */
2429 static
2430 int _lttng_variant_type_statedump(struct lttng_session *session,
2431 const struct lttng_type *type,
2432 size_t nesting)
2433 {
2434 int ret;
2435 uint32_t i, nr_choices;
2436
2437 WARN_ON_ONCE(type->atype != atype_variant_nestable);
2438 /*
2439 * CTF 1.8 does not allow expressing nonzero variant alignment in a nestable way.
2440 */
2441 if (type->u.variant_nestable.alignment != 0)
2442 return -EINVAL;
2443 ret = print_tabs(session, nesting);
2444 if (ret)
2445 return ret;
2446 ret = lttng_metadata_printf(session,
2447 "variant <_%s> {\n",
2448 type->u.variant_nestable.tag_name);
2449 if (ret)
2450 return ret;
2451 nr_choices = type->u.variant_nestable.nr_choices;
2452 for (i = 0; i < nr_choices; i++) {
2453 const struct lttng_event_field *iter_field;
2454
2455 iter_field = &type->u.variant_nestable.choices[i];
2456 ret = _lttng_field_statedump(session, iter_field, nesting + 1);
2457 if (ret)
2458 return ret;
2459 }
2460 ret = print_tabs(session, nesting);
2461 if (ret)
2462 return ret;
2463 ret = lttng_metadata_printf(session,
2464 "}");
2465 return ret;
2466 }
2467
2468 /*
2469 * Must be called with sessions_mutex held.
2470 */
2471 static
2472 int _lttng_variant_field_statedump(struct lttng_session *session,
2473 const struct lttng_event_field *field,
2474 size_t nesting)
2475 {
2476 int ret;
2477
2478 ret = _lttng_variant_type_statedump(session,
2479 &field->type, nesting);
2480 if (ret)
2481 return ret;
2482 return lttng_field_name_statedump(session, field, nesting);
2483 }
2484
2485 /*
2486 * Must be called with sessions_mutex held.
2487 */
2488 static
2489 int _lttng_array_field_statedump(struct lttng_session *session,
2490 const struct lttng_event_field *field,
2491 size_t nesting)
2492 {
2493 int ret;
2494 const struct lttng_type *elem_type;
2495
2496 WARN_ON_ONCE(field->type.atype != atype_array_nestable);
2497
2498 if (field->type.u.array_nestable.alignment) {
2499 ret = print_tabs(session, nesting);
2500 if (ret)
2501 return ret;
2502 ret = lttng_metadata_printf(session,
2503 "struct { } align(%u) _%s_padding;\n",
2504 field->type.u.array_nestable.alignment * CHAR_BIT,
2505 field->name);
2506 if (ret)
2507 return ret;
2508 }
2509 /*
2510 * Nested compound types: Only arrays of structures and variants are
2511 * currently supported.
2512 */
2513 elem_type = field->type.u.array_nestable.elem_type;
2514 switch (elem_type->atype) {
2515 case atype_integer:
2516 case atype_struct_nestable:
2517 case atype_variant_nestable:
2518 ret = _lttng_type_statedump(session, elem_type, nesting);
2519 if (ret)
2520 return ret;
2521 break;
2522
2523 default:
2524 return -EINVAL;
2525 }
2526 ret = lttng_metadata_printf(session,
2527 " _%s[%u];\n",
2528 field->name,
2529 field->type.u.array_nestable.length);
2530 return ret;
2531 }
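/*
 * Example of the metadata emitted by the function above for an array of
 * sixteen unsigned 8-bit integers named "data" (illustrative):
 *
 *   integer { size = 8; align = 8; signed = 0; encoding = none; base = 10; } _data[16];
 */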
2532
2533 /*
2534 * Must be called with sessions_mutex held.
2535 */
2536 static
2537 int _lttng_sequence_field_statedump(struct lttng_session *session,
2538 const struct lttng_event_field *field,
2539 size_t nesting)
2540 {
2541 int ret;
2542 const char *length_name;
2543 const struct lttng_type *elem_type;
2544
2545 WARN_ON_ONCE(field->type.atype != atype_sequence_nestable);
2546
2547 length_name = field->type.u.sequence_nestable.length_name;
2548
2549 if (field->type.u.sequence_nestable.alignment) {
2550 ret = print_tabs(session, nesting);
2551 if (ret)
2552 return ret;
2553 ret = lttng_metadata_printf(session,
2554 "struct { } align(%u) _%s_padding;\n",
2555 field->type.u.sequence_nestable.alignment * CHAR_BIT,
2556 field->name);
2557 if (ret)
2558 return ret;
2559 }
2560
2561 /*
2562 * Nested compound types: Only sequences of structures and variants are
2563 * currently supported.
2564 */
2565 elem_type = field->type.u.sequence_nestable.elem_type;
2566 switch (elem_type->atype) {
2567 case atype_integer:
2568 case atype_struct_nestable:
2569 case atype_variant_nestable:
2570 ret = _lttng_type_statedump(session, elem_type, nesting);
2571 if (ret)
2572 return ret;
2573 break;
2574
2575 default:
2576 return -EINVAL;
2577 }
2578 ret = lttng_metadata_printf(session,
2579 " _%s[ _%s ];\n",
2580 field->name,
2581 field->type.u.sequence_nestable.length_name);
2582 return ret;
2583 }
2584
2585 /*
2586 * Must be called with sessions_mutex held.
2587 */
2588 static
2589 int _lttng_enum_type_statedump(struct lttng_session *session,
2590 const struct lttng_type *type,
2591 size_t nesting)
2592 {
2593 const struct lttng_enum_desc *enum_desc;
2594 const struct lttng_type *container_type;
2595 int ret;
2596 unsigned int i, nr_entries;
2597
2598 container_type = type->u.enum_nestable.container_type;
2599 if (container_type->atype != atype_integer) {
2600 ret = -EINVAL;
2601 goto end;
2602 }
2603 enum_desc = type->u.enum_nestable.desc;
2604 nr_entries = enum_desc->nr_entries;
2605
2606 ret = print_tabs(session, nesting);
2607 if (ret)
2608 goto end;
2609 ret = lttng_metadata_printf(session, "enum : ");
2610 if (ret)
2611 goto end;
2612 ret = _lttng_integer_type_statedump(session, container_type, 0);
2613 if (ret)
2614 goto end;
2615 ret = lttng_metadata_printf(session, " {\n");
2616 if (ret)
2617 goto end;
2618 /* Dump all entries */
2619 for (i = 0; i < nr_entries; i++) {
2620 const struct lttng_enum_entry *entry = &enum_desc->entries[i];
2621 int j, len;
2622
2623 ret = print_tabs(session, nesting + 1);
2624 if (ret)
2625 goto end;
2626 ret = lttng_metadata_printf(session,
2627 "\"");
2628 if (ret)
2629 goto end;
2630 len = strlen(entry->string);
2631 /* Escape the '"' and '\' characters */
2632 for (j = 0; j < len; j++) {
2633 char c = entry->string[j];
2634
2635 switch (c) {
2636 case '"':
2637 ret = lttng_metadata_printf(session,
2638 "\\\"");
2639 break;
2640 case '\\':
2641 ret = lttng_metadata_printf(session,
2642 "\\\\");
2643 break;
2644 default:
2645 ret = lttng_metadata_printf(session,
2646 "%c", c);
2647 break;
2648 }
2649 if (ret)
2650 goto end;
2651 }
2652 ret = lttng_metadata_printf(session, "\"");
2653 if (ret)
2654 goto end;
2655
2656 if (entry->options.is_auto) {
2657 ret = lttng_metadata_printf(session, ",\n");
2658 if (ret)
2659 goto end;
2660 } else {
2661 ret = lttng_metadata_printf(session,
2662 " = ");
2663 if (ret)
2664 goto end;
2665 if (entry->start.signedness)
2666 ret = lttng_metadata_printf(session,
2667 "%lld", (long long) entry->start.value);
2668 else
2669 ret = lttng_metadata_printf(session,
2670 "%llu", entry->start.value);
2671 if (ret)
2672 goto end;
2673 if (entry->start.signedness == entry->end.signedness &&
2674 entry->start.value
2675 == entry->end.value) {
2676 ret = lttng_metadata_printf(session,
2677 ",\n");
2678 } else {
2679 if (entry->end.signedness) {
2680 ret = lttng_metadata_printf(session,
2681 " ... %lld,\n",
2682 (long long) entry->end.value);
2683 } else {
2684 ret = lttng_metadata_printf(session,
2685 " ... %llu,\n",
2686 entry->end.value);
2687 }
2688 }
2689 if (ret)
2690 goto end;
2691 }
2692 }
2693 ret = print_tabs(session, nesting);
2694 if (ret)
2695 goto end;
2696 ret = lttng_metadata_printf(session, "}");
2697 end:
2698 return ret;
2699 }
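/*
 * Example of the metadata emitted by the function above for a small
 * enumeration backed by a 32-bit container (illustrative labels):
 *
 *   enum : integer { size = 32; ... } {
 *           "RUNNING" = 0,
 *           "BLOCKED" = 1 ... 3,
 *   }
 */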
2700
2701 /*
2702 * Must be called with sessions_mutex held.
2703 */
2704 static
2705 int _lttng_enum_field_statedump(struct lttng_session *session,
2706 const struct lttng_event_field *field,
2707 size_t nesting)
2708 {
2709 int ret;
2710
2711 ret = _lttng_enum_type_statedump(session, &field->type, nesting);
2712 if (ret)
2713 return ret;
2714 return lttng_field_name_statedump(session, field, nesting);
2715 }
2716
2717 static
2718 int _lttng_integer_field_statedump(struct lttng_session *session,
2719 const struct lttng_event_field *field,
2720 size_t nesting)
2721 {
2722 int ret;
2723
2724 ret = _lttng_integer_type_statedump(session, &field->type, nesting);
2725 if (ret)
2726 return ret;
2727 return lttng_field_name_statedump(session, field, nesting);
2728 }
2729
2730 static
2731 int _lttng_string_type_statedump(struct lttng_session *session,
2732 const struct lttng_type *type,
2733 size_t nesting)
2734 {
2735 int ret;
2736
2737 WARN_ON_ONCE(type->atype != atype_string);
2738 /* Default encoding is UTF8 */
2739 ret = print_tabs(session, nesting);
2740 if (ret)
2741 return ret;
2742 ret = lttng_metadata_printf(session,
2743 "string%s",
2744 type->u.string.encoding == lttng_encode_ASCII ?
2745 " { encoding = ASCII; }" : "");
2746 return ret;
2747 }
2748
2749 static
2750 int _lttng_string_field_statedump(struct lttng_session *session,
2751 const struct lttng_event_field *field,
2752 size_t nesting)
2753 {
2754 int ret;
2755
2756 WARN_ON_ONCE(field->type.atype != atype_string);
2757 ret = _lttng_string_type_statedump(session, &field->type, nesting);
2758 if (ret)
2759 return ret;
2760 return lttng_field_name_statedump(session, field, nesting);
2761 }
2762
2763 /*
2764 * Must be called with sessions_mutex held.
2765 */
2766 static
2767 int _lttng_type_statedump(struct lttng_session *session,
2768 const struct lttng_type *type,
2769 size_t nesting)
2770 {
2771 int ret = 0;
2772
2773 switch (type->atype) {
2774 case atype_integer:
2775 ret = _lttng_integer_type_statedump(session, type, nesting);
2776 break;
2777 case atype_enum_nestable:
2778 ret = _lttng_enum_type_statedump(session, type, nesting);
2779 break;
2780 case atype_string:
2781 ret = _lttng_string_type_statedump(session, type, nesting);
2782 break;
2783 case atype_struct_nestable:
2784 ret = _lttng_struct_type_statedump(session, type, nesting);
2785 break;
2786 case atype_variant_nestable:
2787 ret = _lttng_variant_type_statedump(session, type, nesting);
2788 break;
2789
2790 /* Nested arrays and sequences are not supported yet. */
2791 case atype_array_nestable:
2792 case atype_sequence_nestable:
2793 default:
2794 WARN_ON_ONCE(1);
2795 return -EINVAL;
2796 }
2797 return ret;
2798 }
2799
2800 /*
2801 * Must be called with sessions_mutex held.
2802 */
2803 static
2804 int _lttng_field_statedump(struct lttng_session *session,
2805 const struct lttng_event_field *field,
2806 size_t nesting)
2807 {
2808 int ret = 0;
2809
2810 switch (field->type.atype) {
2811 case atype_integer:
2812 ret = _lttng_integer_field_statedump(session, field, nesting);
2813 break;
2814 case atype_enum_nestable:
2815 ret = _lttng_enum_field_statedump(session, field, nesting);
2816 break;
2817 case atype_string:
2818 ret = _lttng_string_field_statedump(session, field, nesting);
2819 break;
2820 case atype_struct_nestable:
2821 ret = _lttng_struct_field_statedump(session, field, nesting);
2822 break;
2823 case atype_array_nestable:
2824 ret = _lttng_array_field_statedump(session, field, nesting);
2825 break;
2826 case atype_sequence_nestable:
2827 ret = _lttng_sequence_field_statedump(session, field, nesting);
2828 break;
2829 case atype_variant_nestable:
2830 ret = _lttng_variant_field_statedump(session, field, nesting);
2831 break;
2832
2833 default:
2834 WARN_ON_ONCE(1);
2835 return -EINVAL;
2836 }
2837 return ret;
2838 }
2839
2840 static
2841 int _lttng_context_metadata_statedump(struct lttng_session *session,
2842 struct lttng_ctx *ctx)
2843 {
2844 int ret = 0;
2845 int i;
2846
2847 if (!ctx)
2848 return 0;
2849 for (i = 0; i < ctx->nr_fields; i++) {
2850 const struct lttng_ctx_field *field = &ctx->fields[i];
2851
2852 ret = _lttng_field_statedump(session, &field->event_field, 2);
2853 if (ret)
2854 return ret;
2855 }
2856 return ret;
2857 }
2858
2859 static
2860 int _lttng_fields_metadata_statedump(struct lttng_session *session,
2861 struct lttng_event *event)
2862 {
2863 const struct lttng_event_desc *desc = event->desc;
2864 int ret = 0;
2865 int i;
2866
2867 for (i = 0; i < desc->nr_fields; i++) {
2868 const struct lttng_event_field *field = &desc->fields[i];
2869
2870 ret = _lttng_field_statedump(session, field, 2);
2871 if (ret)
2872 return ret;
2873 }
2874 return ret;
2875 }
2876
2877 /*
2878 * Must be called with sessions_mutex held.
2879 * The entire event metadata is printed as a single atomic metadata
2880 * transaction.
2881 */
2882 static
2883 int _lttng_event_metadata_statedump(struct lttng_session *session,
2884 struct lttng_channel *chan,
2885 struct lttng_event *event)
2886 {
2887 int ret = 0;
2888
2889 if (event->metadata_dumped || !LTTNG_READ_ONCE(session->active))
2890 return 0;
2891 if (chan->channel_type == METADATA_CHANNEL)
2892 return 0;
2893
2894 lttng_metadata_begin(session);
2895
2896 ret = lttng_metadata_printf(session,
2897 "event {\n"
2898 " name = \"%s\";\n"
2899 " id = %u;\n"
2900 " stream_id = %u;\n",
2901 event->desc->name,
2902 event->id,
2903 event->chan->id);
2904 if (ret)
2905 goto end;
2906
2907 if (event->ctx) {
2908 ret = lttng_metadata_printf(session,
2909 " context := struct {\n");
2910 if (ret)
2911 goto end;
2912 }
2913 ret = _lttng_context_metadata_statedump(session, event->ctx);
2914 if (ret)
2915 goto end;
2916 if (event->ctx) {
2917 ret = lttng_metadata_printf(session,
2918 " };\n");
2919 if (ret)
2920 goto end;
2921 }
2922
2923 ret = lttng_metadata_printf(session,
2924 " fields := struct {\n"
2925 );
2926 if (ret)
2927 goto end;
2928
2929 ret = _lttng_fields_metadata_statedump(session, event);
2930 if (ret)
2931 goto end;
2932
2933 /*
2934 * LTTng space reservation can only reserve multiples of the
2935 * byte size.
2936 */
2937 ret = lttng_metadata_printf(session,
2938 " };\n"
2939 "};\n\n");
2940 if (ret)
2941 goto end;
2942
2943 event->metadata_dumped = 1;
2944 end:
2945 lttng_metadata_end(session);
2946 return ret;
2947
2948 }
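/*
 * Example of the metadata emitted by the function above for an event
 * without contexts (illustrative name and ids):
 *
 *   event {
 *           name = "sched_switch";
 *           id = 3;
 *           stream_id = 0;
 *           fields := struct {
 *                   ...
 *           };
 *   };
 */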
2949
2950 /*
2951 * Must be called with sessions_mutex held.
2952 * The entire channel metadata is printed as a single atomic metadata
2953 * transaction.
2954 */
2955 static
2956 int _lttng_channel_metadata_statedump(struct lttng_session *session,
2957 struct lttng_channel *chan)
2958 {
2959 int ret = 0;
2960
2961 if (chan->metadata_dumped || !LTTNG_READ_ONCE(session->active))
2962 return 0;
2963
2964 if (chan->channel_type == METADATA_CHANNEL)
2965 return 0;
2966
2967 lttng_metadata_begin(session);
2968
2969 WARN_ON_ONCE(!chan->header_type);
2970 ret = lttng_metadata_printf(session,
2971 "stream {\n"
2972 " id = %u;\n"
2973 " event.header := %s;\n"
2974 " packet.context := struct packet_context;\n",
2975 chan->id,
2976 chan->header_type == 1 ? "struct event_header_compact" :
2977 "struct event_header_large");
2978 if (ret)
2979 goto end;
2980
2981 if (chan->ctx) {
2982 ret = lttng_metadata_printf(session,
2983 " event.context := struct {\n");
2984 if (ret)
2985 goto end;
2986 }
2987 ret = _lttng_context_metadata_statedump(session, chan->ctx);
2988 if (ret)
2989 goto end;
2990 if (chan->ctx) {
2991 ret = lttng_metadata_printf(session,
2992 " };\n");
2993 if (ret)
2994 goto end;
2995 }
2996
2997 ret = lttng_metadata_printf(session,
2998 "};\n\n");
2999
3000 chan->metadata_dumped = 1;
3001 end:
3002 lttng_metadata_end(session);
3003 return ret;
3004 }
3005
3006 /*
3007 * Must be called with sessions_mutex held.
3008 */
3009 static
3010 int _lttng_stream_packet_context_declare(struct lttng_session *session)
3011 {
3012 return lttng_metadata_printf(session,
3013 "struct packet_context {\n"
3014 " uint64_clock_monotonic_t timestamp_begin;\n"
3015 " uint64_clock_monotonic_t timestamp_end;\n"
3016 " uint64_t content_size;\n"
3017 " uint64_t packet_size;\n"
3018 " uint64_t packet_seq_num;\n"
3019 " unsigned long events_discarded;\n"
3020 " uint32_t cpu_id;\n"
3021 "};\n\n"
3022 );
3023 }
3024
3025 /*
3026 * Compact header:
3027 * id: range: 0 - 30.
3028 * id 31 is reserved to indicate an extended header.
3029 *
3030 * Large header:
3031 * id: range: 0 - 65534.
3032 * id 65535 is reserved to indicate an extended header.
3033 *
3034 * Must be called with sessions_mutex held.
3035 */
3036 static
3037 int _lttng_event_header_declare(struct lttng_session *session)
3038 {
3039 return lttng_metadata_printf(session,
3040 "struct event_header_compact {\n"
3041 " enum : uint5_t { compact = 0 ... 30, extended = 31 } id;\n"
3042 " variant <id> {\n"
3043 " struct {\n"
3044 " uint27_clock_monotonic_t timestamp;\n"
3045 " } compact;\n"
3046 " struct {\n"
3047 " uint32_t id;\n"
3048 " uint64_clock_monotonic_t timestamp;\n"
3049 " } extended;\n"
3050 " } v;\n"
3051 "} align(%u);\n"
3052 "\n"
3053 "struct event_header_large {\n"
3054 " enum : uint16_t { compact = 0 ... 65534, extended = 65535 } id;\n"
3055 " variant <id> {\n"
3056 " struct {\n"
3057 " uint32_clock_monotonic_t timestamp;\n"
3058 " } compact;\n"
3059 " struct {\n"
3060 " uint32_t id;\n"
3061 " uint64_clock_monotonic_t timestamp;\n"
3062 " } extended;\n"
3063 " } v;\n"
3064 "} align(%u);\n\n",
3065 lttng_alignof(uint32_t) * CHAR_BIT,
3066 lttng_alignof(uint16_t) * CHAR_BIT
3067 );
3068 }
3069
3070 /*
3071 * Approximation of NTP time of day to clock monotonic correlation,
3072 * taken at start of trace.
3073 * Yes, this is only an approximation. Yes, we can (and will) do better
3074 * in future versions.
3075 * This function may return a negative offset. It may happen if the
3076 * system sets the REALTIME clock to 0 after boot.
3077 *
3078 * Use a 64-bit timespec on kernels that have it; this makes 32-bit
3079 * architectures y2038 compliant.
3080 */
3081 static
3082 int64_t measure_clock_offset(void)
3083 {
3084 uint64_t monotonic_avg, monotonic[2], realtime;
3085 uint64_t tcf = trace_clock_freq();
3086 int64_t offset;
3087 unsigned long flags;
3088 #ifdef LTTNG_KERNEL_HAS_TIMESPEC64
3089 struct timespec64 rts = { 0, 0 };
3090 #else
3091 struct timespec rts = { 0, 0 };
3092 #endif
3093
3094 /* Disable interrupts to increase correlation precision. */
3095 local_irq_save(flags);
3096 monotonic[0] = trace_clock_read64();
3097 #ifdef LTTNG_KERNEL_HAS_TIMESPEC64
3098 ktime_get_real_ts64(&rts);
3099 #else
3100 getnstimeofday(&rts);
3101 #endif
3102 monotonic[1] = trace_clock_read64();
3103 local_irq_restore(flags);
3104
3105 monotonic_avg = (monotonic[0] + monotonic[1]) >> 1;
3106 realtime = (uint64_t) rts.tv_sec * tcf;
3107 if (tcf == NSEC_PER_SEC) {
3108 realtime += rts.tv_nsec;
3109 } else {
3110 uint64_t n = rts.tv_nsec * tcf;
3111
3112 do_div(n, NSEC_PER_SEC);
3113 realtime += n;
3114 }
3115 offset = (int64_t) realtime - monotonic_avg;
3116 return offset;
3117 }
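/*
 * Worked example (illustrative numbers): with tcf = 1000000000 Hz
 * (nanosecond resolution), rts = { 1700000000 s, 500 ns } and
 * monotonic_avg = 5000000000, the function returns
 *
 *   offset = 1700000000 * 10^9 + 500 - 5 * 10^9 = 1699999995000000500
 *
 * i.e. the realtime clock expressed in trace clock units at the
 * (approximate) instant the monotonic clock was sampled.
 */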
3118
3119 static
3120 int print_escaped_ctf_string(struct lttng_session *session, const char *string)
3121 {
3122 int ret = 0;
3123 size_t i;
3124 char cur;
3125
3126 i = 0;
3127 cur = string[i];
3128 while (cur != '\0') {
3129 switch (cur) {
3130 case '\n':
3131 ret = lttng_metadata_printf(session, "%s", "\\n");
3132 break;
3133 case '\\':
3134 case '"':
3135 ret = lttng_metadata_printf(session, "%c", '\\');
3136 if (ret)
3137 goto error;
3138 /* We still print the current char */
3139 /* Fallthrough */
3140 default:
3141 ret = lttng_metadata_printf(session, "%c", cur);
3142 break;
3143 }
3144
3145 if (ret)
3146 goto error;
3147
3148 cur = string[++i];
3149 }
3150 error:
3151 return ret;
3152 }
3153
3154 static
3155 int print_metadata_escaped_field(struct lttng_session *session, const char *field,
3156 const char *field_value)
3157 {
3158 int ret;
3159
3160 ret = lttng_metadata_printf(session, " %s = \"", field);
3161 if (ret)
3162 goto error;
3163
3164 ret = print_escaped_ctf_string(session, field_value);
3165 if (ret)
3166 goto error;
3167
3168 ret = lttng_metadata_printf(session, "\";\n");
3169
3170 error:
3171 return ret;
3172 }
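/*
 * Example (illustrative): for field "trace_name" and the value
 * my "dev" trace, the function above emits
 *
 *	trace_name = "my \"dev\" trace";
 *
 * since '"' and '\' are escaped and '\n' would be emitted as the two
 * characters "\n".
 */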
3173
3174 /*
3175 * Output metadata into this session's metadata buffers.
3176 * Must be called with sessions_mutex held.
3177 */
3178 static
3179 int _lttng_session_metadata_statedump(struct lttng_session *session)
3180 {
3181 unsigned char *uuid_c = session->uuid.b;
3182 unsigned char uuid_s[37], clock_uuid_s[BOOT_ID_LEN];
3183 const char *product_uuid;
3184 struct lttng_channel *chan;
3185 struct lttng_event *event;
3186 int ret = 0;
3187
3188 if (!LTTNG_READ_ONCE(session->active))
3189 return 0;
3190
3191 lttng_metadata_begin(session);
3192
3193 if (session->metadata_dumped)
3194 goto skip_session;
3195
3196 snprintf(uuid_s, sizeof(uuid_s),
3197 "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
3198 uuid_c[0], uuid_c[1], uuid_c[2], uuid_c[3],
3199 uuid_c[4], uuid_c[5], uuid_c[6], uuid_c[7],
3200 uuid_c[8], uuid_c[9], uuid_c[10], uuid_c[11],
3201 uuid_c[12], uuid_c[13], uuid_c[14], uuid_c[15]);
3202
3203 ret = lttng_metadata_printf(session,
3204 "typealias integer { size = 8; align = %u; signed = false; } := uint8_t;\n"
3205 "typealias integer { size = 16; align = %u; signed = false; } := uint16_t;\n"
3206 "typealias integer { size = 32; align = %u; signed = false; } := uint32_t;\n"
3207 "typealias integer { size = 64; align = %u; signed = false; } := uint64_t;\n"
3208 "typealias integer { size = %u; align = %u; signed = false; } := unsigned long;\n"
3209 "typealias integer { size = 5; align = 1; signed = false; } := uint5_t;\n"
3210 "typealias integer { size = 27; align = 1; signed = false; } := uint27_t;\n"
3211 "\n"
3212 "trace {\n"
3213 " major = %u;\n"
3214 " minor = %u;\n"
3215 " uuid = \"%s\";\n"
3216 " byte_order = %s;\n"
3217 " packet.header := struct {\n"
3218 " uint32_t magic;\n"
3219 " uint8_t uuid[16];\n"
3220 " uint32_t stream_id;\n"
3221 " uint64_t stream_instance_id;\n"
3222 " };\n"
3223 "};\n\n",
3224 lttng_alignof(uint8_t) * CHAR_BIT,
3225 lttng_alignof(uint16_t) * CHAR_BIT,
3226 lttng_alignof(uint32_t) * CHAR_BIT,
3227 lttng_alignof(uint64_t) * CHAR_BIT,
3228 sizeof(unsigned long) * CHAR_BIT,
3229 lttng_alignof(unsigned long) * CHAR_BIT,
3230 CTF_SPEC_MAJOR,
3231 CTF_SPEC_MINOR,
3232 uuid_s,
3233 #if __BYTE_ORDER == __BIG_ENDIAN
3234 "be"
3235 #else
3236 "le"
3237 #endif
3238 );
3239 if (ret)
3240 goto end;
3241
3242 ret = lttng_metadata_printf(session,
3243 "env {\n"
3244 " hostname = \"%s\";\n"
3245 " domain = \"kernel\";\n"
3246 " sysname = \"%s\";\n"
3247 " kernel_release = \"%s\";\n"
3248 " kernel_version = \"%s\";\n"
3249 " tracer_name = \"lttng-modules\";\n"
3250 " tracer_major = %d;\n"
3251 " tracer_minor = %d;\n"
3252 " tracer_patchlevel = %d;\n"
3253 " trace_buffering_scheme = \"global\";\n",
3254 current->nsproxy->uts_ns->name.nodename,
3255 utsname()->sysname,
3256 utsname()->release,
3257 utsname()->version,
3258 LTTNG_MODULES_MAJOR_VERSION,
3259 LTTNG_MODULES_MINOR_VERSION,
3260 LTTNG_MODULES_PATCHLEVEL_VERSION
3261 );
3262 if (ret)
3263 goto end;
3264
3265 ret = print_metadata_escaped_field(session, "trace_name", session->name);
3266 if (ret)
3267 goto end;
3268 ret = print_metadata_escaped_field(session, "trace_creation_datetime",
3269 session->creation_time);
3270 if (ret)
3271 goto end;
3272
3273 /* Add the product UUID to the 'env' section */
3274 product_uuid = dmi_get_system_info(DMI_PRODUCT_UUID);
3275 if (product_uuid) {
3276 ret = lttng_metadata_printf(session,
3277 " product_uuid = \"%s\";\n",
3278 product_uuid
3279 );
3280 if (ret)
3281 goto end;
3282 }
3283
3284 /* Close the 'env' section */
3285 ret = lttng_metadata_printf(session, "};\n\n");
3286 if (ret)
3287 goto end;
3288
3289 ret = lttng_metadata_printf(session,
3290 "clock {\n"
3291 " name = \"%s\";\n",
3292 trace_clock_name()
3293 );
3294 if (ret)
3295 goto end;
3296
3297 if (!trace_clock_uuid(clock_uuid_s)) {
3298 ret = lttng_metadata_printf(session,
3299 " uuid = \"%s\";\n",
3300 clock_uuid_s
3301 );
3302 if (ret)
3303 goto end;
3304 }
3305
3306 ret = lttng_metadata_printf(session,
3307 " description = \"%s\";\n"
3308 " freq = %llu; /* Frequency, in Hz */\n"
3309 " /* clock value offset from Epoch is: offset * (1/freq) */\n"
3310 " offset = %lld;\n"
3311 "};\n\n",
3312 trace_clock_description(),
3313 (unsigned long long) trace_clock_freq(),
3314 (long long) measure_clock_offset()
3315 );
3316 if (ret)
3317 goto end;
3318
3319 ret = lttng_metadata_printf(session,
3320 "typealias integer {\n"
3321 " size = 27; align = 1; signed = false;\n"
3322 " map = clock.%s.value;\n"
3323 "} := uint27_clock_monotonic_t;\n"
3324 "\n"
3325 "typealias integer {\n"
3326 " size = 32; align = %u; signed = false;\n"
3327 " map = clock.%s.value;\n"
3328 "} := uint32_clock_monotonic_t;\n"
3329 "\n"
3330 "typealias integer {\n"
3331 " size = 64; align = %u; signed = false;\n"
3332 " map = clock.%s.value;\n"
3333 "} := uint64_clock_monotonic_t;\n\n",
3334 trace_clock_name(),
3335 lttng_alignof(uint32_t) * CHAR_BIT,
3336 trace_clock_name(),
3337 lttng_alignof(uint64_t) * CHAR_BIT,
3338 trace_clock_name()
3339 );
3340 if (ret)
3341 goto end;
3342
3343 ret = _lttng_stream_packet_context_declare(session);
3344 if (ret)
3345 goto end;
3346
3347 ret = _lttng_event_header_declare(session);
3348 if (ret)
3349 goto end;
3350
3351 skip_session:
3352 list_for_each_entry(chan, &session->chan, list) {
3353 ret = _lttng_channel_metadata_statedump(session, chan);
3354 if (ret)
3355 goto end;
3356 }
3357
3358 list_for_each_entry(event, &session->events, list) {
3359 ret = _lttng_event_metadata_statedump(session, event->chan, event);
3360 if (ret)
3361 goto end;
3362 }
3363 session->metadata_dumped = 1;
3364 end:
3365 lttng_metadata_end(session);
3366 return ret;
3367 }
3368
3369 /**
3370 * lttng_transport_register - LTT transport registration
3371 * @transport: transport structure
3372 *
3373 * Registers a transport which can be used as output to extract the data out of
3374 * LTTng. The module calling this registration function must ensure that no
3375 * trap-inducing code will be executed by the transport functions. E.g.
3376 * vmalloc_sync_mappings() must be called between a vmalloc and the moment the memory
3377 * is made visible to the transport function. This registration acts as a
3378 * vmalloc_sync_mappings. Therefore, only if the module allocates virtual memory
3379 * after its registration must it synchronize the TLBs.
3380 */
3381 void lttng_transport_register(struct lttng_transport *transport)
3382 {
3383 /*
3384 * Make sure no page fault can be triggered by the module about to be
3385 * registered. We deal with this here so we don't have to call
3386 * vmalloc_sync_mappings() in each module's init.
3387 */
3388 wrapper_vmalloc_sync_mappings();
3389
3390 mutex_lock(&sessions_mutex);
3391 list_add_tail(&transport->node, &lttng_transport_list);
3392 mutex_unlock(&sessions_mutex);
3393 }
3394 EXPORT_SYMBOL_GPL(lttng_transport_register);
3395
3396 /**
3397 * lttng_transport_unregister - LTT transport unregistration
3398 * @transport: transport structure
3399 */
3400 void lttng_transport_unregister(struct lttng_transport *transport)
3401 {
3402 mutex_lock(&sessions_mutex);
3403 list_del(&transport->node);
3404 mutex_unlock(&sessions_mutex);
3405 }
3406 EXPORT_SYMBOL_GPL(lttng_transport_unregister);
3407
3408 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
3409
3410 enum cpuhp_state lttng_hp_prepare;
3411 enum cpuhp_state lttng_hp_online;
3412
3413 static int lttng_hotplug_prepare(unsigned int cpu, struct hlist_node *node)
3414 {
3415 struct lttng_cpuhp_node *lttng_node;
3416
3417 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
3418 switch (lttng_node->component) {
3419 case LTTNG_RING_BUFFER_FRONTEND:
3420 return 0;
3421 case LTTNG_RING_BUFFER_BACKEND:
3422 return lttng_cpuhp_rb_backend_prepare(cpu, lttng_node);
3423 case LTTNG_RING_BUFFER_ITER:
3424 return 0;
3425 case LTTNG_CONTEXT_PERF_COUNTERS:
3426 return 0;
3427 default:
3428 return -EINVAL;
3429 }
3430 }
3431
3432 static int lttng_hotplug_dead(unsigned int cpu, struct hlist_node *node)
3433 {
3434 struct lttng_cpuhp_node *lttng_node;
3435
3436 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
3437 switch (lttng_node->component) {
3438 case LTTNG_RING_BUFFER_FRONTEND:
3439 return lttng_cpuhp_rb_frontend_dead(cpu, lttng_node);
3440 case LTTNG_RING_BUFFER_BACKEND:
3441 return 0;
3442 case LTTNG_RING_BUFFER_ITER:
3443 return 0;
3444 case LTTNG_CONTEXT_PERF_COUNTERS:
3445 return lttng_cpuhp_perf_counter_dead(cpu, lttng_node);
3446 default:
3447 return -EINVAL;
3448 }
3449 }
3450
3451 static int lttng_hotplug_online(unsigned int cpu, struct hlist_node *node)
3452 {
3453 struct lttng_cpuhp_node *lttng_node;
3454
3455 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
3456 switch (lttng_node->component) {
3457 case LTTNG_RING_BUFFER_FRONTEND:
3458 return lttng_cpuhp_rb_frontend_online(cpu, lttng_node);
3459 case LTTNG_RING_BUFFER_BACKEND:
3460 return 0;
3461 case LTTNG_RING_BUFFER_ITER:
3462 return lttng_cpuhp_rb_iter_online(cpu, lttng_node);
3463 case LTTNG_CONTEXT_PERF_COUNTERS:
3464 return lttng_cpuhp_perf_counter_online(cpu, lttng_node);
3465 default:
3466 return -EINVAL;
3467 }
3468 }
3469
3470 static int lttng_hotplug_offline(unsigned int cpu, struct hlist_node *node)
3471 {
3472 struct lttng_cpuhp_node *lttng_node;
3473
3474 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
3475 switch (lttng_node->component) {
3476 case LTTNG_RING_BUFFER_FRONTEND:
3477 return lttng_cpuhp_rb_frontend_offline(cpu, lttng_node);
3478 case LTTNG_RING_BUFFER_BACKEND:
3479 return 0;
3480 case LTTNG_RING_BUFFER_ITER:
3481 return 0;
3482 case LTTNG_CONTEXT_PERF_COUNTERS:
3483 return 0;
3484 default:
3485 return -EINVAL;
3486 }
3487 }
3488
3489 static int __init lttng_init_cpu_hotplug(void)
3490 {
3491 int ret;
3492
3493 ret = cpuhp_setup_state_multi(CPUHP_BP_PREPARE_DYN, "lttng:prepare",
3494 lttng_hotplug_prepare,
3495 lttng_hotplug_dead);
3496 if (ret < 0) {
3497 return ret;
3498 }
3499 lttng_hp_prepare = ret;
3500 lttng_rb_set_hp_prepare(ret);
3501
3502 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "lttng:online",
3503 lttng_hotplug_online,
3504 lttng_hotplug_offline);
3505 if (ret < 0) {
3506 cpuhp_remove_multi_state(lttng_hp_prepare);
3507 lttng_hp_prepare = 0;
3508 return ret;
3509 }
3510 lttng_hp_online = ret;
3511 lttng_rb_set_hp_online(ret);
3512
3513 return 0;
3514 }
3515
3516 static void __exit lttng_exit_cpu_hotplug(void)
3517 {
3518 lttng_rb_set_hp_online(0);
3519 cpuhp_remove_multi_state(lttng_hp_online);
3520 lttng_rb_set_hp_prepare(0);
3521 cpuhp_remove_multi_state(lttng_hp_prepare);
3522 }
3523
3524 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
3525 static int lttng_init_cpu_hotplug(void)
3526 {
3527 return 0;
3528 }
3529 static void lttng_exit_cpu_hotplug(void)
3530 {
3531 }
3532 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
3533
3534
3535 static int __init lttng_events_init(void)
3536 {
3537 int ret;
3538
3539 ret = wrapper_lttng_fixup_sig(THIS_MODULE);
3540 if (ret)
3541 return ret;
3542 ret = wrapper_get_pfnblock_flags_mask_init();
3543 if (ret)
3544 return ret;
3545 ret = wrapper_get_pageblock_flags_mask_init();
3546 if (ret)
3547 return ret;
3548 ret = lttng_probes_init();
3549 if (ret)
3550 return ret;
3551 ret = lttng_context_init();
3552 if (ret)
3553 return ret;
3554 ret = lttng_tracepoint_init();
3555 if (ret)
3556 goto error_tp;
3557 event_cache = KMEM_CACHE(lttng_event, 0);
3558 if (!event_cache) {
3559 ret = -ENOMEM;
3560 goto error_kmem_event;
3561 }
3562 event_notifier_cache = KMEM_CACHE(lttng_event_notifier, 0);
3563 if (!event_notifier_cache) {
3564 ret = -ENOMEM;
3565 goto error_kmem_event_notifier;
3566 }
3567 ret = lttng_abi_init();
3568 if (ret)
3569 goto error_abi;
3570 ret = lttng_logger_init();
3571 if (ret)
3572 goto error_logger;
3573 ret = lttng_init_cpu_hotplug();
3574 if (ret)
3575 goto error_hotplug;
3576 printk(KERN_NOTICE "LTTng: Loaded modules v%s.%s.%s%s (%s)%s%s\n",
3577 __stringify(LTTNG_MODULES_MAJOR_VERSION),
3578 __stringify(LTTNG_MODULES_MINOR_VERSION),
3579 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
3580 LTTNG_MODULES_EXTRAVERSION,
3581 LTTNG_VERSION_NAME,
3582 #ifdef LTTNG_EXTRA_VERSION_GIT
3583 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
3584 #else
3585 "",
3586 #endif
3587 #ifdef LTTNG_EXTRA_VERSION_NAME
3588 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
3589 #else
3590 "");
3591 #endif
3592 return 0;
3593
3594 error_hotplug:
3595 lttng_logger_exit();
3596 error_logger:
3597 lttng_abi_exit();
3598 error_abi:
3599 kmem_cache_destroy(event_notifier_cache);
3600 error_kmem_event_notifier:
3601 kmem_cache_destroy(event_cache);
3602 error_kmem_event:
3603 lttng_tracepoint_exit();
3604 error_tp:
3605 lttng_context_exit();
3606 printk(KERN_NOTICE "LTTng: Failed to load modules v%s.%s.%s%s (%s)%s%s\n",
3607 __stringify(LTTNG_MODULES_MAJOR_VERSION),
3608 __stringify(LTTNG_MODULES_MINOR_VERSION),
3609 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
3610 LTTNG_MODULES_EXTRAVERSION,
3611 LTTNG_VERSION_NAME,
3612 #ifdef LTTNG_EXTRA_VERSION_GIT
3613 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
3614 #else
3615 "",
3616 #endif
3617 #ifdef LTTNG_EXTRA_VERSION_NAME
3618 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
3619 #else
3620 "");
3621 #endif
3622 return ret;
3623 }
3624
3625 module_init(lttng_events_init);
3626
3627 static void __exit lttng_events_exit(void)
3628 {
3629 struct lttng_session *session, *tmpsession;
3630
3631 lttng_exit_cpu_hotplug();
3632 lttng_logger_exit();
3633 lttng_abi_exit();
3634 list_for_each_entry_safe(session, tmpsession, &sessions, list)
3635 lttng_session_destroy(session);
3636 kmem_cache_destroy(event_cache);
3637 kmem_cache_destroy(event_notifier_cache);
3638 lttng_tracepoint_exit();
3639 lttng_context_exit();
3640 printk(KERN_NOTICE "LTTng: Unloaded modules v%s.%s.%s%s (%s)%s%s\n",
3641 __stringify(LTTNG_MODULES_MAJOR_VERSION),
3642 __stringify(LTTNG_MODULES_MINOR_VERSION),
3643 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
3644 LTTNG_MODULES_EXTRAVERSION,
3645 LTTNG_VERSION_NAME,
3646 #ifdef LTTNG_EXTRA_VERSION_GIT
3647 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
3648 #else
3649 "",
3650 #endif
3651 #ifdef LTTNG_EXTRA_VERSION_NAME
3652 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
3653 #else
3654 "");
3655 #endif
3656 }
3657
3658 module_exit(lttng_events_exit);
3659
3660 #include <generated/patches.h>
3661 #ifdef LTTNG_EXTRA_VERSION_GIT
3662 MODULE_INFO(extra_version_git, LTTNG_EXTRA_VERSION_GIT);
3663 #endif
3664 #ifdef LTTNG_EXTRA_VERSION_NAME
3665 MODULE_INFO(extra_version_name, LTTNG_EXTRA_VERSION_NAME);
3666 #endif
3667 MODULE_LICENSE("GPL and additional rights");
3668 MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
3669 MODULE_DESCRIPTION("LTTng tracer");
3670 MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
3671 __stringify(LTTNG_MODULES_MINOR_VERSION) "."
3672 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
3673 LTTNG_MODULES_EXTRAVERSION);