/*
 * Origin: lttng-modules.git, src/lttng-events.c
 * (imported at commit "Fix: use vmalloc for filter bytecode allocation")
 */
1 /* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
2 *
3 * lttng-events.c
4 *
5 * Holds LTTng per-session event registry.
6 *
7 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 */
9
10 /*
11 * This page_alloc.h wrapper needs to be included before gfpflags.h because it
12 * overrides a function with a define.
13 */
14 #include "wrapper/page_alloc.h"
15
16 #include <linux/module.h>
17 #include <linux/mutex.h>
18 #include <linux/sched.h>
19 #include <linux/slab.h>
20 #include <linux/jiffies.h>
21 #include <linux/utsname.h>
22 #include <linux/err.h>
23 #include <linux/seq_file.h>
24 #include <linux/file.h>
25 #include <linux/anon_inodes.h>
26 #include <wrapper/file.h>
27 #include <linux/uaccess.h>
28 #include <linux/vmalloc.h>
29 #include <linux/dmi.h>
30
31 #include <wrapper/uuid.h>
32 #include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_mappings() */
33 #include <wrapper/random.h>
34 #include <wrapper/tracepoint.h>
35 #include <wrapper/list.h>
36 #include <wrapper/types.h>
37 #include <lttng/kernel-version.h>
38 #include <lttng/events.h>
39 #include <lttng/lttng-bytecode.h>
40 #include <lttng/tracer.h>
41 #include <lttng/event-notifier-notification.h>
42 #include <lttng/abi-old.h>
43 #include <lttng/endian.h>
44 #include <lttng/string-utils.h>
45 #include <lttng/utils.h>
46 #include <ringbuffer/backend.h>
47 #include <ringbuffer/frontend.h>
48 #include <wrapper/time.h>
49
/* Default size (bytes) of a freshly allocated metadata cache buffer. */
#define METADATA_CACHE_DEFAULT_SIZE 4096

/* All tracing sessions created via lttng_session_create(). */
static LIST_HEAD(sessions);
/* All groups created via lttng_event_notifier_group_create(). */
static LIST_HEAD(event_notifier_groups);
/* Ring-buffer transports registered by transport modules. */
static LIST_HEAD(lttng_transport_list);
/*
 * Protect the sessions and metadata caches.
 */
static DEFINE_MUTEX(sessions_mutex);
/* Slab caches for struct lttng_event / struct lttng_event_notifier. */
static struct kmem_cache *event_cache;
static struct kmem_cache *event_notifier_cache;

/* Enabler synchronization helpers (defined later in this file). */
static void lttng_session_lazy_sync_event_enablers(struct lttng_session *session);
static void lttng_session_sync_event_enablers(struct lttng_session *session);
static void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler);
static void lttng_event_notifier_enabler_destroy(struct lttng_event_notifier_enabler *event_notifier_enabler);
static void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group);

/* Object unregistration/destruction helpers (defined later in this file). */
static void _lttng_event_destroy(struct lttng_event *event);
static void _lttng_event_notifier_destroy(struct lttng_event_notifier *event_notifier);
static void _lttng_channel_destroy(struct lttng_channel *chan);
static int _lttng_event_unregister(struct lttng_event *event);
static int _lttng_event_notifier_unregister(struct lttng_event_notifier *event_notifier);
/* Metadata statedump helpers (defined later in this file). */
static
int _lttng_event_metadata_statedump(struct lttng_session *session,
		struct lttng_channel *chan,
		struct lttng_event *event);
static
int _lttng_session_metadata_statedump(struct lttng_session *session);
static
void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream);
static
int _lttng_type_statedump(struct lttng_session *session,
		const struct lttng_type *type,
		size_t nesting);
static
int _lttng_field_statedump(struct lttng_session *session,
		const struct lttng_event_field *field,
		size_t nesting);
89
/*
 * Wait for all currently-executing probe handlers to complete, so that
 * instrumentation unregistered afterwards can no longer be reached by
 * tracers. The exact grace-period primitive depends on the kernel
 * version and preemption model.
 */
void synchronize_trace(void)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,1,0))
	/* Since 5.1, synchronize_rcu() covers the former sched flavor too. */
	synchronize_rcu();
#else
	synchronize_sched();
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
#ifdef CONFIG_PREEMPT_RT_FULL
	/* On RT kernels, also wait for a regular RCU grace period. */
	synchronize_rcu();
#endif
#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
#ifdef CONFIG_PREEMPT_RT
	/* Pre-3.4 kernels spell the RT config option differently. */
	synchronize_rcu();
#endif
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
}
108
/* Take the global lock protecting the session list and metadata caches. */
void lttng_lock_sessions(void)
{
	mutex_lock(&sessions_mutex);
}
113
/* Release the global lock taken by lttng_lock_sessions(). */
void lttng_unlock_sessions(void)
{
	mutex_unlock(&sessions_mutex);
}
118
119 static struct lttng_transport *lttng_transport_find(const char *name)
120 {
121 struct lttng_transport *transport;
122
123 list_for_each_entry(transport, &lttng_transport_list, node) {
124 if (!strcmp(transport->name, name))
125 return transport;
126 }
127 return NULL;
128 }
129
130 /*
131 * Called with sessions lock held.
132 */
133 int lttng_session_active(void)
134 {
135 struct lttng_session *iter;
136
137 list_for_each_entry(iter, &sessions, list) {
138 if (iter->active)
139 return 1;
140 }
141 return 0;
142 }
143
/*
 * Create a new tracing session, allocate its refcounted metadata cache,
 * initialize its ID trackers, and register it on the global session list.
 * Returns the new session, or NULL on allocation failure.
 */
struct lttng_session *lttng_session_create(void)
{
	struct lttng_session *session;
	struct lttng_metadata_cache *metadata_cache;
	int i;

	mutex_lock(&sessions_mutex);
	session = lttng_kvzalloc(sizeof(struct lttng_session), GFP_KERNEL);
	if (!session)
		goto err;
	INIT_LIST_HEAD(&session->chan);
	INIT_LIST_HEAD(&session->events);
	/* Each session gets a freshly generated UUID. */
	lttng_guid_gen(&session->uuid);

	/*
	 * The metadata cache is refcounted (kref): it can be shared with
	 * metadata streams and outlive the session (see metadata_cache_destroy).
	 */
	metadata_cache = kzalloc(sizeof(struct lttng_metadata_cache),
			GFP_KERNEL);
	if (!metadata_cache)
		goto err_free_session;
	/* Cache buffer is vmalloc'd; freed with vfree() on last kref_put. */
	metadata_cache->data = vzalloc(METADATA_CACHE_DEFAULT_SIZE);
	if (!metadata_cache->data)
		goto err_free_cache;
	metadata_cache->cache_alloc = METADATA_CACHE_DEFAULT_SIZE;
	kref_init(&metadata_cache->refcount);
	mutex_init(&metadata_cache->lock);
	session->metadata_cache = metadata_cache;
	INIT_LIST_HEAD(&metadata_cache->metadata_stream);
	memcpy(&metadata_cache->uuid, &session->uuid,
		sizeof(metadata_cache->uuid));
	INIT_LIST_HEAD(&session->enablers_head);
	for (i = 0; i < LTTNG_EVENT_HT_SIZE; i++)
		INIT_HLIST_HEAD(&session->events_ht.table[i]);
	list_add(&session->list, &sessions);
	/* Wire up the PID/UID/GID trackers and their namespaced variants. */
	session->pid_tracker.session = session;
	session->pid_tracker.tracker_type = TRACKER_PID;
	session->vpid_tracker.session = session;
	session->vpid_tracker.tracker_type = TRACKER_VPID;
	session->uid_tracker.session = session;
	session->uid_tracker.tracker_type = TRACKER_UID;
	session->vuid_tracker.session = session;
	session->vuid_tracker.tracker_type = TRACKER_VUID;
	session->gid_tracker.session = session;
	session->gid_tracker.tracker_type = TRACKER_GID;
	session->vgid_tracker.session = session;
	session->vgid_tracker.tracker_type = TRACKER_VGID;
	mutex_unlock(&sessions_mutex);
	return session;

err_free_cache:
	kfree(metadata_cache);
err_free_session:
	lttng_kvfree(session);
err:
	mutex_unlock(&sessions_mutex);
	return NULL;
}
199
/*
 * Create an event notifier group backed by a "relay-event-notifier"
 * ring-buffer channel used to transport notifier notifications.
 * Returns the new group, or NULL on error.
 */
struct lttng_event_notifier_group *lttng_event_notifier_group_create(void)
{
	struct lttng_transport *transport = NULL;
	struct lttng_event_notifier_group *event_notifier_group;
	const char *transport_name = "relay-event-notifier";
	size_t subbuf_size = 4096;	//TODO
	size_t num_subbuf = 16;		//TODO
	unsigned int switch_timer_interval = 0;
	unsigned int read_timer_interval = 0;
	int i;

	mutex_lock(&sessions_mutex);

	transport = lttng_transport_find(transport_name);
	if (!transport) {
		printk(KERN_WARNING "LTTng: transport %s not found\n",
		       transport_name);
		goto notransport;
	}
	/* Pin the transport module for the lifetime of the group. */
	if (!try_module_get(transport->owner)) {
		printk(KERN_WARNING "LTTng: Can't lock transport %s module.\n",
		       transport_name);
		goto notransport;
	}

	event_notifier_group = lttng_kvzalloc(sizeof(struct lttng_event_notifier_group),
			GFP_KERNEL);
	if (!event_notifier_group)
		goto nomem;

	/*
	 * Initialize the ring buffer used to store event notifier
	 * notifications.
	 */
	event_notifier_group->ops = &transport->ops;
	event_notifier_group->chan = transport->ops.channel_create(
			transport_name, event_notifier_group, NULL,
			subbuf_size, num_subbuf, switch_timer_interval,
			read_timer_interval);
	if (!event_notifier_group->chan)
		goto create_error;

	event_notifier_group->transport = transport;

	INIT_LIST_HEAD(&event_notifier_group->enablers_head);
	INIT_LIST_HEAD(&event_notifier_group->event_notifiers_head);
	for (i = 0; i < LTTNG_EVENT_NOTIFIER_HT_SIZE; i++)
		INIT_HLIST_HEAD(&event_notifier_group->event_notifiers_ht.table[i]);

	/* Publish the group on the global list (under sessions_mutex). */
	list_add(&event_notifier_group->node, &event_notifier_groups);

	mutex_unlock(&sessions_mutex);

	return event_notifier_group;

create_error:
	lttng_kvfree(event_notifier_group);
nomem:
	if (transport)
		module_put(transport->owner);
notransport:
	mutex_unlock(&sessions_mutex);
	return NULL;
}
264
265 void metadata_cache_destroy(struct kref *kref)
266 {
267 struct lttng_metadata_cache *cache =
268 container_of(kref, struct lttng_metadata_cache, refcount);
269 vfree(cache->data);
270 kfree(cache);
271 }
272
/*
 * Tear down a session: mark it inactive, unregister all instrumentation,
 * wait for in-flight probe handlers, then destroy enablers, events and
 * channels, hang up metadata streams, release the trackers and the
 * metadata cache reference, and free the session. The ordering below is
 * significant: nothing may be freed before tracers have quiesced.
 */
void lttng_session_destroy(struct lttng_session *session)
{
	struct lttng_channel *chan, *tmpchan;
	struct lttng_event *event, *tmpevent;
	struct lttng_metadata_stream *metadata_stream;
	struct lttng_event_enabler *event_enabler, *tmp_event_enabler;
	int ret;

	mutex_lock(&sessions_mutex);
	WRITE_ONCE(session->active, 0);
	/* First unregister, so no new events can be produced. */
	list_for_each_entry(chan, &session->chan, list) {
		ret = lttng_syscalls_unregister_event(chan);
		WARN_ON(ret);
	}
	list_for_each_entry(event, &session->events, list) {
		ret = _lttng_event_unregister(event);
		WARN_ON(ret);
	}
	synchronize_trace();	/* Wait for in-flight events to complete */
	list_for_each_entry(chan, &session->chan, list) {
		ret = lttng_syscalls_destroy_event(chan);
		WARN_ON(ret);
	}
	/* Now it is safe to free enablers, then events, then channels. */
	list_for_each_entry_safe(event_enabler, tmp_event_enabler,
			&session->enablers_head, node)
		lttng_event_enabler_destroy(event_enabler);
	list_for_each_entry_safe(event, tmpevent, &session->events, list)
		_lttng_event_destroy(event);
	list_for_each_entry_safe(chan, tmpchan, &session->chan, list) {
		/* No metadata channel may remain on the session list here. */
		BUG_ON(chan->channel_type == METADATA_CHANNEL);
		_lttng_channel_destroy(chan);
	}
	/* Wake readers blocked on the metadata streams. */
	mutex_lock(&session->metadata_cache->lock);
	list_for_each_entry(metadata_stream, &session->metadata_cache->metadata_stream, list)
		_lttng_metadata_channel_hangup(metadata_stream);
	mutex_unlock(&session->metadata_cache->lock);
	lttng_id_tracker_destroy(&session->pid_tracker, false);
	lttng_id_tracker_destroy(&session->vpid_tracker, false);
	lttng_id_tracker_destroy(&session->uid_tracker, false);
	lttng_id_tracker_destroy(&session->vuid_tracker, false);
	lttng_id_tracker_destroy(&session->gid_tracker, false);
	lttng_id_tracker_destroy(&session->vgid_tracker, false);
	/* Drop this session's cache reference; streams may still hold theirs. */
	kref_put(&session->metadata_cache->refcount, metadata_cache_destroy);
	list_del(&session->list);
	mutex_unlock(&sessions_mutex);
	lttng_kvfree(session);
}
320
/*
 * Tear down an event notifier group: unregister all notifiers, wait for
 * in-flight handlers and pending wakeup work, then free enablers and
 * notifiers, destroy the backing channel, and release the group.
 * A NULL group is silently ignored.
 */
void lttng_event_notifier_group_destroy(
		struct lttng_event_notifier_group *event_notifier_group)
{
	struct lttng_event_notifier_enabler *event_notifier_enabler, *tmp_event_notifier_enabler;
	struct lttng_event_notifier *event_notifier, *tmpevent_notifier;
	int ret;

	if (!event_notifier_group)
		return;

	mutex_lock(&sessions_mutex);

	ret = lttng_syscalls_unregister_event_notifier(event_notifier_group);
	WARN_ON(ret);

	list_for_each_entry_safe(event_notifier, tmpevent_notifier,
			&event_notifier_group->event_notifiers_head, list) {
		ret = _lttng_event_notifier_unregister(event_notifier);
		WARN_ON(ret);
	}

	/* Wait for in-flight event notifier to complete */
	synchronize_trace();

	/* Flush any pending reader-wakeup irq work before freeing. */
	irq_work_sync(&event_notifier_group->wakeup_pending);

	kfree(event_notifier_group->sc_filter);

	list_for_each_entry_safe(event_notifier_enabler, tmp_event_notifier_enabler,
			&event_notifier_group->enablers_head, node)
		lttng_event_notifier_enabler_destroy(event_notifier_enabler);

	list_for_each_entry_safe(event_notifier, tmpevent_notifier,
			&event_notifier_group->event_notifiers_head, list)
		_lttng_event_notifier_destroy(event_notifier);

	/* Release the channel, then the transport module pinned at creation. */
	event_notifier_group->ops->channel_destroy(event_notifier_group->chan);
	module_put(event_notifier_group->transport->owner);
	list_del(&event_notifier_group->node);

	mutex_unlock(&sessions_mutex);
	lttng_kvfree(event_notifier_group);
}
364
/*
 * Trigger a kernel statedump for the given session, serialized by the
 * sessions mutex. Returns the result of lttng_statedump_start().
 */
int lttng_session_statedump(struct lttng_session *session)
{
	int ret;

	mutex_lock(&sessions_mutex);
	ret = lttng_statedump_start(session);
	mutex_unlock(&sessions_mutex);
	return ret;
}
374
/*
 * Activate tracing for a session: sync enablers, pick each channel's
 * event header layout, clear quiescent stream state, then run the
 * metadata and kernel statedumps. Returns 0 on success, -EBUSY if the
 * session is already active, or the statedump error (in which case the
 * session is deactivated again).
 */
int lttng_session_enable(struct lttng_session *session)
{
	int ret = 0;
	struct lttng_channel *chan;

	mutex_lock(&sessions_mutex);
	if (session->active) {
		ret = -EBUSY;
		goto end;
	}

	/* Set transient enabler state to "enabled" */
	session->tstate = 1;

	/* We need to sync enablers with session before activation. */
	lttng_session_sync_event_enablers(session);

	/*
	 * Snapshot the number of events per channel to know the type of header
	 * we need to use.
	 */
	list_for_each_entry(chan, &session->chan, list) {
		if (chan->header_type)
			continue;	/* don't change it if session stop/restart */
		/* Compact layout only when fewer than 31 event IDs allocated. */
		if (chan->free_event_id < 31)
			chan->header_type = 1;	/* compact */
		else
			chan->header_type = 2;	/* large */
	}

	/* Clear each stream's quiescent state. */
	list_for_each_entry(chan, &session->chan, list) {
		if (chan->channel_type != METADATA_CHANNEL)
			lib_ring_buffer_clear_quiescent_channel(chan->chan);
	}

	WRITE_ONCE(session->active, 1);
	/* been_active forbids adding non-metadata channels later on. */
	WRITE_ONCE(session->been_active, 1);
	ret = _lttng_session_metadata_statedump(session);
	if (ret) {
		WRITE_ONCE(session->active, 0);
		goto end;
	}
	ret = lttng_statedump_start(session);
	if (ret)
		WRITE_ONCE(session->active, 0);
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
425
/*
 * Deactivate tracing for a session: clear the active flag, sync enablers
 * to the "disabled" transient state, and mark every non-metadata stream
 * quiescent. Returns 0 on success, -EBUSY if already inactive.
 */
int lttng_session_disable(struct lttng_session *session)
{
	int ret = 0;
	struct lttng_channel *chan;

	mutex_lock(&sessions_mutex);
	if (!session->active) {
		ret = -EBUSY;
		goto end;
	}
	WRITE_ONCE(session->active, 0);

	/* Set transient enabler state to "disabled" */
	session->tstate = 0;
	lttng_session_sync_event_enablers(session);

	/* Set each stream's quiescent state. */
	list_for_each_entry(chan, &session->chan, list) {
		if (chan->channel_type != METADATA_CHANNEL)
			lib_ring_buffer_set_quiescent_channel(chan->chan);
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
451
/*
 * Regenerate the session metadata from scratch: wipe the cache, bump its
 * version, reset all stream positions and per-object "dumped" flags, then
 * re-run the metadata statedump. Only allowed while the session is active
 * (-EBUSY otherwise).
 */
int lttng_session_metadata_regenerate(struct lttng_session *session)
{
	int ret = 0;
	struct lttng_channel *chan;
	struct lttng_event *event;
	struct lttng_metadata_cache *cache = session->metadata_cache;
	struct lttng_metadata_stream *stream;

	mutex_lock(&sessions_mutex);
	if (!session->active) {
		ret = -EBUSY;
		goto end;
	}

	/* Reset the cache content and reset all streams to its beginning. */
	mutex_lock(&cache->lock);
	memset(cache->data, 0, cache->cache_alloc);
	cache->metadata_written = 0;
	cache->version++;
	list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list) {
		stream->metadata_out = 0;
		stream->metadata_in = 0;
	}
	mutex_unlock(&cache->lock);

	/* Clear the dumped flags so the statedump re-emits everything. */
	session->metadata_dumped = 0;
	list_for_each_entry(chan, &session->chan, list) {
		chan->metadata_dumped = 0;
	}

	list_for_each_entry(event, &session->events, list) {
		event->metadata_dumped = 0;
	}

	ret = _lttng_session_metadata_statedump(session);

end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
491
492 int lttng_channel_enable(struct lttng_channel *channel)
493 {
494 int ret = 0;
495
496 mutex_lock(&sessions_mutex);
497 if (channel->channel_type == METADATA_CHANNEL) {
498 ret = -EPERM;
499 goto end;
500 }
501 if (channel->enabled) {
502 ret = -EEXIST;
503 goto end;
504 }
505 /* Set transient enabler state to "enabled" */
506 channel->tstate = 1;
507 lttng_session_sync_event_enablers(channel->session);
508 /* Set atomically the state to "enabled" */
509 WRITE_ONCE(channel->enabled, 1);
510 end:
511 mutex_unlock(&sessions_mutex);
512 return ret;
513 }
514
515 int lttng_channel_disable(struct lttng_channel *channel)
516 {
517 int ret = 0;
518
519 mutex_lock(&sessions_mutex);
520 if (channel->channel_type == METADATA_CHANNEL) {
521 ret = -EPERM;
522 goto end;
523 }
524 if (!channel->enabled) {
525 ret = -EEXIST;
526 goto end;
527 }
528 /* Set atomically the state to "disabled" */
529 WRITE_ONCE(channel->enabled, 0);
530 /* Set transient enabler state to "enabled" */
531 channel->tstate = 0;
532 lttng_session_sync_event_enablers(channel->session);
533 end:
534 mutex_unlock(&sessions_mutex);
535 return ret;
536 }
537
/*
 * Enable a single event. Metadata-channel events may not be toggled
 * (-EPERM). Tracepoint and syscall events are controlled exclusively
 * through enablers (-EINVAL). Returns 0 on success, -EEXIST if the
 * event is already enabled.
 */
int lttng_event_enable(struct lttng_event *event)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (event->chan->channel_type == METADATA_CHANNEL) {
		ret = -EPERM;
		goto end;
	}
	if (event->enabled) {
		ret = -EEXIST;
		goto end;
	}
	switch (event->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:	/* Driven by enablers only. */
	case LTTNG_KERNEL_SYSCALL:
		ret = -EINVAL;
		break;
	case LTTNG_KERNEL_KPROBE:	/* Individually toggleable. */
	case LTTNG_KERNEL_UPROBE:
	case LTTNG_KERNEL_NOOP:
		WRITE_ONCE(event->enabled, 1);
		break;
	case LTTNG_KERNEL_KRETPROBE:
		/* Toggles both the entry and return events together. */
		ret = lttng_kretprobes_event_enable_state(event, 1);
		break;
	case LTTNG_KERNEL_FUNCTION:	/* Fall-through. */
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
573
/*
 * Disable a single event. Mirror image of lttng_event_enable():
 * -EPERM for metadata-channel events, -EINVAL for enabler-driven
 * instrumentation, -EEXIST if already disabled.
 */
int lttng_event_disable(struct lttng_event *event)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (event->chan->channel_type == METADATA_CHANNEL) {
		ret = -EPERM;
		goto end;
	}
	if (!event->enabled) {
		ret = -EEXIST;
		goto end;
	}
	switch (event->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:	/* Driven by enablers only. */
	case LTTNG_KERNEL_SYSCALL:
		ret = -EINVAL;
		break;
	case LTTNG_KERNEL_KPROBE:	/* Individually toggleable. */
	case LTTNG_KERNEL_UPROBE:
	case LTTNG_KERNEL_NOOP:
		WRITE_ONCE(event->enabled, 0);
		break;
	case LTTNG_KERNEL_KRETPROBE:
		/* Toggles both the entry and return events together. */
		ret = lttng_kretprobes_event_enable_state(event, 0);
		break;
	case LTTNG_KERNEL_FUNCTION:	/* Fall-through. */
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
609
/*
 * Enable a single event notifier. Tracepoint and syscall notifiers are
 * controlled through enablers (-EINVAL); only kprobe and uprobe notifiers
 * are individually toggleable. Returns -EEXIST if already enabled.
 */
int lttng_event_notifier_enable(struct lttng_event_notifier *event_notifier)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (event_notifier->enabled) {
		ret = -EEXIST;
		goto end;
	}
	switch (event_notifier->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:	/* Driven by enablers only. */
	case LTTNG_KERNEL_SYSCALL:
		ret = -EINVAL;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
		WRITE_ONCE(event_notifier->enabled, 1);
		break;
	case LTTNG_KERNEL_FUNCTION:	/* Unsupported for notifiers. */
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_KRETPROBE:
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
639
/*
 * Disable a single event notifier. Mirror image of
 * lttng_event_notifier_enable(): -EINVAL for enabler-driven or
 * unsupported instrumentation, -EEXIST if already disabled.
 */
int lttng_event_notifier_disable(struct lttng_event_notifier *event_notifier)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (!event_notifier->enabled) {
		ret = -EEXIST;
		goto end;
	}
	switch (event_notifier->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:	/* Driven by enablers only. */
	case LTTNG_KERNEL_SYSCALL:
		ret = -EINVAL;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
		WRITE_ONCE(event_notifier->enabled, 0);
		break;
	case LTTNG_KERNEL_FUNCTION:	/* Unsupported for notifiers. */
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_KRETPROBE:
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
669
/*
 * Create a channel in a session, backed by the named ring-buffer
 * transport. Refused once the session has been activated, except for the
 * metadata channel. Returns the new channel, or NULL on error.
 */
struct lttng_channel *lttng_channel_create(struct lttng_session *session,
				       const char *transport_name,
				       void *buf_addr,
				       size_t subbuf_size, size_t num_subbuf,
				       unsigned int switch_timer_interval,
				       unsigned int read_timer_interval,
				       enum channel_type channel_type)
{
	struct lttng_channel *chan;
	struct lttng_transport *transport = NULL;

	mutex_lock(&sessions_mutex);
	if (session->been_active && channel_type != METADATA_CHANNEL)
		goto active;	/* Refuse to add channel to active session */
	transport = lttng_transport_find(transport_name);
	if (!transport) {
		printk(KERN_WARNING "LTTng: transport %s not found\n",
		       transport_name);
		goto notransport;
	}
	/* Pin the transport module for the channel's lifetime. */
	if (!try_module_get(transport->owner)) {
		printk(KERN_WARNING "LTTng: Can't lock transport module.\n");
		goto notransport;
	}
	chan = kzalloc(sizeof(struct lttng_channel), GFP_KERNEL);
	if (!chan)
		goto nomem;
	chan->session = session;
	chan->id = session->free_chan_id++;
	chan->ops = &transport->ops;
	/*
	 * Note: the channel creation op already writes into the packet
	 * headers. Therefore the "chan" information used as input
	 * should be already accessible.
	 */
	chan->chan = transport->ops.channel_create(transport_name,
			chan, buf_addr, subbuf_size, num_subbuf,
			switch_timer_interval, read_timer_interval);
	if (!chan->chan)
		goto create_error;
	chan->tstate = 1;
	chan->enabled = 1;
	chan->transport = transport;
	chan->channel_type = channel_type;
	list_add(&chan->list, &session->chan);
	mutex_unlock(&sessions_mutex);
	return chan;

create_error:
	kfree(chan);
nomem:
	if (transport)
		module_put(transport->owner);
notransport:
active:
	mutex_unlock(&sessions_mutex);
	return NULL;
}
728
/*
 * Only used internally at session destruction for per-cpu channels, and
 * when metadata channel is released.
 * Needs to be called with sessions mutex held.
 */
static
void _lttng_channel_destroy(struct lttng_channel *chan)
{
	/* Destroy the ring buffer before dropping the transport module ref. */
	chan->ops->channel_destroy(chan->chan);
	module_put(chan->transport->owner);
	list_del(&chan->list);
	lttng_destroy_context(chan->ctx);
	kfree(chan);
}
743
/*
 * Destroy the metadata channel. Unlike per-cpu channels (torn down from
 * lttng_session_destroy()), the metadata channel is destroyed through
 * this exported entry point, under the sessions mutex.
 */
void lttng_metadata_channel_destroy(struct lttng_channel *chan)
{
	BUG_ON(chan->channel_type != METADATA_CHANNEL);

	/* Protect the metadata cache with the sessions_mutex. */
	mutex_lock(&sessions_mutex);
	_lttng_channel_destroy(chan);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_metadata_channel_destroy);
754
/*
 * Hang up a metadata stream: mark it finalized and wake any reader
 * blocked on its wait queue so it can observe the hangup.
 */
static
void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream)
{
	stream->finalized = 1;
	wake_up_interruptible(&stream->read_wait);
}
761
/*
 * Supports event creation while tracing session is active.
 * Needs to be called with sessions mutex held.
 *
 * Creates (and, for probe-based instrumentation, registers) an event in
 * the given channel. Returns the new event, or an ERR_PTR:
 *   -EMFILE  no event IDs left in the channel,
 *   -EEXIST  an event with this name already exists in the channel,
 *   -ENOMEM  allocation failure,
 *   -ENOENT  unknown tracepoint name,
 *   -EINVAL  invalid instrumentation type/parameters or registration failure.
 */
struct lttng_event *_lttng_event_create(struct lttng_channel *chan,
				struct lttng_kernel_event *event_param,
				void *filter,
				const struct lttng_event_desc *event_desc,
				enum lttng_kernel_instrumentation itype)
{
	struct lttng_session *session = chan->session;
	struct lttng_event *event;
	const char *event_name;
	struct hlist_head *head;
	int ret;

	/* Event IDs come from free_event_id; -1U means the space is exhausted. */
	if (chan->free_event_id == -1U) {
		ret = -EMFILE;
		goto full;
	}

	/* The event name source depends on the instrumentation type. */
	switch (itype) {
	case LTTNG_KERNEL_TRACEPOINT:
		event_name = event_desc->name;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
	case LTTNG_KERNEL_KRETPROBE:
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_SYSCALL:
		event_name = event_param->name;
		break;
	case LTTNG_KERNEL_FUNCTION:	/* Fall-through. */
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
		goto type_error;
	}

	/* Refuse duplicate (name, channel) pairs within the session. */
	head = utils_borrow_hash_table_bucket(session->events_ht.table,
		LTTNG_EVENT_HT_SIZE, event_name);
	lttng_hlist_for_each_entry(event, head, hlist) {
		WARN_ON_ONCE(!event->desc);
		if (!strncmp(event->desc->name, event_name,
					LTTNG_KERNEL_SYM_NAME_LEN - 1)
				&& chan == event->chan) {
			ret = -EEXIST;
			goto exist;
		}
	}

	event = kmem_cache_zalloc(event_cache, GFP_KERNEL);
	if (!event) {
		ret = -ENOMEM;
		goto cache_error;
	}
	event->chan = chan;
	event->filter = filter;
	event->id = chan->free_event_id++;
	event->instrumentation = itype;
	event->evtype = LTTNG_TYPE_EVENT;
	INIT_LIST_HEAD(&event->filter_bytecode_runtime_head);
	INIT_LIST_HEAD(&event->enablers_ref_head);

	switch (itype) {
	case LTTNG_KERNEL_TRACEPOINT:
		/* Event will be enabled by enabler sync. */
		event->enabled = 0;
		event->registered = 0;
		event->desc = lttng_event_desc_get(event_name);
		if (!event->desc) {
			ret = -ENOENT;
			goto register_error;
		}
		/* Populate lttng_event structure before event registration. */
		smp_wmb();
		break;
	case LTTNG_KERNEL_KPROBE:
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->registered = 1;
		/*
		 * Populate lttng_event structure before event
		 * registration.
		 */
		smp_wmb();
		ret = lttng_kprobes_register_event(event_name,
				event_param->u.kprobe.symbol_name,
				event_param->u.kprobe.offset,
				event_param->u.kprobe.addr,
				event);
		if (ret) {
			ret = -EINVAL;
			goto register_error;
		}
		/* Hold the probe provider module while the event exists. */
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		break;
	case LTTNG_KERNEL_KRETPROBE:
	{
		struct lttng_event *event_return;

		/* kretprobe defines 2 events */
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->registered = 1;
		event_return =
			kmem_cache_zalloc(event_cache, GFP_KERNEL);
		if (!event_return) {
			ret = -ENOMEM;
			goto register_error;
		}
		event_return->chan = chan;
		event_return->filter = filter;
		event_return->id = chan->free_event_id++;
		event_return->enabled = 0;
		event_return->registered = 1;
		event_return->instrumentation = itype;
		/*
		 * Populate lttng_event structure before kretprobe registration.
		 */
		smp_wmb();
		ret = lttng_kretprobes_register(event_name,
				event_param->u.kretprobe.symbol_name,
				event_param->u.kretprobe.offset,
				event_param->u.kretprobe.addr,
				event, event_return);
		if (ret) {
			kmem_cache_free(event_cache, event_return);
			ret = -EINVAL;
			goto register_error;
		}
		/* Take 2 refs on the module: one per event. */
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		ret = _lttng_event_metadata_statedump(chan->session, chan,
						    event_return);
		WARN_ON_ONCE(ret > 0);
		if (ret) {
			kmem_cache_free(event_cache, event_return);
			module_put(event->desc->owner);
			module_put(event->desc->owner);
			goto statedump_error;
		}
		list_add(&event_return->list, &chan->session->events);
		break;
	}
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_SYSCALL:
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->registered = 0;
		event->desc = event_desc;
		/* A specific entry or exit point must be requested. */
		switch (event_param->u.syscall.entryexit) {
		case LTTNG_KERNEL_SYSCALL_ENTRYEXIT:
			ret = -EINVAL;
			goto register_error;
		case LTTNG_KERNEL_SYSCALL_ENTRY:
			event->u.syscall.entryexit = LTTNG_SYSCALL_ENTRY;
			break;
		case LTTNG_KERNEL_SYSCALL_EXIT:
			event->u.syscall.entryexit = LTTNG_SYSCALL_EXIT;
			break;
		}
		/* Likewise, a specific syscall ABI must be requested. */
		switch (event_param->u.syscall.abi) {
		case LTTNG_KERNEL_SYSCALL_ABI_ALL:
			ret = -EINVAL;
			goto register_error;
		case LTTNG_KERNEL_SYSCALL_ABI_NATIVE:
			event->u.syscall.abi = LTTNG_SYSCALL_ABI_NATIVE;
			break;
		case LTTNG_KERNEL_SYSCALL_ABI_COMPAT:
			event->u.syscall.abi = LTTNG_SYSCALL_ABI_COMPAT;
			break;
		}
		if (!event->desc) {
			ret = -EINVAL;
			goto register_error;
		}
		break;
	case LTTNG_KERNEL_UPROBE:
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->registered = 1;

		/*
		 * Populate lttng_event structure before event
		 * registration.
		 */
		smp_wmb();

		ret = lttng_uprobes_register_event(event_param->name,
				event_param->u.uprobe.fd,
				event);
		if (ret)
			goto register_error;
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		break;
	case LTTNG_KERNEL_FUNCTION:	/* Fall-through */
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
		goto register_error;
	}
	/* Describe the new event in the session metadata. */
	ret = _lttng_event_metadata_statedump(chan->session, chan, event);
	WARN_ON_ONCE(ret > 0);
	if (ret) {
		goto statedump_error;
	}
	/* Publish the event in the hash table and on the session list. */
	hlist_add_head(&event->hlist, head);
	list_add(&event->list, &chan->session->events);
	return event;

statedump_error:
	/* If a statedump error occurs, events will not be readable. */
register_error:
	kmem_cache_free(event_cache, event);
cache_error:
exist:
type_error:
full:
	return ERR_PTR(ret);
}
1000
1001 struct lttng_event_notifier *_lttng_event_notifier_create(
1002 const struct lttng_event_desc *event_desc,
1003 uint64_t token, struct lttng_event_notifier_group *event_notifier_group,
1004 struct lttng_kernel_event_notifier *event_notifier_param,
1005 void *filter, enum lttng_kernel_instrumentation itype)
1006 {
1007 struct lttng_event_notifier *event_notifier;
1008 const char *event_name;
1009 struct hlist_head *head;
1010 int ret;
1011
1012 switch (itype) {
1013 case LTTNG_KERNEL_TRACEPOINT:
1014 event_name = event_desc->name;
1015 break;
1016 case LTTNG_KERNEL_KPROBE:
1017 case LTTNG_KERNEL_UPROBE:
1018 case LTTNG_KERNEL_SYSCALL:
1019 event_name = event_notifier_param->event.name;
1020 break;
1021 case LTTNG_KERNEL_KRETPROBE:
1022 case LTTNG_KERNEL_FUNCTION:
1023 case LTTNG_KERNEL_NOOP:
1024 default:
1025 WARN_ON_ONCE(1);
1026 ret = -EINVAL;
1027 goto type_error;
1028 }
1029
1030 head = utils_borrow_hash_table_bucket(event_notifier_group->event_notifiers_ht.table,
1031 LTTNG_EVENT_NOTIFIER_HT_SIZE, event_name);
1032 lttng_hlist_for_each_entry(event_notifier, head, hlist) {
1033 WARN_ON_ONCE(!event_notifier->desc);
1034 if (!strncmp(event_notifier->desc->name, event_name,
1035 LTTNG_KERNEL_SYM_NAME_LEN - 1)
1036 && event_notifier_group == event_notifier->group
1037 && token == event_notifier->user_token) {
1038 ret = -EEXIST;
1039 goto exist;
1040 }
1041 }
1042
1043 event_notifier = kmem_cache_zalloc(event_notifier_cache, GFP_KERNEL);
1044 if (!event_notifier) {
1045 ret = -ENOMEM;
1046 goto cache_error;
1047 }
1048
1049 event_notifier->group = event_notifier_group;
1050 event_notifier->user_token = token;
1051 event_notifier->filter = filter;
1052 event_notifier->instrumentation = itype;
1053 event_notifier->evtype = LTTNG_TYPE_EVENT;
1054 event_notifier->send_notification = lttng_event_notifier_notification_send;
1055 INIT_LIST_HEAD(&event_notifier->filter_bytecode_runtime_head);
1056 INIT_LIST_HEAD(&event_notifier->enablers_ref_head);
1057
1058 switch (itype) {
1059 case LTTNG_KERNEL_TRACEPOINT:
1060 /* Event will be enabled by enabler sync. */
1061 event_notifier->enabled = 0;
1062 event_notifier->registered = 0;
1063 event_notifier->desc = lttng_event_desc_get(event_name);
1064 if (!event_notifier->desc) {
1065 ret = -ENOENT;
1066 goto register_error;
1067 }
1068 /* Populate lttng_event_notifier structure before event registration. */
1069 smp_wmb();
1070 break;
1071 case LTTNG_KERNEL_KPROBE:
1072 /*
1073 * Needs to be explicitly enabled after creation, since
1074 * we may want to apply filters.
1075 */
1076 event_notifier->enabled = 0;
1077 event_notifier->registered = 1;
1078 /*
1079 * Populate lttng_event_notifier structure before event
1080 * registration.
1081 */
1082 smp_wmb();
1083 ret = lttng_kprobes_register_event_notifier(
1084 event_notifier_param->event.u.kprobe.symbol_name,
1085 event_notifier_param->event.u.kprobe.offset,
1086 event_notifier_param->event.u.kprobe.addr,
1087 event_notifier);
1088 if (ret) {
1089 ret = -EINVAL;
1090 goto register_error;
1091 }
1092 ret = try_module_get(event_notifier->desc->owner);
1093 WARN_ON_ONCE(!ret);
1094 break;
1095 case LTTNG_KERNEL_NOOP:
1096 case LTTNG_KERNEL_SYSCALL:
1097 /*
1098 * Needs to be explicitly enabled after creation, since
1099 * we may want to apply filters.
1100 */
1101 event_notifier->enabled = 0;
1102 event_notifier->registered = 0;
1103 event_notifier->desc = event_desc;
1104 switch (event_notifier_param->event.u.syscall.entryexit) {
1105 case LTTNG_KERNEL_SYSCALL_ENTRYEXIT:
1106 ret = -EINVAL;
1107 goto register_error;
1108 case LTTNG_KERNEL_SYSCALL_ENTRY:
1109 event_notifier->u.syscall.entryexit = LTTNG_SYSCALL_ENTRY;
1110 break;
1111 case LTTNG_KERNEL_SYSCALL_EXIT:
1112 event_notifier->u.syscall.entryexit = LTTNG_SYSCALL_EXIT;
1113 break;
1114 }
1115 switch (event_notifier_param->event.u.syscall.abi) {
1116 case LTTNG_KERNEL_SYSCALL_ABI_ALL:
1117 ret = -EINVAL;
1118 goto register_error;
1119 case LTTNG_KERNEL_SYSCALL_ABI_NATIVE:
1120 event_notifier->u.syscall.abi = LTTNG_SYSCALL_ABI_NATIVE;
1121 break;
1122 case LTTNG_KERNEL_SYSCALL_ABI_COMPAT:
1123 event_notifier->u.syscall.abi = LTTNG_SYSCALL_ABI_COMPAT;
1124 break;
1125 }
1126
1127 if (!event_notifier->desc) {
1128 ret = -EINVAL;
1129 goto register_error;
1130 }
1131 break;
1132 case LTTNG_KERNEL_UPROBE:
1133 /*
1134 * Needs to be explicitly enabled after creation, since
1135 * we may want to apply filters.
1136 */
1137 event_notifier->enabled = 0;
1138 event_notifier->registered = 1;
1139
1140 /*
1141 * Populate lttng_event_notifier structure before
1142 * event_notifier registration.
1143 */
1144 smp_wmb();
1145
1146 ret = lttng_uprobes_register_event_notifier(
1147 event_notifier_param->event.name,
1148 event_notifier_param->event.u.uprobe.fd,
1149 event_notifier);
1150 if (ret)
1151 goto register_error;
1152 ret = try_module_get(event_notifier->desc->owner);
1153 WARN_ON_ONCE(!ret);
1154 break;
1155 case LTTNG_KERNEL_KRETPROBE:
1156 case LTTNG_KERNEL_FUNCTION:
1157 default:
1158 WARN_ON_ONCE(1);
1159 ret = -EINVAL;
1160 goto register_error;
1161 }
1162
1163 list_add(&event_notifier->list, &event_notifier_group->event_notifiers_head);
1164 hlist_add_head(&event_notifier->hlist, head);
1165 return event_notifier;
1166
1167 register_error:
1168 kmem_cache_free(event_notifier_cache, event_notifier);
1169 cache_error:
1170 exist:
1171 type_error:
1172 return ERR_PTR(ret);
1173 }
1174
1175 struct lttng_event *lttng_event_create(struct lttng_channel *chan,
1176 struct lttng_kernel_event *event_param,
1177 void *filter,
1178 const struct lttng_event_desc *event_desc,
1179 enum lttng_kernel_instrumentation itype)
1180 {
1181 struct lttng_event *event;
1182
1183 mutex_lock(&sessions_mutex);
1184 event = _lttng_event_create(chan, event_param, filter, event_desc,
1185 itype);
1186 mutex_unlock(&sessions_mutex);
1187 return event;
1188 }
1189
1190 struct lttng_event_notifier *lttng_event_notifier_create(
1191 const struct lttng_event_desc *event_desc,
1192 uint64_t id, struct lttng_event_notifier_group *event_notifier_group,
1193 struct lttng_kernel_event_notifier *event_notifier_param,
1194 void *filter, enum lttng_kernel_instrumentation itype)
1195 {
1196 struct lttng_event_notifier *event_notifier;
1197
1198 mutex_lock(&sessions_mutex);
1199 event_notifier = _lttng_event_notifier_create(event_desc, id,
1200 event_notifier_group, event_notifier_param, filter, itype);
1201 mutex_unlock(&sessions_mutex);
1202 return event_notifier;
1203 }
1204
1205 /* Only used for tracepoints for now. */
1206 static
1207 void register_event(struct lttng_event *event)
1208 {
1209 const struct lttng_event_desc *desc;
1210 int ret = -EINVAL;
1211
1212 if (event->registered)
1213 return;
1214
1215 desc = event->desc;
1216 switch (event->instrumentation) {
1217 case LTTNG_KERNEL_TRACEPOINT:
1218 ret = lttng_wrapper_tracepoint_probe_register(desc->kname,
1219 desc->probe_callback,
1220 event);
1221 break;
1222 case LTTNG_KERNEL_SYSCALL:
1223 ret = lttng_syscall_filter_enable_event(event->chan, event);
1224 break;
1225 case LTTNG_KERNEL_KPROBE:
1226 case LTTNG_KERNEL_UPROBE:
1227 case LTTNG_KERNEL_KRETPROBE:
1228 case LTTNG_KERNEL_NOOP:
1229 ret = 0;
1230 break;
1231 case LTTNG_KERNEL_FUNCTION: /* Fall-through */
1232 default:
1233 WARN_ON_ONCE(1);
1234 }
1235 if (!ret)
1236 event->registered = 1;
1237 }
1238
1239 /*
1240 * Only used internally at session destruction.
1241 */
1242 int _lttng_event_unregister(struct lttng_event *event)
1243 {
1244 const struct lttng_event_desc *desc;
1245 int ret = -EINVAL;
1246
1247 if (!event->registered)
1248 return 0;
1249
1250 desc = event->desc;
1251 switch (event->instrumentation) {
1252 case LTTNG_KERNEL_TRACEPOINT:
1253 ret = lttng_wrapper_tracepoint_probe_unregister(event->desc->kname,
1254 event->desc->probe_callback,
1255 event);
1256 break;
1257 case LTTNG_KERNEL_KPROBE:
1258 lttng_kprobes_unregister_event(event);
1259 ret = 0;
1260 break;
1261 case LTTNG_KERNEL_KRETPROBE:
1262 lttng_kretprobes_unregister(event);
1263 ret = 0;
1264 break;
1265 case LTTNG_KERNEL_SYSCALL:
1266 ret = lttng_syscall_filter_disable_event(event->chan, event);
1267 break;
1268 case LTTNG_KERNEL_NOOP:
1269 ret = 0;
1270 break;
1271 case LTTNG_KERNEL_UPROBE:
1272 lttng_uprobes_unregister_event(event);
1273 ret = 0;
1274 break;
1275 case LTTNG_KERNEL_FUNCTION: /* Fall-through */
1276 default:
1277 WARN_ON_ONCE(1);
1278 }
1279 if (!ret)
1280 event->registered = 0;
1281 return ret;
1282 }
1283
1284 /* Only used for tracepoints for now. */
1285 static
1286 void register_event_notifier(struct lttng_event_notifier *event_notifier)
1287 {
1288 const struct lttng_event_desc *desc;
1289 int ret = -EINVAL;
1290
1291 if (event_notifier->registered)
1292 return;
1293
1294 desc = event_notifier->desc;
1295 switch (event_notifier->instrumentation) {
1296 case LTTNG_KERNEL_TRACEPOINT:
1297 ret = lttng_wrapper_tracepoint_probe_register(desc->kname,
1298 desc->event_notifier_callback,
1299 event_notifier);
1300 break;
1301 case LTTNG_KERNEL_SYSCALL:
1302 ret = lttng_syscall_filter_enable_event_notifier(event_notifier);
1303 break;
1304 case LTTNG_KERNEL_KPROBE:
1305 case LTTNG_KERNEL_UPROBE:
1306 ret = 0;
1307 break;
1308 case LTTNG_KERNEL_KRETPROBE:
1309 case LTTNG_KERNEL_FUNCTION:
1310 case LTTNG_KERNEL_NOOP:
1311 default:
1312 WARN_ON_ONCE(1);
1313 }
1314 if (!ret)
1315 event_notifier->registered = 1;
1316 }
1317
1318 static
1319 int _lttng_event_notifier_unregister(
1320 struct lttng_event_notifier *event_notifier)
1321 {
1322 const struct lttng_event_desc *desc;
1323 int ret = -EINVAL;
1324
1325 if (!event_notifier->registered)
1326 return 0;
1327
1328 desc = event_notifier->desc;
1329 switch (event_notifier->instrumentation) {
1330 case LTTNG_KERNEL_TRACEPOINT:
1331 ret = lttng_wrapper_tracepoint_probe_unregister(event_notifier->desc->kname,
1332 event_notifier->desc->event_notifier_callback,
1333 event_notifier);
1334 break;
1335 case LTTNG_KERNEL_KPROBE:
1336 lttng_kprobes_unregister_event_notifier(event_notifier);
1337 ret = 0;
1338 break;
1339 case LTTNG_KERNEL_UPROBE:
1340 lttng_uprobes_unregister_event_notifier(event_notifier);
1341 ret = 0;
1342 break;
1343 case LTTNG_KERNEL_SYSCALL:
1344 ret = lttng_syscall_filter_disable_event_notifier(event_notifier);
1345 break;
1346 case LTTNG_KERNEL_KRETPROBE:
1347 case LTTNG_KERNEL_FUNCTION:
1348 case LTTNG_KERNEL_NOOP:
1349 default:
1350 WARN_ON_ONCE(1);
1351 }
1352 if (!ret)
1353 event_notifier->registered = 0;
1354 return ret;
1355 }
1356
/*
 * Only used internally at session destruction.
 *
 * Release the instrumentation-specific resources held by @event
 * (probe descriptor reference and/or probe module refcount), unlink
 * it from the session's event list, destroy its context, then return
 * the event to the slab cache.
 *
 * NOTE(review): presumably called with sessions_mutex held, since it
 * unlinks from the session event list — confirm at call sites.
 */
static
void _lttng_event_destroy(struct lttng_event *event)
{
	switch (event->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
		/* Drop the descriptor reference taken at creation. */
		lttng_event_desc_put(event->desc);
		break;
	case LTTNG_KERNEL_KPROBE:
		/* Release the probe module pinned at registration. */
		module_put(event->desc->owner);
		lttng_kprobes_destroy_event_private(event);
		break;
	case LTTNG_KERNEL_KRETPROBE:
		module_put(event->desc->owner);
		lttng_kretprobes_destroy_private(event);
		break;
	case LTTNG_KERNEL_NOOP:	/* Fall-through */
	case LTTNG_KERNEL_SYSCALL:
		/* No instrumentation-specific resources to release. */
		break;
	case LTTNG_KERNEL_UPROBE:
		module_put(event->desc->owner);
		lttng_uprobes_destroy_event_private(event);
		break;
	case LTTNG_KERNEL_FUNCTION:	/* Fall-through */
	default:
		WARN_ON_ONCE(1);
	}
	list_del(&event->list);
	lttng_destroy_context(event->ctx);
	kmem_cache_free(event_cache, event);
}
1390
/*
 * Only used internally at session destruction.
 *
 * Release the instrumentation-specific resources held by
 * @event_notifier, unlink it from its group's notifier list, then
 * return it to the slab cache.
 *
 * NOTE(review): presumably called with sessions_mutex held, matching
 * _lttng_event_destroy() — confirm at call sites.
 */
static
void _lttng_event_notifier_destroy(struct lttng_event_notifier *event_notifier)
{
	switch (event_notifier->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
		/* Drop the descriptor reference taken at creation. */
		lttng_event_desc_put(event_notifier->desc);
		break;
	case LTTNG_KERNEL_KPROBE:
		/* Release the probe module pinned at registration. */
		module_put(event_notifier->desc->owner);
		lttng_kprobes_destroy_event_notifier_private(event_notifier);
		break;
	case LTTNG_KERNEL_NOOP:	/* Fall-through */
	case LTTNG_KERNEL_SYSCALL:
		/* No instrumentation-specific resources to release. */
		break;
	case LTTNG_KERNEL_UPROBE:
		module_put(event_notifier->desc->owner);
		lttng_uprobes_destroy_event_notifier_private(event_notifier);
		break;
	case LTTNG_KERNEL_KRETPROBE:	/* Fall-through */
	case LTTNG_KERNEL_FUNCTION:	/* Fall-through */
	default:
		WARN_ON_ONCE(1);
	}
	list_del(&event_notifier->list);
	kmem_cache_free(event_notifier_cache, event_notifier);
}
1420
1421 struct lttng_id_tracker *get_tracker(struct lttng_session *session,
1422 enum tracker_type tracker_type)
1423 {
1424 switch (tracker_type) {
1425 case TRACKER_PID:
1426 return &session->pid_tracker;
1427 case TRACKER_VPID:
1428 return &session->vpid_tracker;
1429 case TRACKER_UID:
1430 return &session->uid_tracker;
1431 case TRACKER_VUID:
1432 return &session->vuid_tracker;
1433 case TRACKER_GID:
1434 return &session->gid_tracker;
1435 case TRACKER_VGID:
1436 return &session->vgid_tracker;
1437 default:
1438 WARN_ON_ONCE(1);
1439 return NULL;
1440 }
1441 }
1442
1443 int lttng_session_track_id(struct lttng_session *session,
1444 enum tracker_type tracker_type, int id)
1445 {
1446 struct lttng_id_tracker *tracker;
1447 int ret;
1448
1449 tracker = get_tracker(session, tracker_type);
1450 if (!tracker)
1451 return -EINVAL;
1452 if (id < -1)
1453 return -EINVAL;
1454 mutex_lock(&sessions_mutex);
1455 if (id == -1) {
1456 /* track all ids: destroy tracker. */
1457 lttng_id_tracker_destroy(tracker, true);
1458 ret = 0;
1459 } else {
1460 ret = lttng_id_tracker_add(tracker, id);
1461 }
1462 mutex_unlock(&sessions_mutex);
1463 return ret;
1464 }
1465
1466 int lttng_session_untrack_id(struct lttng_session *session,
1467 enum tracker_type tracker_type, int id)
1468 {
1469 struct lttng_id_tracker *tracker;
1470 int ret;
1471
1472 tracker = get_tracker(session, tracker_type);
1473 if (!tracker)
1474 return -EINVAL;
1475 if (id < -1)
1476 return -EINVAL;
1477 mutex_lock(&sessions_mutex);
1478 if (id == -1) {
1479 /* untrack all ids: replace by empty tracker. */
1480 ret = lttng_id_tracker_empty_set(tracker);
1481 } else {
1482 ret = lttng_id_tracker_del(tracker, id);
1483 }
1484 mutex_unlock(&sessions_mutex);
1485 return ret;
1486 }
1487
/*
 * seq_file start operation for the tracker id listing.
 *
 * Takes sessions_mutex (released by id_list_stop()) and walks the
 * tracker's hash buckets in order, returning the node at logical
 * position *pos, or NULL at end of sequence.
 *
 * NOTE(review): when the tracker is disabled (id_tracker->p == NULL),
 * position 0 returns id_tracker_p — which is NULL, and seq_file treats
 * a NULL return from start as end-of-sequence. Confirm the "track
 * everything" wildcard entry is rendered as intended.
 */
static
void *id_list_start(struct seq_file *m, loff_t *pos)
{
	struct lttng_id_tracker *id_tracker = m->private;
	struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
	struct lttng_id_hash_node *e;
	int iter = 0, i;

	mutex_lock(&sessions_mutex);
	if (id_tracker_p) {
		for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
			struct hlist_head *head = &id_tracker_p->id_hash[i];

			/* Count nodes until we reach the requested position. */
			lttng_hlist_for_each_entry(e, head, hlist) {
				if (iter++ >= *pos)
					return e;
			}
		}
	} else {
		/* ID tracker disabled. */
		if (iter >= *pos && iter == 0) {
			return id_tracker_p; /* empty tracker */
		}
		iter++;
	}
	/* End of list */
	return NULL;
}
1516
/* Called with sessions_mutex held. */
/*
 * seq_file next operation: advance *ppos and re-walk the hash buckets
 * from the beginning to find the node at the new logical position.
 * O(n) per step, acceptable for the small LTTNG_ID_TABLE_SIZE table.
 * Returns the next node, or NULL at end of sequence.
 */
static
void *id_list_next(struct seq_file *m, void *p, loff_t *ppos)
{
	struct lttng_id_tracker *id_tracker = m->private;
	struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
	struct lttng_id_hash_node *e;
	int iter = 0, i;

	(*ppos)++;
	if (id_tracker_p) {
		for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
			struct hlist_head *head = &id_tracker_p->id_hash[i];

			lttng_hlist_for_each_entry(e, head, hlist) {
				if (iter++ >= *ppos)
					return e;
			}
		}
	} else {
		/* ID tracker disabled. */
		if (iter >= *ppos && iter == 0)
			return p; /* empty tracker */
		iter++;
	}

	/* End of list */
	return NULL;
}
1546
1547 static
1548 void id_list_stop(struct seq_file *m, void *p)
1549 {
1550 mutex_unlock(&sessions_mutex);
1551 }
1552
1553 static
1554 int id_list_show(struct seq_file *m, void *p)
1555 {
1556 struct lttng_id_tracker *id_tracker = m->private;
1557 struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
1558 int id;
1559
1560 if (p == id_tracker_p) {
1561 /* Tracker disabled. */
1562 id = -1;
1563 } else {
1564 const struct lttng_id_hash_node *e = p;
1565
1566 id = lttng_id_tracker_get_node_id(e);
1567 }
1568 switch (id_tracker->tracker_type) {
1569 case TRACKER_PID:
1570 seq_printf(m, "process { pid = %d; };\n", id);
1571 break;
1572 case TRACKER_VPID:
1573 seq_printf(m, "process { vpid = %d; };\n", id);
1574 break;
1575 case TRACKER_UID:
1576 seq_printf(m, "user { uid = %d; };\n", id);
1577 break;
1578 case TRACKER_VUID:
1579 seq_printf(m, "user { vuid = %d; };\n", id);
1580 break;
1581 case TRACKER_GID:
1582 seq_printf(m, "group { gid = %d; };\n", id);
1583 break;
1584 case TRACKER_VGID:
1585 seq_printf(m, "group { vgid = %d; };\n", id);
1586 break;
1587 default:
1588 seq_printf(m, "UNKNOWN { field = %d };\n", id);
1589 }
1590 return 0;
1591 }
1592
/* seq_file operations backing the tracker id listing file. */
static
const struct seq_operations lttng_tracker_ids_list_seq_ops = {
	.start = id_list_start,
	.next = id_list_next,
	.stop = id_list_stop,
	.show = id_list_show,
};
1600
1601 static
1602 int lttng_tracker_ids_list_open(struct inode *inode, struct file *file)
1603 {
1604 return seq_open(file, &lttng_tracker_ids_list_seq_ops);
1605 }
1606
1607 static
1608 int lttng_tracker_ids_list_release(struct inode *inode, struct file *file)
1609 {
1610 struct seq_file *m = file->private_data;
1611 struct lttng_id_tracker *id_tracker = m->private;
1612 int ret;
1613
1614 WARN_ON_ONCE(!id_tracker);
1615 ret = seq_release(inode, file);
1616 if (!ret)
1617 fput(id_tracker->session->file);
1618 return ret;
1619 }
1620
/* File operations for the anonymous tracker id listing fd. */
const struct file_operations lttng_tracker_ids_list_fops = {
	.owner = THIS_MODULE,
	.open = lttng_tracker_ids_list_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = lttng_tracker_ids_list_release,
};
1628
/*
 * Create an anonymous file descriptor listing the ids tracked by the
 * session's tracker of type @tracker_type.
 *
 * Resource acquisition order (unwound in reverse on error via gotos):
 * fd, anon inode file, session file refcount, seq_file open.
 * On success, returns the installed fd; the extra session file
 * reference is dropped by lttng_tracker_ids_list_release(). Returns a
 * negative error code on failure.
 */
int lttng_session_list_tracker_ids(struct lttng_session *session,
		enum tracker_type tracker_type)
{
	struct file *tracker_ids_list_file;
	struct seq_file *m;
	int file_fd, ret;

	file_fd = lttng_get_unused_fd();
	if (file_fd < 0) {
		ret = file_fd;
		goto fd_error;
	}

	tracker_ids_list_file = anon_inode_getfile("[lttng_tracker_ids_list]",
					  &lttng_tracker_ids_list_fops,
					  NULL, O_RDWR);
	if (IS_ERR(tracker_ids_list_file)) {
		ret = PTR_ERR(tracker_ids_list_file);
		goto file_error;
	}
	/* Pin the session file so it outlives the listing fd. */
	if (!atomic_long_add_unless(&session->file->f_count, 1, LONG_MAX)) {
		ret = -EOVERFLOW;
		goto refcount_error;
	}
	ret = lttng_tracker_ids_list_fops.open(NULL, tracker_ids_list_file);
	if (ret < 0)
		goto open_error;
	m = tracker_ids_list_file->private_data;

	m->private = get_tracker(session, tracker_type);
	BUG_ON(!m->private);
	/* Publish the fd only once fully initialized. */
	fd_install(file_fd, tracker_ids_list_file);

	return file_fd;

open_error:
	atomic_long_dec(&session->file->f_count);
refcount_error:
	fput(tracker_ids_list_file);
file_error:
	put_unused_fd(file_fd);
fd_error:
	return ret;
}
1673
1674 /*
1675 * Enabler management.
1676 */
1677 static
1678 int lttng_match_enabler_star_glob(const char *desc_name,
1679 const char *pattern)
1680 {
1681 if (!strutils_star_glob_match(pattern, LTTNG_SIZE_MAX,
1682 desc_name, LTTNG_SIZE_MAX))
1683 return 0;
1684 return 1;
1685 }
1686
/*
 * Return 1 when @desc_name exactly equals @name, 0 otherwise.
 */
static
int lttng_match_enabler_name(const char *desc_name,
		const char *name)
{
	return strcmp(desc_name, name) == 0;
}
1695
/*
 * Decide whether event descriptor @desc is selected by @enabler.
 *
 * Returns 1 on match, 0 on no-match, -EINVAL on invalid enabler
 * parameters. For syscalls, the descriptor name is progressively
 * stripped of its "compat_" and "syscall_entry_"/"syscall_exit_"
 * prefixes (recording ABI and entry/exit as it goes) before the bare
 * syscall name is matched against the enabler pattern.
 */
int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
		struct lttng_enabler *enabler)
{
	const char *desc_name, *enabler_name;
	bool compat = false, entry = false;

	enabler_name = enabler->event_param.name;
	switch (enabler->event_param.instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
		desc_name = desc->name;
		switch (enabler->format_type) {
		case LTTNG_ENABLER_FORMAT_STAR_GLOB:
			return lttng_match_enabler_star_glob(desc_name, enabler_name);
		case LTTNG_ENABLER_FORMAT_NAME:
			return lttng_match_enabler_name(desc_name, enabler_name);
		default:
			return -EINVAL;
		}
		break;
	case LTTNG_KERNEL_SYSCALL:
		desc_name = desc->name;
		/* Strip the compat prefix, remembering the ABI. */
		if (!strncmp(desc_name, "compat_", strlen("compat_"))) {
			desc_name += strlen("compat_");
			compat = true;
		}
		/* Strip the entry/exit prefix, remembering which side. */
		if (!strncmp(desc_name, "syscall_exit_",
				strlen("syscall_exit_"))) {
			desc_name += strlen("syscall_exit_");
		} else if (!strncmp(desc_name, "syscall_entry_",
				strlen("syscall_entry_"))) {
			desc_name += strlen("syscall_entry_");
			entry = true;
		} else {
			/* Syscall descriptors always carry one of the prefixes. */
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
		/* Filter on requested entry/exit side. */
		switch (enabler->event_param.u.syscall.entryexit) {
		case LTTNG_KERNEL_SYSCALL_ENTRYEXIT:
			break;
		case LTTNG_KERNEL_SYSCALL_ENTRY:
			if (!entry)
				return 0;
			break;
		case LTTNG_KERNEL_SYSCALL_EXIT:
			if (entry)
				return 0;
			break;
		default:
			return -EINVAL;
		}
		/* Filter on requested ABI. */
		switch (enabler->event_param.u.syscall.abi) {
		case LTTNG_KERNEL_SYSCALL_ABI_ALL:
			break;
		case LTTNG_KERNEL_SYSCALL_ABI_NATIVE:
			if (compat)
				return 0;
			break;
		case LTTNG_KERNEL_SYSCALL_ABI_COMPAT:
			if (!compat)
				return 0;
			break;
		default:
			return -EINVAL;
		}
		/* Finally, match the stripped syscall name. */
		switch (enabler->event_param.u.syscall.match) {
		case LTTNG_KERNEL_SYSCALL_MATCH_NAME:
			switch (enabler->format_type) {
			case LTTNG_ENABLER_FORMAT_STAR_GLOB:
				return lttng_match_enabler_star_glob(desc_name, enabler_name);
			case LTTNG_ENABLER_FORMAT_NAME:
				return lttng_match_enabler_name(desc_name, enabler_name);
			default:
				return -EINVAL;
			}
			break;
		case LTTNG_KERNEL_SYSCALL_MATCH_NR:
			return -EINVAL;	/* Not implemented. */
		default:
			return -EINVAL;
		}
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}
}
1782
1783 static
1784 int lttng_event_enabler_match_event(struct lttng_event_enabler *event_enabler,
1785 struct lttng_event *event)
1786 {
1787 struct lttng_enabler *base_enabler = lttng_event_enabler_as_enabler(
1788 event_enabler);
1789
1790 if (base_enabler->event_param.instrumentation != event->instrumentation)
1791 return 0;
1792 if (lttng_desc_match_enabler(event->desc, base_enabler)
1793 && event->chan == event_enabler->chan)
1794 return 1;
1795 else
1796 return 0;
1797 }
1798
1799 static
1800 int lttng_event_notifier_enabler_match_event_notifier(struct lttng_event_notifier_enabler *event_notifier_enabler,
1801 struct lttng_event_notifier *event_notifier)
1802 {
1803 struct lttng_enabler *base_enabler = lttng_event_notifier_enabler_as_enabler(
1804 event_notifier_enabler);
1805
1806 if (base_enabler->event_param.instrumentation != event_notifier->instrumentation)
1807 return 0;
1808 if (lttng_desc_match_enabler(event_notifier->desc, base_enabler)
1809 && event_notifier->group == event_notifier_enabler->group
1810 && event_notifier->user_token == event_notifier_enabler->base.user_token)
1811 return 1;
1812 else
1813 return 0;
1814 }
1815
1816 static
1817 struct lttng_enabler_ref *lttng_enabler_ref(
1818 struct list_head *enablers_ref_list,
1819 struct lttng_enabler *enabler)
1820 {
1821 struct lttng_enabler_ref *enabler_ref;
1822
1823 list_for_each_entry(enabler_ref, enablers_ref_list, node) {
1824 if (enabler_ref->ref == enabler)
1825 return enabler_ref;
1826 }
1827 return NULL;
1828 }
1829
/*
 * Walk the registered probe descriptors and create an lttng_event in
 * the enabler's channel for every tracepoint matching the enabler that
 * does not already have one. Creation failures are logged and skipped,
 * not propagated.
 */
static
void lttng_create_tracepoint_event_if_missing(struct lttng_event_enabler *event_enabler)
{
	struct lttng_session *session = event_enabler->chan->session;
	struct lttng_probe_desc *probe_desc;
	const struct lttng_event_desc *desc;
	int i;
	struct list_head *probe_list;

	probe_list = lttng_get_probe_list_head();
	/*
	 * For each probe event, if we find that a probe event matches
	 * our enabler, create an associated lttng_event if not
	 * already present.
	 */
	list_for_each_entry(probe_desc, probe_list, head) {
		for (i = 0; i < probe_desc->nr_events; i++) {
			int found = 0;
			struct hlist_head *head;
			struct lttng_event *event;

			desc = probe_desc->event_desc[i];
			if (!lttng_desc_match_enabler(desc,
					lttng_event_enabler_as_enabler(event_enabler)))
				continue;

			/*
			 * Check if already created.
			 */
			head = utils_borrow_hash_table_bucket(
				session->events_ht.table, LTTNG_EVENT_HT_SIZE,
				desc->name);
			lttng_hlist_for_each_entry(event, head, hlist) {
				if (event->desc == desc
						&& event->chan == event_enabler->chan)
					found = 1;
			}
			if (found)
				continue;

			/*
			 * We need to create an event for this
			 * event probe.
			 */
			event = _lttng_event_create(event_enabler->chan,
					NULL, NULL, desc,
					LTTNG_KERNEL_TRACEPOINT);
			if (!event) {
				printk(KERN_INFO "LTTng: Unable to create event %s\n",
					probe_desc->event_desc[i]->name);
			}
		}
	}
}
1884
1885 static
1886 void lttng_create_tracepoint_event_notifier_if_missing(struct lttng_event_notifier_enabler *event_notifier_enabler)
1887 {
1888 struct lttng_event_notifier_group *event_notifier_group = event_notifier_enabler->group;
1889 struct lttng_probe_desc *probe_desc;
1890 const struct lttng_event_desc *desc;
1891 int i;
1892 struct list_head *probe_list;
1893
1894 probe_list = lttng_get_probe_list_head();
1895 /*
1896 * For each probe event, if we find that a probe event matches
1897 * our enabler, create an associated lttng_event_notifier if not
1898 * already present.
1899 */
1900 list_for_each_entry(probe_desc, probe_list, head) {
1901 for (i = 0; i < probe_desc->nr_events; i++) {
1902 int found = 0;
1903 struct hlist_head *head;
1904 struct lttng_event_notifier *event_notifier;
1905
1906 desc = probe_desc->event_desc[i];
1907 if (!lttng_desc_match_enabler(desc,
1908 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)))
1909 continue;
1910
1911 /*
1912 * Check if already created.
1913 */
1914 head = utils_borrow_hash_table_bucket(
1915 event_notifier_group->event_notifiers_ht.table,
1916 LTTNG_EVENT_NOTIFIER_HT_SIZE, desc->name);
1917 lttng_hlist_for_each_entry(event_notifier, head, hlist) {
1918 if (event_notifier->desc == desc
1919 && event_notifier->user_token == event_notifier_enabler->base.user_token)
1920 found = 1;
1921 }
1922 if (found)
1923 continue;
1924
1925 /*
1926 * We need to create a event_notifier for this event probe.
1927 */
1928 event_notifier = _lttng_event_notifier_create(desc,
1929 event_notifier_enabler->base.user_token,
1930 event_notifier_group, NULL, NULL,
1931 LTTNG_KERNEL_TRACEPOINT);
1932 if (IS_ERR(event_notifier)) {
1933 printk(KERN_INFO "Unable to create event_notifier %s\n",
1934 probe_desc->event_desc[i]->name);
1935 }
1936 }
1937 }
1938 }
1939
1940 static
1941 void lttng_create_syscall_event_if_missing(struct lttng_event_enabler *event_enabler)
1942 {
1943 int ret;
1944
1945 ret = lttng_syscalls_register_event(event_enabler->chan, NULL);
1946 WARN_ON_ONCE(ret);
1947 }
1948
/*
 * Register the syscall event-notifier instrumentation for the enabler's
 * group, then create the matching event notifiers. Failures are
 * reported with one-shot warnings; no error is propagated.
 *
 * NOTE(review): "lttng_syscals_create_matching_event_notifiers" is the
 * API's actual (misspelled) name as declared elsewhere in the project;
 * do not rename it here without renaming its definition too.
 */
static
void lttng_create_syscall_event_notifier_if_missing(struct lttng_event_notifier_enabler *event_notifier_enabler)
{
	int ret;

	ret = lttng_syscalls_register_event_notifier(event_notifier_enabler, NULL);
	WARN_ON_ONCE(ret);
	ret = lttng_syscals_create_matching_event_notifiers(event_notifier_enabler, NULL);
	WARN_ON_ONCE(ret);
}
1959
1960 /*
1961 * Create struct lttng_event if it is missing and present in the list of
1962 * tracepoint probes.
1963 * Should be called with sessions mutex held.
1964 */
1965 static
1966 void lttng_create_event_if_missing(struct lttng_event_enabler *event_enabler)
1967 {
1968 switch (event_enabler->base.event_param.instrumentation) {
1969 case LTTNG_KERNEL_TRACEPOINT:
1970 lttng_create_tracepoint_event_if_missing(event_enabler);
1971 break;
1972 case LTTNG_KERNEL_SYSCALL:
1973 lttng_create_syscall_event_if_missing(event_enabler);
1974 break;
1975 default:
1976 WARN_ON_ONCE(1);
1977 break;
1978 }
1979 }
1980
/*
 * Create events associated with an event_enabler (if not already present),
 * and add backward reference from the event to the enabler.
 * Should be called with sessions mutex held.
 *
 * Also maintains the channel's syscall_all fast-path flag for the
 * catch-all "*" syscall enabler, and links any pending filter
 * bytecode onto each matching event. Returns 0 on success, -ENOMEM
 * when a backward reference cannot be allocated.
 */
static
int lttng_event_enabler_ref_events(struct lttng_event_enabler *event_enabler)
{
	struct lttng_channel *chan = event_enabler->chan;
	struct lttng_session *session = event_enabler->chan->session;
	struct lttng_enabler *base_enabler = lttng_event_enabler_as_enabler(event_enabler);
	struct lttng_event *event;

	/*
	 * The "enable all syscalls" wildcard enabler toggles the
	 * channel-wide syscall_all flag instead of matching per event.
	 */
	if (base_enabler->event_param.instrumentation == LTTNG_KERNEL_SYSCALL &&
			base_enabler->event_param.u.syscall.entryexit == LTTNG_KERNEL_SYSCALL_ENTRYEXIT &&
			base_enabler->event_param.u.syscall.abi == LTTNG_KERNEL_SYSCALL_ABI_ALL &&
			base_enabler->event_param.u.syscall.match == LTTNG_KERNEL_SYSCALL_MATCH_NAME &&
			!strcmp(base_enabler->event_param.name, "*")) {
		if (base_enabler->enabled)
			WRITE_ONCE(chan->syscall_all, 1);
		else
			WRITE_ONCE(chan->syscall_all, 0);
	}

	/* First ensure that probe events are created for this enabler. */
	lttng_create_event_if_missing(event_enabler);

	/* For each event matching event_enabler in session event list. */
	list_for_each_entry(event, &session->events, list) {
		struct lttng_enabler_ref *enabler_ref;

		if (!lttng_event_enabler_match_event(event_enabler, event))
			continue;
		enabler_ref = lttng_enabler_ref(&event->enablers_ref_head,
			lttng_event_enabler_as_enabler(event_enabler));
		if (!enabler_ref) {
			/*
			 * If no backward ref, create it.
			 * Add backward ref from event to event_enabler.
			 */
			enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
			if (!enabler_ref)
				return -ENOMEM;
			enabler_ref->ref = lttng_event_enabler_as_enabler(event_enabler);
			list_add(&enabler_ref->node,
				&event->enablers_ref_head);
		}

		/*
		 * Link filter bytecodes if not linked yet.
		 */
		lttng_enabler_link_bytecode(event->desc,
			lttng_static_ctx,
			&event->filter_bytecode_runtime_head,
			&lttng_event_enabler_as_enabler(event_enabler)->filter_bytecode_head);

		/* TODO: merge event context. */
	}
	return 0;
}
2041
2042 /*
2043 * Create struct lttng_event_notifier if it is missing and present in the list of
2044 * tracepoint probes.
2045 * Should be called with sessions mutex held.
2046 */
2047 static
2048 void lttng_create_event_notifier_if_missing(struct lttng_event_notifier_enabler *event_notifier_enabler)
2049 {
2050 switch (event_notifier_enabler->base.event_param.instrumentation) {
2051 case LTTNG_KERNEL_TRACEPOINT:
2052 lttng_create_tracepoint_event_notifier_if_missing(event_notifier_enabler);
2053 break;
2054 case LTTNG_KERNEL_SYSCALL:
2055 lttng_create_syscall_event_notifier_if_missing(event_notifier_enabler);
2056 break;
2057 default:
2058 WARN_ON_ONCE(1);
2059 break;
2060 }
2061 }
2062
/*
 * Create event_notifiers associated with a event_notifier enabler (if not already present).
 *
 * Also maintains the group-wide syscall_all_entry/syscall_all_exit
 * fast-path flags for the catch-all "*" syscall enabler, adds a
 * backward reference from each matching notifier to the enabler, and
 * links any pending filter bytecode. Returns 0 on success, -ENOMEM
 * when a backward reference cannot be allocated.
 */
static
int lttng_event_notifier_enabler_ref_event_notifiers(
		struct lttng_event_notifier_enabler *event_notifier_enabler)
{
	struct lttng_event_notifier_group *event_notifier_group = event_notifier_enabler->group;
	struct lttng_enabler *base_enabler = lttng_event_notifier_enabler_as_enabler(event_notifier_enabler);
	struct lttng_event_notifier *event_notifier;

	/*
	 * The "all syscalls" wildcard enabler toggles the group-wide
	 * entry/exit flags instead of matching per notifier.
	 */
	if (base_enabler->event_param.instrumentation == LTTNG_KERNEL_SYSCALL &&
			base_enabler->event_param.u.syscall.abi == LTTNG_KERNEL_SYSCALL_ABI_ALL &&
			base_enabler->event_param.u.syscall.match == LTTNG_KERNEL_SYSCALL_MATCH_NAME &&
			!strcmp(base_enabler->event_param.name, "*")) {

		int enabled = base_enabler->enabled;
		enum lttng_kernel_syscall_entryexit entryexit = base_enabler->event_param.u.syscall.entryexit;

		if (entryexit == LTTNG_KERNEL_SYSCALL_ENTRY || entryexit == LTTNG_KERNEL_SYSCALL_ENTRYEXIT)
			WRITE_ONCE(event_notifier_group->syscall_all_entry, enabled);

		if (entryexit == LTTNG_KERNEL_SYSCALL_EXIT || entryexit == LTTNG_KERNEL_SYSCALL_ENTRYEXIT)
			WRITE_ONCE(event_notifier_group->syscall_all_exit, enabled);

	}

	/* First ensure that probe event_notifiers are created for this enabler. */
	lttng_create_event_notifier_if_missing(event_notifier_enabler);

	/* Link the created event_notifier with its associated enabler. */
	list_for_each_entry(event_notifier, &event_notifier_group->event_notifiers_head, list) {
		struct lttng_enabler_ref *enabler_ref;

		if (!lttng_event_notifier_enabler_match_event_notifier(event_notifier_enabler, event_notifier))
			continue;

		enabler_ref = lttng_enabler_ref(&event_notifier->enablers_ref_head,
			lttng_event_notifier_enabler_as_enabler(event_notifier_enabler));
		if (!enabler_ref) {
			/*
			 * If no backward ref, create it.
			 * Add backward ref from event_notifier to enabler.
			 */
			enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
			if (!enabler_ref)
				return -ENOMEM;

			enabler_ref->ref = lttng_event_notifier_enabler_as_enabler(
				event_notifier_enabler);
			list_add(&enabler_ref->node,
				&event_notifier->enablers_ref_head);
		}

		/*
		 * Link filter bytecodes if not linked yet.
		 */
		lttng_enabler_link_bytecode(event_notifier->desc,
			lttng_static_ctx, &event_notifier->filter_bytecode_runtime_head,
			&lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->filter_bytecode_head);
	}
	return 0;
}
2126
2127 /*
2128 * Called at module load: connect the probe on all enablers matching
2129 * this event.
2130 * Called with sessions lock held.
2131 */
2132 int lttng_fix_pending_events(void)
2133 {
2134 struct lttng_session *session;
2135
2136 list_for_each_entry(session, &sessions, list)
2137 lttng_session_lazy_sync_event_enablers(session);
2138 return 0;
2139 }
2140
2141 static bool lttng_event_notifier_group_has_active_event_notifiers(
2142 struct lttng_event_notifier_group *event_notifier_group)
2143 {
2144 struct lttng_event_notifier_enabler *event_notifier_enabler;
2145
2146 list_for_each_entry(event_notifier_enabler, &event_notifier_group->enablers_head,
2147 node) {
2148 if (event_notifier_enabler->base.enabled)
2149 return true;
2150 }
2151 return false;
2152 }
2153
2154 bool lttng_event_notifier_active(void)
2155 {
2156 struct lttng_event_notifier_group *event_notifier_group;
2157
2158 list_for_each_entry(event_notifier_group, &event_notifier_groups, node) {
2159 if (lttng_event_notifier_group_has_active_event_notifiers(event_notifier_group))
2160 return true;
2161 }
2162 return false;
2163 }
2164
2165 int lttng_fix_pending_event_notifiers(void)
2166 {
2167 struct lttng_event_notifier_group *event_notifier_group;
2168
2169 list_for_each_entry(event_notifier_group, &event_notifier_groups, node)
2170 lttng_event_notifier_group_sync_enablers(event_notifier_group);
2171 return 0;
2172 }
2173
2174 struct lttng_event_enabler *lttng_event_enabler_create(
2175 enum lttng_enabler_format_type format_type,
2176 struct lttng_kernel_event *event_param,
2177 struct lttng_channel *chan)
2178 {
2179 struct lttng_event_enabler *event_enabler;
2180
2181 event_enabler = kzalloc(sizeof(*event_enabler), GFP_KERNEL);
2182 if (!event_enabler)
2183 return NULL;
2184 event_enabler->base.format_type = format_type;
2185 INIT_LIST_HEAD(&event_enabler->base.filter_bytecode_head);
2186 memcpy(&event_enabler->base.event_param, event_param,
2187 sizeof(event_enabler->base.event_param));
2188 event_enabler->chan = chan;
2189 /* ctx left NULL */
2190 event_enabler->base.enabled = 0;
2191 event_enabler->base.evtype = LTTNG_TYPE_ENABLER;
2192 mutex_lock(&sessions_mutex);
2193 list_add(&event_enabler->node, &event_enabler->chan->session->enablers_head);
2194 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
2195 mutex_unlock(&sessions_mutex);
2196 return event_enabler;
2197 }
2198
2199 int lttng_event_enabler_enable(struct lttng_event_enabler *event_enabler)
2200 {
2201 mutex_lock(&sessions_mutex);
2202 lttng_event_enabler_as_enabler(event_enabler)->enabled = 1;
2203 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
2204 mutex_unlock(&sessions_mutex);
2205 return 0;
2206 }
2207
2208 int lttng_event_enabler_disable(struct lttng_event_enabler *event_enabler)
2209 {
2210 mutex_lock(&sessions_mutex);
2211 lttng_event_enabler_as_enabler(event_enabler)->enabled = 0;
2212 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
2213 mutex_unlock(&sessions_mutex);
2214 return 0;
2215 }
2216
2217 static
2218 int lttng_enabler_attach_filter_bytecode(struct lttng_enabler *enabler,
2219 struct lttng_kernel_filter_bytecode __user *bytecode)
2220 {
2221 struct lttng_bytecode_node *bytecode_node;
2222 uint32_t bytecode_len;
2223 int ret;
2224
2225 ret = get_user(bytecode_len, &bytecode->len);
2226 if (ret)
2227 return ret;
2228 bytecode_node = lttng_kvzalloc(sizeof(*bytecode_node) + bytecode_len,
2229 GFP_KERNEL);
2230 if (!bytecode_node)
2231 return -ENOMEM;
2232 ret = copy_from_user(&bytecode_node->bc, bytecode,
2233 sizeof(*bytecode) + bytecode_len);
2234 if (ret)
2235 goto error_free;
2236
2237 bytecode_node->type = LTTNG_BYTECODE_NODE_TYPE_FILTER;
2238 bytecode_node->enabler = enabler;
2239 /* Enforce length based on allocated size */
2240 bytecode_node->bc.len = bytecode_len;
2241 list_add_tail(&bytecode_node->node, &enabler->filter_bytecode_head);
2242
2243 return 0;
2244
2245 error_free:
2246 lttng_kvfree(bytecode_node);
2247 return ret;
2248 }
2249
2250 int lttng_event_enabler_attach_filter_bytecode(struct lttng_event_enabler *event_enabler,
2251 struct lttng_kernel_filter_bytecode __user *bytecode)
2252 {
2253 int ret;
2254 ret = lttng_enabler_attach_filter_bytecode(
2255 lttng_event_enabler_as_enabler(event_enabler), bytecode);
2256 if (ret)
2257 goto error;
2258
2259 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
2260 return 0;
2261
2262 error:
2263 return ret;
2264 }
2265
2266 int lttng_event_add_callsite(struct lttng_event *event,
2267 struct lttng_kernel_event_callsite __user *callsite)
2268 {
2269
2270 switch (event->instrumentation) {
2271 case LTTNG_KERNEL_UPROBE:
2272 return lttng_uprobes_event_add_callsite(event, callsite);
2273 default:
2274 return -EINVAL;
2275 }
2276 }
2277
/*
 * Attaching a context at the event-enabler level is not implemented by
 * this tracer version: unconditionally reports -ENOSYS. Both parameters
 * are ignored.
 */
int lttng_event_enabler_attach_context(struct lttng_event_enabler *event_enabler,
		struct lttng_kernel_context *context_param)
{
	return -ENOSYS;
}
2283
2284 static
2285 void lttng_enabler_destroy(struct lttng_enabler *enabler)
2286 {
2287 struct lttng_bytecode_node *filter_node, *tmp_filter_node;
2288
2289 /* Destroy filter bytecode */
2290 list_for_each_entry_safe(filter_node, tmp_filter_node,
2291 &enabler->filter_bytecode_head, node) {
2292 lttng_kvfree(filter_node);
2293 }
2294 }
2295
2296 static
2297 void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler)
2298 {
2299 lttng_enabler_destroy(lttng_event_enabler_as_enabler(event_enabler));
2300
2301 /* Destroy contexts */
2302 lttng_destroy_context(event_enabler->ctx);
2303
2304 list_del(&event_enabler->node);
2305 kfree(event_enabler);
2306 }
2307
2308 struct lttng_event_notifier_enabler *lttng_event_notifier_enabler_create(
2309 struct lttng_event_notifier_group *event_notifier_group,
2310 enum lttng_enabler_format_type format_type,
2311 struct lttng_kernel_event_notifier *event_notifier_param)
2312 {
2313 struct lttng_event_notifier_enabler *event_notifier_enabler;
2314
2315 event_notifier_enabler = kzalloc(sizeof(*event_notifier_enabler), GFP_KERNEL);
2316 if (!event_notifier_enabler)
2317 return NULL;
2318
2319 event_notifier_enabler->base.format_type = format_type;
2320 INIT_LIST_HEAD(&event_notifier_enabler->base.filter_bytecode_head);
2321
2322 memcpy(&event_notifier_enabler->base.event_param, &event_notifier_param->event,
2323 sizeof(event_notifier_enabler->base.event_param));
2324 event_notifier_enabler->base.evtype = LTTNG_TYPE_ENABLER;
2325
2326 event_notifier_enabler->base.enabled = 0;
2327 event_notifier_enabler->base.user_token = event_notifier_param->event.token;
2328 event_notifier_enabler->group = event_notifier_group;
2329
2330 mutex_lock(&sessions_mutex);
2331 list_add(&event_notifier_enabler->node, &event_notifier_enabler->group->enablers_head);
2332 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2333
2334 mutex_unlock(&sessions_mutex);
2335
2336 return event_notifier_enabler;
2337 }
2338
2339 int lttng_event_notifier_enabler_enable(
2340 struct lttng_event_notifier_enabler *event_notifier_enabler)
2341 {
2342 mutex_lock(&sessions_mutex);
2343 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 1;
2344 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2345 mutex_unlock(&sessions_mutex);
2346 return 0;
2347 }
2348
2349 int lttng_event_notifier_enabler_disable(
2350 struct lttng_event_notifier_enabler *event_notifier_enabler)
2351 {
2352 mutex_lock(&sessions_mutex);
2353 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 0;
2354 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2355 mutex_unlock(&sessions_mutex);
2356 return 0;
2357 }
2358
2359 int lttng_event_notifier_enabler_attach_filter_bytecode(
2360 struct lttng_event_notifier_enabler *event_notifier_enabler,
2361 struct lttng_kernel_filter_bytecode __user *bytecode)
2362 {
2363 int ret;
2364
2365 ret = lttng_enabler_attach_filter_bytecode(
2366 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler),
2367 bytecode);
2368 if (ret)
2369 goto error;
2370
2371 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2372 return 0;
2373
2374 error:
2375 return ret;
2376 }
2377
2378 int lttng_event_notifier_add_callsite(struct lttng_event_notifier *event_notifier,
2379 struct lttng_kernel_event_callsite __user *callsite)
2380 {
2381
2382 switch (event_notifier->instrumentation) {
2383 case LTTNG_KERNEL_UPROBE:
2384 return lttng_uprobes_event_notifier_add_callsite(event_notifier,
2385 callsite);
2386 default:
2387 return -EINVAL;
2388 }
2389 }
2390
/*
 * Attaching a context to an event notifier enabler is not implemented by
 * this tracer version: unconditionally reports -ENOSYS. Both parameters
 * are ignored.
 */
int lttng_event_notifier_enabler_attach_context(
		struct lttng_event_notifier_enabler *event_notifier_enabler,
		struct lttng_kernel_context *context_param)
{
	return -ENOSYS;
}
2397
2398 static
2399 void lttng_event_notifier_enabler_destroy(
2400 struct lttng_event_notifier_enabler *event_notifier_enabler)
2401 {
2402 if (!event_notifier_enabler) {
2403 return;
2404 }
2405
2406 list_del(&event_notifier_enabler->node);
2407
2408 lttng_enabler_destroy(lttng_event_notifier_enabler_as_enabler(event_notifier_enabler));
2409 kfree(event_notifier_enabler);
2410 }
2411
/*
 * lttng_session_sync_event_enablers should be called just before starting a
 * session.
 * Should be called with sessions mutex held.
 *
 * First re-runs each enabler so newly matching events get a backward
 * reference, then recomputes the enabled state, tracepoint registration
 * and filter state of every event in the session.
 */
static
void lttng_session_sync_event_enablers(struct lttng_session *session)
{
	struct lttng_event_enabler *event_enabler;
	struct lttng_event *event;

	list_for_each_entry(event_enabler, &session->enablers_head, node)
		lttng_event_enabler_ref_events(event_enabler);
	/*
	 * For each event, if at least one of its enablers is enabled,
	 * and its channel and session transient states are enabled, we
	 * enable the event, else we disable it.
	 */
	list_for_each_entry(event, &session->events, list) {
		struct lttng_enabler_ref *enabler_ref;
		struct lttng_bytecode_runtime *runtime;
		int enabled = 0, has_enablers_without_bytecode = 0;

		switch (event->instrumentation) {
		case LTTNG_KERNEL_TRACEPOINT:
		case LTTNG_KERNEL_SYSCALL:
			/* Enable events: any single enabled enabler suffices. */
			list_for_each_entry(enabler_ref,
					&event->enablers_ref_head, node) {
				if (enabler_ref->ref->enabled) {
					enabled = 1;
					break;
				}
			}
			break;
		default:
			/* Not handled with lazy sync. */
			continue;
		}
		/*
		 * Enabled state is based on union of enablers, with
		 * intersection of session and channel transient enable
		 * states.
		 */
		enabled = enabled && session->tstate && event->chan->tstate;

		WRITE_ONCE(event->enabled, enabled);
		/*
		 * Sync tracepoint registration with event enabled
		 * state.
		 */
		if (enabled) {
			register_event(event);
		} else {
			_lttng_event_unregister(event);
		}

		/*
		 * Check if has enablers without bytecode enabled: such an
		 * enabler makes the event fire unconditionally, regardless
		 * of any attached filters.
		 */
		list_for_each_entry(enabler_ref,
				&event->enablers_ref_head, node) {
			if (enabler_ref->ref->enabled
					&& list_empty(&enabler_ref->ref->filter_bytecode_head)) {
				has_enablers_without_bytecode = 1;
				break;
			}
		}
		event->has_enablers_without_bytecode =
			has_enablers_without_bytecode;

		/* Enable filters */
		list_for_each_entry(runtime,
				&event->filter_bytecode_runtime_head, node)
			lttng_bytecode_filter_sync_state(runtime);
	}
}
2487
2488 /*
2489 * Apply enablers to session events, adding events to session if need
2490 * be. It is required after each modification applied to an active
2491 * session, and right before session "start".
2492 * "lazy" sync means we only sync if required.
2493 * Should be called with sessions mutex held.
2494 */
2495 static
2496 void lttng_session_lazy_sync_event_enablers(struct lttng_session *session)
2497 {
2498 /* We can skip if session is not active */
2499 if (!session->active)
2500 return;
2501 lttng_session_sync_event_enablers(session);
2502 }
2503
/*
 * Recompute the enabled state, tracepoint registration and filter state
 * of every event notifier in the group, after re-running each enabler to
 * pick up newly matching notifiers.
 * NOTE(review): all callers in this file hold sessions_mutex — confirm
 * that holds for any external caller as well.
 */
static
void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group)
{
	struct lttng_event_notifier_enabler *event_notifier_enabler;
	struct lttng_event_notifier *event_notifier;

	list_for_each_entry(event_notifier_enabler, &event_notifier_group->enablers_head, node)
		lttng_event_notifier_enabler_ref_event_notifiers(event_notifier_enabler);

	/*
	 * For each event_notifier, if at least one of its enablers is enabled,
	 * we enable the event_notifier, else we disable it.
	 */
	list_for_each_entry(event_notifier, &event_notifier_group->event_notifiers_head, list) {
		struct lttng_enabler_ref *enabler_ref;
		struct lttng_bytecode_runtime *runtime;
		int enabled = 0, has_enablers_without_bytecode = 0;

		switch (event_notifier->instrumentation) {
		case LTTNG_KERNEL_TRACEPOINT:
		case LTTNG_KERNEL_SYSCALL:
			/* Enable event_notifiers: any single enabled enabler suffices. */
			list_for_each_entry(enabler_ref,
					&event_notifier->enablers_ref_head, node) {
				if (enabler_ref->ref->enabled) {
					enabled = 1;
					break;
				}
			}
			break;
		default:
			/* Not handled with sync. */
			continue;
		}

		WRITE_ONCE(event_notifier->enabled, enabled);
		/*
		 * Sync tracepoint registration with event_notifier enabled
		 * state.
		 */
		if (enabled) {
			if (!event_notifier->registered)
				register_event_notifier(event_notifier);
		} else {
			if (event_notifier->registered)
				_lttng_event_notifier_unregister(event_notifier);
		}

		/*
		 * Check if has enablers without bytecode enabled: such an
		 * enabler makes the notifier fire unconditionally,
		 * regardless of any attached filters.
		 */
		list_for_each_entry(enabler_ref,
				&event_notifier->enablers_ref_head, node) {
			if (enabler_ref->ref->enabled
					&& list_empty(&enabler_ref->ref->filter_bytecode_head)) {
				has_enablers_without_bytecode = 1;
				break;
			}
		}
		event_notifier->has_enablers_without_bytecode =
			has_enablers_without_bytecode;

		/* Enable filters */
		list_for_each_entry(runtime,
				&event_notifier->filter_bytecode_runtime_head, node)
			lttng_bytecode_filter_sync_state(runtime);
	}
}
2570
/*
 * Serialize at most one packet worth of metadata into a metadata
 * channel.
 * We grab the metadata cache mutex to get exclusive access to our metadata
 * buffer and to the metadata cache. Exclusive access to the metadata buffer
 * allows us to do racy operations such as looking for remaining space left in
 * packet and write, since mutual exclusion protects us from concurrent writes.
 * Mutual exclusion on the metadata cache allow us to read the cache content
 * without racing against reallocation of the cache by updates.
 * Returns the number of bytes written in the channel, 0 if no data
 * was written and a negative value on error.
 */
int lttng_metadata_output_channel(struct lttng_metadata_stream *stream,
		struct channel *chan, bool *coherent)
{
	struct lib_ring_buffer_ctx ctx;
	int ret = 0;
	size_t len, reserve_len;

	/*
	 * Ensure we support mutiple get_next / put sequences followed by
	 * put_next. The metadata cache lock protects reading the metadata
	 * cache. It can indeed be read concurrently by "get_next_subbuf" and
	 * "flush" operations on the buffer invoked by different processes.
	 * Moreover, since the metadata cache memory can be reallocated, we
	 * need to have exclusive access against updates even though we only
	 * read it.
	 */
	mutex_lock(&stream->metadata_cache->lock);
	WARN_ON(stream->metadata_in < stream->metadata_out);
	if (stream->metadata_in != stream->metadata_out)
		goto end;

	/* Metadata regenerated, change the version. */
	if (stream->metadata_cache->version != stream->version)
		stream->version = stream->metadata_cache->version;

	/* Bytes of cached metadata not yet pushed to this stream. */
	len = stream->metadata_cache->metadata_written -
		stream->metadata_in;
	if (!len)
		goto end;
	/* Clamp the write to what fits in the current packet. */
	reserve_len = min_t(size_t,
			stream->transport->ops.packet_avail_size(chan),
			len);
	lib_ring_buffer_ctx_init(&ctx, chan, NULL, reserve_len,
			sizeof(char), -1);
	/*
	 * If reservation failed, return an error to the caller.
	 */
	ret = stream->transport->ops.event_reserve(&ctx, 0);
	if (ret != 0) {
		printk(KERN_WARNING "LTTng: Metadata event reservation failed\n");
		stream->coherent = false;
		goto end;
	}
	stream->transport->ops.event_write(&ctx,
			stream->metadata_cache->data + stream->metadata_in,
			reserve_len);
	stream->transport->ops.event_commit(&ctx);
	stream->metadata_in += reserve_len;
	/*
	 * A partial write leaves the stream mid-declaration: mark it
	 * incoherent until the remainder is flushed.
	 */
	if (reserve_len < len)
		stream->coherent = false;
	else
		stream->coherent = true;
	ret = reserve_len;

end:
	if (coherent)
		*coherent = stream->coherent;
	mutex_unlock(&stream->metadata_cache->lock);
	return ret;
}
2643
/*
 * Open a metadata transaction: the outermost begin takes the metadata
 * cache lock; nested begins only increment the producing refcount.
 * NOTE(review): the inc-then-lock sequence assumes begin/end pairs are
 * serialized by an outer lock (sessions_mutex) — confirm for all callers.
 */
static
void lttng_metadata_begin(struct lttng_session *session)
{
	if (atomic_inc_return(&session->metadata_cache->producing) == 1)
		mutex_lock(&session->metadata_cache->lock);
}
2650
/*
 * Close a metadata transaction: the outermost end wakes up every
 * metadata stream reader and releases the metadata cache lock. Must be
 * balanced with lttng_metadata_begin().
 */
static
void lttng_metadata_end(struct lttng_session *session)
{
	WARN_ON_ONCE(!atomic_read(&session->metadata_cache->producing));
	if (atomic_dec_return(&session->metadata_cache->producing) == 0) {
		struct lttng_metadata_stream *stream;

		/* New metadata is available: notify blocked readers. */
		list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list)
			wake_up_interruptible(&stream->read_wait);
		mutex_unlock(&session->metadata_cache->lock);
	}
}
2663
/*
 * Write the metadata to the metadata cache.
 * Must be called with sessions_mutex held.
 * The metadata cache lock protects us from concurrent read access from
 * thread outputting metadata content to ring buffer.
 * The content of the printf is printed as a single atomic metadata
 * transaction.
 *
 * Returns 0 on success, -ENOMEM when formatting or growing the cache
 * fails.
 */
int lttng_metadata_printf(struct lttng_session *session,
		const char *fmt, ...)
{
	char *str;
	size_t len;
	va_list ap;

	WARN_ON_ONCE(!LTTNG_READ_ONCE(session->active));

	va_start(ap, fmt);
	str = kvasprintf(GFP_KERNEL, fmt, ap);
	va_end(ap);
	if (!str)
		return -ENOMEM;

	len = strlen(str);
	WARN_ON_ONCE(!atomic_read(&session->metadata_cache->producing));
	if (session->metadata_cache->metadata_written + len >
			session->metadata_cache->cache_alloc) {
		char *tmp_cache_realloc;
		unsigned int tmp_cache_alloc_size;

		/*
		 * Grow geometrically (at least doubling) so repeated small
		 * appends stay amortized O(1).
		 * NOTE(review): size arithmetic is done in unsigned int —
		 * presumably caches never approach 4 GiB; confirm.
		 */
		tmp_cache_alloc_size = max_t(unsigned int,
				session->metadata_cache->cache_alloc + len,
				session->metadata_cache->cache_alloc << 1);
		tmp_cache_realloc = vzalloc(tmp_cache_alloc_size);
		if (!tmp_cache_realloc)
			goto err;
		if (session->metadata_cache->data) {
			memcpy(tmp_cache_realloc,
				session->metadata_cache->data,
				session->metadata_cache->cache_alloc);
			vfree(session->metadata_cache->data);
		}

		session->metadata_cache->cache_alloc = tmp_cache_alloc_size;
		session->metadata_cache->data = tmp_cache_realloc;
	}
	/* Append the formatted string (no NUL terminator) to the cache. */
	memcpy(session->metadata_cache->data +
			session->metadata_cache->metadata_written,
			str, len);
	session->metadata_cache->metadata_written += len;
	kfree(str);

	return 0;

err:
	kfree(str);
	return -ENOMEM;
}
2722
/*
 * Emit one indentation character per nesting level into the session's
 * metadata cache. Returns 0 on success or the first error from
 * lttng_metadata_printf().
 */
static
int print_tabs(struct lttng_session *session, size_t nesting)
{
	size_t i;

	for (i = 0; i < nesting; i++) {
		int ret;

		ret = lttng_metadata_printf(session, "	");
		if (ret) {
			return ret;
		}
	}
	return 0;
}
2738
/*
 * Emit the field's name (with the "_" identifier prefix) terminating a
 * type declaration. The nesting parameter is unused; it is kept for
 * signature symmetry with the other statedump helpers.
 */
static
int lttng_field_name_statedump(struct lttng_session *session,
		const struct lttng_event_field *field,
		size_t nesting)
{
	return lttng_metadata_printf(session, " _%s;\n", field->name);
}
2746
/*
 * Emit a CTF integer type declaration for the given type.
 * A byte_order attribute is only emitted when the field's byte order is
 * reversed relative to the build's native byte order (selected at
 * compile time via __BYTE_ORDER).
 */
static
int _lttng_integer_type_statedump(struct lttng_session *session,
		const struct lttng_type *type,
		size_t nesting)
{
	int ret;

	WARN_ON_ONCE(type->atype != atype_integer);
	ret = print_tabs(session, nesting);
	if (ret)
		return ret;
	ret = lttng_metadata_printf(session,
		"integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s }",
		type->u.integer.size,
		type->u.integer.alignment,
		type->u.integer.signedness,
		(type->u.integer.encoding == lttng_encode_none)
			? "none"
			: (type->u.integer.encoding == lttng_encode_UTF8)
				? "UTF8"
				: "ASCII",
		type->u.integer.base,
#if __BYTE_ORDER == __BIG_ENDIAN
		type->u.integer.reverse_byte_order ? " byte_order = le;" : ""
#else
		type->u.integer.reverse_byte_order ? " byte_order = be;" : ""
#endif
	);
	return ret;
}
2777
2778 /*
2779 * Must be called with sessions_mutex held.
2780 */
2781 static
2782 int _lttng_struct_type_statedump(struct lttng_session *session,
2783 const struct lttng_type *type,
2784 size_t nesting)
2785 {
2786 int ret;
2787 uint32_t i, nr_fields;
2788 unsigned int alignment;
2789
2790 WARN_ON_ONCE(type->atype != atype_struct_nestable);
2791
2792 ret = print_tabs(session, nesting);
2793 if (ret)
2794 return ret;
2795 ret = lttng_metadata_printf(session,
2796 "struct {\n");
2797 if (ret)
2798 return ret;
2799 nr_fields = type->u.struct_nestable.nr_fields;
2800 for (i = 0; i < nr_fields; i++) {
2801 const struct lttng_event_field *iter_field;
2802
2803 iter_field = &type->u.struct_nestable.fields[i];
2804 ret = _lttng_field_statedump(session, iter_field, nesting + 1);
2805 if (ret)
2806 return ret;
2807 }
2808 ret = print_tabs(session, nesting);
2809 if (ret)
2810 return ret;
2811 alignment = type->u.struct_nestable.alignment;
2812 if (alignment) {
2813 ret = lttng_metadata_printf(session,
2814 "} align(%u)",
2815 alignment);
2816 } else {
2817 ret = lttng_metadata_printf(session,
2818 "}");
2819 }
2820 return ret;
2821 }
2822
2823 /*
2824 * Must be called with sessions_mutex held.
2825 */
2826 static
2827 int _lttng_struct_field_statedump(struct lttng_session *session,
2828 const struct lttng_event_field *field,
2829 size_t nesting)
2830 {
2831 int ret;
2832
2833 ret = _lttng_struct_type_statedump(session,
2834 &field->type, nesting);
2835 if (ret)
2836 return ret;
2837 return lttng_field_name_statedump(session, field, nesting);
2838 }
2839
2840 /*
2841 * Must be called with sessions_mutex held.
2842 */
2843 static
2844 int _lttng_variant_type_statedump(struct lttng_session *session,
2845 const struct lttng_type *type,
2846 size_t nesting)
2847 {
2848 int ret;
2849 uint32_t i, nr_choices;
2850
2851 WARN_ON_ONCE(type->atype != atype_variant_nestable);
2852 /*
2853 * CTF 1.8 does not allow expressing nonzero variant alignment in a nestable way.
2854 */
2855 if (type->u.variant_nestable.alignment != 0)
2856 return -EINVAL;
2857 ret = print_tabs(session, nesting);
2858 if (ret)
2859 return ret;
2860 ret = lttng_metadata_printf(session,
2861 "variant <_%s> {\n",
2862 type->u.variant_nestable.tag_name);
2863 if (ret)
2864 return ret;
2865 nr_choices = type->u.variant_nestable.nr_choices;
2866 for (i = 0; i < nr_choices; i++) {
2867 const struct lttng_event_field *iter_field;
2868
2869 iter_field = &type->u.variant_nestable.choices[i];
2870 ret = _lttng_field_statedump(session, iter_field, nesting + 1);
2871 if (ret)
2872 return ret;
2873 }
2874 ret = print_tabs(session, nesting);
2875 if (ret)
2876 return ret;
2877 ret = lttng_metadata_printf(session,
2878 "}");
2879 return ret;
2880 }
2881
2882 /*
2883 * Must be called with sessions_mutex held.
2884 */
2885 static
2886 int _lttng_variant_field_statedump(struct lttng_session *session,
2887 const struct lttng_event_field *field,
2888 size_t nesting)
2889 {
2890 int ret;
2891
2892 ret = _lttng_variant_type_statedump(session,
2893 &field->type, nesting);
2894 if (ret)
2895 return ret;
2896 return lttng_field_name_statedump(session, field, nesting);
2897 }
2898
2899 /*
2900 * Must be called with sessions_mutex held.
2901 */
2902 static
2903 int _lttng_array_field_statedump(struct lttng_session *session,
2904 const struct lttng_event_field *field,
2905 size_t nesting)
2906 {
2907 int ret;
2908 const struct lttng_type *elem_type;
2909
2910 WARN_ON_ONCE(field->type.atype != atype_array_nestable);
2911
2912 if (field->type.u.array_nestable.alignment) {
2913 ret = print_tabs(session, nesting);
2914 if (ret)
2915 return ret;
2916 ret = lttng_metadata_printf(session,
2917 "struct { } align(%u) _%s_padding;\n",
2918 field->type.u.array_nestable.alignment * CHAR_BIT,
2919 field->name);
2920 if (ret)
2921 return ret;
2922 }
2923 /*
2924 * Nested compound types: Only array of structures and variants are
2925 * currently supported.
2926 */
2927 elem_type = field->type.u.array_nestable.elem_type;
2928 switch (elem_type->atype) {
2929 case atype_integer:
2930 case atype_struct_nestable:
2931 case atype_variant_nestable:
2932 ret = _lttng_type_statedump(session, elem_type, nesting);
2933 if (ret)
2934 return ret;
2935 break;
2936
2937 default:
2938 return -EINVAL;
2939 }
2940 ret = lttng_metadata_printf(session,
2941 " _%s[%u];\n",
2942 field->name,
2943 field->type.u.array_nestable.length);
2944 return ret;
2945 }
2946
2947 /*
2948 * Must be called with sessions_mutex held.
2949 */
2950 static
2951 int _lttng_sequence_field_statedump(struct lttng_session *session,
2952 const struct lttng_event_field *field,
2953 size_t nesting)
2954 {
2955 int ret;
2956 const char *length_name;
2957 const struct lttng_type *elem_type;
2958
2959 WARN_ON_ONCE(field->type.atype != atype_sequence_nestable);
2960
2961 length_name = field->type.u.sequence_nestable.length_name;
2962
2963 if (field->type.u.sequence_nestable.alignment) {
2964 ret = print_tabs(session, nesting);
2965 if (ret)
2966 return ret;
2967 ret = lttng_metadata_printf(session,
2968 "struct { } align(%u) _%s_padding;\n",
2969 field->type.u.sequence_nestable.alignment * CHAR_BIT,
2970 field->name);
2971 if (ret)
2972 return ret;
2973 }
2974
2975 /*
2976 * Nested compound types: Only array of structures and variants are
2977 * currently supported.
2978 */
2979 elem_type = field->type.u.sequence_nestable.elem_type;
2980 switch (elem_type->atype) {
2981 case atype_integer:
2982 case atype_struct_nestable:
2983 case atype_variant_nestable:
2984 ret = _lttng_type_statedump(session, elem_type, nesting);
2985 if (ret)
2986 return ret;
2987 break;
2988
2989 default:
2990 return -EINVAL;
2991 }
2992 ret = lttng_metadata_printf(session,
2993 " _%s[ _%s ];\n",
2994 field->name,
2995 field->type.u.sequence_nestable.length_name);
2996 return ret;
2997 }
2998
/*
 * Emit a CTF enum type declaration: the integer container type followed
 * by every entry, with '"' and '\' escaped in entry labels and value
 * ranges collapsed to a single value when start == end.
 * Must be called with sessions_mutex held.
 */
static
int _lttng_enum_type_statedump(struct lttng_session *session,
		const struct lttng_type *type,
		size_t nesting)
{
	const struct lttng_enum_desc *enum_desc;
	const struct lttng_type *container_type;
	int ret;
	unsigned int i, nr_entries;

	/* Enum containers must be integers. */
	container_type = type->u.enum_nestable.container_type;
	if (container_type->atype != atype_integer) {
		ret = -EINVAL;
		goto end;
	}
	enum_desc = type->u.enum_nestable.desc;
	nr_entries = enum_desc->nr_entries;

	ret = print_tabs(session, nesting);
	if (ret)
		goto end;
	ret = lttng_metadata_printf(session, "enum : ");
	if (ret)
		goto end;
	ret = _lttng_integer_type_statedump(session, container_type, 0);
	if (ret)
		goto end;
	ret = lttng_metadata_printf(session, " {\n");
	if (ret)
		goto end;
	/* Dump all entries */
	for (i = 0; i < nr_entries; i++) {
		const struct lttng_enum_entry *entry = &enum_desc->entries[i];
		int j, len;

		ret = print_tabs(session, nesting + 1);
		if (ret)
			goto end;
		ret = lttng_metadata_printf(session,
				"\"");
		if (ret)
			goto end;
		len = strlen(entry->string);
		/* Escape the character '"' */
		for (j = 0; j < len; j++) {
			char c = entry->string[j];

			switch (c) {
			case '"':
				ret = lttng_metadata_printf(session,
						"\\\"");
				break;
			case '\\':
				ret = lttng_metadata_printf(session,
						"\\\\");
				break;
			default:
				ret = lttng_metadata_printf(session,
						"%c", c);
				break;
			}
			if (ret)
				goto end;
		}
		ret = lttng_metadata_printf(session, "\"");
		if (ret)
			goto end;

		/* Auto-allocated entries let the CTF reader pick the value. */
		if (entry->options.is_auto) {
			ret = lttng_metadata_printf(session, ",\n");
			if (ret)
				goto end;
		} else {
			ret = lttng_metadata_printf(session,
					" = ");
			if (ret)
				goto end;
			if (entry->start.signedness)
				ret = lttng_metadata_printf(session,
					"%lld", (long long) entry->start.value);
			else
				ret = lttng_metadata_printf(session,
					"%llu", entry->start.value);
			if (ret)
				goto end;
			/* Single value when start == end, else a range. */
			if (entry->start.signedness == entry->end.signedness &&
					entry->start.value
						== entry->end.value) {
				ret = lttng_metadata_printf(session,
					",\n");
			} else {
				if (entry->end.signedness) {
					ret = lttng_metadata_printf(session,
						" ... %lld,\n",
						(long long) entry->end.value);
				} else {
					ret = lttng_metadata_printf(session,
						" ... %llu,\n",
						entry->end.value);
				}
			}
			if (ret)
				goto end;
		}
	}
	ret = print_tabs(session, nesting);
	if (ret)
		goto end;
	ret = lttng_metadata_printf(session, "}");
end:
	return ret;
}
3114
3115 /*
3116 * Must be called with sessions_mutex held.
3117 */
3118 static
3119 int _lttng_enum_field_statedump(struct lttng_session *session,
3120 const struct lttng_event_field *field,
3121 size_t nesting)
3122 {
3123 int ret;
3124
3125 ret = _lttng_enum_type_statedump(session, &field->type, nesting);
3126 if (ret)
3127 return ret;
3128 return lttng_field_name_statedump(session, field, nesting);
3129 }
3130
3131 static
3132 int _lttng_integer_field_statedump(struct lttng_session *session,
3133 const struct lttng_event_field *field,
3134 size_t nesting)
3135 {
3136 int ret;
3137
3138 ret = _lttng_integer_type_statedump(session, &field->type, nesting);
3139 if (ret)
3140 return ret;
3141 return lttng_field_name_statedump(session, field, nesting);
3142 }
3143
3144 static
3145 int _lttng_string_type_statedump(struct lttng_session *session,
3146 const struct lttng_type *type,
3147 size_t nesting)
3148 {
3149 int ret;
3150
3151 WARN_ON_ONCE(type->atype != atype_string);
3152 /* Default encoding is UTF8 */
3153 ret = print_tabs(session, nesting);
3154 if (ret)
3155 return ret;
3156 ret = lttng_metadata_printf(session,
3157 "string%s",
3158 type->u.string.encoding == lttng_encode_ASCII ?
3159 " { encoding = ASCII; }" : "");
3160 return ret;
3161 }
3162
3163 static
3164 int _lttng_string_field_statedump(struct lttng_session *session,
3165 const struct lttng_event_field *field,
3166 size_t nesting)
3167 {
3168 int ret;
3169
3170 WARN_ON_ONCE(field->type.atype != atype_string);
3171 ret = _lttng_string_type_statedump(session, &field->type, nesting);
3172 if (ret)
3173 return ret;
3174 return lttng_field_name_statedump(session, field, nesting);
3175 }
3176
3177 /*
3178 * Must be called with sessions_mutex held.
3179 */
3180 static
3181 int _lttng_type_statedump(struct lttng_session *session,
3182 const struct lttng_type *type,
3183 size_t nesting)
3184 {
3185 int ret = 0;
3186
3187 switch (type->atype) {
3188 case atype_integer:
3189 ret = _lttng_integer_type_statedump(session, type, nesting);
3190 break;
3191 case atype_enum_nestable:
3192 ret = _lttng_enum_type_statedump(session, type, nesting);
3193 break;
3194 case atype_string:
3195 ret = _lttng_string_type_statedump(session, type, nesting);
3196 break;
3197 case atype_struct_nestable:
3198 ret = _lttng_struct_type_statedump(session, type, nesting);
3199 break;
3200 case atype_variant_nestable:
3201 ret = _lttng_variant_type_statedump(session, type, nesting);
3202 break;
3203
3204 /* Nested arrays and sequences are not supported yet. */
3205 case atype_array_nestable:
3206 case atype_sequence_nestable:
3207 default:
3208 WARN_ON_ONCE(1);
3209 return -EINVAL;
3210 }
3211 return ret;
3212 }
3213
3214 /*
3215 * Must be called with sessions_mutex held.
3216 */
3217 static
3218 int _lttng_field_statedump(struct lttng_session *session,
3219 const struct lttng_event_field *field,
3220 size_t nesting)
3221 {
3222 int ret = 0;
3223
3224 switch (field->type.atype) {
3225 case atype_integer:
3226 ret = _lttng_integer_field_statedump(session, field, nesting);
3227 break;
3228 case atype_enum_nestable:
3229 ret = _lttng_enum_field_statedump(session, field, nesting);
3230 break;
3231 case atype_string:
3232 ret = _lttng_string_field_statedump(session, field, nesting);
3233 break;
3234 case atype_struct_nestable:
3235 ret = _lttng_struct_field_statedump(session, field, nesting);
3236 break;
3237 case atype_array_nestable:
3238 ret = _lttng_array_field_statedump(session, field, nesting);
3239 break;
3240 case atype_sequence_nestable:
3241 ret = _lttng_sequence_field_statedump(session, field, nesting);
3242 break;
3243 case atype_variant_nestable:
3244 ret = _lttng_variant_field_statedump(session, field, nesting);
3245 break;
3246
3247 default:
3248 WARN_ON_ONCE(1);
3249 return -EINVAL;
3250 }
3251 return ret;
3252 }
3253
3254 static
3255 int _lttng_context_metadata_statedump(struct lttng_session *session,
3256 struct lttng_ctx *ctx)
3257 {
3258 int ret = 0;
3259 int i;
3260
3261 if (!ctx)
3262 return 0;
3263 for (i = 0; i < ctx->nr_fields; i++) {
3264 const struct lttng_ctx_field *field = &ctx->fields[i];
3265
3266 ret = _lttng_field_statedump(session, &field->event_field, 2);
3267 if (ret)
3268 return ret;
3269 }
3270 return ret;
3271 }
3272
3273 static
3274 int _lttng_fields_metadata_statedump(struct lttng_session *session,
3275 struct lttng_event *event)
3276 {
3277 const struct lttng_event_desc *desc = event->desc;
3278 int ret = 0;
3279 int i;
3280
3281 for (i = 0; i < desc->nr_fields; i++) {
3282 const struct lttng_event_field *field = &desc->fields[i];
3283
3284 ret = _lttng_field_statedump(session, field, 2);
3285 if (ret)
3286 return ret;
3287 }
3288 return ret;
3289 }
3290
/*
 * Must be called with sessions_mutex held.
 * The entire event metadata is printed as a single atomic metadata
 * transaction.
 */
static
int _lttng_event_metadata_statedump(struct lttng_session *session,
		struct lttng_channel *chan,
		struct lttng_event *event)
{
	int ret = 0;

	/* Skip events already described, and inactive sessions. */
	if (event->metadata_dumped || !LTTNG_READ_ONCE(session->active))
		return 0;
	/* The metadata channel itself is not described in the metadata. */
	if (chan->channel_type == METADATA_CHANNEL)
		return 0;

	/* Open the atomic metadata transaction. */
	lttng_metadata_begin(session);

	ret = lttng_metadata_printf(session,
		"event {\n"
		" name = \"%s\";\n"
		" id = %u;\n"
		" stream_id = %u;\n",
		event->desc->name,
		event->id,
		event->chan->id);
	if (ret)
		goto end;

	/* Optional per-event context, emitted as a nested struct. */
	if (event->ctx) {
		ret = lttng_metadata_printf(session,
			" context := struct {\n");
		if (ret)
			goto end;
	}
	ret = _lttng_context_metadata_statedump(session, event->ctx);
	if (ret)
		goto end;
	if (event->ctx) {
		ret = lttng_metadata_printf(session,
			" };\n");
		if (ret)
			goto end;
	}

	ret = lttng_metadata_printf(session,
		" fields := struct {\n"
		);
	if (ret)
		goto end;

	ret = _lttng_fields_metadata_statedump(session, event);
	if (ret)
		goto end;

	/*
	 * LTTng space reservation can only reserve multiples of the
	 * byte size.
	 */
	ret = lttng_metadata_printf(session,
		" };\n"
		"};\n\n");
	if (ret)
		goto end;

	/* Mark as dumped only once the whole description went out. */
	event->metadata_dumped = 1;
end:
	lttng_metadata_end(session);
	return ret;

}
3363
3364 /*
3365 * Must be called with sessions_mutex held.
3366 * The entire channel metadata is printed as a single atomic metadata
3367 * transaction.
3368 */
3369 static
3370 int _lttng_channel_metadata_statedump(struct lttng_session *session,
3371 struct lttng_channel *chan)
3372 {
3373 int ret = 0;
3374
3375 if (chan->metadata_dumped || !LTTNG_READ_ONCE(session->active))
3376 return 0;
3377
3378 if (chan->channel_type == METADATA_CHANNEL)
3379 return 0;
3380
3381 lttng_metadata_begin(session);
3382
3383 WARN_ON_ONCE(!chan->header_type);
3384 ret = lttng_metadata_printf(session,
3385 "stream {\n"
3386 " id = %u;\n"
3387 " event.header := %s;\n"
3388 " packet.context := struct packet_context;\n",
3389 chan->id,
3390 chan->header_type == 1 ? "struct event_header_compact" :
3391 "struct event_header_large");
3392 if (ret)
3393 goto end;
3394
3395 if (chan->ctx) {
3396 ret = lttng_metadata_printf(session,
3397 " event.context := struct {\n");
3398 if (ret)
3399 goto end;
3400 }
3401 ret = _lttng_context_metadata_statedump(session, chan->ctx);
3402 if (ret)
3403 goto end;
3404 if (chan->ctx) {
3405 ret = lttng_metadata_printf(session,
3406 " };\n");
3407 if (ret)
3408 goto end;
3409 }
3410
3411 ret = lttng_metadata_printf(session,
3412 "};\n\n");
3413
3414 chan->metadata_dumped = 1;
3415 end:
3416 lttng_metadata_end(session);
3417 return ret;
3418 }
3419
/*
 * Declare the CTF "struct packet_context" type shared by all streams
 * of the session (see the stream declarations emitted by
 * _lttng_channel_metadata_statedump()).
 * Must be called with sessions_mutex held.
 */
static
int _lttng_stream_packet_context_declare(struct lttng_session *session)
{
	return lttng_metadata_printf(session,
		"struct packet_context {\n"
		" uint64_clock_monotonic_t timestamp_begin;\n"
		" uint64_clock_monotonic_t timestamp_end;\n"
		" uint64_t content_size;\n"
		" uint64_t packet_size;\n"
		" uint64_t packet_seq_num;\n"
		" unsigned long events_discarded;\n"
		" uint32_t cpu_id;\n"
		"};\n\n"
		);
}
3438
/*
 * Declare the two CTF event header types referenced by the stream
 * declarations.
 *
 * Compact header:
 * id: range: 0 - 30.
 * id 31 is reserved to indicate an extended header.
 *
 * Large header:
 * id: range: 0 - 65534.
 * id 65535 is reserved to indicate an extended header.
 *
 * Must be called with sessions_mutex held.
 */
static
int _lttng_event_header_declare(struct lttng_session *session)
{
	return lttng_metadata_printf(session,
		"struct event_header_compact {\n"
		" enum : uint5_t { compact = 0 ... 30, extended = 31 } id;\n"
		" variant <id> {\n"
		" struct {\n"
		" uint27_clock_monotonic_t timestamp;\n"
		" } compact;\n"
		" struct {\n"
		" uint32_t id;\n"
		" uint64_clock_monotonic_t timestamp;\n"
		" } extended;\n"
		" } v;\n"
		"} align(%u);\n"
		"\n"
		"struct event_header_large {\n"
		" enum : uint16_t { compact = 0 ... 65534, extended = 65535 } id;\n"
		" variant <id> {\n"
		" struct {\n"
		" uint32_clock_monotonic_t timestamp;\n"
		" } compact;\n"
		" struct {\n"
		" uint32_t id;\n"
		" uint64_clock_monotonic_t timestamp;\n"
		" } extended;\n"
		" } v;\n"
		"} align(%u);\n\n",
		lttng_alignof(uint32_t) * CHAR_BIT,
		lttng_alignof(uint16_t) * CHAR_BIT
		);
}
3483
/*
 * Approximation of NTP time of day to clock monotonic correlation,
 * taken at start of trace.
 * Yes, this is only an approximation. Yes, we can (and will) do better
 * in future versions.
 * This function may return a negative offset. It may happen if the
 * system sets the REALTIME clock to 0 after boot.
 *
 * Use 64bit timespec on kernels that have it, this makes 32bit arch
 * y2038 compliant.
 */
static
int64_t measure_clock_offset(void)
{
	uint64_t monotonic_avg, monotonic[2], realtime;
	uint64_t tcf = trace_clock_freq();	/* trace clock frequency, in Hz */
	int64_t offset;
	unsigned long flags;
#ifdef LTTNG_KERNEL_HAS_TIMESPEC64
	struct timespec64 rts = { 0, 0 };
#else
	struct timespec rts = { 0, 0 };
#endif

	/* Disable interrupts to increase correlation precision. */
	local_irq_save(flags);
	/* Bracket the realtime read with two trace clock samples. */
	monotonic[0] = trace_clock_read64();
#ifdef LTTNG_KERNEL_HAS_TIMESPEC64
	ktime_get_real_ts64(&rts);
#else
	getnstimeofday(&rts);
#endif
	monotonic[1] = trace_clock_read64();
	local_irq_restore(flags);

	/* Correlate against the midpoint of the two samples. */
	monotonic_avg = (monotonic[0] + monotonic[1]) >> 1;
	/* Convert the realtime reading into trace clock units. */
	realtime = (uint64_t) rts.tv_sec * tcf;
	if (tcf == NSEC_PER_SEC) {
		/* Nanosecond-resolution trace clock: add nsec directly. */
		realtime += rts.tv_nsec;
	} else {
		uint64_t n = rts.tv_nsec * tcf;

		/* do_div(): kernel 64/32-bit division helper, modifies n. */
		do_div(n, NSEC_PER_SEC);
		realtime += n;
	}
	/* May be negative if REALTIME was set to 0 after boot. */
	offset = (int64_t) realtime - monotonic_avg;
	return offset;
}
3532
/*
 * Emit a string into the metadata stream with CTF escaping applied:
 * newlines become "\n", and backslash/double-quote characters are
 * prefixed with a backslash. Stops at the first output failure.
 */
static
int print_escaped_ctf_string(struct lttng_session *session, const char *string)
{
	int ret = 0;
	const char *p;

	for (p = string; *p != '\0'; p++) {
		char c = *p;

		if (c == '\n') {
			ret = lttng_metadata_printf(session, "%s", "\\n");
		} else {
			if (c == '\\' || c == '"') {
				/* Emit the escaping backslash first. */
				ret = lttng_metadata_printf(session, "%c", '\\');
				if (ret)
					goto error;
			}
			/* Then the character itself. */
			ret = lttng_metadata_printf(session, "%c", c);
		}
		if (ret)
			goto error;
	}
error:
	return ret;
}
3567
/*
 * Emit one quoted env field (`field = "value";`) into the metadata
 * stream, escaping the value with print_escaped_ctf_string().
 */
static
int print_metadata_escaped_field(struct lttng_session *session, const char *field,
		const char *field_value)
{
	int ret;

	ret = lttng_metadata_printf(session, " %s = \"", field);
	if (ret)
		return ret;

	ret = print_escaped_ctf_string(session, field_value);
	if (ret)
		return ret;

	return lttng_metadata_printf(session, "\";\n");
}
3587
/*
 * Output metadata into this session's metadata buffers.
 * Must be called with sessions_mutex held.
 */
static
int _lttng_session_metadata_statedump(struct lttng_session *session)
{
	unsigned char *uuid_c = session->uuid.b;
	/* 36 chars of formatted UUID + terminating NUL. */
	unsigned char uuid_s[37], clock_uuid_s[BOOT_ID_LEN];
	const char *product_uuid;
	struct lttng_channel *chan;
	struct lttng_event *event;
	int ret = 0;

	if (!LTTNG_READ_ONCE(session->active))
		return 0;

	/* Open the atomic metadata transaction. */
	lttng_metadata_begin(session);

	/*
	 * Session-wide preamble (trace/env/clock/type declarations) is
	 * only emitted once; afterwards only new channels/events are
	 * described.
	 */
	if (session->metadata_dumped)
		goto skip_session;

	snprintf(uuid_s, sizeof(uuid_s),
		"%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
		uuid_c[0], uuid_c[1], uuid_c[2], uuid_c[3],
		uuid_c[4], uuid_c[5], uuid_c[6], uuid_c[7],
		uuid_c[8], uuid_c[9], uuid_c[10], uuid_c[11],
		uuid_c[12], uuid_c[13], uuid_c[14], uuid_c[15]);

	/* Base integer typealiases and the CTF trace declaration. */
	ret = lttng_metadata_printf(session,
		"typealias integer { size = 8; align = %u; signed = false; } := uint8_t;\n"
		"typealias integer { size = 16; align = %u; signed = false; } := uint16_t;\n"
		"typealias integer { size = 32; align = %u; signed = false; } := uint32_t;\n"
		"typealias integer { size = 64; align = %u; signed = false; } := uint64_t;\n"
		"typealias integer { size = %u; align = %u; signed = false; } := unsigned long;\n"
		"typealias integer { size = 5; align = 1; signed = false; } := uint5_t;\n"
		"typealias integer { size = 27; align = 1; signed = false; } := uint27_t;\n"
		"\n"
		"trace {\n"
		" major = %u;\n"
		" minor = %u;\n"
		" uuid = \"%s\";\n"
		" byte_order = %s;\n"
		" packet.header := struct {\n"
		" uint32_t magic;\n"
		" uint8_t uuid[16];\n"
		" uint32_t stream_id;\n"
		" uint64_t stream_instance_id;\n"
		" };\n"
		"};\n\n",
		lttng_alignof(uint8_t) * CHAR_BIT,
		lttng_alignof(uint16_t) * CHAR_BIT,
		lttng_alignof(uint32_t) * CHAR_BIT,
		lttng_alignof(uint64_t) * CHAR_BIT,
		sizeof(unsigned long) * CHAR_BIT,
		lttng_alignof(unsigned long) * CHAR_BIT,
		CTF_SPEC_MAJOR,
		CTF_SPEC_MINOR,
		uuid_s,
#if __BYTE_ORDER == __BIG_ENDIAN
		"be"
#else
		"le"
#endif
	);
	if (ret)
		goto end;

	/* Environment section: hostname from the task's UTS namespace. */
	ret = lttng_metadata_printf(session,
		"env {\n"
		" hostname = \"%s\";\n"
		" domain = \"kernel\";\n"
		" sysname = \"%s\";\n"
		" kernel_release = \"%s\";\n"
		" kernel_version = \"%s\";\n"
		" tracer_name = \"lttng-modules\";\n"
		" tracer_major = %d;\n"
		" tracer_minor = %d;\n"
		" tracer_patchlevel = %d;\n"
		" trace_buffering_scheme = \"global\";\n",
		current->nsproxy->uts_ns->name.nodename,
		utsname()->sysname,
		utsname()->release,
		utsname()->version,
		LTTNG_MODULES_MAJOR_VERSION,
		LTTNG_MODULES_MINOR_VERSION,
		LTTNG_MODULES_PATCHLEVEL_VERSION
	);
	if (ret)
		goto end;

	/* User-provided strings go through the escaping helper. */
	ret = print_metadata_escaped_field(session, "trace_name", session->name);
	if (ret)
		goto end;
	ret = print_metadata_escaped_field(session, "trace_creation_datetime",
			session->creation_time);
	if (ret)
		goto end;

	/* Add the product UUID to the 'env' section */
	product_uuid = dmi_get_system_info(DMI_PRODUCT_UUID);
	if (product_uuid) {
		ret = lttng_metadata_printf(session,
			" product_uuid = \"%s\";\n",
			product_uuid
			);
		if (ret)
			goto end;
	}

	/* Close the 'env' section */
	ret = lttng_metadata_printf(session, "};\n\n");
	if (ret)
		goto end;

	/* Clock description section. */
	ret = lttng_metadata_printf(session,
		"clock {\n"
		" name = \"%s\";\n",
		trace_clock_name()
	);
	if (ret)
		goto end;

	/* Clock UUID is optional; skip it if unavailable. */
	if (!trace_clock_uuid(clock_uuid_s)) {
		ret = lttng_metadata_printf(session,
			" uuid = \"%s\";\n",
			clock_uuid_s
		);
		if (ret)
			goto end;
	}

	ret = lttng_metadata_printf(session,
		" description = \"%s\";\n"
		" freq = %llu; /* Frequency, in Hz */\n"
		" /* clock value offset from Epoch is: offset * (1/freq) */\n"
		" offset = %lld;\n"
		"};\n\n",
		trace_clock_description(),
		(unsigned long long) trace_clock_freq(),
		(long long) measure_clock_offset()
	);
	if (ret)
		goto end;

	/* Clock-mapped integer typealiases used by headers/contexts. */
	ret = lttng_metadata_printf(session,
		"typealias integer {\n"
		" size = 27; align = 1; signed = false;\n"
		" map = clock.%s.value;\n"
		"} := uint27_clock_monotonic_t;\n"
		"\n"
		"typealias integer {\n"
		" size = 32; align = %u; signed = false;\n"
		" map = clock.%s.value;\n"
		"} := uint32_clock_monotonic_t;\n"
		"\n"
		"typealias integer {\n"
		" size = 64; align = %u; signed = false;\n"
		" map = clock.%s.value;\n"
		"} := uint64_clock_monotonic_t;\n\n",
		trace_clock_name(),
		lttng_alignof(uint32_t) * CHAR_BIT,
		trace_clock_name(),
		lttng_alignof(uint64_t) * CHAR_BIT,
		trace_clock_name()
	);
	if (ret)
		goto end;

	ret = _lttng_stream_packet_context_declare(session);
	if (ret)
		goto end;

	ret = _lttng_event_header_declare(session);
	if (ret)
		goto end;

skip_session:
	/* Describe every channel, then every event, not yet dumped. */
	list_for_each_entry(chan, &session->chan, list) {
		ret = _lttng_channel_metadata_statedump(session, chan);
		if (ret)
			goto end;
	}

	list_for_each_entry(event, &session->events, list) {
		ret = _lttng_event_metadata_statedump(session, event->chan, event);
		if (ret)
			goto end;
	}
	session->metadata_dumped = 1;
end:
	lttng_metadata_end(session);
	return ret;
}
3782
/**
 * lttng_transport_register - LTT transport registration
 * @transport: transport structure
 *
 * Registers a transport which can be used as output to extract the data out of
 * LTTng. The module calling this registration function must ensure that no
 * trap-inducing code will be executed by the transport functions. E.g.
 * vmalloc_sync_mappings() must be called between a vmalloc and the moment the memory
 * is made visible to the transport function. This registration acts as a
 * vmalloc_sync_mappings. Therefore, only if the module allocates virtual memory
 * after its registration must it synchronize the TLBs.
 */
void lttng_transport_register(struct lttng_transport *transport)
{
	/*
	 * Make sure no page fault can be triggered by the module about to be
	 * registered. We deal with this here so we don't have to call
	 * vmalloc_sync_mappings() in each module's init.
	 */
	wrapper_vmalloc_sync_mappings();

	/* The transport list is protected by sessions_mutex. */
	mutex_lock(&sessions_mutex);
	list_add_tail(&transport->node, &lttng_transport_list);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_transport_register);
3808 EXPORT_SYMBOL_GPL(lttng_transport_register);
3809
/**
 * lttng_transport_unregister - LTT transport unregistration
 * @transport: transport structure
 *
 * Removes the transport from the global transport list, under the same
 * sessions_mutex protection used for registration.
 */
void lttng_transport_unregister(struct lttng_transport *transport)
{
	mutex_lock(&sessions_mutex);
	list_del(&transport->node);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_transport_unregister);
3820 EXPORT_SYMBOL_GPL(lttng_transport_unregister);
3821
3822 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
3823
3824 enum cpuhp_state lttng_hp_prepare;
3825 enum cpuhp_state lttng_hp_online;
3826
3827 static int lttng_hotplug_prepare(unsigned int cpu, struct hlist_node *node)
3828 {
3829 struct lttng_cpuhp_node *lttng_node;
3830
3831 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
3832 switch (lttng_node->component) {
3833 case LTTNG_RING_BUFFER_FRONTEND:
3834 return 0;
3835 case LTTNG_RING_BUFFER_BACKEND:
3836 return lttng_cpuhp_rb_backend_prepare(cpu, lttng_node);
3837 case LTTNG_RING_BUFFER_ITER:
3838 return 0;
3839 case LTTNG_CONTEXT_PERF_COUNTERS:
3840 return 0;
3841 default:
3842 return -EINVAL;
3843 }
3844 }
3845
3846 static int lttng_hotplug_dead(unsigned int cpu, struct hlist_node *node)
3847 {
3848 struct lttng_cpuhp_node *lttng_node;
3849
3850 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
3851 switch (lttng_node->component) {
3852 case LTTNG_RING_BUFFER_FRONTEND:
3853 return lttng_cpuhp_rb_frontend_dead(cpu, lttng_node);
3854 case LTTNG_RING_BUFFER_BACKEND:
3855 return 0;
3856 case LTTNG_RING_BUFFER_ITER:
3857 return 0;
3858 case LTTNG_CONTEXT_PERF_COUNTERS:
3859 return lttng_cpuhp_perf_counter_dead(cpu, lttng_node);
3860 default:
3861 return -EINVAL;
3862 }
3863 }
3864
3865 static int lttng_hotplug_online(unsigned int cpu, struct hlist_node *node)
3866 {
3867 struct lttng_cpuhp_node *lttng_node;
3868
3869 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
3870 switch (lttng_node->component) {
3871 case LTTNG_RING_BUFFER_FRONTEND:
3872 return lttng_cpuhp_rb_frontend_online(cpu, lttng_node);
3873 case LTTNG_RING_BUFFER_BACKEND:
3874 return 0;
3875 case LTTNG_RING_BUFFER_ITER:
3876 return lttng_cpuhp_rb_iter_online(cpu, lttng_node);
3877 case LTTNG_CONTEXT_PERF_COUNTERS:
3878 return lttng_cpuhp_perf_counter_online(cpu, lttng_node);
3879 default:
3880 return -EINVAL;
3881 }
3882 }
3883
3884 static int lttng_hotplug_offline(unsigned int cpu, struct hlist_node *node)
3885 {
3886 struct lttng_cpuhp_node *lttng_node;
3887
3888 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
3889 switch (lttng_node->component) {
3890 case LTTNG_RING_BUFFER_FRONTEND:
3891 return lttng_cpuhp_rb_frontend_offline(cpu, lttng_node);
3892 case LTTNG_RING_BUFFER_BACKEND:
3893 return 0;
3894 case LTTNG_RING_BUFFER_ITER:
3895 return 0;
3896 case LTTNG_CONTEXT_PERF_COUNTERS:
3897 return 0;
3898 default:
3899 return -EINVAL;
3900 }
3901 }
3902
/*
 * Register LTTng's two dynamic multi-instance CPU hotplug states and
 * publish the returned state ids to the ring buffer layer.
 */
static int __init lttng_init_cpu_hotplug(void)
{
	int ret;

	/* Prepare/dead callbacks, in the BP (prepare) section. */
	ret = cpuhp_setup_state_multi(CPUHP_BP_PREPARE_DYN, "lttng:prepare",
			lttng_hotplug_prepare,
			lttng_hotplug_dead);
	if (ret < 0) {
		return ret;
	}
	/* For dynamic states, the returned value is the state id. */
	lttng_hp_prepare = ret;
	lttng_rb_set_hp_prepare(ret);

	/* Online/offline callbacks, in the AP (online) section. */
	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "lttng:online",
			lttng_hotplug_online,
			lttng_hotplug_offline);
	if (ret < 0) {
		/* Roll back the prepare state on failure. */
		cpuhp_remove_multi_state(lttng_hp_prepare);
		lttng_hp_prepare = 0;
		return ret;
	}
	lttng_hp_online = ret;
	lttng_rb_set_hp_online(ret);

	return 0;
}
3929
/* Unregister the hotplug states, in reverse order of registration. */
static void __exit lttng_exit_cpu_hotplug(void)
{
	/* Clear the ids in the ring buffer layer before removal. */
	lttng_rb_set_hp_online(0);
	cpuhp_remove_multi_state(lttng_hp_online);
	lttng_rb_set_hp_prepare(0);
	cpuhp_remove_multi_state(lttng_hp_prepare);
}
3937
3938 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
/* No-op stubs: the cpuhp multi-instance API requires kernel >= 4.10. */
static int lttng_init_cpu_hotplug(void)
{
	return 0;
}
static void lttng_exit_cpu_hotplug(void)
{
}
3946 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
3947
3948
/*
 * Module init: set up wrappers, probes, contexts, tracepoints, the
 * event/notifier slab caches, the ABI, the logger and CPU hotplug
 * support, unwinding everything in reverse order on failure.
 */
static int __init lttng_events_init(void)
{
	int ret;

	ret = wrapper_lttng_fixup_sig(THIS_MODULE);
	if (ret)
		return ret;
	ret = wrapper_get_pfnblock_flags_mask_init();
	if (ret)
		return ret;
	ret = wrapper_get_pageblock_flags_mask_init();
	if (ret)
		return ret;
	ret = lttng_probes_init();
	if (ret)
		return ret;
	/*
	 * NOTE(review): a lttng_context_init() failure returns without
	 * undoing lttng_probes_init() — confirm whether that is
	 * intentional (probes may not need teardown on this path).
	 */
	ret = lttng_context_init();
	if (ret)
		return ret;
	ret = lttng_tracepoint_init();
	if (ret)
		goto error_tp;
	event_cache = KMEM_CACHE(lttng_event, 0);
	if (!event_cache) {
		ret = -ENOMEM;
		goto error_kmem_event;
	}
	event_notifier_cache = KMEM_CACHE(lttng_event_notifier, 0);
	if (!event_notifier_cache) {
		ret = -ENOMEM;
		goto error_kmem_event_notifier;
	}
	ret = lttng_abi_init();
	if (ret)
		goto error_abi;
	ret = lttng_logger_init();
	if (ret)
		goto error_logger;
	ret = lttng_init_cpu_hotplug();
	if (ret)
		goto error_hotplug;
	printk(KERN_NOTICE "LTTng: Loaded modules v%s.%s.%s%s (%s)%s%s\n",
		__stringify(LTTNG_MODULES_MAJOR_VERSION),
		__stringify(LTTNG_MODULES_MINOR_VERSION),
		__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
		LTTNG_MODULES_EXTRAVERSION,
		LTTNG_VERSION_NAME,
#ifdef LTTNG_EXTRA_VERSION_GIT
		LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
#else
		"",
#endif
#ifdef LTTNG_EXTRA_VERSION_NAME
		LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
#else
		"");
#endif
	return 0;

	/* Error unwinding, in reverse order of initialization. */
error_hotplug:
	lttng_logger_exit();
error_logger:
	lttng_abi_exit();
error_abi:
	kmem_cache_destroy(event_notifier_cache);
error_kmem_event_notifier:
	kmem_cache_destroy(event_cache);
error_kmem_event:
	lttng_tracepoint_exit();
error_tp:
	lttng_context_exit();
	printk(KERN_NOTICE "LTTng: Failed to load modules v%s.%s.%s%s (%s)%s%s\n",
		__stringify(LTTNG_MODULES_MAJOR_VERSION),
		__stringify(LTTNG_MODULES_MINOR_VERSION),
		__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
		LTTNG_MODULES_EXTRAVERSION,
		LTTNG_VERSION_NAME,
#ifdef LTTNG_EXTRA_VERSION_GIT
		LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
#else
		"",
#endif
#ifdef LTTNG_EXTRA_VERSION_NAME
		LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
#else
		"");
#endif
	return ret;
}
4038
4039 module_init(lttng_events_init);
4040
/*
 * Module exit: tear down hotplug/logger/ABI, destroy every remaining
 * session, then release caches, tracepoints and contexts.
 */
static void __exit lttng_events_exit(void)
{
	struct lttng_session *session, *tmpsession;

	lttng_exit_cpu_hotplug();
	lttng_logger_exit();
	lttng_abi_exit();
	/* _safe variant: lttng_session_destroy() unlinks the session. */
	list_for_each_entry_safe(session, tmpsession, &sessions, list)
		lttng_session_destroy(session);
	kmem_cache_destroy(event_cache);
	kmem_cache_destroy(event_notifier_cache);
	lttng_tracepoint_exit();
	lttng_context_exit();
	printk(KERN_NOTICE "LTTng: Unloaded modules v%s.%s.%s%s (%s)%s%s\n",
		__stringify(LTTNG_MODULES_MAJOR_VERSION),
		__stringify(LTTNG_MODULES_MINOR_VERSION),
		__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
		LTTNG_MODULES_EXTRAVERSION,
		LTTNG_VERSION_NAME,
#ifdef LTTNG_EXTRA_VERSION_GIT
		LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
#else
		"",
#endif
#ifdef LTTNG_EXTRA_VERSION_NAME
		LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
#else
		"");
#endif
}
4071
4072 module_exit(lttng_events_exit);
4073
4074 #include <generated/patches.h>
4075 #ifdef LTTNG_EXTRA_VERSION_GIT
4076 MODULE_INFO(extra_version_git, LTTNG_EXTRA_VERSION_GIT);
4077 #endif
4078 #ifdef LTTNG_EXTRA_VERSION_NAME
4079 MODULE_INFO(extra_version_name, LTTNG_EXTRA_VERSION_NAME);
4080 #endif
4081 MODULE_LICENSE("GPL and additional rights");
4082 MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
4083 MODULE_DESCRIPTION("LTTng tracer");
4084 MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
4085 __stringify(LTTNG_MODULES_MINOR_VERSION) "."
4086 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
4087 LTTNG_MODULES_EXTRAVERSION);