85a064fc686cc1d13cd4bf3a9775a36a9820b5e4
[lttng-modules.git] / src / lttng-events.c
1 /* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
2 *
3 * lttng-events.c
4 *
5 * Holds LTTng per-session event registry.
6 *
7 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 */
9
10 /*
11 * This page_alloc.h wrapper needs to be included before gfpflags.h because it
12 * overrides a function with a define.
13 */
14 #include "wrapper/page_alloc.h"
15
16 #include <linux/module.h>
17 #include <linux/mutex.h>
18 #include <linux/sched.h>
19 #include <linux/slab.h>
20 #include <linux/jiffies.h>
21 #include <linux/utsname.h>
22 #include <linux/err.h>
23 #include <linux/seq_file.h>
24 #include <linux/file.h>
25 #include <linux/anon_inodes.h>
26 #include <wrapper/file.h>
27 #include <linux/uaccess.h>
28 #include <linux/vmalloc.h>
29 #include <linux/dmi.h>
30
31 #include <wrapper/uuid.h>
32 #include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_mappings() */
33 #include <wrapper/random.h>
34 #include <wrapper/tracepoint.h>
35 #include <wrapper/list.h>
36 #include <wrapper/types.h>
37 #include <lttng/kernel-version.h>
38 #include <lttng/events.h>
39 #include <lttng/tracer.h>
40 #include <lttng/event-notifier-notification.h>
41 #include <lttng/abi-old.h>
42 #include <lttng/endian.h>
43 #include <lttng/string-utils.h>
44 #include <lttng/utils.h>
45 #include <ringbuffer/backend.h>
46 #include <ringbuffer/frontend.h>
47 #include <wrapper/time.h>
48
/* Default size (bytes) of the vzalloc'd per-session metadata cache. */
#define METADATA_CACHE_DEFAULT_SIZE 4096

/* All tracing sessions known to this module. */
static LIST_HEAD(sessions);
/* All event-notifier groups known to this module. */
static LIST_HEAD(event_notifier_groups);
/* Registered ring-buffer transports (looked up by name). */
static LIST_HEAD(lttng_transport_list);
/*
 * Protect the sessions and metadata caches.
 */
static DEFINE_MUTEX(sessions_mutex);
/* Slab caches for struct lttng_event / struct lttng_event_notifier. */
static struct kmem_cache *event_cache;
static struct kmem_cache *event_notifier_cache;

/* Enabler synchronization (lazy and eager) and enabler teardown. */
static void lttng_session_lazy_sync_event_enablers(struct lttng_session *session);
static void lttng_session_sync_event_enablers(struct lttng_session *session);
static void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler);
static void lttng_event_notifier_enabler_destroy(struct lttng_event_notifier_enabler *event_notifier_enabler);
static void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group);

/* Internal destruction / unregistration helpers (sessions_mutex held). */
static void _lttng_event_destroy(struct lttng_event *event);
static void _lttng_event_notifier_destroy(struct lttng_event_notifier *event_notifier);
static void _lttng_channel_destroy(struct lttng_channel *chan);
static int _lttng_event_unregister(struct lttng_event *event);
static int _lttng_event_notifier_unregister(struct lttng_event_notifier *event_notifier);
/* CTF metadata statedump helpers. */
static
int _lttng_event_metadata_statedump(struct lttng_session *session,
				  struct lttng_channel *chan,
				  struct lttng_event *event);
static
int _lttng_session_metadata_statedump(struct lttng_session *session);
static
void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream);
static
int _lttng_type_statedump(struct lttng_session *session,
		const struct lttng_type *type,
		size_t nesting);
static
int _lttng_field_statedump(struct lttng_session *session,
		const struct lttng_event_field *field,
		size_t nesting);
88
/*
 * Wait for all currently executing probe handlers to complete before
 * tearing events down.  On kernels >= 5.1 synchronize_sched() was removed
 * and folded into synchronize_rcu().  The PREEMPT_RT patch set renamed
 * its config option from CONFIG_PREEMPT_RT to CONFIG_PREEMPT_RT_FULL
 * around kernel 3.4, hence the second, version-gated grace period for
 * fully-preemptible RT kernels.
 */
void synchronize_trace(void)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,1,0))
	synchronize_rcu();
#else
	synchronize_sched();
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
#ifdef CONFIG_PREEMPT_RT_FULL
	synchronize_rcu();
#endif
#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
#ifdef CONFIG_PREEMPT_RT
	synchronize_rcu();
#endif
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
}
107
/* Take the global sessions_mutex (protects sessions and metadata caches). */
void lttng_lock_sessions(void)
{
	mutex_lock(&sessions_mutex);
}
112
/* Release the global sessions_mutex. */
void lttng_unlock_sessions(void)
{
	mutex_unlock(&sessions_mutex);
}
117
118 static struct lttng_transport *lttng_transport_find(const char *name)
119 {
120 struct lttng_transport *transport;
121
122 list_for_each_entry(transport, &lttng_transport_list, node) {
123 if (!strcmp(transport->name, name))
124 return transport;
125 }
126 return NULL;
127 }
128
129 /*
130 * Called with sessions lock held.
131 */
132 int lttng_session_active(void)
133 {
134 struct lttng_session *iter;
135
136 list_for_each_entry(iter, &sessions, list) {
137 if (iter->active)
138 return 1;
139 }
140 return 0;
141 }
142
/*
 * Allocate a new tracing session, its metadata cache, and register it on
 * the global sessions list.  Returns the new session, or NULL on
 * allocation failure.  Cleanup on error unwinds in reverse order of
 * acquisition via the goto chain.
 */
struct lttng_session *lttng_session_create(void)
{
	struct lttng_session *session;
	struct lttng_metadata_cache *metadata_cache;
	int i;

	mutex_lock(&sessions_mutex);
	session = lttng_kvzalloc(sizeof(struct lttng_session), GFP_KERNEL);
	if (!session)
		goto err;
	INIT_LIST_HEAD(&session->chan);
	INIT_LIST_HEAD(&session->events);
	lttng_guid_gen(&session->uuid);

	/* Metadata cache holds the CTF metadata; refcounted because the
	 * metadata channel can outlive the session. */
	metadata_cache = kzalloc(sizeof(struct lttng_metadata_cache),
			GFP_KERNEL);
	if (!metadata_cache)
		goto err_free_session;
	metadata_cache->data = vzalloc(METADATA_CACHE_DEFAULT_SIZE);
	if (!metadata_cache->data)
		goto err_free_cache;
	metadata_cache->cache_alloc = METADATA_CACHE_DEFAULT_SIZE;
	kref_init(&metadata_cache->refcount);
	mutex_init(&metadata_cache->lock);
	session->metadata_cache = metadata_cache;
	INIT_LIST_HEAD(&metadata_cache->metadata_stream);
	/* Cache keeps its own copy of the session UUID. */
	memcpy(&metadata_cache->uuid, &session->uuid,
		sizeof(metadata_cache->uuid));
	INIT_LIST_HEAD(&session->enablers_head);
	for (i = 0; i < LTTNG_EVENT_HT_SIZE; i++)
		INIT_HLIST_HEAD(&session->events_ht.table[i]);
	list_add(&session->list, &sessions);
	/* Wire up the six per-session ID trackers back to this session. */
	session->pid_tracker.session = session;
	session->pid_tracker.tracker_type = TRACKER_PID;
	session->vpid_tracker.session = session;
	session->vpid_tracker.tracker_type = TRACKER_VPID;
	session->uid_tracker.session = session;
	session->uid_tracker.tracker_type = TRACKER_UID;
	session->vuid_tracker.session = session;
	session->vuid_tracker.tracker_type = TRACKER_VUID;
	session->gid_tracker.session = session;
	session->gid_tracker.tracker_type = TRACKER_GID;
	session->vgid_tracker.session = session;
	session->vgid_tracker.tracker_type = TRACKER_VGID;
	mutex_unlock(&sessions_mutex);
	return session;

err_free_cache:
	kfree(metadata_cache);
err_free_session:
	lttng_kvfree(session);
err:
	mutex_unlock(&sessions_mutex);
	return NULL;
}
198
/*
 * Create an event-notifier group backed by a "relay-event-notifier"
 * ring-buffer channel used to carry notifier notifications to
 * user space.  Returns the group, or NULL on failure (transport missing,
 * module pin failure, or allocation/channel-creation failure).
 */
struct lttng_event_notifier_group *lttng_event_notifier_group_create(void)
{
	struct lttng_transport *transport = NULL;
	struct lttng_event_notifier_group *event_notifier_group;
	const char *transport_name = "relay-event-notifier";
	size_t subbuf_size = 4096;	//TODO
	size_t num_subbuf = 16;		//TODO
	unsigned int switch_timer_interval = 0;
	unsigned int read_timer_interval = 0;
	int i;

	mutex_lock(&sessions_mutex);

	transport = lttng_transport_find(transport_name);
	if (!transport) {
		printk(KERN_WARNING "LTTng: transport %s not found\n",
		       transport_name);
		goto notransport;
	}
	/* Pin the transport module for the group's lifetime. */
	if (!try_module_get(transport->owner)) {
		printk(KERN_WARNING "LTTng: Can't lock transport %s module.\n",
		       transport_name);
		goto notransport;
	}

	event_notifier_group = lttng_kvzalloc(sizeof(struct lttng_event_notifier_group),
			GFP_KERNEL);
	if (!event_notifier_group)
		goto nomem;

	/*
	 * Initialize the ring buffer used to store event notifier
	 * notifications.
	 */
	event_notifier_group->ops = &transport->ops;
	event_notifier_group->chan = transport->ops.channel_create(
			transport_name, event_notifier_group, NULL,
			subbuf_size, num_subbuf, switch_timer_interval,
			read_timer_interval);
	if (!event_notifier_group->chan)
		goto create_error;

	event_notifier_group->transport = transport;

	INIT_LIST_HEAD(&event_notifier_group->enablers_head);
	INIT_LIST_HEAD(&event_notifier_group->event_notifiers_head);
	for (i = 0; i < LTTNG_EVENT_NOTIFIER_HT_SIZE; i++)
		INIT_HLIST_HEAD(&event_notifier_group->event_notifiers_ht.table[i]);

	list_add(&event_notifier_group->node, &event_notifier_groups);

	mutex_unlock(&sessions_mutex);

	return event_notifier_group;

create_error:
	lttng_kvfree(event_notifier_group);
nomem:
	/* Drop the module reference taken above (NULL check is defensive). */
	if (transport)
		module_put(transport->owner);
notransport:
	mutex_unlock(&sessions_mutex);
	return NULL;
}
263
264 void metadata_cache_destroy(struct kref *kref)
265 {
266 struct lttng_metadata_cache *cache =
267 container_of(kref, struct lttng_metadata_cache, refcount);
268 vfree(cache->data);
269 kfree(cache);
270 }
271
/*
 * Tear down a tracing session: deactivate it, unregister all probes,
 * wait for in-flight probe handlers, then free enablers, events and
 * channels, hang up metadata streams, drop the trackers and the
 * metadata-cache reference, and finally unlink and free the session.
 * The ordering (unregister -> synchronize_trace -> destroy) is what
 * makes freeing safe against concurrent tracing.
 */
void lttng_session_destroy(struct lttng_session *session)
{
	struct lttng_channel *chan, *tmpchan;
	struct lttng_event *event, *tmpevent;
	struct lttng_metadata_stream *metadata_stream;
	struct lttng_event_enabler *event_enabler, *tmp_event_enabler;
	int ret;

	mutex_lock(&sessions_mutex);
	WRITE_ONCE(session->active, 0);
	list_for_each_entry(chan, &session->chan, list) {
		ret = lttng_syscalls_unregister_event(chan);
		WARN_ON(ret);
	}
	list_for_each_entry(event, &session->events, list) {
		ret = _lttng_event_unregister(event);
		WARN_ON(ret);
	}
	synchronize_trace();	/* Wait for in-flight events to complete */
	list_for_each_entry(chan, &session->chan, list) {
		ret = lttng_syscalls_destroy_event(chan);
		WARN_ON(ret);
	}
	/* _safe variants: destructors remove entries from the lists. */
	list_for_each_entry_safe(event_enabler, tmp_event_enabler,
			&session->enablers_head, node)
		lttng_event_enabler_destroy(event_enabler);
	list_for_each_entry_safe(event, tmpevent, &session->events, list)
		_lttng_event_destroy(event);
	list_for_each_entry_safe(chan, tmpchan, &session->chan, list) {
		BUG_ON(chan->channel_type == METADATA_CHANNEL);
		_lttng_channel_destroy(chan);
	}
	mutex_lock(&session->metadata_cache->lock);
	/* Wake up metadata readers so they can observe finalization. */
	list_for_each_entry(metadata_stream, &session->metadata_cache->metadata_stream, list)
		_lttng_metadata_channel_hangup(metadata_stream);
	mutex_unlock(&session->metadata_cache->lock);
	lttng_id_tracker_destroy(&session->pid_tracker, false);
	lttng_id_tracker_destroy(&session->vpid_tracker, false);
	lttng_id_tracker_destroy(&session->uid_tracker, false);
	lttng_id_tracker_destroy(&session->vuid_tracker, false);
	lttng_id_tracker_destroy(&session->gid_tracker, false);
	lttng_id_tracker_destroy(&session->vgid_tracker, false);
	/* Cache may outlive the session if a metadata stream still holds a ref. */
	kref_put(&session->metadata_cache->refcount, metadata_cache_destroy);
	list_del(&session->list);
	mutex_unlock(&sessions_mutex);
	lttng_kvfree(session);
}
319
/*
 * Tear down an event-notifier group: unregister all notifiers, wait for
 * in-flight handlers, then free enablers and notifiers, destroy the
 * notification channel, release the transport module and unlink the
 * group.  NULL input is a no-op.
 */
void lttng_event_notifier_group_destroy(
		struct lttng_event_notifier_group *event_notifier_group)
{
	struct lttng_event_notifier_enabler *event_notifier_enabler, *tmp_event_notifier_enabler;
	struct lttng_event_notifier *event_notifier, *tmpevent_notifier;
	int ret;

	if (!event_notifier_group)
		return;

	mutex_lock(&sessions_mutex);

	ret = lttng_syscalls_unregister_event_notifier(event_notifier_group);
	WARN_ON(ret);

	list_for_each_entry_safe(event_notifier, tmpevent_notifier,
			&event_notifier_group->event_notifiers_head, list) {
		ret = _lttng_event_notifier_unregister(event_notifier);
		WARN_ON(ret);
	}

	/* Wait for in-flight event notifier to complete */
	synchronize_trace();

	/* Flush any pending wakeup work before freeing the group. */
	irq_work_sync(&event_notifier_group->wakeup_pending);

	kfree(event_notifier_group->sc_filter);

	list_for_each_entry_safe(event_notifier_enabler, tmp_event_notifier_enabler,
			&event_notifier_group->enablers_head, node)
		lttng_event_notifier_enabler_destroy(event_notifier_enabler);

	list_for_each_entry_safe(event_notifier, tmpevent_notifier,
			&event_notifier_group->event_notifiers_head, list)
		_lttng_event_notifier_destroy(event_notifier);

	event_notifier_group->ops->channel_destroy(event_notifier_group->chan);
	module_put(event_notifier_group->transport->owner);
	list_del(&event_notifier_group->node);

	mutex_unlock(&sessions_mutex);
	lttng_kvfree(event_notifier_group);
}
363
/*
 * Trigger a kernel statedump for the given session, serialized against
 * other session operations by sessions_mutex.  Returns the statedump
 * start result.
 */
int lttng_session_statedump(struct lttng_session *session)
{
	int ret;

	mutex_lock(&sessions_mutex);
	ret = lttng_statedump_start(session);
	mutex_unlock(&sessions_mutex);
	return ret;
}
373
/*
 * Activate a session: sync enablers, pick per-channel event header
 * types, clear stream quiescence, mark the session active and dump the
 * metadata plus the kernel statedump.  Returns -EBUSY if already
 * active; on statedump failure the session is deactivated again.
 */
int lttng_session_enable(struct lttng_session *session)
{
	int ret = 0;
	struct lttng_channel *chan;

	mutex_lock(&sessions_mutex);
	if (session->active) {
		ret = -EBUSY;
		goto end;
	}

	/* Set transient enabler state to "enabled" */
	session->tstate = 1;

	/* We need to sync enablers with session before activation. */
	lttng_session_sync_event_enablers(session);

	/*
	 * Snapshot the number of events per channel to know the type of header
	 * we need to use.
	 */
	list_for_each_entry(chan, &session->chan, list) {
		if (chan->header_type)
			continue;		/* don't change it if session stop/restart */
		/* Compact headers encode event IDs < 31 inline. */
		if (chan->free_event_id < 31)
			chan->header_type = 1;	/* compact */
		else
			chan->header_type = 2;	/* large */
	}

	/* Clear each stream's quiescent state. */
	list_for_each_entry(chan, &session->chan, list) {
		if (chan->channel_type != METADATA_CHANNEL)
			lib_ring_buffer_clear_quiescent_channel(chan->chan);
	}

	WRITE_ONCE(session->active, 1);
	WRITE_ONCE(session->been_active, 1);
	ret = _lttng_session_metadata_statedump(session);
	if (ret) {
		/* Roll back activation on metadata statedump failure. */
		WRITE_ONCE(session->active, 0);
		goto end;
	}
	ret = lttng_statedump_start(session);
	if (ret)
		WRITE_ONCE(session->active, 0);
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
424
/*
 * Deactivate a session: mark it inactive, sync enablers to the disabled
 * transient state, and set each data stream quiescent.  Returns -EBUSY
 * if the session is not active.
 */
int lttng_session_disable(struct lttng_session *session)
{
	int ret = 0;
	struct lttng_channel *chan;

	mutex_lock(&sessions_mutex);
	if (!session->active) {
		ret = -EBUSY;
		goto end;
	}
	WRITE_ONCE(session->active, 0);

	/* Set transient enabler state to "disabled" */
	session->tstate = 0;
	lttng_session_sync_event_enablers(session);

	/* Set each stream's quiescent state. */
	list_for_each_entry(chan, &session->chan, list) {
		if (chan->channel_type != METADATA_CHANNEL)
			lib_ring_buffer_set_quiescent_channel(chan->chan);
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
450
/*
 * Regenerate the session's metadata from scratch: wipe the metadata
 * cache, bump its version, reset all stream positions and per-object
 * "dumped" flags, then re-run the metadata statedump.  Requires an
 * active session (-EBUSY otherwise).
 */
int lttng_session_metadata_regenerate(struct lttng_session *session)
{
	int ret = 0;
	struct lttng_channel *chan;
	struct lttng_event *event;
	struct lttng_metadata_cache *cache = session->metadata_cache;
	struct lttng_metadata_stream *stream;

	mutex_lock(&sessions_mutex);
	if (!session->active) {
		ret = -EBUSY;
		goto end;
	}

	mutex_lock(&cache->lock);
	memset(cache->data, 0, cache->cache_alloc);
	cache->metadata_written = 0;
	cache->version++;
	/* Rewind every attached metadata stream to the cache start. */
	list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list) {
		stream->metadata_out = 0;
		stream->metadata_in = 0;
	}
	mutex_unlock(&cache->lock);

	/* Force a full re-dump of session, channel and event metadata. */
	session->metadata_dumped = 0;
	list_for_each_entry(chan, &session->chan, list) {
		chan->metadata_dumped = 0;
	}

	list_for_each_entry(event, &session->events, list) {
		event->metadata_dumped = 0;
	}

	ret = _lttng_session_metadata_statedump(session);

end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
490
/*
 * Enable a data channel.  Metadata channels cannot be toggled (-EPERM);
 * an already-enabled channel yields -EEXIST.  Enablers are synced
 * before the enabled flag is published.
 */
int lttng_channel_enable(struct lttng_channel *channel)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (channel->channel_type == METADATA_CHANNEL) {
		ret = -EPERM;
		goto end;
	}
	if (channel->enabled) {
		ret = -EEXIST;
		goto end;
	}
	/* Set transient enabler state to "enabled" */
	channel->tstate = 1;
	lttng_session_sync_event_enablers(channel->session);
	/* Set atomically the state to "enabled" */
	WRITE_ONCE(channel->enabled, 1);
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
513
/*
 * Disable a data channel.  Metadata channels cannot be toggled (-EPERM);
 * an already-disabled channel yields -EEXIST.  The enabled flag is
 * cleared before enablers are re-synced.
 */
int lttng_channel_disable(struct lttng_channel *channel)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (channel->channel_type == METADATA_CHANNEL) {
		ret = -EPERM;
		goto end;
	}
	if (!channel->enabled) {
		ret = -EEXIST;
		goto end;
	}
	/* Set atomically the state to "disabled" */
	WRITE_ONCE(channel->enabled, 0);
	/* Set transient enabler state to "disabled" */
	channel->tstate = 0;
	lttng_session_sync_event_enablers(channel->session);
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
536
/*
 * Enable a single event.  Events in metadata channels cannot be toggled
 * (-EPERM); already-enabled events yield -EEXIST.  Tracepoint and
 * syscall events are driven by enablers, so direct toggling is refused
 * (-EINVAL); kretprobes have paired entry/return events handled by a
 * dedicated helper.
 */
int lttng_event_enable(struct lttng_event *event)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (event->chan->channel_type == METADATA_CHANNEL) {
		ret = -EPERM;
		goto end;
	}
	if (event->enabled) {
		ret = -EEXIST;
		goto end;
	}
	switch (event->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
	case LTTNG_KERNEL_SYSCALL:
		ret = -EINVAL;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
	case LTTNG_KERNEL_NOOP:
		WRITE_ONCE(event->enabled, 1);
		break;
	case LTTNG_KERNEL_KRETPROBE:
		ret = lttng_kretprobes_event_enable_state(event, 1);
		break;
	case LTTNG_KERNEL_FUNCTION:	/* Fall-through. */
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
572
/*
 * Disable a single event.  Mirror of lttng_event_enable(): metadata
 * channel events are refused (-EPERM), already-disabled events yield
 * -EEXIST, enabler-driven instrumentation (tracepoint/syscall) cannot
 * be toggled directly (-EINVAL).
 */
int lttng_event_disable(struct lttng_event *event)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (event->chan->channel_type == METADATA_CHANNEL) {
		ret = -EPERM;
		goto end;
	}
	if (!event->enabled) {
		ret = -EEXIST;
		goto end;
	}
	switch (event->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
	case LTTNG_KERNEL_SYSCALL:
		ret = -EINVAL;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
	case LTTNG_KERNEL_NOOP:
		WRITE_ONCE(event->enabled, 0);
		break;
	case LTTNG_KERNEL_KRETPROBE:
		ret = lttng_kretprobes_event_enable_state(event, 0);
		break;
	case LTTNG_KERNEL_FUNCTION:	/* Fall-through. */
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
608
/*
 * Enable an event notifier.  Already-enabled notifiers yield -EEXIST;
 * tracepoint/syscall notifiers are enabler-driven (-EINVAL); only
 * kprobe/uprobe notifiers can be toggled directly.  Other
 * instrumentation types are not supported for notifiers.
 */
int lttng_event_notifier_enable(struct lttng_event_notifier *event_notifier)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (event_notifier->enabled) {
		ret = -EEXIST;
		goto end;
	}
	switch (event_notifier->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
	case LTTNG_KERNEL_SYSCALL:
		ret = -EINVAL;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
		WRITE_ONCE(event_notifier->enabled, 1);
		break;
	case LTTNG_KERNEL_FUNCTION:
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_KRETPROBE:
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
638
/*
 * Disable an event notifier.  Mirror of lttng_event_notifier_enable():
 * already-disabled notifiers yield -EEXIST, enabler-driven types are
 * refused (-EINVAL), only kprobe/uprobe notifiers toggle directly.
 */
int lttng_event_notifier_disable(struct lttng_event_notifier *event_notifier)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (!event_notifier->enabled) {
		ret = -EEXIST;
		goto end;
	}
	switch (event_notifier->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
	case LTTNG_KERNEL_SYSCALL:
		ret = -EINVAL;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
		WRITE_ONCE(event_notifier->enabled, 0);
		break;
	case LTTNG_KERNEL_FUNCTION:
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_KRETPROBE:
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
668
/*
 * Create a channel within a session using the named ring-buffer
 * transport.  Refuses to add non-metadata channels to a session that
 * has ever been active.  Pins the transport module for the channel's
 * lifetime.  Returns the channel, or NULL on failure (errors unwind
 * via the goto chain in reverse acquisition order).
 */
struct lttng_channel *lttng_channel_create(struct lttng_session *session,
				       const char *transport_name,
				       void *buf_addr,
				       size_t subbuf_size, size_t num_subbuf,
				       unsigned int switch_timer_interval,
				       unsigned int read_timer_interval,
				       enum channel_type channel_type)
{
	struct lttng_channel *chan;
	struct lttng_transport *transport = NULL;

	mutex_lock(&sessions_mutex);
	if (session->been_active && channel_type != METADATA_CHANNEL)
		goto active;	/* Refuse to add channel to active session */
	transport = lttng_transport_find(transport_name);
	if (!transport) {
		printk(KERN_WARNING "LTTng: transport %s not found\n",
		       transport_name);
		goto notransport;
	}
	if (!try_module_get(transport->owner)) {
		printk(KERN_WARNING "LTTng: Can't lock transport module.\n");
		goto notransport;
	}
	chan = kzalloc(sizeof(struct lttng_channel), GFP_KERNEL);
	if (!chan)
		goto nomem;
	chan->session = session;
	chan->id = session->free_chan_id++;
	chan->ops = &transport->ops;
	/*
	 * Note: the channel creation op already writes into the packet
	 * headers. Therefore the "chan" information used as input
	 * should be already accessible.
	 */
	chan->chan = transport->ops.channel_create(transport_name,
			chan, buf_addr, subbuf_size, num_subbuf,
			switch_timer_interval, read_timer_interval);
	if (!chan->chan)
		goto create_error;
	chan->tstate = 1;
	chan->enabled = 1;
	chan->transport = transport;
	chan->channel_type = channel_type;
	list_add(&chan->list, &session->chan);
	mutex_unlock(&sessions_mutex);
	return chan;

create_error:
	kfree(chan);
nomem:
	if (transport)
		module_put(transport->owner);
notransport:
active:
	mutex_unlock(&sessions_mutex);
	return NULL;
}
727
/*
 * Only used internally at session destruction for per-cpu channels, and
 * when metadata channel is released.
 * Needs to be called with sessions mutex held.
 */
static
void _lttng_channel_destroy(struct lttng_channel *chan)
{
	/* Destroy the ring buffer, then drop the transport module pin. */
	chan->ops->channel_destroy(chan->chan);
	module_put(chan->transport->owner);
	list_del(&chan->list);
	lttng_destroy_context(chan->ctx);
	kfree(chan);
}
742
/*
 * Destroy a metadata channel (only valid for METADATA_CHANNEL); exported
 * so the metadata-stream release path can call it.
 */
void lttng_metadata_channel_destroy(struct lttng_channel *chan)
{
	BUG_ON(chan->channel_type != METADATA_CHANNEL);

	/* Protect the metadata cache with the sessions_mutex. */
	mutex_lock(&sessions_mutex);
	_lttng_channel_destroy(chan);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_metadata_channel_destroy);
753
/*
 * Mark a metadata stream finalized and wake any reader blocked on it,
 * so user space observes the hang-up.
 */
static
void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream)
{
	stream->finalized = 1;
	wake_up_interruptible(&stream->read_wait);
}
760
/*
 * Supports event creation while tracing session is active.
 * Needs to be called with sessions mutex held.
 *
 * Creates one lttng_event for the given channel and instrumentation
 * type (two for kretprobes: entry + return), registers the probe where
 * applicable, dumps the event metadata, and links the event into the
 * session hash table and list.  Returns the event or an ERR_PTR.
 */
struct lttng_event *_lttng_event_create(struct lttng_channel *chan,
				struct lttng_kernel_event *event_param,
				void *filter,
				const struct lttng_event_desc *event_desc,
				enum lttng_kernel_instrumentation itype)
{
	struct lttng_session *session = chan->session;
	struct lttng_event *event;
	const char *event_name;
	struct hlist_head *head;
	int ret;

	/* Channel event-ID space exhausted. */
	if (chan->free_event_id == -1U) {
		ret = -EMFILE;
		goto full;
	}

	/* Pick the event name source per instrumentation type. */
	switch (itype) {
	case LTTNG_KERNEL_TRACEPOINT:
		event_name = event_desc->name;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
	case LTTNG_KERNEL_KRETPROBE:
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_SYSCALL:
		event_name = event_param->name;
		break;
	case LTTNG_KERNEL_FUNCTION:	/* Fall-through. */
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
		goto type_error;
	}

	/* Reject duplicate (name, channel) pairs. */
	head = utils_borrow_hash_table_bucket(session->events_ht.table,
		LTTNG_EVENT_HT_SIZE, event_name);
	lttng_hlist_for_each_entry(event, head, hlist) {
		WARN_ON_ONCE(!event->desc);
		if (!strncmp(event->desc->name, event_name,
					LTTNG_KERNEL_SYM_NAME_LEN - 1)
				&& chan == event->chan) {
			ret = -EEXIST;
			goto exist;
		}
	}

	event = kmem_cache_zalloc(event_cache, GFP_KERNEL);
	if (!event) {
		ret = -ENOMEM;
		goto cache_error;
	}
	event->chan = chan;
	event->filter = filter;
	event->id = chan->free_event_id++;
	event->instrumentation = itype;
	event->evtype = LTTNG_TYPE_EVENT;
	INIT_LIST_HEAD(&event->filter_bytecode_runtime_head);
	INIT_LIST_HEAD(&event->enablers_ref_head);

	switch (itype) {
	case LTTNG_KERNEL_TRACEPOINT:
		/* Event will be enabled by enabler sync. */
		event->enabled = 0;
		event->registered = 0;
		event->desc = lttng_event_desc_get(event_name);
		if (!event->desc) {
			ret = -ENOENT;
			goto register_error;
		}
		/* Populate lttng_event structure before event registration. */
		smp_wmb();
		break;
	case LTTNG_KERNEL_KPROBE:
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->registered = 1;
		/*
		 * Populate lttng_event structure before event
		 * registration.
		 */
		smp_wmb();
		ret = lttng_kprobes_register_event(event_name,
				event_param->u.kprobe.symbol_name,
				event_param->u.kprobe.offset,
				event_param->u.kprobe.addr,
				event);
		if (ret) {
			ret = -EINVAL;
			goto register_error;
		}
		/* Pin the probe provider module for the event's lifetime. */
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		break;
	case LTTNG_KERNEL_KRETPROBE:
	{
		struct lttng_event *event_return;

		/* kretprobe defines 2 events */
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->registered = 1;
		event_return =
			kmem_cache_zalloc(event_cache, GFP_KERNEL);
		if (!event_return) {
			ret = -ENOMEM;
			goto register_error;
		}
		event_return->chan = chan;
		event_return->filter = filter;
		event_return->id = chan->free_event_id++;
		event_return->enabled = 0;
		event_return->registered = 1;
		event_return->instrumentation = itype;
		/*
		 * Populate lttng_event structure before kretprobe registration.
		 */
		smp_wmb();
		ret = lttng_kretprobes_register(event_name,
				event_param->u.kretprobe.symbol_name,
				event_param->u.kretprobe.offset,
				event_param->u.kretprobe.addr,
				event, event_return);
		if (ret) {
			kmem_cache_free(event_cache, event_return);
			ret = -EINVAL;
			goto register_error;
		}
		/* Take 2 refs on the module: one per event. */
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		ret = _lttng_event_metadata_statedump(chan->session, chan,
						    event_return);
		WARN_ON_ONCE(ret > 0);
		if (ret) {
			/* Undo the return-event allocation and both module refs. */
			kmem_cache_free(event_cache, event_return);
			module_put(event->desc->owner);
			module_put(event->desc->owner);
			goto statedump_error;
		}
		list_add(&event_return->list, &chan->session->events);
		break;
	}
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_SYSCALL:
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->registered = 0;
		event->desc = event_desc;
		/* Only a single entry/exit direction is valid per event. */
		switch (event_param->u.syscall.entryexit) {
		case LTTNG_KERNEL_SYSCALL_ENTRYEXIT:
			ret = -EINVAL;
			goto register_error;
		case LTTNG_KERNEL_SYSCALL_ENTRY:
			event->u.syscall.entryexit = LTTNG_SYSCALL_ENTRY;
			break;
		case LTTNG_KERNEL_SYSCALL_EXIT:
			event->u.syscall.entryexit = LTTNG_SYSCALL_EXIT;
			break;
		}
		/* Likewise, a single ABI must be selected. */
		switch (event_param->u.syscall.abi) {
		case LTTNG_KERNEL_SYSCALL_ABI_ALL:
			ret = -EINVAL;
			goto register_error;
		case LTTNG_KERNEL_SYSCALL_ABI_NATIVE:
			event->u.syscall.abi = LTTNG_SYSCALL_ABI_NATIVE;
			break;
		case LTTNG_KERNEL_SYSCALL_ABI_COMPAT:
			event->u.syscall.abi = LTTNG_SYSCALL_ABI_COMPAT;
			break;
		}
		if (!event->desc) {
			ret = -EINVAL;
			goto register_error;
		}
		break;
	case LTTNG_KERNEL_UPROBE:
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->registered = 1;

		/*
		 * Populate lttng_event structure before event
		 * registration.
		 */
		smp_wmb();

		ret = lttng_uprobes_register_event(event_param->name,
				event_param->u.uprobe.fd,
				event);
		if (ret)
			goto register_error;
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		break;
	case LTTNG_KERNEL_FUNCTION:	/* Fall-through */
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
		goto register_error;
	}
	ret = _lttng_event_metadata_statedump(chan->session, chan, event);
	WARN_ON_ONCE(ret > 0);
	if (ret) {
		goto statedump_error;
	}
	hlist_add_head(&event->hlist, head);
	list_add(&event->list, &chan->session->events);
	return event;

statedump_error:
	/* If a statedump error occurs, events will not be readable. */
register_error:
	kmem_cache_free(event_cache, event);
cache_error:
exist:
type_error:
full:
	return ERR_PTR(ret);
}
999
1000 struct lttng_event_notifier *_lttng_event_notifier_create(
1001 const struct lttng_event_desc *event_desc,
1002 uint64_t token, struct lttng_event_notifier_group *event_notifier_group,
1003 struct lttng_kernel_event_notifier *event_notifier_param,
1004 void *filter, enum lttng_kernel_instrumentation itype)
1005 {
1006 struct lttng_event_notifier *event_notifier;
1007 const char *event_name;
1008 struct hlist_head *head;
1009 int ret;
1010
1011 switch (itype) {
1012 case LTTNG_KERNEL_TRACEPOINT:
1013 event_name = event_desc->name;
1014 break;
1015 case LTTNG_KERNEL_KPROBE:
1016 case LTTNG_KERNEL_UPROBE:
1017 case LTTNG_KERNEL_SYSCALL:
1018 event_name = event_notifier_param->event.name;
1019 break;
1020 case LTTNG_KERNEL_KRETPROBE:
1021 case LTTNG_KERNEL_FUNCTION:
1022 case LTTNG_KERNEL_NOOP:
1023 default:
1024 WARN_ON_ONCE(1);
1025 ret = -EINVAL;
1026 goto type_error;
1027 }
1028
1029 head = utils_borrow_hash_table_bucket(event_notifier_group->event_notifiers_ht.table,
1030 LTTNG_EVENT_NOTIFIER_HT_SIZE, event_name);
1031 lttng_hlist_for_each_entry(event_notifier, head, hlist) {
1032 WARN_ON_ONCE(!event_notifier->desc);
1033 if (!strncmp(event_notifier->desc->name, event_name,
1034 LTTNG_KERNEL_SYM_NAME_LEN - 1)
1035 && event_notifier_group == event_notifier->group
1036 && token == event_notifier->user_token) {
1037 ret = -EEXIST;
1038 goto exist;
1039 }
1040 }
1041
1042 event_notifier = kmem_cache_zalloc(event_notifier_cache, GFP_KERNEL);
1043 if (!event_notifier) {
1044 ret = -ENOMEM;
1045 goto cache_error;
1046 }
1047
1048 event_notifier->group = event_notifier_group;
1049 event_notifier->user_token = token;
1050 event_notifier->filter = filter;
1051 event_notifier->instrumentation = itype;
1052 event_notifier->evtype = LTTNG_TYPE_EVENT;
1053 event_notifier->send_notification = lttng_event_notifier_notification_send;
1054 INIT_LIST_HEAD(&event_notifier->filter_bytecode_runtime_head);
1055 INIT_LIST_HEAD(&event_notifier->enablers_ref_head);
1056
1057 switch (itype) {
1058 case LTTNG_KERNEL_TRACEPOINT:
1059 /* Event will be enabled by enabler sync. */
1060 event_notifier->enabled = 0;
1061 event_notifier->registered = 0;
1062 event_notifier->desc = lttng_event_desc_get(event_name);
1063 if (!event_notifier->desc) {
1064 ret = -ENOENT;
1065 goto register_error;
1066 }
1067 /* Populate lttng_event_notifier structure before event registration. */
1068 smp_wmb();
1069 break;
1070 case LTTNG_KERNEL_KPROBE:
1071 /*
1072 * Needs to be explicitly enabled after creation, since
1073 * we may want to apply filters.
1074 */
1075 event_notifier->enabled = 0;
1076 event_notifier->registered = 1;
1077 /*
1078 * Populate lttng_event_notifier structure before event
1079 * registration.
1080 */
1081 smp_wmb();
1082 ret = lttng_kprobes_register_event_notifier(
1083 event_notifier_param->event.u.kprobe.symbol_name,
1084 event_notifier_param->event.u.kprobe.offset,
1085 event_notifier_param->event.u.kprobe.addr,
1086 event_notifier);
1087 if (ret) {
1088 ret = -EINVAL;
1089 goto register_error;
1090 }
1091 ret = try_module_get(event_notifier->desc->owner);
1092 WARN_ON_ONCE(!ret);
1093 break;
1094 case LTTNG_KERNEL_NOOP:
1095 case LTTNG_KERNEL_SYSCALL:
1096 /*
1097 * Needs to be explicitly enabled after creation, since
1098 * we may want to apply filters.
1099 */
1100 event_notifier->enabled = 0;
1101 event_notifier->registered = 0;
1102 event_notifier->desc = event_desc;
1103 switch (event_notifier_param->event.u.syscall.entryexit) {
1104 case LTTNG_KERNEL_SYSCALL_ENTRYEXIT:
1105 ret = -EINVAL;
1106 goto register_error;
1107 case LTTNG_KERNEL_SYSCALL_ENTRY:
1108 event_notifier->u.syscall.entryexit = LTTNG_SYSCALL_ENTRY;
1109 break;
1110 case LTTNG_KERNEL_SYSCALL_EXIT:
1111 event_notifier->u.syscall.entryexit = LTTNG_SYSCALL_EXIT;
1112 break;
1113 }
1114 switch (event_notifier_param->event.u.syscall.abi) {
1115 case LTTNG_KERNEL_SYSCALL_ABI_ALL:
1116 ret = -EINVAL;
1117 goto register_error;
1118 case LTTNG_KERNEL_SYSCALL_ABI_NATIVE:
1119 event_notifier->u.syscall.abi = LTTNG_SYSCALL_ABI_NATIVE;
1120 break;
1121 case LTTNG_KERNEL_SYSCALL_ABI_COMPAT:
1122 event_notifier->u.syscall.abi = LTTNG_SYSCALL_ABI_COMPAT;
1123 break;
1124 }
1125
1126 if (!event_notifier->desc) {
1127 ret = -EINVAL;
1128 goto register_error;
1129 }
1130 break;
1131 case LTTNG_KERNEL_UPROBE:
1132 /*
1133 * Needs to be explicitly enabled after creation, since
1134 * we may want to apply filters.
1135 */
1136 event_notifier->enabled = 0;
1137 event_notifier->registered = 1;
1138
1139 /*
1140 * Populate lttng_event_notifier structure before
1141 * event_notifier registration.
1142 */
1143 smp_wmb();
1144
1145 ret = lttng_uprobes_register_event_notifier(
1146 event_notifier_param->event.name,
1147 event_notifier_param->event.u.uprobe.fd,
1148 event_notifier);
1149 if (ret)
1150 goto register_error;
1151 ret = try_module_get(event_notifier->desc->owner);
1152 WARN_ON_ONCE(!ret);
1153 break;
1154 case LTTNG_KERNEL_KRETPROBE:
1155 case LTTNG_KERNEL_FUNCTION:
1156 default:
1157 WARN_ON_ONCE(1);
1158 ret = -EINVAL;
1159 goto register_error;
1160 }
1161
1162 list_add(&event_notifier->list, &event_notifier_group->event_notifiers_head);
1163 hlist_add_head(&event_notifier->hlist, head);
1164 return event_notifier;
1165
1166 register_error:
1167 kmem_cache_free(event_notifier_cache, event_notifier);
1168 cache_error:
1169 exist:
1170 type_error:
1171 return ERR_PTR(ret);
1172 }
1173
1174 struct lttng_event *lttng_event_create(struct lttng_channel *chan,
1175 struct lttng_kernel_event *event_param,
1176 void *filter,
1177 const struct lttng_event_desc *event_desc,
1178 enum lttng_kernel_instrumentation itype)
1179 {
1180 struct lttng_event *event;
1181
1182 mutex_lock(&sessions_mutex);
1183 event = _lttng_event_create(chan, event_param, filter, event_desc,
1184 itype);
1185 mutex_unlock(&sessions_mutex);
1186 return event;
1187 }
1188
1189 struct lttng_event_notifier *lttng_event_notifier_create(
1190 const struct lttng_event_desc *event_desc,
1191 uint64_t id, struct lttng_event_notifier_group *event_notifier_group,
1192 struct lttng_kernel_event_notifier *event_notifier_param,
1193 void *filter, enum lttng_kernel_instrumentation itype)
1194 {
1195 struct lttng_event_notifier *event_notifier;
1196
1197 mutex_lock(&sessions_mutex);
1198 event_notifier = _lttng_event_notifier_create(event_desc, id,
1199 event_notifier_group, event_notifier_param, filter, itype);
1200 mutex_unlock(&sessions_mutex);
1201 return event_notifier;
1202 }
1203
/*
 * Register the probe backing @event with its instrumentation source and
 * mark the event as registered on success.
 *
 * Only used for tracepoints for now.
 * NOTE(review): presumably called with sessions_mutex held by the enabler
 * sync paths — callers are outside this view, confirm before relying on it.
 */
static
void register_event(struct lttng_event *event)
{
	const struct lttng_event_desc *desc;
	int ret = -EINVAL;

	/* Idempotent: already registered is a no-op. */
	if (event->registered)
		return;

	desc = event->desc;
	switch (event->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
		ret = lttng_wrapper_tracepoint_probe_register(desc->kname,
						  desc->probe_callback,
						  event);
		break;
	case LTTNG_KERNEL_SYSCALL:
		ret = lttng_syscall_filter_enable_event(event->chan, event);
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
	case LTTNG_KERNEL_KRETPROBE:
	case LTTNG_KERNEL_NOOP:
		/* These instrumentation types need no extra registration here. */
		ret = 0;
		break;
	case LTTNG_KERNEL_FUNCTION:	/* Fall-through */
	default:
		WARN_ON_ONCE(1);
	}
	/* Only flag as registered when the probe hookup succeeded. */
	if (!ret)
		event->registered = 1;
}
1237
1238 /*
1239 * Only used internally at session destruction.
1240 */
1241 int _lttng_event_unregister(struct lttng_event *event)
1242 {
1243 const struct lttng_event_desc *desc;
1244 int ret = -EINVAL;
1245
1246 if (!event->registered)
1247 return 0;
1248
1249 desc = event->desc;
1250 switch (event->instrumentation) {
1251 case LTTNG_KERNEL_TRACEPOINT:
1252 ret = lttng_wrapper_tracepoint_probe_unregister(event->desc->kname,
1253 event->desc->probe_callback,
1254 event);
1255 break;
1256 case LTTNG_KERNEL_KPROBE:
1257 lttng_kprobes_unregister_event(event);
1258 ret = 0;
1259 break;
1260 case LTTNG_KERNEL_KRETPROBE:
1261 lttng_kretprobes_unregister(event);
1262 ret = 0;
1263 break;
1264 case LTTNG_KERNEL_SYSCALL:
1265 ret = lttng_syscall_filter_disable_event(event->chan, event);
1266 break;
1267 case LTTNG_KERNEL_NOOP:
1268 ret = 0;
1269 break;
1270 case LTTNG_KERNEL_UPROBE:
1271 lttng_uprobes_unregister_event(event);
1272 ret = 0;
1273 break;
1274 case LTTNG_KERNEL_FUNCTION: /* Fall-through */
1275 default:
1276 WARN_ON_ONCE(1);
1277 }
1278 if (!ret)
1279 event->registered = 0;
1280 return ret;
1281 }
1282
/*
 * Register the probe backing @event_notifier with its instrumentation
 * source and mark the notifier as registered on success.
 *
 * Only used for tracepoints for now.
 */
static
void register_event_notifier(struct lttng_event_notifier *event_notifier)
{
	const struct lttng_event_desc *desc;
	int ret = -EINVAL;

	/* Idempotent: already registered is a no-op. */
	if (event_notifier->registered)
		return;

	desc = event_notifier->desc;
	switch (event_notifier->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
		ret = lttng_wrapper_tracepoint_probe_register(desc->kname,
						  desc->event_notifier_callback,
						  event_notifier);
		break;
	case LTTNG_KERNEL_SYSCALL:
		ret = lttng_syscall_filter_enable_event_notifier(event_notifier);
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
		/* kprobe/uprobe notifiers are registered at creation time. */
		ret = 0;
		break;
	case LTTNG_KERNEL_KRETPROBE:
	case LTTNG_KERNEL_FUNCTION:
	case LTTNG_KERNEL_NOOP:
	default:
		WARN_ON_ONCE(1);
	}
	/* Only flag as registered when the probe hookup succeeded. */
	if (!ret)
		event_notifier->registered = 1;
}
1316
1317 static
1318 int _lttng_event_notifier_unregister(
1319 struct lttng_event_notifier *event_notifier)
1320 {
1321 const struct lttng_event_desc *desc;
1322 int ret = -EINVAL;
1323
1324 if (!event_notifier->registered)
1325 return 0;
1326
1327 desc = event_notifier->desc;
1328 switch (event_notifier->instrumentation) {
1329 case LTTNG_KERNEL_TRACEPOINT:
1330 ret = lttng_wrapper_tracepoint_probe_unregister(event_notifier->desc->kname,
1331 event_notifier->desc->event_notifier_callback,
1332 event_notifier);
1333 break;
1334 case LTTNG_KERNEL_KPROBE:
1335 lttng_kprobes_unregister_event_notifier(event_notifier);
1336 ret = 0;
1337 break;
1338 case LTTNG_KERNEL_UPROBE:
1339 lttng_uprobes_unregister_event_notifier(event_notifier);
1340 ret = 0;
1341 break;
1342 case LTTNG_KERNEL_SYSCALL:
1343 ret = lttng_syscall_filter_disable_event_notifier(event_notifier);
1344 break;
1345 case LTTNG_KERNEL_KRETPROBE:
1346 case LTTNG_KERNEL_FUNCTION:
1347 case LTTNG_KERNEL_NOOP:
1348 default:
1349 WARN_ON_ONCE(1);
1350 }
1351 if (!ret)
1352 event_notifier->registered = 0;
1353 return ret;
1354 }
1355
/*
 * Release all resources owned by @event and free it.
 *
 * Only used internally at session destruction.
 * Drops the probe descriptor / module references taken at creation,
 * unlinks the event from its session list, destroys its context, and
 * returns the object to the event cache.
 */
static
void _lttng_event_destroy(struct lttng_event *event)
{
	switch (event->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
		/* Pairs with lttng_event_desc_get() at creation. */
		lttng_event_desc_put(event->desc);
		break;
	case LTTNG_KERNEL_KPROBE:
		/* Pairs with try_module_get() at creation. */
		module_put(event->desc->owner);
		lttng_kprobes_destroy_event_private(event);
		break;
	case LTTNG_KERNEL_KRETPROBE:
		module_put(event->desc->owner);
		lttng_kretprobes_destroy_private(event);
		break;
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_SYSCALL:
		/* No private state to tear down. */
		break;
	case LTTNG_KERNEL_UPROBE:
		module_put(event->desc->owner);
		lttng_uprobes_destroy_event_private(event);
		break;
	case LTTNG_KERNEL_FUNCTION:	/* Fall-through */
	default:
		WARN_ON_ONCE(1);
	}
	list_del(&event->list);
	lttng_destroy_context(event->ctx);
	kmem_cache_free(event_cache, event);
}
1389
/*
 * Release all resources owned by @event_notifier and free it.
 *
 * Only used internally at session destruction.
 * Drops the probe descriptor / module references taken at creation,
 * unlinks the notifier from its group list, and returns the object to
 * the event notifier cache.
 */
static
void _lttng_event_notifier_destroy(struct lttng_event_notifier *event_notifier)
{
	switch (event_notifier->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
		/* Pairs with lttng_event_desc_get() at creation. */
		lttng_event_desc_put(event_notifier->desc);
		break;
	case LTTNG_KERNEL_KPROBE:
		/* Pairs with try_module_get() at creation. */
		module_put(event_notifier->desc->owner);
		lttng_kprobes_destroy_event_notifier_private(event_notifier);
		break;
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_SYSCALL:
		/* No private state to tear down. */
		break;
	case LTTNG_KERNEL_UPROBE:
		module_put(event_notifier->desc->owner);
		lttng_uprobes_destroy_event_notifier_private(event_notifier);
		break;
	case LTTNG_KERNEL_KRETPROBE:
	case LTTNG_KERNEL_FUNCTION:
	default:
		WARN_ON_ONCE(1);
	}
	list_del(&event_notifier->list);
	kmem_cache_free(event_notifier_cache, event_notifier);
}
1419
1420 struct lttng_id_tracker *get_tracker(struct lttng_session *session,
1421 enum tracker_type tracker_type)
1422 {
1423 switch (tracker_type) {
1424 case TRACKER_PID:
1425 return &session->pid_tracker;
1426 case TRACKER_VPID:
1427 return &session->vpid_tracker;
1428 case TRACKER_UID:
1429 return &session->uid_tracker;
1430 case TRACKER_VUID:
1431 return &session->vuid_tracker;
1432 case TRACKER_GID:
1433 return &session->gid_tracker;
1434 case TRACKER_VGID:
1435 return &session->vgid_tracker;
1436 default:
1437 WARN_ON_ONCE(1);
1438 return NULL;
1439 }
1440 }
1441
1442 int lttng_session_track_id(struct lttng_session *session,
1443 enum tracker_type tracker_type, int id)
1444 {
1445 struct lttng_id_tracker *tracker;
1446 int ret;
1447
1448 tracker = get_tracker(session, tracker_type);
1449 if (!tracker)
1450 return -EINVAL;
1451 if (id < -1)
1452 return -EINVAL;
1453 mutex_lock(&sessions_mutex);
1454 if (id == -1) {
1455 /* track all ids: destroy tracker. */
1456 lttng_id_tracker_destroy(tracker, true);
1457 ret = 0;
1458 } else {
1459 ret = lttng_id_tracker_add(tracker, id);
1460 }
1461 mutex_unlock(&sessions_mutex);
1462 return ret;
1463 }
1464
1465 int lttng_session_untrack_id(struct lttng_session *session,
1466 enum tracker_type tracker_type, int id)
1467 {
1468 struct lttng_id_tracker *tracker;
1469 int ret;
1470
1471 tracker = get_tracker(session, tracker_type);
1472 if (!tracker)
1473 return -EINVAL;
1474 if (id < -1)
1475 return -EINVAL;
1476 mutex_lock(&sessions_mutex);
1477 if (id == -1) {
1478 /* untrack all ids: replace by empty tracker. */
1479 ret = lttng_id_tracker_empty_set(tracker);
1480 } else {
1481 ret = lttng_id_tracker_del(tracker, id);
1482 }
1483 mutex_unlock(&sessions_mutex);
1484 return ret;
1485 }
1486
/*
 * seq_file start callback: take the sessions mutex (released in
 * id_list_stop()) and walk the tracker hash table to the *pos'th node.
 *
 * NOTE(review): when the tracker is disabled, id_tracker_p is NULL, so
 * "return id_tracker_p" returns NULL here — seq_file treats a NULL start
 * as end-of-iteration, which appears to skip the "tracker disabled"
 * record that id_list_show() knows how to print. Confirm intended
 * behavior against the seq_file contract before changing.
 */
static
void *id_list_start(struct seq_file *m, loff_t *pos)
{
	struct lttng_id_tracker *id_tracker = m->private;
	struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
	struct lttng_id_hash_node *e;
	int iter = 0, i;

	mutex_lock(&sessions_mutex);
	if (id_tracker_p) {
		/* Linear scan of all buckets until the *pos'th entry. */
		for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
			struct hlist_head *head = &id_tracker_p->id_hash[i];

			lttng_hlist_for_each_entry(e, head, hlist) {
				if (iter++ >= *pos)
					return e;
			}
		}
	} else {
		/* ID tracker disabled. */
		if (iter >= *pos && iter == 0) {
			return id_tracker_p;	/* empty tracker */
		}
		iter++;
	}
	/* End of list */
	return NULL;
}
1515
/* Called with sessions_mutex held. */
static
void *id_list_next(struct seq_file *m, void *p, loff_t *ppos)
{
	struct lttng_id_tracker *id_tracker = m->private;
	struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
	struct lttng_id_hash_node *e;
	int iter = 0, i;

	/*
	 * Advance the position, then rescan the whole table from the
	 * beginning up to the new position. O(n) per step, O(n^2) per
	 * full listing — acceptable for the small LTTNG_ID_TABLE_SIZE
	 * hash and keeps the iterator robust against concurrent tracker
	 * replacement between steps.
	 */
	(*ppos)++;
	if (id_tracker_p) {
		for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
			struct hlist_head *head = &id_tracker_p->id_hash[i];

			lttng_hlist_for_each_entry(e, head, hlist) {
				if (iter++ >= *ppos)
					return e;
			}
		}
	} else {
		/* ID tracker disabled. */
		if (iter >= *ppos && iter == 0)
			return p;	/* empty tracker */
		iter++;
	}

	/* End of list */
	return NULL;
}
1545
/* seq_file stop callback: releases the mutex taken by id_list_start(). */
static
void id_list_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&sessions_mutex);
}
1551
/*
 * seq_file show callback: print one tracked id record. A disabled
 * tracker (p == id_tracker->p) is rendered as id -1 ("track all").
 * The output syntax matches the session configuration grammar.
 */
static
int id_list_show(struct seq_file *m, void *p)
{
	struct lttng_id_tracker *id_tracker = m->private;
	struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
	int id;

	if (p == id_tracker_p) {
		/* Tracker disabled. */
		id = -1;
	} else {
		const struct lttng_id_hash_node *e = p;

		id = lttng_id_tracker_get_node_id(e);
	}
	/* One record per id, format selected by the tracker's type. */
	switch (id_tracker->tracker_type) {
	case TRACKER_PID:
		seq_printf(m,	"process { pid = %d; };\n", id);
		break;
	case TRACKER_VPID:
		seq_printf(m,	"process { vpid = %d; };\n", id);
		break;
	case TRACKER_UID:
		seq_printf(m,	"user { uid = %d; };\n", id);
		break;
	case TRACKER_VUID:
		seq_printf(m,	"user { vuid = %d; };\n", id);
		break;
	case TRACKER_GID:
		seq_printf(m,	"group { gid = %d; };\n", id);
		break;
	case TRACKER_VGID:
		seq_printf(m,	"group { vgid = %d; };\n", id);
		break;
	default:
		seq_printf(m,	"UNKNOWN { field = %d };\n", id);
	}
	return 0;
}
1591
/*
 * seq_file operations backing the tracker id listing file created by
 * lttng_session_list_tracker_ids().
 */
static
const struct seq_operations lttng_tracker_ids_list_seq_ops = {
	.start = id_list_start,
	.next = id_list_next,
	.stop = id_list_stop,
	.show = id_list_show,
};
1599
/* Open callback: bind the seq_file iterator to the listing file. */
static
int lttng_tracker_ids_list_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &lttng_tracker_ids_list_seq_ops);
}
1605
/*
 * Release callback: tear down the seq_file and drop the session file
 * reference taken in lttng_session_list_tracker_ids().
 */
static
int lttng_tracker_ids_list_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	struct lttng_id_tracker *id_tracker = m->private;
	int ret;

	WARN_ON_ONCE(!id_tracker);
	ret = seq_release(inode, file);
	/* Balance the f_count increment done when the fd was handed out. */
	if (!ret)
		fput(id_tracker->session->file);
	return ret;
}
1619
/* File operations for the anonymous tracker-ids listing file. */
const struct file_operations lttng_tracker_ids_list_fops = {
	.owner = THIS_MODULE,
	.open = lttng_tracker_ids_list_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = lttng_tracker_ids_list_release,
};
1627
/*
 * Create an anonymous read-only listing file for the session's tracker
 * of the given type and return an installed fd for it.
 *
 * Pins the session file (f_count) for the lifetime of the listing file;
 * the reference is dropped in lttng_tracker_ids_list_release().
 * Returns the new fd on success, negative error code on failure.
 * Cleanup uses the goto-unwind pattern: each label undoes exactly the
 * acquisitions made before its failure point, in reverse order.
 */
int lttng_session_list_tracker_ids(struct lttng_session *session,
		enum tracker_type tracker_type)
{
	struct file *tracker_ids_list_file;
	struct seq_file *m;
	int file_fd, ret;

	file_fd = lttng_get_unused_fd();
	if (file_fd < 0) {
		ret = file_fd;
		goto fd_error;
	}

	tracker_ids_list_file = anon_inode_getfile("[lttng_tracker_ids_list]",
					  &lttng_tracker_ids_list_fops,
					  NULL, O_RDWR);
	if (IS_ERR(tracker_ids_list_file)) {
		ret = PTR_ERR(tracker_ids_list_file);
		goto file_error;
	}
	/* Pin the session file; refuse on refcount saturation. */
	if (!atomic_long_add_unless(&session->file->f_count, 1, LONG_MAX)) {
		ret = -EOVERFLOW;
		goto refcount_error;
	}
	/* Manually run the open callback to set up the seq_file state. */
	ret = lttng_tracker_ids_list_fops.open(NULL, tracker_ids_list_file);
	if (ret < 0)
		goto open_error;
	m = tracker_ids_list_file->private_data;

	m->private = get_tracker(session, tracker_type);
	BUG_ON(!m->private);
	/* Publish the fd only once everything is wired up. */
	fd_install(file_fd, tracker_ids_list_file);

	return file_fd;

open_error:
	atomic_long_dec(&session->file->f_count);
refcount_error:
	fput(tracker_ids_list_file);
file_error:
	put_unused_fd(file_fd);
fd_error:
	return ret;
}
1672
1673 /*
1674 * Enabler management.
1675 */
1676 static
1677 int lttng_match_enabler_star_glob(const char *desc_name,
1678 const char *pattern)
1679 {
1680 if (!strutils_star_glob_match(pattern, LTTNG_SIZE_MAX,
1681 desc_name, LTTNG_SIZE_MAX))
1682 return 0;
1683 return 1;
1684 }
1685
/*
 * Exact string comparison between an event descriptor name and an
 * enabler name. Returns 1 on match, 0 otherwise.
 */
static
int lttng_match_enabler_name(const char *desc_name,
		const char *name)
{
	return strcmp(desc_name, name) == 0;
}
1694
/*
 * Check whether the event described by @desc is targeted by @enabler.
 * Returns 1 on match, 0 on mismatch, negative error code on invalid
 * enabler parameters.
 */
int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
		struct lttng_enabler *enabler)
{
	const char *desc_name, *enabler_name;
	bool compat = false, entry = false;

	enabler_name = enabler->event_param.name;
	switch (enabler->event_param.instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
		desc_name = desc->name;
		switch (enabler->format_type) {
		case LTTNG_ENABLER_FORMAT_STAR_GLOB:
			return lttng_match_enabler_star_glob(desc_name, enabler_name);
		case LTTNG_ENABLER_FORMAT_NAME:
			return lttng_match_enabler_name(desc_name, enabler_name);
		default:
			return -EINVAL;
		}
		break;
	case LTTNG_KERNEL_SYSCALL:
		desc_name = desc->name;
		/*
		 * Syscall descriptor names are of the form
		 * [compat_]syscall_{entry,exit}_<name>: strip the prefixes
		 * while recording the ABI (compat) and direction (entry).
		 */
		if (!strncmp(desc_name, "compat_", strlen("compat_"))) {
			desc_name += strlen("compat_");
			compat = true;
		}
		if (!strncmp(desc_name, "syscall_exit_",
				strlen("syscall_exit_"))) {
			desc_name += strlen("syscall_exit_");
		} else if (!strncmp(desc_name, "syscall_entry_",
				strlen("syscall_entry_"))) {
			desc_name += strlen("syscall_entry_");
			entry = true;
		} else {
			/* Malformed syscall descriptor name. */
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
		/* Filter on the entry/exit direction requested by the enabler. */
		switch (enabler->event_param.u.syscall.entryexit) {
		case LTTNG_KERNEL_SYSCALL_ENTRYEXIT:
			break;
		case LTTNG_KERNEL_SYSCALL_ENTRY:
			if (!entry)
				return 0;
			break;
		case LTTNG_KERNEL_SYSCALL_EXIT:
			if (entry)
				return 0;
			break;
		default:
			return -EINVAL;
		}
		/* Filter on the syscall ABI requested by the enabler. */
		switch (enabler->event_param.u.syscall.abi) {
		case LTTNG_KERNEL_SYSCALL_ABI_ALL:
			break;
		case LTTNG_KERNEL_SYSCALL_ABI_NATIVE:
			if (compat)
				return 0;
			break;
		case LTTNG_KERNEL_SYSCALL_ABI_COMPAT:
			if (!compat)
				return 0;
			break;
		default:
			return -EINVAL;
		}
		/* Finally match the stripped syscall name itself. */
		switch (enabler->event_param.u.syscall.match) {
		case LTTNG_KERNEL_SYSCALL_MATCH_NAME:
			switch (enabler->format_type) {
			case LTTNG_ENABLER_FORMAT_STAR_GLOB:
				return lttng_match_enabler_star_glob(desc_name, enabler_name);
			case LTTNG_ENABLER_FORMAT_NAME:
				return lttng_match_enabler_name(desc_name, enabler_name);
			default:
				return -EINVAL;
			}
			break;
		case LTTNG_KERNEL_SYSCALL_MATCH_NR:
			return -EINVAL;	/* Not implemented. */
		default:
			return -EINVAL;
		}
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}
}
1781
1782 static
1783 int lttng_event_enabler_match_event(struct lttng_event_enabler *event_enabler,
1784 struct lttng_event *event)
1785 {
1786 struct lttng_enabler *base_enabler = lttng_event_enabler_as_enabler(
1787 event_enabler);
1788
1789 if (base_enabler->event_param.instrumentation != event->instrumentation)
1790 return 0;
1791 if (lttng_desc_match_enabler(event->desc, base_enabler)
1792 && event->chan == event_enabler->chan)
1793 return 1;
1794 else
1795 return 0;
1796 }
1797
1798 static
1799 int lttng_event_notifier_enabler_match_event_notifier(struct lttng_event_notifier_enabler *event_notifier_enabler,
1800 struct lttng_event_notifier *event_notifier)
1801 {
1802 struct lttng_enabler *base_enabler = lttng_event_notifier_enabler_as_enabler(
1803 event_notifier_enabler);
1804
1805 if (base_enabler->event_param.instrumentation != event_notifier->instrumentation)
1806 return 0;
1807 if (lttng_desc_match_enabler(event_notifier->desc, base_enabler)
1808 && event_notifier->group == event_notifier_enabler->group
1809 && event_notifier->user_token == event_notifier_enabler->base.user_token)
1810 return 1;
1811 else
1812 return 0;
1813 }
1814
1815 static
1816 struct lttng_enabler_ref *lttng_enabler_ref(
1817 struct list_head *enablers_ref_list,
1818 struct lttng_enabler *enabler)
1819 {
1820 struct lttng_enabler_ref *enabler_ref;
1821
1822 list_for_each_entry(enabler_ref, enablers_ref_list, node) {
1823 if (enabler_ref->ref == enabler)
1824 return enabler_ref;
1825 }
1826 return NULL;
1827 }
1828
/*
 * Walk the global probe list and instantiate an lttng_event for every
 * probe event matching @event_enabler that does not already exist on
 * the enabler's channel.
 *
 * NOTE(review): the error check here is "if (!event)" while the notifier
 * counterpart below uses IS_ERR(); confirm _lttng_event_create()'s error
 * convention (NULL vs ERR_PTR) — an ERR_PTR return would slip past this
 * check.
 */
static
void lttng_create_tracepoint_event_if_missing(struct lttng_event_enabler *event_enabler)
{
	struct lttng_session *session = event_enabler->chan->session;
	struct lttng_probe_desc *probe_desc;
	const struct lttng_event_desc *desc;
	int i;
	struct list_head *probe_list;

	probe_list = lttng_get_probe_list_head();
	/*
	 * For each probe event, if we find that a probe event matches
	 * our enabler, create an associated lttng_event if not
	 * already present.
	 */
	list_for_each_entry(probe_desc, probe_list, head) {
		for (i = 0; i < probe_desc->nr_events; i++) {
			int found = 0;
			struct hlist_head *head;
			struct lttng_event *event;

			desc = probe_desc->event_desc[i];
			if (!lttng_desc_match_enabler(desc,
					lttng_event_enabler_as_enabler(event_enabler)))
				continue;

			/*
			 * Check if already created.
			 */
			head = utils_borrow_hash_table_bucket(
				session->events_ht.table, LTTNG_EVENT_HT_SIZE,
				desc->name);
			lttng_hlist_for_each_entry(event, head, hlist) {
				if (event->desc == desc
						&& event->chan == event_enabler->chan)
					found = 1;
			}
			if (found)
				continue;

			/*
			 * We need to create an event for this
			 * event probe.
			 */
			event = _lttng_event_create(event_enabler->chan,
					NULL, NULL, desc,
					LTTNG_KERNEL_TRACEPOINT);
			if (!event) {
				/* Best-effort: log and continue with remaining probes. */
				printk(KERN_INFO "LTTng: Unable to create event %s\n",
					probe_desc->event_desc[i]->name);
			}
		}
	}
}
1883
1884 static
1885 void lttng_create_tracepoint_event_notifier_if_missing(struct lttng_event_notifier_enabler *event_notifier_enabler)
1886 {
1887 struct lttng_event_notifier_group *event_notifier_group = event_notifier_enabler->group;
1888 struct lttng_probe_desc *probe_desc;
1889 const struct lttng_event_desc *desc;
1890 int i;
1891 struct list_head *probe_list;
1892
1893 probe_list = lttng_get_probe_list_head();
1894 /*
1895 * For each probe event, if we find that a probe event matches
1896 * our enabler, create an associated lttng_event_notifier if not
1897 * already present.
1898 */
1899 list_for_each_entry(probe_desc, probe_list, head) {
1900 for (i = 0; i < probe_desc->nr_events; i++) {
1901 int found = 0;
1902 struct hlist_head *head;
1903 struct lttng_event_notifier *event_notifier;
1904
1905 desc = probe_desc->event_desc[i];
1906 if (!lttng_desc_match_enabler(desc,
1907 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)))
1908 continue;
1909
1910 /*
1911 * Check if already created.
1912 */
1913 head = utils_borrow_hash_table_bucket(
1914 event_notifier_group->event_notifiers_ht.table,
1915 LTTNG_EVENT_NOTIFIER_HT_SIZE, desc->name);
1916 lttng_hlist_for_each_entry(event_notifier, head, hlist) {
1917 if (event_notifier->desc == desc
1918 && event_notifier->user_token == event_notifier_enabler->base.user_token)
1919 found = 1;
1920 }
1921 if (found)
1922 continue;
1923
1924 /*
1925 * We need to create a event_notifier for this event probe.
1926 */
1927 event_notifier = _lttng_event_notifier_create(desc,
1928 event_notifier_enabler->base.user_token,
1929 event_notifier_group, NULL, NULL,
1930 LTTNG_KERNEL_TRACEPOINT);
1931 if (IS_ERR(event_notifier)) {
1932 printk(KERN_INFO "Unable to create event_notifier %s\n",
1933 probe_desc->event_desc[i]->name);
1934 }
1935 }
1936 }
1937 }
1938
1939 static
1940 void lttng_create_syscall_event_if_missing(struct lttng_event_enabler *event_enabler)
1941 {
1942 int ret;
1943
1944 ret = lttng_syscalls_register_event(event_enabler->chan, NULL);
1945 WARN_ON_ONCE(ret);
1946 }
1947
/*
 * Ensure syscall event notifiers exist for this enabler: register the
 * syscall notifier machinery, then create notifiers matching the
 * enabler. Failures are unexpected, hence the warnings.
 */
static
void lttng_create_syscall_event_notifier_if_missing(struct lttng_event_notifier_enabler *event_notifier_enabler)
{
	int ret;

	ret = lttng_syscalls_register_event_notifier(event_notifier_enabler, NULL);
	WARN_ON_ONCE(ret);
	/*
	 * NOTE(review): "syscals" matches the helper's declaration elsewhere
	 * in the tree — renaming must be done at the declaration, not here.
	 */
	ret = lttng_syscals_create_matching_event_notifiers(event_notifier_enabler, NULL);
	WARN_ON_ONCE(ret);
}
1958
1959 /*
1960 * Create struct lttng_event if it is missing and present in the list of
1961 * tracepoint probes.
1962 * Should be called with sessions mutex held.
1963 */
1964 static
1965 void lttng_create_event_if_missing(struct lttng_event_enabler *event_enabler)
1966 {
1967 switch (event_enabler->base.event_param.instrumentation) {
1968 case LTTNG_KERNEL_TRACEPOINT:
1969 lttng_create_tracepoint_event_if_missing(event_enabler);
1970 break;
1971 case LTTNG_KERNEL_SYSCALL:
1972 lttng_create_syscall_event_if_missing(event_enabler);
1973 break;
1974 default:
1975 WARN_ON_ONCE(1);
1976 break;
1977 }
1978 }
1979
/*
 * Create events associated with an event_enabler (if not already present),
 * and add backward reference from the event to the enabler.
 * Should be called with sessions mutex held.
 *
 * Returns 0 on success, -ENOMEM if a backward reference cannot be
 * allocated (in which case references created earlier in the loop are
 * kept; the operation is not rolled back).
 */
static
int lttng_event_enabler_ref_events(struct lttng_event_enabler *event_enabler)
{
	struct lttng_channel *chan = event_enabler->chan;
	struct lttng_session *session = event_enabler->chan->session;
	struct lttng_enabler *base_enabler = lttng_event_enabler_as_enabler(event_enabler);
	struct lttng_event *event;

	/*
	 * A catch-all syscall enabler ("*", all ABIs, entry+exit, match
	 * by name) toggles the channel-wide syscall_all flag according
	 * to the enabler's enabled state.
	 */
	if (base_enabler->event_param.instrumentation == LTTNG_KERNEL_SYSCALL &&
			base_enabler->event_param.u.syscall.entryexit == LTTNG_KERNEL_SYSCALL_ENTRYEXIT &&
			base_enabler->event_param.u.syscall.abi == LTTNG_KERNEL_SYSCALL_ABI_ALL &&
			base_enabler->event_param.u.syscall.match == LTTNG_KERNEL_SYSCALL_MATCH_NAME &&
			!strcmp(base_enabler->event_param.name, "*")) {
		if (base_enabler->enabled)
			WRITE_ONCE(chan->syscall_all, 1);
		else
			WRITE_ONCE(chan->syscall_all, 0);
	}

	/* First ensure that probe events are created for this enabler. */
	lttng_create_event_if_missing(event_enabler);

	/* For each event matching event_enabler in session event list. */
	list_for_each_entry(event, &session->events, list) {
		struct lttng_enabler_ref *enabler_ref;

		if (!lttng_event_enabler_match_event(event_enabler, event))
			continue;
		enabler_ref = lttng_enabler_ref(&event->enablers_ref_head,
			lttng_event_enabler_as_enabler(event_enabler));
		if (!enabler_ref) {
			/*
			 * If no backward ref, create it.
			 * Add backward ref from event to event_enabler.
			 */
			enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
			if (!enabler_ref)
				return -ENOMEM;
			enabler_ref->ref = lttng_event_enabler_as_enabler(event_enabler);
			list_add(&enabler_ref->node,
				&event->enablers_ref_head);
		}

		/*
		 * Link filter bytecodes if not linked yet.
		 */
		lttng_enabler_link_bytecode(event->desc,
			lttng_static_ctx,
			&event->filter_bytecode_runtime_head,
			lttng_event_enabler_as_enabler(event_enabler));

		/* TODO: merge event context. */
	}
	return 0;
}
2040
2041 /*
2042 * Create struct lttng_event_notifier if it is missing and present in the list of
2043 * tracepoint probes.
2044 * Should be called with sessions mutex held.
2045 */
2046 static
2047 void lttng_create_event_notifier_if_missing(struct lttng_event_notifier_enabler *event_notifier_enabler)
2048 {
2049 switch (event_notifier_enabler->base.event_param.instrumentation) {
2050 case LTTNG_KERNEL_TRACEPOINT:
2051 lttng_create_tracepoint_event_notifier_if_missing(event_notifier_enabler);
2052 break;
2053 case LTTNG_KERNEL_SYSCALL:
2054 lttng_create_syscall_event_notifier_if_missing(event_notifier_enabler);
2055 break;
2056 default:
2057 WARN_ON_ONCE(1);
2058 break;
2059 }
2060 }
2061
/*
 * Create event_notifiers associated with a event_notifier enabler (if not already present).
 *
 * Returns 0 on success, -ENOMEM if a backward reference cannot be
 * allocated (references created earlier in the loop are kept; no
 * rollback is performed).
 */
static
int lttng_event_notifier_enabler_ref_event_notifiers(
		struct lttng_event_notifier_enabler *event_notifier_enabler)
{
	struct lttng_event_notifier_group *event_notifier_group = event_notifier_enabler->group;
	struct lttng_enabler *base_enabler = lttng_event_notifier_enabler_as_enabler(event_notifier_enabler);
	struct lttng_event_notifier *event_notifier;

	/*
	 * A catch-all syscall enabler ("*", all ABIs, match by name)
	 * toggles the group-wide entry/exit flags according to the
	 * requested direction and the enabler's enabled state.
	 */
	if (base_enabler->event_param.instrumentation == LTTNG_KERNEL_SYSCALL &&
			base_enabler->event_param.u.syscall.abi == LTTNG_KERNEL_SYSCALL_ABI_ALL &&
			base_enabler->event_param.u.syscall.match == LTTNG_KERNEL_SYSCALL_MATCH_NAME &&
			!strcmp(base_enabler->event_param.name, "*")) {

		int enabled = base_enabler->enabled;
		enum lttng_kernel_syscall_entryexit entryexit = base_enabler->event_param.u.syscall.entryexit;

		if (entryexit == LTTNG_KERNEL_SYSCALL_ENTRY || entryexit == LTTNG_KERNEL_SYSCALL_ENTRYEXIT)
			WRITE_ONCE(event_notifier_group->syscall_all_entry, enabled);

		if (entryexit == LTTNG_KERNEL_SYSCALL_EXIT || entryexit == LTTNG_KERNEL_SYSCALL_ENTRYEXIT)
			WRITE_ONCE(event_notifier_group->syscall_all_exit, enabled);

	}

	/* First ensure that probe event_notifiers are created for this enabler. */
	lttng_create_event_notifier_if_missing(event_notifier_enabler);

	/* Link the created event_notifier with its associated enabler. */
	list_for_each_entry(event_notifier, &event_notifier_group->event_notifiers_head, list) {
		struct lttng_enabler_ref *enabler_ref;

		if (!lttng_event_notifier_enabler_match_event_notifier(event_notifier_enabler, event_notifier))
			continue;

		enabler_ref = lttng_enabler_ref(&event_notifier->enablers_ref_head,
			lttng_event_notifier_enabler_as_enabler(event_notifier_enabler));
		if (!enabler_ref) {
			/*
			 * If no backward ref, create it.
			 * Add backward ref from event_notifier to enabler.
			 */
			enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
			if (!enabler_ref)
				return -ENOMEM;

			enabler_ref->ref = lttng_event_notifier_enabler_as_enabler(
				event_notifier_enabler);
			list_add(&enabler_ref->node,
				&event_notifier->enablers_ref_head);
		}

		/*
		 * Link filter bytecodes if not linked yet.
		 */
		lttng_enabler_link_bytecode(event_notifier->desc,
			lttng_static_ctx, &event_notifier->filter_bytecode_runtime_head,
			lttng_event_notifier_enabler_as_enabler(event_notifier_enabler));
	}
	return 0;
}
2125
2126 /*
2127 * Called at module load: connect the probe on all enablers matching
2128 * this event.
2129 * Called with sessions lock held.
2130 */
2131 int lttng_fix_pending_events(void)
2132 {
2133 struct lttng_session *session;
2134
2135 list_for_each_entry(session, &sessions, list)
2136 lttng_session_lazy_sync_event_enablers(session);
2137 return 0;
2138 }
2139
2140 static bool lttng_event_notifier_group_has_active_event_notifiers(
2141 struct lttng_event_notifier_group *event_notifier_group)
2142 {
2143 struct lttng_event_notifier_enabler *event_notifier_enabler;
2144
2145 list_for_each_entry(event_notifier_enabler, &event_notifier_group->enablers_head,
2146 node) {
2147 if (event_notifier_enabler->base.enabled)
2148 return true;
2149 }
2150 return false;
2151 }
2152
2153 bool lttng_event_notifier_active(void)
2154 {
2155 struct lttng_event_notifier_group *event_notifier_group;
2156
2157 list_for_each_entry(event_notifier_group, &event_notifier_groups, node) {
2158 if (lttng_event_notifier_group_has_active_event_notifiers(event_notifier_group))
2159 return true;
2160 }
2161 return false;
2162 }
2163
2164 int lttng_fix_pending_event_notifiers(void)
2165 {
2166 struct lttng_event_notifier_group *event_notifier_group;
2167
2168 list_for_each_entry(event_notifier_group, &event_notifier_groups, node)
2169 lttng_event_notifier_group_sync_enablers(event_notifier_group);
2170 return 0;
2171 }
2172
2173 struct lttng_event_enabler *lttng_event_enabler_create(
2174 enum lttng_enabler_format_type format_type,
2175 struct lttng_kernel_event *event_param,
2176 struct lttng_channel *chan)
2177 {
2178 struct lttng_event_enabler *event_enabler;
2179
2180 event_enabler = kzalloc(sizeof(*event_enabler), GFP_KERNEL);
2181 if (!event_enabler)
2182 return NULL;
2183 event_enabler->base.format_type = format_type;
2184 INIT_LIST_HEAD(&event_enabler->base.filter_bytecode_head);
2185 memcpy(&event_enabler->base.event_param, event_param,
2186 sizeof(event_enabler->base.event_param));
2187 event_enabler->chan = chan;
2188 /* ctx left NULL */
2189 event_enabler->base.enabled = 0;
2190 event_enabler->base.evtype = LTTNG_TYPE_ENABLER;
2191 mutex_lock(&sessions_mutex);
2192 list_add(&event_enabler->node, &event_enabler->chan->session->enablers_head);
2193 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
2194 mutex_unlock(&sessions_mutex);
2195 return event_enabler;
2196 }
2197
2198 int lttng_event_enabler_enable(struct lttng_event_enabler *event_enabler)
2199 {
2200 mutex_lock(&sessions_mutex);
2201 lttng_event_enabler_as_enabler(event_enabler)->enabled = 1;
2202 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
2203 mutex_unlock(&sessions_mutex);
2204 return 0;
2205 }
2206
2207 int lttng_event_enabler_disable(struct lttng_event_enabler *event_enabler)
2208 {
2209 mutex_lock(&sessions_mutex);
2210 lttng_event_enabler_as_enabler(event_enabler)->enabled = 0;
2211 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
2212 mutex_unlock(&sessions_mutex);
2213 return 0;
2214 }
2215
2216 static
2217 int lttng_enabler_attach_filter_bytecode(struct lttng_enabler *enabler,
2218 struct lttng_kernel_filter_bytecode __user *bytecode)
2219 {
2220 struct lttng_filter_bytecode_node *bytecode_node;
2221 uint32_t bytecode_len;
2222 int ret;
2223
2224 ret = get_user(bytecode_len, &bytecode->len);
2225 if (ret)
2226 return ret;
2227 bytecode_node = kzalloc(sizeof(*bytecode_node) + bytecode_len,
2228 GFP_KERNEL);
2229 if (!bytecode_node)
2230 return -ENOMEM;
2231 ret = copy_from_user(&bytecode_node->bc, bytecode,
2232 sizeof(*bytecode) + bytecode_len);
2233 if (ret)
2234 goto error_free;
2235
2236 bytecode_node->enabler = enabler;
2237 /* Enforce length based on allocated size */
2238 bytecode_node->bc.len = bytecode_len;
2239 list_add_tail(&bytecode_node->node, &enabler->filter_bytecode_head);
2240
2241 return 0;
2242
2243 error_free:
2244 kfree(bytecode_node);
2245 return ret;
2246 }
2247
2248 int lttng_event_enabler_attach_filter_bytecode(struct lttng_event_enabler *event_enabler,
2249 struct lttng_kernel_filter_bytecode __user *bytecode)
2250 {
2251 int ret;
2252 ret = lttng_enabler_attach_filter_bytecode(
2253 lttng_event_enabler_as_enabler(event_enabler), bytecode);
2254 if (ret)
2255 goto error;
2256
2257 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
2258 return 0;
2259
2260 error:
2261 return ret;
2262 }
2263
2264 int lttng_event_add_callsite(struct lttng_event *event,
2265 struct lttng_kernel_event_callsite __user *callsite)
2266 {
2267
2268 switch (event->instrumentation) {
2269 case LTTNG_KERNEL_UPROBE:
2270 return lttng_uprobes_event_add_callsite(event, callsite);
2271 default:
2272 return -EINVAL;
2273 }
2274 }
2275
/*
 * Attaching a context to an event enabler is not implemented: this stub
 * unconditionally reports -ENOSYS to the caller.
 */
int lttng_event_enabler_attach_context(struct lttng_event_enabler *event_enabler,
	struct lttng_kernel_context *context_param)
{
	return -ENOSYS;
}
2281
2282 static
2283 void lttng_enabler_destroy(struct lttng_enabler *enabler)
2284 {
2285 struct lttng_filter_bytecode_node *filter_node, *tmp_filter_node;
2286
2287 /* Destroy filter bytecode */
2288 list_for_each_entry_safe(filter_node, tmp_filter_node,
2289 &enabler->filter_bytecode_head, node) {
2290 kfree(filter_node);
2291 }
2292 }
2293
/*
 * Tear down an event enabler: free its filter bytecode, destroy its
 * context, unlink it from the session's enabler list and release it.
 */
static
void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler)
{
	lttng_enabler_destroy(lttng_event_enabler_as_enabler(event_enabler));

	/* Destroy contexts */
	lttng_destroy_context(event_enabler->ctx);

	/* Unlink from the session's enablers list before freeing. */
	list_del(&event_enabler->node);
	kfree(event_enabler);
}
2305
2306 struct lttng_event_notifier_enabler *lttng_event_notifier_enabler_create(
2307 struct lttng_event_notifier_group *event_notifier_group,
2308 enum lttng_enabler_format_type format_type,
2309 struct lttng_kernel_event_notifier *event_notifier_param)
2310 {
2311 struct lttng_event_notifier_enabler *event_notifier_enabler;
2312
2313 event_notifier_enabler = kzalloc(sizeof(*event_notifier_enabler), GFP_KERNEL);
2314 if (!event_notifier_enabler)
2315 return NULL;
2316
2317 event_notifier_enabler->base.format_type = format_type;
2318 INIT_LIST_HEAD(&event_notifier_enabler->base.filter_bytecode_head);
2319
2320 memcpy(&event_notifier_enabler->base.event_param, &event_notifier_param->event,
2321 sizeof(event_notifier_enabler->base.event_param));
2322 event_notifier_enabler->base.evtype = LTTNG_TYPE_ENABLER;
2323
2324 event_notifier_enabler->base.enabled = 0;
2325 event_notifier_enabler->base.user_token = event_notifier_param->event.token;
2326 event_notifier_enabler->group = event_notifier_group;
2327
2328 mutex_lock(&sessions_mutex);
2329 list_add(&event_notifier_enabler->node, &event_notifier_enabler->group->enablers_head);
2330 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2331
2332 mutex_unlock(&sessions_mutex);
2333
2334 return event_notifier_enabler;
2335 }
2336
2337 int lttng_event_notifier_enabler_enable(
2338 struct lttng_event_notifier_enabler *event_notifier_enabler)
2339 {
2340 mutex_lock(&sessions_mutex);
2341 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 1;
2342 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2343 mutex_unlock(&sessions_mutex);
2344 return 0;
2345 }
2346
2347 int lttng_event_notifier_enabler_disable(
2348 struct lttng_event_notifier_enabler *event_notifier_enabler)
2349 {
2350 mutex_lock(&sessions_mutex);
2351 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 0;
2352 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2353 mutex_unlock(&sessions_mutex);
2354 return 0;
2355 }
2356
2357 int lttng_event_notifier_enabler_attach_filter_bytecode(
2358 struct lttng_event_notifier_enabler *event_notifier_enabler,
2359 struct lttng_kernel_filter_bytecode __user *bytecode)
2360 {
2361 int ret;
2362
2363 ret = lttng_enabler_attach_filter_bytecode(
2364 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler),
2365 bytecode);
2366 if (ret)
2367 goto error;
2368
2369 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2370 return 0;
2371
2372 error:
2373 return ret;
2374 }
2375
2376 int lttng_event_notifier_add_callsite(struct lttng_event_notifier *event_notifier,
2377 struct lttng_kernel_event_callsite __user *callsite)
2378 {
2379
2380 switch (event_notifier->instrumentation) {
2381 case LTTNG_KERNEL_UPROBE:
2382 return lttng_uprobes_event_notifier_add_callsite(event_notifier,
2383 callsite);
2384 default:
2385 return -EINVAL;
2386 }
2387 }
2388
/*
 * Attaching a context to an event notifier enabler is not implemented:
 * this stub unconditionally reports -ENOSYS to the caller.
 */
int lttng_event_notifier_enabler_attach_context(
		struct lttng_event_notifier_enabler *event_notifier_enabler,
		struct lttng_kernel_context *context_param)
{
	return -ENOSYS;
}
2395
2396 static
2397 void lttng_event_notifier_enabler_destroy(
2398 struct lttng_event_notifier_enabler *event_notifier_enabler)
2399 {
2400 if (!event_notifier_enabler) {
2401 return;
2402 }
2403
2404 list_del(&event_notifier_enabler->node);
2405
2406 lttng_enabler_destroy(lttng_event_notifier_enabler_as_enabler(event_notifier_enabler));
2407 kfree(event_notifier_enabler);
2408 }
2409
/*
 * lttng_session_sync_event_enablers should be called just before starting a
 * session.
 * Should be called with sessions mutex held.
 */
static
void lttng_session_sync_event_enablers(struct lttng_session *session)
{
	struct lttng_event_enabler *event_enabler;
	struct lttng_event *event;

	/* First, let every enabler take references on the events it matches. */
	list_for_each_entry(event_enabler, &session->enablers_head, node)
		lttng_event_enabler_ref_events(event_enabler);
	/*
	 * For each event, if at least one of its enablers is enabled,
	 * and its channel and session transient states are enabled, we
	 * enable the event, else we disable it.
	 */
	list_for_each_entry(event, &session->events, list) {
		struct lttng_enabler_ref *enabler_ref;
		struct lttng_bytecode_runtime *runtime;
		int enabled = 0, has_enablers_without_bytecode = 0;

		switch (event->instrumentation) {
		case LTTNG_KERNEL_TRACEPOINT:
		case LTTNG_KERNEL_SYSCALL:
			/* Enable events */
			list_for_each_entry(enabler_ref,
					&event->enablers_ref_head, node) {
				if (enabler_ref->ref->enabled) {
					enabled = 1;
					break;
				}
			}
			break;
		default:
			/* Not handled with lazy sync. */
			continue;
		}
		/*
		 * Enabled state is based on union of enablers, with
		 * intersection of session and channel transient enable
		 * states.
		 */
		enabled = enabled && session->tstate && event->chan->tstate;

		WRITE_ONCE(event->enabled, enabled);
		/*
		 * Sync tracepoint registration with event enabled
		 * state.
		 */
		if (enabled) {
			register_event(event);
		} else {
			_lttng_event_unregister(event);
		}

		/* Check if has enablers without bytecode enabled */
		list_for_each_entry(enabler_ref,
				&event->enablers_ref_head, node) {
			if (enabler_ref->ref->enabled
					&& list_empty(&enabler_ref->ref->filter_bytecode_head)) {
				has_enablers_without_bytecode = 1;
				break;
			}
		}
		event->has_enablers_without_bytecode =
			has_enablers_without_bytecode;

		/* Enable filters */
		list_for_each_entry(runtime,
				&event->filter_bytecode_runtime_head, node)
			lttng_filter_sync_state(runtime);
	}
}
2485
2486 /*
2487 * Apply enablers to session events, adding events to session if need
2488 * be. It is required after each modification applied to an active
2489 * session, and right before session "start".
2490 * "lazy" sync means we only sync if required.
2491 * Should be called with sessions mutex held.
2492 */
2493 static
2494 void lttng_session_lazy_sync_event_enablers(struct lttng_session *session)
2495 {
2496 /* We can skip if session is not active */
2497 if (!session->active)
2498 return;
2499 lttng_session_sync_event_enablers(session);
2500 }
2501
/*
 * Synchronize every event notifier of the group with the union of its
 * enablers' enabled states: update the enabled flag, tracepoint
 * registration and filter runtime state accordingly.
 * Should be called with sessions mutex held.
 */
static
void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group)
{
	struct lttng_event_notifier_enabler *event_notifier_enabler;
	struct lttng_event_notifier *event_notifier;

	/* Let each enabler take references on the notifiers it matches. */
	list_for_each_entry(event_notifier_enabler, &event_notifier_group->enablers_head, node)
		lttng_event_notifier_enabler_ref_event_notifiers(event_notifier_enabler);

	/*
	 * For each event_notifier, if at least one of its enablers is enabled,
	 * we enable the event_notifier, else we disable it.
	 */
	list_for_each_entry(event_notifier, &event_notifier_group->event_notifiers_head, list) {
		struct lttng_enabler_ref *enabler_ref;
		struct lttng_bytecode_runtime *runtime;
		int enabled = 0, has_enablers_without_bytecode = 0;

		switch (event_notifier->instrumentation) {
		case LTTNG_KERNEL_TRACEPOINT:
		case LTTNG_KERNEL_SYSCALL:
			/* Enable event_notifiers */
			list_for_each_entry(enabler_ref,
					&event_notifier->enablers_ref_head, node) {
				if (enabler_ref->ref->enabled) {
					enabled = 1;
					break;
				}
			}
			break;
		default:
			/* Not handled with sync. */
			continue;
		}

		WRITE_ONCE(event_notifier->enabled, enabled);
		/*
		 * Sync tracepoint registration with event_notifier enabled
		 * state.
		 */
		if (enabled) {
			if (!event_notifier->registered)
				register_event_notifier(event_notifier);
		} else {
			if (event_notifier->registered)
				_lttng_event_notifier_unregister(event_notifier);
		}

		/* Check if has enablers without bytecode enabled */
		list_for_each_entry(enabler_ref,
				&event_notifier->enablers_ref_head, node) {
			if (enabler_ref->ref->enabled
					&& list_empty(&enabler_ref->ref->filter_bytecode_head)) {
				has_enablers_without_bytecode = 1;
				break;
			}
		}
		event_notifier->has_enablers_without_bytecode =
			has_enablers_without_bytecode;

		/* Enable filters */
		list_for_each_entry(runtime,
				&event_notifier->filter_bytecode_runtime_head, node)
			lttng_filter_sync_state(runtime);
	}
}
2568
/*
 * Serialize at most one packet worth of metadata into a metadata
 * channel.
 * We grab the metadata cache mutex to get exclusive access to our metadata
 * buffer and to the metadata cache. Exclusive access to the metadata buffer
 * allows us to do racy operations such as looking for remaining space left in
 * packet and write, since mutual exclusion protects us from concurrent writes.
 * Mutual exclusion on the metadata cache allow us to read the cache content
 * without racing against reallocation of the cache by updates.
 * Returns the number of bytes written in the channel, 0 if no data
 * was written and a negative value on error.
 */
int lttng_metadata_output_channel(struct lttng_metadata_stream *stream,
		struct channel *chan, bool *coherent)
{
	struct lib_ring_buffer_ctx ctx;
	int ret = 0;
	size_t len, reserve_len;

	/*
	 * Ensure we support multiple get_next / put sequences followed by
	 * put_next. The metadata cache lock protects reading the metadata
	 * cache. It can indeed be read concurrently by "get_next_subbuf" and
	 * "flush" operations on the buffer invoked by different processes.
	 * Moreover, since the metadata cache memory can be reallocated, we
	 * need to have exclusive access against updates even though we only
	 * read it.
	 */
	mutex_lock(&stream->metadata_cache->lock);
	WARN_ON(stream->metadata_in < stream->metadata_out);
	/* Previously produced data presumably not fully consumed yet: bail out. */
	if (stream->metadata_in != stream->metadata_out)
		goto end;

	/* Metadata regenerated, change the version. */
	if (stream->metadata_cache->version != stream->version)
		stream->version = stream->metadata_cache->version;

	/* Bytes of cached metadata not yet pushed into this stream. */
	len = stream->metadata_cache->metadata_written -
		stream->metadata_in;
	if (!len)
		goto end;
	/* Clamp to the space remaining in the current packet. */
	reserve_len = min_t(size_t,
			stream->transport->ops.packet_avail_size(chan),
			len);
	lib_ring_buffer_ctx_init(&ctx, chan, NULL, reserve_len,
			sizeof(char), -1);
	/*
	 * If reservation failed, return an error to the caller.
	 */
	ret = stream->transport->ops.event_reserve(&ctx, 0);
	if (ret != 0) {
		printk(KERN_WARNING "LTTng: Metadata event reservation failed\n");
		stream->coherent = false;
		goto end;
	}
	stream->transport->ops.event_write(&ctx,
			stream->metadata_cache->data + stream->metadata_in,
			reserve_len);
	stream->transport->ops.event_commit(&ctx);
	stream->metadata_in += reserve_len;
	/* The stream is coherent only when the whole cache fit in the packet. */
	if (reserve_len < len)
		stream->coherent = false;
	else
		stream->coherent = true;
	ret = reserve_len;

end:
	if (coherent)
		*coherent = stream->coherent;
	mutex_unlock(&stream->metadata_cache->lock);
	return ret;
}
2641
/*
 * Open a metadata transaction: the outermost begin (producing refcount
 * transitions 0 -> 1) takes the metadata cache lock; nested begins only
 * bump the refcount. Paired with lttng_metadata_end().
 */
static
void lttng_metadata_begin(struct lttng_session *session)
{
	if (atomic_inc_return(&session->metadata_cache->producing) == 1)
		mutex_lock(&session->metadata_cache->lock);
}
2648
/*
 * Close a metadata transaction opened by lttng_metadata_begin(). When
 * the outermost transaction completes (producing refcount drops to 0),
 * wake up readers of every metadata stream and release the cache lock.
 */
static
void lttng_metadata_end(struct lttng_session *session)
{
	/* Unbalanced end without a matching begin is a bug. */
	WARN_ON_ONCE(!atomic_read(&session->metadata_cache->producing));
	if (atomic_dec_return(&session->metadata_cache->producing) == 0) {
		struct lttng_metadata_stream *stream;

		list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list)
			wake_up_interruptible(&stream->read_wait);
		mutex_unlock(&session->metadata_cache->lock);
	}
}
2661
/*
 * Write the metadata to the metadata cache.
 * Must be called with sessions_mutex held.
 * The metadata cache lock protects us from concurrent read access from
 * thread outputting metadata content to ring buffer.
 * The content of the printf is printed as a single atomic metadata
 * transaction.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int lttng_metadata_printf(struct lttng_session *session,
			const char *fmt, ...)
{
	char *str;
	size_t len;
	va_list ap;

	WARN_ON_ONCE(!LTTNG_READ_ONCE(session->active));

	/* Format into a temporary kernel-allocated string first. */
	va_start(ap, fmt);
	str = kvasprintf(GFP_KERNEL, fmt, ap);
	va_end(ap);
	if (!str)
		return -ENOMEM;

	len = strlen(str);
	/* Caller must hold the cache lock via lttng_metadata_begin(). */
	WARN_ON_ONCE(!atomic_read(&session->metadata_cache->producing));
	if (session->metadata_cache->metadata_written + len >
			session->metadata_cache->cache_alloc) {
		char *tmp_cache_realloc;
		unsigned int tmp_cache_alloc_size;

		/*
		 * Grow geometrically (at least doubling, or enough to fit
		 * the appended data) so repeated appends amortize copies.
		 */
		tmp_cache_alloc_size = max_t(unsigned int,
				session->metadata_cache->cache_alloc + len,
				session->metadata_cache->cache_alloc << 1);
		tmp_cache_realloc = vzalloc(tmp_cache_alloc_size);
		if (!tmp_cache_realloc)
			goto err;
		if (session->metadata_cache->data) {
			memcpy(tmp_cache_realloc,
				session->metadata_cache->data,
				session->metadata_cache->cache_alloc);
			vfree(session->metadata_cache->data);
		}

		session->metadata_cache->cache_alloc = tmp_cache_alloc_size;
		session->metadata_cache->data = tmp_cache_realloc;
	}
	memcpy(session->metadata_cache->data +
			session->metadata_cache->metadata_written,
			str, len);
	session->metadata_cache->metadata_written += len;
	kfree(str);

	return 0;

err:
	kfree(str);
	return -ENOMEM;
}
2720
2721 static
2722 int print_tabs(struct lttng_session *session, size_t nesting)
2723 {
2724 size_t i;
2725
2726 for (i = 0; i < nesting; i++) {
2727 int ret;
2728
2729 ret = lttng_metadata_printf(session, " ");
2730 if (ret) {
2731 return ret;
2732 }
2733 }
2734 return 0;
2735 }
2736
/*
 * Emit the field name terminator (" _<name>;\n") that follows a type
 * statedump. `nesting` is currently unused: the name is printed on the
 * same line as the type.
 */
static
int lttng_field_name_statedump(struct lttng_session *session,
		const struct lttng_event_field *field,
		size_t nesting)
{
	return lttng_metadata_printf(session, " _%s;\n", field->name);
}
2744
/*
 * Emit the TSDL "integer { ... }" descriptor for an integer type.
 * Must be called with sessions_mutex held.
 */
static
int _lttng_integer_type_statedump(struct lttng_session *session,
		const struct lttng_type *type,
		size_t nesting)
{
	int ret;

	WARN_ON_ONCE(type->atype != atype_integer);
	ret = print_tabs(session, nesting);
	if (ret)
		return ret;
	ret = lttng_metadata_printf(session,
		"integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s }",
		type->u.integer.size,
		type->u.integer.alignment,
		type->u.integer.signedness,
		(type->u.integer.encoding == lttng_encode_none)
			? "none"
			: (type->u.integer.encoding == lttng_encode_UTF8)
				? "UTF8"
				: "ASCII",
		type->u.integer.base,
	/* Only emit byte_order when it differs from the native order. */
#if __BYTE_ORDER == __BIG_ENDIAN
		type->u.integer.reverse_byte_order ? " byte_order = le;" : ""
#else
		type->u.integer.reverse_byte_order ? " byte_order = be;" : ""
#endif
	);
	return ret;
}
2775
2776 /*
2777 * Must be called with sessions_mutex held.
2778 */
2779 static
2780 int _lttng_struct_type_statedump(struct lttng_session *session,
2781 const struct lttng_type *type,
2782 size_t nesting)
2783 {
2784 int ret;
2785 uint32_t i, nr_fields;
2786 unsigned int alignment;
2787
2788 WARN_ON_ONCE(type->atype != atype_struct_nestable);
2789
2790 ret = print_tabs(session, nesting);
2791 if (ret)
2792 return ret;
2793 ret = lttng_metadata_printf(session,
2794 "struct {\n");
2795 if (ret)
2796 return ret;
2797 nr_fields = type->u.struct_nestable.nr_fields;
2798 for (i = 0; i < nr_fields; i++) {
2799 const struct lttng_event_field *iter_field;
2800
2801 iter_field = &type->u.struct_nestable.fields[i];
2802 ret = _lttng_field_statedump(session, iter_field, nesting + 1);
2803 if (ret)
2804 return ret;
2805 }
2806 ret = print_tabs(session, nesting);
2807 if (ret)
2808 return ret;
2809 alignment = type->u.struct_nestable.alignment;
2810 if (alignment) {
2811 ret = lttng_metadata_printf(session,
2812 "} align(%u)",
2813 alignment);
2814 } else {
2815 ret = lttng_metadata_printf(session,
2816 "}");
2817 }
2818 return ret;
2819 }
2820
2821 /*
2822 * Must be called with sessions_mutex held.
2823 */
2824 static
2825 int _lttng_struct_field_statedump(struct lttng_session *session,
2826 const struct lttng_event_field *field,
2827 size_t nesting)
2828 {
2829 int ret;
2830
2831 ret = _lttng_struct_type_statedump(session,
2832 &field->type, nesting);
2833 if (ret)
2834 return ret;
2835 return lttng_field_name_statedump(session, field, nesting);
2836 }
2837
/*
 * Emit the TSDL representation of a nestable variant type, selected by
 * its tag field, dumping each choice one nesting level deeper.
 * Must be called with sessions_mutex held.
 */
static
int _lttng_variant_type_statedump(struct lttng_session *session,
		const struct lttng_type *type,
		size_t nesting)
{
	int ret;
	uint32_t i, nr_choices;

	WARN_ON_ONCE(type->atype != atype_variant_nestable);
	/*
	 * CTF 1.8 does not allow expressing nonzero variant alignment in a nestable way.
	 */
	if (type->u.variant_nestable.alignment != 0)
		return -EINVAL;
	ret = print_tabs(session, nesting);
	if (ret)
		return ret;
	ret = lttng_metadata_printf(session,
			"variant <_%s> {\n",
			type->u.variant_nestable.tag_name);
	if (ret)
		return ret;
	nr_choices = type->u.variant_nestable.nr_choices;
	for (i = 0; i < nr_choices; i++) {
		const struct lttng_event_field *iter_field;

		iter_field = &type->u.variant_nestable.choices[i];
		ret = _lttng_field_statedump(session, iter_field, nesting + 1);
		if (ret)
			return ret;
	}
	ret = print_tabs(session, nesting);
	if (ret)
		return ret;
	ret = lttng_metadata_printf(session,
			"}");
	return ret;
}
2879
2880 /*
2881 * Must be called with sessions_mutex held.
2882 */
2883 static
2884 int _lttng_variant_field_statedump(struct lttng_session *session,
2885 const struct lttng_event_field *field,
2886 size_t nesting)
2887 {
2888 int ret;
2889
2890 ret = _lttng_variant_type_statedump(session,
2891 &field->type, nesting);
2892 if (ret)
2893 return ret;
2894 return lttng_field_name_statedump(session, field, nesting);
2895 }
2896
/*
 * Statedump a fixed-length array field in TSDL form: an optional
 * padding struct carrying the alignment, then the element type, then
 * "_name[length];". Only integer, structure and variant element types
 * are supported.
 * Must be called with sessions_mutex held.
 */
static
int _lttng_array_field_statedump(struct lttng_session *session,
		const struct lttng_event_field *field,
		size_t nesting)
{
	int ret;
	const struct lttng_type *elem_type;

	WARN_ON_ONCE(field->type.atype != atype_array_nestable);

	if (field->type.u.array_nestable.alignment) {
		/* Express the array alignment via an empty aligned struct. */
		ret = print_tabs(session, nesting);
		if (ret)
			return ret;
		ret = lttng_metadata_printf(session,
		"struct { } align(%u) _%s_padding;\n",
				field->type.u.array_nestable.alignment * CHAR_BIT,
				field->name);
		if (ret)
			return ret;
	}
	/*
	 * Nested compound types: Only array of structures and variants are
	 * currently supported.
	 */
	elem_type = field->type.u.array_nestable.elem_type;
	switch (elem_type->atype) {
	case atype_integer:
	case atype_struct_nestable:
	case atype_variant_nestable:
		ret = _lttng_type_statedump(session, elem_type, nesting);
		if (ret)
			return ret;
		break;

	default:
		return -EINVAL;
	}
	ret = lttng_metadata_printf(session,
			" _%s[%u];\n",
			field->name,
			field->type.u.array_nestable.length);
	return ret;
}
2944
2945 /*
2946 * Must be called with sessions_mutex held.
2947 */
2948 static
2949 int _lttng_sequence_field_statedump(struct lttng_session *session,
2950 const struct lttng_event_field *field,
2951 size_t nesting)
2952 {
2953 int ret;
2954 const char *length_name;
2955 const struct lttng_type *elem_type;
2956
2957 WARN_ON_ONCE(field->type.atype != atype_sequence_nestable);
2958
2959 length_name = field->type.u.sequence_nestable.length_name;
2960
2961 if (field->type.u.sequence_nestable.alignment) {
2962 ret = print_tabs(session, nesting);
2963 if (ret)
2964 return ret;
2965 ret = lttng_metadata_printf(session,
2966 "struct { } align(%u) _%s_padding;\n",
2967 field->type.u.sequence_nestable.alignment * CHAR_BIT,
2968 field->name);
2969 if (ret)
2970 return ret;
2971 }
2972
2973 /*
2974 * Nested compound types: Only array of structures and variants are
2975 * currently supported.
2976 */
2977 elem_type = field->type.u.sequence_nestable.elem_type;
2978 switch (elem_type->atype) {
2979 case atype_integer:
2980 case atype_struct_nestable:
2981 case atype_variant_nestable:
2982 ret = _lttng_type_statedump(session, elem_type, nesting);
2983 if (ret)
2984 return ret;
2985 break;
2986
2987 default:
2988 return -EINVAL;
2989 }
2990 ret = lttng_metadata_printf(session,
2991 " _%s[ _%s ];\n",
2992 field->name,
2993 field->type.u.sequence_nestable.length_name);
2994 return ret;
2995 }
2996
/*
 * Emit the TSDL representation of a nestable enumeration type: its
 * integer container type followed by every entry label with its value
 * (or value range). Entry labels are emitted with '"' and '\' escaped.
 * The container type must be an integer, else -EINVAL is returned.
 * Must be called with sessions_mutex held.
 */
static
int _lttng_enum_type_statedump(struct lttng_session *session,
		const struct lttng_type *type,
		size_t nesting)
{
	const struct lttng_enum_desc *enum_desc;
	const struct lttng_type *container_type;
	int ret;
	unsigned int i, nr_entries;

	container_type = type->u.enum_nestable.container_type;
	if (container_type->atype != atype_integer) {
		ret = -EINVAL;
		goto end;
	}
	enum_desc = type->u.enum_nestable.desc;
	nr_entries = enum_desc->nr_entries;

	ret = print_tabs(session, nesting);
	if (ret)
		goto end;
	ret = lttng_metadata_printf(session, "enum : ");
	if (ret)
		goto end;
	ret = _lttng_integer_type_statedump(session, container_type, 0);
	if (ret)
		goto end;
	ret = lttng_metadata_printf(session, " {\n");
	if (ret)
		goto end;
	/* Dump all entries */
	for (i = 0; i < nr_entries; i++) {
		const struct lttng_enum_entry *entry = &enum_desc->entries[i];
		int j, len;

		ret = print_tabs(session, nesting + 1);
		if (ret)
			goto end;
		ret = lttng_metadata_printf(session,
				"\"");
		if (ret)
			goto end;
		len = strlen(entry->string);
		/* Escape the characters '"' and '\' */
		for (j = 0; j < len; j++) {
			char c = entry->string[j];

			switch (c) {
			case '"':
				ret = lttng_metadata_printf(session,
						"\\\"");
				break;
			case '\\':
				ret = lttng_metadata_printf(session,
						"\\\\");
				break;
			default:
				ret = lttng_metadata_printf(session,
						"%c", c);
				break;
			}
			if (ret)
				goto end;
		}
		ret = lttng_metadata_printf(session, "\"");
		if (ret)
			goto end;

		/* Auto-numbered entries get no explicit value. */
		if (entry->options.is_auto) {
			ret = lttng_metadata_printf(session, ",\n");
			if (ret)
				goto end;
		} else {
			ret = lttng_metadata_printf(session,
					" = ");
			if (ret)
				goto end;
			if (entry->start.signedness)
				ret = lttng_metadata_printf(session,
					"%lld", (long long) entry->start.value);
			else
				ret = lttng_metadata_printf(session,
					"%llu", entry->start.value);
			if (ret)
				goto end;
			/* A start == end range collapses to a single value. */
			if (entry->start.signedness == entry->end.signedness &&
					entry->start.value
						== entry->end.value) {
				ret = lttng_metadata_printf(session,
					",\n");
			} else {
				if (entry->end.signedness) {
					ret = lttng_metadata_printf(session,
						" ... %lld,\n",
						(long long) entry->end.value);
				} else {
					ret = lttng_metadata_printf(session,
						" ... %llu,\n",
						entry->end.value);
				}
			}
			if (ret)
				goto end;
		}
	}
	ret = print_tabs(session, nesting);
	if (ret)
		goto end;
	ret = lttng_metadata_printf(session, "}");
end:
	return ret;
}
3112
3113 /*
3114 * Must be called with sessions_mutex held.
3115 */
3116 static
3117 int _lttng_enum_field_statedump(struct lttng_session *session,
3118 const struct lttng_event_field *field,
3119 size_t nesting)
3120 {
3121 int ret;
3122
3123 ret = _lttng_enum_type_statedump(session, &field->type, nesting);
3124 if (ret)
3125 return ret;
3126 return lttng_field_name_statedump(session, field, nesting);
3127 }
3128
3129 static
3130 int _lttng_integer_field_statedump(struct lttng_session *session,
3131 const struct lttng_event_field *field,
3132 size_t nesting)
3133 {
3134 int ret;
3135
3136 ret = _lttng_integer_type_statedump(session, &field->type, nesting);
3137 if (ret)
3138 return ret;
3139 return lttng_field_name_statedump(session, field, nesting);
3140 }
3141
3142 static
3143 int _lttng_string_type_statedump(struct lttng_session *session,
3144 const struct lttng_type *type,
3145 size_t nesting)
3146 {
3147 int ret;
3148
3149 WARN_ON_ONCE(type->atype != atype_string);
3150 /* Default encoding is UTF8 */
3151 ret = print_tabs(session, nesting);
3152 if (ret)
3153 return ret;
3154 ret = lttng_metadata_printf(session,
3155 "string%s",
3156 type->u.string.encoding == lttng_encode_ASCII ?
3157 " { encoding = ASCII; }" : "");
3158 return ret;
3159 }
3160
3161 static
3162 int _lttng_string_field_statedump(struct lttng_session *session,
3163 const struct lttng_event_field *field,
3164 size_t nesting)
3165 {
3166 int ret;
3167
3168 WARN_ON_ONCE(field->type.atype != atype_string);
3169 ret = _lttng_string_type_statedump(session, &field->type, nesting);
3170 if (ret)
3171 return ret;
3172 return lttng_field_name_statedump(session, field, nesting);
3173 }
3174
/*
 * Dispatch the TSDL statedump of a type to the handler matching its
 * abstract type. Nested arrays and sequences are not supported yet and
 * yield -EINVAL.
 * Must be called with sessions_mutex held.
 */
static
int _lttng_type_statedump(struct lttng_session *session,
		const struct lttng_type *type,
		size_t nesting)
{
	int ret = 0;

	switch (type->atype) {
	case atype_integer:
		ret = _lttng_integer_type_statedump(session, type, nesting);
		break;
	case atype_enum_nestable:
		ret = _lttng_enum_type_statedump(session, type, nesting);
		break;
	case atype_string:
		ret = _lttng_string_type_statedump(session, type, nesting);
		break;
	case atype_struct_nestable:
		ret = _lttng_struct_type_statedump(session, type, nesting);
		break;
	case atype_variant_nestable:
		ret = _lttng_variant_type_statedump(session, type, nesting);
		break;

	/* Nested arrays and sequences are not supported yet. */
	case atype_array_nestable:
	case atype_sequence_nestable:
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}
	return ret;
}
3211
/*
 * Dispatch the TSDL statedump of a named field to the handler matching
 * its type's abstract type. Unknown types yield -EINVAL.
 * Must be called with sessions_mutex held.
 */
static
int _lttng_field_statedump(struct lttng_session *session,
		const struct lttng_event_field *field,
		size_t nesting)
{
	int ret = 0;

	switch (field->type.atype) {
	case atype_integer:
		ret = _lttng_integer_field_statedump(session, field, nesting);
		break;
	case atype_enum_nestable:
		ret = _lttng_enum_field_statedump(session, field, nesting);
		break;
	case atype_string:
		ret = _lttng_string_field_statedump(session, field, nesting);
		break;
	case atype_struct_nestable:
		ret = _lttng_struct_field_statedump(session, field, nesting);
		break;
	case atype_array_nestable:
		ret = _lttng_array_field_statedump(session, field, nesting);
		break;
	case atype_sequence_nestable:
		ret = _lttng_sequence_field_statedump(session, field, nesting);
		break;
	case atype_variant_nestable:
		ret = _lttng_variant_field_statedump(session, field, nesting);
		break;

	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}
	return ret;
}
3251
3252 static
3253 int _lttng_context_metadata_statedump(struct lttng_session *session,
3254 struct lttng_ctx *ctx)
3255 {
3256 int ret = 0;
3257 int i;
3258
3259 if (!ctx)
3260 return 0;
3261 for (i = 0; i < ctx->nr_fields; i++) {
3262 const struct lttng_ctx_field *field = &ctx->fields[i];
3263
3264 ret = _lttng_field_statedump(session, &field->event_field, 2);
3265 if (ret)
3266 return ret;
3267 }
3268 return ret;
3269 }
3270
3271 static
3272 int _lttng_fields_metadata_statedump(struct lttng_session *session,
3273 struct lttng_event *event)
3274 {
3275 const struct lttng_event_desc *desc = event->desc;
3276 int ret = 0;
3277 int i;
3278
3279 for (i = 0; i < desc->nr_fields; i++) {
3280 const struct lttng_event_field *field = &desc->fields[i];
3281
3282 ret = _lttng_field_statedump(session, field, 2);
3283 if (ret)
3284 return ret;
3285 }
3286 return ret;
3287 }
3288
3289 /*
3290 * Must be called with sessions_mutex held.
3291 * The entire event metadata is printed as a single atomic metadata
3292 * transaction.
3293 */
static
int _lttng_event_metadata_statedump(struct lttng_session *session,
		struct lttng_channel *chan,
		struct lttng_event *event)
{
	int ret = 0;

	/* Skip if already dumped, or while the session is inactive. */
	if (event->metadata_dumped || !LTTNG_READ_ONCE(session->active))
		return 0;
	/* The metadata channel itself is not described in the metadata. */
	if (chan->channel_type == METADATA_CHANNEL)
		return 0;

	/* Open the atomic metadata transaction. */
	lttng_metadata_begin(session);

	ret = lttng_metadata_printf(session,
		"event {\n"
		"	name = \"%s\";\n"
		"	id = %u;\n"
		"	stream_id = %u;\n",
		event->desc->name,
		event->id,
		event->chan->id);
	if (ret)
		goto end;

	/* Only emit a context struct when the event has context fields. */
	if (event->ctx) {
		ret = lttng_metadata_printf(session,
			"	context := struct {\n");
		if (ret)
			goto end;
	}
	ret = _lttng_context_metadata_statedump(session, event->ctx);
	if (ret)
		goto end;
	if (event->ctx) {
		ret = lttng_metadata_printf(session,
			"	};\n");
		if (ret)
			goto end;
	}

	ret = lttng_metadata_printf(session,
		"	fields := struct {\n"
		);
	if (ret)
		goto end;

	ret = _lttng_fields_metadata_statedump(session, event);
	if (ret)
		goto end;

	/*
	 * LTTng space reservation can only reserve multiples of the
	 * byte size.
	 */
	ret = lttng_metadata_printf(session,
		"	};\n"
		"};\n\n");
	if (ret)
		goto end;

	/* Mark dumped only once the full description was emitted. */
	event->metadata_dumped = 1;
end:
	lttng_metadata_end(session);
	return ret;

}
3361
3362 /*
3363 * Must be called with sessions_mutex held.
3364 * The entire channel metadata is printed as a single atomic metadata
3365 * transaction.
3366 */
3367 static
3368 int _lttng_channel_metadata_statedump(struct lttng_session *session,
3369 struct lttng_channel *chan)
3370 {
3371 int ret = 0;
3372
3373 if (chan->metadata_dumped || !LTTNG_READ_ONCE(session->active))
3374 return 0;
3375
3376 if (chan->channel_type == METADATA_CHANNEL)
3377 return 0;
3378
3379 lttng_metadata_begin(session);
3380
3381 WARN_ON_ONCE(!chan->header_type);
3382 ret = lttng_metadata_printf(session,
3383 "stream {\n"
3384 " id = %u;\n"
3385 " event.header := %s;\n"
3386 " packet.context := struct packet_context;\n",
3387 chan->id,
3388 chan->header_type == 1 ? "struct event_header_compact" :
3389 "struct event_header_large");
3390 if (ret)
3391 goto end;
3392
3393 if (chan->ctx) {
3394 ret = lttng_metadata_printf(session,
3395 " event.context := struct {\n");
3396 if (ret)
3397 goto end;
3398 }
3399 ret = _lttng_context_metadata_statedump(session, chan->ctx);
3400 if (ret)
3401 goto end;
3402 if (chan->ctx) {
3403 ret = lttng_metadata_printf(session,
3404 " };\n");
3405 if (ret)
3406 goto end;
3407 }
3408
3409 ret = lttng_metadata_printf(session,
3410 "};\n\n");
3411
3412 chan->metadata_dumped = 1;
3413 end:
3414 lttng_metadata_end(session);
3415 return ret;
3416 }
3417
3418 /*
3419 * Must be called with sessions_mutex held.
3420 */
static
int _lttng_stream_packet_context_declare(struct lttng_session *session)
{
	/*
	 * Declare the packet context layout shared by all streams:
	 * begin/end timestamps, content and packet sizes, packet
	 * sequence number, discarded event count, and originating CPU.
	 */
	return lttng_metadata_printf(session,
		"struct packet_context {\n"
		"	uint64_clock_monotonic_t timestamp_begin;\n"
		"	uint64_clock_monotonic_t timestamp_end;\n"
		"	uint64_t content_size;\n"
		"	uint64_t packet_size;\n"
		"	uint64_t packet_seq_num;\n"
		"	unsigned long events_discarded;\n"
		"	uint32_t cpu_id;\n"
		"};\n\n"
		);
}
3436
3437 /*
3438 * Compact header:
3439 * id: range: 0 - 30.
3440 * id 31 is reserved to indicate an extended header.
3441 *
3442 * Large header:
3443 * id: range: 0 - 65534.
3444 * id 65535 is reserved to indicate an extended header.
3445 *
3446 * Must be called with sessions_mutex held.
3447 */
static
int _lttng_event_header_declare(struct lttng_session *session)
{
	/*
	 * Declare both event header layouts. The compact form carries a
	 * 27-bit timestamp; the large form a 32-bit timestamp. Both use
	 * the reserved "extended" id to switch to a 32-bit id plus a
	 * 64-bit timestamp. Alignments come from this architecture's
	 * natural alignment of uint32_t (compact) and uint16_t (large).
	 */
	return lttng_metadata_printf(session,
	"struct event_header_compact {\n"
	"	enum : uint5_t { compact = 0 ... 30, extended = 31 } id;\n"
	"	variant <id> {\n"
	"		struct {\n"
	"			uint27_clock_monotonic_t timestamp;\n"
	"		} compact;\n"
	"		struct {\n"
	"			uint32_t id;\n"
	"			uint64_clock_monotonic_t timestamp;\n"
	"		} extended;\n"
	"	} v;\n"
	"} align(%u);\n"
	"\n"
	"struct event_header_large {\n"
	"	enum : uint16_t { compact = 0 ... 65534, extended = 65535 } id;\n"
	"	variant <id> {\n"
	"		struct {\n"
	"			uint32_clock_monotonic_t timestamp;\n"
	"		} compact;\n"
	"		struct {\n"
	"			uint32_t id;\n"
	"			uint64_clock_monotonic_t timestamp;\n"
	"		} extended;\n"
	"	} v;\n"
	"} align(%u);\n\n",
	lttng_alignof(uint32_t) * CHAR_BIT,
	lttng_alignof(uint16_t) * CHAR_BIT
	);
}
3481
3482 /*
3483 * Approximation of NTP time of day to clock monotonic correlation,
3484 * taken at start of trace.
3485 * Yes, this is only an approximation. Yes, we can (and will) do better
3486 * in future versions.
3487 * This function may return a negative offset. It may happen if the
3488 * system sets the REALTIME clock to 0 after boot.
3489 *
3490 * Use 64bit timespec on kernels that have it, this makes 32bit arch
3491 * y2038 compliant.
3492 */
static
int64_t measure_clock_offset(void)
{
	uint64_t monotonic_avg, monotonic[2], realtime;
	uint64_t tcf = trace_clock_freq();
	int64_t offset;
	unsigned long flags;
#ifdef LTTNG_KERNEL_HAS_TIMESPEC64
	struct timespec64 rts = { 0, 0 };
#else
	struct timespec rts = { 0, 0 };
#endif

	/* Disable interrupts to increase correlation precision. */
	local_irq_save(flags);
	/* Bracket the REALTIME read with two trace clock samples. */
	monotonic[0] = trace_clock_read64();
#ifdef LTTNG_KERNEL_HAS_TIMESPEC64
	ktime_get_real_ts64(&rts);
#else
	getnstimeofday(&rts);
#endif
	monotonic[1] = trace_clock_read64();
	local_irq_restore(flags);

	/* Midpoint of the two trace clock samples. */
	monotonic_avg = (monotonic[0] + monotonic[1]) >> 1;
	/* Convert the REALTIME reading into trace clock units. */
	realtime = (uint64_t) rts.tv_sec * tcf;
	if (tcf == NSEC_PER_SEC) {
		/* 1 GHz trace clock: nanoseconds map directly. */
		realtime += rts.tv_nsec;
	} else {
		uint64_t n = rts.tv_nsec * tcf;

		/* do_div() divides in place: n becomes the quotient. */
		do_div(n, NSEC_PER_SEC);
		realtime += n;
	}
	/* May be negative (see the comment above this function). */
	offset = (int64_t) realtime - monotonic_avg;
	return offset;
}
3530
3531 static
3532 int print_escaped_ctf_string(struct lttng_session *session, const char *string)
3533 {
3534 int ret = 0;
3535 size_t i;
3536 char cur;
3537
3538 i = 0;
3539 cur = string[i];
3540 while (cur != '\0') {
3541 switch (cur) {
3542 case '\n':
3543 ret = lttng_metadata_printf(session, "%s", "\\n");
3544 break;
3545 case '\\':
3546 case '"':
3547 ret = lttng_metadata_printf(session, "%c", '\\');
3548 if (ret)
3549 goto error;
3550 /* We still print the current char */
3551 /* Fallthrough */
3552 default:
3553 ret = lttng_metadata_printf(session, "%c", cur);
3554 break;
3555 }
3556
3557 if (ret)
3558 goto error;
3559
3560 cur = string[++i];
3561 }
3562 error:
3563 return ret;
3564 }
3565
static
int print_metadata_escaped_field(struct lttng_session *session, const char *field,
		const char *field_value)
{
	int ret;

	/* Emit one env entry of the form `<field> = "<escaped value>";`. */
	ret = lttng_metadata_printf(session, "	%s = \"", field);
	if (ret)
		return ret;

	ret = print_escaped_ctf_string(session, field_value);
	if (ret)
		return ret;

	return lttng_metadata_printf(session, "\";\n");
}
3585
3586 /*
3587 * Output metadata into this session's metadata buffers.
3588 * Must be called with sessions_mutex held.
3589 */
static
int _lttng_session_metadata_statedump(struct lttng_session *session)
{
	unsigned char *uuid_c = session->uuid.b;
	unsigned char uuid_s[37], clock_uuid_s[BOOT_ID_LEN];
	const char *product_uuid;
	struct lttng_channel *chan;
	struct lttng_event *event;
	int ret = 0;

	/* Nothing to emit while the session is inactive. */
	if (!LTTNG_READ_ONCE(session->active))
		return 0;

	/* Open the atomic metadata transaction. */
	lttng_metadata_begin(session);

	/* The session-wide preamble below is emitted only once. */
	if (session->metadata_dumped)
		goto skip_session;

	/* Render the session UUID in its canonical string form. */
	snprintf(uuid_s, sizeof(uuid_s),
		"%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
		uuid_c[0], uuid_c[1], uuid_c[2], uuid_c[3],
		uuid_c[4], uuid_c[5], uuid_c[6], uuid_c[7],
		uuid_c[8], uuid_c[9], uuid_c[10], uuid_c[11],
		uuid_c[12], uuid_c[13], uuid_c[14], uuid_c[15]);

	/* Base integer typealiases, then the trace block (packet header). */
	ret = lttng_metadata_printf(session,
		"typealias integer { size = 8; align = %u; signed = false; } := uint8_t;\n"
		"typealias integer { size = 16; align = %u; signed = false; } := uint16_t;\n"
		"typealias integer { size = 32; align = %u; signed = false; } := uint32_t;\n"
		"typealias integer { size = 64; align = %u; signed = false; } := uint64_t;\n"
		"typealias integer { size = %u; align = %u; signed = false; } := unsigned long;\n"
		"typealias integer { size = 5; align = 1; signed = false; } := uint5_t;\n"
		"typealias integer { size = 27; align = 1; signed = false; } := uint27_t;\n"
		"\n"
		"trace {\n"
		"	major = %u;\n"
		"	minor = %u;\n"
		"	uuid = \"%s\";\n"
		"	byte_order = %s;\n"
		"	packet.header := struct {\n"
		"		uint32_t magic;\n"
		"		uint8_t uuid[16];\n"
		"		uint32_t stream_id;\n"
		"		uint64_t stream_instance_id;\n"
		"	};\n"
		"};\n\n",
		lttng_alignof(uint8_t) * CHAR_BIT,
		lttng_alignof(uint16_t) * CHAR_BIT,
		lttng_alignof(uint32_t) * CHAR_BIT,
		lttng_alignof(uint64_t) * CHAR_BIT,
		sizeof(unsigned long) * CHAR_BIT,
		lttng_alignof(unsigned long) * CHAR_BIT,
		CTF_SPEC_MAJOR,
		CTF_SPEC_MINOR,
		uuid_s,
#if __BYTE_ORDER == __BIG_ENDIAN
		"be"
#else
		"le"
#endif
		);
	if (ret)
		goto end;

	/* Environment section: host, kernel, and tracer identification. */
	ret = lttng_metadata_printf(session,
		"env {\n"
		"	hostname = \"%s\";\n"
		"	domain = \"kernel\";\n"
		"	sysname = \"%s\";\n"
		"	kernel_release = \"%s\";\n"
		"	kernel_version = \"%s\";\n"
		"	tracer_name = \"lttng-modules\";\n"
		"	tracer_major = %d;\n"
		"	tracer_minor = %d;\n"
		"	tracer_patchlevel = %d;\n"
		"	trace_buffering_scheme = \"global\";\n",
		current->nsproxy->uts_ns->name.nodename,
		utsname()->sysname,
		utsname()->release,
		utsname()->version,
		LTTNG_MODULES_MAJOR_VERSION,
		LTTNG_MODULES_MINOR_VERSION,
		LTTNG_MODULES_PATCHLEVEL_VERSION
		);
	if (ret)
		goto end;

	/* Trace name and creation time need CTF string escaping. */
	ret = print_metadata_escaped_field(session, "trace_name", session->name);
	if (ret)
		goto end;
	ret = print_metadata_escaped_field(session, "trace_creation_datetime",
			session->creation_time);
	if (ret)
		goto end;

	/* Add the product UUID to the 'env' section */
	product_uuid = dmi_get_system_info(DMI_PRODUCT_UUID);
	if (product_uuid) {
		ret = lttng_metadata_printf(session,
				"	product_uuid = \"%s\";\n",
				product_uuid
				);
		if (ret)
			goto end;
	}

	/* Close the 'env' section */
	ret = lttng_metadata_printf(session, "};\n\n");
	if (ret)
		goto end;

	/* Clock description: name, optional uuid, frequency and offset. */
	ret = lttng_metadata_printf(session,
		"clock {\n"
		"	name = \"%s\";\n",
		trace_clock_name()
		);
	if (ret)
		goto end;

	if (!trace_clock_uuid(clock_uuid_s)) {
		ret = lttng_metadata_printf(session,
			"	uuid = \"%s\";\n",
			clock_uuid_s
			);
		if (ret)
			goto end;
	}

	ret = lttng_metadata_printf(session,
		"	description = \"%s\";\n"
		"	freq = %llu; /* Frequency, in Hz */\n"
		"	/* clock value offset from Epoch is: offset * (1/freq) */\n"
		"	offset = %lld;\n"
		"};\n\n",
		trace_clock_description(),
		(unsigned long long) trace_clock_freq(),
		(long long) measure_clock_offset()
		);
	if (ret)
		goto end;

	/* Clock-mapped integer typealiases used by headers and contexts. */
	ret = lttng_metadata_printf(session,
		"typealias integer {\n"
		"	size = 27; align = 1; signed = false;\n"
		"	map = clock.%s.value;\n"
		"} := uint27_clock_monotonic_t;\n"
		"\n"
		"typealias integer {\n"
		"	size = 32; align = %u; signed = false;\n"
		"	map = clock.%s.value;\n"
		"} := uint32_clock_monotonic_t;\n"
		"\n"
		"typealias integer {\n"
		"	size = 64; align = %u; signed = false;\n"
		"	map = clock.%s.value;\n"
		"} := uint64_clock_monotonic_t;\n\n",
		trace_clock_name(),
		lttng_alignof(uint32_t) * CHAR_BIT,
		trace_clock_name(),
		lttng_alignof(uint64_t) * CHAR_BIT,
		trace_clock_name()
		);
	if (ret)
		goto end;

	ret = _lttng_stream_packet_context_declare(session);
	if (ret)
		goto end;

	ret = _lttng_event_header_declare(session);
	if (ret)
		goto end;

skip_session:
	/* Channels and events track their own dumped state. */
	list_for_each_entry(chan, &session->chan, list) {
		ret = _lttng_channel_metadata_statedump(session, chan);
		if (ret)
			goto end;
	}

	list_for_each_entry(event, &session->events, list) {
		ret = _lttng_event_metadata_statedump(session, event->chan, event);
		if (ret)
			goto end;
	}
	session->metadata_dumped = 1;
end:
	lttng_metadata_end(session);
	return ret;
}
3780
3781 /**
3782 * lttng_transport_register - LTT transport registration
3783 * @transport: transport structure
3784 *
3785 * Registers a transport which can be used as output to extract the data out of
3786 * LTTng. The module calling this registration function must ensure that no
3787 * trap-inducing code will be executed by the transport functions. E.g.
3788 * vmalloc_sync_mappings() must be called between a vmalloc and the moment the memory
3789 * is made visible to the transport function. This registration acts as a
3790 * vmalloc_sync_mappings. Therefore, only if the module allocates virtual memory
3791 * after its registration must it synchronize the TLBs.
3792 */
void lttng_transport_register(struct lttng_transport *transport)
{
	/*
	 * Make sure no page fault can be triggered by the module about to be
	 * registered. We deal with this here so we don't have to call
	 * vmalloc_sync_mappings() in each module's init.
	 * Must happen before the transport is published on the list below.
	 */
	wrapper_vmalloc_sync_mappings();

	/* Publish the transport; sessions_mutex protects the list. */
	mutex_lock(&sessions_mutex);
	list_add_tail(&transport->node, &lttng_transport_list);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_transport_register);
3807
3808 /**
3809 * lttng_transport_unregister - LTT transport unregistration
3810 * @transport: transport structure
3811 */
void lttng_transport_unregister(struct lttng_transport *transport)
{
	/* Unlink under sessions_mutex, which protects the transport list. */
	mutex_lock(&sessions_mutex);
	list_del(&transport->node);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_transport_unregister);
3819
3820 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
3821
3822 enum cpuhp_state lttng_hp_prepare;
3823 enum cpuhp_state lttng_hp_online;
3824
3825 static int lttng_hotplug_prepare(unsigned int cpu, struct hlist_node *node)
3826 {
3827 struct lttng_cpuhp_node *lttng_node;
3828
3829 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
3830 switch (lttng_node->component) {
3831 case LTTNG_RING_BUFFER_FRONTEND:
3832 return 0;
3833 case LTTNG_RING_BUFFER_BACKEND:
3834 return lttng_cpuhp_rb_backend_prepare(cpu, lttng_node);
3835 case LTTNG_RING_BUFFER_ITER:
3836 return 0;
3837 case LTTNG_CONTEXT_PERF_COUNTERS:
3838 return 0;
3839 default:
3840 return -EINVAL;
3841 }
3842 }
3843
3844 static int lttng_hotplug_dead(unsigned int cpu, struct hlist_node *node)
3845 {
3846 struct lttng_cpuhp_node *lttng_node;
3847
3848 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
3849 switch (lttng_node->component) {
3850 case LTTNG_RING_BUFFER_FRONTEND:
3851 return lttng_cpuhp_rb_frontend_dead(cpu, lttng_node);
3852 case LTTNG_RING_BUFFER_BACKEND:
3853 return 0;
3854 case LTTNG_RING_BUFFER_ITER:
3855 return 0;
3856 case LTTNG_CONTEXT_PERF_COUNTERS:
3857 return lttng_cpuhp_perf_counter_dead(cpu, lttng_node);
3858 default:
3859 return -EINVAL;
3860 }
3861 }
3862
3863 static int lttng_hotplug_online(unsigned int cpu, struct hlist_node *node)
3864 {
3865 struct lttng_cpuhp_node *lttng_node;
3866
3867 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
3868 switch (lttng_node->component) {
3869 case LTTNG_RING_BUFFER_FRONTEND:
3870 return lttng_cpuhp_rb_frontend_online(cpu, lttng_node);
3871 case LTTNG_RING_BUFFER_BACKEND:
3872 return 0;
3873 case LTTNG_RING_BUFFER_ITER:
3874 return lttng_cpuhp_rb_iter_online(cpu, lttng_node);
3875 case LTTNG_CONTEXT_PERF_COUNTERS:
3876 return lttng_cpuhp_perf_counter_online(cpu, lttng_node);
3877 default:
3878 return -EINVAL;
3879 }
3880 }
3881
3882 static int lttng_hotplug_offline(unsigned int cpu, struct hlist_node *node)
3883 {
3884 struct lttng_cpuhp_node *lttng_node;
3885
3886 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
3887 switch (lttng_node->component) {
3888 case LTTNG_RING_BUFFER_FRONTEND:
3889 return lttng_cpuhp_rb_frontend_offline(cpu, lttng_node);
3890 case LTTNG_RING_BUFFER_BACKEND:
3891 return 0;
3892 case LTTNG_RING_BUFFER_ITER:
3893 return 0;
3894 case LTTNG_CONTEXT_PERF_COUNTERS:
3895 return 0;
3896 default:
3897 return -EINVAL;
3898 }
3899 }
3900
3901 static int __init lttng_init_cpu_hotplug(void)
3902 {
3903 int ret;
3904
3905 ret = cpuhp_setup_state_multi(CPUHP_BP_PREPARE_DYN, "lttng:prepare",
3906 lttng_hotplug_prepare,
3907 lttng_hotplug_dead);
3908 if (ret < 0) {
3909 return ret;
3910 }
3911 lttng_hp_prepare = ret;
3912 lttng_rb_set_hp_prepare(ret);
3913
3914 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "lttng:online",
3915 lttng_hotplug_online,
3916 lttng_hotplug_offline);
3917 if (ret < 0) {
3918 cpuhp_remove_multi_state(lttng_hp_prepare);
3919 lttng_hp_prepare = 0;
3920 return ret;
3921 }
3922 lttng_hp_online = ret;
3923 lttng_rb_set_hp_online(ret);
3924
3925 return 0;
3926 }
3927
static void __exit lttng_exit_cpu_hotplug(void)
{
	/* Tear down in reverse setup order: online state, then prepare. */
	lttng_rb_set_hp_online(0);
	cpuhp_remove_multi_state(lttng_hp_online);
	lttng_rb_set_hp_prepare(0);
	cpuhp_remove_multi_state(lttng_hp_prepare);
}
3935
3936 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
/* No-op fallback when the multi-instance cpu hotplug API is unavailable. */
static int lttng_init_cpu_hotplug(void)
{
	return 0;
}
/* Fallback counterpart of lttng_exit_cpu_hotplug(); nothing to undo. */
static void lttng_exit_cpu_hotplug(void)
{
}
3944 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
3945
3946
/*
 * Module entry point: initialize wrappers, probes, contexts,
 * tracepoints, slab caches, the ABI, the logger and cpu hotplug,
 * unwinding in reverse order on any failure.
 */
static int __init lttng_events_init(void)
{
	int ret;

	/* Resolve kernel symbol wrappers needed by the tracer first. */
	ret = wrapper_lttng_fixup_sig(THIS_MODULE);
	if (ret)
		return ret;
	ret = wrapper_get_pfnblock_flags_mask_init();
	if (ret)
		return ret;
	ret = wrapper_get_pageblock_flags_mask_init();
	if (ret)
		return ret;
	ret = lttng_probes_init();
	if (ret)
		return ret;
	ret = lttng_context_init();
	if (ret)
		return ret;
	ret = lttng_tracepoint_init();
	if (ret)
		goto error_tp;
	/* Slab caches backing event and event-notifier structures. */
	event_cache = KMEM_CACHE(lttng_event, 0);
	if (!event_cache) {
		ret = -ENOMEM;
		goto error_kmem_event;
	}
	event_notifier_cache = KMEM_CACHE(lttng_event_notifier, 0);
	if (!event_notifier_cache) {
		ret = -ENOMEM;
		goto error_kmem_event_notifier;
	}
	ret = lttng_abi_init();
	if (ret)
		goto error_abi;
	ret = lttng_logger_init();
	if (ret)
		goto error_logger;
	ret = lttng_init_cpu_hotplug();
	if (ret)
		goto error_hotplug;
	printk(KERN_NOTICE "LTTng: Loaded modules v%s.%s.%s%s (%s)%s%s\n",
		__stringify(LTTNG_MODULES_MAJOR_VERSION),
		__stringify(LTTNG_MODULES_MINOR_VERSION),
		__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
		LTTNG_MODULES_EXTRAVERSION,
		LTTNG_VERSION_NAME,
#ifdef LTTNG_EXTRA_VERSION_GIT
		LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
#else
		"",
#endif
#ifdef LTTNG_EXTRA_VERSION_NAME
		LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
#else
		"");
#endif
	return 0;

	/* Error unwind: reverse order of the successful init steps above. */
error_hotplug:
	lttng_logger_exit();
error_logger:
	lttng_abi_exit();
error_abi:
	kmem_cache_destroy(event_notifier_cache);
error_kmem_event_notifier:
	kmem_cache_destroy(event_cache);
error_kmem_event:
	lttng_tracepoint_exit();
error_tp:
	lttng_context_exit();
	printk(KERN_NOTICE "LTTng: Failed to load modules v%s.%s.%s%s (%s)%s%s\n",
		__stringify(LTTNG_MODULES_MAJOR_VERSION),
		__stringify(LTTNG_MODULES_MINOR_VERSION),
		__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
		LTTNG_MODULES_EXTRAVERSION,
		LTTNG_VERSION_NAME,
#ifdef LTTNG_EXTRA_VERSION_GIT
		LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
#else
		"",
#endif
#ifdef LTTNG_EXTRA_VERSION_NAME
		LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
#else
		"");
#endif
	return ret;
}
4036
4037 module_init(lttng_events_init);
4038
/*
 * Module exit point: tear down subsystems in reverse of init order and
 * destroy any sessions still on the global list.
 */
static void __exit lttng_events_exit(void)
{
	struct lttng_session *session, *tmpsession;

	lttng_exit_cpu_hotplug();
	lttng_logger_exit();
	lttng_abi_exit();
	/* Safe iteration: lttng_session_destroy unlinks each session. */
	list_for_each_entry_safe(session, tmpsession, &sessions, list)
		lttng_session_destroy(session);
	kmem_cache_destroy(event_cache);
	kmem_cache_destroy(event_notifier_cache);
	lttng_tracepoint_exit();
	lttng_context_exit();
	printk(KERN_NOTICE "LTTng: Unloaded modules v%s.%s.%s%s (%s)%s%s\n",
		__stringify(LTTNG_MODULES_MAJOR_VERSION),
		__stringify(LTTNG_MODULES_MINOR_VERSION),
		__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
		LTTNG_MODULES_EXTRAVERSION,
		LTTNG_VERSION_NAME,
#ifdef LTTNG_EXTRA_VERSION_GIT
		LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
#else
		"",
#endif
#ifdef LTTNG_EXTRA_VERSION_NAME
		LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
#else
		"");
#endif
}
4069
4070 module_exit(lttng_events_exit);
4071
4072 #include <generated/patches.h>
4073 #ifdef LTTNG_EXTRA_VERSION_GIT
4074 MODULE_INFO(extra_version_git, LTTNG_EXTRA_VERSION_GIT);
4075 #endif
4076 #ifdef LTTNG_EXTRA_VERSION_NAME
4077 MODULE_INFO(extra_version_name, LTTNG_EXTRA_VERSION_NAME);
4078 #endif
4079 MODULE_LICENSE("GPL and additional rights");
4080 MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
4081 MODULE_DESCRIPTION("LTTng tracer");
4082 MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
4083 __stringify(LTTNG_MODULES_MINOR_VERSION) "."
4084 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
4085 LTTNG_MODULES_EXTRAVERSION);