Implement event notifiers for syscalls
[lttng-modules.git] / src / lttng-events.c
1 /* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
2 *
3 * lttng-events.c
4 *
5 * Holds LTTng per-session event registry.
6 *
7 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 */
9
10 /*
11 * This page_alloc.h wrapper needs to be included before gfpflags.h because it
12 * overrides a function with a define.
13 */
14 #include "wrapper/page_alloc.h"
15
16 #include <linux/module.h>
17 #include <linux/mutex.h>
18 #include <linux/sched.h>
19 #include <linux/slab.h>
20 #include <linux/jiffies.h>
21 #include <linux/utsname.h>
22 #include <linux/err.h>
23 #include <linux/seq_file.h>
24 #include <linux/file.h>
25 #include <linux/anon_inodes.h>
26 #include <wrapper/file.h>
27 #include <linux/uaccess.h>
28 #include <linux/vmalloc.h>
29 #include <linux/dmi.h>
30
31 #include <wrapper/uuid.h>
32 #include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_mappings() */
33 #include <wrapper/random.h>
34 #include <wrapper/tracepoint.h>
35 #include <wrapper/list.h>
36 #include <wrapper/types.h>
37 #include <lttng/kernel-version.h>
38 #include <lttng/events.h>
39 #include <lttng/tracer.h>
40 #include <lttng/event-notifier-notification.h>
41 #include <lttng/abi-old.h>
42 #include <lttng/endian.h>
43 #include <lttng/string-utils.h>
44 #include <lttng/utils.h>
45 #include <ringbuffer/backend.h>
46 #include <ringbuffer/frontend.h>
47 #include <wrapper/time.h>
48
/* Initial allocation size of the per-session metadata cache buffer. */
#define METADATA_CACHE_DEFAULT_SIZE 4096

/* Global registries, all protected by sessions_mutex. */
static LIST_HEAD(sessions);			/* All tracing sessions. */
static LIST_HEAD(event_notifier_groups);	/* All event notifier groups. */
static LIST_HEAD(lttng_transport_list);		/* Registered ring-buffer transports. */
/*
 * Protect the sessions and metadata caches.
 */
static DEFINE_MUTEX(sessions_mutex);
/* Slab caches for event and event notifier objects. */
static struct kmem_cache *event_cache;
static struct kmem_cache *event_notifier_cache;

/* Enabler synchronization (match enablers against events/notifiers). */
static void lttng_session_lazy_sync_event_enablers(struct lttng_session *session);
static void lttng_session_sync_event_enablers(struct lttng_session *session);
static void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler);
static void lttng_event_notifier_enabler_destroy(struct lttng_event_notifier_enabler *event_notifier_enabler);
static void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group);

/* Internal teardown helpers; callers hold sessions_mutex. */
static void _lttng_event_destroy(struct lttng_event *event);
static void _lttng_event_notifier_destroy(struct lttng_event_notifier *event_notifier);
static void _lttng_channel_destroy(struct lttng_channel *chan);
static int _lttng_event_unregister(struct lttng_event *event);
static int _lttng_event_notifier_unregister(struct lttng_event_notifier *event_notifier);
/* Metadata (CTF description) statedump helpers. */
static
int _lttng_event_metadata_statedump(struct lttng_session *session,
				  struct lttng_channel *chan,
				  struct lttng_event *event);
static
int _lttng_session_metadata_statedump(struct lttng_session *session);
static
void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream);
static
int _lttng_type_statedump(struct lttng_session *session,
		const struct lttng_type *type,
		size_t nesting);
static
int _lttng_field_statedump(struct lttng_session *session,
		const struct lttng_event_field *field,
		size_t nesting);
88
/*
 * Wait for all currently-executing probe callers to complete, so that
 * events/probes can be torn down safely afterwards.
 *
 * The RCU flavor used by tracepoints changed across kernel versions:
 * 5.1+ consolidated sched-RCU into regular RCU, older kernels need
 * synchronize_sched(). On PREEMPT_RT (config symbol renamed at 3.4),
 * regular RCU must also be synchronized.
 */
void synchronize_trace(void)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,1,0))
	synchronize_rcu();
#else
	synchronize_sched();
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
#ifdef CONFIG_PREEMPT_RT_FULL
	synchronize_rcu();
#endif
#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
#ifdef CONFIG_PREEMPT_RT
	synchronize_rcu();
#endif
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
}
107
/* Take the global sessions mutex (exported locking entry point). */
void lttng_lock_sessions(void)
{
	mutex_lock(&sessions_mutex);
}
112
/* Release the global sessions mutex. */
void lttng_unlock_sessions(void)
{
	mutex_unlock(&sessions_mutex);
}
117
118 static struct lttng_transport *lttng_transport_find(const char *name)
119 {
120 struct lttng_transport *transport;
121
122 list_for_each_entry(transport, &lttng_transport_list, node) {
123 if (!strcmp(transport->name, name))
124 return transport;
125 }
126 return NULL;
127 }
128
129 /*
130 * Called with sessions lock held.
131 */
132 int lttng_session_active(void)
133 {
134 struct lttng_session *iter;
135
136 list_for_each_entry(iter, &sessions, list) {
137 if (iter->active)
138 return 1;
139 }
140 return 0;
141 }
142
/*
 * Create a new tracing session and its metadata cache, and link the
 * session into the global sessions list.
 *
 * Returns the new session, or NULL on allocation failure. The session
 * is created inactive; ID trackers are initialized to track nothing.
 */
struct lttng_session *lttng_session_create(void)
{
	struct lttng_session *session;
	struct lttng_metadata_cache *metadata_cache;
	int i;

	mutex_lock(&sessions_mutex);
	session = lttng_kvzalloc(sizeof(struct lttng_session), GFP_KERNEL);
	if (!session)
		goto err;
	INIT_LIST_HEAD(&session->chan);
	INIT_LIST_HEAD(&session->events);
	/* Unique trace identifier, also copied into the metadata cache below. */
	lttng_guid_gen(&session->uuid);

	metadata_cache = kzalloc(sizeof(struct lttng_metadata_cache),
			GFP_KERNEL);
	if (!metadata_cache)
		goto err_free_session;
	metadata_cache->data = vzalloc(METADATA_CACHE_DEFAULT_SIZE);
	if (!metadata_cache->data)
		goto err_free_cache;
	metadata_cache->cache_alloc = METADATA_CACHE_DEFAULT_SIZE;
	/* Cache is refcounted: shared with metadata streams until last put. */
	kref_init(&metadata_cache->refcount);
	mutex_init(&metadata_cache->lock);
	session->metadata_cache = metadata_cache;
	INIT_LIST_HEAD(&metadata_cache->metadata_stream);
	memcpy(&metadata_cache->uuid, &session->uuid,
		sizeof(metadata_cache->uuid));
	INIT_LIST_HEAD(&session->enablers_head);
	/* Hash table for fast event lookup by name. */
	for (i = 0; i < LTTNG_EVENT_HT_SIZE; i++)
		INIT_HLIST_HEAD(&session->events_ht.table[i]);
	list_add(&session->list, &sessions);
	/* Back-pointers and type tags for the six process/user ID trackers. */
	session->pid_tracker.session = session;
	session->pid_tracker.tracker_type = TRACKER_PID;
	session->vpid_tracker.session = session;
	session->vpid_tracker.tracker_type = TRACKER_VPID;
	session->uid_tracker.session = session;
	session->uid_tracker.tracker_type = TRACKER_UID;
	session->vuid_tracker.session = session;
	session->vuid_tracker.tracker_type = TRACKER_VUID;
	session->gid_tracker.session = session;
	session->gid_tracker.tracker_type = TRACKER_GID;
	session->vgid_tracker.session = session;
	session->vgid_tracker.tracker_type = TRACKER_VGID;
	mutex_unlock(&sessions_mutex);
	return session;

err_free_cache:
	kfree(metadata_cache);
err_free_session:
	lttng_kvfree(session);
err:
	mutex_unlock(&sessions_mutex);
	return NULL;
}
198
/*
 * Create an event notifier group backed by a ring buffer channel used
 * to transport notifier notifications to user space.
 *
 * Takes a reference on the transport module for the lifetime of the
 * group. Returns the new group, or NULL on error (transport missing,
 * module unavailable, allocation or channel creation failure).
 */
struct lttng_event_notifier_group *lttng_event_notifier_group_create(void)
{
	struct lttng_transport *transport = NULL;
	struct lttng_event_notifier_group *event_notifier_group;
	const char *transport_name = "relay-event-notifier";
	size_t subbuf_size = 4096;	//TODO
	size_t num_subbuf = 16;		//TODO
	unsigned int switch_timer_interval = 0;
	unsigned int read_timer_interval = 0;
	int i;

	mutex_lock(&sessions_mutex);

	transport = lttng_transport_find(transport_name);
	if (!transport) {
		printk(KERN_WARNING "LTTng: transport %s not found\n",
		       transport_name);
		goto notransport;
	}
	/* Pin the transport module so its ops stay valid while the group lives. */
	if (!try_module_get(transport->owner)) {
		printk(KERN_WARNING "LTTng: Can't lock transport %s module.\n",
		       transport_name);
		goto notransport;
	}

	event_notifier_group = lttng_kvzalloc(sizeof(struct lttng_event_notifier_group),
				       GFP_KERNEL);
	if (!event_notifier_group)
		goto nomem;

	/*
	 * Initialize the ring buffer used to store event notifier
	 * notifications.
	 */
	event_notifier_group->ops = &transport->ops;
	event_notifier_group->chan = transport->ops.channel_create(
			transport_name, event_notifier_group, NULL,
			subbuf_size, num_subbuf, switch_timer_interval,
			read_timer_interval);
	if (!event_notifier_group->chan)
		goto create_error;

	event_notifier_group->transport = transport;

	INIT_LIST_HEAD(&event_notifier_group->enablers_head);
	INIT_LIST_HEAD(&event_notifier_group->event_notifiers_head);
	/* Hash table for fast notifier lookup by name. */
	for (i = 0; i < LTTNG_EVENT_NOTIFIER_HT_SIZE; i++)
		INIT_HLIST_HEAD(&event_notifier_group->event_notifiers_ht.table[i]);

	list_add(&event_notifier_group->node, &event_notifier_groups);

	mutex_unlock(&sessions_mutex);

	return event_notifier_group;

create_error:
	lttng_kvfree(event_notifier_group);
nomem:
	/* Drop the module reference taken above on any failure past it. */
	if (transport)
		module_put(transport->owner);
notransport:
	mutex_unlock(&sessions_mutex);
	return NULL;
}
263
264 void metadata_cache_destroy(struct kref *kref)
265 {
266 struct lttng_metadata_cache *cache =
267 container_of(kref, struct lttng_metadata_cache, refcount);
268 vfree(cache->data);
269 kfree(cache);
270 }
271
/*
 * Tear down a tracing session: unregister all probes, wait for
 * in-flight probe callers, then free enablers, events, channels and
 * trackers, and drop the metadata cache reference.
 *
 * Ordering matters: all event/syscall unregistration must complete and
 * synchronize_trace() must return before any event/channel memory is
 * freed, otherwise in-flight probe callers could touch freed memory.
 */
void lttng_session_destroy(struct lttng_session *session)
{
	struct lttng_channel *chan, *tmpchan;
	struct lttng_event *event, *tmpevent;
	struct lttng_metadata_stream *metadata_stream;
	struct lttng_event_enabler *event_enabler, *tmp_event_enabler;
	int ret;

	mutex_lock(&sessions_mutex);
	WRITE_ONCE(session->active, 0);
	list_for_each_entry(chan, &session->chan, list) {
		ret = lttng_syscalls_unregister_event(chan);
		WARN_ON(ret);
	}
	list_for_each_entry(event, &session->events, list) {
		ret = _lttng_event_unregister(event);
		WARN_ON(ret);
	}
	synchronize_trace();	/* Wait for in-flight events to complete */
	list_for_each_entry(chan, &session->chan, list) {
		ret = lttng_syscalls_destroy_event(chan);
		WARN_ON(ret);
	}
	list_for_each_entry_safe(event_enabler, tmp_event_enabler,
			&session->enablers_head, node)
		lttng_event_enabler_destroy(event_enabler);
	list_for_each_entry_safe(event, tmpevent, &session->events, list)
		_lttng_event_destroy(event);
	list_for_each_entry_safe(chan, tmpchan, &session->chan, list) {
		/* Metadata channels are owned/released separately. */
		BUG_ON(chan->channel_type == METADATA_CHANNEL);
		_lttng_channel_destroy(chan);
	}
	/* Wake up readers blocked on metadata streams so they can exit. */
	mutex_lock(&session->metadata_cache->lock);
	list_for_each_entry(metadata_stream, &session->metadata_cache->metadata_stream, list)
		_lttng_metadata_channel_hangup(metadata_stream);
	mutex_unlock(&session->metadata_cache->lock);
	lttng_id_tracker_destroy(&session->pid_tracker, false);
	lttng_id_tracker_destroy(&session->vpid_tracker, false);
	lttng_id_tracker_destroy(&session->uid_tracker, false);
	lttng_id_tracker_destroy(&session->vuid_tracker, false);
	lttng_id_tracker_destroy(&session->gid_tracker, false);
	lttng_id_tracker_destroy(&session->vgid_tracker, false);
	/* Cache may outlive the session if streams still hold references. */
	kref_put(&session->metadata_cache->refcount, metadata_cache_destroy);
	list_del(&session->list);
	mutex_unlock(&sessions_mutex);
	lttng_kvfree(session);
}
319
/*
 * Tear down an event notifier group: unregister all notifiers, wait
 * for in-flight notifier callers and pending wakeup irq_work, then
 * free enablers/notifiers, destroy the notification channel, and drop
 * the transport module reference. NULL group is a no-op.
 */
void lttng_event_notifier_group_destroy(
		struct lttng_event_notifier_group *event_notifier_group)
{
	struct lttng_event_notifier_enabler *event_notifier_enabler, *tmp_event_notifier_enabler;
	struct lttng_event_notifier *event_notifier, *tmpevent_notifier;
	int ret;

	if (!event_notifier_group)
		return;

	mutex_lock(&sessions_mutex);

	ret = lttng_syscalls_unregister_event_notifier(event_notifier_group);
	WARN_ON(ret);

	list_for_each_entry_safe(event_notifier, tmpevent_notifier,
			&event_notifier_group->event_notifiers_head, list) {
		ret = _lttng_event_notifier_unregister(event_notifier);
		WARN_ON(ret);
	}

	/* Wait for in-flight event notifier to complete */
	synchronize_trace();

	/* Flush any queued reader-wakeup work before freeing the group. */
	irq_work_sync(&event_notifier_group->wakeup_pending);

	list_for_each_entry_safe(event_notifier_enabler, tmp_event_notifier_enabler,
			&event_notifier_group->enablers_head, node)
		lttng_event_notifier_enabler_destroy(event_notifier_enabler);

	list_for_each_entry_safe(event_notifier, tmpevent_notifier,
			&event_notifier_group->event_notifiers_head, list)
		_lttng_event_notifier_destroy(event_notifier);

	event_notifier_group->ops->channel_destroy(event_notifier_group->chan);
	module_put(event_notifier_group->transport->owner);
	list_del(&event_notifier_group->node);

	mutex_unlock(&sessions_mutex);
	lttng_kvfree(event_notifier_group);
}
361
/*
 * Trigger a state dump for the given session, serialized against
 * other session operations by sessions_mutex.
 * Returns the lttng_statedump_start() result.
 */
int lttng_session_statedump(struct lttng_session *session)
{
	int ret;

	mutex_lock(&sessions_mutex);
	ret = lttng_statedump_start(session);
	mutex_unlock(&sessions_mutex);
	return ret;
}
371
/*
 * Activate a tracing session: sync enablers, pick per-channel event
 * header type, clear stream quiescence, mark the session active, then
 * dump metadata and kernel state.
 *
 * Returns 0 on success, -EBUSY if already active, or the statedump
 * error (in which case the session is deactivated again).
 */
int lttng_session_enable(struct lttng_session *session)
{
	int ret = 0;
	struct lttng_channel *chan;

	mutex_lock(&sessions_mutex);
	if (session->active) {
		ret = -EBUSY;
		goto end;
	}

	/* Set transient enabler state to "enabled" */
	session->tstate = 1;

	/* We need to sync enablers with session before activation. */
	lttng_session_sync_event_enablers(session);

	/*
	 * Snapshot the number of events per channel to know the type of header
	 * we need to use.
	 */
	list_for_each_entry(chan, &session->chan, list) {
		if (chan->header_type)
			continue;	/* don't change it if session stop/restart */
		/* Compact headers can only encode up to 31 distinct event IDs. */
		if (chan->free_event_id < 31)
			chan->header_type = 1;	/* compact */
		else
			chan->header_type = 2;	/* large */
	}

	/* Clear each stream's quiescent state. */
	list_for_each_entry(chan, &session->chan, list) {
		if (chan->channel_type != METADATA_CHANNEL)
			lib_ring_buffer_clear_quiescent_channel(chan->chan);
	}

	WRITE_ONCE(session->active, 1);
	WRITE_ONCE(session->been_active, 1);
	ret = _lttng_session_metadata_statedump(session);
	if (ret) {
		/* Roll back activation if the metadata dump failed. */
		WRITE_ONCE(session->active, 0);
		goto end;
	}
	ret = lttng_statedump_start(session);
	if (ret)
		WRITE_ONCE(session->active, 0);
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
422
/*
 * Deactivate a tracing session: clear the active flag, sync enablers
 * to the disabled transient state, and quiesce all data streams.
 * Returns 0 on success, -EBUSY if the session is not active.
 */
int lttng_session_disable(struct lttng_session *session)
{
	int ret = 0;
	struct lttng_channel *chan;

	mutex_lock(&sessions_mutex);
	if (!session->active) {
		ret = -EBUSY;
		goto end;
	}
	WRITE_ONCE(session->active, 0);

	/* Set transient enabler state to "disabled" */
	session->tstate = 0;
	lttng_session_sync_event_enablers(session);

	/* Set each stream's quiescent state. */
	list_for_each_entry(chan, &session->chan, list) {
		if (chan->channel_type != METADATA_CHANNEL)
			lib_ring_buffer_set_quiescent_channel(chan->chan);
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
448
/*
 * Discard and regenerate the session's metadata: reset the cache and
 * all metadata streams, clear the per-session/channel/event "dumped"
 * flags, then redo the full metadata statedump.
 * Returns 0 on success, -EBUSY if the session is not active.
 */
int lttng_session_metadata_regenerate(struct lttng_session *session)
{
	int ret = 0;
	struct lttng_channel *chan;
	struct lttng_event *event;
	struct lttng_metadata_cache *cache = session->metadata_cache;
	struct lttng_metadata_stream *stream;

	mutex_lock(&sessions_mutex);
	if (!session->active) {
		ret = -EBUSY;
		goto end;
	}

	mutex_lock(&cache->lock);
	memset(cache->data, 0, cache->cache_alloc);
	cache->metadata_written = 0;
	/* Bump version so consumers detect the regeneration. */
	cache->version++;
	list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list) {
		stream->metadata_out = 0;
		stream->metadata_in = 0;
	}
	mutex_unlock(&cache->lock);

	/* Force every object to be re-described by the statedump below. */
	session->metadata_dumped = 0;
	list_for_each_entry(chan, &session->chan, list) {
		chan->metadata_dumped = 0;
	}

	list_for_each_entry(event, &session->events, list) {
		event->metadata_dumped = 0;
	}

	ret = _lttng_session_metadata_statedump(session);

end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
488
/*
 * Enable a data channel. Not permitted on metadata channels.
 * Returns 0 on success, -EPERM for metadata channels, -EEXIST if the
 * channel is already enabled.
 */
int lttng_channel_enable(struct lttng_channel *channel)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (channel->channel_type == METADATA_CHANNEL) {
		ret = -EPERM;
		goto end;
	}
	if (channel->enabled) {
		ret = -EEXIST;
		goto end;
	}
	/* Set transient enabler state to "enabled" */
	channel->tstate = 1;
	/* Sync enablers before publishing the enabled state. */
	lttng_session_sync_event_enablers(channel->session);
	/* Set atomically the state to "enabled" */
	WRITE_ONCE(channel->enabled, 1);
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
511
/*
 * Disable a data channel. Not permitted on metadata channels.
 * Returns 0 on success, -EPERM for metadata channels, -EEXIST if the
 * channel is already disabled.
 *
 * Note the ordering is the mirror of lttng_channel_enable(): the
 * enabled flag is cleared *before* syncing enablers.
 */
int lttng_channel_disable(struct lttng_channel *channel)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (channel->channel_type == METADATA_CHANNEL) {
		ret = -EPERM;
		goto end;
	}
	if (!channel->enabled) {
		ret = -EEXIST;
		goto end;
	}
	/* Set atomically the state to "disabled" */
	WRITE_ONCE(channel->enabled, 0);
	/* Set transient enabler state to "disabled" */
	channel->tstate = 0;
	lttng_session_sync_event_enablers(channel->session);
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
534
/*
 * Enable a single event. Only kprobe, uprobe, noop and kretprobe
 * instrumentation support direct per-event enabling; tracepoint and
 * syscall events are driven by enablers instead (-EINVAL here).
 * Returns 0 on success, -EPERM on metadata channels, -EEXIST if
 * already enabled.
 */
int lttng_event_enable(struct lttng_event *event)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (event->chan->channel_type == METADATA_CHANNEL) {
		ret = -EPERM;
		goto end;
	}
	if (event->enabled) {
		ret = -EEXIST;
		goto end;
	}
	switch (event->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
	case LTTNG_KERNEL_SYSCALL:
		/* Controlled via enablers, not per-event. */
		ret = -EINVAL;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
	case LTTNG_KERNEL_NOOP:
		WRITE_ONCE(event->enabled, 1);
		break;
	case LTTNG_KERNEL_KRETPROBE:
		/* Kretprobes pair entry/return events; helper syncs both. */
		ret = lttng_kretprobes_event_enable_state(event, 1);
		break;
	case LTTNG_KERNEL_FUNCTION:	/* Fall-through. */
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
570
/*
 * Disable a single event; mirror of lttng_event_enable().
 * Returns 0 on success, -EPERM on metadata channels, -EEXIST if
 * already disabled, -EINVAL for enabler-driven instrumentation.
 */
int lttng_event_disable(struct lttng_event *event)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (event->chan->channel_type == METADATA_CHANNEL) {
		ret = -EPERM;
		goto end;
	}
	if (!event->enabled) {
		ret = -EEXIST;
		goto end;
	}
	switch (event->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
	case LTTNG_KERNEL_SYSCALL:
		/* Controlled via enablers, not per-event. */
		ret = -EINVAL;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
	case LTTNG_KERNEL_NOOP:
		WRITE_ONCE(event->enabled, 0);
		break;
	case LTTNG_KERNEL_KRETPROBE:
		/* Kretprobes pair entry/return events; helper syncs both. */
		ret = lttng_kretprobes_event_enable_state(event, 0);
		break;
	case LTTNG_KERNEL_FUNCTION:	/* Fall-through. */
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
606
/*
 * Enable a single event notifier. Only kprobe and uprobe notifiers
 * support direct enabling; tracepoint and syscall notifiers are
 * driven by enablers (-EINVAL here).
 * Returns 0 on success, -EEXIST if already enabled.
 */
int lttng_event_notifier_enable(struct lttng_event_notifier *event_notifier)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (event_notifier->enabled) {
		ret = -EEXIST;
		goto end;
	}
	switch (event_notifier->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
	case LTTNG_KERNEL_SYSCALL:
		/* Controlled via enablers, not per-notifier. */
		ret = -EINVAL;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
		WRITE_ONCE(event_notifier->enabled, 1);
		break;
	case LTTNG_KERNEL_FUNCTION:
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_KRETPROBE:
	default:
		/* Not valid instrumentation types for notifiers. */
		WARN_ON_ONCE(1);
		ret = -EINVAL;
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
636
/*
 * Disable a single event notifier; mirror of
 * lttng_event_notifier_enable().
 * Returns 0 on success, -EEXIST if already disabled.
 */
int lttng_event_notifier_disable(struct lttng_event_notifier *event_notifier)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (!event_notifier->enabled) {
		ret = -EEXIST;
		goto end;
	}
	switch (event_notifier->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
	case LTTNG_KERNEL_SYSCALL:
		/* Controlled via enablers, not per-notifier. */
		ret = -EINVAL;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
		WRITE_ONCE(event_notifier->enabled, 0);
		break;
	case LTTNG_KERNEL_FUNCTION:
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_KRETPROBE:
	default:
		/* Not valid instrumentation types for notifiers. */
		WARN_ON_ONCE(1);
		ret = -EINVAL;
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
666
/*
 * Create a channel in a session using the named transport.
 *
 * Refused (returns NULL) if the session has ever been active and the
 * requested channel is not a metadata channel, if the transport is not
 * found or its module cannot be pinned, or on allocation/creation
 * failure. Holds a transport module reference for the channel's life.
 */
struct lttng_channel *lttng_channel_create(struct lttng_session *session,
				       const char *transport_name,
				       void *buf_addr,
				       size_t subbuf_size, size_t num_subbuf,
				       unsigned int switch_timer_interval,
				       unsigned int read_timer_interval,
				       enum channel_type channel_type)
{
	struct lttng_channel *chan;
	struct lttng_transport *transport = NULL;

	mutex_lock(&sessions_mutex);
	if (session->been_active && channel_type != METADATA_CHANNEL)
		goto active;	/* Refuse to add channel to active session */
	transport = lttng_transport_find(transport_name);
	if (!transport) {
		printk(KERN_WARNING "LTTng: transport %s not found\n",
		       transport_name);
		goto notransport;
	}
	if (!try_module_get(transport->owner)) {
		printk(KERN_WARNING "LTTng: Can't lock transport module.\n");
		goto notransport;
	}
	chan = kzalloc(sizeof(struct lttng_channel), GFP_KERNEL);
	if (!chan)
		goto nomem;
	chan->session = session;
	chan->id = session->free_chan_id++;
	chan->ops = &transport->ops;
	/*
	 * Note: the channel creation op already writes into the packet
	 * headers. Therefore the "chan" information used as input
	 * should be already accessible.
	 */
	chan->chan = transport->ops.channel_create(transport_name,
			chan, buf_addr, subbuf_size, num_subbuf,
			switch_timer_interval, read_timer_interval);
	if (!chan->chan)
		goto create_error;
	/* Channels start enabled; events still gate individually. */
	chan->tstate = 1;
	chan->enabled = 1;
	chan->transport = transport;
	chan->channel_type = channel_type;
	list_add(&chan->list, &session->chan);
	mutex_unlock(&sessions_mutex);
	return chan;

create_error:
	kfree(chan);
nomem:
	/* Drop the module reference taken above on any failure past it. */
	if (transport)
		module_put(transport->owner);
notransport:
active:
	mutex_unlock(&sessions_mutex);
	return NULL;
}
725
/*
 * Only used internally at session destruction for per-cpu channels, and
 * when metadata channel is released.
 * Needs to be called with sessions mutex held.
 */
static
void _lttng_channel_destroy(struct lttng_channel *chan)
{
	/* Destroy the ring buffer before releasing the transport module. */
	chan->ops->channel_destroy(chan->chan);
	module_put(chan->transport->owner);
	list_del(&chan->list);
	lttng_destroy_context(chan->ctx);
	kfree(chan);
}
740
/*
 * Public release path for metadata channels only (data channels are
 * destroyed through lttng_session_destroy()).
 */
void lttng_metadata_channel_destroy(struct lttng_channel *chan)
{
	BUG_ON(chan->channel_type != METADATA_CHANNEL);

	/* Protect the metadata cache with the sessions_mutex. */
	mutex_lock(&sessions_mutex);
	_lttng_channel_destroy(chan);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_metadata_channel_destroy);
751
/*
 * Mark a metadata stream as finalized and wake up any blocked reader
 * so it can observe the hangup and exit. Caller holds the metadata
 * cache lock.
 */
static
void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream)
{
	stream->finalized = 1;
	wake_up_interruptible(&stream->read_wait);
}
758
/*
 * Supports event creation while tracing session is active.
 * Needs to be called with sessions mutex held.
 *
 * Creates an event of the given instrumentation type in @chan,
 * registers the underlying probe where applicable, dumps the event's
 * metadata description, and links the event into the session's hash
 * table and event list.
 *
 * Returns the new event, or ERR_PTR: -EMFILE when the channel ran out
 * of event IDs, -EEXIST for a duplicate name on the same channel,
 * -ENOMEM/-ENOENT/-EINVAL on allocation, lookup or registration
 * failure.
 */
struct lttng_event *_lttng_event_create(struct lttng_channel *chan,
				struct lttng_kernel_event *event_param,
				void *filter,
				const struct lttng_event_desc *event_desc,
				enum lttng_kernel_instrumentation itype)
{
	struct lttng_session *session = chan->session;
	struct lttng_event *event;
	const char *event_name;
	struct hlist_head *head;
	int ret;

	if (chan->free_event_id == -1U) {
		ret = -EMFILE;
		goto full;
	}

	/* Resolve the event name depending on instrumentation type. */
	switch (itype) {
	case LTTNG_KERNEL_TRACEPOINT:
		event_name = event_desc->name;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
	case LTTNG_KERNEL_KRETPROBE:
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_SYSCALL:
		event_name = event_param->name;
		break;
	case LTTNG_KERNEL_FUNCTION:	/* Fall-through. */
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
		goto type_error;
	}

	/* Reject duplicate event name on the same channel. */
	head = utils_borrow_hash_table_bucket(session->events_ht.table,
		LTTNG_EVENT_HT_SIZE, event_name);
	lttng_hlist_for_each_entry(event, head, hlist) {
		WARN_ON_ONCE(!event->desc);
		if (!strncmp(event->desc->name, event_name,
					LTTNG_KERNEL_SYM_NAME_LEN - 1)
				&& chan == event->chan) {
			ret = -EEXIST;
			goto exist;
		}
	}

	event = kmem_cache_zalloc(event_cache, GFP_KERNEL);
	if (!event) {
		ret = -ENOMEM;
		goto cache_error;
	}
	event->chan = chan;
	event->filter = filter;
	event->id = chan->free_event_id++;
	event->instrumentation = itype;
	event->evtype = LTTNG_TYPE_EVENT;
	INIT_LIST_HEAD(&event->bytecode_runtime_head);
	INIT_LIST_HEAD(&event->enablers_ref_head);

	switch (itype) {
	case LTTNG_KERNEL_TRACEPOINT:
		/* Event will be enabled by enabler sync. */
		event->enabled = 0;
		event->registered = 0;
		event->desc = lttng_event_desc_get(event_name);
		if (!event->desc) {
			ret = -ENOENT;
			goto register_error;
		}
		/* Populate lttng_event structure before event registration. */
		smp_wmb();
		break;
	case LTTNG_KERNEL_KPROBE:
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->registered = 1;
		/*
		 * Populate lttng_event structure before event
		 * registration.
		 */
		smp_wmb();
		ret = lttng_kprobes_register_event(event_name,
				event_param->u.kprobe.symbol_name,
				event_param->u.kprobe.offset,
				event_param->u.kprobe.addr,
				event);
		if (ret) {
			ret = -EINVAL;
			goto register_error;
		}
		/* event->desc is set up by the kprobes registration above. */
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		break;
	case LTTNG_KERNEL_KRETPROBE:
	{
		struct lttng_event *event_return;

		/* kretprobe defines 2 events */
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->registered = 1;
		event_return =
			kmem_cache_zalloc(event_cache, GFP_KERNEL);
		if (!event_return) {
			ret = -ENOMEM;
			goto register_error;
		}
		event_return->chan = chan;
		event_return->filter = filter;
		event_return->id = chan->free_event_id++;
		event_return->enabled = 0;
		event_return->registered = 1;
		event_return->instrumentation = itype;
		/*
		 * Populate lttng_event structure before kretprobe registration.
		 */
		smp_wmb();
		ret = lttng_kretprobes_register(event_name,
				event_param->u.kretprobe.symbol_name,
				event_param->u.kretprobe.offset,
				event_param->u.kretprobe.addr,
				event, event_return);
		if (ret) {
			kmem_cache_free(event_cache, event_return);
			ret = -EINVAL;
			goto register_error;
		}
		/* Take 2 refs on the module: one per event. */
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		ret = _lttng_event_metadata_statedump(chan->session, chan,
						    event_return);
		WARN_ON_ONCE(ret > 0);
		if (ret) {
			kmem_cache_free(event_cache, event_return);
			module_put(event->desc->owner);
			module_put(event->desc->owner);
			goto statedump_error;
		}
		list_add(&event_return->list, &chan->session->events);
		break;
	}
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_SYSCALL:
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->registered = 0;
		event->desc = event_desc;
		/* "All" entry/exit and ABI selectors must be expanded by caller. */
		switch (event_param->u.syscall.entryexit) {
		case LTTNG_KERNEL_SYSCALL_ENTRYEXIT:
			ret = -EINVAL;
			goto register_error;
		case LTTNG_KERNEL_SYSCALL_ENTRY:
			event->u.syscall.entryexit = LTTNG_SYSCALL_ENTRY;
			break;
		case LTTNG_KERNEL_SYSCALL_EXIT:
			event->u.syscall.entryexit = LTTNG_SYSCALL_EXIT;
			break;
		}
		switch (event_param->u.syscall.abi) {
		case LTTNG_KERNEL_SYSCALL_ABI_ALL:
			ret = -EINVAL;
			goto register_error;
		case LTTNG_KERNEL_SYSCALL_ABI_NATIVE:
			event->u.syscall.abi = LTTNG_SYSCALL_ABI_NATIVE;
			break;
		case LTTNG_KERNEL_SYSCALL_ABI_COMPAT:
			event->u.syscall.abi = LTTNG_SYSCALL_ABI_COMPAT;
			break;
		}
		if (!event->desc) {
			ret = -EINVAL;
			goto register_error;
		}
		break;
	case LTTNG_KERNEL_UPROBE:
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->registered = 1;

		/*
		 * Populate lttng_event structure before event
		 * registration.
		 */
		smp_wmb();

		ret = lttng_uprobes_register_event(event_param->name,
				event_param->u.uprobe.fd,
				event);
		if (ret)
			goto register_error;
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		break;
	case LTTNG_KERNEL_FUNCTION:	/* Fall-through */
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
		goto register_error;
	}
	ret = _lttng_event_metadata_statedump(chan->session, chan, event);
	WARN_ON_ONCE(ret > 0);
	if (ret) {
		goto statedump_error;
	}
	/* Publish the event: hash bucket for lookup, list for iteration. */
	hlist_add_head(&event->hlist, head);
	list_add(&event->list, &chan->session->events);
	return event;

statedump_error:
	/* If a statedump error occurs, events will not be readable. */
register_error:
	kmem_cache_free(event_cache, event);
cache_error:
exist:
type_error:
full:
	return ERR_PTR(ret);
}
997
/*
 * Create an event notifier of the given instrumentation type in
 * @event_notifier_group, register the underlying probe where
 * applicable, and link the notifier into the group's list and hash
 * table. Needs to be called with sessions mutex held.
 *
 * @token is the user-provided identifier sent back with each
 * notification; duplicates are allowed across groups but a
 * (name, group, token) triple must be unique (-EEXIST otherwise).
 *
 * Returns the new notifier, or ERR_PTR on error (-ENOMEM, -ENOENT,
 * -EINVAL, -EEXIST).
 */
struct lttng_event_notifier *_lttng_event_notifier_create(
		const struct lttng_event_desc *event_desc,
		uint64_t token, struct lttng_event_notifier_group *event_notifier_group,
		struct lttng_kernel_event_notifier *event_notifier_param,
		void *filter, enum lttng_kernel_instrumentation itype)
{
	struct lttng_event_notifier *event_notifier;
	const char *event_name;
	struct hlist_head *head;
	int ret;

	/* Resolve the event name depending on instrumentation type. */
	switch (itype) {
	case LTTNG_KERNEL_TRACEPOINT:
		event_name = event_desc->name;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
	case LTTNG_KERNEL_SYSCALL:
		event_name = event_notifier_param->event.name;
		break;
	case LTTNG_KERNEL_KRETPROBE:
	case LTTNG_KERNEL_FUNCTION:
	case LTTNG_KERNEL_NOOP:
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
		goto type_error;
	}

	/* Reject duplicate (name, group, token) triples. */
	head = utils_borrow_hash_table_bucket(event_notifier_group->event_notifiers_ht.table,
		LTTNG_EVENT_NOTIFIER_HT_SIZE, event_name);
	lttng_hlist_for_each_entry(event_notifier, head, hlist) {
		WARN_ON_ONCE(!event_notifier->desc);
		if (!strncmp(event_notifier->desc->name, event_name,
					LTTNG_KERNEL_SYM_NAME_LEN - 1)
				&& event_notifier_group == event_notifier->group
				&& token == event_notifier->user_token) {
			ret = -EEXIST;
			goto exist;
		}
	}

	event_notifier = kmem_cache_zalloc(event_notifier_cache, GFP_KERNEL);
	if (!event_notifier) {
		ret = -ENOMEM;
		goto cache_error;
	}

	event_notifier->group = event_notifier_group;
	event_notifier->user_token = token;
	event_notifier->filter = filter;
	event_notifier->instrumentation = itype;
	event_notifier->evtype = LTTNG_TYPE_EVENT;
	event_notifier->send_notification = lttng_event_notifier_notification_send;
	INIT_LIST_HEAD(&event_notifier->bytecode_runtime_head);
	INIT_LIST_HEAD(&event_notifier->enablers_ref_head);

	switch (itype) {
	case LTTNG_KERNEL_TRACEPOINT:
		/* Event will be enabled by enabler sync. */
		event_notifier->enabled = 0;
		event_notifier->registered = 0;
		event_notifier->desc = lttng_event_desc_get(event_name);
		if (!event_notifier->desc) {
			ret = -ENOENT;
			goto register_error;
		}
		/* Populate lttng_event_notifier structure before event registration. */
		smp_wmb();
		break;
	case LTTNG_KERNEL_KPROBE:
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event_notifier->enabled = 0;
		event_notifier->registered = 1;
		/*
		 * Populate lttng_event_notifier structure before event
		 * registration.
		 */
		smp_wmb();
		ret = lttng_kprobes_register_event_notifier(
				event_notifier_param->event.u.kprobe.symbol_name,
				event_notifier_param->event.u.kprobe.offset,
				event_notifier_param->event.u.kprobe.addr,
				event_notifier);
		if (ret) {
			ret = -EINVAL;
			goto register_error;
		}
		/* desc is set up by the kprobes registration above. */
		ret = try_module_get(event_notifier->desc->owner);
		WARN_ON_ONCE(!ret);
		break;
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_SYSCALL:
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event_notifier->enabled = 0;
		event_notifier->registered = 0;
		event_notifier->desc = event_desc;
		if (!event_notifier->desc) {
			ret = -EINVAL;
			goto register_error;
		}
		break;
	case LTTNG_KERNEL_UPROBE:
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event_notifier->enabled = 0;
		event_notifier->registered = 1;

		/*
		 * Populate lttng_event_notifier structure before
		 * event_notifier registration.
		 */
		smp_wmb();

		ret = lttng_uprobes_register_event_notifier(
				event_notifier_param->event.name,
				event_notifier_param->event.u.uprobe.fd,
				event_notifier);
		if (ret)
			goto register_error;
		ret = try_module_get(event_notifier->desc->owner);
		WARN_ON_ONCE(!ret);
		break;
	case LTTNG_KERNEL_KRETPROBE:
	case LTTNG_KERNEL_FUNCTION:
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
		goto register_error;
	}

	/* Publish the notifier: list for iteration, hash bucket for lookup. */
	list_add(&event_notifier->list, &event_notifier_group->event_notifiers_head);
	hlist_add_head(&event_notifier->hlist, head);
	return event_notifier;

register_error:
	kmem_cache_free(event_notifier_cache, event_notifier);
cache_error:
exist:
type_error:
	return ERR_PTR(ret);
}
1148
1149 struct lttng_event *lttng_event_create(struct lttng_channel *chan,
1150 struct lttng_kernel_event *event_param,
1151 void *filter,
1152 const struct lttng_event_desc *event_desc,
1153 enum lttng_kernel_instrumentation itype)
1154 {
1155 struct lttng_event *event;
1156
1157 mutex_lock(&sessions_mutex);
1158 event = _lttng_event_create(chan, event_param, filter, event_desc,
1159 itype);
1160 mutex_unlock(&sessions_mutex);
1161 return event;
1162 }
1163
1164 struct lttng_event_notifier *lttng_event_notifier_create(
1165 const struct lttng_event_desc *event_desc,
1166 uint64_t id, struct lttng_event_notifier_group *event_notifier_group,
1167 struct lttng_kernel_event_notifier *event_notifier_param,
1168 void *filter, enum lttng_kernel_instrumentation itype)
1169 {
1170 struct lttng_event_notifier *event_notifier;
1171
1172 mutex_lock(&sessions_mutex);
1173 event_notifier = _lttng_event_notifier_create(event_desc, id,
1174 event_notifier_group, event_notifier_param, filter, itype);
1175 mutex_unlock(&sessions_mutex);
1176 return event_notifier;
1177 }
1178
1179 /* Only used for tracepoints for now. */
1180 static
1181 void register_event(struct lttng_event *event)
1182 {
1183 const struct lttng_event_desc *desc;
1184 int ret = -EINVAL;
1185
1186 if (event->registered)
1187 return;
1188
1189 desc = event->desc;
1190 switch (event->instrumentation) {
1191 case LTTNG_KERNEL_TRACEPOINT:
1192 ret = lttng_wrapper_tracepoint_probe_register(desc->kname,
1193 desc->probe_callback,
1194 event);
1195 break;
1196 case LTTNG_KERNEL_SYSCALL:
1197 ret = lttng_syscall_filter_enable_event(event->chan, event);
1198 break;
1199 case LTTNG_KERNEL_KPROBE:
1200 case LTTNG_KERNEL_UPROBE:
1201 case LTTNG_KERNEL_KRETPROBE:
1202 case LTTNG_KERNEL_NOOP:
1203 ret = 0;
1204 break;
1205 case LTTNG_KERNEL_FUNCTION: /* Fall-through */
1206 default:
1207 WARN_ON_ONCE(1);
1208 }
1209 if (!ret)
1210 event->registered = 1;
1211 }
1212
1213 /*
1214 * Only used internally at session destruction.
1215 */
1216 int _lttng_event_unregister(struct lttng_event *event)
1217 {
1218 const struct lttng_event_desc *desc;
1219 int ret = -EINVAL;
1220
1221 if (!event->registered)
1222 return 0;
1223
1224 desc = event->desc;
1225 switch (event->instrumentation) {
1226 case LTTNG_KERNEL_TRACEPOINT:
1227 ret = lttng_wrapper_tracepoint_probe_unregister(event->desc->kname,
1228 event->desc->probe_callback,
1229 event);
1230 break;
1231 case LTTNG_KERNEL_KPROBE:
1232 lttng_kprobes_unregister_event(event);
1233 ret = 0;
1234 break;
1235 case LTTNG_KERNEL_KRETPROBE:
1236 lttng_kretprobes_unregister(event);
1237 ret = 0;
1238 break;
1239 case LTTNG_KERNEL_SYSCALL:
1240 ret = lttng_syscall_filter_disable_event(event->chan, event);
1241 break;
1242 case LTTNG_KERNEL_NOOP:
1243 ret = 0;
1244 break;
1245 case LTTNG_KERNEL_UPROBE:
1246 lttng_uprobes_unregister_event(event);
1247 ret = 0;
1248 break;
1249 case LTTNG_KERNEL_FUNCTION: /* Fall-through */
1250 default:
1251 WARN_ON_ONCE(1);
1252 }
1253 if (!ret)
1254 event->registered = 0;
1255 return ret;
1256 }
1257
1258 /* Only used for tracepoints for now. */
1259 static
1260 void register_event_notifier(struct lttng_event_notifier *event_notifier)
1261 {
1262 const struct lttng_event_desc *desc;
1263 int ret = -EINVAL;
1264
1265 if (event_notifier->registered)
1266 return;
1267
1268 desc = event_notifier->desc;
1269 switch (event_notifier->instrumentation) {
1270 case LTTNG_KERNEL_TRACEPOINT:
1271 ret = lttng_wrapper_tracepoint_probe_register(desc->kname,
1272 desc->event_notifier_callback,
1273 event_notifier);
1274 break;
1275 case LTTNG_KERNEL_SYSCALL:
1276 ret = lttng_syscall_filter_enable_event_notifier(event_notifier);
1277 break;
1278 case LTTNG_KERNEL_KPROBE:
1279 case LTTNG_KERNEL_UPROBE:
1280 ret = 0;
1281 break;
1282 case LTTNG_KERNEL_KRETPROBE:
1283 case LTTNG_KERNEL_FUNCTION:
1284 case LTTNG_KERNEL_NOOP:
1285 default:
1286 WARN_ON_ONCE(1);
1287 }
1288 if (!ret)
1289 event_notifier->registered = 1;
1290 }
1291
1292 static
1293 int _lttng_event_notifier_unregister(
1294 struct lttng_event_notifier *event_notifier)
1295 {
1296 const struct lttng_event_desc *desc;
1297 int ret = -EINVAL;
1298
1299 if (!event_notifier->registered)
1300 return 0;
1301
1302 desc = event_notifier->desc;
1303 switch (event_notifier->instrumentation) {
1304 case LTTNG_KERNEL_TRACEPOINT:
1305 ret = lttng_wrapper_tracepoint_probe_unregister(event_notifier->desc->kname,
1306 event_notifier->desc->event_notifier_callback,
1307 event_notifier);
1308 break;
1309 case LTTNG_KERNEL_KPROBE:
1310 lttng_kprobes_unregister_event_notifier(event_notifier);
1311 ret = 0;
1312 break;
1313 case LTTNG_KERNEL_UPROBE:
1314 lttng_uprobes_unregister_event_notifier(event_notifier);
1315 ret = 0;
1316 break;
1317 case LTTNG_KERNEL_SYSCALL:
1318 ret = lttng_syscall_filter_disable_event_notifier(event_notifier);
1319 break;
1320 case LTTNG_KERNEL_KRETPROBE:
1321 case LTTNG_KERNEL_FUNCTION:
1322 case LTTNG_KERNEL_NOOP:
1323 default:
1324 WARN_ON_ONCE(1);
1325 }
1326 if (!ret)
1327 event_notifier->registered = 0;
1328 return ret;
1329 }
1330
/*
 * Only used internally at session destruction.
 *
 * Release instrumentation-specific resources, unlink the event from the
 * session list, destroy its contexts, then free it. Caller must have
 * already unregistered the event.
 */
static
void _lttng_event_destroy(struct lttng_event *event)
{
	switch (event->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
		/* Drop the probe descriptor reference taken at creation. */
		lttng_event_desc_put(event->desc);
		break;
	case LTTNG_KERNEL_KPROBE:
		/* Paired with try_module_get() done at event creation. */
		module_put(event->desc->owner);
		lttng_kprobes_destroy_event_private(event);
		break;
	case LTTNG_KERNEL_KRETPROBE:
		module_put(event->desc->owner);
		lttng_kretprobes_destroy_private(event);
		break;
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_SYSCALL:
		/* Nothing instrumentation-specific to release. */
		break;
	case LTTNG_KERNEL_UPROBE:
		module_put(event->desc->owner);
		lttng_uprobes_destroy_event_private(event);
		break;
	case LTTNG_KERNEL_FUNCTION:	/* Fall-through */
	default:
		WARN_ON_ONCE(1);
	}
	list_del(&event->list);
	lttng_destroy_context(event->ctx);
	kmem_cache_free(event_cache, event);
}
1364
/*
 * Only used internally at session destruction.
 *
 * Release instrumentation-specific resources, unlink the event notifier
 * from its group list, then free it. Caller must have already
 * unregistered the notifier.
 */
static
void _lttng_event_notifier_destroy(struct lttng_event_notifier *event_notifier)
{
	switch (event_notifier->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
		/* Drop the probe descriptor reference taken at creation. */
		lttng_event_desc_put(event_notifier->desc);
		break;
	case LTTNG_KERNEL_KPROBE:
		/* Paired with try_module_get() done at notifier creation. */
		module_put(event_notifier->desc->owner);
		lttng_kprobes_destroy_event_notifier_private(event_notifier);
		break;
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_SYSCALL:
		/* Nothing instrumentation-specific to release. */
		break;
	case LTTNG_KERNEL_UPROBE:
		module_put(event_notifier->desc->owner);
		lttng_uprobes_destroy_event_notifier_private(event_notifier);
		break;
	case LTTNG_KERNEL_KRETPROBE:
	case LTTNG_KERNEL_FUNCTION:
	default:
		WARN_ON_ONCE(1);
	}
	list_del(&event_notifier->list);
	kmem_cache_free(event_notifier_cache, event_notifier);
}
1394
1395 struct lttng_id_tracker *get_tracker(struct lttng_session *session,
1396 enum tracker_type tracker_type)
1397 {
1398 switch (tracker_type) {
1399 case TRACKER_PID:
1400 return &session->pid_tracker;
1401 case TRACKER_VPID:
1402 return &session->vpid_tracker;
1403 case TRACKER_UID:
1404 return &session->uid_tracker;
1405 case TRACKER_VUID:
1406 return &session->vuid_tracker;
1407 case TRACKER_GID:
1408 return &session->gid_tracker;
1409 case TRACKER_VGID:
1410 return &session->vgid_tracker;
1411 default:
1412 WARN_ON_ONCE(1);
1413 return NULL;
1414 }
1415 }
1416
1417 int lttng_session_track_id(struct lttng_session *session,
1418 enum tracker_type tracker_type, int id)
1419 {
1420 struct lttng_id_tracker *tracker;
1421 int ret;
1422
1423 tracker = get_tracker(session, tracker_type);
1424 if (!tracker)
1425 return -EINVAL;
1426 if (id < -1)
1427 return -EINVAL;
1428 mutex_lock(&sessions_mutex);
1429 if (id == -1) {
1430 /* track all ids: destroy tracker. */
1431 lttng_id_tracker_destroy(tracker, true);
1432 ret = 0;
1433 } else {
1434 ret = lttng_id_tracker_add(tracker, id);
1435 }
1436 mutex_unlock(&sessions_mutex);
1437 return ret;
1438 }
1439
1440 int lttng_session_untrack_id(struct lttng_session *session,
1441 enum tracker_type tracker_type, int id)
1442 {
1443 struct lttng_id_tracker *tracker;
1444 int ret;
1445
1446 tracker = get_tracker(session, tracker_type);
1447 if (!tracker)
1448 return -EINVAL;
1449 if (id < -1)
1450 return -EINVAL;
1451 mutex_lock(&sessions_mutex);
1452 if (id == -1) {
1453 /* untrack all ids: replace by empty tracker. */
1454 ret = lttng_id_tracker_empty_set(tracker);
1455 } else {
1456 ret = lttng_id_tracker_del(tracker, id);
1457 }
1458 mutex_unlock(&sessions_mutex);
1459 return ret;
1460 }
1461
/*
 * seq_file start operation: position the iterator on the (*pos)-th hash
 * node of the tracker. Takes sessions_mutex, released in id_list_stop(),
 * so the table is protected for the whole seq_file iteration.
 */
static
void *id_list_start(struct seq_file *m, loff_t *pos)
{
	struct lttng_id_tracker *id_tracker = m->private;
	struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
	struct lttng_id_hash_node *e;
	int iter = 0, i;

	mutex_lock(&sessions_mutex);
	if (id_tracker_p) {
		/* Linear scan over all buckets until the *pos-th entry. */
		for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
			struct hlist_head *head = &id_tracker_p->id_hash[i];

			lttng_hlist_for_each_entry(e, head, hlist) {
				if (iter++ >= *pos)
					return e;
			}
		}
	} else {
		/* ID tracker disabled. */
		if (iter >= *pos && iter == 0) {
			/*
			 * NOTE(review): id_tracker_p is NULL on this path, so
			 * this returns NULL, which seq_file treats as end of
			 * sequence — the "tracker disabled" record handled by
			 * id_list_show() is presumably never emitted from
			 * here; confirm intended.
			 */
			return id_tracker_p;	/* empty tracker */
		}
		iter++;
	}
	/* End of list */
	return NULL;
}
1490
/* Called with sessions_mutex held. */
static
void *id_list_next(struct seq_file *m, void *p, loff_t *ppos)
{
	struct lttng_id_tracker *id_tracker = m->private;
	struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
	struct lttng_id_hash_node *e;
	int iter = 0, i;

	/*
	 * seq_file next operation: advance *ppos and re-scan the table from
	 * the beginning up to the new position (positions are not stable
	 * across table mutations, hence the rescan).
	 */
	(*ppos)++;
	if (id_tracker_p) {
		for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
			struct hlist_head *head = &id_tracker_p->id_hash[i];

			lttng_hlist_for_each_entry(e, head, hlist) {
				if (iter++ >= *ppos)
					return e;
			}
		}
	} else {
		/* ID tracker disabled. */
		if (iter >= *ppos && iter == 0)
			return p;	/* empty tracker */
		iter++;
	}

	/* End of list */
	return NULL;
}
1520
/* seq_file stop operation: releases the mutex taken in id_list_start(). */
static
void id_list_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&sessions_mutex);
}
1526
/*
 * seq_file show operation: print one tracked id record. A cursor equal
 * to the (NULL) tracker pointer denotes the disabled ("track all")
 * tracker and is printed as id = -1.
 */
static
int id_list_show(struct seq_file *m, void *p)
{
	struct lttng_id_tracker *id_tracker = m->private;
	struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
	int id;

	if (p == id_tracker_p) {
		/* Tracker disabled. */
		id = -1;
	} else {
		const struct lttng_id_hash_node *e = p;

		id = lttng_id_tracker_get_node_id(e);
	}
	/* Output format matches the session metadata declarations. */
	switch (id_tracker->tracker_type) {
	case TRACKER_PID:
		seq_printf(m,	"process { pid = %d; };\n", id);
		break;
	case TRACKER_VPID:
		seq_printf(m,	"process { vpid = %d; };\n", id);
		break;
	case TRACKER_UID:
		seq_printf(m,	"user { uid = %d; };\n", id);
		break;
	case TRACKER_VUID:
		seq_printf(m,	"user { vuid = %d; };\n", id);
		break;
	case TRACKER_GID:
		seq_printf(m,	"group { gid = %d; };\n", id);
		break;
	case TRACKER_VGID:
		seq_printf(m,	"group { vgid = %d; };\n", id);
		break;
	default:
		seq_printf(m,	"UNKNOWN { field = %d };\n", id);
	}
	return 0;
}
1566
/* seq_file iterator over a session id tracker's entries. */
static
const struct seq_operations lttng_tracker_ids_list_seq_ops = {
	.start = id_list_start,
	.next = id_list_next,
	.stop = id_list_stop,
	.show = id_list_show,
};
1574
/*
 * Open handler: bind the tracker-id seq_file iterator to the file.
 * m->private is filled in later by lttng_session_list_tracker_ids().
 */
static
int lttng_tracker_ids_list_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &lttng_tracker_ids_list_seq_ops);
}
1580
/*
 * Release handler: tear down the seq_file and drop the session file
 * reference taken in lttng_session_list_tracker_ids().
 */
static
int lttng_tracker_ids_list_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	struct lttng_id_tracker *id_tracker = m->private;
	int ret;

	WARN_ON_ONCE(!id_tracker);
	ret = seq_release(inode, file);
	if (!ret)
		/* Paired with atomic_long_add_unless() on session->file. */
		fput(id_tracker->session->file);
	return ret;
}
1594
/* File operations for the anonymous tracker-id listing file descriptor. */
const struct file_operations lttng_tracker_ids_list_fops = {
	.owner = THIS_MODULE,
	.open = lttng_tracker_ids_list_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = lttng_tracker_ids_list_release,
};
1602
/*
 * Create an anonymous file whose reads list the ids currently tracked
 * by the session's tracker of the given type, and return a new fd to
 * it. Takes a reference on the session file for the lifetime of the
 * listing fd (dropped in lttng_tracker_ids_list_release()).
 *
 * Returns the new fd on success, negative error otherwise.
 */
int lttng_session_list_tracker_ids(struct lttng_session *session,
		enum tracker_type tracker_type)
{
	struct file *tracker_ids_list_file;
	struct seq_file *m;
	int file_fd, ret;

	file_fd = lttng_get_unused_fd();
	if (file_fd < 0) {
		ret = file_fd;
		goto fd_error;
	}

	tracker_ids_list_file = anon_inode_getfile("[lttng_tracker_ids_list]",
					  &lttng_tracker_ids_list_fops,
					  NULL, O_RDWR);
	if (IS_ERR(tracker_ids_list_file)) {
		ret = PTR_ERR(tracker_ids_list_file);
		goto file_error;
	}
	/* Hold the session file while the listing fd is alive. */
	if (!atomic_long_add_unless(&session->file->f_count, 1, LONG_MAX)) {
		ret = -EOVERFLOW;
		goto refcount_error;
	}
	ret = lttng_tracker_ids_list_fops.open(NULL, tracker_ids_list_file);
	if (ret < 0)
		goto open_error;
	m = tracker_ids_list_file->private_data;

	m->private = get_tracker(session, tracker_type);
	BUG_ON(!m->private);
	/* Publish the fd only once fully initialized. */
	fd_install(file_fd, tracker_ids_list_file);

	return file_fd;

	/* Error unwinding: release in strict reverse order of acquisition. */
open_error:
	atomic_long_dec(&session->file->f_count);
refcount_error:
	fput(tracker_ids_list_file);
file_error:
	put_unused_fd(file_fd);
fd_error:
	return ret;
}
1647
1648 /*
1649 * Enabler management.
1650 */
1651 static
1652 int lttng_match_enabler_star_glob(const char *desc_name,
1653 const char *pattern)
1654 {
1655 if (!strutils_star_glob_match(pattern, LTTNG_SIZE_MAX,
1656 desc_name, LTTNG_SIZE_MAX))
1657 return 0;
1658 return 1;
1659 }
1660
/*
 * Exact name match of @desc_name against @name.
 * Returns 1 on match, 0 otherwise.
 */
static
int lttng_match_enabler_name(const char *desc_name,
		const char *name)
{
	return strcmp(desc_name, name) == 0;
}
1669
1670 int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
1671 struct lttng_enabler *enabler)
1672 {
1673 const char *desc_name, *enabler_name;
1674 bool compat = false, entry = false;
1675
1676 enabler_name = enabler->event_param.name;
1677 switch (enabler->event_param.instrumentation) {
1678 case LTTNG_KERNEL_TRACEPOINT:
1679 desc_name = desc->name;
1680 switch (enabler->format_type) {
1681 case LTTNG_ENABLER_FORMAT_STAR_GLOB:
1682 return lttng_match_enabler_star_glob(desc_name, enabler_name);
1683 case LTTNG_ENABLER_FORMAT_NAME:
1684 return lttng_match_enabler_name(desc_name, enabler_name);
1685 default:
1686 return -EINVAL;
1687 }
1688 break;
1689 case LTTNG_KERNEL_SYSCALL:
1690 desc_name = desc->name;
1691 if (!strncmp(desc_name, "compat_", strlen("compat_"))) {
1692 desc_name += strlen("compat_");
1693 compat = true;
1694 }
1695 if (!strncmp(desc_name, "syscall_exit_",
1696 strlen("syscall_exit_"))) {
1697 desc_name += strlen("syscall_exit_");
1698 } else if (!strncmp(desc_name, "syscall_entry_",
1699 strlen("syscall_entry_"))) {
1700 desc_name += strlen("syscall_entry_");
1701 entry = true;
1702 } else {
1703 WARN_ON_ONCE(1);
1704 return -EINVAL;
1705 }
1706 switch (enabler->event_param.u.syscall.entryexit) {
1707 case LTTNG_KERNEL_SYSCALL_ENTRYEXIT:
1708 break;
1709 case LTTNG_KERNEL_SYSCALL_ENTRY:
1710 if (!entry)
1711 return 0;
1712 break;
1713 case LTTNG_KERNEL_SYSCALL_EXIT:
1714 if (entry)
1715 return 0;
1716 break;
1717 default:
1718 return -EINVAL;
1719 }
1720 switch (enabler->event_param.u.syscall.abi) {
1721 case LTTNG_KERNEL_SYSCALL_ABI_ALL:
1722 break;
1723 case LTTNG_KERNEL_SYSCALL_ABI_NATIVE:
1724 if (compat)
1725 return 0;
1726 break;
1727 case LTTNG_KERNEL_SYSCALL_ABI_COMPAT:
1728 if (!compat)
1729 return 0;
1730 break;
1731 default:
1732 return -EINVAL;
1733 }
1734 switch (enabler->event_param.u.syscall.match) {
1735 case LTTNG_SYSCALL_MATCH_NAME:
1736 switch (enabler->format_type) {
1737 case LTTNG_ENABLER_FORMAT_STAR_GLOB:
1738 return lttng_match_enabler_star_glob(desc_name, enabler_name);
1739 case LTTNG_ENABLER_FORMAT_NAME:
1740 return lttng_match_enabler_name(desc_name, enabler_name);
1741 default:
1742 return -EINVAL;
1743 }
1744 break;
1745 case LTTNG_SYSCALL_MATCH_NR:
1746 return -EINVAL; /* Not implemented. */
1747 default:
1748 return -EINVAL;
1749 }
1750 break;
1751 default:
1752 WARN_ON_ONCE(1);
1753 return -EINVAL;
1754 }
1755 }
1756
1757 static
1758 int lttng_event_enabler_match_event(struct lttng_event_enabler *event_enabler,
1759 struct lttng_event *event)
1760 {
1761 struct lttng_enabler *base_enabler = lttng_event_enabler_as_enabler(
1762 event_enabler);
1763
1764 if (base_enabler->event_param.instrumentation != event->instrumentation)
1765 return 0;
1766 if (lttng_desc_match_enabler(event->desc, base_enabler)
1767 && event->chan == event_enabler->chan)
1768 return 1;
1769 else
1770 return 0;
1771 }
1772
1773 static
1774 int lttng_event_notifier_enabler_match_event_notifier(struct lttng_event_notifier_enabler *event_notifier_enabler,
1775 struct lttng_event_notifier *event_notifier)
1776 {
1777 struct lttng_enabler *base_enabler = lttng_event_notifier_enabler_as_enabler(
1778 event_notifier_enabler);
1779
1780 if (base_enabler->event_param.instrumentation != event_notifier->instrumentation)
1781 return 0;
1782 if (lttng_desc_match_enabler(event_notifier->desc, base_enabler)
1783 && event_notifier->group == event_notifier_enabler->group
1784 && event_notifier->user_token == event_notifier_enabler->base.user_token)
1785 return 1;
1786 else
1787 return 0;
1788 }
1789
1790 static
1791 struct lttng_enabler_ref *lttng_enabler_ref(
1792 struct list_head *enablers_ref_list,
1793 struct lttng_enabler *enabler)
1794 {
1795 struct lttng_enabler_ref *enabler_ref;
1796
1797 list_for_each_entry(enabler_ref, enablers_ref_list, node) {
1798 if (enabler_ref->ref == enabler)
1799 return enabler_ref;
1800 }
1801 return NULL;
1802 }
1803
/*
 * For each registered tracepoint probe event matching @event_enabler,
 * create the corresponding lttng_event in the enabler's channel if one
 * does not already exist. Called with sessions mutex held.
 */
static
void lttng_create_tracepoint_event_if_missing(struct lttng_event_enabler *event_enabler)
{
	struct lttng_session *session = event_enabler->chan->session;
	struct lttng_probe_desc *probe_desc;
	const struct lttng_event_desc *desc;
	int i;
	struct list_head *probe_list;

	probe_list = lttng_get_probe_list_head();
	/*
	 * For each probe event, if we find that a probe event matches
	 * our enabler, create an associated lttng_event if not
	 * already present.
	 */
	list_for_each_entry(probe_desc, probe_list, head) {
		for (i = 0; i < probe_desc->nr_events; i++) {
			int found = 0;
			struct hlist_head *head;
			struct lttng_event *event;

			desc = probe_desc->event_desc[i];
			if (!lttng_desc_match_enabler(desc,
					lttng_event_enabler_as_enabler(event_enabler)))
				continue;

			/*
			 * Check if already created.
			 */
			head = utils_borrow_hash_table_bucket(
				session->events_ht.table, LTTNG_EVENT_HT_SIZE,
				desc->name);
			lttng_hlist_for_each_entry(event, head, hlist) {
				if (event->desc == desc
						&& event->chan == event_enabler->chan)
					found = 1;
			}
			if (found)
				continue;

			/*
			 * We need to create an event for this
			 * event probe.
			 */
			event = _lttng_event_create(event_enabler->chan,
					NULL, NULL, desc,
					LTTNG_KERNEL_TRACEPOINT);
			if (!event) {
				/* Best effort: log and keep scanning remaining probes. */
				printk(KERN_INFO "LTTng: Unable to create event %s\n",
					probe_desc->event_desc[i]->name);
			}
		}
	}
}
1858
1859 static
1860 void lttng_create_tracepoint_event_notifier_if_missing(struct lttng_event_notifier_enabler *event_notifier_enabler)
1861 {
1862 struct lttng_event_notifier_group *event_notifier_group = event_notifier_enabler->group;
1863 struct lttng_probe_desc *probe_desc;
1864 const struct lttng_event_desc *desc;
1865 int i;
1866 struct list_head *probe_list;
1867
1868 probe_list = lttng_get_probe_list_head();
1869 /*
1870 * For each probe event, if we find that a probe event matches
1871 * our enabler, create an associated lttng_event_notifier if not
1872 * already present.
1873 */
1874 list_for_each_entry(probe_desc, probe_list, head) {
1875 for (i = 0; i < probe_desc->nr_events; i++) {
1876 int found = 0;
1877 struct hlist_head *head;
1878 struct lttng_event_notifier *event_notifier;
1879
1880 desc = probe_desc->event_desc[i];
1881 if (!lttng_desc_match_enabler(desc,
1882 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)))
1883 continue;
1884
1885 /*
1886 * Check if already created.
1887 */
1888 head = utils_borrow_hash_table_bucket(
1889 event_notifier_group->event_notifiers_ht.table,
1890 LTTNG_EVENT_NOTIFIER_HT_SIZE, desc->name);
1891 lttng_hlist_for_each_entry(event_notifier, head, hlist) {
1892 if (event_notifier->desc == desc
1893 && event_notifier->user_token == event_notifier_enabler->base.user_token)
1894 found = 1;
1895 }
1896 if (found)
1897 continue;
1898
1899 /*
1900 * We need to create a event_notifier for this event probe.
1901 */
1902 event_notifier = _lttng_event_notifier_create(desc,
1903 event_notifier_enabler->base.user_token,
1904 event_notifier_group, NULL, NULL,
1905 LTTNG_KERNEL_TRACEPOINT);
1906 if (IS_ERR(event_notifier)) {
1907 printk(KERN_INFO "Unable to create event_notifier %s\n",
1908 probe_desc->event_desc[i]->name);
1909 }
1910 }
1911 }
1912 }
1913
1914 static
1915 void lttng_create_syscall_event_if_missing(struct lttng_event_enabler *event_enabler)
1916 {
1917 int ret;
1918
1919 ret = lttng_syscalls_register_event(event_enabler->chan, NULL);
1920 WARN_ON_ONCE(ret);
1921 }
1922
/*
 * Delegate to the syscall layer: register the syscall event-notifier
 * infrastructure for this enabler, then create the matching event
 * notifiers. Failures are unexpected and only warned.
 */
static
void lttng_create_syscall_event_notifier_if_missing(struct lttng_event_notifier_enabler *event_notifier_enabler)
{
	int ret;

	ret = lttng_syscalls_register_event_notifier(event_notifier_enabler, NULL);
	WARN_ON_ONCE(ret);
	/*
	 * NOTE(review): "syscals" (sic) — this identifier appears misspelled
	 * at its declaration site; renaming it must be coordinated with the
	 * header/definition, so it is kept as-is here.
	 */
	ret = lttng_syscals_create_matching_event_notifiers(event_notifier_enabler, NULL);
	WARN_ON_ONCE(ret);
}
1933
1934 /*
1935 * Create struct lttng_event if it is missing and present in the list of
1936 * tracepoint probes.
1937 * Should be called with sessions mutex held.
1938 */
1939 static
1940 void lttng_create_event_if_missing(struct lttng_event_enabler *event_enabler)
1941 {
1942 switch (event_enabler->base.event_param.instrumentation) {
1943 case LTTNG_KERNEL_TRACEPOINT:
1944 lttng_create_tracepoint_event_if_missing(event_enabler);
1945 break;
1946 case LTTNG_KERNEL_SYSCALL:
1947 lttng_create_syscall_event_if_missing(event_enabler);
1948 break;
1949 default:
1950 WARN_ON_ONCE(1);
1951 break;
1952 }
1953 }
1954
/*
 * Create events associated with an event_enabler (if not already present),
 * and add backward reference from the event to the enabler.
 * Should be called with sessions mutex held.
 *
 * Returns 0 on success, -ENOMEM when a backward reference cannot be
 * allocated (events already linked keep their references).
 */
static
int lttng_event_enabler_ref_events(struct lttng_event_enabler *event_enabler)
{
	struct lttng_channel *chan = event_enabler->chan;
	struct lttng_session *session = event_enabler->chan->session;
	struct lttng_enabler *base_enabler = lttng_event_enabler_as_enabler(event_enabler);
	struct lttng_event *event;

	/*
	 * The "catch-all" syscall enabler ("*", entry+exit, all ABIs)
	 * toggles the channel-wide syscall_all flag, read concurrently by
	 * the tracing fast path, hence WRITE_ONCE.
	 */
	if (base_enabler->event_param.instrumentation == LTTNG_KERNEL_SYSCALL &&
			base_enabler->event_param.u.syscall.entryexit == LTTNG_KERNEL_SYSCALL_ENTRYEXIT &&
			base_enabler->event_param.u.syscall.abi == LTTNG_KERNEL_SYSCALL_ABI_ALL &&
			base_enabler->event_param.u.syscall.match == LTTNG_SYSCALL_MATCH_NAME &&
			!strcmp(base_enabler->event_param.name, "*")) {
		if (base_enabler->enabled)
			WRITE_ONCE(chan->syscall_all, 1);
		else
			WRITE_ONCE(chan->syscall_all, 0);
	}

	/* First ensure that probe events are created for this enabler. */
	lttng_create_event_if_missing(event_enabler);

	/* For each event matching event_enabler in session event list. */
	list_for_each_entry(event, &session->events, list) {
		struct lttng_enabler_ref *enabler_ref;

		if (!lttng_event_enabler_match_event(event_enabler, event))
			continue;
		enabler_ref = lttng_enabler_ref(&event->enablers_ref_head,
			lttng_event_enabler_as_enabler(event_enabler));
		if (!enabler_ref) {
			/*
			 * If no backward ref, create it.
			 * Add backward ref from event to event_enabler.
			 */
			enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
			if (!enabler_ref)
				return -ENOMEM;
			enabler_ref->ref = lttng_event_enabler_as_enabler(event_enabler);
			list_add(&enabler_ref->node,
				&event->enablers_ref_head);
		}

		/*
		 * Link filter bytecodes if not linked yet.
		 */
		lttng_enabler_link_bytecode(event->desc,
			lttng_static_ctx,
			&event->bytecode_runtime_head,
			lttng_event_enabler_as_enabler(event_enabler));

		/* TODO: merge event context. */
	}
	return 0;
}
2015
2016 /*
2017 * Create struct lttng_event_notifier if it is missing and present in the list of
2018 * tracepoint probes.
2019 * Should be called with sessions mutex held.
2020 */
2021 static
2022 void lttng_create_event_notifier_if_missing(struct lttng_event_notifier_enabler *event_notifier_enabler)
2023 {
2024 switch (event_notifier_enabler->base.event_param.instrumentation) {
2025 case LTTNG_KERNEL_TRACEPOINT:
2026 lttng_create_tracepoint_event_notifier_if_missing(event_notifier_enabler);
2027 break;
2028 case LTTNG_KERNEL_SYSCALL:
2029 lttng_create_syscall_event_notifier_if_missing(event_notifier_enabler);
2030 break;
2031 default:
2032 WARN_ON_ONCE(1);
2033 break;
2034 }
2035 }
2036
/*
 * Create event_notifiers associated with a event_notifier enabler (if not already present).
 *
 * Also add a backward reference from each matching notifier to the
 * enabler and link pending filter bytecodes. Returns 0 on success,
 * -ENOMEM when a backward reference cannot be allocated.
 */
static
int lttng_event_notifier_enabler_ref_event_notifiers(struct lttng_event_notifier_enabler *event_notifier_enabler)
{
	struct lttng_event_notifier_group *event_notifier_group = event_notifier_enabler->group;
	struct lttng_event_notifier *event_notifier;

	/* First ensure that probe event_notifiers are created for this enabler. */
	lttng_create_event_notifier_if_missing(event_notifier_enabler);

	/* Link the created event_notifier with its associated enabler. */
	list_for_each_entry(event_notifier, &event_notifier_group->event_notifiers_head, list) {
		struct lttng_enabler_ref *enabler_ref;

		if (!lttng_event_notifier_enabler_match_event_notifier(event_notifier_enabler, event_notifier))
			continue;

		enabler_ref = lttng_enabler_ref(&event_notifier->enablers_ref_head,
			lttng_event_notifier_enabler_as_enabler(event_notifier_enabler));
		if (!enabler_ref) {
			/*
			 * If no backward ref, create it.
			 * Add backward ref from event_notifier to enabler.
			 */
			enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
			if (!enabler_ref)
				return -ENOMEM;

			enabler_ref->ref = lttng_event_notifier_enabler_as_enabler(
				event_notifier_enabler);
			list_add(&enabler_ref->node,
				&event_notifier->enablers_ref_head);
		}

		/*
		 * Link filter bytecodes if not linked yet.
		 */
		lttng_enabler_link_bytecode(event_notifier->desc,
			lttng_static_ctx, &event_notifier->bytecode_runtime_head,
			lttng_event_notifier_enabler_as_enabler(event_notifier_enabler));
	}
	return 0;
}
2082
/*
 * Called at module load: connect the probe on all enablers matching
 * this event.
 * Called with sessions lock held.
 *
 * Always returns 0.
 */
int lttng_fix_pending_events(void)
{
	struct lttng_session *session;

	/* Re-run lazy enabler sync on every session. */
	list_for_each_entry(session, &sessions, list)
		lttng_session_lazy_sync_event_enablers(session);
	return 0;
}
2096
2097 static bool lttng_event_notifier_group_has_active_event_notifiers(
2098 struct lttng_event_notifier_group *event_notifier_group)
2099 {
2100 struct lttng_event_notifier_enabler *event_notifier_enabler;
2101
2102 list_for_each_entry(event_notifier_enabler, &event_notifier_group->enablers_head,
2103 node) {
2104 if (event_notifier_enabler->base.enabled)
2105 return true;
2106 }
2107 return false;
2108 }
2109
/*
 * Returns true when any event notifier group has at least one enabled
 * enabler, false otherwise.
 */
bool lttng_event_notifier_active(void)
{
	struct lttng_event_notifier_group *event_notifier_group;

	list_for_each_entry(event_notifier_group, &event_notifier_groups, node) {
		if (lttng_event_notifier_group_has_active_event_notifiers(event_notifier_group))
			return true;
	}
	return false;
}
2120
/*
 * Called at module load: re-sync the enablers of every event notifier
 * group so newly available probes get connected. Always returns 0.
 */
int lttng_fix_pending_event_notifiers(void)
{
	struct lttng_event_notifier_group *event_notifier_group;

	list_for_each_entry(event_notifier_group, &event_notifier_groups, node)
		lttng_event_notifier_group_sync_enablers(event_notifier_group);
	return 0;
}
2129
2130 struct lttng_event_enabler *lttng_event_enabler_create(
2131 enum lttng_enabler_format_type format_type,
2132 struct lttng_kernel_event *event_param,
2133 struct lttng_channel *chan)
2134 {
2135 struct lttng_event_enabler *event_enabler;
2136
2137 event_enabler = kzalloc(sizeof(*event_enabler), GFP_KERNEL);
2138 if (!event_enabler)
2139 return NULL;
2140 event_enabler->base.format_type = format_type;
2141 INIT_LIST_HEAD(&event_enabler->base.filter_bytecode_head);
2142 memcpy(&event_enabler->base.event_param, event_param,
2143 sizeof(event_enabler->base.event_param));
2144 event_enabler->chan = chan;
2145 /* ctx left NULL */
2146 event_enabler->base.enabled = 0;
2147 event_enabler->base.evtype = LTTNG_TYPE_ENABLER;
2148 mutex_lock(&sessions_mutex);
2149 list_add(&event_enabler->node, &event_enabler->chan->session->enablers_head);
2150 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
2151 mutex_unlock(&sessions_mutex);
2152 return event_enabler;
2153 }
2154
/*
 * Enable the enabler and re-sync the session's events accordingly.
 * Always returns 0.
 */
int lttng_event_enabler_enable(struct lttng_event_enabler *event_enabler)
{
	mutex_lock(&sessions_mutex);
	lttng_event_enabler_as_enabler(event_enabler)->enabled = 1;
	lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
	mutex_unlock(&sessions_mutex);
	return 0;
}
2163
/*
 * Disable the enabler and re-sync the session's events accordingly.
 * Always returns 0.
 */
int lttng_event_enabler_disable(struct lttng_event_enabler *event_enabler)
{
	mutex_lock(&sessions_mutex);
	lttng_event_enabler_as_enabler(event_enabler)->enabled = 0;
	lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
	mutex_unlock(&sessions_mutex);
	return 0;
}
2172
2173 static
2174 int lttng_enabler_attach_bytecode(struct lttng_enabler *enabler,
2175 struct lttng_kernel_filter_bytecode __user *bytecode)
2176 {
2177 struct lttng_filter_bytecode_node *bytecode_node;
2178 uint32_t bytecode_len;
2179 int ret;
2180
2181 ret = get_user(bytecode_len, &bytecode->len);
2182 if (ret)
2183 return ret;
2184 bytecode_node = kzalloc(sizeof(*bytecode_node) + bytecode_len,
2185 GFP_KERNEL);
2186 if (!bytecode_node)
2187 return -ENOMEM;
2188 ret = copy_from_user(&bytecode_node->bc, bytecode,
2189 sizeof(*bytecode) + bytecode_len);
2190 if (ret)
2191 goto error_free;
2192
2193 bytecode_node->enabler = enabler;
2194 /* Enforce length based on allocated size */
2195 bytecode_node->bc.len = bytecode_len;
2196 list_add_tail(&bytecode_node->node, &enabler->filter_bytecode_head);
2197
2198 return 0;
2199
2200 error_free:
2201 kfree(bytecode_node);
2202 return ret;
2203 }
2204
2205 int lttng_event_enabler_attach_bytecode(struct lttng_event_enabler *event_enabler,
2206 struct lttng_kernel_filter_bytecode __user *bytecode)
2207 {
2208 int ret;
2209 ret = lttng_enabler_attach_bytecode(
2210 lttng_event_enabler_as_enabler(event_enabler), bytecode);
2211 if (ret)
2212 goto error;
2213
2214 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
2215 return 0;
2216
2217 error:
2218 return ret;
2219 }
2220
2221 int lttng_event_add_callsite(struct lttng_event *event,
2222 struct lttng_kernel_event_callsite __user *callsite)
2223 {
2224
2225 switch (event->instrumentation) {
2226 case LTTNG_KERNEL_UPROBE:
2227 return lttng_uprobes_event_add_callsite(event, callsite);
2228 default:
2229 return -EINVAL;
2230 }
2231 }
2232
/*
 * Attaching a context to an event enabler is not implemented yet:
 * always reports -ENOSYS.
 */
int lttng_event_enabler_attach_context(struct lttng_event_enabler *event_enabler,
		struct lttng_kernel_context *context_param)
{
	return -ENOSYS;
}
2238
2239 static
2240 void lttng_enabler_destroy(struct lttng_enabler *enabler)
2241 {
2242 struct lttng_filter_bytecode_node *filter_node, *tmp_filter_node;
2243
2244 /* Destroy filter bytecode */
2245 list_for_each_entry_safe(filter_node, tmp_filter_node,
2246 &enabler->filter_bytecode_head, node) {
2247 kfree(filter_node);
2248 }
2249 }
2250
2251 static
2252 void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler)
2253 {
2254 lttng_enabler_destroy(lttng_event_enabler_as_enabler(event_enabler));
2255
2256 /* Destroy contexts */
2257 lttng_destroy_context(event_enabler->ctx);
2258
2259 list_del(&event_enabler->node);
2260 kfree(event_enabler);
2261 }
2262
2263 struct lttng_event_notifier_enabler *lttng_event_notifier_enabler_create(
2264 struct lttng_event_notifier_group *event_notifier_group,
2265 enum lttng_enabler_format_type format_type,
2266 struct lttng_kernel_event_notifier *event_notifier_param)
2267 {
2268 struct lttng_event_notifier_enabler *event_notifier_enabler;
2269
2270 event_notifier_enabler = kzalloc(sizeof(*event_notifier_enabler), GFP_KERNEL);
2271 if (!event_notifier_enabler)
2272 return NULL;
2273
2274 event_notifier_enabler->base.format_type = format_type;
2275 INIT_LIST_HEAD(&event_notifier_enabler->base.filter_bytecode_head);
2276
2277 memcpy(&event_notifier_enabler->base.event_param.name, event_notifier_param->event.name,
2278 sizeof(event_notifier_enabler->base.event_param.name));
2279 event_notifier_enabler->base.event_param.instrumentation = event_notifier_param->event.instrumentation;
2280 event_notifier_enabler->base.evtype = LTTNG_TYPE_ENABLER;
2281
2282 event_notifier_enabler->base.enabled = 0;
2283 event_notifier_enabler->base.user_token = event_notifier_param->event.token;
2284 event_notifier_enabler->group = event_notifier_group;
2285
2286 mutex_lock(&sessions_mutex);
2287 list_add(&event_notifier_enabler->node, &event_notifier_enabler->group->enablers_head);
2288 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2289
2290 mutex_unlock(&sessions_mutex);
2291
2292 return event_notifier_enabler;
2293 }
2294
2295 int lttng_event_notifier_enabler_enable(
2296 struct lttng_event_notifier_enabler *event_notifier_enabler)
2297 {
2298 mutex_lock(&sessions_mutex);
2299 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 1;
2300 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2301 mutex_unlock(&sessions_mutex);
2302 return 0;
2303 }
2304
2305 int lttng_event_notifier_enabler_disable(
2306 struct lttng_event_notifier_enabler *event_notifier_enabler)
2307 {
2308 mutex_lock(&sessions_mutex);
2309 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 0;
2310 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2311 mutex_unlock(&sessions_mutex);
2312 return 0;
2313 }
2314
2315 int lttng_event_notifier_enabler_attach_bytecode(
2316 struct lttng_event_notifier_enabler *event_notifier_enabler,
2317 struct lttng_kernel_filter_bytecode __user *bytecode)
2318 {
2319 int ret;
2320
2321 ret = lttng_enabler_attach_bytecode(
2322 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler),
2323 bytecode);
2324 if (ret)
2325 goto error;
2326
2327 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2328 return 0;
2329
2330 error:
2331 return ret;
2332 }
2333
2334 int lttng_event_notifier_add_callsite(struct lttng_event_notifier *event_notifier,
2335 struct lttng_kernel_event_callsite __user *callsite)
2336 {
2337
2338 switch (event_notifier->instrumentation) {
2339 case LTTNG_KERNEL_UPROBE:
2340 return lttng_uprobes_event_notifier_add_callsite(event_notifier,
2341 callsite);
2342 default:
2343 return -EINVAL;
2344 }
2345 }
2346
/*
 * Attaching a context to an event notifier enabler is not implemented
 * yet: always reports -ENOSYS.
 */
int lttng_event_notifier_enabler_attach_context(
		struct lttng_event_notifier_enabler *event_notifier_enabler,
		struct lttng_kernel_context *context_param)
{
	return -ENOSYS;
}
2353
2354 static
2355 void lttng_event_notifier_enabler_destroy(
2356 struct lttng_event_notifier_enabler *event_notifier_enabler)
2357 {
2358 if (!event_notifier_enabler) {
2359 return;
2360 }
2361
2362 list_del(&event_notifier_enabler->node);
2363
2364 lttng_enabler_destroy(lttng_event_notifier_enabler_as_enabler(event_notifier_enabler));
2365 kfree(event_notifier_enabler);
2366 }
2367
/*
 * lttng_session_sync_event_enablers should be called just before starting a
 * session.
 * Should be called with sessions mutex held.
 */
static
void lttng_session_sync_event_enablers(struct lttng_session *session)
{
	struct lttng_event_enabler *event_enabler;
	struct lttng_event *event;

	/* First pass: let each enabler take references on matching events. */
	list_for_each_entry(event_enabler, &session->enablers_head, node)
		lttng_event_enabler_ref_events(event_enabler);
	/*
	 * For each event, if at least one of its enablers is enabled,
	 * and its channel and session transient states are enabled, we
	 * enable the event, else we disable it.
	 */
	list_for_each_entry(event, &session->events, list) {
		struct lttng_enabler_ref *enabler_ref;
		struct lttng_bytecode_runtime *runtime;
		int enabled = 0, has_enablers_without_bytecode = 0;

		switch (event->instrumentation) {
		case LTTNG_KERNEL_TRACEPOINT:
		case LTTNG_KERNEL_SYSCALL:
			/* Enable events */
			list_for_each_entry(enabler_ref,
					&event->enablers_ref_head, node) {
				if (enabler_ref->ref->enabled) {
					enabled = 1;
					break;
				}
			}
			break;
		default:
			/* Not handled with lazy sync. */
			continue;
		}
		/*
		 * Enabled state is based on union of enablers, with
		 * intersection of session and channel transient enable
		 * states.
		 */
		enabled = enabled && session->tstate && event->chan->tstate;

		WRITE_ONCE(event->enabled, enabled);
		/*
		 * Sync tracepoint registration with event enabled
		 * state.
		 */
		if (enabled) {
			register_event(event);
		} else {
			_lttng_event_unregister(event);
		}

		/* Check if has enablers without bytecode enabled */
		list_for_each_entry(enabler_ref,
				&event->enablers_ref_head, node) {
			if (enabler_ref->ref->enabled
					&& list_empty(&enabler_ref->ref->filter_bytecode_head)) {
				has_enablers_without_bytecode = 1;
				break;
			}
		}
		event->has_enablers_without_bytecode =
			has_enablers_without_bytecode;

		/* Enable filters: sync each bytecode runtime with enabler state. */
		list_for_each_entry(runtime,
				&event->bytecode_runtime_head, node)
			lttng_filter_sync_state(runtime);
	}
}
2443
2444 /*
2445 * Apply enablers to session events, adding events to session if need
2446 * be. It is required after each modification applied to an active
2447 * session, and right before session "start".
2448 * "lazy" sync means we only sync if required.
2449 * Should be called with sessions mutex held.
2450 */
2451 static
2452 void lttng_session_lazy_sync_event_enablers(struct lttng_session *session)
2453 {
2454 /* We can skip if session is not active */
2455 if (!session->active)
2456 return;
2457 lttng_session_sync_event_enablers(session);
2458 }
2459
/*
 * Synchronize a group's event notifiers with its enabler states.
 * Should be called with sessions mutex held.
 */
static
void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group)
{
	struct lttng_event_notifier_enabler *event_notifier_enabler;
	struct lttng_event_notifier *event_notifier;

	/* First pass: let each enabler take references on matching notifiers. */
	list_for_each_entry(event_notifier_enabler, &event_notifier_group->enablers_head, node)
		lttng_event_notifier_enabler_ref_event_notifiers(event_notifier_enabler);

	/*
	 * For each event_notifier, if at least one of its enablers is enabled,
	 * we enable the event_notifier, else we disable it.
	 */
	list_for_each_entry(event_notifier, &event_notifier_group->event_notifiers_head, list) {
		struct lttng_enabler_ref *enabler_ref;
		struct lttng_bytecode_runtime *runtime;
		int enabled = 0, has_enablers_without_bytecode = 0;

		switch (event_notifier->instrumentation) {
		case LTTNG_KERNEL_TRACEPOINT:
		case LTTNG_KERNEL_SYSCALL:
			/* Enable event_notifiers */
			list_for_each_entry(enabler_ref,
					&event_notifier->enablers_ref_head, node) {
				if (enabler_ref->ref->enabled) {
					enabled = 1;
					break;
				}
			}
			break;
		default:
			/* Not handled with sync. */
			continue;
		}

		WRITE_ONCE(event_notifier->enabled, enabled);
		/*
		 * Sync tracepoint registration with event_notifier enabled
		 * state. The registered flag avoids double register/unregister.
		 */
		if (enabled) {
			if (!event_notifier->registered)
				register_event_notifier(event_notifier);
		} else {
			if (event_notifier->registered)
				_lttng_event_notifier_unregister(event_notifier);
		}

		/* Check if has enablers without bytecode enabled */
		list_for_each_entry(enabler_ref,
				&event_notifier->enablers_ref_head, node) {
			if (enabler_ref->ref->enabled
					&& list_empty(&enabler_ref->ref->filter_bytecode_head)) {
				has_enablers_without_bytecode = 1;
				break;
			}
		}
		event_notifier->has_enablers_without_bytecode =
			has_enablers_without_bytecode;

		/* Enable filters: sync each bytecode runtime with enabler state. */
		list_for_each_entry(runtime,
				&event_notifier->bytecode_runtime_head, node)
			lttng_filter_sync_state(runtime);
	}
}
2526
/*
 * Serialize at most one packet worth of metadata into a metadata
 * channel.
 * We grab the metadata cache mutex to get exclusive access to our metadata
 * buffer and to the metadata cache. Exclusive access to the metadata buffer
 * allows us to do racy operations such as looking for remaining space left in
 * packet and write, since mutual exclusion protects us from concurrent writes.
 * Mutual exclusion on the metadata cache allow us to read the cache content
 * without racing against reallocation of the cache by updates.
 * Returns the number of bytes written in the channel, 0 if no data
 * was written and a negative value on error.
 */
int lttng_metadata_output_channel(struct lttng_metadata_stream *stream,
		struct channel *chan, bool *coherent)
{
	struct lib_ring_buffer_ctx ctx;
	int ret = 0;
	size_t len, reserve_len;

	/*
	 * Ensure we support multiple get_next / put sequences followed by
	 * put_next. The metadata cache lock protects reading the metadata
	 * cache. It can indeed be read concurrently by "get_next_subbuf" and
	 * "flush" operations on the buffer invoked by different processes.
	 * Moreover, since the metadata cache memory can be reallocated, we
	 * need to have exclusive access against updates even though we only
	 * read it.
	 */
	mutex_lock(&stream->metadata_cache->lock);
	WARN_ON(stream->metadata_in < stream->metadata_out);
	/* Output in progress elsewhere: don't write until it is consumed. */
	if (stream->metadata_in != stream->metadata_out)
		goto end;

	/* Metadata regenerated, change the version. */
	if (stream->metadata_cache->version != stream->version)
		stream->version = stream->metadata_cache->version;

	/* Bytes of cached metadata not yet pushed into this stream. */
	len = stream->metadata_cache->metadata_written -
		stream->metadata_in;
	if (!len)
		goto end;
	reserve_len = min_t(size_t,
			stream->transport->ops.packet_avail_size(chan),
			len);
	lib_ring_buffer_ctx_init(&ctx, chan, NULL, reserve_len,
			sizeof(char), -1);
	/*
	 * If reservation failed, return an error to the caller.
	 */
	ret = stream->transport->ops.event_reserve(&ctx, 0);
	if (ret != 0) {
		printk(KERN_WARNING "LTTng: Metadata event reservation failed\n");
		stream->coherent = false;
		goto end;
	}
	stream->transport->ops.event_write(&ctx,
			stream->metadata_cache->data + stream->metadata_in,
			reserve_len);
	stream->transport->ops.event_commit(&ctx);
	stream->metadata_in += reserve_len;
	/*
	 * A partial write leaves the stream non-coherent: the consumer must
	 * keep reading until the remaining metadata has been output.
	 */
	if (reserve_len < len)
		stream->coherent = false;
	else
		stream->coherent = true;
	ret = reserve_len;

end:
	if (coherent)
		*coherent = stream->coherent;
	mutex_unlock(&stream->metadata_cache->lock);
	return ret;
}
2599
/*
 * Begin a metadata transaction for a session: the outermost begin (the
 * one bringing "producing" from 0 to 1) takes the metadata cache lock;
 * further nested begin calls only increment the refcount.
 * NOTE(review): assumes begin/end calls are balanced — confirm callers.
 */
static
void lttng_metadata_begin(struct lttng_session *session)
{
	if (atomic_inc_return(&session->metadata_cache->producing) == 1)
		mutex_lock(&session->metadata_cache->lock);
}
2606
/*
 * End a metadata transaction: when the outermost transaction ends
 * ("producing" drops to 0), wake up metadata stream readers and
 * release the metadata cache lock.
 */
static
void lttng_metadata_end(struct lttng_session *session)
{
	WARN_ON_ONCE(!atomic_read(&session->metadata_cache->producing));
	if (atomic_dec_return(&session->metadata_cache->producing) == 0) {
		struct lttng_metadata_stream *stream;

		/* Newly cached metadata is now visible: wake blocked readers. */
		list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list)
			wake_up_interruptible(&stream->read_wait);
		mutex_unlock(&session->metadata_cache->lock);
	}
}
2619
/*
 * Write the metadata to the metadata cache.
 * Must be called with sessions_mutex held.
 * The metadata cache lock protects us from concurrent read access from
 * thread outputting metadata content to ring buffer.
 * The content of the printf is printed as a single atomic metadata
 * transaction.
 */
int lttng_metadata_printf(struct lttng_session *session,
		const char *fmt, ...)
{
	char *str;
	size_t len;
	va_list ap;

	WARN_ON_ONCE(!LTTNG_READ_ONCE(session->active));

	va_start(ap, fmt);
	str = kvasprintf(GFP_KERNEL, fmt, ap);
	va_end(ap);
	if (!str)
		return -ENOMEM;

	len = strlen(str);
	/* Caller must be within a lttng_metadata_begin/end transaction. */
	WARN_ON_ONCE(!atomic_read(&session->metadata_cache->producing));
	if (session->metadata_cache->metadata_written + len >
			session->metadata_cache->cache_alloc) {
		char *tmp_cache_realloc;
		unsigned int tmp_cache_alloc_size;

		/*
		 * Grow geometrically (at least doubling) so repeated appends
		 * stay amortized O(1).
		 * NOTE(review): unsigned int arithmetic could wrap for very
		 * large caches — confirm upper bound on cache size.
		 */
		tmp_cache_alloc_size = max_t(unsigned int,
				session->metadata_cache->cache_alloc + len,
				session->metadata_cache->cache_alloc << 1);
		tmp_cache_realloc = vzalloc(tmp_cache_alloc_size);
		if (!tmp_cache_realloc)
			goto err;
		if (session->metadata_cache->data) {
			memcpy(tmp_cache_realloc,
					session->metadata_cache->data,
					session->metadata_cache->cache_alloc);
			vfree(session->metadata_cache->data);
		}

		session->metadata_cache->cache_alloc = tmp_cache_alloc_size;
		session->metadata_cache->data = tmp_cache_realloc;
	}
	memcpy(session->metadata_cache->data +
			session->metadata_cache->metadata_written,
			str, len);
	session->metadata_cache->metadata_written += len;
	kfree(str);

	return 0;

err:
	kfree(str);
	return -ENOMEM;
}
2678
2679 static
2680 int print_tabs(struct lttng_session *session, size_t nesting)
2681 {
2682 size_t i;
2683
2684 for (i = 0; i < nesting; i++) {
2685 int ret;
2686
2687 ret = lttng_metadata_printf(session, " ");
2688 if (ret) {
2689 return ret;
2690 }
2691 }
2692 return 0;
2693 }
2694
static
int lttng_field_name_statedump(struct lttng_session *session,
		const struct lttng_event_field *field,
		size_t nesting)
{
	/*
	 * Print the " _<name>;" declarator terminating a field dump.
	 * nesting is unused here: indentation was already printed by the
	 * preceding type statedump.
	 */
	return lttng_metadata_printf(session, " _%s;\n", field->name);
}
2702
/*
 * Statedump an integer type as a CTF "integer { ... }" declaration.
 * A byte_order attribute is emitted only when the traced integer's byte
 * order is reversed relative to the native byte order.
 */
static
int _lttng_integer_type_statedump(struct lttng_session *session,
		const struct lttng_type *type,
		size_t nesting)
{
	int ret;

	WARN_ON_ONCE(type->atype != atype_integer);
	ret = print_tabs(session, nesting);
	if (ret)
		return ret;
	ret = lttng_metadata_printf(session,
		"integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s }",
		type->u.integer.size,
		type->u.integer.alignment,
		type->u.integer.signedness,
		(type->u.integer.encoding == lttng_encode_none)
			? "none"
			: (type->u.integer.encoding == lttng_encode_UTF8)
				? "UTF8"
				: "ASCII",
		type->u.integer.base,
#if __BYTE_ORDER == __BIG_ENDIAN
		type->u.integer.reverse_byte_order ? " byte_order = le;" : ""
#else
		type->u.integer.reverse_byte_order ? " byte_order = be;" : ""
#endif
	);
	return ret;
}
2733
2734 /*
2735 * Must be called with sessions_mutex held.
2736 */
2737 static
2738 int _lttng_struct_type_statedump(struct lttng_session *session,
2739 const struct lttng_type *type,
2740 size_t nesting)
2741 {
2742 int ret;
2743 uint32_t i, nr_fields;
2744 unsigned int alignment;
2745
2746 WARN_ON_ONCE(type->atype != atype_struct_nestable);
2747
2748 ret = print_tabs(session, nesting);
2749 if (ret)
2750 return ret;
2751 ret = lttng_metadata_printf(session,
2752 "struct {\n");
2753 if (ret)
2754 return ret;
2755 nr_fields = type->u.struct_nestable.nr_fields;
2756 for (i = 0; i < nr_fields; i++) {
2757 const struct lttng_event_field *iter_field;
2758
2759 iter_field = &type->u.struct_nestable.fields[i];
2760 ret = _lttng_field_statedump(session, iter_field, nesting + 1);
2761 if (ret)
2762 return ret;
2763 }
2764 ret = print_tabs(session, nesting);
2765 if (ret)
2766 return ret;
2767 alignment = type->u.struct_nestable.alignment;
2768 if (alignment) {
2769 ret = lttng_metadata_printf(session,
2770 "} align(%u)",
2771 alignment);
2772 } else {
2773 ret = lttng_metadata_printf(session,
2774 "}");
2775 }
2776 return ret;
2777 }
2778
2779 /*
2780 * Must be called with sessions_mutex held.
2781 */
2782 static
2783 int _lttng_struct_field_statedump(struct lttng_session *session,
2784 const struct lttng_event_field *field,
2785 size_t nesting)
2786 {
2787 int ret;
2788
2789 ret = _lttng_struct_type_statedump(session,
2790 &field->type, nesting);
2791 if (ret)
2792 return ret;
2793 return lttng_field_name_statedump(session, field, nesting);
2794 }
2795
/*
 * Must be called with sessions_mutex held.
 * Statedump a variant type as a CTF "variant <_tag> { ... }" declaration,
 * dumping each choice as a nested field.
 */
static
int _lttng_variant_type_statedump(struct lttng_session *session,
		const struct lttng_type *type,
		size_t nesting)
{
	int ret;
	uint32_t i, nr_choices;

	WARN_ON_ONCE(type->atype != atype_variant_nestable);
	/*
	 * CTF 1.8 does not allow expressing nonzero variant alignment in a nestable way.
	 */
	if (type->u.variant_nestable.alignment != 0)
		return -EINVAL;
	ret = print_tabs(session, nesting);
	if (ret)
		return ret;
	ret = lttng_metadata_printf(session,
		"variant <_%s> {\n",
		type->u.variant_nestable.tag_name);
	if (ret)
		return ret;
	nr_choices = type->u.variant_nestable.nr_choices;
	for (i = 0; i < nr_choices; i++) {
		const struct lttng_event_field *iter_field;

		iter_field = &type->u.variant_nestable.choices[i];
		ret = _lttng_field_statedump(session, iter_field, nesting + 1);
		if (ret)
			return ret;
	}
	ret = print_tabs(session, nesting);
	if (ret)
		return ret;
	ret = lttng_metadata_printf(session,
		"}");
	return ret;
}
2837
2838 /*
2839 * Must be called with sessions_mutex held.
2840 */
2841 static
2842 int _lttng_variant_field_statedump(struct lttng_session *session,
2843 const struct lttng_event_field *field,
2844 size_t nesting)
2845 {
2846 int ret;
2847
2848 ret = _lttng_variant_type_statedump(session,
2849 &field->type, nesting);
2850 if (ret)
2851 return ret;
2852 return lttng_field_name_statedump(session, field, nesting);
2853 }
2854
/*
 * Must be called with sessions_mutex held.
 * Statedump a fixed-length array field: an optional empty padding struct
 * carrying the alignment, the element type, then the "_name[len];"
 * declarator.
 */
static
int _lttng_array_field_statedump(struct lttng_session *session,
		const struct lttng_event_field *field,
		size_t nesting)
{
	int ret;
	const struct lttng_type *elem_type;

	WARN_ON_ONCE(field->type.atype != atype_array_nestable);

	if (field->type.u.array_nestable.alignment) {
		/* Express the array alignment through an empty aligned struct. */
		ret = print_tabs(session, nesting);
		if (ret)
			return ret;
		ret = lttng_metadata_printf(session,
		"struct { } align(%u) _%s_padding;\n",
				field->type.u.array_nestable.alignment * CHAR_BIT,
				field->name);
		if (ret)
			return ret;
	}
	/*
	 * Nested compound types: Only array of structures and variants are
	 * currently supported.
	 */
	elem_type = field->type.u.array_nestable.elem_type;
	switch (elem_type->atype) {
	case atype_integer:
	case atype_struct_nestable:
	case atype_variant_nestable:
		ret = _lttng_type_statedump(session, elem_type, nesting);
		if (ret)
			return ret;
		break;

	default:
		return -EINVAL;
	}
	ret = lttng_metadata_printf(session,
		" _%s[%u];\n",
		field->name,
		field->type.u.array_nestable.length);
	return ret;
}
2902
2903 /*
2904 * Must be called with sessions_mutex held.
2905 */
2906 static
2907 int _lttng_sequence_field_statedump(struct lttng_session *session,
2908 const struct lttng_event_field *field,
2909 size_t nesting)
2910 {
2911 int ret;
2912 const char *length_name;
2913 const struct lttng_type *elem_type;
2914
2915 WARN_ON_ONCE(field->type.atype != atype_sequence_nestable);
2916
2917 length_name = field->type.u.sequence_nestable.length_name;
2918
2919 if (field->type.u.sequence_nestable.alignment) {
2920 ret = print_tabs(session, nesting);
2921 if (ret)
2922 return ret;
2923 ret = lttng_metadata_printf(session,
2924 "struct { } align(%u) _%s_padding;\n",
2925 field->type.u.sequence_nestable.alignment * CHAR_BIT,
2926 field->name);
2927 if (ret)
2928 return ret;
2929 }
2930
2931 /*
2932 * Nested compound types: Only array of structures and variants are
2933 * currently supported.
2934 */
2935 elem_type = field->type.u.sequence_nestable.elem_type;
2936 switch (elem_type->atype) {
2937 case atype_integer:
2938 case atype_struct_nestable:
2939 case atype_variant_nestable:
2940 ret = _lttng_type_statedump(session, elem_type, nesting);
2941 if (ret)
2942 return ret;
2943 break;
2944
2945 default:
2946 return -EINVAL;
2947 }
2948 ret = lttng_metadata_printf(session,
2949 " _%s[ _%s ];\n",
2950 field->name,
2951 field->type.u.sequence_nestable.length_name);
2952 return ret;
2953 }
2954
/*
 * Must be called with sessions_mutex held.
 * Statedump an enumeration type: its integer container type followed by
 * each mapping, quoting/escaping entry names and emitting either a single
 * value or a "start ... end" range per entry.
 */
static
int _lttng_enum_type_statedump(struct lttng_session *session,
		const struct lttng_type *type,
		size_t nesting)
{
	const struct lttng_enum_desc *enum_desc;
	const struct lttng_type *container_type;
	int ret;
	unsigned int i, nr_entries;

	container_type = type->u.enum_nestable.container_type;
	/* CTF enums must map onto an integer container type. */
	if (container_type->atype != atype_integer) {
		ret = -EINVAL;
		goto end;
	}
	enum_desc = type->u.enum_nestable.desc;
	nr_entries = enum_desc->nr_entries;

	ret = print_tabs(session, nesting);
	if (ret)
		goto end;
	ret = lttng_metadata_printf(session, "enum : ");
	if (ret)
		goto end;
	ret = _lttng_integer_type_statedump(session, container_type, 0);
	if (ret)
		goto end;
	ret = lttng_metadata_printf(session, " {\n");
	if (ret)
		goto end;
	/* Dump all entries */
	for (i = 0; i < nr_entries; i++) {
		const struct lttng_enum_entry *entry = &enum_desc->entries[i];
		int j, len;

		ret = print_tabs(session, nesting + 1);
		if (ret)
			goto end;
		ret = lttng_metadata_printf(session,
				"\"");
		if (ret)
			goto end;
		len = strlen(entry->string);
		/* Escape the character '"' */
		for (j = 0; j < len; j++) {
			char c = entry->string[j];

			switch (c) {
			case '"':
				ret = lttng_metadata_printf(session,
						"\\\"");
				break;
			case '\\':
				ret = lttng_metadata_printf(session,
						"\\\\");
				break;
			default:
				ret = lttng_metadata_printf(session,
						"%c", c);
				break;
			}
			if (ret)
				goto end;
		}
		ret = lttng_metadata_printf(session, "\"");
		if (ret)
			goto end;

		if (entry->options.is_auto) {
			/* Auto-valued mapping: the CTF reader assigns the value. */
			ret = lttng_metadata_printf(session, ",\n");
			if (ret)
				goto end;
		} else {
			ret = lttng_metadata_printf(session,
					" = ");
			if (ret)
				goto end;
			if (entry->start.signedness)
				ret = lttng_metadata_printf(session,
						"%lld", (long long) entry->start.value);
			else
				ret = lttng_metadata_printf(session,
						"%llu", entry->start.value);
			if (ret)
				goto end;
			/* Single value, or "start ... end" when the range differs. */
			if (entry->start.signedness == entry->end.signedness &&
					entry->start.value
						== entry->end.value) {
				ret = lttng_metadata_printf(session,
						",\n");
			} else {
				if (entry->end.signedness) {
					ret = lttng_metadata_printf(session,
							" ... %lld,\n",
							(long long) entry->end.value);
				} else {
					ret = lttng_metadata_printf(session,
							" ... %llu,\n",
							entry->end.value);
				}
			}
			if (ret)
				goto end;
		}
	}
	ret = print_tabs(session, nesting);
	if (ret)
		goto end;
	ret = lttng_metadata_printf(session, "}");
end:
	return ret;
}
3070
3071 /*
3072 * Must be called with sessions_mutex held.
3073 */
3074 static
3075 int _lttng_enum_field_statedump(struct lttng_session *session,
3076 const struct lttng_event_field *field,
3077 size_t nesting)
3078 {
3079 int ret;
3080
3081 ret = _lttng_enum_type_statedump(session, &field->type, nesting);
3082 if (ret)
3083 return ret;
3084 return lttng_field_name_statedump(session, field, nesting);
3085 }
3086
3087 static
3088 int _lttng_integer_field_statedump(struct lttng_session *session,
3089 const struct lttng_event_field *field,
3090 size_t nesting)
3091 {
3092 int ret;
3093
3094 ret = _lttng_integer_type_statedump(session, &field->type, nesting);
3095 if (ret)
3096 return ret;
3097 return lttng_field_name_statedump(session, field, nesting);
3098 }
3099
3100 static
3101 int _lttng_string_type_statedump(struct lttng_session *session,
3102 const struct lttng_type *type,
3103 size_t nesting)
3104 {
3105 int ret;
3106
3107 WARN_ON_ONCE(type->atype != atype_string);
3108 /* Default encoding is UTF8 */
3109 ret = print_tabs(session, nesting);
3110 if (ret)
3111 return ret;
3112 ret = lttng_metadata_printf(session,
3113 "string%s",
3114 type->u.string.encoding == lttng_encode_ASCII ?
3115 " { encoding = ASCII; }" : "");
3116 return ret;
3117 }
3118
3119 static
3120 int _lttng_string_field_statedump(struct lttng_session *session,
3121 const struct lttng_event_field *field,
3122 size_t nesting)
3123 {
3124 int ret;
3125
3126 WARN_ON_ONCE(field->type.atype != atype_string);
3127 ret = _lttng_string_type_statedump(session, &field->type, nesting);
3128 if (ret)
3129 return ret;
3130 return lttng_field_name_statedump(session, field, nesting);
3131 }
3132
3133 /*
3134 * Must be called with sessions_mutex held.
3135 */
3136 static
3137 int _lttng_type_statedump(struct lttng_session *session,
3138 const struct lttng_type *type,
3139 size_t nesting)
3140 {
3141 int ret = 0;
3142
3143 switch (type->atype) {
3144 case atype_integer:
3145 ret = _lttng_integer_type_statedump(session, type, nesting);
3146 break;
3147 case atype_enum_nestable:
3148 ret = _lttng_enum_type_statedump(session, type, nesting);
3149 break;
3150 case atype_string:
3151 ret = _lttng_string_type_statedump(session, type, nesting);
3152 break;
3153 case atype_struct_nestable:
3154 ret = _lttng_struct_type_statedump(session, type, nesting);
3155 break;
3156 case atype_variant_nestable:
3157 ret = _lttng_variant_type_statedump(session, type, nesting);
3158 break;
3159
3160 /* Nested arrays and sequences are not supported yet. */
3161 case atype_array_nestable:
3162 case atype_sequence_nestable:
3163 default:
3164 WARN_ON_ONCE(1);
3165 return -EINVAL;
3166 }
3167 return ret;
3168 }
3169
3170 /*
3171 * Must be called with sessions_mutex held.
3172 */
3173 static
3174 int _lttng_field_statedump(struct lttng_session *session,
3175 const struct lttng_event_field *field,
3176 size_t nesting)
3177 {
3178 int ret = 0;
3179
3180 switch (field->type.atype) {
3181 case atype_integer:
3182 ret = _lttng_integer_field_statedump(session, field, nesting);
3183 break;
3184 case atype_enum_nestable:
3185 ret = _lttng_enum_field_statedump(session, field, nesting);
3186 break;
3187 case atype_string:
3188 ret = _lttng_string_field_statedump(session, field, nesting);
3189 break;
3190 case atype_struct_nestable:
3191 ret = _lttng_struct_field_statedump(session, field, nesting);
3192 break;
3193 case atype_array_nestable:
3194 ret = _lttng_array_field_statedump(session, field, nesting);
3195 break;
3196 case atype_sequence_nestable:
3197 ret = _lttng_sequence_field_statedump(session, field, nesting);
3198 break;
3199 case atype_variant_nestable:
3200 ret = _lttng_variant_field_statedump(session, field, nesting);
3201 break;
3202
3203 default:
3204 WARN_ON_ONCE(1);
3205 return -EINVAL;
3206 }
3207 return ret;
3208 }
3209
3210 static
3211 int _lttng_context_metadata_statedump(struct lttng_session *session,
3212 struct lttng_ctx *ctx)
3213 {
3214 int ret = 0;
3215 int i;
3216
3217 if (!ctx)
3218 return 0;
3219 for (i = 0; i < ctx->nr_fields; i++) {
3220 const struct lttng_ctx_field *field = &ctx->fields[i];
3221
3222 ret = _lttng_field_statedump(session, &field->event_field, 2);
3223 if (ret)
3224 return ret;
3225 }
3226 return ret;
3227 }
3228
3229 static
3230 int _lttng_fields_metadata_statedump(struct lttng_session *session,
3231 struct lttng_event *event)
3232 {
3233 const struct lttng_event_desc *desc = event->desc;
3234 int ret = 0;
3235 int i;
3236
3237 for (i = 0; i < desc->nr_fields; i++) {
3238 const struct lttng_event_field *field = &desc->fields[i];
3239
3240 ret = _lttng_field_statedump(session, field, 2);
3241 if (ret)
3242 return ret;
3243 }
3244 return ret;
3245 }
3246
/*
 * Must be called with sessions_mutex held.
 * The entire event metadata is printed as a single atomic metadata
 * transaction.
 */
static
int _lttng_event_metadata_statedump(struct lttng_session *session,
		struct lttng_channel *chan,
		struct lttng_event *event)
{
	int ret = 0;

	/* Dump each event at most once, and only while the session is active. */
	if (event->metadata_dumped || !LTTNG_READ_ONCE(session->active))
		return 0;
	/* The metadata channel carries no event declarations of its own. */
	if (chan->channel_type == METADATA_CHANNEL)
		return 0;

	lttng_metadata_begin(session);

	ret = lttng_metadata_printf(session,
		"event {\n"
		"	name = \"%s\";\n"
		"	id = %u;\n"
		"	stream_id = %u;\n",
		event->desc->name,
		event->id,
		event->chan->id);
	if (ret)
		goto end;

	if (event->ctx) {
		ret = lttng_metadata_printf(session,
			"	context := struct {\n");
		if (ret)
			goto end;
	}
	ret = _lttng_context_metadata_statedump(session, event->ctx);
	if (ret)
		goto end;
	if (event->ctx) {
		ret = lttng_metadata_printf(session,
			"	};\n");
		if (ret)
			goto end;
	}

	ret = lttng_metadata_printf(session,
		"	fields := struct {\n"
		);
	if (ret)
		goto end;

	ret = _lttng_fields_metadata_statedump(session, event);
	if (ret)
		goto end;

	/*
	 * LTTng space reservation can only reserve multiples of the
	 * byte size.
	 */
	ret = lttng_metadata_printf(session,
		"	};\n"
		"};\n\n");
	if (ret)
		goto end;

	event->metadata_dumped = 1;
end:
	lttng_metadata_end(session);
	return ret;

}
3319
3320 /*
3321 * Must be called with sessions_mutex held.
3322 * The entire channel metadata is printed as a single atomic metadata
3323 * transaction.
3324 */
3325 static
3326 int _lttng_channel_metadata_statedump(struct lttng_session *session,
3327 struct lttng_channel *chan)
3328 {
3329 int ret = 0;
3330
3331 if (chan->metadata_dumped || !LTTNG_READ_ONCE(session->active))
3332 return 0;
3333
3334 if (chan->channel_type == METADATA_CHANNEL)
3335 return 0;
3336
3337 lttng_metadata_begin(session);
3338
3339 WARN_ON_ONCE(!chan->header_type);
3340 ret = lttng_metadata_printf(session,
3341 "stream {\n"
3342 " id = %u;\n"
3343 " event.header := %s;\n"
3344 " packet.context := struct packet_context;\n",
3345 chan->id,
3346 chan->header_type == 1 ? "struct event_header_compact" :
3347 "struct event_header_large");
3348 if (ret)
3349 goto end;
3350
3351 if (chan->ctx) {
3352 ret = lttng_metadata_printf(session,
3353 " event.context := struct {\n");
3354 if (ret)
3355 goto end;
3356 }
3357 ret = _lttng_context_metadata_statedump(session, chan->ctx);
3358 if (ret)
3359 goto end;
3360 if (chan->ctx) {
3361 ret = lttng_metadata_printf(session,
3362 " };\n");
3363 if (ret)
3364 goto end;
3365 }
3366
3367 ret = lttng_metadata_printf(session,
3368 "};\n\n");
3369
3370 chan->metadata_dumped = 1;
3371 end:
3372 lttng_metadata_end(session);
3373 return ret;
3374 }
3375
3376 /*
3377 * Must be called with sessions_mutex held.
3378 */
static
int _lttng_stream_packet_context_declare(struct lttng_session *session)
{
	int ret;

	/* Emit the fixed TSDL declaration of struct packet_context. */
	ret = lttng_metadata_printf(session,
		"struct packet_context {\n"
		"	uint64_clock_monotonic_t timestamp_begin;\n"
		"	uint64_clock_monotonic_t timestamp_end;\n"
		"	uint64_t content_size;\n"
		"	uint64_t packet_size;\n"
		"	uint64_t packet_seq_num;\n"
		"	unsigned long events_discarded;\n"
		"	uint32_t cpu_id;\n"
		"};\n\n");
	return ret;
}
3394
3395 /*
3396 * Compact header:
3397 * id: range: 0 - 30.
3398 * id 31 is reserved to indicate an extended header.
3399 *
3400 * Large header:
3401 * id: range: 0 - 65534.
3402 * id 65535 is reserved to indicate an extended header.
3403 *
3404 * Must be called with sessions_mutex held.
3405 */
3406 static
3407 int _lttng_event_header_declare(struct lttng_session *session)
3408 {
3409 return lttng_metadata_printf(session,
3410 "struct event_header_compact {\n"
3411 " enum : uint5_t { compact = 0 ... 30, extended = 31 } id;\n"
3412 " variant <id> {\n"
3413 " struct {\n"
3414 " uint27_clock_monotonic_t timestamp;\n"
3415 " } compact;\n"
3416 " struct {\n"
3417 " uint32_t id;\n"
3418 " uint64_clock_monotonic_t timestamp;\n"
3419 " } extended;\n"
3420 " } v;\n"
3421 "} align(%u);\n"
3422 "\n"
3423 "struct event_header_large {\n"
3424 " enum : uint16_t { compact = 0 ... 65534, extended = 65535 } id;\n"
3425 " variant <id> {\n"
3426 " struct {\n"
3427 " uint32_clock_monotonic_t timestamp;\n"
3428 " } compact;\n"
3429 " struct {\n"
3430 " uint32_t id;\n"
3431 " uint64_clock_monotonic_t timestamp;\n"
3432 " } extended;\n"
3433 " } v;\n"
3434 "} align(%u);\n\n",
3435 lttng_alignof(uint32_t) * CHAR_BIT,
3436 lttng_alignof(uint16_t) * CHAR_BIT
3437 );
3438 }
3439
3440 /*
3441 * Approximation of NTP time of day to clock monotonic correlation,
3442 * taken at start of trace.
3443 * Yes, this is only an approximation. Yes, we can (and will) do better
3444 * in future versions.
3445 * This function may return a negative offset. It may happen if the
3446 * system sets the REALTIME clock to 0 after boot.
3447 *
3448 * Use 64bit timespec on kernels that have it, this makes 32bit arch
3449 * y2038 compliant.
3450 */
static
int64_t measure_clock_offset(void)
{
	uint64_t monotonic_avg, monotonic[2], realtime;
	uint64_t tcf = trace_clock_freq();
	int64_t offset;
	unsigned long flags;
#ifdef LTTNG_KERNEL_HAS_TIMESPEC64
	struct timespec64 rts = { 0, 0 };
#else
	struct timespec rts = { 0, 0 };
#endif

	/* Disable interrupts to increase correlation precision. */
	local_irq_save(flags);
	/*
	 * Sample the trace clock before and after reading REALTIME so the
	 * realtime sample can be correlated with the midpoint of the two
	 * monotonic samples. The order of these three reads is significant.
	 */
	monotonic[0] = trace_clock_read64();
#ifdef LTTNG_KERNEL_HAS_TIMESPEC64
	ktime_get_real_ts64(&rts);
#else
	getnstimeofday(&rts);
#endif
	monotonic[1] = trace_clock_read64();
	local_irq_restore(flags);

	monotonic_avg = (monotonic[0] + monotonic[1]) >> 1;
	/* Scale the realtime seconds to trace clock ticks. */
	realtime = (uint64_t) rts.tv_sec * tcf;
	if (tcf == NSEC_PER_SEC) {
		/* Trace clock runs at 1 GHz: nanoseconds map 1:1 to ticks. */
		realtime += rts.tv_nsec;
	} else {
		uint64_t n = rts.tv_nsec * tcf;

		/* do_div() is used for 64-bit division on 32-bit archs. */
		do_div(n, NSEC_PER_SEC);
		realtime += n;
	}
	/* May be negative if REALTIME was set to 0 after boot. */
	offset = (int64_t) realtime - monotonic_avg;
	return offset;
}
3488
3489 static
3490 int print_escaped_ctf_string(struct lttng_session *session, const char *string)
3491 {
3492 int ret = 0;
3493 size_t i;
3494 char cur;
3495
3496 i = 0;
3497 cur = string[i];
3498 while (cur != '\0') {
3499 switch (cur) {
3500 case '\n':
3501 ret = lttng_metadata_printf(session, "%s", "\\n");
3502 break;
3503 case '\\':
3504 case '"':
3505 ret = lttng_metadata_printf(session, "%c", '\\');
3506 if (ret)
3507 goto error;
3508 /* We still print the current char */
3509 /* Fallthrough */
3510 default:
3511 ret = lttng_metadata_printf(session, "%c", cur);
3512 break;
3513 }
3514
3515 if (ret)
3516 goto error;
3517
3518 cur = string[++i];
3519 }
3520 error:
3521 return ret;
3522 }
3523
/*
 * Emit one `name = "value";` line into the metadata, escaping the value
 * with print_escaped_ctf_string(). Returns 0 on success.
 */
static
int print_metadata_escaped_field(struct lttng_session *session, const char *field,
		const char *field_value)
{
	int ret;

	ret = lttng_metadata_printf(session, "	%s = \"", field);
	if (ret)
		return ret;

	ret = print_escaped_ctf_string(session, field_value);
	if (ret)
		return ret;

	return lttng_metadata_printf(session, "\";\n");
}
3543
3544 /*
3545 * Output metadata into this session's metadata buffers.
3546 * Must be called with sessions_mutex held.
3547 */
static
int _lttng_session_metadata_statedump(struct lttng_session *session)
{
	unsigned char *uuid_c = session->uuid.b;
	/*
	 * NOTE(review): uuid_s and clock_uuid_s are unsigned char arrays
	 * passed to snprintf()/"%s" which expect char * — relies on the
	 * character types being layout-compatible; confirm against the
	 * rest of the tree.
	 */
	unsigned char uuid_s[37], clock_uuid_s[BOOT_ID_LEN];
	const char *product_uuid;
	struct lttng_channel *chan;
	struct lttng_event *event;
	int ret = 0;

	if (!LTTNG_READ_ONCE(session->active))
		return 0;

	/* All of the output below forms one atomic metadata transaction. */
	lttng_metadata_begin(session);

	/*
	 * The session-wide preamble (trace/env/clock blocks) is emitted
	 * only once; later calls only describe new channels and events.
	 */
	if (session->metadata_dumped)
		goto skip_session;

	/* Format the 16 raw UUID bytes as the canonical 36-char string. */
	snprintf(uuid_s, sizeof(uuid_s),
		"%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
		uuid_c[0], uuid_c[1], uuid_c[2], uuid_c[3],
		uuid_c[4], uuid_c[5], uuid_c[6], uuid_c[7],
		uuid_c[8], uuid_c[9], uuid_c[10], uuid_c[11],
		uuid_c[12], uuid_c[13], uuid_c[14], uuid_c[15]);

	/* Base integer typealiases and the top-level trace block. */
	ret = lttng_metadata_printf(session,
		"typealias integer { size = 8; align = %u; signed = false; } := uint8_t;\n"
		"typealias integer { size = 16; align = %u; signed = false; } := uint16_t;\n"
		"typealias integer { size = 32; align = %u; signed = false; } := uint32_t;\n"
		"typealias integer { size = 64; align = %u; signed = false; } := uint64_t;\n"
		"typealias integer { size = %u; align = %u; signed = false; } := unsigned long;\n"
		"typealias integer { size = 5; align = 1; signed = false; } := uint5_t;\n"
		"typealias integer { size = 27; align = 1; signed = false; } := uint27_t;\n"
		"\n"
		"trace {\n"
		"	major = %u;\n"
		"	minor = %u;\n"
		"	uuid = \"%s\";\n"
		"	byte_order = %s;\n"
		"	packet.header := struct {\n"
		"		uint32_t magic;\n"
		"		uint8_t  uuid[16];\n"
		"		uint32_t stream_id;\n"
		"		uint64_t stream_instance_id;\n"
		"	};\n"
		"};\n\n",
		lttng_alignof(uint8_t) * CHAR_BIT,
		lttng_alignof(uint16_t) * CHAR_BIT,
		lttng_alignof(uint32_t) * CHAR_BIT,
		lttng_alignof(uint64_t) * CHAR_BIT,
		sizeof(unsigned long) * CHAR_BIT,
		lttng_alignof(unsigned long) * CHAR_BIT,
		CTF_SPEC_MAJOR,
		CTF_SPEC_MINOR,
		uuid_s,
#if __BYTE_ORDER == __BIG_ENDIAN
		"be"
#else
		"le"
#endif
		);
	if (ret)
		goto end;

	/* Environment block: host, kernel and tracer identification. */
	ret = lttng_metadata_printf(session,
		"env {\n"
		"	hostname = \"%s\";\n"
		"	domain = \"kernel\";\n"
		"	sysname = \"%s\";\n"
		"	kernel_release = \"%s\";\n"
		"	kernel_version = \"%s\";\n"
		"	tracer_name = \"lttng-modules\";\n"
		"	tracer_major = %d;\n"
		"	tracer_minor = %d;\n"
		"	tracer_patchlevel = %d;\n"
		"	trace_buffering_scheme = \"global\";\n",
		current->nsproxy->uts_ns->name.nodename,
		utsname()->sysname,
		utsname()->release,
		utsname()->version,
		LTTNG_MODULES_MAJOR_VERSION,
		LTTNG_MODULES_MINOR_VERSION,
		LTTNG_MODULES_PATCHLEVEL_VERSION
		);
	if (ret)
		goto end;

	/* Session name and creation time may contain CTF-special chars. */
	ret = print_metadata_escaped_field(session, "trace_name", session->name);
	if (ret)
		goto end;
	ret = print_metadata_escaped_field(session, "trace_creation_datetime",
			session->creation_time);
	if (ret)
		goto end;

	/* Add the product UUID to the 'env' section */
	product_uuid = dmi_get_system_info(DMI_PRODUCT_UUID);
	if (product_uuid) {
		ret = lttng_metadata_printf(session,
				"	product_uuid = \"%s\";\n",
				product_uuid
				);
		if (ret)
			goto end;
	}

	/* Close the 'env' section */
	ret = lttng_metadata_printf(session, "};\n\n");
	if (ret)
		goto end;

	/* Clock block: name, optional uuid, description, freq, offset. */
	ret = lttng_metadata_printf(session,
		"clock {\n"
		"	name = \"%s\";\n",
		trace_clock_name()
		);
	if (ret)
		goto end;

	if (!trace_clock_uuid(clock_uuid_s)) {
		ret = lttng_metadata_printf(session,
			"	uuid = \"%s\";\n",
			clock_uuid_s
			);
		if (ret)
			goto end;
	}

	ret = lttng_metadata_printf(session,
		"	description = \"%s\";\n"
		"	freq = %llu; /* Frequency, in Hz */\n"
		"	/* clock value offset from Epoch is: offset * (1/freq) */\n"
		"	offset = %lld;\n"
		"};\n\n",
		trace_clock_description(),
		(unsigned long long) trace_clock_freq(),
		(long long) measure_clock_offset()
		);
	if (ret)
		goto end;

	/* Clock-mapped integer typealiases used by headers and contexts. */
	ret = lttng_metadata_printf(session,
		"typealias integer {\n"
		"	size = 27; align = 1; signed = false;\n"
		"	map = clock.%s.value;\n"
		"} := uint27_clock_monotonic_t;\n"
		"\n"
		"typealias integer {\n"
		"	size = 32; align = %u; signed = false;\n"
		"	map = clock.%s.value;\n"
		"} := uint32_clock_monotonic_t;\n"
		"\n"
		"typealias integer {\n"
		"	size = 64; align = %u; signed = false;\n"
		"	map = clock.%s.value;\n"
		"} := uint64_clock_monotonic_t;\n\n",
		trace_clock_name(),
		lttng_alignof(uint32_t) * CHAR_BIT,
		trace_clock_name(),
		lttng_alignof(uint64_t) * CHAR_BIT,
		trace_clock_name()
		);
	if (ret)
		goto end;

	ret = _lttng_stream_packet_context_declare(session);
	if (ret)
		goto end;

	ret = _lttng_event_header_declare(session);
	if (ret)
		goto end;

skip_session:
	/* Describe any channel or event not yet present in the metadata. */
	list_for_each_entry(chan, &session->chan, list) {
		ret = _lttng_channel_metadata_statedump(session, chan);
		if (ret)
			goto end;
	}

	list_for_each_entry(event, &session->events, list) {
		ret = _lttng_event_metadata_statedump(session, event->chan, event);
		if (ret)
			goto end;
	}
	session->metadata_dumped = 1;
end:
	lttng_metadata_end(session);
	return ret;
}
3738
3739 /**
3740 * lttng_transport_register - LTT transport registration
3741 * @transport: transport structure
3742 *
3743 * Registers a transport which can be used as output to extract the data out of
3744 * LTTng. The module calling this registration function must ensure that no
3745 * trap-inducing code will be executed by the transport functions. E.g.
3746 * vmalloc_sync_mappings() must be called between a vmalloc and the moment the memory
3747 * is made visible to the transport function. This registration acts as a
3748 * vmalloc_sync_mappings. Therefore, only if the module allocates virtual memory
3749 * after its registration must it synchronize the TLBs.
3750 */
void lttng_transport_register(struct lttng_transport *transport)
{
	/*
	 * Make sure no page fault can be triggered by the module about to be
	 * registered. We deal with this here so we don't have to call
	 * vmalloc_sync_mappings() in each module's init.
	 */
	wrapper_vmalloc_sync_mappings();

	/* Publish the transport under the same mutex that readers take. */
	mutex_lock(&sessions_mutex);
	list_add_tail(&transport->node, &lttng_transport_list);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_transport_register);
3765
3766 /**
3767 * lttng_transport_unregister - LTT transport unregistration
3768 * @transport: transport structure
3769 */
void lttng_transport_unregister(struct lttng_transport *transport)
{
	/* Unlink under sessions_mutex so no lookup can race with removal. */
	mutex_lock(&sessions_mutex);
	list_del(&transport->node);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_transport_unregister);
3777
3778 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
3779
3780 enum cpuhp_state lttng_hp_prepare;
3781 enum cpuhp_state lttng_hp_online;
3782
3783 static int lttng_hotplug_prepare(unsigned int cpu, struct hlist_node *node)
3784 {
3785 struct lttng_cpuhp_node *lttng_node;
3786
3787 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
3788 switch (lttng_node->component) {
3789 case LTTNG_RING_BUFFER_FRONTEND:
3790 return 0;
3791 case LTTNG_RING_BUFFER_BACKEND:
3792 return lttng_cpuhp_rb_backend_prepare(cpu, lttng_node);
3793 case LTTNG_RING_BUFFER_ITER:
3794 return 0;
3795 case LTTNG_CONTEXT_PERF_COUNTERS:
3796 return 0;
3797 default:
3798 return -EINVAL;
3799 }
3800 }
3801
3802 static int lttng_hotplug_dead(unsigned int cpu, struct hlist_node *node)
3803 {
3804 struct lttng_cpuhp_node *lttng_node;
3805
3806 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
3807 switch (lttng_node->component) {
3808 case LTTNG_RING_BUFFER_FRONTEND:
3809 return lttng_cpuhp_rb_frontend_dead(cpu, lttng_node);
3810 case LTTNG_RING_BUFFER_BACKEND:
3811 return 0;
3812 case LTTNG_RING_BUFFER_ITER:
3813 return 0;
3814 case LTTNG_CONTEXT_PERF_COUNTERS:
3815 return lttng_cpuhp_perf_counter_dead(cpu, lttng_node);
3816 default:
3817 return -EINVAL;
3818 }
3819 }
3820
3821 static int lttng_hotplug_online(unsigned int cpu, struct hlist_node *node)
3822 {
3823 struct lttng_cpuhp_node *lttng_node;
3824
3825 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
3826 switch (lttng_node->component) {
3827 case LTTNG_RING_BUFFER_FRONTEND:
3828 return lttng_cpuhp_rb_frontend_online(cpu, lttng_node);
3829 case LTTNG_RING_BUFFER_BACKEND:
3830 return 0;
3831 case LTTNG_RING_BUFFER_ITER:
3832 return lttng_cpuhp_rb_iter_online(cpu, lttng_node);
3833 case LTTNG_CONTEXT_PERF_COUNTERS:
3834 return lttng_cpuhp_perf_counter_online(cpu, lttng_node);
3835 default:
3836 return -EINVAL;
3837 }
3838 }
3839
3840 static int lttng_hotplug_offline(unsigned int cpu, struct hlist_node *node)
3841 {
3842 struct lttng_cpuhp_node *lttng_node;
3843
3844 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
3845 switch (lttng_node->component) {
3846 case LTTNG_RING_BUFFER_FRONTEND:
3847 return lttng_cpuhp_rb_frontend_offline(cpu, lttng_node);
3848 case LTTNG_RING_BUFFER_BACKEND:
3849 return 0;
3850 case LTTNG_RING_BUFFER_ITER:
3851 return 0;
3852 case LTTNG_CONTEXT_PERF_COUNTERS:
3853 return 0;
3854 default:
3855 return -EINVAL;
3856 }
3857 }
3858
3859 static int __init lttng_init_cpu_hotplug(void)
3860 {
3861 int ret;
3862
3863 ret = cpuhp_setup_state_multi(CPUHP_BP_PREPARE_DYN, "lttng:prepare",
3864 lttng_hotplug_prepare,
3865 lttng_hotplug_dead);
3866 if (ret < 0) {
3867 return ret;
3868 }
3869 lttng_hp_prepare = ret;
3870 lttng_rb_set_hp_prepare(ret);
3871
3872 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "lttng:online",
3873 lttng_hotplug_online,
3874 lttng_hotplug_offline);
3875 if (ret < 0) {
3876 cpuhp_remove_multi_state(lttng_hp_prepare);
3877 lttng_hp_prepare = 0;
3878 return ret;
3879 }
3880 lttng_hp_online = ret;
3881 lttng_rb_set_hp_online(ret);
3882
3883 return 0;
3884 }
3885
static void __exit lttng_exit_cpu_hotplug(void)
{
	/* Tear down in reverse registration order: online first, then prepare. */
	lttng_rb_set_hp_online(0);
	cpuhp_remove_multi_state(lttng_hp_online);
	lttng_rb_set_hp_prepare(0);
	cpuhp_remove_multi_state(lttng_hp_prepare);
}
3893
3894 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
/* Pre-4.10 kernels: no cpuhp multi-state API, nothing to set up. */
static int lttng_init_cpu_hotplug(void)
{
	return 0;
}
/* Pre-4.10 kernels: nothing was registered, nothing to tear down. */
static void lttng_exit_cpu_hotplug(void)
{
}
3902 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
3903
3904
/*
 * Module init: set up wrappers, probes, contexts, tracepoints, the event
 * and event-notifier slab caches, the ABI and the logger, then CPU
 * hotplug. Unwinds in reverse order through the goto ladder on failure.
 */
static int __init lttng_events_init(void)
{
	int ret;

	ret = wrapper_lttng_fixup_sig(THIS_MODULE);
	if (ret)
		return ret;
	ret = wrapper_get_pfnblock_flags_mask_init();
	if (ret)
		return ret;
	ret = wrapper_get_pageblock_flags_mask_init();
	if (ret)
		return ret;
	ret = lttng_probes_init();
	if (ret)
		return ret;
	ret = lttng_context_init();
	if (ret)
		return ret;
	ret = lttng_tracepoint_init();
	if (ret)
		goto error_tp;
	/* Slab caches for the per-event and per-notifier structures. */
	event_cache = KMEM_CACHE(lttng_event, 0);
	if (!event_cache) {
		ret = -ENOMEM;
		goto error_kmem_event;
	}
	event_notifier_cache = KMEM_CACHE(lttng_event_notifier, 0);
	if (!event_notifier_cache) {
		ret = -ENOMEM;
		goto error_kmem_event_notifier;
	}
	ret = lttng_abi_init();
	if (ret)
		goto error_abi;
	ret = lttng_logger_init();
	if (ret)
		goto error_logger;
	ret = lttng_init_cpu_hotplug();
	if (ret)
		goto error_hotplug;
	printk(KERN_NOTICE "LTTng: Loaded modules v%s.%s.%s%s (%s)%s%s\n",
		__stringify(LTTNG_MODULES_MAJOR_VERSION),
		__stringify(LTTNG_MODULES_MINOR_VERSION),
		__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
		LTTNG_MODULES_EXTRAVERSION,
		LTTNG_VERSION_NAME,
#ifdef LTTNG_EXTRA_VERSION_GIT
		LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
#else
		"",
#endif
#ifdef LTTNG_EXTRA_VERSION_NAME
		LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
#else
		"");
#endif
	return 0;

	/* Error unwinding: each label undoes the step registered above it. */
error_hotplug:
	lttng_logger_exit();
error_logger:
	lttng_abi_exit();
error_abi:
	kmem_cache_destroy(event_notifier_cache);
error_kmem_event_notifier:
	kmem_cache_destroy(event_cache);
error_kmem_event:
	lttng_tracepoint_exit();
error_tp:
	lttng_context_exit();
	printk(KERN_NOTICE "LTTng: Failed to load modules v%s.%s.%s%s (%s)%s%s\n",
		__stringify(LTTNG_MODULES_MAJOR_VERSION),
		__stringify(LTTNG_MODULES_MINOR_VERSION),
		__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
		LTTNG_MODULES_EXTRAVERSION,
		LTTNG_VERSION_NAME,
#ifdef LTTNG_EXTRA_VERSION_GIT
		LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
#else
		"",
#endif
#ifdef LTTNG_EXTRA_VERSION_NAME
		LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
#else
		"");
#endif
	return ret;
}
3994
3995 module_init(lttng_events_init);
3996
/*
 * Module exit: tear down in reverse order of lttng_events_init(), then
 * destroy any session still alive before freeing the slab caches.
 */
static void __exit lttng_events_exit(void)
{
	struct lttng_session *session, *tmpsession;

	lttng_exit_cpu_hotplug();
	lttng_logger_exit();
	lttng_abi_exit();
	/* _safe variant: lttng_session_destroy() unlinks the session. */
	list_for_each_entry_safe(session, tmpsession, &sessions, list)
		lttng_session_destroy(session);
	kmem_cache_destroy(event_cache);
	kmem_cache_destroy(event_notifier_cache);
	lttng_tracepoint_exit();
	lttng_context_exit();
	printk(KERN_NOTICE "LTTng: Unloaded modules v%s.%s.%s%s (%s)%s%s\n",
		__stringify(LTTNG_MODULES_MAJOR_VERSION),
		__stringify(LTTNG_MODULES_MINOR_VERSION),
		__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
		LTTNG_MODULES_EXTRAVERSION,
		LTTNG_VERSION_NAME,
#ifdef LTTNG_EXTRA_VERSION_GIT
		LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
#else
		"",
#endif
#ifdef LTTNG_EXTRA_VERSION_NAME
		LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
#else
		"");
#endif
}
4027
4028 module_exit(lttng_events_exit);
4029
#include <generated/patches.h>
/* Module metadata: optional extra-version tags, license and version. */
#ifdef LTTNG_EXTRA_VERSION_GIT
MODULE_INFO(extra_version_git, LTTNG_EXTRA_VERSION_GIT);
#endif
#ifdef LTTNG_EXTRA_VERSION_NAME
MODULE_INFO(extra_version_name, LTTNG_EXTRA_VERSION_NAME);
#endif
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
MODULE_DESCRIPTION("LTTng tracer");
MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
	__stringify(LTTNG_MODULES_MINOR_VERSION) "."
	__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
	LTTNG_MODULES_EXTRAVERSION);