Introduce lazy system call event creation
[lttng-modules.git] / src / lttng-events.c
1 /* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
2 *
3 * lttng-events.c
4 *
5 * Holds LTTng per-session event registry.
6 *
7 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 */
9
10 /*
11 * This page_alloc.h wrapper needs to be included before gfpflags.h because it
12 * overrides a function with a define.
13 */
14 #include "wrapper/page_alloc.h"
15
16 #include <linux/module.h>
17 #include <linux/mutex.h>
18 #include <linux/sched.h>
19 #include <linux/slab.h>
20 #include <linux/jiffies.h>
21 #include <linux/utsname.h>
22 #include <linux/err.h>
23 #include <linux/seq_file.h>
24 #include <linux/file.h>
25 #include <linux/anon_inodes.h>
26 #include <wrapper/file.h>
27 #include <linux/uaccess.h>
28 #include <linux/vmalloc.h>
29 #include <linux/dmi.h>
30
31 #include <wrapper/uuid.h>
32 #include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_mappings() */
33 #include <wrapper/random.h>
34 #include <wrapper/tracepoint.h>
35 #include <wrapper/list.h>
36 #include <wrapper/types.h>
37 #include <lttng/kernel-version.h>
38 #include <lttng/events.h>
39 #include <lttng/lttng-bytecode.h>
40 #include <lttng/tracer.h>
41 #include <lttng/event-notifier-notification.h>
42 #include <lttng/abi-old.h>
43 #include <lttng/endian.h>
44 #include <lttng/string-utils.h>
45 #include <lttng/utils.h>
46 #include <ringbuffer/backend.h>
47 #include <ringbuffer/frontend.h>
48 #include <wrapper/time.h>
49
/* Initial allocation size (bytes) of a session's metadata cache buffer. */
#define METADATA_CACHE_DEFAULT_SIZE 4096

/* Global registries: sessions, notifier groups and registered transports. */
static LIST_HEAD(sessions);
static LIST_HEAD(event_notifier_groups);
static LIST_HEAD(lttng_transport_list);
static LIST_HEAD(lttng_counter_transport_list);
/*
 * Protect the sessions and metadata caches.
 */
static DEFINE_MUTEX(sessions_mutex);
/* Slab caches for lttng_event / lttng_event_notifier objects. */
static struct kmem_cache *event_cache;
static struct kmem_cache *event_notifier_cache;

/* Enabler synchronization helpers (defined later in this file). */
static void lttng_session_lazy_sync_event_enablers(struct lttng_session *session);
static void lttng_session_sync_event_enablers(struct lttng_session *session);
static void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler);
static void lttng_event_notifier_enabler_destroy(struct lttng_event_notifier_enabler *event_notifier_enabler);
static void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group);

/* Teardown helpers; callers hold the sessions mutex. */
static void _lttng_event_destroy(struct lttng_event *event);
static void _lttng_event_notifier_destroy(struct lttng_event_notifier *event_notifier);
static void _lttng_channel_destroy(struct lttng_channel *chan);
static int _lttng_event_unregister(struct lttng_event *event);
static int _lttng_event_notifier_unregister(struct lttng_event_notifier *event_notifier);
/* Metadata statedump helpers. */
static
int _lttng_event_metadata_statedump(struct lttng_session *session,
				  struct lttng_channel *chan,
				  struct lttng_event *event);
static
int _lttng_session_metadata_statedump(struct lttng_session *session);
static
void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream);
static
int _lttng_type_statedump(struct lttng_session *session,
		const struct lttng_type *type,
		size_t nesting);
static
int _lttng_field_statedump(struct lttng_session *session,
		const struct lttng_event_field *field,
		size_t nesting);
90
/*
 * Wait for a grace period covering all contexts in which tracing probes can
 * execute, so that previously-registered probes are guaranteed quiescent.
 *
 * On kernels >= 5.1, synchronize_sched() was removed and synchronize_rcu()
 * also covers preempt-disabled regions. On PREEMPT_RT configurations an
 * extra synchronize_rcu() is issued (the RT config symbol changed name
 * around 3.4, hence the two-level conditional).
 */
void synchronize_trace(void)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,1,0))
	synchronize_rcu();
#else
	synchronize_sched();
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
#ifdef CONFIG_PREEMPT_RT_FULL
	synchronize_rcu();
#endif
#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
#ifdef CONFIG_PREEMPT_RT
	synchronize_rcu();
#endif
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
}
109
/* Take the global sessions mutex (exported locking API for other files). */
void lttng_lock_sessions(void)
{
	mutex_lock(&sessions_mutex);
}
114
/* Release the global sessions mutex taken by lttng_lock_sessions(). */
void lttng_unlock_sessions(void)
{
	mutex_unlock(&sessions_mutex);
}
119
120 static struct lttng_transport *lttng_transport_find(const char *name)
121 {
122 struct lttng_transport *transport;
123
124 list_for_each_entry(transport, &lttng_transport_list, node) {
125 if (!strcmp(transport->name, name))
126 return transport;
127 }
128 return NULL;
129 }
130
131 /*
132 * Called with sessions lock held.
133 */
134 int lttng_session_active(void)
135 {
136 struct lttng_session *iter;
137
138 list_for_each_entry(iter, &sessions, list) {
139 if (iter->active)
140 return 1;
141 }
142 return 0;
143 }
144
/*
 * Allocate and register a new tracing session.
 *
 * Sets up the session's metadata cache (with its own refcount and lock),
 * the per-session event hash table, and the six ID trackers, then links
 * the session into the global sessions list.
 *
 * Returns the new session, or NULL on allocation failure.
 */
struct lttng_session *lttng_session_create(void)
{
	struct lttng_session *session;
	struct lttng_metadata_cache *metadata_cache;
	int i;

	mutex_lock(&sessions_mutex);
	session = lttng_kvzalloc(sizeof(struct lttng_session), GFP_KERNEL);
	if (!session)
		goto err;
	INIT_LIST_HEAD(&session->chan);
	INIT_LIST_HEAD(&session->events);
	lttng_guid_gen(&session->uuid);

	metadata_cache = kzalloc(sizeof(struct lttng_metadata_cache),
			GFP_KERNEL);
	if (!metadata_cache)
		goto err_free_session;
	metadata_cache->data = vzalloc(METADATA_CACHE_DEFAULT_SIZE);
	if (!metadata_cache->data)
		goto err_free_cache;
	metadata_cache->cache_alloc = METADATA_CACHE_DEFAULT_SIZE;
	kref_init(&metadata_cache->refcount);
	mutex_init(&metadata_cache->lock);
	session->metadata_cache = metadata_cache;
	INIT_LIST_HEAD(&metadata_cache->metadata_stream);
	/* The cache keeps its own copy of the session UUID. */
	memcpy(&metadata_cache->uuid, &session->uuid,
		sizeof(metadata_cache->uuid));
	INIT_LIST_HEAD(&session->enablers_head);
	for (i = 0; i < LTTNG_EVENT_HT_SIZE; i++)
		INIT_HLIST_HEAD(&session->events_ht.table[i]);
	list_add(&session->list, &sessions);
	/* Each ID tracker carries a back-pointer to its owning session. */
	session->pid_tracker.session = session;
	session->pid_tracker.tracker_type = TRACKER_PID;
	session->vpid_tracker.session = session;
	session->vpid_tracker.tracker_type = TRACKER_VPID;
	session->uid_tracker.session = session;
	session->uid_tracker.tracker_type = TRACKER_UID;
	session->vuid_tracker.session = session;
	session->vuid_tracker.tracker_type = TRACKER_VUID;
	session->gid_tracker.session = session;
	session->gid_tracker.tracker_type = TRACKER_GID;
	session->vgid_tracker.session = session;
	session->vgid_tracker.tracker_type = TRACKER_VGID;
	mutex_unlock(&sessions_mutex);
	return session;

err_free_cache:
	kfree(metadata_cache);
err_free_session:
	lttng_kvfree(session);
err:
	mutex_unlock(&sessions_mutex);
	return NULL;
}
200
201 static
202 struct lttng_counter_transport *lttng_counter_transport_find(const char *name)
203 {
204 struct lttng_counter_transport *transport;
205
206 list_for_each_entry(transport, &lttng_counter_transport_list, node) {
207 if (!strcmp(transport->name, name))
208 return transport;
209 }
210 return NULL;
211 }
212
/*
 * Create a counter backed by the named counter transport.
 *
 * Takes a reference on the transport module for the lifetime of the
 * counter. Returns the counter, or NULL on failure (transport not found,
 * module reference unavailable, allocation or backend creation failure).
 */
struct lttng_counter *lttng_kernel_counter_create(
		const char *counter_transport_name,
		size_t number_dimensions, const size_t *dimensions_sizes)
{
	struct lttng_counter *counter = NULL;
	struct lttng_counter_transport *counter_transport = NULL;

	counter_transport = lttng_counter_transport_find(counter_transport_name);
	if (!counter_transport) {
		printk(KERN_WARNING "LTTng: counter transport %s not found.\n",
				counter_transport_name);
		goto notransport;
	}
	if (!try_module_get(counter_transport->owner)) {
		printk(KERN_WARNING "LTTng: Can't lock counter transport module.\n");
		goto notransport;
	}

	counter = lttng_kvzalloc(sizeof(struct lttng_counter), GFP_KERNEL);
	if (!counter)
		goto nomem;

	/* Create event notifier error counter. */
	counter->ops = &counter_transport->ops;
	counter->transport = counter_transport;

	counter->counter = counter->ops->counter_create(
			number_dimensions, dimensions_sizes, 0);
	if (!counter->counter) {
		goto create_error;
	}

	return counter;

create_error:
	lttng_kvfree(counter);
nomem:
	/* Drop the module reference taken above on any post-get failure. */
	if (counter_transport)
		module_put(counter_transport->owner);
notransport:
	return NULL;
}
255
/*
 * Create an event notifier group and its notification ring buffer channel.
 *
 * Uses the "relay-event-notifier" transport to back the channel used for
 * sending event notifier notifications to user space. Takes a reference on
 * the transport module for the group's lifetime. Returns the group, or
 * NULL on failure.
 */
struct lttng_event_notifier_group *lttng_event_notifier_group_create(void)
{
	struct lttng_transport *transport = NULL;
	struct lttng_event_notifier_group *event_notifier_group;
	const char *transport_name = "relay-event-notifier";
	size_t subbuf_size = 4096;	//TODO: make buffer geometry configurable
	size_t num_subbuf = 16;		//TODO: make buffer geometry configurable
	unsigned int switch_timer_interval = 0;
	unsigned int read_timer_interval = 0;
	int i;

	mutex_lock(&sessions_mutex);

	transport = lttng_transport_find(transport_name);
	if (!transport) {
		printk(KERN_WARNING "LTTng: transport %s not found\n",
				transport_name);
		goto notransport;
	}
	if (!try_module_get(transport->owner)) {
		printk(KERN_WARNING "LTTng: Can't lock transport %s module.\n",
				transport_name);
		goto notransport;
	}

	event_notifier_group = lttng_kvzalloc(sizeof(struct lttng_event_notifier_group),
			GFP_KERNEL);
	if (!event_notifier_group)
		goto nomem;

	/*
	 * Initialize the ring buffer used to store event notifier
	 * notifications.
	 */
	event_notifier_group->ops = &transport->ops;
	event_notifier_group->chan = transport->ops.channel_create(
			transport_name, event_notifier_group, NULL,
			subbuf_size, num_subbuf, switch_timer_interval,
			read_timer_interval);
	if (!event_notifier_group->chan)
		goto create_error;

	event_notifier_group->transport = transport;

	INIT_LIST_HEAD(&event_notifier_group->enablers_head);
	INIT_LIST_HEAD(&event_notifier_group->event_notifiers_head);
	for (i = 0; i < LTTNG_EVENT_NOTIFIER_HT_SIZE; i++)
		INIT_HLIST_HEAD(&event_notifier_group->event_notifiers_ht.table[i]);

	list_add(&event_notifier_group->node, &event_notifier_groups);

	mutex_unlock(&sessions_mutex);

	return event_notifier_group;

create_error:
	lttng_kvfree(event_notifier_group);
nomem:
	/* Drop the module reference taken above on any post-get failure. */
	if (transport)
		module_put(transport->owner);
notransport:
	mutex_unlock(&sessions_mutex);
	return NULL;
}
320
321 void metadata_cache_destroy(struct kref *kref)
322 {
323 struct lttng_metadata_cache *cache =
324 container_of(kref, struct lttng_metadata_cache, refcount);
325 vfree(cache->data);
326 kfree(cache);
327 }
328
/*
 * Tear down a tracing session.
 *
 * Order matters: deactivate the session, unregister all probes, wait a
 * grace period (synchronize_trace) so no probe is still executing, and
 * only then free enablers, events and channels. Metadata streams are hung
 * up and the metadata cache reference is dropped (freed by kref when the
 * last holder goes away).
 */
void lttng_session_destroy(struct lttng_session *session)
{
	struct lttng_channel *chan, *tmpchan;
	struct lttng_event *event, *tmpevent;
	struct lttng_metadata_stream *metadata_stream;
	struct lttng_event_enabler *event_enabler, *tmp_event_enabler;
	int ret;

	mutex_lock(&sessions_mutex);
	WRITE_ONCE(session->active, 0);
	list_for_each_entry(chan, &session->chan, list) {
		ret = lttng_syscalls_unregister_channel(chan);
		WARN_ON(ret);
	}
	list_for_each_entry(event, &session->events, list) {
		ret = _lttng_event_unregister(event);
		WARN_ON(ret);
	}
	synchronize_trace();	/* Wait for in-flight events to complete */
	list_for_each_entry(chan, &session->chan, list) {
		ret = lttng_syscalls_destroy_event(chan);
		WARN_ON(ret);
	}
	list_for_each_entry_safe(event_enabler, tmp_event_enabler,
			&session->enablers_head, node)
		lttng_event_enabler_destroy(event_enabler);
	list_for_each_entry_safe(event, tmpevent, &session->events, list)
		_lttng_event_destroy(event);
	list_for_each_entry_safe(chan, tmpchan, &session->chan, list) {
		/* Metadata channels are destroyed via their own release path. */
		BUG_ON(chan->channel_type == METADATA_CHANNEL);
		_lttng_channel_destroy(chan);
	}
	mutex_lock(&session->metadata_cache->lock);
	list_for_each_entry(metadata_stream, &session->metadata_cache->metadata_stream, list)
		_lttng_metadata_channel_hangup(metadata_stream);
	mutex_unlock(&session->metadata_cache->lock);
	lttng_id_tracker_destroy(&session->pid_tracker, false);
	lttng_id_tracker_destroy(&session->vpid_tracker, false);
	lttng_id_tracker_destroy(&session->uid_tracker, false);
	lttng_id_tracker_destroy(&session->vuid_tracker, false);
	lttng_id_tracker_destroy(&session->gid_tracker, false);
	lttng_id_tracker_destroy(&session->vgid_tracker, false);
	kref_put(&session->metadata_cache->refcount, metadata_cache_destroy);
	list_del(&session->list);
	mutex_unlock(&sessions_mutex);
	lttng_kvfree(session);
}
376
/*
 * Tear down an event notifier group.
 *
 * Mirrors session destruction: unregister all notifiers, wait a grace
 * period, flush pending wakeup irq_work, then free enablers, notifiers,
 * the optional error counter, and finally the notification channel and
 * the group itself. No-op when passed NULL.
 */
void lttng_event_notifier_group_destroy(
		struct lttng_event_notifier_group *event_notifier_group)
{
	struct lttng_event_notifier_enabler *event_notifier_enabler, *tmp_event_notifier_enabler;
	struct lttng_event_notifier *event_notifier, *tmpevent_notifier;
	int ret;

	if (!event_notifier_group)
		return;

	mutex_lock(&sessions_mutex);

	ret = lttng_syscalls_unregister_event_notifier_group(event_notifier_group);
	WARN_ON(ret);

	list_for_each_entry_safe(event_notifier, tmpevent_notifier,
			&event_notifier_group->event_notifiers_head, list) {
		ret = _lttng_event_notifier_unregister(event_notifier);
		WARN_ON(ret);
	}

	/* Wait for in-flight event notifier to complete */
	synchronize_trace();

	/* Ensure no pending wakeup work references the group after free. */
	irq_work_sync(&event_notifier_group->wakeup_pending);

	kfree(event_notifier_group->sc_filter);

	list_for_each_entry_safe(event_notifier_enabler, tmp_event_notifier_enabler,
			&event_notifier_group->enablers_head, node)
		lttng_event_notifier_enabler_destroy(event_notifier_enabler);

	list_for_each_entry_safe(event_notifier, tmpevent_notifier,
			&event_notifier_group->event_notifiers_head, list)
		_lttng_event_notifier_destroy(event_notifier);

	if (event_notifier_group->error_counter) {
		struct lttng_counter *error_counter = event_notifier_group->error_counter;
		/* Release the counter backend and its transport module ref. */
		error_counter->ops->counter_destroy(error_counter->counter);
		module_put(error_counter->transport->owner);
		lttng_kvfree(error_counter);
		event_notifier_group->error_counter = NULL;
	}

	event_notifier_group->ops->channel_destroy(event_notifier_group->chan);
	module_put(event_notifier_group->transport->owner);
	list_del(&event_notifier_group->node);

	mutex_unlock(&sessions_mutex);
	lttng_kvfree(event_notifier_group);
}
428
/*
 * Trigger a state dump for the given session, serialized by the sessions
 * mutex. Returns the result of lttng_statedump_start().
 */
int lttng_session_statedump(struct lttng_session *session)
{
	int ret;

	mutex_lock(&sessions_mutex);
	ret = lttng_statedump_start(session);
	mutex_unlock(&sessions_mutex);
	return ret;
}
438
/*
 * Activate a tracing session.
 *
 * Syncs enablers before activation, snapshots each channel's header type
 * (compact vs large, based on the number of allocated event IDs), clears
 * stream quiescence, then marks the session active and performs the
 * metadata and kernel state dumps. Returns -EBUSY if already active; on
 * statedump failure the session is deactivated again and the error
 * returned.
 */
int lttng_session_enable(struct lttng_session *session)
{
	int ret = 0;
	struct lttng_channel *chan;

	mutex_lock(&sessions_mutex);
	if (session->active) {
		ret = -EBUSY;
		goto end;
	}

	/* Set transient enabler state to "enabled" */
	session->tstate = 1;

	/* We need to sync enablers with session before activation. */
	lttng_session_sync_event_enablers(session);

	/*
	 * Snapshot the number of events per channel to know the type of header
	 * we need to use.
	 */
	list_for_each_entry(chan, &session->chan, list) {
		if (chan->header_type)
			continue;		/* don't change it if session stop/restart */
		if (chan->free_event_id < 31)
			chan->header_type = 1;	/* compact */
		else
			chan->header_type = 2;	/* large */
	}

	/* Clear each stream's quiescent state. */
	list_for_each_entry(chan, &session->chan, list) {
		if (chan->channel_type != METADATA_CHANNEL)
			lib_ring_buffer_clear_quiescent_channel(chan->chan);
	}

	WRITE_ONCE(session->active, 1);
	WRITE_ONCE(session->been_active, 1);
	ret = _lttng_session_metadata_statedump(session);
	if (ret) {
		WRITE_ONCE(session->active, 0);
		goto end;
	}
	ret = lttng_statedump_start(session);
	if (ret)
		WRITE_ONCE(session->active, 0);
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
489
/*
 * Deactivate a tracing session.
 *
 * Marks the session inactive, syncs enablers with the "disabled" transient
 * state, and sets every non-metadata stream quiescent. Returns -EBUSY if
 * the session is not currently active.
 */
int lttng_session_disable(struct lttng_session *session)
{
	int ret = 0;
	struct lttng_channel *chan;

	mutex_lock(&sessions_mutex);
	if (!session->active) {
		ret = -EBUSY;
		goto end;
	}
	WRITE_ONCE(session->active, 0);

	/* Set transient enabler state to "disabled" */
	session->tstate = 0;
	lttng_session_sync_event_enablers(session);

	/* Set each stream's quiescent state. */
	list_for_each_entry(chan, &session->chan, list) {
		if (chan->channel_type != METADATA_CHANNEL)
			lib_ring_buffer_set_quiescent_channel(chan->chan);
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
515
/*
 * Regenerate the session's metadata from scratch.
 *
 * Clears the metadata cache (bumping its version), resets the in/out
 * positions of all metadata streams and the dumped flags of the session,
 * its channels and events, then redoes the metadata statedump. Only valid
 * while the session is active (-EBUSY otherwise).
 */
int lttng_session_metadata_regenerate(struct lttng_session *session)
{
	int ret = 0;
	struct lttng_channel *chan;
	struct lttng_event *event;
	struct lttng_metadata_cache *cache = session->metadata_cache;
	struct lttng_metadata_stream *stream;

	mutex_lock(&sessions_mutex);
	if (!session->active) {
		ret = -EBUSY;
		goto end;
	}

	mutex_lock(&cache->lock);
	memset(cache->data, 0, cache->cache_alloc);
	cache->metadata_written = 0;
	cache->version++;
	list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list) {
		stream->metadata_out = 0;
		stream->metadata_in = 0;
	}
	mutex_unlock(&cache->lock);

	session->metadata_dumped = 0;
	list_for_each_entry(chan, &session->chan, list) {
		chan->metadata_dumped = 0;
	}

	list_for_each_entry(event, &session->events, list) {
		event->metadata_dumped = 0;
	}

	ret = _lttng_session_metadata_statedump(session);

end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
555
/*
 * Enable a channel.
 *
 * Refused for metadata channels (-EPERM) and when already enabled
 * (-EEXIST). Syncs session enablers with the new transient state before
 * flipping the enabled flag.
 */
int lttng_channel_enable(struct lttng_channel *channel)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (channel->channel_type == METADATA_CHANNEL) {
		ret = -EPERM;
		goto end;
	}
	if (channel->enabled) {
		ret = -EEXIST;
		goto end;
	}
	/* Set transient enabler state to "enabled" */
	channel->tstate = 1;
	lttng_session_sync_event_enablers(channel->session);
	/* Set atomically the state to "enabled" */
	WRITE_ONCE(channel->enabled, 1);
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
578
/*
 * Disable a channel.
 *
 * Refused for metadata channels (-EPERM) and when already disabled
 * (-EEXIST). The enabled flag is cleared first, then enablers are synced
 * with the new transient state.
 */
int lttng_channel_disable(struct lttng_channel *channel)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (channel->channel_type == METADATA_CHANNEL) {
		ret = -EPERM;
		goto end;
	}
	if (!channel->enabled) {
		ret = -EEXIST;
		goto end;
	}
	/* Set atomically the state to "disabled" */
	WRITE_ONCE(channel->enabled, 0);
	/* Set transient enabler state to "disabled" */
	channel->tstate = 0;
	lttng_session_sync_event_enablers(channel->session);
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
601
/*
 * Enable a single event.
 *
 * Not allowed on metadata channels (-EPERM) or already-enabled events
 * (-EEXIST). Tracepoint and syscall events are controlled by enablers,
 * not individually, hence -EINVAL. Kretprobes toggle both the entry and
 * return events through lttng_kretprobes_event_enable_state().
 */
int lttng_event_enable(struct lttng_event *event)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (event->chan->channel_type == METADATA_CHANNEL) {
		ret = -EPERM;
		goto end;
	}
	if (event->enabled) {
		ret = -EEXIST;
		goto end;
	}
	switch (event->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
	case LTTNG_KERNEL_SYSCALL:
		ret = -EINVAL;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
	case LTTNG_KERNEL_NOOP:
		WRITE_ONCE(event->enabled, 1);
		break;
	case LTTNG_KERNEL_KRETPROBE:
		ret = lttng_kretprobes_event_enable_state(event, 1);
		break;
	case LTTNG_KERNEL_FUNCTION:	/* Fall-through. */
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
637
/*
 * Disable a single event. Symmetric to lttng_event_enable(): -EPERM on
 * metadata channels, -EEXIST when already disabled, -EINVAL for
 * enabler-controlled instrumentation (tracepoint/syscall).
 */
int lttng_event_disable(struct lttng_event *event)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (event->chan->channel_type == METADATA_CHANNEL) {
		ret = -EPERM;
		goto end;
	}
	if (!event->enabled) {
		ret = -EEXIST;
		goto end;
	}
	switch (event->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
	case LTTNG_KERNEL_SYSCALL:
		ret = -EINVAL;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
	case LTTNG_KERNEL_NOOP:
		WRITE_ONCE(event->enabled, 0);
		break;
	case LTTNG_KERNEL_KRETPROBE:
		ret = lttng_kretprobes_event_enable_state(event, 0);
		break;
	case LTTNG_KERNEL_FUNCTION:	/* Fall-through. */
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
673
/*
 * Enable an event notifier.
 *
 * -EEXIST when already enabled; -EINVAL for enabler-controlled
 * instrumentation (tracepoint/syscall) and for kinds not supported by
 * notifiers (kretprobe, function, noop).
 */
int lttng_event_notifier_enable(struct lttng_event_notifier *event_notifier)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (event_notifier->enabled) {
		ret = -EEXIST;
		goto end;
	}
	switch (event_notifier->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
	case LTTNG_KERNEL_SYSCALL:
		ret = -EINVAL;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
		WRITE_ONCE(event_notifier->enabled, 1);
		break;
	case LTTNG_KERNEL_FUNCTION:
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_KRETPROBE:
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
703
/*
 * Disable an event notifier. Symmetric to lttng_event_notifier_enable():
 * -EEXIST when already disabled, -EINVAL for unsupported or
 * enabler-controlled instrumentation kinds.
 */
int lttng_event_notifier_disable(struct lttng_event_notifier *event_notifier)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (!event_notifier->enabled) {
		ret = -EEXIST;
		goto end;
	}
	switch (event_notifier->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
	case LTTNG_KERNEL_SYSCALL:
		ret = -EINVAL;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
		WRITE_ONCE(event_notifier->enabled, 0);
		break;
	case LTTNG_KERNEL_FUNCTION:
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_KRETPROBE:
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
733
/*
 * Create a channel in a session using the named ring-buffer transport.
 *
 * Refused when the session has ever been active, except for metadata
 * channels. Takes a reference on the transport module for the channel's
 * lifetime. The channel starts enabled. Returns the channel, or NULL on
 * failure.
 */
struct lttng_channel *lttng_channel_create(struct lttng_session *session,
				       const char *transport_name,
				       void *buf_addr,
				       size_t subbuf_size, size_t num_subbuf,
				       unsigned int switch_timer_interval,
				       unsigned int read_timer_interval,
				       enum channel_type channel_type)
{
	struct lttng_channel *chan;
	struct lttng_transport *transport = NULL;

	mutex_lock(&sessions_mutex);
	if (session->been_active && channel_type != METADATA_CHANNEL)
		goto active;	/* Refuse to add channel to active session */
	transport = lttng_transport_find(transport_name);
	if (!transport) {
		printk(KERN_WARNING "LTTng: transport %s not found\n",
		       transport_name);
		goto notransport;
	}
	if (!try_module_get(transport->owner)) {
		printk(KERN_WARNING "LTTng: Can't lock transport module.\n");
		goto notransport;
	}
	chan = kzalloc(sizeof(struct lttng_channel), GFP_KERNEL);
	if (!chan)
		goto nomem;
	chan->session = session;
	chan->id = session->free_chan_id++;
	chan->ops = &transport->ops;
	/*
	 * Note: the channel creation op already writes into the packet
	 * headers. Therefore the "chan" information used as input
	 * should be already accessible.
	 */
	chan->chan = transport->ops.channel_create(transport_name,
			chan, buf_addr, subbuf_size, num_subbuf,
			switch_timer_interval, read_timer_interval);
	if (!chan->chan)
		goto create_error;
	chan->tstate = 1;
	chan->enabled = 1;
	chan->transport = transport;
	chan->channel_type = channel_type;
	list_add(&chan->list, &session->chan);
	mutex_unlock(&sessions_mutex);
	return chan;

create_error:
	kfree(chan);
nomem:
	/* Drop the module reference taken above on any post-get failure. */
	if (transport)
		module_put(transport->owner);
notransport:
active:
	mutex_unlock(&sessions_mutex);
	return NULL;
}
792
/*
 * Only used internally at session destruction for per-cpu channels, and
 * when metadata channel is released.
 * Needs to be called with sessions mutex held.
 */
static
void _lttng_channel_destroy(struct lttng_channel *chan)
{
	/* Destroy the backend channel before dropping the transport ref. */
	chan->ops->channel_destroy(chan->chan);
	module_put(chan->transport->owner);
	list_del(&chan->list);
	lttng_destroy_context(chan->ctx);
	kfree(chan);
}
807
/*
 * Destroy a metadata channel. Public entry point taking the sessions
 * mutex itself; only valid for METADATA_CHANNEL channels.
 */
void lttng_metadata_channel_destroy(struct lttng_channel *chan)
{
	BUG_ON(chan->channel_type != METADATA_CHANNEL);

	/* Protect the metadata cache with the sessions_mutex. */
	mutex_lock(&sessions_mutex);
	_lttng_channel_destroy(chan);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_metadata_channel_destroy);
818
/*
 * Hang up a metadata stream: mark it finalized, then wake any reader
 * blocked on it so it can observe the finalized state and return.
 */
static
void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream)
{
	stream->finalized = 1;
	wake_up_interruptible(&stream->read_wait);
}
825
826
/*
 * Supports event creation while tracing session is active.
 * Needs to be called with sessions mutex held.
 *
 * Allocates an lttng_event in "chan", dispatching on the instrumentation
 * type for registration: tracepoints and syscalls stay unregistered (the
 * enabler sync registers them later), while kprobe/kretprobe/uprobe are
 * registered here and created disabled so filters can be attached first.
 * On success the event is hashed by name, its metadata statedump emitted,
 * and it is linked into the session's event list. Returns the event or an
 * ERR_PTR error code.
 */
struct lttng_event *_lttng_event_create(struct lttng_channel *chan,
				struct lttng_kernel_event *event_param,
				void *filter,
				const struct lttng_event_desc *event_desc,
				enum lttng_kernel_instrumentation itype)
{
	struct lttng_session *session = chan->session;
	struct lttng_event *event;
	const char *event_name;
	struct hlist_head *head;
	int ret;

	/* Event IDs are allocated per channel; refuse once exhausted. */
	if (chan->free_event_id == -1U) {
		ret = -EMFILE;
		goto full;
	}

	/* Resolve the event name from the descriptor or user parameters. */
	switch (itype) {
	case LTTNG_KERNEL_TRACEPOINT:
		event_name = event_desc->name;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
	case LTTNG_KERNEL_KRETPROBE:
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_SYSCALL:
		event_name = event_param->name;
		break;
	case LTTNG_KERNEL_FUNCTION:	/* Fall-through. */
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
		goto type_error;
	}

	/* Reject duplicate (event name, channel) pairs. */
	head = utils_borrow_hash_table_bucket(session->events_ht.table,
		LTTNG_EVENT_HT_SIZE, event_name);
	lttng_hlist_for_each_entry(event, head, hlist) {
		WARN_ON_ONCE(!event->desc);
		if (!strncmp(event->desc->name, event_name,
					LTTNG_KERNEL_SYM_NAME_LEN - 1)
				&& chan == event->chan) {
			ret = -EEXIST;
			goto exist;
		}
	}

	event = kmem_cache_zalloc(event_cache, GFP_KERNEL);
	if (!event) {
		ret = -ENOMEM;
		goto cache_error;
	}
	event->chan = chan;
	event->filter = filter;
	event->id = chan->free_event_id++;
	event->instrumentation = itype;
	event->evtype = LTTNG_TYPE_EVENT;
	INIT_LIST_HEAD(&event->filter_bytecode_runtime_head);
	INIT_LIST_HEAD(&event->enablers_ref_head);

	switch (itype) {
	case LTTNG_KERNEL_TRACEPOINT:
		/* Event will be enabled by enabler sync. */
		event->enabled = 0;
		event->registered = 0;
		event->desc = lttng_event_desc_get(event_name);
		if (!event->desc) {
			ret = -ENOENT;
			goto register_error;
		}
		/* Populate lttng_event structure before event registration. */
		smp_wmb();
		break;
	case LTTNG_KERNEL_KPROBE:
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->registered = 1;
		/*
		 * Populate lttng_event structure before event
		 * registration.
		 */
		smp_wmb();
		ret = lttng_kprobes_register_event(event_name,
				event_param->u.kprobe.symbol_name,
				event_param->u.kprobe.offset,
				event_param->u.kprobe.addr,
				event);
		if (ret) {
			ret = -EINVAL;
			goto register_error;
		}
		/* Pin the probe provider module while the event exists. */
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		break;
	case LTTNG_KERNEL_KRETPROBE:
	{
		struct lttng_event *event_return;

		/* kretprobe defines 2 events */
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->registered = 1;
		event_return =
			kmem_cache_zalloc(event_cache, GFP_KERNEL);
		if (!event_return) {
			ret = -ENOMEM;
			goto register_error;
		}
		event_return->chan = chan;
		event_return->filter = filter;
		event_return->id = chan->free_event_id++;
		event_return->enabled = 0;
		event_return->registered = 1;
		event_return->instrumentation = itype;
		/*
		 * Populate lttng_event structure before kretprobe registration.
		 */
		smp_wmb();
		ret = lttng_kretprobes_register(event_name,
				event_param->u.kretprobe.symbol_name,
				event_param->u.kretprobe.offset,
				event_param->u.kretprobe.addr,
				event, event_return);
		if (ret) {
			kmem_cache_free(event_cache, event_return);
			ret = -EINVAL;
			goto register_error;
		}
		/* Take 2 refs on the module: one per event. */
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		ret = _lttng_event_metadata_statedump(chan->session, chan,
						    event_return);
		WARN_ON_ONCE(ret > 0);
		if (ret) {
			/* Undo the return-event allocation and both refs. */
			kmem_cache_free(event_cache, event_return);
			module_put(event->desc->owner);
			module_put(event->desc->owner);
			goto statedump_error;
		}
		list_add(&event_return->list, &chan->session->events);
		break;
	}
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_SYSCALL:
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->registered = 0;
		event->desc = event_desc;
		/* Exactly one of entry/exit must be selected. */
		switch (event_param->u.syscall.entryexit) {
		case LTTNG_KERNEL_SYSCALL_ENTRYEXIT:
			ret = -EINVAL;
			goto register_error;
		case LTTNG_KERNEL_SYSCALL_ENTRY:
			event->u.syscall.entryexit = LTTNG_SYSCALL_ENTRY;
			break;
		case LTTNG_KERNEL_SYSCALL_EXIT:
			event->u.syscall.entryexit = LTTNG_SYSCALL_EXIT;
			break;
		}
		/* Exactly one ABI (native/compat) must be selected. */
		switch (event_param->u.syscall.abi) {
		case LTTNG_KERNEL_SYSCALL_ABI_ALL:
			ret = -EINVAL;
			goto register_error;
		case LTTNG_KERNEL_SYSCALL_ABI_NATIVE:
			event->u.syscall.abi = LTTNG_SYSCALL_ABI_NATIVE;
			break;
		case LTTNG_KERNEL_SYSCALL_ABI_COMPAT:
			event->u.syscall.abi = LTTNG_SYSCALL_ABI_COMPAT;
			break;
		}
		if (!event->desc) {
			ret = -EINVAL;
			goto register_error;
		}
		break;
	case LTTNG_KERNEL_UPROBE:
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->registered = 1;

		/*
		 * Populate lttng_event structure before event
		 * registration.
		 */
		smp_wmb();

		ret = lttng_uprobes_register_event(event_param->name,
				event_param->u.uprobe.fd,
				event);
		if (ret)
			goto register_error;
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		break;
	case LTTNG_KERNEL_FUNCTION:	/* Fall-through */
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
		goto register_error;
	}
	ret = _lttng_event_metadata_statedump(chan->session, chan, event);
	WARN_ON_ONCE(ret > 0);
	if (ret) {
		goto statedump_error;
	}
	hlist_add_head(&event->hlist, head);
	list_add(&event->list, &chan->session->events);
	return event;

statedump_error:
	/* If a statedump error occurs, events will not be readable. */
register_error:
	kmem_cache_free(event_cache, event);
cache_error:
exist:
type_error:
full:
	return ERR_PTR(ret);
}
1065
1066 struct lttng_event_notifier *_lttng_event_notifier_create(
1067 const struct lttng_event_desc *event_desc,
1068 uint64_t token, uint64_t error_counter_index,
1069 struct lttng_event_notifier_group *event_notifier_group,
1070 struct lttng_kernel_event_notifier *event_notifier_param,
1071 void *filter, enum lttng_kernel_instrumentation itype)
1072 {
1073 struct lttng_event_notifier *event_notifier;
1074 const char *event_name;
1075 struct hlist_head *head;
1076 int ret;
1077
1078 switch (itype) {
1079 case LTTNG_KERNEL_TRACEPOINT:
1080 event_name = event_desc->name;
1081 break;
1082 case LTTNG_KERNEL_KPROBE:
1083 case LTTNG_KERNEL_UPROBE:
1084 case LTTNG_KERNEL_SYSCALL:
1085 event_name = event_notifier_param->event.name;
1086 break;
1087 case LTTNG_KERNEL_KRETPROBE:
1088 case LTTNG_KERNEL_FUNCTION:
1089 case LTTNG_KERNEL_NOOP:
1090 default:
1091 WARN_ON_ONCE(1);
1092 ret = -EINVAL;
1093 goto type_error;
1094 }
1095
1096 head = utils_borrow_hash_table_bucket(event_notifier_group->event_notifiers_ht.table,
1097 LTTNG_EVENT_NOTIFIER_HT_SIZE, event_name);
1098 lttng_hlist_for_each_entry(event_notifier, head, hlist) {
1099 WARN_ON_ONCE(!event_notifier->desc);
1100 if (!strncmp(event_notifier->desc->name, event_name,
1101 LTTNG_KERNEL_SYM_NAME_LEN - 1)
1102 && event_notifier_group == event_notifier->group
1103 && token == event_notifier->user_token) {
1104 ret = -EEXIST;
1105 goto exist;
1106 }
1107 }
1108
1109 event_notifier = kmem_cache_zalloc(event_notifier_cache, GFP_KERNEL);
1110 if (!event_notifier) {
1111 ret = -ENOMEM;
1112 goto cache_error;
1113 }
1114
1115 event_notifier->group = event_notifier_group;
1116 event_notifier->user_token = token;
1117 event_notifier->error_counter_index = error_counter_index;
1118 event_notifier->num_captures = 0;
1119 event_notifier->filter = filter;
1120 event_notifier->instrumentation = itype;
1121 event_notifier->evtype = LTTNG_TYPE_EVENT;
1122 event_notifier->send_notification = lttng_event_notifier_notification_send;
1123 INIT_LIST_HEAD(&event_notifier->filter_bytecode_runtime_head);
1124 INIT_LIST_HEAD(&event_notifier->capture_bytecode_runtime_head);
1125 INIT_LIST_HEAD(&event_notifier->enablers_ref_head);
1126
1127 switch (itype) {
1128 case LTTNG_KERNEL_TRACEPOINT:
1129 /* Event will be enabled by enabler sync. */
1130 event_notifier->enabled = 0;
1131 event_notifier->registered = 0;
1132 event_notifier->desc = lttng_event_desc_get(event_name);
1133 if (!event_notifier->desc) {
1134 ret = -ENOENT;
1135 goto register_error;
1136 }
1137 /* Populate lttng_event_notifier structure before event registration. */
1138 smp_wmb();
1139 break;
1140 case LTTNG_KERNEL_KPROBE:
1141 /*
1142 * Needs to be explicitly enabled after creation, since
1143 * we may want to apply filters.
1144 */
1145 event_notifier->enabled = 0;
1146 event_notifier->registered = 1;
1147 /*
1148 * Populate lttng_event_notifier structure before event
1149 * registration.
1150 */
1151 smp_wmb();
1152 ret = lttng_kprobes_register_event_notifier(
1153 event_notifier_param->event.u.kprobe.symbol_name,
1154 event_notifier_param->event.u.kprobe.offset,
1155 event_notifier_param->event.u.kprobe.addr,
1156 event_notifier);
1157 if (ret) {
1158 ret = -EINVAL;
1159 goto register_error;
1160 }
1161 ret = try_module_get(event_notifier->desc->owner);
1162 WARN_ON_ONCE(!ret);
1163 break;
1164 case LTTNG_KERNEL_NOOP:
1165 case LTTNG_KERNEL_SYSCALL:
1166 /*
1167 * Needs to be explicitly enabled after creation, since
1168 * we may want to apply filters.
1169 */
1170 event_notifier->enabled = 0;
1171 event_notifier->registered = 0;
1172 event_notifier->desc = event_desc;
1173 switch (event_notifier_param->event.u.syscall.entryexit) {
1174 case LTTNG_KERNEL_SYSCALL_ENTRYEXIT:
1175 ret = -EINVAL;
1176 goto register_error;
1177 case LTTNG_KERNEL_SYSCALL_ENTRY:
1178 event_notifier->u.syscall.entryexit = LTTNG_SYSCALL_ENTRY;
1179 break;
1180 case LTTNG_KERNEL_SYSCALL_EXIT:
1181 event_notifier->u.syscall.entryexit = LTTNG_SYSCALL_EXIT;
1182 break;
1183 }
1184 switch (event_notifier_param->event.u.syscall.abi) {
1185 case LTTNG_KERNEL_SYSCALL_ABI_ALL:
1186 ret = -EINVAL;
1187 goto register_error;
1188 case LTTNG_KERNEL_SYSCALL_ABI_NATIVE:
1189 event_notifier->u.syscall.abi = LTTNG_SYSCALL_ABI_NATIVE;
1190 break;
1191 case LTTNG_KERNEL_SYSCALL_ABI_COMPAT:
1192 event_notifier->u.syscall.abi = LTTNG_SYSCALL_ABI_COMPAT;
1193 break;
1194 }
1195
1196 if (!event_notifier->desc) {
1197 ret = -EINVAL;
1198 goto register_error;
1199 }
1200 break;
1201 case LTTNG_KERNEL_UPROBE:
1202 /*
1203 * Needs to be explicitly enabled after creation, since
1204 * we may want to apply filters.
1205 */
1206 event_notifier->enabled = 0;
1207 event_notifier->registered = 1;
1208
1209 /*
1210 * Populate lttng_event_notifier structure before
1211 * event_notifier registration.
1212 */
1213 smp_wmb();
1214
1215 ret = lttng_uprobes_register_event_notifier(
1216 event_notifier_param->event.name,
1217 event_notifier_param->event.u.uprobe.fd,
1218 event_notifier);
1219 if (ret)
1220 goto register_error;
1221 ret = try_module_get(event_notifier->desc->owner);
1222 WARN_ON_ONCE(!ret);
1223 break;
1224 case LTTNG_KERNEL_KRETPROBE:
1225 case LTTNG_KERNEL_FUNCTION:
1226 default:
1227 WARN_ON_ONCE(1);
1228 ret = -EINVAL;
1229 goto register_error;
1230 }
1231
1232 list_add(&event_notifier->list, &event_notifier_group->event_notifiers_head);
1233 hlist_add_head(&event_notifier->hlist, head);
1234
1235 /*
1236 * Clear the error counter bucket. The sessiond keeps track of which
1237 * bucket is currently in use. We trust it.
1238 */
1239 if (event_notifier_group->error_counter) {
1240 size_t dimension_index[1];
1241
1242 /*
1243 * Check that the index is within the boundary of the counter.
1244 */
1245 if (event_notifier->error_counter_index >= event_notifier_group->error_counter_len) {
1246 printk(KERN_INFO "LTTng: event_notifier: Error counter index out-of-bound: counter-len=%zu, index=%llu\n",
1247 event_notifier_group->error_counter_len, event_notifier->error_counter_index);
1248 ret = -EINVAL;
1249 goto register_error;
1250 }
1251
1252 dimension_index[0] = event_notifier->error_counter_index;
1253 ret = event_notifier_group->error_counter->ops->counter_clear(
1254 event_notifier_group->error_counter->counter,
1255 dimension_index);
1256 if (ret) {
1257 printk(KERN_INFO "LTTng: event_notifier: Unable to clear error counter bucket %llu\n",
1258 event_notifier->error_counter_index);
1259 goto register_error;
1260 }
1261 }
1262
1263 return event_notifier;
1264
1265 register_error:
1266 kmem_cache_free(event_notifier_cache, event_notifier);
1267 cache_error:
1268 exist:
1269 type_error:
1270 return ERR_PTR(ret);
1271 }
1272
1273 int lttng_kernel_counter_read(struct lttng_counter *counter,
1274 const size_t *dim_indexes, int32_t cpu,
1275 int64_t *val, bool *overflow, bool *underflow)
1276 {
1277 return counter->ops->counter_read(counter->counter, dim_indexes,
1278 cpu, val, overflow, underflow);
1279 }
1280
1281 int lttng_kernel_counter_aggregate(struct lttng_counter *counter,
1282 const size_t *dim_indexes, int64_t *val,
1283 bool *overflow, bool *underflow)
1284 {
1285 return counter->ops->counter_aggregate(counter->counter, dim_indexes,
1286 val, overflow, underflow);
1287 }
1288
1289 int lttng_kernel_counter_clear(struct lttng_counter *counter,
1290 const size_t *dim_indexes)
1291 {
1292 return counter->ops->counter_clear(counter->counter, dim_indexes);
1293 }
1294
1295 struct lttng_event *lttng_event_create(struct lttng_channel *chan,
1296 struct lttng_kernel_event *event_param,
1297 void *filter,
1298 const struct lttng_event_desc *event_desc,
1299 enum lttng_kernel_instrumentation itype)
1300 {
1301 struct lttng_event *event;
1302
1303 mutex_lock(&sessions_mutex);
1304 event = _lttng_event_create(chan, event_param, filter, event_desc,
1305 itype);
1306 mutex_unlock(&sessions_mutex);
1307 return event;
1308 }
1309
1310 struct lttng_event_notifier *lttng_event_notifier_create(
1311 const struct lttng_event_desc *event_desc,
1312 uint64_t id, uint64_t error_counter_index,
1313 struct lttng_event_notifier_group *event_notifier_group,
1314 struct lttng_kernel_event_notifier *event_notifier_param,
1315 void *filter, enum lttng_kernel_instrumentation itype)
1316 {
1317 struct lttng_event_notifier *event_notifier;
1318
1319 mutex_lock(&sessions_mutex);
1320 event_notifier = _lttng_event_notifier_create(event_desc, id,
1321 error_counter_index, event_notifier_group,
1322 event_notifier_param, filter, itype);
1323 mutex_unlock(&sessions_mutex);
1324 return event_notifier;
1325 }
1326
1327 /* Only used for tracepoints for now. */
1328 static
1329 void register_event(struct lttng_event *event)
1330 {
1331 const struct lttng_event_desc *desc;
1332 int ret = -EINVAL;
1333
1334 if (event->registered)
1335 return;
1336
1337 desc = event->desc;
1338 switch (event->instrumentation) {
1339 case LTTNG_KERNEL_TRACEPOINT:
1340 ret = lttng_wrapper_tracepoint_probe_register(desc->kname,
1341 desc->probe_callback,
1342 event);
1343 break;
1344 case LTTNG_KERNEL_SYSCALL:
1345 ret = lttng_syscall_filter_enable_event(event->chan, event);
1346 break;
1347 case LTTNG_KERNEL_KPROBE:
1348 case LTTNG_KERNEL_UPROBE:
1349 case LTTNG_KERNEL_KRETPROBE:
1350 case LTTNG_KERNEL_NOOP:
1351 ret = 0;
1352 break;
1353 case LTTNG_KERNEL_FUNCTION: /* Fall-through */
1354 default:
1355 WARN_ON_ONCE(1);
1356 }
1357 if (!ret)
1358 event->registered = 1;
1359 }
1360
1361 /*
1362 * Only used internally at session destruction.
1363 */
1364 int _lttng_event_unregister(struct lttng_event *event)
1365 {
1366 const struct lttng_event_desc *desc;
1367 int ret = -EINVAL;
1368
1369 if (!event->registered)
1370 return 0;
1371
1372 desc = event->desc;
1373 switch (event->instrumentation) {
1374 case LTTNG_KERNEL_TRACEPOINT:
1375 ret = lttng_wrapper_tracepoint_probe_unregister(event->desc->kname,
1376 event->desc->probe_callback,
1377 event);
1378 break;
1379 case LTTNG_KERNEL_KPROBE:
1380 lttng_kprobes_unregister_event(event);
1381 ret = 0;
1382 break;
1383 case LTTNG_KERNEL_KRETPROBE:
1384 lttng_kretprobes_unregister(event);
1385 ret = 0;
1386 break;
1387 case LTTNG_KERNEL_SYSCALL:
1388 ret = lttng_syscall_filter_disable_event(event->chan, event);
1389 break;
1390 case LTTNG_KERNEL_NOOP:
1391 ret = 0;
1392 break;
1393 case LTTNG_KERNEL_UPROBE:
1394 lttng_uprobes_unregister_event(event);
1395 ret = 0;
1396 break;
1397 case LTTNG_KERNEL_FUNCTION: /* Fall-through */
1398 default:
1399 WARN_ON_ONCE(1);
1400 }
1401 if (!ret)
1402 event->registered = 0;
1403 return ret;
1404 }
1405
1406 /* Only used for tracepoints for now. */
1407 static
1408 void register_event_notifier(struct lttng_event_notifier *event_notifier)
1409 {
1410 const struct lttng_event_desc *desc;
1411 int ret = -EINVAL;
1412
1413 if (event_notifier->registered)
1414 return;
1415
1416 desc = event_notifier->desc;
1417 switch (event_notifier->instrumentation) {
1418 case LTTNG_KERNEL_TRACEPOINT:
1419 ret = lttng_wrapper_tracepoint_probe_register(desc->kname,
1420 desc->event_notifier_callback,
1421 event_notifier);
1422 break;
1423 case LTTNG_KERNEL_SYSCALL:
1424 ret = lttng_syscall_filter_enable_event_notifier(event_notifier);
1425 break;
1426 case LTTNG_KERNEL_KPROBE:
1427 case LTTNG_KERNEL_UPROBE:
1428 ret = 0;
1429 break;
1430 case LTTNG_KERNEL_KRETPROBE:
1431 case LTTNG_KERNEL_FUNCTION:
1432 case LTTNG_KERNEL_NOOP:
1433 default:
1434 WARN_ON_ONCE(1);
1435 }
1436 if (!ret)
1437 event_notifier->registered = 1;
1438 }
1439
1440 static
1441 int _lttng_event_notifier_unregister(
1442 struct lttng_event_notifier *event_notifier)
1443 {
1444 const struct lttng_event_desc *desc;
1445 int ret = -EINVAL;
1446
1447 if (!event_notifier->registered)
1448 return 0;
1449
1450 desc = event_notifier->desc;
1451 switch (event_notifier->instrumentation) {
1452 case LTTNG_KERNEL_TRACEPOINT:
1453 ret = lttng_wrapper_tracepoint_probe_unregister(event_notifier->desc->kname,
1454 event_notifier->desc->event_notifier_callback,
1455 event_notifier);
1456 break;
1457 case LTTNG_KERNEL_KPROBE:
1458 lttng_kprobes_unregister_event_notifier(event_notifier);
1459 ret = 0;
1460 break;
1461 case LTTNG_KERNEL_UPROBE:
1462 lttng_uprobes_unregister_event_notifier(event_notifier);
1463 ret = 0;
1464 break;
1465 case LTTNG_KERNEL_SYSCALL:
1466 ret = lttng_syscall_filter_disable_event_notifier(event_notifier);
1467 break;
1468 case LTTNG_KERNEL_KRETPROBE:
1469 case LTTNG_KERNEL_FUNCTION:
1470 case LTTNG_KERNEL_NOOP:
1471 default:
1472 WARN_ON_ONCE(1);
1473 }
1474 if (!ret)
1475 event_notifier->registered = 0;
1476 return ret;
1477 }
1478
1479 /*
1480 * Only used internally at session destruction.
1481 */
1482 static
1483 void _lttng_event_destroy(struct lttng_event *event)
1484 {
1485 switch (event->instrumentation) {
1486 case LTTNG_KERNEL_TRACEPOINT:
1487 lttng_event_desc_put(event->desc);
1488 break;
1489 case LTTNG_KERNEL_KPROBE:
1490 module_put(event->desc->owner);
1491 lttng_kprobes_destroy_event_private(event);
1492 break;
1493 case LTTNG_KERNEL_KRETPROBE:
1494 module_put(event->desc->owner);
1495 lttng_kretprobes_destroy_private(event);
1496 break;
1497 case LTTNG_KERNEL_NOOP:
1498 case LTTNG_KERNEL_SYSCALL:
1499 break;
1500 case LTTNG_KERNEL_UPROBE:
1501 module_put(event->desc->owner);
1502 lttng_uprobes_destroy_event_private(event);
1503 break;
1504 case LTTNG_KERNEL_FUNCTION: /* Fall-through */
1505 default:
1506 WARN_ON_ONCE(1);
1507 }
1508 list_del(&event->list);
1509 lttng_destroy_context(event->ctx);
1510 kmem_cache_free(event_cache, event);
1511 }
1512
1513 /*
1514 * Only used internally at session destruction.
1515 */
1516 static
1517 void _lttng_event_notifier_destroy(struct lttng_event_notifier *event_notifier)
1518 {
1519 switch (event_notifier->instrumentation) {
1520 case LTTNG_KERNEL_TRACEPOINT:
1521 lttng_event_desc_put(event_notifier->desc);
1522 break;
1523 case LTTNG_KERNEL_KPROBE:
1524 module_put(event_notifier->desc->owner);
1525 lttng_kprobes_destroy_event_notifier_private(event_notifier);
1526 break;
1527 case LTTNG_KERNEL_NOOP:
1528 case LTTNG_KERNEL_SYSCALL:
1529 break;
1530 case LTTNG_KERNEL_UPROBE:
1531 module_put(event_notifier->desc->owner);
1532 lttng_uprobes_destroy_event_notifier_private(event_notifier);
1533 break;
1534 case LTTNG_KERNEL_KRETPROBE:
1535 case LTTNG_KERNEL_FUNCTION:
1536 default:
1537 WARN_ON_ONCE(1);
1538 }
1539 list_del(&event_notifier->list);
1540 kmem_cache_free(event_notifier_cache, event_notifier);
1541 }
1542
1543 struct lttng_id_tracker *get_tracker(struct lttng_session *session,
1544 enum tracker_type tracker_type)
1545 {
1546 switch (tracker_type) {
1547 case TRACKER_PID:
1548 return &session->pid_tracker;
1549 case TRACKER_VPID:
1550 return &session->vpid_tracker;
1551 case TRACKER_UID:
1552 return &session->uid_tracker;
1553 case TRACKER_VUID:
1554 return &session->vuid_tracker;
1555 case TRACKER_GID:
1556 return &session->gid_tracker;
1557 case TRACKER_VGID:
1558 return &session->vgid_tracker;
1559 default:
1560 WARN_ON_ONCE(1);
1561 return NULL;
1562 }
1563 }
1564
1565 int lttng_session_track_id(struct lttng_session *session,
1566 enum tracker_type tracker_type, int id)
1567 {
1568 struct lttng_id_tracker *tracker;
1569 int ret;
1570
1571 tracker = get_tracker(session, tracker_type);
1572 if (!tracker)
1573 return -EINVAL;
1574 if (id < -1)
1575 return -EINVAL;
1576 mutex_lock(&sessions_mutex);
1577 if (id == -1) {
1578 /* track all ids: destroy tracker. */
1579 lttng_id_tracker_destroy(tracker, true);
1580 ret = 0;
1581 } else {
1582 ret = lttng_id_tracker_add(tracker, id);
1583 }
1584 mutex_unlock(&sessions_mutex);
1585 return ret;
1586 }
1587
1588 int lttng_session_untrack_id(struct lttng_session *session,
1589 enum tracker_type tracker_type, int id)
1590 {
1591 struct lttng_id_tracker *tracker;
1592 int ret;
1593
1594 tracker = get_tracker(session, tracker_type);
1595 if (!tracker)
1596 return -EINVAL;
1597 if (id < -1)
1598 return -EINVAL;
1599 mutex_lock(&sessions_mutex);
1600 if (id == -1) {
1601 /* untrack all ids: replace by empty tracker. */
1602 ret = lttng_id_tracker_empty_set(tracker);
1603 } else {
1604 ret = lttng_id_tracker_del(tracker, id);
1605 }
1606 mutex_unlock(&sessions_mutex);
1607 return ret;
1608 }
1609
/*
 * seq_file "start" operation for the ID tracker listing.
 * Takes sessions_mutex (released in id_list_stop) and returns the
 * element at position *pos by linearly re-walking the hash table.
 *
 * NOTE(review): when the tracker is disabled, id_tracker_p is NULL and
 * the "empty tracker" branch returns that NULL pointer, which seq_file
 * interprets as end-of-sequence — confirm whether the intended
 * "id = -1" record (see id_list_show) can actually be emitted.
 */
static
void *id_list_start(struct seq_file *m, loff_t *pos)
{
	struct lttng_id_tracker *id_tracker = m->private;
	struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
	struct lttng_id_hash_node *e;
	int iter = 0, i;

	mutex_lock(&sessions_mutex);
	if (id_tracker_p) {
		for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
			struct hlist_head *head = &id_tracker_p->id_hash[i];

			lttng_hlist_for_each_entry(e, head, hlist) {
				if (iter++ >= *pos)
					return e;
			}
		}
	} else {
		/* ID tracker disabled. */
		if (iter >= *pos && iter == 0) {
			return id_tracker_p;	/* empty tracker */
		}
		iter++;
	}
	/* End of list */
	return NULL;
}
1638
/*
 * seq_file "next" operation for the ID tracker listing.
 * Called with sessions_mutex held (taken in id_list_start).
 * Advances *ppos then re-walks the whole hash table from the beginning to
 * find the element at the new position (O(n) per step, O(n^2) overall —
 * acceptable for the small LTTNG_ID_TABLE_SIZE table).
 */
static
void *id_list_next(struct seq_file *m, void *p, loff_t *ppos)
{
	struct lttng_id_tracker *id_tracker = m->private;
	struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
	struct lttng_id_hash_node *e;
	int iter = 0, i;

	(*ppos)++;
	if (id_tracker_p) {
		for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
			struct hlist_head *head = &id_tracker_p->id_hash[i];

			lttng_hlist_for_each_entry(e, head, hlist) {
				if (iter++ >= *ppos)
					return e;
			}
		}
	} else {
		/* ID tracker disabled. */
		if (iter >= *ppos && iter == 0)
			return p;	/* empty tracker */
		iter++;
	}

	/* End of list */
	return NULL;
}
1668
/*
 * seq_file "stop" operation: releases the sessions_mutex taken by
 * id_list_start.
 */
static
void id_list_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&sessions_mutex);
}
1674
1675 static
1676 int id_list_show(struct seq_file *m, void *p)
1677 {
1678 struct lttng_id_tracker *id_tracker = m->private;
1679 struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
1680 int id;
1681
1682 if (p == id_tracker_p) {
1683 /* Tracker disabled. */
1684 id = -1;
1685 } else {
1686 const struct lttng_id_hash_node *e = p;
1687
1688 id = lttng_id_tracker_get_node_id(e);
1689 }
1690 switch (id_tracker->tracker_type) {
1691 case TRACKER_PID:
1692 seq_printf(m, "process { pid = %d; };\n", id);
1693 break;
1694 case TRACKER_VPID:
1695 seq_printf(m, "process { vpid = %d; };\n", id);
1696 break;
1697 case TRACKER_UID:
1698 seq_printf(m, "user { uid = %d; };\n", id);
1699 break;
1700 case TRACKER_VUID:
1701 seq_printf(m, "user { vuid = %d; };\n", id);
1702 break;
1703 case TRACKER_GID:
1704 seq_printf(m, "group { gid = %d; };\n", id);
1705 break;
1706 case TRACKER_VGID:
1707 seq_printf(m, "group { vgid = %d; };\n", id);
1708 break;
1709 default:
1710 seq_printf(m, "UNKNOWN { field = %d };\n", id);
1711 }
1712 return 0;
1713 }
1714
/* seq_file operations for iterating over a session's ID tracker. */
static
const struct seq_operations lttng_tracker_ids_list_seq_ops = {
	.start = id_list_start,
	.next = id_list_next,
	.stop = id_list_stop,
	.show = id_list_show,
};
1722
/*
 * Open handler for the tracker listing file: wire up the seq_file
 * iterator. The tracker pointer is installed later into m->private by
 * lttng_session_list_tracker_ids().
 */
static
int lttng_tracker_ids_list_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &lttng_tracker_ids_list_seq_ops);
}
1728
/*
 * Release handler for the tracker listing file: tear down the seq_file
 * state and drop the session file reference taken when the listing fd
 * was created (see lttng_session_list_tracker_ids()).
 */
static
int lttng_tracker_ids_list_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	struct lttng_id_tracker *id_tracker = m->private;
	int ret;

	WARN_ON_ONCE(!id_tracker);
	ret = seq_release(inode, file);
	if (!ret)
		fput(id_tracker->session->file);
	return ret;
}
1742
/* File operations for the anonymous tracker-listing file descriptor. */
const struct file_operations lttng_tracker_ids_list_fops = {
	.owner = THIS_MODULE,
	.open = lttng_tracker_ids_list_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = lttng_tracker_ids_list_release,
};
1750
/*
 * Create an anonymous file descriptor that, when read, lists the IDs of
 * the session tracker identified by @tracker_type.
 *
 * Pins the session file (extra f_count reference, dropped in the release
 * handler) so the session outlives the listing fd. Returns the new fd on
 * success, negative errno on failure; errors unwind via goto cleanup.
 */
int lttng_session_list_tracker_ids(struct lttng_session *session,
		enum tracker_type tracker_type)
{
	struct file *tracker_ids_list_file;
	struct seq_file *m;
	int file_fd, ret;

	file_fd = lttng_get_unused_fd();
	if (file_fd < 0) {
		ret = file_fd;
		goto fd_error;
	}

	tracker_ids_list_file = anon_inode_getfile("[lttng_tracker_ids_list]",
					  &lttng_tracker_ids_list_fops,
					  NULL, O_RDWR);
	if (IS_ERR(tracker_ids_list_file)) {
		ret = PTR_ERR(tracker_ids_list_file);
		goto file_error;
	}
	/* Guard against f_count overflow when taking the extra reference. */
	if (!atomic_long_add_unless(&session->file->f_count, 1, LONG_MAX)) {
		ret = -EOVERFLOW;
		goto refcount_error;
	}
	ret = lttng_tracker_ids_list_fops.open(NULL, tracker_ids_list_file);
	if (ret < 0)
		goto open_error;
	m = tracker_ids_list_file->private_data;

	m->private = get_tracker(session, tracker_type);
	BUG_ON(!m->private);
	/* Publish the fd only once fully initialized. */
	fd_install(file_fd, tracker_ids_list_file);

	return file_fd;

open_error:
	atomic_long_dec(&session->file->f_count);
refcount_error:
	fput(tracker_ids_list_file);
file_error:
	put_unused_fd(file_fd);
fd_error:
	return ret;
}
1795
1796 /*
1797 * Enabler management.
1798 */
1799 static
1800 int lttng_match_enabler_star_glob(const char *desc_name,
1801 const char *pattern)
1802 {
1803 if (!strutils_star_glob_match(pattern, LTTNG_SIZE_MAX,
1804 desc_name, LTTNG_SIZE_MAX))
1805 return 0;
1806 return 1;
1807 }
1808
/*
 * Exact-name enabler match.
 * Returns 1 when @desc_name equals @name, 0 otherwise.
 */
static
int lttng_match_enabler_name(const char *desc_name,
		const char *name)
{
	return strcmp(desc_name, name) == 0;
}
1817
/*
 * Decide whether event descriptor @desc matches @enabler.
 * Returns 1 on match, 0 on no-match, -EINVAL on invalid enabler state.
 *
 * For syscalls, the descriptor name is canonicalized before matching:
 * a "compat_" prefix marks the compat ABI, and the mandatory
 * "syscall_entry_"/"syscall_exit_" prefix selects entry vs exit; both
 * prefixes are stripped so the enabler pattern matches the bare syscall
 * name. The enabler's entryexit/abi filters are applied before the name
 * comparison.
 *
 * NOTE(review): the `break` statements after the inner switches are
 * unreachable (all inner paths return), so control can appear to reach
 * the end of this non-void function — compilers may warn; confirm before
 * "fixing" since no path actually falls through at runtime.
 */
int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
		struct lttng_enabler *enabler)
{
	const char *desc_name, *enabler_name;
	bool compat = false, entry = false;

	enabler_name = enabler->event_param.name;
	switch (enabler->event_param.instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
		desc_name = desc->name;
		switch (enabler->format_type) {
		case LTTNG_ENABLER_FORMAT_STAR_GLOB:
			return lttng_match_enabler_star_glob(desc_name, enabler_name);
		case LTTNG_ENABLER_FORMAT_NAME:
			return lttng_match_enabler_name(desc_name, enabler_name);
		default:
			return -EINVAL;
		}
		break;
	case LTTNG_KERNEL_SYSCALL:
		desc_name = desc->name;
		/* Strip the "compat_" ABI marker, remembering it. */
		if (!strncmp(desc_name, "compat_", strlen("compat_"))) {
			desc_name += strlen("compat_");
			compat = true;
		}
		/* Strip the mandatory entry/exit marker, remembering it. */
		if (!strncmp(desc_name, "syscall_exit_",
				strlen("syscall_exit_"))) {
			desc_name += strlen("syscall_exit_");
		} else if (!strncmp(desc_name, "syscall_entry_",
				strlen("syscall_entry_"))) {
			desc_name += strlen("syscall_entry_");
			entry = true;
		} else {
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
		/* Filter on entry/exit as requested by the enabler. */
		switch (enabler->event_param.u.syscall.entryexit) {
		case LTTNG_KERNEL_SYSCALL_ENTRYEXIT:
			break;
		case LTTNG_KERNEL_SYSCALL_ENTRY:
			if (!entry)
				return 0;
			break;
		case LTTNG_KERNEL_SYSCALL_EXIT:
			if (entry)
				return 0;
			break;
		default:
			return -EINVAL;
		}
		/* Filter on native/compat ABI as requested by the enabler. */
		switch (enabler->event_param.u.syscall.abi) {
		case LTTNG_KERNEL_SYSCALL_ABI_ALL:
			break;
		case LTTNG_KERNEL_SYSCALL_ABI_NATIVE:
			if (compat)
				return 0;
			break;
		case LTTNG_KERNEL_SYSCALL_ABI_COMPAT:
			if (!compat)
				return 0;
			break;
		default:
			return -EINVAL;
		}
		/* Finally match on the canonicalized syscall name. */
		switch (enabler->event_param.u.syscall.match) {
		case LTTNG_KERNEL_SYSCALL_MATCH_NAME:
			switch (enabler->format_type) {
			case LTTNG_ENABLER_FORMAT_STAR_GLOB:
				return lttng_match_enabler_star_glob(desc_name, enabler_name);
			case LTTNG_ENABLER_FORMAT_NAME:
				return lttng_match_enabler_name(desc_name, enabler_name);
			default:
				return -EINVAL;
			}
			break;
		case LTTNG_KERNEL_SYSCALL_MATCH_NR:
			return -EINVAL;	/* Not implemented. */
		default:
			return -EINVAL;
		}
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}
}
1904
1905 static
1906 int lttng_event_enabler_match_event(struct lttng_event_enabler *event_enabler,
1907 struct lttng_event *event)
1908 {
1909 struct lttng_enabler *base_enabler = lttng_event_enabler_as_enabler(
1910 event_enabler);
1911
1912 if (base_enabler->event_param.instrumentation != event->instrumentation)
1913 return 0;
1914 if (lttng_desc_match_enabler(event->desc, base_enabler)
1915 && event->chan == event_enabler->chan)
1916 return 1;
1917 else
1918 return 0;
1919 }
1920
1921 static
1922 int lttng_event_notifier_enabler_match_event_notifier(struct lttng_event_notifier_enabler *event_notifier_enabler,
1923 struct lttng_event_notifier *event_notifier)
1924 {
1925 struct lttng_enabler *base_enabler = lttng_event_notifier_enabler_as_enabler(
1926 event_notifier_enabler);
1927
1928 if (base_enabler->event_param.instrumentation != event_notifier->instrumentation)
1929 return 0;
1930 if (lttng_desc_match_enabler(event_notifier->desc, base_enabler)
1931 && event_notifier->group == event_notifier_enabler->group
1932 && event_notifier->user_token == event_notifier_enabler->base.user_token)
1933 return 1;
1934 else
1935 return 0;
1936 }
1937
1938 static
1939 struct lttng_enabler_ref *lttng_enabler_ref(
1940 struct list_head *enablers_ref_list,
1941 struct lttng_enabler *enabler)
1942 {
1943 struct lttng_enabler_ref *enabler_ref;
1944
1945 list_for_each_entry(enabler_ref, enablers_ref_list, node) {
1946 if (enabler_ref->ref == enabler)
1947 return enabler_ref;
1948 }
1949 return NULL;
1950 }
1951
1952 static
1953 void lttng_create_tracepoint_event_if_missing(struct lttng_event_enabler *event_enabler)
1954 {
1955 struct lttng_session *session = event_enabler->chan->session;
1956 struct lttng_probe_desc *probe_desc;
1957 const struct lttng_event_desc *desc;
1958 int i;
1959 struct list_head *probe_list;
1960
1961 probe_list = lttng_get_probe_list_head();
1962 /*
1963 * For each probe event, if we find that a probe event matches
1964 * our enabler, create an associated lttng_event if not
1965 * already present.
1966 */
1967 list_for_each_entry(probe_desc, probe_list, head) {
1968 for (i = 0; i < probe_desc->nr_events; i++) {
1969 int found = 0;
1970 struct hlist_head *head;
1971 struct lttng_event *event;
1972
1973 desc = probe_desc->event_desc[i];
1974 if (!lttng_desc_match_enabler(desc,
1975 lttng_event_enabler_as_enabler(event_enabler)))
1976 continue;
1977
1978 /*
1979 * Check if already created.
1980 */
1981 head = utils_borrow_hash_table_bucket(
1982 session->events_ht.table, LTTNG_EVENT_HT_SIZE,
1983 desc->name);
1984 lttng_hlist_for_each_entry(event, head, hlist) {
1985 if (event->desc == desc
1986 && event->chan == event_enabler->chan)
1987 found = 1;
1988 }
1989 if (found)
1990 continue;
1991
1992 /*
1993 * We need to create an event for this
1994 * event probe.
1995 */
1996 event = _lttng_event_create(event_enabler->chan,
1997 NULL, NULL, desc,
1998 LTTNG_KERNEL_TRACEPOINT);
1999 if (!event) {
2000 printk(KERN_INFO "LTTng: Unable to create event %s\n",
2001 probe_desc->event_desc[i]->name);
2002 }
2003 }
2004 }
2005 }
2006
2007 static
2008 void lttng_create_tracepoint_event_notifier_if_missing(struct lttng_event_notifier_enabler *event_notifier_enabler)
2009 {
2010 struct lttng_event_notifier_group *event_notifier_group = event_notifier_enabler->group;
2011 struct lttng_probe_desc *probe_desc;
2012 const struct lttng_event_desc *desc;
2013 int i;
2014 struct list_head *probe_list;
2015
2016 probe_list = lttng_get_probe_list_head();
2017 /*
2018 * For each probe event, if we find that a probe event matches
2019 * our enabler, create an associated lttng_event_notifier if not
2020 * already present.
2021 */
2022 list_for_each_entry(probe_desc, probe_list, head) {
2023 for (i = 0; i < probe_desc->nr_events; i++) {
2024 int found = 0;
2025 struct hlist_head *head;
2026 struct lttng_event_notifier *event_notifier;
2027
2028 desc = probe_desc->event_desc[i];
2029 if (!lttng_desc_match_enabler(desc,
2030 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)))
2031 continue;
2032
2033 /*
2034 * Check if already created.
2035 */
2036 head = utils_borrow_hash_table_bucket(
2037 event_notifier_group->event_notifiers_ht.table,
2038 LTTNG_EVENT_NOTIFIER_HT_SIZE, desc->name);
2039 lttng_hlist_for_each_entry(event_notifier, head, hlist) {
2040 if (event_notifier->desc == desc
2041 && event_notifier->user_token == event_notifier_enabler->base.user_token)
2042 found = 1;
2043 }
2044 if (found)
2045 continue;
2046
2047 /*
2048 * We need to create a event_notifier for this event probe.
2049 */
2050 event_notifier = _lttng_event_notifier_create(desc,
2051 event_notifier_enabler->base.user_token,
2052 event_notifier_enabler->error_counter_index,
2053 event_notifier_group, NULL, NULL,
2054 LTTNG_KERNEL_TRACEPOINT);
2055 if (IS_ERR(event_notifier)) {
2056 printk(KERN_INFO "Unable to create event_notifier %s\n",
2057 probe_desc->event_desc[i]->name);
2058 }
2059 }
2060 }
2061 }
2062
/*
 * Delegate syscall event creation for @event_enabler to the syscall
 * tracing subsystem; registration failures are reported with a warning
 * (best-effort, no error propagation to the caller).
 */
static
void lttng_create_syscall_event_if_missing(struct lttng_event_enabler *event_enabler)
{
	int ret;

	ret = lttng_syscalls_register_event(event_enabler, NULL);
	WARN_ON_ONCE(ret);
}
2071
/*
 * Delegate syscall event notifier registration and creation for
 * @event_notifier_enabler to the syscall tracing subsystem; failures are
 * reported with warnings (best-effort, no error propagation).
 *
 * NOTE(review): "syscals" in the second callee name is spelled that way
 * at its declaration site — do not "fix" the spelling here alone.
 */
static
void lttng_create_syscall_event_notifier_if_missing(struct lttng_event_notifier_enabler *event_notifier_enabler)
{
	int ret;

	ret = lttng_syscalls_register_event_notifier(event_notifier_enabler, NULL);
	WARN_ON_ONCE(ret);
	ret = lttng_syscals_create_matching_event_notifiers(event_notifier_enabler, NULL);
	WARN_ON_ONCE(ret);
}
2082
2083 /*
2084 * Create struct lttng_event if it is missing and present in the list of
2085 * tracepoint probes.
2086 * Should be called with sessions mutex held.
2087 */
2088 static
2089 void lttng_create_event_if_missing(struct lttng_event_enabler *event_enabler)
2090 {
2091 switch (event_enabler->base.event_param.instrumentation) {
2092 case LTTNG_KERNEL_TRACEPOINT:
2093 lttng_create_tracepoint_event_if_missing(event_enabler);
2094 break;
2095 case LTTNG_KERNEL_SYSCALL:
2096 lttng_create_syscall_event_if_missing(event_enabler);
2097 break;
2098 default:
2099 WARN_ON_ONCE(1);
2100 break;
2101 }
2102 }
2103
2104 /*
2105 * Create events associated with an event_enabler (if not already present),
2106 * and add backward reference from the event to the enabler.
2107 * Should be called with sessions mutex held.
2108 */
2109 static
2110 int lttng_event_enabler_ref_events(struct lttng_event_enabler *event_enabler)
2111 {
2112 struct lttng_channel *chan = event_enabler->chan;
2113 struct lttng_session *session = event_enabler->chan->session;
2114 struct lttng_enabler *base_enabler = lttng_event_enabler_as_enabler(event_enabler);
2115 struct lttng_event *event;
2116
2117 if (base_enabler->event_param.instrumentation == LTTNG_KERNEL_SYSCALL &&
2118 base_enabler->event_param.u.syscall.abi == LTTNG_KERNEL_SYSCALL_ABI_ALL &&
2119 base_enabler->event_param.u.syscall.match == LTTNG_KERNEL_SYSCALL_MATCH_NAME &&
2120 !strcmp(base_enabler->event_param.name, "*")) {
2121 int enabled = base_enabler->enabled;
2122 enum lttng_kernel_syscall_entryexit entryexit = base_enabler->event_param.u.syscall.entryexit;
2123
2124 if (entryexit == LTTNG_KERNEL_SYSCALL_ENTRY || entryexit == LTTNG_KERNEL_SYSCALL_ENTRYEXIT)
2125 WRITE_ONCE(chan->syscall_all_entry, enabled);
2126
2127 if (entryexit == LTTNG_KERNEL_SYSCALL_EXIT || entryexit == LTTNG_KERNEL_SYSCALL_ENTRYEXIT)
2128 WRITE_ONCE(chan->syscall_all_exit, enabled);
2129 }
2130
2131 /* First ensure that probe events are created for this enabler. */
2132 lttng_create_event_if_missing(event_enabler);
2133
2134 /* For each event matching event_enabler in session event list. */
2135 list_for_each_entry(event, &session->events, list) {
2136 struct lttng_enabler_ref *enabler_ref;
2137
2138 if (!lttng_event_enabler_match_event(event_enabler, event))
2139 continue;
2140 enabler_ref = lttng_enabler_ref(&event->enablers_ref_head,
2141 lttng_event_enabler_as_enabler(event_enabler));
2142 if (!enabler_ref) {
2143 /*
2144 * If no backward ref, create it.
2145 * Add backward ref from event to event_enabler.
2146 */
2147 enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
2148 if (!enabler_ref)
2149 return -ENOMEM;
2150 enabler_ref->ref = lttng_event_enabler_as_enabler(event_enabler);
2151 list_add(&enabler_ref->node,
2152 &event->enablers_ref_head);
2153 }
2154
2155 /*
2156 * Link filter bytecodes if not linked yet.
2157 */
2158 lttng_enabler_link_bytecode(event->desc,
2159 lttng_static_ctx,
2160 &event->filter_bytecode_runtime_head,
2161 &lttng_event_enabler_as_enabler(event_enabler)->filter_bytecode_head);
2162
2163 /* TODO: merge event context. */
2164 }
2165 return 0;
2166 }
2167
2168 /*
2169 * Create struct lttng_event_notifier if it is missing and present in the list of
2170 * tracepoint probes.
2171 * Should be called with sessions mutex held.
2172 */
2173 static
2174 void lttng_create_event_notifier_if_missing(struct lttng_event_notifier_enabler *event_notifier_enabler)
2175 {
2176 switch (event_notifier_enabler->base.event_param.instrumentation) {
2177 case LTTNG_KERNEL_TRACEPOINT:
2178 lttng_create_tracepoint_event_notifier_if_missing(event_notifier_enabler);
2179 break;
2180 case LTTNG_KERNEL_SYSCALL:
2181 lttng_create_syscall_event_notifier_if_missing(event_notifier_enabler);
2182 break;
2183 default:
2184 WARN_ON_ONCE(1);
2185 break;
2186 }
2187 }
2188
2189 /*
2190 * Create event_notifiers associated with a event_notifier enabler (if not already present).
2191 */
static
int lttng_event_notifier_enabler_ref_event_notifiers(
		struct lttng_event_notifier_enabler *event_notifier_enabler)
{
	struct lttng_event_notifier_group *event_notifier_group = event_notifier_enabler->group;
	struct lttng_enabler *base_enabler = lttng_event_notifier_enabler_as_enabler(event_notifier_enabler);
	struct lttng_event_notifier *event_notifier;

	/*
	 * The "all syscalls" wildcard enabler is not materialized as one
	 * event notifier per system call: it is implemented by toggling
	 * the group-wide syscall entry/exit state. That state is read
	 * locklessly by the syscall instrumentation, hence WRITE_ONCE.
	 */
	if (base_enabler->event_param.instrumentation == LTTNG_KERNEL_SYSCALL &&
			base_enabler->event_param.u.syscall.abi == LTTNG_KERNEL_SYSCALL_ABI_ALL &&
			base_enabler->event_param.u.syscall.match == LTTNG_KERNEL_SYSCALL_MATCH_NAME &&
			!strcmp(base_enabler->event_param.name, "*")) {

		int enabled = base_enabler->enabled;
		enum lttng_kernel_syscall_entryexit entryexit = base_enabler->event_param.u.syscall.entryexit;

		if (entryexit == LTTNG_KERNEL_SYSCALL_ENTRY || entryexit == LTTNG_KERNEL_SYSCALL_ENTRYEXIT)
			WRITE_ONCE(event_notifier_group->syscall_all_entry, enabled);

		if (entryexit == LTTNG_KERNEL_SYSCALL_EXIT || entryexit == LTTNG_KERNEL_SYSCALL_ENTRYEXIT)
			WRITE_ONCE(event_notifier_group->syscall_all_exit, enabled);

	}

	/* First ensure that probe event_notifiers are created for this enabler. */
	lttng_create_event_notifier_if_missing(event_notifier_enabler);

	/* Link the created event_notifier with its associated enabler. */
	list_for_each_entry(event_notifier, &event_notifier_group->event_notifiers_head, list) {
		struct lttng_enabler_ref *enabler_ref;

		if (!lttng_event_notifier_enabler_match_event_notifier(event_notifier_enabler, event_notifier))
			continue;

		enabler_ref = lttng_enabler_ref(&event_notifier->enablers_ref_head,
			lttng_event_notifier_enabler_as_enabler(event_notifier_enabler));
		if (!enabler_ref) {
			/*
			 * If no backward ref, create it.
			 * Add backward ref from event_notifier to enabler.
			 */
			enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
			if (!enabler_ref)
				return -ENOMEM;

			enabler_ref->ref = lttng_event_notifier_enabler_as_enabler(
				event_notifier_enabler);
			list_add(&enabler_ref->node,
				&event_notifier->enablers_ref_head);
		}

		/*
		 * Link filter bytecodes if not linked yet.
		 */
		lttng_enabler_link_bytecode(event_notifier->desc,
			lttng_static_ctx, &event_notifier->filter_bytecode_runtime_head,
			&lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->filter_bytecode_head);

		/* Link capture bytecodes if not linked yet. */
		lttng_enabler_link_bytecode(event_notifier->desc,
			lttng_static_ctx, &event_notifier->capture_bytecode_runtime_head,
			&event_notifier_enabler->capture_bytecode_head);

		event_notifier->num_captures = event_notifier_enabler->num_captures;
	}
	return 0;
}
2259
2260 /*
2261 * Called at module load: connect the probe on all enablers matching
2262 * this event.
2263 * Called with sessions lock held.
2264 */
2265 int lttng_fix_pending_events(void)
2266 {
2267 struct lttng_session *session;
2268
2269 list_for_each_entry(session, &sessions, list)
2270 lttng_session_lazy_sync_event_enablers(session);
2271 return 0;
2272 }
2273
2274 static bool lttng_event_notifier_group_has_active_event_notifiers(
2275 struct lttng_event_notifier_group *event_notifier_group)
2276 {
2277 struct lttng_event_notifier_enabler *event_notifier_enabler;
2278
2279 list_for_each_entry(event_notifier_enabler, &event_notifier_group->enablers_head,
2280 node) {
2281 if (event_notifier_enabler->base.enabled)
2282 return true;
2283 }
2284 return false;
2285 }
2286
2287 bool lttng_event_notifier_active(void)
2288 {
2289 struct lttng_event_notifier_group *event_notifier_group;
2290
2291 list_for_each_entry(event_notifier_group, &event_notifier_groups, node) {
2292 if (lttng_event_notifier_group_has_active_event_notifiers(event_notifier_group))
2293 return true;
2294 }
2295 return false;
2296 }
2297
2298 int lttng_fix_pending_event_notifiers(void)
2299 {
2300 struct lttng_event_notifier_group *event_notifier_group;
2301
2302 list_for_each_entry(event_notifier_group, &event_notifier_groups, node)
2303 lttng_event_notifier_group_sync_enablers(event_notifier_group);
2304 return 0;
2305 }
2306
2307 struct lttng_event_enabler *lttng_event_enabler_create(
2308 enum lttng_enabler_format_type format_type,
2309 struct lttng_kernel_event *event_param,
2310 struct lttng_channel *chan)
2311 {
2312 struct lttng_event_enabler *event_enabler;
2313
2314 event_enabler = kzalloc(sizeof(*event_enabler), GFP_KERNEL);
2315 if (!event_enabler)
2316 return NULL;
2317 event_enabler->base.format_type = format_type;
2318 INIT_LIST_HEAD(&event_enabler->base.filter_bytecode_head);
2319 memcpy(&event_enabler->base.event_param, event_param,
2320 sizeof(event_enabler->base.event_param));
2321 event_enabler->chan = chan;
2322 /* ctx left NULL */
2323 event_enabler->base.enabled = 0;
2324 event_enabler->base.evtype = LTTNG_TYPE_ENABLER;
2325 mutex_lock(&sessions_mutex);
2326 list_add(&event_enabler->node, &event_enabler->chan->session->enablers_head);
2327 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
2328 mutex_unlock(&sessions_mutex);
2329 return event_enabler;
2330 }
2331
2332 int lttng_event_enabler_enable(struct lttng_event_enabler *event_enabler)
2333 {
2334 mutex_lock(&sessions_mutex);
2335 lttng_event_enabler_as_enabler(event_enabler)->enabled = 1;
2336 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
2337 mutex_unlock(&sessions_mutex);
2338 return 0;
2339 }
2340
2341 int lttng_event_enabler_disable(struct lttng_event_enabler *event_enabler)
2342 {
2343 mutex_lock(&sessions_mutex);
2344 lttng_event_enabler_as_enabler(event_enabler)->enabled = 0;
2345 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
2346 mutex_unlock(&sessions_mutex);
2347 return 0;
2348 }
2349
2350 static
2351 int lttng_enabler_attach_filter_bytecode(struct lttng_enabler *enabler,
2352 struct lttng_kernel_filter_bytecode __user *bytecode)
2353 {
2354 struct lttng_bytecode_node *bytecode_node;
2355 uint32_t bytecode_len;
2356 int ret;
2357
2358 ret = get_user(bytecode_len, &bytecode->len);
2359 if (ret)
2360 return ret;
2361 bytecode_node = lttng_kvzalloc(sizeof(*bytecode_node) + bytecode_len,
2362 GFP_KERNEL);
2363 if (!bytecode_node)
2364 return -ENOMEM;
2365 ret = copy_from_user(&bytecode_node->bc, bytecode,
2366 sizeof(*bytecode) + bytecode_len);
2367 if (ret)
2368 goto error_free;
2369
2370 bytecode_node->type = LTTNG_BYTECODE_NODE_TYPE_FILTER;
2371 bytecode_node->enabler = enabler;
2372 /* Enforce length based on allocated size */
2373 bytecode_node->bc.len = bytecode_len;
2374 list_add_tail(&bytecode_node->node, &enabler->filter_bytecode_head);
2375
2376 return 0;
2377
2378 error_free:
2379 lttng_kvfree(bytecode_node);
2380 return ret;
2381 }
2382
2383 int lttng_event_enabler_attach_filter_bytecode(struct lttng_event_enabler *event_enabler,
2384 struct lttng_kernel_filter_bytecode __user *bytecode)
2385 {
2386 int ret;
2387 ret = lttng_enabler_attach_filter_bytecode(
2388 lttng_event_enabler_as_enabler(event_enabler), bytecode);
2389 if (ret)
2390 goto error;
2391
2392 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
2393 return 0;
2394
2395 error:
2396 return ret;
2397 }
2398
2399 int lttng_event_add_callsite(struct lttng_event *event,
2400 struct lttng_kernel_event_callsite __user *callsite)
2401 {
2402
2403 switch (event->instrumentation) {
2404 case LTTNG_KERNEL_UPROBE:
2405 return lttng_uprobes_event_add_callsite(event, callsite);
2406 default:
2407 return -EINVAL;
2408 }
2409 }
2410
/*
 * Attaching a context to an event enabler is not implemented by the
 * kernel tracer: always returns -ENOSYS.
 */
int lttng_event_enabler_attach_context(struct lttng_event_enabler *event_enabler,
		struct lttng_kernel_context *context_param)
{
	return -ENOSYS;
}
2416
2417 static
2418 void lttng_enabler_destroy(struct lttng_enabler *enabler)
2419 {
2420 struct lttng_bytecode_node *filter_node, *tmp_filter_node;
2421
2422 /* Destroy filter bytecode */
2423 list_for_each_entry_safe(filter_node, tmp_filter_node,
2424 &enabler->filter_bytecode_head, node) {
2425 lttng_kvfree(filter_node);
2426 }
2427 }
2428
/*
 * Tear down an event enabler: free its filter bytecode, its contexts,
 * unlink it from the session's enabler list and free it.
 * Should be called with sessions mutex held.
 */
static
void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler)
{
	lttng_enabler_destroy(lttng_event_enabler_as_enabler(event_enabler));

	/* Destroy contexts */
	lttng_destroy_context(event_enabler->ctx);

	/* Unlink from the session's enabler list before freeing. */
	list_del(&event_enabler->node);
	kfree(event_enabler);
}
2440
/*
 * Allocate a new event notifier enabler, publish it in its group and
 * synchronize the group so matching event notifiers are created.
 * The enabler starts disabled. Returns NULL on allocation failure.
 */
struct lttng_event_notifier_enabler *lttng_event_notifier_enabler_create(
		struct lttng_event_notifier_group *event_notifier_group,
		enum lttng_enabler_format_type format_type,
		struct lttng_kernel_event_notifier *event_notifier_param)
{
	struct lttng_event_notifier_enabler *event_notifier_enabler;

	event_notifier_enabler = kzalloc(sizeof(*event_notifier_enabler), GFP_KERNEL);
	if (!event_notifier_enabler)
		return NULL;

	event_notifier_enabler->base.format_type = format_type;
	INIT_LIST_HEAD(&event_notifier_enabler->base.filter_bytecode_head);
	INIT_LIST_HEAD(&event_notifier_enabler->capture_bytecode_head);

	event_notifier_enabler->error_counter_index = event_notifier_param->error_counter_index;
	event_notifier_enabler->num_captures = 0;

	memcpy(&event_notifier_enabler->base.event_param, &event_notifier_param->event,
		sizeof(event_notifier_enabler->base.event_param));
	event_notifier_enabler->base.evtype = LTTNG_TYPE_ENABLER;

	/* Enablers start disabled; an explicit enable command turns them on. */
	event_notifier_enabler->base.enabled = 0;
	event_notifier_enabler->base.user_token = event_notifier_param->event.token;
	event_notifier_enabler->group = event_notifier_group;

	mutex_lock(&sessions_mutex);
	/* Publish in the group, then sync so matching notifiers exist. */
	list_add(&event_notifier_enabler->node, &event_notifier_enabler->group->enablers_head);
	lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);

	mutex_unlock(&sessions_mutex);

	return event_notifier_enabler;
}
2475
2476 int lttng_event_notifier_enabler_enable(
2477 struct lttng_event_notifier_enabler *event_notifier_enabler)
2478 {
2479 mutex_lock(&sessions_mutex);
2480 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 1;
2481 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2482 mutex_unlock(&sessions_mutex);
2483 return 0;
2484 }
2485
2486 int lttng_event_notifier_enabler_disable(
2487 struct lttng_event_notifier_enabler *event_notifier_enabler)
2488 {
2489 mutex_lock(&sessions_mutex);
2490 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 0;
2491 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2492 mutex_unlock(&sessions_mutex);
2493 return 0;
2494 }
2495
2496 int lttng_event_notifier_enabler_attach_filter_bytecode(
2497 struct lttng_event_notifier_enabler *event_notifier_enabler,
2498 struct lttng_kernel_filter_bytecode __user *bytecode)
2499 {
2500 int ret;
2501
2502 ret = lttng_enabler_attach_filter_bytecode(
2503 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler),
2504 bytecode);
2505 if (ret)
2506 goto error;
2507
2508 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2509 return 0;
2510
2511 error:
2512 return ret;
2513 }
2514
2515 int lttng_event_notifier_enabler_attach_capture_bytecode(
2516 struct lttng_event_notifier_enabler *event_notifier_enabler,
2517 struct lttng_kernel_capture_bytecode __user *bytecode)
2518 {
2519 struct lttng_bytecode_node *bytecode_node;
2520 struct lttng_enabler *enabler =
2521 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler);
2522 uint32_t bytecode_len;
2523 int ret;
2524
2525 ret = get_user(bytecode_len, &bytecode->len);
2526 if (ret)
2527 return ret;
2528
2529 bytecode_node = lttng_kvzalloc(sizeof(*bytecode_node) + bytecode_len,
2530 GFP_KERNEL);
2531 if (!bytecode_node)
2532 return -ENOMEM;
2533
2534 ret = copy_from_user(&bytecode_node->bc, bytecode,
2535 sizeof(*bytecode) + bytecode_len);
2536 if (ret)
2537 goto error_free;
2538
2539 bytecode_node->type = LTTNG_BYTECODE_NODE_TYPE_CAPTURE;
2540 bytecode_node->enabler = enabler;
2541
2542 /* Enforce length based on allocated size */
2543 bytecode_node->bc.len = bytecode_len;
2544 list_add_tail(&bytecode_node->node, &event_notifier_enabler->capture_bytecode_head);
2545
2546 event_notifier_enabler->num_captures++;
2547
2548 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2549 goto end;
2550
2551 error_free:
2552 lttng_kvfree(bytecode_node);
2553 end:
2554 return ret;
2555 }
2556
2557 int lttng_event_notifier_add_callsite(struct lttng_event_notifier *event_notifier,
2558 struct lttng_kernel_event_callsite __user *callsite)
2559 {
2560
2561 switch (event_notifier->instrumentation) {
2562 case LTTNG_KERNEL_UPROBE:
2563 return lttng_uprobes_event_notifier_add_callsite(event_notifier,
2564 callsite);
2565 default:
2566 return -EINVAL;
2567 }
2568 }
2569
/*
 * Attaching a context to an event notifier enabler is not implemented
 * by the kernel tracer: always returns -ENOSYS.
 */
int lttng_event_notifier_enabler_attach_context(
		struct lttng_event_notifier_enabler *event_notifier_enabler,
		struct lttng_kernel_context *context_param)
{
	return -ENOSYS;
}
2576
2577 static
2578 void lttng_event_notifier_enabler_destroy(
2579 struct lttng_event_notifier_enabler *event_notifier_enabler)
2580 {
2581 if (!event_notifier_enabler) {
2582 return;
2583 }
2584
2585 list_del(&event_notifier_enabler->node);
2586
2587 lttng_enabler_destroy(lttng_event_notifier_enabler_as_enabler(event_notifier_enabler));
2588 kfree(event_notifier_enabler);
2589 }
2590
2591 /*
2592 * lttng_session_sync_event_enablers should be called just before starting a
2593 * session.
2594 * Should be called with sessions mutex held.
2595 */
static
void lttng_session_sync_event_enablers(struct lttng_session *session)
{
	struct lttng_event_enabler *event_enabler;
	struct lttng_event *event;

	/* First make sure each enabler's matching events exist and are referenced. */
	list_for_each_entry(event_enabler, &session->enablers_head, node)
		lttng_event_enabler_ref_events(event_enabler);
	/*
	 * For each event, if at least one of its enablers is enabled,
	 * and its channel and session transient states are enabled, we
	 * enable the event, else we disable it.
	 */
	list_for_each_entry(event, &session->events, list) {
		struct lttng_enabler_ref *enabler_ref;
		struct lttng_bytecode_runtime *runtime;
		int enabled = 0, has_enablers_without_bytecode = 0;

		switch (event->instrumentation) {
		case LTTNG_KERNEL_TRACEPOINT:
		case LTTNG_KERNEL_SYSCALL:
			/* Enable events */
			list_for_each_entry(enabler_ref,
					&event->enablers_ref_head, node) {
				if (enabler_ref->ref->enabled) {
					enabled = 1;
					break;
				}
			}
			break;
		default:
			/* Not handled with lazy sync. */
			continue;
		}
		/*
		 * Enabled state is based on union of enablers, with
		 * intersection of session and channel transient enable
		 * states.
		 */
		enabled = enabled && session->tstate && event->chan->tstate;

		/* Published with WRITE_ONCE: read locklessly on the tracing fast path. */
		WRITE_ONCE(event->enabled, enabled);
		/*
		 * Sync tracepoint registration with event enabled
		 * state.
		 */
		if (enabled) {
			register_event(event);
		} else {
			_lttng_event_unregister(event);
		}

		/* Check if has enablers without bytecode enabled */
		list_for_each_entry(enabler_ref,
				&event->enablers_ref_head, node) {
			if (enabler_ref->ref->enabled
					&& list_empty(&enabler_ref->ref->filter_bytecode_head)) {
				has_enablers_without_bytecode = 1;
				break;
			}
		}
		event->has_enablers_without_bytecode =
			has_enablers_without_bytecode;

		/* Enable filters */
		list_for_each_entry(runtime,
				&event->filter_bytecode_runtime_head, node)
			lttng_bytecode_filter_sync_state(runtime);
	}
}
2666
2667 /*
2668 * Apply enablers to session events, adding events to session if need
2669 * be. It is required after each modification applied to an active
2670 * session, and right before session "start".
2671 * "lazy" sync means we only sync if required.
2672 * Should be called with sessions mutex held.
2673 */
2674 static
2675 void lttng_session_lazy_sync_event_enablers(struct lttng_session *session)
2676 {
2677 /* We can skip if session is not active */
2678 if (!session->active)
2679 return;
2680 lttng_session_sync_event_enablers(session);
2681 }
2682
/*
 * Apply enablers to a group's event notifiers, creating missing event
 * notifiers as needed, and sync their enabled/registered state and
 * filter/capture bytecode. Should be called with sessions mutex held.
 */
static
void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group)
{
	struct lttng_event_notifier_enabler *event_notifier_enabler;
	struct lttng_event_notifier *event_notifier;

	/* First make sure each enabler's matching notifiers exist and are referenced. */
	list_for_each_entry(event_notifier_enabler, &event_notifier_group->enablers_head, node)
		lttng_event_notifier_enabler_ref_event_notifiers(event_notifier_enabler);

	/*
	 * For each event_notifier, if at least one of its enablers is enabled,
	 * we enable the event_notifier, else we disable it.
	 */
	list_for_each_entry(event_notifier, &event_notifier_group->event_notifiers_head, list) {
		struct lttng_enabler_ref *enabler_ref;
		struct lttng_bytecode_runtime *runtime;
		int enabled = 0, has_enablers_without_bytecode = 0;

		switch (event_notifier->instrumentation) {
		case LTTNG_KERNEL_TRACEPOINT:
		case LTTNG_KERNEL_SYSCALL:
			/* Enable event_notifiers */
			list_for_each_entry(enabler_ref,
					&event_notifier->enablers_ref_head, node) {
				if (enabler_ref->ref->enabled) {
					enabled = 1;
					break;
				}
			}
			break;
		default:
			/* Not handled with sync. */
			continue;
		}

		/* Published with WRITE_ONCE: read locklessly on the notification fast path. */
		WRITE_ONCE(event_notifier->enabled, enabled);
		/*
		 * Sync tracepoint registration with event_notifier enabled
		 * state.
		 */
		if (enabled) {
			if (!event_notifier->registered)
				register_event_notifier(event_notifier);
		} else {
			if (event_notifier->registered)
				_lttng_event_notifier_unregister(event_notifier);
		}

		/* Check if has enablers without bytecode enabled */
		list_for_each_entry(enabler_ref,
				&event_notifier->enablers_ref_head, node) {
			if (enabler_ref->ref->enabled
					&& list_empty(&enabler_ref->ref->filter_bytecode_head)) {
				has_enablers_without_bytecode = 1;
				break;
			}
		}
		event_notifier->has_enablers_without_bytecode =
			has_enablers_without_bytecode;

		/* Enable filters */
		list_for_each_entry(runtime,
				&event_notifier->filter_bytecode_runtime_head, node)
			lttng_bytecode_filter_sync_state(runtime);

		/* Enable captures */
		list_for_each_entry(runtime,
				&event_notifier->capture_bytecode_runtime_head, node)
			lttng_bytecode_capture_sync_state(runtime);
	}
}
2754
2755 /*
2756 * Serialize at most one packet worth of metadata into a metadata
2757 * channel.
2758 * We grab the metadata cache mutex to get exclusive access to our metadata
2759 * buffer and to the metadata cache. Exclusive access to the metadata buffer
2760 * allows us to do racy operations such as looking for remaining space left in
2761 * packet and write, since mutual exclusion protects us from concurrent writes.
2762 * Mutual exclusion on the metadata cache allow us to read the cache content
2763 * without racing against reallocation of the cache by updates.
2764 * Returns the number of bytes written in the channel, 0 if no data
2765 * was written and a negative value on error.
2766 */
int lttng_metadata_output_channel(struct lttng_metadata_stream *stream,
		struct channel *chan, bool *coherent)
{
	struct lib_ring_buffer_ctx ctx;
	int ret = 0;
	size_t len, reserve_len;

	/*
	 * Ensure we support multiple get_next / put sequences followed by
	 * put_next. The metadata cache lock protects reading the metadata
	 * cache. It can indeed be read concurrently by "get_next_subbuf" and
	 * "flush" operations on the buffer invoked by different processes.
	 * Moreover, since the metadata cache memory can be reallocated, we
	 * need to have exclusive access against updates even though we only
	 * read it.
	 */
	mutex_lock(&stream->metadata_cache->lock);
	WARN_ON(stream->metadata_in < stream->metadata_out);
	if (stream->metadata_in != stream->metadata_out)
		goto end;

	/* Metadata regenerated, change the version. */
	if (stream->metadata_cache->version != stream->version)
		stream->version = stream->metadata_cache->version;

	/* Bytes cached but not yet pushed to this stream. */
	len = stream->metadata_cache->metadata_written -
		stream->metadata_in;
	if (!len)
		goto end;
	/* Output at most one packet worth of pending metadata. */
	reserve_len = min_t(size_t,
			stream->transport->ops.packet_avail_size(chan),
			len);
	lib_ring_buffer_ctx_init(&ctx, chan, NULL, reserve_len,
			sizeof(char), -1);
	/*
	 * If reservation failed, return an error to the caller.
	 */
	ret = stream->transport->ops.event_reserve(&ctx, 0);
	if (ret != 0) {
		printk(KERN_WARNING "LTTng: Metadata event reservation failed\n");
		stream->coherent = false;
		goto end;
	}
	stream->transport->ops.event_write(&ctx,
			stream->metadata_cache->data + stream->metadata_in,
			reserve_len);
	stream->transport->ops.event_commit(&ctx);
	stream->metadata_in += reserve_len;
	/* A partial write leaves the stream in a non-coherent state. */
	if (reserve_len < len)
		stream->coherent = false;
	else
		stream->coherent = true;
	ret = reserve_len;

end:
	if (coherent)
		*coherent = stream->coherent;
	mutex_unlock(&stream->metadata_cache->lock);
	return ret;
}
2827
/*
 * Enter a metadata transaction: the outermost "begin" (producing count
 * transitions 0 -> 1) takes the metadata cache lock; nested begins only
 * increment the refcount. Paired with lttng_metadata_end().
 */
static
void lttng_metadata_begin(struct lttng_session *session)
{
	if (atomic_inc_return(&session->metadata_cache->producing) == 1)
		mutex_lock(&session->metadata_cache->lock);
}
2834
/*
 * Leave a metadata transaction: the outermost "end" (producing count
 * transitions 1 -> 0) wakes up all metadata stream readers and releases
 * the metadata cache lock. Paired with lttng_metadata_begin().
 */
static
void lttng_metadata_end(struct lttng_session *session)
{
	WARN_ON_ONCE(!atomic_read(&session->metadata_cache->producing));
	if (atomic_dec_return(&session->metadata_cache->producing) == 0) {
		struct lttng_metadata_stream *stream;

		/* New metadata is available: wake up consumers. */
		list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list)
			wake_up_interruptible(&stream->read_wait);
		mutex_unlock(&session->metadata_cache->lock);
	}
}
2847
2848 /*
2849 * Write the metadata to the metadata cache.
2850 * Must be called with sessions_mutex held.
2851 * The metadata cache lock protects us from concurrent read access from
2852 * thread outputting metadata content to ring buffer.
2853 * The content of the printf is printed as a single atomic metadata
2854 * transaction.
2855 */
int lttng_metadata_printf(struct lttng_session *session,
		const char *fmt, ...)
{
	char *str;
	size_t len;
	va_list ap;

	WARN_ON_ONCE(!LTTNG_READ_ONCE(session->active));

	va_start(ap, fmt);
	str = kvasprintf(GFP_KERNEL, fmt, ap);
	va_end(ap);
	if (!str)
		return -ENOMEM;

	len = strlen(str);
	/* Caller must be inside a lttng_metadata_begin/end transaction. */
	WARN_ON_ONCE(!atomic_read(&session->metadata_cache->producing));
	if (session->metadata_cache->metadata_written + len >
			session->metadata_cache->cache_alloc) {
		char *tmp_cache_realloc;
		unsigned int tmp_cache_alloc_size;

		/*
		 * Grow the cache: at least large enough for the new data,
		 * otherwise double the current allocation.
		 * NOTE(review): tmp_cache_alloc_size is unsigned int; if
		 * cache_alloc is declared wider (size_t), very large caches
		 * could truncate here — confirm against the struct definition.
		 */
		tmp_cache_alloc_size = max_t(unsigned int,
				session->metadata_cache->cache_alloc + len,
				session->metadata_cache->cache_alloc << 1);
		tmp_cache_realloc = vzalloc(tmp_cache_alloc_size);
		if (!tmp_cache_realloc)
			goto err;
		if (session->metadata_cache->data) {
			/* Copy old content over, then release the old buffer. */
			memcpy(tmp_cache_realloc,
				session->metadata_cache->data,
				session->metadata_cache->cache_alloc);
			vfree(session->metadata_cache->data);
		}

		session->metadata_cache->cache_alloc = tmp_cache_alloc_size;
		session->metadata_cache->data = tmp_cache_realloc;
	}
	memcpy(session->metadata_cache->data +
			session->metadata_cache->metadata_written,
		str, len);
	session->metadata_cache->metadata_written += len;
	kfree(str);

	return 0;

err:
	kfree(str);
	return -ENOMEM;
}
2906
/*
 * Emit one indentation unit per nesting level into the metadata cache.
 * Returns 0 on success, or the first lttng_metadata_printf() error.
 */
static
int print_tabs(struct lttng_session *session, size_t nesting)
{
	size_t i;

	for (i = 0; i < nesting; i++) {
		int ret;

		ret = lttng_metadata_printf(session, "	");
		if (ret) {
			return ret;
		}
	}
	return 0;
}
2922
/*
 * Emit the field name (prefixed with '_' per CTF identifier mangling)
 * after its type declaration. @nesting is unused here: the name is
 * appended on the same line as the type.
 */
static
int lttng_field_name_statedump(struct lttng_session *session,
		const struct lttng_event_field *field,
		size_t nesting)
{
	return lttng_metadata_printf(session, " _%s;\n", field->name);
}
2930
2931 static
2932 int _lttng_integer_type_statedump(struct lttng_session *session,
2933 const struct lttng_type *type,
2934 size_t nesting)
2935 {
2936 int ret;
2937
2938 WARN_ON_ONCE(type->atype != atype_integer);
2939 ret = print_tabs(session, nesting);
2940 if (ret)
2941 return ret;
2942 ret = lttng_metadata_printf(session,
2943 "integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s }",
2944 type->u.integer.size,
2945 type->u.integer.alignment,
2946 type->u.integer.signedness,
2947 (type->u.integer.encoding == lttng_encode_none)
2948 ? "none"
2949 : (type->u.integer.encoding == lttng_encode_UTF8)
2950 ? "UTF8"
2951 : "ASCII",
2952 type->u.integer.base,
2953 #if __BYTE_ORDER == __BIG_ENDIAN
2954 type->u.integer.reverse_byte_order ? " byte_order = le;" : ""
2955 #else
2956 type->u.integer.reverse_byte_order ? " byte_order = be;" : ""
2957 #endif
2958 );
2959 return ret;
2960 }
2961
2962 /*
2963 * Must be called with sessions_mutex held.
2964 */
2965 static
2966 int _lttng_struct_type_statedump(struct lttng_session *session,
2967 const struct lttng_type *type,
2968 size_t nesting)
2969 {
2970 int ret;
2971 uint32_t i, nr_fields;
2972 unsigned int alignment;
2973
2974 WARN_ON_ONCE(type->atype != atype_struct_nestable);
2975
2976 ret = print_tabs(session, nesting);
2977 if (ret)
2978 return ret;
2979 ret = lttng_metadata_printf(session,
2980 "struct {\n");
2981 if (ret)
2982 return ret;
2983 nr_fields = type->u.struct_nestable.nr_fields;
2984 for (i = 0; i < nr_fields; i++) {
2985 const struct lttng_event_field *iter_field;
2986
2987 iter_field = &type->u.struct_nestable.fields[i];
2988 ret = _lttng_field_statedump(session, iter_field, nesting + 1);
2989 if (ret)
2990 return ret;
2991 }
2992 ret = print_tabs(session, nesting);
2993 if (ret)
2994 return ret;
2995 alignment = type->u.struct_nestable.alignment;
2996 if (alignment) {
2997 ret = lttng_metadata_printf(session,
2998 "} align(%u)",
2999 alignment);
3000 } else {
3001 ret = lttng_metadata_printf(session,
3002 "}");
3003 }
3004 return ret;
3005 }
3006
3007 /*
3008 * Must be called with sessions_mutex held.
3009 */
3010 static
3011 int _lttng_struct_field_statedump(struct lttng_session *session,
3012 const struct lttng_event_field *field,
3013 size_t nesting)
3014 {
3015 int ret;
3016
3017 ret = _lttng_struct_type_statedump(session,
3018 &field->type, nesting);
3019 if (ret)
3020 return ret;
3021 return lttng_field_name_statedump(session, field, nesting);
3022 }
3023
3024 /*
3025 * Must be called with sessions_mutex held.
3026 */
3027 static
3028 int _lttng_variant_type_statedump(struct lttng_session *session,
3029 const struct lttng_type *type,
3030 size_t nesting)
3031 {
3032 int ret;
3033 uint32_t i, nr_choices;
3034
3035 WARN_ON_ONCE(type->atype != atype_variant_nestable);
3036 /*
3037 * CTF 1.8 does not allow expressing nonzero variant alignment in a nestable way.
3038 */
3039 if (type->u.variant_nestable.alignment != 0)
3040 return -EINVAL;
3041 ret = print_tabs(session, nesting);
3042 if (ret)
3043 return ret;
3044 ret = lttng_metadata_printf(session,
3045 "variant <_%s> {\n",
3046 type->u.variant_nestable.tag_name);
3047 if (ret)
3048 return ret;
3049 nr_choices = type->u.variant_nestable.nr_choices;
3050 for (i = 0; i < nr_choices; i++) {
3051 const struct lttng_event_field *iter_field;
3052
3053 iter_field = &type->u.variant_nestable.choices[i];
3054 ret = _lttng_field_statedump(session, iter_field, nesting + 1);
3055 if (ret)
3056 return ret;
3057 }
3058 ret = print_tabs(session, nesting);
3059 if (ret)
3060 return ret;
3061 ret = lttng_metadata_printf(session,
3062 "}");
3063 return ret;
3064 }
3065
3066 /*
3067 * Must be called with sessions_mutex held.
3068 */
3069 static
3070 int _lttng_variant_field_statedump(struct lttng_session *session,
3071 const struct lttng_event_field *field,
3072 size_t nesting)
3073 {
3074 int ret;
3075
3076 ret = _lttng_variant_type_statedump(session,
3077 &field->type, nesting);
3078 if (ret)
3079 return ret;
3080 return lttng_field_name_statedump(session, field, nesting);
3081 }
3082
3083 /*
3084 * Must be called with sessions_mutex held.
3085 */
/*
 * Emit a fixed-length-array field: optional alignment padding struct,
 * the element type declaration, then "name[length]".
 * Must be called with sessions_mutex held.
 */
static
int _lttng_array_field_statedump(struct lttng_session *session,
		const struct lttng_event_field *field,
		size_t nesting)
{
	int ret;
	const struct lttng_type *elem_type;

	WARN_ON_ONCE(field->type.atype != atype_array_nestable);

	/* CTF 1.8: express array alignment via an empty aligned padding struct. */
	if (field->type.u.array_nestable.alignment) {
		ret = print_tabs(session, nesting);
		if (ret)
			return ret;
		ret = lttng_metadata_printf(session,
		"struct { } align(%u) _%s_padding;\n",
				field->type.u.array_nestable.alignment * CHAR_BIT,
				field->name);
		if (ret)
			return ret;
	}
	/*
	 * Nested compound types: Only array of structures and variants are
	 * currently supported.
	 */
	elem_type = field->type.u.array_nestable.elem_type;
	switch (elem_type->atype) {
	case atype_integer:
	case atype_struct_nestable:
	case atype_variant_nestable:
		ret = _lttng_type_statedump(session, elem_type, nesting);
		if (ret)
			return ret;
		break;

	default:
		return -EINVAL;
	}
	ret = lttng_metadata_printf(session,
		" _%s[%u];\n",
		field->name,
		field->type.u.array_nestable.length);
	return ret;
}
3130
3131 /*
3132 * Must be called with sessions_mutex held.
3133 */
/*
 * Emit a variable-length-sequence field: optional alignment padding
 * struct, the element type declaration, then "name[ length_field ]".
 * Must be called with sessions_mutex held.
 */
static
int _lttng_sequence_field_statedump(struct lttng_session *session,
		const struct lttng_event_field *field,
		size_t nesting)
{
	int ret;
	const char *length_name;
	const struct lttng_type *elem_type;

	WARN_ON_ONCE(field->type.atype != atype_sequence_nestable);

	length_name = field->type.u.sequence_nestable.length_name;

	/* CTF 1.8: express sequence alignment via an empty aligned padding struct. */
	if (field->type.u.sequence_nestable.alignment) {
		ret = print_tabs(session, nesting);
		if (ret)
			return ret;
		ret = lttng_metadata_printf(session,
		"struct { } align(%u) _%s_padding;\n",
				field->type.u.sequence_nestable.alignment * CHAR_BIT,
				field->name);
		if (ret)
			return ret;
	}

	/*
	 * Nested compound types: Only array of structures and variants are
	 * currently supported.
	 */
	elem_type = field->type.u.sequence_nestable.elem_type;
	switch (elem_type->atype) {
	case atype_integer:
	case atype_struct_nestable:
	case atype_variant_nestable:
		ret = _lttng_type_statedump(session, elem_type, nesting);
		if (ret)
			return ret;
		break;

	default:
		return -EINVAL;
	}
	ret = lttng_metadata_printf(session,
		" _%s[ _%s ];\n",
		field->name,
		field->type.u.sequence_nestable.length_name);
	return ret;
}
3182
3183 /*
3184 * Must be called with sessions_mutex held.
3185 */
static
int _lttng_enum_type_statedump(struct lttng_session *session,
		const struct lttng_type *type,
		size_t nesting)
{
	const struct lttng_enum_desc *enum_desc;
	const struct lttng_type *container_type;
	int ret;
	unsigned int i, nr_entries;

	/* CTF enumerations must be backed by an integer container type. */
	container_type = type->u.enum_nestable.container_type;
	if (container_type->atype != atype_integer) {
		ret = -EINVAL;
		goto end;
	}
	enum_desc = type->u.enum_nestable.desc;
	nr_entries = enum_desc->nr_entries;

	/* Emit "enum : <container> {" header. */
	ret = print_tabs(session, nesting);
	if (ret)
		goto end;
	ret = lttng_metadata_printf(session, "enum : ");
	if (ret)
		goto end;
	ret = _lttng_integer_type_statedump(session, container_type, 0);
	if (ret)
		goto end;
	ret = lttng_metadata_printf(session, " {\n");
	if (ret)
		goto end;
	/* Dump all entries */
	for (i = 0; i < nr_entries; i++) {
		const struct lttng_enum_entry *entry = &enum_desc->entries[i];
		int j, len;

		ret = print_tabs(session, nesting + 1);
		if (ret)
			goto end;
		ret = lttng_metadata_printf(session,
				"\"");
		if (ret)
			goto end;
		len = strlen(entry->string);
		/* Escape the character '"' */
		for (j = 0; j < len; j++) {
			char c = entry->string[j];

			switch (c) {
			case '"':
				ret = lttng_metadata_printf(session,
						"\\\"");
				break;
			case '\\':
				ret = lttng_metadata_printf(session,
						"\\\\");
				break;
			default:
				ret = lttng_metadata_printf(session,
						"%c", c);
				break;
			}
			if (ret)
				goto end;
		}
		ret = lttng_metadata_printf(session, "\"");
		if (ret)
			goto end;

		/*
		 * Auto-valued entries let the CTF reader assign the next
		 * implicit value; explicit ones print "= value" or a
		 * "= start ... end" range.
		 */
		if (entry->options.is_auto) {
			ret = lttng_metadata_printf(session, ",\n");
			if (ret)
				goto end;
		} else {
			ret = lttng_metadata_printf(session,
					" = ");
			if (ret)
				goto end;
			/* Print with the signedness of the entry's value. */
			if (entry->start.signedness)
				ret = lttng_metadata_printf(session,
					"%lld", (long long) entry->start.value);
			else
				ret = lttng_metadata_printf(session,
					"%llu", entry->start.value);
			if (ret)
				goto end;
			/* Single value vs. range "start ... end". */
			if (entry->start.signedness == entry->end.signedness &&
					entry->start.value
						== entry->end.value) {
				ret = lttng_metadata_printf(session,
					",\n");
			} else {
				if (entry->end.signedness) {
					ret = lttng_metadata_printf(session,
						" ... %lld,\n",
						(long long) entry->end.value);
				} else {
					ret = lttng_metadata_printf(session,
						" ... %llu,\n",
						entry->end.value);
				}
			}
			if (ret)
				goto end;
		}
	}
	ret = print_tabs(session, nesting);
	if (ret)
		goto end;
	ret = lttng_metadata_printf(session, "}");
end:
	return ret;
}
3298
3299 /*
3300 * Must be called with sessions_mutex held.
3301 */
3302 static
3303 int _lttng_enum_field_statedump(struct lttng_session *session,
3304 const struct lttng_event_field *field,
3305 size_t nesting)
3306 {
3307 int ret;
3308
3309 ret = _lttng_enum_type_statedump(session, &field->type, nesting);
3310 if (ret)
3311 return ret;
3312 return lttng_field_name_statedump(session, field, nesting);
3313 }
3314
3315 static
3316 int _lttng_integer_field_statedump(struct lttng_session *session,
3317 const struct lttng_event_field *field,
3318 size_t nesting)
3319 {
3320 int ret;
3321
3322 ret = _lttng_integer_type_statedump(session, &field->type, nesting);
3323 if (ret)
3324 return ret;
3325 return lttng_field_name_statedump(session, field, nesting);
3326 }
3327
3328 static
3329 int _lttng_string_type_statedump(struct lttng_session *session,
3330 const struct lttng_type *type,
3331 size_t nesting)
3332 {
3333 int ret;
3334
3335 WARN_ON_ONCE(type->atype != atype_string);
3336 /* Default encoding is UTF8 */
3337 ret = print_tabs(session, nesting);
3338 if (ret)
3339 return ret;
3340 ret = lttng_metadata_printf(session,
3341 "string%s",
3342 type->u.string.encoding == lttng_encode_ASCII ?
3343 " { encoding = ASCII; }" : "");
3344 return ret;
3345 }
3346
3347 static
3348 int _lttng_string_field_statedump(struct lttng_session *session,
3349 const struct lttng_event_field *field,
3350 size_t nesting)
3351 {
3352 int ret;
3353
3354 WARN_ON_ONCE(field->type.atype != atype_string);
3355 ret = _lttng_string_type_statedump(session, &field->type, nesting);
3356 if (ret)
3357 return ret;
3358 return lttng_field_name_statedump(session, field, nesting);
3359 }
3360
3361 /*
3362 * Must be called with sessions_mutex held.
3363 */
3364 static
3365 int _lttng_type_statedump(struct lttng_session *session,
3366 const struct lttng_type *type,
3367 size_t nesting)
3368 {
3369 int ret = 0;
3370
3371 switch (type->atype) {
3372 case atype_integer:
3373 ret = _lttng_integer_type_statedump(session, type, nesting);
3374 break;
3375 case atype_enum_nestable:
3376 ret = _lttng_enum_type_statedump(session, type, nesting);
3377 break;
3378 case atype_string:
3379 ret = _lttng_string_type_statedump(session, type, nesting);
3380 break;
3381 case atype_struct_nestable:
3382 ret = _lttng_struct_type_statedump(session, type, nesting);
3383 break;
3384 case atype_variant_nestable:
3385 ret = _lttng_variant_type_statedump(session, type, nesting);
3386 break;
3387
3388 /* Nested arrays and sequences are not supported yet. */
3389 case atype_array_nestable:
3390 case atype_sequence_nestable:
3391 default:
3392 WARN_ON_ONCE(1);
3393 return -EINVAL;
3394 }
3395 return ret;
3396 }
3397
3398 /*
3399 * Must be called with sessions_mutex held.
3400 */
3401 static
3402 int _lttng_field_statedump(struct lttng_session *session,
3403 const struct lttng_event_field *field,
3404 size_t nesting)
3405 {
3406 int ret = 0;
3407
3408 switch (field->type.atype) {
3409 case atype_integer:
3410 ret = _lttng_integer_field_statedump(session, field, nesting);
3411 break;
3412 case atype_enum_nestable:
3413 ret = _lttng_enum_field_statedump(session, field, nesting);
3414 break;
3415 case atype_string:
3416 ret = _lttng_string_field_statedump(session, field, nesting);
3417 break;
3418 case atype_struct_nestable:
3419 ret = _lttng_struct_field_statedump(session, field, nesting);
3420 break;
3421 case atype_array_nestable:
3422 ret = _lttng_array_field_statedump(session, field, nesting);
3423 break;
3424 case atype_sequence_nestable:
3425 ret = _lttng_sequence_field_statedump(session, field, nesting);
3426 break;
3427 case atype_variant_nestable:
3428 ret = _lttng_variant_field_statedump(session, field, nesting);
3429 break;
3430
3431 default:
3432 WARN_ON_ONCE(1);
3433 return -EINVAL;
3434 }
3435 return ret;
3436 }
3437
3438 static
3439 int _lttng_context_metadata_statedump(struct lttng_session *session,
3440 struct lttng_ctx *ctx)
3441 {
3442 int ret = 0;
3443 int i;
3444
3445 if (!ctx)
3446 return 0;
3447 for (i = 0; i < ctx->nr_fields; i++) {
3448 const struct lttng_ctx_field *field = &ctx->fields[i];
3449
3450 ret = _lttng_field_statedump(session, &field->event_field, 2);
3451 if (ret)
3452 return ret;
3453 }
3454 return ret;
3455 }
3456
3457 static
3458 int _lttng_fields_metadata_statedump(struct lttng_session *session,
3459 struct lttng_event *event)
3460 {
3461 const struct lttng_event_desc *desc = event->desc;
3462 int ret = 0;
3463 int i;
3464
3465 for (i = 0; i < desc->nr_fields; i++) {
3466 const struct lttng_event_field *field = &desc->fields[i];
3467
3468 ret = _lttng_field_statedump(session, field, 2);
3469 if (ret)
3470 return ret;
3471 }
3472 return ret;
3473 }
3474
3475 /*
3476 * Must be called with sessions_mutex held.
3477 * The entire event metadata is printed as a single atomic metadata
3478 * transaction.
3479 */
static
int _lttng_event_metadata_statedump(struct lttng_session *session,
		struct lttng_channel *chan,
		struct lttng_event *event)
{
	int ret = 0;

	/* Dump each event at most once, and only while the session is active. */
	if (event->metadata_dumped || !LTTNG_READ_ONCE(session->active))
		return 0;
	/* The metadata channel itself is not described in the metadata. */
	if (chan->channel_type == METADATA_CHANNEL)
		return 0;

	/* Open a metadata transaction so the event block is emitted atomically. */
	lttng_metadata_begin(session);

	ret = lttng_metadata_printf(session,
		"event {\n"
		"	name = \"%s\";\n"
		"	id = %u;\n"
		"	stream_id = %u;\n",
		event->desc->name,
		event->id,
		event->chan->id);
	if (ret)
		goto end;

	/* Optional per-event context struct. */
	if (event->ctx) {
		ret = lttng_metadata_printf(session,
			"	context := struct {\n");
		if (ret)
			goto end;
	}
	ret = _lttng_context_metadata_statedump(session, event->ctx);
	if (ret)
		goto end;
	if (event->ctx) {
		ret = lttng_metadata_printf(session,
			"	};\n");
		if (ret)
			goto end;
	}

	ret = lttng_metadata_printf(session,
		"	fields := struct {\n"
		);
	if (ret)
		goto end;

	ret = _lttng_fields_metadata_statedump(session, event);
	if (ret)
		goto end;

	/*
	 * LTTng space reservation can only reserve multiples of the
	 * byte size.
	 */
	ret = lttng_metadata_printf(session,
		"	};\n"
		"};\n\n");
	if (ret)
		goto end;

	event->metadata_dumped = 1;
end:
	lttng_metadata_end(session);
	return ret;

}
3547
3548 /*
3549 * Must be called with sessions_mutex held.
3550 * The entire channel metadata is printed as a single atomic metadata
3551 * transaction.
3552 */
3553 static
3554 int _lttng_channel_metadata_statedump(struct lttng_session *session,
3555 struct lttng_channel *chan)
3556 {
3557 int ret = 0;
3558
3559 if (chan->metadata_dumped || !LTTNG_READ_ONCE(session->active))
3560 return 0;
3561
3562 if (chan->channel_type == METADATA_CHANNEL)
3563 return 0;
3564
3565 lttng_metadata_begin(session);
3566
3567 WARN_ON_ONCE(!chan->header_type);
3568 ret = lttng_metadata_printf(session,
3569 "stream {\n"
3570 " id = %u;\n"
3571 " event.header := %s;\n"
3572 " packet.context := struct packet_context;\n",
3573 chan->id,
3574 chan->header_type == 1 ? "struct event_header_compact" :
3575 "struct event_header_large");
3576 if (ret)
3577 goto end;
3578
3579 if (chan->ctx) {
3580 ret = lttng_metadata_printf(session,
3581 " event.context := struct {\n");
3582 if (ret)
3583 goto end;
3584 }
3585 ret = _lttng_context_metadata_statedump(session, chan->ctx);
3586 if (ret)
3587 goto end;
3588 if (chan->ctx) {
3589 ret = lttng_metadata_printf(session,
3590 " };\n");
3591 if (ret)
3592 goto end;
3593 }
3594
3595 ret = lttng_metadata_printf(session,
3596 "};\n\n");
3597
3598 chan->metadata_dumped = 1;
3599 end:
3600 lttng_metadata_end(session);
3601 return ret;
3602 }
3603
3604 /*
3605 * Must be called with sessions_mutex held.
3606 */
/*
 * Declare the CTF packet context layout shared by all streams:
 * begin/end timestamps, sizes, sequence number, discarded-event count
 * and originating CPU. Field types reference the typealiases emitted by
 * _lttng_session_metadata_statedump().
 */
static
int _lttng_stream_packet_context_declare(struct lttng_session *session)
{
	return lttng_metadata_printf(session,
		"struct packet_context {\n"
		"	uint64_clock_monotonic_t timestamp_begin;\n"
		"	uint64_clock_monotonic_t timestamp_end;\n"
		"	uint64_t content_size;\n"
		"	uint64_t packet_size;\n"
		"	uint64_t packet_seq_num;\n"
		"	unsigned long events_discarded;\n"
		"	uint32_t cpu_id;\n"
		"};\n\n"
		);
}
3622
3623 /*
3624 * Compact header:
3625 * id: range: 0 - 30.
3626 * id 31 is reserved to indicate an extended header.
3627 *
3628 * Large header:
3629 * id: range: 0 - 65534.
3630 * id 65535 is reserved to indicate an extended header.
3631 *
3632 * Must be called with sessions_mutex held.
3633 */
static
int _lttng_event_header_declare(struct lttng_session *session)
{
	/*
	 * Declare both event header layouts. The enum "id" selects the
	 * variant: the reserved top value (31 or 65535) switches to the
	 * extended form carrying a full 32-bit id and 64-bit timestamp.
	 */
	return lttng_metadata_printf(session,
	"struct event_header_compact {\n"
	"	enum : uint5_t { compact = 0 ... 30, extended = 31 } id;\n"
	"	variant <id> {\n"
	"		struct {\n"
	"			uint27_clock_monotonic_t timestamp;\n"
	"		} compact;\n"
	"		struct {\n"
	"			uint32_t id;\n"
	"			uint64_clock_monotonic_t timestamp;\n"
	"		} extended;\n"
	"	} v;\n"
	"} align(%u);\n"
	"\n"
	"struct event_header_large {\n"
	"	enum : uint16_t { compact = 0 ... 65534, extended = 65535 } id;\n"
	"	variant <id> {\n"
	"		struct {\n"
	"			uint32_clock_monotonic_t timestamp;\n"
	"		} compact;\n"
	"		struct {\n"
	"			uint32_t id;\n"
	"			uint64_clock_monotonic_t timestamp;\n"
	"		} extended;\n"
	"	} v;\n"
	"} align(%u);\n\n",
	lttng_alignof(uint32_t) * CHAR_BIT,
	lttng_alignof(uint16_t) * CHAR_BIT
	);
}
3667
3668 /*
3669 * Approximation of NTP time of day to clock monotonic correlation,
3670 * taken at start of trace.
3671 * Yes, this is only an approximation. Yes, we can (and will) do better
3672 * in future versions.
3673 * This function may return a negative offset. It may happen if the
3674 * system sets the REALTIME clock to 0 after boot.
3675 *
3676 * Use 64bit timespec on kernels that have it, this makes 32bit arch
3677 * y2038 compliant.
3678 */
3679 static
3680 int64_t measure_clock_offset(void)
3681 {
3682 uint64_t monotonic_avg, monotonic[2], realtime;
3683 uint64_t tcf = trace_clock_freq();
3684 int64_t offset;
3685 unsigned long flags;
3686 #ifdef LTTNG_KERNEL_HAS_TIMESPEC64
3687 struct timespec64 rts = { 0, 0 };
3688 #else
3689 struct timespec rts = { 0, 0 };
3690 #endif
3691
3692 /* Disable interrupts to increase correlation precision. */
3693 local_irq_save(flags);
3694 monotonic[0] = trace_clock_read64();
3695 #ifdef LTTNG_KERNEL_HAS_TIMESPEC64
3696 ktime_get_real_ts64(&rts);
3697 #else
3698 getnstimeofday(&rts);
3699 #endif
3700 monotonic[1] = trace_clock_read64();
3701 local_irq_restore(flags);
3702
3703 monotonic_avg = (monotonic[0] + monotonic[1]) >> 1;
3704 realtime = (uint64_t) rts.tv_sec * tcf;
3705 if (tcf == NSEC_PER_SEC) {
3706 realtime += rts.tv_nsec;
3707 } else {
3708 uint64_t n = rts.tv_nsec * tcf;
3709
3710 do_div(n, NSEC_PER_SEC);
3711 realtime += n;
3712 }
3713 offset = (int64_t) realtime - monotonic_avg;
3714 return offset;
3715 }
3716
3717 static
3718 int print_escaped_ctf_string(struct lttng_session *session, const char *string)
3719 {
3720 int ret = 0;
3721 size_t i;
3722 char cur;
3723
3724 i = 0;
3725 cur = string[i];
3726 while (cur != '\0') {
3727 switch (cur) {
3728 case '\n':
3729 ret = lttng_metadata_printf(session, "%s", "\\n");
3730 break;
3731 case '\\':
3732 case '"':
3733 ret = lttng_metadata_printf(session, "%c", '\\');
3734 if (ret)
3735 goto error;
3736 /* We still print the current char */
3737 /* Fallthrough */
3738 default:
3739 ret = lttng_metadata_printf(session, "%c", cur);
3740 break;
3741 }
3742
3743 if (ret)
3744 goto error;
3745
3746 cur = string[++i];
3747 }
3748 error:
3749 return ret;
3750 }
3751
/* Emit one quoted env field, escaping the value for CTF. */
static
int print_metadata_escaped_field(struct lttng_session *session, const char *field,
		const char *field_value)
{
	int ret;

	ret = lttng_metadata_printf(session, "	%s = \"", field);
	if (ret)
		return ret;
	ret = print_escaped_ctf_string(session, field_value);
	if (ret)
		return ret;
	return lttng_metadata_printf(session, "\";\n");
}
3771
3772 /*
3773 * Output metadata into this session's metadata buffers.
3774 * Must be called with sessions_mutex held.
3775 */
static
int _lttng_session_metadata_statedump(struct lttng_session *session)
{
	unsigned char *uuid_c = session->uuid.b;
	/* 36 hex/dash chars + NUL for the textual UUID. */
	unsigned char uuid_s[37], clock_uuid_s[BOOT_ID_LEN];
	const char *product_uuid;
	struct lttng_channel *chan;
	struct lttng_event *event;
	int ret = 0;

	if (!LTTNG_READ_ONCE(session->active))
		return 0;

	lttng_metadata_begin(session);

	/* Session-level blocks are emitted once; channels/events may still need dumping. */
	if (session->metadata_dumped)
		goto skip_session;

	snprintf(uuid_s, sizeof(uuid_s),
		"%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
		uuid_c[0], uuid_c[1], uuid_c[2], uuid_c[3],
		uuid_c[4], uuid_c[5], uuid_c[6], uuid_c[7],
		uuid_c[8], uuid_c[9], uuid_c[10], uuid_c[11],
		uuid_c[12], uuid_c[13], uuid_c[14], uuid_c[15]);

	/* Base integer typealiases and the trace header declaration. */
	ret = lttng_metadata_printf(session,
		"typealias integer { size = 8; align = %u; signed = false; } := uint8_t;\n"
		"typealias integer { size = 16; align = %u; signed = false; } := uint16_t;\n"
		"typealias integer { size = 32; align = %u; signed = false; } := uint32_t;\n"
		"typealias integer { size = 64; align = %u; signed = false; } := uint64_t;\n"
		"typealias integer { size = %u; align = %u; signed = false; } := unsigned long;\n"
		"typealias integer { size = 5; align = 1; signed = false; } := uint5_t;\n"
		"typealias integer { size = 27; align = 1; signed = false; } := uint27_t;\n"
		"\n"
		"trace {\n"
		"	major = %u;\n"
		"	minor = %u;\n"
		"	uuid = \"%s\";\n"
		"	byte_order = %s;\n"
		"	packet.header := struct {\n"
		"		uint32_t magic;\n"
		"		uint8_t  uuid[16];\n"
		"		uint32_t stream_id;\n"
		"		uint64_t stream_instance_id;\n"
		"	};\n"
		"};\n\n",
		lttng_alignof(uint8_t) * CHAR_BIT,
		lttng_alignof(uint16_t) * CHAR_BIT,
		lttng_alignof(uint32_t) * CHAR_BIT,
		lttng_alignof(uint64_t) * CHAR_BIT,
		sizeof(unsigned long) * CHAR_BIT,
		lttng_alignof(unsigned long) * CHAR_BIT,
		CTF_SPEC_MAJOR,
		CTF_SPEC_MINOR,
		uuid_s,
#if __BYTE_ORDER == __BIG_ENDIAN
		"be"
#else
		"le"
#endif
		);
	if (ret)
		goto end;

	/* Environment block: host/kernel identity and tracer version. */
	ret = lttng_metadata_printf(session,
		"env {\n"
		"	hostname = \"%s\";\n"
		"	domain = \"kernel\";\n"
		"	sysname = \"%s\";\n"
		"	kernel_release = \"%s\";\n"
		"	kernel_version = \"%s\";\n"
		"	tracer_name = \"lttng-modules\";\n"
		"	tracer_major = %d;\n"
		"	tracer_minor = %d;\n"
		"	tracer_patchlevel = %d;\n"
		"	trace_buffering_scheme = \"global\";\n",
		current->nsproxy->uts_ns->name.nodename,
		utsname()->sysname,
		utsname()->release,
		utsname()->version,
		LTTNG_MODULES_MAJOR_VERSION,
		LTTNG_MODULES_MINOR_VERSION,
		LTTNG_MODULES_PATCHLEVEL_VERSION
		);
	if (ret)
		goto end;

	ret = print_metadata_escaped_field(session, "trace_name", session->name);
	if (ret)
		goto end;
	ret = print_metadata_escaped_field(session, "trace_creation_datetime",
			session->creation_time);
	if (ret)
		goto end;

	/* Add the product UUID to the 'env' section */
	product_uuid = dmi_get_system_info(DMI_PRODUCT_UUID);
	if (product_uuid) {
		ret = lttng_metadata_printf(session,
				"	product_uuid = \"%s\";\n",
				product_uuid
				);
		if (ret)
			goto end;
	}

	/* Close the 'env' section */
	ret = lttng_metadata_printf(session, "};\n\n");
	if (ret)
		goto end;

	/* Clock description: name, optional boot-id uuid, freq and offset. */
	ret = lttng_metadata_printf(session,
		"clock {\n"
		"	name = \"%s\";\n",
		trace_clock_name()
		);
	if (ret)
		goto end;

	if (!trace_clock_uuid(clock_uuid_s)) {
		ret = lttng_metadata_printf(session,
			"	uuid = \"%s\";\n",
			clock_uuid_s
			);
		if (ret)
			goto end;
	}

	ret = lttng_metadata_printf(session,
		"	description = \"%s\";\n"
		"	freq = %llu; /* Frequency, in Hz */\n"
		"	/* clock value offset from Epoch is: offset * (1/freq) */\n"
		"	offset = %lld;\n"
		"};\n\n",
		trace_clock_description(),
		(unsigned long long) trace_clock_freq(),
		(long long) measure_clock_offset()
		);
	if (ret)
		goto end;

	/* Clock-mapped integer typealiases used by headers and packet context. */
	ret = lttng_metadata_printf(session,
		"typealias integer {\n"
		"	size = 27; align = 1; signed = false;\n"
		"	map = clock.%s.value;\n"
		"} := uint27_clock_monotonic_t;\n"
		"\n"
		"typealias integer {\n"
		"	size = 32; align = %u; signed = false;\n"
		"	map = clock.%s.value;\n"
		"} := uint32_clock_monotonic_t;\n"
		"\n"
		"typealias integer {\n"
		"	size = 64; align = %u; signed = false;\n"
		"	map = clock.%s.value;\n"
		"} := uint64_clock_monotonic_t;\n\n",
		trace_clock_name(),
		lttng_alignof(uint32_t) * CHAR_BIT,
		trace_clock_name(),
		lttng_alignof(uint64_t) * CHAR_BIT,
		trace_clock_name()
		);
	if (ret)
		goto end;

	ret = _lttng_stream_packet_context_declare(session);
	if (ret)
		goto end;

	ret = _lttng_event_header_declare(session);
	if (ret)
		goto end;

skip_session:
	/* Channels and events track their own metadata_dumped flags. */
	list_for_each_entry(chan, &session->chan, list) {
		ret = _lttng_channel_metadata_statedump(session, chan);
		if (ret)
			goto end;
	}

	list_for_each_entry(event, &session->events, list) {
		ret = _lttng_event_metadata_statedump(session, event->chan, event);
		if (ret)
			goto end;
	}
	session->metadata_dumped = 1;
end:
	lttng_metadata_end(session);
	return ret;
}
3966
3967 /**
3968 * lttng_transport_register - LTT transport registration
3969 * @transport: transport structure
3970 *
3971 * Registers a transport which can be used as output to extract the data out of
3972 * LTTng. The module calling this registration function must ensure that no
3973 * trap-inducing code will be executed by the transport functions. E.g.
3974 * vmalloc_sync_mappings() must be called between a vmalloc and the moment the memory
3975 * is made visible to the transport function. This registration acts as a
3976 * vmalloc_sync_mappings. Therefore, only if the module allocates virtual memory
3977 * after its registration must it synchronize the TLBs.
3978 */
void lttng_transport_register(struct lttng_transport *transport)
{
	/*
	 * Make sure no page fault can be triggered by the module about to be
	 * registered. We deal with this here so we don't have to call
	 * vmalloc_sync_mappings() in each module's init.
	 * Note: must happen before the transport is published on the list.
	 */
	wrapper_vmalloc_sync_mappings();

	/* sessions_mutex also protects the transport list. */
	mutex_lock(&sessions_mutex);
	list_add_tail(&transport->node, &lttng_transport_list);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_transport_register);
3993
3994 /**
3995 * lttng_transport_unregister - LTT transport unregistration
3996 * @transport: transport structure
3997 */
void lttng_transport_unregister(struct lttng_transport *transport)
{
	/* Unlink under sessions_mutex, which protects the transport list. */
	mutex_lock(&sessions_mutex);
	list_del(&transport->node);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_transport_unregister);
4005
/*
 * lttng_counter_transport_register - counter transport registration
 * @transport: transport structure
 *
 * Same contract as lttng_transport_register(), for counter backends.
 */
void lttng_counter_transport_register(struct lttng_counter_transport *transport)
{
	/*
	 * Make sure no page fault can be triggered by the module about to be
	 * registered. We deal with this here so we don't have to call
	 * vmalloc_sync_mappings() in each module's init.
	 */
	wrapper_vmalloc_sync_mappings();

	/* sessions_mutex also protects the counter transport list. */
	mutex_lock(&sessions_mutex);
	list_add_tail(&transport->node, &lttng_counter_transport_list);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_counter_transport_register);
4020
/*
 * lttng_counter_transport_unregister - counter transport unregistration
 * @transport: transport structure
 */
void lttng_counter_transport_unregister(struct lttng_counter_transport *transport)
{
	mutex_lock(&sessions_mutex);
	list_del(&transport->node);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_counter_transport_unregister);
4028
4029 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
4030
4031 enum cpuhp_state lttng_hp_prepare;
4032 enum cpuhp_state lttng_hp_online;
4033
4034 static int lttng_hotplug_prepare(unsigned int cpu, struct hlist_node *node)
4035 {
4036 struct lttng_cpuhp_node *lttng_node;
4037
4038 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4039 switch (lttng_node->component) {
4040 case LTTNG_RING_BUFFER_FRONTEND:
4041 return 0;
4042 case LTTNG_RING_BUFFER_BACKEND:
4043 return lttng_cpuhp_rb_backend_prepare(cpu, lttng_node);
4044 case LTTNG_RING_BUFFER_ITER:
4045 return 0;
4046 case LTTNG_CONTEXT_PERF_COUNTERS:
4047 return 0;
4048 default:
4049 return -EINVAL;
4050 }
4051 }
4052
4053 static int lttng_hotplug_dead(unsigned int cpu, struct hlist_node *node)
4054 {
4055 struct lttng_cpuhp_node *lttng_node;
4056
4057 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4058 switch (lttng_node->component) {
4059 case LTTNG_RING_BUFFER_FRONTEND:
4060 return lttng_cpuhp_rb_frontend_dead(cpu, lttng_node);
4061 case LTTNG_RING_BUFFER_BACKEND:
4062 return 0;
4063 case LTTNG_RING_BUFFER_ITER:
4064 return 0;
4065 case LTTNG_CONTEXT_PERF_COUNTERS:
4066 return lttng_cpuhp_perf_counter_dead(cpu, lttng_node);
4067 default:
4068 return -EINVAL;
4069 }
4070 }
4071
4072 static int lttng_hotplug_online(unsigned int cpu, struct hlist_node *node)
4073 {
4074 struct lttng_cpuhp_node *lttng_node;
4075
4076 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4077 switch (lttng_node->component) {
4078 case LTTNG_RING_BUFFER_FRONTEND:
4079 return lttng_cpuhp_rb_frontend_online(cpu, lttng_node);
4080 case LTTNG_RING_BUFFER_BACKEND:
4081 return 0;
4082 case LTTNG_RING_BUFFER_ITER:
4083 return lttng_cpuhp_rb_iter_online(cpu, lttng_node);
4084 case LTTNG_CONTEXT_PERF_COUNTERS:
4085 return lttng_cpuhp_perf_counter_online(cpu, lttng_node);
4086 default:
4087 return -EINVAL;
4088 }
4089 }
4090
4091 static int lttng_hotplug_offline(unsigned int cpu, struct hlist_node *node)
4092 {
4093 struct lttng_cpuhp_node *lttng_node;
4094
4095 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4096 switch (lttng_node->component) {
4097 case LTTNG_RING_BUFFER_FRONTEND:
4098 return lttng_cpuhp_rb_frontend_offline(cpu, lttng_node);
4099 case LTTNG_RING_BUFFER_BACKEND:
4100 return 0;
4101 case LTTNG_RING_BUFFER_ITER:
4102 return 0;
4103 case LTTNG_CONTEXT_PERF_COUNTERS:
4104 return 0;
4105 default:
4106 return -EINVAL;
4107 }
4108 }
4109
/*
 * Register the two multi-instance CPU hotplug state machines (prepare/dead
 * and online/offline) and publish the dynamically allocated state numbers
 * to the ring buffer layer.
 */
static int __init lttng_init_cpu_hotplug(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_BP_PREPARE_DYN, "lttng:prepare",
			lttng_hotplug_prepare,
			lttng_hotplug_dead);
	if (ret < 0) {
		return ret;
	}
	/* On success, ret is the dynamically allocated cpuhp state number. */
	lttng_hp_prepare = ret;
	lttng_rb_set_hp_prepare(ret);

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "lttng:online",
			lttng_hotplug_online,
			lttng_hotplug_offline);
	if (ret < 0) {
		/* Roll back the prepare state registered above. */
		cpuhp_remove_multi_state(lttng_hp_prepare);
		lttng_hp_prepare = 0;
		return ret;
	}
	lttng_hp_online = ret;
	lttng_rb_set_hp_online(ret);

	return 0;
}
4136
/* Unregister the hotplug states in reverse order of registration. */
static void __exit lttng_exit_cpu_hotplug(void)
{
	lttng_rb_set_hp_online(0);
	cpuhp_remove_multi_state(lttng_hp_online);
	lttng_rb_set_hp_prepare(0);
	cpuhp_remove_multi_state(lttng_hp_prepare);
}
4144
4145 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
/* No multi-instance CPU hotplug API before Linux 4.10: provide no-op stubs. */
static int lttng_init_cpu_hotplug(void)
{
	return 0;
}
static void lttng_exit_cpu_hotplug(void)
{
}
4153 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
4154
4155
/*
 * Module init: bring up every global facility the tracer depends on, in
 * dependency order, unwinding in reverse order on any failure.
 */
static int __init lttng_events_init(void)
{
	int ret;

	/* Kernel wrappers resolving non-exported symbols at load time. */
	ret = wrapper_lttng_fixup_sig(THIS_MODULE);
	if (ret)
		return ret;
	ret = wrapper_get_pfnblock_flags_mask_init();
	if (ret)
		return ret;
	ret = wrapper_get_pageblock_flags_mask_init();
	if (ret)
		return ret;
	ret = lttng_probes_init();
	if (ret)
		return ret;
	ret = lttng_context_init();
	if (ret)
		return ret;
	ret = lttng_tracepoint_init();
	if (ret)
		goto error_tp;
	/* Slab caches for the frequently allocated event objects. */
	event_cache = KMEM_CACHE(lttng_event, 0);
	if (!event_cache) {
		ret = -ENOMEM;
		goto error_kmem_event;
	}
	event_notifier_cache = KMEM_CACHE(lttng_event_notifier, 0);
	if (!event_notifier_cache) {
		ret = -ENOMEM;
		goto error_kmem_event_notifier;
	}
	ret = lttng_abi_init();
	if (ret)
		goto error_abi;
	ret = lttng_logger_init();
	if (ret)
		goto error_logger;
	ret = lttng_init_cpu_hotplug();
	if (ret)
		goto error_hotplug;
	printk(KERN_NOTICE "LTTng: Loaded modules v%s.%s.%s%s (%s)%s%s\n",
		__stringify(LTTNG_MODULES_MAJOR_VERSION),
		__stringify(LTTNG_MODULES_MINOR_VERSION),
		__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
		LTTNG_MODULES_EXTRAVERSION,
		LTTNG_VERSION_NAME,
#ifdef LTTNG_EXTRA_VERSION_GIT
		LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
#else
		"",
#endif
#ifdef LTTNG_EXTRA_VERSION_NAME
		LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
#else
		"");
#endif
	return 0;

	/* Error unwinding: strictly the reverse of the setup order above. */
error_hotplug:
	lttng_logger_exit();
error_logger:
	lttng_abi_exit();
error_abi:
	kmem_cache_destroy(event_notifier_cache);
error_kmem_event_notifier:
	kmem_cache_destroy(event_cache);
error_kmem_event:
	lttng_tracepoint_exit();
error_tp:
	lttng_context_exit();
	printk(KERN_NOTICE "LTTng: Failed to load modules v%s.%s.%s%s (%s)%s%s\n",
		__stringify(LTTNG_MODULES_MAJOR_VERSION),
		__stringify(LTTNG_MODULES_MINOR_VERSION),
		__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
		LTTNG_MODULES_EXTRAVERSION,
		LTTNG_VERSION_NAME,
#ifdef LTTNG_EXTRA_VERSION_GIT
		LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
#else
		"",
#endif
#ifdef LTTNG_EXTRA_VERSION_NAME
		LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
#else
		"");
#endif
	return ret;
}
4245
4246 module_init(lttng_events_init);
4247
/*
 * Module exit: destroy any remaining sessions, then tear down global
 * facilities in roughly the reverse order of lttng_events_init().
 */
static void __exit lttng_events_exit(void)
{
	struct lttng_session *session, *tmpsession;

	lttng_exit_cpu_hotplug();
	lttng_logger_exit();
	lttng_abi_exit();
	/* _safe iteration: lttng_session_destroy() unlinks the list node. */
	list_for_each_entry_safe(session, tmpsession, &sessions, list)
		lttng_session_destroy(session);
	kmem_cache_destroy(event_cache);
	kmem_cache_destroy(event_notifier_cache);
	lttng_tracepoint_exit();
	lttng_context_exit();
	printk(KERN_NOTICE "LTTng: Unloaded modules v%s.%s.%s%s (%s)%s%s\n",
		__stringify(LTTNG_MODULES_MAJOR_VERSION),
		__stringify(LTTNG_MODULES_MINOR_VERSION),
		__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
		LTTNG_MODULES_EXTRAVERSION,
		LTTNG_VERSION_NAME,
#ifdef LTTNG_EXTRA_VERSION_GIT
		LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
#else
		"",
#endif
#ifdef LTTNG_EXTRA_VERSION_NAME
		LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
#else
		"");
#endif
}
4278
4279 module_exit(lttng_events_exit);
4280
4281 #include <generated/patches.h>
4282 #ifdef LTTNG_EXTRA_VERSION_GIT
4283 MODULE_INFO(extra_version_git, LTTNG_EXTRA_VERSION_GIT);
4284 #endif
4285 #ifdef LTTNG_EXTRA_VERSION_NAME
4286 MODULE_INFO(extra_version_name, LTTNG_EXTRA_VERSION_NAME);
4287 #endif
4288 MODULE_LICENSE("GPL and additional rights");
4289 MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
4290 MODULE_DESCRIPTION("LTTng tracer");
4291 MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
4292 __stringify(LTTNG_MODULES_MINOR_VERSION) "."
4293 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
4294 LTTNG_MODULES_EXTRAVERSION);