/*
 * Extracted from lttng-modules.git: lttng-events.c
 * (commit subject: "Fix: build failure on 2.6.36")
 */
1 /*
2 * lttng-events.c
3 *
4 * Holds LTTng per-session event registry.
5 *
6 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; only
11 * version 2.1 of the License.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23 /*
24 * This page_alloc.h wrapper needs to be included before gfpflags.h because it
25 * overrides a function with a define.
26 */
27 #include "wrapper/page_alloc.h"
28
29 #include <linux/module.h>
30 #include <linux/mutex.h>
31 #include <linux/sched.h>
32 #include <linux/slab.h>
33 #include <linux/jiffies.h>
34 #include <linux/utsname.h>
35 #include <linux/err.h>
36 #include <linux/seq_file.h>
37 #include <linux/file.h>
38 #include <linux/anon_inodes.h>
39 #include "wrapper/file.h"
40 #include <linux/jhash.h>
41 #include <linux/uaccess.h>
42 #include <linux/vmalloc.h>
43
44 #include "wrapper/uuid.h"
45 #include "wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
46 #include "wrapper/random.h"
47 #include "wrapper/tracepoint.h"
48 #include "wrapper/list.h"
49 #include "lttng-kernel-version.h"
50 #include "lttng-events.h"
51 #include "lttng-tracer.h"
52 #include "lttng-abi-old.h"
53 #include "wrapper/vzalloc.h"
54
55 #define METADATA_CACHE_DEFAULT_SIZE 4096
56
57 static LIST_HEAD(sessions);
58 static LIST_HEAD(lttng_transport_list);
59 /*
60 * Protect the sessions and metadata caches.
61 */
62 static DEFINE_MUTEX(sessions_mutex);
63 static struct kmem_cache *event_cache;
64
65 static void lttng_session_lazy_sync_enablers(struct lttng_session *session);
66 static void lttng_session_sync_enablers(struct lttng_session *session);
67 static void lttng_enabler_destroy(struct lttng_enabler *enabler);
68
69 static void _lttng_event_destroy(struct lttng_event *event);
70 static void _lttng_channel_destroy(struct lttng_channel *chan);
71 static int _lttng_event_unregister(struct lttng_event *event);
72 static
73 int _lttng_event_metadata_statedump(struct lttng_session *session,
74 struct lttng_channel *chan,
75 struct lttng_event *event);
76 static
77 int _lttng_session_metadata_statedump(struct lttng_session *session);
78 static
79 void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream);
80
/*
 * Wait for all currently-executing probe handlers to finish.
 *
 * Probes run under preempt-disabled (RCU-sched) read-side critical
 * sections, so synchronize_sched() provides the base grace period.
 * On PREEMPT_RT kernels, read sides may also be preemptible-RCU, hence
 * the extra synchronize_rcu().  The RT patch set renamed the config
 * option (CONFIG_PREEMPT_RT -> CONFIG_PREEMPT_RT_FULL) around kernel
 * 3.4, which is what the version check below accounts for.
 */
void synchronize_trace(void)
{
	synchronize_sched();
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
#ifdef CONFIG_PREEMPT_RT_FULL
	synchronize_rcu();
#endif
#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
#ifdef CONFIG_PREEMPT_RT
	synchronize_rcu();
#endif
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
}
94
/* Take the global lock protecting the session list and metadata caches. */
void lttng_lock_sessions(void)
{
	mutex_lock(&sessions_mutex);
}
99
/* Release the global session list lock taken by lttng_lock_sessions(). */
void lttng_unlock_sessions(void)
{
	mutex_unlock(&sessions_mutex);
}
104
105 /*
106 * Called with sessions lock held.
107 */
108 int lttng_session_active(void)
109 {
110 struct lttng_session *iter;
111
112 list_for_each_entry(iter, &sessions, list) {
113 if (iter->active)
114 return 1;
115 }
116 return 0;
117 }
118
/*
 * Allocate and register a new tracing session, along with its metadata
 * cache.  Returns the new session, or NULL on allocation failure.
 * Takes the sessions mutex for the duration of the setup so the session
 * becomes visible on the global list atomically with its initialization.
 */
struct lttng_session *lttng_session_create(void)
{
	struct lttng_session *session;
	struct lttng_metadata_cache *metadata_cache;
	int i;

	mutex_lock(&sessions_mutex);
	session = kzalloc(sizeof(struct lttng_session), GFP_KERNEL);
	if (!session)
		goto err;
	INIT_LIST_HEAD(&session->chan);
	INIT_LIST_HEAD(&session->events);
	/* Unique identifier shared with the metadata cache (copied below). */
	uuid_le_gen(&session->uuid);

	metadata_cache = kzalloc(sizeof(struct lttng_metadata_cache),
			GFP_KERNEL);
	if (!metadata_cache)
		goto err_free_session;
	/* Cache buffer grows on demand; vmalloc-backed (see vzalloc wrapper). */
	metadata_cache->data = lttng_vzalloc(METADATA_CACHE_DEFAULT_SIZE);
	if (!metadata_cache->data)
		goto err_free_cache;
	metadata_cache->cache_alloc = METADATA_CACHE_DEFAULT_SIZE;
	/* Refcounted: shared between the session and metadata streams. */
	kref_init(&metadata_cache->refcount);
	mutex_init(&metadata_cache->lock);
	session->metadata_cache = metadata_cache;
	INIT_LIST_HEAD(&metadata_cache->metadata_stream);
	memcpy(&metadata_cache->uuid, &session->uuid,
		sizeof(metadata_cache->uuid));
	INIT_LIST_HEAD(&session->enablers_head);
	/* Per-session event name hash table used for duplicate detection. */
	for (i = 0; i < LTTNG_EVENT_HT_SIZE; i++)
		INIT_HLIST_HEAD(&session->events_ht.table[i]);
	list_add(&session->list, &sessions);
	mutex_unlock(&sessions_mutex);
	return session;

err_free_cache:
	kfree(metadata_cache);
err_free_session:
	kfree(session);
err:
	mutex_unlock(&sessions_mutex);
	return NULL;
}
162
/*
 * kref release callback for the metadata cache: frees the vmalloc'd data
 * buffer and the cache structure itself once the last reference is dropped.
 */
void metadata_cache_destroy(struct kref *kref)
{
	struct lttng_metadata_cache *cache =
		container_of(kref, struct lttng_metadata_cache, refcount);
	vfree(cache->data);
	kfree(cache);
}
170
/*
 * Tear down a session: unregister all instrumentation, wait for in-flight
 * probes, then free events, channels and trackers.
 *
 * Teardown ordering matters: all probes must be unregistered and a tracing
 * grace period (synchronize_trace) observed before any event/channel
 * memory is freed, otherwise concurrent probe handlers could touch freed
 * data.
 */
void lttng_session_destroy(struct lttng_session *session)
{
	struct lttng_channel *chan, *tmpchan;
	struct lttng_event *event, *tmpevent;
	struct lttng_metadata_stream *metadata_stream;
	struct lttng_enabler *enabler, *tmpenabler;
	int ret;

	mutex_lock(&sessions_mutex);
	/* Stop lockless readers from considering this session active. */
	ACCESS_ONCE(session->active) = 0;
	list_for_each_entry(chan, &session->chan, list) {
		ret = lttng_syscalls_unregister(chan);
		WARN_ON(ret);
	}
	list_for_each_entry(event, &session->events, list) {
		ret = _lttng_event_unregister(event);
		WARN_ON(ret);
	}
	synchronize_trace();	/* Wait for in-flight events to complete */
	list_for_each_entry_safe(enabler, tmpenabler,
			&session->enablers_head, node)
		lttng_enabler_destroy(enabler);
	list_for_each_entry_safe(event, tmpevent, &session->events, list)
		_lttng_event_destroy(event);
	list_for_each_entry_safe(chan, tmpchan, &session->chan, list) {
		/* Metadata channels are destroyed via their own release path. */
		BUG_ON(chan->channel_type == METADATA_CHANNEL);
		_lttng_channel_destroy(chan);
	}
	/* Wake up readers blocked on metadata streams so they can exit. */
	list_for_each_entry(metadata_stream, &session->metadata_cache->metadata_stream, list)
		_lttng_metadata_channel_hangup(metadata_stream);
	if (session->pid_tracker)
		lttng_pid_tracker_destroy(session->pid_tracker);
	/* Drop the session's reference; streams may still hold theirs. */
	kref_put(&session->metadata_cache->refcount, metadata_cache_destroy);
	list_del(&session->list);
	mutex_unlock(&sessions_mutex);
	kfree(session);
}
208
/*
 * Activate tracing for a session.
 *
 * Returns 0 on success, -EBUSY if the session is already active, or the
 * error from the metadata/state dump.  On state dump failure the session
 * is deactivated again before returning.
 */
int lttng_session_enable(struct lttng_session *session)
{
	int ret = 0;
	struct lttng_channel *chan;

	mutex_lock(&sessions_mutex);
	if (session->active) {
		ret = -EBUSY;
		goto end;
	}

	/* Set transient enabler state to "enabled" */
	session->tstate = 1;

	/*
	 * Snapshot the number of events per channel to know the type of header
	 * we need to use.
	 */
	list_for_each_entry(chan, &session->chan, list) {
		if (chan->header_type)
			continue;	/* don't change it if session stop/restart */
		/* Compact headers encode up to 31 event IDs inline. */
		if (chan->free_event_id < 31)
			chan->header_type = 1;	/* compact */
		else
			chan->header_type = 2;	/* large */
	}

	/* We need to sync enablers with session before activation. */
	lttng_session_sync_enablers(session);

	/* ACCESS_ONCE publishes the flags to lockless probe-side readers. */
	ACCESS_ONCE(session->active) = 1;
	ACCESS_ONCE(session->been_active) = 1;
	ret = _lttng_session_metadata_statedump(session);
	if (ret) {
		ACCESS_ONCE(session->active) = 0;
		goto end;
	}
	ret = lttng_statedump_start(session);
	if (ret)
		ACCESS_ONCE(session->active) = 0;
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
253
254 int lttng_session_disable(struct lttng_session *session)
255 {
256 int ret = 0;
257
258 mutex_lock(&sessions_mutex);
259 if (!session->active) {
260 ret = -EBUSY;
261 goto end;
262 }
263 ACCESS_ONCE(session->active) = 0;
264
265 /* Set transient enabler state to "disabled" */
266 session->tstate = 0;
267 lttng_session_sync_enablers(session);
268 end:
269 mutex_unlock(&sessions_mutex);
270 return ret;
271 }
272
273 int lttng_channel_enable(struct lttng_channel *channel)
274 {
275 int ret = 0;
276
277 mutex_lock(&sessions_mutex);
278 if (channel->channel_type == METADATA_CHANNEL) {
279 ret = -EPERM;
280 goto end;
281 }
282 if (channel->enabled) {
283 ret = -EEXIST;
284 goto end;
285 }
286 /* Set transient enabler state to "enabled" */
287 channel->tstate = 1;
288 lttng_session_sync_enablers(channel->session);
289 /* Set atomically the state to "enabled" */
290 ACCESS_ONCE(channel->enabled) = 1;
291 end:
292 mutex_unlock(&sessions_mutex);
293 return ret;
294 }
295
/*
 * Disable a per-CPU data channel.
 * Returns -EPERM for metadata channels, -EEXIST if already disabled,
 * 0 on success.
 */
int lttng_channel_disable(struct lttng_channel *channel)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (channel->channel_type == METADATA_CHANNEL) {
		ret = -EPERM;
		goto end;
	}
	if (!channel->enabled) {
		ret = -EEXIST;
		goto end;
	}
	/* Set atomically the state to "disabled" */
	ACCESS_ONCE(channel->enabled) = 0;
	/* Set transient enabler state to "disabled" */
	channel->tstate = 0;
	lttng_session_sync_enablers(channel->session);
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
318
319 int lttng_event_enable(struct lttng_event *event)
320 {
321 int ret = 0;
322
323 mutex_lock(&sessions_mutex);
324 if (event->chan->channel_type == METADATA_CHANNEL) {
325 ret = -EPERM;
326 goto end;
327 }
328 if (event->enabled) {
329 ret = -EEXIST;
330 goto end;
331 }
332 ACCESS_ONCE(event->enabled) = 1;
333 lttng_session_sync_enablers(event->chan->session);
334 end:
335 mutex_unlock(&sessions_mutex);
336 return ret;
337 }
338
339 int lttng_event_disable(struct lttng_event *event)
340 {
341 int ret = 0;
342
343 mutex_lock(&sessions_mutex);
344 if (event->chan->channel_type == METADATA_CHANNEL) {
345 ret = -EPERM;
346 goto end;
347 }
348 if (!event->enabled) {
349 ret = -EEXIST;
350 goto end;
351 }
352 ACCESS_ONCE(event->enabled) = 0;
353 lttng_session_sync_enablers(event->chan->session);
354 end:
355 mutex_unlock(&sessions_mutex);
356 return ret;
357 }
358
359 static struct lttng_transport *lttng_transport_find(const char *name)
360 {
361 struct lttng_transport *transport;
362
363 list_for_each_entry(transport, &lttng_transport_list, node) {
364 if (!strcmp(transport->name, name))
365 return transport;
366 }
367 return NULL;
368 }
369
/*
 * Create a channel in a session using the named transport.
 *
 * Refuses to add a non-metadata channel to a session that has already
 * been active (channel layout is fixed at first activation).  Takes a
 * module reference on the transport for the channel's lifetime
 * (released in _lttng_channel_destroy).  Returns the channel, or NULL
 * on any failure.
 */
struct lttng_channel *lttng_channel_create(struct lttng_session *session,
				       const char *transport_name,
				       void *buf_addr,
				       size_t subbuf_size, size_t num_subbuf,
				       unsigned int switch_timer_interval,
				       unsigned int read_timer_interval,
				       enum channel_type channel_type)
{
	struct lttng_channel *chan;
	struct lttng_transport *transport = NULL;

	mutex_lock(&sessions_mutex);
	if (session->been_active && channel_type != METADATA_CHANNEL)
		goto active;	/* Refuse to add channel to active session */
	transport = lttng_transport_find(transport_name);
	if (!transport) {
		printk(KERN_WARNING "LTTng transport %s not found\n",
		       transport_name);
		goto notransport;
	}
	if (!try_module_get(transport->owner)) {
		printk(KERN_WARNING "LTT : Can't lock transport module.\n");
		goto notransport;
	}
	chan = kzalloc(sizeof(struct lttng_channel), GFP_KERNEL);
	if (!chan)
		goto nomem;
	chan->session = session;
	chan->id = session->free_chan_id++;
	chan->ops = &transport->ops;
	/*
	 * Note: the channel creation op already writes into the packet
	 * headers. Therefore the "chan" information used as input
	 * should be already accessible.
	 */
	chan->chan = transport->ops.channel_create(transport_name,
			chan, buf_addr, subbuf_size, num_subbuf,
			switch_timer_interval, read_timer_interval);
	if (!chan->chan)
		goto create_error;
	/* New channels start enabled, both transiently and atomically. */
	chan->tstate = 1;
	chan->enabled = 1;
	chan->transport = transport;
	chan->channel_type = channel_type;
	list_add(&chan->list, &session->chan);
	mutex_unlock(&sessions_mutex);
	return chan;

create_error:
	kfree(chan);
nomem:
	/* transport may still be NULL when jumping from the lookup above. */
	if (transport)
		module_put(transport->owner);
notransport:
active:
	mutex_unlock(&sessions_mutex);
	return NULL;
}
428
/*
 * Only used internally at session destruction for per-cpu channels, and
 * when metadata channel is released.
 * Needs to be called with sessions mutex held.
 *
 * Order matters: the transport's channel teardown runs before the module
 * reference (taken in lttng_channel_create) is dropped.
 */
static
void _lttng_channel_destroy(struct lttng_channel *chan)
{
	chan->ops->channel_destroy(chan->chan);
	module_put(chan->transport->owner);
	list_del(&chan->list);
	lttng_destroy_context(chan->ctx);
	kfree(chan);
}
443
/*
 * Public teardown path for metadata channels (per-CPU channels go through
 * lttng_session_destroy instead, hence the BUG_ON).
 */
void lttng_metadata_channel_destroy(struct lttng_channel *chan)
{
	BUG_ON(chan->channel_type != METADATA_CHANNEL);

	/* Protect the metadata cache with the sessions_mutex. */
	mutex_lock(&sessions_mutex);
	_lttng_channel_destroy(chan);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_metadata_channel_destroy);
454
/*
 * Mark a metadata stream as finalized and wake up any reader blocked on
 * it, so readers can observe end-of-stream and exit.
 */
static
void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream)
{
	stream->finalized = 1;
	wake_up_interruptible(&stream->read_wait);
}
461
/*
 * Supports event creation while tracing session is active.
 * Needs to be called with sessions mutex held.
 *
 * Creates an lttng_event for the given instrumentation type, registers
 * the underlying probe where applicable, emits its metadata, and links
 * it into the session's event list and name hash table.  Returns the
 * event or an ERR_PTR() — never NULL.
 */
struct lttng_event *_lttng_event_create(struct lttng_channel *chan,
				struct lttng_kernel_event *event_param,
				void *filter,
				const struct lttng_event_desc *event_desc,
				enum lttng_kernel_instrumentation itype)
{
	struct lttng_session *session = chan->session;
	struct lttng_event *event;
	const char *event_name;
	struct hlist_head *head;
	size_t name_len;
	uint32_t hash;
	int ret;

	/* Event ID space for this channel is exhausted. */
	if (chan->free_event_id == -1U) {
		ret = -EMFILE;
		goto full;
	}

	/*
	 * The event name comes from the probe descriptor for tracepoints,
	 * and from the user-supplied parameters for everything else.
	 */
	switch (itype) {
	case LTTNG_KERNEL_TRACEPOINT:
		event_name = event_desc->name;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_KRETPROBE:
	case LTTNG_KERNEL_FUNCTION:
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_SYSCALL:
		event_name = event_param->name;
		break;
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
		goto type_error;
	}
	/* Reject duplicate (name, channel) pairs via the session hash table. */
	name_len = strlen(event_name);
	hash = jhash(event_name, name_len, 0);
	head = &session->events_ht.table[hash & (LTTNG_EVENT_HT_SIZE - 1)];
	lttng_hlist_for_each_entry(event, head, hlist) {
		WARN_ON_ONCE(!event->desc);
		if (!strncmp(event->desc->name, event_name,
					LTTNG_KERNEL_SYM_NAME_LEN - 1)
				&& chan == event->chan) {
			ret = -EEXIST;
			goto exist;
		}
	}

	event = kmem_cache_zalloc(event_cache, GFP_KERNEL);
	if (!event) {
		ret = -ENOMEM;
		goto cache_error;
	}
	event->chan = chan;
	event->filter = filter;
	event->id = chan->free_event_id++;
	event->instrumentation = itype;
	event->evtype = LTTNG_TYPE_EVENT;
	INIT_LIST_HEAD(&event->bytecode_runtime_head);
	INIT_LIST_HEAD(&event->enablers_ref_head);

	switch (itype) {
	case LTTNG_KERNEL_TRACEPOINT:
		/* Event will be enabled by enabler sync. */
		event->enabled = 0;
		event->registered = 0;
		event->desc = lttng_event_get(event_name);
		if (!event->desc) {
			ret = -ENOENT;
			goto register_error;
		}
		/* Populate lttng_event structure before event registration. */
		smp_wmb();
		break;
	case LTTNG_KERNEL_KPROBE:
		event->enabled = 1;
		event->registered = 1;
		/*
		 * Populate lttng_event structure before event
		 * registration.
		 */
		smp_wmb();
		ret = lttng_kprobes_register(event_name,
				event_param->u.kprobe.symbol_name,
				event_param->u.kprobe.offset,
				event_param->u.kprobe.addr,
				event);
		if (ret) {
			ret = -EINVAL;
			goto register_error;
		}
		/* Pin the probe provider module while the event exists. */
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		break;
	case LTTNG_KERNEL_KRETPROBE:
	{
		struct lttng_event *event_return;

		/* kretprobe defines 2 events */
		event->enabled = 1;
		event->registered = 1;
		event_return =
			kmem_cache_zalloc(event_cache, GFP_KERNEL);
		if (!event_return) {
			ret = -ENOMEM;
			goto register_error;
		}
		event_return->chan = chan;
		event_return->filter = filter;
		event_return->id = chan->free_event_id++;
		event_return->enabled = 1;
		event_return->registered = 1;
		event_return->instrumentation = itype;
		/*
		 * Populate lttng_event structure before kretprobe registration.
		 */
		smp_wmb();
		ret = lttng_kretprobes_register(event_name,
				event_param->u.kretprobe.symbol_name,
				event_param->u.kretprobe.offset,
				event_param->u.kretprobe.addr,
				event, event_return);
		if (ret) {
			kmem_cache_free(event_cache, event_return);
			ret = -EINVAL;
			goto register_error;
		}
		/* Take 2 refs on the module: one per event. */
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		/* The return event's metadata is dumped here; the entry
		 * event's metadata is dumped on the common path below. */
		ret = _lttng_event_metadata_statedump(chan->session, chan,
						    event_return);
		WARN_ON_ONCE(ret > 0);
		if (ret) {
			kmem_cache_free(event_cache, event_return);
			module_put(event->desc->owner);
			module_put(event->desc->owner);
			goto statedump_error;
		}
		list_add(&event_return->list, &chan->session->events);
		break;
	}
	case LTTNG_KERNEL_FUNCTION:
		event->enabled = 1;
		event->registered = 1;
		/*
		 * Populate lttng_event structure before event
		 * registration.
		 */
		smp_wmb();
		ret = lttng_ftrace_register(event_name,
				event_param->u.ftrace.symbol_name,
				event);
		if (ret) {
			goto register_error;
		}
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		break;
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_SYSCALL:
		/* No probe registration: the descriptor is supplied directly. */
		event->enabled = 1;
		event->registered = 0;
		event->desc = event_desc;
		if (!event->desc) {
			ret = -EINVAL;
			goto register_error;
		}
		break;
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
		goto register_error;
	}
	ret = _lttng_event_metadata_statedump(chan->session, chan, event);
	WARN_ON_ONCE(ret > 0);
	if (ret) {
		goto statedump_error;
	}
	hlist_add_head(&event->hlist, head);
	list_add(&event->list, &chan->session->events);
	return event;

statedump_error:
	/* If a statedump error occurs, events will not be readable. */
register_error:
	kmem_cache_free(event_cache, event);
cache_error:
exist:
type_error:
full:
	return ERR_PTR(ret);
}
661
/*
 * Locked wrapper around _lttng_event_create(): takes the sessions mutex
 * required by the internal creation path.  Returns the event or an
 * ERR_PTR(), never NULL.
 */
struct lttng_event *lttng_event_create(struct lttng_channel *chan,
				struct lttng_kernel_event *event_param,
				void *filter,
				const struct lttng_event_desc *event_desc,
				enum lttng_kernel_instrumentation itype)
{
	struct lttng_event *event;

	mutex_lock(&sessions_mutex);
	event = _lttng_event_create(chan, event_param, filter, event_desc,
			itype);
	mutex_unlock(&sessions_mutex);
	return event;
}
676
/*
 * Attach the probe callback for an event that is not yet registered.
 * Only used for tracepoints and syscalls for now; kprobe/kretprobe/
 * function/noop events register at creation time, so they are no-ops
 * here.  Sets event->registered only if registration succeeded.
 */
static
void register_event(struct lttng_event *event)
{
	const struct lttng_event_desc *desc;
	int ret = -EINVAL;

	if (event->registered)
		return;

	desc = event->desc;
	switch (event->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
		ret = lttng_wrapper_tracepoint_probe_register(desc->kname,
						  desc->probe_callback,
						  event);
		break;
	case LTTNG_KERNEL_SYSCALL:
		ret = lttng_syscall_filter_enable(event->chan,
			desc->name);
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_KRETPROBE:
	case LTTNG_KERNEL_FUNCTION:
	case LTTNG_KERNEL_NOOP:
		ret = 0;
		break;
	default:
		WARN_ON_ONCE(1);
	}
	if (!ret)
		event->registered = 1;
}
710
/*
 * Only used internally at session destruction.
 *
 * Detach the probe for a registered event, dispatching on its
 * instrumentation type.  Clears event->registered on success and
 * returns the unregistration status (0 if the event was not registered).
 */
int _lttng_event_unregister(struct lttng_event *event)
{
	const struct lttng_event_desc *desc;
	int ret = -EINVAL;

	if (!event->registered)
		return 0;

	desc = event->desc;
	switch (event->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
		ret = lttng_wrapper_tracepoint_probe_unregister(event->desc->kname,
						  event->desc->probe_callback,
						  event);
		break;
	case LTTNG_KERNEL_KPROBE:
		lttng_kprobes_unregister(event);
		ret = 0;
		break;
	case LTTNG_KERNEL_KRETPROBE:
		lttng_kretprobes_unregister(event);
		ret = 0;
		break;
	case LTTNG_KERNEL_FUNCTION:
		lttng_ftrace_unregister(event);
		ret = 0;
		break;
	case LTTNG_KERNEL_SYSCALL:
		ret = lttng_syscall_filter_disable(event->chan,
			desc->name);
		break;
	case LTTNG_KERNEL_NOOP:
		ret = 0;
		break;
	default:
		WARN_ON_ONCE(1);
	}
	if (!ret)
		event->registered = 0;
	return ret;
}
755
/*
 * Only used internally at session destruction.
 *
 * Releases per-instrumentation resources (probe descriptor reference or
 * provider module reference taken at creation), then unlinks and frees
 * the event.  Callers must have unregistered the event and waited for a
 * tracing grace period first.
 */
static
void _lttng_event_destroy(struct lttng_event *event)
{
	switch (event->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
		/* Drops the reference taken by lttng_event_get(). */
		lttng_event_put(event->desc);
		break;
	case LTTNG_KERNEL_KPROBE:
		module_put(event->desc->owner);
		lttng_kprobes_destroy_private(event);
		break;
	case LTTNG_KERNEL_KRETPROBE:
		module_put(event->desc->owner);
		lttng_kretprobes_destroy_private(event);
		break;
	case LTTNG_KERNEL_FUNCTION:
		module_put(event->desc->owner);
		lttng_ftrace_destroy_private(event);
		break;
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_SYSCALL:
		break;
	default:
		WARN_ON_ONCE(1);
	}
	list_del(&event->list);
	lttng_destroy_context(event->ctx);
	kmem_cache_free(event_cache, event);
}
788
789 int lttng_session_track_pid(struct lttng_session *session, int pid)
790 {
791 int ret;
792
793 if (pid < -1)
794 return -EINVAL;
795 mutex_lock(&sessions_mutex);
796 if (pid == -1) {
797 /* track all pids: destroy tracker. */
798 if (session->pid_tracker) {
799 struct lttng_pid_tracker *lpf;
800
801 lpf = session->pid_tracker;
802 rcu_assign_pointer(session->pid_tracker, NULL);
803 synchronize_trace();
804 lttng_pid_tracker_destroy(lpf);
805 }
806 ret = 0;
807 } else {
808 if (!session->pid_tracker) {
809 struct lttng_pid_tracker *lpf;
810
811 lpf = lttng_pid_tracker_create();
812 if (!lpf) {
813 ret = -ENOMEM;
814 goto unlock;
815 }
816 ret = lttng_pid_tracker_add(lpf, pid);
817 rcu_assign_pointer(session->pid_tracker, lpf);
818 } else {
819 ret = lttng_pid_tracker_add(session->pid_tracker, pid);
820 }
821 }
822 unlock:
823 mutex_unlock(&sessions_mutex);
824 return ret;
825 }
826
/*
 * Remove @pid from the session's PID tracker, or untrack all PIDs when
 * @pid == -1 (by replacing any existing tracker with an empty one).
 * Returns 0 on success, -EINVAL for pid < -1, -ENOMEM on allocation
 * failure, -ENOENT if no tracker exists for a single-pid removal.
 */
int lttng_session_untrack_pid(struct lttng_session *session, int pid)
{
	int ret;

	if (pid < -1)
		return -EINVAL;
	mutex_lock(&sessions_mutex);
	if (pid == -1) {
		/* untrack all pids: replace by empty tracker. */
		struct lttng_pid_tracker *old_lpf = session->pid_tracker;
		struct lttng_pid_tracker *lpf;

		lpf = lttng_pid_tracker_create();
		if (!lpf) {
			ret = -ENOMEM;
			goto unlock;
		}
		rcu_assign_pointer(session->pid_tracker, lpf);
		/* Wait for in-flight probe readers before freeing old tracker. */
		synchronize_trace();
		if (old_lpf)
			lttng_pid_tracker_destroy(old_lpf);
		ret = 0;
	} else {
		if (!session->pid_tracker) {
			ret = -ENOENT;
			goto unlock;
		}
		ret = lttng_pid_tracker_del(session->pid_tracker, pid);
	}
unlock:
	mutex_unlock(&sessions_mutex);
	return ret;
}
860
/*
 * seq_file "start" operation for the tracked-PID listing.
 *
 * Takes the sessions mutex, held until pid_list_stop().  Linearly
 * re-scans the tracker hash table to position at *pos.  When no tracker
 * exists, the session pointer itself is returned as a sentinel meaning
 * "all PIDs tracked" (recognized in pid_list_show()).
 */
static
void *pid_list_start(struct seq_file *m, loff_t *pos)
{
	struct lttng_session *session = m->private;
	struct lttng_pid_tracker *lpf;
	struct lttng_pid_hash_node *e;
	int iter = 0, i;

	mutex_lock(&sessions_mutex);
	lpf = session->pid_tracker;
	if (lpf) {
		for (i = 0; i < LTTNG_PID_TABLE_SIZE; i++) {
			struct hlist_head *head = &lpf->pid_hash[i];

			lttng_hlist_for_each_entry(e, head, hlist) {
				if (iter++ >= *pos)
					return e;
			}
		}
	} else {
		/* PID tracker disabled. */
		if (iter >= *pos && iter == 0) {
			return session;	/* empty tracker */
		}
		iter++;
	}
	/* End of list */
	return NULL;
}
890
/* Called with sessions_mutex held. */
/*
 * seq_file "next" operation: advance *ppos, then re-scan the hash table
 * from the beginning to find the element at the new position (O(n) per
 * step).  Mirrors pid_list_start(), including the session-pointer
 * sentinel for the "tracker disabled" case.
 */
static
void *pid_list_next(struct seq_file *m, void *p, loff_t *ppos)
{
	struct lttng_session *session = m->private;
	struct lttng_pid_tracker *lpf;
	struct lttng_pid_hash_node *e;
	int iter = 0, i;

	(*ppos)++;
	lpf = session->pid_tracker;
	if (lpf) {
		for (i = 0; i < LTTNG_PID_TABLE_SIZE; i++) {
			struct hlist_head *head = &lpf->pid_hash[i];

			lttng_hlist_for_each_entry(e, head, hlist) {
				if (iter++ >= *ppos)
					return e;
			}
		}
	} else {
		/* PID tracker disabled. */
		if (iter >= *ppos && iter == 0)
			return session;	/* empty tracker */
		iter++;
	}

	/* End of list */
	return NULL;
}
921
/* seq_file "stop" operation: drops the mutex taken in pid_list_start(). */
static
void pid_list_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&sessions_mutex);
}
927
928 static
929 int pid_list_show(struct seq_file *m, void *p)
930 {
931 int pid;
932
933 if (p == m->private) {
934 /* Tracker disabled. */
935 pid = -1;
936 } else {
937 const struct lttng_pid_hash_node *e = p;
938
939 pid = lttng_pid_tracker_get_node_pid(e);
940 }
941 seq_printf(m, "process { pid = %d; };\n", pid);
942 return 0;
943 }
944
/* seq_file operations backing the tracked-PID listing file. */
static
const struct seq_operations lttng_tracker_pids_list_seq_ops = {
	.start = pid_list_start,
	.next = pid_list_next,
	.stop = pid_list_stop,
	.show = pid_list_show,
};
952
/* Open handler: attach the seq_file iterator to the file. */
static
int lttng_tracker_pids_list_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &lttng_tracker_pids_list_seq_ops);
}
958
/*
 * Release handler: tear down the seq_file, then drop the session file
 * reference pinned in lttng_session_list_tracker_pids() (f_count inc).
 */
static
int lttng_tracker_pids_list_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	struct lttng_session *session = m->private;
	int ret;

	WARN_ON_ONCE(!session);
	ret = seq_release(inode, file);
	if (!ret && session)
		fput(session->file);
	return ret;
}
972
/* File operations for the anonymous tracked-PID listing file. */
const struct file_operations lttng_tracker_pids_list_fops = {
	.owner = THIS_MODULE,
	.open = lttng_tracker_pids_list_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = lttng_tracker_pids_list_release,
};
980
/*
 * Create an anonymous file listing the session's tracked PIDs and return
 * its file descriptor (or a negative errno).
 *
 * The seq_file private pointer is set manually after calling the fops
 * open with a NULL inode, then a reference on the session's own file is
 * taken (f_count inc) so the session outlives this listing fd; the
 * reference is dropped in lttng_tracker_pids_list_release().
 */
int lttng_session_list_tracker_pids(struct lttng_session *session)
{
	struct file *tracker_pids_list_file;
	struct seq_file *m;
	int file_fd, ret;

	file_fd = lttng_get_unused_fd();
	if (file_fd < 0) {
		ret = file_fd;
		goto fd_error;
	}

	tracker_pids_list_file = anon_inode_getfile("[lttng_tracker_pids_list]",
					  &lttng_tracker_pids_list_fops,
					  NULL, O_RDWR);
	if (IS_ERR(tracker_pids_list_file)) {
		ret = PTR_ERR(tracker_pids_list_file);
		goto file_error;
	}
	ret = lttng_tracker_pids_list_fops.open(NULL, tracker_pids_list_file);
	if (ret < 0)
		goto open_error;
	m = tracker_pids_list_file->private_data;
	m->private = session;
	fd_install(file_fd, tracker_pids_list_file);
	atomic_long_inc(&session->file->f_count);

	return file_fd;

open_error:
	fput(tracker_pids_list_file);
file_error:
	put_unused_fd(file_fd);
fd_error:
	return ret;
}
1017
1018 /*
1019 * Enabler management.
1020 */
/*
 * Match an event name against a wildcard enabler pattern that ends with
 * '*': compare the prefix (pattern minus its final character).
 * Returns 1 on match, 0 otherwise.
 */
static
int lttng_match_enabler_wildcard(const char *desc_name,
		const char *name)
{
	/* Compare excluding final '*' */
	return strncmp(desc_name, name, strlen(name) - 1) == 0;
}
1030
/*
 * Exact-name enabler match.
 * Returns 1 if the names are identical, 0 otherwise.
 */
static
int lttng_match_enabler_name(const char *desc_name,
		const char *name)
{
	return strcmp(desc_name, name) == 0;
}
1039
/*
 * Decide whether an event descriptor matches an enabler.
 *
 * For syscall enablers, the descriptor name is normalized before
 * comparison: an optional "compat_" prefix is stripped, then the
 * mandatory "syscall_exit_"/"syscall_entry_" prefix, so the enabler
 * pattern matches the bare syscall name.
 *
 * Returns 1 on match, 0 on mismatch, -EINVAL on unexpected
 * instrumentation/enabler type or malformed syscall descriptor name.
 */
static
int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
		struct lttng_enabler *enabler)
{
	const char *desc_name, *enabler_name;

	enabler_name = enabler->event_param.name;
	switch (enabler->event_param.instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
		desc_name = desc->name;
		break;
	case LTTNG_KERNEL_SYSCALL:
		desc_name = desc->name;
		if (!strncmp(desc_name, "compat_", strlen("compat_")))
			desc_name += strlen("compat_");
		if (!strncmp(desc_name, "syscall_exit_",
				strlen("syscall_exit_"))) {
			desc_name += strlen("syscall_exit_");
		} else if (!strncmp(desc_name, "syscall_entry_",
				strlen("syscall_entry_"))) {
			desc_name += strlen("syscall_entry_");
		} else {
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}
	switch (enabler->type) {
	case LTTNG_ENABLER_WILDCARD:
		return lttng_match_enabler_wildcard(desc_name, enabler_name);
	case LTTNG_ENABLER_NAME:
		return lttng_match_enabler_name(desc_name, enabler_name);
	default:
		return -EINVAL;
	}
}
1079
1080 static
1081 int lttng_event_match_enabler(struct lttng_event *event,
1082 struct lttng_enabler *enabler)
1083 {
1084 if (enabler->event_param.instrumentation != event->instrumentation)
1085 return 0;
1086 if (lttng_desc_match_enabler(event->desc, enabler)
1087 && event->chan == enabler->chan)
1088 return 1;
1089 else
1090 return 0;
1091 }
1092
1093 static
1094 struct lttng_enabler_ref *lttng_event_enabler_ref(struct lttng_event *event,
1095 struct lttng_enabler *enabler)
1096 {
1097 struct lttng_enabler_ref *enabler_ref;
1098
1099 list_for_each_entry(enabler_ref,
1100 &event->enablers_ref_head, node) {
1101 if (enabler_ref->ref == enabler)
1102 return enabler_ref;
1103 }
1104 return NULL;
1105 }
1106
1107 static
1108 void lttng_create_tracepoint_if_missing(struct lttng_enabler *enabler)
1109 {
1110 struct lttng_session *session = enabler->chan->session;
1111 struct lttng_probe_desc *probe_desc;
1112 const struct lttng_event_desc *desc;
1113 int i;
1114 struct list_head *probe_list;
1115
1116 probe_list = lttng_get_probe_list_head();
1117 /*
1118 * For each probe event, if we find that a probe event matches
1119 * our enabler, create an associated lttng_event if not
1120 * already present.
1121 */
1122 list_for_each_entry(probe_desc, probe_list, head) {
1123 for (i = 0; i < probe_desc->nr_events; i++) {
1124 int found = 0;
1125 struct hlist_head *head;
1126 const char *event_name;
1127 size_t name_len;
1128 uint32_t hash;
1129 struct lttng_event *event;
1130
1131 desc = probe_desc->event_desc[i];
1132 if (!lttng_desc_match_enabler(desc, enabler))
1133 continue;
1134 event_name = desc->name;
1135 name_len = strlen(event_name);
1136
1137 /*
1138 * Check if already created.
1139 */
1140 hash = jhash(event_name, name_len, 0);
1141 head = &session->events_ht.table[hash & (LTTNG_EVENT_HT_SIZE - 1)];
1142 lttng_hlist_for_each_entry(event, head, hlist) {
1143 if (event->desc == desc
1144 && event->chan == enabler->chan)
1145 found = 1;
1146 }
1147 if (found)
1148 continue;
1149
1150 /*
1151 * We need to create an event for this
1152 * event probe.
1153 */
1154 event = _lttng_event_create(enabler->chan,
1155 NULL, NULL, desc,
1156 LTTNG_KERNEL_TRACEPOINT);
1157 if (!event) {
1158 printk(KERN_INFO "Unable to create event %s\n",
1159 probe_desc->event_desc[i]->name);
1160 }
1161 }
1162 }
1163 }
1164
/*
 * Ensure syscall instrumentation is registered on the enabler's channel.
 * lttng_syscalls_register() is expected to tolerate repeated calls; a
 * failure here only warns, it does not propagate.
 */
static
void lttng_create_syscall_if_missing(struct lttng_enabler *enabler)
{
	int ret;

	ret = lttng_syscalls_register(enabler->chan, NULL);
	WARN_ON_ONCE(ret);
}
1173
/*
 * Create struct lttng_event if it is missing and present in the list of
 * tracepoint probes.
 * Should be called with sessions mutex held.
 *
 * Dispatches on the enabler's instrumentation type; only tracepoints and
 * syscalls support enabler-driven creation.
 */
static
void lttng_create_event_if_missing(struct lttng_enabler *enabler)
{
	switch (enabler->event_param.instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
		lttng_create_tracepoint_if_missing(enabler);
		break;
	case LTTNG_KERNEL_SYSCALL:
		lttng_create_syscall_if_missing(enabler);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
}
1194
1195 /*
1196 * Create events associated with an enabler (if not already present),
1197 * and add backward reference from the event to the enabler.
1198 * Should be called with sessions mutex held.
1199 */
static
int lttng_enabler_ref_events(struct lttng_enabler *enabler)
{
	struct lttng_session *session = enabler->chan->session;
	struct lttng_event *event;

	/* First ensure that probe events are created for this enabler. */
	lttng_create_event_if_missing(enabler);

	/* For each event matching enabler in session event list. */
	list_for_each_entry(event, &session->events, list) {
		struct lttng_enabler_ref *enabler_ref;

		if (!lttng_event_match_enabler(event, enabler))
			continue;
		enabler_ref = lttng_event_enabler_ref(event, enabler);
		if (!enabler_ref) {
			/*
			 * If no backward ref, create it.
			 * Add backward ref from event to enabler.
			 * Returns -ENOMEM to the caller if allocation
			 * fails; refs added so far are kept.
			 */
			enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
			if (!enabler_ref)
				return -ENOMEM;
			enabler_ref->ref = enabler;
			list_add(&enabler_ref->node,
				&event->enablers_ref_head);
		}

		/*
		 * Link filter bytecodes if not linked yet.
		 */
		lttng_enabler_event_link_bytecode(event, enabler);

		/* TODO: merge event context. */
	}
	return 0;
}
1238
1239 /*
1240 * Called at module load: connect the probe on all enablers matching
1241 * this event.
1242 * Called with sessions lock held.
1243 */
1244 int lttng_fix_pending_events(void)
1245 {
1246 struct lttng_session *session;
1247
1248 list_for_each_entry(session, &sessions, list)
1249 lttng_session_lazy_sync_enablers(session);
1250 return 0;
1251 }
1252
1253 struct lttng_enabler *lttng_enabler_create(enum lttng_enabler_type type,
1254 struct lttng_kernel_event *event_param,
1255 struct lttng_channel *chan)
1256 {
1257 struct lttng_enabler *enabler;
1258
1259 enabler = kzalloc(sizeof(*enabler), GFP_KERNEL);
1260 if (!enabler)
1261 return NULL;
1262 enabler->type = type;
1263 INIT_LIST_HEAD(&enabler->filter_bytecode_head);
1264 memcpy(&enabler->event_param, event_param,
1265 sizeof(enabler->event_param));
1266 enabler->chan = chan;
1267 /* ctx left NULL */
1268 enabler->enabled = 0;
1269 enabler->evtype = LTTNG_TYPE_ENABLER;
1270 mutex_lock(&sessions_mutex);
1271 list_add(&enabler->node, &enabler->chan->session->enablers_head);
1272 lttng_session_lazy_sync_enablers(enabler->chan->session);
1273 mutex_unlock(&sessions_mutex);
1274 return enabler;
1275 }
1276
/*
 * Mark an enabler as enabled and propagate the change to the session's
 * events. Always returns 0.
 */
int lttng_enabler_enable(struct lttng_enabler *enabler)
{
	mutex_lock(&sessions_mutex);
	enabler->enabled = 1;
	/* Re-evaluate event enabled state across the whole session. */
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	mutex_unlock(&sessions_mutex);
	return 0;
}
1285
/*
 * Mark an enabler as disabled and propagate the change to the session's
 * events. Always returns 0.
 */
int lttng_enabler_disable(struct lttng_enabler *enabler)
{
	mutex_lock(&sessions_mutex);
	enabler->enabled = 0;
	/* Re-evaluate event enabled state across the whole session. */
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	mutex_unlock(&sessions_mutex);
	return 0;
}
1294
1295 int lttng_enabler_attach_bytecode(struct lttng_enabler *enabler,
1296 struct lttng_kernel_filter_bytecode __user *bytecode)
1297 {
1298 struct lttng_filter_bytecode_node *bytecode_node;
1299 uint32_t bytecode_len;
1300 int ret;
1301
1302 ret = get_user(bytecode_len, &bytecode->len);
1303 if (ret)
1304 return ret;
1305 bytecode_node = kzalloc(sizeof(*bytecode_node) + bytecode_len,
1306 GFP_KERNEL);
1307 if (!bytecode_node)
1308 return -ENOMEM;
1309 ret = copy_from_user(&bytecode_node->bc, bytecode,
1310 sizeof(*bytecode) + bytecode_len);
1311 if (ret)
1312 goto error_free;
1313 bytecode_node->enabler = enabler;
1314 /* Enforce length based on allocated size */
1315 bytecode_node->bc.len = bytecode_len;
1316 list_add_tail(&bytecode_node->node, &enabler->filter_bytecode_head);
1317 lttng_session_lazy_sync_enablers(enabler->chan->session);
1318 return 0;
1319
1320 error_free:
1321 kfree(bytecode_node);
1322 return ret;
1323 }
1324
1325 int lttng_enabler_attach_context(struct lttng_enabler *enabler,
1326 struct lttng_kernel_context *context_param)
1327 {
1328 return -ENOSYS;
1329 }
1330
/*
 * Free an enabler and everything it owns: its filter bytecode nodes and
 * its context. Unlinks the enabler from its session's enablers list.
 */
static
void lttng_enabler_destroy(struct lttng_enabler *enabler)
{
	struct lttng_filter_bytecode_node *filter_node, *tmp_filter_node;

	/* Destroy filter bytecode */
	list_for_each_entry_safe(filter_node, tmp_filter_node,
			&enabler->filter_bytecode_head, node) {
		kfree(filter_node);
	}

	/* Destroy contexts */
	lttng_destroy_context(enabler->ctx);

	/* Unlink from the session's enablers list before freeing. */
	list_del(&enabler->node);
	kfree(enabler);
}
1348
1349 /*
1350 * lttng_session_sync_enablers should be called just before starting a
1351 * session.
1352 * Should be called with sessions mutex held.
1353 */
static
void lttng_session_sync_enablers(struct lttng_session *session)
{
	struct lttng_enabler *enabler;
	struct lttng_event *event;

	/* First pass: let each enabler create/reference its matching events. */
	list_for_each_entry(enabler, &session->enablers_head, node)
		lttng_enabler_ref_events(enabler);
	/*
	 * For each event, if at least one of its enablers is enabled,
	 * and its channel and session transient states are enabled, we
	 * enable the event, else we disable it.
	 */
	list_for_each_entry(event, &session->events, list) {
		struct lttng_enabler_ref *enabler_ref;
		struct lttng_bytecode_runtime *runtime;
		int enabled = 0, has_enablers_without_bytecode = 0;

		switch (event->instrumentation) {
		case LTTNG_KERNEL_TRACEPOINT:
		case LTTNG_KERNEL_SYSCALL:
			/* Enable events */
			list_for_each_entry(enabler_ref,
					&event->enablers_ref_head, node) {
				if (enabler_ref->ref->enabled) {
					enabled = 1;
					break;
				}
			}
			break;
		default:
			/* Not handled with lazy sync. */
			continue;
		}
		/*
		 * Enabled state is based on union of enablers, with
		 * intersection of session and channel transient enable
		 * states.
		 */
		enabled = enabled && session->tstate && event->chan->tstate;

		ACCESS_ONCE(event->enabled) = enabled;
		/*
		 * Sync tracepoint registration with event enabled
		 * state.
		 */
		if (enabled) {
			register_event(event);
		} else {
			_lttng_event_unregister(event);
		}

		/* Check if has enablers without bytecode enabled */
		list_for_each_entry(enabler_ref,
				&event->enablers_ref_head, node) {
			if (enabler_ref->ref->enabled
					&& list_empty(&enabler_ref->ref->filter_bytecode_head)) {
				has_enablers_without_bytecode = 1;
				break;
			}
		}
		event->has_enablers_without_bytecode =
			has_enablers_without_bytecode;

		/* Enable filters */
		list_for_each_entry(runtime,
				&event->bytecode_runtime_head, node)
			lttng_filter_sync_state(runtime);
	}
}
1424
1425 /*
1426 * Apply enablers to session events, adding events to session if need
1427 * be. It is required after each modification applied to an active
1428 * session, and right before session "start".
1429 * "lazy" sync means we only sync if required.
1430 * Should be called with sessions mutex held.
1431 */
1432 static
1433 void lttng_session_lazy_sync_enablers(struct lttng_session *session)
1434 {
1435 /* We can skip if session is not active */
1436 if (!session->active)
1437 return;
1438 lttng_session_sync_enablers(session);
1439 }
1440
1441 /*
1442 * Serialize at most one packet worth of metadata into a metadata
1443 * channel.
1444 * We grab the metadata cache mutex to get exclusive access to our metadata
1445 * buffer and to the metadata cache. Exclusive access to the metadata buffer
1446 * allows us to do racy operations such as looking for remaining space left in
1447 * packet and write, since mutual exclusion protects us from concurrent writes.
1448 * Mutual exclusion on the metadata cache allow us to read the cache content
1449 * without racing against reallocation of the cache by updates.
1450 * Returns the number of bytes written in the channel, 0 if no data
1451 * was written and a negative value on error.
1452 */
int lttng_metadata_output_channel(struct lttng_metadata_stream *stream,
		struct channel *chan)
{
	struct lib_ring_buffer_ctx ctx;
	int ret = 0;
	size_t len, reserve_len;

	/*
	 * Ensure we support multiple get_next / put sequences followed by
	 * put_next. The metadata cache lock protects reading the metadata
	 * cache. It can indeed be read concurrently by "get_next_subbuf" and
	 * "flush" operations on the buffer invoked by different processes.
	 * Moreover, since the metadata cache memory can be reallocated, we
	 * need to have exclusive access against updates even though we only
	 * read it.
	 */
	mutex_lock(&stream->metadata_cache->lock);
	WARN_ON(stream->metadata_in < stream->metadata_out);
	/* Only produce more data once prior output is consumed (in == out). */
	if (stream->metadata_in != stream->metadata_out)
		goto end;

	/* Bytes cached but not yet pushed into this stream's buffer. */
	len = stream->metadata_cache->metadata_written -
		stream->metadata_in;
	if (!len)
		goto end;
	/* Write at most one packet worth of data. */
	reserve_len = min_t(size_t,
			stream->transport->ops.packet_avail_size(chan),
			len);
	lib_ring_buffer_ctx_init(&ctx, chan, NULL, reserve_len,
			sizeof(char), -1);
	/*
	 * If reservation failed, return an error to the caller.
	 */
	ret = stream->transport->ops.event_reserve(&ctx, 0);
	if (ret != 0) {
		printk(KERN_WARNING "LTTng: Metadata event reservation failed\n");
		goto end;
	}
	stream->transport->ops.event_write(&ctx,
			stream->metadata_cache->data + stream->metadata_in,
			reserve_len);
	stream->transport->ops.event_commit(&ctx);
	stream->metadata_in += reserve_len;
	ret = reserve_len;	/* Success: number of bytes written. */

end:
	mutex_unlock(&stream->metadata_cache->lock);
	return ret;
}
1502
1503 /*
1504 * Write the metadata to the metadata cache.
1505 * Must be called with sessions_mutex held.
1506 * The metadata cache lock protects us from concurrent read access from
1507 * thread outputting metadata content to ring buffer.
1508 */
int lttng_metadata_printf(struct lttng_session *session,
		const char *fmt, ...)
{
	char *str;
	size_t len;
	va_list ap;
	struct lttng_metadata_stream *stream;

	WARN_ON_ONCE(!ACCESS_ONCE(session->active));

	/* Format into a freshly allocated kernel string. */
	va_start(ap, fmt);
	str = kvasprintf(GFP_KERNEL, fmt, ap);
	va_end(ap);
	if (!str)
		return -ENOMEM;

	len = strlen(str);
	mutex_lock(&session->metadata_cache->lock);
	if (session->metadata_cache->metadata_written + len >
			session->metadata_cache->cache_alloc) {
		char *tmp_cache_realloc;
		unsigned int tmp_cache_alloc_size;

		/* Grow to fit: at least enough for len, at least doubling. */
		tmp_cache_alloc_size = max_t(unsigned int,
				session->metadata_cache->cache_alloc + len,
				session->metadata_cache->cache_alloc << 1);
		tmp_cache_realloc = lttng_vzalloc(tmp_cache_alloc_size);
		if (!tmp_cache_realloc)
			goto err;
		if (session->metadata_cache->data) {
			memcpy(tmp_cache_realloc,
				session->metadata_cache->data,
				session->metadata_cache->cache_alloc);
			vfree(session->metadata_cache->data);
		}

		session->metadata_cache->cache_alloc = tmp_cache_alloc_size;
		session->metadata_cache->data = tmp_cache_realloc;
	}
	/* Append the formatted text to the cache. */
	memcpy(session->metadata_cache->data +
			session->metadata_cache->metadata_written,
			str, len);
	session->metadata_cache->metadata_written += len;
	mutex_unlock(&session->metadata_cache->lock);
	kfree(str);

	/* Wake readers blocked on the session's metadata streams. */
	list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list)
		wake_up_interruptible(&stream->read_wait);

	return 0;

err:
	mutex_unlock(&session->metadata_cache->lock);
	kfree(str);
	return -ENOMEM;
}
1565
1566 /*
1567 * Must be called with sessions_mutex held.
1568 */
/*
 * Emit the CTF metadata declaration for a single event field.
 * Returns 0 on success, negative value on metadata write error,
 * -EINVAL on an unknown abstract type.
 */
static
int _lttng_field_statedump(struct lttng_session *session,
		const struct lttng_event_field *field)
{
	int ret = 0;

	switch (field->type.atype) {
	case atype_integer:
		/* Scalar integer: size, alignment, signedness, encoding, base. */
		ret = lttng_metadata_printf(session,
			" integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } _%s;\n",
			field->type.u.basic.integer.size,
			field->type.u.basic.integer.alignment,
			field->type.u.basic.integer.signedness,
			(field->type.u.basic.integer.encoding == lttng_encode_none)
				? "none"
				: (field->type.u.basic.integer.encoding == lttng_encode_UTF8)
					? "UTF8"
					: "ASCII",
			field->type.u.basic.integer.base,
#ifdef __BIG_ENDIAN
			field->type.u.basic.integer.reverse_byte_order ? " byte_order = le;" : "",
#else
			field->type.u.basic.integer.reverse_byte_order ? " byte_order = be;" : "",
#endif
			field->name);
		break;
	case atype_enum:
		/* Reference a previously declared enumeration by name. */
		ret = lttng_metadata_printf(session,
			" %s _%s;\n",
			field->type.u.basic.enumeration.name,
			field->name);
		break;
	case atype_array:
	{
		const struct lttng_basic_type *elem_type;

		/* Fixed-length array of integers. */
		elem_type = &field->type.u.array.elem_type;
		ret = lttng_metadata_printf(session,
			" integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } _%s[%u];\n",
			elem_type->u.basic.integer.size,
			elem_type->u.basic.integer.alignment,
			elem_type->u.basic.integer.signedness,
			(elem_type->u.basic.integer.encoding == lttng_encode_none)
				? "none"
				: (elem_type->u.basic.integer.encoding == lttng_encode_UTF8)
					? "UTF8"
					: "ASCII",
			elem_type->u.basic.integer.base,
#ifdef __BIG_ENDIAN
			elem_type->u.basic.integer.reverse_byte_order ? " byte_order = le;" : "",
#else
			elem_type->u.basic.integer.reverse_byte_order ? " byte_order = be;" : "",
#endif
			field->name, field->type.u.array.length);
		break;
	}
	case atype_sequence:
	{
		const struct lttng_basic_type *elem_type;
		const struct lttng_basic_type *length_type;

		/*
		 * Variable-length sequence: emit the hidden __<name>_length
		 * integer first, then the element array sized by it.
		 */
		elem_type = &field->type.u.sequence.elem_type;
		length_type = &field->type.u.sequence.length_type;
		ret = lttng_metadata_printf(session,
			" integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } __%s_length;\n",
			length_type->u.basic.integer.size,
			(unsigned int) length_type->u.basic.integer.alignment,
			length_type->u.basic.integer.signedness,
			(length_type->u.basic.integer.encoding == lttng_encode_none)
				? "none"
				: ((length_type->u.basic.integer.encoding == lttng_encode_UTF8)
					? "UTF8"
					: "ASCII"),
			length_type->u.basic.integer.base,
#ifdef __BIG_ENDIAN
			length_type->u.basic.integer.reverse_byte_order ? " byte_order = le;" : "",
#else
			length_type->u.basic.integer.reverse_byte_order ? " byte_order = be;" : "",
#endif
			field->name);
		if (ret)
			return ret;

		ret = lttng_metadata_printf(session,
			" integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } _%s[ __%s_length ];\n",
			elem_type->u.basic.integer.size,
			(unsigned int) elem_type->u.basic.integer.alignment,
			elem_type->u.basic.integer.signedness,
			(elem_type->u.basic.integer.encoding == lttng_encode_none)
				? "none"
				: ((elem_type->u.basic.integer.encoding == lttng_encode_UTF8)
					? "UTF8"
					: "ASCII"),
			elem_type->u.basic.integer.base,
#ifdef __BIG_ENDIAN
			elem_type->u.basic.integer.reverse_byte_order ? " byte_order = le;" : "",
#else
			elem_type->u.basic.integer.reverse_byte_order ? " byte_order = be;" : "",
#endif
			field->name,
			field->name);
		break;
	}

	case atype_string:
		/* Default encoding is UTF8 */
		ret = lttng_metadata_printf(session,
			" string%s _%s;\n",
			field->type.u.basic.string.encoding == lttng_encode_ASCII ?
				" { encoding = ASCII; }" : "",
			field->name);
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}
	return ret;
}
1687
1688 static
1689 int _lttng_context_metadata_statedump(struct lttng_session *session,
1690 struct lttng_ctx *ctx)
1691 {
1692 int ret = 0;
1693 int i;
1694
1695 if (!ctx)
1696 return 0;
1697 for (i = 0; i < ctx->nr_fields; i++) {
1698 const struct lttng_ctx_field *field = &ctx->fields[i];
1699
1700 ret = _lttng_field_statedump(session, &field->event_field);
1701 if (ret)
1702 return ret;
1703 }
1704 return ret;
1705 }
1706
1707 static
1708 int _lttng_fields_metadata_statedump(struct lttng_session *session,
1709 struct lttng_event *event)
1710 {
1711 const struct lttng_event_desc *desc = event->desc;
1712 int ret = 0;
1713 int i;
1714
1715 for (i = 0; i < desc->nr_fields; i++) {
1716 const struct lttng_event_field *field = &desc->fields[i];
1717
1718 ret = _lttng_field_statedump(session, field);
1719 if (ret)
1720 return ret;
1721 }
1722 return ret;
1723 }
1724
1725 /*
1726 * Must be called with sessions_mutex held.
1727 */
static
int _lttng_event_metadata_statedump(struct lttng_session *session,
		struct lttng_channel *chan,
		struct lttng_event *event)
{
	int ret = 0;

	/* Dump each event at most once, and only while the session is active. */
	if (event->metadata_dumped || !ACCESS_ONCE(session->active))
		return 0;
	/* The metadata channel carries no event declarations of its own. */
	if (chan->channel_type == METADATA_CHANNEL)
		return 0;

	ret = lttng_metadata_printf(session,
		"event {\n"
		" name = \"%s\";\n"
		" id = %u;\n"
		" stream_id = %u;\n",
		event->desc->name,
		event->id,
		event->chan->id);
	if (ret)
		goto end;

	/* Optional per-event context fields, wrapped in a struct. */
	if (event->ctx) {
		ret = lttng_metadata_printf(session,
			" context := struct {\n");
		if (ret)
			goto end;
	}
	ret = _lttng_context_metadata_statedump(session, event->ctx);
	if (ret)
		goto end;
	if (event->ctx) {
		ret = lttng_metadata_printf(session,
			" };\n");
		if (ret)
			goto end;
	}

	ret = lttng_metadata_printf(session,
		" fields := struct {\n"
		);
	if (ret)
		goto end;

	ret = _lttng_fields_metadata_statedump(session, event);
	if (ret)
		goto end;

	/*
	 * LTTng space reservation can only reserve multiples of the
	 * byte size.
	 */
	ret = lttng_metadata_printf(session,
		" };\n"
		"};\n\n");
	if (ret)
		goto end;

	/* Only mark dumped once the whole declaration has been written. */
	event->metadata_dumped = 1;
end:
	return ret;

}
1792
1793 /*
1794 * Must be called with sessions_mutex held.
1795 */
1796 static
1797 int _lttng_channel_metadata_statedump(struct lttng_session *session,
1798 struct lttng_channel *chan)
1799 {
1800 int ret = 0;
1801
1802 if (chan->metadata_dumped || !ACCESS_ONCE(session->active))
1803 return 0;
1804
1805 if (chan->channel_type == METADATA_CHANNEL)
1806 return 0;
1807
1808 WARN_ON_ONCE(!chan->header_type);
1809 ret = lttng_metadata_printf(session,
1810 "stream {\n"
1811 " id = %u;\n"
1812 " event.header := %s;\n"
1813 " packet.context := struct packet_context;\n",
1814 chan->id,
1815 chan->header_type == 1 ? "struct event_header_compact" :
1816 "struct event_header_large");
1817 if (ret)
1818 goto end;
1819
1820 if (chan->ctx) {
1821 ret = lttng_metadata_printf(session,
1822 " event.context := struct {\n");
1823 if (ret)
1824 goto end;
1825 }
1826 ret = _lttng_context_metadata_statedump(session, chan->ctx);
1827 if (ret)
1828 goto end;
1829 if (chan->ctx) {
1830 ret = lttng_metadata_printf(session,
1831 " };\n");
1832 if (ret)
1833 goto end;
1834 }
1835
1836 ret = lttng_metadata_printf(session,
1837 "};\n\n");
1838
1839 chan->metadata_dumped = 1;
1840 end:
1841 return ret;
1842 }
1843
1844 /*
1845 * Must be called with sessions_mutex held.
1846 */
/*
 * Declare the per-packet context structure shared by all streams:
 * begin/end timestamps, sizes, sequence number, discarded-event count
 * and originating CPU.
 */
static
int _lttng_stream_packet_context_declare(struct lttng_session *session)
{
	return lttng_metadata_printf(session,
		"struct packet_context {\n"
		" uint64_clock_monotonic_t timestamp_begin;\n"
		" uint64_clock_monotonic_t timestamp_end;\n"
		" uint64_t content_size;\n"
		" uint64_t packet_size;\n"
		" uint64_t packet_seq_num;\n"
		" unsigned long events_discarded;\n"
		" uint32_t cpu_id;\n"
		"};\n\n"
		);
}
1862
1863 /*
1864 * Compact header:
1865 * id: range: 0 - 30.
1866 * id 31 is reserved to indicate an extended header.
1867 *
1868 * Large header:
1869 * id: range: 0 - 65534.
1870 * id 65535 is reserved to indicate an extended header.
1871 *
1872 * Must be called with sessions_mutex held.
1873 */
static
int _lttng_event_header_declare(struct lttng_session *session)
{
	/*
	 * Both headers use a variant keyed on the id enum: the reserved
	 * top id value selects the "extended" form carrying a full 32-bit
	 * id and a 64-bit timestamp.
	 */
	return lttng_metadata_printf(session,
	"struct event_header_compact {\n"
	" enum : uint5_t { compact = 0 ... 30, extended = 31 } id;\n"
	" variant <id> {\n"
	" struct {\n"
	" uint27_clock_monotonic_t timestamp;\n"
	" } compact;\n"
	" struct {\n"
	" uint32_t id;\n"
	" uint64_clock_monotonic_t timestamp;\n"
	" } extended;\n"
	" } v;\n"
	"} align(%u);\n"
	"\n"
	"struct event_header_large {\n"
	" enum : uint16_t { compact = 0 ... 65534, extended = 65535 } id;\n"
	" variant <id> {\n"
	" struct {\n"
	" uint32_clock_monotonic_t timestamp;\n"
	" } compact;\n"
	" struct {\n"
	" uint32_t id;\n"
	" uint64_clock_monotonic_t timestamp;\n"
	" } extended;\n"
	" } v;\n"
	"} align(%u);\n\n",
	lttng_alignof(uint32_t) * CHAR_BIT,
	lttng_alignof(uint16_t) * CHAR_BIT
	);
}
1907
1908 /*
1909 * Approximation of NTP time of day to clock monotonic correlation,
1910 * taken at start of trace.
1911 * Yes, this is only an approximation. Yes, we can (and will) do better
1912 * in future versions.
1913 */
1914 static
1915 uint64_t measure_clock_offset(void)
1916 {
1917 uint64_t offset, monotonic[2], realtime;
1918 struct timespec rts = { 0, 0 };
1919 unsigned long flags;
1920
1921 /* Disable interrupts to increase correlation precision. */
1922 local_irq_save(flags);
1923 monotonic[0] = trace_clock_read64();
1924 getnstimeofday(&rts);
1925 monotonic[1] = trace_clock_read64();
1926 local_irq_restore(flags);
1927
1928 offset = (monotonic[0] + monotonic[1]) >> 1;
1929 realtime = (uint64_t) rts.tv_sec * NSEC_PER_SEC;
1930 realtime += rts.tv_nsec;
1931 offset = realtime - offset;
1932 return offset;
1933 }
1934
1935 /*
1936 * Output metadata into this session's metadata buffers.
1937 * Must be called with sessions_mutex held.
1938 */
static
int _lttng_session_metadata_statedump(struct lttng_session *session)
{
	unsigned char *uuid_c = session->uuid.b;
	unsigned char uuid_s[37], clock_uuid_s[BOOT_ID_LEN];
	struct lttng_channel *chan;
	struct lttng_event *event;
	int ret = 0;

	if (!ACCESS_ONCE(session->active))
		return 0;
	/* The session-wide preamble is emitted only once. */
	if (session->metadata_dumped)
		goto skip_session;

	/* Canonical textual form of the 16-byte trace UUID. */
	snprintf(uuid_s, sizeof(uuid_s),
		"%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
		uuid_c[0], uuid_c[1], uuid_c[2], uuid_c[3],
		uuid_c[4], uuid_c[5], uuid_c[6], uuid_c[7],
		uuid_c[8], uuid_c[9], uuid_c[10], uuid_c[11],
		uuid_c[12], uuid_c[13], uuid_c[14], uuid_c[15]);

	/* Base integer typealiases and the top-level trace declaration. */
	ret = lttng_metadata_printf(session,
		"typealias integer { size = 8; align = %u; signed = false; } := uint8_t;\n"
		"typealias integer { size = 16; align = %u; signed = false; } := uint16_t;\n"
		"typealias integer { size = 32; align = %u; signed = false; } := uint32_t;\n"
		"typealias integer { size = 64; align = %u; signed = false; } := uint64_t;\n"
		"typealias integer { size = %u; align = %u; signed = false; } := unsigned long;\n"
		"typealias integer { size = 5; align = 1; signed = false; } := uint5_t;\n"
		"typealias integer { size = 27; align = 1; signed = false; } := uint27_t;\n"
		"\n"
		"trace {\n"
		" major = %u;\n"
		" minor = %u;\n"
		" uuid = \"%s\";\n"
		" byte_order = %s;\n"
		" packet.header := struct {\n"
		" uint32_t magic;\n"
		" uint8_t uuid[16];\n"
		" uint32_t stream_id;\n"
		" uint64_t stream_instance_id;\n"
		" };\n"
		"};\n\n",
		lttng_alignof(uint8_t) * CHAR_BIT,
		lttng_alignof(uint16_t) * CHAR_BIT,
		lttng_alignof(uint32_t) * CHAR_BIT,
		lttng_alignof(uint64_t) * CHAR_BIT,
		sizeof(unsigned long) * CHAR_BIT,
		lttng_alignof(unsigned long) * CHAR_BIT,
		CTF_SPEC_MAJOR,
		CTF_SPEC_MINOR,
		uuid_s,
#ifdef __BIG_ENDIAN
		"be"
#else
		"le"
#endif
		);
	if (ret)
		goto end;

	/* Environment block: host and tracer identification. */
	ret = lttng_metadata_printf(session,
		"env {\n"
		" hostname = \"%s\";\n"
		" domain = \"kernel\";\n"
		" sysname = \"%s\";\n"
		" kernel_release = \"%s\";\n"
		" kernel_version = \"%s\";\n"
		" tracer_name = \"lttng-modules\";\n"
		" tracer_major = %d;\n"
		" tracer_minor = %d;\n"
		" tracer_patchlevel = %d;\n"
		"};\n\n",
		current->nsproxy->uts_ns->name.nodename,
		utsname()->sysname,
		utsname()->release,
		utsname()->version,
		LTTNG_MODULES_MAJOR_VERSION,
		LTTNG_MODULES_MINOR_VERSION,
		LTTNG_MODULES_PATCHLEVEL_VERSION
		);
	if (ret)
		goto end;

	/* Clock description; left open so the uuid can be appended. */
	ret = lttng_metadata_printf(session,
		"clock {\n"
		" name = %s;\n",
		"monotonic"
		);
	if (ret)
		goto end;

	/* Emit the clock uuid only if one is available. */
	if (!trace_clock_uuid(clock_uuid_s)) {
		ret = lttng_metadata_printf(session,
			" uuid = \"%s\";\n",
			clock_uuid_s
			);
		if (ret)
			goto end;
	}

	ret = lttng_metadata_printf(session,
		" description = \"Monotonic Clock\";\n"
		" freq = %llu; /* Frequency, in Hz */\n"
		" /* clock value offset from Epoch is: offset * (1/freq) */\n"
		" offset = %llu;\n"
		"};\n\n",
		(unsigned long long) trace_clock_freq(),
		(unsigned long long) measure_clock_offset()
		);
	if (ret)
		goto end;

	/* Typealiases mapping timestamp integers onto the clock. */
	ret = lttng_metadata_printf(session,
		"typealias integer {\n"
		" size = 27; align = 1; signed = false;\n"
		" map = clock.monotonic.value;\n"
		"} := uint27_clock_monotonic_t;\n"
		"\n"
		"typealias integer {\n"
		" size = 32; align = %u; signed = false;\n"
		" map = clock.monotonic.value;\n"
		"} := uint32_clock_monotonic_t;\n"
		"\n"
		"typealias integer {\n"
		" size = 64; align = %u; signed = false;\n"
		" map = clock.monotonic.value;\n"
		"} := uint64_clock_monotonic_t;\n\n",
		lttng_alignof(uint32_t) * CHAR_BIT,
		lttng_alignof(uint64_t) * CHAR_BIT
		);
	if (ret)
		goto end;

	ret = _lttng_stream_packet_context_declare(session);
	if (ret)
		goto end;

	ret = _lttng_event_header_declare(session);
	if (ret)
		goto end;

skip_session:
	/* Per-channel and per-event dumps track their own dumped state. */
	list_for_each_entry(chan, &session->chan, list) {
		ret = _lttng_channel_metadata_statedump(session, chan);
		if (ret)
			goto end;
	}

	list_for_each_entry(event, &session->events, list) {
		ret = _lttng_event_metadata_statedump(session, event->chan, event);
		if (ret)
			goto end;
	}
	session->metadata_dumped = 1;
end:
	return ret;
}
2096
2097 /**
2098 * lttng_transport_register - LTT transport registration
2099 * @transport: transport structure
2100 *
2101 * Registers a transport which can be used as output to extract the data out of
2102 * LTTng. The module calling this registration function must ensure that no
2103 * trap-inducing code will be executed by the transport functions. E.g.
2104 * vmalloc_sync_all() must be called between a vmalloc and the moment the memory
2105 * is made visible to the transport function. This registration acts as a
2106 * vmalloc_sync_all. Therefore, only if the module allocates virtual memory
2107 * after its registration must it synchronize the TLBs.
2108 */
void lttng_transport_register(struct lttng_transport *transport)
{
	/*
	 * Make sure no page fault can be triggered by the module about to be
	 * registered. We deal with this here so we don't have to call
	 * vmalloc_sync_all() in each module's init.
	 */
	wrapper_vmalloc_sync_all();

	/* The transport list is protected by the sessions mutex. */
	mutex_lock(&sessions_mutex);
	list_add_tail(&transport->node, &lttng_transport_list);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_transport_register);
2123
2124 /**
2125 * lttng_transport_unregister - LTT transport unregistration
2126 * @transport: transport structure
2127 */
void lttng_transport_unregister(struct lttng_transport *transport)
{
	/* Remove the transport under the same lock that guards additions. */
	mutex_lock(&sessions_mutex);
	list_del(&transport->node);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_transport_unregister);
2135
/*
 * Module init: set up wrappers, context/tracepoint infrastructure, the
 * event slab cache, the ABI and the logger. On any failure, previously
 * initialized parts are torn down in reverse order via the goto chain.
 */
static int __init lttng_events_init(void)
{
	int ret;

	ret = wrapper_lttng_fixup_sig(THIS_MODULE);
	if (ret)
		return ret;
	ret = wrapper_get_pfnblock_flags_mask_init();
	if (ret)
		return ret;
	ret = lttng_context_init();
	if (ret)
		return ret;
	ret = lttng_tracepoint_init();
	if (ret)
		goto error_tp;
	event_cache = KMEM_CACHE(lttng_event, 0);
	if (!event_cache) {
		ret = -ENOMEM;
		goto error_kmem;
	}
	ret = lttng_abi_init();
	if (ret)
		goto error_abi;
	ret = lttng_logger_init();
	if (ret)
		goto error_logger;
	return 0;

	/* Unwind in reverse order of initialization. */
error_logger:
	lttng_abi_exit();
error_abi:
	kmem_cache_destroy(event_cache);
error_kmem:
	lttng_tracepoint_exit();
error_tp:
	lttng_context_exit();
	return ret;
}
2175
2176 module_init(lttng_events_init);
2177
/*
 * Module exit: tear everything down in reverse order of
 * lttng_events_init(), destroying any session still alive.
 */
static void __exit lttng_events_exit(void)
{
	struct lttng_session *session, *tmpsession;

	lttng_logger_exit();
	lttng_abi_exit();
	/* Safe iteration: lttng_session_destroy() unlinks each session. */
	list_for_each_entry_safe(session, tmpsession, &sessions, list)
		lttng_session_destroy(session);
	kmem_cache_destroy(event_cache);
	lttng_tracepoint_exit();
	lttng_context_exit();
}
2190
2191 module_exit(lttng_events_exit);
2192
2193 MODULE_LICENSE("GPL and additional rights");
2194 MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
2195 MODULE_DESCRIPTION("LTTng Events");
2196 MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
2197 __stringify(LTTNG_MODULES_MINOR_VERSION) "."
2198 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
2199 LTTNG_MODULES_EXTRAVERSION);