1 /* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
2 *
3 * lttng-events.c
4 *
5 * Holds LTTng per-session event registry.
6 *
7 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 */
9
10 /*
11 * This page_alloc.h wrapper needs to be included before gfpflags.h because it
12 * overrides a function with a define.
13 */
14 #include "wrapper/page_alloc.h"
15
16 #include <linux/module.h>
17 #include <linux/mutex.h>
18 #include <linux/sched.h>
19 #include <linux/slab.h>
20 #include <linux/jiffies.h>
21 #include <linux/utsname.h>
22 #include <linux/err.h>
23 #include <linux/seq_file.h>
24 #include <linux/file.h>
25 #include <linux/anon_inodes.h>
26 #include <wrapper/file.h>
27 #include <linux/uaccess.h>
28 #include <linux/vmalloc.h>
29 #include <linux/dmi.h>
30
31 #include <wrapper/compiler_attributes.h>
32 #include <wrapper/uuid.h>
33 #include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_mappings() */
34 #include <wrapper/random.h>
35 #include <wrapper/tracepoint.h>
36 #include <wrapper/list.h>
37 #include <wrapper/types.h>
38 #include <lttng/kernel-version.h>
39 #include <lttng/events.h>
40 #include <lttng/events-internal.h>
41 #include <lttng/lttng-bytecode.h>
42 #include <lttng/tracer.h>
43 #include <lttng/event-notifier-notification.h>
44 #include <lttng/abi-old.h>
45 #include <lttng/endian.h>
46 #include <lttng/string-utils.h>
47 #include <lttng/utils.h>
48 #include <ringbuffer/backend.h>
49 #include <ringbuffer/frontend.h>
50 #include <wrapper/time.h>
51
52 #define METADATA_CACHE_DEFAULT_SIZE 4096
53
54 static LIST_HEAD(sessions);
55 static LIST_HEAD(event_notifier_groups);
56 static LIST_HEAD(lttng_transport_list);
57 static LIST_HEAD(lttng_counter_transport_list);
58 /*
59 * Protect the sessions and metadata caches.
60 */
61 static DEFINE_MUTEX(sessions_mutex);
62 static struct kmem_cache *event_recorder_cache;
63 static struct kmem_cache *event_recorder_private_cache;
64 static struct kmem_cache *event_notifier_cache;
65 static struct kmem_cache *event_notifier_private_cache;
66
67 static void lttng_session_lazy_sync_event_enablers(struct lttng_kernel_session *session);
68 static void lttng_session_sync_event_enablers(struct lttng_kernel_session *session);
69 static void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group);
70 static void lttng_event_enabler_sync(struct lttng_event_enabler_common *event_enabler);
71
72 static void _lttng_event_destroy(struct lttng_kernel_event_common *event);
73 static void _lttng_channel_destroy(struct lttng_kernel_channel_buffer *chan);
74 static int _lttng_event_recorder_unregister(struct lttng_kernel_event_recorder *event);
75 static int _lttng_event_notifier_unregister(struct lttng_kernel_event_notifier *event_notifier);
76 static
77 int _lttng_event_metadata_statedump(struct lttng_kernel_session *session,
78 struct lttng_kernel_channel_buffer *chan,
79 struct lttng_kernel_event_recorder *event);
80 static
81 int _lttng_session_metadata_statedump(struct lttng_kernel_session *session);
82 static
83 void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream);
84 static
85 int _lttng_type_statedump(struct lttng_kernel_session *session,
86 const struct lttng_kernel_type_common *type,
87 enum lttng_kernel_string_encoding parent_encoding,
88 size_t nesting);
89 static
90 int _lttng_field_statedump(struct lttng_kernel_session *session,
91 const struct lttng_kernel_event_field *field,
92 size_t nesting, const char **prev_field_name_p);
93
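/*
 * Wait for in-flight tracing probes to complete. Newer kernels (and the
 * matching RHEL backport range) use synchronize_rcu(); older kernels use
 * synchronize_sched(), with an additional synchronize_rcu() on
 * PREEMPT_RT configurations.
 */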
94 void synchronize_trace(void)
95 {
96 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,1,0) || \
97 LTTNG_RHEL_KERNEL_RANGE(4,18,0,193,0,0, 4,19,0,0,0,0))
98 synchronize_rcu();
99 #else
100 synchronize_sched();
101 #endif
102
103 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0))
104 #ifdef CONFIG_PREEMPT_RT_FULL
105 synchronize_rcu();
106 #endif
107 #else /* (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0)) */
108 #ifdef CONFIG_PREEMPT_RT
109 synchronize_rcu();
110 #endif
111 #endif /* (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0)) */
112 }
113
114 void lttng_lock_sessions(void)
115 {
116 mutex_lock(&sessions_mutex);
117 }
118
119 void lttng_unlock_sessions(void)
120 {
121 mutex_unlock(&sessions_mutex);
122 }
123
124 static struct lttng_transport *lttng_transport_find(const char *name)
125 {
126 struct lttng_transport *transport;
127
128 list_for_each_entry(transport, &lttng_transport_list, node) {
129 if (!strcmp(transport->name, name))
130 return transport;
131 }
132 return NULL;
133 }
134
135 /*
136 * Called with sessions lock held.
137 */
138 int lttng_session_active(void)
139 {
140 struct lttng_kernel_session_private *iter;
141
142 list_for_each_entry(iter, &sessions, list) {
143 if (iter->pub->active)
144 return 1;
145 }
146 return 0;
147 }
148
149 struct lttng_kernel_session *lttng_session_create(void)
150 {
151 struct lttng_kernel_session *session;
152 struct lttng_kernel_session_private *session_priv;
153 struct lttng_metadata_cache *metadata_cache;
154 int i;
155
156 mutex_lock(&sessions_mutex);
157 session = lttng_kvzalloc(sizeof(*session), GFP_KERNEL);
158 if (!session)
159 goto err;
160 session_priv = lttng_kvzalloc(sizeof(*session_priv), GFP_KERNEL);
161 if (!session_priv)
162 goto err_free_session;
163 session->priv = session_priv;
164 session_priv->pub = session;
165
166 INIT_LIST_HEAD(&session_priv->chan);
167 INIT_LIST_HEAD(&session_priv->events);
168 lttng_guid_gen(&session_priv->uuid);
169
170 metadata_cache = kzalloc(sizeof(struct lttng_metadata_cache),
171 GFP_KERNEL);
172 if (!metadata_cache)
173 goto err_free_session_private;
174 metadata_cache->data = vzalloc(METADATA_CACHE_DEFAULT_SIZE);
175 if (!metadata_cache->data)
176 goto err_free_cache;
177 metadata_cache->cache_alloc = METADATA_CACHE_DEFAULT_SIZE;
178 kref_init(&metadata_cache->refcount);
179 mutex_init(&metadata_cache->lock);
180 session_priv->metadata_cache = metadata_cache;
181 INIT_LIST_HEAD(&metadata_cache->metadata_stream);
182 memcpy(&metadata_cache->uuid, &session_priv->uuid,
183 sizeof(metadata_cache->uuid));
184 INIT_LIST_HEAD(&session_priv->enablers_head);
185 for (i = 0; i < LTTNG_EVENT_HT_SIZE; i++)
186 INIT_HLIST_HEAD(&session_priv->events_ht.table[i]);
187 list_add(&session_priv->list, &sessions);
188
189 if (lttng_id_tracker_init(&session->pid_tracker, session, TRACKER_PID))
190 goto tracker_alloc_error;
191 if (lttng_id_tracker_init(&session->vpid_tracker, session, TRACKER_VPID))
192 goto tracker_alloc_error;
193 if (lttng_id_tracker_init(&session->uid_tracker, session, TRACKER_UID))
194 goto tracker_alloc_error;
195 if (lttng_id_tracker_init(&session->vuid_tracker, session, TRACKER_VUID))
196 goto tracker_alloc_error;
197 if (lttng_id_tracker_init(&session->gid_tracker, session, TRACKER_GID))
198 goto tracker_alloc_error;
199 if (lttng_id_tracker_init(&session->vgid_tracker, session, TRACKER_VGID))
200 goto tracker_alloc_error;
201
202 mutex_unlock(&sessions_mutex);
203
204 return session;
205
206 tracker_alloc_error:
207 lttng_id_tracker_fini(&session->pid_tracker);
208 lttng_id_tracker_fini(&session->vpid_tracker);
209 lttng_id_tracker_fini(&session->uid_tracker);
210 lttng_id_tracker_fini(&session->vuid_tracker);
211 lttng_id_tracker_fini(&session->gid_tracker);
212 lttng_id_tracker_fini(&session->vgid_tracker);
213 err_free_cache:
214 kfree(metadata_cache);
215 err_free_session_private:
216 lttng_kvfree(session_priv);
217 err_free_session:
218 lttng_kvfree(session);
219 err:
220 mutex_unlock(&sessions_mutex);
221 return NULL;
222 }
223
224 static
225 struct lttng_counter_transport *lttng_counter_transport_find(const char *name)
226 {
227 struct lttng_counter_transport *transport;
228
229 list_for_each_entry(transport, &lttng_counter_transport_list, node) {
230 if (!strcmp(transport->name, name))
231 return transport;
232 }
233 return NULL;
234 }
235
236 struct lttng_counter *lttng_kernel_counter_create(
237 const char *counter_transport_name,
238 size_t number_dimensions, const size_t *dimensions_sizes)
239 {
240 struct lttng_counter *counter = NULL;
241 struct lttng_counter_transport *counter_transport = NULL;
242
243 counter_transport = lttng_counter_transport_find(counter_transport_name);
244 if (!counter_transport) {
245 printk(KERN_WARNING "LTTng: counter transport %s not found.\n",
246 counter_transport_name);
247 goto notransport;
248 }
249 if (!try_module_get(counter_transport->owner)) {
250 printk(KERN_WARNING "LTTng: Can't lock counter transport module.\n");
251 goto notransport;
252 }
253
254 counter = lttng_kvzalloc(sizeof(struct lttng_counter), GFP_KERNEL);
255 if (!counter)
256 goto nomem;
257
258 /* Create event notifier error counter. */
259 counter->ops = &counter_transport->ops;
260 counter->transport = counter_transport;
261
262 counter->counter = counter->ops->counter_create(
263 number_dimensions, dimensions_sizes, 0);
264 if (!counter->counter) {
265 goto create_error;
266 }
267
268 return counter;
269
270 create_error:
271 lttng_kvfree(counter);
272 nomem:
273 if (counter_transport)
274 module_put(counter_transport->owner);
275 notransport:
276 return NULL;
277 }
278
279 struct lttng_event_notifier_group *lttng_event_notifier_group_create(void)
280 {
281 struct lttng_transport *transport = NULL;
282 struct lttng_event_notifier_group *event_notifier_group;
283 const char *transport_name = "relay-event-notifier";
284 size_t subbuf_size = 4096; //TODO
285 size_t num_subbuf = 16; //TODO
286 unsigned int switch_timer_interval = 0;
287 unsigned int read_timer_interval = 0;
288 int i;
289
290 mutex_lock(&sessions_mutex);
291
292 transport = lttng_transport_find(transport_name);
293 if (!transport) {
294 printk(KERN_WARNING "LTTng: transport %s not found\n",
295 transport_name);
296 goto notransport;
297 }
298 if (!try_module_get(transport->owner)) {
299 printk(KERN_WARNING "LTTng: Can't lock transport %s module.\n",
300 transport_name);
301 goto notransport;
302 }
303
304 event_notifier_group = lttng_kvzalloc(sizeof(struct lttng_event_notifier_group),
305 GFP_KERNEL);
306 if (!event_notifier_group)
307 goto nomem;
308
309 /*
310 * Initialize the ring buffer used to store event notifier
311 * notifications.
312 */
313 event_notifier_group->ops = &transport->ops;
314 event_notifier_group->chan = transport->ops.priv->channel_create(
315 transport_name, event_notifier_group, NULL,
316 subbuf_size, num_subbuf, switch_timer_interval,
317 read_timer_interval);
318 if (!event_notifier_group->chan)
319 goto create_error;
320
321 event_notifier_group->transport = transport;
322
323 INIT_LIST_HEAD(&event_notifier_group->enablers_head);
324 INIT_LIST_HEAD(&event_notifier_group->event_notifiers_head);
325 for (i = 0; i < LTTNG_EVENT_NOTIFIER_HT_SIZE; i++)
326 INIT_HLIST_HEAD(&event_notifier_group->event_notifiers_ht.table[i]);
327
328 list_add(&event_notifier_group->node, &event_notifier_groups);
329
330 mutex_unlock(&sessions_mutex);
331
332 return event_notifier_group;
333
334 create_error:
335 lttng_kvfree(event_notifier_group);
336 nomem:
337 if (transport)
338 module_put(transport->owner);
339 notransport:
340 mutex_unlock(&sessions_mutex);
341 return NULL;
342 }
343
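/*
 * kref release handler for the metadata cache: frees the cache data
 * buffer and the cache structure itself.
 */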
344 void metadata_cache_destroy(struct kref *kref)
345 {
346 struct lttng_metadata_cache *cache =
347 container_of(kref, struct lttng_metadata_cache, refcount);
348 vfree(cache->data);
349 kfree(cache);
350 }
351
352 void lttng_session_destroy(struct lttng_kernel_session *session)
353 {
354 struct lttng_kernel_channel_buffer_private *chan_priv, *tmpchan_priv;
355 struct lttng_kernel_event_recorder_private *event_recorder_priv, *tmpevent_recorder_priv;
356 struct lttng_metadata_stream *metadata_stream;
357 struct lttng_event_recorder_enabler *event_recorder_enabler, *tmp_event_recorder_enabler;
358 int ret;
359
360 mutex_lock(&sessions_mutex);
361 WRITE_ONCE(session->active, 0);
362 list_for_each_entry(chan_priv, &session->priv->chan, node) {
363 ret = lttng_syscalls_unregister_syscall_table(&chan_priv->parent.syscall_table);
364 WARN_ON(ret);
365 }
366 list_for_each_entry(event_recorder_priv, &session->priv->events, node) {
367 ret = _lttng_event_recorder_unregister(event_recorder_priv->pub);
368 WARN_ON(ret);
369 }
370 synchronize_trace(); /* Wait for in-flight events to complete */
371 list_for_each_entry(chan_priv, &session->priv->chan, node) {
372 ret = lttng_syscalls_destroy_syscall_table(&chan_priv->parent.syscall_table);
373 WARN_ON(ret);
374 }
375 list_for_each_entry_safe(event_recorder_enabler, tmp_event_recorder_enabler,
376 &session->priv->enablers_head, node)
377 lttng_event_enabler_destroy(&event_recorder_enabler->parent);
378 list_for_each_entry_safe(event_recorder_priv, tmpevent_recorder_priv, &session->priv->events, node)
379 _lttng_event_destroy(&event_recorder_priv->pub->parent);
380 list_for_each_entry_safe(chan_priv, tmpchan_priv, &session->priv->chan, node) {
381 BUG_ON(chan_priv->channel_type == METADATA_CHANNEL);
382 _lttng_channel_destroy(chan_priv->pub);
383 }
384 mutex_lock(&session->priv->metadata_cache->lock);
385 list_for_each_entry(metadata_stream, &session->priv->metadata_cache->metadata_stream, list)
386 _lttng_metadata_channel_hangup(metadata_stream);
387 mutex_unlock(&session->priv->metadata_cache->lock);
388 lttng_id_tracker_fini(&session->pid_tracker);
389 lttng_id_tracker_fini(&session->vpid_tracker);
390 lttng_id_tracker_fini(&session->uid_tracker);
391 lttng_id_tracker_fini(&session->vuid_tracker);
392 lttng_id_tracker_fini(&session->gid_tracker);
393 lttng_id_tracker_fini(&session->vgid_tracker);
394 kref_put(&session->priv->metadata_cache->refcount, metadata_cache_destroy);
395 list_del(&session->priv->list);
396 mutex_unlock(&sessions_mutex);
397 lttng_kvfree(session->priv);
398 lttng_kvfree(session);
399 }
400
401 void lttng_event_notifier_group_destroy(
402 struct lttng_event_notifier_group *event_notifier_group)
403 {
404 struct lttng_event_notifier_enabler *event_notifier_enabler, *tmp_event_notifier_enabler;
405 struct lttng_kernel_event_notifier_private *event_notifier_priv, *tmpevent_notifier_priv;
406 int ret;
407
408 if (!event_notifier_group)
409 return;
410
411 mutex_lock(&sessions_mutex);
412
413 ret = lttng_syscalls_unregister_syscall_table(&event_notifier_group->syscall_table);
414 WARN_ON(ret);
415
416 list_for_each_entry_safe(event_notifier_priv, tmpevent_notifier_priv,
417 &event_notifier_group->event_notifiers_head, node) {
418 ret = _lttng_event_notifier_unregister(event_notifier_priv->pub);
419 WARN_ON(ret);
420 }
421
422 /* Wait for in-flight event notifiers to complete */
423 synchronize_trace();
424
425 irq_work_sync(&event_notifier_group->wakeup_pending);
426
427 ret = lttng_syscalls_destroy_syscall_table(&event_notifier_group->syscall_table);
428 WARN_ON(ret);
429
430 list_for_each_entry_safe(event_notifier_enabler, tmp_event_notifier_enabler,
431 &event_notifier_group->enablers_head, node)
432 lttng_event_enabler_destroy(&event_notifier_enabler->parent);
433
434 list_for_each_entry_safe(event_notifier_priv, tmpevent_notifier_priv,
435 &event_notifier_group->event_notifiers_head, node)
436 _lttng_event_destroy(&event_notifier_priv->pub->parent);
437
438 if (event_notifier_group->error_counter) {
439 struct lttng_counter *error_counter = event_notifier_group->error_counter;
440
441 error_counter->ops->counter_destroy(error_counter->counter);
442 module_put(error_counter->transport->owner);
443 lttng_kvfree(error_counter);
444 event_notifier_group->error_counter = NULL;
445 }
446
447 event_notifier_group->ops->priv->channel_destroy(event_notifier_group->chan);
448 module_put(event_notifier_group->transport->owner);
449 list_del(&event_notifier_group->node);
450
451 mutex_unlock(&sessions_mutex);
452 lttng_kvfree(event_notifier_group);
453 }
454
455 int lttng_session_statedump(struct lttng_kernel_session *session)
456 {
457 int ret;
458
459 mutex_lock(&sessions_mutex);
460 ret = lttng_statedump_start(session);
461 mutex_unlock(&sessions_mutex);
462 return ret;
463 }
464
465 int lttng_session_enable(struct lttng_kernel_session *session)
466 {
467 int ret = 0;
468 struct lttng_kernel_channel_buffer_private *chan_priv;
469
470 mutex_lock(&sessions_mutex);
471 if (session->active) {
472 ret = -EBUSY;
473 goto end;
474 }
475
476 /* Set transient enabler state to "enabled" */
477 session->priv->tstate = 1;
478
479 /* We need to sync enablers with session before activation. */
480 lttng_session_sync_event_enablers(session);
481
482 /*
483 * Snapshot the number of events per channel to know the type of header
484 * we need to use.
485 */
486 list_for_each_entry(chan_priv, &session->priv->chan, node) {
487 if (chan_priv->header_type)
488 continue; /* don't change it if session stop/restart */
489 if (chan_priv->free_event_id < 31)
490 chan_priv->header_type = 1; /* compact */
491 else
492 chan_priv->header_type = 2; /* large */
493 }
494
495 /* Clear each stream's quiescent state. */
496 list_for_each_entry(chan_priv, &session->priv->chan, node) {
497 if (chan_priv->channel_type != METADATA_CHANNEL)
498 lib_ring_buffer_clear_quiescent_channel(chan_priv->rb_chan);
499 }
500
501 WRITE_ONCE(session->active, 1);
502 WRITE_ONCE(session->priv->been_active, 1);
503 ret = _lttng_session_metadata_statedump(session);
504 if (ret) {
505 WRITE_ONCE(session->active, 0);
506 goto end;
507 }
508 ret = lttng_statedump_start(session);
509 if (ret)
510 WRITE_ONCE(session->active, 0);
511 end:
512 mutex_unlock(&sessions_mutex);
513 return ret;
514 }
515
516 int lttng_session_disable(struct lttng_kernel_session *session)
517 {
518 int ret = 0;
519 struct lttng_kernel_channel_buffer_private *chan_priv;
520
521 mutex_lock(&sessions_mutex);
522 if (!session->active) {
523 ret = -EBUSY;
524 goto end;
525 }
526 WRITE_ONCE(session->active, 0);
527
528 /* Set transient enabler state to "disabled" */
529 session->priv->tstate = 0;
530 lttng_session_sync_event_enablers(session);
531
532 /* Set each stream's quiescent state. */
533 list_for_each_entry(chan_priv, &session->priv->chan, node) {
534 if (chan_priv->channel_type != METADATA_CHANNEL)
535 lib_ring_buffer_set_quiescent_channel(chan_priv->rb_chan);
536 }
537 end:
538 mutex_unlock(&sessions_mutex);
539 return ret;
540 }
541
542 int lttng_session_metadata_regenerate(struct lttng_kernel_session *session)
543 {
544 int ret = 0;
545 struct lttng_kernel_channel_buffer_private *chan_priv;
546 struct lttng_kernel_event_recorder_private *event_recorder_priv;
547 struct lttng_metadata_cache *cache = session->priv->metadata_cache;
548 struct lttng_metadata_stream *stream;
549
550 mutex_lock(&sessions_mutex);
551 if (!session->active) {
552 ret = -EBUSY;
553 goto end;
554 }
555
556 mutex_lock(&cache->lock);
557 memset(cache->data, 0, cache->cache_alloc);
558 cache->metadata_written = 0;
559 cache->version++;
560 list_for_each_entry(stream, &session->priv->metadata_cache->metadata_stream, list) {
561 stream->metadata_out = 0;
562 stream->metadata_in = 0;
563 }
564 mutex_unlock(&cache->lock);
565
566 session->priv->metadata_dumped = 0;
567 list_for_each_entry(chan_priv, &session->priv->chan, node) {
568 chan_priv->metadata_dumped = 0;
569 }
570
571 list_for_each_entry(event_recorder_priv, &session->priv->events, node) {
572 event_recorder_priv->metadata_dumped = 0;
573 }
574
575 ret = _lttng_session_metadata_statedump(session);
576
577 end:
578 mutex_unlock(&sessions_mutex);
579 return ret;
580 }
581
582 static
583 bool is_channel_buffer_metadata(struct lttng_kernel_channel_common *channel)
584 {
585 struct lttng_kernel_channel_buffer *chan_buf;
586
587 if (channel->type != LTTNG_KERNEL_CHANNEL_TYPE_BUFFER)
588 return false;
589 chan_buf = container_of(channel, struct lttng_kernel_channel_buffer, parent);
590 if (chan_buf->priv->channel_type == METADATA_CHANNEL)
591 return true;
592 return false;
593 }
594
595 int lttng_channel_enable(struct lttng_kernel_channel_common *channel)
596 {
597 int ret = 0;
598
599 mutex_lock(&sessions_mutex);
600 if (is_channel_buffer_metadata(channel)) {
601 ret = -EPERM;
602 goto end;
603 }
604 if (channel->enabled) {
605 ret = -EEXIST;
606 goto end;
607 }
608 /* Set transient enabler state to "enabled" */
609 channel->priv->tstate = 1;
610 lttng_session_sync_event_enablers(channel->session);
611 /* Set atomically the state to "enabled" */
612 WRITE_ONCE(channel->enabled, 1);
613 end:
614 mutex_unlock(&sessions_mutex);
615 return ret;
616 }
617
618 int lttng_channel_disable(struct lttng_kernel_channel_common *channel)
619 {
620 int ret = 0;
621
622 mutex_lock(&sessions_mutex);
623 if (is_channel_buffer_metadata(channel)) {
624 ret = -EPERM;
625 goto end;
626 }
627 if (!channel->enabled) {
628 ret = -EEXIST;
629 goto end;
630 }
631 /* Set atomically the state to "disabled" */
632 WRITE_ONCE(channel->enabled, 0);
633 /* Set transient enabler state to "disabled" */
634 channel->priv->tstate = 0;
635 lttng_session_sync_event_enablers(channel->session);
636 end:
637 mutex_unlock(&sessions_mutex);
638 return ret;
639 }
640
641 int lttng_event_enable(struct lttng_kernel_event_common *event)
642 {
643 int ret = 0;
644
645 mutex_lock(&sessions_mutex);
646 switch (event->type) {
647 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
648 {
649 struct lttng_kernel_event_recorder *event_recorder =
650 container_of(event, struct lttng_kernel_event_recorder, parent);
651
652 if (event_recorder->chan->priv->channel_type == METADATA_CHANNEL) {
653 ret = -EPERM;
654 goto end;
655 }
656 break;
657 }
658 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
659 switch (event->priv->instrumentation) {
660 case LTTNG_KERNEL_ABI_KRETPROBE:
661 ret = -EINVAL;
662 goto end;
663 default:
664 break;
665 }
666 break;
667 default:
668 break;
669 }
670
671 if (event->enabled) {
672 ret = -EEXIST;
673 goto end;
674 }
675 switch (event->priv->instrumentation) {
676 case LTTNG_KERNEL_ABI_TRACEPOINT:
677 lttng_fallthrough;
678 case LTTNG_KERNEL_ABI_SYSCALL:
679 ret = -EINVAL;
680 break;
681
682 case LTTNG_KERNEL_ABI_KPROBE:
683 lttng_fallthrough;
684 case LTTNG_KERNEL_ABI_UPROBE:
685 WRITE_ONCE(event->enabled, 1);
686 break;
687
688 case LTTNG_KERNEL_ABI_KRETPROBE:
689 ret = lttng_kretprobes_event_enable_state(event, 1);
690 break;
691
692 case LTTNG_KERNEL_ABI_FUNCTION:
693 lttng_fallthrough;
694 case LTTNG_KERNEL_ABI_NOOP:
695 lttng_fallthrough;
696 default:
697 WARN_ON_ONCE(1);
698 ret = -EINVAL;
699 }
700 end:
701 mutex_unlock(&sessions_mutex);
702 return ret;
703 }
704
705 int lttng_event_disable(struct lttng_kernel_event_common *event)
706 {
707 int ret = 0;
708
709 mutex_lock(&sessions_mutex);
710 switch (event->type) {
711 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
712 {
713 struct lttng_kernel_event_recorder *event_recorder =
714 container_of(event, struct lttng_kernel_event_recorder, parent);
715
716 if (event_recorder->chan->priv->channel_type == METADATA_CHANNEL) {
717 ret = -EPERM;
718 goto end;
719 }
720 break;
721 }
722 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
723 switch (event->priv->instrumentation) {
724 case LTTNG_KERNEL_ABI_KRETPROBE:
725 ret = -EINVAL;
726 goto end;
727 default:
728 break;
729 }
730 break;
731 default:
732 break;
733 }
734
735 if (!event->enabled) {
736 ret = -EEXIST;
737 goto end;
738 }
739 switch (event->priv->instrumentation) {
740 case LTTNG_KERNEL_ABI_TRACEPOINT:
741 lttng_fallthrough;
742 case LTTNG_KERNEL_ABI_SYSCALL:
743 ret = -EINVAL;
744 break;
745
746 case LTTNG_KERNEL_ABI_KPROBE:
747 lttng_fallthrough;
748 case LTTNG_KERNEL_ABI_UPROBE:
749 WRITE_ONCE(event->enabled, 0);
750 break;
751
752 case LTTNG_KERNEL_ABI_KRETPROBE:
753 ret = lttng_kretprobes_event_enable_state(event, 0);
754 break;
755
756 case LTTNG_KERNEL_ABI_FUNCTION:
757 lttng_fallthrough;
758 case LTTNG_KERNEL_ABI_NOOP:
759 lttng_fallthrough;
760 default:
761 WARN_ON_ONCE(1);
762 ret = -EINVAL;
763 }
764 end:
765 mutex_unlock(&sessions_mutex);
766 return ret;
767 }
768
769 struct lttng_kernel_channel_buffer *lttng_channel_buffer_create(struct lttng_kernel_session *session,
770 const char *transport_name,
771 void *buf_addr,
772 size_t subbuf_size, size_t num_subbuf,
773 unsigned int switch_timer_interval,
774 unsigned int read_timer_interval,
775 enum channel_type channel_type)
776 {
777 struct lttng_kernel_channel_buffer *chan;
778 struct lttng_kernel_channel_buffer_private *chan_priv;
779 struct lttng_transport *transport = NULL;
780
781 mutex_lock(&sessions_mutex);
782 if (session->priv->been_active && channel_type != METADATA_CHANNEL)
783 goto active; /* Refuse to add channel to active session */
784 transport = lttng_transport_find(transport_name);
785 if (!transport) {
786 printk(KERN_WARNING "LTTng: transport %s not found\n",
787 transport_name);
788 goto notransport;
789 }
790 if (!try_module_get(transport->owner)) {
791 printk(KERN_WARNING "LTTng: Can't lock transport module.\n");
792 goto notransport;
793 }
794 chan = kzalloc(sizeof(struct lttng_kernel_channel_buffer), GFP_KERNEL);
795 if (!chan)
796 goto nomem;
797 chan_priv = kzalloc(sizeof(struct lttng_kernel_channel_buffer_private), GFP_KERNEL);
798 if (!chan_priv)
799 goto nomem_priv;
800 chan->priv = chan_priv;
801 chan_priv->pub = chan;
802 chan->parent.type = LTTNG_KERNEL_CHANNEL_TYPE_BUFFER;
803 chan->parent.session = session;
804 chan->priv->id = session->priv->free_chan_id++;
805 chan->ops = &transport->ops;
806 /*
807 * Note: the channel creation op already writes into the packet
808 * headers. Therefore the "chan" information used as input
809 * should already be accessible.
810 */
811 chan->priv->rb_chan = transport->ops.priv->channel_create(transport_name,
812 chan, buf_addr, subbuf_size, num_subbuf,
813 switch_timer_interval, read_timer_interval);
814 if (!chan->priv->rb_chan)
815 goto create_error;
816 chan->priv->parent.tstate = 1;
817 chan->parent.enabled = 1;
818 chan->priv->transport = transport;
819 chan->priv->channel_type = channel_type;
820 list_add(&chan->priv->node, &session->priv->chan);
821 mutex_unlock(&sessions_mutex);
822 return chan;
823
824 create_error:
825 kfree(chan_priv);
826 nomem_priv:
827 kfree(chan);
828 nomem:
829 if (transport)
830 module_put(transport->owner);
831 notransport:
832 active:
833 mutex_unlock(&sessions_mutex);
834 return NULL;
835 }
836
837 /*
838 * Only used internally at session destruction for per-cpu channels, and
839 * when the metadata channel is released.
840 * Needs to be called with sessions mutex held.
841 */
842 static
843 void _lttng_channel_destroy(struct lttng_kernel_channel_buffer *chan)
844 {
845 chan->ops->priv->channel_destroy(chan->priv->rb_chan);
846 module_put(chan->priv->transport->owner);
847 list_del(&chan->priv->node);
848 lttng_kernel_destroy_context(chan->priv->ctx);
849 kfree(chan->priv);
850 kfree(chan);
851 }
852
853 void lttng_metadata_channel_destroy(struct lttng_kernel_channel_buffer *chan)
854 {
855 BUG_ON(chan->priv->channel_type != METADATA_CHANNEL);
856
857 /* Protect the metadata cache with the sessions_mutex. */
858 mutex_lock(&sessions_mutex);
859 _lttng_channel_destroy(chan);
860 mutex_unlock(&sessions_mutex);
861 }
862 EXPORT_SYMBOL_GPL(lttng_metadata_channel_destroy);
863
864 static
865 void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream)
866 {
867 stream->finalized = 1;
868 wake_up_interruptible(&stream->read_wait);
869 }
870
871
872 /*
873 * Supports event creation while tracing session is active.
874 * Needs to be called with sessions mutex held.
875 */
876 struct lttng_kernel_event_recorder *_lttng_kernel_event_recorder_create(struct lttng_event_recorder_enabler *event_enabler,
877 const struct lttng_kernel_event_desc *event_desc)
878 {
879 struct lttng_kernel_channel_buffer *chan = event_enabler->chan;
880 struct lttng_kernel_abi_event *event_param = &event_enabler->parent.event_param;
881 enum lttng_kernel_abi_instrumentation itype = event_param->instrumentation;
882 struct lttng_kernel_session *session = chan->parent.session;
883 struct lttng_kernel_event_recorder *event_recorder;
884 struct lttng_kernel_event_recorder_private *event_recorder_priv;
885 const char *event_name;
886 struct hlist_head *head;
887 int ret;
888
889 if (chan->priv->free_event_id == -1U) {
890 ret = -EMFILE;
891 goto full;
892 }
893
894 switch (itype) {
895 case LTTNG_KERNEL_ABI_TRACEPOINT:
896 event_name = event_desc->event_name;
897 break;
898
899 case LTTNG_KERNEL_ABI_KPROBE:
900 lttng_fallthrough;
901 case LTTNG_KERNEL_ABI_UPROBE:
902 lttng_fallthrough;
903 case LTTNG_KERNEL_ABI_KRETPROBE:
904 lttng_fallthrough;
905 case LTTNG_KERNEL_ABI_SYSCALL:
906 event_name = event_param->name;
907 break;
908
909 case LTTNG_KERNEL_ABI_FUNCTION:
910 lttng_fallthrough;
911 case LTTNG_KERNEL_ABI_NOOP:
912 lttng_fallthrough;
913 default:
914 WARN_ON_ONCE(1);
915 ret = -EINVAL;
916 goto type_error;
917 }
918
919 head = utils_borrow_hash_table_bucket(session->priv->events_ht.table,
920 LTTNG_EVENT_HT_SIZE, event_name);
921 lttng_hlist_for_each_entry(event_recorder_priv, head, hlist) {
922 WARN_ON_ONCE(!event_recorder_priv->parent.desc);
923 if (!strncmp(event_recorder_priv->parent.desc->event_name, event_name,
924 LTTNG_KERNEL_ABI_SYM_NAME_LEN - 1)
925 && chan == event_recorder_priv->pub->chan) {
926 ret = -EEXIST;
927 goto exist;
928 }
929 }
930
931 event_recorder = kmem_cache_zalloc(event_recorder_cache, GFP_KERNEL);
932 if (!event_recorder) {
933 ret = -ENOMEM;
934 goto cache_error;
935 }
936 event_recorder_priv = kmem_cache_zalloc(event_recorder_private_cache, GFP_KERNEL);
937 if (!event_recorder_priv) {
938 ret = -ENOMEM;
939 goto cache_private_error;
940 }
941 event_recorder_priv->pub = event_recorder;
942 event_recorder_priv->parent.pub = &event_recorder->parent;
943 event_recorder->priv = event_recorder_priv;
944 event_recorder->parent.priv = &event_recorder_priv->parent;
945 event_recorder->parent.type = LTTNG_KERNEL_EVENT_TYPE_RECORDER;
946
947 event_recorder->parent.run_filter = lttng_kernel_interpret_event_filter;
948 event_recorder->chan = chan;
949 event_recorder->priv->id = chan->priv->free_event_id++;
950 event_recorder->priv->parent.instrumentation = itype;
951 INIT_LIST_HEAD(&event_recorder->priv->parent.filter_bytecode_runtime_head);
952 INIT_LIST_HEAD(&event_recorder->priv->parent.enablers_ref_head);
953
954 switch (itype) {
955 case LTTNG_KERNEL_ABI_TRACEPOINT:
956 /* Event will be enabled by enabler sync. */
957 event_recorder->parent.enabled = 0;
958 event_recorder->priv->parent.registered = 0;
959 event_recorder->priv->parent.desc = lttng_event_desc_get(event_name);
960 if (!event_recorder->priv->parent.desc) {
961 ret = -ENOENT;
962 goto register_error;
963 }
964 /* Populate lttng_event structure before event registration. */
965 smp_wmb();
966 break;
967
968 case LTTNG_KERNEL_ABI_KPROBE:
969 /*
970 * Needs to be explicitly enabled after creation, since
971 * we may want to apply filters.
972 */
973 event_recorder->parent.enabled = 0;
974 event_recorder->priv->parent.registered = 1;
975 /*
976 * Populate lttng_event structure before event
977 * registration.
978 */
979 smp_wmb();
980 ret = lttng_kprobes_register_event(event_name,
981 event_param->u.kprobe.symbol_name,
982 event_param->u.kprobe.offset,
983 event_param->u.kprobe.addr,
984 event_recorder);
985 if (ret) {
986 ret = -EINVAL;
987 goto register_error;
988 }
989 ret = try_module_get(event_recorder->priv->parent.desc->owner);
990 WARN_ON_ONCE(!ret);
991 break;
992
993 case LTTNG_KERNEL_ABI_KRETPROBE:
994 {
995 struct lttng_kernel_event_recorder *event_recorder_return;
996 struct lttng_kernel_event_recorder_private *event_recorder_return_priv;
997
998 /* kretprobe defines 2 events */
999 /*
1000 * Needs to be explicitly enabled after creation, since
1001 * we may want to apply filters.
1002 */
1003 event_recorder->parent.enabled = 0;
1004 event_recorder->priv->parent.registered = 1;
1005
1006 event_recorder_return = kmem_cache_zalloc(event_recorder_cache, GFP_KERNEL);
1007 if (!event_recorder_return) {
1008 ret = -ENOMEM;
1009 goto register_error;
1010 }
1011 event_recorder_return_priv = kmem_cache_zalloc(event_recorder_private_cache, GFP_KERNEL);
1012 if (!event_recorder_return_priv) {
1013 kmem_cache_free(event_recorder_cache, event_recorder_return);
1014 ret = -ENOMEM;
1015 goto register_error;
1016 }
1017 event_recorder_return_priv->pub = event_recorder_return;
1018 event_recorder_return_priv->parent.pub = &event_recorder_return->parent;
1019 event_recorder_return->priv = event_recorder_return_priv;
1020 event_recorder_return->parent.priv = &event_recorder_return_priv->parent;
1021 event_recorder_return->parent.type = LTTNG_KERNEL_EVENT_TYPE_RECORDER;
1022
1023 event_recorder_return->parent.run_filter = lttng_kernel_interpret_event_filter;
1024 event_recorder_return->chan = chan;
1025 event_recorder_return->priv->id = chan->priv->free_event_id++;
1026 event_recorder_return->priv->parent.instrumentation = itype;
1027 event_recorder_return->parent.enabled = 0;
1028 event_recorder_return->priv->parent.registered = 1;
1029 INIT_LIST_HEAD(&event_recorder_return->priv->parent.filter_bytecode_runtime_head);
1030 INIT_LIST_HEAD(&event_recorder_return->priv->parent.enablers_ref_head);
1031 /*
1032 * Populate lttng_event structure before kretprobe registration.
1033 */
1034 smp_wmb();
1035 ret = lttng_kretprobes_register(event_name,
1036 event_param->u.kretprobe.symbol_name,
1037 event_param->u.kretprobe.offset,
1038 event_param->u.kretprobe.addr,
1039 event_recorder, event_recorder_return);
1040 if (ret) {
1041 kmem_cache_free(event_recorder_private_cache, event_recorder_return_priv);
1042 kmem_cache_free(event_recorder_cache, event_recorder_return);
1043 ret = -EINVAL;
1044 goto register_error;
1045 }
1046 /* Take 2 refs on the module: one per event. */
1047 ret = try_module_get(event_recorder->priv->parent.desc->owner);
1048 WARN_ON_ONCE(!ret);
1049 ret = try_module_get(event_recorder_return->priv->parent.desc->owner);
1050 WARN_ON_ONCE(!ret);
1051 ret = _lttng_event_metadata_statedump(chan->parent.session, chan,
1052 event_recorder_return);
1053 WARN_ON_ONCE(ret > 0);
1054 if (ret) {
1055 kmem_cache_free(event_recorder_private_cache, event_recorder_return_priv);
1056 kmem_cache_free(event_recorder_cache, event_recorder_return);
1057 module_put(event_recorder_return->priv->parent.desc->owner);
1058 module_put(event_recorder->priv->parent.desc->owner);
1059 goto statedump_error;
1060 }
1061 list_add(&event_recorder_return->priv->node, &chan->parent.session->priv->events);
1062 break;
1063 }
1064
1065 case LTTNG_KERNEL_ABI_SYSCALL:
1066 /*
1067 * Needs to be explicitly enabled after creation, since
1068 * we may want to apply filters.
1069 */
1070 event_recorder->parent.enabled = 0;
1071 event_recorder->priv->parent.registered = 0;
1072 event_recorder->priv->parent.desc = event_desc;
1073 switch (event_param->u.syscall.entryexit) {
1074 case LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT:
1075 ret = -EINVAL;
1076 goto register_error;
1077 case LTTNG_KERNEL_ABI_SYSCALL_ENTRY:
1078 event_recorder->priv->parent.u.syscall.entryexit = LTTNG_SYSCALL_ENTRY;
1079 break;
1080 case LTTNG_KERNEL_ABI_SYSCALL_EXIT:
1081 event_recorder->priv->parent.u.syscall.entryexit = LTTNG_SYSCALL_EXIT;
1082 break;
1083 }
1084 switch (event_param->u.syscall.abi) {
1085 case LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL:
1086 ret = -EINVAL;
1087 goto register_error;
1088 case LTTNG_KERNEL_ABI_SYSCALL_ABI_NATIVE:
1089 event_recorder->priv->parent.u.syscall.abi = LTTNG_SYSCALL_ABI_NATIVE;
1090 break;
1091 case LTTNG_KERNEL_ABI_SYSCALL_ABI_COMPAT:
1092 event_recorder->priv->parent.u.syscall.abi = LTTNG_SYSCALL_ABI_COMPAT;
1093 break;
1094 }
1095 if (!event_recorder->priv->parent.desc) {
1096 ret = -EINVAL;
1097 goto register_error;
1098 }
1099 break;
1100
1101 case LTTNG_KERNEL_ABI_UPROBE:
1102 /*
1103 * Needs to be explicitly enabled after creation, since
1104 * we may want to apply filters.
1105 */
1106 event_recorder->parent.enabled = 0;
1107 event_recorder->priv->parent.registered = 1;
1108
1109 /*
1110 * Populate lttng_event structure before event
1111 * registration.
1112 */
1113 smp_wmb();
1114
1115 ret = lttng_uprobes_register_event(event_param->name,
1116 event_param->u.uprobe.fd,
1117 event_recorder);
1118 if (ret)
1119 goto register_error;
1120 ret = try_module_get(event_recorder->priv->parent.desc->owner);
1121 WARN_ON_ONCE(!ret);
1122 break;
1123
1124 case LTTNG_KERNEL_ABI_FUNCTION:
1125 lttng_fallthrough;
1126 case LTTNG_KERNEL_ABI_NOOP:
1127 lttng_fallthrough;
1128 default:
1129 WARN_ON_ONCE(1);
1130 ret = -EINVAL;
1131 goto register_error;
1132 }
1133 ret = _lttng_event_metadata_statedump(chan->parent.session, chan, event_recorder);
1134 WARN_ON_ONCE(ret > 0);
1135 if (ret) {
1136 goto statedump_error;
1137 }
1138 hlist_add_head(&event_recorder->priv->hlist, head);
1139 list_add(&event_recorder->priv->node, &chan->parent.session->priv->events);
1140 return event_recorder;
1141
1142 statedump_error:
1143 /* If a statedump error occurs, events will not be readable. */
1144 register_error:
1145 kmem_cache_free(event_recorder_private_cache, event_recorder_priv);
1146 cache_private_error:
1147 kmem_cache_free(event_recorder_cache, event_recorder);
1148 cache_error:
1149 exist:
1150 type_error:
1151 full:
1152 return ERR_PTR(ret);
1153 }
1154
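/*
 * Create an event notifier. Needs to be called with sessions mutex held.
 */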
1155 struct lttng_kernel_event_notifier *_lttng_event_notifier_create(
1156 const struct lttng_kernel_event_desc *event_desc,
1157 uint64_t token, uint64_t error_counter_index,
1158 struct lttng_event_notifier_group *event_notifier_group,
1159 struct lttng_kernel_abi_event_notifier *event_notifier_param,
1160 enum lttng_kernel_abi_instrumentation itype)
1161 {
1162 struct lttng_kernel_event_notifier *event_notifier;
1163 struct lttng_kernel_event_notifier_private *event_notifier_priv;
1164 struct lttng_counter *error_counter;
1165 const char *event_name;
1166 struct hlist_head *head;
1167 int ret;
1168
1169 switch (itype) {
1170 case LTTNG_KERNEL_ABI_TRACEPOINT:
1171 event_name = event_desc->event_name;
1172 break;
1173
1174 case LTTNG_KERNEL_ABI_KPROBE:
1175 lttng_fallthrough;
1176 case LTTNG_KERNEL_ABI_UPROBE:
1177 lttng_fallthrough;
1178 case LTTNG_KERNEL_ABI_SYSCALL:
1179 event_name = event_notifier_param->event.name;
1180 break;
1181
1182 case LTTNG_KERNEL_ABI_KRETPROBE:
1183 lttng_fallthrough;
1184 case LTTNG_KERNEL_ABI_FUNCTION:
1185 lttng_fallthrough;
1186 case LTTNG_KERNEL_ABI_NOOP:
1187 lttng_fallthrough;
1188 default:
1189 WARN_ON_ONCE(1);
1190 ret = -EINVAL;
1191 goto type_error;
1192 }
1193
1194 head = utils_borrow_hash_table_bucket(event_notifier_group->event_notifiers_ht.table,
1195 LTTNG_EVENT_NOTIFIER_HT_SIZE, event_name);
1196 lttng_hlist_for_each_entry(event_notifier_priv, head, hlist) {
1197 WARN_ON_ONCE(!event_notifier_priv->parent.desc);
1198 if (!strncmp(event_notifier_priv->parent.desc->event_name, event_name,
1199 LTTNG_KERNEL_ABI_SYM_NAME_LEN - 1)
1200 && event_notifier_group == event_notifier_priv->group
1201 && token == event_notifier_priv->parent.user_token) {
1202 ret = -EEXIST;
1203 goto exist;
1204 }
1205 }
1206
1207 event_notifier = kmem_cache_zalloc(event_notifier_cache, GFP_KERNEL);
1208 if (!event_notifier) {
1209 ret = -ENOMEM;
1210 goto cache_error;
1211 }
1212 event_notifier_priv = kmem_cache_zalloc(event_notifier_private_cache, GFP_KERNEL);
1213 if (!event_notifier_priv) {
1214 ret = -ENOMEM;
1215 goto cache_private_error;
1216 }
1217 event_notifier_priv->pub = event_notifier;
1218 event_notifier_priv->parent.pub = &event_notifier->parent;
1219 event_notifier->priv = event_notifier_priv;
1220 event_notifier->parent.priv = &event_notifier_priv->parent;
1221 event_notifier->parent.type = LTTNG_KERNEL_EVENT_TYPE_NOTIFIER;
1222
1223 event_notifier->priv->group = event_notifier_group;
1224 event_notifier->priv->parent.user_token = token;
1225 event_notifier->priv->error_counter_index = error_counter_index;
1226 event_notifier->priv->num_captures = 0;
1227 event_notifier->priv->parent.instrumentation = itype;
1228 event_notifier->notification_send = lttng_event_notifier_notification_send;
1229 INIT_LIST_HEAD(&event_notifier->priv->parent.filter_bytecode_runtime_head);
1230 INIT_LIST_HEAD(&event_notifier->priv->parent.enablers_ref_head);
1231 INIT_LIST_HEAD(&event_notifier->priv->capture_bytecode_runtime_head);
1232 event_notifier->parent.run_filter = lttng_kernel_interpret_event_filter;
1233
1234 switch (itype) {
1235 case LTTNG_KERNEL_ABI_TRACEPOINT:
1236 /* Event will be enabled by enabler sync. */
1237 event_notifier->parent.enabled = 0;
1238 event_notifier->priv->parent.registered = 0;
1239 event_notifier->priv->parent.desc = lttng_event_desc_get(event_name);
1240 if (!event_notifier->priv->parent.desc) {
1241 ret = -ENOENT;
1242 goto register_error;
1243 }
1244 /* Populate lttng_event_notifier structure before event registration. */
1245 smp_wmb();
1246 break;
1247
1248 case LTTNG_KERNEL_ABI_KPROBE:
1249 /*
1250 * Needs to be explicitly enabled after creation, since
1251 * we may want to apply filters.
1252 */
1253 event_notifier->parent.enabled = 0;
1254 event_notifier->priv->parent.registered = 1;
1255 /*
1256 * Populate lttng_event_notifier structure before event
1257 * registration.
1258 */
1259 smp_wmb();
1260 ret = lttng_kprobes_register_event_notifier(
1261 event_notifier_param->event.u.kprobe.symbol_name,
1262 event_notifier_param->event.u.kprobe.offset,
1263 event_notifier_param->event.u.kprobe.addr,
1264 event_notifier);
1265 if (ret) {
1266 ret = -EINVAL;
1267 goto register_error;
1268 }
1269 ret = try_module_get(event_notifier->priv->parent.desc->owner);
1270 WARN_ON_ONCE(!ret);
1271 break;
1272
1273 case LTTNG_KERNEL_ABI_SYSCALL:
1274 /*
1275 * Needs to be explicitly enabled after creation, since
1276 * we may want to apply filters.
1277 */
1278 event_notifier->parent.enabled = 0;
1279 event_notifier->priv->parent.registered = 0;
1280 event_notifier->priv->parent.desc = event_desc;
1281 switch (event_notifier_param->event.u.syscall.entryexit) {
1282 case LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT:
1283 ret = -EINVAL;
1284 goto register_error;
1285 case LTTNG_KERNEL_ABI_SYSCALL_ENTRY:
1286 event_notifier->priv->parent.u.syscall.entryexit = LTTNG_SYSCALL_ENTRY;
1287 break;
1288 case LTTNG_KERNEL_ABI_SYSCALL_EXIT:
1289 event_notifier->priv->parent.u.syscall.entryexit = LTTNG_SYSCALL_EXIT;
1290 break;
1291 }
1292 switch (event_notifier_param->event.u.syscall.abi) {
1293 case LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL:
1294 ret = -EINVAL;
1295 goto register_error;
1296 case LTTNG_KERNEL_ABI_SYSCALL_ABI_NATIVE:
1297 event_notifier->priv->parent.u.syscall.abi = LTTNG_SYSCALL_ABI_NATIVE;
1298 break;
1299 case LTTNG_KERNEL_ABI_SYSCALL_ABI_COMPAT:
1300 event_notifier->priv->parent.u.syscall.abi = LTTNG_SYSCALL_ABI_COMPAT;
1301 break;
1302 }
1303
1304 if (!event_notifier->priv->parent.desc) {
1305 ret = -EINVAL;
1306 goto register_error;
1307 }
1308 break;
1309
1310 case LTTNG_KERNEL_ABI_UPROBE:
1311 /*
1312 * Needs to be explicitly enabled after creation, since
1313 * we may want to apply filters.
1314 */
1315 event_notifier->parent.enabled = 0;
1316 event_notifier->priv->parent.registered = 1;
1317
1318 /*
1319 * Populate lttng_event_notifier structure before
1320 * event_notifier registration.
1321 */
1322 smp_wmb();
1323
1324 ret = lttng_uprobes_register_event_notifier(
1325 event_notifier_param->event.name,
1326 event_notifier_param->event.u.uprobe.fd,
1327 event_notifier);
1328 if (ret)
1329 goto register_error;
1330 ret = try_module_get(event_notifier->priv->parent.desc->owner);
1331 WARN_ON_ONCE(!ret);
1332 break;
1333
1334 case LTTNG_KERNEL_ABI_KRETPROBE:
1335 lttng_fallthrough;
1336 case LTTNG_KERNEL_ABI_FUNCTION:
1337 lttng_fallthrough;
1338 case LTTNG_KERNEL_ABI_NOOP:
1339 lttng_fallthrough;
1340 default:
1341 WARN_ON_ONCE(1);
1342 ret = -EINVAL;
1343 goto register_error;
1344 }
1345
1346 list_add(&event_notifier->priv->node, &event_notifier_group->event_notifiers_head);
1347 hlist_add_head(&event_notifier->priv->hlist, head);
1348
1349 /*
1350 * Clear the error counter bucket. The sessiond keeps track of which
1351 * bucket is currently in use. We trust it. The session lock
1352 * synchronizes against concurrent creation of the error
1353 * counter.
1354 */
1355 error_counter = event_notifier_group->error_counter;
1356 if (error_counter) {
1357 size_t dimension_index[1];
1358
1359 /*
1360 * Check that the index is within the boundary of the counter.
1361 */
1362 if (event_notifier->priv->error_counter_index >= event_notifier_group->error_counter_len) {
1363 printk(KERN_INFO "LTTng: event_notifier: Error counter index out-of-bound: counter-len=%zu, index=%llu\n",
1364 event_notifier_group->error_counter_len, event_notifier->priv->error_counter_index);
1365 ret = -EINVAL;
1366 goto register_error;
1367 }
1368
1369 dimension_index[0] = event_notifier->priv->error_counter_index;
1370 ret = error_counter->ops->counter_clear(error_counter->counter, dimension_index);
1371 if (ret) {
1372 printk(KERN_INFO "LTTng: event_notifier: Unable to clear error counter bucket %llu\n",
1373 event_notifier->priv->error_counter_index);
1374 goto register_error;
1375 }
1376 }
1377
1378 return event_notifier;
1379
1380 register_error:
1381 kmem_cache_free(event_notifier_private_cache, event_notifier_priv);
1382 cache_private_error:
1383 kmem_cache_free(event_notifier_cache, event_notifier);
1384 cache_error:
1385 exist:
1386 type_error:
1387 return ERR_PTR(ret);
1388 }
1389
1390 int lttng_kernel_counter_read(struct lttng_counter *counter,
1391 const size_t *dim_indexes, int32_t cpu,
1392 int64_t *val, bool *overflow, bool *underflow)
1393 {
1394 return counter->ops->counter_read(counter->counter, dim_indexes,
1395 cpu, val, overflow, underflow);
1396 }
1397
1398 int lttng_kernel_counter_aggregate(struct lttng_counter *counter,
1399 const size_t *dim_indexes, int64_t *val,
1400 bool *overflow, bool *underflow)
1401 {
1402 return counter->ops->counter_aggregate(counter->counter, dim_indexes,
1403 val, overflow, underflow);
1404 }
1405
1406 int lttng_kernel_counter_clear(struct lttng_counter *counter,
1407 const size_t *dim_indexes)
1408 {
1409 return counter->ops->counter_clear(counter->counter, dim_indexes);
1410 }
1411
1412 struct lttng_kernel_event_recorder *lttng_kernel_event_recorder_create(struct lttng_event_recorder_enabler *event_enabler,
1413 const struct lttng_kernel_event_desc *event_desc)
1414 {
1415 struct lttng_kernel_event_recorder *event;
1416
1417 mutex_lock(&sessions_mutex);
1418 event = _lttng_kernel_event_recorder_create(event_enabler, event_desc);
1419 mutex_unlock(&sessions_mutex);
1420 return event;
1421 }
1422
1423 struct lttng_kernel_event_notifier *lttng_event_notifier_create(
1424 const struct lttng_kernel_event_desc *event_desc,
1425 uint64_t id, uint64_t error_counter_index,
1426 struct lttng_event_notifier_group *event_notifier_group,
1427 struct lttng_kernel_abi_event_notifier *event_notifier_param,
1428 enum lttng_kernel_abi_instrumentation itype)
1429 {
1430 struct lttng_kernel_event_notifier *event_notifier;
1431
1432 mutex_lock(&sessions_mutex);
1433 event_notifier = _lttng_event_notifier_create(event_desc, id,
1434 error_counter_index, event_notifier_group,
1435 event_notifier_param, itype);
1436 mutex_unlock(&sessions_mutex);
1437 return event_notifier;
1438 }
1439
1440 /* Only used for tracepoints for now. */
1441 static
1442 void register_event_recorder(struct lttng_kernel_event_recorder *event_recorder)
1443 {
1444 const struct lttng_kernel_event_desc *desc;
1445 int ret = -EINVAL;
1446
1447 if (event_recorder->priv->parent.registered)
1448 return;
1449
1450 desc = event_recorder->priv->parent.desc;
1451 switch (event_recorder->priv->parent.instrumentation) {
1452 case LTTNG_KERNEL_ABI_TRACEPOINT:
1453 ret = lttng_wrapper_tracepoint_probe_register(desc->event_kname,
1454 desc->tp_class->probe_callback,
1455 &event_recorder->parent);
1456 break;
1457
1458 case LTTNG_KERNEL_ABI_SYSCALL:
1459 ret = lttng_syscall_filter_enable_event(&event_recorder->parent);
1460 break;
1461
1462 case LTTNG_KERNEL_ABI_KPROBE:
1463 lttng_fallthrough;
1464 case LTTNG_KERNEL_ABI_UPROBE:
1465 lttng_fallthrough;
1466 case LTTNG_KERNEL_ABI_KRETPROBE:
1467 ret = 0;
1468 break;
1469
1470 case LTTNG_KERNEL_ABI_FUNCTION:
1471 lttng_fallthrough;
1472 case LTTNG_KERNEL_ABI_NOOP:
1473 lttng_fallthrough;
1474 default:
1475 WARN_ON_ONCE(1);
1476 }
1477 if (!ret)
1478 event_recorder->priv->parent.registered = 1;
1479 }
1480
1481 /*
1482 * Only used internally at session destruction.
1483 */
1484 int _lttng_event_recorder_unregister(struct lttng_kernel_event_recorder *event_recorder)
1485 {
1486 struct lttng_kernel_event_common_private *event_priv = &event_recorder->priv->parent;
1487 const struct lttng_kernel_event_desc *desc;
1488 int ret = -EINVAL;
1489
1490 if (!event_priv->registered)
1491 return 0;
1492
1493 desc = event_priv->desc;
1494 switch (event_priv->instrumentation) {
1495 case LTTNG_KERNEL_ABI_TRACEPOINT:
1496 ret = lttng_wrapper_tracepoint_probe_unregister(event_priv->desc->event_kname,
1497 event_priv->desc->tp_class->probe_callback,
1498 &event_recorder->parent);
1499 break;
1500
1501 case LTTNG_KERNEL_ABI_KPROBE:
1502 lttng_kprobes_unregister_event(event_recorder);
1503 ret = 0;
1504 break;
1505
1506 case LTTNG_KERNEL_ABI_KRETPROBE:
1507 lttng_kretprobes_unregister(event_recorder);
1508 ret = 0;
1509 break;
1510
1511 case LTTNG_KERNEL_ABI_SYSCALL:
1512 ret = lttng_syscall_filter_disable_event(&event_recorder->parent);
1513 break;
1514
1515 case LTTNG_KERNEL_ABI_NOOP:
1516 ret = 0;
1517 break;
1518
1519 case LTTNG_KERNEL_ABI_UPROBE:
1520 lttng_uprobes_unregister_event(event_recorder);
1521 ret = 0;
1522 break;
1523
1524 case LTTNG_KERNEL_ABI_FUNCTION:
1525 lttng_fallthrough;
1526 default:
1527 WARN_ON_ONCE(1);
1528 }
1529 if (!ret)
1530 event_priv->registered = 0;
1531 return ret;
1532 }
1533
1534 /* Only used for tracepoints for now. */
1535 static
1536 void register_event_notifier(struct lttng_kernel_event_notifier *event_notifier)
1537 {
1538 const struct lttng_kernel_event_desc *desc;
1539 int ret = -EINVAL;
1540
1541 if (event_notifier->priv->parent.registered)
1542 return;
1543
1544 desc = event_notifier->priv->parent.desc;
1545 switch (event_notifier->priv->parent.instrumentation) {
1546 case LTTNG_KERNEL_ABI_TRACEPOINT:
1547 ret = lttng_wrapper_tracepoint_probe_register(desc->event_kname,
1548 desc->tp_class->probe_callback,
1549 &event_notifier->parent);
1550 break;
1551
1552 case LTTNG_KERNEL_ABI_SYSCALL:
1553 ret = lttng_syscall_filter_enable_event(&event_notifier->parent);
1554 break;
1555
1556 case LTTNG_KERNEL_ABI_KPROBE:
1557 lttng_fallthrough;
1558 case LTTNG_KERNEL_ABI_UPROBE:
1559 ret = 0;
1560 break;
1561
1562 case LTTNG_KERNEL_ABI_KRETPROBE:
1563 lttng_fallthrough;
1564 case LTTNG_KERNEL_ABI_FUNCTION:
1565 lttng_fallthrough;
1566 case LTTNG_KERNEL_ABI_NOOP:
1567 lttng_fallthrough;
1568 default:
1569 WARN_ON_ONCE(1);
1570 }
1571 if (!ret)
1572 event_notifier->priv->parent.registered = 1;
1573 }
1574
1575 static
1576 int _lttng_event_notifier_unregister(
1577 struct lttng_kernel_event_notifier *event_notifier)
1578 {
1579 const struct lttng_kernel_event_desc *desc;
1580 int ret = -EINVAL;
1581
1582 if (!event_notifier->priv->parent.registered)
1583 return 0;
1584
1585 desc = event_notifier->priv->parent.desc;
1586 switch (event_notifier->priv->parent.instrumentation) {
1587 case LTTNG_KERNEL_ABI_TRACEPOINT:
1588 ret = lttng_wrapper_tracepoint_probe_unregister(event_notifier->priv->parent.desc->event_kname,
1589 event_notifier->priv->parent.desc->tp_class->probe_callback,
1590 &event_notifier->parent);
1591 break;
1592
1593 case LTTNG_KERNEL_ABI_KPROBE:
1594 lttng_kprobes_unregister_event_notifier(event_notifier);
1595 ret = 0;
1596 break;
1597
1598 case LTTNG_KERNEL_ABI_UPROBE:
1599 lttng_uprobes_unregister_event_notifier(event_notifier);
1600 ret = 0;
1601 break;
1602
1603 case LTTNG_KERNEL_ABI_SYSCALL:
1604 ret = lttng_syscall_filter_disable_event(&event_notifier->parent);
1605 break;
1606
1607 case LTTNG_KERNEL_ABI_KRETPROBE:
1608 lttng_fallthrough;
1609 case LTTNG_KERNEL_ABI_FUNCTION:
1610 lttng_fallthrough;
1611 case LTTNG_KERNEL_ABI_NOOP:
1612 lttng_fallthrough;
1613 default:
1614 WARN_ON_ONCE(1);
1615 }
1616 if (!ret)
1617 event_notifier->priv->parent.registered = 0;
1618 return ret;
1619 }
1620
1621 /*
1622 * Only used internally at session destruction.
1623 */
1624 static
1625 void _lttng_event_destroy(struct lttng_kernel_event_common *event)
1626 {
1627 struct lttng_kernel_event_common_private *event_priv = event->priv;
1628 struct lttng_enabler_ref *enabler_ref, *tmp_enabler_ref;
1629
1630 lttng_free_event_filter_runtime(event);
1631 /* Free event enabler refs */
1632 list_for_each_entry_safe(enabler_ref, tmp_enabler_ref,
1633 &event_priv->enablers_ref_head, node)
1634 kfree(enabler_ref);
1635
1636 switch (event->type) {
1637 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
1638 {
1639 struct lttng_kernel_event_recorder *event_recorder =
1640 container_of(event, struct lttng_kernel_event_recorder, parent);
1641
1642 switch (event_priv->instrumentation) {
1643 case LTTNG_KERNEL_ABI_TRACEPOINT:
1644 lttng_event_desc_put(event_priv->desc);
1645 break;
1646
1647 case LTTNG_KERNEL_ABI_KPROBE:
1648 module_put(event_priv->desc->owner);
1649 lttng_kprobes_destroy_event_private(event_recorder);
1650 break;
1651
1652 case LTTNG_KERNEL_ABI_KRETPROBE:
1653 module_put(event_priv->desc->owner);
1654 lttng_kretprobes_destroy_private(event_recorder);
1655 break;
1656
1657 case LTTNG_KERNEL_ABI_SYSCALL:
1658 break;
1659
1660 case LTTNG_KERNEL_ABI_UPROBE:
1661 module_put(event_priv->desc->owner);
1662 lttng_uprobes_destroy_event_private(event_recorder);
1663 break;
1664
1665 case LTTNG_KERNEL_ABI_FUNCTION:
1666 lttng_fallthrough;
1667 case LTTNG_KERNEL_ABI_NOOP:
1668 lttng_fallthrough;
1669 default:
1670 WARN_ON_ONCE(1);
1671 }
1672 list_del(&event_recorder->priv->node);
1673 kmem_cache_free(event_recorder_private_cache, event_recorder->priv);
1674 kmem_cache_free(event_recorder_cache, event_recorder);
1675 break;
1676 }
1677 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
1678 {
1679 struct lttng_kernel_event_notifier *event_notifier =
1680 container_of(event, struct lttng_kernel_event_notifier, parent);
1681
1682 switch (event_notifier->priv->parent.instrumentation) {
1683 case LTTNG_KERNEL_ABI_TRACEPOINT:
1684 lttng_event_desc_put(event_notifier->priv->parent.desc);
1685 break;
1686
1687 case LTTNG_KERNEL_ABI_KPROBE:
1688 module_put(event_notifier->priv->parent.desc->owner);
1689 lttng_kprobes_destroy_event_notifier_private(event_notifier);
1690 break;
1691
1692 case LTTNG_KERNEL_ABI_SYSCALL:
1693 break;
1694
1695 case LTTNG_KERNEL_ABI_UPROBE:
1696 module_put(event_notifier->priv->parent.desc->owner);
1697 lttng_uprobes_destroy_event_notifier_private(event_notifier);
1698 break;
1699
1700 case LTTNG_KERNEL_ABI_KRETPROBE:
1701 lttng_fallthrough;
1702 case LTTNG_KERNEL_ABI_FUNCTION:
1703 lttng_fallthrough;
1704 case LTTNG_KERNEL_ABI_NOOP:
1705 lttng_fallthrough;
1706 default:
1707 WARN_ON_ONCE(1);
1708 }
1709 list_del(&event_notifier->priv->node);
1710 kmem_cache_free(event_notifier_private_cache, event_notifier->priv);
1711 kmem_cache_free(event_notifier_cache, event_notifier);
1712 break;
1713 }
1714 default:
1715 WARN_ON_ONCE(1);
1716 }
1717 }
1718
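/*
 * Return the id tracker matching the given tracker type for this session,
 * or NULL if the tracker type is unknown.
 */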
1719 struct lttng_kernel_id_tracker *get_tracker(struct lttng_kernel_session *session,
1720 enum tracker_type tracker_type)
1721 {
1722 switch (tracker_type) {
1723 case TRACKER_PID:
1724 return &session->pid_tracker;
1725 case TRACKER_VPID:
1726 return &session->vpid_tracker;
1727 case TRACKER_UID:
1728 return &session->uid_tracker;
1729 case TRACKER_VUID:
1730 return &session->vuid_tracker;
1731 case TRACKER_GID:
1732 return &session->gid_tracker;
1733 case TRACKER_VGID:
1734 return &session->vgid_tracker;
1735 default:
1736 WARN_ON_ONCE(1);
1737 return NULL;
1738 }
1739 }
1740
1741 int lttng_session_track_id(struct lttng_kernel_session *session,
1742 enum tracker_type tracker_type, int id)
1743 {
1744 struct lttng_kernel_id_tracker *tracker;
1745 int ret;
1746
1747 tracker = get_tracker(session, tracker_type);
1748 if (!tracker)
1749 return -EINVAL;
1750 if (id < -1)
1751 return -EINVAL;
1752 mutex_lock(&sessions_mutex);
1753 if (id == -1) {
1754 /* track all ids: destroy tracker. */
1755 lttng_id_tracker_destroy(tracker, true);
1756 ret = 0;
1757 } else {
1758 ret = lttng_id_tracker_add(tracker, id);
1759 }
1760 mutex_unlock(&sessions_mutex);
1761 return ret;
1762 }
1763
1764 int lttng_session_untrack_id(struct lttng_kernel_session *session,
1765 enum tracker_type tracker_type, int id)
1766 {
1767 struct lttng_kernel_id_tracker *tracker;
1768 int ret;
1769
1770 tracker = get_tracker(session, tracker_type);
1771 if (!tracker)
1772 return -EINVAL;
1773 if (id < -1)
1774 return -EINVAL;
1775 mutex_lock(&sessions_mutex);
1776 if (id == -1) {
1777 /* untrack all ids: replace by empty tracker. */
1778 ret = lttng_id_tracker_empty_set(tracker);
1779 } else {
1780 ret = lttng_id_tracker_del(tracker, id);
1781 }
1782 mutex_unlock(&sessions_mutex);
1783 return ret;
1784 }
1785
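/*
 * seq_file iterator over the IDs held by a tracker. The sessions mutex is
 * taken in id_list_start() and released in id_list_stop().
 */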
1786 static
1787 void *id_list_start(struct seq_file *m, loff_t *pos)
1788 {
1789 struct lttng_kernel_id_tracker *id_tracker = m->private;
1790 struct lttng_kernel_id_tracker_rcu *id_tracker_p = id_tracker->p;
1791 struct lttng_id_hash_node *e;
1792 int iter = 0, i;
1793
1794 mutex_lock(&sessions_mutex);
1795 if (id_tracker_p) {
1796 for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
1797 struct hlist_head *head = &id_tracker_p->id_hash[i];
1798
1799 lttng_hlist_for_each_entry(e, head, hlist) {
1800 if (iter++ >= *pos)
1801 return e;
1802 }
1803 }
1804 } else {
1805 /* ID tracker disabled. */
1806 if (iter >= *pos && iter == 0) {
1807 return id_tracker_p; /* empty tracker */
1808 }
1809 iter++;
1810 }
1811 /* End of list */
1812 return NULL;
1813 }
1814
1815 /* Called with sessions_mutex held. */
1816 static
1817 void *id_list_next(struct seq_file *m, void *p, loff_t *ppos)
1818 {
1819 struct lttng_kernel_id_tracker *id_tracker = m->private;
1820 struct lttng_kernel_id_tracker_rcu *id_tracker_p = id_tracker->p;
1821 struct lttng_id_hash_node *e;
1822 int iter = 0, i;
1823
1824 (*ppos)++;
1825 if (id_tracker_p) {
1826 for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
1827 struct hlist_head *head = &id_tracker_p->id_hash[i];
1828
1829 lttng_hlist_for_each_entry(e, head, hlist) {
1830 if (iter++ >= *ppos)
1831 return e;
1832 }
1833 }
1834 } else {
1835 /* ID tracker disabled. */
1836 if (iter >= *ppos && iter == 0)
1837 return p; /* empty tracker */
1838 iter++;
1839 }
1840
1841 /* End of list */
1842 return NULL;
1843 }
1844
1845 static
1846 void id_list_stop(struct seq_file *m, void *p)
1847 {
1848 mutex_unlock(&sessions_mutex);
1849 }
1850
1851 static
1852 int id_list_show(struct seq_file *m, void *p)
1853 {
1854 struct lttng_kernel_id_tracker *id_tracker = m->private;
1855 struct lttng_kernel_id_tracker_rcu *id_tracker_p = id_tracker->p;
1856 int id;
1857
1858 if (p == id_tracker_p) {
1859 /* Tracker disabled. */
1860 id = -1;
1861 } else {
1862 const struct lttng_id_hash_node *e = p;
1863
1864 id = lttng_id_tracker_get_node_id(e);
1865 }
1866 switch (id_tracker->priv->tracker_type) {
1867 case TRACKER_PID:
1868 seq_printf(m, "process { pid = %d; };\n", id);
1869 break;
1870 case TRACKER_VPID:
1871 seq_printf(m, "process { vpid = %d; };\n", id);
1872 break;
1873 case TRACKER_UID:
1874 seq_printf(m, "user { uid = %d; };\n", id);
1875 break;
1876 case TRACKER_VUID:
1877 seq_printf(m, "user { vuid = %d; };\n", id);
1878 break;
1879 case TRACKER_GID:
1880 seq_printf(m, "group { gid = %d; };\n", id);
1881 break;
1882 case TRACKER_VGID:
1883 seq_printf(m, "group { vgid = %d; };\n", id);
1884 break;
1885 default:
1886 seq_printf(m, "UNKNOWN { field = %d };\n", id);
1887 }
1888 return 0;
1889 }
1890
1891 static
1892 const struct seq_operations lttng_tracker_ids_list_seq_ops = {
1893 .start = id_list_start,
1894 .next = id_list_next,
1895 .stop = id_list_stop,
1896 .show = id_list_show,
1897 };
1898
1899 static
1900 int lttng_tracker_ids_list_open(struct inode *inode, struct file *file)
1901 {
1902 return seq_open(file, &lttng_tracker_ids_list_seq_ops);
1903 }
1904
1905 static
1906 int lttng_tracker_ids_list_release(struct inode *inode, struct file *file)
1907 {
1908 struct seq_file *m = file->private_data;
1909 struct lttng_kernel_id_tracker *id_tracker = m->private;
1910 int ret;
1911
1912 WARN_ON_ONCE(!id_tracker);
1913 ret = seq_release(inode, file);
1914 if (!ret)
1915 fput(id_tracker->priv->session->priv->file);
1916 return ret;
1917 }
1918
1919 const struct file_operations lttng_tracker_ids_list_fops = {
1920 .owner = THIS_MODULE,
1921 .open = lttng_tracker_ids_list_open,
1922 .read = seq_read,
1923 .llseek = seq_lseek,
1924 .release = lttng_tracker_ids_list_release,
1925 };
1926
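/*
 * Create an anonymous file listing the IDs currently tracked for the given
 * tracker type and install it as a new file descriptor. A reference on the
 * session file is held for the lifetime of the listing file.
 * Returns the new file descriptor on success, a negative error on failure.
 */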
1927 int lttng_session_list_tracker_ids(struct lttng_kernel_session *session,
1928 enum tracker_type tracker_type)
1929 {
1930 struct file *tracker_ids_list_file;
1931 struct seq_file *m;
1932 int file_fd, ret;
1933
1934 file_fd = lttng_get_unused_fd();
1935 if (file_fd < 0) {
1936 ret = file_fd;
1937 goto fd_error;
1938 }
1939
1940 tracker_ids_list_file = anon_inode_getfile("[lttng_tracker_ids_list]",
1941 &lttng_tracker_ids_list_fops,
1942 NULL, O_RDWR);
1943 if (IS_ERR(tracker_ids_list_file)) {
1944 ret = PTR_ERR(tracker_ids_list_file);
1945 goto file_error;
1946 }
1947 if (!atomic_long_add_unless(&session->priv->file->f_count, 1, LONG_MAX)) {
1948 ret = -EOVERFLOW;
1949 goto refcount_error;
1950 }
1951 ret = lttng_tracker_ids_list_fops.open(NULL, tracker_ids_list_file);
1952 if (ret < 0)
1953 goto open_error;
1954 m = tracker_ids_list_file->private_data;
1955
1956 m->private = get_tracker(session, tracker_type);
1957 BUG_ON(!m->private);
1958 fd_install(file_fd, tracker_ids_list_file);
1959
1960 return file_fd;
1961
1962 open_error:
1963 atomic_long_dec(&session->priv->file->f_count);
1964 refcount_error:
1965 fput(tracker_ids_list_file);
1966 file_error:
1967 put_unused_fd(file_fd);
1968 fd_error:
1969 return ret;
1970 }
1971
1972 /*
1973 * Enabler management.
1974 */
1975 static
1976 int lttng_match_enabler_star_glob(const char *desc_name,
1977 const char *pattern)
1978 {
1979 if (!strutils_star_glob_match(pattern, LTTNG_SIZE_MAX,
1980 desc_name, LTTNG_SIZE_MAX))
1981 return 0;
1982 return 1;
1983 }
1984
1985 static
1986 int lttng_match_enabler_name(const char *desc_name,
1987 const char *name)
1988 {
1989 if (strcmp(desc_name, name))
1990 return 0;
1991 return 1;
1992 }
1993
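/*
 * Check whether an event descriptor matches an enabler, honoring the
 * enabler format (exact name or star glob) and, for syscalls, the
 * entry/exit, native/compat ABI and match criteria.
 * Returns 1 on match, 0 on mismatch, or a negative error code.
 */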
1994 int lttng_desc_match_enabler(const struct lttng_kernel_event_desc *desc,
1995 struct lttng_event_enabler_common *enabler)
1996 {
1997 const char *desc_name, *enabler_name;
1998 bool compat = false, entry = false;
1999
2000 enabler_name = enabler->event_param.name;
2001 switch (enabler->event_param.instrumentation) {
2002 case LTTNG_KERNEL_ABI_TRACEPOINT:
2003 desc_name = desc->event_name;
2004 switch (enabler->format_type) {
2005 case LTTNG_ENABLER_FORMAT_STAR_GLOB:
2006 return lttng_match_enabler_star_glob(desc_name, enabler_name);
2007 case LTTNG_ENABLER_FORMAT_NAME:
2008 return lttng_match_enabler_name(desc_name, enabler_name);
2009 default:
2010 return -EINVAL;
2011 }
2012 break;
2013
2014 case LTTNG_KERNEL_ABI_SYSCALL:
2015 desc_name = desc->event_name;
2016 if (!strncmp(desc_name, "compat_", strlen("compat_"))) {
2017 desc_name += strlen("compat_");
2018 compat = true;
2019 }
2020 if (!strncmp(desc_name, "syscall_exit_",
2021 strlen("syscall_exit_"))) {
2022 desc_name += strlen("syscall_exit_");
2023 } else if (!strncmp(desc_name, "syscall_entry_",
2024 strlen("syscall_entry_"))) {
2025 desc_name += strlen("syscall_entry_");
2026 entry = true;
2027 } else {
2028 WARN_ON_ONCE(1);
2029 return -EINVAL;
2030 }
2031 switch (enabler->event_param.u.syscall.entryexit) {
2032 case LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT:
2033 break;
2034 case LTTNG_KERNEL_ABI_SYSCALL_ENTRY:
2035 if (!entry)
2036 return 0;
2037 break;
2038 case LTTNG_KERNEL_ABI_SYSCALL_EXIT:
2039 if (entry)
2040 return 0;
2041 break;
2042 default:
2043 return -EINVAL;
2044 }
2045 switch (enabler->event_param.u.syscall.abi) {
2046 case LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL:
2047 break;
2048 case LTTNG_KERNEL_ABI_SYSCALL_ABI_NATIVE:
2049 if (compat)
2050 return 0;
2051 break;
2052 case LTTNG_KERNEL_ABI_SYSCALL_ABI_COMPAT:
2053 if (!compat)
2054 return 0;
2055 break;
2056 default:
2057 return -EINVAL;
2058 }
2059 switch (enabler->event_param.u.syscall.match) {
2060 case LTTNG_KERNEL_ABI_SYSCALL_MATCH_NAME:
2061 switch (enabler->format_type) {
2062 case LTTNG_ENABLER_FORMAT_STAR_GLOB:
2063 return lttng_match_enabler_star_glob(desc_name, enabler_name);
2064 case LTTNG_ENABLER_FORMAT_NAME:
2065 return lttng_match_enabler_name(desc_name, enabler_name);
2066 default:
2067 return -EINVAL;
2068 }
2069 break;
2070 case LTTNG_KERNEL_ABI_SYSCALL_MATCH_NR:
2071 return -EINVAL; /* Not implemented. */
2072 default:
2073 return -EINVAL;
2074 }
2075 break;
2076
2077 default:
2078 WARN_ON_ONCE(1);
2079 return -EINVAL;
2080 }
2081 }
2082
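/*
 * An event recorder matches an enabler when the instrumentation types
 * agree, the event descriptor matches the enabler, and both refer to the
 * same channel.
 */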
2083 static
2084 int lttng_event_enabler_match_event(struct lttng_event_recorder_enabler *event_enabler,
2085 struct lttng_kernel_event_recorder *event_recorder)
2086 {
2087 struct lttng_event_enabler_common *base_enabler = lttng_event_recorder_enabler_as_enabler(
2088 event_enabler);
2089
2090 if (base_enabler->event_param.instrumentation != event_recorder->priv->parent.instrumentation)
2091 return 0;
2092 if (lttng_desc_match_enabler(event_recorder->priv->parent.desc, base_enabler)
2093 && event_recorder->chan == event_enabler->chan)
2094 return 1;
2095 else
2096 return 0;
2097 }
2098
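/*
 * An event notifier matches an enabler when the instrumentation types
 * agree, the event descriptor matches the enabler, and both the notifier
 * group and the user token are the same.
 */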
2099 static
2100 int lttng_event_notifier_enabler_match_event_notifier(struct lttng_event_notifier_enabler *event_notifier_enabler,
2101 struct lttng_kernel_event_notifier *event_notifier)
2102 {
2103 struct lttng_event_enabler_common *base_enabler = lttng_event_notifier_enabler_as_enabler(
2104 event_notifier_enabler);
2105
2106 if (base_enabler->event_param.instrumentation != event_notifier->priv->parent.instrumentation)
2107 return 0;
2108 if (lttng_desc_match_enabler(event_notifier->priv->parent.desc, base_enabler)
2109 && event_notifier->priv->group == event_notifier_enabler->group
2110 && event_notifier->priv->parent.user_token == event_notifier_enabler->parent.user_token)
2111 return 1;
2112 else
2113 return 0;
2114 }
2115
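/*
 * Look up the backward reference to the given enabler in an event's
 * enablers reference list. Returns NULL if no reference exists yet.
 */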
2116 static
2117 struct lttng_enabler_ref *lttng_enabler_ref(
2118 struct list_head *enablers_ref_list,
2119 struct lttng_event_enabler_common *enabler)
2120 {
2121 struct lttng_enabler_ref *enabler_ref;
2122
2123 list_for_each_entry(enabler_ref, enablers_ref_list, node) {
2124 if (enabler_ref->ref == enabler)
2125 return enabler_ref;
2126 }
2127 return NULL;
2128 }
2129
2130 static
2131 void lttng_create_tracepoint_event_if_missing(struct lttng_event_recorder_enabler *event_enabler)
2132 {
2133 struct lttng_kernel_session *session = event_enabler->chan->parent.session;
2134 struct lttng_kernel_probe_desc *probe_desc;
2135 const struct lttng_kernel_event_desc *desc;
2136 int i;
2137 struct list_head *probe_list;
2138
2139 probe_list = lttng_get_probe_list_head();
2140 /*
2141 * For each probe event, if it matches our enabler, create an
2142 * associated lttng_event if not already present.
2144 */
2145 list_for_each_entry(probe_desc, probe_list, head) {
2146 for (i = 0; i < probe_desc->nr_events; i++) {
2147 int found = 0;
2148 struct hlist_head *head;
2149 struct lttng_kernel_event_recorder_private *event_recorder_private;
2150 struct lttng_kernel_event_recorder *event_recorder;
2151
2152 desc = probe_desc->event_desc[i];
2153 if (!lttng_desc_match_enabler(desc,
2154 lttng_event_recorder_enabler_as_enabler(event_enabler)))
2155 continue;
2156
2157 /*
2158 * Check if already created.
2159 */
2160 head = utils_borrow_hash_table_bucket(
2161 session->priv->events_ht.table, LTTNG_EVENT_HT_SIZE,
2162 desc->event_name);
2163 lttng_hlist_for_each_entry(event_recorder_private, head, hlist) {
2164 if (event_recorder_private->parent.desc == desc
2165 && event_recorder_private->pub->chan == event_enabler->chan)
2166 found = 1;
2167 }
2168 if (found)
2169 continue;
2170
2171 /*
2172 * We need to create an event for this
2173 * event probe.
2174 */
2175 event_recorder = _lttng_kernel_event_recorder_create(event_enabler, desc);
2176 if (!event_recorder) {
2177 printk(KERN_INFO "LTTng: Unable to create event %s\n",
2178 probe_desc->event_desc[i]->event_name);
2179 }
2180 }
2181 }
2182 }
2183
2184 static
2185 void lttng_create_tracepoint_event_notifier_if_missing(struct lttng_event_notifier_enabler *event_notifier_enabler)
2186 {
2187 struct lttng_event_notifier_group *event_notifier_group = event_notifier_enabler->group;
2188 struct lttng_kernel_probe_desc *probe_desc;
2189 const struct lttng_kernel_event_desc *desc;
2190 int i;
2191 struct list_head *probe_list;
2192
2193 probe_list = lttng_get_probe_list_head();
2194 /*
2195 * For each probe event, if it matches our enabler, create an
2196 * associated lttng_event_notifier if not already present.
2198 */
2199 list_for_each_entry(probe_desc, probe_list, head) {
2200 for (i = 0; i < probe_desc->nr_events; i++) {
2201 int found = 0;
2202 struct hlist_head *head;
2203 struct lttng_kernel_event_notifier_private *event_notifier_priv;
2204 struct lttng_kernel_event_notifier *event_notifier;
2205
2206 desc = probe_desc->event_desc[i];
2207 if (!lttng_desc_match_enabler(desc,
2208 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)))
2209 continue;
2210
2211 /*
2212 * Check if already created.
2213 */
2214 head = utils_borrow_hash_table_bucket(
2215 event_notifier_group->event_notifiers_ht.table,
2216 LTTNG_EVENT_NOTIFIER_HT_SIZE, desc->event_name);
2217 lttng_hlist_for_each_entry(event_notifier_priv, head, hlist) {
2218 if (event_notifier_priv->parent.desc == desc
2219 && event_notifier_priv->parent.user_token == event_notifier_enabler->parent.user_token)
2220 found = 1;
2221 }
2222 if (found)
2223 continue;
2224
2225 /*
2226 * We need to create an event_notifier for this event probe.
2227 */
2228 event_notifier = _lttng_event_notifier_create(desc,
2229 event_notifier_enabler->parent.user_token,
2230 event_notifier_enabler->error_counter_index,
2231 event_notifier_group, NULL,
2232 LTTNG_KERNEL_ABI_TRACEPOINT);
2233 if (IS_ERR(event_notifier)) {
2234 printk(KERN_INFO "LTTng: Unable to create event_notifier %s\n",
2235 probe_desc->event_desc[i]->event_name);
2236 }
2237 }
2238 }
2239 }
2240
2241 static
2242 void lttng_create_syscall_event_if_missing(struct lttng_event_recorder_enabler *event_enabler)
2243 {
2244 int ret;
2245
2246 ret = lttng_syscalls_register_event(&event_enabler->parent);
2247 WARN_ON_ONCE(ret);
2248 }
2249
2250 static
2251 void lttng_create_syscall_event_notifier_if_missing(struct lttng_event_notifier_enabler *event_notifier_enabler)
2252 {
2253 int ret;
2254
2255 ret = lttng_syscalls_register_event(&event_notifier_enabler->parent);
2256 WARN_ON_ONCE(ret);
2257 ret = lttng_syscalls_create_matching_event_notifiers(event_notifier_enabler);
2258 WARN_ON_ONCE(ret);
2259 }
2260
2261 /*
2262 * Create struct lttng_kernel_event_recorder if it is missing and its
2263 * descriptor is present in the list of tracepoint probes.
2264 * Should be called with sessions mutex held.
2265 */
2266 static
2267 void lttng_create_event_if_missing(struct lttng_event_recorder_enabler *event_enabler)
2268 {
2269 switch (event_enabler->parent.event_param.instrumentation) {
2270 case LTTNG_KERNEL_ABI_TRACEPOINT:
2271 lttng_create_tracepoint_event_if_missing(event_enabler);
2272 break;
2273
2274 case LTTNG_KERNEL_ABI_SYSCALL:
2275 lttng_create_syscall_event_if_missing(event_enabler);
2276 break;
2277
2278 default:
2279 WARN_ON_ONCE(1);
2280 break;
2281 }
2282 }
2283
2284 /*
2285 * Create events associated with an event_enabler (if not already present),
2286 * and add backward reference from the event to the enabler.
2287 * Should be called with sessions mutex held.
2288 */
2289 static
2290 int lttng_event_enabler_ref_events(struct lttng_event_recorder_enabler *event_enabler)
2291 {
2292 struct lttng_kernel_channel_buffer *chan = event_enabler->chan;
2293 struct lttng_kernel_session *session = event_enabler->chan->parent.session;
2294 struct lttng_event_enabler_common *base_enabler = lttng_event_recorder_enabler_as_enabler(event_enabler);
2295 struct lttng_kernel_event_recorder_private *event_recorder_priv;
2296
2297 if (base_enabler->event_param.instrumentation == LTTNG_KERNEL_ABI_SYSCALL &&
2298 base_enabler->event_param.u.syscall.abi == LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL &&
2299 base_enabler->event_param.u.syscall.match == LTTNG_KERNEL_ABI_SYSCALL_MATCH_NAME &&
2300 !strcmp(base_enabler->event_param.name, "*")) {
2301 int enabled = base_enabler->enabled;
2302 enum lttng_kernel_abi_syscall_entryexit entryexit = base_enabler->event_param.u.syscall.entryexit;
2303
2304 if (entryexit == LTTNG_KERNEL_ABI_SYSCALL_ENTRY || entryexit == LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT)
2305 WRITE_ONCE(chan->priv->parent.syscall_table.syscall_all_entry, enabled);
2306
2307 if (entryexit == LTTNG_KERNEL_ABI_SYSCALL_EXIT || entryexit == LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT)
2308 WRITE_ONCE(chan->priv->parent.syscall_table.syscall_all_exit, enabled);
2309 }
2310
2311 /* First ensure that probe events are created for this enabler. */
2312 lttng_create_event_if_missing(event_enabler);
2313
2314 /* For each event matching event_enabler in session event list. */
2315 list_for_each_entry(event_recorder_priv, &session->priv->events, node) {
2316 struct lttng_kernel_event_recorder *event_recorder = event_recorder_priv->pub;
2317 struct lttng_enabler_ref *enabler_ref;
2318
2319 if (!lttng_event_enabler_match_event(event_enabler, event_recorder))
2320 continue;
2321 enabler_ref = lttng_enabler_ref(&event_recorder_priv->parent.enablers_ref_head,
2322 lttng_event_recorder_enabler_as_enabler(event_enabler));
2323 if (!enabler_ref) {
2324 /*
2325 * If no backward ref, create it.
2326 * Add backward ref from event to event_enabler.
2327 */
2328 enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
2329 if (!enabler_ref)
2330 return -ENOMEM;
2331 enabler_ref->ref = lttng_event_recorder_enabler_as_enabler(event_enabler);
2332 list_add(&enabler_ref->node,
2333 &event_recorder_priv->parent.enablers_ref_head);
2334 }
2335
2336 /*
2337 * Link filter bytecodes if not linked yet.
2338 */
2339 lttng_enabler_link_bytecode(event_recorder_priv->parent.desc,
2340 lttng_static_ctx,
2341 &event_recorder_priv->parent.filter_bytecode_runtime_head,
2342 &lttng_event_recorder_enabler_as_enabler(event_enabler)->filter_bytecode_head);
2343 }
2344 return 0;
2345 }
2346
2347 /*
2348 * Create struct lttng_kernel_event_notifier if it is missing and its
2349 * descriptor is present in the list of tracepoint probes.
2350 * Should be called with sessions mutex held.
2351 */
2352 static
2353 void lttng_create_event_notifier_if_missing(struct lttng_event_notifier_enabler *event_notifier_enabler)
2354 {
2355 switch (event_notifier_enabler->parent.event_param.instrumentation) {
2356 case LTTNG_KERNEL_ABI_TRACEPOINT:
2357 lttng_create_tracepoint_event_notifier_if_missing(event_notifier_enabler);
2358 break;
2359
2360 case LTTNG_KERNEL_ABI_SYSCALL:
2361 lttng_create_syscall_event_notifier_if_missing(event_notifier_enabler);
2362 break;
2363
2364 default:
2365 WARN_ON_ONCE(1);
2366 break;
2367 }
2368 }
2369
2370 /*
2371 * Create event_notifiers associated with an event_notifier enabler (if not already present).
2372 */
2373 static
2374 int lttng_event_notifier_enabler_ref_event_notifiers(
2375 struct lttng_event_notifier_enabler *event_notifier_enabler)
2376 {
2377 struct lttng_event_notifier_group *event_notifier_group = event_notifier_enabler->group;
2378 struct lttng_event_enabler_common *base_enabler = lttng_event_notifier_enabler_as_enabler(event_notifier_enabler);
2379 struct lttng_kernel_event_notifier_private *event_notifier_priv;
2380
2381 if (base_enabler->event_param.instrumentation == LTTNG_KERNEL_ABI_SYSCALL &&
2382 base_enabler->event_param.u.syscall.abi == LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL &&
2383 base_enabler->event_param.u.syscall.match == LTTNG_KERNEL_ABI_SYSCALL_MATCH_NAME &&
2384 !strcmp(base_enabler->event_param.name, "*")) {
2385
2386 int enabled = base_enabler->enabled;
2387 enum lttng_kernel_abi_syscall_entryexit entryexit = base_enabler->event_param.u.syscall.entryexit;
2388
2389 if (entryexit == LTTNG_KERNEL_ABI_SYSCALL_ENTRY || entryexit == LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT)
2390 WRITE_ONCE(event_notifier_group->syscall_table.syscall_all_entry, enabled);
2391
2392 if (entryexit == LTTNG_KERNEL_ABI_SYSCALL_EXIT || entryexit == LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT)
2393 WRITE_ONCE(event_notifier_group->syscall_table.syscall_all_exit, enabled);
2394
2395 }
2396
2397 /* First ensure that probe event_notifiers are created for this enabler. */
2398 lttng_create_event_notifier_if_missing(event_notifier_enabler);
2399
2400 /* Link the created event_notifier with its associated enabler. */
2401 list_for_each_entry(event_notifier_priv, &event_notifier_group->event_notifiers_head, node) {
2402 struct lttng_kernel_event_notifier *event_notifier = event_notifier_priv->pub;
2403 struct lttng_enabler_ref *enabler_ref;
2404
2405 if (!lttng_event_notifier_enabler_match_event_notifier(event_notifier_enabler, event_notifier))
2406 continue;
2407
2408 enabler_ref = lttng_enabler_ref(&event_notifier_priv->parent.enablers_ref_head,
2409 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler));
2410 if (!enabler_ref) {
2411 /*
2412 * If no backward ref, create it.
2413 * Add backward ref from event_notifier to enabler.
2414 */
2415 enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
2416 if (!enabler_ref)
2417 return -ENOMEM;
2418
2419 enabler_ref->ref = lttng_event_notifier_enabler_as_enabler(
2420 event_notifier_enabler);
2421 list_add(&enabler_ref->node,
2422 &event_notifier_priv->parent.enablers_ref_head);
2423 }
2424
2425 /*
2426 * Link filter bytecodes if not linked yet.
2427 */
2428 lttng_enabler_link_bytecode(event_notifier_priv->parent.desc,
2429 lttng_static_ctx, &event_notifier_priv->parent.filter_bytecode_runtime_head,
2430 &lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->filter_bytecode_head);
2431
2432 /* Link capture bytecodes if not linked yet. */
2433 lttng_enabler_link_bytecode(event_notifier_priv->parent.desc,
2434 lttng_static_ctx, &event_notifier_priv->capture_bytecode_runtime_head,
2435 &event_notifier_enabler->capture_bytecode_head);
2436
2437 event_notifier_priv->num_captures = event_notifier_enabler->num_captures;
2438 }
2439 return 0;
2440 }
2441
2442 /*
2443 * Called at module load: connect the probe on all enablers matching
2444 * this event.
2445 * Called with sessions lock held.
2446 */
2447 int lttng_fix_pending_events(void)
2448 {
2449 struct lttng_kernel_session_private *session_priv;
2450
2451 list_for_each_entry(session_priv, &sessions, list)
2452 lttng_session_lazy_sync_event_enablers(session_priv->pub);
2453 return 0;
2454 }
2455
2456 static bool lttng_event_notifier_group_has_active_event_notifiers(
2457 struct lttng_event_notifier_group *event_notifier_group)
2458 {
2459 struct lttng_event_notifier_enabler *event_notifier_enabler;
2460
2461 list_for_each_entry(event_notifier_enabler, &event_notifier_group->enablers_head,
2462 node) {
2463 if (event_notifier_enabler->parent.enabled)
2464 return true;
2465 }
2466 return false;
2467 }
2468
2469 bool lttng_event_notifier_active(void)
2470 {
2471 struct lttng_event_notifier_group *event_notifier_group;
2472
2473 list_for_each_entry(event_notifier_group, &event_notifier_groups, node) {
2474 if (lttng_event_notifier_group_has_active_event_notifiers(event_notifier_group))
2475 return true;
2476 }
2477 return false;
2478 }
2479
2480 int lttng_fix_pending_event_notifiers(void)
2481 {
2482 struct lttng_event_notifier_group *event_notifier_group;
2483
2484 list_for_each_entry(event_notifier_group, &event_notifier_groups, node)
2485 lttng_event_notifier_group_sync_enablers(event_notifier_group);
2486 return 0;
2487 }
2488
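/*
 * Allocate and initialize an event recorder enabler targeting the given
 * buffer channel. The enabler starts disabled and is not published to the
 * session until lttng_event_enabler_session_add() is called.
 */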
2489 struct lttng_event_recorder_enabler *lttng_event_recorder_enabler_create(
2490 enum lttng_enabler_format_type format_type,
2491 struct lttng_kernel_abi_event *event_param,
2492 struct lttng_kernel_channel_buffer *chan)
2493 {
2494 struct lttng_event_recorder_enabler *event_enabler;
2495
2496 event_enabler = kzalloc(sizeof(*event_enabler), GFP_KERNEL);
2497 if (!event_enabler)
2498 return NULL;
2499 event_enabler->parent.enabler_type = LTTNG_EVENT_ENABLER_TYPE_RECORDER;
2500 event_enabler->parent.format_type = format_type;
2501 INIT_LIST_HEAD(&event_enabler->parent.filter_bytecode_head);
2502 memcpy(&event_enabler->parent.event_param, event_param,
2503 sizeof(event_enabler->parent.event_param));
2504 event_enabler->chan = chan;
2505 /* ctx left NULL */
2506 event_enabler->parent.enabled = 0;
2507 return event_enabler;
2508 }
2509
2510 void lttng_event_enabler_session_add(struct lttng_kernel_session *session,
2511 struct lttng_event_recorder_enabler *event_enabler)
2512 {
2513 mutex_lock(&sessions_mutex);
2514 list_add(&event_enabler->node, &session->priv->enablers_head);
2515 event_enabler->published = true;
2516 lttng_session_lazy_sync_event_enablers(session);
2517 mutex_unlock(&sessions_mutex);
2518 }
2519
2520 int lttng_event_enabler_enable(struct lttng_event_enabler_common *event_enabler)
2521 {
2522 mutex_lock(&sessions_mutex);
2523 event_enabler->enabled = 1;
2524 lttng_event_enabler_sync(event_enabler);
2525 mutex_unlock(&sessions_mutex);
2526 return 0;
2527 }
2528
2529 int lttng_event_enabler_disable(struct lttng_event_enabler_common *event_enabler)
2530 {
2531 mutex_lock(&sessions_mutex);
2532 event_enabler->enabled = 0;
2533 lttng_event_enabler_sync(event_enabler);
2534 mutex_unlock(&sessions_mutex);
2535 return 0;
2536 }
2537
2538 static
2539 int lttng_enabler_attach_filter_bytecode(struct lttng_event_enabler_common *enabler,
2540 struct lttng_kernel_abi_filter_bytecode __user *bytecode)
2541 {
2542 struct lttng_kernel_bytecode_node *bytecode_node;
2543 uint32_t bytecode_len;
2544 int ret;
2545
2546 ret = get_user(bytecode_len, &bytecode->len);
2547 if (ret)
2548 return ret;
2549 bytecode_node = lttng_kvzalloc(sizeof(*bytecode_node) + bytecode_len,
2550 GFP_KERNEL);
2551 if (!bytecode_node)
2552 return -ENOMEM;
2553 ret = copy_from_user(&bytecode_node->bc, bytecode,
2554 sizeof(*bytecode) + bytecode_len);
2555 if (ret)
2556 goto error_free;
2557
2558 bytecode_node->type = LTTNG_KERNEL_BYTECODE_TYPE_FILTER;
2559 bytecode_node->enabler = enabler;
2560 /* Enforce length based on allocated size */
2561 bytecode_node->bc.len = bytecode_len;
2562 list_add_tail(&bytecode_node->node, &enabler->filter_bytecode_head);
2563
2564 return 0;
2565
2566 error_free:
2567 lttng_kvfree(bytecode_node);
2568 return ret;
2569 }
2570
2571 int lttng_event_enabler_attach_filter_bytecode(struct lttng_event_enabler_common *event_enabler,
2572 struct lttng_kernel_abi_filter_bytecode __user *bytecode)
2573 {
2574 int ret;
2575 ret = lttng_enabler_attach_filter_bytecode(event_enabler, bytecode);
2576 if (ret)
2577 goto error;
2578 lttng_event_enabler_sync(event_enabler);
2579 return 0;
2580
2581 error:
2582 return ret;
2583 }
2584
2585 int lttng_event_add_callsite(struct lttng_kernel_event_common *event,
2586 struct lttng_kernel_abi_event_callsite __user *callsite)
2587 {
2588
2589 switch (event->priv->instrumentation) {
2590 case LTTNG_KERNEL_ABI_UPROBE:
2591 return lttng_uprobes_event_add_callsite(event, callsite);
2592 default:
2593 return -EINVAL;
2594 }
2595 }
2596
2597 static
2598 void lttng_enabler_destroy(struct lttng_event_enabler_common *enabler)
2599 {
2600 struct lttng_kernel_bytecode_node *filter_node, *tmp_filter_node;
2601
2602 /* Destroy filter bytecode */
2603 list_for_each_entry_safe(filter_node, tmp_filter_node,
2604 &enabler->filter_bytecode_head, node) {
2605 lttng_kvfree(filter_node);
2606 }
2607 }
2608
2609 void lttng_event_enabler_destroy(struct lttng_event_enabler_common *event_enabler)
2610 {
2611 switch (event_enabler->enabler_type) {
2612 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
2613 {
2614 struct lttng_event_recorder_enabler *event_recorder_enabler =
2615 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
2616
2617 lttng_enabler_destroy(event_enabler);
2618 if (event_recorder_enabler->published)
2619 list_del(&event_recorder_enabler->node);
2620 kfree(event_recorder_enabler);
2621 break;
2622 }
2623 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
2624 {
2625 struct lttng_event_notifier_enabler *event_notifier_enabler =
2626 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
2627
2628 list_del(&event_notifier_enabler->node);
2629 lttng_enabler_destroy(event_enabler);
2630 kfree(event_notifier_enabler);
2631 break;
2632 }
2633 default:
2634 WARN_ON_ONCE(1);
2635 }
2636 }
2637
2638 struct lttng_event_notifier_enabler *lttng_event_notifier_enabler_create(
2639 struct lttng_event_notifier_group *event_notifier_group,
2640 enum lttng_enabler_format_type format_type,
2641 struct lttng_kernel_abi_event_notifier *event_notifier_param)
2642 {
2643 struct lttng_event_notifier_enabler *event_notifier_enabler;
2644
2645 event_notifier_enabler = kzalloc(sizeof(*event_notifier_enabler), GFP_KERNEL);
2646 if (!event_notifier_enabler)
2647 return NULL;
2648
2649 event_notifier_enabler->parent.enabler_type = LTTNG_EVENT_ENABLER_TYPE_NOTIFIER;
2650 event_notifier_enabler->parent.format_type = format_type;
2651 INIT_LIST_HEAD(&event_notifier_enabler->parent.filter_bytecode_head);
2652 INIT_LIST_HEAD(&event_notifier_enabler->capture_bytecode_head);
2653
2654 event_notifier_enabler->error_counter_index = event_notifier_param->error_counter_index;
2655 event_notifier_enabler->num_captures = 0;
2656
2657 memcpy(&event_notifier_enabler->parent.event_param, &event_notifier_param->event,
2658 sizeof(event_notifier_enabler->parent.event_param));
2659
2660 event_notifier_enabler->parent.enabled = 0;
2661 event_notifier_enabler->parent.user_token = event_notifier_param->event.token;
2662 event_notifier_enabler->group = event_notifier_group;
2663
2664 mutex_lock(&sessions_mutex);
2665 list_add(&event_notifier_enabler->node, &event_notifier_enabler->group->enablers_head);
2666 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2667
2668 mutex_unlock(&sessions_mutex);
2669
2670 return event_notifier_enabler;
2671 }
2672
2673 int lttng_event_notifier_enabler_enable(
2674 struct lttng_event_notifier_enabler *event_notifier_enabler)
2675 {
2676 mutex_lock(&sessions_mutex);
2677 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 1;
2678 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2679 mutex_unlock(&sessions_mutex);
2680 return 0;
2681 }
2682
2683 int lttng_event_notifier_enabler_disable(
2684 struct lttng_event_notifier_enabler *event_notifier_enabler)
2685 {
2686 mutex_lock(&sessions_mutex);
2687 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 0;
2688 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2689 mutex_unlock(&sessions_mutex);
2690 return 0;
2691 }
2692
2693 int lttng_event_notifier_enabler_attach_capture_bytecode(
2694 struct lttng_event_notifier_enabler *event_notifier_enabler,
2695 struct lttng_kernel_abi_capture_bytecode __user *bytecode)
2696 {
2697 struct lttng_kernel_bytecode_node *bytecode_node;
2698 struct lttng_event_enabler_common *enabler =
2699 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler);
2700 uint32_t bytecode_len;
2701 int ret;
2702
2703 ret = get_user(bytecode_len, &bytecode->len);
2704 if (ret)
2705 return ret;
2706
2707 bytecode_node = lttng_kvzalloc(sizeof(*bytecode_node) + bytecode_len,
2708 GFP_KERNEL);
2709 if (!bytecode_node)
2710 return -ENOMEM;
2711
2712 ret = copy_from_user(&bytecode_node->bc, bytecode,
2713 sizeof(*bytecode) + bytecode_len);
2714 if (ret)
2715 goto error_free;
2716
2717 bytecode_node->type = LTTNG_KERNEL_BYTECODE_TYPE_CAPTURE;
2718 bytecode_node->enabler = enabler;
2719
2720 /* Enforce length based on allocated size */
2721 bytecode_node->bc.len = bytecode_len;
2722 list_add_tail(&bytecode_node->node, &event_notifier_enabler->capture_bytecode_head);
2723
2724 event_notifier_enabler->num_captures++;
2725
2726 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2727 goto end;
2728
2729 error_free:
2730 lttng_kvfree(bytecode_node);
2731 end:
2732 return ret;
2733 }
2734
2735 /*
2736 * lttng_session_sync_event_enablers should be called just before starting a
2737 * session.
2738 * Should be called with sessions mutex held.
2739 */
2740 static
2741 void lttng_session_sync_event_enablers(struct lttng_kernel_session *session)
2742 {
2743 struct lttng_event_recorder_enabler *event_enabler;
2744 struct lttng_kernel_event_recorder_private *event_recorder_priv;
2745
2746 list_for_each_entry(event_enabler, &session->priv->enablers_head, node)
2747 lttng_event_enabler_ref_events(event_enabler);
2748 /*
2749 * For each event, if at least one of its enablers is enabled,
2750 * and its channel and session transient states are enabled, we
2751 * enable the event, else we disable it.
2752 */
2753 list_for_each_entry(event_recorder_priv, &session->priv->events, node) {
2754 struct lttng_kernel_event_recorder *event_recorder = event_recorder_priv->pub;
2755 struct lttng_enabler_ref *enabler_ref;
2756 struct lttng_kernel_bytecode_runtime *runtime;
2757 int enabled = 0, has_enablers_without_filter_bytecode = 0;
2758 int nr_filters = 0;
2759
2760 switch (event_recorder_priv->parent.instrumentation) {
2761 case LTTNG_KERNEL_ABI_TRACEPOINT:
2762 lttng_fallthrough;
2763 case LTTNG_KERNEL_ABI_SYSCALL:
2764 /* Enable events */
2765 list_for_each_entry(enabler_ref,
2766 &event_recorder_priv->parent.enablers_ref_head, node) {
2767 if (enabler_ref->ref->enabled) {
2768 enabled = 1;
2769 break;
2770 }
2771 }
2772 break;
2773
2774 default:
2775 /* Not handled with lazy sync. */
2776 continue;
2777 }
2778 /*
2779 * Enabled state is based on union of enablers, with
2780 * intersection of session and channel transient enable
2781 * states.
2782 */
2783 enabled = enabled && session->priv->tstate && event_recorder->chan->priv->parent.tstate;
2784
2785 WRITE_ONCE(event_recorder->parent.enabled, enabled);
2786 /*
2787 * Sync tracepoint registration with event enabled
2788 * state.
2789 */
2790 if (enabled) {
2791 register_event_recorder(event_recorder);
2792 } else {
2793 _lttng_event_recorder_unregister(event_recorder);
2794 }
2795
2796 /* Check if the event has enablers without filter bytecode. */
2797 list_for_each_entry(enabler_ref,
2798 &event_recorder_priv->parent.enablers_ref_head, node) {
2799 if (enabler_ref->ref->enabled
2800 && list_empty(&enabler_ref->ref->filter_bytecode_head)) {
2801 has_enablers_without_filter_bytecode = 1;
2802 break;
2803 }
2804 }
2805 event_recorder_priv->parent.has_enablers_without_filter_bytecode =
2806 has_enablers_without_filter_bytecode;
2807
2808 /* Enable filters */
2809 list_for_each_entry(runtime,
2810 &event_recorder_priv->parent.filter_bytecode_runtime_head, node) {
2811 lttng_bytecode_sync_state(runtime);
2812 nr_filters++;
2813 }
2814 WRITE_ONCE(event_recorder_priv->parent.pub->eval_filter,
2815 !(has_enablers_without_filter_bytecode || !nr_filters));
2816 }
2817 }
2818
2819 /*
2820 * Apply enablers to session events, adding events to session if need
2821 * be. It is required after each modification applied to an active
2822 * session, and right before session "start".
2823 * "lazy" sync means we only sync if required.
2824 * Should be called with sessions mutex held.
2825 */
2826 static
2827 void lttng_session_lazy_sync_event_enablers(struct lttng_kernel_session *session)
2828 {
2829 /* We can skip if session is not active */
2830 if (!session->active)
2831 return;
2832 lttng_session_sync_event_enablers(session);
2833 }
2834
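/*
 * Apply the enablers of an event notifier group to its event notifiers:
 * create missing event notifiers, then sync the enabled state, filter
 * bytecode and capture bytecode of each of them.
 * Should be called with sessions mutex held.
 */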
2835 static
2836 void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group)
2837 {
2838 struct lttng_event_notifier_enabler *event_notifier_enabler;
2839 struct lttng_kernel_event_notifier_private *event_notifier_priv;
2840
2841 list_for_each_entry(event_notifier_enabler, &event_notifier_group->enablers_head, node)
2842 lttng_event_notifier_enabler_ref_event_notifiers(event_notifier_enabler);
2843
2844 /*
2845 * For each event_notifier, if at least one of its enablers is enabled,
2846 * we enable the event_notifier, else we disable it.
2847 */
2848 list_for_each_entry(event_notifier_priv, &event_notifier_group->event_notifiers_head, node) {
2849 struct lttng_kernel_event_notifier *event_notifier = event_notifier_priv->pub;
2850 struct lttng_enabler_ref *enabler_ref;
2851 struct lttng_kernel_bytecode_runtime *runtime;
2852 int enabled = 0, has_enablers_without_filter_bytecode = 0;
2853 int nr_filters = 0, nr_captures = 0;
2854
2855 switch (event_notifier_priv->parent.instrumentation) {
2856 case LTTNG_KERNEL_ABI_TRACEPOINT:
2857 lttng_fallthrough;
2858 case LTTNG_KERNEL_ABI_SYSCALL:
2859 /* Enable event_notifiers */
2860 list_for_each_entry(enabler_ref,
2861 &event_notifier_priv->parent.enablers_ref_head, node) {
2862 if (enabler_ref->ref->enabled) {
2863 enabled = 1;
2864 break;
2865 }
2866 }
2867 break;
2868
2869 default:
2870 /* Not handled with sync. */
2871 continue;
2872 }
2873
2874 WRITE_ONCE(event_notifier->parent.enabled, enabled);
2875 /*
2876 * Sync tracepoint registration with event_notifier enabled
2877 * state.
2878 */
2879 if (enabled) {
2880 if (!event_notifier_priv->parent.registered)
2881 register_event_notifier(event_notifier);
2882 } else {
2883 if (event_notifier_priv->parent.registered)
2884 _lttng_event_notifier_unregister(event_notifier);
2885 }
2886
2887 /* Check if the event notifier has enablers without filter bytecode. */
2888 list_for_each_entry(enabler_ref,
2889 &event_notifier_priv->parent.enablers_ref_head, node) {
2890 if (enabler_ref->ref->enabled
2891 && list_empty(&enabler_ref->ref->filter_bytecode_head)) {
2892 has_enablers_without_filter_bytecode = 1;
2893 break;
2894 }
2895 }
2896 event_notifier_priv->parent.has_enablers_without_filter_bytecode =
2897 has_enablers_without_filter_bytecode;
2898
2899 /* Enable filters */
2900 list_for_each_entry(runtime,
2901 &event_notifier_priv->parent.filter_bytecode_runtime_head, node) {
2902 lttng_bytecode_sync_state(runtime);
2903 nr_filters++;
2904 }
2905 WRITE_ONCE(event_notifier_priv->parent.pub->eval_filter,
2906 !(has_enablers_without_filter_bytecode || !nr_filters));
2907
2908 /* Enable captures */
2909 list_for_each_entry(runtime,
2910 &event_notifier_priv->capture_bytecode_runtime_head, node) {
2911 lttng_bytecode_sync_state(runtime);
2912 nr_captures++;
2913 }
2914 WRITE_ONCE(event_notifier->eval_capture, !!nr_captures);
2915 }
2916 }
2917
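/*
 * Sync the state owning this enabler: the session for a recorder enabler,
 * the event notifier group for a notifier enabler.
 */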
2918 static
2919 void lttng_event_enabler_sync(struct lttng_event_enabler_common *event_enabler)
2920 {
2921 switch (event_enabler->enabler_type) {
2922 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
2923 {
2924 struct lttng_event_recorder_enabler *event_recorder_enabler =
2925 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
2926 lttng_session_lazy_sync_event_enablers(event_recorder_enabler->chan->parent.session);
2927 break;
2928 }
2929 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
2930 {
2931 struct lttng_event_notifier_enabler *event_notifier_enabler =
2932 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
2933 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2934 break;
2935 }
2936 default:
2937 WARN_ON_ONCE(1);
2938 }
2939 }
2940
2941 /*
2942 * Serialize at most one packet worth of metadata into a metadata
2943 * channel.
2944 * We grab the metadata cache mutex to get exclusive access to our metadata
2945 * buffer and to the metadata cache. Exclusive access to the metadata buffer
2946 * allows us to do racy operations such as checking for remaining space left
2947 * in the packet and writing to it, since mutual exclusion protects us from
2948 * concurrent writes. Mutual exclusion on the metadata cache allows us to
2949 * read the cache content without racing against reallocation of the cache by updates.
2950 * Returns the number of bytes written in the channel, 0 if no data
2951 * was written and a negative value on error.
2952 */
2953 int lttng_metadata_output_channel(struct lttng_metadata_stream *stream,
2954 struct lttng_kernel_ring_buffer_channel *chan, bool *coherent)
2955 {
2956 struct lttng_kernel_ring_buffer_ctx ctx;
2957 int ret = 0;
2958 size_t len, reserve_len;
2959
2960 /*
2961 * Ensure we support multiple get_next / put sequences followed by
2962 * put_next. The metadata cache lock protects reading the metadata
2963 * cache. It can indeed be read concurrently by "get_next_subbuf" and
2964 * "flush" operations on the buffer invoked by different processes.
2965 * Moreover, since the metadata cache memory can be reallocated, we
2966 * need to have exclusive access against updates even though we only
2967 * read it.
2968 */
2969 mutex_lock(&stream->metadata_cache->lock);
2970 WARN_ON(stream->metadata_in < stream->metadata_out);
2971 if (stream->metadata_in != stream->metadata_out)
2972 goto end;
2973
2974 /* Metadata regenerated, change the version. */
2975 if (stream->metadata_cache->version != stream->version)
2976 stream->version = stream->metadata_cache->version;
2977
2978 len = stream->metadata_cache->metadata_written -
2979 stream->metadata_in;
2980 if (!len)
2981 goto end;
2982 reserve_len = min_t(size_t,
2983 stream->transport->ops.priv->packet_avail_size(chan),
2984 len);
2985 lib_ring_buffer_ctx_init(&ctx, chan, reserve_len,
2986 sizeof(char), NULL);
2987 /*
2988 * If reservation failed, return an error to the caller.
2989 */
2990 ret = stream->transport->ops.event_reserve(&ctx);
2991 if (ret != 0) {
2992 printk(KERN_WARNING "LTTng: Metadata event reservation failed\n");
2993 stream->coherent = false;
2994 goto end;
2995 }
2996 stream->transport->ops.event_write(&ctx,
2997 stream->metadata_cache->data + stream->metadata_in,
2998 reserve_len, 1);
2999 stream->transport->ops.event_commit(&ctx);
3000 stream->metadata_in += reserve_len;
3001 if (reserve_len < len)
3002 stream->coherent = false;
3003 else
3004 stream->coherent = true;
3005 ret = reserve_len;
3006
3007 end:
3008 if (coherent)
3009 *coherent = stream->coherent;
3010 mutex_unlock(&stream->metadata_cache->lock);
3011 return ret;
3012 }
3013
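/*
 * Open a metadata transaction: the outermost "begin" takes the metadata
 * cache lock, nested calls only increment the producing refcount.
 */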
3014 static
3015 void lttng_metadata_begin(struct lttng_kernel_session *session)
3016 {
3017 if (atomic_inc_return(&session->priv->metadata_cache->producing) == 1)
3018 mutex_lock(&session->priv->metadata_cache->lock);
3019 }
3020
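/*
 * Close a metadata transaction: the outermost "end" wakes up the metadata
 * stream readers and releases the metadata cache lock.
 */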
3021 static
3022 void lttng_metadata_end(struct lttng_kernel_session *session)
3023 {
3024 WARN_ON_ONCE(!atomic_read(&session->priv->metadata_cache->producing));
3025 if (atomic_dec_return(&session->priv->metadata_cache->producing) == 0) {
3026 struct lttng_metadata_stream *stream;
3027
3028 list_for_each_entry(stream, &session->priv->metadata_cache->metadata_stream, list)
3029 wake_up_interruptible(&stream->read_wait);
3030 mutex_unlock(&session->priv->metadata_cache->lock);
3031 }
3032 }
3033
3034 /*
3035 * Write the metadata to the metadata cache.
3036 * Must be called with sessions_mutex held.
3037 * The metadata cache lock protects us from concurrent read access from
3038 * a thread outputting metadata content to the ring buffer.
3039 * The content of the printf is printed as a single atomic metadata
3040 * transaction.
3041 */
3042 int lttng_metadata_printf(struct lttng_kernel_session *session,
3043 const char *fmt, ...)
3044 {
3045 char *str;
3046 size_t len;
3047 va_list ap;
3048
3049 WARN_ON_ONCE(!LTTNG_READ_ONCE(session->active));
3050
3051 va_start(ap, fmt);
3052 str = kvasprintf(GFP_KERNEL, fmt, ap);
3053 va_end(ap);
3054 if (!str)
3055 return -ENOMEM;
3056
3057 len = strlen(str);
3058 WARN_ON_ONCE(!atomic_read(&session->priv->metadata_cache->producing));
3059 if (session->priv->metadata_cache->metadata_written + len >
3060 session->priv->metadata_cache->cache_alloc) {
3061 char *tmp_cache_realloc;
3062 unsigned int tmp_cache_alloc_size;
3063
3064 tmp_cache_alloc_size = max_t(unsigned int,
3065 session->priv->metadata_cache->cache_alloc + len,
3066 session->priv->metadata_cache->cache_alloc << 1);
3067 tmp_cache_realloc = vzalloc(tmp_cache_alloc_size);
3068 if (!tmp_cache_realloc)
3069 goto err;
3070 if (session->priv->metadata_cache->data) {
3071 memcpy(tmp_cache_realloc,
3072 session->priv->metadata_cache->data,
3073 session->priv->metadata_cache->cache_alloc);
3074 vfree(session->priv->metadata_cache->data);
3075 }
3076
3077 session->priv->metadata_cache->cache_alloc = tmp_cache_alloc_size;
3078 session->priv->metadata_cache->data = tmp_cache_realloc;
3079 }
3080 memcpy(session->priv->metadata_cache->data +
3081 session->priv->metadata_cache->metadata_written,
3082 str, len);
3083 session->priv->metadata_cache->metadata_written += len;
3084 kfree(str);
3085
3086 return 0;
3087
3088 err:
3089 kfree(str);
3090 return -ENOMEM;
3091 }
3092
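/* Emit one level of indentation per nesting level into the metadata cache. */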
3093 static
3094 int print_tabs(struct lttng_kernel_session *session, size_t nesting)
3095 {
3096 size_t i;
3097
3098 for (i = 0; i < nesting; i++) {
3099 int ret;
3100
3101 ret = lttng_metadata_printf(session, " ");
3102 if (ret) {
3103 return ret;
3104 }
3105 }
3106 return 0;
3107 }
3108
3109 static
3110 int lttng_field_name_statedump(struct lttng_kernel_session *session,
3111 const struct lttng_kernel_event_field *field,
3112 size_t nesting)
3113 {
3114 return lttng_metadata_printf(session, " _%s;\n", field->name);
3115 }
3116
3117 static
3118 int _lttng_integer_type_statedump(struct lttng_kernel_session *session,
3119 const struct lttng_kernel_type_integer *type,
3120 enum lttng_kernel_string_encoding parent_encoding,
3121 size_t nesting)
3122 {
3123 int ret;
3124
3125 ret = print_tabs(session, nesting);
3126 if (ret)
3127 return ret;
3128 ret = lttng_metadata_printf(session,
3129 "integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s }",
3130 type->size,
3131 type->alignment,
3132 type->signedness,
3133 (parent_encoding == lttng_kernel_string_encoding_none)
3134 ? "none"
3135 : (parent_encoding == lttng_kernel_string_encoding_UTF8)
3136 ? "UTF8"
3137 : "ASCII",
3138 type->base,
3139 #if __BYTE_ORDER == __BIG_ENDIAN
3140 type->reverse_byte_order ? " byte_order = le;" : ""
3141 #else
3142 type->reverse_byte_order ? " byte_order = be;" : ""
3143 #endif
3144 );
3145 return ret;
3146 }
3147
3148 /*
3149 * Must be called with sessions_mutex held.
3150 */
3151 static
3152 int _lttng_struct_type_statedump(struct lttng_kernel_session *session,
3153 const struct lttng_kernel_type_struct *type,
3154 size_t nesting)
3155 {
3156 const char *prev_field_name = NULL;
3157 int ret;
3158 uint32_t i, nr_fields;
3159 unsigned int alignment;
3160
3161 ret = print_tabs(session, nesting);
3162 if (ret)
3163 return ret;
3164 ret = lttng_metadata_printf(session,
3165 "struct {\n");
3166 if (ret)
3167 return ret;
3168 nr_fields = type->nr_fields;
3169 for (i = 0; i < nr_fields; i++) {
3170 const struct lttng_kernel_event_field *iter_field;
3171
3172 iter_field = type->fields[i];
3173 ret = _lttng_field_statedump(session, iter_field, nesting + 1, &prev_field_name);
3174 if (ret)
3175 return ret;
3176 }
3177 ret = print_tabs(session, nesting);
3178 if (ret)
3179 return ret;
3180 alignment = type->alignment;
3181 if (alignment) {
3182 ret = lttng_metadata_printf(session,
3183 "} align(%u)",
3184 alignment);
3185 } else {
3186 ret = lttng_metadata_printf(session,
3187 "}");
3188 }
3189 return ret;
3190 }
3191
3192 /*
3193 * Must be called with sessions_mutex held.
3194 */
3195 static
3196 int _lttng_struct_field_statedump(struct lttng_kernel_session *session,
3197 const struct lttng_kernel_event_field *field,
3198 size_t nesting)
3199 {
3200 int ret;
3201
3202 ret = _lttng_struct_type_statedump(session,
3203 lttng_kernel_get_type_struct(field->type), nesting);
3204 if (ret)
3205 return ret;
3206 return lttng_field_name_statedump(session, field, nesting);
3207 }
3208
3209 /*
3210 * Must be called with sessions_mutex held.
3211 */
3212 static
3213 int _lttng_variant_type_statedump(struct lttng_kernel_session *session,
3214 const struct lttng_kernel_type_variant *type,
3215 size_t nesting,
3216 const char *prev_field_name)
3217 {
3218 const char *tag_name;
3219 int ret;
3220 uint32_t i, nr_choices;
3221
3222 tag_name = type->tag_name;
3223 if (!tag_name)
3224 tag_name = prev_field_name;
3225 if (!tag_name)
3226 return -EINVAL;
3227 /*
3228 * CTF 1.8 does not allow expressing nonzero variant alignment in a nestable way.
3229 */
3230 if (type->alignment != 0)
3231 return -EINVAL;
3232 ret = print_tabs(session, nesting);
3233 if (ret)
3234 return ret;
3235 ret = lttng_metadata_printf(session,
3236 "variant <_%s> {\n",
3237 tag_name);
3238 if (ret)
3239 return ret;
3240 nr_choices = type->nr_choices;
3241 for (i = 0; i < nr_choices; i++) {
3242 const struct lttng_kernel_event_field *iter_field;
3243
3244 iter_field = type->choices[i];
3245 ret = _lttng_field_statedump(session, iter_field, nesting + 1, NULL);
3246 if (ret)
3247 return ret;
3248 }
3249 ret = print_tabs(session, nesting);
3250 if (ret)
3251 return ret;
3252 ret = lttng_metadata_printf(session,
3253 "}");
3254 return ret;
3255 }
3256
3257 /*
3258 * Must be called with sessions_mutex held.
3259 */
3260 static
3261 int _lttng_variant_field_statedump(struct lttng_kernel_session *session,
3262 const struct lttng_kernel_event_field *field,
3263 size_t nesting,
3264 const char *prev_field_name)
3265 {
3266 int ret;
3267
3268 ret = _lttng_variant_type_statedump(session,
3269 lttng_kernel_get_type_variant(field->type), nesting,
3270 prev_field_name);
3271 if (ret)
3272 return ret;
3273 return lttng_field_name_statedump(session, field, nesting);
3274 }
3275
3276 /*
3277 * Must be called with sessions_mutex held.
3278 */
3279 static
3280 int _lttng_array_field_statedump(struct lttng_kernel_session *session,
3281 const struct lttng_kernel_event_field *field,
3282 size_t nesting)
3283 {
3284 int ret;
3285 const struct lttng_kernel_type_array *array_type;
3286 const struct lttng_kernel_type_common *elem_type;
3287
3288 array_type = lttng_kernel_get_type_array(field->type);
3289 WARN_ON_ONCE(!array_type);
3290
3291 if (array_type->alignment) {
3292 ret = print_tabs(session, nesting);
3293 if (ret)
3294 return ret;
3295 ret = lttng_metadata_printf(session,
3296 "struct { } align(%u) _%s_padding;\n",
3297 array_type->alignment * CHAR_BIT,
3298 field->name);
3299 if (ret)
3300 return ret;
3301 }
3302 /*
3303 * Nested compound types: Only arrays of structures and variants are
3304 * currently supported.
3305 */
3306 elem_type = array_type->elem_type;
3307 switch (elem_type->type) {
3308 case lttng_kernel_type_integer:
3309 case lttng_kernel_type_struct:
3310 case lttng_kernel_type_variant:
3311 ret = _lttng_type_statedump(session, elem_type,
3312 array_type->encoding, nesting);
3313 if (ret)
3314 return ret;
3315 break;
3316
3317 default:
3318 return -EINVAL;
3319 }
3320 ret = lttng_metadata_printf(session,
3321 " _%s[%u];\n",
3322 field->name,
3323 array_type->length);
3324 return ret;
3325 }
3326
3327 /*
3328 * Must be called with sessions_mutex held.
3329 */
3330 static
3331 int _lttng_sequence_field_statedump(struct lttng_kernel_session *session,
3332 const struct lttng_kernel_event_field *field,
3333 size_t nesting,
3334 const char *prev_field_name)
3335 {
3336 int ret;
3337 const char *length_name;
3338 const struct lttng_kernel_type_sequence *sequence_type;
3339 const struct lttng_kernel_type_common *elem_type;
3340
3341 sequence_type = lttng_kernel_get_type_sequence(field->type);
3342 WARN_ON_ONCE(!sequence_type);
3343
3344 length_name = sequence_type->length_name;
3345 if (!length_name)
3346 length_name = prev_field_name;
3347 if (!length_name)
3348 return -EINVAL;
3349
3350 if (sequence_type->alignment) {
3351 ret = print_tabs(session, nesting);
3352 if (ret)
3353 return ret;
3354 ret = lttng_metadata_printf(session,
3355 "struct { } align(%u) _%s_padding;\n",
3356 sequence_type->alignment * CHAR_BIT,
3357 field->name);
3358 if (ret)
3359 return ret;
3360 }
3361
3362 /*
3363 * Nested compound types: Only arrays of structures and variants are
3364 * currently supported.
3365 */
3366 elem_type = sequence_type->elem_type;
3367 switch (elem_type->type) {
3368 case lttng_kernel_type_integer:
3369 case lttng_kernel_type_struct:
3370 case lttng_kernel_type_variant:
3371 ret = _lttng_type_statedump(session, elem_type,
3372 sequence_type->encoding, nesting);
3373 if (ret)
3374 return ret;
3375 break;
3376
3377 default:
3378 return -EINVAL;
3379 }
3380 ret = lttng_metadata_printf(session,
3381 " _%s[ _%s ];\n",
3382 field->name,
3383 length_name);
3384 return ret;
3385 }
3386
3387 /*
3388 * Must be called with sessions_mutex held.
3389 */
3390 static
3391 int _lttng_enum_type_statedump(struct lttng_kernel_session *session,
3392 const struct lttng_kernel_type_enum *type,
3393 size_t nesting)
3394 {
3395 const struct lttng_kernel_enum_desc *enum_desc;
3396 const struct lttng_kernel_type_common *container_type;
3397 int ret;
3398 unsigned int i, nr_entries;
3399
3400 container_type = type->container_type;
3401 if (container_type->type != lttng_kernel_type_integer) {
3402 ret = -EINVAL;
3403 goto end;
3404 }
3405 enum_desc = type->desc;
3406 nr_entries = enum_desc->nr_entries;
3407
3408 ret = print_tabs(session, nesting);
3409 if (ret)
3410 goto end;
3411 ret = lttng_metadata_printf(session, "enum : ");
3412 if (ret)
3413 goto end;
3414 ret = _lttng_integer_type_statedump(session, lttng_kernel_get_type_integer(container_type),
3415 lttng_kernel_string_encoding_none, 0);
3416 if (ret)
3417 goto end;
3418 ret = lttng_metadata_printf(session, " {\n");
3419 if (ret)
3420 goto end;
3421 /* Dump all entries */
3422 for (i = 0; i < nr_entries; i++) {
3423 const struct lttng_kernel_enum_entry *entry = enum_desc->entries[i];
3424 int j, len;
3425
3426 ret = print_tabs(session, nesting + 1);
3427 if (ret)
3428 goto end;
3429 ret = lttng_metadata_printf(session,
3430 "\"");
3431 if (ret)
3432 goto end;
3433 len = strlen(entry->string);
3434 /* Escape the characters '"' and '\\' */
3435 for (j = 0; j < len; j++) {
3436 char c = entry->string[j];
3437
3438 switch (c) {
3439 case '"':
3440 ret = lttng_metadata_printf(session,
3441 "\\\"");
3442 break;
3443 case '\\':
3444 ret = lttng_metadata_printf(session,
3445 "\\\\");
3446 break;
3447 default:
3448 ret = lttng_metadata_printf(session,
3449 "%c", c);
3450 break;
3451 }
3452 if (ret)
3453 goto end;
3454 }
3455 ret = lttng_metadata_printf(session, "\"");
3456 if (ret)
3457 goto end;
3458
3459 if (entry->options.is_auto) {
3460 ret = lttng_metadata_printf(session, ",\n");
3461 if (ret)
3462 goto end;
3463 } else {
3464 ret = lttng_metadata_printf(session,
3465 " = ");
3466 if (ret)
3467 goto end;
3468 if (entry->start.signedness)
3469 ret = lttng_metadata_printf(session,
3470 "%lld", (long long) entry->start.value);
3471 else
3472 ret = lttng_metadata_printf(session,
3473 "%llu", entry->start.value);
3474 if (ret)
3475 goto end;
3476 if (entry->start.signedness == entry->end.signedness &&
3477 entry->start.value
3478 == entry->end.value) {
3479 ret = lttng_metadata_printf(session,
3480 ",\n");
3481 } else {
3482 if (entry->end.signedness) {
3483 ret = lttng_metadata_printf(session,
3484 " ... %lld,\n",
3485 (long long) entry->end.value);
3486 } else {
3487 ret = lttng_metadata_printf(session,
3488 " ... %llu,\n",
3489 entry->end.value);
3490 }
3491 }
3492 if (ret)
3493 goto end;
3494 }
3495 }
3496 ret = print_tabs(session, nesting);
3497 if (ret)
3498 goto end;
3499 ret = lttng_metadata_printf(session, "}");
3500 end:
3501 return ret;
3502 }
3503
3504 /*
3505 * Must be called with sessions_mutex held.
3506 */
3507 static
3508 int _lttng_enum_field_statedump(struct lttng_kernel_session *session,
3509 const struct lttng_kernel_event_field *field,
3510 size_t nesting)
3511 {
3512 int ret;
3513 const struct lttng_kernel_type_enum *enum_type;
3514
3515 enum_type = lttng_kernel_get_type_enum(field->type);
3516 WARN_ON_ONCE(!enum_type);
3517 ret = _lttng_enum_type_statedump(session, enum_type, nesting);
3518 if (ret)
3519 return ret;
3520 return lttng_field_name_statedump(session, field, nesting);
3521 }
3522
3523 static
3524 int _lttng_integer_field_statedump(struct lttng_kernel_session *session,
3525 const struct lttng_kernel_event_field *field,
3526 size_t nesting)
3527 {
3528 int ret;
3529
3530 ret = _lttng_integer_type_statedump(session, lttng_kernel_get_type_integer(field->type),
3531 lttng_kernel_string_encoding_none, nesting);
3532 if (ret)
3533 return ret;
3534 return lttng_field_name_statedump(session, field, nesting);
3535 }
3536
3537 static
3538 int _lttng_string_type_statedump(struct lttng_kernel_session *session,
3539 const struct lttng_kernel_type_string *type,
3540 size_t nesting)
3541 {
3542 int ret;
3543
3544 /* Default encoding is UTF8 */
3545 ret = print_tabs(session, nesting);
3546 if (ret)
3547 return ret;
3548 ret = lttng_metadata_printf(session,
3549 "string%s",
3550 type->encoding == lttng_kernel_string_encoding_ASCII ?
3551 " { encoding = ASCII; }" : "");
3552 return ret;
3553 }
3554
3555 static
3556 int _lttng_string_field_statedump(struct lttng_kernel_session *session,
3557 const struct lttng_kernel_event_field *field,
3558 size_t nesting)
3559 {
3560 const struct lttng_kernel_type_string *string_type;
3561 int ret;
3562
3563 string_type = lttng_kernel_get_type_string(field->type);
3564 WARN_ON_ONCE(!string_type);
3565 ret = _lttng_string_type_statedump(session, string_type, nesting);
3566 if (ret)
3567 return ret;
3568 return lttng_field_name_statedump(session, field, nesting);
3569 }
3570
3571 /*
3572 * Must be called with sessions_mutex held.
3573 */
3574 static
3575 int _lttng_type_statedump(struct lttng_kernel_session *session,
3576 const struct lttng_kernel_type_common *type,
3577 enum lttng_kernel_string_encoding parent_encoding,
3578 size_t nesting)
3579 {
3580 int ret = 0;
3581
3582 switch (type->type) {
3583 case lttng_kernel_type_integer:
3584 ret = _lttng_integer_type_statedump(session,
3585 lttng_kernel_get_type_integer(type),
3586 parent_encoding, nesting);
3587 break;
3588 case lttng_kernel_type_enum:
3589 ret = _lttng_enum_type_statedump(session,
3590 lttng_kernel_get_type_enum(type),
3591 nesting);
3592 break;
3593 case lttng_kernel_type_string:
3594 ret = _lttng_string_type_statedump(session,
3595 lttng_kernel_get_type_string(type),
3596 nesting);
3597 break;
3598 case lttng_kernel_type_struct:
3599 ret = _lttng_struct_type_statedump(session,
3600 lttng_kernel_get_type_struct(type),
3601 nesting);
3602 break;
3603 case lttng_kernel_type_variant:
3604 ret = _lttng_variant_type_statedump(session,
3605 lttng_kernel_get_type_variant(type),
3606 nesting, NULL);
3607 break;
3608
3609 /* Nested arrays and sequences are not supported yet. */
3610 case lttng_kernel_type_array:
3611 case lttng_kernel_type_sequence:
3612 default:
3613 WARN_ON_ONCE(1);
3614 return -EINVAL;
3615 }
3616 return ret;
3617 }
3618
3619 /*
3620 * Must be called with sessions_mutex held.
3621 */
3622 static
3623 int _lttng_field_statedump(struct lttng_kernel_session *session,
3624 const struct lttng_kernel_event_field *field,
3625 size_t nesting,
3626 const char **prev_field_name_p)
3627 {
3628 const char *prev_field_name = NULL;
3629 int ret = 0;
3630
3631 if (prev_field_name_p)
3632 prev_field_name = *prev_field_name_p;
3633 switch (field->type->type) {
3634 case lttng_kernel_type_integer:
3635 ret = _lttng_integer_field_statedump(session, field, nesting);
3636 break;
3637 case lttng_kernel_type_enum:
3638 ret = _lttng_enum_field_statedump(session, field, nesting);
3639 break;
3640 case lttng_kernel_type_string:
3641 ret = _lttng_string_field_statedump(session, field, nesting);
3642 break;
3643 case lttng_kernel_type_struct:
3644 ret = _lttng_struct_field_statedump(session, field, nesting);
3645 break;
3646 case lttng_kernel_type_array:
3647 ret = _lttng_array_field_statedump(session, field, nesting);
3648 break;
3649 case lttng_kernel_type_sequence:
3650 ret = _lttng_sequence_field_statedump(session, field, nesting, prev_field_name);
3651 break;
3652 case lttng_kernel_type_variant:
3653 ret = _lttng_variant_field_statedump(session, field, nesting, prev_field_name);
3654 break;
3655
3656 default:
3657 WARN_ON_ONCE(1);
3658 return -EINVAL;
3659 }
3660 if (prev_field_name_p)
3661 *prev_field_name_p = field->name;
3662 return ret;
3663 }
3664
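/*
 * Dump each context field at nesting level 2.
 * Must be called with sessions_mutex held.
 */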
3665 static
3666 int _lttng_context_metadata_statedump(struct lttng_kernel_session *session,
3667 struct lttng_kernel_ctx *ctx)
3668 {
3669 const char *prev_field_name = NULL;
3670 int ret = 0;
3671 int i;
3672
3673 if (!ctx)
3674 return 0;
3675 for (i = 0; i < ctx->nr_fields; i++) {
3676 const struct lttng_kernel_ctx_field *field = &ctx->fields[i];
3677
3678 ret = _lttng_field_statedump(session, field->event_field, 2, &prev_field_name);
3679 if (ret)
3680 return ret;
3681 }
3682 return ret;
3683 }
3684
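/*
 * Dump each payload field of the event at nesting level 2.
 * Must be called with sessions_mutex held.
 */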
3685 static
3686 int _lttng_fields_metadata_statedump(struct lttng_kernel_session *session,
3687 struct lttng_kernel_event_recorder *event_recorder)
3688 {
3689 const char *prev_field_name = NULL;
3690 const struct lttng_kernel_event_desc *desc = event_recorder->priv->parent.desc;
3691 int ret = 0;
3692 int i;
3693
3694 for (i = 0; i < desc->tp_class->nr_fields; i++) {
3695 const struct lttng_kernel_event_field *field = desc->tp_class->fields[i];
3696
3697 ret = _lttng_field_statedump(session, field, 2, &prev_field_name);
3698 if (ret)
3699 return ret;
3700 }
3701 return ret;
3702 }
3703
3704 /*
3705 * Must be called with sessions_mutex held.
3706 * The entire event metadata is printed as a single atomic metadata
3707 * transaction.
3708 */
3709 static
3710 int _lttng_event_metadata_statedump(struct lttng_kernel_session *session,
3711 struct lttng_kernel_channel_buffer *chan,
3712 struct lttng_kernel_event_recorder *event_recorder)
3713 {
3714 int ret = 0;
3715
3716 if (event_recorder->priv->metadata_dumped || !LTTNG_READ_ONCE(session->active))
3717 return 0;
3718 if (chan->priv->channel_type == METADATA_CHANNEL)
3719 return 0;
3720
3721 lttng_metadata_begin(session);
3722
3723 ret = lttng_metadata_printf(session,
3724 "event {\n"
3725 " name = \"%s\";\n"
3726 " id = %u;\n"
3727 " stream_id = %u;\n",
3728 event_recorder->priv->parent.desc->event_name,
3729 event_recorder->priv->id,
3730 event_recorder->chan->priv->id);
3731 if (ret)
3732 goto end;
3733
3734 ret = lttng_metadata_printf(session,
3735 " fields := struct {\n"
3736 );
3737 if (ret)
3738 goto end;
3739
3740 ret = _lttng_fields_metadata_statedump(session, event_recorder);
3741 if (ret)
3742 goto end;
3743
3744 /*
3745 * LTTng space reservation can only reserve multiples of the
3746 * byte size.
3747 */
3748 ret = lttng_metadata_printf(session,
3749 " };\n"
3750 "};\n\n");
3751 if (ret)
3752 goto end;
3753
3754 event_recorder->priv->metadata_dumped = 1;
3755 end:
3756 lttng_metadata_end(session);
3757 return ret;
3758
3759 }
3760
3761 /*
3762 * Must be called with sessions_mutex held.
3763 * The entire channel metadata is printed as a single atomic metadata
3764 * transaction.
3765 */
3766 static
3767 int _lttng_channel_metadata_statedump(struct lttng_kernel_session *session,
3768 struct lttng_kernel_channel_buffer *chan)
3769 {
3770 int ret = 0;
3771
3772 if (chan->priv->metadata_dumped || !LTTNG_READ_ONCE(session->active))
3773 return 0;
3774
3775 if (chan->priv->channel_type == METADATA_CHANNEL)
3776 return 0;
3777
3778 lttng_metadata_begin(session);
3779
3780 WARN_ON_ONCE(!chan->priv->header_type);
3781 ret = lttng_metadata_printf(session,
3782 "stream {\n"
3783 " id = %u;\n"
3784 " event.header := %s;\n"
3785 " packet.context := struct packet_context;\n",
3786 chan->priv->id,
3787 chan->priv->header_type == 1 ? "struct event_header_compact" :
3788 "struct event_header_large");
3789 if (ret)
3790 goto end;
3791
3792 if (chan->priv->ctx) {
3793 ret = lttng_metadata_printf(session,
3794 " event.context := struct {\n");
3795 if (ret)
3796 goto end;
3797 }
3798 ret = _lttng_context_metadata_statedump(session, chan->priv->ctx);
3799 if (ret)
3800 goto end;
3801 if (chan->priv->ctx) {
3802 ret = lttng_metadata_printf(session,
3803 " };\n");
3804 if (ret)
3805 goto end;
3806 }
3807
3808 ret = lttng_metadata_printf(session,
3809 "};\n\n");
3810
3811 chan->priv->metadata_dumped = 1;
3812 end:
3813 lttng_metadata_end(session);
3814 return ret;
3815 }
3816
3817 /*
3818 * Must be called with sessions_mutex held.
3819 */
3820 static
3821 int _lttng_stream_packet_context_declare(struct lttng_kernel_session *session)
3822 {
3823 return lttng_metadata_printf(session,
3824 "struct packet_context {\n"
3825 " uint64_clock_monotonic_t timestamp_begin;\n"
3826 " uint64_clock_monotonic_t timestamp_end;\n"
3827 " uint64_t content_size;\n"
3828 " uint64_t packet_size;\n"
3829 " uint64_t packet_seq_num;\n"
3830 " unsigned long events_discarded;\n"
3831 " uint32_t cpu_id;\n"
3832 "};\n\n"
3833 );
3834 }
3835
3836 /*
3837 * Compact header:
3838 * id: range: 0 - 30.
3839 * id 31 is reserved to indicate an extended header.
3840 *
3841 * Large header:
3842 * id: range: 0 - 65534.
3843 * id 65535 is reserved to indicate an extended header.
3844 *
3845 * Must be called with sessions_mutex held.
3846 */
3847 static
3848 int _lttng_event_header_declare(struct lttng_kernel_session *session)
3849 {
3850 return lttng_metadata_printf(session,
3851 "struct event_header_compact {\n"
3852 " enum : uint5_t { compact = 0 ... 30, extended = 31 } id;\n"
3853 " variant <id> {\n"
3854 " struct {\n"
3855 " uint27_clock_monotonic_t timestamp;\n"
3856 " } compact;\n"
3857 " struct {\n"
3858 " uint32_t id;\n"
3859 " uint64_clock_monotonic_t timestamp;\n"
3860 " } extended;\n"
3861 " } v;\n"
3862 "} align(%u);\n"
3863 "\n"
3864 "struct event_header_large {\n"
3865 " enum : uint16_t { compact = 0 ... 65534, extended = 65535 } id;\n"
3866 " variant <id> {\n"
3867 " struct {\n"
3868 " uint32_clock_monotonic_t timestamp;\n"
3869 " } compact;\n"
3870 " struct {\n"
3871 " uint32_t id;\n"
3872 " uint64_clock_monotonic_t timestamp;\n"
3873 " } extended;\n"
3874 " } v;\n"
3875 "} align(%u);\n\n",
3876 lttng_alignof(uint32_t) * CHAR_BIT,
3877 lttng_alignof(uint16_t) * CHAR_BIT
3878 );
3879 }
3880
3881 /*
3882 * Approximation of NTP time of day to clock monotonic correlation,
3883 * taken at start of trace.
3884 * Yes, this is only an approximation. Yes, we can (and will) do better
3885 * in future versions.
3886  * This function may return a negative offset, which can happen if the
3887  * system sets the REALTIME clock to 0 after boot.
3888  *
3889  * Use a 64-bit timespec on kernels that provide it, which makes 32-bit
3890  * architectures y2038 compliant.
3891 */
3892 static
3893 int64_t measure_clock_offset(void)
3894 {
3895 uint64_t monotonic_avg, monotonic[2], realtime;
3896 uint64_t tcf = trace_clock_freq();
3897 int64_t offset;
3898 unsigned long flags;
3899 #ifdef LTTNG_KERNEL_HAS_TIMESPEC64
3900 struct timespec64 rts = { 0, 0 };
3901 #else
3902 struct timespec rts = { 0, 0 };
3903 #endif
3904
3905 /* Disable interrupts to increase correlation precision. */
3906 local_irq_save(flags);
3907 monotonic[0] = trace_clock_read64();
3908 #ifdef LTTNG_KERNEL_HAS_TIMESPEC64
3909 ktime_get_real_ts64(&rts);
3910 #else
3911 getnstimeofday(&rts);
3912 #endif
3913 monotonic[1] = trace_clock_read64();
3914 local_irq_restore(flags);
3915
3916 monotonic_avg = (monotonic[0] + monotonic[1]) >> 1;
3917 realtime = (uint64_t) rts.tv_sec * tcf;
3918 if (tcf == NSEC_PER_SEC) {
3919 realtime += rts.tv_nsec;
3920 } else {
3921 uint64_t n = rts.tv_nsec * tcf;
3922
3923 do_div(n, NSEC_PER_SEC);
3924 realtime += n;
3925 }
3926 offset = (int64_t) realtime - monotonic_avg;
3927 return offset;
3928 }
3929
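/*
 * Print a string into the metadata stream, escaping the characters that
 * are special within a CTF string literal ('\n', '\' and '"').
 * Must be called with sessions_mutex held.
 */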
3930 static
3931 int print_escaped_ctf_string(struct lttng_kernel_session *session, const char *string)
3932 {
3933 int ret = 0;
3934 size_t i;
3935 char cur;
3936
3937 i = 0;
3938 cur = string[i];
3939 while (cur != '\0') {
3940 switch (cur) {
3941 case '\n':
3942 ret = lttng_metadata_printf(session, "%s", "\\n");
3943 break;
3944 case '\\':
3945 case '"':
3946 ret = lttng_metadata_printf(session, "%c", '\\');
3947 if (ret)
3948 goto error;
3949 /* We still print the current char */
3950 lttng_fallthrough;
3951 default:
3952 ret = lttng_metadata_printf(session, "%c", cur);
3953 break;
3954 }
3955
3956 if (ret)
3957 goto error;
3958
3959 cur = string[++i];
3960 }
3961 error:
3962 return ret;
3963 }
3964
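/*
 * Print a metadata field of the form `name = "value";`, escaping the
 * value with print_escaped_ctf_string().
 * Must be called with sessions_mutex held.
 */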
3965 static
3966 int print_metadata_escaped_field(struct lttng_kernel_session *session, const char *field,
3967 const char *field_value)
3968 {
3969 int ret;
3970
3971 ret = lttng_metadata_printf(session, " %s = \"", field);
3972 if (ret)
3973 goto error;
3974
3975 ret = print_escaped_ctf_string(session, field_value);
3976 if (ret)
3977 goto error;
3978
3979 ret = lttng_metadata_printf(session, "\";\n");
3980
3981 error:
3982 return ret;
3983 }
3984
3985 /*
3986 * Output metadata into this session's metadata buffers.
3987 * Must be called with sessions_mutex held.
3988 */
3989 static
3990 int _lttng_session_metadata_statedump(struct lttng_kernel_session *session)
3991 {
3992 unsigned char *uuid_c = session->priv->uuid.b;
3993 unsigned char uuid_s[37], clock_uuid_s[BOOT_ID_LEN];
3994 const char *product_uuid;
3995 struct lttng_kernel_channel_buffer_private *chan_priv;
3996 struct lttng_kernel_event_recorder_private *event_recorder_priv;
3997 int ret = 0;
3998
3999 if (!LTTNG_READ_ONCE(session->active))
4000 return 0;
4001
4002 lttng_metadata_begin(session);
4003
4004 if (session->priv->metadata_dumped)
4005 goto skip_session;
4006
4007 snprintf(uuid_s, sizeof(uuid_s),
4008 "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
4009 uuid_c[0], uuid_c[1], uuid_c[2], uuid_c[3],
4010 uuid_c[4], uuid_c[5], uuid_c[6], uuid_c[7],
4011 uuid_c[8], uuid_c[9], uuid_c[10], uuid_c[11],
4012 uuid_c[12], uuid_c[13], uuid_c[14], uuid_c[15]);
4013
4014 ret = lttng_metadata_printf(session,
4015 "typealias integer { size = 8; align = %u; signed = false; } := uint8_t;\n"
4016 "typealias integer { size = 16; align = %u; signed = false; } := uint16_t;\n"
4017 "typealias integer { size = 32; align = %u; signed = false; } := uint32_t;\n"
4018 "typealias integer { size = 64; align = %u; signed = false; } := uint64_t;\n"
4019 "typealias integer { size = %u; align = %u; signed = false; } := unsigned long;\n"
4020 "typealias integer { size = 5; align = 1; signed = false; } := uint5_t;\n"
4021 "typealias integer { size = 27; align = 1; signed = false; } := uint27_t;\n"
4022 "\n"
4023 "trace {\n"
4024 " major = %u;\n"
4025 " minor = %u;\n"
4026 " uuid = \"%s\";\n"
4027 " byte_order = %s;\n"
4028 " packet.header := struct {\n"
4029 " uint32_t magic;\n"
4030 " uint8_t uuid[16];\n"
4031 " uint32_t stream_id;\n"
4032 " uint64_t stream_instance_id;\n"
4033 " };\n"
4034 "};\n\n",
4035 lttng_alignof(uint8_t) * CHAR_BIT,
4036 lttng_alignof(uint16_t) * CHAR_BIT,
4037 lttng_alignof(uint32_t) * CHAR_BIT,
4038 lttng_alignof(uint64_t) * CHAR_BIT,
4039 sizeof(unsigned long) * CHAR_BIT,
4040 lttng_alignof(unsigned long) * CHAR_BIT,
4041 CTF_SPEC_MAJOR,
4042 CTF_SPEC_MINOR,
4043 uuid_s,
4044 #if __BYTE_ORDER == __BIG_ENDIAN
4045 "be"
4046 #else
4047 "le"
4048 #endif
4049 );
4050 if (ret)
4051 goto end;
4052
4053 ret = lttng_metadata_printf(session,
4054 "env {\n"
4055 " hostname = \"%s\";\n"
4056 " domain = \"kernel\";\n"
4057 " sysname = \"%s\";\n"
4058 " kernel_release = \"%s\";\n"
4059 " kernel_version = \"%s\";\n"
4060 " tracer_name = \"lttng-modules\";\n"
4061 " tracer_major = %d;\n"
4062 " tracer_minor = %d;\n"
4063 " tracer_patchlevel = %d;\n"
4064 " trace_buffering_scheme = \"global\";\n",
4065 current->nsproxy->uts_ns->name.nodename,
4066 utsname()->sysname,
4067 utsname()->release,
4068 utsname()->version,
4069 LTTNG_MODULES_MAJOR_VERSION,
4070 LTTNG_MODULES_MINOR_VERSION,
4071 LTTNG_MODULES_PATCHLEVEL_VERSION
4072 );
4073 if (ret)
4074 goto end;
4075
4076 ret = print_metadata_escaped_field(session, "trace_name", session->priv->name);
4077 if (ret)
4078 goto end;
4079 ret = print_metadata_escaped_field(session, "trace_creation_datetime",
4080 session->priv->creation_time);
4081 if (ret)
4082 goto end;
4083
4084 /* Add the product UUID to the 'env' section */
4085 product_uuid = dmi_get_system_info(DMI_PRODUCT_UUID);
4086 if (product_uuid) {
4087 ret = lttng_metadata_printf(session,
4088 " product_uuid = \"%s\";\n",
4089 product_uuid
4090 );
4091 if (ret)
4092 goto end;
4093 }
4094
4095 /* Close the 'env' section */
4096 ret = lttng_metadata_printf(session, "};\n\n");
4097 if (ret)
4098 goto end;
4099
4100 ret = lttng_metadata_printf(session,
4101 "clock {\n"
4102 " name = \"%s\";\n",
4103 trace_clock_name()
4104 );
4105 if (ret)
4106 goto end;
4107
4108 if (!trace_clock_uuid(clock_uuid_s)) {
4109 ret = lttng_metadata_printf(session,
4110 " uuid = \"%s\";\n",
4111 clock_uuid_s
4112 );
4113 if (ret)
4114 goto end;
4115 }
4116
4117 ret = lttng_metadata_printf(session,
4118 " description = \"%s\";\n"
4119 " freq = %llu; /* Frequency, in Hz */\n"
4120 " /* clock value offset from Epoch is: offset * (1/freq) */\n"
4121 " offset = %lld;\n"
4122 "};\n\n",
4123 trace_clock_description(),
4124 (unsigned long long) trace_clock_freq(),
4125 (long long) measure_clock_offset()
4126 );
4127 if (ret)
4128 goto end;
4129
4130 ret = lttng_metadata_printf(session,
4131 "typealias integer {\n"
4132 " size = 27; align = 1; signed = false;\n"
4133 " map = clock.%s.value;\n"
4134 "} := uint27_clock_monotonic_t;\n"
4135 "\n"
4136 "typealias integer {\n"
4137 " size = 32; align = %u; signed = false;\n"
4138 " map = clock.%s.value;\n"
4139 "} := uint32_clock_monotonic_t;\n"
4140 "\n"
4141 "typealias integer {\n"
4142 " size = 64; align = %u; signed = false;\n"
4143 " map = clock.%s.value;\n"
4144 "} := uint64_clock_monotonic_t;\n\n",
4145 trace_clock_name(),
4146 lttng_alignof(uint32_t) * CHAR_BIT,
4147 trace_clock_name(),
4148 lttng_alignof(uint64_t) * CHAR_BIT,
4149 trace_clock_name()
4150 );
4151 if (ret)
4152 goto end;
4153
4154 ret = _lttng_stream_packet_context_declare(session);
4155 if (ret)
4156 goto end;
4157
4158 ret = _lttng_event_header_declare(session);
4159 if (ret)
4160 goto end;
4161
4162 skip_session:
4163 list_for_each_entry(chan_priv, &session->priv->chan, node) {
4164 ret = _lttng_channel_metadata_statedump(session, chan_priv->pub);
4165 if (ret)
4166 goto end;
4167 }
4168
4169 list_for_each_entry(event_recorder_priv, &session->priv->events, node) {
4170 ret = _lttng_event_metadata_statedump(session, event_recorder_priv->pub->chan,
4171 event_recorder_priv->pub);
4172 if (ret)
4173 goto end;
4174 }
4175 session->priv->metadata_dumped = 1;
4176 end:
4177 lttng_metadata_end(session);
4178 return ret;
4179 }
4180
4181 /**
4182 * lttng_transport_register - LTT transport registration
4183 * @transport: transport structure
4184 *
4185  * Registers a transport which can be used as an output to extract data out of
4186  * LTTng. The module calling this registration function must ensure that no
4187  * trap-inducing code will be executed by the transport functions, e.g.
4188  * vmalloc_sync_mappings() must be called between a vmalloc and the moment the
4189  * memory is made visible to the transport function. This registration acts as a
4190  * vmalloc_sync_mappings(), so the module only needs to synchronize the TLBs
4191  * itself if it allocates virtual memory after its registration.
4192 */
4193 void lttng_transport_register(struct lttng_transport *transport)
4194 {
4195 /*
4196 * Make sure no page fault can be triggered by the module about to be
4197 * registered. We deal with this here so we don't have to call
4198 * vmalloc_sync_mappings() in each module's init.
4199 */
4200 wrapper_vmalloc_sync_mappings();
4201
4202 mutex_lock(&sessions_mutex);
4203 list_add_tail(&transport->node, &lttng_transport_list);
4204 mutex_unlock(&sessions_mutex);
4205 }
4206 EXPORT_SYMBOL_GPL(lttng_transport_register);
4207
4208 /**
4209 * lttng_transport_unregister - LTT transport unregistration
4210 * @transport: transport structure
4211 */
4212 void lttng_transport_unregister(struct lttng_transport *transport)
4213 {
4214 mutex_lock(&sessions_mutex);
4215 list_del(&transport->node);
4216 mutex_unlock(&sessions_mutex);
4217 }
4218 EXPORT_SYMBOL_GPL(lttng_transport_unregister);
4219
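/**
 * lttng_counter_transport_register - LTT counter transport registration
 * @transport: transport structure
 *
 * Registers a counter transport. Like lttng_transport_register(), this
 * registration acts as a vmalloc_sync_mappings() for the module being
 * registered.
 */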
4220 void lttng_counter_transport_register(struct lttng_counter_transport *transport)
4221 {
4222 /*
4223 * Make sure no page fault can be triggered by the module about to be
4224 * registered. We deal with this here so we don't have to call
4225 * vmalloc_sync_mappings() in each module's init.
4226 */
4227 wrapper_vmalloc_sync_mappings();
4228
4229 mutex_lock(&sessions_mutex);
4230 list_add_tail(&transport->node, &lttng_counter_transport_list);
4231 mutex_unlock(&sessions_mutex);
4232 }
4233 EXPORT_SYMBOL_GPL(lttng_counter_transport_register);
4234
4235 void lttng_counter_transport_unregister(struct lttng_counter_transport *transport)
4236 {
4237 mutex_lock(&sessions_mutex);
4238 list_del(&transport->node);
4239 mutex_unlock(&sessions_mutex);
4240 }
4241 EXPORT_SYMBOL_GPL(lttng_counter_transport_unregister);
4242
4243 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
4244
4245 enum cpuhp_state lttng_hp_prepare;
4246 enum cpuhp_state lttng_hp_online;
4247
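/*
 * CPU hotplug callbacks: dispatch each notification to the LTTng
 * component (ring buffer frontend/backend, iterator or perf counter
 * context) owning the hlist node.
 */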
4248 static int lttng_hotplug_prepare(unsigned int cpu, struct hlist_node *node)
4249 {
4250 struct lttng_cpuhp_node *lttng_node;
4251
4252 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4253 switch (lttng_node->component) {
4254 case LTTNG_RING_BUFFER_FRONTEND:
4255 return 0;
4256 case LTTNG_RING_BUFFER_BACKEND:
4257 return lttng_cpuhp_rb_backend_prepare(cpu, lttng_node);
4258 case LTTNG_RING_BUFFER_ITER:
4259 return 0;
4260 case LTTNG_CONTEXT_PERF_COUNTERS:
4261 return 0;
4262 default:
4263 return -EINVAL;
4264 }
4265 }
4266
4267 static int lttng_hotplug_dead(unsigned int cpu, struct hlist_node *node)
4268 {
4269 struct lttng_cpuhp_node *lttng_node;
4270
4271 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4272 switch (lttng_node->component) {
4273 case LTTNG_RING_BUFFER_FRONTEND:
4274 return lttng_cpuhp_rb_frontend_dead(cpu, lttng_node);
4275 case LTTNG_RING_BUFFER_BACKEND:
4276 return 0;
4277 case LTTNG_RING_BUFFER_ITER:
4278 return 0;
4279 case LTTNG_CONTEXT_PERF_COUNTERS:
4280 return lttng_cpuhp_perf_counter_dead(cpu, lttng_node);
4281 default:
4282 return -EINVAL;
4283 }
4284 }
4285
4286 static int lttng_hotplug_online(unsigned int cpu, struct hlist_node *node)
4287 {
4288 struct lttng_cpuhp_node *lttng_node;
4289
4290 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4291 switch (lttng_node->component) {
4292 case LTTNG_RING_BUFFER_FRONTEND:
4293 return lttng_cpuhp_rb_frontend_online(cpu, lttng_node);
4294 case LTTNG_RING_BUFFER_BACKEND:
4295 return 0;
4296 case LTTNG_RING_BUFFER_ITER:
4297 return lttng_cpuhp_rb_iter_online(cpu, lttng_node);
4298 case LTTNG_CONTEXT_PERF_COUNTERS:
4299 return lttng_cpuhp_perf_counter_online(cpu, lttng_node);
4300 default:
4301 return -EINVAL;
4302 }
4303 }
4304
4305 static int lttng_hotplug_offline(unsigned int cpu, struct hlist_node *node)
4306 {
4307 struct lttng_cpuhp_node *lttng_node;
4308
4309 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4310 switch (lttng_node->component) {
4311 case LTTNG_RING_BUFFER_FRONTEND:
4312 return lttng_cpuhp_rb_frontend_offline(cpu, lttng_node);
4313 case LTTNG_RING_BUFFER_BACKEND:
4314 return 0;
4315 case LTTNG_RING_BUFFER_ITER:
4316 return 0;
4317 case LTTNG_CONTEXT_PERF_COUNTERS:
4318 return 0;
4319 default:
4320 return -EINVAL;
4321 }
4322 }
4323
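/*
 * Register the dynamic "prepare" and "online" multi-instance CPU hotplug
 * states and propagate them to the ring buffer.
 */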
4324 static int __init lttng_init_cpu_hotplug(void)
4325 {
4326 int ret;
4327
4328 ret = cpuhp_setup_state_multi(CPUHP_BP_PREPARE_DYN, "lttng:prepare",
4329 lttng_hotplug_prepare,
4330 lttng_hotplug_dead);
4331 if (ret < 0) {
4332 return ret;
4333 }
4334 lttng_hp_prepare = ret;
4335 lttng_rb_set_hp_prepare(ret);
4336
4337 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "lttng:online",
4338 lttng_hotplug_online,
4339 lttng_hotplug_offline);
4340 if (ret < 0) {
4341 cpuhp_remove_multi_state(lttng_hp_prepare);
4342 lttng_hp_prepare = 0;
4343 return ret;
4344 }
4345 lttng_hp_online = ret;
4346 lttng_rb_set_hp_online(ret);
4347
4348 return 0;
4349 }
4350
4351 static void __exit lttng_exit_cpu_hotplug(void)
4352 {
4353 lttng_rb_set_hp_online(0);
4354 cpuhp_remove_multi_state(lttng_hp_online);
4355 lttng_rb_set_hp_prepare(0);
4356 cpuhp_remove_multi_state(lttng_hp_prepare);
4357 }
4358
4359 #else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
4360 static int lttng_init_cpu_hotplug(void)
4361 {
4362 return 0;
4363 }
4364 static void lttng_exit_cpu_hotplug(void)
4365 {
4366 }
4367 #endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
4368
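/*
 * Module entry point: set up the wrappers, probes, contexts and
 * tracepoints, create the event caches, then register the ABI, the
 * logger and CPU hotplug support.
 */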
4369 static int __init lttng_events_init(void)
4370 {
4371 int ret;
4372
4373 ret = wrapper_lttng_fixup_sig(THIS_MODULE);
4374 if (ret)
4375 return ret;
4376 ret = wrapper_get_pfnblock_flags_mask_init();
4377 if (ret)
4378 return ret;
4379 ret = wrapper_get_pageblock_flags_mask_init();
4380 if (ret)
4381 return ret;
4382 ret = lttng_probes_init();
4383 if (ret)
4384 return ret;
4385 ret = lttng_context_init();
4386 if (ret)
4387 return ret;
4388 ret = lttng_tracepoint_init();
4389 if (ret)
4390 goto error_tp;
4391 event_recorder_cache = KMEM_CACHE(lttng_kernel_event_recorder, 0);
4392 if (!event_recorder_cache) {
4393 ret = -ENOMEM;
4394 goto error_kmem_event_recorder;
4395 }
4396 event_recorder_private_cache = KMEM_CACHE(lttng_kernel_event_recorder_private, 0);
4397 if (!event_recorder_private_cache) {
4398 ret = -ENOMEM;
4399 goto error_kmem_event_recorder_private;
4400 }
4401 event_notifier_cache = KMEM_CACHE(lttng_kernel_event_notifier, 0);
4402 if (!event_notifier_cache) {
4403 ret = -ENOMEM;
4404 goto error_kmem_event_notifier;
4405 }
4406 event_notifier_private_cache = KMEM_CACHE(lttng_kernel_event_notifier_private, 0);
4407 if (!event_notifier_private_cache) {
4408 ret = -ENOMEM;
4409 goto error_kmem_event_notifier_private;
4410 }
4411 ret = lttng_abi_init();
4412 if (ret)
4413 goto error_abi;
4414 ret = lttng_logger_init();
4415 if (ret)
4416 goto error_logger;
4417 ret = lttng_init_cpu_hotplug();
4418 if (ret)
4419 goto error_hotplug;
4420 printk(KERN_NOTICE "LTTng: Loaded modules v%s.%s.%s%s (%s)%s%s\n",
4421 __stringify(LTTNG_MODULES_MAJOR_VERSION),
4422 __stringify(LTTNG_MODULES_MINOR_VERSION),
4423 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
4424 LTTNG_MODULES_EXTRAVERSION,
4425 LTTNG_VERSION_NAME,
4426 #ifdef LTTNG_EXTRA_VERSION_GIT
4427 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
4428 #else
4429 "",
4430 #endif
4431 #ifdef LTTNG_EXTRA_VERSION_NAME
4432 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
4433 #else
4434 "");
4435 #endif
4436 #ifdef CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM
4437 printk(KERN_NOTICE "LTTng: Experimental bitwise enum enabled.\n");
4438 #endif /* CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM */
4439 return 0;
4440
4441 error_hotplug:
4442 lttng_logger_exit();
4443 error_logger:
4444 lttng_abi_exit();
4445 error_abi:
4446 kmem_cache_destroy(event_notifier_private_cache);
4447 error_kmem_event_notifier_private:
4448 kmem_cache_destroy(event_notifier_cache);
4449 error_kmem_event_notifier:
4450 kmem_cache_destroy(event_recorder_private_cache);
4451 error_kmem_event_recorder_private:
4452 kmem_cache_destroy(event_recorder_cache);
4453 error_kmem_event_recorder:
4454 lttng_tracepoint_exit();
4455 error_tp:
4456 lttng_context_exit();
4457 printk(KERN_NOTICE "LTTng: Failed to load modules v%s.%s.%s%s (%s)%s%s\n",
4458 __stringify(LTTNG_MODULES_MAJOR_VERSION),
4459 __stringify(LTTNG_MODULES_MINOR_VERSION),
4460 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
4461 LTTNG_MODULES_EXTRAVERSION,
4462 LTTNG_VERSION_NAME,
4463 #ifdef LTTNG_EXTRA_VERSION_GIT
4464 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
4465 #else
4466 "",
4467 #endif
4468 #ifdef LTTNG_EXTRA_VERSION_NAME
4469 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
4470 #else
4471 "");
4472 #endif
4473 return ret;
4474 }
4475
4476 module_init(lttng_events_init);
4477
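/*
 * Module exit: tear down CPU hotplug support, the logger and the ABI,
 * destroy any remaining sessions, then release the event caches,
 * tracepoint and context state.
 */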
4478 static void __exit lttng_events_exit(void)
4479 {
4480 struct lttng_kernel_session_private *session_priv, *tmpsession_priv;
4481
4482 lttng_exit_cpu_hotplug();
4483 lttng_logger_exit();
4484 lttng_abi_exit();
4485 list_for_each_entry_safe(session_priv, tmpsession_priv, &sessions, list)
4486 lttng_session_destroy(session_priv->pub);
4487 kmem_cache_destroy(event_recorder_cache);
4488 kmem_cache_destroy(event_recorder_private_cache);
4489 kmem_cache_destroy(event_notifier_cache);
4490 kmem_cache_destroy(event_notifier_private_cache);
4491 lttng_tracepoint_exit();
4492 lttng_context_exit();
4493 printk(KERN_NOTICE "LTTng: Unloaded modules v%s.%s.%s%s (%s)%s%s\n",
4494 __stringify(LTTNG_MODULES_MAJOR_VERSION),
4495 __stringify(LTTNG_MODULES_MINOR_VERSION),
4496 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
4497 LTTNG_MODULES_EXTRAVERSION,
4498 LTTNG_VERSION_NAME,
4499 #ifdef LTTNG_EXTRA_VERSION_GIT
4500 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
4501 #else
4502 "",
4503 #endif
4504 #ifdef LTTNG_EXTRA_VERSION_NAME
4505 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
4506 #else
4507 "");
4508 #endif
4509 }
4510
4511 module_exit(lttng_events_exit);
4512
4513 #include <generated/patches.h>
4514 #ifdef LTTNG_EXTRA_VERSION_GIT
4515 MODULE_INFO(extra_version_git, LTTNG_EXTRA_VERSION_GIT);
4516 #endif
4517 #ifdef LTTNG_EXTRA_VERSION_NAME
4518 MODULE_INFO(extra_version_name, LTTNG_EXTRA_VERSION_NAME);
4519 #endif
4520 MODULE_LICENSE("GPL and additional rights");
4521 MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
4522 MODULE_DESCRIPTION("LTTng tracer");
4523 MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
4524 __stringify(LTTNG_MODULES_MINOR_VERSION) "."
4525 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
4526 LTTNG_MODULES_EXTRAVERSION);