Split struct lttng_session into public/private structures
1 /* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
2 *
3 * lttng-events.c
4 *
5 * Holds LTTng per-session event registry.
6 *
7 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 */
9
10 /*
11 * This page_alloc.h wrapper needs to be included before gfpflags.h because it
12 * overrides a function with a define.
13 */
14 #include "wrapper/page_alloc.h"
15
16 #include <linux/module.h>
17 #include <linux/mutex.h>
18 #include <linux/sched.h>
19 #include <linux/slab.h>
20 #include <linux/jiffies.h>
21 #include <linux/utsname.h>
22 #include <linux/err.h>
23 #include <linux/seq_file.h>
24 #include <linux/file.h>
25 #include <linux/anon_inodes.h>
26 #include <wrapper/file.h>
27 #include <linux/uaccess.h>
28 #include <linux/vmalloc.h>
29 #include <linux/dmi.h>
30
31 #include <wrapper/uuid.h>
32 #include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_mappings() */
33 #include <wrapper/random.h>
34 #include <wrapper/tracepoint.h>
35 #include <wrapper/list.h>
36 #include <wrapper/types.h>
37 #include <lttng/kernel-version.h>
38 #include <lttng/events.h>
39 #include <lttng/events-internal.h>
40 #include <lttng/lttng-bytecode.h>
41 #include <lttng/tracer.h>
42 #include <lttng/event-notifier-notification.h>
43 #include <lttng/abi-old.h>
44 #include <lttng/endian.h>
45 #include <lttng/string-utils.h>
46 #include <lttng/utils.h>
47 #include <ringbuffer/backend.h>
48 #include <ringbuffer/frontend.h>
49 #include <wrapper/time.h>
50
51 #define METADATA_CACHE_DEFAULT_SIZE 4096
52
53 static LIST_HEAD(sessions);
54 static LIST_HEAD(event_notifier_groups);
55 static LIST_HEAD(lttng_transport_list);
56 static LIST_HEAD(lttng_counter_transport_list);
57 /*
58 * Protect the sessions and metadata caches.
59 */
60 static DEFINE_MUTEX(sessions_mutex);
61 static struct kmem_cache *event_recorder_cache;
62 static struct kmem_cache *event_recorder_private_cache;
63 static struct kmem_cache *event_notifier_cache;
64 static struct kmem_cache *event_notifier_private_cache;
65
66 static void lttng_session_lazy_sync_event_enablers(struct lttng_kernel_session *session);
67 static void lttng_session_sync_event_enablers(struct lttng_kernel_session *session);
68 static void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler);
69 static void lttng_event_notifier_enabler_destroy(struct lttng_event_notifier_enabler *event_notifier_enabler);
70 static void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group);
71
72 static void _lttng_event_destroy(struct lttng_kernel_event_common *event);
73 static void _lttng_channel_destroy(struct lttng_channel *chan);
74 static int _lttng_event_unregister(struct lttng_kernel_event_recorder *event);
75 static int _lttng_event_notifier_unregister(struct lttng_kernel_event_notifier *event_notifier);
76 static
77 int _lttng_event_metadata_statedump(struct lttng_kernel_session *session,
78 struct lttng_channel *chan,
79 struct lttng_kernel_event_recorder *event);
80 static
81 int _lttng_session_metadata_statedump(struct lttng_kernel_session *session);
82 static
83 void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream);
84 static
85 int _lttng_type_statedump(struct lttng_kernel_session *session,
86 const struct lttng_kernel_type_common *type,
87 enum lttng_kernel_string_encoding parent_encoding,
88 size_t nesting);
89 static
90 int _lttng_field_statedump(struct lttng_kernel_session *session,
91 const struct lttng_kernel_event_field *field,
92 size_t nesting, const char **prev_field_name_p);
93
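/*
 * Wait for any probe currently executing (under RCU read-side or
 * preemption-disabled protection, depending on the kernel version) to
 * complete. Used before tearing down events, channels and notifiers.
 */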
94 void synchronize_trace(void)
95 {
96 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,1,0))
97 synchronize_rcu();
98 #else
99 synchronize_sched();
100 #endif
101
102 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0))
103 #ifdef CONFIG_PREEMPT_RT_FULL
104 synchronize_rcu();
105 #endif
106 #else /* (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0)) */
107 #ifdef CONFIG_PREEMPT_RT
108 synchronize_rcu();
109 #endif
110 #endif /* (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0)) */
111 }
112
113 void lttng_lock_sessions(void)
114 {
115 mutex_lock(&sessions_mutex);
116 }
117
118 void lttng_unlock_sessions(void)
119 {
120 mutex_unlock(&sessions_mutex);
121 }
122
123 static struct lttng_transport *lttng_transport_find(const char *name)
124 {
125 struct lttng_transport *transport;
126
127 list_for_each_entry(transport, &lttng_transport_list, node) {
128 if (!strcmp(transport->name, name))
129 return transport;
130 }
131 return NULL;
132 }
133
134 /*
135 * Called with sessions lock held.
136 */
137 int lttng_session_active(void)
138 {
139 struct lttng_kernel_session_private *iter;
140
141 list_for_each_entry(iter, &sessions, list) {
142 if (iter->pub->active)
143 return 1;
144 }
145 return 0;
146 }
147
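/*
 * Allocate a new tracing session: the public/private structure pair, the
 * metadata cache and the per-session event hash table. The ID trackers are
 * linked back to their session and the session is added to the global list,
 * all under sessions_mutex. Returns NULL on any allocation failure.
 */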
148 struct lttng_kernel_session *lttng_session_create(void)
149 {
150 struct lttng_kernel_session *session;
151 struct lttng_kernel_session_private *session_priv;
152 struct lttng_metadata_cache *metadata_cache;
153 int i;
154
155 mutex_lock(&sessions_mutex);
156 session = lttng_kvzalloc(sizeof(*session), GFP_KERNEL);
157 if (!session)
158 goto err;
159 session_priv = lttng_kvzalloc(sizeof(*session_priv), GFP_KERNEL);
160 if (!session_priv)
161 goto err_free_session;
162 session->priv = session_priv;
163 session_priv->pub = session;
164
165 INIT_LIST_HEAD(&session_priv->chan);
166 INIT_LIST_HEAD(&session_priv->events);
167 lttng_guid_gen(&session_priv->uuid);
168
169 metadata_cache = kzalloc(sizeof(struct lttng_metadata_cache),
170 GFP_KERNEL);
171 if (!metadata_cache)
172 goto err_free_session_private;
173 metadata_cache->data = vzalloc(METADATA_CACHE_DEFAULT_SIZE);
174 if (!metadata_cache->data)
175 goto err_free_cache;
176 metadata_cache->cache_alloc = METADATA_CACHE_DEFAULT_SIZE;
177 kref_init(&metadata_cache->refcount);
178 mutex_init(&metadata_cache->lock);
179 session_priv->metadata_cache = metadata_cache;
180 INIT_LIST_HEAD(&metadata_cache->metadata_stream);
181 memcpy(&metadata_cache->uuid, &session_priv->uuid,
182 sizeof(metadata_cache->uuid));
183 INIT_LIST_HEAD(&session_priv->enablers_head);
184 for (i = 0; i < LTTNG_EVENT_HT_SIZE; i++)
185 INIT_HLIST_HEAD(&session_priv->events_ht.table[i]);
186 list_add(&session_priv->list, &sessions);
187
188 session->pid_tracker.session = session;
189 session->pid_tracker.tracker_type = TRACKER_PID;
190 session->vpid_tracker.session = session;
191 session->vpid_tracker.tracker_type = TRACKER_VPID;
192 session->uid_tracker.session = session;
193 session->uid_tracker.tracker_type = TRACKER_UID;
194 session->vuid_tracker.session = session;
195 session->vuid_tracker.tracker_type = TRACKER_VUID;
196 session->gid_tracker.session = session;
197 session->gid_tracker.tracker_type = TRACKER_GID;
198 session->vgid_tracker.session = session;
199 session->vgid_tracker.tracker_type = TRACKER_VGID;
200 mutex_unlock(&sessions_mutex);
201
202 return session;
203
204 err_free_cache:
205 kfree(metadata_cache);
206 err_free_session_private:
207 lttng_kvfree(session_priv);
208 err_free_session:
209 lttng_kvfree(session);
210 err:
211 mutex_unlock(&sessions_mutex);
212 return NULL;
213 }
214
215 static
216 struct lttng_counter_transport *lttng_counter_transport_find(const char *name)
217 {
218 struct lttng_counter_transport *transport;
219
220 list_for_each_entry(transport, &lttng_counter_transport_list, node) {
221 if (!strcmp(transport->name, name))
222 return transport;
223 }
224 return NULL;
225 }
226
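/*
 * Create a counter backed by the named counter transport (currently used
 * for the event notifier error counter). Holds a reference on the transport
 * module for the lifetime of the counter.
 */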
227 struct lttng_counter *lttng_kernel_counter_create(
228 const char *counter_transport_name,
229 size_t number_dimensions, const size_t *dimensions_sizes)
230 {
231 struct lttng_counter *counter = NULL;
232 struct lttng_counter_transport *counter_transport = NULL;
233
234 counter_transport = lttng_counter_transport_find(counter_transport_name);
235 if (!counter_transport) {
236 printk(KERN_WARNING "LTTng: counter transport %s not found.\n",
237 counter_transport_name);
238 goto notransport;
239 }
240 if (!try_module_get(counter_transport->owner)) {
241 printk(KERN_WARNING "LTTng: Can't lock counter transport module.\n");
242 goto notransport;
243 }
244
245 counter = lttng_kvzalloc(sizeof(struct lttng_counter), GFP_KERNEL);
246 if (!counter)
247 goto nomem;
248
249 /* Create event notifier error counter. */
250 counter->ops = &counter_transport->ops;
251 counter->transport = counter_transport;
252
253 counter->counter = counter->ops->counter_create(
254 number_dimensions, dimensions_sizes, 0);
255 if (!counter->counter) {
256 goto create_error;
257 }
258
259 return counter;
260
261 create_error:
262 lttng_kvfree(counter);
263 nomem:
264 if (counter_transport)
265 module_put(counter_transport->owner);
266 notransport:
267 return NULL;
268 }
269
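/*
 * Create an event notifier group along with the ring buffer channel used to
 * transport its notifications, and add it to the global group list under
 * sessions_mutex.
 */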
270 struct lttng_event_notifier_group *lttng_event_notifier_group_create(void)
271 {
272 struct lttng_transport *transport = NULL;
273 struct lttng_event_notifier_group *event_notifier_group;
274 const char *transport_name = "relay-event-notifier";
275 size_t subbuf_size = 4096; //TODO
276 size_t num_subbuf = 16; //TODO
277 unsigned int switch_timer_interval = 0;
278 unsigned int read_timer_interval = 0;
279 int i;
280
281 mutex_lock(&sessions_mutex);
282
283 transport = lttng_transport_find(transport_name);
284 if (!transport) {
285 printk(KERN_WARNING "LTTng: transport %s not found\n",
286 transport_name);
287 goto notransport;
288 }
289 if (!try_module_get(transport->owner)) {
290 printk(KERN_WARNING "LTTng: Can't lock transport %s module.\n",
291 transport_name);
292 goto notransport;
293 }
294
295 event_notifier_group = lttng_kvzalloc(sizeof(struct lttng_event_notifier_group),
296 GFP_KERNEL);
297 if (!event_notifier_group)
298 goto nomem;
299
300 /*
301 * Initialize the ring buffer used to store event notifier
302 * notifications.
303 */
304 event_notifier_group->ops = &transport->ops;
305 event_notifier_group->chan = transport->ops.priv->channel_create(
306 transport_name, event_notifier_group, NULL,
307 subbuf_size, num_subbuf, switch_timer_interval,
308 read_timer_interval);
309 if (!event_notifier_group->chan)
310 goto create_error;
311
312 event_notifier_group->transport = transport;
313
314 INIT_LIST_HEAD(&event_notifier_group->enablers_head);
315 INIT_LIST_HEAD(&event_notifier_group->event_notifiers_head);
316 for (i = 0; i < LTTNG_EVENT_NOTIFIER_HT_SIZE; i++)
317 INIT_HLIST_HEAD(&event_notifier_group->event_notifiers_ht.table[i]);
318
319 list_add(&event_notifier_group->node, &event_notifier_groups);
320
321 mutex_unlock(&sessions_mutex);
322
323 return event_notifier_group;
324
325 create_error:
326 lttng_kvfree(event_notifier_group);
327 nomem:
328 if (transport)
329 module_put(transport->owner);
330 notransport:
331 mutex_unlock(&sessions_mutex);
332 return NULL;
333 }
334
335 void metadata_cache_destroy(struct kref *kref)
336 {
337 struct lttng_metadata_cache *cache =
338 container_of(kref, struct lttng_metadata_cache, refcount);
339 vfree(cache->data);
340 kfree(cache);
341 }
342
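/*
 * Tear down a session: unregister its events, wait for in-flight probes to
 * complete, destroy enablers, events and channels, hang up the metadata
 * streams, then drop the metadata cache reference and free the session.
 */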
343 void lttng_session_destroy(struct lttng_kernel_session *session)
344 {
345 struct lttng_channel *chan, *tmpchan;
346 struct lttng_kernel_event_recorder_private *event_recorder_priv, *tmpevent_recorder_priv;
347 struct lttng_metadata_stream *metadata_stream;
348 struct lttng_event_enabler *event_enabler, *tmp_event_enabler;
349 int ret;
350
351 mutex_lock(&sessions_mutex);
352 WRITE_ONCE(session->active, 0);
353 list_for_each_entry(chan, &session->priv->chan, list) {
354 ret = lttng_syscalls_unregister_channel(chan);
355 WARN_ON(ret);
356 }
357 list_for_each_entry(event_recorder_priv, &session->priv->events, node) {
358 ret = _lttng_event_unregister(event_recorder_priv->pub);
359 WARN_ON(ret);
360 }
361 synchronize_trace(); /* Wait for in-flight events to complete */
362 list_for_each_entry(chan, &session->priv->chan, list) {
363 ret = lttng_syscalls_destroy_event(chan);
364 WARN_ON(ret);
365 }
366 list_for_each_entry_safe(event_enabler, tmp_event_enabler,
367 &session->priv->enablers_head, node)
368 lttng_event_enabler_destroy(event_enabler);
369 list_for_each_entry_safe(event_recorder_priv, tmpevent_recorder_priv, &session->priv->events, node)
370 _lttng_event_destroy(&event_recorder_priv->pub->parent);
371 list_for_each_entry_safe(chan, tmpchan, &session->priv->chan, list) {
372 BUG_ON(chan->channel_type == METADATA_CHANNEL);
373 _lttng_channel_destroy(chan);
374 }
375 mutex_lock(&session->priv->metadata_cache->lock);
376 list_for_each_entry(metadata_stream, &session->priv->metadata_cache->metadata_stream, list)
377 _lttng_metadata_channel_hangup(metadata_stream);
378 mutex_unlock(&session->priv->metadata_cache->lock);
379 lttng_id_tracker_destroy(&session->pid_tracker, false);
380 lttng_id_tracker_destroy(&session->vpid_tracker, false);
381 lttng_id_tracker_destroy(&session->uid_tracker, false);
382 lttng_id_tracker_destroy(&session->vuid_tracker, false);
383 lttng_id_tracker_destroy(&session->gid_tracker, false);
384 lttng_id_tracker_destroy(&session->vgid_tracker, false);
385 kref_put(&session->priv->metadata_cache->refcount, metadata_cache_destroy);
386 list_del(&session->priv->list);
387 mutex_unlock(&sessions_mutex);
388 lttng_kvfree(session->priv);
389 lttng_kvfree(session);
390 }
391
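/*
 * Tear down an event notifier group: unregister and destroy its event
 * notifiers and enablers, release the error counter if present, then
 * destroy the notification channel and put the transport module.
 */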
392 void lttng_event_notifier_group_destroy(
393 struct lttng_event_notifier_group *event_notifier_group)
394 {
395 struct lttng_event_notifier_enabler *event_notifier_enabler, *tmp_event_notifier_enabler;
396 struct lttng_kernel_event_notifier_private *event_notifier_priv, *tmpevent_notifier_priv;
397 int ret;
398
399 if (!event_notifier_group)
400 return;
401
402 mutex_lock(&sessions_mutex);
403
404 ret = lttng_syscalls_unregister_event_notifier_group(event_notifier_group);
405 WARN_ON(ret);
406
407 list_for_each_entry_safe(event_notifier_priv, tmpevent_notifier_priv,
408 &event_notifier_group->event_notifiers_head, node) {
409 ret = _lttng_event_notifier_unregister(event_notifier_priv->pub);
410 WARN_ON(ret);
411 }
412
413 	/* Wait for in-flight event notifiers to complete */
414 synchronize_trace();
415
416 irq_work_sync(&event_notifier_group->wakeup_pending);
417
418 kfree(event_notifier_group->sc_filter);
419
420 list_for_each_entry_safe(event_notifier_enabler, tmp_event_notifier_enabler,
421 &event_notifier_group->enablers_head, node)
422 lttng_event_notifier_enabler_destroy(event_notifier_enabler);
423
424 list_for_each_entry_safe(event_notifier_priv, tmpevent_notifier_priv,
425 &event_notifier_group->event_notifiers_head, node)
426 _lttng_event_destroy(&event_notifier_priv->pub->parent);
427
428 if (event_notifier_group->error_counter) {
429 struct lttng_counter *error_counter = event_notifier_group->error_counter;
430
431 error_counter->ops->counter_destroy(error_counter->counter);
432 module_put(error_counter->transport->owner);
433 lttng_kvfree(error_counter);
434 event_notifier_group->error_counter = NULL;
435 }
436
437 event_notifier_group->ops->priv->channel_destroy(event_notifier_group->chan);
438 module_put(event_notifier_group->transport->owner);
439 list_del(&event_notifier_group->node);
440
441 mutex_unlock(&sessions_mutex);
442 lttng_kvfree(event_notifier_group);
443 }
444
445 int lttng_session_statedump(struct lttng_kernel_session *session)
446 {
447 int ret;
448
449 mutex_lock(&sessions_mutex);
450 ret = lttng_statedump_start(session);
451 mutex_unlock(&sessions_mutex);
452 return ret;
453 }
454
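/*
 * Activate a session: synchronize enablers, choose each channel's event
 * header type (compact below 31 event IDs, large otherwise), clear stream
 * quiescence, then emit the metadata statedump and start the kernel
 * statedump.
 */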
455 int lttng_session_enable(struct lttng_kernel_session *session)
456 {
457 int ret = 0;
458 struct lttng_channel *chan;
459
460 mutex_lock(&sessions_mutex);
461 if (session->active) {
462 ret = -EBUSY;
463 goto end;
464 }
465
466 /* Set transient enabler state to "enabled" */
467 session->priv->tstate = 1;
468
469 /* We need to sync enablers with session before activation. */
470 lttng_session_sync_event_enablers(session);
471
472 /*
473 * Snapshot the number of events per channel to know the type of header
474 * we need to use.
475 */
476 list_for_each_entry(chan, &session->priv->chan, list) {
477 if (chan->header_type)
478 			continue; /* don't change it across session stop/restart */
479 if (chan->free_event_id < 31)
480 chan->header_type = 1; /* compact */
481 else
482 chan->header_type = 2; /* large */
483 }
484
485 /* Clear each stream's quiescent state. */
486 list_for_each_entry(chan, &session->priv->chan, list) {
487 if (chan->channel_type != METADATA_CHANNEL)
488 lib_ring_buffer_clear_quiescent_channel(chan->chan);
489 }
490
491 WRITE_ONCE(session->active, 1);
492 WRITE_ONCE(session->priv->been_active, 1);
493 ret = _lttng_session_metadata_statedump(session);
494 if (ret) {
495 WRITE_ONCE(session->active, 0);
496 goto end;
497 }
498 ret = lttng_statedump_start(session);
499 if (ret)
500 WRITE_ONCE(session->active, 0);
501 end:
502 mutex_unlock(&sessions_mutex);
503 return ret;
504 }
505
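/*
 * Deactivate a session: mark it inactive, resynchronize enablers with the
 * "disabled" transient state and quiesce every non-metadata channel.
 */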
506 int lttng_session_disable(struct lttng_kernel_session *session)
507 {
508 int ret = 0;
509 struct lttng_channel *chan;
510
511 mutex_lock(&sessions_mutex);
512 if (!session->active) {
513 ret = -EBUSY;
514 goto end;
515 }
516 WRITE_ONCE(session->active, 0);
517
518 /* Set transient enabler state to "disabled" */
519 session->priv->tstate = 0;
520 lttng_session_sync_event_enablers(session);
521
522 /* Set each stream's quiescent state. */
523 list_for_each_entry(chan, &session->priv->chan, list) {
524 if (chan->channel_type != METADATA_CHANNEL)
525 lib_ring_buffer_set_quiescent_channel(chan->chan);
526 }
527 end:
528 mutex_unlock(&sessions_mutex);
529 return ret;
530 }
531
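/*
 * Regenerate the metadata of an active session: reset the metadata cache
 * and stream positions, bump the cache version, clear the per-channel and
 * per-event "dumped" flags and redo the metadata statedump.
 */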
532 int lttng_session_metadata_regenerate(struct lttng_kernel_session *session)
533 {
534 int ret = 0;
535 struct lttng_channel *chan;
536 struct lttng_kernel_event_recorder_private *event_recorder_priv;
537 struct lttng_metadata_cache *cache = session->priv->metadata_cache;
538 struct lttng_metadata_stream *stream;
539
540 mutex_lock(&sessions_mutex);
541 if (!session->active) {
542 ret = -EBUSY;
543 goto end;
544 }
545
546 mutex_lock(&cache->lock);
547 memset(cache->data, 0, cache->cache_alloc);
548 cache->metadata_written = 0;
549 cache->version++;
550 list_for_each_entry(stream, &session->priv->metadata_cache->metadata_stream, list) {
551 stream->metadata_out = 0;
552 stream->metadata_in = 0;
553 }
554 mutex_unlock(&cache->lock);
555
556 session->priv->metadata_dumped = 0;
557 list_for_each_entry(chan, &session->priv->chan, list) {
558 chan->metadata_dumped = 0;
559 }
560
561 list_for_each_entry(event_recorder_priv, &session->priv->events, node) {
562 event_recorder_priv->metadata_dumped = 0;
563 }
564
565 ret = _lttng_session_metadata_statedump(session);
566
567 end:
568 mutex_unlock(&sessions_mutex);
569 return ret;
570 }
571
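/*
 * Enable/disable a channel. Refused on the metadata channel (-EPERM) and
 * when the channel is already in the requested state (-EEXIST). The change
 * is propagated to events through enabler synchronization.
 */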
572 int lttng_channel_enable(struct lttng_channel *channel)
573 {
574 int ret = 0;
575
576 mutex_lock(&sessions_mutex);
577 if (channel->channel_type == METADATA_CHANNEL) {
578 ret = -EPERM;
579 goto end;
580 }
581 if (channel->enabled) {
582 ret = -EEXIST;
583 goto end;
584 }
585 /* Set transient enabler state to "enabled" */
586 channel->tstate = 1;
587 lttng_session_sync_event_enablers(channel->session);
588 /* Set atomically the state to "enabled" */
589 WRITE_ONCE(channel->enabled, 1);
590 end:
591 mutex_unlock(&sessions_mutex);
592 return ret;
593 }
594
595 int lttng_channel_disable(struct lttng_channel *channel)
596 {
597 int ret = 0;
598
599 mutex_lock(&sessions_mutex);
600 if (channel->channel_type == METADATA_CHANNEL) {
601 ret = -EPERM;
602 goto end;
603 }
604 if (!channel->enabled) {
605 ret = -EEXIST;
606 goto end;
607 }
608 /* Set atomically the state to "disabled" */
609 WRITE_ONCE(channel->enabled, 0);
610 	/* Set transient enabler state to "disabled" */
611 channel->tstate = 0;
612 lttng_session_sync_event_enablers(channel->session);
613 end:
614 mutex_unlock(&sessions_mutex);
615 return ret;
616 }
617
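/*
 * Enable an event. Tracepoint and syscall events are controlled by their
 * enablers and cannot be toggled directly (-EINVAL); kprobe/uprobe events
 * are switched atomically; kretprobe events use their own enable-state
 * helper. Events of a metadata channel are refused (-EPERM).
 * lttng_event_disable() below is the symmetric counterpart.
 */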
618 int lttng_event_enable(struct lttng_kernel_event_common *event)
619 {
620 int ret = 0;
621
622 mutex_lock(&sessions_mutex);
623 switch (event->type) {
624 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
625 {
626 struct lttng_kernel_event_recorder *event_recorder =
627 container_of(event, struct lttng_kernel_event_recorder, parent);
628
629 if (event_recorder->chan->channel_type == METADATA_CHANNEL) {
630 ret = -EPERM;
631 goto end;
632 }
633 break;
634 }
635 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
636 switch (event->priv->instrumentation) {
637 case LTTNG_KERNEL_ABI_KRETPROBE:
638 ret = -EINVAL;
639 goto end;
640 default:
641 break;
642 }
643 break;
644 default:
645 break;
646 }
647
648 if (event->enabled) {
649 ret = -EEXIST;
650 goto end;
651 }
652 switch (event->priv->instrumentation) {
653 case LTTNG_KERNEL_ABI_TRACEPOINT: /* Fall-through */
654 case LTTNG_KERNEL_ABI_SYSCALL:
655 ret = -EINVAL;
656 break;
657
658 case LTTNG_KERNEL_ABI_KPROBE: /* Fall-through */
659 case LTTNG_KERNEL_ABI_UPROBE:
660 WRITE_ONCE(event->enabled, 1);
661 break;
662
663 case LTTNG_KERNEL_ABI_KRETPROBE:
664 ret = lttng_kretprobes_event_enable_state(event, 1);
665 break;
666
667 case LTTNG_KERNEL_ABI_FUNCTION: /* Fall-through */
668 case LTTNG_KERNEL_ABI_NOOP: /* Fall-through */
669 default:
670 WARN_ON_ONCE(1);
671 ret = -EINVAL;
672 }
673 end:
674 mutex_unlock(&sessions_mutex);
675 return ret;
676 }
677
678 int lttng_event_disable(struct lttng_kernel_event_common *event)
679 {
680 int ret = 0;
681
682 mutex_lock(&sessions_mutex);
683 switch (event->type) {
684 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
685 {
686 struct lttng_kernel_event_recorder *event_recorder =
687 container_of(event, struct lttng_kernel_event_recorder, parent);
688
689 if (event_recorder->chan->channel_type == METADATA_CHANNEL) {
690 ret = -EPERM;
691 goto end;
692 }
693 break;
694 }
695 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
696 switch (event->priv->instrumentation) {
697 case LTTNG_KERNEL_ABI_KRETPROBE:
698 ret = -EINVAL;
699 goto end;
700 default:
701 break;
702 }
703 break;
704 default:
705 break;
706 }
707
708 if (!event->enabled) {
709 ret = -EEXIST;
710 goto end;
711 }
712 switch (event->priv->instrumentation) {
713 case LTTNG_KERNEL_ABI_TRACEPOINT: /* Fall-through */
714 case LTTNG_KERNEL_ABI_SYSCALL:
715 ret = -EINVAL;
716 break;
717
718 case LTTNG_KERNEL_ABI_KPROBE: /* Fall-through */
719 case LTTNG_KERNEL_ABI_UPROBE:
720 WRITE_ONCE(event->enabled, 0);
721 break;
722
723 case LTTNG_KERNEL_ABI_KRETPROBE:
724 ret = lttng_kretprobes_event_enable_state(event, 0);
725 break;
726
727 case LTTNG_KERNEL_ABI_FUNCTION: /* Fall-through */
728 case LTTNG_KERNEL_ABI_NOOP: /* Fall-through */
729 default:
730 WARN_ON_ONCE(1);
731 ret = -EINVAL;
732 }
733 end:
734 mutex_unlock(&sessions_mutex);
735 return ret;
736 }
737
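/*
 * Create a channel backed by the named ring buffer transport. Refused on a
 * session which has already been active, except for the metadata channel.
 * Holds a reference on the transport module for the channel lifetime.
 */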
738 struct lttng_channel *lttng_channel_create(struct lttng_kernel_session *session,
739 const char *transport_name,
740 void *buf_addr,
741 size_t subbuf_size, size_t num_subbuf,
742 unsigned int switch_timer_interval,
743 unsigned int read_timer_interval,
744 enum channel_type channel_type)
745 {
746 struct lttng_channel *chan;
747 struct lttng_transport *transport = NULL;
748
749 mutex_lock(&sessions_mutex);
750 if (session->priv->been_active && channel_type != METADATA_CHANNEL)
751 goto active; /* Refuse to add channel to active session */
752 transport = lttng_transport_find(transport_name);
753 if (!transport) {
754 printk(KERN_WARNING "LTTng: transport %s not found\n",
755 transport_name);
756 goto notransport;
757 }
758 if (!try_module_get(transport->owner)) {
759 printk(KERN_WARNING "LTTng: Can't lock transport module.\n");
760 goto notransport;
761 }
762 chan = kzalloc(sizeof(struct lttng_channel), GFP_KERNEL);
763 if (!chan)
764 goto nomem;
765 chan->session = session;
766 chan->id = session->priv->free_chan_id++;
767 chan->ops = &transport->ops;
768 /*
769 * Note: the channel creation op already writes into the packet
770 * headers. Therefore the "chan" information used as input
771 * should be already accessible.
772 */
773 chan->chan = transport->ops.priv->channel_create(transport_name,
774 chan, buf_addr, subbuf_size, num_subbuf,
775 switch_timer_interval, read_timer_interval);
776 if (!chan->chan)
777 goto create_error;
778 chan->tstate = 1;
779 chan->enabled = 1;
780 chan->transport = transport;
781 chan->channel_type = channel_type;
782 list_add(&chan->list, &session->priv->chan);
783 mutex_unlock(&sessions_mutex);
784 return chan;
785
786 create_error:
787 kfree(chan);
788 nomem:
789 if (transport)
790 module_put(transport->owner);
791 notransport:
792 active:
793 mutex_unlock(&sessions_mutex);
794 return NULL;
795 }
796
797 /*
798 * Only used internally at session destruction for per-cpu channels, and
799  * when the metadata channel is released.
800 * Needs to be called with sessions mutex held.
801 */
802 static
803 void _lttng_channel_destroy(struct lttng_channel *chan)
804 {
805 chan->ops->priv->channel_destroy(chan->chan);
806 module_put(chan->transport->owner);
807 list_del(&chan->list);
808 lttng_kernel_destroy_context(chan->ctx);
809 kfree(chan);
810 }
811
812 void lttng_metadata_channel_destroy(struct lttng_channel *chan)
813 {
814 BUG_ON(chan->channel_type != METADATA_CHANNEL);
815
816 /* Protect the metadata cache with the sessions_mutex. */
817 mutex_lock(&sessions_mutex);
818 _lttng_channel_destroy(chan);
819 mutex_unlock(&sessions_mutex);
820 }
821 EXPORT_SYMBOL_GPL(lttng_metadata_channel_destroy);
822
823 static
824 void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream)
825 {
826 stream->finalized = 1;
827 wake_up_interruptible(&stream->read_wait);
828 }
829
830
831 /*
832 * Supports event creation while tracing session is active.
833 * Needs to be called with sessions mutex held.
834 */
835 struct lttng_kernel_event_recorder *_lttng_kernel_event_recorder_create(struct lttng_channel *chan,
836 struct lttng_kernel_abi_event *event_param,
837 const struct lttng_kernel_event_desc *event_desc,
838 enum lttng_kernel_abi_instrumentation itype)
839 {
840 struct lttng_kernel_session *session = chan->session;
841 struct lttng_kernel_event_recorder *event_recorder;
842 struct lttng_kernel_event_recorder_private *event_recorder_priv;
843 const char *event_name;
844 struct hlist_head *head;
845 int ret;
846
847 if (chan->free_event_id == -1U) {
848 ret = -EMFILE;
849 goto full;
850 }
851
852 switch (itype) {
853 case LTTNG_KERNEL_ABI_TRACEPOINT:
854 event_name = event_desc->event_name;
855 break;
856
857 case LTTNG_KERNEL_ABI_KPROBE: /* Fall-through */
858 case LTTNG_KERNEL_ABI_UPROBE: /* Fall-through */
859 case LTTNG_KERNEL_ABI_KRETPROBE: /* Fall-through */
860 case LTTNG_KERNEL_ABI_SYSCALL:
861 event_name = event_param->name;
862 break;
863
864 case LTTNG_KERNEL_ABI_FUNCTION: /* Fall-through */
865 case LTTNG_KERNEL_ABI_NOOP: /* Fall-through */
866 default:
867 WARN_ON_ONCE(1);
868 ret = -EINVAL;
869 goto type_error;
870 }
871
872 head = utils_borrow_hash_table_bucket(session->priv->events_ht.table,
873 LTTNG_EVENT_HT_SIZE, event_name);
874 lttng_hlist_for_each_entry(event_recorder_priv, head, hlist) {
875 WARN_ON_ONCE(!event_recorder_priv->parent.desc);
876 if (!strncmp(event_recorder_priv->parent.desc->event_name, event_name,
877 LTTNG_KERNEL_ABI_SYM_NAME_LEN - 1)
878 && chan == event_recorder_priv->pub->chan) {
879 ret = -EEXIST;
880 goto exist;
881 }
882 }
883
884 event_recorder = kmem_cache_zalloc(event_recorder_cache, GFP_KERNEL);
885 if (!event_recorder) {
886 ret = -ENOMEM;
887 goto cache_error;
888 }
889 event_recorder_priv = kmem_cache_zalloc(event_recorder_private_cache, GFP_KERNEL);
890 if (!event_recorder_priv) {
891 ret = -ENOMEM;
892 goto cache_private_error;
893 }
894 event_recorder_priv->pub = event_recorder;
895 event_recorder_priv->parent.pub = &event_recorder->parent;
896 event_recorder->priv = event_recorder_priv;
897 event_recorder->parent.priv = &event_recorder_priv->parent;
898 event_recorder->parent.type = LTTNG_KERNEL_EVENT_TYPE_RECORDER;
899
900 event_recorder->parent.run_filter = lttng_kernel_interpret_event_filter;
901 event_recorder->chan = chan;
902 event_recorder->priv->id = chan->free_event_id++;
903 event_recorder->priv->parent.instrumentation = itype;
904 INIT_LIST_HEAD(&event_recorder->priv->parent.filter_bytecode_runtime_head);
905 INIT_LIST_HEAD(&event_recorder->priv->parent.enablers_ref_head);
906
907 switch (itype) {
908 case LTTNG_KERNEL_ABI_TRACEPOINT:
909 /* Event will be enabled by enabler sync. */
910 event_recorder->parent.enabled = 0;
911 event_recorder->priv->parent.registered = 0;
912 event_recorder->priv->parent.desc = lttng_event_desc_get(event_name);
913 if (!event_recorder->priv->parent.desc) {
914 ret = -ENOENT;
915 goto register_error;
916 }
917 /* Populate lttng_event structure before event registration. */
918 smp_wmb();
919 break;
920
921 case LTTNG_KERNEL_ABI_KPROBE:
922 /*
923 * Needs to be explicitly enabled after creation, since
924 * we may want to apply filters.
925 */
926 event_recorder->parent.enabled = 0;
927 event_recorder->priv->parent.registered = 1;
928 /*
929 * Populate lttng_event structure before event
930 * registration.
931 */
932 smp_wmb();
933 ret = lttng_kprobes_register_event(event_name,
934 event_param->u.kprobe.symbol_name,
935 event_param->u.kprobe.offset,
936 event_param->u.kprobe.addr,
937 event_recorder);
938 if (ret) {
939 ret = -EINVAL;
940 goto register_error;
941 }
942 ret = try_module_get(event_recorder->priv->parent.desc->owner);
943 WARN_ON_ONCE(!ret);
944 break;
945
946 case LTTNG_KERNEL_ABI_KRETPROBE:
947 {
948 struct lttng_kernel_event_recorder *event_recorder_return;
949 struct lttng_kernel_event_recorder_private *event_recorder_return_priv;
950
951 /* kretprobe defines 2 events */
952 /*
953 * Needs to be explicitly enabled after creation, since
954 * we may want to apply filters.
955 */
956 event_recorder->parent.enabled = 0;
957 event_recorder->priv->parent.registered = 1;
958
959 event_recorder_return = kmem_cache_zalloc(event_recorder_cache, GFP_KERNEL);
960 if (!event_recorder_return) {
961 ret = -ENOMEM;
962 goto register_error;
963 }
964 event_recorder_return_priv = kmem_cache_zalloc(event_recorder_private_cache, GFP_KERNEL);
965 if (!event_recorder_return_priv) {
966 kmem_cache_free(event_recorder_cache, event_recorder_return);
967 ret = -ENOMEM;
968 goto register_error;
969 }
970 event_recorder_return_priv->pub = event_recorder_return;
971 event_recorder_return_priv->parent.pub = &event_recorder_return->parent;
972 event_recorder_return->priv = event_recorder_return_priv;
973 event_recorder_return->parent.priv = &event_recorder_return_priv->parent;
974 event_recorder_return->parent.type = LTTNG_KERNEL_EVENT_TYPE_RECORDER;
975
976 event_recorder_return->parent.run_filter = lttng_kernel_interpret_event_filter;
977 event_recorder_return->chan = chan;
978 event_recorder_return->priv->id = chan->free_event_id++;
979 event_recorder_return->priv->parent.instrumentation = itype;
980 event_recorder_return->parent.enabled = 0;
981 event_recorder_return->priv->parent.registered = 1;
982 INIT_LIST_HEAD(&event_recorder_return->priv->parent.filter_bytecode_runtime_head);
983 INIT_LIST_HEAD(&event_recorder_return->priv->parent.enablers_ref_head);
984 /*
985 * Populate lttng_event structure before kretprobe registration.
986 */
987 smp_wmb();
988 ret = lttng_kretprobes_register(event_name,
989 event_param->u.kretprobe.symbol_name,
990 event_param->u.kretprobe.offset,
991 event_param->u.kretprobe.addr,
992 event_recorder, event_recorder_return);
993 if (ret) {
994 kmem_cache_free(event_recorder_private_cache, event_recorder_return_priv);
995 kmem_cache_free(event_recorder_cache, event_recorder_return);
996 ret = -EINVAL;
997 goto register_error;
998 }
999 /* Take 2 refs on the module: one per event. */
1000 ret = try_module_get(event_recorder->priv->parent.desc->owner);
1001 WARN_ON_ONCE(!ret);
1002 ret = try_module_get(event_recorder_return->priv->parent.desc->owner);
1003 WARN_ON_ONCE(!ret);
1004 ret = _lttng_event_metadata_statedump(chan->session, chan,
1005 event_recorder_return);
1006 WARN_ON_ONCE(ret > 0);
1007 if (ret) {
1008 kmem_cache_free(event_recorder_private_cache, event_recorder_return_priv);
1009 kmem_cache_free(event_recorder_cache, event_recorder_return);
1010 module_put(event_recorder_return->priv->parent.desc->owner);
1011 module_put(event_recorder->priv->parent.desc->owner);
1012 goto statedump_error;
1013 }
1014 list_add(&event_recorder_return->priv->node, &chan->session->priv->events);
1015 break;
1016 }
1017
1018 case LTTNG_KERNEL_ABI_SYSCALL:
1019 /*
1020 * Needs to be explicitly enabled after creation, since
1021 * we may want to apply filters.
1022 */
1023 event_recorder->parent.enabled = 0;
1024 event_recorder->priv->parent.registered = 0;
1025 event_recorder->priv->parent.desc = event_desc;
1026 switch (event_param->u.syscall.entryexit) {
1027 case LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT:
1028 ret = -EINVAL;
1029 goto register_error;
1030 case LTTNG_KERNEL_ABI_SYSCALL_ENTRY:
1031 event_recorder->priv->parent.u.syscall.entryexit = LTTNG_SYSCALL_ENTRY;
1032 break;
1033 case LTTNG_KERNEL_ABI_SYSCALL_EXIT:
1034 event_recorder->priv->parent.u.syscall.entryexit = LTTNG_SYSCALL_EXIT;
1035 break;
1036 }
1037 switch (event_param->u.syscall.abi) {
1038 case LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL:
1039 ret = -EINVAL;
1040 goto register_error;
1041 case LTTNG_KERNEL_ABI_SYSCALL_ABI_NATIVE:
1042 event_recorder->priv->parent.u.syscall.abi = LTTNG_SYSCALL_ABI_NATIVE;
1043 break;
1044 case LTTNG_KERNEL_ABI_SYSCALL_ABI_COMPAT:
1045 event_recorder->priv->parent.u.syscall.abi = LTTNG_SYSCALL_ABI_COMPAT;
1046 break;
1047 }
1048 if (!event_recorder->priv->parent.desc) {
1049 ret = -EINVAL;
1050 goto register_error;
1051 }
1052 break;
1053
1054 case LTTNG_KERNEL_ABI_UPROBE:
1055 /*
1056 * Needs to be explicitly enabled after creation, since
1057 * we may want to apply filters.
1058 */
1059 event_recorder->parent.enabled = 0;
1060 event_recorder->priv->parent.registered = 1;
1061
1062 /*
1063 * Populate lttng_event structure before event
1064 * registration.
1065 */
1066 smp_wmb();
1067
1068 ret = lttng_uprobes_register_event(event_param->name,
1069 event_param->u.uprobe.fd,
1070 event_recorder);
1071 if (ret)
1072 goto register_error;
1073 ret = try_module_get(event_recorder->priv->parent.desc->owner);
1074 WARN_ON_ONCE(!ret);
1075 break;
1076
1077 case LTTNG_KERNEL_ABI_FUNCTION: /* Fall-through */
1078 case LTTNG_KERNEL_ABI_NOOP: /* Fall-through */
1079 default:
1080 WARN_ON_ONCE(1);
1081 ret = -EINVAL;
1082 goto register_error;
1083 }
1084 ret = _lttng_event_metadata_statedump(chan->session, chan, event_recorder);
1085 WARN_ON_ONCE(ret > 0);
1086 if (ret) {
1087 goto statedump_error;
1088 }
1089 hlist_add_head(&event_recorder->priv->hlist, head);
1090 list_add(&event_recorder->priv->node, &chan->session->priv->events);
1091 return event_recorder;
1092
1093 statedump_error:
1094 /* If a statedump error occurs, events will not be readable. */
1095 register_error:
1096 kmem_cache_free(event_recorder_private_cache, event_recorder_priv);
1097 cache_private_error:
1098 kmem_cache_free(event_recorder_cache, event_recorder);
1099 cache_error:
1100 exist:
1101 type_error:
1102 full:
1103 return ERR_PTR(ret);
1104 }
1105
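/*
 * Create an event notifier within a group. Needs to be called with sessions
 * mutex held. Rejects duplicates (same name, group and user token) and,
 * when an error counter is attached to the group, validates and clears the
 * notifier's error counter bucket.
 */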
1106 struct lttng_kernel_event_notifier *_lttng_event_notifier_create(
1107 const struct lttng_kernel_event_desc *event_desc,
1108 uint64_t token, uint64_t error_counter_index,
1109 struct lttng_event_notifier_group *event_notifier_group,
1110 struct lttng_kernel_abi_event_notifier *event_notifier_param,
1111 enum lttng_kernel_abi_instrumentation itype)
1112 {
1113 struct lttng_kernel_event_notifier *event_notifier;
1114 struct lttng_kernel_event_notifier_private *event_notifier_priv;
1115 struct lttng_counter *error_counter;
1116 const char *event_name;
1117 struct hlist_head *head;
1118 int ret;
1119
1120 switch (itype) {
1121 case LTTNG_KERNEL_ABI_TRACEPOINT:
1122 event_name = event_desc->event_name;
1123 break;
1124
1125 case LTTNG_KERNEL_ABI_KPROBE: /* Fall-through */
1126 case LTTNG_KERNEL_ABI_UPROBE: /* Fall-through */
1127 case LTTNG_KERNEL_ABI_SYSCALL:
1128 event_name = event_notifier_param->event.name;
1129 break;
1130
1131 case LTTNG_KERNEL_ABI_KRETPROBE: /* Fall-through */
1132 case LTTNG_KERNEL_ABI_FUNCTION: /* Fall-through */
1133 case LTTNG_KERNEL_ABI_NOOP: /* Fall-through */
1134 default:
1135 WARN_ON_ONCE(1);
1136 ret = -EINVAL;
1137 goto type_error;
1138 }
1139
1140 head = utils_borrow_hash_table_bucket(event_notifier_group->event_notifiers_ht.table,
1141 LTTNG_EVENT_NOTIFIER_HT_SIZE, event_name);
1142 lttng_hlist_for_each_entry(event_notifier_priv, head, hlist) {
1143 WARN_ON_ONCE(!event_notifier_priv->parent.desc);
1144 if (!strncmp(event_notifier_priv->parent.desc->event_name, event_name,
1145 LTTNG_KERNEL_ABI_SYM_NAME_LEN - 1)
1146 && event_notifier_group == event_notifier_priv->group
1147 && token == event_notifier_priv->parent.user_token) {
1148 ret = -EEXIST;
1149 goto exist;
1150 }
1151 }
1152
1153 event_notifier = kmem_cache_zalloc(event_notifier_cache, GFP_KERNEL);
1154 if (!event_notifier) {
1155 ret = -ENOMEM;
1156 goto cache_error;
1157 }
1158 event_notifier_priv = kmem_cache_zalloc(event_notifier_private_cache, GFP_KERNEL);
1159 if (!event_notifier_priv) {
1160 ret = -ENOMEM;
1161 goto cache_private_error;
1162 }
1163 event_notifier_priv->pub = event_notifier;
1164 event_notifier_priv->parent.pub = &event_notifier->parent;
1165 event_notifier->priv = event_notifier_priv;
1166 event_notifier->parent.priv = &event_notifier_priv->parent;
1167 event_notifier->parent.type = LTTNG_KERNEL_EVENT_TYPE_NOTIFIER;
1168
1169 event_notifier->priv->group = event_notifier_group;
1170 event_notifier->priv->parent.user_token = token;
1171 event_notifier->priv->error_counter_index = error_counter_index;
1172 event_notifier->priv->num_captures = 0;
1173 event_notifier->priv->parent.instrumentation = itype;
1174 event_notifier->notification_send = lttng_event_notifier_notification_send;
1175 INIT_LIST_HEAD(&event_notifier->priv->parent.filter_bytecode_runtime_head);
1176 INIT_LIST_HEAD(&event_notifier->priv->parent.enablers_ref_head);
1177 INIT_LIST_HEAD(&event_notifier->priv->capture_bytecode_runtime_head);
1178 event_notifier->parent.run_filter = lttng_kernel_interpret_event_filter;
1179
1180 switch (itype) {
1181 case LTTNG_KERNEL_ABI_TRACEPOINT:
1182 /* Event will be enabled by enabler sync. */
1183 event_notifier->parent.enabled = 0;
1184 event_notifier->priv->parent.registered = 0;
1185 event_notifier->priv->parent.desc = lttng_event_desc_get(event_name);
1186 if (!event_notifier->priv->parent.desc) {
1187 ret = -ENOENT;
1188 goto register_error;
1189 }
1190 /* Populate lttng_event_notifier structure before event registration. */
1191 smp_wmb();
1192 break;
1193
1194 case LTTNG_KERNEL_ABI_KPROBE:
1195 /*
1196 * Needs to be explicitly enabled after creation, since
1197 * we may want to apply filters.
1198 */
1199 event_notifier->parent.enabled = 0;
1200 event_notifier->priv->parent.registered = 1;
1201 /*
1202 * Populate lttng_event_notifier structure before event
1203 * registration.
1204 */
1205 smp_wmb();
1206 ret = lttng_kprobes_register_event_notifier(
1207 event_notifier_param->event.u.kprobe.symbol_name,
1208 event_notifier_param->event.u.kprobe.offset,
1209 event_notifier_param->event.u.kprobe.addr,
1210 event_notifier);
1211 if (ret) {
1212 ret = -EINVAL;
1213 goto register_error;
1214 }
1215 ret = try_module_get(event_notifier->priv->parent.desc->owner);
1216 WARN_ON_ONCE(!ret);
1217 break;
1218
1219 case LTTNG_KERNEL_ABI_SYSCALL:
1220 /*
1221 * Needs to be explicitly enabled after creation, since
1222 * we may want to apply filters.
1223 */
1224 event_notifier->parent.enabled = 0;
1225 event_notifier->priv->parent.registered = 0;
1226 event_notifier->priv->parent.desc = event_desc;
1227 switch (event_notifier_param->event.u.syscall.entryexit) {
1228 case LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT:
1229 ret = -EINVAL;
1230 goto register_error;
1231 case LTTNG_KERNEL_ABI_SYSCALL_ENTRY:
1232 event_notifier->priv->parent.u.syscall.entryexit = LTTNG_SYSCALL_ENTRY;
1233 break;
1234 case LTTNG_KERNEL_ABI_SYSCALL_EXIT:
1235 event_notifier->priv->parent.u.syscall.entryexit = LTTNG_SYSCALL_EXIT;
1236 break;
1237 }
1238 switch (event_notifier_param->event.u.syscall.abi) {
1239 case LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL:
1240 ret = -EINVAL;
1241 goto register_error;
1242 case LTTNG_KERNEL_ABI_SYSCALL_ABI_NATIVE:
1243 event_notifier->priv->parent.u.syscall.abi = LTTNG_SYSCALL_ABI_NATIVE;
1244 break;
1245 case LTTNG_KERNEL_ABI_SYSCALL_ABI_COMPAT:
1246 event_notifier->priv->parent.u.syscall.abi = LTTNG_SYSCALL_ABI_COMPAT;
1247 break;
1248 }
1249
1250 if (!event_notifier->priv->parent.desc) {
1251 ret = -EINVAL;
1252 goto register_error;
1253 }
1254 break;
1255
1256 case LTTNG_KERNEL_ABI_UPROBE:
1257 /*
1258 * Needs to be explicitly enabled after creation, since
1259 * we may want to apply filters.
1260 */
1261 event_notifier->parent.enabled = 0;
1262 event_notifier->priv->parent.registered = 1;
1263
1264 /*
1265 * Populate lttng_event_notifier structure before
1266 * event_notifier registration.
1267 */
1268 smp_wmb();
1269
1270 ret = lttng_uprobes_register_event_notifier(
1271 event_notifier_param->event.name,
1272 event_notifier_param->event.u.uprobe.fd,
1273 event_notifier);
1274 if (ret)
1275 goto register_error;
1276 ret = try_module_get(event_notifier->priv->parent.desc->owner);
1277 WARN_ON_ONCE(!ret);
1278 break;
1279
1280 case LTTNG_KERNEL_ABI_KRETPROBE: /* Fall-through */
1281 case LTTNG_KERNEL_ABI_FUNCTION: /* Fall-through */
1282 case LTTNG_KERNEL_ABI_NOOP: /* Fall-through */
1283 default:
1284 WARN_ON_ONCE(1);
1285 ret = -EINVAL;
1286 goto register_error;
1287 }
1288
1289 list_add(&event_notifier->priv->node, &event_notifier_group->event_notifiers_head);
1290 hlist_add_head(&event_notifier->priv->hlist, head);
1291
1292 /*
1293 * Clear the error counter bucket. The sessiond keeps track of which
1294 * bucket is currently in use. We trust it. The session lock
1295 * synchronizes against concurrent creation of the error
1296 * counter.
1297 */
1298 error_counter = event_notifier_group->error_counter;
1299 if (error_counter) {
1300 size_t dimension_index[1];
1301
1302 /*
1303 * Check that the index is within the boundary of the counter.
1304 */
1305 if (event_notifier->priv->error_counter_index >= event_notifier_group->error_counter_len) {
1306 printk(KERN_INFO "LTTng: event_notifier: Error counter index out-of-bound: counter-len=%zu, index=%llu\n",
1307 event_notifier_group->error_counter_len, event_notifier->priv->error_counter_index);
1308 ret = -EINVAL;
1309 goto register_error;
1310 }
1311
1312 dimension_index[0] = event_notifier->priv->error_counter_index;
1313 ret = error_counter->ops->counter_clear(error_counter->counter, dimension_index);
1314 if (ret) {
1315 printk(KERN_INFO "LTTng: event_notifier: Unable to clear error counter bucket %llu\n",
1316 event_notifier->priv->error_counter_index);
1317 goto register_error;
1318 }
1319 }
1320
1321 return event_notifier;
1322
1323 register_error:
1324 kmem_cache_free(event_notifier_private_cache, event_notifier_priv);
1325 cache_private_error:
1326 kmem_cache_free(event_notifier_cache, event_notifier);
1327 cache_error:
1328 exist:
1329 type_error:
1330 return ERR_PTR(ret);
1331 }
1332
1333 int lttng_kernel_counter_read(struct lttng_counter *counter,
1334 const size_t *dim_indexes, int32_t cpu,
1335 int64_t *val, bool *overflow, bool *underflow)
1336 {
1337 return counter->ops->counter_read(counter->counter, dim_indexes,
1338 cpu, val, overflow, underflow);
1339 }
1340
1341 int lttng_kernel_counter_aggregate(struct lttng_counter *counter,
1342 const size_t *dim_indexes, int64_t *val,
1343 bool *overflow, bool *underflow)
1344 {
1345 return counter->ops->counter_aggregate(counter->counter, dim_indexes,
1346 val, overflow, underflow);
1347 }
1348
1349 int lttng_kernel_counter_clear(struct lttng_counter *counter,
1350 const size_t *dim_indexes)
1351 {
1352 return counter->ops->counter_clear(counter->counter, dim_indexes);
1353 }
1354
1355 struct lttng_kernel_event_recorder *lttng_kernel_event_recorder_create(struct lttng_channel *chan,
1356 struct lttng_kernel_abi_event *event_param,
1357 const struct lttng_kernel_event_desc *event_desc,
1358 enum lttng_kernel_abi_instrumentation itype)
1359 {
1360 struct lttng_kernel_event_recorder *event;
1361
1362 mutex_lock(&sessions_mutex);
1363 event = _lttng_kernel_event_recorder_create(chan, event_param, event_desc, itype);
1364 mutex_unlock(&sessions_mutex);
1365 return event;
1366 }
1367
1368 struct lttng_kernel_event_notifier *lttng_event_notifier_create(
1369 const struct lttng_kernel_event_desc *event_desc,
1370 uint64_t id, uint64_t error_counter_index,
1371 struct lttng_event_notifier_group *event_notifier_group,
1372 struct lttng_kernel_abi_event_notifier *event_notifier_param,
1373 enum lttng_kernel_abi_instrumentation itype)
1374 {
1375 struct lttng_kernel_event_notifier *event_notifier;
1376
1377 mutex_lock(&sessions_mutex);
1378 event_notifier = _lttng_event_notifier_create(event_desc, id,
1379 error_counter_index, event_notifier_group,
1380 event_notifier_param, itype);
1381 mutex_unlock(&sessions_mutex);
1382 return event_notifier;
1383 }
1384
1385 /* Only used for tracepoints for now. */
1386 static
1387 void register_event(struct lttng_kernel_event_recorder *event_recorder)
1388 {
1389 const struct lttng_kernel_event_desc *desc;
1390 int ret = -EINVAL;
1391
1392 if (event_recorder->priv->parent.registered)
1393 return;
1394
1395 desc = event_recorder->priv->parent.desc;
1396 switch (event_recorder->priv->parent.instrumentation) {
1397 case LTTNG_KERNEL_ABI_TRACEPOINT:
1398 ret = lttng_wrapper_tracepoint_probe_register(desc->event_kname,
1399 desc->probe_callback,
1400 event_recorder);
1401 break;
1402
1403 case LTTNG_KERNEL_ABI_SYSCALL:
1404 ret = lttng_syscall_filter_enable_event(event_recorder->chan, event_recorder);
1405 break;
1406
1407 case LTTNG_KERNEL_ABI_KPROBE: /* Fall-through */
1408 case LTTNG_KERNEL_ABI_UPROBE: /* Fall-through */
1409 case LTTNG_KERNEL_ABI_KRETPROBE:
1410 ret = 0;
1411 break;
1412
1413 case LTTNG_KERNEL_ABI_FUNCTION: /* Fall-through */
1414 case LTTNG_KERNEL_ABI_NOOP: /* Fall-through */
1415 default:
1416 WARN_ON_ONCE(1);
1417 }
1418 if (!ret)
1419 event_recorder->priv->parent.registered = 1;
1420 }
1421
1422 /*
1423 * Only used internally at session destruction.
1424 */
1425 int _lttng_event_unregister(struct lttng_kernel_event_recorder *event_recorder)
1426 {
1427 struct lttng_kernel_event_common_private *event_priv = &event_recorder->priv->parent;
1428 const struct lttng_kernel_event_desc *desc;
1429 int ret = -EINVAL;
1430
1431 if (!event_priv->registered)
1432 return 0;
1433
1434 desc = event_priv->desc;
1435 switch (event_priv->instrumentation) {
1436 case LTTNG_KERNEL_ABI_TRACEPOINT:
1437 ret = lttng_wrapper_tracepoint_probe_unregister(event_priv->desc->event_kname,
1438 event_priv->desc->probe_callback,
1439 event_recorder);
1440 break;
1441
1442 case LTTNG_KERNEL_ABI_KPROBE:
1443 lttng_kprobes_unregister_event(event_recorder);
1444 ret = 0;
1445 break;
1446
1447 case LTTNG_KERNEL_ABI_KRETPROBE:
1448 lttng_kretprobes_unregister(event_recorder);
1449 ret = 0;
1450 break;
1451
1452 case LTTNG_KERNEL_ABI_SYSCALL:
1453 ret = lttng_syscall_filter_disable_event(event_recorder->chan, event_recorder);
1454 break;
1455
1456 case LTTNG_KERNEL_ABI_NOOP:
1457 ret = 0;
1458 break;
1459
1460 case LTTNG_KERNEL_ABI_UPROBE:
1461 lttng_uprobes_unregister_event(event_recorder);
1462 ret = 0;
1463 break;
1464
1465 case LTTNG_KERNEL_ABI_FUNCTION: /* Fall-through */
1466 default:
1467 WARN_ON_ONCE(1);
1468 }
1469 if (!ret)
1470 event_priv->registered = 0;
1471 return ret;
1472 }
1473
1474 /* Only used for tracepoints for now. */
1475 static
1476 void register_event_notifier(struct lttng_kernel_event_notifier *event_notifier)
1477 {
1478 const struct lttng_kernel_event_desc *desc;
1479 int ret = -EINVAL;
1480
1481 if (event_notifier->priv->parent.registered)
1482 return;
1483
1484 desc = event_notifier->priv->parent.desc;
1485 switch (event_notifier->priv->parent.instrumentation) {
1486 case LTTNG_KERNEL_ABI_TRACEPOINT:
1487 ret = lttng_wrapper_tracepoint_probe_register(desc->event_kname,
1488 desc->probe_callback,
1489 event_notifier);
1490 break;
1491
1492 case LTTNG_KERNEL_ABI_SYSCALL:
1493 ret = lttng_syscall_filter_enable_event_notifier(event_notifier);
1494 break;
1495
1496 case LTTNG_KERNEL_ABI_KPROBE: /* Fall-through */
1497 case LTTNG_KERNEL_ABI_UPROBE:
1498 ret = 0;
1499 break;
1500
1501 case LTTNG_KERNEL_ABI_KRETPROBE: /* Fall-through */
1502 case LTTNG_KERNEL_ABI_FUNCTION: /* Fall-through */
1503 case LTTNG_KERNEL_ABI_NOOP: /* Fall-through */
1504 default:
1505 WARN_ON_ONCE(1);
1506 }
1507 if (!ret)
1508 event_notifier->priv->parent.registered = 1;
1509 }
1510
1511 static
1512 int _lttng_event_notifier_unregister(
1513 struct lttng_kernel_event_notifier *event_notifier)
1514 {
1515 const struct lttng_kernel_event_desc *desc;
1516 int ret = -EINVAL;
1517
1518 if (!event_notifier->priv->parent.registered)
1519 return 0;
1520
1521 desc = event_notifier->priv->parent.desc;
1522 switch (event_notifier->priv->parent.instrumentation) {
1523 case LTTNG_KERNEL_ABI_TRACEPOINT:
1524 ret = lttng_wrapper_tracepoint_probe_unregister(event_notifier->priv->parent.desc->event_kname,
1525 event_notifier->priv->parent.desc->probe_callback,
1526 event_notifier);
1527 break;
1528
1529 case LTTNG_KERNEL_ABI_KPROBE:
1530 lttng_kprobes_unregister_event_notifier(event_notifier);
1531 ret = 0;
1532 break;
1533
1534 case LTTNG_KERNEL_ABI_UPROBE:
1535 lttng_uprobes_unregister_event_notifier(event_notifier);
1536 ret = 0;
1537 break;
1538
1539 case LTTNG_KERNEL_ABI_SYSCALL:
1540 ret = lttng_syscall_filter_disable_event_notifier(event_notifier);
1541 break;
1542
1543 case LTTNG_KERNEL_ABI_KRETPROBE: /* Fall-through */
1544 case LTTNG_KERNEL_ABI_FUNCTION: /* Fall-through */
1545 case LTTNG_KERNEL_ABI_NOOP: /* Fall-through */
1546 default:
1547 WARN_ON_ONCE(1);
1548 }
1549 if (!ret)
1550 event_notifier->priv->parent.registered = 0;
1551 return ret;
1552 }
1553
1554 /*
1555 * Only used internally at session destruction.
1556 */
1557 static
1558 void _lttng_event_destroy(struct lttng_kernel_event_common *event)
1559 {
1560 struct lttng_kernel_event_common_private *event_priv = event->priv;
1561 struct lttng_enabler_ref *enabler_ref, *tmp_enabler_ref;
1562
1563 lttng_free_event_filter_runtime(event);
1564 /* Free event enabler refs */
1565 list_for_each_entry_safe(enabler_ref, tmp_enabler_ref,
1566 &event_priv->enablers_ref_head, node)
1567 kfree(enabler_ref);
1568
1569 switch (event->type) {
1570 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
1571 {
1572 struct lttng_kernel_event_recorder *event_recorder =
1573 container_of(event, struct lttng_kernel_event_recorder, parent);
1574
1575 switch (event_priv->instrumentation) {
1576 case LTTNG_KERNEL_ABI_TRACEPOINT:
1577 lttng_event_desc_put(event_priv->desc);
1578 break;
1579
1580 case LTTNG_KERNEL_ABI_KPROBE:
1581 module_put(event_priv->desc->owner);
1582 lttng_kprobes_destroy_event_private(event_recorder);
1583 break;
1584
1585 case LTTNG_KERNEL_ABI_KRETPROBE:
1586 module_put(event_priv->desc->owner);
1587 lttng_kretprobes_destroy_private(event_recorder);
1588 break;
1589
1590 case LTTNG_KERNEL_ABI_SYSCALL:
1591 break;
1592
1593 case LTTNG_KERNEL_ABI_UPROBE:
1594 module_put(event_priv->desc->owner);
1595 lttng_uprobes_destroy_event_private(event_recorder);
1596 break;
1597
1598 case LTTNG_KERNEL_ABI_FUNCTION: /* Fall-through */
1599 case LTTNG_KERNEL_ABI_NOOP: /* Fall-through */
1600 default:
1601 WARN_ON_ONCE(1);
1602 }
1603 list_del(&event_recorder->priv->node);
1604 kmem_cache_free(event_recorder_private_cache, event_recorder->priv);
1605 kmem_cache_free(event_recorder_cache, event_recorder);
1606 break;
1607 }
1608 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
1609 {
1610 struct lttng_kernel_event_notifier *event_notifier =
1611 container_of(event, struct lttng_kernel_event_notifier, parent);
1612
1613 switch (event_notifier->priv->parent.instrumentation) {
1614 case LTTNG_KERNEL_ABI_TRACEPOINT:
1615 lttng_event_desc_put(event_notifier->priv->parent.desc);
1616 break;
1617
1618 case LTTNG_KERNEL_ABI_KPROBE:
1619 module_put(event_notifier->priv->parent.desc->owner);
1620 lttng_kprobes_destroy_event_notifier_private(event_notifier);
1621 break;
1622
1623 case LTTNG_KERNEL_ABI_SYSCALL:
1624 break;
1625
1626 case LTTNG_KERNEL_ABI_UPROBE:
1627 module_put(event_notifier->priv->parent.desc->owner);
1628 lttng_uprobes_destroy_event_notifier_private(event_notifier);
1629 break;
1630
1631 case LTTNG_KERNEL_ABI_KRETPROBE: /* Fall-through */
1632 case LTTNG_KERNEL_ABI_FUNCTION: /* Fall-through */
1633 case LTTNG_KERNEL_ABI_NOOP: /* Fall-through */
1634 default:
1635 WARN_ON_ONCE(1);
1636 }
1637 list_del(&event_notifier->priv->node);
1638 kmem_cache_free(event_notifier_private_cache, event_notifier->priv);
1639 kmem_cache_free(event_notifier_cache, event_notifier);
1640 break;
1641 }
1642 default:
1643 WARN_ON_ONCE(1);
1644 }
1645 }
1646
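/* Map a tracker type to the corresponding ID tracker of the session. */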
1647 struct lttng_id_tracker *get_tracker(struct lttng_kernel_session *session,
1648 enum tracker_type tracker_type)
1649 {
1650 switch (tracker_type) {
1651 case TRACKER_PID:
1652 return &session->pid_tracker;
1653 case TRACKER_VPID:
1654 return &session->vpid_tracker;
1655 case TRACKER_UID:
1656 return &session->uid_tracker;
1657 case TRACKER_VUID:
1658 return &session->vuid_tracker;
1659 case TRACKER_GID:
1660 return &session->gid_tracker;
1661 case TRACKER_VGID:
1662 return &session->vgid_tracker;
1663 default:
1664 WARN_ON_ONCE(1);
1665 return NULL;
1666 }
1667 }
1668
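/*
 * Track an ID in the given tracker (pid/vpid/uid/vuid/gid/vgid). An id of
 * -1 means "track all": the tracker is destroyed so no filtering applies.
 * lttng_session_untrack_id() is the reverse operation, where -1 installs an
 * empty tracker.
 */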
1669 int lttng_session_track_id(struct lttng_kernel_session *session,
1670 enum tracker_type tracker_type, int id)
1671 {
1672 struct lttng_id_tracker *tracker;
1673 int ret;
1674
1675 tracker = get_tracker(session, tracker_type);
1676 if (!tracker)
1677 return -EINVAL;
1678 if (id < -1)
1679 return -EINVAL;
1680 mutex_lock(&sessions_mutex);
1681 if (id == -1) {
1682 /* track all ids: destroy tracker. */
1683 lttng_id_tracker_destroy(tracker, true);
1684 ret = 0;
1685 } else {
1686 ret = lttng_id_tracker_add(tracker, id);
1687 }
1688 mutex_unlock(&sessions_mutex);
1689 return ret;
1690 }
1691
1692 int lttng_session_untrack_id(struct lttng_kernel_session *session,
1693 enum tracker_type tracker_type, int id)
1694 {
1695 struct lttng_id_tracker *tracker;
1696 int ret;
1697
1698 tracker = get_tracker(session, tracker_type);
1699 if (!tracker)
1700 return -EINVAL;
1701 if (id < -1)
1702 return -EINVAL;
1703 mutex_lock(&sessions_mutex);
1704 if (id == -1) {
1705 /* untrack all ids: replace by empty tracker. */
1706 ret = lttng_id_tracker_empty_set(tracker);
1707 } else {
1708 ret = lttng_id_tracker_del(tracker, id);
1709 }
1710 mutex_unlock(&sessions_mutex);
1711 return ret;
1712 }
1713
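/*
 * seq_file iterator over an ID tracker hash table. id_list_start() takes
 * sessions_mutex and id_list_stop() releases it.
 */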
1714 static
1715 void *id_list_start(struct seq_file *m, loff_t *pos)
1716 {
1717 struct lttng_id_tracker *id_tracker = m->private;
1718 struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
1719 struct lttng_id_hash_node *e;
1720 int iter = 0, i;
1721
1722 mutex_lock(&sessions_mutex);
1723 if (id_tracker_p) {
1724 for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
1725 struct hlist_head *head = &id_tracker_p->id_hash[i];
1726
1727 lttng_hlist_for_each_entry(e, head, hlist) {
1728 if (iter++ >= *pos)
1729 return e;
1730 }
1731 }
1732 } else {
1733 /* ID tracker disabled. */
1734 if (iter >= *pos && iter == 0) {
1735 return id_tracker_p; /* empty tracker */
1736 }
1737 iter++;
1738 }
1739 /* End of list */
1740 return NULL;
1741 }
1742
1743 /* Called with sessions_mutex held. */
1744 static
1745 void *id_list_next(struct seq_file *m, void *p, loff_t *ppos)
1746 {
1747 struct lttng_id_tracker *id_tracker = m->private;
1748 struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
1749 struct lttng_id_hash_node *e;
1750 int iter = 0, i;
1751
1752 (*ppos)++;
1753 if (id_tracker_p) {
1754 for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
1755 struct hlist_head *head = &id_tracker_p->id_hash[i];
1756
1757 lttng_hlist_for_each_entry(e, head, hlist) {
1758 if (iter++ >= *ppos)
1759 return e;
1760 }
1761 }
1762 } else {
1763 /* ID tracker disabled. */
1764 if (iter >= *ppos && iter == 0)
1765 return p; /* empty tracker */
1766 iter++;
1767 }
1768
1769 /* End of list */
1770 return NULL;
1771 }
1772
1773 static
1774 void id_list_stop(struct seq_file *m, void *p)
1775 {
1776 mutex_unlock(&sessions_mutex);
1777 }
1778
1779 static
1780 int id_list_show(struct seq_file *m, void *p)
1781 {
1782 struct lttng_id_tracker *id_tracker = m->private;
1783 struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
1784 int id;
1785
1786 if (p == id_tracker_p) {
1787 /* Tracker disabled. */
1788 id = -1;
1789 } else {
1790 const struct lttng_id_hash_node *e = p;
1791
1792 id = lttng_id_tracker_get_node_id(e);
1793 }
1794 switch (id_tracker->tracker_type) {
1795 case TRACKER_PID:
1796 seq_printf(m, "process { pid = %d; };\n", id);
1797 break;
1798 case TRACKER_VPID:
1799 seq_printf(m, "process { vpid = %d; };\n", id);
1800 break;
1801 case TRACKER_UID:
1802 seq_printf(m, "user { uid = %d; };\n", id);
1803 break;
1804 case TRACKER_VUID:
1805 seq_printf(m, "user { vuid = %d; };\n", id);
1806 break;
1807 case TRACKER_GID:
1808 seq_printf(m, "group { gid = %d; };\n", id);
1809 break;
1810 case TRACKER_VGID:
1811 seq_printf(m, "group { vgid = %d; };\n", id);
1812 break;
1813 default:
1814 seq_printf(m, "UNKNOWN { field = %d };\n", id);
1815 }
1816 return 0;
1817 }
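
/*
 * Example of the output produced by this seq_file iterator for a PID
 * tracker (illustrative values). A single entry with id -1 is emitted when
 * the tracker is disabled, i.e. when all IDs are tracked:
 *
 *   process { pid = 1234; };
 *   process { pid = 5678; };
 */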
1818
1819 static
1820 const struct seq_operations lttng_tracker_ids_list_seq_ops = {
1821 .start = id_list_start,
1822 .next = id_list_next,
1823 .stop = id_list_stop,
1824 .show = id_list_show,
1825 };
1826
1827 static
1828 int lttng_tracker_ids_list_open(struct inode *inode, struct file *file)
1829 {
1830 return seq_open(file, &lttng_tracker_ids_list_seq_ops);
1831 }
1832
1833 static
1834 int lttng_tracker_ids_list_release(struct inode *inode, struct file *file)
1835 {
1836 struct seq_file *m = file->private_data;
1837 struct lttng_id_tracker *id_tracker = m->private;
1838 int ret;
1839
1840 WARN_ON_ONCE(!id_tracker);
1841 ret = seq_release(inode, file);
1842 if (!ret)
1843 fput(id_tracker->session->priv->file);
1844 return ret;
1845 }
1846
1847 const struct file_operations lttng_tracker_ids_list_fops = {
1848 .owner = THIS_MODULE,
1849 .open = lttng_tracker_ids_list_open,
1850 .read = seq_read,
1851 .llseek = seq_lseek,
1852 .release = lttng_tracker_ids_list_release,
1853 };
1854
1855 int lttng_session_list_tracker_ids(struct lttng_kernel_session *session,
1856 enum tracker_type tracker_type)
1857 {
1858 struct file *tracker_ids_list_file;
1859 struct seq_file *m;
1860 int file_fd, ret;
1861
1862 file_fd = lttng_get_unused_fd();
1863 if (file_fd < 0) {
1864 ret = file_fd;
1865 goto fd_error;
1866 }
1867
1868 tracker_ids_list_file = anon_inode_getfile("[lttng_tracker_ids_list]",
1869 &lttng_tracker_ids_list_fops,
1870 NULL, O_RDWR);
1871 if (IS_ERR(tracker_ids_list_file)) {
1872 ret = PTR_ERR(tracker_ids_list_file);
1873 goto file_error;
1874 }
1875 if (!atomic_long_add_unless(&session->priv->file->f_count, 1, LONG_MAX)) {
1876 ret = -EOVERFLOW;
1877 goto refcount_error;
1878 }
1879 ret = lttng_tracker_ids_list_fops.open(NULL, tracker_ids_list_file);
1880 if (ret < 0)
1881 goto open_error;
1882 m = tracker_ids_list_file->private_data;
1883
1884 m->private = get_tracker(session, tracker_type);
1885 BUG_ON(!m->private);
1886 fd_install(file_fd, tracker_ids_list_file);
1887
1888 return file_fd;
1889
1890 open_error:
1891 atomic_long_dec(&session->priv->file->f_count);
1892 refcount_error:
1893 fput(tracker_ids_list_file);
1894 file_error:
1895 put_unused_fd(file_fd);
1896 fd_error:
1897 return ret;
1898 }
1899
1900 /*
1901 * Enabler management.
1902 */
1903 static
1904 int lttng_match_enabler_star_glob(const char *desc_name,
1905 const char *pattern)
1906 {
1907 if (!strutils_star_glob_match(pattern, LTTNG_SIZE_MAX,
1908 desc_name, LTTNG_SIZE_MAX))
1909 return 0;
1910 return 1;
1911 }
1912
1913 static
1914 int lttng_match_enabler_name(const char *desc_name,
1915 const char *name)
1916 {
1917 if (strcmp(desc_name, name))
1918 return 0;
1919 return 1;
1920 }
1921
1922 int lttng_desc_match_enabler(const struct lttng_kernel_event_desc *desc,
1923 struct lttng_enabler *enabler)
1924 {
1925 const char *desc_name, *enabler_name;
1926 bool compat = false, entry = false;
1927
1928 enabler_name = enabler->event_param.name;
1929 switch (enabler->event_param.instrumentation) {
1930 case LTTNG_KERNEL_ABI_TRACEPOINT:
1931 desc_name = desc->event_name;
1932 switch (enabler->format_type) {
1933 case LTTNG_ENABLER_FORMAT_STAR_GLOB:
1934 return lttng_match_enabler_star_glob(desc_name, enabler_name);
1935 case LTTNG_ENABLER_FORMAT_NAME:
1936 return lttng_match_enabler_name(desc_name, enabler_name);
1937 default:
1938 return -EINVAL;
1939 }
1940 break;
1941
1942 case LTTNG_KERNEL_ABI_SYSCALL:
1943 desc_name = desc->event_name;
1944 if (!strncmp(desc_name, "compat_", strlen("compat_"))) {
1945 desc_name += strlen("compat_");
1946 compat = true;
1947 }
1948 if (!strncmp(desc_name, "syscall_exit_",
1949 strlen("syscall_exit_"))) {
1950 desc_name += strlen("syscall_exit_");
1951 } else if (!strncmp(desc_name, "syscall_entry_",
1952 strlen("syscall_entry_"))) {
1953 desc_name += strlen("syscall_entry_");
1954 entry = true;
1955 } else {
1956 WARN_ON_ONCE(1);
1957 return -EINVAL;
1958 }
1959 switch (enabler->event_param.u.syscall.entryexit) {
1960 case LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT:
1961 break;
1962 case LTTNG_KERNEL_ABI_SYSCALL_ENTRY:
1963 if (!entry)
1964 return 0;
1965 break;
1966 case LTTNG_KERNEL_ABI_SYSCALL_EXIT:
1967 if (entry)
1968 return 0;
1969 break;
1970 default:
1971 return -EINVAL;
1972 }
1973 switch (enabler->event_param.u.syscall.abi) {
1974 case LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL:
1975 break;
1976 case LTTNG_KERNEL_ABI_SYSCALL_ABI_NATIVE:
1977 if (compat)
1978 return 0;
1979 break;
1980 case LTTNG_KERNEL_ABI_SYSCALL_ABI_COMPAT:
1981 if (!compat)
1982 return 0;
1983 break;
1984 default:
1985 return -EINVAL;
1986 }
1987 switch (enabler->event_param.u.syscall.match) {
1988 case LTTNG_KERNEL_ABI_SYSCALL_MATCH_NAME:
1989 switch (enabler->format_type) {
1990 case LTTNG_ENABLER_FORMAT_STAR_GLOB:
1991 return lttng_match_enabler_star_glob(desc_name, enabler_name);
1992 case LTTNG_ENABLER_FORMAT_NAME:
1993 return lttng_match_enabler_name(desc_name, enabler_name);
1994 default:
1995 return -EINVAL;
1996 }
1997 break;
1998 case LTTNG_KERNEL_ABI_SYSCALL_MATCH_NR:
1999 return -EINVAL; /* Not implemented. */
2000 default:
2001 return -EINVAL;
2002 }
2003 break;
2004
2005 default:
2006 WARN_ON_ONCE(1);
2007 return -EINVAL;
2008 }
2009 }
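
/*
 * For example (illustrative syscall name), a descriptor named
 * "compat_syscall_entry_openat" is normalized above to "openat" with
 * compat = true and entry = true before being matched against the enabler
 * name or star-glob pattern, subject to the entryexit and abi constraints
 * of the enabler.
 */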
2010
2011 static
2012 int lttng_event_enabler_match_event(struct lttng_event_enabler *event_enabler,
2013 struct lttng_kernel_event_recorder *event_recorder)
2014 {
2015 struct lttng_enabler *base_enabler = lttng_event_enabler_as_enabler(
2016 event_enabler);
2017
2018 if (base_enabler->event_param.instrumentation != event_recorder->priv->parent.instrumentation)
2019 return 0;
2020 if (lttng_desc_match_enabler(event_recorder->priv->parent.desc, base_enabler)
2021 && event_recorder->chan == event_enabler->chan)
2022 return 1;
2023 else
2024 return 0;
2025 }
2026
2027 static
2028 int lttng_event_notifier_enabler_match_event_notifier(struct lttng_event_notifier_enabler *event_notifier_enabler,
2029 struct lttng_kernel_event_notifier *event_notifier)
2030 {
2031 struct lttng_enabler *base_enabler = lttng_event_notifier_enabler_as_enabler(
2032 event_notifier_enabler);
2033
2034 if (base_enabler->event_param.instrumentation != event_notifier->priv->parent.instrumentation)
2035 return 0;
2036 if (lttng_desc_match_enabler(event_notifier->priv->parent.desc, base_enabler)
2037 && event_notifier->priv->group == event_notifier_enabler->group
2038 && event_notifier->priv->parent.user_token == event_notifier_enabler->base.user_token)
2039 return 1;
2040 else
2041 return 0;
2042 }
2043
2044 static
2045 struct lttng_enabler_ref *lttng_enabler_ref(
2046 struct list_head *enablers_ref_list,
2047 struct lttng_enabler *enabler)
2048 {
2049 struct lttng_enabler_ref *enabler_ref;
2050
2051 list_for_each_entry(enabler_ref, enablers_ref_list, node) {
2052 if (enabler_ref->ref == enabler)
2053 return enabler_ref;
2054 }
2055 return NULL;
2056 }
2057
2058 static
2059 void lttng_create_tracepoint_event_if_missing(struct lttng_event_enabler *event_enabler)
2060 {
2061 struct lttng_kernel_session *session = event_enabler->chan->session;
2062 struct lttng_kernel_probe_desc *probe_desc;
2063 const struct lttng_kernel_event_desc *desc;
2064 int i;
2065 struct list_head *probe_list;
2066
2067 probe_list = lttng_get_probe_list_head();
2068 /*
2069 * For each probe event, if we find that a probe event matches
2070 * our enabler, create an associated lttng_event if not
2071 * already present.
2072 */
2073 list_for_each_entry(probe_desc, probe_list, head) {
2074 for (i = 0; i < probe_desc->nr_events; i++) {
2075 int found = 0;
2076 struct hlist_head *head;
2077 struct lttng_kernel_event_recorder_private *event_recorder_private;
2078 struct lttng_kernel_event_recorder *event_recorder;
2079
2080 desc = probe_desc->event_desc[i];
2081 if (!lttng_desc_match_enabler(desc,
2082 lttng_event_enabler_as_enabler(event_enabler)))
2083 continue;
2084
2085 /*
2086 * Check if already created.
2087 */
2088 head = utils_borrow_hash_table_bucket(
2089 session->priv->events_ht.table, LTTNG_EVENT_HT_SIZE,
2090 desc->event_name);
2091 lttng_hlist_for_each_entry(event_recorder_private, head, hlist) {
2092 if (event_recorder_private->parent.desc == desc
2093 && event_recorder_private->pub->chan == event_enabler->chan)
2094 found = 1;
2095 }
2096 if (found)
2097 continue;
2098
2099 /*
2100 * We need to create an event for this
2101 * event probe.
2102 */
2103 event_recorder = _lttng_kernel_event_recorder_create(event_enabler->chan,
2104 NULL, desc, LTTNG_KERNEL_ABI_TRACEPOINT);
2105 if (!event_recorder) {
2106 printk(KERN_INFO "LTTng: Unable to create event %s\n",
2107 probe_desc->event_desc[i]->event_name);
2108 }
2109 }
2110 }
2111 }
2112
2113 static
2114 void lttng_create_tracepoint_event_notifier_if_missing(struct lttng_event_notifier_enabler *event_notifier_enabler)
2115 {
2116 struct lttng_event_notifier_group *event_notifier_group = event_notifier_enabler->group;
2117 struct lttng_kernel_probe_desc *probe_desc;
2118 const struct lttng_kernel_event_desc *desc;
2119 int i;
2120 struct list_head *probe_list;
2121
2122 probe_list = lttng_get_probe_list_head();
2123 /*
2124 * For each probe event, if we find that a probe event matches
2125 * our enabler, create an associated lttng_event_notifier if not
2126 * already present.
2127 */
2128 list_for_each_entry(probe_desc, probe_list, head) {
2129 for (i = 0; i < probe_desc->nr_events; i++) {
2130 int found = 0;
2131 struct hlist_head *head;
2132 struct lttng_kernel_event_notifier_private *event_notifier_priv;
2133 struct lttng_kernel_event_notifier *event_notifier;
2134
2135 desc = probe_desc->event_desc[i];
2136 if (!lttng_desc_match_enabler(desc,
2137 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)))
2138 continue;
2139
2140 /*
2141 * Check if already created.
2142 */
2143 head = utils_borrow_hash_table_bucket(
2144 event_notifier_group->event_notifiers_ht.table,
2145 LTTNG_EVENT_NOTIFIER_HT_SIZE, desc->event_name);
2146 lttng_hlist_for_each_entry(event_notifier_priv, head, hlist) {
2147 if (event_notifier_priv->parent.desc == desc
2148 && event_notifier_priv->parent.user_token == event_notifier_enabler->base.user_token)
2149 found = 1;
2150 }
2151 if (found)
2152 continue;
2153
2154 /*
2155 * We need to create an event_notifier for this event probe.
2156 */
2157 event_notifier = _lttng_event_notifier_create(desc,
2158 event_notifier_enabler->base.user_token,
2159 event_notifier_enabler->error_counter_index,
2160 event_notifier_group, NULL,
2161 LTTNG_KERNEL_ABI_TRACEPOINT);
2162 if (IS_ERR(event_notifier)) {
2163 printk(KERN_INFO "Unable to create event_notifier %s\n",
2164 probe_desc->event_desc[i]->event_name);
2165 }
2166 }
2167 }
2168 }
2169
2170 static
2171 void lttng_create_syscall_event_if_missing(struct lttng_event_enabler *event_enabler)
2172 {
2173 int ret;
2174
2175 ret = lttng_syscalls_register_event(event_enabler);
2176 WARN_ON_ONCE(ret);
2177 }
2178
2179 static
2180 void lttng_create_syscall_event_notifier_if_missing(struct lttng_event_notifier_enabler *event_notifier_enabler)
2181 {
2182 int ret;
2183
2184 ret = lttng_syscalls_register_event_notifier(event_notifier_enabler);
2185 WARN_ON_ONCE(ret);
2186 ret = lttng_syscalls_create_matching_event_notifiers(event_notifier_enabler);
2187 WARN_ON_ONCE(ret);
2188 }
2189
2190 /*
2191 * Create struct lttng_kernel_event_recorder if it is missing and its event
2192 * descriptor is present in the list of tracepoint probes.
2193 * Should be called with sessions mutex held.
2194 */
2195 static
2196 void lttng_create_event_if_missing(struct lttng_event_enabler *event_enabler)
2197 {
2198 switch (event_enabler->base.event_param.instrumentation) {
2199 case LTTNG_KERNEL_ABI_TRACEPOINT:
2200 lttng_create_tracepoint_event_if_missing(event_enabler);
2201 break;
2202
2203 case LTTNG_KERNEL_ABI_SYSCALL:
2204 lttng_create_syscall_event_if_missing(event_enabler);
2205 break;
2206
2207 default:
2208 WARN_ON_ONCE(1);
2209 break;
2210 }
2211 }
2212
2213 /*
2214 * Create events associated with an event_enabler (if not already present),
2215 * and add backward reference from the event to the enabler.
2216 * Should be called with sessions mutex held.
2217 */
2218 static
2219 int lttng_event_enabler_ref_events(struct lttng_event_enabler *event_enabler)
2220 {
2221 struct lttng_channel *chan = event_enabler->chan;
2222 struct lttng_kernel_session *session = event_enabler->chan->session;
2223 struct lttng_enabler *base_enabler = lttng_event_enabler_as_enabler(event_enabler);
2224 struct lttng_kernel_event_recorder_private *event_recorder_priv;
2225
2226 if (base_enabler->event_param.instrumentation == LTTNG_KERNEL_ABI_SYSCALL &&
2227 base_enabler->event_param.u.syscall.abi == LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL &&
2228 base_enabler->event_param.u.syscall.match == LTTNG_KERNEL_ABI_SYSCALL_MATCH_NAME &&
2229 !strcmp(base_enabler->event_param.name, "*")) {
2230 int enabled = base_enabler->enabled;
2231 enum lttng_kernel_abi_syscall_entryexit entryexit = base_enabler->event_param.u.syscall.entryexit;
2232
2233 if (entryexit == LTTNG_KERNEL_ABI_SYSCALL_ENTRY || entryexit == LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT)
2234 WRITE_ONCE(chan->syscall_all_entry, enabled);
2235
2236 if (entryexit == LTTNG_KERNEL_ABI_SYSCALL_EXIT || entryexit == LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT)
2237 WRITE_ONCE(chan->syscall_all_exit, enabled);
2238 }
2239
2240 /* First ensure that probe events are created for this enabler. */
2241 lttng_create_event_if_missing(event_enabler);
2242
2243 /* For each event matching event_enabler in session event list. */
2244 list_for_each_entry(event_recorder_priv, &session->priv->events, node) {
2245 struct lttng_kernel_event_recorder *event_recorder = event_recorder_priv->pub;
2246 struct lttng_enabler_ref *enabler_ref;
2247
2248 if (!lttng_event_enabler_match_event(event_enabler, event_recorder))
2249 continue;
2250 enabler_ref = lttng_enabler_ref(&event_recorder_priv->parent.enablers_ref_head,
2251 lttng_event_enabler_as_enabler(event_enabler));
2252 if (!enabler_ref) {
2253 /*
2254 * If no backward ref, create it.
2255 * Add backward ref from event to event_enabler.
2256 */
2257 enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
2258 if (!enabler_ref)
2259 return -ENOMEM;
2260 enabler_ref->ref = lttng_event_enabler_as_enabler(event_enabler);
2261 list_add(&enabler_ref->node,
2262 &event_recorder_priv->parent.enablers_ref_head);
2263 }
2264
2265 /*
2266 * Link filter bytecodes if not linked yet.
2267 */
2268 lttng_enabler_link_bytecode(event_recorder_priv->parent.desc,
2269 lttng_static_ctx,
2270 &event_recorder_priv->parent.filter_bytecode_runtime_head,
2271 &lttng_event_enabler_as_enabler(event_enabler)->filter_bytecode_head);
2272 }
2273 return 0;
2274 }
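
/*
 * Resulting linkage (sketch, with illustrative enabler patterns): each
 * matching event recorder ends up with one lttng_enabler_ref per matching
 * enabler on its enablers_ref_head list, e.g.
 *
 *   event "sched_switch" -> ref("sched_*") -> ref("sched_switch")
 *
 * These references are walked later by lttng_session_sync_event_enablers()
 * to compute the event enabled state and the filter evaluation flag.
 */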
2275
2276 /*
2277 * Create struct lttng_kernel_event_notifier if it is missing and its event
2278 * descriptor is present in the list of tracepoint probes.
2279 * Should be called with sessions mutex held.
2280 */
2281 static
2282 void lttng_create_event_notifier_if_missing(struct lttng_event_notifier_enabler *event_notifier_enabler)
2283 {
2284 switch (event_notifier_enabler->base.event_param.instrumentation) {
2285 case LTTNG_KERNEL_ABI_TRACEPOINT:
2286 lttng_create_tracepoint_event_notifier_if_missing(event_notifier_enabler);
2287 break;
2288
2289 case LTTNG_KERNEL_ABI_SYSCALL:
2290 lttng_create_syscall_event_notifier_if_missing(event_notifier_enabler);
2291 break;
2292
2293 default:
2294 WARN_ON_ONCE(1);
2295 break;
2296 }
2297 }
2298
2299 /*
2300 * Create event_notifiers associated with an event_notifier enabler (if not already present).
2301 */
2302 static
2303 int lttng_event_notifier_enabler_ref_event_notifiers(
2304 struct lttng_event_notifier_enabler *event_notifier_enabler)
2305 {
2306 struct lttng_event_notifier_group *event_notifier_group = event_notifier_enabler->group;
2307 struct lttng_enabler *base_enabler = lttng_event_notifier_enabler_as_enabler(event_notifier_enabler);
2308 struct lttng_kernel_event_notifier_private *event_notifier_priv;
2309
2310 if (base_enabler->event_param.instrumentation == LTTNG_KERNEL_ABI_SYSCALL &&
2311 base_enabler->event_param.u.syscall.abi == LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL &&
2312 base_enabler->event_param.u.syscall.match == LTTNG_KERNEL_ABI_SYSCALL_MATCH_NAME &&
2313 !strcmp(base_enabler->event_param.name, "*")) {
2314
2315 int enabled = base_enabler->enabled;
2316 enum lttng_kernel_abi_syscall_entryexit entryexit = base_enabler->event_param.u.syscall.entryexit;
2317
2318 if (entryexit == LTTNG_KERNEL_ABI_SYSCALL_ENTRY || entryexit == LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT)
2319 WRITE_ONCE(event_notifier_group->syscall_all_entry, enabled);
2320
2321 if (entryexit == LTTNG_KERNEL_ABI_SYSCALL_EXIT || entryexit == LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT)
2322 WRITE_ONCE(event_notifier_group->syscall_all_exit, enabled);
2323
2324 }
2325
2326 /* First ensure that probe event_notifiers are created for this enabler. */
2327 lttng_create_event_notifier_if_missing(event_notifier_enabler);
2328
2329 /* Link the created event_notifier with its associated enabler. */
2330 list_for_each_entry(event_notifier_priv, &event_notifier_group->event_notifiers_head, node) {
2331 struct lttng_kernel_event_notifier *event_notifier = event_notifier_priv->pub;
2332 struct lttng_enabler_ref *enabler_ref;
2333
2334 if (!lttng_event_notifier_enabler_match_event_notifier(event_notifier_enabler, event_notifier))
2335 continue;
2336
2337 enabler_ref = lttng_enabler_ref(&event_notifier_priv->parent.enablers_ref_head,
2338 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler));
2339 if (!enabler_ref) {
2340 /*
2341 * If no backward ref, create it.
2342 * Add backward ref from event_notifier to enabler.
2343 */
2344 enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
2345 if (!enabler_ref)
2346 return -ENOMEM;
2347
2348 enabler_ref->ref = lttng_event_notifier_enabler_as_enabler(
2349 event_notifier_enabler);
2350 list_add(&enabler_ref->node,
2351 &event_notifier_priv->parent.enablers_ref_head);
2352 }
2353
2354 /*
2355 * Link filter bytecodes if not linked yet.
2356 */
2357 lttng_enabler_link_bytecode(event_notifier_priv->parent.desc,
2358 lttng_static_ctx, &event_notifier_priv->parent.filter_bytecode_runtime_head,
2359 &lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->filter_bytecode_head);
2360
2361 /* Link capture bytecodes if not linked yet. */
2362 lttng_enabler_link_bytecode(event_notifier_priv->parent.desc,
2363 lttng_static_ctx, &event_notifier_priv->capture_bytecode_runtime_head,
2364 &event_notifier_enabler->capture_bytecode_head);
2365
2366 event_notifier_priv->num_captures = event_notifier_enabler->num_captures;
2367 }
2368 return 0;
2369 }
2370
2371 /*
2372 * Called at module load time: connect the newly loaded probes to all
2373 * enablers matching their events.
2374 * Called with sessions lock held.
2375 */
2376 int lttng_fix_pending_events(void)
2377 {
2378 struct lttng_kernel_session_private *session_priv;
2379
2380 list_for_each_entry(session_priv, &sessions, list)
2381 lttng_session_lazy_sync_event_enablers(session_priv->pub);
2382 return 0;
2383 }
2384
2385 static bool lttng_event_notifier_group_has_active_event_notifiers(
2386 struct lttng_event_notifier_group *event_notifier_group)
2387 {
2388 struct lttng_event_notifier_enabler *event_notifier_enabler;
2389
2390 list_for_each_entry(event_notifier_enabler, &event_notifier_group->enablers_head,
2391 node) {
2392 if (event_notifier_enabler->base.enabled)
2393 return true;
2394 }
2395 return false;
2396 }
2397
2398 bool lttng_event_notifier_active(void)
2399 {
2400 struct lttng_event_notifier_group *event_notifier_group;
2401
2402 list_for_each_entry(event_notifier_group, &event_notifier_groups, node) {
2403 if (lttng_event_notifier_group_has_active_event_notifiers(event_notifier_group))
2404 return true;
2405 }
2406 return false;
2407 }
2408
2409 int lttng_fix_pending_event_notifiers(void)
2410 {
2411 struct lttng_event_notifier_group *event_notifier_group;
2412
2413 list_for_each_entry(event_notifier_group, &event_notifier_groups, node)
2414 lttng_event_notifier_group_sync_enablers(event_notifier_group);
2415 return 0;
2416 }
2417
2418 struct lttng_event_enabler *lttng_event_enabler_create(
2419 enum lttng_enabler_format_type format_type,
2420 struct lttng_kernel_abi_event *event_param,
2421 struct lttng_channel *chan)
2422 {
2423 struct lttng_event_enabler *event_enabler;
2424
2425 event_enabler = kzalloc(sizeof(*event_enabler), GFP_KERNEL);
2426 if (!event_enabler)
2427 return NULL;
2428 event_enabler->base.format_type = format_type;
2429 INIT_LIST_HEAD(&event_enabler->base.filter_bytecode_head);
2430 memcpy(&event_enabler->base.event_param, event_param,
2431 sizeof(event_enabler->base.event_param));
2432 event_enabler->chan = chan;
2433 /* ctx left NULL */
2434 event_enabler->base.enabled = 0;
2435 mutex_lock(&sessions_mutex);
2436 list_add(&event_enabler->node, &event_enabler->chan->session->priv->enablers_head);
2437 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
2438 mutex_unlock(&sessions_mutex);
2439 return event_enabler;
2440 }
2441
2442 int lttng_event_enabler_enable(struct lttng_event_enabler *event_enabler)
2443 {
2444 mutex_lock(&sessions_mutex);
2445 lttng_event_enabler_as_enabler(event_enabler)->enabled = 1;
2446 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
2447 mutex_unlock(&sessions_mutex);
2448 return 0;
2449 }
2450
2451 int lttng_event_enabler_disable(struct lttng_event_enabler *event_enabler)
2452 {
2453 mutex_lock(&sessions_mutex);
2454 lttng_event_enabler_as_enabler(event_enabler)->enabled = 0;
2455 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
2456 mutex_unlock(&sessions_mutex);
2457 return 0;
2458 }
2459
2460 static
2461 int lttng_enabler_attach_filter_bytecode(struct lttng_enabler *enabler,
2462 struct lttng_kernel_abi_filter_bytecode __user *bytecode)
2463 {
2464 struct lttng_kernel_bytecode_node *bytecode_node;
2465 uint32_t bytecode_len;
2466 int ret;
2467
2468 ret = get_user(bytecode_len, &bytecode->len);
2469 if (ret)
2470 return ret;
2471 bytecode_node = lttng_kvzalloc(sizeof(*bytecode_node) + bytecode_len,
2472 GFP_KERNEL);
2473 if (!bytecode_node)
2474 return -ENOMEM;
2475 ret = copy_from_user(&bytecode_node->bc, bytecode,
2476 sizeof(*bytecode) + bytecode_len);
2477 if (ret)
2478 goto error_free;
2479
2480 bytecode_node->type = LTTNG_KERNEL_BYTECODE_TYPE_FILTER;
2481 bytecode_node->enabler = enabler;
2482 /* Enforce length based on allocated size */
2483 bytecode_node->bc.len = bytecode_len;
2484 list_add_tail(&bytecode_node->node, &enabler->filter_bytecode_head);
2485
2486 return 0;
2487
2488 error_free:
2489 lttng_kvfree(bytecode_node);
2490 return ret;
2491 }
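
/*
 * Layout note (sketch): the ABI bytecode header and its "len" bytes of
 * instructions are copied inline right after the lttng_kernel_bytecode_node
 * fields, hence the allocation of sizeof(*bytecode_node) + bytecode_len and
 * the re-enforcement of bc.len from the length used for the allocation.
 */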
2492
2493 int lttng_event_enabler_attach_filter_bytecode(struct lttng_event_enabler *event_enabler,
2494 struct lttng_kernel_abi_filter_bytecode __user *bytecode)
2495 {
2496 int ret;
2497 ret = lttng_enabler_attach_filter_bytecode(
2498 lttng_event_enabler_as_enabler(event_enabler), bytecode);
2499 if (ret)
2500 goto error;
2501
2502 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
2503 return 0;
2504
2505 error:
2506 return ret;
2507 }
2508
2509 int lttng_event_add_callsite(struct lttng_kernel_event_common *event,
2510 struct lttng_kernel_abi_event_callsite __user *callsite)
2511 {
2512
2513 switch (event->priv->instrumentation) {
2514 case LTTNG_KERNEL_ABI_UPROBE:
2515 return lttng_uprobes_event_add_callsite(event, callsite);
2516 default:
2517 return -EINVAL;
2518 }
2519 }
2520
2521 static
2522 void lttng_enabler_destroy(struct lttng_enabler *enabler)
2523 {
2524 struct lttng_kernel_bytecode_node *filter_node, *tmp_filter_node;
2525
2526 /* Destroy filter bytecode */
2527 list_for_each_entry_safe(filter_node, tmp_filter_node,
2528 &enabler->filter_bytecode_head, node) {
2529 lttng_kvfree(filter_node);
2530 }
2531 }
2532
2533 static
2534 void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler)
2535 {
2536 lttng_enabler_destroy(lttng_event_enabler_as_enabler(event_enabler));
2537
2538 list_del(&event_enabler->node);
2539 kfree(event_enabler);
2540 }
2541
2542 struct lttng_event_notifier_enabler *lttng_event_notifier_enabler_create(
2543 struct lttng_event_notifier_group *event_notifier_group,
2544 enum lttng_enabler_format_type format_type,
2545 struct lttng_kernel_abi_event_notifier *event_notifier_param)
2546 {
2547 struct lttng_event_notifier_enabler *event_notifier_enabler;
2548
2549 event_notifier_enabler = kzalloc(sizeof(*event_notifier_enabler), GFP_KERNEL);
2550 if (!event_notifier_enabler)
2551 return NULL;
2552
2553 event_notifier_enabler->base.format_type = format_type;
2554 INIT_LIST_HEAD(&event_notifier_enabler->base.filter_bytecode_head);
2555 INIT_LIST_HEAD(&event_notifier_enabler->capture_bytecode_head);
2556
2557 event_notifier_enabler->error_counter_index = event_notifier_param->error_counter_index;
2558 event_notifier_enabler->num_captures = 0;
2559
2560 memcpy(&event_notifier_enabler->base.event_param, &event_notifier_param->event,
2561 sizeof(event_notifier_enabler->base.event_param));
2562
2563 event_notifier_enabler->base.enabled = 0;
2564 event_notifier_enabler->base.user_token = event_notifier_param->event.token;
2565 event_notifier_enabler->group = event_notifier_group;
2566
2567 mutex_lock(&sessions_mutex);
2568 list_add(&event_notifier_enabler->node, &event_notifier_enabler->group->enablers_head);
2569 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2570
2571 mutex_unlock(&sessions_mutex);
2572
2573 return event_notifier_enabler;
2574 }
2575
2576 int lttng_event_notifier_enabler_enable(
2577 struct lttng_event_notifier_enabler *event_notifier_enabler)
2578 {
2579 mutex_lock(&sessions_mutex);
2580 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 1;
2581 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2582 mutex_unlock(&sessions_mutex);
2583 return 0;
2584 }
2585
2586 int lttng_event_notifier_enabler_disable(
2587 struct lttng_event_notifier_enabler *event_notifier_enabler)
2588 {
2589 mutex_lock(&sessions_mutex);
2590 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 0;
2591 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2592 mutex_unlock(&sessions_mutex);
2593 return 0;
2594 }
2595
2596 int lttng_event_notifier_enabler_attach_filter_bytecode(
2597 struct lttng_event_notifier_enabler *event_notifier_enabler,
2598 struct lttng_kernel_abi_filter_bytecode __user *bytecode)
2599 {
2600 int ret;
2601
2602 ret = lttng_enabler_attach_filter_bytecode(
2603 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler),
2604 bytecode);
2605 if (ret)
2606 goto error;
2607
2608 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2609 return 0;
2610
2611 error:
2612 return ret;
2613 }
2614
2615 int lttng_event_notifier_enabler_attach_capture_bytecode(
2616 struct lttng_event_notifier_enabler *event_notifier_enabler,
2617 struct lttng_kernel_abi_capture_bytecode __user *bytecode)
2618 {
2619 struct lttng_kernel_bytecode_node *bytecode_node;
2620 struct lttng_enabler *enabler =
2621 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler);
2622 uint32_t bytecode_len;
2623 int ret;
2624
2625 ret = get_user(bytecode_len, &bytecode->len);
2626 if (ret)
2627 return ret;
2628
2629 bytecode_node = lttng_kvzalloc(sizeof(*bytecode_node) + bytecode_len,
2630 GFP_KERNEL);
2631 if (!bytecode_node)
2632 return -ENOMEM;
2633
2634 ret = copy_from_user(&bytecode_node->bc, bytecode,
2635 sizeof(*bytecode) + bytecode_len);
2636 if (ret)
2637 goto error_free;
2638
2639 bytecode_node->type = LTTNG_KERNEL_BYTECODE_TYPE_CAPTURE;
2640 bytecode_node->enabler = enabler;
2641
2642 /* Enforce length based on allocated size */
2643 bytecode_node->bc.len = bytecode_len;
2644 list_add_tail(&bytecode_node->node, &event_notifier_enabler->capture_bytecode_head);
2645
2646 event_notifier_enabler->num_captures++;
2647
2648 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2649 goto end;
2650
2651 error_free:
2652 lttng_kvfree(bytecode_node);
2653 end:
2654 return ret;
2655 }
2656
2657 static
2658 void lttng_event_notifier_enabler_destroy(
2659 struct lttng_event_notifier_enabler *event_notifier_enabler)
2660 {
2661 if (!event_notifier_enabler) {
2662 return;
2663 }
2664
2665 list_del(&event_notifier_enabler->node);
2666
2667 lttng_enabler_destroy(lttng_event_notifier_enabler_as_enabler(event_notifier_enabler));
2668 kfree(event_notifier_enabler);
2669 }
2670
2671 /*
2672 * lttng_session_sync_event_enablers should be called just before starting a
2673 * session.
2674 * Should be called with sessions mutex held.
2675 */
2676 static
2677 void lttng_session_sync_event_enablers(struct lttng_kernel_session *session)
2678 {
2679 struct lttng_event_enabler *event_enabler;
2680 struct lttng_kernel_event_recorder_private *event_recorder_priv;
2681
2682 list_for_each_entry(event_enabler, &session->priv->enablers_head, node)
2683 lttng_event_enabler_ref_events(event_enabler);
2684 /*
2685 * For each event, if at least one of its enablers is enabled,
2686 * and its channel and session transient states are enabled, we
2687 * enable the event, else we disable it.
2688 */
2689 list_for_each_entry(event_recorder_priv, &session->priv->events, node) {
2690 struct lttng_kernel_event_recorder *event_recorder = event_recorder_priv->pub;
2691 struct lttng_enabler_ref *enabler_ref;
2692 struct lttng_kernel_bytecode_runtime *runtime;
2693 int enabled = 0, has_enablers_without_filter_bytecode = 0;
2694 int nr_filters = 0;
2695
2696 switch (event_recorder_priv->parent.instrumentation) {
2697 case LTTNG_KERNEL_ABI_TRACEPOINT: /* Fall-through */
2698 case LTTNG_KERNEL_ABI_SYSCALL:
2699 /* Enable events */
2700 list_for_each_entry(enabler_ref,
2701 &event_recorder_priv->parent.enablers_ref_head, node) {
2702 if (enabler_ref->ref->enabled) {
2703 enabled = 1;
2704 break;
2705 }
2706 }
2707 break;
2708
2709 default:
2710 /* Not handled with lazy sync. */
2711 continue;
2712 }
2713 /*
2714 * Enabled state is based on union of enablers, with
2715 * intersection of session and channel transient enable
2716 * states.
2717 */
2718 enabled = enabled && session->priv->tstate && event_recorder->chan->tstate;
2719
2720 WRITE_ONCE(event_recorder->parent.enabled, enabled);
2721 /*
2722 * Sync tracepoint registration with event enabled
2723 * state.
2724 */
2725 if (enabled) {
2726 register_event(event_recorder);
2727 } else {
2728 _lttng_event_unregister(event_recorder);
2729 }
2730
2731 /* Check whether the event has any enabled enabler without filter bytecode. */
2732 list_for_each_entry(enabler_ref,
2733 &event_recorder_priv->parent.enablers_ref_head, node) {
2734 if (enabler_ref->ref->enabled
2735 && list_empty(&enabler_ref->ref->filter_bytecode_head)) {
2736 has_enablers_without_filter_bytecode = 1;
2737 break;
2738 }
2739 }
2740 event_recorder_priv->parent.has_enablers_without_filter_bytecode =
2741 has_enablers_without_filter_bytecode;
2742
2743 /* Enable filters */
2744 list_for_each_entry(runtime,
2745 &event_recorder_priv->parent.filter_bytecode_runtime_head, node) {
2746 lttng_bytecode_sync_state(runtime);
2747 nr_filters++;
2748 }
2749 WRITE_ONCE(event_recorder_priv->parent.pub->eval_filter,
2750 !(has_enablers_without_filter_bytecode || !nr_filters));
2751 }
2752 }
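
/*
 * Summary of the per-event state computed by the sync pass above (sketch):
 *
 *   enabled     = (at least one referenced enabler is enabled)
 *                 && session->priv->tstate && chan->tstate;
 *   eval_filter = (no enabled enabler is missing filter bytecode)
 *                 && (at least one filter bytecode runtime is linked);
 */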
2753
2754 /*
2755 * Apply enablers to session events, adding events to session if need
2756 * be. It is required after each modification applied to an active
2757 * session, and right before session "start".
2758 * "lazy" sync means we only sync if required.
2759 * Should be called with sessions mutex held.
2760 */
2761 static
2762 void lttng_session_lazy_sync_event_enablers(struct lttng_kernel_session *session)
2763 {
2764 /* We can skip if session is not active */
2765 if (!session->active)
2766 return;
2767 lttng_session_sync_event_enablers(session);
2768 }
2769
2770 static
2771 void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group)
2772 {
2773 struct lttng_event_notifier_enabler *event_notifier_enabler;
2774 struct lttng_kernel_event_notifier_private *event_notifier_priv;
2775
2776 list_for_each_entry(event_notifier_enabler, &event_notifier_group->enablers_head, node)
2777 lttng_event_notifier_enabler_ref_event_notifiers(event_notifier_enabler);
2778
2779 /*
2780 * For each event_notifier, if at least one of its enablers is enabled,
2781 * we enable the event_notifier, else we disable it.
2782 */
2783 list_for_each_entry(event_notifier_priv, &event_notifier_group->event_notifiers_head, node) {
2784 struct lttng_kernel_event_notifier *event_notifier = event_notifier_priv->pub;
2785 struct lttng_enabler_ref *enabler_ref;
2786 struct lttng_kernel_bytecode_runtime *runtime;
2787 int enabled = 0, has_enablers_without_filter_bytecode = 0;
2788 int nr_filters = 0, nr_captures = 0;
2789
2790 switch (event_notifier_priv->parent.instrumentation) {
2791 case LTTNG_KERNEL_ABI_TRACEPOINT: /* Fall-through */
2792 case LTTNG_KERNEL_ABI_SYSCALL:
2793 /* Enable event_notifiers */
2794 list_for_each_entry(enabler_ref,
2795 &event_notifier_priv->parent.enablers_ref_head, node) {
2796 if (enabler_ref->ref->enabled) {
2797 enabled = 1;
2798 break;
2799 }
2800 }
2801 break;
2802
2803 default:
2804 /* Not handled with sync. */
2805 continue;
2806 }
2807
2808 WRITE_ONCE(event_notifier->parent.enabled, enabled);
2809 /*
2810 * Sync tracepoint registration with event_notifier enabled
2811 * state.
2812 */
2813 if (enabled) {
2814 if (!event_notifier_priv->parent.registered)
2815 register_event_notifier(event_notifier);
2816 } else {
2817 if (event_notifier_priv->parent.registered)
2818 _lttng_event_notifier_unregister(event_notifier);
2819 }
2820
2821 /* Check whether the event_notifier has any enabled enabler without filter bytecode. */
2822 list_for_each_entry(enabler_ref,
2823 &event_notifier_priv->parent.enablers_ref_head, node) {
2824 if (enabler_ref->ref->enabled
2825 && list_empty(&enabler_ref->ref->filter_bytecode_head)) {
2826 has_enablers_without_filter_bytecode = 1;
2827 break;
2828 }
2829 }
2830 event_notifier_priv->parent.has_enablers_without_filter_bytecode =
2831 has_enablers_without_filter_bytecode;
2832
2833 /* Enable filters */
2834 list_for_each_entry(runtime,
2835 &event_notifier_priv->parent.filter_bytecode_runtime_head, node) {
2836 lttng_bytecode_sync_state(runtime);
2837 nr_filters++;
2838 }
2839 WRITE_ONCE(event_notifier_priv->parent.pub->eval_filter,
2840 !(has_enablers_without_filter_bytecode || !nr_filters));
2841
2842 /* Enable captures */
2843 list_for_each_entry(runtime,
2844 &event_notifier_priv->capture_bytecode_runtime_head, node) {
2845 lttng_bytecode_sync_state(runtime);
2846 nr_captures++;
2847 }
2848 WRITE_ONCE(event_notifier->eval_capture, !!nr_captures);
2849 }
2850 }
2851
2852 /*
2853 * Serialize at most one packet worth of metadata into a metadata
2854 * channel.
2855 * We grab the metadata cache mutex to get exclusive access to our metadata
2856 * buffer and to the metadata cache. Exclusive access to the metadata buffer
2857 * allows us to do racy operations such as checking for remaining space left
2858 * in a packet and writing to it, since mutual exclusion protects us from concurrent writes.
2859 * Mutual exclusion on the metadata cache allows us to read the cache content
2860 * without racing against reallocation of the cache by updates.
2861 * Returns the number of bytes written in the channel, 0 if no data
2862 * was written and a negative value on error.
2863 */
2864 int lttng_metadata_output_channel(struct lttng_metadata_stream *stream,
2865 struct channel *chan, bool *coherent)
2866 {
2867 struct lttng_kernel_ring_buffer_ctx ctx;
2868 int ret = 0;
2869 size_t len, reserve_len;
2870
2871 /*
2872 * Ensure we support multiple get_next / put sequences followed by
2873 * put_next. The metadata cache lock protects reading the metadata
2874 * cache. It can indeed be read concurrently by "get_next_subbuf" and
2875 * "flush" operations on the buffer invoked by different processes.
2876 * Moreover, since the metadata cache memory can be reallocated, we
2877 * need to have exclusive access against updates even though we only
2878 * read it.
2879 */
2880 mutex_lock(&stream->metadata_cache->lock);
2881 WARN_ON(stream->metadata_in < stream->metadata_out);
2882 if (stream->metadata_in != stream->metadata_out)
2883 goto end;
2884
2885 /* Metadata regenerated, change the version. */
2886 if (stream->metadata_cache->version != stream->version)
2887 stream->version = stream->metadata_cache->version;
2888
2889 len = stream->metadata_cache->metadata_written -
2890 stream->metadata_in;
2891 if (!len)
2892 goto end;
2893 reserve_len = min_t(size_t,
2894 stream->transport->ops.priv->packet_avail_size(chan),
2895 len);
2896 lib_ring_buffer_ctx_init(&ctx, chan, reserve_len,
2897 sizeof(char), NULL);
2898 /*
2899 * If reservation failed, return an error to the caller.
2900 */
2901 ret = stream->transport->ops.event_reserve(&ctx);
2902 if (ret != 0) {
2903 printk(KERN_WARNING "LTTng: Metadata event reservation failed\n");
2904 stream->coherent = false;
2905 goto end;
2906 }
2907 stream->transport->ops.event_write(&ctx,
2908 stream->metadata_cache->data + stream->metadata_in,
2909 reserve_len);
2910 stream->transport->ops.event_commit(&ctx);
2911 stream->metadata_in += reserve_len;
2912 if (reserve_len < len)
2913 stream->coherent = false;
2914 else
2915 stream->coherent = true;
2916 ret = reserve_len;
2917
2918 end:
2919 if (coherent)
2920 *coherent = stream->coherent;
2921 mutex_unlock(&stream->metadata_cache->lock);
2922 return ret;
2923 }
2924
2925 static
2926 void lttng_metadata_begin(struct lttng_kernel_session *session)
2927 {
2928 if (atomic_inc_return(&session->priv->metadata_cache->producing) == 1)
2929 mutex_lock(&session->priv->metadata_cache->lock);
2930 }
2931
2932 static
2933 void lttng_metadata_end(struct lttng_kernel_session *session)
2934 {
2935 WARN_ON_ONCE(!atomic_read(&session->priv->metadata_cache->producing));
2936 if (atomic_dec_return(&session->priv->metadata_cache->producing) == 0) {
2937 struct lttng_metadata_stream *stream;
2938
2939 list_for_each_entry(stream, &session->priv->metadata_cache->metadata_stream, list)
2940 wake_up_interruptible(&stream->read_wait);
2941 mutex_unlock(&session->priv->metadata_cache->lock);
2942 }
2943 }
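
/*
 * Typical usage of the begin/end pair (see the statedump helpers below):
 * everything printed between the two calls is published to the metadata
 * streams as a single atomic transaction.
 *
 *   lttng_metadata_begin(session);
 *   ret = lttng_metadata_printf(session, "...");
 *   ...
 *   lttng_metadata_end(session);
 */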
2944
2945 /*
2946 * Write the metadata to the metadata cache.
2947 * Must be called with sessions_mutex held.
2948 * The metadata cache lock protects us from concurrent read access from
2949 * a thread outputting metadata content to the ring buffer.
2950 * The content of the printf is printed as a single atomic metadata
2951 * transaction.
2952 */
2953 int lttng_metadata_printf(struct lttng_kernel_session *session,
2954 const char *fmt, ...)
2955 {
2956 char *str;
2957 size_t len;
2958 va_list ap;
2959
2960 WARN_ON_ONCE(!LTTNG_READ_ONCE(session->active));
2961
2962 va_start(ap, fmt);
2963 str = kvasprintf(GFP_KERNEL, fmt, ap);
2964 va_end(ap);
2965 if (!str)
2966 return -ENOMEM;
2967
2968 len = strlen(str);
2969 WARN_ON_ONCE(!atomic_read(&session->priv->metadata_cache->producing));
2970 if (session->priv->metadata_cache->metadata_written + len >
2971 session->priv->metadata_cache->cache_alloc) {
2972 char *tmp_cache_realloc;
2973 unsigned int tmp_cache_alloc_size;
2974
2975 tmp_cache_alloc_size = max_t(unsigned int,
2976 session->priv->metadata_cache->cache_alloc + len,
2977 session->priv->metadata_cache->cache_alloc << 1);
2978 tmp_cache_realloc = vzalloc(tmp_cache_alloc_size);
2979 if (!tmp_cache_realloc)
2980 goto err;
2981 if (session->priv->metadata_cache->data) {
2982 memcpy(tmp_cache_realloc,
2983 session->priv->metadata_cache->data,
2984 session->priv->metadata_cache->cache_alloc);
2985 vfree(session->priv->metadata_cache->data);
2986 }
2987
2988 session->priv->metadata_cache->cache_alloc = tmp_cache_alloc_size;
2989 session->priv->metadata_cache->data = tmp_cache_realloc;
2990 }
2991 memcpy(session->priv->metadata_cache->data +
2992 session->priv->metadata_cache->metadata_written,
2993 str, len);
2994 session->priv->metadata_cache->metadata_written += len;
2995 kfree(str);
2996
2997 return 0;
2998
2999 err:
3000 kfree(str);
3001 return -ENOMEM;
3002 }
3003
3004 static
3005 int print_tabs(struct lttng_kernel_session *session, size_t nesting)
3006 {
3007 size_t i;
3008
3009 for (i = 0; i < nesting; i++) {
3010 int ret;
3011
3012 ret = lttng_metadata_printf(session, " ");
3013 if (ret) {
3014 return ret;
3015 }
3016 }
3017 return 0;
3018 }
3019
3020 static
3021 int lttng_field_name_statedump(struct lttng_kernel_session *session,
3022 const struct lttng_kernel_event_field *field,
3023 size_t nesting)
3024 {
3025 return lttng_metadata_printf(session, " _%s;\n", field->name);
3026 }
3027
3028 static
3029 int _lttng_integer_type_statedump(struct lttng_kernel_session *session,
3030 const struct lttng_kernel_type_integer *type,
3031 enum lttng_kernel_string_encoding parent_encoding,
3032 size_t nesting)
3033 {
3034 int ret;
3035
3036 ret = print_tabs(session, nesting);
3037 if (ret)
3038 return ret;
3039 ret = lttng_metadata_printf(session,
3040 "integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s }",
3041 type->size,
3042 type->alignment,
3043 type->signedness,
3044 (parent_encoding == lttng_kernel_string_encoding_none)
3045 ? "none"
3046 : (parent_encoding == lttng_kernel_string_encoding_UTF8)
3047 ? "UTF8"
3048 : "ASCII",
3049 type->base,
3050 #if __BYTE_ORDER == __BIG_ENDIAN
3051 type->reverse_byte_order ? " byte_order = le;" : ""
3052 #else
3053 type->reverse_byte_order ? " byte_order = be;" : ""
3054 #endif
3055 );
3056 return ret;
3057 }
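
/*
 * Example of the CTF type emitted by _lttng_integer_type_statedump() for a
 * native-endian 32-bit signed integer (illustrative values):
 *
 *   integer { size = 32; align = 8; signed = 1; encoding = none; base = 10; }
 */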
3058
3059 /*
3060 * Must be called with sessions_mutex held.
3061 */
3062 static
3063 int _lttng_struct_type_statedump(struct lttng_kernel_session *session,
3064 const struct lttng_kernel_type_struct *type,
3065 size_t nesting)
3066 {
3067 const char *prev_field_name = NULL;
3068 int ret;
3069 uint32_t i, nr_fields;
3070 unsigned int alignment;
3071
3072 ret = print_tabs(session, nesting);
3073 if (ret)
3074 return ret;
3075 ret = lttng_metadata_printf(session,
3076 "struct {\n");
3077 if (ret)
3078 return ret;
3079 nr_fields = type->nr_fields;
3080 for (i = 0; i < nr_fields; i++) {
3081 const struct lttng_kernel_event_field *iter_field;
3082
3083 iter_field = type->fields[i];
3084 ret = _lttng_field_statedump(session, iter_field, nesting + 1, &prev_field_name);
3085 if (ret)
3086 return ret;
3087 }
3088 ret = print_tabs(session, nesting);
3089 if (ret)
3090 return ret;
3091 alignment = type->alignment;
3092 if (alignment) {
3093 ret = lttng_metadata_printf(session,
3094 "} align(%u)",
3095 alignment);
3096 } else {
3097 ret = lttng_metadata_printf(session,
3098 "}");
3099 }
3100 return ret;
3101 }
3102
3103 /*
3104 * Must be called with sessions_mutex held.
3105 */
3106 static
3107 int _lttng_struct_field_statedump(struct lttng_kernel_session *session,
3108 const struct lttng_kernel_event_field *field,
3109 size_t nesting)
3110 {
3111 int ret;
3112
3113 ret = _lttng_struct_type_statedump(session,
3114 lttng_kernel_get_type_struct(field->type), nesting);
3115 if (ret)
3116 return ret;
3117 return lttng_field_name_statedump(session, field, nesting);
3118 }
3119
3120 /*
3121 * Must be called with sessions_mutex held.
3122 */
3123 static
3124 int _lttng_variant_type_statedump(struct lttng_kernel_session *session,
3125 const struct lttng_kernel_type_variant *type,
3126 size_t nesting,
3127 const char *prev_field_name)
3128 {
3129 const char *tag_name;
3130 int ret;
3131 uint32_t i, nr_choices;
3132
3133 tag_name = type->tag_name;
3134 if (!tag_name)
3135 tag_name = prev_field_name;
3136 if (!tag_name)
3137 return -EINVAL;
3138 /*
3139 * CTF 1.8 does not allow expressing nonzero variant alignment in a nestable way.
3140 */
3141 if (type->alignment != 0)
3142 return -EINVAL;
3143 ret = print_tabs(session, nesting);
3144 if (ret)
3145 return ret;
3146 ret = lttng_metadata_printf(session,
3147 "variant <_%s> {\n",
3148 tag_name);
3149 if (ret)
3150 return ret;
3151 nr_choices = type->nr_choices;
3152 for (i = 0; i < nr_choices; i++) {
3153 const struct lttng_kernel_event_field *iter_field;
3154
3155 iter_field = type->choices[i];
3156 ret = _lttng_field_statedump(session, iter_field, nesting + 1, NULL);
3157 if (ret)
3158 return ret;
3159 }
3160 ret = print_tabs(session, nesting);
3161 if (ret)
3162 return ret;
3163 ret = lttng_metadata_printf(session,
3164 "}");
3165 return ret;
3166 }
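
/*
 * Example of the CTF emitted for a variant selected by a preceding "_tag"
 * field (illustrative tag and choice names, element types elided):
 *
 *   variant <_tag> {
 *           integer { ... } _a;
 *           string _b;
 *   }
 */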
3167
3168 /*
3169 * Must be called with sessions_mutex held.
3170 */
3171 static
3172 int _lttng_variant_field_statedump(struct lttng_kernel_session *session,
3173 const struct lttng_kernel_event_field *field,
3174 size_t nesting,
3175 const char *prev_field_name)
3176 {
3177 int ret;
3178
3179 ret = _lttng_variant_type_statedump(session,
3180 lttng_kernel_get_type_variant(field->type), nesting,
3181 prev_field_name);
3182 if (ret)
3183 return ret;
3184 return lttng_field_name_statedump(session, field, nesting);
3185 }
3186
3187 /*
3188 * Must be called with sessions_mutex held.
3189 */
3190 static
3191 int _lttng_array_field_statedump(struct lttng_kernel_session *session,
3192 const struct lttng_kernel_event_field *field,
3193 size_t nesting)
3194 {
3195 int ret;
3196 const struct lttng_kernel_type_array *array_type;
3197 const struct lttng_kernel_type_common *elem_type;
3198
3199 array_type = lttng_kernel_get_type_array(field->type);
3200 WARN_ON_ONCE(!array_type);
3201
3202 if (array_type->alignment) {
3203 ret = print_tabs(session, nesting);
3204 if (ret)
3205 return ret;
3206 ret = lttng_metadata_printf(session,
3207 "struct { } align(%u) _%s_padding;\n",
3208 array_type->alignment * CHAR_BIT,
3209 field->name);
3210 if (ret)
3211 return ret;
3212 }
3213 /*
3214 * Nested compound types: only arrays of structures and variants are
3215 * currently supported.
3216 */
3217 elem_type = array_type->elem_type;
3218 switch (elem_type->type) {
3219 case lttng_kernel_type_integer:
3220 case lttng_kernel_type_struct:
3221 case lttng_kernel_type_variant:
3222 ret = _lttng_type_statedump(session, elem_type,
3223 array_type->encoding, nesting);
3224 if (ret)
3225 return ret;
3226 break;
3227
3228 default:
3229 return -EINVAL;
3230 }
3231 ret = lttng_metadata_printf(session,
3232 " _%s[%u];\n",
3233 field->name,
3234 array_type->length);
3235 return ret;
3236 }
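
/*
 * Example of the CTF emitted for an array field named "buf" of 16 unsigned
 * bytes (illustrative name and values):
 *
 *   integer { size = 8; align = 8; signed = 0; encoding = none; base = 10; } _buf[16];
 */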
3237
3238 /*
3239 * Must be called with sessions_mutex held.
3240 */
3241 static
3242 int _lttng_sequence_field_statedump(struct lttng_kernel_session *session,
3243 const struct lttng_kernel_event_field *field,
3244 size_t nesting,
3245 const char *prev_field_name)
3246 {
3247 int ret;
3248 const char *length_name;
3249 const struct lttng_kernel_type_sequence *sequence_type;
3250 const struct lttng_kernel_type_common *elem_type;
3251
3252 sequence_type = lttng_kernel_get_type_sequence(field->type);
3253 WARN_ON_ONCE(!sequence_type);
3254
3255 length_name = sequence_type->length_name;
3256 if (!length_name)
3257 length_name = prev_field_name;
3258 if (!length_name)
3259 return -EINVAL;
3260
3261 if (sequence_type->alignment) {
3262 ret = print_tabs(session, nesting);
3263 if (ret)
3264 return ret;
3265 ret = lttng_metadata_printf(session,
3266 "struct { } align(%u) _%s_padding;\n",
3267 sequence_type->alignment * CHAR_BIT,
3268 field->name);
3269 if (ret)
3270 return ret;
3271 }
3272
3273 /*
3274 * Nested compound types: only arrays of structures and variants are
3275 * currently supported.
3276 */
3277 elem_type = sequence_type->elem_type;
3278 switch (elem_type->type) {
3279 case lttng_kernel_type_integer:
3280 case lttng_kernel_type_struct:
3281 case lttng_kernel_type_variant:
3282 ret = _lttng_type_statedump(session, elem_type,
3283 sequence_type->encoding, nesting);
3284 if (ret)
3285 return ret;
3286 break;
3287
3288 default:
3289 return -EINVAL;
3290 }
3291 ret = lttng_metadata_printf(session,
3292 " _%s[ _%s ];\n",
3293 field->name,
3294 length_name);
3295 return ret;
3296 }
3297
3298 /*
3299 * Must be called with sessions_mutex held.
3300 */
3301 static
3302 int _lttng_enum_type_statedump(struct lttng_kernel_session *session,
3303 const struct lttng_kernel_type_enum *type,
3304 size_t nesting)
3305 {
3306 const struct lttng_kernel_enum_desc *enum_desc;
3307 const struct lttng_kernel_type_common *container_type;
3308 int ret;
3309 unsigned int i, nr_entries;
3310
3311 container_type = type->container_type;
3312 if (container_type->type != lttng_kernel_type_integer) {
3313 ret = -EINVAL;
3314 goto end;
3315 }
3316 enum_desc = type->desc;
3317 nr_entries = enum_desc->nr_entries;
3318
3319 ret = print_tabs(session, nesting);
3320 if (ret)
3321 goto end;
3322 ret = lttng_metadata_printf(session, "enum : ");
3323 if (ret)
3324 goto end;
3325 ret = _lttng_integer_type_statedump(session, lttng_kernel_get_type_integer(container_type),
3326 lttng_kernel_string_encoding_none, 0);
3327 if (ret)
3328 goto end;
3329 ret = lttng_metadata_printf(session, " {\n");
3330 if (ret)
3331 goto end;
3332 /* Dump all entries */
3333 for (i = 0; i < nr_entries; i++) {
3334 const struct lttng_kernel_enum_entry *entry = enum_desc->entries[i];
3335 int j, len;
3336
3337 ret = print_tabs(session, nesting + 1);
3338 if (ret)
3339 goto end;
3340 ret = lttng_metadata_printf(session,
3341 "\"");
3342 if (ret)
3343 goto end;
3344 len = strlen(entry->string);
3345 /* Escape the characters '"' and '\\'. */
3346 for (j = 0; j < len; j++) {
3347 char c = entry->string[j];
3348
3349 switch (c) {
3350 case '"':
3351 ret = lttng_metadata_printf(session,
3352 "\\\"");
3353 break;
3354 case '\\':
3355 ret = lttng_metadata_printf(session,
3356 "\\\\");
3357 break;
3358 default:
3359 ret = lttng_metadata_printf(session,
3360 "%c", c);
3361 break;
3362 }
3363 if (ret)
3364 goto end;
3365 }
3366 ret = lttng_metadata_printf(session, "\"");
3367 if (ret)
3368 goto end;
3369
3370 if (entry->options.is_auto) {
3371 ret = lttng_metadata_printf(session, ",\n");
3372 if (ret)
3373 goto end;
3374 } else {
3375 ret = lttng_metadata_printf(session,
3376 " = ");
3377 if (ret)
3378 goto end;
3379 if (entry->start.signedness)
3380 ret = lttng_metadata_printf(session,
3381 "%lld", (long long) entry->start.value);
3382 else
3383 ret = lttng_metadata_printf(session,
3384 "%llu", entry->start.value);
3385 if (ret)
3386 goto end;
3387 if (entry->start.signedness == entry->end.signedness &&
3388 entry->start.value
3389 == entry->end.value) {
3390 ret = lttng_metadata_printf(session,
3391 ",\n");
3392 } else {
3393 if (entry->end.signedness) {
3394 ret = lttng_metadata_printf(session,
3395 " ... %lld,\n",
3396 (long long) entry->end.value);
3397 } else {
3398 ret = lttng_metadata_printf(session,
3399 " ... %llu,\n",
3400 entry->end.value);
3401 }
3402 }
3403 if (ret)
3404 goto end;
3405 }
3406 }
3407 ret = print_tabs(session, nesting);
3408 if (ret)
3409 goto end;
3410 ret = lttng_metadata_printf(session, "}");
3411 end:
3412 return ret;
3413 }
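
/*
 * Example of the CTF emitted for a two-entry enumeration over a 32-bit
 * unsigned container (illustrative labels and values):
 *
 *   enum : integer { size = 32; align = 8; signed = 0; encoding = none; base = 10; } {
 *           "DISABLED" = 0,
 *           "ENABLED" = 1,
 *   }
 */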
3414
3415 /*
3416 * Must be called with sessions_mutex held.
3417 */
3418 static
3419 int _lttng_enum_field_statedump(struct lttng_kernel_session *session,
3420 const struct lttng_kernel_event_field *field,
3421 size_t nesting)
3422 {
3423 int ret;
3424 const struct lttng_kernel_type_enum *enum_type;
3425
3426 enum_type = lttng_kernel_get_type_enum(field->type);
3427 WARN_ON_ONCE(!enum_type);
3428 ret = _lttng_enum_type_statedump(session, enum_type, nesting);
3429 if (ret)
3430 return ret;
3431 return lttng_field_name_statedump(session, field, nesting);
3432 }
3433
3434 static
3435 int _lttng_integer_field_statedump(struct lttng_kernel_session *session,
3436 const struct lttng_kernel_event_field *field,
3437 size_t nesting)
3438 {
3439 int ret;
3440
3441 ret = _lttng_integer_type_statedump(session, lttng_kernel_get_type_integer(field->type),
3442 lttng_kernel_string_encoding_none, nesting);
3443 if (ret)
3444 return ret;
3445 return lttng_field_name_statedump(session, field, nesting);
3446 }
3447
3448 static
3449 int _lttng_string_type_statedump(struct lttng_kernel_session *session,
3450 const struct lttng_kernel_type_string *type,
3451 size_t nesting)
3452 {
3453 int ret;
3454
3455 /* Default encoding is UTF8 */
3456 ret = print_tabs(session, nesting);
3457 if (ret)
3458 return ret;
3459 ret = lttng_metadata_printf(session,
3460 "string%s",
3461 type->encoding == lttng_kernel_string_encoding_ASCII ?
3462 " { encoding = ASCII; }" : "");
3463 return ret;
3464 }
3465
3466 static
3467 int _lttng_string_field_statedump(struct lttng_kernel_session *session,
3468 const struct lttng_kernel_event_field *field,
3469 size_t nesting)
3470 {
3471 const struct lttng_kernel_type_string *string_type;
3472 int ret;
3473
3474 string_type = lttng_kernel_get_type_string(field->type);
3475 WARN_ON_ONCE(!string_type);
3476 ret = _lttng_string_type_statedump(session, string_type, nesting);
3477 if (ret)
3478 return ret;
3479 return lttng_field_name_statedump(session, field, nesting);
3480 }
3481
3482 /*
3483 * Must be called with sessions_mutex held.
3484 */
3485 static
3486 int _lttng_type_statedump(struct lttng_kernel_session *session,
3487 const struct lttng_kernel_type_common *type,
3488 enum lttng_kernel_string_encoding parent_encoding,
3489 size_t nesting)
3490 {
3491 int ret = 0;
3492
3493 switch (type->type) {
3494 case lttng_kernel_type_integer:
3495 ret = _lttng_integer_type_statedump(session,
3496 lttng_kernel_get_type_integer(type),
3497 parent_encoding, nesting);
3498 break;
3499 case lttng_kernel_type_enum:
3500 ret = _lttng_enum_type_statedump(session,
3501 lttng_kernel_get_type_enum(type),
3502 nesting);
3503 break;
3504 case lttng_kernel_type_string:
3505 ret = _lttng_string_type_statedump(session,
3506 lttng_kernel_get_type_string(type),
3507 nesting);
3508 break;
3509 case lttng_kernel_type_struct:
3510 ret = _lttng_struct_type_statedump(session,
3511 lttng_kernel_get_type_struct(type),
3512 nesting);
3513 break;
3514 case lttng_kernel_type_variant:
3515 ret = _lttng_variant_type_statedump(session,
3516 lttng_kernel_get_type_variant(type),
3517 nesting, NULL);
3518 break;
3519
3520 /* Nested arrays and sequences are not supported yet. */
3521 case lttng_kernel_type_array:
3522 case lttng_kernel_type_sequence:
3523 default:
3524 WARN_ON_ONCE(1);
3525 return -EINVAL;
3526 }
3527 return ret;
3528 }
3529
3530 /*
3531 * Must be called with sessions_mutex held.
3532 */
3533 static
3534 int _lttng_field_statedump(struct lttng_kernel_session *session,
3535 const struct lttng_kernel_event_field *field,
3536 size_t nesting,
3537 const char **prev_field_name_p)
3538 {
3539 const char *prev_field_name = NULL;
3540 int ret = 0;
3541
3542 if (prev_field_name_p)
3543 prev_field_name = *prev_field_name_p;
3544 switch (field->type->type) {
3545 case lttng_kernel_type_integer:
3546 ret = _lttng_integer_field_statedump(session, field, nesting);
3547 break;
3548 case lttng_kernel_type_enum:
3549 ret = _lttng_enum_field_statedump(session, field, nesting);
3550 break;
3551 case lttng_kernel_type_string:
3552 ret = _lttng_string_field_statedump(session, field, nesting);
3553 break;
3554 case lttng_kernel_type_struct:
3555 ret = _lttng_struct_field_statedump(session, field, nesting);
3556 break;
3557 case lttng_kernel_type_array:
3558 ret = _lttng_array_field_statedump(session, field, nesting);
3559 break;
3560 case lttng_kernel_type_sequence:
3561 ret = _lttng_sequence_field_statedump(session, field, nesting, prev_field_name);
3562 break;
3563 case lttng_kernel_type_variant:
3564 ret = _lttng_variant_field_statedump(session, field, nesting, prev_field_name);
3565 break;
3566
3567 default:
3568 WARN_ON_ONCE(1);
3569 return -EINVAL;
3570 }
3571 if (prev_field_name_p)
3572 *prev_field_name_p = field->name;
3573 return ret;
3574 }
3575
3576 static
3577 int _lttng_context_metadata_statedump(struct lttng_kernel_session *session,
3578 struct lttng_kernel_ctx *ctx)
3579 {
3580 const char *prev_field_name = NULL;
3581 int ret = 0;
3582 int i;
3583
3584 if (!ctx)
3585 return 0;
3586 for (i = 0; i < ctx->nr_fields; i++) {
3587 const struct lttng_kernel_ctx_field *field = &ctx->fields[i];
3588
3589 ret = _lttng_field_statedump(session, field->event_field, 2, &prev_field_name);
3590 if (ret)
3591 return ret;
3592 }
3593 return ret;
3594 }
3595
3596 static
3597 int _lttng_fields_metadata_statedump(struct lttng_kernel_session *session,
3598 struct lttng_kernel_event_recorder *event_recorder)
3599 {
3600 const char *prev_field_name = NULL;
3601 const struct lttng_kernel_event_desc *desc = event_recorder->priv->parent.desc;
3602 int ret = 0;
3603 int i;
3604
3605 for (i = 0; i < desc->nr_fields; i++) {
3606 const struct lttng_kernel_event_field *field = desc->fields[i];
3607
3608 ret = _lttng_field_statedump(session, field, 2, &prev_field_name);
3609 if (ret)
3610 return ret;
3611 }
3612 return ret;
3613 }
3614
3615 /*
3616 * Must be called with sessions_mutex held.
3617 * The entire event metadata is printed as a single atomic metadata
3618 * transaction.
3619 */
3620 static
3621 int _lttng_event_metadata_statedump(struct lttng_kernel_session *session,
3622 struct lttng_channel *chan,
3623 struct lttng_kernel_event_recorder *event_recorder)
3624 {
3625 int ret = 0;
3626
3627 if (event_recorder->priv->metadata_dumped || !LTTNG_READ_ONCE(session->active))
3628 return 0;
3629 if (chan->channel_type == METADATA_CHANNEL)
3630 return 0;
3631
3632 lttng_metadata_begin(session);
3633
3634 ret = lttng_metadata_printf(session,
3635 "event {\n"
3636 " name = \"%s\";\n"
3637 " id = %u;\n"
3638 " stream_id = %u;\n",
3639 event_recorder->priv->parent.desc->event_name,
3640 event_recorder->priv->id,
3641 event_recorder->chan->id);
3642 if (ret)
3643 goto end;
3644
3645 ret = lttng_metadata_printf(session,
3646 " fields := struct {\n"
3647 );
3648 if (ret)
3649 goto end;
3650
3651 ret = _lttng_fields_metadata_statedump(session, event_recorder);
3652 if (ret)
3653 goto end;
3654
3655 /*
3656 * LTTng space reservation can only reserve multiples of the
3657 * byte size.
3658 */
3659 ret = lttng_metadata_printf(session,
3660 " };\n"
3661 "};\n\n");
3662 if (ret)
3663 goto end;
3664
3665 event_recorder->priv->metadata_dumped = 1;
3666 end:
3667 lttng_metadata_end(session);
3668 return ret;
3669
3670 }
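/*
 * For reference, an enabled event results in a metadata description of the
 * following shape (the event name, ids and field list are illustrative; the
 * exact fields come from _lttng_fields_metadata_statedump()):
 *
 *   event {
 *       name = "sched_switch";
 *       id = 0;
 *       stream_id = 0;
 *       fields := struct {
 *           ...
 *       };
 *   };
 */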
3671
3672 /*
3673 * Must be called with sessions_mutex held.
3674 * The entire channel metadata is printed as a single atomic metadata
3675 * transaction.
3676 */
3677 static
3678 int _lttng_channel_metadata_statedump(struct lttng_kernel_session *session,
3679 struct lttng_channel *chan)
3680 {
3681 int ret = 0;
3682
3683 if (chan->metadata_dumped || !LTTNG_READ_ONCE(session->active))
3684 return 0;
3685
3686 if (chan->channel_type == METADATA_CHANNEL)
3687 return 0;
3688
3689 lttng_metadata_begin(session);
3690
3691 WARN_ON_ONCE(!chan->header_type);
3692 ret = lttng_metadata_printf(session,
3693 "stream {\n"
3694 " id = %u;\n"
3695 " event.header := %s;\n"
3696 " packet.context := struct packet_context;\n",
3697 chan->id,
3698 chan->header_type == 1 ? "struct event_header_compact" :
3699 "struct event_header_large");
3700 if (ret)
3701 goto end;
3702
3703 if (chan->ctx) {
3704 ret = lttng_metadata_printf(session,
3705 " event.context := struct {\n");
3706 if (ret)
3707 goto end;
3708 }
3709 ret = _lttng_context_metadata_statedump(session, chan->ctx);
3710 if (ret)
3711 goto end;
3712 if (chan->ctx) {
3713 ret = lttng_metadata_printf(session,
3714 " };\n");
3715 if (ret)
3716 goto end;
3717 }
3718
3719 ret = lttng_metadata_printf(session,
3720 "};\n\n");
3721
3722 chan->metadata_dumped = 1;
3723 end:
3724 lttng_metadata_end(session);
3725 return ret;
3726 }
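/*
 * The resulting stream description has the following shape (ids and header
 * type are illustrative; the event.context block is only emitted when the
 * channel has contexts attached):
 *
 *   stream {
 *       id = 0;
 *       event.header := struct event_header_compact;
 *       packet.context := struct packet_context;
 *       event.context := struct {
 *           ...
 *       };
 *   };
 */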
3727
3728 /*
3729 * Must be called with sessions_mutex held.
3730 */
3731 static
3732 int _lttng_stream_packet_context_declare(struct lttng_kernel_session *session)
3733 {
3734 return lttng_metadata_printf(session,
3735 "struct packet_context {\n"
3736 " uint64_clock_monotonic_t timestamp_begin;\n"
3737 " uint64_clock_monotonic_t timestamp_end;\n"
3738 " uint64_t content_size;\n"
3739 " uint64_t packet_size;\n"
3740 " uint64_t packet_seq_num;\n"
3741 " unsigned long events_discarded;\n"
3742 " uint32_t cpu_id;\n"
3743 "};\n\n"
3744 );
3745 }
3746
3747 /*
3748 * Compact header:
3749 * id: range: 0 - 30.
3750 * id 31 is reserved to indicate an extended header.
3751 *
3752 * Large header:
3753 * id: range: 0 - 65534.
3754 * id 65535 is reserved to indicate an extended header.
3755 *
3756 * Must be called with sessions_mutex held.
3757 */
3758 static
3759 int _lttng_event_header_declare(struct lttng_kernel_session *session)
3760 {
3761 return lttng_metadata_printf(session,
3762 "struct event_header_compact {\n"
3763 " enum : uint5_t { compact = 0 ... 30, extended = 31 } id;\n"
3764 " variant <id> {\n"
3765 " struct {\n"
3766 " uint27_clock_monotonic_t timestamp;\n"
3767 " } compact;\n"
3768 " struct {\n"
3769 " uint32_t id;\n"
3770 " uint64_clock_monotonic_t timestamp;\n"
3771 " } extended;\n"
3772 " } v;\n"
3773 "} align(%u);\n"
3774 "\n"
3775 "struct event_header_large {\n"
3776 " enum : uint16_t { compact = 0 ... 65534, extended = 65535 } id;\n"
3777 " variant <id> {\n"
3778 " struct {\n"
3779 " uint32_clock_monotonic_t timestamp;\n"
3780 " } compact;\n"
3781 " struct {\n"
3782 " uint32_t id;\n"
3783 " uint64_clock_monotonic_t timestamp;\n"
3784 " } extended;\n"
3785 " } v;\n"
3786 "} align(%u);\n\n",
3787 lttng_alignof(uint32_t) * CHAR_BIT,
3788 lttng_alignof(uint16_t) * CHAR_BIT
3789 );
3790 }
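/*
 * With the declarations above, an event record starts with a 5-bit (compact
 * header) or 16-bit (large header) id. Ids below the reserved "extended"
 * value select the compact variant, which only carries a truncated timestamp
 * (27 or 32 bits); the reserved id (31 or 65535) selects the extended
 * variant, which carries a full 32-bit id and a 64-bit timestamp.
 */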
3791
3792 /*
3793 * Approximation of NTP time of day to clock monotonic correlation,
3794 * taken at start of trace.
3795 * Yes, this is only an approximation. Yes, we can (and will) do better
3796 * in future versions.
3797 * This function may return a negative offset. This can happen if the
3798 * system sets the REALTIME clock to 0 after boot.
3799 *
3800 * Use a 64-bit timespec on kernels that provide it; this makes 32-bit
3801 * architectures y2038 compliant.
3802 */
3803 static
3804 int64_t measure_clock_offset(void)
3805 {
3806 uint64_t monotonic_avg, monotonic[2], realtime;
3807 uint64_t tcf = trace_clock_freq();
3808 int64_t offset;
3809 unsigned long flags;
3810 #ifdef LTTNG_KERNEL_HAS_TIMESPEC64
3811 struct timespec64 rts = { 0, 0 };
3812 #else
3813 struct timespec rts = { 0, 0 };
3814 #endif
3815
3816 /* Disable interrupts to increase correlation precision. */
3817 local_irq_save(flags);
3818 monotonic[0] = trace_clock_read64();
3819 #ifdef LTTNG_KERNEL_HAS_TIMESPEC64
3820 ktime_get_real_ts64(&rts);
3821 #else
3822 getnstimeofday(&rts);
3823 #endif
3824 monotonic[1] = trace_clock_read64();
3825 local_irq_restore(flags);
3826
3827 monotonic_avg = (monotonic[0] + monotonic[1]) >> 1;
3828 realtime = (uint64_t) rts.tv_sec * tcf;
3829 if (tcf == NSEC_PER_SEC) {
3830 realtime += rts.tv_nsec;
3831 } else {
3832 uint64_t n = rts.tv_nsec * tcf;
3833
3834 do_div(n, NSEC_PER_SEC);
3835 realtime += n;
3836 }
3837 offset = (int64_t) realtime - monotonic_avg;
3838 return offset;
3839 }
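/*
 * Worked example with made-up values, assuming a 1 GHz trace clock
 * (tcf == NSEC_PER_SEC): if monotonic[0] = 1000000000, monotonic[1] =
 * 1000000200 and the realtime clock reads 5 s + 250 ns, then
 * monotonic_avg = 1000000100, realtime = 5000000250 and the returned
 * offset is 5000000250 - 1000000100 = 4000000150 clock units.
 */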
3840
3841 static
3842 int print_escaped_ctf_string(struct lttng_kernel_session *session, const char *string)
3843 {
3844 int ret = 0;
3845 size_t i;
3846 char cur;
3847
3848 i = 0;
3849 cur = string[i];
3850 while (cur != '\0') {
3851 switch (cur) {
3852 case '\n':
3853 ret = lttng_metadata_printf(session, "%s", "\\n");
3854 break;
3855 case '\\':
3856 case '"':
3857 ret = lttng_metadata_printf(session, "%c", '\\');
3858 if (ret)
3859 goto error;
3860 /* We still print the current char */
3861 /* Fallthrough */
3862 default:
3863 ret = lttng_metadata_printf(session, "%c", cur);
3864 break;
3865 }
3866
3867 if (ret)
3868 goto error;
3869
3870 cur = string[++i];
3871 }
3872 error:
3873 return ret;
3874 }
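/*
 * Example: the input string
 *     say "hi"<newline>
 * is emitted as
 *     say \"hi\"\n
 * Double quotes and backslashes are prefixed with a backslash, and newline
 * characters are written out as the two-character sequence "\n".
 */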
3875
3876 static
3877 int print_metadata_escaped_field(struct lttng_kernel_session *session, const char *field,
3878 const char *field_value)
3879 {
3880 int ret;
3881
3882 ret = lttng_metadata_printf(session, " %s = \"", field);
3883 if (ret)
3884 goto error;
3885
3886 ret = print_escaped_ctf_string(session, field_value);
3887 if (ret)
3888 goto error;
3889
3890 ret = lttng_metadata_printf(session, "\";\n");
3891
3892 error:
3893 return ret;
3894 }
3895
3896 /*
3897 * Output metadata into this session's metadata buffers.
3898 * Must be called with sessions_mutex held.
3899 */
3900 static
3901 int _lttng_session_metadata_statedump(struct lttng_kernel_session *session)
3902 {
3903 unsigned char *uuid_c = session->priv->uuid.b;
3904 unsigned char uuid_s[37], clock_uuid_s[BOOT_ID_LEN];
3905 const char *product_uuid;
3906 struct lttng_channel *chan;
3907 struct lttng_kernel_event_recorder_private *event_recorder_priv;
3908 int ret = 0;
3909
3910 if (!LTTNG_READ_ONCE(session->active))
3911 return 0;
3912
3913 lttng_metadata_begin(session);
3914
3915 if (session->priv->metadata_dumped)
3916 goto skip_session;
3917
3918 snprintf(uuid_s, sizeof(uuid_s),
3919 "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
3920 uuid_c[0], uuid_c[1], uuid_c[2], uuid_c[3],
3921 uuid_c[4], uuid_c[5], uuid_c[6], uuid_c[7],
3922 uuid_c[8], uuid_c[9], uuid_c[10], uuid_c[11],
3923 uuid_c[12], uuid_c[13], uuid_c[14], uuid_c[15]);
3924
3925 ret = lttng_metadata_printf(session,
3926 "typealias integer { size = 8; align = %u; signed = false; } := uint8_t;\n"
3927 "typealias integer { size = 16; align = %u; signed = false; } := uint16_t;\n"
3928 "typealias integer { size = 32; align = %u; signed = false; } := uint32_t;\n"
3929 "typealias integer { size = 64; align = %u; signed = false; } := uint64_t;\n"
3930 "typealias integer { size = %u; align = %u; signed = false; } := unsigned long;\n"
3931 "typealias integer { size = 5; align = 1; signed = false; } := uint5_t;\n"
3932 "typealias integer { size = 27; align = 1; signed = false; } := uint27_t;\n"
3933 "\n"
3934 "trace {\n"
3935 " major = %u;\n"
3936 " minor = %u;\n"
3937 " uuid = \"%s\";\n"
3938 " byte_order = %s;\n"
3939 " packet.header := struct {\n"
3940 " uint32_t magic;\n"
3941 " uint8_t uuid[16];\n"
3942 " uint32_t stream_id;\n"
3943 " uint64_t stream_instance_id;\n"
3944 " };\n"
3945 "};\n\n",
3946 lttng_alignof(uint8_t) * CHAR_BIT,
3947 lttng_alignof(uint16_t) * CHAR_BIT,
3948 lttng_alignof(uint32_t) * CHAR_BIT,
3949 lttng_alignof(uint64_t) * CHAR_BIT,
3950 sizeof(unsigned long) * CHAR_BIT,
3951 lttng_alignof(unsigned long) * CHAR_BIT,
3952 CTF_SPEC_MAJOR,
3953 CTF_SPEC_MINOR,
3954 uuid_s,
3955 #if __BYTE_ORDER == __BIG_ENDIAN
3956 "be"
3957 #else
3958 "le"
3959 #endif
3960 );
3961 if (ret)
3962 goto end;
3963
3964 ret = lttng_metadata_printf(session,
3965 "env {\n"
3966 " hostname = \"%s\";\n"
3967 " domain = \"kernel\";\n"
3968 " sysname = \"%s\";\n"
3969 " kernel_release = \"%s\";\n"
3970 " kernel_version = \"%s\";\n"
3971 " tracer_name = \"lttng-modules\";\n"
3972 " tracer_major = %d;\n"
3973 " tracer_minor = %d;\n"
3974 " tracer_patchlevel = %d;\n"
3975 " trace_buffering_scheme = \"global\";\n",
3976 current->nsproxy->uts_ns->name.nodename,
3977 utsname()->sysname,
3978 utsname()->release,
3979 utsname()->version,
3980 LTTNG_MODULES_MAJOR_VERSION,
3981 LTTNG_MODULES_MINOR_VERSION,
3982 LTTNG_MODULES_PATCHLEVEL_VERSION
3983 );
3984 if (ret)
3985 goto end;
3986
3987 ret = print_metadata_escaped_field(session, "trace_name", session->priv->name);
3988 if (ret)
3989 goto end;
3990 ret = print_metadata_escaped_field(session, "trace_creation_datetime",
3991 session->priv->creation_time);
3992 if (ret)
3993 goto end;
3994
3995 /* Add the product UUID to the 'env' section */
3996 product_uuid = dmi_get_system_info(DMI_PRODUCT_UUID);
3997 if (product_uuid) {
3998 ret = lttng_metadata_printf(session,
3999 " product_uuid = \"%s\";\n",
4000 product_uuid
4001 );
4002 if (ret)
4003 goto end;
4004 }
4005
4006 /* Close the 'env' section */
4007 ret = lttng_metadata_printf(session, "};\n\n");
4008 if (ret)
4009 goto end;
4010
4011 ret = lttng_metadata_printf(session,
4012 "clock {\n"
4013 " name = \"%s\";\n",
4014 trace_clock_name()
4015 );
4016 if (ret)
4017 goto end;
4018
4019 if (!trace_clock_uuid(clock_uuid_s)) {
4020 ret = lttng_metadata_printf(session,
4021 " uuid = \"%s\";\n",
4022 clock_uuid_s
4023 );
4024 if (ret)
4025 goto end;
4026 }
4027
4028 ret = lttng_metadata_printf(session,
4029 " description = \"%s\";\n"
4030 " freq = %llu; /* Frequency, in Hz */\n"
4031 " /* clock value offset from Epoch is: offset * (1/freq) */\n"
4032 " offset = %lld;\n"
4033 "};\n\n",
4034 trace_clock_description(),
4035 (unsigned long long) trace_clock_freq(),
4036 (long long) measure_clock_offset()
4037 );
4038 if (ret)
4039 goto end;
4040
4041 ret = lttng_metadata_printf(session,
4042 "typealias integer {\n"
4043 " size = 27; align = 1; signed = false;\n"
4044 " map = clock.%s.value;\n"
4045 "} := uint27_clock_monotonic_t;\n"
4046 "\n"
4047 "typealias integer {\n"
4048 " size = 32; align = %u; signed = false;\n"
4049 " map = clock.%s.value;\n"
4050 "} := uint32_clock_monotonic_t;\n"
4051 "\n"
4052 "typealias integer {\n"
4053 " size = 64; align = %u; signed = false;\n"
4054 " map = clock.%s.value;\n"
4055 "} := uint64_clock_monotonic_t;\n\n",
4056 trace_clock_name(),
4057 lttng_alignof(uint32_t) * CHAR_BIT,
4058 trace_clock_name(),
4059 lttng_alignof(uint64_t) * CHAR_BIT,
4060 trace_clock_name()
4061 );
4062 if (ret)
4063 goto end;
4064
4065 ret = _lttng_stream_packet_context_declare(session);
4066 if (ret)
4067 goto end;
4068
4069 ret = _lttng_event_header_declare(session);
4070 if (ret)
4071 goto end;
4072
4073 skip_session:
4074 list_for_each_entry(chan, &session->priv->chan, list) {
4075 ret = _lttng_channel_metadata_statedump(session, chan);
4076 if (ret)
4077 goto end;
4078 }
4079
4080 list_for_each_entry(event_recorder_priv, &session->priv->events, node) {
4081 ret = _lttng_event_metadata_statedump(session, event_recorder_priv->pub->chan,
4082 event_recorder_priv->pub);
4083 if (ret)
4084 goto end;
4085 }
4086 session->priv->metadata_dumped = 1;
4087 end:
4088 lttng_metadata_end(session);
4089 return ret;
4090 }
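/*
 * Summary of the dump order above: the session-wide declarations (type
 * aliases, trace, env, clock and header/packet-context structures) are
 * emitted only once, guarded by metadata_dumped; every call then walks the
 * channels and events, so anything created since the previous dump gets its
 * stream/event description appended.
 */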
4091
4092 /**
4093 * lttng_transport_register - LTT transport registration
4094 * lttng_transport_register - LTTng transport registration
4095 *
4096 * Registers a transport which can be used as output to extract the data out of
4097 * LTTng. The module calling this registration function must ensure that no
4098 * trap-inducing code will be executed by the transport functions. E.g.,
4099 * vmalloc_sync_mappings() must be called between a vmalloc and the moment the
4100 * memory is made visible to the transport functions. This registration itself
4101 * acts as a vmalloc_sync_mappings(), so a module only needs to synchronize the
4102 * TLBs itself if it allocates virtual memory after registering.
4103 */
4104 void lttng_transport_register(struct lttng_transport *transport)
4105 {
4106 /*
4107 * Make sure no page fault can be triggered by the module about to be
4108 * registered. We deal with this here so we don't have to call
4109 * vmalloc_sync_mappings() in each module's init.
4110 */
4111 wrapper_vmalloc_sync_mappings();
4112
4113 mutex_lock(&sessions_mutex);
4114 list_add_tail(&transport->node, &lttng_transport_list);
4115 mutex_unlock(&sessions_mutex);
4116 }
4117 EXPORT_SYMBOL_GPL(lttng_transport_register);
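/*
 * Minimal usage sketch for a hypothetical ring buffer client module; the
 * field initializers shown are assumptions, only the registration and
 * unregistration calls are prescribed by this file:
 *
 *   static struct lttng_transport my_transport = {
 *       .name = "relay-example",
 *       .owner = THIS_MODULE,
 *       // channel ops, etc.
 *   };
 *
 *   static int __init my_client_init(void)
 *   {
 *       lttng_transport_register(&my_transport);
 *       return 0;
 *   }
 *   module_init(my_client_init);
 *
 *   static void __exit my_client_exit(void)
 *   {
 *       lttng_transport_unregister(&my_transport);
 *   }
 *   module_exit(my_client_exit);
 */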
4118
4119 /**
4120 * lttng_transport_unregister - LTT transport unregistration
4121 * @transport: transport structure
4122 * lttng_transport_unregister - LTTng transport unregistration
4123 void lttng_transport_unregister(struct lttng_transport *transport)
4124 {
4125 mutex_lock(&sessions_mutex);
4126 list_del(&transport->node);
4127 mutex_unlock(&sessions_mutex);
4128 }
4129 EXPORT_SYMBOL_GPL(lttng_transport_unregister);
4130
4131 void lttng_counter_transport_register(struct lttng_counter_transport *transport)
4132 {
4133 /*
4134 * Make sure no page fault can be triggered by the module about to be
4135 * registered. We deal with this here so we don't have to call
4136 * vmalloc_sync_mappings() in each module's init.
4137 */
4138 wrapper_vmalloc_sync_mappings();
4139
4140 mutex_lock(&sessions_mutex);
4141 list_add_tail(&transport->node, &lttng_counter_transport_list);
4142 mutex_unlock(&sessions_mutex);
4143 }
4144 EXPORT_SYMBOL_GPL(lttng_counter_transport_register);
4145
4146 void lttng_counter_transport_unregister(struct lttng_counter_transport *transport)
4147 {
4148 mutex_lock(&sessions_mutex);
4149 list_del(&transport->node);
4150 mutex_unlock(&sessions_mutex);
4151 }
4152 EXPORT_SYMBOL_GPL(lttng_counter_transport_unregister);
4153
4154 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
4155
4156 enum cpuhp_state lttng_hp_prepare;
4157 enum cpuhp_state lttng_hp_online;
4158
4159 static int lttng_hotplug_prepare(unsigned int cpu, struct hlist_node *node)
4160 {
4161 struct lttng_cpuhp_node *lttng_node;
4162
4163 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4164 switch (lttng_node->component) {
4165 case LTTNG_RING_BUFFER_FRONTEND:
4166 return 0;
4167 case LTTNG_RING_BUFFER_BACKEND:
4168 return lttng_cpuhp_rb_backend_prepare(cpu, lttng_node);
4169 case LTTNG_RING_BUFFER_ITER:
4170 return 0;
4171 case LTTNG_CONTEXT_PERF_COUNTERS:
4172 return 0;
4173 default:
4174 return -EINVAL;
4175 }
4176 }
4177
4178 static int lttng_hotplug_dead(unsigned int cpu, struct hlist_node *node)
4179 {
4180 struct lttng_cpuhp_node *lttng_node;
4181
4182 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4183 switch (lttng_node->component) {
4184 case LTTNG_RING_BUFFER_FRONTEND:
4185 return lttng_cpuhp_rb_frontend_dead(cpu, lttng_node);
4186 case LTTNG_RING_BUFFER_BACKEND:
4187 return 0;
4188 case LTTNG_RING_BUFFER_ITER:
4189 return 0;
4190 case LTTNG_CONTEXT_PERF_COUNTERS:
4191 return lttng_cpuhp_perf_counter_dead(cpu, lttng_node);
4192 default:
4193 return -EINVAL;
4194 }
4195 }
4196
4197 static int lttng_hotplug_online(unsigned int cpu, struct hlist_node *node)
4198 {
4199 struct lttng_cpuhp_node *lttng_node;
4200
4201 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4202 switch (lttng_node->component) {
4203 case LTTNG_RING_BUFFER_FRONTEND:
4204 return lttng_cpuhp_rb_frontend_online(cpu, lttng_node);
4205 case LTTNG_RING_BUFFER_BACKEND:
4206 return 0;
4207 case LTTNG_RING_BUFFER_ITER:
4208 return lttng_cpuhp_rb_iter_online(cpu, lttng_node);
4209 case LTTNG_CONTEXT_PERF_COUNTERS:
4210 return lttng_cpuhp_perf_counter_online(cpu, lttng_node);
4211 default:
4212 return -EINVAL;
4213 }
4214 }
4215
4216 static int lttng_hotplug_offline(unsigned int cpu, struct hlist_node *node)
4217 {
4218 struct lttng_cpuhp_node *lttng_node;
4219
4220 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4221 switch (lttng_node->component) {
4222 case LTTNG_RING_BUFFER_FRONTEND:
4223 return lttng_cpuhp_rb_frontend_offline(cpu, lttng_node);
4224 case LTTNG_RING_BUFFER_BACKEND:
4225 return 0;
4226 case LTTNG_RING_BUFFER_ITER:
4227 return 0;
4228 case LTTNG_CONTEXT_PERF_COUNTERS:
4229 return 0;
4230 default:
4231 return -EINVAL;
4232 }
4233 }
4234
4235 static int __init lttng_init_cpu_hotplug(void)
4236 {
4237 int ret;
4238
4239 ret = cpuhp_setup_state_multi(CPUHP_BP_PREPARE_DYN, "lttng:prepare",
4240 lttng_hotplug_prepare,
4241 lttng_hotplug_dead);
4242 if (ret < 0) {
4243 return ret;
4244 }
4245 lttng_hp_prepare = ret;
4246 lttng_rb_set_hp_prepare(ret);
4247
4248 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "lttng:online",
4249 lttng_hotplug_online,
4250 lttng_hotplug_offline);
4251 if (ret < 0) {
4252 cpuhp_remove_multi_state(lttng_hp_prepare);
4253 lttng_hp_prepare = 0;
4254 return ret;
4255 }
4256 lttng_hp_online = ret;
4257 lttng_rb_set_hp_online(ret);
4258
4259 return 0;
4260 }
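/*
 * CPUHP_BP_PREPARE_DYN and CPUHP_AP_ONLINE_DYN request dynamically allocated
 * hotplug states, so the returned state values are kept in lttng_hp_prepare /
 * lttng_hp_online and also handed to the ring buffer through
 * lttng_rb_set_hp_prepare() and lttng_rb_set_hp_online(). Individual
 * lttng_cpuhp_node instances are attached to these multi-states elsewhere
 * (ring buffer and perf counter code), which is why the callbacks above
 * dispatch on lttng_node->component.
 */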
4261
4262 static void __exit lttng_exit_cpu_hotplug(void)
4263 {
4264 lttng_rb_set_hp_online(0);
4265 cpuhp_remove_multi_state(lttng_hp_online);
4266 lttng_rb_set_hp_prepare(0);
4267 cpuhp_remove_multi_state(lttng_hp_prepare);
4268 }
4269
4270 #else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
4271 static int lttng_init_cpu_hotplug(void)
4272 {
4273 return 0;
4274 }
4275 static void lttng_exit_cpu_hotplug(void)
4276 {
4277 }
4278 #endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
4279
4280
4281 static int __init lttng_events_init(void)
4282 {
4283 int ret;
4284
4285 ret = wrapper_lttng_fixup_sig(THIS_MODULE);
4286 if (ret)
4287 return ret;
4288 ret = wrapper_get_pfnblock_flags_mask_init();
4289 if (ret)
4290 return ret;
4291 ret = wrapper_get_pageblock_flags_mask_init();
4292 if (ret)
4293 return ret;
4294 ret = lttng_probes_init();
4295 if (ret)
4296 return ret;
4297 ret = lttng_context_init();
4298 if (ret)
4299 return ret;
4300 ret = lttng_tracepoint_init();
4301 if (ret)
4302 goto error_tp;
4303 event_recorder_cache = KMEM_CACHE(lttng_kernel_event_recorder, 0);
4304 if (!event_recorder_cache) {
4305 ret = -ENOMEM;
4306 goto error_kmem_event_recorder;
4307 }
4308 event_recorder_private_cache = KMEM_CACHE(lttng_kernel_event_recorder_private, 0);
4309 if (!event_recorder_private_cache) {
4310 ret = -ENOMEM;
4311 goto error_kmem_event_recorder_private;
4312 }
4313 event_notifier_cache = KMEM_CACHE(lttng_kernel_event_notifier, 0);
4314 if (!event_notifier_cache) {
4315 ret = -ENOMEM;
4316 goto error_kmem_event_notifier;
4317 }
4318 event_notifier_private_cache = KMEM_CACHE(lttng_kernel_event_notifier_private, 0);
4319 if (!event_notifier_private_cache) {
4320 ret = -ENOMEM;
4321 goto error_kmem_event_notifier_private;
4322 }
4323 ret = lttng_abi_init();
4324 if (ret)
4325 goto error_abi;
4326 ret = lttng_logger_init();
4327 if (ret)
4328 goto error_logger;
4329 ret = lttng_init_cpu_hotplug();
4330 if (ret)
4331 goto error_hotplug;
4332 printk(KERN_NOTICE "LTTng: Loaded modules v%s.%s.%s%s (%s)%s%s\n",
4333 __stringify(LTTNG_MODULES_MAJOR_VERSION),
4334 __stringify(LTTNG_MODULES_MINOR_VERSION),
4335 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
4336 LTTNG_MODULES_EXTRAVERSION,
4337 LTTNG_VERSION_NAME,
4338 #ifdef LTTNG_EXTRA_VERSION_GIT
4339 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
4340 #else
4341 "",
4342 #endif
4343 #ifdef LTTNG_EXTRA_VERSION_NAME
4344 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
4345 #else
4346 "");
4347 #endif
4348 return 0;
4349
4350 error_hotplug:
4351 lttng_logger_exit();
4352 error_logger:
4353 lttng_abi_exit();
4354 error_abi:
4355 kmem_cache_destroy(event_notifier_private_cache);
4356 error_kmem_event_notifier_private:
4357 kmem_cache_destroy(event_notifier_cache);
4358 error_kmem_event_notifier:
4359 kmem_cache_destroy(event_recorder_private_cache);
4360 error_kmem_event_recorder_private:
4361 kmem_cache_destroy(event_recorder_cache);
4362 error_kmem_event_recorder:
4363 lttng_tracepoint_exit();
4364 error_tp:
4365 lttng_context_exit();
4366 printk(KERN_NOTICE "LTTng: Failed to load modules v%s.%s.%s%s (%s)%s%s\n",
4367 __stringify(LTTNG_MODULES_MAJOR_VERSION),
4368 __stringify(LTTNG_MODULES_MINOR_VERSION),
4369 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
4370 LTTNG_MODULES_EXTRAVERSION,
4371 LTTNG_VERSION_NAME,
4372 #ifdef LTTNG_EXTRA_VERSION_GIT
4373 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
4374 #else
4375 "",
4376 #endif
4377 #ifdef LTTNG_EXTRA_VERSION_NAME
4378 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
4379 #else
4380 "");
4381 #endif
4382 return ret;
4383 }
4384
4385 module_init(lttng_events_init);
4386
4387 static void __exit lttng_events_exit(void)
4388 {
4389 struct lttng_kernel_session_private *session_priv, *tmpsession_priv;
4390
4391 lttng_exit_cpu_hotplug();
4392 lttng_logger_exit();
4393 lttng_abi_exit();
4394 list_for_each_entry_safe(session_priv, tmpsession_priv, &sessions, list)
4395 lttng_session_destroy(session_priv->pub);
4396 kmem_cache_destroy(event_recorder_cache);
4397 kmem_cache_destroy(event_recorder_private_cache);
4398 kmem_cache_destroy(event_notifier_cache);
4399 kmem_cache_destroy(event_notifier_private_cache);
4400 lttng_tracepoint_exit();
4401 lttng_context_exit();
4402 printk(KERN_NOTICE "LTTng: Unloaded modules v%s.%s.%s%s (%s)%s%s\n",
4403 __stringify(LTTNG_MODULES_MAJOR_VERSION),
4404 __stringify(LTTNG_MODULES_MINOR_VERSION),
4405 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
4406 LTTNG_MODULES_EXTRAVERSION,
4407 LTTNG_VERSION_NAME,
4408 #ifdef LTTNG_EXTRA_VERSION_GIT
4409 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
4410 #else
4411 "",
4412 #endif
4413 #ifdef LTTNG_EXTRA_VERSION_NAME
4414 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
4415 #else
4416 "");
4417 #endif
4418 }
4419
4420 module_exit(lttng_events_exit);
4421
4422 #include <generated/patches.h>
4423 #ifdef LTTNG_EXTRA_VERSION_GIT
4424 MODULE_INFO(extra_version_git, LTTNG_EXTRA_VERSION_GIT);
4425 #endif
4426 #ifdef LTTNG_EXTRA_VERSION_NAME
4427 MODULE_INFO(extra_version_name, LTTNG_EXTRA_VERSION_NAME);
4428 #endif
4429 MODULE_LICENSE("GPL and additional rights");
4430 MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
4431 MODULE_DESCRIPTION("LTTng tracer");
4432 MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
4433 __stringify(LTTNG_MODULES_MINOR_VERSION) "."
4434 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
4435 LTTNG_MODULES_EXTRAVERSION);