Refactoring: event structures
src/lttng-events.c (lttng-modules.git)
1 /* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
2 *
3 * lttng-events.c
4 *
5 * Holds LTTng per-session event registry.
6 *
7 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 */
9
10 /*
11 * This page_alloc.h wrapper needs to be included before gfpflags.h because it
12 * overrides a function with a define.
13 */
14 #include "wrapper/page_alloc.h"
15
16 #include <linux/module.h>
17 #include <linux/mutex.h>
18 #include <linux/sched.h>
19 #include <linux/slab.h>
20 #include <linux/jiffies.h>
21 #include <linux/utsname.h>
22 #include <linux/err.h>
23 #include <linux/seq_file.h>
24 #include <linux/file.h>
25 #include <linux/anon_inodes.h>
26 #include <wrapper/file.h>
27 #include <linux/uaccess.h>
28 #include <linux/vmalloc.h>
29 #include <linux/dmi.h>
30
31 #include <wrapper/uuid.h>
32 #include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_mappings() */
33 #include <wrapper/random.h>
34 #include <wrapper/tracepoint.h>
35 #include <wrapper/list.h>
36 #include <wrapper/types.h>
37 #include <lttng/kernel-version.h>
38 #include <lttng/events.h>
39 #include <lttng/events-internal.h>
40 #include <lttng/lttng-bytecode.h>
41 #include <lttng/tracer.h>
42 #include <lttng/event-notifier-notification.h>
43 #include <lttng/abi-old.h>
44 #include <lttng/endian.h>
45 #include <lttng/string-utils.h>
46 #include <lttng/utils.h>
47 #include <ringbuffer/backend.h>
48 #include <ringbuffer/frontend.h>
49 #include <wrapper/time.h>
50
51 #define METADATA_CACHE_DEFAULT_SIZE 4096
52
53 static LIST_HEAD(sessions);
54 static LIST_HEAD(event_notifier_groups);
55 static LIST_HEAD(lttng_transport_list);
56 static LIST_HEAD(lttng_counter_transport_list);
57 /*
58 * Protect the sessions and metadata caches.
59 */
60 static DEFINE_MUTEX(sessions_mutex);
61 static struct kmem_cache *event_recorder_cache;
62 static struct kmem_cache *event_recorder_private_cache;
63 static struct kmem_cache *event_notifier_cache;
64 static struct kmem_cache *event_notifier_private_cache;
65
66 static void lttng_session_lazy_sync_event_enablers(struct lttng_session *session);
67 static void lttng_session_sync_event_enablers(struct lttng_session *session);
68 static void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler);
69 static void lttng_event_notifier_enabler_destroy(struct lttng_event_notifier_enabler *event_notifier_enabler);
70 static void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group);
71
72 static void _lttng_event_destroy(struct lttng_kernel_event_recorder *event);
73 static void _lttng_event_notifier_destroy(struct lttng_kernel_event_notifier *event_notifier);
74 static void _lttng_channel_destroy(struct lttng_channel *chan);
75 static int _lttng_event_unregister(struct lttng_kernel_event_recorder *event);
76 static int _lttng_event_notifier_unregister(struct lttng_kernel_event_notifier *event_notifier);
77 static
78 int _lttng_event_metadata_statedump(struct lttng_session *session,
79 struct lttng_channel *chan,
80 struct lttng_kernel_event_recorder *event);
81 static
82 int _lttng_session_metadata_statedump(struct lttng_session *session);
83 static
84 void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream);
85 static
86 int _lttng_type_statedump(struct lttng_session *session,
87 const struct lttng_kernel_type_common *type,
88 enum lttng_kernel_string_encoding parent_encoding,
89 size_t nesting);
90 static
91 int _lttng_field_statedump(struct lttng_session *session,
92 const struct lttng_kernel_event_field *field,
93 size_t nesting);
94
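/*
 * Grace-period barrier used before tearing events down: waits for all
 * in-flight probes running under preempt-disabled (RCU-sched) protection
 * to complete. Kernels >= 5.1 have a unified grace period, so
 * synchronize_rcu() is enough; older kernels need synchronize_sched().
 * The extra synchronize_rcu() on PREEMPT_RT configurations (whose Kconfig
 * symbol name differs before 3.4) accounts for RT kernels, where probes
 * may run in preemptible RCU read-side critical sections.
 */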
95 void synchronize_trace(void)
96 {
97 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,1,0))
98 synchronize_rcu();
99 #else
100 synchronize_sched();
101 #endif
102
103 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0))
104 #ifdef CONFIG_PREEMPT_RT_FULL
105 synchronize_rcu();
106 #endif
107 #else /* (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0)) */
108 #ifdef CONFIG_PREEMPT_RT
109 synchronize_rcu();
110 #endif
111 #endif /* (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0)) */
112 }
113
114 void lttng_lock_sessions(void)
115 {
116 mutex_lock(&sessions_mutex);
117 }
118
119 void lttng_unlock_sessions(void)
120 {
121 mutex_unlock(&sessions_mutex);
122 }
123
124 static struct lttng_transport *lttng_transport_find(const char *name)
125 {
126 struct lttng_transport *transport;
127
128 list_for_each_entry(transport, &lttng_transport_list, node) {
129 if (!strcmp(transport->name, name))
130 return transport;
131 }
132 return NULL;
133 }
134
135 /*
136 * Called with sessions lock held.
137 */
138 int lttng_session_active(void)
139 {
140 struct lttng_session *iter;
141
142 list_for_each_entry(iter, &sessions, list) {
143 if (iter->active)
144 return 1;
145 }
146 return 0;
147 }
148
149 struct lttng_session *lttng_session_create(void)
150 {
151 struct lttng_session *session;
152 struct lttng_metadata_cache *metadata_cache;
153 int i;
154
155 mutex_lock(&sessions_mutex);
156 session = lttng_kvzalloc(sizeof(struct lttng_session), GFP_KERNEL);
157 if (!session)
158 goto err;
159 INIT_LIST_HEAD(&session->chan);
160 INIT_LIST_HEAD(&session->events);
161 lttng_guid_gen(&session->uuid);
162
163 metadata_cache = kzalloc(sizeof(struct lttng_metadata_cache),
164 GFP_KERNEL);
165 if (!metadata_cache)
166 goto err_free_session;
167 metadata_cache->data = vzalloc(METADATA_CACHE_DEFAULT_SIZE);
168 if (!metadata_cache->data)
169 goto err_free_cache;
170 metadata_cache->cache_alloc = METADATA_CACHE_DEFAULT_SIZE;
171 kref_init(&metadata_cache->refcount);
172 mutex_init(&metadata_cache->lock);
173 session->metadata_cache = metadata_cache;
174 INIT_LIST_HEAD(&metadata_cache->metadata_stream);
175 memcpy(&metadata_cache->uuid, &session->uuid,
176 sizeof(metadata_cache->uuid));
177 INIT_LIST_HEAD(&session->enablers_head);
178 for (i = 0; i < LTTNG_EVENT_HT_SIZE; i++)
179 INIT_HLIST_HEAD(&session->events_ht.table[i]);
180 list_add(&session->list, &sessions);
181 session->pid_tracker.session = session;
182 session->pid_tracker.tracker_type = TRACKER_PID;
183 session->vpid_tracker.session = session;
184 session->vpid_tracker.tracker_type = TRACKER_VPID;
185 session->uid_tracker.session = session;
186 session->uid_tracker.tracker_type = TRACKER_UID;
187 session->vuid_tracker.session = session;
188 session->vuid_tracker.tracker_type = TRACKER_VUID;
189 session->gid_tracker.session = session;
190 session->gid_tracker.tracker_type = TRACKER_GID;
191 session->vgid_tracker.session = session;
192 session->vgid_tracker.tracker_type = TRACKER_VGID;
193 mutex_unlock(&sessions_mutex);
194 return session;
195
196 err_free_cache:
197 kfree(metadata_cache);
198 err_free_session:
199 lttng_kvfree(session);
200 err:
201 mutex_unlock(&sessions_mutex);
202 return NULL;
203 }
204
205 static
206 struct lttng_counter_transport *lttng_counter_transport_find(const char *name)
207 {
208 struct lttng_counter_transport *transport;
209
210 list_for_each_entry(transport, &lttng_counter_transport_list, node) {
211 if (!strcmp(transport->name, name))
212 return transport;
213 }
214 return NULL;
215 }
216
217 struct lttng_counter *lttng_kernel_counter_create(
218 const char *counter_transport_name,
219 size_t number_dimensions, const size_t *dimensions_sizes)
220 {
221 struct lttng_counter *counter = NULL;
222 struct lttng_counter_transport *counter_transport = NULL;
223
224 counter_transport = lttng_counter_transport_find(counter_transport_name);
225 if (!counter_transport) {
226 printk(KERN_WARNING "LTTng: counter transport %s not found.\n",
227 counter_transport_name);
228 goto notransport;
229 }
230 if (!try_module_get(counter_transport->owner)) {
231 printk(KERN_WARNING "LTTng: Can't lock counter transport module.\n");
232 goto notransport;
233 }
234
235 counter = lttng_kvzalloc(sizeof(struct lttng_counter), GFP_KERNEL);
236 if (!counter)
237 goto nomem;
238
239 /* Create event notifier error counter. */
240 counter->ops = &counter_transport->ops;
241 counter->transport = counter_transport;
242
243 counter->counter = counter->ops->counter_create(
244 number_dimensions, dimensions_sizes, 0);
245 if (!counter->counter) {
246 goto create_error;
247 }
248
249 return counter;
250
251 create_error:
252 lttng_kvfree(counter);
253 nomem:
254 if (counter_transport)
255 module_put(counter_transport->owner);
256 notransport:
257 return NULL;
258 }
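/*
 * Usage sketch (illustrative only; the transport name below is
 * hypothetical and depends on which counter client module is loaded):
 * creating a one-dimensional counter with the helper above, then tearing
 * it down the same way lttng_event_notifier_group_destroy() does.
 *
 *	const size_t dimensions[1] = { 256 };
 *	struct lttng_counter *counter;
 *
 *	counter = lttng_kernel_counter_create("counter-per-cpu-example",
 *			1, dimensions);
 *	if (!counter)
 *		return -EINVAL;
 *	...
 *	counter->ops->counter_destroy(counter->counter);
 *	module_put(counter->transport->owner);
 *	lttng_kvfree(counter);
 */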
259
260 struct lttng_event_notifier_group *lttng_event_notifier_group_create(void)
261 {
262 struct lttng_transport *transport = NULL;
263 struct lttng_event_notifier_group *event_notifier_group;
264 const char *transport_name = "relay-event-notifier";
265 size_t subbuf_size = 4096; //TODO
266 size_t num_subbuf = 16; //TODO
267 unsigned int switch_timer_interval = 0;
268 unsigned int read_timer_interval = 0;
269 int i;
270
271 mutex_lock(&sessions_mutex);
272
273 transport = lttng_transport_find(transport_name);
274 if (!transport) {
275 printk(KERN_WARNING "LTTng: transport %s not found\n",
276 transport_name);
277 goto notransport;
278 }
279 if (!try_module_get(transport->owner)) {
280 printk(KERN_WARNING "LTTng: Can't lock transport %s module.\n",
281 transport_name);
282 goto notransport;
283 }
284
285 event_notifier_group = lttng_kvzalloc(sizeof(struct lttng_event_notifier_group),
286 GFP_KERNEL);
287 if (!event_notifier_group)
288 goto nomem;
289
290 /*
291 * Initialize the ring buffer used to store event notifier
292 * notifications.
293 */
294 event_notifier_group->ops = &transport->ops;
295 event_notifier_group->chan = transport->ops.channel_create(
296 transport_name, event_notifier_group, NULL,
297 subbuf_size, num_subbuf, switch_timer_interval,
298 read_timer_interval);
299 if (!event_notifier_group->chan)
300 goto create_error;
301
302 event_notifier_group->transport = transport;
303
304 INIT_LIST_HEAD(&event_notifier_group->enablers_head);
305 INIT_LIST_HEAD(&event_notifier_group->event_notifiers_head);
306 for (i = 0; i < LTTNG_EVENT_NOTIFIER_HT_SIZE; i++)
307 INIT_HLIST_HEAD(&event_notifier_group->event_notifiers_ht.table[i]);
308
309 list_add(&event_notifier_group->node, &event_notifier_groups);
310
311 mutex_unlock(&sessions_mutex);
312
313 return event_notifier_group;
314
315 create_error:
316 lttng_kvfree(event_notifier_group);
317 nomem:
318 if (transport)
319 module_put(transport->owner);
320 notransport:
321 mutex_unlock(&sessions_mutex);
322 return NULL;
323 }
324
325 void metadata_cache_destroy(struct kref *kref)
326 {
327 struct lttng_metadata_cache *cache =
328 container_of(kref, struct lttng_metadata_cache, refcount);
329 vfree(cache->data);
330 kfree(cache);
331 }
332
333 void lttng_session_destroy(struct lttng_session *session)
334 {
335 struct lttng_channel *chan, *tmpchan;
336 struct lttng_kernel_event_recorder_private *event_recorder_priv, *tmpevent_recorder_priv;
337 struct lttng_metadata_stream *metadata_stream;
338 struct lttng_event_enabler *event_enabler, *tmp_event_enabler;
339 int ret;
340
341 mutex_lock(&sessions_mutex);
342 WRITE_ONCE(session->active, 0);
343 list_for_each_entry(chan, &session->chan, list) {
344 ret = lttng_syscalls_unregister_channel(chan);
345 WARN_ON(ret);
346 }
347 list_for_each_entry(event_recorder_priv, &session->events, node) {
348 ret = _lttng_event_unregister(event_recorder_priv->pub);
349 WARN_ON(ret);
350 }
351 synchronize_trace(); /* Wait for in-flight events to complete */
352 list_for_each_entry(chan, &session->chan, list) {
353 ret = lttng_syscalls_destroy_event(chan);
354 WARN_ON(ret);
355 }
356 list_for_each_entry_safe(event_enabler, tmp_event_enabler,
357 &session->enablers_head, node)
358 lttng_event_enabler_destroy(event_enabler);
359 list_for_each_entry_safe(event_recorder_priv, tmpevent_recorder_priv, &session->events, node)
360 _lttng_event_destroy(event_recorder_priv->pub);
361 list_for_each_entry_safe(chan, tmpchan, &session->chan, list) {
362 BUG_ON(chan->channel_type == METADATA_CHANNEL);
363 _lttng_channel_destroy(chan);
364 }
365 mutex_lock(&session->metadata_cache->lock);
366 list_for_each_entry(metadata_stream, &session->metadata_cache->metadata_stream, list)
367 _lttng_metadata_channel_hangup(metadata_stream);
368 mutex_unlock(&session->metadata_cache->lock);
369 lttng_id_tracker_destroy(&session->pid_tracker, false);
370 lttng_id_tracker_destroy(&session->vpid_tracker, false);
371 lttng_id_tracker_destroy(&session->uid_tracker, false);
372 lttng_id_tracker_destroy(&session->vuid_tracker, false);
373 lttng_id_tracker_destroy(&session->gid_tracker, false);
374 lttng_id_tracker_destroy(&session->vgid_tracker, false);
375 kref_put(&session->metadata_cache->refcount, metadata_cache_destroy);
376 list_del(&session->list);
377 mutex_unlock(&sessions_mutex);
378 lttng_kvfree(session);
379 }
380
381 void lttng_event_notifier_group_destroy(
382 struct lttng_event_notifier_group *event_notifier_group)
383 {
384 struct lttng_event_notifier_enabler *event_notifier_enabler, *tmp_event_notifier_enabler;
385 struct lttng_kernel_event_notifier_private *event_notifier_priv, *tmpevent_notifier_priv;
386 int ret;
387
388 if (!event_notifier_group)
389 return;
390
391 mutex_lock(&sessions_mutex);
392
393 ret = lttng_syscalls_unregister_event_notifier_group(event_notifier_group);
394 WARN_ON(ret);
395
396 list_for_each_entry_safe(event_notifier_priv, tmpevent_notifier_priv,
397 &event_notifier_group->event_notifiers_head, node) {
398 ret = _lttng_event_notifier_unregister(event_notifier_priv->pub);
399 WARN_ON(ret);
400 }
401
402 /* Wait for in-flight event notifiers to complete */
403 synchronize_trace();
404
405 irq_work_sync(&event_notifier_group->wakeup_pending);
406
407 kfree(event_notifier_group->sc_filter);
408
409 list_for_each_entry_safe(event_notifier_enabler, tmp_event_notifier_enabler,
410 &event_notifier_group->enablers_head, node)
411 lttng_event_notifier_enabler_destroy(event_notifier_enabler);
412
413 list_for_each_entry_safe(event_notifier_priv, tmpevent_notifier_priv,
414 &event_notifier_group->event_notifiers_head, node)
415 _lttng_event_notifier_destroy(event_notifier_priv->pub);
416
417 if (event_notifier_group->error_counter) {
418 struct lttng_counter *error_counter = event_notifier_group->error_counter;
419
420 error_counter->ops->counter_destroy(error_counter->counter);
421 module_put(error_counter->transport->owner);
422 lttng_kvfree(error_counter);
423 event_notifier_group->error_counter = NULL;
424 }
425
426 event_notifier_group->ops->channel_destroy(event_notifier_group->chan);
427 module_put(event_notifier_group->transport->owner);
428 list_del(&event_notifier_group->node);
429
430 mutex_unlock(&sessions_mutex);
431 lttng_kvfree(event_notifier_group);
432 }
433
434 int lttng_session_statedump(struct lttng_session *session)
435 {
436 int ret;
437
438 mutex_lock(&sessions_mutex);
439 ret = lttng_statedump_start(session);
440 mutex_unlock(&sessions_mutex);
441 return ret;
442 }
443
444 int lttng_session_enable(struct lttng_session *session)
445 {
446 int ret = 0;
447 struct lttng_channel *chan;
448
449 mutex_lock(&sessions_mutex);
450 if (session->active) {
451 ret = -EBUSY;
452 goto end;
453 }
454
455 /* Set transient enabler state to "enabled" */
456 session->tstate = 1;
457
458 /* We need to sync enablers with session before activation. */
459 lttng_session_sync_event_enablers(session);
460
461 /*
462 * Snapshot the number of events per channel to know the type of header we
463 * need to use: the compact header encodes event IDs on 5 bits, with ID 31
464 * reserved as the escape to the larger header, hence the "< 31" check below. */
465 list_for_each_entry(chan, &session->chan, list) {
466 if (chan->header_type)
467 continue; /* don't change it across session stop/restart */
468 if (chan->free_event_id < 31)
469 chan->header_type = 1; /* compact */
470 else
471 chan->header_type = 2; /* large */
472 }
473
474 /* Clear each stream's quiescent state. */
475 list_for_each_entry(chan, &session->chan, list) {
476 if (chan->channel_type != METADATA_CHANNEL)
477 lib_ring_buffer_clear_quiescent_channel(chan->chan);
478 }
479
480 WRITE_ONCE(session->active, 1);
481 WRITE_ONCE(session->been_active, 1);
482 ret = _lttng_session_metadata_statedump(session);
483 if (ret) {
484 WRITE_ONCE(session->active, 0);
485 goto end;
486 }
487 ret = lttng_statedump_start(session);
488 if (ret)
489 WRITE_ONCE(session->active, 0);
490 end:
491 mutex_unlock(&sessions_mutex);
492 return ret;
493 }
494
495 int lttng_session_disable(struct lttng_session *session)
496 {
497 int ret = 0;
498 struct lttng_channel *chan;
499
500 mutex_lock(&sessions_mutex);
501 if (!session->active) {
502 ret = -EBUSY;
503 goto end;
504 }
505 WRITE_ONCE(session->active, 0);
506
507 /* Set transient enabler state to "disabled" */
508 session->tstate = 0;
509 lttng_session_sync_event_enablers(session);
510
511 /* Set each stream's quiescent state. */
512 list_for_each_entry(chan, &session->chan, list) {
513 if (chan->channel_type != METADATA_CHANNEL)
514 lib_ring_buffer_set_quiescent_channel(chan->chan);
515 }
516 end:
517 mutex_unlock(&sessions_mutex);
518 return ret;
519 }
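/*
 * Usage sketch (illustrative only, error handling elided): the session
 * lifecycle as driven by the tracer ABI, using the helpers above.
 *
 *	struct lttng_session *session;
 *
 *	session = lttng_session_create();
 *	... create channels and events while the session is inactive ...
 *	lttng_session_enable(session);	-- sync enablers, dump metadata/state
 *	... tracing is active ...
 *	lttng_session_disable(session);
 *	lttng_session_destroy(session);
 */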
520
521 int lttng_session_metadata_regenerate(struct lttng_session *session)
522 {
523 int ret = 0;
524 struct lttng_channel *chan;
525 struct lttng_kernel_event_recorder_private *event_recorder_priv;
526 struct lttng_metadata_cache *cache = session->metadata_cache;
527 struct lttng_metadata_stream *stream;
528
529 mutex_lock(&sessions_mutex);
530 if (!session->active) {
531 ret = -EBUSY;
532 goto end;
533 }
534
535 mutex_lock(&cache->lock);
536 memset(cache->data, 0, cache->cache_alloc);
537 cache->metadata_written = 0;
538 cache->version++;
539 list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list) {
540 stream->metadata_out = 0;
541 stream->metadata_in = 0;
542 }
543 mutex_unlock(&cache->lock);
544
545 session->metadata_dumped = 0;
546 list_for_each_entry(chan, &session->chan, list) {
547 chan->metadata_dumped = 0;
548 }
549
550 list_for_each_entry(event_recorder_priv, &session->events, node) {
551 event_recorder_priv->metadata_dumped = 0;
552 }
553
554 ret = _lttng_session_metadata_statedump(session);
555
556 end:
557 mutex_unlock(&sessions_mutex);
558 return ret;
559 }
560
561 int lttng_channel_enable(struct lttng_channel *channel)
562 {
563 int ret = 0;
564
565 mutex_lock(&sessions_mutex);
566 if (channel->channel_type == METADATA_CHANNEL) {
567 ret = -EPERM;
568 goto end;
569 }
570 if (channel->enabled) {
571 ret = -EEXIST;
572 goto end;
573 }
574 /* Set transient enabler state to "enabled" */
575 channel->tstate = 1;
576 lttng_session_sync_event_enablers(channel->session);
577 /* Atomically set the state to "enabled" */
578 WRITE_ONCE(channel->enabled, 1);
579 end:
580 mutex_unlock(&sessions_mutex);
581 return ret;
582 }
583
584 int lttng_channel_disable(struct lttng_channel *channel)
585 {
586 int ret = 0;
587
588 mutex_lock(&sessions_mutex);
589 if (channel->channel_type == METADATA_CHANNEL) {
590 ret = -EPERM;
591 goto end;
592 }
593 if (!channel->enabled) {
594 ret = -EEXIST;
595 goto end;
596 }
597 /* Atomically set the state to "disabled" */
598 WRITE_ONCE(channel->enabled, 0);
599 /* Set transient enabler state to "disabled" */
600 channel->tstate = 0;
601 lttng_session_sync_event_enablers(channel->session);
602 end:
603 mutex_unlock(&sessions_mutex);
604 return ret;
605 }
606
607 int lttng_event_enable(struct lttng_kernel_event_recorder *event_recorder)
608 {
609 struct lttng_kernel_event_common *event = &event_recorder->parent;
610 int ret = 0;
611
612 mutex_lock(&sessions_mutex);
613 if (event_recorder->chan->channel_type == METADATA_CHANNEL) {
614 ret = -EPERM;
615 goto end;
616 }
617 if (event->enabled) {
618 ret = -EEXIST;
619 goto end;
620 }
621 switch (event->priv->instrumentation) {
622 case LTTNG_KERNEL_ABI_TRACEPOINT: /* Fall-through */
623 case LTTNG_KERNEL_ABI_SYSCALL:
624 ret = -EINVAL;
625 break;
626
627 case LTTNG_KERNEL_ABI_KPROBE: /* Fall-through */
628 case LTTNG_KERNEL_ABI_UPROBE:
629 WRITE_ONCE(event->enabled, 1);
630 break;
631
632 case LTTNG_KERNEL_ABI_KRETPROBE:
633 ret = lttng_kretprobes_event_enable_state(event_recorder, 1);
634 break;
635
636 case LTTNG_KERNEL_ABI_FUNCTION: /* Fall-through */
637 case LTTNG_KERNEL_ABI_NOOP: /* Fall-through */
638 default:
639 WARN_ON_ONCE(1);
640 ret = -EINVAL;
641 }
642 end:
643 mutex_unlock(&sessions_mutex);
644 return ret;
645 }
646
647 int lttng_event_disable(struct lttng_kernel_event_recorder *event_recorder)
648 {
649 struct lttng_kernel_event_common *event = &event_recorder->parent;
650 int ret = 0;
651
652 mutex_lock(&sessions_mutex);
653 if (event_recorder->chan->channel_type == METADATA_CHANNEL) {
654 ret = -EPERM;
655 goto end;
656 }
657 if (!event->enabled) {
658 ret = -EEXIST;
659 goto end;
660 }
661 switch (event->priv->instrumentation) {
662 case LTTNG_KERNEL_ABI_TRACEPOINT: /* Fall-through */
663 case LTTNG_KERNEL_ABI_SYSCALL:
664 ret = -EINVAL;
665 break;
666
667 case LTTNG_KERNEL_ABI_KPROBE: /* Fall-through */
668 case LTTNG_KERNEL_ABI_UPROBE:
669 WRITE_ONCE(event->enabled, 0);
670 break;
671
672 case LTTNG_KERNEL_ABI_KRETPROBE:
673
674 ret = lttng_kretprobes_event_enable_state(event_recorder, 0);
675 break;
676
677 case LTTNG_KERNEL_ABI_FUNCTION: /* Fall-through */
678 case LTTNG_KERNEL_ABI_NOOP: /* Fall-through */
679 default:
680 WARN_ON_ONCE(1);
681 ret = -EINVAL;
682 }
683 end:
684 mutex_unlock(&sessions_mutex);
685 return ret;
686 }
687
688 int lttng_event_notifier_enable(struct lttng_kernel_event_notifier *event_notifier)
689 {
690 struct lttng_kernel_event_common *event = &event_notifier->parent;
691 int ret = 0;
692
693 mutex_lock(&sessions_mutex);
694 if (event->enabled) {
695 ret = -EEXIST;
696 goto end;
697 }
698 switch (event->priv->instrumentation) {
699 case LTTNG_KERNEL_ABI_TRACEPOINT: /* Fall-through */
700 case LTTNG_KERNEL_ABI_SYSCALL:
701 ret = -EINVAL;
702 break;
703
704 case LTTNG_KERNEL_ABI_KPROBE: /* Fall-through */
705 case LTTNG_KERNEL_ABI_UPROBE:
706 WRITE_ONCE(event->enabled, 1);
707 break;
708
709 case LTTNG_KERNEL_ABI_FUNCTION: /* Fall-through */
710 case LTTNG_KERNEL_ABI_KRETPROBE: /* Fall-through */
711 case LTTNG_KERNEL_ABI_NOOP: /* Fall-through */
712 default:
713 WARN_ON_ONCE(1);
714 ret = -EINVAL;
715 }
716 end:
717 mutex_unlock(&sessions_mutex);
718 return ret;
719 }
720
721 int lttng_event_notifier_disable(struct lttng_kernel_event_notifier *event_notifier)
722 {
723 struct lttng_kernel_event_common *event = &event_notifier->parent;
724 int ret = 0;
725
726 mutex_lock(&sessions_mutex);
727 if (!event->enabled) {
728 ret = -EEXIST;
729 goto end;
730 }
731 switch (event->priv->instrumentation) {
732 case LTTNG_KERNEL_ABI_TRACEPOINT: /* Fall-through */
733 case LTTNG_KERNEL_ABI_SYSCALL:
734 ret = -EINVAL;
735 break;
736
737 case LTTNG_KERNEL_ABI_KPROBE: /* Fall-through */
738 case LTTNG_KERNEL_ABI_UPROBE:
739 WRITE_ONCE(event->enabled, 0);
740 break;
741
742 case LTTNG_KERNEL_ABI_FUNCTION: /* Fall-through */
743 case LTTNG_KERNEL_ABI_KRETPROBE: /* Fall-through */
744 case LTTNG_KERNEL_ABI_NOOP: /* Fall-through */
745 default:
746 WARN_ON_ONCE(1);
747 ret = -EINVAL;
748 }
749 end:
750 mutex_unlock(&sessions_mutex);
751 return ret;
752 }
753
754 struct lttng_channel *lttng_channel_create(struct lttng_session *session,
755 const char *transport_name,
756 void *buf_addr,
757 size_t subbuf_size, size_t num_subbuf,
758 unsigned int switch_timer_interval,
759 unsigned int read_timer_interval,
760 enum channel_type channel_type)
761 {
762 struct lttng_channel *chan;
763 struct lttng_transport *transport = NULL;
764
765 mutex_lock(&sessions_mutex);
766 if (session->been_active && channel_type != METADATA_CHANNEL)
767 goto active; /* Refuse to add channel to active session */
768 transport = lttng_transport_find(transport_name);
769 if (!transport) {
770 printk(KERN_WARNING "LTTng: transport %s not found\n",
771 transport_name);
772 goto notransport;
773 }
774 if (!try_module_get(transport->owner)) {
775 printk(KERN_WARNING "LTTng: Can't lock transport module.\n");
776 goto notransport;
777 }
778 chan = kzalloc(sizeof(struct lttng_channel), GFP_KERNEL);
779 if (!chan)
780 goto nomem;
781 chan->session = session;
782 chan->id = session->free_chan_id++;
783 chan->ops = &transport->ops;
784 /*
785 * Note: the channel creation op already writes into the packet
786 * headers. Therefore the "chan" information used as input
787 * should be already accessible.
788 */
789 chan->chan = transport->ops.channel_create(transport_name,
790 chan, buf_addr, subbuf_size, num_subbuf,
791 switch_timer_interval, read_timer_interval);
792 if (!chan->chan)
793 goto create_error;
794 chan->tstate = 1;
795 chan->enabled = 1;
796 chan->transport = transport;
797 chan->channel_type = channel_type;
798 list_add(&chan->list, &session->chan);
799 mutex_unlock(&sessions_mutex);
800 return chan;
801
802 create_error:
803 kfree(chan);
804 nomem:
805 if (transport)
806 module_put(transport->owner);
807 notransport:
808 active:
809 mutex_unlock(&sessions_mutex);
810 return NULL;
811 }
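/*
 * Usage sketch (illustrative only; the transport name and buffer geometry
 * are hypothetical, they normally come from the session daemon): adding a
 * per-CPU channel to a session that has never been activated.
 *
 *	struct lttng_channel *chan;
 *
 *	chan = lttng_channel_create(session, "relay-discard", NULL,
 *			262144, 4, 0, 0, PER_CPU_CHANNEL);
 *	if (!chan)
 *		return -EINVAL;
 */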
812
813 /*
814 * Only used internally at session destruction for per-cpu channels, and
815 * when metadata channel is released.
816 * Needs to be called with sessions mutex held.
817 */
818 static
819 void _lttng_channel_destroy(struct lttng_channel *chan)
820 {
821 chan->ops->channel_destroy(chan->chan);
822 module_put(chan->transport->owner);
823 list_del(&chan->list);
824 lttng_kernel_destroy_context(chan->ctx);
825 kfree(chan);
826 }
827
828 void lttng_metadata_channel_destroy(struct lttng_channel *chan)
829 {
830 BUG_ON(chan->channel_type != METADATA_CHANNEL);
831
832 /* Protect the metadata cache with the sessions_mutex. */
833 mutex_lock(&sessions_mutex);
834 _lttng_channel_destroy(chan);
835 mutex_unlock(&sessions_mutex);
836 }
837 EXPORT_SYMBOL_GPL(lttng_metadata_channel_destroy);
838
839 static
840 void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream)
841 {
842 stream->finalized = 1;
843 wake_up_interruptible(&stream->read_wait);
844 }
845
846
847 /*
848 * Supports event creation while tracing session is active.
849 * Needs to be called with sessions mutex held.
850 */
851 struct lttng_kernel_event_recorder *_lttng_kernel_event_recorder_create(struct lttng_channel *chan,
852 struct lttng_kernel_abi_event *event_param,
853 const struct lttng_kernel_event_desc *event_desc,
854 enum lttng_kernel_abi_instrumentation itype)
855 {
856 struct lttng_session *session = chan->session;
857 struct lttng_kernel_event_recorder *event_recorder;
858 struct lttng_kernel_event_recorder_private *event_recorder_priv;
859 const char *event_name;
860 struct hlist_head *head;
861 int ret;
862
863 if (chan->free_event_id == -1U) {
864 ret = -EMFILE;
865 goto full;
866 }
867
868 switch (itype) {
869 case LTTNG_KERNEL_ABI_TRACEPOINT:
870 event_name = event_desc->event_name;
871 break;
872
873 case LTTNG_KERNEL_ABI_KPROBE: /* Fall-through */
874 case LTTNG_KERNEL_ABI_UPROBE: /* Fall-through */
875 case LTTNG_KERNEL_ABI_KRETPROBE: /* Fall-through */
876 case LTTNG_KERNEL_ABI_SYSCALL:
877 event_name = event_param->name;
878 break;
879
880 case LTTNG_KERNEL_ABI_FUNCTION: /* Fall-through */
881 case LTTNG_KERNEL_ABI_NOOP: /* Fall-through */
882 default:
883 WARN_ON_ONCE(1);
884 ret = -EINVAL;
885 goto type_error;
886 }
887
888 head = utils_borrow_hash_table_bucket(session->events_ht.table,
889 LTTNG_EVENT_HT_SIZE, event_name);
890 lttng_hlist_for_each_entry(event_recorder_priv, head, hlist) {
891 WARN_ON_ONCE(!event_recorder_priv->parent.desc);
892 if (!strncmp(event_recorder_priv->parent.desc->event_name, event_name,
893 LTTNG_KERNEL_ABI_SYM_NAME_LEN - 1)
894 && chan == event_recorder_priv->pub->chan) {
895 ret = -EEXIST;
896 goto exist;
897 }
898 }
899
900 event_recorder = kmem_cache_zalloc(event_recorder_cache, GFP_KERNEL);
901 if (!event_recorder) {
902 ret = -ENOMEM;
903 goto cache_error;
904 }
905 event_recorder_priv = kmem_cache_zalloc(event_recorder_private_cache, GFP_KERNEL);
906 if (!event_recorder_priv) {
907 ret = -ENOMEM;
908 goto cache_private_error;
909 }
910 event_recorder_priv->pub = event_recorder;
911 event_recorder_priv->parent.pub = &event_recorder->parent;
912 event_recorder->priv = event_recorder_priv;
913 event_recorder->parent.priv = &event_recorder_priv->parent;
914 event_recorder->parent.type = LTTNG_KERNEL_EVENT_TYPE_RECORDER;
915
916 event_recorder->chan = chan;
917 event_recorder->priv->id = chan->free_event_id++;
918 event_recorder->priv->parent.instrumentation = itype;
919 INIT_LIST_HEAD(&event_recorder->priv->parent.filter_bytecode_runtime_head);
920 INIT_LIST_HEAD(&event_recorder->priv->parent.enablers_ref_head);
921
922 switch (itype) {
923 case LTTNG_KERNEL_ABI_TRACEPOINT:
924 /* Event will be enabled by enabler sync. */
925 event_recorder->parent.enabled = 0;
926 event_recorder->priv->parent.registered = 0;
927 event_recorder->priv->parent.desc = lttng_event_desc_get(event_name);
928 if (!event_recorder->priv->parent.desc) {
929 ret = -ENOENT;
930 goto register_error;
931 }
932 /* Populate lttng_event structure before event registration. */
933 smp_wmb();
934 break;
935
936 case LTTNG_KERNEL_ABI_KPROBE:
937 /*
938 * Needs to be explicitly enabled after creation, since
939 * we may want to apply filters.
940 */
941 event_recorder->parent.enabled = 0;
942 event_recorder->priv->parent.registered = 1;
943 /*
944 * Populate lttng_event structure before event
945 * registration.
946 */
947 smp_wmb();
948 ret = lttng_kprobes_register_event(event_name,
949 event_param->u.kprobe.symbol_name,
950 event_param->u.kprobe.offset,
951 event_param->u.kprobe.addr,
952 event_recorder);
953 if (ret) {
954 ret = -EINVAL;
955 goto register_error;
956 }
957 ret = try_module_get(event_recorder->priv->parent.desc->owner);
958 WARN_ON_ONCE(!ret);
959 break;
960
961 case LTTNG_KERNEL_ABI_KRETPROBE:
962 {
963 struct lttng_kernel_event_recorder *event_recorder_return;
964 struct lttng_kernel_event_recorder_private *event_recorder_return_priv;
965
966 /* kretprobe defines 2 events */
967 /*
968 * Needs to be explicitly enabled after creation, since
969 * we may want to apply filters.
970 */
971 event_recorder->parent.enabled = 0;
972 event_recorder->priv->parent.registered = 1;
973
974 event_recorder_return = kmem_cache_zalloc(event_recorder_cache, GFP_KERNEL);
975 if (!event_recorder_return) {
976 ret = -ENOMEM;
977 goto register_error;
978 }
979 event_recorder_return_priv = kmem_cache_zalloc(event_recorder_private_cache, GFP_KERNEL);
980 if (!event_recorder_return_priv) {
981 kmem_cache_free(event_recorder_cache, event_recorder_return);
982 ret = -ENOMEM;
983 goto register_error;
984 }
985 event_recorder_return_priv->pub = event_recorder_return;
986 event_recorder_return_priv->parent.pub = &event_recorder_return->parent;
987 event_recorder_return->priv = event_recorder_return_priv;
988 event_recorder_return->parent.priv = &event_recorder_return_priv->parent;
989 event_recorder_return->parent.type = LTTNG_KERNEL_EVENT_TYPE_RECORDER;
990
991 event_recorder_return->chan = chan;
992 event_recorder_return->priv->id = chan->free_event_id++;
993 event_recorder_return->priv->parent.instrumentation = itype;
994 event_recorder_return->parent.enabled = 0;
995 event_recorder_return->priv->parent.registered = 1;
996 INIT_LIST_HEAD(&event_recorder_return->priv->parent.filter_bytecode_runtime_head);
997 INIT_LIST_HEAD(&event_recorder_return->priv->parent.enablers_ref_head);
998 /*
999 * Populate lttng_event structure before kretprobe registration.
1000 */
1001 smp_wmb();
1002 ret = lttng_kretprobes_register(event_name,
1003 event_param->u.kretprobe.symbol_name,
1004 event_param->u.kretprobe.offset,
1005 event_param->u.kretprobe.addr,
1006 event_recorder, event_recorder_return);
1007 if (ret) {
1008 kmem_cache_free(event_recorder_private_cache, event_recorder_return_priv);
1009 kmem_cache_free(event_recorder_cache, event_recorder_return);
1010 ret = -EINVAL;
1011 goto register_error;
1012 }
1013 /* Take 2 refs on the module: one per event. */
1014 ret = try_module_get(event_recorder->priv->parent.desc->owner);
1015 WARN_ON_ONCE(!ret);
1016 ret = try_module_get(event_recorder_return->priv->parent.desc->owner);
1017 WARN_ON_ONCE(!ret);
1018 ret = _lttng_event_metadata_statedump(chan->session, chan,
1019 event_recorder_return);
1020 WARN_ON_ONCE(ret > 0);
1021 if (ret) {
1022 kmem_cache_free(event_recorder_private_cache, event_recorder_return_priv);
1023 kmem_cache_free(event_recorder_cache, event_recorder_return);
1024 module_put(event_recorder_return->priv->parent.desc->owner);
1025 module_put(event_recorder->priv->parent.desc->owner);
1026 goto statedump_error;
1027 }
1028 list_add(&event_recorder_return->priv->node, &chan->session->events);
1029 break;
1030 }
1031
1032 case LTTNG_KERNEL_ABI_SYSCALL:
1033 /*
1034 * Needs to be explicitly enabled after creation, since
1035 * we may want to apply filters.
1036 */
1037 event_recorder->parent.enabled = 0;
1038 event_recorder->priv->parent.registered = 0;
1039 event_recorder->priv->parent.desc = event_desc;
1040 switch (event_param->u.syscall.entryexit) {
1041 case LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT:
1042 ret = -EINVAL;
1043 goto register_error;
1044 case LTTNG_KERNEL_ABI_SYSCALL_ENTRY:
1045 event_recorder->priv->parent.u.syscall.entryexit = LTTNG_SYSCALL_ENTRY;
1046 break;
1047 case LTTNG_KERNEL_ABI_SYSCALL_EXIT:
1048 event_recorder->priv->parent.u.syscall.entryexit = LTTNG_SYSCALL_EXIT;
1049 break;
1050 }
1051 switch (event_param->u.syscall.abi) {
1052 case LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL:
1053 ret = -EINVAL;
1054 goto register_error;
1055 case LTTNG_KERNEL_ABI_SYSCALL_ABI_NATIVE:
1056 event_recorder->priv->parent.u.syscall.abi = LTTNG_SYSCALL_ABI_NATIVE;
1057 break;
1058 case LTTNG_KERNEL_ABI_SYSCALL_ABI_COMPAT:
1059 event_recorder->priv->parent.u.syscall.abi = LTTNG_SYSCALL_ABI_COMPAT;
1060 break;
1061 }
1062 if (!event_recorder->priv->parent.desc) {
1063 ret = -EINVAL;
1064 goto register_error;
1065 }
1066 break;
1067
1068 case LTTNG_KERNEL_ABI_UPROBE:
1069 /*
1070 * Needs to be explicitly enabled after creation, since
1071 * we may want to apply filters.
1072 */
1073 event_recorder->parent.enabled = 0;
1074 event_recorder->priv->parent.registered = 1;
1075
1076 /*
1077 * Populate lttng_event structure before event
1078 * registration.
1079 */
1080 smp_wmb();
1081
1082 ret = lttng_uprobes_register_event(event_param->name,
1083 event_param->u.uprobe.fd,
1084 event_recorder);
1085 if (ret)
1086 goto register_error;
1087 ret = try_module_get(event_recorder->priv->parent.desc->owner);
1088 WARN_ON_ONCE(!ret);
1089 break;
1090
1091 case LTTNG_KERNEL_ABI_FUNCTION: /* Fall-through */
1092 case LTTNG_KERNEL_ABI_NOOP: /* Fall-through */
1093 default:
1094 WARN_ON_ONCE(1);
1095 ret = -EINVAL;
1096 goto register_error;
1097 }
1098 ret = _lttng_event_metadata_statedump(chan->session, chan, event_recorder);
1099 WARN_ON_ONCE(ret > 0);
1100 if (ret) {
1101 goto statedump_error;
1102 }
1103 hlist_add_head(&event_recorder->priv->hlist, head);
1104 list_add(&event_recorder->priv->node, &chan->session->events);
1105 return event_recorder;
1106
1107 statedump_error:
1108 /* If a statedump error occurs, events will not be readable. */
1109 register_error:
1110 kmem_cache_free(event_recorder_private_cache, event_recorder_priv);
1111 cache_private_error:
1112 kmem_cache_free(event_recorder_cache, event_recorder);
1113 cache_error:
1114 exist:
1115 type_error:
1116 full:
1117 return ERR_PTR(ret);
1118 }
1119
1120 struct lttng_kernel_event_notifier *_lttng_event_notifier_create(
1121 const struct lttng_kernel_event_desc *event_desc,
1122 uint64_t token, uint64_t error_counter_index,
1123 struct lttng_event_notifier_group *event_notifier_group,
1124 struct lttng_kernel_abi_event_notifier *event_notifier_param,
1125 enum lttng_kernel_abi_instrumentation itype)
1126 {
1127 struct lttng_kernel_event_notifier *event_notifier;
1128 struct lttng_kernel_event_notifier_private *event_notifier_priv;
1129 struct lttng_counter *error_counter;
1130 const char *event_name;
1131 struct hlist_head *head;
1132 int ret;
1133
1134 switch (itype) {
1135 case LTTNG_KERNEL_ABI_TRACEPOINT:
1136 event_name = event_desc->event_name;
1137 break;
1138
1139 case LTTNG_KERNEL_ABI_KPROBE: /* Fall-through */
1140 case LTTNG_KERNEL_ABI_UPROBE: /* Fall-through */
1141 case LTTNG_KERNEL_ABI_SYSCALL:
1142 event_name = event_notifier_param->event.name;
1143 break;
1144
1145 case LTTNG_KERNEL_ABI_KRETPROBE: /* Fall-through */
1146 case LTTNG_KERNEL_ABI_FUNCTION: /* Fall-through */
1147 case LTTNG_KERNEL_ABI_NOOP: /* Fall-through */
1148 default:
1149 WARN_ON_ONCE(1);
1150 ret = -EINVAL;
1151 goto type_error;
1152 }
1153
1154 head = utils_borrow_hash_table_bucket(event_notifier_group->event_notifiers_ht.table,
1155 LTTNG_EVENT_NOTIFIER_HT_SIZE, event_name);
1156 lttng_hlist_for_each_entry(event_notifier_priv, head, hlist) {
1157 WARN_ON_ONCE(!event_notifier_priv->parent.desc);
1158 if (!strncmp(event_notifier_priv->parent.desc->event_name, event_name,
1159 LTTNG_KERNEL_ABI_SYM_NAME_LEN - 1)
1160 && event_notifier_group == event_notifier_priv->group
1161 && token == event_notifier_priv->parent.user_token) {
1162 ret = -EEXIST;
1163 goto exist;
1164 }
1165 }
1166
1167 event_notifier = kmem_cache_zalloc(event_notifier_cache, GFP_KERNEL);
1168 if (!event_notifier) {
1169 ret = -ENOMEM;
1170 goto cache_error;
1171 }
1172 event_notifier_priv = kmem_cache_zalloc(event_notifier_private_cache, GFP_KERNEL);
1173 if (!event_notifier_priv) {
1174 ret = -ENOMEM;
1175 goto cache_private_error;
1176 }
1177 event_notifier_priv->pub = event_notifier;
1178 event_notifier_priv->parent.pub = &event_notifier->parent;
1179 event_notifier->priv = event_notifier_priv;
1180 event_notifier->parent.priv = &event_notifier_priv->parent;
1181 event_notifier->parent.type = LTTNG_KERNEL_EVENT_TYPE_NOTIFIER;
1182
1183 event_notifier->priv->group = event_notifier_group;
1184 event_notifier->priv->parent.user_token = token;
1185 event_notifier->priv->error_counter_index = error_counter_index;
1186 event_notifier->priv->num_captures = 0;
1187 event_notifier->priv->parent.instrumentation = itype;
1188 event_notifier->notification_send = lttng_event_notifier_notification_send;
1189 INIT_LIST_HEAD(&event_notifier->priv->parent.filter_bytecode_runtime_head);
1190 INIT_LIST_HEAD(&event_notifier->priv->parent.enablers_ref_head);
1191 INIT_LIST_HEAD(&event_notifier->priv->capture_bytecode_runtime_head);
1192
1193 switch (itype) {
1194 case LTTNG_KERNEL_ABI_TRACEPOINT:
1195 /* Event will be enabled by enabler sync. */
1196 event_notifier->parent.enabled = 0;
1197 event_notifier->priv->parent.registered = 0;
1198 event_notifier->priv->parent.desc = lttng_event_desc_get(event_name);
1199 if (!event_notifier->priv->parent.desc) {
1200 ret = -ENOENT;
1201 goto register_error;
1202 }
1203 /* Populate lttng_event_notifier structure before event registration. */
1204 smp_wmb();
1205 break;
1206
1207 case LTTNG_KERNEL_ABI_KPROBE:
1208 /*
1209 * Needs to be explicitly enabled after creation, since
1210 * we may want to apply filters.
1211 */
1212 event_notifier->parent.enabled = 0;
1213 event_notifier->priv->parent.registered = 1;
1214 /*
1215 * Populate lttng_event_notifier structure before event
1216 * registration.
1217 */
1218 smp_wmb();
1219 ret = lttng_kprobes_register_event_notifier(
1220 event_notifier_param->event.u.kprobe.symbol_name,
1221 event_notifier_param->event.u.kprobe.offset,
1222 event_notifier_param->event.u.kprobe.addr,
1223 event_notifier);
1224 if (ret) {
1225 ret = -EINVAL;
1226 goto register_error;
1227 }
1228 ret = try_module_get(event_notifier->priv->parent.desc->owner);
1229 WARN_ON_ONCE(!ret);
1230 break;
1231
1232 case LTTNG_KERNEL_ABI_SYSCALL:
1233 /*
1234 * Needs to be explicitly enabled after creation, since
1235 * we may want to apply filters.
1236 */
1237 event_notifier->parent.enabled = 0;
1238 event_notifier->priv->parent.registered = 0;
1239 event_notifier->priv->parent.desc = event_desc;
1240 switch (event_notifier_param->event.u.syscall.entryexit) {
1241 case LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT:
1242 ret = -EINVAL;
1243 goto register_error;
1244 case LTTNG_KERNEL_ABI_SYSCALL_ENTRY:
1245 event_notifier->priv->parent.u.syscall.entryexit = LTTNG_SYSCALL_ENTRY;
1246 break;
1247 case LTTNG_KERNEL_ABI_SYSCALL_EXIT:
1248 event_notifier->priv->parent.u.syscall.entryexit = LTTNG_SYSCALL_EXIT;
1249 break;
1250 }
1251 switch (event_notifier_param->event.u.syscall.abi) {
1252 case LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL:
1253 ret = -EINVAL;
1254 goto register_error;
1255 case LTTNG_KERNEL_ABI_SYSCALL_ABI_NATIVE:
1256 event_notifier->priv->parent.u.syscall.abi = LTTNG_SYSCALL_ABI_NATIVE;
1257 break;
1258 case LTTNG_KERNEL_ABI_SYSCALL_ABI_COMPAT:
1259 event_notifier->priv->parent.u.syscall.abi = LTTNG_SYSCALL_ABI_COMPAT;
1260 break;
1261 }
1262
1263 if (!event_notifier->priv->parent.desc) {
1264 ret = -EINVAL;
1265 goto register_error;
1266 }
1267 break;
1268
1269 case LTTNG_KERNEL_ABI_UPROBE:
1270 /*
1271 * Needs to be explicitly enabled after creation, since
1272 * we may want to apply filters.
1273 */
1274 event_notifier->parent.enabled = 0;
1275 event_notifier->priv->parent.registered = 1;
1276
1277 /*
1278 * Populate lttng_event_notifier structure before
1279 * event_notifier registration.
1280 */
1281 smp_wmb();
1282
1283 ret = lttng_uprobes_register_event_notifier(
1284 event_notifier_param->event.name,
1285 event_notifier_param->event.u.uprobe.fd,
1286 event_notifier);
1287 if (ret)
1288 goto register_error;
1289 ret = try_module_get(event_notifier->priv->parent.desc->owner);
1290 WARN_ON_ONCE(!ret);
1291 break;
1292
1293 case LTTNG_KERNEL_ABI_KRETPROBE: /* Fall-through */
1294 case LTTNG_KERNEL_ABI_FUNCTION: /* Fall-through */
1295 case LTTNG_KERNEL_ABI_NOOP: /* Fall-through */
1296 default:
1297 WARN_ON_ONCE(1);
1298 ret = -EINVAL;
1299 goto register_error;
1300 }
1301
1302 list_add(&event_notifier->priv->node, &event_notifier_group->event_notifiers_head);
1303 hlist_add_head(&event_notifier->priv->hlist, head);
1304
1305 /*
1306 * Clear the error counter bucket. The sessiond keeps track of which
1307 * bucket is currently in use. We trust it. The session lock
1308 * synchronizes against concurrent creation of the error
1309 * counter.
1310 */
1311 error_counter = event_notifier_group->error_counter;
1312 if (error_counter) {
1313 size_t dimension_index[1];
1314
1315 /*
1316 * Check that the index is within the boundary of the counter.
1317 */
1318 if (event_notifier->priv->error_counter_index >= event_notifier_group->error_counter_len) {
1319 printk(KERN_INFO "LTTng: event_notifier: Error counter index out-of-bound: counter-len=%zu, index=%llu\n",
1320 event_notifier_group->error_counter_len, event_notifier->priv->error_counter_index);
1321 ret = -EINVAL;
1322 goto register_error;
1323 }
1324
1325 dimension_index[0] = event_notifier->priv->error_counter_index;
1326 ret = error_counter->ops->counter_clear(error_counter->counter, dimension_index);
1327 if (ret) {
1328 printk(KERN_INFO "LTTng: event_notifier: Unable to clear error counter bucket %llu\n",
1329 event_notifier->priv->error_counter_index);
1330 goto register_error;
1331 }
1332 }
1333
1334 return event_notifier;
1335
1336 register_error:
1337 kmem_cache_free(event_notifier_private_cache, event_notifier_priv);
1338 cache_private_error:
1339 kmem_cache_free(event_notifier_cache, event_notifier);
1340 cache_error:
1341 exist:
1342 type_error:
1343 return ERR_PTR(ret);
1344 }
1345
1346 int lttng_kernel_counter_read(struct lttng_counter *counter,
1347 const size_t *dim_indexes, int32_t cpu,
1348 int64_t *val, bool *overflow, bool *underflow)
1349 {
1350 return counter->ops->counter_read(counter->counter, dim_indexes,
1351 cpu, val, overflow, underflow);
1352 }
1353
1354 int lttng_kernel_counter_aggregate(struct lttng_counter *counter,
1355 const size_t *dim_indexes, int64_t *val,
1356 bool *overflow, bool *underflow)
1357 {
1358 return counter->ops->counter_aggregate(counter->counter, dim_indexes,
1359 val, overflow, underflow);
1360 }
1361
1362 int lttng_kernel_counter_clear(struct lttng_counter *counter,
1363 const size_t *dim_indexes)
1364 {
1365 return counter->ops->counter_clear(counter->counter, dim_indexes);
1366 }
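/*
 * Usage sketch (illustrative only): aggregating an error counter bucket
 * across CPUs; the index would normally be an event notifier's
 * error_counter_index.
 *
 *	size_t index[1] = { event_notifier->priv->error_counter_index };
 *	int64_t value;
 *	bool overflow, underflow;
 *
 *	if (!lttng_kernel_counter_aggregate(counter, index, &value,
 *			&overflow, &underflow))
 *		... value is the number of errors recorded in that bucket ...
 */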
1367
1368 struct lttng_kernel_event_recorder *lttng_kernel_event_recorder_create(struct lttng_channel *chan,
1369 struct lttng_kernel_abi_event *event_param,
1370 const struct lttng_kernel_event_desc *event_desc,
1371 enum lttng_kernel_abi_instrumentation itype)
1372 {
1373 struct lttng_kernel_event_recorder *event;
1374
1375 mutex_lock(&sessions_mutex);
1376 event = _lttng_kernel_event_recorder_create(chan, event_param, event_desc, itype);
1377 mutex_unlock(&sessions_mutex);
1378 return event;
1379 }
1380
1381 struct lttng_kernel_event_notifier *lttng_event_notifier_create(
1382 const struct lttng_kernel_event_desc *event_desc,
1383 uint64_t id, uint64_t error_counter_index,
1384 struct lttng_event_notifier_group *event_notifier_group,
1385 struct lttng_kernel_abi_event_notifier *event_notifier_param,
1386 enum lttng_kernel_abi_instrumentation itype)
1387 {
1388 struct lttng_kernel_event_notifier *event_notifier;
1389
1390 mutex_lock(&sessions_mutex);
1391 event_notifier = _lttng_event_notifier_create(event_desc, id,
1392 error_counter_index, event_notifier_group,
1393 event_notifier_param, itype);
1394 mutex_unlock(&sessions_mutex);
1395 return event_notifier;
1396 }
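/*
 * Usage sketch (illustrative only; the token, bucket index and kprobe
 * parameters are hypothetical, and passing a NULL descriptor is only an
 * assumption for the kprobe case): wiring an event notifier into a group
 * created with lttng_event_notifier_group_create().
 *
 *	struct lttng_event_notifier_group *group;
 *	struct lttng_kernel_event_notifier *notifier;
 *	struct lttng_kernel_abi_event_notifier param = { ... };
 *
 *	group = lttng_event_notifier_group_create();
 *	notifier = lttng_event_notifier_create(NULL, token, error_bucket,
 *			group, &param, LTTNG_KERNEL_ABI_KPROBE);
 *	if (!IS_ERR(notifier))
 *		lttng_event_notifier_enable(notifier);
 */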
1397
1398 /* Only used for tracepoints and system calls for now. */
1399 static
1400 void register_event(struct lttng_kernel_event_recorder *event_recorder)
1401 {
1402 const struct lttng_kernel_event_desc *desc;
1403 int ret = -EINVAL;
1404
1405 if (event_recorder->priv->parent.registered)
1406 return;
1407
1408 desc = event_recorder->priv->parent.desc;
1409 switch (event_recorder->priv->parent.instrumentation) {
1410 case LTTNG_KERNEL_ABI_TRACEPOINT:
1411 ret = lttng_wrapper_tracepoint_probe_register(desc->event_kname,
1412 desc->probe_callback,
1413 event_recorder);
1414 break;
1415
1416 case LTTNG_KERNEL_ABI_SYSCALL:
1417 ret = lttng_syscall_filter_enable_event(event_recorder->chan, event_recorder);
1418 break;
1419
1420 case LTTNG_KERNEL_ABI_KPROBE: /* Fall-through */
1421 case LTTNG_KERNEL_ABI_UPROBE: /* Fall-through */
1422 case LTTNG_KERNEL_ABI_KRETPROBE:
1423 ret = 0;
1424 break;
1425
1426 case LTTNG_KERNEL_ABI_FUNCTION: /* Fall-through */
1427 case LTTNG_KERNEL_ABI_NOOP: /* Fall-through */
1428 default:
1429 WARN_ON_ONCE(1);
1430 }
1431 if (!ret)
1432 event_recorder->priv->parent.registered = 1;
1433 }
1434
1435 /*
1436 * Only used internally at session destruction.
1437 */
1438 int _lttng_event_unregister(struct lttng_kernel_event_recorder *event_recorder)
1439 {
1440 struct lttng_kernel_event_common_private *event_priv = &event_recorder->priv->parent;
1441 const struct lttng_kernel_event_desc *desc;
1442 int ret = -EINVAL;
1443
1444 if (!event_priv->registered)
1445 return 0;
1446
1447 desc = event_priv->desc;
1448 switch (event_priv->instrumentation) {
1449 case LTTNG_KERNEL_ABI_TRACEPOINT:
1450 ret = lttng_wrapper_tracepoint_probe_unregister(event_priv->desc->event_kname,
1451 event_priv->desc->probe_callback,
1452 event_recorder);
1453 break;
1454
1455 case LTTNG_KERNEL_ABI_KPROBE:
1456 lttng_kprobes_unregister_event(event_recorder);
1457 ret = 0;
1458 break;
1459
1460 case LTTNG_KERNEL_ABI_KRETPROBE:
1461 lttng_kretprobes_unregister(event_recorder);
1462 ret = 0;
1463 break;
1464
1465 case LTTNG_KERNEL_ABI_SYSCALL:
1466 ret = lttng_syscall_filter_disable_event(event_recorder->chan, event_recorder);
1467 break;
1468
1469 case LTTNG_KERNEL_ABI_NOOP:
1470 ret = 0;
1471 break;
1472
1473 case LTTNG_KERNEL_ABI_UPROBE:
1474 lttng_uprobes_unregister_event(event_recorder);
1475 ret = 0;
1476 break;
1477
1478 case LTTNG_KERNEL_ABI_FUNCTION: /* Fall-through */
1479 default:
1480 WARN_ON_ONCE(1);
1481 }
1482 if (!ret)
1483 event_priv->registered = 0;
1484 return ret;
1485 }
1486
1487 /* Only used for tracepoints and system calls for now. */
1488 static
1489 void register_event_notifier(struct lttng_kernel_event_notifier *event_notifier)
1490 {
1491 const struct lttng_kernel_event_desc *desc;
1492 int ret = -EINVAL;
1493
1494 if (event_notifier->priv->parent.registered)
1495 return;
1496
1497 desc = event_notifier->priv->parent.desc;
1498 switch (event_notifier->priv->parent.instrumentation) {
1499 case LTTNG_KERNEL_ABI_TRACEPOINT:
1500 ret = lttng_wrapper_tracepoint_probe_register(desc->event_kname,
1501 desc->event_notifier_callback,
1502 event_notifier);
1503 break;
1504
1505 case LTTNG_KERNEL_ABI_SYSCALL:
1506 ret = lttng_syscall_filter_enable_event_notifier(event_notifier);
1507 break;
1508
1509 case LTTNG_KERNEL_ABI_KPROBE: /* Fall-through */
1510 case LTTNG_KERNEL_ABI_UPROBE:
1511 ret = 0;
1512 break;
1513
1514 case LTTNG_KERNEL_ABI_KRETPROBE: /* Fall-through */
1515 case LTTNG_KERNEL_ABI_FUNCTION: /* Fall-through */
1516 case LTTNG_KERNEL_ABI_NOOP: /* Fall-through */
1517 default:
1518 WARN_ON_ONCE(1);
1519 }
1520 if (!ret)
1521 event_notifier->priv->parent.registered = 1;
1522 }
1523
1524 static
1525 int _lttng_event_notifier_unregister(
1526 struct lttng_kernel_event_notifier *event_notifier)
1527 {
1528 const struct lttng_kernel_event_desc *desc;
1529 int ret = -EINVAL;
1530
1531 if (!event_notifier->priv->parent.registered)
1532 return 0;
1533
1534 desc = event_notifier->priv->parent.desc;
1535 switch (event_notifier->priv->parent.instrumentation) {
1536 case LTTNG_KERNEL_ABI_TRACEPOINT:
1537 ret = lttng_wrapper_tracepoint_probe_unregister(event_notifier->priv->parent.desc->event_kname,
1538 event_notifier->priv->parent.desc->event_notifier_callback,
1539 event_notifier);
1540 break;
1541
1542 case LTTNG_KERNEL_ABI_KPROBE:
1543 lttng_kprobes_unregister_event_notifier(event_notifier);
1544 ret = 0;
1545 break;
1546
1547 case LTTNG_KERNEL_ABI_UPROBE:
1548 lttng_uprobes_unregister_event_notifier(event_notifier);
1549 ret = 0;
1550 break;
1551
1552 case LTTNG_KERNEL_ABI_SYSCALL:
1553 ret = lttng_syscall_filter_disable_event_notifier(event_notifier);
1554 break;
1555
1556 case LTTNG_KERNEL_ABI_KRETPROBE: /* Fall-through */
1557 case LTTNG_KERNEL_ABI_FUNCTION: /* Fall-through */
1558 case LTTNG_KERNEL_ABI_NOOP: /* Fall-through */
1559 default:
1560 WARN_ON_ONCE(1);
1561 }
1562 if (!ret)
1563 event_notifier->priv->parent.registered = 0;
1564 return ret;
1565 }
1566
1567 /*
1568 * Only used internally at session destruction.
1569 */
1570 static
1571 void _lttng_event_destroy(struct lttng_kernel_event_recorder *event_recorder)
1572 {
1573 struct lttng_kernel_event_common_private *event_priv = &event_recorder->priv->parent;
1574 struct lttng_enabler_ref *enabler_ref, *tmp_enabler_ref;
1575
1576 switch (event_priv->instrumentation) {
1577 case LTTNG_KERNEL_ABI_TRACEPOINT:
1578 lttng_event_desc_put(event_priv->desc);
1579 break;
1580
1581 case LTTNG_KERNEL_ABI_KPROBE:
1582 module_put(event_priv->desc->owner);
1583 lttng_kprobes_destroy_event_private(event_recorder);
1584 break;
1585
1586 case LTTNG_KERNEL_ABI_KRETPROBE:
1587 module_put(event_priv->desc->owner);
1588 lttng_kretprobes_destroy_private(event_recorder);
1589 break;
1590
1591 case LTTNG_KERNEL_ABI_SYSCALL:
1592 break;
1593
1594 case LTTNG_KERNEL_ABI_UPROBE:
1595 module_put(event_priv->desc->owner);
1596 lttng_uprobes_destroy_event_private(event_recorder);
1597 break;
1598
1599 case LTTNG_KERNEL_ABI_FUNCTION: /* Fall-through */
1600 case LTTNG_KERNEL_ABI_NOOP: /* Fall-through */
1601 default:
1602 WARN_ON_ONCE(1);
1603 }
1604 list_del(&event_recorder->priv->node);
1605 lttng_free_event_filter_runtime(event_recorder);
1606 /* Free event enabler refs */
1607 list_for_each_entry_safe(enabler_ref, tmp_enabler_ref,
1608 &event_priv->enablers_ref_head, node)
1609 kfree(enabler_ref);
1610 kmem_cache_free(event_recorder_private_cache, event_recorder->priv);
1611 kmem_cache_free(event_recorder_cache, event_recorder);
1612 }
1613
1614 /*
1615 * Only used internally at session destruction.
1616 */
1617 static
1618 void _lttng_event_notifier_destroy(struct lttng_kernel_event_notifier *event_notifier)
1619 {
1620 struct lttng_enabler_ref *enabler_ref, *tmp_enabler_ref;
1621
1622 switch (event_notifier->priv->parent.instrumentation) {
1623 case LTTNG_KERNEL_ABI_TRACEPOINT:
1624 lttng_event_desc_put(event_notifier->priv->parent.desc);
1625 break;
1626
1627 case LTTNG_KERNEL_ABI_KPROBE:
1628 module_put(event_notifier->priv->parent.desc->owner);
1629 lttng_kprobes_destroy_event_notifier_private(event_notifier);
1630 break;
1631
1632 case LTTNG_KERNEL_ABI_SYSCALL:
1633 break;
1634
1635 case LTTNG_KERNEL_ABI_UPROBE:
1636 module_put(event_notifier->priv->parent.desc->owner);
1637 lttng_uprobes_destroy_event_notifier_private(event_notifier);
1638 break;
1639
1640 case LTTNG_KERNEL_ABI_KRETPROBE: /* Fall-through */
1641 case LTTNG_KERNEL_ABI_FUNCTION: /* Fall-through */
1642 case LTTNG_KERNEL_ABI_NOOP: /* Fall-through */
1643 default:
1644 WARN_ON_ONCE(1);
1645 }
1646 list_del(&event_notifier->priv->node);
1647 lttng_free_event_notifier_filter_runtime(event_notifier);
1648 /* Free event enabler refs */
1649 list_for_each_entry_safe(enabler_ref, tmp_enabler_ref,
1650 &event_notifier->priv->parent.enablers_ref_head, node)
1651 kfree(enabler_ref);
1652 kmem_cache_free(event_notifier_private_cache, event_notifier->priv);
1653 kmem_cache_free(event_notifier_cache, event_notifier);
1654 }
1655
1656 struct lttng_id_tracker *get_tracker(struct lttng_session *session,
1657 enum tracker_type tracker_type)
1658 {
1659 switch (tracker_type) {
1660 case TRACKER_PID:
1661 return &session->pid_tracker;
1662 case TRACKER_VPID:
1663 return &session->vpid_tracker;
1664 case TRACKER_UID:
1665 return &session->uid_tracker;
1666 case TRACKER_VUID:
1667 return &session->vuid_tracker;
1668 case TRACKER_GID:
1669 return &session->gid_tracker;
1670 case TRACKER_VGID:
1671 return &session->vgid_tracker;
1672 default:
1673 WARN_ON_ONCE(1);
1674 return NULL;
1675 }
1676 }
1677
1678 int lttng_session_track_id(struct lttng_session *session,
1679 enum tracker_type tracker_type, int id)
1680 {
1681 struct lttng_id_tracker *tracker;
1682 int ret;
1683
1684 tracker = get_tracker(session, tracker_type);
1685 if (!tracker)
1686 return -EINVAL;
1687 if (id < -1)
1688 return -EINVAL;
1689 mutex_lock(&sessions_mutex);
1690 if (id == -1) {
1691 /* track all ids: destroy tracker. */
1692 lttng_id_tracker_destroy(tracker, true);
1693 ret = 0;
1694 } else {
1695 ret = lttng_id_tracker_add(tracker, id);
1696 }
1697 mutex_unlock(&sessions_mutex);
1698 return ret;
1699 }
1700
1701 int lttng_session_untrack_id(struct lttng_session *session,
1702 enum tracker_type tracker_type, int id)
1703 {
1704 struct lttng_id_tracker *tracker;
1705 int ret;
1706
1707 tracker = get_tracker(session, tracker_type);
1708 if (!tracker)
1709 return -EINVAL;
1710 if (id < -1)
1711 return -EINVAL;
1712 mutex_lock(&sessions_mutex);
1713 if (id == -1) {
1714 /* untrack all ids: replace by empty tracker. */
1715 ret = lttng_id_tracker_empty_set(tracker);
1716 } else {
1717 ret = lttng_id_tracker_del(tracker, id);
1718 }
1719 mutex_unlock(&sessions_mutex);
1720 return ret;
1721 }
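/*
 * Usage sketch (illustrative only): restricting a session to a single PID.
 * An id of -1 means "all": tracking -1 destroys the tracker so every id is
 * recorded again, untracking -1 installs an empty tracker so nothing is.
 *
 *	lttng_session_untrack_id(session, TRACKER_PID, -1);	-- empty set
 *	lttng_session_track_id(session, TRACKER_PID, 1234);	-- then track 1234
 */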
1722
1723 static
1724 void *id_list_start(struct seq_file *m, loff_t *pos)
1725 {
1726 struct lttng_id_tracker *id_tracker = m->private;
1727 struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
1728 struct lttng_id_hash_node *e;
1729 int iter = 0, i;
1730
1731 mutex_lock(&sessions_mutex);
1732 if (id_tracker_p) {
1733 for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
1734 struct hlist_head *head = &id_tracker_p->id_hash[i];
1735
1736 lttng_hlist_for_each_entry(e, head, hlist) {
1737 if (iter++ >= *pos)
1738 return e;
1739 }
1740 }
1741 } else {
1742 /* ID tracker disabled. */
1743 if (iter >= *pos && iter == 0) {
1744 return id_tracker_p; /* empty tracker */
1745 }
1746 iter++;
1747 }
1748 /* End of list */
1749 return NULL;
1750 }
1751
1752 /* Called with sessions_mutex held. */
1753 static
1754 void *id_list_next(struct seq_file *m, void *p, loff_t *ppos)
1755 {
1756 struct lttng_id_tracker *id_tracker = m->private;
1757 struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
1758 struct lttng_id_hash_node *e;
1759 int iter = 0, i;
1760
1761 (*ppos)++;
1762 if (id_tracker_p) {
1763 for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
1764 struct hlist_head *head = &id_tracker_p->id_hash[i];
1765
1766 lttng_hlist_for_each_entry(e, head, hlist) {
1767 if (iter++ >= *ppos)
1768 return e;
1769 }
1770 }
1771 } else {
1772 /* ID tracker disabled. */
1773 if (iter >= *ppos && iter == 0)
1774 return p; /* empty tracker */
1775 iter++;
1776 }
1777
1778 /* End of list */
1779 return NULL;
1780 }
1781
1782 static
1783 void id_list_stop(struct seq_file *m, void *p)
1784 {
1785 mutex_unlock(&sessions_mutex);
1786 }
1787
1788 static
1789 int id_list_show(struct seq_file *m, void *p)
1790 {
1791 struct lttng_id_tracker *id_tracker = m->private;
1792 struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
1793 int id;
1794
1795 if (p == id_tracker_p) {
1796 /* Tracker disabled. */
1797 id = -1;
1798 } else {
1799 const struct lttng_id_hash_node *e = p;
1800
1801 id = lttng_id_tracker_get_node_id(e);
1802 }
1803 switch (id_tracker->tracker_type) {
1804 case TRACKER_PID:
1805 seq_printf(m, "process { pid = %d; };\n", id);
1806 break;
1807 case TRACKER_VPID:
1808 seq_printf(m, "process { vpid = %d; };\n", id);
1809 break;
1810 case TRACKER_UID:
1811 seq_printf(m, "user { uid = %d; };\n", id);
1812 break;
1813 case TRACKER_VUID:
1814 seq_printf(m, "user { vuid = %d; };\n", id);
1815 break;
1816 case TRACKER_GID:
1817 seq_printf(m, "group { gid = %d; };\n", id);
1818 break;
1819 case TRACKER_VGID:
1820 seq_printf(m, "group { vgid = %d; };\n", id);
1821 break;
1822 default:
1823 seq_printf(m, "UNKNOWN { field = %d };\n", id);
1824 }
1825 return 0;
1826 }
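/*
 * Illustrative seq_file output (example values, derived from the seq_printf
 * format strings above): for a PID tracker containing PIDs 42 and 57 the
 * file reads:
 *   process { pid = 42; };
 *   process { pid = 57; };
 * A disabled tracker (track everything) shows a single "pid = -1" record.
 */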
1827
1828 static
1829 const struct seq_operations lttng_tracker_ids_list_seq_ops = {
1830 .start = id_list_start,
1831 .next = id_list_next,
1832 .stop = id_list_stop,
1833 .show = id_list_show,
1834 };
1835
1836 static
1837 int lttng_tracker_ids_list_open(struct inode *inode, struct file *file)
1838 {
1839 return seq_open(file, &lttng_tracker_ids_list_seq_ops);
1840 }
1841
1842 static
1843 int lttng_tracker_ids_list_release(struct inode *inode, struct file *file)
1844 {
1845 struct seq_file *m = file->private_data;
1846 struct lttng_id_tracker *id_tracker = m->private;
1847 int ret;
1848
1849 WARN_ON_ONCE(!id_tracker);
1850 ret = seq_release(inode, file);
1851 if (!ret)
1852 fput(id_tracker->session->file);
1853 return ret;
1854 }
1855
1856 const struct file_operations lttng_tracker_ids_list_fops = {
1857 .owner = THIS_MODULE,
1858 .open = lttng_tracker_ids_list_open,
1859 .read = seq_read,
1860 .llseek = seq_lseek,
1861 .release = lttng_tracker_ids_list_release,
1862 };
1863
1864 int lttng_session_list_tracker_ids(struct lttng_session *session,
1865 enum tracker_type tracker_type)
1866 {
1867 struct file *tracker_ids_list_file;
1868 struct seq_file *m;
1869 int file_fd, ret;
1870
1871 file_fd = lttng_get_unused_fd();
1872 if (file_fd < 0) {
1873 ret = file_fd;
1874 goto fd_error;
1875 }
1876
1877 tracker_ids_list_file = anon_inode_getfile("[lttng_tracker_ids_list]",
1878 &lttng_tracker_ids_list_fops,
1879 NULL, O_RDWR);
1880 if (IS_ERR(tracker_ids_list_file)) {
1881 ret = PTR_ERR(tracker_ids_list_file);
1882 goto file_error;
1883 }
1884 if (!atomic_long_add_unless(&session->file->f_count, 1, LONG_MAX)) {
1885 ret = -EOVERFLOW;
1886 goto refcount_error;
1887 }
1888 ret = lttng_tracker_ids_list_fops.open(NULL, tracker_ids_list_file);
1889 if (ret < 0)
1890 goto open_error;
1891 m = tracker_ids_list_file->private_data;
1892
1893 m->private = get_tracker(session, tracker_type);
1894 BUG_ON(!m->private);
1895 fd_install(file_fd, tracker_ids_list_file);
1896
1897 return file_fd;
1898
1899 open_error:
1900 atomic_long_dec(&session->file->f_count);
1901 refcount_error:
1902 fput(tracker_ids_list_file);
1903 file_error:
1904 put_unused_fd(file_fd);
1905 fd_error:
1906 return ret;
1907 }
1908
1909 /*
1910 * Enabler management.
1911 */
1912 static
1913 int lttng_match_enabler_star_glob(const char *desc_name,
1914 const char *pattern)
1915 {
1916 if (!strutils_star_glob_match(pattern, LTTNG_SIZE_MAX,
1917 desc_name, LTTNG_SIZE_MAX))
1918 return 0;
1919 return 1;
1920 }
1921
1922 static
1923 int lttng_match_enabler_name(const char *desc_name,
1924 const char *name)
1925 {
1926 if (strcmp(desc_name, name))
1927 return 0;
1928 return 1;
1929 }
1930
1931 int lttng_desc_match_enabler(const struct lttng_kernel_event_desc *desc,
1932 struct lttng_enabler *enabler)
1933 {
1934 const char *desc_name, *enabler_name;
1935 bool compat = false, entry = false;
1936
1937 enabler_name = enabler->event_param.name;
1938 switch (enabler->event_param.instrumentation) {
1939 case LTTNG_KERNEL_ABI_TRACEPOINT:
1940 desc_name = desc->event_name;
1941 switch (enabler->format_type) {
1942 case LTTNG_ENABLER_FORMAT_STAR_GLOB:
1943 return lttng_match_enabler_star_glob(desc_name, enabler_name);
1944 case LTTNG_ENABLER_FORMAT_NAME:
1945 return lttng_match_enabler_name(desc_name, enabler_name);
1946 default:
1947 return -EINVAL;
1948 }
1949 break;
1950
1951 case LTTNG_KERNEL_ABI_SYSCALL:
1952 desc_name = desc->event_name;
1953 if (!strncmp(desc_name, "compat_", strlen("compat_"))) {
1954 desc_name += strlen("compat_");
1955 compat = true;
1956 }
1957 if (!strncmp(desc_name, "syscall_exit_",
1958 strlen("syscall_exit_"))) {
1959 desc_name += strlen("syscall_exit_");
1960 } else if (!strncmp(desc_name, "syscall_entry_",
1961 strlen("syscall_entry_"))) {
1962 desc_name += strlen("syscall_entry_");
1963 entry = true;
1964 } else {
1965 WARN_ON_ONCE(1);
1966 return -EINVAL;
1967 }
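		/*
		 * Example (illustrative): a descriptor named
		 * "compat_syscall_entry_openat" reaches this point with
		 * compat = true, entry = true and desc_name = "openat",
		 * which is then matched against the enabler's entry/exit,
		 * ABI and name criteria below.
		 */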
1968 switch (enabler->event_param.u.syscall.entryexit) {
1969 case LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT:
1970 break;
1971 case LTTNG_KERNEL_ABI_SYSCALL_ENTRY:
1972 if (!entry)
1973 return 0;
1974 break;
1975 case LTTNG_KERNEL_ABI_SYSCALL_EXIT:
1976 if (entry)
1977 return 0;
1978 break;
1979 default:
1980 return -EINVAL;
1981 }
1982 switch (enabler->event_param.u.syscall.abi) {
1983 case LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL:
1984 break;
1985 case LTTNG_KERNEL_ABI_SYSCALL_ABI_NATIVE:
1986 if (compat)
1987 return 0;
1988 break;
1989 case LTTNG_KERNEL_ABI_SYSCALL_ABI_COMPAT:
1990 if (!compat)
1991 return 0;
1992 break;
1993 default:
1994 return -EINVAL;
1995 }
1996 switch (enabler->event_param.u.syscall.match) {
1997 case LTTNG_KERNEL_ABI_SYSCALL_MATCH_NAME:
1998 switch (enabler->format_type) {
1999 case LTTNG_ENABLER_FORMAT_STAR_GLOB:
2000 return lttng_match_enabler_star_glob(desc_name, enabler_name);
2001 case LTTNG_ENABLER_FORMAT_NAME:
2002 return lttng_match_enabler_name(desc_name, enabler_name);
2003 default:
2004 return -EINVAL;
2005 }
2006 break;
2007 case LTTNG_KERNEL_ABI_SYSCALL_MATCH_NR:
2008 return -EINVAL; /* Not implemented. */
2009 default:
2010 return -EINVAL;
2011 }
2012 break;
2013
2014 default:
2015 WARN_ON_ONCE(1);
2016 return -EINVAL;
2017 }
2018 }
2019
2020 static
2021 int lttng_event_enabler_match_event(struct lttng_event_enabler *event_enabler,
2022 struct lttng_kernel_event_recorder *event_recorder)
2023 {
2024 struct lttng_enabler *base_enabler = lttng_event_enabler_as_enabler(
2025 event_enabler);
2026
2027 if (base_enabler->event_param.instrumentation != event_recorder->priv->parent.instrumentation)
2028 return 0;
2029 if (lttng_desc_match_enabler(event_recorder->priv->parent.desc, base_enabler)
2030 && event_recorder->chan == event_enabler->chan)
2031 return 1;
2032 else
2033 return 0;
2034 }
2035
2036 static
2037 int lttng_event_notifier_enabler_match_event_notifier(struct lttng_event_notifier_enabler *event_notifier_enabler,
2038 struct lttng_kernel_event_notifier *event_notifier)
2039 {
2040 struct lttng_enabler *base_enabler = lttng_event_notifier_enabler_as_enabler(
2041 event_notifier_enabler);
2042
2043 if (base_enabler->event_param.instrumentation != event_notifier->priv->parent.instrumentation)
2044 return 0;
2045 if (lttng_desc_match_enabler(event_notifier->priv->parent.desc, base_enabler)
2046 && event_notifier->priv->group == event_notifier_enabler->group
2047 && event_notifier->priv->parent.user_token == event_notifier_enabler->base.user_token)
2048 return 1;
2049 else
2050 return 0;
2051 }
2052
2053 static
2054 struct lttng_enabler_ref *lttng_enabler_ref(
2055 struct list_head *enablers_ref_list,
2056 struct lttng_enabler *enabler)
2057 {
2058 struct lttng_enabler_ref *enabler_ref;
2059
2060 list_for_each_entry(enabler_ref, enablers_ref_list, node) {
2061 if (enabler_ref->ref == enabler)
2062 return enabler_ref;
2063 }
2064 return NULL;
2065 }
2066
2067 static
2068 void lttng_create_tracepoint_event_if_missing(struct lttng_event_enabler *event_enabler)
2069 {
2070 struct lttng_session *session = event_enabler->chan->session;
2071 struct lttng_kernel_probe_desc *probe_desc;
2072 const struct lttng_kernel_event_desc *desc;
2073 int i;
2074 struct list_head *probe_list;
2075
2076 probe_list = lttng_get_probe_list_head();
2077 /*
2078 * For each probe event, if we find that a probe event matches
2079 * our enabler, create an associated lttng_event if not
2080 * already present.
2081 */
2082 list_for_each_entry(probe_desc, probe_list, head) {
2083 for (i = 0; i < probe_desc->nr_events; i++) {
2084 int found = 0;
2085 struct hlist_head *head;
2086 struct lttng_kernel_event_recorder_private *event_recorder_private;
2087 struct lttng_kernel_event_recorder *event_recorder;
2088
2089 desc = probe_desc->event_desc[i];
2090 if (!lttng_desc_match_enabler(desc,
2091 lttng_event_enabler_as_enabler(event_enabler)))
2092 continue;
2093
2094 /*
2095 * Check if already created.
2096 */
2097 head = utils_borrow_hash_table_bucket(
2098 session->events_ht.table, LTTNG_EVENT_HT_SIZE,
2099 desc->event_name);
2100 lttng_hlist_for_each_entry(event_recorder_private, head, hlist) {
2101 if (event_recorder_private->parent.desc == desc
2102 && event_recorder_private->pub->chan == event_enabler->chan)
2103 found = 1;
2104 }
2105 if (found)
2106 continue;
2107
2108 /*
2109 * We need to create an event for this
2110 * event probe.
2111 */
2112 event_recorder = _lttng_kernel_event_recorder_create(event_enabler->chan,
2113 NULL, desc, LTTNG_KERNEL_ABI_TRACEPOINT);
2114 if (!event_recorder) {
2115 printk(KERN_INFO "LTTng: Unable to create event %s\n",
2116 probe_desc->event_desc[i]->event_name);
2117 }
2118 }
2119 }
2120 }
2121
2122 static
2123 void lttng_create_tracepoint_event_notifier_if_missing(struct lttng_event_notifier_enabler *event_notifier_enabler)
2124 {
2125 struct lttng_event_notifier_group *event_notifier_group = event_notifier_enabler->group;
2126 struct lttng_kernel_probe_desc *probe_desc;
2127 const struct lttng_kernel_event_desc *desc;
2128 int i;
2129 struct list_head *probe_list;
2130
2131 probe_list = lttng_get_probe_list_head();
2132 /*
2133 * For each probe event, if we find that a probe event matches
2134 * our enabler, create an associated lttng_event_notifier if not
2135 * already present.
2136 */
2137 list_for_each_entry(probe_desc, probe_list, head) {
2138 for (i = 0; i < probe_desc->nr_events; i++) {
2139 int found = 0;
2140 struct hlist_head *head;
2141 struct lttng_kernel_event_notifier_private *event_notifier_priv;
2142 struct lttng_kernel_event_notifier *event_notifier;
2143
2144 desc = probe_desc->event_desc[i];
2145 if (!lttng_desc_match_enabler(desc,
2146 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)))
2147 continue;
2148
2149 /*
2150 * Check if already created.
2151 */
2152 head = utils_borrow_hash_table_bucket(
2153 event_notifier_group->event_notifiers_ht.table,
2154 LTTNG_EVENT_NOTIFIER_HT_SIZE, desc->event_name);
2155 lttng_hlist_for_each_entry(event_notifier_priv, head, hlist) {
2156 if (event_notifier_priv->parent.desc == desc
2157 && event_notifier_priv->parent.user_token == event_notifier_enabler->base.user_token)
2158 found = 1;
2159 }
2160 if (found)
2161 continue;
2162
2163 /*
2164 * We need to create an event_notifier for this event probe.
2165 */
2166 event_notifier = _lttng_event_notifier_create(desc,
2167 event_notifier_enabler->base.user_token,
2168 event_notifier_enabler->error_counter_index,
2169 event_notifier_group, NULL,
2170 LTTNG_KERNEL_ABI_TRACEPOINT);
2171 if (IS_ERR(event_notifier)) {
2172 printk(KERN_INFO "LTTng: Unable to create event_notifier %s\n",
2173 probe_desc->event_desc[i]->event_name);
2174 }
2175 }
2176 }
2177 }
2178
2179 static
2180 void lttng_create_syscall_event_if_missing(struct lttng_event_enabler *event_enabler)
2181 {
2182 int ret;
2183
2184 ret = lttng_syscalls_register_event(event_enabler);
2185 WARN_ON_ONCE(ret);
2186 }
2187
2188 static
2189 void lttng_create_syscall_event_notifier_if_missing(struct lttng_event_notifier_enabler *event_notifier_enabler)
2190 {
2191 int ret;
2192
2193 ret = lttng_syscalls_register_event_notifier(event_notifier_enabler);
2194 WARN_ON_ONCE(ret);
2195 ret = lttng_syscalls_create_matching_event_notifiers(event_notifier_enabler);
2196 WARN_ON_ONCE(ret);
2197 }
2198
2199 /*
2200 * Create a struct lttng_kernel_event_recorder if it is missing and its descriptor is
2201 * present in the list of tracepoint probes.
2202 * Should be called with sessions mutex held.
2203 */
2204 static
2205 void lttng_create_event_if_missing(struct lttng_event_enabler *event_enabler)
2206 {
2207 switch (event_enabler->base.event_param.instrumentation) {
2208 case LTTNG_KERNEL_ABI_TRACEPOINT:
2209 lttng_create_tracepoint_event_if_missing(event_enabler);
2210 break;
2211
2212 case LTTNG_KERNEL_ABI_SYSCALL:
2213 lttng_create_syscall_event_if_missing(event_enabler);
2214 break;
2215
2216 default:
2217 WARN_ON_ONCE(1);
2218 break;
2219 }
2220 }
2221
2222 /*
2223 * Create events associated with an event_enabler (if not already present),
2224 * and add backward reference from the event to the enabler.
2225 * Should be called with sessions mutex held.
2226 */
2227 static
2228 int lttng_event_enabler_ref_events(struct lttng_event_enabler *event_enabler)
2229 {
2230 struct lttng_channel *chan = event_enabler->chan;
2231 struct lttng_session *session = event_enabler->chan->session;
2232 struct lttng_enabler *base_enabler = lttng_event_enabler_as_enabler(event_enabler);
2233 struct lttng_kernel_event_recorder_private *event_recorder_priv;
2234
2235 if (base_enabler->event_param.instrumentation == LTTNG_KERNEL_ABI_SYSCALL &&
2236 base_enabler->event_param.u.syscall.abi == LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL &&
2237 base_enabler->event_param.u.syscall.match == LTTNG_KERNEL_ABI_SYSCALL_MATCH_NAME &&
2238 !strcmp(base_enabler->event_param.name, "*")) {
2239 int enabled = base_enabler->enabled;
2240 enum lttng_kernel_abi_syscall_entryexit entryexit = base_enabler->event_param.u.syscall.entryexit;
2241
2242 if (entryexit == LTTNG_KERNEL_ABI_SYSCALL_ENTRY || entryexit == LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT)
2243 WRITE_ONCE(chan->syscall_all_entry, enabled);
2244
2245 if (entryexit == LTTNG_KERNEL_ABI_SYSCALL_EXIT || entryexit == LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT)
2246 WRITE_ONCE(chan->syscall_all_exit, enabled);
2247 }
2248
2249 /* First ensure that probe events are created for this enabler. */
2250 lttng_create_event_if_missing(event_enabler);
2251
2252 /* For each event matching event_enabler in session event list. */
2253 list_for_each_entry(event_recorder_priv, &session->events, node) {
2254 struct lttng_kernel_event_recorder *event_recorder = event_recorder_priv->pub;
2255 struct lttng_enabler_ref *enabler_ref;
2256
2257 if (!lttng_event_enabler_match_event(event_enabler, event_recorder))
2258 continue;
2259 enabler_ref = lttng_enabler_ref(&event_recorder_priv->parent.enablers_ref_head,
2260 lttng_event_enabler_as_enabler(event_enabler));
2261 if (!enabler_ref) {
2262 /*
2263 * If no backward ref, create it.
2264 * Add backward ref from event to event_enabler.
2265 */
2266 enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
2267 if (!enabler_ref)
2268 return -ENOMEM;
2269 enabler_ref->ref = lttng_event_enabler_as_enabler(event_enabler);
2270 list_add(&enabler_ref->node,
2271 &event_recorder_priv->parent.enablers_ref_head);
2272 }
2273
2274 /*
2275 * Link filter bytecodes if not linked yet.
2276 */
2277 lttng_enabler_link_bytecode(event_recorder_priv->parent.desc,
2278 lttng_static_ctx,
2279 &event_recorder_priv->parent.filter_bytecode_runtime_head,
2280 &lttng_event_enabler_as_enabler(event_enabler)->filter_bytecode_head);
2281 }
2282 return 0;
2283 }
2284
2285 /*
2286 * Create a struct lttng_kernel_event_notifier if it is missing and its descriptor is
2287 * present in the list of tracepoint probes.
2288 * Should be called with sessions mutex held.
2289 */
2290 static
2291 void lttng_create_event_notifier_if_missing(struct lttng_event_notifier_enabler *event_notifier_enabler)
2292 {
2293 switch (event_notifier_enabler->base.event_param.instrumentation) {
2294 case LTTNG_KERNEL_ABI_TRACEPOINT:
2295 lttng_create_tracepoint_event_notifier_if_missing(event_notifier_enabler);
2296 break;
2297
2298 case LTTNG_KERNEL_ABI_SYSCALL:
2299 lttng_create_syscall_event_notifier_if_missing(event_notifier_enabler);
2300 break;
2301
2302 default:
2303 WARN_ON_ONCE(1);
2304 break;
2305 }
2306 }
2307
2308 /*
2309 * Create event_notifiers associated with an event_notifier enabler (if not already present).
2310 */
2311 static
2312 int lttng_event_notifier_enabler_ref_event_notifiers(
2313 struct lttng_event_notifier_enabler *event_notifier_enabler)
2314 {
2315 struct lttng_event_notifier_group *event_notifier_group = event_notifier_enabler->group;
2316 struct lttng_enabler *base_enabler = lttng_event_notifier_enabler_as_enabler(event_notifier_enabler);
2317 struct lttng_kernel_event_notifier_private *event_notifier_priv;
2318
2319 if (base_enabler->event_param.instrumentation == LTTNG_KERNEL_ABI_SYSCALL &&
2320 base_enabler->event_param.u.syscall.abi == LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL &&
2321 base_enabler->event_param.u.syscall.match == LTTNG_KERNEL_ABI_SYSCALL_MATCH_NAME &&
2322 !strcmp(base_enabler->event_param.name, "*")) {
2323
2324 int enabled = base_enabler->enabled;
2325 enum lttng_kernel_abi_syscall_entryexit entryexit = base_enabler->event_param.u.syscall.entryexit;
2326
2327 if (entryexit == LTTNG_KERNEL_ABI_SYSCALL_ENTRY || entryexit == LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT)
2328 WRITE_ONCE(event_notifier_group->syscall_all_entry, enabled);
2329
2330 if (entryexit == LTTNG_KERNEL_ABI_SYSCALL_EXIT || entryexit == LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT)
2331 WRITE_ONCE(event_notifier_group->syscall_all_exit, enabled);
2332
2333 }
2334
2335 /* First ensure that probe event_notifiers are created for this enabler. */
2336 lttng_create_event_notifier_if_missing(event_notifier_enabler);
2337
2338 /* Link the created event_notifier with its associated enabler. */
2339 list_for_each_entry(event_notifier_priv, &event_notifier_group->event_notifiers_head, node) {
2340 struct lttng_kernel_event_notifier *event_notifier = event_notifier_priv->pub;
2341 struct lttng_enabler_ref *enabler_ref;
2342
2343 if (!lttng_event_notifier_enabler_match_event_notifier(event_notifier_enabler, event_notifier))
2344 continue;
2345
2346 enabler_ref = lttng_enabler_ref(&event_notifier_priv->parent.enablers_ref_head,
2347 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler));
2348 if (!enabler_ref) {
2349 /*
2350 * If no backward ref, create it.
2351 * Add backward ref from event_notifier to enabler.
2352 */
2353 enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
2354 if (!enabler_ref)
2355 return -ENOMEM;
2356
2357 enabler_ref->ref = lttng_event_notifier_enabler_as_enabler(
2358 event_notifier_enabler);
2359 list_add(&enabler_ref->node,
2360 &event_notifier_priv->parent.enablers_ref_head);
2361 }
2362
2363 /*
2364 * Link filter bytecodes if not linked yet.
2365 */
2366 lttng_enabler_link_bytecode(event_notifier_priv->parent.desc,
2367 lttng_static_ctx, &event_notifier_priv->parent.filter_bytecode_runtime_head,
2368 &lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->filter_bytecode_head);
2369
2370 /* Link capture bytecodes if not linked yet. */
2371 lttng_enabler_link_bytecode(event_notifier_priv->parent.desc,
2372 lttng_static_ctx, &event_notifier_priv->capture_bytecode_runtime_head,
2373 &event_notifier_enabler->capture_bytecode_head);
2374
2375 event_notifier_priv->num_captures = event_notifier_enabler->num_captures;
2376 }
2377 return 0;
2378 }
2379
2380 /*
2381 * Called at module load: connect the probe on all enablers matching
2382 * this event.
2383 * Called with sessions lock held.
2384 */
2385 int lttng_fix_pending_events(void)
2386 {
2387 struct lttng_session *session;
2388
2389 list_for_each_entry(session, &sessions, list)
2390 lttng_session_lazy_sync_event_enablers(session);
2391 return 0;
2392 }
2393
2394 static bool lttng_event_notifier_group_has_active_event_notifiers(
2395 struct lttng_event_notifier_group *event_notifier_group)
2396 {
2397 struct lttng_event_notifier_enabler *event_notifier_enabler;
2398
2399 list_for_each_entry(event_notifier_enabler, &event_notifier_group->enablers_head,
2400 node) {
2401 if (event_notifier_enabler->base.enabled)
2402 return true;
2403 }
2404 return false;
2405 }
2406
2407 bool lttng_event_notifier_active(void)
2408 {
2409 struct lttng_event_notifier_group *event_notifier_group;
2410
2411 list_for_each_entry(event_notifier_group, &event_notifier_groups, node) {
2412 if (lttng_event_notifier_group_has_active_event_notifiers(event_notifier_group))
2413 return true;
2414 }
2415 return false;
2416 }
2417
2418 int lttng_fix_pending_event_notifiers(void)
2419 {
2420 struct lttng_event_notifier_group *event_notifier_group;
2421
2422 list_for_each_entry(event_notifier_group, &event_notifier_groups, node)
2423 lttng_event_notifier_group_sync_enablers(event_notifier_group);
2424 return 0;
2425 }
2426
2427 struct lttng_event_enabler *lttng_event_enabler_create(
2428 enum lttng_enabler_format_type format_type,
2429 struct lttng_kernel_abi_event *event_param,
2430 struct lttng_channel *chan)
2431 {
2432 struct lttng_event_enabler *event_enabler;
2433
2434 event_enabler = kzalloc(sizeof(*event_enabler), GFP_KERNEL);
2435 if (!event_enabler)
2436 return NULL;
2437 event_enabler->base.format_type = format_type;
2438 INIT_LIST_HEAD(&event_enabler->base.filter_bytecode_head);
2439 memcpy(&event_enabler->base.event_param, event_param,
2440 sizeof(event_enabler->base.event_param));
2441 event_enabler->chan = chan;
2442 /* ctx left NULL */
2443 event_enabler->base.enabled = 0;
2444 mutex_lock(&sessions_mutex);
2445 list_add(&event_enabler->node, &event_enabler->chan->session->enablers_head);
2446 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
2447 mutex_unlock(&sessions_mutex);
2448 return event_enabler;
2449 }
2450
2451 int lttng_event_enabler_enable(struct lttng_event_enabler *event_enabler)
2452 {
2453 mutex_lock(&sessions_mutex);
2454 lttng_event_enabler_as_enabler(event_enabler)->enabled = 1;
2455 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
2456 mutex_unlock(&sessions_mutex);
2457 return 0;
2458 }
2459
2460 int lttng_event_enabler_disable(struct lttng_event_enabler *event_enabler)
2461 {
2462 mutex_lock(&sessions_mutex);
2463 lttng_event_enabler_as_enabler(event_enabler)->enabled = 0;
2464 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
2465 mutex_unlock(&sessions_mutex);
2466 return 0;
2467 }
2468
2469 static
2470 int lttng_enabler_attach_filter_bytecode(struct lttng_enabler *enabler,
2471 struct lttng_kernel_abi_filter_bytecode __user *bytecode)
2472 {
2473 struct lttng_bytecode_node *bytecode_node;
2474 uint32_t bytecode_len;
2475 int ret;
2476
2477 ret = get_user(bytecode_len, &bytecode->len);
2478 if (ret)
2479 return ret;
2480 bytecode_node = lttng_kvzalloc(sizeof(*bytecode_node) + bytecode_len,
2481 GFP_KERNEL);
2482 if (!bytecode_node)
2483 return -ENOMEM;
2484 ret = copy_from_user(&bytecode_node->bc, bytecode,
2485 sizeof(*bytecode) + bytecode_len);
2486 if (ret)
2487 goto error_free;
2488
2489 bytecode_node->type = LTTNG_BYTECODE_NODE_TYPE_FILTER;
2490 bytecode_node->enabler = enabler;
2491 /* Enforce length based on allocated size */
2492 bytecode_node->bc.len = bytecode_len;
2493 list_add_tail(&bytecode_node->node, &enabler->filter_bytecode_head);
2494
2495 return 0;
2496
2497 error_free:
2498 lttng_kvfree(bytecode_node);
2499 return ret;
2500 }
2501
2502 int lttng_event_enabler_attach_filter_bytecode(struct lttng_event_enabler *event_enabler,
2503 struct lttng_kernel_abi_filter_bytecode __user *bytecode)
2504 {
2505 int ret;
2506 ret = lttng_enabler_attach_filter_bytecode(
2507 lttng_event_enabler_as_enabler(event_enabler), bytecode);
2508 if (ret)
2509 goto error;
2510
2511 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
2512 return 0;
2513
2514 error:
2515 return ret;
2516 }
2517
2518 int lttng_event_add_callsite(struct lttng_kernel_event_recorder *event_recorder,
2519 struct lttng_kernel_abi_event_callsite __user *callsite)
2520 {
2521
2522 switch (event_recorder->priv->parent.instrumentation) {
2523 case LTTNG_KERNEL_ABI_UPROBE:
2524 return lttng_uprobes_event_add_callsite(event_recorder, callsite);
2525 default:
2526 return -EINVAL;
2527 }
2528 }
2529
2530 static
2531 void lttng_enabler_destroy(struct lttng_enabler *enabler)
2532 {
2533 struct lttng_bytecode_node *filter_node, *tmp_filter_node;
2534
2535 /* Destroy filter bytecode */
2536 list_for_each_entry_safe(filter_node, tmp_filter_node,
2537 &enabler->filter_bytecode_head, node) {
2538 lttng_kvfree(filter_node);
2539 }
2540 }
2541
2542 static
2543 void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler)
2544 {
2545 lttng_enabler_destroy(lttng_event_enabler_as_enabler(event_enabler));
2546
2547 list_del(&event_enabler->node);
2548 kfree(event_enabler);
2549 }
2550
2551 struct lttng_event_notifier_enabler *lttng_event_notifier_enabler_create(
2552 struct lttng_event_notifier_group *event_notifier_group,
2553 enum lttng_enabler_format_type format_type,
2554 struct lttng_kernel_abi_event_notifier *event_notifier_param)
2555 {
2556 struct lttng_event_notifier_enabler *event_notifier_enabler;
2557
2558 event_notifier_enabler = kzalloc(sizeof(*event_notifier_enabler), GFP_KERNEL);
2559 if (!event_notifier_enabler)
2560 return NULL;
2561
2562 event_notifier_enabler->base.format_type = format_type;
2563 INIT_LIST_HEAD(&event_notifier_enabler->base.filter_bytecode_head);
2564 INIT_LIST_HEAD(&event_notifier_enabler->capture_bytecode_head);
2565
2566 event_notifier_enabler->error_counter_index = event_notifier_param->error_counter_index;
2567 event_notifier_enabler->num_captures = 0;
2568
2569 memcpy(&event_notifier_enabler->base.event_param, &event_notifier_param->event,
2570 sizeof(event_notifier_enabler->base.event_param));
2571
2572 event_notifier_enabler->base.enabled = 0;
2573 event_notifier_enabler->base.user_token = event_notifier_param->event.token;
2574 event_notifier_enabler->group = event_notifier_group;
2575
2576 mutex_lock(&sessions_mutex);
2577 list_add(&event_notifier_enabler->node, &event_notifier_enabler->group->enablers_head);
2578 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2579
2580 mutex_unlock(&sessions_mutex);
2581
2582 return event_notifier_enabler;
2583 }
2584
2585 int lttng_event_notifier_enabler_enable(
2586 struct lttng_event_notifier_enabler *event_notifier_enabler)
2587 {
2588 mutex_lock(&sessions_mutex);
2589 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 1;
2590 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2591 mutex_unlock(&sessions_mutex);
2592 return 0;
2593 }
2594
2595 int lttng_event_notifier_enabler_disable(
2596 struct lttng_event_notifier_enabler *event_notifier_enabler)
2597 {
2598 mutex_lock(&sessions_mutex);
2599 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 0;
2600 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2601 mutex_unlock(&sessions_mutex);
2602 return 0;
2603 }
2604
2605 int lttng_event_notifier_enabler_attach_filter_bytecode(
2606 struct lttng_event_notifier_enabler *event_notifier_enabler,
2607 struct lttng_kernel_abi_filter_bytecode __user *bytecode)
2608 {
2609 int ret;
2610
2611 ret = lttng_enabler_attach_filter_bytecode(
2612 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler),
2613 bytecode);
2614 if (ret)
2615 goto error;
2616
2617 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2618 return 0;
2619
2620 error:
2621 return ret;
2622 }
2623
2624 int lttng_event_notifier_enabler_attach_capture_bytecode(
2625 struct lttng_event_notifier_enabler *event_notifier_enabler,
2626 struct lttng_kernel_abi_capture_bytecode __user *bytecode)
2627 {
2628 struct lttng_bytecode_node *bytecode_node;
2629 struct lttng_enabler *enabler =
2630 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler);
2631 uint32_t bytecode_len;
2632 int ret;
2633
2634 ret = get_user(bytecode_len, &bytecode->len);
2635 if (ret)
2636 return ret;
2637
2638 bytecode_node = lttng_kvzalloc(sizeof(*bytecode_node) + bytecode_len,
2639 GFP_KERNEL);
2640 if (!bytecode_node)
2641 return -ENOMEM;
2642
2643 ret = copy_from_user(&bytecode_node->bc, bytecode,
2644 sizeof(*bytecode) + bytecode_len);
2645 if (ret)
2646 goto error_free;
2647
2648 bytecode_node->type = LTTNG_BYTECODE_NODE_TYPE_CAPTURE;
2649 bytecode_node->enabler = enabler;
2650
2651 /* Enforce length based on allocated size */
2652 bytecode_node->bc.len = bytecode_len;
2653 list_add_tail(&bytecode_node->node, &event_notifier_enabler->capture_bytecode_head);
2654
2655 event_notifier_enabler->num_captures++;
2656
2657 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2658 goto end;
2659
2660 error_free:
2661 lttng_kvfree(bytecode_node);
2662 end:
2663 return ret;
2664 }
2665
2666 int lttng_event_notifier_add_callsite(struct lttng_kernel_event_notifier *event_notifier,
2667 struct lttng_kernel_abi_event_callsite __user *callsite)
2668 {
2669
2670 switch (event_notifier->priv->parent.instrumentation) {
2671 case LTTNG_KERNEL_ABI_UPROBE:
2672 return lttng_uprobes_event_notifier_add_callsite(event_notifier,
2673 callsite);
2674 default:
2675 return -EINVAL;
2676 }
2677 }
2678
2679 static
2680 void lttng_event_notifier_enabler_destroy(
2681 struct lttng_event_notifier_enabler *event_notifier_enabler)
2682 {
2683 if (!event_notifier_enabler) {
2684 return;
2685 }
2686
2687 list_del(&event_notifier_enabler->node);
2688
2689 lttng_enabler_destroy(lttng_event_notifier_enabler_as_enabler(event_notifier_enabler));
2690 kfree(event_notifier_enabler);
2691 }
2692
2693 /*
2694 * lttng_session_sync_event_enablers should be called just before starting a
2695 * session.
2696 * Should be called with sessions mutex held.
2697 */
2698 static
2699 void lttng_session_sync_event_enablers(struct lttng_session *session)
2700 {
2701 struct lttng_event_enabler *event_enabler;
2702 struct lttng_kernel_event_recorder_private *event_recorder_priv;
2703
2704 list_for_each_entry(event_enabler, &session->enablers_head, node)
2705 lttng_event_enabler_ref_events(event_enabler);
2706 /*
2707 * For each event, if at least one of its enablers is enabled,
2708 * and its channel and session transient states are enabled, we
2709 * enable the event, else we disable it.
2710 */
2711 list_for_each_entry(event_recorder_priv, &session->events, node) {
2712 struct lttng_kernel_event_recorder *event_recorder = event_recorder_priv->pub;
2713 struct lttng_enabler_ref *enabler_ref;
2714 struct lttng_bytecode_runtime *runtime;
2715 int enabled = 0, has_enablers_without_bytecode = 0;
2716
2717 switch (event_recorder_priv->parent.instrumentation) {
2718 case LTTNG_KERNEL_ABI_TRACEPOINT: /* Fall-through */
2719 case LTTNG_KERNEL_ABI_SYSCALL:
2720 /* Enable events */
2721 list_for_each_entry(enabler_ref,
2722 &event_recorder_priv->parent.enablers_ref_head, node) {
2723 if (enabler_ref->ref->enabled) {
2724 enabled = 1;
2725 break;
2726 }
2727 }
2728 break;
2729
2730 default:
2731 /* Not handled with lazy sync. */
2732 continue;
2733 }
2734 /*
2735 * Enabled state is based on union of enablers, with
2736 * intersection of session and channel transient enable
2737 * states.
2738 */
2739 enabled = enabled && session->tstate && event_recorder->chan->tstate;
2740
2741 WRITE_ONCE(event_recorder->parent.enabled, enabled);
2742 /*
2743 * Sync tracepoint registration with event enabled
2744 * state.
2745 */
2746 if (enabled) {
2747 register_event(event_recorder);
2748 } else {
2749 _lttng_event_unregister(event_recorder);
2750 }
2751
2752 /* Check if it has any enabled enabler without filter bytecode. */
2753 list_for_each_entry(enabler_ref,
2754 &event_recorder_priv->parent.enablers_ref_head, node) {
2755 if (enabler_ref->ref->enabled
2756 && list_empty(&enabler_ref->ref->filter_bytecode_head)) {
2757 has_enablers_without_bytecode = 1;
2758 break;
2759 }
2760 }
2761 event_recorder_priv->parent.has_enablers_without_filter_bytecode =
2762 has_enablers_without_bytecode;
2763
2764 /* Enable filters */
2765 list_for_each_entry(runtime,
2766 &event_recorder_priv->parent.filter_bytecode_runtime_head, node)
2767 lttng_bytecode_filter_sync_state(runtime);
2768 }
2769 }
2770
2771 /*
2772 * Apply enablers to session events, adding events to the session if need
2773 * be. It is required after each modification applied to an active
2774 * session, and right before session "start".
2775 * "lazy" sync means we only sync if required.
2776 * Should be called with sessions mutex held.
2777 */
2778 static
2779 void lttng_session_lazy_sync_event_enablers(struct lttng_session *session)
2780 {
2781 /* We can skip if session is not active */
2782 if (!session->active)
2783 return;
2784 lttng_session_sync_event_enablers(session);
2785 }
2786
2787 static
2788 void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group)
2789 {
2790 struct lttng_event_notifier_enabler *event_notifier_enabler;
2791 struct lttng_kernel_event_notifier_private *event_notifier_priv;
2792
2793 list_for_each_entry(event_notifier_enabler, &event_notifier_group->enablers_head, node)
2794 lttng_event_notifier_enabler_ref_event_notifiers(event_notifier_enabler);
2795
2796 /*
2797 * For each event_notifier, if at least one of its enablers is enabled,
2798 * we enable the event_notifier, else we disable it.
2799 */
2800 list_for_each_entry(event_notifier_priv, &event_notifier_group->event_notifiers_head, node) {
2801 struct lttng_kernel_event_notifier *event_notifier = event_notifier_priv->pub;
2802 struct lttng_enabler_ref *enabler_ref;
2803 struct lttng_bytecode_runtime *runtime;
2804 int enabled = 0, has_enablers_without_bytecode = 0;
2805
2806 switch (event_notifier_priv->parent.instrumentation) {
2807 case LTTNG_KERNEL_ABI_TRACEPOINT: /* Fall-through */
2808 case LTTNG_KERNEL_ABI_SYSCALL:
2809 /* Enable event_notifiers */
2810 list_for_each_entry(enabler_ref,
2811 &event_notifier_priv->parent.enablers_ref_head, node) {
2812 if (enabler_ref->ref->enabled) {
2813 enabled = 1;
2814 break;
2815 }
2816 }
2817 break;
2818
2819 default:
2820 /* Not handled with sync. */
2821 continue;
2822 }
2823
2824 WRITE_ONCE(event_notifier->parent.enabled, enabled);
2825 /*
2826 * Sync tracepoint registration with event_notifier enabled
2827 * state.
2828 */
2829 if (enabled) {
2830 if (!event_notifier_priv->parent.registered)
2831 register_event_notifier(event_notifier);
2832 } else {
2833 if (event_notifier_priv->parent.registered)
2834 _lttng_event_notifier_unregister(event_notifier);
2835 }
2836
2837 /* Check if it has any enabled enabler without filter bytecode. */
2838 list_for_each_entry(enabler_ref,
2839 &event_notifier_priv->parent.enablers_ref_head, node) {
2840 if (enabler_ref->ref->enabled
2841 && list_empty(&enabler_ref->ref->filter_bytecode_head)) {
2842 has_enablers_without_bytecode = 1;
2843 break;
2844 }
2845 }
2846 event_notifier_priv->parent.has_enablers_without_filter_bytecode =
2847 has_enablers_without_bytecode;
2848
2849 /* Enable filters */
2850 list_for_each_entry(runtime,
2851 &event_notifier_priv->parent.filter_bytecode_runtime_head, node)
2852 lttng_bytecode_filter_sync_state(runtime);
2853
2854 /* Enable captures */
2855 list_for_each_entry(runtime,
2856 &event_notifier_priv->capture_bytecode_runtime_head, node)
2857 lttng_bytecode_capture_sync_state(runtime);
2858
2859 WRITE_ONCE(event_notifier->eval_capture, !!event_notifier_priv->num_captures);
2860 }
2861 }
2862
2863 /*
2864 * Serialize at most one packet worth of metadata into a metadata
2865 * channel.
2866 * We grab the metadata cache mutex to get exclusive access to our metadata
2867 * buffer and to the metadata cache. Exclusive access to the metadata buffer
2868 * allows us to do racy operations such as looking for remaining space left in
2869 * the packet and writing to it, since mutual exclusion protects us from concurrent writes.
2870 * Mutual exclusion on the metadata cache allows us to read the cache content
2871 * without racing against reallocation of the cache by updates.
2872 * Returns the number of bytes written in the channel, 0 if no data
2873 * was written and a negative value on error.
2874 */
2875 int lttng_metadata_output_channel(struct lttng_metadata_stream *stream,
2876 struct channel *chan, bool *coherent)
2877 {
2878 struct lib_ring_buffer_ctx ctx;
2879 int ret = 0;
2880 size_t len, reserve_len;
2881
2882 /*
2883 * Ensure we support multiple get_next / put sequences followed by
2884 * put_next. The metadata cache lock protects reading the metadata
2885 * cache. It can indeed be read concurrently by "get_next_subbuf" and
2886 * "flush" operations on the buffer invoked by different processes.
2887 * Moreover, since the metadata cache memory can be reallocated, we
2888 * need to have exclusive access against updates even though we only
2889 * read it.
2890 */
2891 mutex_lock(&stream->metadata_cache->lock);
2892 WARN_ON(stream->metadata_in < stream->metadata_out);
2893 if (stream->metadata_in != stream->metadata_out)
2894 goto end;
2895
2896 /* Metadata regenerated, change the version. */
2897 if (stream->metadata_cache->version != stream->version)
2898 stream->version = stream->metadata_cache->version;
2899
2900 len = stream->metadata_cache->metadata_written -
2901 stream->metadata_in;
2902 if (!len)
2903 goto end;
2904 reserve_len = min_t(size_t,
2905 stream->transport->ops.packet_avail_size(chan),
2906 len);
2907 lib_ring_buffer_ctx_init(&ctx, chan, NULL, reserve_len,
2908 sizeof(char), -1);
2909 /*
2910 * If reservation failed, return an error to the caller.
2911 */
2912 ret = stream->transport->ops.event_reserve(&ctx, 0);
2913 if (ret != 0) {
2914 printk(KERN_WARNING "LTTng: Metadata event reservation failed\n");
2915 stream->coherent = false;
2916 goto end;
2917 }
2918 stream->transport->ops.event_write(&ctx,
2919 stream->metadata_cache->data + stream->metadata_in,
2920 reserve_len);
2921 stream->transport->ops.event_commit(&ctx);
2922 stream->metadata_in += reserve_len;
2923 if (reserve_len < len)
2924 stream->coherent = false;
2925 else
2926 stream->coherent = true;
2927 ret = reserve_len;
2928
2929 end:
2930 if (coherent)
2931 *coherent = stream->coherent;
2932 mutex_unlock(&stream->metadata_cache->lock);
2933 return ret;
2934 }
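/*
 * Coherency example (illustrative, assumed sizes): if the cache holds 10240
 * pending bytes but packet_avail_size() reports only 4096, reserve_len is
 * clamped to 4096 (< len), so the stream is marked non-coherent; subsequent
 * calls drain the remaining bytes and the last one restores stream->coherent.
 */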
2935
2936 static
2937 void lttng_metadata_begin(struct lttng_session *session)
2938 {
2939 if (atomic_inc_return(&session->metadata_cache->producing) == 1)
2940 mutex_lock(&session->metadata_cache->lock);
2941 }
2942
2943 static
2944 void lttng_metadata_end(struct lttng_session *session)
2945 {
2946 WARN_ON_ONCE(!atomic_read(&session->metadata_cache->producing));
2947 if (atomic_dec_return(&session->metadata_cache->producing) == 0) {
2948 struct lttng_metadata_stream *stream;
2949
2950 list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list)
2951 wake_up_interruptible(&stream->read_wait);
2952 mutex_unlock(&session->metadata_cache->lock);
2953 }
2954 }
2955
2956 /*
2957 * Write the metadata to the metadata cache.
2958 * Must be called with sessions_mutex held.
2959 * The metadata cache lock protects us from concurrent read access from
2960 * a thread outputting metadata content to the ring buffer.
2961 * The content of the printf is printed as a single atomic metadata
2962 * transaction.
2963 */
2964 int lttng_metadata_printf(struct lttng_session *session,
2965 const char *fmt, ...)
2966 {
2967 char *str;
2968 size_t len;
2969 va_list ap;
2970
2971 WARN_ON_ONCE(!LTTNG_READ_ONCE(session->active));
2972
2973 va_start(ap, fmt);
2974 str = kvasprintf(GFP_KERNEL, fmt, ap);
2975 va_end(ap);
2976 if (!str)
2977 return -ENOMEM;
2978
2979 len = strlen(str);
2980 WARN_ON_ONCE(!atomic_read(&session->metadata_cache->producing));
2981 if (session->metadata_cache->metadata_written + len >
2982 session->metadata_cache->cache_alloc) {
2983 char *tmp_cache_realloc;
2984 unsigned int tmp_cache_alloc_size;
2985
2986 tmp_cache_alloc_size = max_t(unsigned int,
2987 session->metadata_cache->cache_alloc + len,
2988 session->metadata_cache->cache_alloc << 1);
2989 tmp_cache_realloc = vzalloc(tmp_cache_alloc_size);
2990 if (!tmp_cache_realloc)
2991 goto err;
2992 if (session->metadata_cache->data) {
2993 memcpy(tmp_cache_realloc,
2994 session->metadata_cache->data,
2995 session->metadata_cache->cache_alloc);
2996 vfree(session->metadata_cache->data);
2997 }
2998
2999 session->metadata_cache->cache_alloc = tmp_cache_alloc_size;
3000 session->metadata_cache->data = tmp_cache_realloc;
3001 }
3002 memcpy(session->metadata_cache->data +
3003 session->metadata_cache->metadata_written,
3004 str, len);
3005 session->metadata_cache->metadata_written += len;
3006 kfree(str);
3007
3008 return 0;
3009
3010 err:
3011 kfree(str);
3012 return -ENOMEM;
3013 }
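/*
 * Typical usage sketch (illustrative): a statedump helper brackets one
 * logical metadata transaction and emits its content with one or more
 * printf calls while holding sessions_mutex:
 *
 *	lttng_metadata_begin(session);
 *	ret = lttng_metadata_printf(session, "env {\n	domain = \"kernel\";\n};\n");
 *	lttng_metadata_end(session);
 *
 * The "env" block above is only an example payload, not the exact output
 * produced in this section of the file.
 */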
3014
3015 static
3016 int print_tabs(struct lttng_session *session, size_t nesting)
3017 {
3018 size_t i;
3019
3020 for (i = 0; i < nesting; i++) {
3021 int ret;
3022
3023 ret = lttng_metadata_printf(session, " ");
3024 if (ret) {
3025 return ret;
3026 }
3027 }
3028 return 0;
3029 }
3030
3031 static
3032 int lttng_field_name_statedump(struct lttng_session *session,
3033 const struct lttng_kernel_event_field *field,
3034 size_t nesting)
3035 {
3036 return lttng_metadata_printf(session, " _%s;\n", field->name);
3037 }
3038
3039 static
3040 int _lttng_integer_type_statedump(struct lttng_session *session,
3041 const struct lttng_kernel_type_integer *type,
3042 enum lttng_kernel_string_encoding parent_encoding,
3043 size_t nesting)
3044 {
3045 int ret;
3046
3047 ret = print_tabs(session, nesting);
3048 if (ret)
3049 return ret;
3050 ret = lttng_metadata_printf(session,
3051 "integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s }",
3052 type->size,
3053 type->alignment,
3054 type->signedness,
3055 (parent_encoding == lttng_kernel_string_encoding_none)
3056 ? "none"
3057 : (parent_encoding == lttng_kernel_string_encoding_UTF8)
3058 ? "UTF8"
3059 : "ASCII",
3060 type->base,
3061 #if __BYTE_ORDER == __BIG_ENDIAN
3062 type->reverse_byte_order ? " byte_order = le;" : ""
3063 #else
3064 type->reverse_byte_order ? " byte_order = be;" : ""
3065 #endif
3066 );
3067 return ret;
3068 }
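/*
 * Example emitted CTF type (illustrative values): a 32-bit, byte-aligned,
 * signed, base-10 integer with no string encoding is dumped as:
 *   integer { size = 32; align = 8; signed = 1; encoding = none; base = 10; }
 */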
3069
3070 /*
3071 * Must be called with sessions_mutex held.
3072 */
3073 static
3074 int _lttng_struct_type_statedump(struct lttng_session *session,
3075 const struct lttng_kernel_type_struct *type,
3076 size_t nesting)
3077 {
3078 int ret;
3079 uint32_t i, nr_fields;
3080 unsigned int alignment;
3081
3082 ret = print_tabs(session, nesting);
3083 if (ret)
3084 return ret;
3085 ret = lttng_metadata_printf(session,
3086 "struct {\n");
3087 if (ret)
3088 return ret;
3089 nr_fields = type->nr_fields;
3090 for (i = 0; i < nr_fields; i++) {
3091 const struct lttng_kernel_event_field *iter_field;
3092
3093 iter_field = type->fields[i];
3094 ret = _lttng_field_statedump(session, iter_field, nesting + 1);
3095 if (ret)
3096 return ret;
3097 }
3098 ret = print_tabs(session, nesting);
3099 if (ret)
3100 return ret;
3101 alignment = type->alignment;
3102 if (alignment) {
3103 ret = lttng_metadata_printf(session,
3104 "} align(%u)",
3105 alignment);
3106 } else {
3107 ret = lttng_metadata_printf(session,
3108 "}");
3109 }
3110 return ret;
3111 }
3112
3113 /*
3114 * Must be called with sessions_mutex held.
3115 */
3116 static
3117 int _lttng_struct_field_statedump(struct lttng_session *session,
3118 const struct lttng_kernel_event_field *field,
3119 size_t nesting)
3120 {
3121 int ret;
3122
3123 ret = _lttng_struct_type_statedump(session,
3124 lttng_kernel_get_type_struct(field->type), nesting);
3125 if (ret)
3126 return ret;
3127 return lttng_field_name_statedump(session, field, nesting);
3128 }
3129
3130 /*
3131 * Must be called with sessions_mutex held.
3132 */
3133 static
3134 int _lttng_variant_type_statedump(struct lttng_session *session,
3135 const struct lttng_kernel_type_variant *type,
3136 size_t nesting)
3137 {
3138 int ret;
3139 uint32_t i, nr_choices;
3140
3141 /*
3142 * CTF 1.8 does not allow expressing nonzero variant alignment in a nestable way.
3143 */
3144 if (type->alignment != 0)
3145 return -EINVAL;
3146 ret = print_tabs(session, nesting);
3147 if (ret)
3148 return ret;
3149 ret = lttng_metadata_printf(session,
3150 "variant <_%s> {\n",
3151 type->tag_name);
3152 if (ret)
3153 return ret;
3154 nr_choices = type->nr_choices;
3155 for (i = 0; i < nr_choices; i++) {
3156 const struct lttng_kernel_event_field *iter_field;
3157
3158 iter_field = type->choices[i];
3159 ret = _lttng_field_statedump(session, iter_field, nesting + 1);
3160 if (ret)
3161 return ret;
3162 }
3163 ret = print_tabs(session, nesting);
3164 if (ret)
3165 return ret;
3166 ret = lttng_metadata_printf(session,
3167 "}");
3168 return ret;
3169 }
3170
3171 /*
3172 * Must be called with sessions_mutex held.
3173 */
3174 static
3175 int _lttng_variant_field_statedump(struct lttng_session *session,
3176 const struct lttng_kernel_event_field *field,
3177 size_t nesting)
3178 {
3179 int ret;
3180
3181 ret = _lttng_variant_type_statedump(session,
3182 lttng_kernel_get_type_variant(field->type), nesting);
3183 if (ret)
3184 return ret;
3185 return lttng_field_name_statedump(session, field, nesting);
3186 }
3187
3188 /*
3189 * Must be called with sessions_mutex held.
3190 */
3191 static
3192 int _lttng_array_field_statedump(struct lttng_session *session,
3193 const struct lttng_kernel_event_field *field,
3194 size_t nesting)
3195 {
3196 int ret;
3197 const struct lttng_kernel_type_array *array_type;
3198 const struct lttng_kernel_type_common *elem_type;
3199
3200 array_type = lttng_kernel_get_type_array(field->type);
3201 WARN_ON_ONCE(!array_type);
3202
3203 if (array_type->alignment) {
3204 ret = print_tabs(session, nesting);
3205 if (ret)
3206 return ret;
3207 ret = lttng_metadata_printf(session,
3208 "struct { } align(%u) _%s_padding;\n",
3209 array_type->alignment * CHAR_BIT,
3210 field->name);
3211 if (ret)
3212 return ret;
3213 }
3214 /*
3215 * Nested compound types: Only arrays of structures and variants are
3216 * currently supported.
3217 */
3218 elem_type = array_type->elem_type;
3219 switch (elem_type->type) {
3220 case lttng_kernel_type_integer:
3221 case lttng_kernel_type_struct:
3222 case lttng_kernel_type_variant:
3223 ret = _lttng_type_statedump(session, elem_type,
3224 array_type->encoding, nesting);
3225 if (ret)
3226 return ret;
3227 break;
3228
3229 default:
3230 return -EINVAL;
3231 }
3232 ret = lttng_metadata_printf(session,
3233 " _%s[%u];\n",
3234 field->name,
3235 array_type->length);
3236 return ret;
3237 }
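/*
 * Example emitted field (illustrative): an 8-bit unsigned array of length 16
 * named "buf" becomes:
 *   integer { size = 8; align = 8; signed = 0; encoding = none; base = 10; } _buf[16];
 */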
3238
3239 /*
3240 * Must be called with sessions_mutex held.
3241 */
3242 static
3243 int _lttng_sequence_field_statedump(struct lttng_session *session,
3244 const struct lttng_kernel_event_field *field,
3245 size_t nesting)
3246 {
3247 int ret;
3248 const char *length_name;
3249 const struct lttng_kernel_type_sequence *sequence_type;
3250 const struct lttng_kernel_type_common *elem_type;
3251
3252 sequence_type = lttng_kernel_get_type_sequence(field->type);
3253 WARN_ON_ONCE(!sequence_type);
3254
3255 length_name = sequence_type->length_name;
3256
3257 if (sequence_type->alignment) {
3258 ret = print_tabs(session, nesting);
3259 if (ret)
3260 return ret;
3261 ret = lttng_metadata_printf(session,
3262 "struct { } align(%u) _%s_padding;\n",
3263 sequence_type->alignment * CHAR_BIT,
3264 field->name);
3265 if (ret)
3266 return ret;
3267 }
3268
3269 /*
3270 * Nested compound types: Only arrays of structures and variants are
3271 * currently supported.
3272 */
3273 elem_type = sequence_type->elem_type;
3274 switch (elem_type->type) {
3275 case lttng_kernel_type_integer:
3276 case lttng_kernel_type_struct:
3277 case lttng_kernel_type_variant:
3278 ret = _lttng_type_statedump(session, elem_type,
3279 sequence_type->encoding, nesting);
3280 if (ret)
3281 return ret;
3282 break;
3283
3284 default:
3285 return -EINVAL;
3286 }
3287 ret = lttng_metadata_printf(session,
3288 " _%s[ _%s ];\n",
3289 field->name,
3290 sequence_type->length_name);
3291 return ret;
3292 }
3293
3294 /*
3295 * Must be called with sessions_mutex held.
3296 */
3297 static
3298 int _lttng_enum_type_statedump(struct lttng_session *session,
3299 const struct lttng_kernel_type_enum *type,
3300 size_t nesting)
3301 {
3302 const struct lttng_kernel_enum_desc *enum_desc;
3303 const struct lttng_kernel_type_common *container_type;
3304 int ret;
3305 unsigned int i, nr_entries;
3306
3307 container_type = type->container_type;
3308 if (container_type->type != lttng_kernel_type_integer) {
3309 ret = -EINVAL;
3310 goto end;
3311 }
3312 enum_desc = type->desc;
3313 nr_entries = enum_desc->nr_entries;
3314
3315 ret = print_tabs(session, nesting);
3316 if (ret)
3317 goto end;
3318 ret = lttng_metadata_printf(session, "enum : ");
3319 if (ret)
3320 goto end;
3321 ret = _lttng_integer_type_statedump(session, lttng_kernel_get_type_integer(container_type),
3322 lttng_kernel_string_encoding_none, 0);
3323 if (ret)
3324 goto end;
3325 ret = lttng_metadata_printf(session, " {\n");
3326 if (ret)
3327 goto end;
3328 /* Dump all entries */
3329 for (i = 0; i < nr_entries; i++) {
3330 const struct lttng_kernel_enum_entry *entry = enum_desc->entries[i];
3331 int j, len;
3332
3333 ret = print_tabs(session, nesting + 1);
3334 if (ret)
3335 goto end;
3336 ret = lttng_metadata_printf(session,
3337 "\"");
3338 if (ret)
3339 goto end;
3340 len = strlen(entry->string);
3341 /* Escape the characters '"' and '\\'. */
3342 for (j = 0; j < len; j++) {
3343 char c = entry->string[j];
3344
3345 switch (c) {
3346 case '"':
3347 ret = lttng_metadata_printf(session,
3348 "\\\"");
3349 break;
3350 case '\\':
3351 ret = lttng_metadata_printf(session,
3352 "\\\\");
3353 break;
3354 default:
3355 ret = lttng_metadata_printf(session,
3356 "%c", c);
3357 break;
3358 }
3359 if (ret)
3360 goto end;
3361 }
3362 ret = lttng_metadata_printf(session, "\"");
3363 if (ret)
3364 goto end;
3365
3366 if (entry->options.is_auto) {
3367 ret = lttng_metadata_printf(session, ",\n");
3368 if (ret)
3369 goto end;
3370 } else {
3371 ret = lttng_metadata_printf(session,
3372 " = ");
3373 if (ret)
3374 goto end;
3375 if (entry->start.signedness)
3376 ret = lttng_metadata_printf(session,
3377 "%lld", (long long) entry->start.value);
3378 else
3379 ret = lttng_metadata_printf(session,
3380 "%llu", entry->start.value);
3381 if (ret)
3382 goto end;
3383 if (entry->start.signedness == entry->end.signedness &&
3384 entry->start.value
3385 == entry->end.value) {
3386 ret = lttng_metadata_printf(session,
3387 ",\n");
3388 } else {
3389 if (entry->end.signedness) {
3390 ret = lttng_metadata_printf(session,
3391 " ... %lld,\n",
3392 (long long) entry->end.value);
3393 } else {
3394 ret = lttng_metadata_printf(session,
3395 " ... %llu,\n",
3396 entry->end.value);
3397 }
3398 }
3399 if (ret)
3400 goto end;
3401 }
3402 }
3403 ret = print_tabs(session, nesting);
3404 if (ret)
3405 goto end;
3406 ret = lttng_metadata_printf(session, "}");
3407 end:
3408 return ret;
3409 }
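/*
 * Example emitted CTF enum (illustrative values and entries):
 *   enum : integer { size = 32; align = 8; signed = 0; encoding = none; base = 10; } {
 *     "RUNNING" = 0,
 *     "BLOCKED" = 1 ... 3,
 *   }
 */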
3410
3411 /*
3412 * Must be called with sessions_mutex held.
3413 */
3414 static
3415 int _lttng_enum_field_statedump(struct lttng_session *session,
3416 const struct lttng_kernel_event_field *field,
3417 size_t nesting)
3418 {
3419 int ret;
3420 const struct lttng_kernel_type_enum *enum_type;
3421
3422 enum_type = lttng_kernel_get_type_enum(field->type);
3423 WARN_ON_ONCE(!enum_type);
3424 ret = _lttng_enum_type_statedump(session, enum_type, nesting);
3425 if (ret)
3426 return ret;
3427 return lttng_field_name_statedump(session, field, nesting);
3428 }
3429
3430 static
3431 int _lttng_integer_field_statedump(struct lttng_session *session,
3432 const struct lttng_kernel_event_field *field,
3433 size_t nesting)
3434 {
3435 int ret;
3436
3437 ret = _lttng_integer_type_statedump(session, lttng_kernel_get_type_integer(field->type),
3438 lttng_kernel_string_encoding_none, nesting);
3439 if (ret)
3440 return ret;
3441 return lttng_field_name_statedump(session, field, nesting);
3442 }
3443
3444 static
3445 int _lttng_string_type_statedump(struct lttng_session *session,
3446 const struct lttng_kernel_type_string *type,
3447 size_t nesting)
3448 {
3449 int ret;
3450
3451 /* Default encoding is UTF8 */
3452 ret = print_tabs(session, nesting);
3453 if (ret)
3454 return ret;
3455 ret = lttng_metadata_printf(session,
3456 "string%s",
3457 type->encoding == lttng_kernel_string_encoding_ASCII ?
3458 " { encoding = ASCII; }" : "");
3459 return ret;
3460 }
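/* Emits "string" or, for ASCII encoding, "string { encoding = ASCII; }". */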
3461
3462 static
3463 int _lttng_string_field_statedump(struct lttng_session *session,
3464 const struct lttng_kernel_event_field *field,
3465 size_t nesting)
3466 {
3467 const struct lttng_kernel_type_string *string_type;
3468 int ret;
3469
3470 string_type = lttng_kernel_get_type_string(field->type);
3471 WARN_ON_ONCE(!string_type);
3472 ret = _lttng_string_type_statedump(session, string_type, nesting);
3473 if (ret)
3474 return ret;
3475 return lttng_field_name_statedump(session, field, nesting);
3476 }
3477
3478 /*
3479 * Must be called with sessions_mutex held.
3480 */
3481 static
3482 int _lttng_type_statedump(struct lttng_session *session,
3483 const struct lttng_kernel_type_common *type,
3484 enum lttng_kernel_string_encoding parent_encoding,
3485 size_t nesting)
3486 {
3487 int ret = 0;
3488
3489 switch (type->type) {
3490 case lttng_kernel_type_integer:
3491 ret = _lttng_integer_type_statedump(session,
3492 lttng_kernel_get_type_integer(type),
3493 parent_encoding, nesting);
3494 break;
3495 case lttng_kernel_type_enum:
3496 ret = _lttng_enum_type_statedump(session,
3497 lttng_kernel_get_type_enum(type),
3498 nesting);
3499 break;
3500 case lttng_kernel_type_string:
3501 ret = _lttng_string_type_statedump(session,
3502 lttng_kernel_get_type_string(type),
3503 nesting);
3504 break;
3505 case lttng_kernel_type_struct:
3506 ret = _lttng_struct_type_statedump(session,
3507 lttng_kernel_get_type_struct(type),
3508 nesting);
3509 break;
3510 case lttng_kernel_type_variant:
3511 ret = _lttng_variant_type_statedump(session,
3512 lttng_kernel_get_type_variant(type),
3513 nesting);
3514 break;
3515
3516 /* Nested arrays and sequences are not supported yet. */
3517 case lttng_kernel_type_array:
3518 case lttng_kernel_type_sequence:
3519 default:
3520 WARN_ON_ONCE(1);
3521 return -EINVAL;
3522 }
3523 return ret;
3524 }
3525
3526 /*
3527 * Must be called with sessions_mutex held.
3528 */
3529 static
3530 int _lttng_field_statedump(struct lttng_session *session,
3531 const struct lttng_kernel_event_field *field,
3532 size_t nesting)
3533 {
3534 int ret = 0;
3535
3536 switch (field->type->type) {
3537 case lttng_kernel_type_integer:
3538 ret = _lttng_integer_field_statedump(session, field, nesting);
3539 break;
3540 case lttng_kernel_type_enum:
3541 ret = _lttng_enum_field_statedump(session, field, nesting);
3542 break;
3543 case lttng_kernel_type_string:
3544 ret = _lttng_string_field_statedump(session, field, nesting);
3545 break;
3546 case lttng_kernel_type_struct:
3547 ret = _lttng_struct_field_statedump(session, field, nesting);
3548 break;
3549 case lttng_kernel_type_array:
3550 ret = _lttng_array_field_statedump(session, field, nesting);
3551 break;
3552 case lttng_kernel_type_sequence:
3553 ret = _lttng_sequence_field_statedump(session, field, nesting);
3554 break;
3555 case lttng_kernel_type_variant:
3556 ret = _lttng_variant_field_statedump(session, field, nesting);
3557 break;
3558
3559 default:
3560 WARN_ON_ONCE(1);
3561 return -EINVAL;
3562 }
3563 return ret;
3564 }
3565
3566 static
3567 int _lttng_context_metadata_statedump(struct lttng_session *session,
3568 struct lttng_kernel_ctx *ctx)
3569 {
3570 int ret = 0;
3571 int i;
3572
3573 if (!ctx)
3574 return 0;
3575 for (i = 0; i < ctx->nr_fields; i++) {
3576 const struct lttng_kernel_ctx_field *field = &ctx->fields[i];
3577
3578 ret = _lttng_field_statedump(session, field->event_field, 2);
3579 if (ret)
3580 return ret;
3581 }
3582 return ret;
3583 }
3584
3585 static
3586 int _lttng_fields_metadata_statedump(struct lttng_session *session,
3587 struct lttng_kernel_event_recorder *event_recorder)
3588 {
3589 const struct lttng_kernel_event_desc *desc = event_recorder->priv->parent.desc;
3590 int ret = 0;
3591 int i;
3592
3593 for (i = 0; i < desc->nr_fields; i++) {
3594 const struct lttng_kernel_event_field *field = desc->fields[i];
3595
3596 ret = _lttng_field_statedump(session, field, 2);
3597 if (ret)
3598 return ret;
3599 }
3600 return ret;
3601 }
3602
3603 /*
3604 * Must be called with sessions_mutex held.
3605 * The entire event metadata is printed as a single atomic metadata
3606 * transaction.
3607 */
3608 static
3609 int _lttng_event_metadata_statedump(struct lttng_session *session,
3610 struct lttng_channel *chan,
3611 struct lttng_kernel_event_recorder *event_recorder)
3612 {
3613 int ret = 0;
3614
3615 if (event_recorder->priv->metadata_dumped || !LTTNG_READ_ONCE(session->active))
3616 return 0;
3617 if (chan->channel_type == METADATA_CHANNEL)
3618 return 0;
3619
3620 lttng_metadata_begin(session);
3621
3622 ret = lttng_metadata_printf(session,
3623 "event {\n"
3624 " name = \"%s\";\n"
3625 " id = %u;\n"
3626 " stream_id = %u;\n",
3627 event_recorder->priv->parent.desc->event_name,
3628 event_recorder->priv->id,
3629 event_recorder->chan->id);
3630 if (ret)
3631 goto end;
3632
3633 ret = lttng_metadata_printf(session,
3634 " fields := struct {\n"
3635 );
3636 if (ret)
3637 goto end;
3638
3639 ret = _lttng_fields_metadata_statedump(session, event_recorder);
3640 if (ret)
3641 goto end;
3642
3643 /*
3644 * LTTng space reservation can only reserve multiples of the
3645 * byte size.
3646 */
3647 ret = lttng_metadata_printf(session,
3648 " };\n"
3649 "};\n\n");
3650 if (ret)
3651 goto end;
3652
3653 event_recorder->priv->metadata_dumped = 1;
3654 end:
3655 lttng_metadata_end(session);
3656 return ret;
3657
3658 }
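
/*
 * As an illustration of the transaction above (identifiers and numbers are
 * hypothetical, not taken from a real trace), an event recorder named
 * "sched_switch" with id 3 on stream 0 produces a metadata block of the form:
 *
 *   event {
 *       name = "sched_switch";
 *       id = 3;
 *       stream_id = 0;
 *       fields := struct {
 *           ...one declaration per field, emitted by
 *           _lttng_fields_metadata_statedump()...
 *       };
 *   };
 */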
3659
3660 /*
3661 * Must be called with sessions_mutex held.
3662 * The entire channel metadata is printed as a single atomic metadata
3663 * transaction.
3664 */
3665 static
3666 int _lttng_channel_metadata_statedump(struct lttng_session *session,
3667 struct lttng_channel *chan)
3668 {
3669 int ret = 0;
3670
3671 if (chan->metadata_dumped || !LTTNG_READ_ONCE(session->active))
3672 return 0;
3673
3674 if (chan->channel_type == METADATA_CHANNEL)
3675 return 0;
3676
3677 lttng_metadata_begin(session);
3678
3679 WARN_ON_ONCE(!chan->header_type);
3680 ret = lttng_metadata_printf(session,
3681 "stream {\n"
3682 " id = %u;\n"
3683 " event.header := %s;\n"
3684 " packet.context := struct packet_context;\n",
3685 chan->id,
3686 chan->header_type == 1 ? "struct event_header_compact" :
3687 "struct event_header_large");
3688 if (ret)
3689 goto end;
3690
3691 if (chan->ctx) {
3692 ret = lttng_metadata_printf(session,
3693 " event.context := struct {\n");
3694 if (ret)
3695 goto end;
3696 }
3697 ret = _lttng_context_metadata_statedump(session, chan->ctx);
3698 if (ret)
3699 goto end;
3700 if (chan->ctx) {
3701 ret = lttng_metadata_printf(session,
3702 " };\n");
3703 if (ret)
3704 goto end;
3705 }
3706
3707 ret = lttng_metadata_printf(session,
3708 "};\n\n");
3709
3710 chan->metadata_dumped = 1;
3711 end:
3712 lttng_metadata_end(session);
3713 return ret;
3714 }
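
/*
 * With the format strings above, a channel with id 0 that uses the compact
 * event header and carries no extra context therefore appears in the
 * metadata as (values illustrative):
 *
 *   stream {
 *       id = 0;
 *       event.header := struct event_header_compact;
 *       packet.context := struct packet_context;
 *   };
 */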
3715
3716 /*
3717 * Must be called with sessions_mutex held.
3718 */
3719 static
3720 int _lttng_stream_packet_context_declare(struct lttng_session *session)
3721 {
3722 return lttng_metadata_printf(session,
3723 "struct packet_context {\n"
3724 " uint64_clock_monotonic_t timestamp_begin;\n"
3725 " uint64_clock_monotonic_t timestamp_end;\n"
3726 " uint64_t content_size;\n"
3727 " uint64_t packet_size;\n"
3728 " uint64_t packet_seq_num;\n"
3729 " unsigned long events_discarded;\n"
3730 " uint32_t cpu_id;\n"
3731 "};\n\n"
3732 );
3733 }
3734
3735 /*
3736 * Compact header:
3737 * id: range: 0 - 30.
3738 * id 31 is reserved to indicate an extended header.
3739 *
3740 * Large header:
3741 * id: range: 0 - 65534.
3742 * id 65535 is reserved to indicate an extended header.
3743 *
3744 * Must be called with sessions_mutex held.
3745 */
3746 static
3747 int _lttng_event_header_declare(struct lttng_session *session)
3748 {
3749 return lttng_metadata_printf(session,
3750 "struct event_header_compact {\n"
3751 " enum : uint5_t { compact = 0 ... 30, extended = 31 } id;\n"
3752 " variant <id> {\n"
3753 " struct {\n"
3754 " uint27_clock_monotonic_t timestamp;\n"
3755 " } compact;\n"
3756 " struct {\n"
3757 " uint32_t id;\n"
3758 " uint64_clock_monotonic_t timestamp;\n"
3759 " } extended;\n"
3760 " } v;\n"
3761 "} align(%u);\n"
3762 "\n"
3763 "struct event_header_large {\n"
3764 " enum : uint16_t { compact = 0 ... 65534, extended = 65535 } id;\n"
3765 " variant <id> {\n"
3766 " struct {\n"
3767 " uint32_clock_monotonic_t timestamp;\n"
3768 " } compact;\n"
3769 " struct {\n"
3770 " uint32_t id;\n"
3771 " uint64_clock_monotonic_t timestamp;\n"
3772 " } extended;\n"
3773 " } v;\n"
3774 "} align(%u);\n\n",
3775 lttng_alignof(uint32_t) * CHAR_BIT,
3776 lttng_alignof(uint16_t) * CHAR_BIT
3777 );
3778 }
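
/*
 * Minimal sketch (not upstream code): how a writer following the layout
 * declared above could decide between the two header variants for a given
 * event id. Ids 0..30 fit the 5-bit compact enum and id 31 selects the
 * extended variant carrying the full 32-bit id; the large header works the
 * same way with a 16-bit enum and 65535 as the escape value. The helper
 * name is an illustrative assumption.
 */
static inline bool lttng_example_id_needs_extended_header(uint32_t id,
		bool compact_header)
{
	return compact_header ? (id > 30) : (id > 65534);
}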
3779
3780 /*
3781 * Approximation of NTP time of day to clock monotonic correlation,
3782 * taken at start of trace.
3783 * Yes, this is only an approximation. Yes, we can (and will) do better
3784 * in future versions.
3785  * This function may return a negative offset. This can happen if the
3786  * system sets the REALTIME clock to 0 after boot.
3787 *
3788  * Use a 64-bit timespec on kernels that have it; this makes 32-bit
3789  * architectures y2038 compliant.
3790 */
3791 static
3792 int64_t measure_clock_offset(void)
3793 {
3794 uint64_t monotonic_avg, monotonic[2], realtime;
3795 uint64_t tcf = trace_clock_freq();
3796 int64_t offset;
3797 unsigned long flags;
3798 #ifdef LTTNG_KERNEL_HAS_TIMESPEC64
3799 struct timespec64 rts = { 0, 0 };
3800 #else
3801 struct timespec rts = { 0, 0 };
3802 #endif
3803
3804 /* Disable interrupts to increase correlation precision. */
3805 local_irq_save(flags);
3806 monotonic[0] = trace_clock_read64();
3807 #ifdef LTTNG_KERNEL_HAS_TIMESPEC64
3808 ktime_get_real_ts64(&rts);
3809 #else
3810 getnstimeofday(&rts);
3811 #endif
3812 monotonic[1] = trace_clock_read64();
3813 local_irq_restore(flags);
3814
3815 monotonic_avg = (monotonic[0] + monotonic[1]) >> 1;
3816 realtime = (uint64_t) rts.tv_sec * tcf;
3817 if (tcf == NSEC_PER_SEC) {
3818 realtime += rts.tv_nsec;
3819 } else {
3820 uint64_t n = rts.tv_nsec * tcf;
3821
3822 do_div(n, NSEC_PER_SEC);
3823 realtime += n;
3824 }
3825 offset = (int64_t) realtime - monotonic_avg;
3826 return offset;
3827 }
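
/*
 * Worked example with illustrative numbers: if trace_clock_freq() is
 * NSEC_PER_SEC and the realtime clock reads { tv_sec = 100, tv_nsec = 500 }
 * while the two surrounding monotonic reads average to 4000000000 ticks,
 * then realtime = 100 * 1000000000 + 500 = 100000000500 and the offset
 * emitted into the metadata is 100000000500 - 4000000000 = 96000000500.
 */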
3828
3829 static
3830 int print_escaped_ctf_string(struct lttng_session *session, const char *string)
3831 {
3832 int ret = 0;
3833 size_t i;
3834 char cur;
3835
3836 i = 0;
3837 cur = string[i];
3838 while (cur != '\0') {
3839 switch (cur) {
3840 case '\n':
3841 ret = lttng_metadata_printf(session, "%s", "\\n");
3842 break;
3843 case '\\':
3844 case '"':
3845 ret = lttng_metadata_printf(session, "%c", '\\');
3846 if (ret)
3847 goto error;
3848 /* We still print the current char */
3849 /* Fallthrough */
3850 default:
3851 ret = lttng_metadata_printf(session, "%c", cur);
3852 break;
3853 }
3854
3855 if (ret)
3856 goto error;
3857
3858 cur = string[++i];
3859 }
3860 error:
3861 return ret;
3862 }
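
/*
 * For example, with the escaping above a trace name such as
 *   my "night" trace
 * is emitted into the metadata string as
 *   my \"night\" trace
 * and an embedded newline is emitted as the two characters '\' and 'n'.
 */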
3863
3864 static
3865 int print_metadata_escaped_field(struct lttng_session *session, const char *field,
3866 const char *field_value)
3867 {
3868 int ret;
3869
3870 ret = lttng_metadata_printf(session, " %s = \"", field);
3871 if (ret)
3872 goto error;
3873
3874 ret = print_escaped_ctf_string(session, field_value);
3875 if (ret)
3876 goto error;
3877
3878 ret = lttng_metadata_printf(session, "\";\n");
3879
3880 error:
3881 return ret;
3882 }
3883
3884 /*
3885 * Output metadata into this session's metadata buffers.
3886 * Must be called with sessions_mutex held.
3887 */
3888 static
3889 int _lttng_session_metadata_statedump(struct lttng_session *session)
3890 {
3891 unsigned char *uuid_c = session->uuid.b;
3892 unsigned char uuid_s[37], clock_uuid_s[BOOT_ID_LEN];
3893 const char *product_uuid;
3894 struct lttng_channel *chan;
3895 struct lttng_kernel_event_recorder_private *event_recorder_priv;
3896 int ret = 0;
3897
3898 if (!LTTNG_READ_ONCE(session->active))
3899 return 0;
3900
3901 lttng_metadata_begin(session);
3902
3903 if (session->metadata_dumped)
3904 goto skip_session;
3905
3906 snprintf(uuid_s, sizeof(uuid_s),
3907 "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
3908 uuid_c[0], uuid_c[1], uuid_c[2], uuid_c[3],
3909 uuid_c[4], uuid_c[5], uuid_c[6], uuid_c[7],
3910 uuid_c[8], uuid_c[9], uuid_c[10], uuid_c[11],
3911 uuid_c[12], uuid_c[13], uuid_c[14], uuid_c[15]);
3912
3913 ret = lttng_metadata_printf(session,
3914 "typealias integer { size = 8; align = %u; signed = false; } := uint8_t;\n"
3915 "typealias integer { size = 16; align = %u; signed = false; } := uint16_t;\n"
3916 "typealias integer { size = 32; align = %u; signed = false; } := uint32_t;\n"
3917 "typealias integer { size = 64; align = %u; signed = false; } := uint64_t;\n"
3918 "typealias integer { size = %u; align = %u; signed = false; } := unsigned long;\n"
3919 "typealias integer { size = 5; align = 1; signed = false; } := uint5_t;\n"
3920 "typealias integer { size = 27; align = 1; signed = false; } := uint27_t;\n"
3921 "\n"
3922 "trace {\n"
3923 " major = %u;\n"
3924 " minor = %u;\n"
3925 " uuid = \"%s\";\n"
3926 " byte_order = %s;\n"
3927 " packet.header := struct {\n"
3928 " uint32_t magic;\n"
3929 " uint8_t uuid[16];\n"
3930 " uint32_t stream_id;\n"
3931 " uint64_t stream_instance_id;\n"
3932 " };\n"
3933 "};\n\n",
3934 lttng_alignof(uint8_t) * CHAR_BIT,
3935 lttng_alignof(uint16_t) * CHAR_BIT,
3936 lttng_alignof(uint32_t) * CHAR_BIT,
3937 lttng_alignof(uint64_t) * CHAR_BIT,
3938 sizeof(unsigned long) * CHAR_BIT,
3939 lttng_alignof(unsigned long) * CHAR_BIT,
3940 CTF_SPEC_MAJOR,
3941 CTF_SPEC_MINOR,
3942 uuid_s,
3943 #if __BYTE_ORDER == __BIG_ENDIAN
3944 "be"
3945 #else
3946 "le"
3947 #endif
3948 );
3949 if (ret)
3950 goto end;
3951
3952 ret = lttng_metadata_printf(session,
3953 "env {\n"
3954 " hostname = \"%s\";\n"
3955 " domain = \"kernel\";\n"
3956 " sysname = \"%s\";\n"
3957 " kernel_release = \"%s\";\n"
3958 " kernel_version = \"%s\";\n"
3959 " tracer_name = \"lttng-modules\";\n"
3960 " tracer_major = %d;\n"
3961 " tracer_minor = %d;\n"
3962 " tracer_patchlevel = %d;\n"
3963 " trace_buffering_scheme = \"global\";\n",
3964 current->nsproxy->uts_ns->name.nodename,
3965 utsname()->sysname,
3966 utsname()->release,
3967 utsname()->version,
3968 LTTNG_MODULES_MAJOR_VERSION,
3969 LTTNG_MODULES_MINOR_VERSION,
3970 LTTNG_MODULES_PATCHLEVEL_VERSION
3971 );
3972 if (ret)
3973 goto end;
3974
3975 ret = print_metadata_escaped_field(session, "trace_name", session->name);
3976 if (ret)
3977 goto end;
3978 ret = print_metadata_escaped_field(session, "trace_creation_datetime",
3979 session->creation_time);
3980 if (ret)
3981 goto end;
3982
3983 /* Add the product UUID to the 'env' section */
3984 product_uuid = dmi_get_system_info(DMI_PRODUCT_UUID);
3985 if (product_uuid) {
3986 ret = lttng_metadata_printf(session,
3987 " product_uuid = \"%s\";\n",
3988 product_uuid
3989 );
3990 if (ret)
3991 goto end;
3992 }
3993
3994 /* Close the 'env' section */
3995 ret = lttng_metadata_printf(session, "};\n\n");
3996 if (ret)
3997 goto end;
3998
3999 ret = lttng_metadata_printf(session,
4000 "clock {\n"
4001 " name = \"%s\";\n",
4002 trace_clock_name()
4003 );
4004 if (ret)
4005 goto end;
4006
4007 if (!trace_clock_uuid(clock_uuid_s)) {
4008 ret = lttng_metadata_printf(session,
4009 " uuid = \"%s\";\n",
4010 clock_uuid_s
4011 );
4012 if (ret)
4013 goto end;
4014 }
4015
4016 ret = lttng_metadata_printf(session,
4017 " description = \"%s\";\n"
4018 " freq = %llu; /* Frequency, in Hz */\n"
4019 " /* clock value offset from Epoch is: offset * (1/freq) */\n"
4020 " offset = %lld;\n"
4021 "};\n\n",
4022 trace_clock_description(),
4023 (unsigned long long) trace_clock_freq(),
4024 (long long) measure_clock_offset()
4025 );
4026 if (ret)
4027 goto end;
4028
4029 ret = lttng_metadata_printf(session,
4030 "typealias integer {\n"
4031 " size = 27; align = 1; signed = false;\n"
4032 " map = clock.%s.value;\n"
4033 "} := uint27_clock_monotonic_t;\n"
4034 "\n"
4035 "typealias integer {\n"
4036 " size = 32; align = %u; signed = false;\n"
4037 " map = clock.%s.value;\n"
4038 "} := uint32_clock_monotonic_t;\n"
4039 "\n"
4040 "typealias integer {\n"
4041 " size = 64; align = %u; signed = false;\n"
4042 " map = clock.%s.value;\n"
4043 "} := uint64_clock_monotonic_t;\n\n",
4044 trace_clock_name(),
4045 lttng_alignof(uint32_t) * CHAR_BIT,
4046 trace_clock_name(),
4047 lttng_alignof(uint64_t) * CHAR_BIT,
4048 trace_clock_name()
4049 );
4050 if (ret)
4051 goto end;
4052
4053 ret = _lttng_stream_packet_context_declare(session);
4054 if (ret)
4055 goto end;
4056
4057 ret = _lttng_event_header_declare(session);
4058 if (ret)
4059 goto end;
4060
4061 skip_session:
4062 list_for_each_entry(chan, &session->chan, list) {
4063 ret = _lttng_channel_metadata_statedump(session, chan);
4064 if (ret)
4065 goto end;
4066 }
4067
4068 list_for_each_entry(event_recorder_priv, &session->events, node) {
4069 ret = _lttng_event_metadata_statedump(session, event_recorder_priv->pub->chan,
4070 event_recorder_priv->pub);
4071 if (ret)
4072 goto end;
4073 }
4074 session->metadata_dumped = 1;
4075 end:
4076 lttng_metadata_end(session);
4077 return ret;
4078 }
4079
4080 /**
4081 * lttng_transport_register - LTT transport registration
4082 * @transport: transport structure
4083 *
4084 * Registers a transport which can be used as output to extract the data out of
4085 * LTTng. The module calling this registration function must ensure that no
4086 * trap-inducing code will be executed by the transport functions. E.g.
4087 * vmalloc_sync_mappings() must be called between a vmalloc and the moment the memory
4088 * is made visible to the transport function. This registration acts as a
4089  * vmalloc_sync_mappings(); therefore, the module only needs to synchronize
4090  * the TLBs if it allocates virtual memory after its registration.
4091 */
4092 void lttng_transport_register(struct lttng_transport *transport)
4093 {
4094 /*
4095 * Make sure no page fault can be triggered by the module about to be
4096 * registered. We deal with this here so we don't have to call
4097 * vmalloc_sync_mappings() in each module's init.
4098 */
4099 wrapper_vmalloc_sync_mappings();
4100
4101 mutex_lock(&sessions_mutex);
4102 list_add_tail(&transport->node, &lttng_transport_list);
4103 mutex_unlock(&sessions_mutex);
4104 }
4105 EXPORT_SYMBOL_GPL(lttng_transport_register);
4106
4107 /**
4108 * lttng_transport_unregister - LTT transport unregistration
4109 * @transport: transport structure
4110 */
4111 void lttng_transport_unregister(struct lttng_transport *transport)
4112 {
4113 mutex_lock(&sessions_mutex);
4114 list_del(&transport->node);
4115 mutex_unlock(&sessions_mutex);
4116 }
4117 EXPORT_SYMBOL_GPL(lttng_transport_unregister);
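
/*
 * Minimal usage sketch (hypothetical transport module, not part of this
 * file): a transport typically registers itself from its module init and
 * unregisters on exit, relying on the implicit vmalloc_sync_mappings()
 * performed by lttng_transport_register(). Structure initialization is
 * elided because the transport fields are defined elsewhere.
 *
 *	static struct lttng_transport example_transport;
 *
 *	static int __init example_transport_init(void)
 *	{
 *		lttng_transport_register(&example_transport);
 *		return 0;
 *	}
 *	module_init(example_transport_init);
 *
 *	static void __exit example_transport_exit(void)
 *	{
 *		lttng_transport_unregister(&example_transport);
 *	}
 *	module_exit(example_transport_exit);
 */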
4118
4119 void lttng_counter_transport_register(struct lttng_counter_transport *transport)
4120 {
4121 /*
4122 * Make sure no page fault can be triggered by the module about to be
4123 * registered. We deal with this here so we don't have to call
4124 * vmalloc_sync_mappings() in each module's init.
4125 */
4126 wrapper_vmalloc_sync_mappings();
4127
4128 mutex_lock(&sessions_mutex);
4129 list_add_tail(&transport->node, &lttng_counter_transport_list);
4130 mutex_unlock(&sessions_mutex);
4131 }
4132 EXPORT_SYMBOL_GPL(lttng_counter_transport_register);
4133
4134 void lttng_counter_transport_unregister(struct lttng_counter_transport *transport)
4135 {
4136 mutex_lock(&sessions_mutex);
4137 list_del(&transport->node);
4138 mutex_unlock(&sessions_mutex);
4139 }
4140 EXPORT_SYMBOL_GPL(lttng_counter_transport_unregister);
4141
4142 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
4143
4144 enum cpuhp_state lttng_hp_prepare;
4145 enum cpuhp_state lttng_hp_online;
4146
4147 static int lttng_hotplug_prepare(unsigned int cpu, struct hlist_node *node)
4148 {
4149 struct lttng_cpuhp_node *lttng_node;
4150
4151 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4152 switch (lttng_node->component) {
4153 case LTTNG_RING_BUFFER_FRONTEND:
4154 return 0;
4155 case LTTNG_RING_BUFFER_BACKEND:
4156 return lttng_cpuhp_rb_backend_prepare(cpu, lttng_node);
4157 case LTTNG_RING_BUFFER_ITER:
4158 return 0;
4159 case LTTNG_CONTEXT_PERF_COUNTERS:
4160 return 0;
4161 default:
4162 return -EINVAL;
4163 }
4164 }
4165
4166 static int lttng_hotplug_dead(unsigned int cpu, struct hlist_node *node)
4167 {
4168 struct lttng_cpuhp_node *lttng_node;
4169
4170 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4171 switch (lttng_node->component) {
4172 case LTTNG_RING_BUFFER_FRONTEND:
4173 return lttng_cpuhp_rb_frontend_dead(cpu, lttng_node);
4174 case LTTNG_RING_BUFFER_BACKEND:
4175 return 0;
4176 case LTTNG_RING_BUFFER_ITER:
4177 return 0;
4178 case LTTNG_CONTEXT_PERF_COUNTERS:
4179 return lttng_cpuhp_perf_counter_dead(cpu, lttng_node);
4180 default:
4181 return -EINVAL;
4182 }
4183 }
4184
4185 static int lttng_hotplug_online(unsigned int cpu, struct hlist_node *node)
4186 {
4187 struct lttng_cpuhp_node *lttng_node;
4188
4189 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4190 switch (lttng_node->component) {
4191 case LTTNG_RING_BUFFER_FRONTEND:
4192 return lttng_cpuhp_rb_frontend_online(cpu, lttng_node);
4193 case LTTNG_RING_BUFFER_BACKEND:
4194 return 0;
4195 case LTTNG_RING_BUFFER_ITER:
4196 return lttng_cpuhp_rb_iter_online(cpu, lttng_node);
4197 case LTTNG_CONTEXT_PERF_COUNTERS:
4198 return lttng_cpuhp_perf_counter_online(cpu, lttng_node);
4199 default:
4200 return -EINVAL;
4201 }
4202 }
4203
4204 static int lttng_hotplug_offline(unsigned int cpu, struct hlist_node *node)
4205 {
4206 struct lttng_cpuhp_node *lttng_node;
4207
4208 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4209 switch (lttng_node->component) {
4210 case LTTNG_RING_BUFFER_FRONTEND:
4211 return lttng_cpuhp_rb_frontend_offline(cpu, lttng_node);
4212 case LTTNG_RING_BUFFER_BACKEND:
4213 return 0;
4214 case LTTNG_RING_BUFFER_ITER:
4215 return 0;
4216 case LTTNG_CONTEXT_PERF_COUNTERS:
4217 return 0;
4218 default:
4219 return -EINVAL;
4220 }
4221 }
4222
4223 static int __init lttng_init_cpu_hotplug(void)
4224 {
4225 int ret;
4226
4227 ret = cpuhp_setup_state_multi(CPUHP_BP_PREPARE_DYN, "lttng:prepare",
4228 lttng_hotplug_prepare,
4229 lttng_hotplug_dead);
4230 if (ret < 0) {
4231 return ret;
4232 }
4233 lttng_hp_prepare = ret;
4234 lttng_rb_set_hp_prepare(ret);
4235
4236 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "lttng:online",
4237 lttng_hotplug_online,
4238 lttng_hotplug_offline);
4239 if (ret < 0) {
4240 cpuhp_remove_multi_state(lttng_hp_prepare);
4241 lttng_hp_prepare = 0;
4242 return ret;
4243 }
4244 lttng_hp_online = ret;
4245 lttng_rb_set_hp_online(ret);
4246
4247 return 0;
4248 }
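
/*
 * Illustrative sketch (hypothetical helper, not upstream code): once the two
 * dynamic multi-states above are installed, a component attaches its
 * lttng_cpuhp_node to them roughly as follows so that the per-CPU callbacks
 * defined earlier are invoked for it. Only the fields already referenced by
 * those callbacks (component, node) are used here.
 */
static inline int lttng_example_cpuhp_attach(struct lttng_cpuhp_node *node)
{
	int ret;

	node->component = LTTNG_RING_BUFFER_BACKEND;
	ret = cpuhp_state_add_instance(lttng_hp_prepare, &node->node);
	if (ret)
		return ret;
	return cpuhp_state_add_instance(lttng_hp_online, &node->node);
}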
4249
4250 static void __exit lttng_exit_cpu_hotplug(void)
4251 {
4252 lttng_rb_set_hp_online(0);
4253 cpuhp_remove_multi_state(lttng_hp_online);
4254 lttng_rb_set_hp_prepare(0);
4255 cpuhp_remove_multi_state(lttng_hp_prepare);
4256 }
4257
4258 #else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
4259 static int lttng_init_cpu_hotplug(void)
4260 {
4261 return 0;
4262 }
4263 static void lttng_exit_cpu_hotplug(void)
4264 {
4265 }
4266 #endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
4267
4268
4269 static int __init lttng_events_init(void)
4270 {
4271 int ret;
4272
4273 ret = wrapper_lttng_fixup_sig(THIS_MODULE);
4274 if (ret)
4275 return ret;
4276 ret = wrapper_get_pfnblock_flags_mask_init();
4277 if (ret)
4278 return ret;
4279 ret = wrapper_get_pageblock_flags_mask_init();
4280 if (ret)
4281 return ret;
4282 ret = lttng_probes_init();
4283 if (ret)
4284 return ret;
4285 ret = lttng_context_init();
4286 if (ret)
4287 return ret;
4288 ret = lttng_tracepoint_init();
4289 if (ret)
4290 goto error_tp;
4291 event_recorder_cache = KMEM_CACHE(lttng_kernel_event_recorder, 0);
4292 if (!event_recorder_cache) {
4293 ret = -ENOMEM;
4294 goto error_kmem_event_recorder;
4295 }
4296 event_recorder_private_cache = KMEM_CACHE(lttng_kernel_event_recorder_private, 0);
4297 if (!event_recorder_private_cache) {
4298 ret = -ENOMEM;
4299 goto error_kmem_event_recorder_private;
4300 }
4301 event_notifier_cache = KMEM_CACHE(lttng_kernel_event_notifier, 0);
4302 if (!event_notifier_cache) {
4303 ret = -ENOMEM;
4304 goto error_kmem_event_notifier;
4305 }
4306 event_notifier_private_cache = KMEM_CACHE(lttng_kernel_event_notifier_private, 0);
4307 if (!event_notifier_private_cache) {
4308 ret = -ENOMEM;
4309 goto error_kmem_event_notifier_private;
4310 }
4311 ret = lttng_abi_init();
4312 if (ret)
4313 goto error_abi;
4314 ret = lttng_logger_init();
4315 if (ret)
4316 goto error_logger;
4317 ret = lttng_init_cpu_hotplug();
4318 if (ret)
4319 goto error_hotplug;
4320 printk(KERN_NOTICE "LTTng: Loaded modules v%s.%s.%s%s (%s)%s%s\n",
4321 __stringify(LTTNG_MODULES_MAJOR_VERSION),
4322 __stringify(LTTNG_MODULES_MINOR_VERSION),
4323 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
4324 LTTNG_MODULES_EXTRAVERSION,
4325 LTTNG_VERSION_NAME,
4326 #ifdef LTTNG_EXTRA_VERSION_GIT
4327 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
4328 #else
4329 "",
4330 #endif
4331 #ifdef LTTNG_EXTRA_VERSION_NAME
4332 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
4333 #else
4334 "");
4335 #endif
4336 return 0;
4337
4338 error_hotplug:
4339 lttng_logger_exit();
4340 error_logger:
4341 lttng_abi_exit();
4342 error_abi:
4343 kmem_cache_destroy(event_notifier_private_cache);
4344 error_kmem_event_notifier_private:
4345 kmem_cache_destroy(event_notifier_cache);
4346 error_kmem_event_notifier:
4347 kmem_cache_destroy(event_recorder_private_cache);
4348 error_kmem_event_recorder_private:
4349 kmem_cache_destroy(event_recorder_cache);
4350 error_kmem_event_recorder:
4351 lttng_tracepoint_exit();
4352 error_tp:
4353 lttng_context_exit();
4354 printk(KERN_NOTICE "LTTng: Failed to load modules v%s.%s.%s%s (%s)%s%s\n",
4355 __stringify(LTTNG_MODULES_MAJOR_VERSION),
4356 __stringify(LTTNG_MODULES_MINOR_VERSION),
4357 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
4358 LTTNG_MODULES_EXTRAVERSION,
4359 LTTNG_VERSION_NAME,
4360 #ifdef LTTNG_EXTRA_VERSION_GIT
4361 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
4362 #else
4363 "",
4364 #endif
4365 #ifdef LTTNG_EXTRA_VERSION_NAME
4366 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
4367 #else
4368 "");
4369 #endif
4370 return ret;
4371 }
4372
4373 module_init(lttng_events_init);
4374
4375 static void __exit lttng_events_exit(void)
4376 {
4377 struct lttng_session *session, *tmpsession;
4378
4379 lttng_exit_cpu_hotplug();
4380 lttng_logger_exit();
4381 lttng_abi_exit();
4382 list_for_each_entry_safe(session, tmpsession, &sessions, list)
4383 lttng_session_destroy(session);
4384 kmem_cache_destroy(event_recorder_cache);
4385 kmem_cache_destroy(event_recorder_private_cache);
4386 kmem_cache_destroy(event_notifier_cache);
4387 kmem_cache_destroy(event_notifier_private_cache);
4388 lttng_tracepoint_exit();
4389 lttng_context_exit();
4390 printk(KERN_NOTICE "LTTng: Unloaded modules v%s.%s.%s%s (%s)%s%s\n",
4391 __stringify(LTTNG_MODULES_MAJOR_VERSION),
4392 __stringify(LTTNG_MODULES_MINOR_VERSION),
4393 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
4394 LTTNG_MODULES_EXTRAVERSION,
4395 LTTNG_VERSION_NAME,
4396 #ifdef LTTNG_EXTRA_VERSION_GIT
4397 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
4398 #else
4399 "",
4400 #endif
4401 #ifdef LTTNG_EXTRA_VERSION_NAME
4402 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
4403 #else
4404 "");
4405 #endif
4406 }
4407
4408 module_exit(lttng_events_exit);
4409
4410 #include <generated/patches.h>
4411 #ifdef LTTNG_EXTRA_VERSION_GIT
4412 MODULE_INFO(extra_version_git, LTTNG_EXTRA_VERSION_GIT);
4413 #endif
4414 #ifdef LTTNG_EXTRA_VERSION_NAME
4415 MODULE_INFO(extra_version_name, LTTNG_EXTRA_VERSION_NAME);
4416 #endif
4417 MODULE_LICENSE("GPL and additional rights");
4418 MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
4419 MODULE_DESCRIPTION("LTTng tracer");
4420 MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
4421 __stringify(LTTNG_MODULES_MINOR_VERSION) "."
4422 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
4423 LTTNG_MODULES_EXTRAVERSION);