Split ID tracker into public/private structures
lttng-modules.git: src/lttng-events.c
1 /* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
2 *
3 * lttng-events.c
4 *
5 * Holds LTTng per-session event registry.
6 *
7 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 */
9
10 /*
11 * This page_alloc.h wrapper needs to be included before gfpflags.h because it
12 * overrides a function with a define.
13 */
14 #include "wrapper/page_alloc.h"
15
16 #include <linux/module.h>
17 #include <linux/mutex.h>
18 #include <linux/sched.h>
19 #include <linux/slab.h>
20 #include <linux/jiffies.h>
21 #include <linux/utsname.h>
22 #include <linux/err.h>
23 #include <linux/seq_file.h>
24 #include <linux/file.h>
25 #include <linux/anon_inodes.h>
26 #include <wrapper/file.h>
27 #include <linux/uaccess.h>
28 #include <linux/vmalloc.h>
29 #include <linux/dmi.h>
30
31 #include <wrapper/uuid.h>
32 #include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_mappings() */
33 #include <wrapper/random.h>
34 #include <wrapper/tracepoint.h>
35 #include <wrapper/list.h>
36 #include <wrapper/types.h>
37 #include <lttng/kernel-version.h>
38 #include <lttng/events.h>
39 #include <lttng/events-internal.h>
40 #include <lttng/lttng-bytecode.h>
41 #include <lttng/tracer.h>
42 #include <lttng/event-notifier-notification.h>
43 #include <lttng/abi-old.h>
44 #include <lttng/endian.h>
45 #include <lttng/string-utils.h>
46 #include <lttng/utils.h>
47 #include <ringbuffer/backend.h>
48 #include <ringbuffer/frontend.h>
49 #include <wrapper/time.h>
50
51 #define METADATA_CACHE_DEFAULT_SIZE 4096
52
53 static LIST_HEAD(sessions);
54 static LIST_HEAD(event_notifier_groups);
55 static LIST_HEAD(lttng_transport_list);
56 static LIST_HEAD(lttng_counter_transport_list);
57 /*
58 * Protect the sessions and metadata caches.
59 */
60 static DEFINE_MUTEX(sessions_mutex);
61 static struct kmem_cache *event_recorder_cache;
62 static struct kmem_cache *event_recorder_private_cache;
63 static struct kmem_cache *event_notifier_cache;
64 static struct kmem_cache *event_notifier_private_cache;
65
66 static void lttng_session_lazy_sync_event_enablers(struct lttng_kernel_session *session);
67 static void lttng_session_sync_event_enablers(struct lttng_kernel_session *session);
68 static void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler);
69 static void lttng_event_notifier_enabler_destroy(struct lttng_event_notifier_enabler *event_notifier_enabler);
70 static void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group);
71
72 static void _lttng_event_destroy(struct lttng_kernel_event_common *event);
73 static void _lttng_channel_destroy(struct lttng_channel *chan);
74 static int _lttng_event_unregister(struct lttng_kernel_event_recorder *event);
75 static int _lttng_event_notifier_unregister(struct lttng_kernel_event_notifier *event_notifier);
76 static
77 int _lttng_event_metadata_statedump(struct lttng_kernel_session *session,
78 struct lttng_channel *chan,
79 struct lttng_kernel_event_recorder *event);
80 static
81 int _lttng_session_metadata_statedump(struct lttng_kernel_session *session);
82 static
83 void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream);
84 static
85 int _lttng_type_statedump(struct lttng_kernel_session *session,
86 const struct lttng_kernel_type_common *type,
87 enum lttng_kernel_string_encoding parent_encoding,
88 size_t nesting);
89 static
90 int _lttng_field_statedump(struct lttng_kernel_session *session,
91 const struct lttng_kernel_event_field *field,
92 size_t nesting, const char **prev_field_name_p);
93
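/*
 * Wait for a tracing grace period. On kernels >= 5.1 a single
 * synchronize_rcu() suffices since the RCU flavors were consolidated;
 * older kernels use synchronize_sched() for non-preemptible probe
 * contexts, with an extra synchronize_rcu() when PREEMPT_RT(_FULL)
 * makes probe sites preemptible.
 */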
94 void synchronize_trace(void)
95 {
96 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,1,0))
97 synchronize_rcu();
98 #else
99 synchronize_sched();
100 #endif
101
102 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0))
103 #ifdef CONFIG_PREEMPT_RT_FULL
104 synchronize_rcu();
105 #endif
106 #else /* (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0)) */
107 #ifdef CONFIG_PREEMPT_RT
108 synchronize_rcu();
109 #endif
110 #endif /* (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0)) */
111 }
112
113 void lttng_lock_sessions(void)
114 {
115 mutex_lock(&sessions_mutex);
116 }
117
118 void lttng_unlock_sessions(void)
119 {
120 mutex_unlock(&sessions_mutex);
121 }
122
123 static struct lttng_transport *lttng_transport_find(const char *name)
124 {
125 struct lttng_transport *transport;
126
127 list_for_each_entry(transport, &lttng_transport_list, node) {
128 if (!strcmp(transport->name, name))
129 return transport;
130 }
131 return NULL;
132 }
133
134 /*
135 * Called with sessions lock held.
136 */
137 int lttng_session_active(void)
138 {
139 struct lttng_kernel_session_private *iter;
140
141 list_for_each_entry(iter, &sessions, list) {
142 if (iter->pub->active)
143 return 1;
144 }
145 return 0;
146 }
147
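/*
 * A session is split into a public struct lttng_kernel_session and a
 * private struct lttng_kernel_session_private, allocated separately and
 * cross-linked through ->priv / ->pub. The public part keeps the active
 * flag and the ID trackers; the channel/event lists, enablers and the
 * metadata cache live in the private part.
 */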
148 struct lttng_kernel_session *lttng_session_create(void)
149 {
150 struct lttng_kernel_session *session;
151 struct lttng_kernel_session_private *session_priv;
152 struct lttng_metadata_cache *metadata_cache;
153 int i;
154
155 mutex_lock(&sessions_mutex);
156 session = lttng_kvzalloc(sizeof(*session), GFP_KERNEL);
157 if (!session)
158 goto err;
159 session_priv = lttng_kvzalloc(sizeof(*session_priv), GFP_KERNEL);
160 if (!session_priv)
161 goto err_free_session;
162 session->priv = session_priv;
163 session_priv->pub = session;
164
165 INIT_LIST_HEAD(&session_priv->chan);
166 INIT_LIST_HEAD(&session_priv->events);
167 lttng_guid_gen(&session_priv->uuid);
168
169 metadata_cache = kzalloc(sizeof(struct lttng_metadata_cache),
170 GFP_KERNEL);
171 if (!metadata_cache)
172 goto err_free_session_private;
173 metadata_cache->data = vzalloc(METADATA_CACHE_DEFAULT_SIZE);
174 if (!metadata_cache->data)
175 goto err_free_cache;
176 metadata_cache->cache_alloc = METADATA_CACHE_DEFAULT_SIZE;
177 kref_init(&metadata_cache->refcount);
178 mutex_init(&metadata_cache->lock);
179 session_priv->metadata_cache = metadata_cache;
180 INIT_LIST_HEAD(&metadata_cache->metadata_stream);
181 memcpy(&metadata_cache->uuid, &session_priv->uuid,
182 sizeof(metadata_cache->uuid));
183 INIT_LIST_HEAD(&session_priv->enablers_head);
184 for (i = 0; i < LTTNG_EVENT_HT_SIZE; i++)
185 INIT_HLIST_HEAD(&session_priv->events_ht.table[i]);
186 list_add(&session_priv->list, &sessions);
187
188 if (lttng_id_tracker_init(&session->pid_tracker, session, TRACKER_PID))
189 goto tracker_alloc_error;
190 if (lttng_id_tracker_init(&session->vpid_tracker, session, TRACKER_VPID))
191 goto tracker_alloc_error;
192 if (lttng_id_tracker_init(&session->uid_tracker, session, TRACKER_UID))
193 goto tracker_alloc_error;
194 if (lttng_id_tracker_init(&session->vuid_tracker, session, TRACKER_VUID))
195 goto tracker_alloc_error;
196 if (lttng_id_tracker_init(&session->gid_tracker, session, TRACKER_GID))
197 goto tracker_alloc_error;
198 if (lttng_id_tracker_init(&session->vgid_tracker, session, TRACKER_VGID))
199 goto tracker_alloc_error;
200
201 mutex_unlock(&sessions_mutex);
202
203 return session;
204
205 tracker_alloc_error:
206 lttng_id_tracker_fini(&session->pid_tracker);
207 lttng_id_tracker_fini(&session->vpid_tracker);
208 lttng_id_tracker_fini(&session->uid_tracker);
209 lttng_id_tracker_fini(&session->vuid_tracker);
210 lttng_id_tracker_fini(&session->gid_tracker);
211 lttng_id_tracker_fini(&session->vgid_tracker);
212 err_free_cache:
213 kfree(metadata_cache);
214 err_free_session_private:
215 lttng_kvfree(session_priv);
216 err_free_session:
217 lttng_kvfree(session);
218 err:
219 mutex_unlock(&sessions_mutex);
220 return NULL;
221 }
222
223 static
224 struct lttng_counter_transport *lttng_counter_transport_find(const char *name)
225 {
226 struct lttng_counter_transport *transport;
227
228 list_for_each_entry(transport, &lttng_counter_transport_list, node) {
229 if (!strcmp(transport->name, name))
230 return transport;
231 }
232 return NULL;
233 }
234
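/*
 * Create a counter through a named counter transport: look the transport
 * up, pin its module, then allocate the lttng_counter wrapper and the
 * underlying counter object through the transport ops.
 */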
235 struct lttng_counter *lttng_kernel_counter_create(
236 const char *counter_transport_name,
237 size_t number_dimensions, const size_t *dimensions_sizes)
238 {
239 struct lttng_counter *counter = NULL;
240 struct lttng_counter_transport *counter_transport = NULL;
241
242 counter_transport = lttng_counter_transport_find(counter_transport_name);
243 if (!counter_transport) {
244 printk(KERN_WARNING "LTTng: counter transport %s not found.\n",
245 counter_transport_name);
246 goto notransport;
247 }
248 if (!try_module_get(counter_transport->owner)) {
249 printk(KERN_WARNING "LTTng: Can't lock counter transport module.\n");
250 goto notransport;
251 }
252
253 counter = lttng_kvzalloc(sizeof(struct lttng_counter), GFP_KERNEL);
254 if (!counter)
255 goto nomem;
256
257 /* Create event notifier error counter. */
258 counter->ops = &counter_transport->ops;
259 counter->transport = counter_transport;
260
261 counter->counter = counter->ops->counter_create(
262 number_dimensions, dimensions_sizes, 0);
263 if (!counter->counter) {
264 goto create_error;
265 }
266
267 return counter;
268
269 create_error:
270 lttng_kvfree(counter);
271 nomem:
272 if (counter_transport)
273 module_put(counter_transport->owner);
274 notransport:
275 return NULL;
276 }
277
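/*
 * An event notifier group owns the ring buffer channel used to ship
 * notifier notifications to user space, plus the enabler and notifier
 * lists and hash table. Created and destroyed under the sessions mutex.
 */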
278 struct lttng_event_notifier_group *lttng_event_notifier_group_create(void)
279 {
280 struct lttng_transport *transport = NULL;
281 struct lttng_event_notifier_group *event_notifier_group;
282 const char *transport_name = "relay-event-notifier";
283 size_t subbuf_size = 4096; //TODO
284 size_t num_subbuf = 16; //TODO
285 unsigned int switch_timer_interval = 0;
286 unsigned int read_timer_interval = 0;
287 int i;
288
289 mutex_lock(&sessions_mutex);
290
291 transport = lttng_transport_find(transport_name);
292 if (!transport) {
293 printk(KERN_WARNING "LTTng: transport %s not found\n",
294 transport_name);
295 goto notransport;
296 }
297 if (!try_module_get(transport->owner)) {
298 printk(KERN_WARNING "LTTng: Can't lock transport %s module.\n",
299 transport_name);
300 goto notransport;
301 }
302
303 event_notifier_group = lttng_kvzalloc(sizeof(struct lttng_event_notifier_group),
304 GFP_KERNEL);
305 if (!event_notifier_group)
306 goto nomem;
307
308 /*
309 * Initialize the ring buffer used to store event notifier
310 * notifications.
311 */
312 event_notifier_group->ops = &transport->ops;
313 event_notifier_group->chan = transport->ops.priv->channel_create(
314 transport_name, event_notifier_group, NULL,
315 subbuf_size, num_subbuf, switch_timer_interval,
316 read_timer_interval);
317 if (!event_notifier_group->chan)
318 goto create_error;
319
320 event_notifier_group->transport = transport;
321
322 INIT_LIST_HEAD(&event_notifier_group->enablers_head);
323 INIT_LIST_HEAD(&event_notifier_group->event_notifiers_head);
324 for (i = 0; i < LTTNG_EVENT_NOTIFIER_HT_SIZE; i++)
325 INIT_HLIST_HEAD(&event_notifier_group->event_notifiers_ht.table[i]);
326
327 list_add(&event_notifier_group->node, &event_notifier_groups);
328
329 mutex_unlock(&sessions_mutex);
330
331 return event_notifier_group;
332
333 create_error:
334 lttng_kvfree(event_notifier_group);
335 nomem:
336 if (transport)
337 module_put(transport->owner);
338 notransport:
339 mutex_unlock(&sessions_mutex);
340 return NULL;
341 }
342
343 void metadata_cache_destroy(struct kref *kref)
344 {
345 struct lttng_metadata_cache *cache =
346 container_of(kref, struct lttng_metadata_cache, refcount);
347 vfree(cache->data);
348 kfree(cache);
349 }
350
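/*
 * Tear-down order matters: unregister all probes first (so no new events
 * fire), wait for a tracing grace period with synchronize_trace(), and
 * only then destroy enablers, events and channels and drop the metadata
 * cache reference.
 */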
351 void lttng_session_destroy(struct lttng_kernel_session *session)
352 {
353 struct lttng_channel *chan, *tmpchan;
354 struct lttng_kernel_event_recorder_private *event_recorder_priv, *tmpevent_recorder_priv;
355 struct lttng_metadata_stream *metadata_stream;
356 struct lttng_event_enabler *event_enabler, *tmp_event_enabler;
357 int ret;
358
359 mutex_lock(&sessions_mutex);
360 WRITE_ONCE(session->active, 0);
361 list_for_each_entry(chan, &session->priv->chan, list) {
362 ret = lttng_syscalls_unregister_channel(chan);
363 WARN_ON(ret);
364 }
365 list_for_each_entry(event_recorder_priv, &session->priv->events, node) {
366 ret = _lttng_event_unregister(event_recorder_priv->pub);
367 WARN_ON(ret);
368 }
369 synchronize_trace(); /* Wait for in-flight events to complete */
370 list_for_each_entry(chan, &session->priv->chan, list) {
371 ret = lttng_syscalls_destroy_event(chan);
372 WARN_ON(ret);
373 }
374 list_for_each_entry_safe(event_enabler, tmp_event_enabler,
375 &session->priv->enablers_head, node)
376 lttng_event_enabler_destroy(event_enabler);
377 list_for_each_entry_safe(event_recorder_priv, tmpevent_recorder_priv, &session->priv->events, node)
378 _lttng_event_destroy(&event_recorder_priv->pub->parent);
379 list_for_each_entry_safe(chan, tmpchan, &session->priv->chan, list) {
380 BUG_ON(chan->channel_type == METADATA_CHANNEL);
381 _lttng_channel_destroy(chan);
382 }
383 mutex_lock(&session->priv->metadata_cache->lock);
384 list_for_each_entry(metadata_stream, &session->priv->metadata_cache->metadata_stream, list)
385 _lttng_metadata_channel_hangup(metadata_stream);
386 mutex_unlock(&session->priv->metadata_cache->lock);
387 lttng_id_tracker_fini(&session->pid_tracker);
388 lttng_id_tracker_fini(&session->vpid_tracker);
389 lttng_id_tracker_fini(&session->uid_tracker);
390 lttng_id_tracker_fini(&session->vuid_tracker);
391 lttng_id_tracker_fini(&session->gid_tracker);
392 lttng_id_tracker_fini(&session->vgid_tracker);
393 kref_put(&session->priv->metadata_cache->refcount, metadata_cache_destroy);
394 list_del(&session->priv->list);
395 mutex_unlock(&sessions_mutex);
396 lttng_kvfree(session->priv);
397 lttng_kvfree(session);
398 }
399
400 void lttng_event_notifier_group_destroy(
401 struct lttng_event_notifier_group *event_notifier_group)
402 {
403 struct lttng_event_notifier_enabler *event_notifier_enabler, *tmp_event_notifier_enabler;
404 struct lttng_kernel_event_notifier_private *event_notifier_priv, *tmpevent_notifier_priv;
405 int ret;
406
407 if (!event_notifier_group)
408 return;
409
410 mutex_lock(&sessions_mutex);
411
412 ret = lttng_syscalls_unregister_event_notifier_group(event_notifier_group);
413 WARN_ON(ret);
414
415 list_for_each_entry_safe(event_notifier_priv, tmpevent_notifier_priv,
416 &event_notifier_group->event_notifiers_head, node) {
417 ret = _lttng_event_notifier_unregister(event_notifier_priv->pub);
418 WARN_ON(ret);
419 }
420
 421         /* Wait for in-flight event notifiers to complete */
422 synchronize_trace();
423
424 irq_work_sync(&event_notifier_group->wakeup_pending);
425
426 kfree(event_notifier_group->sc_filter);
427
428 list_for_each_entry_safe(event_notifier_enabler, tmp_event_notifier_enabler,
429 &event_notifier_group->enablers_head, node)
430 lttng_event_notifier_enabler_destroy(event_notifier_enabler);
431
432 list_for_each_entry_safe(event_notifier_priv, tmpevent_notifier_priv,
433 &event_notifier_group->event_notifiers_head, node)
434 _lttng_event_destroy(&event_notifier_priv->pub->parent);
435
436 if (event_notifier_group->error_counter) {
437 struct lttng_counter *error_counter = event_notifier_group->error_counter;
438
439 error_counter->ops->counter_destroy(error_counter->counter);
440 module_put(error_counter->transport->owner);
441 lttng_kvfree(error_counter);
442 event_notifier_group->error_counter = NULL;
443 }
444
445 event_notifier_group->ops->priv->channel_destroy(event_notifier_group->chan);
446 module_put(event_notifier_group->transport->owner);
447 list_del(&event_notifier_group->node);
448
449 mutex_unlock(&sessions_mutex);
450 lttng_kvfree(event_notifier_group);
451 }
452
453 int lttng_session_statedump(struct lttng_kernel_session *session)
454 {
455 int ret;
456
457 mutex_lock(&sessions_mutex);
458 ret = lttng_statedump_start(session);
459 mutex_unlock(&sessions_mutex);
460 return ret;
461 }
462
463 int lttng_session_enable(struct lttng_kernel_session *session)
464 {
465 int ret = 0;
466 struct lttng_channel *chan;
467
468 mutex_lock(&sessions_mutex);
469 if (session->active) {
470 ret = -EBUSY;
471 goto end;
472 }
473
474 /* Set transient enabler state to "enabled" */
475 session->priv->tstate = 1;
476
477 /* We need to sync enablers with session before activation. */
478 lttng_session_sync_event_enablers(session);
479
480 /*
481 * Snapshot the number of events per channel to know the type of header
482 * we need to use.
483 */
484 list_for_each_entry(chan, &session->priv->chan, list) {
485 if (chan->header_type)
486 continue; /* don't change it if session stop/restart */
487 if (chan->free_event_id < 31)
488 chan->header_type = 1; /* compact */
489 else
490 chan->header_type = 2; /* large */
491 }
492
493 /* Clear each stream's quiescent state. */
494 list_for_each_entry(chan, &session->priv->chan, list) {
495 if (chan->channel_type != METADATA_CHANNEL)
496 lib_ring_buffer_clear_quiescent_channel(chan->chan);
497 }
498
499 WRITE_ONCE(session->active, 1);
500 WRITE_ONCE(session->priv->been_active, 1);
501 ret = _lttng_session_metadata_statedump(session);
502 if (ret) {
503 WRITE_ONCE(session->active, 0);
504 goto end;
505 }
506 ret = lttng_statedump_start(session);
507 if (ret)
508 WRITE_ONCE(session->active, 0);
509 end:
510 mutex_unlock(&sessions_mutex);
511 return ret;
512 }
513
514 int lttng_session_disable(struct lttng_kernel_session *session)
515 {
516 int ret = 0;
517 struct lttng_channel *chan;
518
519 mutex_lock(&sessions_mutex);
520 if (!session->active) {
521 ret = -EBUSY;
522 goto end;
523 }
524 WRITE_ONCE(session->active, 0);
525
526 /* Set transient enabler state to "disabled" */
527 session->priv->tstate = 0;
528 lttng_session_sync_event_enablers(session);
529
530 /* Set each stream's quiescent state. */
531 list_for_each_entry(chan, &session->priv->chan, list) {
532 if (chan->channel_type != METADATA_CHANNEL)
533 lib_ring_buffer_set_quiescent_channel(chan->chan);
534 }
535 end:
536 mutex_unlock(&sessions_mutex);
537 return ret;
538 }
539
540 int lttng_session_metadata_regenerate(struct lttng_kernel_session *session)
541 {
542 int ret = 0;
543 struct lttng_channel *chan;
544 struct lttng_kernel_event_recorder_private *event_recorder_priv;
545 struct lttng_metadata_cache *cache = session->priv->metadata_cache;
546 struct lttng_metadata_stream *stream;
547
548 mutex_lock(&sessions_mutex);
549 if (!session->active) {
550 ret = -EBUSY;
551 goto end;
552 }
553
554 mutex_lock(&cache->lock);
555 memset(cache->data, 0, cache->cache_alloc);
556 cache->metadata_written = 0;
557 cache->version++;
558 list_for_each_entry(stream, &session->priv->metadata_cache->metadata_stream, list) {
559 stream->metadata_out = 0;
560 stream->metadata_in = 0;
561 }
562 mutex_unlock(&cache->lock);
563
564 session->priv->metadata_dumped = 0;
565 list_for_each_entry(chan, &session->priv->chan, list) {
566 chan->metadata_dumped = 0;
567 }
568
569 list_for_each_entry(event_recorder_priv, &session->priv->events, node) {
570 event_recorder_priv->metadata_dumped = 0;
571 }
572
573 ret = _lttng_session_metadata_statedump(session);
574
575 end:
576 mutex_unlock(&sessions_mutex);
577 return ret;
578 }
579
580 int lttng_channel_enable(struct lttng_channel *channel)
581 {
582 int ret = 0;
583
584 mutex_lock(&sessions_mutex);
585 if (channel->channel_type == METADATA_CHANNEL) {
586 ret = -EPERM;
587 goto end;
588 }
589 if (channel->enabled) {
590 ret = -EEXIST;
591 goto end;
592 }
593 /* Set transient enabler state to "enabled" */
594 channel->tstate = 1;
595 lttng_session_sync_event_enablers(channel->session);
 596         /* Atomically set the state to "enabled" */
597 WRITE_ONCE(channel->enabled, 1);
598 end:
599 mutex_unlock(&sessions_mutex);
600 return ret;
601 }
602
603 int lttng_channel_disable(struct lttng_channel *channel)
604 {
605 int ret = 0;
606
607 mutex_lock(&sessions_mutex);
608 if (channel->channel_type == METADATA_CHANNEL) {
609 ret = -EPERM;
610 goto end;
611 }
612 if (!channel->enabled) {
613 ret = -EEXIST;
614 goto end;
615 }
 616         /* Atomically set the state to "disabled" */
617 WRITE_ONCE(channel->enabled, 0);
 618         /* Set transient enabler state to "disabled" */
619 channel->tstate = 0;
620 lttng_session_sync_event_enablers(channel->session);
621 end:
622 mutex_unlock(&sessions_mutex);
623 return ret;
624 }
625
626 int lttng_event_enable(struct lttng_kernel_event_common *event)
627 {
628 int ret = 0;
629
630 mutex_lock(&sessions_mutex);
631 switch (event->type) {
632 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
633 {
634 struct lttng_kernel_event_recorder *event_recorder =
635 container_of(event, struct lttng_kernel_event_recorder, parent);
636
637 if (event_recorder->chan->channel_type == METADATA_CHANNEL) {
638 ret = -EPERM;
639 goto end;
640 }
641 break;
642 }
643 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
644 switch (event->priv->instrumentation) {
645 case LTTNG_KERNEL_ABI_KRETPROBE:
646 ret = -EINVAL;
647 goto end;
648 default:
649 break;
650 }
651 break;
652 default:
653 break;
654 }
655
656 if (event->enabled) {
657 ret = -EEXIST;
658 goto end;
659 }
660 switch (event->priv->instrumentation) {
661 case LTTNG_KERNEL_ABI_TRACEPOINT: /* Fall-through */
662 case LTTNG_KERNEL_ABI_SYSCALL:
663 ret = -EINVAL;
664 break;
665
666 case LTTNG_KERNEL_ABI_KPROBE: /* Fall-through */
667 case LTTNG_KERNEL_ABI_UPROBE:
668 WRITE_ONCE(event->enabled, 1);
669 break;
670
671 case LTTNG_KERNEL_ABI_KRETPROBE:
672 ret = lttng_kretprobes_event_enable_state(event, 1);
673 break;
674
675 case LTTNG_KERNEL_ABI_FUNCTION: /* Fall-through */
676 case LTTNG_KERNEL_ABI_NOOP: /* Fall-through */
677 default:
678 WARN_ON_ONCE(1);
679 ret = -EINVAL;
680 }
681 end:
682 mutex_unlock(&sessions_mutex);
683 return ret;
684 }
685
686 int lttng_event_disable(struct lttng_kernel_event_common *event)
687 {
688 int ret = 0;
689
690 mutex_lock(&sessions_mutex);
691 switch (event->type) {
692 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
693 {
694 struct lttng_kernel_event_recorder *event_recorder =
695 container_of(event, struct lttng_kernel_event_recorder, parent);
696
697 if (event_recorder->chan->channel_type == METADATA_CHANNEL) {
698 ret = -EPERM;
699 goto end;
700 }
701 break;
702 }
703 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
704 switch (event->priv->instrumentation) {
705 case LTTNG_KERNEL_ABI_KRETPROBE:
706 ret = -EINVAL;
707 goto end;
708 default:
709 break;
710 }
711 break;
712 default:
713 break;
714 }
715
716 if (!event->enabled) {
717 ret = -EEXIST;
718 goto end;
719 }
720 switch (event->priv->instrumentation) {
721 case LTTNG_KERNEL_ABI_TRACEPOINT: /* Fall-through */
722 case LTTNG_KERNEL_ABI_SYSCALL:
723 ret = -EINVAL;
724 break;
725
726 case LTTNG_KERNEL_ABI_KPROBE: /* Fall-through */
727 case LTTNG_KERNEL_ABI_UPROBE:
728 WRITE_ONCE(event->enabled, 0);
729 break;
730
731 case LTTNG_KERNEL_ABI_KRETPROBE:
732 ret = lttng_kretprobes_event_enable_state(event, 0);
733 break;
734
735 case LTTNG_KERNEL_ABI_FUNCTION: /* Fall-through */
736 case LTTNG_KERNEL_ABI_NOOP: /* Fall-through */
737 default:
738 WARN_ON_ONCE(1);
739 ret = -EINVAL;
740 }
741 end:
742 mutex_unlock(&sessions_mutex);
743 return ret;
744 }
745
746 struct lttng_channel *lttng_channel_create(struct lttng_kernel_session *session,
747 const char *transport_name,
748 void *buf_addr,
749 size_t subbuf_size, size_t num_subbuf,
750 unsigned int switch_timer_interval,
751 unsigned int read_timer_interval,
752 enum channel_type channel_type)
753 {
754 struct lttng_channel *chan;
755 struct lttng_transport *transport = NULL;
756
757 mutex_lock(&sessions_mutex);
758 if (session->priv->been_active && channel_type != METADATA_CHANNEL)
759 goto active; /* Refuse to add channel to active session */
760 transport = lttng_transport_find(transport_name);
761 if (!transport) {
762 printk(KERN_WARNING "LTTng: transport %s not found\n",
763 transport_name);
764 goto notransport;
765 }
766 if (!try_module_get(transport->owner)) {
767 printk(KERN_WARNING "LTTng: Can't lock transport module.\n");
768 goto notransport;
769 }
770 chan = kzalloc(sizeof(struct lttng_channel), GFP_KERNEL);
771 if (!chan)
772 goto nomem;
773 chan->session = session;
774 chan->id = session->priv->free_chan_id++;
775 chan->ops = &transport->ops;
776 /*
777 * Note: the channel creation op already writes into the packet
778 * headers. Therefore the "chan" information used as input
779 * should be already accessible.
780 */
781 chan->chan = transport->ops.priv->channel_create(transport_name,
782 chan, buf_addr, subbuf_size, num_subbuf,
783 switch_timer_interval, read_timer_interval);
784 if (!chan->chan)
785 goto create_error;
786 chan->tstate = 1;
787 chan->enabled = 1;
788 chan->transport = transport;
789 chan->channel_type = channel_type;
790 list_add(&chan->list, &session->priv->chan);
791 mutex_unlock(&sessions_mutex);
792 return chan;
793
794 create_error:
795 kfree(chan);
796 nomem:
797 if (transport)
798 module_put(transport->owner);
799 notransport:
800 active:
801 mutex_unlock(&sessions_mutex);
802 return NULL;
803 }
804
805 /*
806 * Only used internally at session destruction for per-cpu channels, and
807 * when metadata channel is released.
808 * Needs to be called with sessions mutex held.
809 */
810 static
811 void _lttng_channel_destroy(struct lttng_channel *chan)
812 {
813 chan->ops->priv->channel_destroy(chan->chan);
814 module_put(chan->transport->owner);
815 list_del(&chan->list);
816 lttng_kernel_destroy_context(chan->ctx);
817 kfree(chan);
818 }
819
820 void lttng_metadata_channel_destroy(struct lttng_channel *chan)
821 {
822 BUG_ON(chan->channel_type != METADATA_CHANNEL);
823
824 /* Protect the metadata cache with the sessions_mutex. */
825 mutex_lock(&sessions_mutex);
826 _lttng_channel_destroy(chan);
827 mutex_unlock(&sessions_mutex);
828 }
829 EXPORT_SYMBOL_GPL(lttng_metadata_channel_destroy);
830
831 static
832 void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream)
833 {
834 stream->finalized = 1;
835 wake_up_interruptible(&stream->read_wait);
836 }
837
838
839 /*
840 * Supports event creation while tracing session is active.
841 * Needs to be called with sessions mutex held.
842 */
843 struct lttng_kernel_event_recorder *_lttng_kernel_event_recorder_create(struct lttng_channel *chan,
844 struct lttng_kernel_abi_event *event_param,
845 const struct lttng_kernel_event_desc *event_desc,
846 enum lttng_kernel_abi_instrumentation itype)
847 {
848 struct lttng_kernel_session *session = chan->session;
849 struct lttng_kernel_event_recorder *event_recorder;
850 struct lttng_kernel_event_recorder_private *event_recorder_priv;
851 const char *event_name;
852 struct hlist_head *head;
853 int ret;
854
855 if (chan->free_event_id == -1U) {
856 ret = -EMFILE;
857 goto full;
858 }
859
860 switch (itype) {
861 case LTTNG_KERNEL_ABI_TRACEPOINT:
862 event_name = event_desc->event_name;
863 break;
864
865 case LTTNG_KERNEL_ABI_KPROBE: /* Fall-through */
866 case LTTNG_KERNEL_ABI_UPROBE: /* Fall-through */
867 case LTTNG_KERNEL_ABI_KRETPROBE: /* Fall-through */
868 case LTTNG_KERNEL_ABI_SYSCALL:
869 event_name = event_param->name;
870 break;
871
872 case LTTNG_KERNEL_ABI_FUNCTION: /* Fall-through */
873 case LTTNG_KERNEL_ABI_NOOP: /* Fall-through */
874 default:
875 WARN_ON_ONCE(1);
876 ret = -EINVAL;
877 goto type_error;
878 }
879
880 head = utils_borrow_hash_table_bucket(session->priv->events_ht.table,
881 LTTNG_EVENT_HT_SIZE, event_name);
882 lttng_hlist_for_each_entry(event_recorder_priv, head, hlist) {
883 WARN_ON_ONCE(!event_recorder_priv->parent.desc);
884 if (!strncmp(event_recorder_priv->parent.desc->event_name, event_name,
885 LTTNG_KERNEL_ABI_SYM_NAME_LEN - 1)
886 && chan == event_recorder_priv->pub->chan) {
887 ret = -EEXIST;
888 goto exist;
889 }
890 }
891
892 event_recorder = kmem_cache_zalloc(event_recorder_cache, GFP_KERNEL);
893 if (!event_recorder) {
894 ret = -ENOMEM;
895 goto cache_error;
896 }
897 event_recorder_priv = kmem_cache_zalloc(event_recorder_private_cache, GFP_KERNEL);
898 if (!event_recorder_priv) {
899 ret = -ENOMEM;
900 goto cache_private_error;
901 }
902 event_recorder_priv->pub = event_recorder;
903 event_recorder_priv->parent.pub = &event_recorder->parent;
904 event_recorder->priv = event_recorder_priv;
905 event_recorder->parent.priv = &event_recorder_priv->parent;
906 event_recorder->parent.type = LTTNG_KERNEL_EVENT_TYPE_RECORDER;
907
908 event_recorder->parent.run_filter = lttng_kernel_interpret_event_filter;
909 event_recorder->chan = chan;
910 event_recorder->priv->id = chan->free_event_id++;
911 event_recorder->priv->parent.instrumentation = itype;
912 INIT_LIST_HEAD(&event_recorder->priv->parent.filter_bytecode_runtime_head);
913 INIT_LIST_HEAD(&event_recorder->priv->parent.enablers_ref_head);
914
915 switch (itype) {
916 case LTTNG_KERNEL_ABI_TRACEPOINT:
917 /* Event will be enabled by enabler sync. */
918 event_recorder->parent.enabled = 0;
919 event_recorder->priv->parent.registered = 0;
920 event_recorder->priv->parent.desc = lttng_event_desc_get(event_name);
921 if (!event_recorder->priv->parent.desc) {
922 ret = -ENOENT;
923 goto register_error;
924 }
925 /* Populate lttng_event structure before event registration. */
926 smp_wmb();
927 break;
928
929 case LTTNG_KERNEL_ABI_KPROBE:
930 /*
931 * Needs to be explicitly enabled after creation, since
932 * we may want to apply filters.
933 */
934 event_recorder->parent.enabled = 0;
935 event_recorder->priv->parent.registered = 1;
936 /*
937 * Populate lttng_event structure before event
938 * registration.
939 */
940 smp_wmb();
941 ret = lttng_kprobes_register_event(event_name,
942 event_param->u.kprobe.symbol_name,
943 event_param->u.kprobe.offset,
944 event_param->u.kprobe.addr,
945 event_recorder);
946 if (ret) {
947 ret = -EINVAL;
948 goto register_error;
949 }
950 ret = try_module_get(event_recorder->priv->parent.desc->owner);
951 WARN_ON_ONCE(!ret);
952 break;
953
954 case LTTNG_KERNEL_ABI_KRETPROBE:
955 {
956 struct lttng_kernel_event_recorder *event_recorder_return;
957 struct lttng_kernel_event_recorder_private *event_recorder_return_priv;
958
959 /* kretprobe defines 2 events */
960 /*
961 * Needs to be explicitly enabled after creation, since
962 * we may want to apply filters.
963 */
964 event_recorder->parent.enabled = 0;
965 event_recorder->priv->parent.registered = 1;
966
967 event_recorder_return = kmem_cache_zalloc(event_recorder_cache, GFP_KERNEL);
968 if (!event_recorder_return) {
969 ret = -ENOMEM;
970 goto register_error;
971 }
972 event_recorder_return_priv = kmem_cache_zalloc(event_recorder_private_cache, GFP_KERNEL);
973 if (!event_recorder_return_priv) {
974 kmem_cache_free(event_recorder_cache, event_recorder_return);
975 ret = -ENOMEM;
976 goto register_error;
977 }
978 event_recorder_return_priv->pub = event_recorder_return;
979 event_recorder_return_priv->parent.pub = &event_recorder_return->parent;
980 event_recorder_return->priv = event_recorder_return_priv;
981 event_recorder_return->parent.priv = &event_recorder_return_priv->parent;
982 event_recorder_return->parent.type = LTTNG_KERNEL_EVENT_TYPE_RECORDER;
983
984 event_recorder_return->parent.run_filter = lttng_kernel_interpret_event_filter;
985 event_recorder_return->chan = chan;
986 event_recorder_return->priv->id = chan->free_event_id++;
987 event_recorder_return->priv->parent.instrumentation = itype;
988 event_recorder_return->parent.enabled = 0;
989 event_recorder_return->priv->parent.registered = 1;
990 INIT_LIST_HEAD(&event_recorder_return->priv->parent.filter_bytecode_runtime_head);
991 INIT_LIST_HEAD(&event_recorder_return->priv->parent.enablers_ref_head);
992 /*
993 * Populate lttng_event structure before kretprobe registration.
994 */
995 smp_wmb();
996 ret = lttng_kretprobes_register(event_name,
997 event_param->u.kretprobe.symbol_name,
998 event_param->u.kretprobe.offset,
999 event_param->u.kretprobe.addr,
1000 event_recorder, event_recorder_return);
1001 if (ret) {
1002 kmem_cache_free(event_recorder_private_cache, event_recorder_return_priv);
1003 kmem_cache_free(event_recorder_cache, event_recorder_return);
1004 ret = -EINVAL;
1005 goto register_error;
1006 }
1007 /* Take 2 refs on the module: one per event. */
1008 ret = try_module_get(event_recorder->priv->parent.desc->owner);
1009 WARN_ON_ONCE(!ret);
1010 ret = try_module_get(event_recorder_return->priv->parent.desc->owner);
1011 WARN_ON_ONCE(!ret);
1012 ret = _lttng_event_metadata_statedump(chan->session, chan,
1013 event_recorder_return);
1014 WARN_ON_ONCE(ret > 0);
1015 if (ret) {
1016 kmem_cache_free(event_recorder_private_cache, event_recorder_return_priv);
1017 kmem_cache_free(event_recorder_cache, event_recorder_return);
1018 module_put(event_recorder_return->priv->parent.desc->owner);
1019 module_put(event_recorder->priv->parent.desc->owner);
1020 goto statedump_error;
1021 }
1022 list_add(&event_recorder_return->priv->node, &chan->session->priv->events);
1023 break;
1024 }
1025
1026 case LTTNG_KERNEL_ABI_SYSCALL:
1027 /*
1028 * Needs to be explicitly enabled after creation, since
1029 * we may want to apply filters.
1030 */
1031 event_recorder->parent.enabled = 0;
1032 event_recorder->priv->parent.registered = 0;
1033 event_recorder->priv->parent.desc = event_desc;
1034 switch (event_param->u.syscall.entryexit) {
1035 case LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT:
1036 ret = -EINVAL;
1037 goto register_error;
1038 case LTTNG_KERNEL_ABI_SYSCALL_ENTRY:
1039 event_recorder->priv->parent.u.syscall.entryexit = LTTNG_SYSCALL_ENTRY;
1040 break;
1041 case LTTNG_KERNEL_ABI_SYSCALL_EXIT:
1042 event_recorder->priv->parent.u.syscall.entryexit = LTTNG_SYSCALL_EXIT;
1043 break;
1044 }
1045 switch (event_param->u.syscall.abi) {
1046 case LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL:
1047 ret = -EINVAL;
1048 goto register_error;
1049 case LTTNG_KERNEL_ABI_SYSCALL_ABI_NATIVE:
1050 event_recorder->priv->parent.u.syscall.abi = LTTNG_SYSCALL_ABI_NATIVE;
1051 break;
1052 case LTTNG_KERNEL_ABI_SYSCALL_ABI_COMPAT:
1053 event_recorder->priv->parent.u.syscall.abi = LTTNG_SYSCALL_ABI_COMPAT;
1054 break;
1055 }
1056 if (!event_recorder->priv->parent.desc) {
1057 ret = -EINVAL;
1058 goto register_error;
1059 }
1060 break;
1061
1062 case LTTNG_KERNEL_ABI_UPROBE:
1063 /*
1064 * Needs to be explicitly enabled after creation, since
1065 * we may want to apply filters.
1066 */
1067 event_recorder->parent.enabled = 0;
1068 event_recorder->priv->parent.registered = 1;
1069
1070 /*
1071 * Populate lttng_event structure before event
1072 * registration.
1073 */
1074 smp_wmb();
1075
1076 ret = lttng_uprobes_register_event(event_param->name,
1077 event_param->u.uprobe.fd,
1078 event_recorder);
1079 if (ret)
1080 goto register_error;
1081 ret = try_module_get(event_recorder->priv->parent.desc->owner);
1082 WARN_ON_ONCE(!ret);
1083 break;
1084
1085 case LTTNG_KERNEL_ABI_FUNCTION: /* Fall-through */
1086 case LTTNG_KERNEL_ABI_NOOP: /* Fall-through */
1087 default:
1088 WARN_ON_ONCE(1);
1089 ret = -EINVAL;
1090 goto register_error;
1091 }
1092 ret = _lttng_event_metadata_statedump(chan->session, chan, event_recorder);
1093 WARN_ON_ONCE(ret > 0);
1094 if (ret) {
1095 goto statedump_error;
1096 }
1097 hlist_add_head(&event_recorder->priv->hlist, head);
1098 list_add(&event_recorder->priv->node, &chan->session->priv->events);
1099 return event_recorder;
1100
1101 statedump_error:
1102 /* If a statedump error occurs, events will not be readable. */
1103 register_error:
1104 kmem_cache_free(event_recorder_private_cache, event_recorder_priv);
1105 cache_private_error:
1106 kmem_cache_free(event_recorder_cache, event_recorder);
1107 cache_error:
1108 exist:
1109 type_error:
1110 full:
1111 return ERR_PTR(ret);
1112 }
1113
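/*
 * Needs to be called with sessions mutex held (taken by
 * lttng_event_notifier_create()). Duplicates are rejected based on the
 * event name, owning group and user token.
 */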
1114 struct lttng_kernel_event_notifier *_lttng_event_notifier_create(
1115 const struct lttng_kernel_event_desc *event_desc,
1116 uint64_t token, uint64_t error_counter_index,
1117 struct lttng_event_notifier_group *event_notifier_group,
1118 struct lttng_kernel_abi_event_notifier *event_notifier_param,
1119 enum lttng_kernel_abi_instrumentation itype)
1120 {
1121 struct lttng_kernel_event_notifier *event_notifier;
1122 struct lttng_kernel_event_notifier_private *event_notifier_priv;
1123 struct lttng_counter *error_counter;
1124 const char *event_name;
1125 struct hlist_head *head;
1126 int ret;
1127
1128 switch (itype) {
1129 case LTTNG_KERNEL_ABI_TRACEPOINT:
1130 event_name = event_desc->event_name;
1131 break;
1132
1133 case LTTNG_KERNEL_ABI_KPROBE: /* Fall-through */
1134 case LTTNG_KERNEL_ABI_UPROBE: /* Fall-through */
1135 case LTTNG_KERNEL_ABI_SYSCALL:
1136 event_name = event_notifier_param->event.name;
1137 break;
1138
1139 case LTTNG_KERNEL_ABI_KRETPROBE: /* Fall-through */
1140 case LTTNG_KERNEL_ABI_FUNCTION: /* Fall-through */
1141 case LTTNG_KERNEL_ABI_NOOP: /* Fall-through */
1142 default:
1143 WARN_ON_ONCE(1);
1144 ret = -EINVAL;
1145 goto type_error;
1146 }
1147
1148 head = utils_borrow_hash_table_bucket(event_notifier_group->event_notifiers_ht.table,
1149 LTTNG_EVENT_NOTIFIER_HT_SIZE, event_name);
1150 lttng_hlist_for_each_entry(event_notifier_priv, head, hlist) {
1151 WARN_ON_ONCE(!event_notifier_priv->parent.desc);
1152 if (!strncmp(event_notifier_priv->parent.desc->event_name, event_name,
1153 LTTNG_KERNEL_ABI_SYM_NAME_LEN - 1)
1154 && event_notifier_group == event_notifier_priv->group
1155 && token == event_notifier_priv->parent.user_token) {
1156 ret = -EEXIST;
1157 goto exist;
1158 }
1159 }
1160
1161 event_notifier = kmem_cache_zalloc(event_notifier_cache, GFP_KERNEL);
1162 if (!event_notifier) {
1163 ret = -ENOMEM;
1164 goto cache_error;
1165 }
1166 event_notifier_priv = kmem_cache_zalloc(event_notifier_private_cache, GFP_KERNEL);
1167 if (!event_notifier_priv) {
1168 ret = -ENOMEM;
1169 goto cache_private_error;
1170 }
1171 event_notifier_priv->pub = event_notifier;
1172 event_notifier_priv->parent.pub = &event_notifier->parent;
1173 event_notifier->priv = event_notifier_priv;
1174 event_notifier->parent.priv = &event_notifier_priv->parent;
1175 event_notifier->parent.type = LTTNG_KERNEL_EVENT_TYPE_NOTIFIER;
1176
1177 event_notifier->priv->group = event_notifier_group;
1178 event_notifier->priv->parent.user_token = token;
1179 event_notifier->priv->error_counter_index = error_counter_index;
1180 event_notifier->priv->num_captures = 0;
1181 event_notifier->priv->parent.instrumentation = itype;
1182 event_notifier->notification_send = lttng_event_notifier_notification_send;
1183 INIT_LIST_HEAD(&event_notifier->priv->parent.filter_bytecode_runtime_head);
1184 INIT_LIST_HEAD(&event_notifier->priv->parent.enablers_ref_head);
1185 INIT_LIST_HEAD(&event_notifier->priv->capture_bytecode_runtime_head);
1186 event_notifier->parent.run_filter = lttng_kernel_interpret_event_filter;
1187
1188 switch (itype) {
1189 case LTTNG_KERNEL_ABI_TRACEPOINT:
1190 /* Event will be enabled by enabler sync. */
1191 event_notifier->parent.enabled = 0;
1192 event_notifier->priv->parent.registered = 0;
1193 event_notifier->priv->parent.desc = lttng_event_desc_get(event_name);
1194 if (!event_notifier->priv->parent.desc) {
1195 ret = -ENOENT;
1196 goto register_error;
1197 }
1198 /* Populate lttng_event_notifier structure before event registration. */
1199 smp_wmb();
1200 break;
1201
1202 case LTTNG_KERNEL_ABI_KPROBE:
1203 /*
1204 * Needs to be explicitly enabled after creation, since
1205 * we may want to apply filters.
1206 */
1207 event_notifier->parent.enabled = 0;
1208 event_notifier->priv->parent.registered = 1;
1209 /*
1210 * Populate lttng_event_notifier structure before event
1211 * registration.
1212 */
1213 smp_wmb();
1214 ret = lttng_kprobes_register_event_notifier(
1215 event_notifier_param->event.u.kprobe.symbol_name,
1216 event_notifier_param->event.u.kprobe.offset,
1217 event_notifier_param->event.u.kprobe.addr,
1218 event_notifier);
1219 if (ret) {
1220 ret = -EINVAL;
1221 goto register_error;
1222 }
1223 ret = try_module_get(event_notifier->priv->parent.desc->owner);
1224 WARN_ON_ONCE(!ret);
1225 break;
1226
1227 case LTTNG_KERNEL_ABI_SYSCALL:
1228 /*
1229 * Needs to be explicitly enabled after creation, since
1230 * we may want to apply filters.
1231 */
1232 event_notifier->parent.enabled = 0;
1233 event_notifier->priv->parent.registered = 0;
1234 event_notifier->priv->parent.desc = event_desc;
1235 switch (event_notifier_param->event.u.syscall.entryexit) {
1236 case LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT:
1237 ret = -EINVAL;
1238 goto register_error;
1239 case LTTNG_KERNEL_ABI_SYSCALL_ENTRY:
1240 event_notifier->priv->parent.u.syscall.entryexit = LTTNG_SYSCALL_ENTRY;
1241 break;
1242 case LTTNG_KERNEL_ABI_SYSCALL_EXIT:
1243 event_notifier->priv->parent.u.syscall.entryexit = LTTNG_SYSCALL_EXIT;
1244 break;
1245 }
1246 switch (event_notifier_param->event.u.syscall.abi) {
1247 case LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL:
1248 ret = -EINVAL;
1249 goto register_error;
1250 case LTTNG_KERNEL_ABI_SYSCALL_ABI_NATIVE:
1251 event_notifier->priv->parent.u.syscall.abi = LTTNG_SYSCALL_ABI_NATIVE;
1252 break;
1253 case LTTNG_KERNEL_ABI_SYSCALL_ABI_COMPAT:
1254 event_notifier->priv->parent.u.syscall.abi = LTTNG_SYSCALL_ABI_COMPAT;
1255 break;
1256 }
1257
1258 if (!event_notifier->priv->parent.desc) {
1259 ret = -EINVAL;
1260 goto register_error;
1261 }
1262 break;
1263
1264 case LTTNG_KERNEL_ABI_UPROBE:
1265 /*
1266 * Needs to be explicitly enabled after creation, since
1267 * we may want to apply filters.
1268 */
1269 event_notifier->parent.enabled = 0;
1270 event_notifier->priv->parent.registered = 1;
1271
1272 /*
1273 * Populate lttng_event_notifier structure before
1274 * event_notifier registration.
1275 */
1276 smp_wmb();
1277
1278 ret = lttng_uprobes_register_event_notifier(
1279 event_notifier_param->event.name,
1280 event_notifier_param->event.u.uprobe.fd,
1281 event_notifier);
1282 if (ret)
1283 goto register_error;
1284 ret = try_module_get(event_notifier->priv->parent.desc->owner);
1285 WARN_ON_ONCE(!ret);
1286 break;
1287
1288 case LTTNG_KERNEL_ABI_KRETPROBE: /* Fall-through */
1289 case LTTNG_KERNEL_ABI_FUNCTION: /* Fall-through */
1290 case LTTNG_KERNEL_ABI_NOOP: /* Fall-through */
1291 default:
1292 WARN_ON_ONCE(1);
1293 ret = -EINVAL;
1294 goto register_error;
1295 }
1296
1297 list_add(&event_notifier->priv->node, &event_notifier_group->event_notifiers_head);
1298 hlist_add_head(&event_notifier->priv->hlist, head);
1299
1300 /*
1301 * Clear the error counter bucket. The sessiond keeps track of which
1302 * bucket is currently in use. We trust it. The session lock
1303 * synchronizes against concurrent creation of the error
1304 * counter.
1305 */
1306 error_counter = event_notifier_group->error_counter;
1307 if (error_counter) {
1308 size_t dimension_index[1];
1309
1310 /*
1311 * Check that the index is within the boundary of the counter.
1312 */
1313 if (event_notifier->priv->error_counter_index >= event_notifier_group->error_counter_len) {
1314 printk(KERN_INFO "LTTng: event_notifier: Error counter index out-of-bound: counter-len=%zu, index=%llu\n",
1315 event_notifier_group->error_counter_len, event_notifier->priv->error_counter_index);
1316 ret = -EINVAL;
1317 goto register_error;
1318 }
1319
1320 dimension_index[0] = event_notifier->priv->error_counter_index;
1321 ret = error_counter->ops->counter_clear(error_counter->counter, dimension_index);
1322 if (ret) {
1323 printk(KERN_INFO "LTTng: event_notifier: Unable to clear error counter bucket %llu\n",
1324 event_notifier->priv->error_counter_index);
1325 goto register_error;
1326 }
1327 }
1328
1329 return event_notifier;
1330
1331 register_error:
1332 kmem_cache_free(event_notifier_private_cache, event_notifier_priv);
1333 cache_private_error:
1334 kmem_cache_free(event_notifier_cache, event_notifier);
1335 cache_error:
1336 exist:
1337 type_error:
1338 return ERR_PTR(ret);
1339 }
1340
1341 int lttng_kernel_counter_read(struct lttng_counter *counter,
1342 const size_t *dim_indexes, int32_t cpu,
1343 int64_t *val, bool *overflow, bool *underflow)
1344 {
1345 return counter->ops->counter_read(counter->counter, dim_indexes,
1346 cpu, val, overflow, underflow);
1347 }
1348
1349 int lttng_kernel_counter_aggregate(struct lttng_counter *counter,
1350 const size_t *dim_indexes, int64_t *val,
1351 bool *overflow, bool *underflow)
1352 {
1353 return counter->ops->counter_aggregate(counter->counter, dim_indexes,
1354 val, overflow, underflow);
1355 }
1356
1357 int lttng_kernel_counter_clear(struct lttng_counter *counter,
1358 const size_t *dim_indexes)
1359 {
1360 return counter->ops->counter_clear(counter->counter, dim_indexes);
1361 }
1362
1363 struct lttng_kernel_event_recorder *lttng_kernel_event_recorder_create(struct lttng_channel *chan,
1364 struct lttng_kernel_abi_event *event_param,
1365 const struct lttng_kernel_event_desc *event_desc,
1366 enum lttng_kernel_abi_instrumentation itype)
1367 {
1368 struct lttng_kernel_event_recorder *event;
1369
1370 mutex_lock(&sessions_mutex);
1371 event = _lttng_kernel_event_recorder_create(chan, event_param, event_desc, itype);
1372 mutex_unlock(&sessions_mutex);
1373 return event;
1374 }
1375
1376 struct lttng_kernel_event_notifier *lttng_event_notifier_create(
1377 const struct lttng_kernel_event_desc *event_desc,
1378 uint64_t id, uint64_t error_counter_index,
1379 struct lttng_event_notifier_group *event_notifier_group,
1380 struct lttng_kernel_abi_event_notifier *event_notifier_param,
1381 enum lttng_kernel_abi_instrumentation itype)
1382 {
1383 struct lttng_kernel_event_notifier *event_notifier;
1384
1385 mutex_lock(&sessions_mutex);
1386 event_notifier = _lttng_event_notifier_create(event_desc, id,
1387 error_counter_index, event_notifier_group,
1388 event_notifier_param, itype);
1389 mutex_unlock(&sessions_mutex);
1390 return event_notifier;
1391 }
1392
1393 /* Only used for tracepoints for now. */
1394 static
1395 void register_event(struct lttng_kernel_event_recorder *event_recorder)
1396 {
1397 const struct lttng_kernel_event_desc *desc;
1398 int ret = -EINVAL;
1399
1400 if (event_recorder->priv->parent.registered)
1401 return;
1402
1403 desc = event_recorder->priv->parent.desc;
1404 switch (event_recorder->priv->parent.instrumentation) {
1405 case LTTNG_KERNEL_ABI_TRACEPOINT:
1406 ret = lttng_wrapper_tracepoint_probe_register(desc->event_kname,
1407 desc->probe_callback,
1408 event_recorder);
1409 break;
1410
1411 case LTTNG_KERNEL_ABI_SYSCALL:
1412 ret = lttng_syscall_filter_enable_event(event_recorder->chan, event_recorder);
1413 break;
1414
1415 case LTTNG_KERNEL_ABI_KPROBE: /* Fall-through */
1416 case LTTNG_KERNEL_ABI_UPROBE: /* Fall-through */
1417 case LTTNG_KERNEL_ABI_KRETPROBE:
1418 ret = 0;
1419 break;
1420
1421 case LTTNG_KERNEL_ABI_FUNCTION: /* Fall-through */
1422 case LTTNG_KERNEL_ABI_NOOP: /* Fall-through */
1423 default:
1424 WARN_ON_ONCE(1);
1425 }
1426 if (!ret)
1427 event_recorder->priv->parent.registered = 1;
1428 }
1429
1430 /*
1431 * Only used internally at session destruction.
1432 */
1433 int _lttng_event_unregister(struct lttng_kernel_event_recorder *event_recorder)
1434 {
1435 struct lttng_kernel_event_common_private *event_priv = &event_recorder->priv->parent;
1436 const struct lttng_kernel_event_desc *desc;
1437 int ret = -EINVAL;
1438
1439 if (!event_priv->registered)
1440 return 0;
1441
1442 desc = event_priv->desc;
1443 switch (event_priv->instrumentation) {
1444 case LTTNG_KERNEL_ABI_TRACEPOINT:
1445 ret = lttng_wrapper_tracepoint_probe_unregister(event_priv->desc->event_kname,
1446 event_priv->desc->probe_callback,
1447 event_recorder);
1448 break;
1449
1450 case LTTNG_KERNEL_ABI_KPROBE:
1451 lttng_kprobes_unregister_event(event_recorder);
1452 ret = 0;
1453 break;
1454
1455 case LTTNG_KERNEL_ABI_KRETPROBE:
1456 lttng_kretprobes_unregister(event_recorder);
1457 ret = 0;
1458 break;
1459
1460 case LTTNG_KERNEL_ABI_SYSCALL:
1461 ret = lttng_syscall_filter_disable_event(event_recorder->chan, event_recorder);
1462 break;
1463
1464 case LTTNG_KERNEL_ABI_NOOP:
1465 ret = 0;
1466 break;
1467
1468 case LTTNG_KERNEL_ABI_UPROBE:
1469 lttng_uprobes_unregister_event(event_recorder);
1470 ret = 0;
1471 break;
1472
1473 case LTTNG_KERNEL_ABI_FUNCTION: /* Fall-through */
1474 default:
1475 WARN_ON_ONCE(1);
1476 }
1477 if (!ret)
1478 event_priv->registered = 0;
1479 return ret;
1480 }
1481
1482 /* Only used for tracepoints for now. */
1483 static
1484 void register_event_notifier(struct lttng_kernel_event_notifier *event_notifier)
1485 {
1486 const struct lttng_kernel_event_desc *desc;
1487 int ret = -EINVAL;
1488
1489 if (event_notifier->priv->parent.registered)
1490 return;
1491
1492 desc = event_notifier->priv->parent.desc;
1493 switch (event_notifier->priv->parent.instrumentation) {
1494 case LTTNG_KERNEL_ABI_TRACEPOINT:
1495 ret = lttng_wrapper_tracepoint_probe_register(desc->event_kname,
1496 desc->probe_callback,
1497 event_notifier);
1498 break;
1499
1500 case LTTNG_KERNEL_ABI_SYSCALL:
1501 ret = lttng_syscall_filter_enable_event_notifier(event_notifier);
1502 break;
1503
1504 case LTTNG_KERNEL_ABI_KPROBE: /* Fall-through */
1505 case LTTNG_KERNEL_ABI_UPROBE:
1506 ret = 0;
1507 break;
1508
1509 case LTTNG_KERNEL_ABI_KRETPROBE: /* Fall-through */
1510 case LTTNG_KERNEL_ABI_FUNCTION: /* Fall-through */
1511 case LTTNG_KERNEL_ABI_NOOP: /* Fall-through */
1512 default:
1513 WARN_ON_ONCE(1);
1514 }
1515 if (!ret)
1516 event_notifier->priv->parent.registered = 1;
1517 }
1518
1519 static
1520 int _lttng_event_notifier_unregister(
1521 struct lttng_kernel_event_notifier *event_notifier)
1522 {
1523 const struct lttng_kernel_event_desc *desc;
1524 int ret = -EINVAL;
1525
1526 if (!event_notifier->priv->parent.registered)
1527 return 0;
1528
1529 desc = event_notifier->priv->parent.desc;
1530 switch (event_notifier->priv->parent.instrumentation) {
1531 case LTTNG_KERNEL_ABI_TRACEPOINT:
1532 ret = lttng_wrapper_tracepoint_probe_unregister(event_notifier->priv->parent.desc->event_kname,
1533 event_notifier->priv->parent.desc->probe_callback,
1534 event_notifier);
1535 break;
1536
1537 case LTTNG_KERNEL_ABI_KPROBE:
1538 lttng_kprobes_unregister_event_notifier(event_notifier);
1539 ret = 0;
1540 break;
1541
1542 case LTTNG_KERNEL_ABI_UPROBE:
1543 lttng_uprobes_unregister_event_notifier(event_notifier);
1544 ret = 0;
1545 break;
1546
1547 case LTTNG_KERNEL_ABI_SYSCALL:
1548 ret = lttng_syscall_filter_disable_event_notifier(event_notifier);
1549 break;
1550
1551 case LTTNG_KERNEL_ABI_KRETPROBE: /* Fall-through */
1552 case LTTNG_KERNEL_ABI_FUNCTION: /* Fall-through */
1553 case LTTNG_KERNEL_ABI_NOOP: /* Fall-through */
1554 default:
1555 WARN_ON_ONCE(1);
1556 }
1557 if (!ret)
1558 event_notifier->priv->parent.registered = 0;
1559 return ret;
1560 }
1561
1562 /*
1563 * Only used internally at session destruction.
1564 */
1565 static
1566 void _lttng_event_destroy(struct lttng_kernel_event_common *event)
1567 {
1568 struct lttng_kernel_event_common_private *event_priv = event->priv;
1569 struct lttng_enabler_ref *enabler_ref, *tmp_enabler_ref;
1570
1571 lttng_free_event_filter_runtime(event);
1572 /* Free event enabler refs */
1573 list_for_each_entry_safe(enabler_ref, tmp_enabler_ref,
1574 &event_priv->enablers_ref_head, node)
1575 kfree(enabler_ref);
1576
1577 switch (event->type) {
1578 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
1579 {
1580 struct lttng_kernel_event_recorder *event_recorder =
1581 container_of(event, struct lttng_kernel_event_recorder, parent);
1582
1583 switch (event_priv->instrumentation) {
1584 case LTTNG_KERNEL_ABI_TRACEPOINT:
1585 lttng_event_desc_put(event_priv->desc);
1586 break;
1587
1588 case LTTNG_KERNEL_ABI_KPROBE:
1589 module_put(event_priv->desc->owner);
1590 lttng_kprobes_destroy_event_private(event_recorder);
1591 break;
1592
1593 case LTTNG_KERNEL_ABI_KRETPROBE:
1594 module_put(event_priv->desc->owner);
1595 lttng_kretprobes_destroy_private(event_recorder);
1596 break;
1597
1598 case LTTNG_KERNEL_ABI_SYSCALL:
1599 break;
1600
1601 case LTTNG_KERNEL_ABI_UPROBE:
1602 module_put(event_priv->desc->owner);
1603 lttng_uprobes_destroy_event_private(event_recorder);
1604 break;
1605
1606 case LTTNG_KERNEL_ABI_FUNCTION: /* Fall-through */
1607 case LTTNG_KERNEL_ABI_NOOP: /* Fall-through */
1608 default:
1609 WARN_ON_ONCE(1);
1610 }
1611 list_del(&event_recorder->priv->node);
1612 kmem_cache_free(event_recorder_private_cache, event_recorder->priv);
1613 kmem_cache_free(event_recorder_cache, event_recorder);
1614 break;
1615 }
1616 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
1617 {
1618 struct lttng_kernel_event_notifier *event_notifier =
1619 container_of(event, struct lttng_kernel_event_notifier, parent);
1620
1621 switch (event_notifier->priv->parent.instrumentation) {
1622 case LTTNG_KERNEL_ABI_TRACEPOINT:
1623 lttng_event_desc_put(event_notifier->priv->parent.desc);
1624 break;
1625
1626 case LTTNG_KERNEL_ABI_KPROBE:
1627 module_put(event_notifier->priv->parent.desc->owner);
1628 lttng_kprobes_destroy_event_notifier_private(event_notifier);
1629 break;
1630
1631 case LTTNG_KERNEL_ABI_SYSCALL:
1632 break;
1633
1634 case LTTNG_KERNEL_ABI_UPROBE:
1635 module_put(event_notifier->priv->parent.desc->owner);
1636 lttng_uprobes_destroy_event_notifier_private(event_notifier);
1637 break;
1638
1639 case LTTNG_KERNEL_ABI_KRETPROBE: /* Fall-through */
1640 case LTTNG_KERNEL_ABI_FUNCTION: /* Fall-through */
1641 case LTTNG_KERNEL_ABI_NOOP: /* Fall-through */
1642 default:
1643 WARN_ON_ONCE(1);
1644 }
1645 list_del(&event_notifier->priv->node);
1646 kmem_cache_free(event_notifier_private_cache, event_notifier->priv);
1647 kmem_cache_free(event_notifier_cache, event_notifier);
1648 break;
1649 }
1650 default:
1651 WARN_ON_ONCE(1);
1652 }
1653 }
1654
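/* Map a tracker type to the matching per-session ID tracker (public part). */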
1655 struct lttng_kernel_id_tracker *get_tracker(struct lttng_kernel_session *session,
1656 enum tracker_type tracker_type)
1657 {
1658 switch (tracker_type) {
1659 case TRACKER_PID:
1660 return &session->pid_tracker;
1661 case TRACKER_VPID:
1662 return &session->vpid_tracker;
1663 case TRACKER_UID:
1664 return &session->uid_tracker;
1665 case TRACKER_VUID:
1666 return &session->vuid_tracker;
1667 case TRACKER_GID:
1668 return &session->gid_tracker;
1669 case TRACKER_VGID:
1670 return &session->vgid_tracker;
1671 default:
1672 WARN_ON_ONCE(1);
1673 return NULL;
1674 }
1675 }
1676
1677 int lttng_session_track_id(struct lttng_kernel_session *session,
1678 enum tracker_type tracker_type, int id)
1679 {
1680 struct lttng_kernel_id_tracker *tracker;
1681 int ret;
1682
1683 tracker = get_tracker(session, tracker_type);
1684 if (!tracker)
1685 return -EINVAL;
1686 if (id < -1)
1687 return -EINVAL;
1688 mutex_lock(&sessions_mutex);
1689 if (id == -1) {
1690 /* track all ids: destroy tracker. */
1691 lttng_id_tracker_destroy(tracker, true);
1692 ret = 0;
1693 } else {
1694 ret = lttng_id_tracker_add(tracker, id);
1695 }
1696 mutex_unlock(&sessions_mutex);
1697 return ret;
1698 }
1699
1700 int lttng_session_untrack_id(struct lttng_kernel_session *session,
1701 enum tracker_type tracker_type, int id)
1702 {
1703 struct lttng_kernel_id_tracker *tracker;
1704 int ret;
1705
1706 tracker = get_tracker(session, tracker_type);
1707 if (!tracker)
1708 return -EINVAL;
1709 if (id < -1)
1710 return -EINVAL;
1711 mutex_lock(&sessions_mutex);
1712 if (id == -1) {
1713 /* untrack all ids: replace by empty tracker. */
1714 ret = lttng_id_tracker_empty_set(tracker);
1715 } else {
1716 ret = lttng_id_tracker_del(tracker, id);
1717 }
1718 mutex_unlock(&sessions_mutex);
1719 return ret;
1720 }
1721
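/*
 * seq_file iterator over a tracker's hash table. A NULL (disabled)
 * tracker means "track everything" and is reported as a single entry
 * with id -1 by id_list_show().
 */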
1722 static
1723 void *id_list_start(struct seq_file *m, loff_t *pos)
1724 {
1725 struct lttng_kernel_id_tracker *id_tracker = m->private;
1726 struct lttng_kernel_id_tracker_rcu *id_tracker_p = id_tracker->p;
1727 struct lttng_id_hash_node *e;
1728 int iter = 0, i;
1729
1730 mutex_lock(&sessions_mutex);
1731 if (id_tracker_p) {
1732 for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
1733 struct hlist_head *head = &id_tracker_p->id_hash[i];
1734
1735 lttng_hlist_for_each_entry(e, head, hlist) {
1736 if (iter++ >= *pos)
1737 return e;
1738 }
1739 }
1740 } else {
1741 /* ID tracker disabled. */
1742 if (iter >= *pos && iter == 0) {
1743 return id_tracker_p; /* empty tracker */
1744 }
1745 iter++;
1746 }
1747 /* End of list */
1748 return NULL;
1749 }
1750
1751 /* Called with sessions_mutex held. */
1752 static
1753 void *id_list_next(struct seq_file *m, void *p, loff_t *ppos)
1754 {
1755 struct lttng_kernel_id_tracker *id_tracker = m->private;
1756 struct lttng_kernel_id_tracker_rcu *id_tracker_p = id_tracker->p;
1757 struct lttng_id_hash_node *e;
1758 int iter = 0, i;
1759
1760 (*ppos)++;
1761 if (id_tracker_p) {
1762 for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
1763 struct hlist_head *head = &id_tracker_p->id_hash[i];
1764
1765 lttng_hlist_for_each_entry(e, head, hlist) {
1766 if (iter++ >= *ppos)
1767 return e;
1768 }
1769 }
1770 } else {
1771 /* ID tracker disabled. */
1772 if (iter >= *ppos && iter == 0)
1773 return p; /* empty tracker */
1774 iter++;
1775 }
1776
1777 /* End of list */
1778 return NULL;
1779 }
1780
1781 static
1782 void id_list_stop(struct seq_file *m, void *p)
1783 {
1784 mutex_unlock(&sessions_mutex);
1785 }
1786
1787 static
1788 int id_list_show(struct seq_file *m, void *p)
1789 {
1790 struct lttng_kernel_id_tracker *id_tracker = m->private;
1791 struct lttng_kernel_id_tracker_rcu *id_tracker_p = id_tracker->p;
1792 int id;
1793
1794 if (p == id_tracker_p) {
1795 /* Tracker disabled. */
1796 id = -1;
1797 } else {
1798 const struct lttng_id_hash_node *e = p;
1799
1800 id = lttng_id_tracker_get_node_id(e);
1801 }
1802 switch (id_tracker->priv->tracker_type) {
1803 case TRACKER_PID:
1804 seq_printf(m, "process { pid = %d; };\n", id);
1805 break;
1806 case TRACKER_VPID:
1807 seq_printf(m, "process { vpid = %d; };\n", id);
1808 break;
1809 case TRACKER_UID:
1810 seq_printf(m, "user { uid = %d; };\n", id);
1811 break;
1812 case TRACKER_VUID:
1813 seq_printf(m, "user { vuid = %d; };\n", id);
1814 break;
1815 case TRACKER_GID:
1816 seq_printf(m, "group { gid = %d; };\n", id);
1817 break;
1818 case TRACKER_VGID:
1819 seq_printf(m, "group { vgid = %d; };\n", id);
1820 break;
1821 default:
1822 seq_printf(m, "UNKNOWN { field = %d };\n", id);
1823 }
1824 return 0;
1825 }
1826
1827 static
1828 const struct seq_operations lttng_tracker_ids_list_seq_ops = {
1829 .start = id_list_start,
1830 .next = id_list_next,
1831 .stop = id_list_stop,
1832 .show = id_list_show,
1833 };
1834
1835 static
1836 int lttng_tracker_ids_list_open(struct inode *inode, struct file *file)
1837 {
1838 return seq_open(file, &lttng_tracker_ids_list_seq_ops);
1839 }
1840
1841 static
1842 int lttng_tracker_ids_list_release(struct inode *inode, struct file *file)
1843 {
1844 struct seq_file *m = file->private_data;
1845 struct lttng_kernel_id_tracker *id_tracker = m->private;
1846 int ret;
1847
1848 WARN_ON_ONCE(!id_tracker);
1849 ret = seq_release(inode, file);
1850 if (!ret)
1851 fput(id_tracker->priv->session->priv->file);
1852 return ret;
1853 }
1854
1855 const struct file_operations lttng_tracker_ids_list_fops = {
1856 .owner = THIS_MODULE,
1857 .open = lttng_tracker_ids_list_open,
1858 .read = seq_read,
1859 .llseek = seq_lseek,
1860 .release = lttng_tracker_ids_list_release,
1861 };
1862
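/*
 * Create an anonymous file descriptor dumping the content of the
 * tracker for the given tracker type as a seq_file. A reference is
 * taken on the session file; it is released when the listing file is
 * released.
 */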
1863 int lttng_session_list_tracker_ids(struct lttng_kernel_session *session,
1864 enum tracker_type tracker_type)
1865 {
1866 struct file *tracker_ids_list_file;
1867 struct seq_file *m;
1868 int file_fd, ret;
1869
1870 file_fd = lttng_get_unused_fd();
1871 if (file_fd < 0) {
1872 ret = file_fd;
1873 goto fd_error;
1874 }
1875
1876 tracker_ids_list_file = anon_inode_getfile("[lttng_tracker_ids_list]",
1877 &lttng_tracker_ids_list_fops,
1878 NULL, O_RDWR);
1879 if (IS_ERR(tracker_ids_list_file)) {
1880 ret = PTR_ERR(tracker_ids_list_file);
1881 goto file_error;
1882 }
1883 if (!atomic_long_add_unless(&session->priv->file->f_count, 1, LONG_MAX)) {
1884 ret = -EOVERFLOW;
1885 goto refcount_error;
1886 }
1887 ret = lttng_tracker_ids_list_fops.open(NULL, tracker_ids_list_file);
1888 if (ret < 0)
1889 goto open_error;
1890 m = tracker_ids_list_file->private_data;
1891
1892 m->private = get_tracker(session, tracker_type);
1893 BUG_ON(!m->private);
1894 fd_install(file_fd, tracker_ids_list_file);
1895
1896 return file_fd;
1897
1898 open_error:
1899 atomic_long_dec(&session->priv->file->f_count);
1900 refcount_error:
1901 fput(tracker_ids_list_file);
1902 file_error:
1903 put_unused_fd(file_fd);
1904 fd_error:
1905 return ret;
1906 }
1907
1908 /*
1909 * Enabler management.
1910 */
1911 static
1912 int lttng_match_enabler_star_glob(const char *desc_name,
1913 const char *pattern)
1914 {
1915 if (!strutils_star_glob_match(pattern, LTTNG_SIZE_MAX,
1916 desc_name, LTTNG_SIZE_MAX))
1917 return 0;
1918 return 1;
1919 }
1920
1921 static
1922 int lttng_match_enabler_name(const char *desc_name,
1923 const char *name)
1924 {
1925 if (strcmp(desc_name, name))
1926 return 0;
1927 return 1;
1928 }
1929
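/*
 * Return 1 if the event description matches the enabler, 0 if it does
 * not, or a negative error code. For tracepoints, the match is done on
 * the event name (exact or star-glob). For syscalls, the entry/exit
 * direction and native/compat ABI constraints of the enabler are
 * checked in addition to the name.
 */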
1930 int lttng_desc_match_enabler(const struct lttng_kernel_event_desc *desc,
1931 struct lttng_enabler *enabler)
1932 {
1933 const char *desc_name, *enabler_name;
1934 bool compat = false, entry = false;
1935
1936 enabler_name = enabler->event_param.name;
1937 switch (enabler->event_param.instrumentation) {
1938 case LTTNG_KERNEL_ABI_TRACEPOINT:
1939 desc_name = desc->event_name;
1940 switch (enabler->format_type) {
1941 case LTTNG_ENABLER_FORMAT_STAR_GLOB:
1942 return lttng_match_enabler_star_glob(desc_name, enabler_name);
1943 case LTTNG_ENABLER_FORMAT_NAME:
1944 return lttng_match_enabler_name(desc_name, enabler_name);
1945 default:
1946 return -EINVAL;
1947 }
1948 break;
1949
1950 case LTTNG_KERNEL_ABI_SYSCALL:
1951 desc_name = desc->event_name;
1952 if (!strncmp(desc_name, "compat_", strlen("compat_"))) {
1953 desc_name += strlen("compat_");
1954 compat = true;
1955 }
1956 if (!strncmp(desc_name, "syscall_exit_",
1957 strlen("syscall_exit_"))) {
1958 desc_name += strlen("syscall_exit_");
1959 } else if (!strncmp(desc_name, "syscall_entry_",
1960 strlen("syscall_entry_"))) {
1961 desc_name += strlen("syscall_entry_");
1962 entry = true;
1963 } else {
1964 WARN_ON_ONCE(1);
1965 return -EINVAL;
1966 }
1967 switch (enabler->event_param.u.syscall.entryexit) {
1968 case LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT:
1969 break;
1970 case LTTNG_KERNEL_ABI_SYSCALL_ENTRY:
1971 if (!entry)
1972 return 0;
1973 break;
1974 case LTTNG_KERNEL_ABI_SYSCALL_EXIT:
1975 if (entry)
1976 return 0;
1977 break;
1978 default:
1979 return -EINVAL;
1980 }
1981 switch (enabler->event_param.u.syscall.abi) {
1982 case LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL:
1983 break;
1984 case LTTNG_KERNEL_ABI_SYSCALL_ABI_NATIVE:
1985 if (compat)
1986 return 0;
1987 break;
1988 case LTTNG_KERNEL_ABI_SYSCALL_ABI_COMPAT:
1989 if (!compat)
1990 return 0;
1991 break;
1992 default:
1993 return -EINVAL;
1994 }
1995 switch (enabler->event_param.u.syscall.match) {
1996 case LTTNG_KERNEL_ABI_SYSCALL_MATCH_NAME:
1997 switch (enabler->format_type) {
1998 case LTTNG_ENABLER_FORMAT_STAR_GLOB:
1999 return lttng_match_enabler_star_glob(desc_name, enabler_name);
2000 case LTTNG_ENABLER_FORMAT_NAME:
2001 return lttng_match_enabler_name(desc_name, enabler_name);
2002 default:
2003 return -EINVAL;
2004 }
2005 break;
2006 case LTTNG_KERNEL_ABI_SYSCALL_MATCH_NR:
2007 return -EINVAL; /* Not implemented. */
2008 default:
2009 return -EINVAL;
2010 }
2011 break;
2012
2013 default:
2014 WARN_ON_ONCE(1);
2015 return -EINVAL;
2016 }
2017 }
2018
2019 static
2020 int lttng_event_enabler_match_event(struct lttng_event_enabler *event_enabler,
2021 struct lttng_kernel_event_recorder *event_recorder)
2022 {
2023 struct lttng_enabler *base_enabler = lttng_event_enabler_as_enabler(
2024 event_enabler);
2025
2026 if (base_enabler->event_param.instrumentation != event_recorder->priv->parent.instrumentation)
2027 return 0;
2028 if (lttng_desc_match_enabler(event_recorder->priv->parent.desc, base_enabler)
2029 && event_recorder->chan == event_enabler->chan)
2030 return 1;
2031 else
2032 return 0;
2033 }
2034
2035 static
2036 int lttng_event_notifier_enabler_match_event_notifier(struct lttng_event_notifier_enabler *event_notifier_enabler,
2037 struct lttng_kernel_event_notifier *event_notifier)
2038 {
2039 struct lttng_enabler *base_enabler = lttng_event_notifier_enabler_as_enabler(
2040 event_notifier_enabler);
2041
2042 if (base_enabler->event_param.instrumentation != event_notifier->priv->parent.instrumentation)
2043 return 0;
2044 if (lttng_desc_match_enabler(event_notifier->priv->parent.desc, base_enabler)
2045 && event_notifier->priv->group == event_notifier_enabler->group
2046 && event_notifier->priv->parent.user_token == event_notifier_enabler->base.user_token)
2047 return 1;
2048 else
2049 return 0;
2050 }
2051
2052 static
2053 struct lttng_enabler_ref *lttng_enabler_ref(
2054 struct list_head *enablers_ref_list,
2055 struct lttng_enabler *enabler)
2056 {
2057 struct lttng_enabler_ref *enabler_ref;
2058
2059 list_for_each_entry(enabler_ref, enablers_ref_list, node) {
2060 if (enabler_ref->ref == enabler)
2061 return enabler_ref;
2062 }
2063 return NULL;
2064 }
2065
2066 static
2067 void lttng_create_tracepoint_event_if_missing(struct lttng_event_enabler *event_enabler)
2068 {
2069 struct lttng_kernel_session *session = event_enabler->chan->session;
2070 struct lttng_kernel_probe_desc *probe_desc;
2071 const struct lttng_kernel_event_desc *desc;
2072 int i;
2073 struct list_head *probe_list;
2074
2075 probe_list = lttng_get_probe_list_head();
2076 /*
2077 * For each probe event, if we find that a probe event matches
2078 * our enabler, create an associated lttng_event if not
2079 * already present.
2080 */
2081 list_for_each_entry(probe_desc, probe_list, head) {
2082 for (i = 0; i < probe_desc->nr_events; i++) {
2083 int found = 0;
2084 struct hlist_head *head;
2085 struct lttng_kernel_event_recorder_private *event_recorder_private;
2086 struct lttng_kernel_event_recorder *event_recorder;
2087
2088 desc = probe_desc->event_desc[i];
2089 if (!lttng_desc_match_enabler(desc,
2090 lttng_event_enabler_as_enabler(event_enabler)))
2091 continue;
2092
2093 /*
2094 * Check if already created.
2095 */
2096 head = utils_borrow_hash_table_bucket(
2097 session->priv->events_ht.table, LTTNG_EVENT_HT_SIZE,
2098 desc->event_name);
2099 lttng_hlist_for_each_entry(event_recorder_private, head, hlist) {
2100 if (event_recorder_private->parent.desc == desc
2101 && event_recorder_private->pub->chan == event_enabler->chan)
2102 found = 1;
2103 }
2104 if (found)
2105 continue;
2106
2107 /*
2108 * We need to create an event for this
2109 * event probe.
2110 */
2111 event_recorder = _lttng_kernel_event_recorder_create(event_enabler->chan,
2112 NULL, desc, LTTNG_KERNEL_ABI_TRACEPOINT);
2113 if (!event_recorder) {
2114 printk(KERN_INFO "LTTng: Unable to create event %s\n",
2115 probe_desc->event_desc[i]->event_name);
2116 }
2117 }
2118 }
2119 }
2120
2121 static
2122 void lttng_create_tracepoint_event_notifier_if_missing(struct lttng_event_notifier_enabler *event_notifier_enabler)
2123 {
2124 struct lttng_event_notifier_group *event_notifier_group = event_notifier_enabler->group;
2125 struct lttng_kernel_probe_desc *probe_desc;
2126 const struct lttng_kernel_event_desc *desc;
2127 int i;
2128 struct list_head *probe_list;
2129
2130 probe_list = lttng_get_probe_list_head();
2131 /*
2132 * For each probe event, if we find that a probe event matches
2133 * our enabler, create an associated lttng_event_notifier if not
2134 * already present.
2135 */
2136 list_for_each_entry(probe_desc, probe_list, head) {
2137 for (i = 0; i < probe_desc->nr_events; i++) {
2138 int found = 0;
2139 struct hlist_head *head;
2140 struct lttng_kernel_event_notifier_private *event_notifier_priv;
2141 struct lttng_kernel_event_notifier *event_notifier;
2142
2143 desc = probe_desc->event_desc[i];
2144 if (!lttng_desc_match_enabler(desc,
2145 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)))
2146 continue;
2147
2148 /*
2149 * Check if already created.
2150 */
2151 head = utils_borrow_hash_table_bucket(
2152 event_notifier_group->event_notifiers_ht.table,
2153 LTTNG_EVENT_NOTIFIER_HT_SIZE, desc->event_name);
2154 lttng_hlist_for_each_entry(event_notifier_priv, head, hlist) {
2155 if (event_notifier_priv->parent.desc == desc
2156 && event_notifier_priv->parent.user_token == event_notifier_enabler->base.user_token)
2157 found = 1;
2158 }
2159 if (found)
2160 continue;
2161
2162 /*
2163 * We need to create an event_notifier for this event probe.
2164 */
2165 event_notifier = _lttng_event_notifier_create(desc,
2166 event_notifier_enabler->base.user_token,
2167 event_notifier_enabler->error_counter_index,
2168 event_notifier_group, NULL,
2169 LTTNG_KERNEL_ABI_TRACEPOINT);
2170 if (IS_ERR(event_notifier)) {
2171 printk(KERN_INFO "LTTng: Unable to create event_notifier %s\n",
2172 probe_desc->event_desc[i]->event_name);
2173 }
2174 }
2175 }
2176 }
2177
2178 static
2179 void lttng_create_syscall_event_if_missing(struct lttng_event_enabler *event_enabler)
2180 {
2181 int ret;
2182
2183 ret = lttng_syscalls_register_event(event_enabler);
2184 WARN_ON_ONCE(ret);
2185 }
2186
2187 static
2188 void lttng_create_syscall_event_notifier_if_missing(struct lttng_event_notifier_enabler *event_notifier_enabler)
2189 {
2190 int ret;
2191
2192 ret = lttng_syscalls_register_event_notifier(event_notifier_enabler);
2193 WARN_ON_ONCE(ret);
2194 ret = lttng_syscalls_create_matching_event_notifiers(event_notifier_enabler);
2195 WARN_ON_ONCE(ret);
2196 }
2197
2198 /*
2199 * Create a struct lttng_kernel_event_recorder if it does not exist yet and its
2200 * probe is present in the list of tracepoint probes.
2201 * Should be called with sessions mutex held.
2202 */
2203 static
2204 void lttng_create_event_if_missing(struct lttng_event_enabler *event_enabler)
2205 {
2206 switch (event_enabler->base.event_param.instrumentation) {
2207 case LTTNG_KERNEL_ABI_TRACEPOINT:
2208 lttng_create_tracepoint_event_if_missing(event_enabler);
2209 break;
2210
2211 case LTTNG_KERNEL_ABI_SYSCALL:
2212 lttng_create_syscall_event_if_missing(event_enabler);
2213 break;
2214
2215 default:
2216 WARN_ON_ONCE(1);
2217 break;
2218 }
2219 }
2220
2221 /*
2222 * Create events associated with an event_enabler (if not already present),
2223 * and add a backward reference from each matching event to the enabler.
2224 * Should be called with sessions mutex held.
2225 */
2226 static
2227 int lttng_event_enabler_ref_events(struct lttng_event_enabler *event_enabler)
2228 {
2229 struct lttng_channel *chan = event_enabler->chan;
2230 struct lttng_kernel_session *session = event_enabler->chan->session;
2231 struct lttng_enabler *base_enabler = lttng_event_enabler_as_enabler(event_enabler);
2232 struct lttng_kernel_event_recorder_private *event_recorder_priv;
2233
2234 if (base_enabler->event_param.instrumentation == LTTNG_KERNEL_ABI_SYSCALL &&
2235 base_enabler->event_param.u.syscall.abi == LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL &&
2236 base_enabler->event_param.u.syscall.match == LTTNG_KERNEL_ABI_SYSCALL_MATCH_NAME &&
2237 !strcmp(base_enabler->event_param.name, "*")) {
2238 int enabled = base_enabler->enabled;
2239 enum lttng_kernel_abi_syscall_entryexit entryexit = base_enabler->event_param.u.syscall.entryexit;
2240
2241 if (entryexit == LTTNG_KERNEL_ABI_SYSCALL_ENTRY || entryexit == LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT)
2242 WRITE_ONCE(chan->syscall_all_entry, enabled);
2243
2244 if (entryexit == LTTNG_KERNEL_ABI_SYSCALL_EXIT || entryexit == LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT)
2245 WRITE_ONCE(chan->syscall_all_exit, enabled);
2246 }
2247
2248 /* First ensure that probe events are created for this enabler. */
2249 lttng_create_event_if_missing(event_enabler);
2250
2251 /* For each event matching event_enabler in session event list. */
2252 list_for_each_entry(event_recorder_priv, &session->priv->events, node) {
2253 struct lttng_kernel_event_recorder *event_recorder = event_recorder_priv->pub;
2254 struct lttng_enabler_ref *enabler_ref;
2255
2256 if (!lttng_event_enabler_match_event(event_enabler, event_recorder))
2257 continue;
2258 enabler_ref = lttng_enabler_ref(&event_recorder_priv->parent.enablers_ref_head,
2259 lttng_event_enabler_as_enabler(event_enabler));
2260 if (!enabler_ref) {
2261 /*
2262 * If no backward ref, create it.
2263 * Add backward ref from event to event_enabler.
2264 */
2265 enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
2266 if (!enabler_ref)
2267 return -ENOMEM;
2268 enabler_ref->ref = lttng_event_enabler_as_enabler(event_enabler);
2269 list_add(&enabler_ref->node,
2270 &event_recorder_priv->parent.enablers_ref_head);
2271 }
2272
2273 /*
2274 * Link filter bytecodes if not linked yet.
2275 */
2276 lttng_enabler_link_bytecode(event_recorder_priv->parent.desc,
2277 lttng_static_ctx,
2278 &event_recorder_priv->parent.filter_bytecode_runtime_head,
2279 &lttng_event_enabler_as_enabler(event_enabler)->filter_bytecode_head);
2280 }
2281 return 0;
2282 }
2283
2284 /*
2285 * Create a struct lttng_kernel_event_notifier if it does not exist yet and its
2286 * probe is present in the list of tracepoint probes.
2287 * Should be called with sessions mutex held.
2288 */
2289 static
2290 void lttng_create_event_notifier_if_missing(struct lttng_event_notifier_enabler *event_notifier_enabler)
2291 {
2292 switch (event_notifier_enabler->base.event_param.instrumentation) {
2293 case LTTNG_KERNEL_ABI_TRACEPOINT:
2294 lttng_create_tracepoint_event_notifier_if_missing(event_notifier_enabler);
2295 break;
2296
2297 case LTTNG_KERNEL_ABI_SYSCALL:
2298 lttng_create_syscall_event_notifier_if_missing(event_notifier_enabler);
2299 break;
2300
2301 default:
2302 WARN_ON_ONCE(1);
2303 break;
2304 }
2305 }
2306
2307 /*
2308 * Create event_notifiers associated with an event_notifier enabler (if not already present).
2309 */
2310 static
2311 int lttng_event_notifier_enabler_ref_event_notifiers(
2312 struct lttng_event_notifier_enabler *event_notifier_enabler)
2313 {
2314 struct lttng_event_notifier_group *event_notifier_group = event_notifier_enabler->group;
2315 struct lttng_enabler *base_enabler = lttng_event_notifier_enabler_as_enabler(event_notifier_enabler);
2316 struct lttng_kernel_event_notifier_private *event_notifier_priv;
2317
2318 if (base_enabler->event_param.instrumentation == LTTNG_KERNEL_ABI_SYSCALL &&
2319 base_enabler->event_param.u.syscall.abi == LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL &&
2320 base_enabler->event_param.u.syscall.match == LTTNG_KERNEL_ABI_SYSCALL_MATCH_NAME &&
2321 !strcmp(base_enabler->event_param.name, "*")) {
2322
2323 int enabled = base_enabler->enabled;
2324 enum lttng_kernel_abi_syscall_entryexit entryexit = base_enabler->event_param.u.syscall.entryexit;
2325
2326 if (entryexit == LTTNG_KERNEL_ABI_SYSCALL_ENTRY || entryexit == LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT)
2327 WRITE_ONCE(event_notifier_group->syscall_all_entry, enabled);
2328
2329 if (entryexit == LTTNG_KERNEL_ABI_SYSCALL_EXIT || entryexit == LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT)
2330 WRITE_ONCE(event_notifier_group->syscall_all_exit, enabled);
2331
2332 }
2333
2334 /* First ensure that probe event_notifiers are created for this enabler. */
2335 lttng_create_event_notifier_if_missing(event_notifier_enabler);
2336
2337 /* Link the created event_notifier with its associated enabler. */
2338 list_for_each_entry(event_notifier_priv, &event_notifier_group->event_notifiers_head, node) {
2339 struct lttng_kernel_event_notifier *event_notifier = event_notifier_priv->pub;
2340 struct lttng_enabler_ref *enabler_ref;
2341
2342 if (!lttng_event_notifier_enabler_match_event_notifier(event_notifier_enabler, event_notifier))
2343 continue;
2344
2345 enabler_ref = lttng_enabler_ref(&event_notifier_priv->parent.enablers_ref_head,
2346 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler));
2347 if (!enabler_ref) {
2348 /*
2349 * If no backward ref, create it.
2350 * Add backward ref from event_notifier to enabler.
2351 */
2352 enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
2353 if (!enabler_ref)
2354 return -ENOMEM;
2355
2356 enabler_ref->ref = lttng_event_notifier_enabler_as_enabler(
2357 event_notifier_enabler);
2358 list_add(&enabler_ref->node,
2359 &event_notifier_priv->parent.enablers_ref_head);
2360 }
2361
2362 /*
2363 * Link filter bytecodes if not linked yet.
2364 */
2365 lttng_enabler_link_bytecode(event_notifier_priv->parent.desc,
2366 lttng_static_ctx, &event_notifier_priv->parent.filter_bytecode_runtime_head,
2367 &lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->filter_bytecode_head);
2368
2369 /* Link capture bytecodes if not linked yet. */
2370 lttng_enabler_link_bytecode(event_notifier_priv->parent.desc,
2371 lttng_static_ctx, &event_notifier_priv->capture_bytecode_runtime_head,
2372 &event_notifier_enabler->capture_bytecode_head);
2373
2374 event_notifier_priv->num_captures = event_notifier_enabler->num_captures;
2375 }
2376 return 0;
2377 }
2378
2379 /*
2380 * Called at module load: connect the newly loaded probes to all enablers
2381 * matching them.
2382 * Called with sessions lock held.
2383 */
2384 int lttng_fix_pending_events(void)
2385 {
2386 struct lttng_kernel_session_private *session_priv;
2387
2388 list_for_each_entry(session_priv, &sessions, list)
2389 lttng_session_lazy_sync_event_enablers(session_priv->pub);
2390 return 0;
2391 }
2392
2393 static bool lttng_event_notifier_group_has_active_event_notifiers(
2394 struct lttng_event_notifier_group *event_notifier_group)
2395 {
2396 struct lttng_event_notifier_enabler *event_notifier_enabler;
2397
2398 list_for_each_entry(event_notifier_enabler, &event_notifier_group->enablers_head,
2399 node) {
2400 if (event_notifier_enabler->base.enabled)
2401 return true;
2402 }
2403 return false;
2404 }
2405
2406 bool lttng_event_notifier_active(void)
2407 {
2408 struct lttng_event_notifier_group *event_notifier_group;
2409
2410 list_for_each_entry(event_notifier_group, &event_notifier_groups, node) {
2411 if (lttng_event_notifier_group_has_active_event_notifiers(event_notifier_group))
2412 return true;
2413 }
2414 return false;
2415 }
2416
2417 int lttng_fix_pending_event_notifiers(void)
2418 {
2419 struct lttng_event_notifier_group *event_notifier_group;
2420
2421 list_for_each_entry(event_notifier_group, &event_notifier_groups, node)
2422 lttng_event_notifier_group_sync_enablers(event_notifier_group);
2423 return 0;
2424 }
2425
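/*
 * Create an event enabler for the given channel, add it to the
 * session's enabler list and synchronize the session enablers.
 * The enabler is created in the disabled state.
 */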
2426 struct lttng_event_enabler *lttng_event_enabler_create(
2427 enum lttng_enabler_format_type format_type,
2428 struct lttng_kernel_abi_event *event_param,
2429 struct lttng_channel *chan)
2430 {
2431 struct lttng_event_enabler *event_enabler;
2432
2433 event_enabler = kzalloc(sizeof(*event_enabler), GFP_KERNEL);
2434 if (!event_enabler)
2435 return NULL;
2436 event_enabler->base.format_type = format_type;
2437 INIT_LIST_HEAD(&event_enabler->base.filter_bytecode_head);
2438 memcpy(&event_enabler->base.event_param, event_param,
2439 sizeof(event_enabler->base.event_param));
2440 event_enabler->chan = chan;
2441 /* ctx left NULL */
2442 event_enabler->base.enabled = 0;
2443 mutex_lock(&sessions_mutex);
2444 list_add(&event_enabler->node, &event_enabler->chan->session->priv->enablers_head);
2445 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
2446 mutex_unlock(&sessions_mutex);
2447 return event_enabler;
2448 }
2449
2450 int lttng_event_enabler_enable(struct lttng_event_enabler *event_enabler)
2451 {
2452 mutex_lock(&sessions_mutex);
2453 lttng_event_enabler_as_enabler(event_enabler)->enabled = 1;
2454 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
2455 mutex_unlock(&sessions_mutex);
2456 return 0;
2457 }
2458
2459 int lttng_event_enabler_disable(struct lttng_event_enabler *event_enabler)
2460 {
2461 mutex_lock(&sessions_mutex);
2462 lttng_event_enabler_as_enabler(event_enabler)->enabled = 0;
2463 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
2464 mutex_unlock(&sessions_mutex);
2465 return 0;
2466 }
2467
2468 static
2469 int lttng_enabler_attach_filter_bytecode(struct lttng_enabler *enabler,
2470 struct lttng_kernel_abi_filter_bytecode __user *bytecode)
2471 {
2472 struct lttng_kernel_bytecode_node *bytecode_node;
2473 uint32_t bytecode_len;
2474 int ret;
2475
2476 ret = get_user(bytecode_len, &bytecode->len);
2477 if (ret)
2478 return ret;
2479 bytecode_node = lttng_kvzalloc(sizeof(*bytecode_node) + bytecode_len,
2480 GFP_KERNEL);
2481 if (!bytecode_node)
2482 return -ENOMEM;
2483 ret = copy_from_user(&bytecode_node->bc, bytecode,
2484 sizeof(*bytecode) + bytecode_len);
2485 if (ret)
2486 goto error_free;
2487
2488 bytecode_node->type = LTTNG_KERNEL_BYTECODE_TYPE_FILTER;
2489 bytecode_node->enabler = enabler;
2490 /* Enforce length based on allocated size */
2491 bytecode_node->bc.len = bytecode_len;
2492 list_add_tail(&bytecode_node->node, &enabler->filter_bytecode_head);
2493
2494 return 0;
2495
2496 error_free:
2497 lttng_kvfree(bytecode_node);
2498 return ret;
2499 }
2500
2501 int lttng_event_enabler_attach_filter_bytecode(struct lttng_event_enabler *event_enabler,
2502 struct lttng_kernel_abi_filter_bytecode __user *bytecode)
2503 {
2504 int ret;
2505 ret = lttng_enabler_attach_filter_bytecode(
2506 lttng_event_enabler_as_enabler(event_enabler), bytecode);
2507 if (ret)
2508 goto error;
2509
2510 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
2511 return 0;
2512
2513 error:
2514 return ret;
2515 }
2516
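/*
 * Add a callsite to an event. Only uprobe events support callsites at
 * the moment; other instrumentation types return -EINVAL.
 */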
2517 int lttng_event_add_callsite(struct lttng_kernel_event_common *event,
2518 struct lttng_kernel_abi_event_callsite __user *callsite)
2519 {
2520
2521 switch (event->priv->instrumentation) {
2522 case LTTNG_KERNEL_ABI_UPROBE:
2523 return lttng_uprobes_event_add_callsite(event, callsite);
2524 default:
2525 return -EINVAL;
2526 }
2527 }
2528
2529 static
2530 void lttng_enabler_destroy(struct lttng_enabler *enabler)
2531 {
2532 struct lttng_kernel_bytecode_node *filter_node, *tmp_filter_node;
2533
2534 /* Destroy filter bytecode */
2535 list_for_each_entry_safe(filter_node, tmp_filter_node,
2536 &enabler->filter_bytecode_head, node) {
2537 lttng_kvfree(filter_node);
2538 }
2539 }
2540
2541 static
2542 void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler)
2543 {
2544 lttng_enabler_destroy(lttng_event_enabler_as_enabler(event_enabler));
2545
2546 list_del(&event_enabler->node);
2547 kfree(event_enabler);
2548 }
2549
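/*
 * Create an event notifier enabler for the given group, add it to the
 * group's enabler list and synchronize the group's enablers.
 * The enabler is created in the disabled state.
 */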
2550 struct lttng_event_notifier_enabler *lttng_event_notifier_enabler_create(
2551 struct lttng_event_notifier_group *event_notifier_group,
2552 enum lttng_enabler_format_type format_type,
2553 struct lttng_kernel_abi_event_notifier *event_notifier_param)
2554 {
2555 struct lttng_event_notifier_enabler *event_notifier_enabler;
2556
2557 event_notifier_enabler = kzalloc(sizeof(*event_notifier_enabler), GFP_KERNEL);
2558 if (!event_notifier_enabler)
2559 return NULL;
2560
2561 event_notifier_enabler->base.format_type = format_type;
2562 INIT_LIST_HEAD(&event_notifier_enabler->base.filter_bytecode_head);
2563 INIT_LIST_HEAD(&event_notifier_enabler->capture_bytecode_head);
2564
2565 event_notifier_enabler->error_counter_index = event_notifier_param->error_counter_index;
2566 event_notifier_enabler->num_captures = 0;
2567
2568 memcpy(&event_notifier_enabler->base.event_param, &event_notifier_param->event,
2569 sizeof(event_notifier_enabler->base.event_param));
2570
2571 event_notifier_enabler->base.enabled = 0;
2572 event_notifier_enabler->base.user_token = event_notifier_param->event.token;
2573 event_notifier_enabler->group = event_notifier_group;
2574
2575 mutex_lock(&sessions_mutex);
2576 list_add(&event_notifier_enabler->node, &event_notifier_enabler->group->enablers_head);
2577 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2578
2579 mutex_unlock(&sessions_mutex);
2580
2581 return event_notifier_enabler;
2582 }
2583
2584 int lttng_event_notifier_enabler_enable(
2585 struct lttng_event_notifier_enabler *event_notifier_enabler)
2586 {
2587 mutex_lock(&sessions_mutex);
2588 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 1;
2589 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2590 mutex_unlock(&sessions_mutex);
2591 return 0;
2592 }
2593
2594 int lttng_event_notifier_enabler_disable(
2595 struct lttng_event_notifier_enabler *event_notifier_enabler)
2596 {
2597 mutex_lock(&sessions_mutex);
2598 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 0;
2599 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2600 mutex_unlock(&sessions_mutex);
2601 return 0;
2602 }
2603
2604 int lttng_event_notifier_enabler_attach_filter_bytecode(
2605 struct lttng_event_notifier_enabler *event_notifier_enabler,
2606 struct lttng_kernel_abi_filter_bytecode __user *bytecode)
2607 {
2608 int ret;
2609
2610 ret = lttng_enabler_attach_filter_bytecode(
2611 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler),
2612 bytecode);
2613 if (ret)
2614 goto error;
2615
2616 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2617 return 0;
2618
2619 error:
2620 return ret;
2621 }
2622
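/*
 * Copy a capture bytecode program from user space, append it to the
 * enabler's capture bytecode list, then synchronize the group's
 * enablers so the bytecode gets linked to matching event notifiers.
 */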
2623 int lttng_event_notifier_enabler_attach_capture_bytecode(
2624 struct lttng_event_notifier_enabler *event_notifier_enabler,
2625 struct lttng_kernel_abi_capture_bytecode __user *bytecode)
2626 {
2627 struct lttng_kernel_bytecode_node *bytecode_node;
2628 struct lttng_enabler *enabler =
2629 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler);
2630 uint32_t bytecode_len;
2631 int ret;
2632
2633 ret = get_user(bytecode_len, &bytecode->len);
2634 if (ret)
2635 return ret;
2636
2637 bytecode_node = lttng_kvzalloc(sizeof(*bytecode_node) + bytecode_len,
2638 GFP_KERNEL);
2639 if (!bytecode_node)
2640 return -ENOMEM;
2641
2642 ret = copy_from_user(&bytecode_node->bc, bytecode,
2643 sizeof(*bytecode) + bytecode_len);
2644 if (ret)
2645 goto error_free;
2646
2647 bytecode_node->type = LTTNG_KERNEL_BYTECODE_TYPE_CAPTURE;
2648 bytecode_node->enabler = enabler;
2649
2650 /* Enforce length based on allocated size */
2651 bytecode_node->bc.len = bytecode_len;
2652 list_add_tail(&bytecode_node->node, &event_notifier_enabler->capture_bytecode_head);
2653
2654 event_notifier_enabler->num_captures++;
2655
2656 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2657 goto end;
2658
2659 error_free:
2660 lttng_kvfree(bytecode_node);
2661 end:
2662 return ret;
2663 }
2664
2665 static
2666 void lttng_event_notifier_enabler_destroy(
2667 struct lttng_event_notifier_enabler *event_notifier_enabler)
2668 {
2669 if (!event_notifier_enabler) {
2670 return;
2671 }
2672
2673 list_del(&event_notifier_enabler->node);
2674
2675 lttng_enabler_destroy(lttng_event_notifier_enabler_as_enabler(event_notifier_enabler));
2676 kfree(event_notifier_enabler);
2677 }
2678
2679 /*
2680 * lttng_session_sync_event_enablers should be called just before starting a
2681 * session.
2682 * Should be called with sessions mutex held.
2683 */
2684 static
2685 void lttng_session_sync_event_enablers(struct lttng_kernel_session *session)
2686 {
2687 struct lttng_event_enabler *event_enabler;
2688 struct lttng_kernel_event_recorder_private *event_recorder_priv;
2689
2690 list_for_each_entry(event_enabler, &session->priv->enablers_head, node)
2691 lttng_event_enabler_ref_events(event_enabler);
2692 /*
2693 * For each event, if at least one of its enablers is enabled,
2694 * and its channel and session transient states are enabled, we
2695 * enable the event, else we disable it.
2696 */
2697 list_for_each_entry(event_recorder_priv, &session->priv->events, node) {
2698 struct lttng_kernel_event_recorder *event_recorder = event_recorder_priv->pub;
2699 struct lttng_enabler_ref *enabler_ref;
2700 struct lttng_kernel_bytecode_runtime *runtime;
2701 int enabled = 0, has_enablers_without_filter_bytecode = 0;
2702 int nr_filters = 0;
2703
2704 switch (event_recorder_priv->parent.instrumentation) {
2705 case LTTNG_KERNEL_ABI_TRACEPOINT: /* Fall-through */
2706 case LTTNG_KERNEL_ABI_SYSCALL:
2707 /* Enable events */
2708 list_for_each_entry(enabler_ref,
2709 &event_recorder_priv->parent.enablers_ref_head, node) {
2710 if (enabler_ref->ref->enabled) {
2711 enabled = 1;
2712 break;
2713 }
2714 }
2715 break;
2716
2717 default:
2718 /* Not handled with lazy sync. */
2719 continue;
2720 }
2721 /*
2722 * Enabled state is based on the union of the enablers, combined with the
2723 * intersection of the session and channel transient enable
2724 * states.
2725 */
2726 enabled = enabled && session->priv->tstate && event_recorder->chan->tstate;
2727
2728 WRITE_ONCE(event_recorder->parent.enabled, enabled);
2729 /*
2730 * Sync tracepoint registration with event enabled
2731 * state.
2732 */
2733 if (enabled) {
2734 register_event(event_recorder);
2735 } else {
2736 _lttng_event_unregister(event_recorder);
2737 }
2738
2739 /* Check whether the event has at least one enabled enabler without filter bytecode */
2740 list_for_each_entry(enabler_ref,
2741 &event_recorder_priv->parent.enablers_ref_head, node) {
2742 if (enabler_ref->ref->enabled
2743 && list_empty(&enabler_ref->ref->filter_bytecode_head)) {
2744 has_enablers_without_filter_bytecode = 1;
2745 break;
2746 }
2747 }
2748 event_recorder_priv->parent.has_enablers_without_filter_bytecode =
2749 has_enablers_without_filter_bytecode;
2750
2751 /* Enable filters */
2752 list_for_each_entry(runtime,
2753 &event_recorder_priv->parent.filter_bytecode_runtime_head, node) {
2754 lttng_bytecode_sync_state(runtime);
2755 nr_filters++;
2756 }
2757 WRITE_ONCE(event_recorder_priv->parent.pub->eval_filter,
2758 !(has_enablers_without_filter_bytecode || !nr_filters));
2759 }
2760 }
2761
2762 /*
2763 * Apply enablers to session events, adding events to session if need
2764 * be. It is required after each modification applied to an active
2765 * session, and right before session "start".
2766 * "lazy" sync means we only sync if required.
2767 * Should be called with sessions mutex held.
2768 */
2769 static
2770 void lttng_session_lazy_sync_event_enablers(struct lttng_kernel_session *session)
2771 {
2772 /* We can skip if session is not active */
2773 if (!session->active)
2774 return;
2775 lttng_session_sync_event_enablers(session);
2776 }
2777
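/*
 * Synchronize the enablers of an event notifier group: create missing
 * event notifiers, then sync the enabled state, filter bytecode and
 * capture bytecode of each event notifier with its enablers.
 * Should be called with sessions mutex held.
 */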
2778 static
2779 void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group)
2780 {
2781 struct lttng_event_notifier_enabler *event_notifier_enabler;
2782 struct lttng_kernel_event_notifier_private *event_notifier_priv;
2783
2784 list_for_each_entry(event_notifier_enabler, &event_notifier_group->enablers_head, node)
2785 lttng_event_notifier_enabler_ref_event_notifiers(event_notifier_enabler);
2786
2787 /*
2788 * For each event_notifier, if at least one of its enablers is enabled,
2789 * we enable the event_notifier, else we disable it.
2790 */
2791 list_for_each_entry(event_notifier_priv, &event_notifier_group->event_notifiers_head, node) {
2792 struct lttng_kernel_event_notifier *event_notifier = event_notifier_priv->pub;
2793 struct lttng_enabler_ref *enabler_ref;
2794 struct lttng_kernel_bytecode_runtime *runtime;
2795 int enabled = 0, has_enablers_without_filter_bytecode = 0;
2796 int nr_filters = 0, nr_captures = 0;
2797
2798 switch (event_notifier_priv->parent.instrumentation) {
2799 case LTTNG_KERNEL_ABI_TRACEPOINT: /* Fall-through */
2800 case LTTNG_KERNEL_ABI_SYSCALL:
2801 /* Enable event_notifiers */
2802 list_for_each_entry(enabler_ref,
2803 &event_notifier_priv->parent.enablers_ref_head, node) {
2804 if (enabler_ref->ref->enabled) {
2805 enabled = 1;
2806 break;
2807 }
2808 }
2809 break;
2810
2811 default:
2812 /* Not handled with sync. */
2813 continue;
2814 }
2815
2816 WRITE_ONCE(event_notifier->parent.enabled, enabled);
2817 /*
2818 * Sync tracepoint registration with event_notifier enabled
2819 * state.
2820 */
2821 if (enabled) {
2822 if (!event_notifier_priv->parent.registered)
2823 register_event_notifier(event_notifier);
2824 } else {
2825 if (event_notifier_priv->parent.registered)
2826 _lttng_event_notifier_unregister(event_notifier);
2827 }
2828
2829 /* Check whether the event notifier has at least one enabled enabler without filter bytecode */
2830 list_for_each_entry(enabler_ref,
2831 &event_notifier_priv->parent.enablers_ref_head, node) {
2832 if (enabler_ref->ref->enabled
2833 && list_empty(&enabler_ref->ref->filter_bytecode_head)) {
2834 has_enablers_without_filter_bytecode = 1;
2835 break;
2836 }
2837 }
2838 event_notifier_priv->parent.has_enablers_without_filter_bytecode =
2839 has_enablers_without_filter_bytecode;
2840
2841 /* Enable filters */
2842 list_for_each_entry(runtime,
2843 &event_notifier_priv->parent.filter_bytecode_runtime_head, node) {
2844 lttng_bytecode_sync_state(runtime);
2845 nr_filters++;
2846 }
2847 WRITE_ONCE(event_notifier_priv->parent.pub->eval_filter,
2848 !(has_enablers_without_filter_bytecode || !nr_filters));
2849
2850 /* Enable captures */
2851 list_for_each_entry(runtime,
2852 &event_notifier_priv->capture_bytecode_runtime_head, node) {
2853 lttng_bytecode_sync_state(runtime);
2854 nr_captures++;
2855 }
2856 WRITE_ONCE(event_notifier->eval_capture, !!nr_captures);
2857 }
2858 }
2859
2860 /*
2861 * Serialize at most one packet worth of metadata into a metadata
2862 * channel.
2863 * We grab the metadata cache mutex to get exclusive access to our metadata
2864 * buffer and to the metadata cache. Exclusive access to the metadata buffer
2865 * allows us to do otherwise racy operations, such as checking the remaining space
2866 * left in the packet and then writing, since mutual exclusion protects us from concurrent writes.
2867 * Mutual exclusion on the metadata cache allows us to read the cache content
2868 * without racing against reallocation of the cache by updates.
2869 * Returns the number of bytes written in the channel, 0 if no data
2870 * was written and a negative value on error.
2871 */
2872 int lttng_metadata_output_channel(struct lttng_metadata_stream *stream,
2873 struct channel *chan, bool *coherent)
2874 {
2875 struct lttng_kernel_ring_buffer_ctx ctx;
2876 int ret = 0;
2877 size_t len, reserve_len;
2878
2879 /*
2880 * Ensure we support multiple get_next / put sequences followed by
2881 * put_next. The metadata cache lock protects reading the metadata
2882 * cache. It can indeed be read concurrently by "get_next_subbuf" and
2883 * "flush" operations on the buffer invoked by different processes.
2884 * Moreover, since the metadata cache memory can be reallocated, we
2885 * need to have exclusive access against updates even though we only
2886 * read it.
2887 */
2888 mutex_lock(&stream->metadata_cache->lock);
2889 WARN_ON(stream->metadata_in < stream->metadata_out);
2890 if (stream->metadata_in != stream->metadata_out)
2891 goto end;
2892
2893 /* Metadata regenerated, change the version. */
2894 if (stream->metadata_cache->version != stream->version)
2895 stream->version = stream->metadata_cache->version;
2896
2897 len = stream->metadata_cache->metadata_written -
2898 stream->metadata_in;
2899 if (!len)
2900 goto end;
2901 reserve_len = min_t(size_t,
2902 stream->transport->ops.priv->packet_avail_size(chan),
2903 len);
2904 lib_ring_buffer_ctx_init(&ctx, chan, reserve_len,
2905 sizeof(char), NULL);
2906 /*
2907 * If reservation failed, return an error to the caller.
2908 */
2909 ret = stream->transport->ops.event_reserve(&ctx);
2910 if (ret != 0) {
2911 printk(KERN_WARNING "LTTng: Metadata event reservation failed\n");
2912 stream->coherent = false;
2913 goto end;
2914 }
2915 stream->transport->ops.event_write(&ctx,
2916 stream->metadata_cache->data + stream->metadata_in,
2917 reserve_len);
2918 stream->transport->ops.event_commit(&ctx);
2919 stream->metadata_in += reserve_len;
2920 if (reserve_len < len)
2921 stream->coherent = false;
2922 else
2923 stream->coherent = true;
2924 ret = reserve_len;
2925
2926 end:
2927 if (coherent)
2928 *coherent = stream->coherent;
2929 mutex_unlock(&stream->metadata_cache->lock);
2930 return ret;
2931 }
2932
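/*
 * Begin a metadata transaction: the metadata cache lock is taken on
 * the outermost call. Nesting is allowed; only the outermost
 * begin/end pair locks and unlocks the cache.
 */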
2933 static
2934 void lttng_metadata_begin(struct lttng_kernel_session *session)
2935 {
2936 if (atomic_inc_return(&session->priv->metadata_cache->producing) == 1)
2937 mutex_lock(&session->priv->metadata_cache->lock);
2938 }
2939
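/*
 * End a metadata transaction: on the outermost call, wake up the
 * metadata stream readers and release the metadata cache lock.
 */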
2940 static
2941 void lttng_metadata_end(struct lttng_kernel_session *session)
2942 {
2943 WARN_ON_ONCE(!atomic_read(&session->priv->metadata_cache->producing));
2944 if (atomic_dec_return(&session->priv->metadata_cache->producing) == 0) {
2945 struct lttng_metadata_stream *stream;
2946
2947 list_for_each_entry(stream, &session->priv->metadata_cache->metadata_stream, list)
2948 wake_up_interruptible(&stream->read_wait);
2949 mutex_unlock(&session->priv->metadata_cache->lock);
2950 }
2951 }
2952
2953 /*
2954 * Write the metadata to the metadata cache.
2955 * Must be called with sessions_mutex held.
2956 * The metadata cache lock protects us from concurrent read access from
2957 * a thread outputting metadata content to the ring buffer.
2958 * The content of the printf is printed as a single atomic metadata
2959 * transaction.
2960 */
2961 int lttng_metadata_printf(struct lttng_kernel_session *session,
2962 const char *fmt, ...)
2963 {
2964 char *str;
2965 size_t len;
2966 va_list ap;
2967
2968 WARN_ON_ONCE(!LTTNG_READ_ONCE(session->active));
2969
2970 va_start(ap, fmt);
2971 str = kvasprintf(GFP_KERNEL, fmt, ap);
2972 va_end(ap);
2973 if (!str)
2974 return -ENOMEM;
2975
2976 len = strlen(str);
2977 WARN_ON_ONCE(!atomic_read(&session->priv->metadata_cache->producing));
2978 if (session->priv->metadata_cache->metadata_written + len >
2979 session->priv->metadata_cache->cache_alloc) {
2980 char *tmp_cache_realloc;
2981 unsigned int tmp_cache_alloc_size;
2982
2983 tmp_cache_alloc_size = max_t(unsigned int,
2984 session->priv->metadata_cache->cache_alloc + len,
2985 session->priv->metadata_cache->cache_alloc << 1);
2986 tmp_cache_realloc = vzalloc(tmp_cache_alloc_size);
2987 if (!tmp_cache_realloc)
2988 goto err;
2989 if (session->priv->metadata_cache->data) {
2990 memcpy(tmp_cache_realloc,
2991 session->priv->metadata_cache->data,
2992 session->priv->metadata_cache->cache_alloc);
2993 vfree(session->priv->metadata_cache->data);
2994 }
2995
2996 session->priv->metadata_cache->cache_alloc = tmp_cache_alloc_size;
2997 session->priv->metadata_cache->data = tmp_cache_realloc;
2998 }
2999 memcpy(session->priv->metadata_cache->data +
3000 session->priv->metadata_cache->metadata_written,
3001 str, len);
3002 session->priv->metadata_cache->metadata_written += len;
3003 kfree(str);
3004
3005 return 0;
3006
3007 err:
3008 kfree(str);
3009 return -ENOMEM;
3010 }
3011
3012 static
3013 int print_tabs(struct lttng_kernel_session *session, size_t nesting)
3014 {
3015 size_t i;
3016
3017 for (i = 0; i < nesting; i++) {
3018 int ret;
3019
3020 ret = lttng_metadata_printf(session, " ");
3021 if (ret) {
3022 return ret;
3023 }
3024 }
3025 return 0;
3026 }
3027
3028 static
3029 int lttng_field_name_statedump(struct lttng_kernel_session *session,
3030 const struct lttng_kernel_event_field *field,
3031 size_t nesting)
3032 {
3033 return lttng_metadata_printf(session, " _%s;\n", field->name);
3034 }
3035
3036 static
3037 int _lttng_integer_type_statedump(struct lttng_kernel_session *session,
3038 const struct lttng_kernel_type_integer *type,
3039 enum lttng_kernel_string_encoding parent_encoding,
3040 size_t nesting)
3041 {
3042 int ret;
3043
3044 ret = print_tabs(session, nesting);
3045 if (ret)
3046 return ret;
3047 ret = lttng_metadata_printf(session,
3048 "integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s }",
3049 type->size,
3050 type->alignment,
3051 type->signedness,
3052 (parent_encoding == lttng_kernel_string_encoding_none)
3053 ? "none"
3054 : (parent_encoding == lttng_kernel_string_encoding_UTF8)
3055 ? "UTF8"
3056 : "ASCII",
3057 type->base,
3058 #if __BYTE_ORDER == __BIG_ENDIAN
3059 type->reverse_byte_order ? " byte_order = le;" : ""
3060 #else
3061 type->reverse_byte_order ? " byte_order = be;" : ""
3062 #endif
3063 );
3064 return ret;
3065 }
3066
3067 /*
3068 * Must be called with sessions_mutex held.
3069 */
3070 static
3071 int _lttng_struct_type_statedump(struct lttng_kernel_session *session,
3072 const struct lttng_kernel_type_struct *type,
3073 size_t nesting)
3074 {
3075 const char *prev_field_name = NULL;
3076 int ret;
3077 uint32_t i, nr_fields;
3078 unsigned int alignment;
3079
3080 ret = print_tabs(session, nesting);
3081 if (ret)
3082 return ret;
3083 ret = lttng_metadata_printf(session,
3084 "struct {\n");
3085 if (ret)
3086 return ret;
3087 nr_fields = type->nr_fields;
3088 for (i = 0; i < nr_fields; i++) {
3089 const struct lttng_kernel_event_field *iter_field;
3090
3091 iter_field = type->fields[i];
3092 ret = _lttng_field_statedump(session, iter_field, nesting + 1, &prev_field_name);
3093 if (ret)
3094 return ret;
3095 }
3096 ret = print_tabs(session, nesting);
3097 if (ret)
3098 return ret;
3099 alignment = type->alignment;
3100 if (alignment) {
3101 ret = lttng_metadata_printf(session,
3102 "} align(%u)",
3103 alignment);
3104 } else {
3105 ret = lttng_metadata_printf(session,
3106 "}");
3107 }
3108 return ret;
3109 }
3110
3111 /*
3112 * Must be called with sessions_mutex held.
3113 */
3114 static
3115 int _lttng_struct_field_statedump(struct lttng_kernel_session *session,
3116 const struct lttng_kernel_event_field *field,
3117 size_t nesting)
3118 {
3119 int ret;
3120
3121 ret = _lttng_struct_type_statedump(session,
3122 lttng_kernel_get_type_struct(field->type), nesting);
3123 if (ret)
3124 return ret;
3125 return lttng_field_name_statedump(session, field, nesting);
3126 }
3127
3128 /*
3129 * Must be called with sessions_mutex held.
3130 */
3131 static
3132 int _lttng_variant_type_statedump(struct lttng_kernel_session *session,
3133 const struct lttng_kernel_type_variant *type,
3134 size_t nesting,
3135 const char *prev_field_name)
3136 {
3137 const char *tag_name;
3138 int ret;
3139 uint32_t i, nr_choices;
3140
3141 tag_name = type->tag_name;
3142 if (!tag_name)
3143 tag_name = prev_field_name;
3144 if (!tag_name)
3145 return -EINVAL;
3146 /*
3147 * CTF 1.8 does not allow expressing nonzero variant alignment in a nestable way.
3148 */
3149 if (type->alignment != 0)
3150 return -EINVAL;
3151 ret = print_tabs(session, nesting);
3152 if (ret)
3153 return ret;
3154 ret = lttng_metadata_printf(session,
3155 "variant <_%s> {\n",
3156 tag_name);
3157 if (ret)
3158 return ret;
3159 nr_choices = type->nr_choices;
3160 for (i = 0; i < nr_choices; i++) {
3161 const struct lttng_kernel_event_field *iter_field;
3162
3163 iter_field = type->choices[i];
3164 ret = _lttng_field_statedump(session, iter_field, nesting + 1, NULL);
3165 if (ret)
3166 return ret;
3167 }
3168 ret = print_tabs(session, nesting);
3169 if (ret)
3170 return ret;
3171 ret = lttng_metadata_printf(session,
3172 "}");
3173 return ret;
3174 }
3175
3176 /*
3177 * Must be called with sessions_mutex held.
3178 */
3179 static
3180 int _lttng_variant_field_statedump(struct lttng_kernel_session *session,
3181 const struct lttng_kernel_event_field *field,
3182 size_t nesting,
3183 const char *prev_field_name)
3184 {
3185 int ret;
3186
3187 ret = _lttng_variant_type_statedump(session,
3188 lttng_kernel_get_type_variant(field->type), nesting,
3189 prev_field_name);
3190 if (ret)
3191 return ret;
3192 return lttng_field_name_statedump(session, field, nesting);
3193 }
3194
3195 /*
3196 * Must be called with sessions_mutex held.
3197 */
3198 static
3199 int _lttng_array_field_statedump(struct lttng_kernel_session *session,
3200 const struct lttng_kernel_event_field *field,
3201 size_t nesting)
3202 {
3203 int ret;
3204 const struct lttng_kernel_type_array *array_type;
3205 const struct lttng_kernel_type_common *elem_type;
3206
3207 array_type = lttng_kernel_get_type_array(field->type);
3208 WARN_ON_ONCE(!array_type);
3209
3210 if (array_type->alignment) {
3211 ret = print_tabs(session, nesting);
3212 if (ret)
3213 return ret;
3214 ret = lttng_metadata_printf(session,
3215 "struct { } align(%u) _%s_padding;\n",
3216 array_type->alignment * CHAR_BIT,
3217 field->name);
3218 if (ret)
3219 return ret;
3220 }
3221 /*
3222 * Nested compound types: Only arrays of structures and variants are
3223 * currently supported.
3224 */
3225 elem_type = array_type->elem_type;
3226 switch (elem_type->type) {
3227 case lttng_kernel_type_integer:
3228 case lttng_kernel_type_struct:
3229 case lttng_kernel_type_variant:
3230 ret = _lttng_type_statedump(session, elem_type,
3231 array_type->encoding, nesting);
3232 if (ret)
3233 return ret;
3234 break;
3235
3236 default:
3237 return -EINVAL;
3238 }
3239 ret = lttng_metadata_printf(session,
3240 " _%s[%u];\n",
3241 field->name,
3242 array_type->length);
3243 return ret;
3244 }
3245
3246 /*
3247 * Must be called with sessions_mutex held.
3248 */
3249 static
3250 int _lttng_sequence_field_statedump(struct lttng_kernel_session *session,
3251 const struct lttng_kernel_event_field *field,
3252 size_t nesting,
3253 const char *prev_field_name)
3254 {
3255 int ret;
3256 const char *length_name;
3257 const struct lttng_kernel_type_sequence *sequence_type;
3258 const struct lttng_kernel_type_common *elem_type;
3259
3260 sequence_type = lttng_kernel_get_type_sequence(field->type);
3261 WARN_ON_ONCE(!sequence_type);
3262
3263 length_name = sequence_type->length_name;
3264 if (!length_name)
3265 length_name = prev_field_name;
3266 if (!length_name)
3267 return -EINVAL;
3268
3269 if (sequence_type->alignment) {
3270 ret = print_tabs(session, nesting);
3271 if (ret)
3272 return ret;
3273 ret = lttng_metadata_printf(session,
3274 "struct { } align(%u) _%s_padding;\n",
3275 sequence_type->alignment * CHAR_BIT,
3276 field->name);
3277 if (ret)
3278 return ret;
3279 }
3280
3281 /*
3282 * Nested compound types: Only sequences of structures and variants are
3283 * currently supported.
3284 */
3285 elem_type = sequence_type->elem_type;
3286 switch (elem_type->type) {
3287 case lttng_kernel_type_integer:
3288 case lttng_kernel_type_struct:
3289 case lttng_kernel_type_variant:
3290 ret = _lttng_type_statedump(session, elem_type,
3291 sequence_type->encoding, nesting);
3292 if (ret)
3293 return ret;
3294 break;
3295
3296 default:
3297 return -EINVAL;
3298 }
3299 ret = lttng_metadata_printf(session,
3300 " _%s[ _%s ];\n",
3301 field->name,
3302 length_name);
3303 return ret;
3304 }
3305
3306 /*
3307 * Must be called with sessions_mutex held.
3308 */
3309 static
3310 int _lttng_enum_type_statedump(struct lttng_kernel_session *session,
3311 const struct lttng_kernel_type_enum *type,
3312 size_t nesting)
3313 {
3314 const struct lttng_kernel_enum_desc *enum_desc;
3315 const struct lttng_kernel_type_common *container_type;
3316 int ret;
3317 unsigned int i, nr_entries;
3318
3319 container_type = type->container_type;
3320 if (container_type->type != lttng_kernel_type_integer) {
3321 ret = -EINVAL;
3322 goto end;
3323 }
3324 enum_desc = type->desc;
3325 nr_entries = enum_desc->nr_entries;
3326
3327 ret = print_tabs(session, nesting);
3328 if (ret)
3329 goto end;
3330 ret = lttng_metadata_printf(session, "enum : ");
3331 if (ret)
3332 goto end;
3333 ret = _lttng_integer_type_statedump(session, lttng_kernel_get_type_integer(container_type),
3334 lttng_kernel_string_encoding_none, 0);
3335 if (ret)
3336 goto end;
3337 ret = lttng_metadata_printf(session, " {\n");
3338 if (ret)
3339 goto end;
3340 /* Dump all entries */
3341 for (i = 0; i < nr_entries; i++) {
3342 const struct lttng_kernel_enum_entry *entry = enum_desc->entries[i];
3343 int j, len;
3344
3345 ret = print_tabs(session, nesting + 1);
3346 if (ret)
3347 goto end;
3348 ret = lttng_metadata_printf(session,
3349 "\"");
3350 if (ret)
3351 goto end;
3352 len = strlen(entry->string);
3353 /* Escape the characters '"' and '\' */
3354 for (j = 0; j < len; j++) {
3355 char c = entry->string[j];
3356
3357 switch (c) {
3358 case '"':
3359 ret = lttng_metadata_printf(session,
3360 "\\\"");
3361 break;
3362 case '\\':
3363 ret = lttng_metadata_printf(session,
3364 "\\\\");
3365 break;
3366 default:
3367 ret = lttng_metadata_printf(session,
3368 "%c", c);
3369 break;
3370 }
3371 if (ret)
3372 goto end;
3373 }
3374 ret = lttng_metadata_printf(session, "\"");
3375 if (ret)
3376 goto end;
3377
3378 if (entry->options.is_auto) {
3379 ret = lttng_metadata_printf(session, ",\n");
3380 if (ret)
3381 goto end;
3382 } else {
3383 ret = lttng_metadata_printf(session,
3384 " = ");
3385 if (ret)
3386 goto end;
3387 if (entry->start.signedness)
3388 ret = lttng_metadata_printf(session,
3389 "%lld", (long long) entry->start.value);
3390 else
3391 ret = lttng_metadata_printf(session,
3392 "%llu", entry->start.value);
3393 if (ret)
3394 goto end;
3395 if (entry->start.signedness == entry->end.signedness &&
3396 entry->start.value
3397 == entry->end.value) {
3398 ret = lttng_metadata_printf(session,
3399 ",\n");
3400 } else {
3401 if (entry->end.signedness) {
3402 ret = lttng_metadata_printf(session,
3403 " ... %lld,\n",
3404 (long long) entry->end.value);
3405 } else {
3406 ret = lttng_metadata_printf(session,
3407 " ... %llu,\n",
3408 entry->end.value);
3409 }
3410 }
3411 if (ret)
3412 goto end;
3413 }
3414 }
3415 ret = print_tabs(session, nesting);
3416 if (ret)
3417 goto end;
3418 ret = lttng_metadata_printf(session, "}");
3419 end:
3420 return ret;
3421 }
3422
3423 /*
3424 * Must be called with sessions_mutex held.
3425 */
3426 static
3427 int _lttng_enum_field_statedump(struct lttng_kernel_session *session,
3428 const struct lttng_kernel_event_field *field,
3429 size_t nesting)
3430 {
3431 int ret;
3432 const struct lttng_kernel_type_enum *enum_type;
3433
3434 enum_type = lttng_kernel_get_type_enum(field->type);
3435 WARN_ON_ONCE(!enum_type);
3436 ret = _lttng_enum_type_statedump(session, enum_type, nesting);
3437 if (ret)
3438 return ret;
3439 return lttng_field_name_statedump(session, field, nesting);
3440 }
3441
3442 static
3443 int _lttng_integer_field_statedump(struct lttng_kernel_session *session,
3444 const struct lttng_kernel_event_field *field,
3445 size_t nesting)
3446 {
3447 int ret;
3448
3449 ret = _lttng_integer_type_statedump(session, lttng_kernel_get_type_integer(field->type),
3450 lttng_kernel_string_encoding_none, nesting);
3451 if (ret)
3452 return ret;
3453 return lttng_field_name_statedump(session, field, nesting);
3454 }
3455
3456 static
3457 int _lttng_string_type_statedump(struct lttng_kernel_session *session,
3458 const struct lttng_kernel_type_string *type,
3459 size_t nesting)
3460 {
3461 int ret;
3462
3463 /* Default encoding is UTF8 */
3464 ret = print_tabs(session, nesting);
3465 if (ret)
3466 return ret;
3467 ret = lttng_metadata_printf(session,
3468 "string%s",
3469 type->encoding == lttng_kernel_string_encoding_ASCII ?
3470 " { encoding = ASCII; }" : "");
3471 return ret;
3472 }
3473
3474 static
3475 int _lttng_string_field_statedump(struct lttng_kernel_session *session,
3476 const struct lttng_kernel_event_field *field,
3477 size_t nesting)
3478 {
3479 const struct lttng_kernel_type_string *string_type;
3480 int ret;
3481
3482 string_type = lttng_kernel_get_type_string(field->type);
3483 WARN_ON_ONCE(!string_type);
3484 ret = _lttng_string_type_statedump(session, string_type, nesting);
3485 if (ret)
3486 return ret;
3487 return lttng_field_name_statedump(session, field, nesting);
3488 }
3489
3490 /*
3491 * Must be called with sessions_mutex held.
3492 */
3493 static
3494 int _lttng_type_statedump(struct lttng_kernel_session *session,
3495 const struct lttng_kernel_type_common *type,
3496 enum lttng_kernel_string_encoding parent_encoding,
3497 size_t nesting)
3498 {
3499 int ret = 0;
3500
3501 switch (type->type) {
3502 case lttng_kernel_type_integer:
3503 ret = _lttng_integer_type_statedump(session,
3504 lttng_kernel_get_type_integer(type),
3505 parent_encoding, nesting);
3506 break;
3507 case lttng_kernel_type_enum:
3508 ret = _lttng_enum_type_statedump(session,
3509 lttng_kernel_get_type_enum(type),
3510 nesting);
3511 break;
3512 case lttng_kernel_type_string:
3513 ret = _lttng_string_type_statedump(session,
3514 lttng_kernel_get_type_string(type),
3515 nesting);
3516 break;
3517 case lttng_kernel_type_struct:
3518 ret = _lttng_struct_type_statedump(session,
3519 lttng_kernel_get_type_struct(type),
3520 nesting);
3521 break;
3522 case lttng_kernel_type_variant:
3523 ret = _lttng_variant_type_statedump(session,
3524 lttng_kernel_get_type_variant(type),
3525 nesting, NULL);
3526 break;
3527
3528 /* Nested arrays and sequences are not supported yet. */
3529 case lttng_kernel_type_array:
3530 case lttng_kernel_type_sequence:
3531 default:
3532 WARN_ON_ONCE(1);
3533 return -EINVAL;
3534 }
3535 return ret;
3536 }
3537
3538 /*
3539 * Must be called with sessions_mutex held.
3540 */
3541 static
3542 int _lttng_field_statedump(struct lttng_kernel_session *session,
3543 const struct lttng_kernel_event_field *field,
3544 size_t nesting,
3545 const char **prev_field_name_p)
3546 {
3547 const char *prev_field_name = NULL;
3548 int ret = 0;
3549
3550 if (prev_field_name_p)
3551 prev_field_name = *prev_field_name_p;
3552 switch (field->type->type) {
3553 case lttng_kernel_type_integer:
3554 ret = _lttng_integer_field_statedump(session, field, nesting);
3555 break;
3556 case lttng_kernel_type_enum:
3557 ret = _lttng_enum_field_statedump(session, field, nesting);
3558 break;
3559 case lttng_kernel_type_string:
3560 ret = _lttng_string_field_statedump(session, field, nesting);
3561 break;
3562 case lttng_kernel_type_struct:
3563 ret = _lttng_struct_field_statedump(session, field, nesting);
3564 break;
3565 case lttng_kernel_type_array:
3566 ret = _lttng_array_field_statedump(session, field, nesting);
3567 break;
3568 case lttng_kernel_type_sequence:
3569 ret = _lttng_sequence_field_statedump(session, field, nesting, prev_field_name);
3570 break;
3571 case lttng_kernel_type_variant:
3572 ret = _lttng_variant_field_statedump(session, field, nesting, prev_field_name);
3573 break;
3574
3575 default:
3576 WARN_ON_ONCE(1);
3577 return -EINVAL;
3578 }
3579 if (prev_field_name_p)
3580 *prev_field_name_p = field->name;
3581 return ret;
3582 }
3583
3584 static
3585 int _lttng_context_metadata_statedump(struct lttng_kernel_session *session,
3586 struct lttng_kernel_ctx *ctx)
3587 {
3588 const char *prev_field_name = NULL;
3589 int ret = 0;
3590 int i;
3591
3592 if (!ctx)
3593 return 0;
3594 for (i = 0; i < ctx->nr_fields; i++) {
3595 const struct lttng_kernel_ctx_field *field = &ctx->fields[i];
3596
3597 ret = _lttng_field_statedump(session, field->event_field, 2, &prev_field_name);
3598 if (ret)
3599 return ret;
3600 }
3601 return ret;
3602 }
3603
3604 static
3605 int _lttng_fields_metadata_statedump(struct lttng_kernel_session *session,
3606 struct lttng_kernel_event_recorder *event_recorder)
3607 {
3608 const char *prev_field_name = NULL;
3609 const struct lttng_kernel_event_desc *desc = event_recorder->priv->parent.desc;
3610 int ret = 0;
3611 int i;
3612
3613 for (i = 0; i < desc->nr_fields; i++) {
3614 const struct lttng_kernel_event_field *field = desc->fields[i];
3615
3616 ret = _lttng_field_statedump(session, field, 2, &prev_field_name);
3617 if (ret)
3618 return ret;
3619 }
3620 return ret;
3621 }
3622
3623 /*
3624 * Must be called with sessions_mutex held.
3625 * The entire event metadata is printed as a single atomic metadata
3626 * transaction.
3627 */
3628 static
3629 int _lttng_event_metadata_statedump(struct lttng_kernel_session *session,
3630 struct lttng_channel *chan,
3631 struct lttng_kernel_event_recorder *event_recorder)
3632 {
3633 int ret = 0;
3634
3635 if (event_recorder->priv->metadata_dumped || !LTTNG_READ_ONCE(session->active))
3636 return 0;
3637 if (chan->channel_type == METADATA_CHANNEL)
3638 return 0;
3639
3640 lttng_metadata_begin(session);
3641
3642 ret = lttng_metadata_printf(session,
3643 "event {\n"
3644 " name = \"%s\";\n"
3645 " id = %u;\n"
3646 " stream_id = %u;\n",
3647 event_recorder->priv->parent.desc->event_name,
3648 event_recorder->priv->id,
3649 event_recorder->chan->id);
3650 if (ret)
3651 goto end;
3652
3653 ret = lttng_metadata_printf(session,
3654 " fields := struct {\n"
3655 );
3656 if (ret)
3657 goto end;
3658
3659 ret = _lttng_fields_metadata_statedump(session, event_recorder);
3660 if (ret)
3661 goto end;
3662
3663 /*
3664 * LTTng space reservation can only reserve multiples of the
3665 * byte size.
3666 */
3667 ret = lttng_metadata_printf(session,
3668 " };\n"
3669 "};\n\n");
3670 if (ret)
3671 goto end;
3672
3673 event_recorder->priv->metadata_dumped = 1;
3674 end:
3675 lttng_metadata_end(session);
3676 return ret;
3677
3678 }
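/*
 * For illustration only: with a hypothetical event "foo" of id 3 in stream 0
 * carrying a single 32-bit field, the transaction above would emit metadata
 * of the form (the exact field declarations depend on the probe):
 *
 *     event {
 *         name = "foo";
 *         id = 3;
 *         stream_id = 0;
 *         fields := struct {
 *             integer { size = 32; ... } myfield;
 *         };
 *     };
 */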
3679
3680 /*
3681 * Must be called with sessions_mutex held.
3682 * The entire channel metadata is printed as a single atomic metadata
3683 * transaction.
3684 */
3685 static
3686 int _lttng_channel_metadata_statedump(struct lttng_kernel_session *session,
3687 struct lttng_channel *chan)
3688 {
3689 int ret = 0;
3690
3691 if (chan->metadata_dumped || !LTTNG_READ_ONCE(session->active))
3692 return 0;
3693
3694 if (chan->channel_type == METADATA_CHANNEL)
3695 return 0;
3696
3697 lttng_metadata_begin(session);
3698
3699 WARN_ON_ONCE(!chan->header_type);
3700 ret = lttng_metadata_printf(session,
3701 "stream {\n"
3702 " id = %u;\n"
3703 " event.header := %s;\n"
3704 " packet.context := struct packet_context;\n",
3705 chan->id,
3706 chan->header_type == 1 ? "struct event_header_compact" :
3707 "struct event_header_large");
3708 if (ret)
3709 goto end;
3710
3711 if (chan->ctx) {
3712 ret = lttng_metadata_printf(session,
3713 " event.context := struct {\n");
3714 if (ret)
3715 goto end;
3716 }
3717 ret = _lttng_context_metadata_statedump(session, chan->ctx);
3718 if (ret)
3719 goto end;
3720 if (chan->ctx) {
3721 ret = lttng_metadata_printf(session,
3722 " };\n");
3723 if (ret)
3724 goto end;
3725 }
3726
3727 ret = lttng_metadata_printf(session,
3728 "};\n\n");
3729
3730 chan->metadata_dumped = 1;
3731 end:
3732 lttng_metadata_end(session);
3733 return ret;
3734 }
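/*
 * For illustration only: a channel with id 0, a compact event header and no
 * channel context would produce a stream declaration of the form:
 *
 *     stream {
 *         id = 0;
 *         event.header := struct event_header_compact;
 *         packet.context := struct packet_context;
 *     };
 */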
3735
3736 /*
3737 * Must be called with sessions_mutex held.
3738 */
3739 static
3740 int _lttng_stream_packet_context_declare(struct lttng_kernel_session *session)
3741 {
3742 return lttng_metadata_printf(session,
3743 "struct packet_context {\n"
3744 " uint64_clock_monotonic_t timestamp_begin;\n"
3745 " uint64_clock_monotonic_t timestamp_end;\n"
3746 " uint64_t content_size;\n"
3747 " uint64_t packet_size;\n"
3748 " uint64_t packet_seq_num;\n"
3749 " unsigned long events_discarded;\n"
3750 " uint32_t cpu_id;\n"
3751 "};\n\n"
3752 );
3753 }
3754
3755 /*
3756 * Compact header:
3757 * id range: 0 - 30.
3758 * id 31 is reserved to indicate an extended header.
3759 *
3760 * Large header:
3761 * id range: 0 - 65534.
3762 * id 65535 is reserved to indicate an extended header.
3763 *
3764 * Must be called with sessions_mutex held.
3765 */
3766 static
3767 int _lttng_event_header_declare(struct lttng_kernel_session *session)
3768 {
3769 return lttng_metadata_printf(session,
3770 "struct event_header_compact {\n"
3771 " enum : uint5_t { compact = 0 ... 30, extended = 31 } id;\n"
3772 " variant <id> {\n"
3773 " struct {\n"
3774 " uint27_clock_monotonic_t timestamp;\n"
3775 " } compact;\n"
3776 " struct {\n"
3777 " uint32_t id;\n"
3778 " uint64_clock_monotonic_t timestamp;\n"
3779 " } extended;\n"
3780 " } v;\n"
3781 "} align(%u);\n"
3782 "\n"
3783 "struct event_header_large {\n"
3784 " enum : uint16_t { compact = 0 ... 65534, extended = 65535 } id;\n"
3785 " variant <id> {\n"
3786 " struct {\n"
3787 " uint32_clock_monotonic_t timestamp;\n"
3788 " } compact;\n"
3789 " struct {\n"
3790 " uint32_t id;\n"
3791 " uint64_clock_monotonic_t timestamp;\n"
3792 " } extended;\n"
3793 " } v;\n"
3794 "} align(%u);\n\n",
3795 lttng_alignof(uint32_t) * CHAR_BIT,
3796 lttng_alignof(uint16_t) * CHAR_BIT
3797 );
3798 }
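/*
 * Worked example of the declarations above (illustrative): with the compact
 * header, an event with id 5 is written as the 5-bit id followed by the
 * 27-bit "compact" timestamp, whereas an event with id 40, which does not
 * fit in 0 - 30, must set id = 31 ("extended") and then record the 32-bit
 * id and 64-bit timestamp from the "extended" variant branch.
 */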
3799
3800 /*
3801 * Approximation of NTP time of day to clock monotonic correlation,
3802 * taken at start of trace.
3803 * Yes, this is only an approximation. Yes, we can (and will) do better
3804 * in future versions.
3805 * This function may return a negative offset. This can happen if the
3806 * system sets the REALTIME clock to 0 after boot.
3807 *
3808 * Use a 64-bit timespec on kernels that have it; this makes 32-bit
3809 * architectures y2038 compliant.
3810 */
3811 static
3812 int64_t measure_clock_offset(void)
3813 {
3814 uint64_t monotonic_avg, monotonic[2], realtime;
3815 uint64_t tcf = trace_clock_freq();
3816 int64_t offset;
3817 unsigned long flags;
3818 #ifdef LTTNG_KERNEL_HAS_TIMESPEC64
3819 struct timespec64 rts = { 0, 0 };
3820 #else
3821 struct timespec rts = { 0, 0 };
3822 #endif
3823
3824 /* Disable interrupts to increase correlation precision. */
3825 local_irq_save(flags);
3826 monotonic[0] = trace_clock_read64();
3827 #ifdef LTTNG_KERNEL_HAS_TIMESPEC64
3828 ktime_get_real_ts64(&rts);
3829 #else
3830 getnstimeofday(&rts);
3831 #endif
3832 monotonic[1] = trace_clock_read64();
3833 local_irq_restore(flags);
3834
3835 monotonic_avg = (monotonic[0] + monotonic[1]) >> 1;
3836 realtime = (uint64_t) rts.tv_sec * tcf;
3837 if (tcf == NSEC_PER_SEC) {
3838 realtime += rts.tv_nsec;
3839 } else {
3840 uint64_t n = rts.tv_nsec * tcf;
3841
3842 do_div(n, NSEC_PER_SEC);
3843 realtime += n;
3844 }
3845 offset = (int64_t) realtime - monotonic_avg;
3846 return offset;
3847 }
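/*
 * Rough numerical sketch of the computation above (illustrative values):
 * with tcf = 1000000000 (1 GHz), rts = { 1000000000 s, 500000000 ns } and a
 * monotonic average of 3600000000000, we get
 *
 *     realtime = 1000000000 * 1000000000 + 500000000
 *              = 1000000000500000000
 *     offset   = realtime - monotonic_avg
 *              = 1000000000500000000 - 3600000000000
 *              = 999996400500000000
 *
 * i.e. the value to add to a monotonic trace clock reading to approximate
 * time since the Epoch, expressed in trace clock units.
 */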
3848
3849 static
3850 int print_escaped_ctf_string(struct lttng_kernel_session *session, const char *string)
3851 {
3852 int ret = 0;
3853 size_t i;
3854 char cur;
3855
3856 i = 0;
3857 cur = string[i];
3858 while (cur != '\0') {
3859 switch (cur) {
3860 case '\n':
3861 ret = lttng_metadata_printf(session, "%s", "\\n");
3862 break;
3863 case '\\':
3864 case '"':
3865 ret = lttng_metadata_printf(session, "%c", '\\');
3866 if (ret)
3867 goto error;
3868 /* We still print the current char */
3869 /* Fallthrough */
3870 default:
3871 ret = lttng_metadata_printf(session, "%c", cur);
3872 break;
3873 }
3874
3875 if (ret)
3876 goto error;
3877
3878 cur = string[++i];
3879 }
3880 error:
3881 return ret;
3882 }
3883
3884 static
3885 int print_metadata_escaped_field(struct lttng_kernel_session *session, const char *field,
3886 const char *field_value)
3887 {
3888 int ret;
3889
3890 ret = lttng_metadata_printf(session, " %s = \"", field);
3891 if (ret)
3892 goto error;
3893
3894 ret = print_escaped_ctf_string(session, field_value);
3895 if (ret)
3896 goto error;
3897
3898 ret = lttng_metadata_printf(session, "\";\n");
3899
3900 error:
3901 return ret;
3902 }
3903
3904 /*
3905 * Output metadata into this session's metadata buffers.
3906 * Must be called with sessions_mutex held.
3907 */
3908 static
3909 int _lttng_session_metadata_statedump(struct lttng_kernel_session *session)
3910 {
3911 unsigned char *uuid_c = session->priv->uuid.b;
3912 unsigned char uuid_s[37], clock_uuid_s[BOOT_ID_LEN];
3913 const char *product_uuid;
3914 struct lttng_channel *chan;
3915 struct lttng_kernel_event_recorder_private *event_recorder_priv;
3916 int ret = 0;
3917
3918 if (!LTTNG_READ_ONCE(session->active))
3919 return 0;
3920
3921 lttng_metadata_begin(session);
3922
3923 if (session->priv->metadata_dumped)
3924 goto skip_session;
3925
3926 snprintf(uuid_s, sizeof(uuid_s),
3927 "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
3928 uuid_c[0], uuid_c[1], uuid_c[2], uuid_c[3],
3929 uuid_c[4], uuid_c[5], uuid_c[6], uuid_c[7],
3930 uuid_c[8], uuid_c[9], uuid_c[10], uuid_c[11],
3931 uuid_c[12], uuid_c[13], uuid_c[14], uuid_c[15]);
3932
3933 ret = lttng_metadata_printf(session,
3934 "typealias integer { size = 8; align = %u; signed = false; } := uint8_t;\n"
3935 "typealias integer { size = 16; align = %u; signed = false; } := uint16_t;\n"
3936 "typealias integer { size = 32; align = %u; signed = false; } := uint32_t;\n"
3937 "typealias integer { size = 64; align = %u; signed = false; } := uint64_t;\n"
3938 "typealias integer { size = %u; align = %u; signed = false; } := unsigned long;\n"
3939 "typealias integer { size = 5; align = 1; signed = false; } := uint5_t;\n"
3940 "typealias integer { size = 27; align = 1; signed = false; } := uint27_t;\n"
3941 "\n"
3942 "trace {\n"
3943 " major = %u;\n"
3944 " minor = %u;\n"
3945 " uuid = \"%s\";\n"
3946 " byte_order = %s;\n"
3947 " packet.header := struct {\n"
3948 " uint32_t magic;\n"
3949 " uint8_t uuid[16];\n"
3950 " uint32_t stream_id;\n"
3951 " uint64_t stream_instance_id;\n"
3952 " };\n"
3953 "};\n\n",
3954 lttng_alignof(uint8_t) * CHAR_BIT,
3955 lttng_alignof(uint16_t) * CHAR_BIT,
3956 lttng_alignof(uint32_t) * CHAR_BIT,
3957 lttng_alignof(uint64_t) * CHAR_BIT,
3958 sizeof(unsigned long) * CHAR_BIT,
3959 lttng_alignof(unsigned long) * CHAR_BIT,
3960 CTF_SPEC_MAJOR,
3961 CTF_SPEC_MINOR,
3962 uuid_s,
3963 #if __BYTE_ORDER == __BIG_ENDIAN
3964 "be"
3965 #else
3966 "le"
3967 #endif
3968 );
3969 if (ret)
3970 goto end;
3971
3972 ret = lttng_metadata_printf(session,
3973 "env {\n"
3974 " hostname = \"%s\";\n"
3975 " domain = \"kernel\";\n"
3976 " sysname = \"%s\";\n"
3977 " kernel_release = \"%s\";\n"
3978 " kernel_version = \"%s\";\n"
3979 " tracer_name = \"lttng-modules\";\n"
3980 " tracer_major = %d;\n"
3981 " tracer_minor = %d;\n"
3982 " tracer_patchlevel = %d;\n"
3983 " trace_buffering_scheme = \"global\";\n",
3984 current->nsproxy->uts_ns->name.nodename,
3985 utsname()->sysname,
3986 utsname()->release,
3987 utsname()->version,
3988 LTTNG_MODULES_MAJOR_VERSION,
3989 LTTNG_MODULES_MINOR_VERSION,
3990 LTTNG_MODULES_PATCHLEVEL_VERSION
3991 );
3992 if (ret)
3993 goto end;
3994
3995 ret = print_metadata_escaped_field(session, "trace_name", session->priv->name);
3996 if (ret)
3997 goto end;
3998 ret = print_metadata_escaped_field(session, "trace_creation_datetime",
3999 session->priv->creation_time);
4000 if (ret)
4001 goto end;
4002
4003 /* Add the product UUID to the 'env' section */
4004 product_uuid = dmi_get_system_info(DMI_PRODUCT_UUID);
4005 if (product_uuid) {
4006 ret = lttng_metadata_printf(session,
4007 " product_uuid = \"%s\";\n",
4008 product_uuid
4009 );
4010 if (ret)
4011 goto end;
4012 }
4013
4014 /* Close the 'env' section */
4015 ret = lttng_metadata_printf(session, "};\n\n");
4016 if (ret)
4017 goto end;
4018
4019 ret = lttng_metadata_printf(session,
4020 "clock {\n"
4021 " name = \"%s\";\n",
4022 trace_clock_name()
4023 );
4024 if (ret)
4025 goto end;
4026
4027 if (!trace_clock_uuid(clock_uuid_s)) {
4028 ret = lttng_metadata_printf(session,
4029 " uuid = \"%s\";\n",
4030 clock_uuid_s
4031 );
4032 if (ret)
4033 goto end;
4034 }
4035
4036 ret = lttng_metadata_printf(session,
4037 " description = \"%s\";\n"
4038 " freq = %llu; /* Frequency, in Hz */\n"
4039 " /* clock value offset from Epoch is: offset * (1/freq) */\n"
4040 " offset = %lld;\n"
4041 "};\n\n",
4042 trace_clock_description(),
4043 (unsigned long long) trace_clock_freq(),
4044 (long long) measure_clock_offset()
4045 );
4046 if (ret)
4047 goto end;
4048
4049 ret = lttng_metadata_printf(session,
4050 "typealias integer {\n"
4051 " size = 27; align = 1; signed = false;\n"
4052 " map = clock.%s.value;\n"
4053 "} := uint27_clock_monotonic_t;\n"
4054 "\n"
4055 "typealias integer {\n"
4056 " size = 32; align = %u; signed = false;\n"
4057 " map = clock.%s.value;\n"
4058 "} := uint32_clock_monotonic_t;\n"
4059 "\n"
4060 "typealias integer {\n"
4061 " size = 64; align = %u; signed = false;\n"
4062 " map = clock.%s.value;\n"
4063 "} := uint64_clock_monotonic_t;\n\n",
4064 trace_clock_name(),
4065 lttng_alignof(uint32_t) * CHAR_BIT,
4066 trace_clock_name(),
4067 lttng_alignof(uint64_t) * CHAR_BIT,
4068 trace_clock_name()
4069 );
4070 if (ret)
4071 goto end;
4072
4073 ret = _lttng_stream_packet_context_declare(session);
4074 if (ret)
4075 goto end;
4076
4077 ret = _lttng_event_header_declare(session);
4078 if (ret)
4079 goto end;
4080
4081 skip_session:
4082 list_for_each_entry(chan, &session->priv->chan, list) {
4083 ret = _lttng_channel_metadata_statedump(session, chan);
4084 if (ret)
4085 goto end;
4086 }
4087
4088 list_for_each_entry(event_recorder_priv, &session->priv->events, node) {
4089 ret = _lttng_event_metadata_statedump(session, event_recorder_priv->pub->chan,
4090 event_recorder_priv->pub);
4091 if (ret)
4092 goto end;
4093 }
4094 session->priv->metadata_dumped = 1;
4095 end:
4096 lttng_metadata_end(session);
4097 return ret;
4098 }
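/*
 * For illustration only, an abridged env section as emitted above, with
 * obviously machine-dependent placeholder values:
 *
 *     env {
 *         hostname = "buildbox";
 *         domain = "kernel";
 *         sysname = "Linux";
 *         kernel_release = "5.15.0";
 *         tracer_name = "lttng-modules";
 *         tracer_major = 2;
 *         ...
 *     };
 */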
4099
4100 /**
4101 * lttng_transport_register - LTT transport registration
4102 * @transport: transport structure
4103 *
4104 * Registers a transport which can be used as an output to extract the data
4105 * out of LTTng. The module calling this registration function must ensure that
4106 * no trap-inducing code will be executed by the transport functions, e.g.
4107 * vmalloc_sync_mappings() must be called between a vmalloc and the moment the
4108 * memory is made visible to the transport function. This registration acts as
4109 * a vmalloc_sync_mappings(). Therefore, the module only needs to synchronize
4110 * the TLBs if it allocates virtual memory after its registration.
4111 */
4112 void lttng_transport_register(struct lttng_transport *transport)
4113 {
4114 /*
4115 * Make sure no page fault can be triggered by the module about to be
4116 * registered. We deal with this here so we don't have to call
4117 * vmalloc_sync_mappings() in each module's init.
4118 */
4119 wrapper_vmalloc_sync_mappings();
4120
4121 mutex_lock(&sessions_mutex);
4122 list_add_tail(&transport->node, &lttng_transport_list);
4123 mutex_unlock(&sessions_mutex);
4124 }
4125 EXPORT_SYMBOL_GPL(lttng_transport_register);
4126
4127 /**
4128 * lttng_transport_unregister - LTT transport unregistration
4129 * @transport: transport structure
4130 */
4131 void lttng_transport_unregister(struct lttng_transport *transport)
4132 {
4133 mutex_lock(&sessions_mutex);
4134 list_del(&transport->node);
4135 mutex_unlock(&sessions_mutex);
4136 }
4137 EXPORT_SYMBOL_GPL(lttng_transport_unregister);
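/*
 * Illustrative (hypothetical) usage from a ring buffer client module; the
 * transport name and the elided callbacks below are placeholders, not the
 * actual client definitions:
 *
 *     static struct lttng_transport lttng_example_transport = {
 *         .name = "relay-example",
 *         .owner = THIS_MODULE,
 *         .ops = {
 *             ...
 *         },
 *     };
 *
 *     static int __init lttng_example_client_init(void)
 *     {
 *         lttng_transport_register(&lttng_example_transport);
 *         return 0;
 *     }
 *
 *     static void __exit lttng_example_client_exit(void)
 *     {
 *         lttng_transport_unregister(&lttng_example_transport);
 *     }
 */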
4138
4139 void lttng_counter_transport_register(struct lttng_counter_transport *transport)
4140 {
4141 /*
4142 * Make sure no page fault can be triggered by the module about to be
4143 * registered. We deal with this here so we don't have to call
4144 * vmalloc_sync_mappings() in each module's init.
4145 */
4146 wrapper_vmalloc_sync_mappings();
4147
4148 mutex_lock(&sessions_mutex);
4149 list_add_tail(&transport->node, &lttng_counter_transport_list);
4150 mutex_unlock(&sessions_mutex);
4151 }
4152 EXPORT_SYMBOL_GPL(lttng_counter_transport_register);
4153
4154 void lttng_counter_transport_unregister(struct lttng_counter_transport *transport)
4155 {
4156 mutex_lock(&sessions_mutex);
4157 list_del(&transport->node);
4158 mutex_unlock(&sessions_mutex);
4159 }
4160 EXPORT_SYMBOL_GPL(lttng_counter_transport_unregister);
4161
4162 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
4163
4164 enum cpuhp_state lttng_hp_prepare;
4165 enum cpuhp_state lttng_hp_online;
4166
4167 static int lttng_hotplug_prepare(unsigned int cpu, struct hlist_node *node)
4168 {
4169 struct lttng_cpuhp_node *lttng_node;
4170
4171 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4172 switch (lttng_node->component) {
4173 case LTTNG_RING_BUFFER_FRONTEND:
4174 return 0;
4175 case LTTNG_RING_BUFFER_BACKEND:
4176 return lttng_cpuhp_rb_backend_prepare(cpu, lttng_node);
4177 case LTTNG_RING_BUFFER_ITER:
4178 return 0;
4179 case LTTNG_CONTEXT_PERF_COUNTERS:
4180 return 0;
4181 default:
4182 return -EINVAL;
4183 }
4184 }
4185
4186 static int lttng_hotplug_dead(unsigned int cpu, struct hlist_node *node)
4187 {
4188 struct lttng_cpuhp_node *lttng_node;
4189
4190 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4191 switch (lttng_node->component) {
4192 case LTTNG_RING_BUFFER_FRONTEND:
4193 return lttng_cpuhp_rb_frontend_dead(cpu, lttng_node);
4194 case LTTNG_RING_BUFFER_BACKEND:
4195 return 0;
4196 case LTTNG_RING_BUFFER_ITER:
4197 return 0;
4198 case LTTNG_CONTEXT_PERF_COUNTERS:
4199 return lttng_cpuhp_perf_counter_dead(cpu, lttng_node);
4200 default:
4201 return -EINVAL;
4202 }
4203 }
4204
4205 static int lttng_hotplug_online(unsigned int cpu, struct hlist_node *node)
4206 {
4207 struct lttng_cpuhp_node *lttng_node;
4208
4209 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4210 switch (lttng_node->component) {
4211 case LTTNG_RING_BUFFER_FRONTEND:
4212 return lttng_cpuhp_rb_frontend_online(cpu, lttng_node);
4213 case LTTNG_RING_BUFFER_BACKEND:
4214 return 0;
4215 case LTTNG_RING_BUFFER_ITER:
4216 return lttng_cpuhp_rb_iter_online(cpu, lttng_node);
4217 case LTTNG_CONTEXT_PERF_COUNTERS:
4218 return lttng_cpuhp_perf_counter_online(cpu, lttng_node);
4219 default:
4220 return -EINVAL;
4221 }
4222 }
4223
4224 static int lttng_hotplug_offline(unsigned int cpu, struct hlist_node *node)
4225 {
4226 struct lttng_cpuhp_node *lttng_node;
4227
4228 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4229 switch (lttng_node->component) {
4230 case LTTNG_RING_BUFFER_FRONTEND:
4231 return lttng_cpuhp_rb_frontend_offline(cpu, lttng_node);
4232 case LTTNG_RING_BUFFER_BACKEND:
4233 return 0;
4234 case LTTNG_RING_BUFFER_ITER:
4235 return 0;
4236 case LTTNG_CONTEXT_PERF_COUNTERS:
4237 return 0;
4238 default:
4239 return -EINVAL;
4240 }
4241 }
4242
4243 static int __init lttng_init_cpu_hotplug(void)
4244 {
4245 int ret;
4246
4247 ret = cpuhp_setup_state_multi(CPUHP_BP_PREPARE_DYN, "lttng:prepare",
4248 lttng_hotplug_prepare,
4249 lttng_hotplug_dead);
4250 if (ret < 0) {
4251 return ret;
4252 }
4253 lttng_hp_prepare = ret;
4254 lttng_rb_set_hp_prepare(ret);
4255
4256 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "lttng:online",
4257 lttng_hotplug_online,
4258 lttng_hotplug_offline);
4259 if (ret < 0) {
4260 cpuhp_remove_multi_state(lttng_hp_prepare);
4261 lttng_hp_prepare = 0;
4262 return ret;
4263 }
4264 lttng_hp_online = ret;
4265 lttng_rb_set_hp_online(ret);
4266
4267 return 0;
4268 }
4269
4270 static void __exit lttng_exit_cpu_hotplug(void)
4271 {
4272 lttng_rb_set_hp_online(0);
4273 cpuhp_remove_multi_state(lttng_hp_online);
4274 lttng_rb_set_hp_prepare(0);
4275 cpuhp_remove_multi_state(lttng_hp_prepare);
4276 }
4277
4278 #else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
4279 static int lttng_init_cpu_hotplug(void)
4280 {
4281 return 0;
4282 }
4283 static void lttng_exit_cpu_hotplug(void)
4284 {
4285 }
4286 #endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
4287
4288
4289 static int __init lttng_events_init(void)
4290 {
4291 int ret;
4292
4293 ret = wrapper_lttng_fixup_sig(THIS_MODULE);
4294 if (ret)
4295 return ret;
4296 ret = wrapper_get_pfnblock_flags_mask_init();
4297 if (ret)
4298 return ret;
4299 ret = wrapper_get_pageblock_flags_mask_init();
4300 if (ret)
4301 return ret;
4302 ret = lttng_probes_init();
4303 if (ret)
4304 return ret;
4305 ret = lttng_context_init();
4306 if (ret)
4307 return ret;
4308 ret = lttng_tracepoint_init();
4309 if (ret)
4310 goto error_tp;
4311 event_recorder_cache = KMEM_CACHE(lttng_kernel_event_recorder, 0);
4312 if (!event_recorder_cache) {
4313 ret = -ENOMEM;
4314 goto error_kmem_event_recorder;
4315 }
4316 event_recorder_private_cache = KMEM_CACHE(lttng_kernel_event_recorder_private, 0);
4317 if (!event_recorder_private_cache) {
4318 ret = -ENOMEM;
4319 goto error_kmem_event_recorder_private;
4320 }
4321 event_notifier_cache = KMEM_CACHE(lttng_kernel_event_notifier, 0);
4322 if (!event_notifier_cache) {
4323 ret = -ENOMEM;
4324 goto error_kmem_event_notifier;
4325 }
4326 event_notifier_private_cache = KMEM_CACHE(lttng_kernel_event_notifier_private, 0);
4327 if (!event_notifier_private_cache) {
4328 ret = -ENOMEM;
4329 goto error_kmem_event_notifier_private;
4330 }
4331 ret = lttng_abi_init();
4332 if (ret)
4333 goto error_abi;
4334 ret = lttng_logger_init();
4335 if (ret)
4336 goto error_logger;
4337 ret = lttng_init_cpu_hotplug();
4338 if (ret)
4339 goto error_hotplug;
4340 printk(KERN_NOTICE "LTTng: Loaded modules v%s.%s.%s%s (%s)%s%s\n",
4341 __stringify(LTTNG_MODULES_MAJOR_VERSION),
4342 __stringify(LTTNG_MODULES_MINOR_VERSION),
4343 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
4344 LTTNG_MODULES_EXTRAVERSION,
4345 LTTNG_VERSION_NAME,
4346 #ifdef LTTNG_EXTRA_VERSION_GIT
4347 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
4348 #else
4349 "",
4350 #endif
4351 #ifdef LTTNG_EXTRA_VERSION_NAME
4352 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
4353 #else
4354 "");
4355 #endif
4356 return 0;
4357
4358 error_hotplug:
4359 lttng_logger_exit();
4360 error_logger:
4361 lttng_abi_exit();
4362 error_abi:
4363 kmem_cache_destroy(event_notifier_private_cache);
4364 error_kmem_event_notifier_private:
4365 kmem_cache_destroy(event_notifier_cache);
4366 error_kmem_event_notifier:
4367 kmem_cache_destroy(event_recorder_private_cache);
4368 error_kmem_event_recorder_private:
4369 kmem_cache_destroy(event_recorder_cache);
4370 error_kmem_event_recorder:
4371 lttng_tracepoint_exit();
4372 error_tp:
4373 lttng_context_exit();
4374 printk(KERN_NOTICE "LTTng: Failed to load modules v%s.%s.%s%s (%s)%s%s\n",
4375 __stringify(LTTNG_MODULES_MAJOR_VERSION),
4376 __stringify(LTTNG_MODULES_MINOR_VERSION),
4377 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
4378 LTTNG_MODULES_EXTRAVERSION,
4379 LTTNG_VERSION_NAME,
4380 #ifdef LTTNG_EXTRA_VERSION_GIT
4381 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
4382 #else
4383 "",
4384 #endif
4385 #ifdef LTTNG_EXTRA_VERSION_NAME
4386 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
4387 #else
4388 "");
4389 #endif
4390 return ret;
4391 }
4392
4393 module_init(lttng_events_init);
4394
4395 static void __exit lttng_events_exit(void)
4396 {
4397 struct lttng_kernel_session_private *session_priv, *tmpsession_priv;
4398
4399 lttng_exit_cpu_hotplug();
4400 lttng_logger_exit();
4401 lttng_abi_exit();
4402 list_for_each_entry_safe(session_priv, tmpsession_priv, &sessions, list)
4403 lttng_session_destroy(session_priv->pub);
4404 kmem_cache_destroy(event_recorder_cache);
4405 kmem_cache_destroy(event_recorder_private_cache);
4406 kmem_cache_destroy(event_notifier_cache);
4407 kmem_cache_destroy(event_notifier_private_cache);
4408 lttng_tracepoint_exit();
4409 lttng_context_exit();
4410 printk(KERN_NOTICE "LTTng: Unloaded modules v%s.%s.%s%s (%s)%s%s\n",
4411 __stringify(LTTNG_MODULES_MAJOR_VERSION),
4412 __stringify(LTTNG_MODULES_MINOR_VERSION),
4413 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
4414 LTTNG_MODULES_EXTRAVERSION,
4415 LTTNG_VERSION_NAME,
4416 #ifdef LTTNG_EXTRA_VERSION_GIT
4417 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
4418 #else
4419 "",
4420 #endif
4421 #ifdef LTTNG_EXTRA_VERSION_NAME
4422 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
4423 #else
4424 "");
4425 #endif
4426 }
4427
4428 module_exit(lttng_events_exit);
4429
4430 #include <generated/patches.h>
4431 #ifdef LTTNG_EXTRA_VERSION_GIT
4432 MODULE_INFO(extra_version_git, LTTNG_EXTRA_VERSION_GIT);
4433 #endif
4434 #ifdef LTTNG_EXTRA_VERSION_NAME
4435 MODULE_INFO(extra_version_name, LTTNG_EXTRA_VERSION_NAME);
4436 #endif
4437 MODULE_LICENSE("GPL and additional rights");
4438 MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
4439 MODULE_DESCRIPTION("LTTng tracer");
4440 MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
4441 __stringify(LTTNG_MODULES_MINOR_VERSION) "."
4442 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
4443 LTTNG_MODULES_EXTRAVERSION);