Refactoring: introduce lttng_kernel_event_id_available
[lttng-modules.git] / src / lttng-events.c
1 /* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
2 *
3 * lttng-events.c
4 *
5 * Holds LTTng per-session event registry.
6 *
7 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 */
9
10 /*
11 * This page_alloc.h wrapper needs to be included before gfpflags.h because it
12 * overrides a function with a define.
13 */
14 #include "wrapper/page_alloc.h"
15
16 #include <linux/module.h>
17 #include <linux/mutex.h>
18 #include <linux/sched.h>
19 #include <linux/slab.h>
20 #include <linux/jiffies.h>
21 #include <linux/utsname.h>
22 #include <linux/err.h>
23 #include <linux/seq_file.h>
24 #include <linux/file.h>
25 #include <linux/anon_inodes.h>
26 #include <wrapper/file.h>
27 #include <linux/uaccess.h>
28 #include <linux/vmalloc.h>
29 #include <linux/dmi.h>
30
31 #include <wrapper/compiler_attributes.h>
32 #include <wrapper/uuid.h>
33 #include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_mappings() */
34 #include <wrapper/random.h>
35 #include <wrapper/tracepoint.h>
36 #include <wrapper/list.h>
37 #include <wrapper/types.h>
38 #include <lttng/kernel-version.h>
39 #include <lttng/events.h>
40 #include <lttng/events-internal.h>
41 #include <lttng/lttng-bytecode.h>
42 #include <lttng/tracer.h>
43 #include <lttng/event-notifier-notification.h>
44 #include <lttng/abi-old.h>
45 #include <lttng/endian.h>
46 #include <lttng/string-utils.h>
47 #include <lttng/utils.h>
48 #include <ringbuffer/backend.h>
49 #include <ringbuffer/frontend.h>
50 #include <wrapper/time.h>
51
52 #define METADATA_CACHE_DEFAULT_SIZE 4096
53
54 static LIST_HEAD(sessions);
55 static LIST_HEAD(event_notifier_groups);
56 static LIST_HEAD(lttng_transport_list);
57 static LIST_HEAD(lttng_counter_transport_list);
58 /*
59 * Protect the sessions and metadata caches.
60 */
61 static DEFINE_MUTEX(sessions_mutex);
62 static struct kmem_cache *event_recorder_cache;
63 static struct kmem_cache *event_recorder_private_cache;
64 static struct kmem_cache *event_notifier_cache;
65 static struct kmem_cache *event_notifier_private_cache;
66
67 static void lttng_session_lazy_sync_event_enablers(struct lttng_kernel_session *session);
68 static void lttng_session_sync_event_enablers(struct lttng_kernel_session *session);
69 static void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group);
70 static void lttng_event_enabler_sync(struct lttng_event_enabler_common *event_enabler);
71
72 static void _lttng_event_destroy(struct lttng_kernel_event_common *event);
73 static void _lttng_channel_destroy(struct lttng_kernel_channel_buffer *chan);
74 static int _lttng_event_unregister(struct lttng_kernel_event_common *event);
75 static
76 int _lttng_event_recorder_metadata_statedump(struct lttng_kernel_event_common *event);
77 static
78 int _lttng_session_metadata_statedump(struct lttng_kernel_session *session);
79 static
80 void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream);
81 static
82 int _lttng_type_statedump(struct lttng_kernel_session *session,
83 const struct lttng_kernel_type_common *type,
84 enum lttng_kernel_string_encoding parent_encoding,
85 size_t nesting);
86 static
87 int _lttng_field_statedump(struct lttng_kernel_session *session,
88 const struct lttng_kernel_event_field *field,
89 size_t nesting, const char **prev_field_name_p);
90
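/*
 * Wait for in-flight probe callers to complete: synchronize_rcu() on
 * kernels >= 5.1 (and the listed RHEL range), synchronize_sched()
 * otherwise, plus an extra synchronize_rcu() on PREEMPT_RT(_FULL)
 * configurations.
 */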
91 void synchronize_trace(void)
92 {
93 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,1,0) || \
94 LTTNG_RHEL_KERNEL_RANGE(4,18,0,193,0,0, 4,19,0,0,0,0))
95 synchronize_rcu();
96 #else
97 synchronize_sched();
98 #endif
99
100 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0))
101 #ifdef CONFIG_PREEMPT_RT_FULL
102 synchronize_rcu();
103 #endif
104 #else /* (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0)) */
105 #ifdef CONFIG_PREEMPT_RT
106 synchronize_rcu();
107 #endif
108 #endif /* (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0)) */
109 }
110
111 void lttng_lock_sessions(void)
112 {
113 mutex_lock(&sessions_mutex);
114 }
115
116 void lttng_unlock_sessions(void)
117 {
118 mutex_unlock(&sessions_mutex);
119 }
120
121 static struct lttng_transport *lttng_transport_find(const char *name)
122 {
123 struct lttng_transport *transport;
124
125 list_for_each_entry(transport, &lttng_transport_list, node) {
126 if (!strcmp(transport->name, name))
127 return transport;
128 }
129 return NULL;
130 }
131
132 /*
133 * Called with sessions lock held.
134 */
135 int lttng_session_active(void)
136 {
137 struct lttng_kernel_session_private *iter;
138
139 list_for_each_entry(iter, &sessions, list) {
140 if (iter->pub->active)
141 return 1;
142 }
143 return 0;
144 }
145
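/*
 * Create a tracing session: allocate the public/private session pair,
 * set up a METADATA_CACHE_DEFAULT_SIZE metadata cache with its own
 * lock and refcount, initialize the per-session event hash table and
 * id trackers, and link the session into the global sessions list
 * under sessions_mutex. Returns NULL on allocation or tracker
 * initialization failure.
 */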
146 struct lttng_kernel_session *lttng_session_create(void)
147 {
148 struct lttng_kernel_session *session;
149 struct lttng_kernel_session_private *session_priv;
150 struct lttng_metadata_cache *metadata_cache;
151 int i;
152
153 mutex_lock(&sessions_mutex);
154 session = lttng_kvzalloc(sizeof(*session), GFP_KERNEL);
155 if (!session)
156 goto err;
157 session_priv = lttng_kvzalloc(sizeof(*session_priv), GFP_KERNEL);
158 if (!session_priv)
159 goto err_free_session;
160 session->priv = session_priv;
161 session_priv->pub = session;
162
163 INIT_LIST_HEAD(&session_priv->chan);
164 INIT_LIST_HEAD(&session_priv->events);
165 lttng_guid_gen(&session_priv->uuid);
166
167 metadata_cache = kzalloc(sizeof(struct lttng_metadata_cache),
168 GFP_KERNEL);
169 if (!metadata_cache)
170 goto err_free_session_private;
171 metadata_cache->data = vzalloc(METADATA_CACHE_DEFAULT_SIZE);
172 if (!metadata_cache->data)
173 goto err_free_cache;
174 metadata_cache->cache_alloc = METADATA_CACHE_DEFAULT_SIZE;
175 kref_init(&metadata_cache->refcount);
176 mutex_init(&metadata_cache->lock);
177 session_priv->metadata_cache = metadata_cache;
178 INIT_LIST_HEAD(&metadata_cache->metadata_stream);
179 memcpy(&metadata_cache->uuid, &session_priv->uuid,
180 sizeof(metadata_cache->uuid));
181 INIT_LIST_HEAD(&session_priv->enablers_head);
182 for (i = 0; i < LTTNG_EVENT_HT_SIZE; i++)
183 INIT_HLIST_HEAD(&session_priv->events_ht.table[i]);
184 list_add(&session_priv->list, &sessions);
185
186 if (lttng_id_tracker_init(&session->pid_tracker, session, TRACKER_PID))
187 goto tracker_alloc_error;
188 if (lttng_id_tracker_init(&session->vpid_tracker, session, TRACKER_VPID))
189 goto tracker_alloc_error;
190 if (lttng_id_tracker_init(&session->uid_tracker, session, TRACKER_UID))
191 goto tracker_alloc_error;
192 if (lttng_id_tracker_init(&session->vuid_tracker, session, TRACKER_VUID))
193 goto tracker_alloc_error;
194 if (lttng_id_tracker_init(&session->gid_tracker, session, TRACKER_GID))
195 goto tracker_alloc_error;
196 if (lttng_id_tracker_init(&session->vgid_tracker, session, TRACKER_VGID))
197 goto tracker_alloc_error;
198
199 mutex_unlock(&sessions_mutex);
200
201 return session;
202
203 tracker_alloc_error:
204 lttng_id_tracker_fini(&session->pid_tracker);
205 lttng_id_tracker_fini(&session->vpid_tracker);
206 lttng_id_tracker_fini(&session->uid_tracker);
207 lttng_id_tracker_fini(&session->vuid_tracker);
208 lttng_id_tracker_fini(&session->gid_tracker);
209 lttng_id_tracker_fini(&session->vgid_tracker);
210 err_free_cache:
211 kfree(metadata_cache);
212 err_free_session_private:
213 lttng_kvfree(session_priv);
214 err_free_session:
215 lttng_kvfree(session);
216 err:
217 mutex_unlock(&sessions_mutex);
218 return NULL;
219 }
220
221 static
222 struct lttng_counter_transport *lttng_counter_transport_find(const char *name)
223 {
224 struct lttng_counter_transport *transport;
225
226 list_for_each_entry(transport, &lttng_counter_transport_list, node) {
227 if (!strcmp(transport->name, name))
228 return transport;
229 }
230 return NULL;
231 }
232
233 struct lttng_counter *lttng_kernel_counter_create(
234 const char *counter_transport_name,
235 size_t number_dimensions, const size_t *dimensions_sizes)
236 {
237 struct lttng_counter *counter = NULL;
238 struct lttng_counter_transport *counter_transport = NULL;
239
240 counter_transport = lttng_counter_transport_find(counter_transport_name);
241 if (!counter_transport) {
242 printk(KERN_WARNING "LTTng: counter transport %s not found.\n",
243 counter_transport_name);
244 goto notransport;
245 }
246 if (!try_module_get(counter_transport->owner)) {
247 printk(KERN_WARNING "LTTng: Can't lock counter transport module.\n");
248 goto notransport;
249 }
250
251 counter = lttng_kvzalloc(sizeof(struct lttng_counter), GFP_KERNEL);
252 if (!counter)
253 goto nomem;
254
255 /* Create event notifier error counter. */
256 counter->ops = &counter_transport->ops;
257 counter->transport = counter_transport;
258
259 counter->counter = counter->ops->counter_create(
260 number_dimensions, dimensions_sizes, 0);
261 if (!counter->counter) {
262 goto create_error;
263 }
264
265 return counter;
266
267 create_error:
268 lttng_kvfree(counter);
269 nomem:
270 if (counter_transport)
271 module_put(counter_transport->owner);
272 notransport:
273 return NULL;
274 }
275
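/*
 * Create an event notifier group backed by a "relay-event-notifier"
 * ring buffer channel used to transport notifications to user space.
 * The transport module is pinned with try_module_get() and the group
 * is linked into the global event_notifier_groups list.
 */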
276 struct lttng_event_notifier_group *lttng_event_notifier_group_create(void)
277 {
278 struct lttng_transport *transport = NULL;
279 struct lttng_event_notifier_group *event_notifier_group;
280 const char *transport_name = "relay-event-notifier";
281 size_t subbuf_size = 4096; //TODO
282 size_t num_subbuf = 16; //TODO
283 unsigned int switch_timer_interval = 0;
284 unsigned int read_timer_interval = 0;
285 int i;
286
287 mutex_lock(&sessions_mutex);
288
289 transport = lttng_transport_find(transport_name);
290 if (!transport) {
291 printk(KERN_WARNING "LTTng: transport %s not found\n",
292 transport_name);
293 goto notransport;
294 }
295 if (!try_module_get(transport->owner)) {
296 printk(KERN_WARNING "LTTng: Can't lock transport %s module.\n",
297 transport_name);
298 goto notransport;
299 }
300
301 event_notifier_group = lttng_kvzalloc(sizeof(struct lttng_event_notifier_group),
302 GFP_KERNEL);
303 if (!event_notifier_group)
304 goto nomem;
305
306 /*
307 * Initialize the ring buffer used to store event notifier
308 * notifications.
309 */
310 event_notifier_group->ops = &transport->ops;
311 event_notifier_group->chan = transport->ops.priv->channel_create(
312 transport_name, event_notifier_group, NULL,
313 subbuf_size, num_subbuf, switch_timer_interval,
314 read_timer_interval);
315 if (!event_notifier_group->chan)
316 goto create_error;
317
318 event_notifier_group->transport = transport;
319
320 INIT_LIST_HEAD(&event_notifier_group->enablers_head);
321 INIT_LIST_HEAD(&event_notifier_group->event_notifiers_head);
322 for (i = 0; i < LTTNG_EVENT_HT_SIZE; i++)
323 INIT_HLIST_HEAD(&event_notifier_group->events_ht.table[i]);
324
325 list_add(&event_notifier_group->node, &event_notifier_groups);
326
327 mutex_unlock(&sessions_mutex);
328
329 return event_notifier_group;
330
331 create_error:
332 lttng_kvfree(event_notifier_group);
333 nomem:
334 if (transport)
335 module_put(transport->owner);
336 notransport:
337 mutex_unlock(&sessions_mutex);
338 return NULL;
339 }
340
341 void metadata_cache_destroy(struct kref *kref)
342 {
343 struct lttng_metadata_cache *cache =
344 container_of(kref, struct lttng_metadata_cache, refcount);
345 vfree(cache->data);
346 kfree(cache);
347 }
348
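/*
 * Tear down a session in dependency order: unregister events and
 * syscall tables, wait for in-flight probes with synchronize_trace(),
 * then destroy enablers, events and channels, hang up the metadata
 * streams, finalize the id trackers and drop the metadata cache
 * reference.
 */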
349 void lttng_session_destroy(struct lttng_kernel_session *session)
350 {
351 struct lttng_kernel_channel_buffer_private *chan_priv, *tmpchan_priv;
352 struct lttng_kernel_event_recorder_private *event_recorder_priv, *tmpevent_recorder_priv;
353 struct lttng_metadata_stream *metadata_stream;
354 struct lttng_event_enabler_common *event_enabler, *tmp_event_enabler;
355 int ret;
356
357 mutex_lock(&sessions_mutex);
358 WRITE_ONCE(session->active, 0);
359 list_for_each_entry(chan_priv, &session->priv->chan, node) {
360 ret = lttng_syscalls_unregister_syscall_table(&chan_priv->parent.syscall_table);
361 WARN_ON(ret);
362 }
363 list_for_each_entry(event_recorder_priv, &session->priv->events, parent.node) {
364 ret = _lttng_event_unregister(&event_recorder_priv->pub->parent);
365 WARN_ON(ret);
366 }
367 synchronize_trace(); /* Wait for in-flight events to complete */
368 list_for_each_entry(chan_priv, &session->priv->chan, node) {
369 ret = lttng_syscalls_destroy_syscall_table(&chan_priv->parent.syscall_table);
370 WARN_ON(ret);
371 }
372 list_for_each_entry_safe(event_enabler, tmp_event_enabler, &session->priv->enablers_head, node)
373 lttng_event_enabler_destroy(event_enabler);
374 list_for_each_entry_safe(event_recorder_priv, tmpevent_recorder_priv, &session->priv->events, parent.node)
375 _lttng_event_destroy(&event_recorder_priv->pub->parent);
376 list_for_each_entry_safe(chan_priv, tmpchan_priv, &session->priv->chan, node) {
377 BUG_ON(chan_priv->channel_type == METADATA_CHANNEL);
378 _lttng_channel_destroy(chan_priv->pub);
379 }
380 mutex_lock(&session->priv->metadata_cache->lock);
381 list_for_each_entry(metadata_stream, &session->priv->metadata_cache->metadata_stream, list)
382 _lttng_metadata_channel_hangup(metadata_stream);
383 mutex_unlock(&session->priv->metadata_cache->lock);
384 lttng_id_tracker_fini(&session->pid_tracker);
385 lttng_id_tracker_fini(&session->vpid_tracker);
386 lttng_id_tracker_fini(&session->uid_tracker);
387 lttng_id_tracker_fini(&session->vuid_tracker);
388 lttng_id_tracker_fini(&session->gid_tracker);
389 lttng_id_tracker_fini(&session->vgid_tracker);
390 kref_put(&session->priv->metadata_cache->refcount, metadata_cache_destroy);
391 list_del(&session->priv->list);
392 mutex_unlock(&sessions_mutex);
393 lttng_kvfree(session->priv);
394 lttng_kvfree(session);
395 }
396
397 void lttng_event_notifier_group_destroy(
398 struct lttng_event_notifier_group *event_notifier_group)
399 {
400 struct lttng_event_enabler_common *event_enabler, *tmp_event_enabler;
401 struct lttng_kernel_event_notifier_private *event_notifier_priv, *tmpevent_notifier_priv;
402 int ret;
403
404 if (!event_notifier_group)
405 return;
406
407 mutex_lock(&sessions_mutex);
408
409 ret = lttng_syscalls_unregister_syscall_table(&event_notifier_group->syscall_table);
410 WARN_ON(ret);
411
412 list_for_each_entry_safe(event_notifier_priv, tmpevent_notifier_priv,
413 &event_notifier_group->event_notifiers_head, parent.node) {
414 ret = _lttng_event_unregister(&event_notifier_priv->pub->parent);
415 WARN_ON(ret);
416 }
417
418 /* Wait for in-flight event notifiers to complete */
419 synchronize_trace();
420
421 irq_work_sync(&event_notifier_group->wakeup_pending);
422
423 ret = lttng_syscalls_destroy_syscall_table(&event_notifier_group->syscall_table);
424 WARN_ON(ret);
425
426 list_for_each_entry_safe(event_enabler, tmp_event_enabler,
427 &event_notifier_group->enablers_head, node)
428 lttng_event_enabler_destroy(event_enabler);
429
430 list_for_each_entry_safe(event_notifier_priv, tmpevent_notifier_priv,
431 &event_notifier_group->event_notifiers_head, parent.node)
432 _lttng_event_destroy(&event_notifier_priv->pub->parent);
433
434 if (event_notifier_group->error_counter) {
435 struct lttng_counter *error_counter = event_notifier_group->error_counter;
436
437 error_counter->ops->counter_destroy(error_counter->counter);
438 module_put(error_counter->transport->owner);
439 lttng_kvfree(error_counter);
440 event_notifier_group->error_counter = NULL;
441 }
442
443 event_notifier_group->ops->priv->channel_destroy(event_notifier_group->chan);
444 module_put(event_notifier_group->transport->owner);
445 list_del(&event_notifier_group->node);
446
447 mutex_unlock(&sessions_mutex);
448 lttng_kvfree(event_notifier_group);
449 }
450
451 int lttng_session_statedump(struct lttng_kernel_session *session)
452 {
453 int ret;
454
455 mutex_lock(&sessions_mutex);
456 ret = lttng_statedump_start(session);
457 mutex_unlock(&sessions_mutex);
458 return ret;
459 }
460
461 int lttng_session_enable(struct lttng_kernel_session *session)
462 {
463 int ret = 0;
464 struct lttng_kernel_channel_buffer_private *chan_priv;
465
466 mutex_lock(&sessions_mutex);
467 if (session->active) {
468 ret = -EBUSY;
469 goto end;
470 }
471
472 /* Set transient enabler state to "enabled" */
473 session->priv->tstate = 1;
474
475 /* We need to sync enablers with session before activation. */
476 lttng_session_sync_event_enablers(session);
477
478 /*
479 * Snapshot the number of events per channel to know the type of header
480 * we need to use.
481 */
482 list_for_each_entry(chan_priv, &session->priv->chan, node) {
483 if (chan_priv->header_type)
484 continue; /* don't change it if session stop/restart */
485 if (chan_priv->free_event_id < 31)
486 chan_priv->header_type = 1; /* compact */
487 else
488 chan_priv->header_type = 2; /* large */
489 }
490
491 /* Clear each stream's quiescent state. */
492 list_for_each_entry(chan_priv, &session->priv->chan, node) {
493 if (chan_priv->channel_type != METADATA_CHANNEL)
494 lib_ring_buffer_clear_quiescent_channel(chan_priv->rb_chan);
495 }
496
497 WRITE_ONCE(session->active, 1);
498 WRITE_ONCE(session->priv->been_active, 1);
499 ret = _lttng_session_metadata_statedump(session);
500 if (ret) {
501 WRITE_ONCE(session->active, 0);
502 goto end;
503 }
504 ret = lttng_statedump_start(session);
505 if (ret)
506 WRITE_ONCE(session->active, 0);
507 end:
508 mutex_unlock(&sessions_mutex);
509 return ret;
510 }
511
512 int lttng_session_disable(struct lttng_kernel_session *session)
513 {
514 int ret = 0;
515 struct lttng_kernel_channel_buffer_private *chan_priv;
516
517 mutex_lock(&sessions_mutex);
518 if (!session->active) {
519 ret = -EBUSY;
520 goto end;
521 }
522 WRITE_ONCE(session->active, 0);
523
524 /* Set transient enabler state to "disabled" */
525 session->priv->tstate = 0;
526 lttng_session_sync_event_enablers(session);
527
528 /* Set each stream's quiescent state. */
529 list_for_each_entry(chan_priv, &session->priv->chan, node) {
530 if (chan_priv->channel_type != METADATA_CHANNEL)
531 lib_ring_buffer_set_quiescent_channel(chan_priv->rb_chan);
532 }
533 end:
534 mutex_unlock(&sessions_mutex);
535 return ret;
536 }
537
538 int lttng_session_metadata_regenerate(struct lttng_kernel_session *session)
539 {
540 int ret = 0;
541 struct lttng_kernel_channel_buffer_private *chan_priv;
542 struct lttng_kernel_event_recorder_private *event_recorder_priv;
543 struct lttng_metadata_cache *cache = session->priv->metadata_cache;
544 struct lttng_metadata_stream *stream;
545
546 mutex_lock(&sessions_mutex);
547 if (!session->active) {
548 ret = -EBUSY;
549 goto end;
550 }
551
552 mutex_lock(&cache->lock);
553 memset(cache->data, 0, cache->cache_alloc);
554 cache->metadata_written = 0;
555 cache->version++;
556 list_for_each_entry(stream, &session->priv->metadata_cache->metadata_stream, list) {
557 stream->metadata_out = 0;
558 stream->metadata_in = 0;
559 }
560 mutex_unlock(&cache->lock);
561
562 session->priv->metadata_dumped = 0;
563 list_for_each_entry(chan_priv, &session->priv->chan, node) {
564 chan_priv->metadata_dumped = 0;
565 }
566
567 list_for_each_entry(event_recorder_priv, &session->priv->events, parent.node) {
568 event_recorder_priv->metadata_dumped = 0;
569 }
570
571 ret = _lttng_session_metadata_statedump(session);
572
573 end:
574 mutex_unlock(&sessions_mutex);
575 return ret;
576 }
577
578 static
579 bool is_channel_buffer_metadata(struct lttng_kernel_channel_common *channel)
580 {
581 struct lttng_kernel_channel_buffer *chan_buf;
582
583 if (channel->type != LTTNG_KERNEL_CHANNEL_TYPE_BUFFER)
584 return false;
585 chan_buf = container_of(channel, struct lttng_kernel_channel_buffer, parent);
586 if (chan_buf->priv->channel_type == METADATA_CHANNEL)
587 return true;
588 return false;
589 }
590
591 int lttng_channel_enable(struct lttng_kernel_channel_common *channel)
592 {
593 int ret = 0;
594
595 mutex_lock(&sessions_mutex);
596 if (is_channel_buffer_metadata(channel)) {
597 ret = -EPERM;
598 goto end;
599 }
600 if (channel->enabled) {
601 ret = -EEXIST;
602 goto end;
603 }
604 /* Set transient enabler state to "enabled" */
605 channel->priv->tstate = 1;
606 lttng_session_sync_event_enablers(channel->session);
607 /* Set atomically the state to "enabled" */
608 WRITE_ONCE(channel->enabled, 1);
609 end:
610 mutex_unlock(&sessions_mutex);
611 return ret;
612 }
613
614 int lttng_channel_disable(struct lttng_kernel_channel_common *channel)
615 {
616 int ret = 0;
617
618 mutex_lock(&sessions_mutex);
619 if (is_channel_buffer_metadata(channel)) {
620 ret = -EPERM;
621 goto end;
622 }
623 if (!channel->enabled) {
624 ret = -EEXIST;
625 goto end;
626 }
627 /* Set atomically the state to "disabled" */
628 WRITE_ONCE(channel->enabled, 0);
629 /* Set transient enabler state to "disabled" */
630 channel->priv->tstate = 0;
631 lttng_session_sync_event_enablers(channel->session);
632 end:
633 mutex_unlock(&sessions_mutex);
634 return ret;
635 }
636
637 int lttng_event_enable(struct lttng_kernel_event_common *event)
638 {
639 int ret = 0;
640
641 mutex_lock(&sessions_mutex);
642 switch (event->type) {
643 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
644 {
645 struct lttng_kernel_event_recorder *event_recorder =
646 container_of(event, struct lttng_kernel_event_recorder, parent);
647
648 if (event_recorder->chan->priv->channel_type == METADATA_CHANNEL) {
649 ret = -EPERM;
650 goto end;
651 }
652 break;
653 }
654 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
655 switch (event->priv->instrumentation) {
656 case LTTNG_KERNEL_ABI_KRETPROBE:
657 ret = -EINVAL;
658 goto end;
659 default:
660 break;
661 }
662 break;
663 default:
664 break;
665 }
666
667 if (event->enabled) {
668 ret = -EEXIST;
669 goto end;
670 }
671 switch (event->priv->instrumentation) {
672 case LTTNG_KERNEL_ABI_TRACEPOINT:
673 lttng_fallthrough;
674 case LTTNG_KERNEL_ABI_SYSCALL:
675 ret = -EINVAL;
676 break;
677
678 case LTTNG_KERNEL_ABI_KPROBE:
679 lttng_fallthrough;
680 case LTTNG_KERNEL_ABI_UPROBE:
681 WRITE_ONCE(event->enabled, 1);
682 break;
683
684 case LTTNG_KERNEL_ABI_KRETPROBE:
685 ret = lttng_kretprobes_event_enable_state(event, 1);
686 break;
687
688 case LTTNG_KERNEL_ABI_FUNCTION:
689 lttng_fallthrough;
690 case LTTNG_KERNEL_ABI_NOOP:
691 lttng_fallthrough;
692 default:
693 WARN_ON_ONCE(1);
694 ret = -EINVAL;
695 }
696 end:
697 mutex_unlock(&sessions_mutex);
698 return ret;
699 }
700
701 int lttng_event_disable(struct lttng_kernel_event_common *event)
702 {
703 int ret = 0;
704
705 mutex_lock(&sessions_mutex);
706 switch (event->type) {
707 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
708 {
709 struct lttng_kernel_event_recorder *event_recorder =
710 container_of(event, struct lttng_kernel_event_recorder, parent);
711
712 if (event_recorder->chan->priv->channel_type == METADATA_CHANNEL) {
713 ret = -EPERM;
714 goto end;
715 }
716 break;
717 }
718 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
719 switch (event->priv->instrumentation) {
720 case LTTNG_KERNEL_ABI_KRETPROBE:
721 ret = -EINVAL;
722 goto end;
723 default:
724 break;
725 }
726 break;
727 default:
728 break;
729 }
730
731 if (!event->enabled) {
732 ret = -EEXIST;
733 goto end;
734 }
735 switch (event->priv->instrumentation) {
736 case LTTNG_KERNEL_ABI_TRACEPOINT:
737 lttng_fallthrough;
738 case LTTNG_KERNEL_ABI_SYSCALL:
739 ret = -EINVAL;
740 break;
741
742 case LTTNG_KERNEL_ABI_KPROBE:
743 lttng_fallthrough;
744 case LTTNG_KERNEL_ABI_UPROBE:
745 WRITE_ONCE(event->enabled, 0);
746 break;
747
748 case LTTNG_KERNEL_ABI_KRETPROBE:
749 ret = lttng_kretprobes_event_enable_state(event, 0);
750 break;
751
752 case LTTNG_KERNEL_ABI_FUNCTION:
753 lttng_fallthrough;
754 case LTTNG_KERNEL_ABI_NOOP:
755 lttng_fallthrough;
756 default:
757 WARN_ON_ONCE(1);
758 ret = -EINVAL;
759 }
760 end:
761 mutex_unlock(&sessions_mutex);
762 return ret;
763 }
764
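/*
 * Create a buffer channel for a session. Channels other than the
 * metadata channel cannot be added once the session has been active.
 * The transport module is pinned, a channel id is taken from
 * free_chan_id, and the ring buffer channel is created before the
 * channel is linked into the session and enabled.
 */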
765 struct lttng_kernel_channel_buffer *lttng_channel_buffer_create(struct lttng_kernel_session *session,
766 const char *transport_name,
767 void *buf_addr,
768 size_t subbuf_size, size_t num_subbuf,
769 unsigned int switch_timer_interval,
770 unsigned int read_timer_interval,
771 enum channel_type channel_type)
772 {
773 struct lttng_kernel_channel_buffer *chan;
774 struct lttng_kernel_channel_buffer_private *chan_priv;
775 struct lttng_transport *transport = NULL;
776
777 mutex_lock(&sessions_mutex);
778 if (session->priv->been_active && channel_type != METADATA_CHANNEL)
779 goto active; /* Refuse to add channel to active session */
780 transport = lttng_transport_find(transport_name);
781 if (!transport) {
782 printk(KERN_WARNING "LTTng: transport %s not found\n",
783 transport_name);
784 goto notransport;
785 }
786 if (!try_module_get(transport->owner)) {
787 printk(KERN_WARNING "LTTng: Can't lock transport module.\n");
788 goto notransport;
789 }
790 chan = kzalloc(sizeof(struct lttng_kernel_channel_buffer), GFP_KERNEL);
791 if (!chan)
792 goto nomem;
793 chan_priv = kzalloc(sizeof(struct lttng_kernel_channel_buffer_private), GFP_KERNEL);
794 if (!chan_priv)
795 goto nomem_priv;
796 chan->priv = chan_priv;
797 chan_priv->pub = chan;
798 chan->parent.type = LTTNG_KERNEL_CHANNEL_TYPE_BUFFER;
799 chan->parent.session = session;
800 chan->priv->id = session->priv->free_chan_id++;
801 chan->ops = &transport->ops;
802 /*
803 * Note: the channel creation op already writes into the packet
804 * headers. Therefore the "chan" information used as input
805 * should be already accessible.
806 */
807 chan->priv->rb_chan = transport->ops.priv->channel_create(transport_name,
808 chan, buf_addr, subbuf_size, num_subbuf,
809 switch_timer_interval, read_timer_interval);
810 if (!chan->priv->rb_chan)
811 goto create_error;
812 chan->priv->parent.tstate = 1;
813 chan->parent.enabled = 1;
814 chan->priv->transport = transport;
815 chan->priv->channel_type = channel_type;
816 list_add(&chan->priv->node, &session->priv->chan);
817 mutex_unlock(&sessions_mutex);
818 return chan;
819
820 create_error:
821 kfree(chan_priv);
822 nomem_priv:
823 kfree(chan);
824 nomem:
825 if (transport)
826 module_put(transport->owner);
827 notransport:
828 active:
829 mutex_unlock(&sessions_mutex);
830 return NULL;
831 }
832
833 /*
834 * Only used internally at session destruction for per-cpu channels, and
835 * when the metadata channel is released.
836 * Needs to be called with sessions mutex held.
837 */
838 static
839 void _lttng_channel_destroy(struct lttng_kernel_channel_buffer *chan)
840 {
841 chan->ops->priv->channel_destroy(chan->priv->rb_chan);
842 module_put(chan->priv->transport->owner);
843 list_del(&chan->priv->node);
844 lttng_kernel_destroy_context(chan->priv->ctx);
845 kfree(chan->priv);
846 kfree(chan);
847 }
848
849 void lttng_metadata_channel_destroy(struct lttng_kernel_channel_buffer *chan)
850 {
851 BUG_ON(chan->priv->channel_type != METADATA_CHANNEL);
852
853 /* Protect the metadata cache with the sessions_mutex. */
854 mutex_lock(&sessions_mutex);
855 _lttng_channel_destroy(chan);
856 mutex_unlock(&sessions_mutex);
857 }
858 EXPORT_SYMBOL_GPL(lttng_metadata_channel_destroy);
859
860 static
861 void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream)
862 {
863 stream->finalized = 1;
864 wake_up_interruptible(&stream->read_wait);
865 }
866
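/*
 * Check whether the enabler's target can still allocate event IDs.
 * Only recorder enablers consume IDs, taken from the channel's
 * free_event_id counter (an unsigned int): -1U means the ID space is
 * exhausted, and kretprobes need two IDs (entry and return events),
 * hence the -2U bound. Event notifiers do not allocate per-channel
 * IDs, so the check always passes for them. Used by
 * _lttng_kernel_event_recorder_create() below before allocating an
 * event.
 */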
867 static
868 bool lttng_kernel_event_id_available(struct lttng_event_enabler_common *event_enabler)
869 {
870 struct lttng_kernel_abi_event *event_param = &event_enabler->event_param;
871 enum lttng_kernel_abi_instrumentation itype = event_param->instrumentation;
872
873 switch (event_enabler->enabler_type) {
874 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
875 {
876 struct lttng_event_recorder_enabler *event_recorder_enabler =
877 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
878 struct lttng_kernel_channel_buffer *chan = event_recorder_enabler->chan;
879
880 switch (itype) {
881 case LTTNG_KERNEL_ABI_TRACEPOINT:
882 lttng_fallthrough;
883 case LTTNG_KERNEL_ABI_KPROBE:
884 lttng_fallthrough;
885 case LTTNG_KERNEL_ABI_SYSCALL:
886 lttng_fallthrough;
887 case LTTNG_KERNEL_ABI_UPROBE:
888 if (chan->priv->free_event_id == -1U)
889 return false;
890 return true;
891 case LTTNG_KERNEL_ABI_KRETPROBE:
892 /* kretprobes require 2 event IDs. */
893 if (chan->priv->free_event_id >= -2U)
894 return false;
895 return true;
896 default:
897 WARN_ON_ONCE(1);
898 return false;
899 }
900 }
901 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
902 return true;
903 default:
904 WARN_ON_ONCE(1);
905 return false;
906 }
907 }
908
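/*
 * Allocate the public/private object pair for an event from the
 * dedicated kmem caches and wire the cross-pointers. For recorder
 * enablers this consumes one event ID from the channel (the caller is
 * expected to have checked lttng_kernel_event_id_available() first);
 * for notifier enablers it copies the user token and error counter
 * index from the enabler and initializes the capture bytecode list.
 */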
909 static
910 struct lttng_kernel_event_common *lttng_kernel_event_alloc(struct lttng_event_enabler_common *event_enabler)
911 {
912 struct lttng_kernel_abi_event *event_param = &event_enabler->event_param;
913 enum lttng_kernel_abi_instrumentation itype = event_param->instrumentation;
914
915 switch (event_enabler->enabler_type) {
916 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
917 {
918 struct lttng_event_recorder_enabler *event_recorder_enabler =
919 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
920 struct lttng_kernel_event_recorder *event_recorder;
921 struct lttng_kernel_event_recorder_private *event_recorder_priv;
922 struct lttng_kernel_channel_buffer *chan = event_recorder_enabler->chan;
923
924 event_recorder = kmem_cache_zalloc(event_recorder_cache, GFP_KERNEL);
925 if (!event_recorder)
926 return NULL;
927 event_recorder_priv = kmem_cache_zalloc(event_recorder_private_cache, GFP_KERNEL);
928 if (!event_recorder_priv) {
929 kmem_cache_free(event_recorder_cache, event_recorder);
930 return NULL;
931 }
932 event_recorder_priv->pub = event_recorder;
933 event_recorder_priv->parent.pub = &event_recorder->parent;
934 event_recorder->priv = event_recorder_priv;
935 event_recorder->parent.priv = &event_recorder_priv->parent;
936
937 event_recorder->parent.type = LTTNG_KERNEL_EVENT_TYPE_RECORDER;
938 event_recorder->parent.run_filter = lttng_kernel_interpret_event_filter;
939 event_recorder->priv->parent.instrumentation = itype;
940 INIT_LIST_HEAD(&event_recorder->priv->parent.filter_bytecode_runtime_head);
941 INIT_LIST_HEAD(&event_recorder->priv->parent.enablers_ref_head);
942
943 event_recorder->chan = chan;
944 event_recorder->priv->id = chan->priv->free_event_id++;
945 return &event_recorder->parent;
946 }
947 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
948 {
949 struct lttng_event_notifier_enabler *event_notifier_enabler =
950 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
951 struct lttng_kernel_event_notifier *event_notifier;
952 struct lttng_kernel_event_notifier_private *event_notifier_priv;
953
954 event_notifier = kmem_cache_zalloc(event_notifier_cache, GFP_KERNEL);
955 if (!event_notifier)
956 return NULL;
957 event_notifier_priv = kmem_cache_zalloc(event_notifier_private_cache, GFP_KERNEL);
958 if (!event_notifier_priv) {
959 kmem_cache_free(event_notifier_cache, event_notifier);
960 return NULL;
961 }
962 event_notifier_priv->pub = event_notifier;
963 event_notifier_priv->parent.pub = &event_notifier->parent;
964 event_notifier->priv = event_notifier_priv;
965 event_notifier->parent.priv = &event_notifier_priv->parent;
966
967 event_notifier->parent.type = LTTNG_KERNEL_EVENT_TYPE_NOTIFIER;
968 event_notifier->parent.run_filter = lttng_kernel_interpret_event_filter;
969 event_notifier->priv->parent.instrumentation = itype;
970 event_notifier->priv->parent.user_token = event_enabler->user_token;
971 INIT_LIST_HEAD(&event_notifier->priv->parent.filter_bytecode_runtime_head);
972 INIT_LIST_HEAD(&event_notifier->priv->parent.enablers_ref_head);
973
974 event_notifier->priv->group = event_notifier_enabler->group;
975 event_notifier->priv->error_counter_index = event_notifier_enabler->error_counter_index;
976 event_notifier->priv->num_captures = 0;
977 event_notifier->notification_send = lttng_event_notifier_notification_send;
978 INIT_LIST_HEAD(&event_notifier->priv->capture_bytecode_runtime_head);
979 return &event_notifier->parent;
980 }
981 default:
982 return NULL;
983 }
984 }
985
986 static
987 void lttng_kernel_event_free(struct lttng_kernel_event_common *event)
988 {
989 switch (event->type) {
990 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
991 {
992 struct lttng_kernel_event_recorder *event_recorder =
993 container_of(event, struct lttng_kernel_event_recorder, parent);
994
995 kmem_cache_free(event_recorder_private_cache, event_recorder->priv);
996 kmem_cache_free(event_recorder_cache, event_recorder);
997 break;
998 }
999 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
1000 {
1001 struct lttng_kernel_event_notifier *event_notifier =
1002 container_of(event, struct lttng_kernel_event_notifier, parent);
1003
1004 kmem_cache_free(event_notifier_private_cache, event_notifier->priv);
1005 kmem_cache_free(event_notifier_cache, event_notifier);
1006 break;
1007 }
1008 default:
1009 WARN_ON_ONCE(1);
1010 }
1011 }
1012
1013 static
1014 int lttng_kernel_event_notifier_clear_error_counter(struct lttng_kernel_event_common *event)
1015 {
1016 switch (event->type) {
1017 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
1018 return 0;
1019 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
1020 {
1021 struct lttng_kernel_event_notifier *event_notifier =
1022 container_of(event, struct lttng_kernel_event_notifier, parent);
1023 struct lttng_counter *error_counter;
1024 struct lttng_event_notifier_group *event_notifier_group = event_notifier->priv->group;
1025 size_t dimension_index[1];
1026 int ret;
1027
1028 /*
1029 * Clear the error counter bucket. The sessiond keeps track of which
1030 * bucket is currently in use. We trust it. The session lock
1031 * synchronizes against concurrent creation of the error
1032 * counter.
1033 */
1034 error_counter = event_notifier_group->error_counter;
1035 if (!error_counter)
1036 return 0;
1037 /*
1038 * Check that the index is within the boundary of the counter.
1039 */
1040 if (event_notifier->priv->error_counter_index >= event_notifier_group->error_counter_len) {
1041 printk(KERN_INFO "LTTng: event_notifier: Error counter index out-of-bound: counter-len=%zu, index=%llu\n",
1042 event_notifier_group->error_counter_len, event_notifier->priv->error_counter_index);
1043 return -EINVAL;
1044 }
1045
1046 dimension_index[0] = event_notifier->priv->error_counter_index;
1047 ret = error_counter->ops->counter_clear(error_counter->counter, dimension_index);
1048 if (ret) {
1049 printk(KERN_INFO "LTTng: event_notifier: Unable to clear error counter bucket %llu\n",
1050 event_notifier->priv->error_counter_index);
1051 return -EINVAL;
1052 }
1053 return 0;
1054 }
1055 default:
1056 return -EINVAL;
1057 }
1058 }
1059
1060 /*
1061 * Supports event creation while tracing session is active.
1062 * Needs to be called with sessions mutex held.
1063 */
1064 static
1065 struct lttng_kernel_event_recorder *_lttng_kernel_event_recorder_create(struct lttng_event_recorder_enabler *event_enabler,
1066 const struct lttng_kernel_event_desc *event_desc)
1067 {
1068 struct lttng_event_ht *events_ht = lttng_get_event_ht_from_enabler(&event_enabler->parent);
1069 struct list_head *event_list_head = lttng_get_event_list_head_from_enabler(&event_enabler->parent);
1070 struct lttng_kernel_abi_event *event_param = &event_enabler->parent.event_param;
1071 enum lttng_kernel_abi_instrumentation itype = event_param->instrumentation;
1072 struct lttng_kernel_event_common_private *event_priv;
1073 struct lttng_kernel_event_common *event;
1074 struct lttng_kernel_event_recorder *event_recorder;
1075 const char *event_name;
1076 struct hlist_head *head;
1077 int ret;
1078
1079 if (!lttng_kernel_event_id_available(&event_enabler->parent)) {
1080 ret = -EMFILE;
1081 goto full;
1082 }
1083
1084 switch (itype) {
1085 case LTTNG_KERNEL_ABI_TRACEPOINT:
1086 event_name = event_desc->event_name;
1087 break;
1088
1089 case LTTNG_KERNEL_ABI_KPROBE:
1090 lttng_fallthrough;
1091 case LTTNG_KERNEL_ABI_UPROBE:
1092 lttng_fallthrough;
1093 case LTTNG_KERNEL_ABI_KRETPROBE:
1094 lttng_fallthrough;
1095 case LTTNG_KERNEL_ABI_SYSCALL:
1096 event_name = event_param->name;
1097 break;
1098
1099 case LTTNG_KERNEL_ABI_FUNCTION:
1100 lttng_fallthrough;
1101 case LTTNG_KERNEL_ABI_NOOP:
1102 lttng_fallthrough;
1103 default:
1104 WARN_ON_ONCE(1);
1105 ret = -EINVAL;
1106 goto type_error;
1107 }
1108
1109 head = utils_borrow_hash_table_bucket(events_ht->table, LTTNG_EVENT_HT_SIZE, event_name);
1110 lttng_hlist_for_each_entry(event_priv, head, hlist_node) {
1111 if (lttng_event_enabler_event_name_match_event(&event_enabler->parent, event_name, event_priv->pub)) {
1112 ret = -EEXIST;
1113 goto exist;
1114 }
1115 }
1116
1117 event = lttng_kernel_event_alloc(&event_enabler->parent);
1118 if (!event) {
1119 ret = -ENOMEM;
1120 goto alloc_error;
1121 }
1122 event_recorder = container_of(event, struct lttng_kernel_event_recorder, parent);
1123
1124 switch (itype) {
1125 case LTTNG_KERNEL_ABI_TRACEPOINT:
1126 /* Event will be enabled by enabler sync. */
1127 event->enabled = 0;
1128 event->priv->registered = 0;
1129 event->priv->desc = lttng_event_desc_get(event_name);
1130 if (!event->priv->desc) {
1131 ret = -ENOENT;
1132 goto register_error;
1133 }
1134 /* Populate lttng_event structure before event registration. */
1135 smp_wmb();
1136 break;
1137
1138 case LTTNG_KERNEL_ABI_KPROBE:
1139 /*
1140 * Needs to be explicitly enabled after creation, since
1141 * we may want to apply filters.
1142 */
1143 event->enabled = 0;
1144 event->priv->registered = 1;
1145 /*
1146 * Populate lttng_event structure before event
1147 * registration.
1148 */
1149 smp_wmb();
1150 ret = lttng_kprobes_register_event(event_name,
1151 event_param->u.kprobe.symbol_name,
1152 event_param->u.kprobe.offset,
1153 event_param->u.kprobe.addr,
1154 event);
1155 if (ret) {
1156 ret = -EINVAL;
1157 goto register_error;
1158 }
1159 ret = try_module_get(event->priv->desc->owner);
1160 WARN_ON_ONCE(!ret);
1161 break;
1162
1163 case LTTNG_KERNEL_ABI_KRETPROBE:
1164 {
1165 struct lttng_kernel_event_common *event_return;
1166
1167 /* kretprobe defines 2 events */
1168 /*
1169 * Needs to be explicitly enabled after creation, since
1170 * we may want to apply filters.
1171 */
1172 event->enabled = 0;
1173 event->priv->registered = 1;
1174
1175 event_return = lttng_kernel_event_alloc(&event_enabler->parent);
1176 if (!event_return) {
1177 ret = -ENOMEM;
1178 goto alloc_error;
1179 }
1180
1181 event_return->enabled = 0;
1182 event_return->priv->registered = 1;
1183
1184 /*
1185 * Populate lttng_event structure before kretprobe registration.
1186 */
1187 smp_wmb();
1188 ret = lttng_kretprobes_register(event_name,
1189 event_param->u.kretprobe.symbol_name,
1190 event_param->u.kretprobe.offset,
1191 event_param->u.kretprobe.addr,
1192 event, event_return);
1193 if (ret) {
1194 lttng_kernel_event_free(event_return);
1195 ret = -EINVAL;
1196 goto register_error;
1197 }
1198 /* Take 2 refs on the module: one per event. */
1199 ret = try_module_get(event->priv->desc->owner);
1200 WARN_ON_ONCE(!ret);
1201 ret = try_module_get(event_return->priv->desc->owner);
1202 WARN_ON_ONCE(!ret);
1203 ret = _lttng_event_recorder_metadata_statedump(event_return);
1204 WARN_ON_ONCE(ret > 0);
1205 if (ret) {
1206 lttng_kernel_event_free(event_return);
1207 module_put(event_return->priv->desc->owner);
1208 module_put(event->priv->desc->owner);
1209 goto statedump_error;
1210 }
1211 list_add(&event_return->priv->node, event_list_head);
1212 break;
1213 }
1214
1215 case LTTNG_KERNEL_ABI_SYSCALL:
1216 /*
1217 * Needs to be explicitly enabled after creation, since
1218 * we may want to apply filters.
1219 */
1220 event->enabled = 0;
1221 event->priv->registered = 0;
1222 event->priv->desc = event_desc;
1223 switch (event_param->u.syscall.entryexit) {
1224 case LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT:
1225 ret = -EINVAL;
1226 goto register_error;
1227 case LTTNG_KERNEL_ABI_SYSCALL_ENTRY:
1228 event->priv->u.syscall.entryexit = LTTNG_SYSCALL_ENTRY;
1229 break;
1230 case LTTNG_KERNEL_ABI_SYSCALL_EXIT:
1231 event->priv->u.syscall.entryexit = LTTNG_SYSCALL_EXIT;
1232 break;
1233 }
1234 switch (event_param->u.syscall.abi) {
1235 case LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL:
1236 ret = -EINVAL;
1237 goto register_error;
1238 case LTTNG_KERNEL_ABI_SYSCALL_ABI_NATIVE:
1239 event->priv->u.syscall.abi = LTTNG_SYSCALL_ABI_NATIVE;
1240 break;
1241 case LTTNG_KERNEL_ABI_SYSCALL_ABI_COMPAT:
1242 event->priv->u.syscall.abi = LTTNG_SYSCALL_ABI_COMPAT;
1243 break;
1244 }
1245 if (!event->priv->desc) {
1246 ret = -EINVAL;
1247 goto register_error;
1248 }
1249 break;
1250
1251 case LTTNG_KERNEL_ABI_UPROBE:
1252 /*
1253 * Needs to be explicitly enabled after creation, since
1254 * we may want to apply filters.
1255 */
1256 event->enabled = 0;
1257 event->priv->registered = 1;
1258
1259 /*
1260 * Populate lttng_event structure before event
1261 * registration.
1262 */
1263 smp_wmb();
1264
1265 ret = lttng_uprobes_register_event(event_param->name,
1266 event_param->u.uprobe.fd,
1267 event);
1268 if (ret)
1269 goto register_error;
1270 ret = try_module_get(event->priv->desc->owner);
1271 WARN_ON_ONCE(!ret);
1272 break;
1273
1274 case LTTNG_KERNEL_ABI_FUNCTION:
1275 lttng_fallthrough;
1276 case LTTNG_KERNEL_ABI_NOOP:
1277 lttng_fallthrough;
1278 default:
1279 WARN_ON_ONCE(1);
1280 ret = -EINVAL;
1281 goto register_error;
1282 }
1283 ret = _lttng_event_recorder_metadata_statedump(event);
1284 WARN_ON_ONCE(ret > 0);
1285 if (ret) {
1286 goto statedump_error;
1287 }
1288 hlist_add_head(&event->priv->hlist_node, head);
1289 list_add(&event->priv->node, event_list_head);
1290 return event_recorder;
1291
1292 statedump_error:
1293 /* If a statedump error occurs, events will not be readable. */
1294 register_error:
1295 lttng_kernel_event_free(event);
1296 alloc_error:
1297 exist:
1298 type_error:
1299 full:
1300 return ERR_PTR(ret);
1301 }
1302
1303 static
1304 struct lttng_kernel_event_notifier *_lttng_kernel_event_notifier_create(struct lttng_event_notifier_enabler *event_enabler,
1305 const struct lttng_kernel_event_desc *event_desc)
1306 {
1307 struct lttng_event_ht *events_ht = lttng_get_event_ht_from_enabler(&event_enabler->parent);
1308 struct list_head *event_list_head = lttng_get_event_list_head_from_enabler(&event_enabler->parent);
1309 struct lttng_kernel_abi_event *event_param = &event_enabler->parent.event_param;
1310 enum lttng_kernel_abi_instrumentation itype = event_param->instrumentation;
1311 struct lttng_kernel_event_common_private *event_priv;
1312 struct lttng_kernel_event_common *event;
1313 struct lttng_kernel_event_notifier *event_notifier;
1314 const char *event_name;
1315 struct hlist_head *head;
1316 int ret;
1317
1318 switch (itype) {
1319 case LTTNG_KERNEL_ABI_TRACEPOINT:
1320 event_name = event_desc->event_name;
1321 break;
1322
1323 case LTTNG_KERNEL_ABI_KPROBE:
1324 lttng_fallthrough;
1325 case LTTNG_KERNEL_ABI_UPROBE:
1326 lttng_fallthrough;
1327 case LTTNG_KERNEL_ABI_SYSCALL:
1328 event_name = event_param->name;
1329 break;
1330
1331 case LTTNG_KERNEL_ABI_KRETPROBE:
1332 lttng_fallthrough;
1333 case LTTNG_KERNEL_ABI_FUNCTION:
1334 lttng_fallthrough;
1335 case LTTNG_KERNEL_ABI_NOOP:
1336 lttng_fallthrough;
1337 default:
1338 WARN_ON_ONCE(1);
1339 ret = -EINVAL;
1340 goto type_error;
1341 }
1342
1343 head = utils_borrow_hash_table_bucket(events_ht->table, LTTNG_EVENT_HT_SIZE, event_name);
1344 lttng_hlist_for_each_entry(event_priv, head, hlist_node) {
1345 if (lttng_event_enabler_event_name_match_event(&event_enabler->parent, event_name, event_priv->pub)) {
1346 ret = -EEXIST;
1347 goto exist;
1348 }
1349 }
1350
1351 event = lttng_kernel_event_alloc(&event_enabler->parent);
1352 if (!event) {
1353 ret = -ENOMEM;
1354 goto alloc_error;
1355 }
1356 event_notifier = container_of(event, struct lttng_kernel_event_notifier, parent);
1357
1358 switch (itype) {
1359 case LTTNG_KERNEL_ABI_TRACEPOINT:
1360 /* Event will be enabled by enabler sync. */
1361 event->enabled = 0;
1362 event->priv->registered = 0;
1363 event->priv->desc = lttng_event_desc_get(event_name);
1364 if (!event->priv->desc) {
1365 ret = -ENOENT;
1366 goto register_error;
1367 }
1368 /* Populate lttng_event_notifier structure before event registration. */
1369 smp_wmb();
1370 break;
1371
1372 case LTTNG_KERNEL_ABI_KPROBE:
1373 /*
1374 * Needs to be explicitly enabled after creation, since
1375 * we may want to apply filters.
1376 */
1377 event->enabled = 0;
1378 event->priv->registered = 1;
1379 /*
1380 * Populate lttng_event_notifier structure before event
1381 * registration.
1382 */
1383 smp_wmb();
1384 ret = lttng_kprobes_register_event(event_param->u.kprobe.symbol_name,
1385 event_param->u.kprobe.symbol_name,
1386 event_param->u.kprobe.offset,
1387 event_param->u.kprobe.addr,
1388 event);
1389 if (ret) {
1390 ret = -EINVAL;
1391 goto register_error;
1392 }
1393 ret = try_module_get(event->priv->desc->owner);
1394 WARN_ON_ONCE(!ret);
1395 break;
1396
1397 case LTTNG_KERNEL_ABI_SYSCALL:
1398 /*
1399 * Needs to be explicitly enabled after creation, since
1400 * we may want to apply filters.
1401 */
1402 event->enabled = 0;
1403 event->priv->registered = 0;
1404 event->priv->desc = event_desc;
1405 switch (event_param->u.syscall.entryexit) {
1406 case LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT:
1407 ret = -EINVAL;
1408 goto register_error;
1409 case LTTNG_KERNEL_ABI_SYSCALL_ENTRY:
1410 event->priv->u.syscall.entryexit = LTTNG_SYSCALL_ENTRY;
1411 break;
1412 case LTTNG_KERNEL_ABI_SYSCALL_EXIT:
1413 event->priv->u.syscall.entryexit = LTTNG_SYSCALL_EXIT;
1414 break;
1415 }
1416 switch (event_param->u.syscall.abi) {
1417 case LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL:
1418 ret = -EINVAL;
1419 goto register_error;
1420 case LTTNG_KERNEL_ABI_SYSCALL_ABI_NATIVE:
1421 event->priv->u.syscall.abi = LTTNG_SYSCALL_ABI_NATIVE;
1422 break;
1423 case LTTNG_KERNEL_ABI_SYSCALL_ABI_COMPAT:
1424 event->priv->u.syscall.abi = LTTNG_SYSCALL_ABI_COMPAT;
1425 break;
1426 }
1427
1428 if (!event->priv->desc) {
1429 ret = -EINVAL;
1430 goto register_error;
1431 }
1432 break;
1433
1434 case LTTNG_KERNEL_ABI_UPROBE:
1435 /*
1436 * Needs to be explicitly enabled after creation, since
1437 * we may want to apply filters.
1438 */
1439 event->enabled = 0;
1440 event->priv->registered = 1;
1441
1442 /*
1443 * Populate lttng_event_notifier structure before
1444 * event_notifier registration.
1445 */
1446 smp_wmb();
1447
1448 ret = lttng_uprobes_register_event(event_param->name,
1449 event_param->u.uprobe.fd,
1450 event);
1451 if (ret)
1452 goto register_error;
1453 ret = try_module_get(event->priv->desc->owner);
1454 WARN_ON_ONCE(!ret);
1455 break;
1456
1457 case LTTNG_KERNEL_ABI_KRETPROBE:
1458 lttng_fallthrough;
1459 case LTTNG_KERNEL_ABI_FUNCTION:
1460 lttng_fallthrough;
1461 case LTTNG_KERNEL_ABI_NOOP:
1462 lttng_fallthrough;
1463 default:
1464 WARN_ON_ONCE(1);
1465 ret = -EINVAL;
1466 goto register_error;
1467 }
1468
1469 list_add(&event->priv->node, event_list_head);
1470 hlist_add_head(&event->priv->hlist_node, head);
1471
1472 ret = lttng_kernel_event_notifier_clear_error_counter(event);
1473 if (ret)
1474 goto register_error;
1475 return event_notifier;
1476
1477 register_error:
1478 lttng_kernel_event_free(event);
1479 alloc_error:
1480 exist:
1481 type_error:
1482 return ERR_PTR(ret);
1483 }
1484
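/*
 * Dispatch event creation to the recorder or notifier implementation
 * based on the enabler type. lttng_kernel_event_create() below is the
 * public wrapper that takes sessions_mutex around the call.
 */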
1485 struct lttng_kernel_event_common *_lttng_kernel_event_create(struct lttng_event_enabler_common *event_enabler,
1486 const struct lttng_kernel_event_desc *event_desc)
1487 {
1488 switch (event_enabler->enabler_type) {
1489 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
1490 {
1491 struct lttng_event_recorder_enabler *event_recorder_enabler =
1492 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
1493 struct lttng_kernel_event_recorder *event_recorder;
1494
1495 event_recorder = _lttng_kernel_event_recorder_create(event_recorder_enabler, event_desc);
1496 if (!event_recorder)
1497 return NULL;
1498 return &event_recorder->parent;
1499 }
1500 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
1501 {
1502 struct lttng_event_notifier_enabler *event_notifier_enabler =
1503 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
1504 struct lttng_kernel_event_notifier *event_notifier;
1505
1506 event_notifier = _lttng_kernel_event_notifier_create(event_notifier_enabler, event_desc);
1507 if (!event_notifier)
1508 return NULL;
1509 return &event_notifier->parent;
1510 }
1511 default:
1512 return NULL;
1513 }
1514 }
1515
1516 struct lttng_kernel_event_common *lttng_kernel_event_create(struct lttng_event_enabler_common *event_enabler,
1517 const struct lttng_kernel_event_desc *event_desc)
1518 {
1519 struct lttng_kernel_event_common *event;
1520
1521 mutex_lock(&sessions_mutex);
1522 event = _lttng_kernel_event_create(event_enabler, event_desc);
1523 mutex_unlock(&sessions_mutex);
1524 return event;
1525 }
1526
1527
1528
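/*
 * Thin wrappers around the counter transport operations: read a
 * per-cpu value, aggregate across cpus, or clear a bucket identified
 * by its dimension indexes. A minimal (hypothetical) use for a
 * one-dimensional error counter:
 *
 *	size_t idx[1] = { bucket };
 *	int64_t val;
 *	bool ov, un;
 *	ret = lttng_kernel_counter_aggregate(counter, idx, &val, &ov, &un);
 */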
1529 int lttng_kernel_counter_read(struct lttng_counter *counter,
1530 const size_t *dim_indexes, int32_t cpu,
1531 int64_t *val, bool *overflow, bool *underflow)
1532 {
1533 return counter->ops->counter_read(counter->counter, dim_indexes,
1534 cpu, val, overflow, underflow);
1535 }
1536
1537 int lttng_kernel_counter_aggregate(struct lttng_counter *counter,
1538 const size_t *dim_indexes, int64_t *val,
1539 bool *overflow, bool *underflow)
1540 {
1541 return counter->ops->counter_aggregate(counter->counter, dim_indexes,
1542 val, overflow, underflow);
1543 }
1544
1545 int lttng_kernel_counter_clear(struct lttng_counter *counter,
1546 const size_t *dim_indexes)
1547 {
1548 return counter->ops->counter_clear(counter->counter, dim_indexes);
1549 }
1550
1551 /* Used by enabler sync to register tracepoint and syscall instrumentation; kprobe, kretprobe and uprobe events are registered at creation time. */
1552 static
1553 void register_event(struct lttng_kernel_event_common *event)
1554 {
1555 const struct lttng_kernel_event_desc *desc;
1556 int ret = -EINVAL;
1557
1558 if (event->priv->registered)
1559 return;
1560
1561 desc = event->priv->desc;
1562 switch (event->priv->instrumentation) {
1563 case LTTNG_KERNEL_ABI_TRACEPOINT:
1564 ret = lttng_wrapper_tracepoint_probe_register(desc->event_kname,
1565 desc->tp_class->probe_callback,
1566 event);
1567 break;
1568
1569 case LTTNG_KERNEL_ABI_SYSCALL:
1570 ret = lttng_syscall_filter_enable_event(event);
1571 break;
1572
1573 case LTTNG_KERNEL_ABI_KPROBE:
1574 lttng_fallthrough;
1575 case LTTNG_KERNEL_ABI_UPROBE:
1576 ret = 0;
1577 break;
1578
1579 case LTTNG_KERNEL_ABI_KRETPROBE:
1580 switch (event->type) {
1581 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
1582 ret = 0;
1583 break;
1584 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
1585 WARN_ON_ONCE(1);
1586 break;
1587 }
1588 break;
1589
1590 case LTTNG_KERNEL_ABI_FUNCTION:
1591 lttng_fallthrough;
1592 case LTTNG_KERNEL_ABI_NOOP:
1593 lttng_fallthrough;
1594 default:
1595 WARN_ON_ONCE(1);
1596 }
1597 if (!ret)
1598 event->priv->registered = 1;
1599 }
1600
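/*
 * Unregister an event from its instrumentation source (tracepoint,
 * kprobe, kretprobe, syscall filter, uprobe). A no-op returning 0 if
 * the event was never registered; clears the registered flag on
 * success.
 */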
1601 int _lttng_event_unregister(struct lttng_kernel_event_common *event)
1602 {
1603 struct lttng_kernel_event_common_private *event_priv = event->priv;
1604 const struct lttng_kernel_event_desc *desc;
1605 int ret = -EINVAL;
1606
1607 if (!event_priv->registered)
1608 return 0;
1609
1610 desc = event_priv->desc;
1611 switch (event_priv->instrumentation) {
1612 case LTTNG_KERNEL_ABI_TRACEPOINT:
1613 ret = lttng_wrapper_tracepoint_probe_unregister(event_priv->desc->event_kname,
1614 event_priv->desc->tp_class->probe_callback,
1615 event);
1616 break;
1617
1618 case LTTNG_KERNEL_ABI_KPROBE:
1619 lttng_kprobes_unregister_event(event);
1620 ret = 0;
1621 break;
1622
1623 case LTTNG_KERNEL_ABI_KRETPROBE:
1624 switch (event->type) {
1625 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
1626 lttng_kretprobes_unregister(event);
1627 ret = 0;
1628 break;
1629 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
1630 WARN_ON_ONCE(1);
1631 break;
1632 }
1633 break;
1634
1635 case LTTNG_KERNEL_ABI_SYSCALL:
1636 ret = lttng_syscall_filter_disable_event(event);
1637 break;
1638
1639 case LTTNG_KERNEL_ABI_NOOP:
1640 switch (event->type) {
1641 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
1642 ret = 0;
1643 break;
1644 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
1645 WARN_ON_ONCE(1);
1646 break;
1647 }
1648 break;
1649
1650 case LTTNG_KERNEL_ABI_UPROBE:
1651 lttng_uprobes_unregister_event(event);
1652 ret = 0;
1653 break;
1654
1655 case LTTNG_KERNEL_ABI_FUNCTION:
1656 lttng_fallthrough;
1657 default:
1658 WARN_ON_ONCE(1);
1659 }
1660 if (!ret)
1661 event_priv->registered = 0;
1662 return ret;
1663 }
1664
1665 /*
1666 * Only used internally at session destruction.
1667 */
1668 static
1669 void _lttng_event_destroy(struct lttng_kernel_event_common *event)
1670 {
1671 struct lttng_kernel_event_common_private *event_priv = event->priv;
1672 struct lttng_enabler_ref *enabler_ref, *tmp_enabler_ref;
1673
1674 lttng_free_event_filter_runtime(event);
1675 /* Free event enabler refs */
1676 list_for_each_entry_safe(enabler_ref, tmp_enabler_ref,
1677 &event_priv->enablers_ref_head, node)
1678 kfree(enabler_ref);
1679
1680 switch (event->type) {
1681 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
1682 {
1683 struct lttng_kernel_event_recorder *event_recorder =
1684 container_of(event, struct lttng_kernel_event_recorder, parent);
1685
1686 switch (event_priv->instrumentation) {
1687 case LTTNG_KERNEL_ABI_TRACEPOINT:
1688 lttng_event_desc_put(event_priv->desc);
1689 break;
1690
1691 case LTTNG_KERNEL_ABI_KPROBE:
1692 module_put(event_priv->desc->owner);
1693 lttng_kprobes_destroy_event_private(&event_recorder->parent);
1694 break;
1695
1696 case LTTNG_KERNEL_ABI_KRETPROBE:
1697 module_put(event_priv->desc->owner);
1698 lttng_kretprobes_destroy_private(&event_recorder->parent);
1699 break;
1700
1701 case LTTNG_KERNEL_ABI_SYSCALL:
1702 break;
1703
1704 case LTTNG_KERNEL_ABI_UPROBE:
1705 module_put(event_priv->desc->owner);
1706 lttng_uprobes_destroy_event_private(&event_recorder->parent);
1707 break;
1708
1709 case LTTNG_KERNEL_ABI_FUNCTION:
1710 lttng_fallthrough;
1711 case LTTNG_KERNEL_ABI_NOOP:
1712 lttng_fallthrough;
1713 default:
1714 WARN_ON_ONCE(1);
1715 }
1716 list_del(&event_recorder->priv->parent.node);
1717 kmem_cache_free(event_recorder_private_cache, event_recorder->priv);
1718 kmem_cache_free(event_recorder_cache, event_recorder);
1719 break;
1720 }
1721 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
1722 {
1723 struct lttng_kernel_event_notifier *event_notifier =
1724 container_of(event, struct lttng_kernel_event_notifier, parent);
1725
1726 switch (event_notifier->priv->parent.instrumentation) {
1727 case LTTNG_KERNEL_ABI_TRACEPOINT:
1728 lttng_event_desc_put(event_notifier->priv->parent.desc);
1729 break;
1730
1731 case LTTNG_KERNEL_ABI_KPROBE:
1732 module_put(event_notifier->priv->parent.desc->owner);
1733 lttng_kprobes_destroy_event_private(&event_notifier->parent);
1734 break;
1735
1736 case LTTNG_KERNEL_ABI_SYSCALL:
1737 break;
1738
1739 case LTTNG_KERNEL_ABI_UPROBE:
1740 module_put(event_notifier->priv->parent.desc->owner);
1741 lttng_uprobes_destroy_event_private(&event_notifier->parent);
1742 break;
1743
1744 case LTTNG_KERNEL_ABI_KRETPROBE:
1745 lttng_fallthrough;
1746 case LTTNG_KERNEL_ABI_FUNCTION:
1747 lttng_fallthrough;
1748 case LTTNG_KERNEL_ABI_NOOP:
1749 lttng_fallthrough;
1750 default:
1751 WARN_ON_ONCE(1);
1752 }
1753 list_del(&event_notifier->priv->parent.node);
1754 kmem_cache_free(event_notifier_private_cache, event_notifier->priv);
1755 kmem_cache_free(event_notifier_cache, event_notifier);
1756 break;
1757 }
1758 default:
1759 WARN_ON_ONCE(1);
1760 }
1761 }
1762
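/*
 * Map a tracker type to the corresponding id tracker embedded in the
 * session; returns NULL (with a one-time warning) for unknown types.
 */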
1763 struct lttng_kernel_id_tracker *get_tracker(struct lttng_kernel_session *session,
1764 enum tracker_type tracker_type)
1765 {
1766 switch (tracker_type) {
1767 case TRACKER_PID:
1768 return &session->pid_tracker;
1769 case TRACKER_VPID:
1770 return &session->vpid_tracker;
1771 case TRACKER_UID:
1772 return &session->uid_tracker;
1773 case TRACKER_VUID:
1774 return &session->vuid_tracker;
1775 case TRACKER_GID:
1776 return &session->gid_tracker;
1777 case TRACKER_VGID:
1778 return &session->vgid_tracker;
1779 default:
1780 WARN_ON_ONCE(1);
1781 return NULL;
1782 }
1783 }
1784
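/*
 * Track/untrack an id for the given tracker type. An id of -1 is the
 * wildcard: tracking -1 destroys the tracker (track all ids), while
 * untracking -1 replaces it with an empty set (track none).
 */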
1785 int lttng_session_track_id(struct lttng_kernel_session *session,
1786 enum tracker_type tracker_type, int id)
1787 {
1788 struct lttng_kernel_id_tracker *tracker;
1789 int ret;
1790
1791 tracker = get_tracker(session, tracker_type);
1792 if (!tracker)
1793 return -EINVAL;
1794 if (id < -1)
1795 return -EINVAL;
1796 mutex_lock(&sessions_mutex);
1797 if (id == -1) {
1798 /* track all ids: destroy tracker. */
1799 lttng_id_tracker_destroy(tracker, true);
1800 ret = 0;
1801 } else {
1802 ret = lttng_id_tracker_add(tracker, id);
1803 }
1804 mutex_unlock(&sessions_mutex);
1805 return ret;
1806 }
1807
1808 int lttng_session_untrack_id(struct lttng_kernel_session *session,
1809 enum tracker_type tracker_type, int id)
1810 {
1811 struct lttng_kernel_id_tracker *tracker;
1812 int ret;
1813
1814 tracker = get_tracker(session, tracker_type);
1815 if (!tracker)
1816 return -EINVAL;
1817 if (id < -1)
1818 return -EINVAL;
1819 mutex_lock(&sessions_mutex);
1820 if (id == -1) {
1821 /* untrack all ids: replace by empty tracker. */
1822 ret = lttng_id_tracker_empty_set(tracker);
1823 } else {
1824 ret = lttng_id_tracker_del(tracker, id);
1825 }
1826 mutex_unlock(&sessions_mutex);
1827 return ret;
1828 }
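
/*
 * Illustrative sketch (hypothetical caller; not part of this file) showing
 * the "-1" wildcard semantics implemented above: tracking id -1 destroys the
 * tracker (track everything), while untracking id -1 installs an empty
 * tracker (track nothing), after which individual ids can be whitelisted
 * again.
 *
 *	ret = lttng_session_track_id(session, TRACKER_PID, -1);
 *	ret = lttng_session_untrack_id(session, TRACKER_VPID, -1);
 *	ret = lttng_session_track_id(session, TRACKER_VPID, 1234);
 */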
1829
1830 static
1831 void *id_list_start(struct seq_file *m, loff_t *pos)
1832 {
1833 struct lttng_kernel_id_tracker *id_tracker = m->private;
1834 struct lttng_kernel_id_tracker_rcu *id_tracker_p = id_tracker->p;
1835 struct lttng_id_hash_node *e;
1836 int iter = 0, i;
1837
1838 mutex_lock(&sessions_mutex);
1839 if (id_tracker_p) {
1840 for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
1841 struct hlist_head *head = &id_tracker_p->id_hash[i];
1842
1843 lttng_hlist_for_each_entry(e, head, hlist) {
1844 if (iter++ >= *pos)
1845 return e;
1846 }
1847 }
1848 } else {
1849 /* ID tracker disabled. */
1850 if (iter >= *pos && iter == 0) {
1851 return id_tracker_p; /* empty tracker */
1852 }
1853 iter++;
1854 }
1855 /* End of list */
1856 return NULL;
1857 }
1858
1859 /* Called with sessions_mutex held. */
1860 static
1861 void *id_list_next(struct seq_file *m, void *p, loff_t *ppos)
1862 {
1863 struct lttng_kernel_id_tracker *id_tracker = m->private;
1864 struct lttng_kernel_id_tracker_rcu *id_tracker_p = id_tracker->p;
1865 struct lttng_id_hash_node *e;
1866 int iter = 0, i;
1867
1868 (*ppos)++;
1869 if (id_tracker_p) {
1870 for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
1871 struct hlist_head *head = &id_tracker_p->id_hash[i];
1872
1873 lttng_hlist_for_each_entry(e, head, hlist) {
1874 if (iter++ >= *ppos)
1875 return e;
1876 }
1877 }
1878 } else {
1879 /* ID tracker disabled. */
1880 if (iter >= *ppos && iter == 0)
1881 return p; /* empty tracker */
1882 iter++;
1883 }
1884
1885 /* End of list */
1886 return NULL;
1887 }
1888
1889 static
1890 void id_list_stop(struct seq_file *m, void *p)
1891 {
1892 mutex_unlock(&sessions_mutex);
1893 }
1894
1895 static
1896 int id_list_show(struct seq_file *m, void *p)
1897 {
1898 struct lttng_kernel_id_tracker *id_tracker = m->private;
1899 struct lttng_kernel_id_tracker_rcu *id_tracker_p = id_tracker->p;
1900 int id;
1901
1902 if (p == id_tracker_p) {
1903 /* Tracker disabled. */
1904 id = -1;
1905 } else {
1906 const struct lttng_id_hash_node *e = p;
1907
1908 id = lttng_id_tracker_get_node_id(e);
1909 }
1910 switch (id_tracker->priv->tracker_type) {
1911 case TRACKER_PID:
1912 seq_printf(m, "process { pid = %d; };\n", id);
1913 break;
1914 case TRACKER_VPID:
1915 seq_printf(m, "process { vpid = %d; };\n", id);
1916 break;
1917 case TRACKER_UID:
1918 seq_printf(m, "user { uid = %d; };\n", id);
1919 break;
1920 case TRACKER_VUID:
1921 seq_printf(m, "user { vuid = %d; };\n", id);
1922 break;
1923 case TRACKER_GID:
1924 seq_printf(m, "group { gid = %d; };\n", id);
1925 break;
1926 case TRACKER_VGID:
1927 seq_printf(m, "group { vgid = %d; };\n", id);
1928 break;
1929 default:
1930 seq_printf(m, "UNKNOWN { field = %d };\n", id);
1931 }
1932 return 0;
1933 }
1934
1935 static
1936 const struct seq_operations lttng_tracker_ids_list_seq_ops = {
1937 .start = id_list_start,
1938 .next = id_list_next,
1939 .stop = id_list_stop,
1940 .show = id_list_show,
1941 };
1942
1943 static
1944 int lttng_tracker_ids_list_open(struct inode *inode, struct file *file)
1945 {
1946 return seq_open(file, &lttng_tracker_ids_list_seq_ops);
1947 }
1948
1949 static
1950 int lttng_tracker_ids_list_release(struct inode *inode, struct file *file)
1951 {
1952 struct seq_file *m = file->private_data;
1953 struct lttng_kernel_id_tracker *id_tracker = m->private;
1954 int ret;
1955
1956 WARN_ON_ONCE(!id_tracker);
1957 ret = seq_release(inode, file);
1958 if (!ret)
1959 fput(id_tracker->priv->session->priv->file);
1960 return ret;
1961 }
1962
1963 const struct file_operations lttng_tracker_ids_list_fops = {
1964 .owner = THIS_MODULE,
1965 .open = lttng_tracker_ids_list_open,
1966 .read = seq_read,
1967 .llseek = seq_lseek,
1968 .release = lttng_tracker_ids_list_release,
1969 };
1970
1971 int lttng_session_list_tracker_ids(struct lttng_kernel_session *session,
1972 enum tracker_type tracker_type)
1973 {
1974 struct file *tracker_ids_list_file;
1975 struct seq_file *m;
1976 int file_fd, ret;
1977
1978 file_fd = lttng_get_unused_fd();
1979 if (file_fd < 0) {
1980 ret = file_fd;
1981 goto fd_error;
1982 }
1983
1984 tracker_ids_list_file = anon_inode_getfile("[lttng_tracker_ids_list]",
1985 &lttng_tracker_ids_list_fops,
1986 NULL, O_RDWR);
1987 if (IS_ERR(tracker_ids_list_file)) {
1988 ret = PTR_ERR(tracker_ids_list_file);
1989 goto file_error;
1990 }
1991 if (!atomic_long_add_unless(&session->priv->file->f_count, 1, LONG_MAX)) {
1992 ret = -EOVERFLOW;
1993 goto refcount_error;
1994 }
1995 ret = lttng_tracker_ids_list_fops.open(NULL, tracker_ids_list_file);
1996 if (ret < 0)
1997 goto open_error;
1998 m = tracker_ids_list_file->private_data;
1999
2000 m->private = get_tracker(session, tracker_type);
2001 BUG_ON(!m->private);
2002 fd_install(file_fd, tracker_ids_list_file);
2003
2004 return file_fd;
2005
2006 open_error:
2007 atomic_long_dec(&session->priv->file->f_count);
2008 refcount_error:
2009 fput(tracker_ids_list_file);
2010 file_error:
2011 put_unused_fd(file_fd);
2012 fd_error:
2013 return ret;
2014 }
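
/*
 * Lifetime note: the extra reference taken on the session file above
 * (atomic_long_add_unless on f_count) is paired with the fput() in
 * lttng_tracker_ids_list_release(), so the session stays alive for as long
 * as the tracker listing file is open.
 */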
2015
2016 /*
2017 * Enabler management.
2018 */
2019 static
2020 int lttng_match_enabler_star_glob(const char *desc_name,
2021 const char *pattern)
2022 {
2023 if (!strutils_star_glob_match(pattern, LTTNG_SIZE_MAX,
2024 desc_name, LTTNG_SIZE_MAX))
2025 return 0;
2026 return 1;
2027 }
2028
2029 static
2030 int lttng_match_enabler_name(const char *desc_name,
2031 const char *name)
2032 {
2033 if (strcmp(desc_name, name))
2034 return 0;
2035 return 1;
2036 }
2037
2038 static
2039 int lttng_desc_match_enabler_check(const struct lttng_kernel_event_desc *desc,
2040 struct lttng_event_enabler_common *enabler)
2041 {
2042 const char *desc_name, *enabler_name;
2043 bool compat = false, entry = false;
2044
2045 enabler_name = enabler->event_param.name;
2046 switch (enabler->event_param.instrumentation) {
2047 case LTTNG_KERNEL_ABI_TRACEPOINT:
2048 desc_name = desc->event_name;
2049 switch (enabler->format_type) {
2050 case LTTNG_ENABLER_FORMAT_STAR_GLOB:
2051 return lttng_match_enabler_star_glob(desc_name, enabler_name);
2052 case LTTNG_ENABLER_FORMAT_NAME:
2053 return lttng_match_enabler_name(desc_name, enabler_name);
2054 default:
2055 return -EINVAL;
2056 }
2057 break;
2058
2059 case LTTNG_KERNEL_ABI_SYSCALL:
2060 desc_name = desc->event_name;
2061 if (!strncmp(desc_name, "compat_", strlen("compat_"))) {
2062 desc_name += strlen("compat_");
2063 compat = true;
2064 }
2065 if (!strncmp(desc_name, "syscall_exit_",
2066 strlen("syscall_exit_"))) {
2067 desc_name += strlen("syscall_exit_");
2068 } else if (!strncmp(desc_name, "syscall_entry_",
2069 strlen("syscall_entry_"))) {
2070 desc_name += strlen("syscall_entry_");
2071 entry = true;
2072 } else {
2073 WARN_ON_ONCE(1);
2074 return -EINVAL;
2075 }
2076 switch (enabler->event_param.u.syscall.entryexit) {
2077 case LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT:
2078 break;
2079 case LTTNG_KERNEL_ABI_SYSCALL_ENTRY:
2080 if (!entry)
2081 return 0;
2082 break;
2083 case LTTNG_KERNEL_ABI_SYSCALL_EXIT:
2084 if (entry)
2085 return 0;
2086 break;
2087 default:
2088 return -EINVAL;
2089 }
2090 switch (enabler->event_param.u.syscall.abi) {
2091 case LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL:
2092 break;
2093 case LTTNG_KERNEL_ABI_SYSCALL_ABI_NATIVE:
2094 if (compat)
2095 return 0;
2096 break;
2097 case LTTNG_KERNEL_ABI_SYSCALL_ABI_COMPAT:
2098 if (!compat)
2099 return 0;
2100 break;
2101 default:
2102 return -EINVAL;
2103 }
2104 switch (enabler->event_param.u.syscall.match) {
2105 case LTTNG_KERNEL_ABI_SYSCALL_MATCH_NAME:
2106 switch (enabler->format_type) {
2107 case LTTNG_ENABLER_FORMAT_STAR_GLOB:
2108 return lttng_match_enabler_star_glob(desc_name, enabler_name);
2109 case LTTNG_ENABLER_FORMAT_NAME:
2110 return lttng_match_enabler_name(desc_name, enabler_name);
2111 default:
2112 return -EINVAL;
2113 }
2114 break;
2115 case LTTNG_KERNEL_ABI_SYSCALL_MATCH_NR:
2116 return -EINVAL; /* Not implemented. */
2117 default:
2118 return -EINVAL;
2119 }
2120 break;
2121
2122 default:
2123 WARN_ON_ONCE(1);
2124 return -EINVAL;
2125 }
2126 }
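
/*
 * Example of the syscall descriptor name parsing above (illustration only):
 * "compat_syscall_entry_openat" yields compat = true, entry = true, and the
 * remaining name "openat" is what gets compared against the enabler name or
 * star-glob pattern.
 */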
2127
2128 bool lttng_desc_match_enabler(const struct lttng_kernel_event_desc *desc,
2129 struct lttng_event_enabler_common *enabler)
2130 {
2131 int ret;
2132
2133 ret = lttng_desc_match_enabler_check(desc, enabler);
2134 if (ret < 0) {
2135 WARN_ON_ONCE(1);
2136 return false;
2137 }
2138 return ret;
2139 }
2140
2141 bool lttng_event_enabler_match_event(struct lttng_event_enabler_common *event_enabler,
2142 struct lttng_kernel_event_common *event)
2143 {
2144 if (event_enabler->event_param.instrumentation != event->priv->instrumentation)
2145 return false;
2146
2147 switch (event_enabler->enabler_type) {
2148 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
2149 {
2150 struct lttng_event_recorder_enabler *event_recorder_enabler =
2151 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
2152 struct lttng_kernel_event_recorder *event_recorder =
2153 container_of(event, struct lttng_kernel_event_recorder, parent);
2154
2155 if (lttng_desc_match_enabler(event->priv->desc, event_enabler)
2156 && event_recorder->chan == event_recorder_enabler->chan)
2157 return true;
2158 else
2159 return false;
2160 }
2161 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
2162 {
2163 struct lttng_event_notifier_enabler *event_notifier_enabler =
2164 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
2165 struct lttng_kernel_event_notifier *event_notifier =
2166 container_of(event, struct lttng_kernel_event_notifier, parent);
2167
2168 if (lttng_desc_match_enabler(event->priv->desc, event_enabler)
2169 && event_notifier->priv->group == event_notifier_enabler->group
2170 && event->priv->user_token == event_enabler->user_token)
2171 return true;
2172 else
2173 return false;
2174 }
2175 default:
2176 WARN_ON_ONCE(1);
2177 return false;
2178 }
2179 }
2180
2181 bool lttng_event_enabler_desc_match_event(struct lttng_event_enabler_common *event_enabler,
2182 const struct lttng_kernel_event_desc *desc,
2183 struct lttng_kernel_event_common *event)
2184 {
2185 if (event_enabler->event_param.instrumentation != event->priv->instrumentation)
2186 return false;
2187
2188 switch (event_enabler->enabler_type) {
2189 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
2190 {
2191 struct lttng_event_recorder_enabler *event_recorder_enabler =
2192 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
2193 struct lttng_kernel_event_recorder *event_recorder =
2194 container_of(event, struct lttng_kernel_event_recorder, parent);
2195
2196 if (event->priv->desc == desc && event_recorder->chan == event_recorder_enabler->chan)
2197 return true;
2198 else
2199 return false;
2200 }
2201 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
2202 {
2203 struct lttng_event_notifier_enabler *event_notifier_enabler =
2204 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
2205 struct lttng_kernel_event_notifier *event_notifier =
2206 container_of(event, struct lttng_kernel_event_notifier, parent);
2207
2208 if (event->priv->desc == desc
2209 && event_notifier->priv->group == event_notifier_enabler->group
2210 && event->priv->user_token == event_enabler->user_token)
2211 return true;
2212 else
2213 return false;
2214 }
2215 default:
2216 WARN_ON_ONCE(1);
2217 return false;
2218 }
2219 }
2220
2221 bool lttng_event_enabler_event_name_match_event(struct lttng_event_enabler_common *event_enabler,
2222 const char *event_name,
2223 struct lttng_kernel_event_common *event)
2224 {
2225 if (event_enabler->event_param.instrumentation != event->priv->instrumentation)
2226 return false;
2227
2228 switch (event_enabler->enabler_type) {
2229 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
2230 {
2231 struct lttng_event_recorder_enabler *event_recorder_enabler =
2232 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
2233 struct lttng_kernel_event_recorder *event_recorder =
2234 container_of(event, struct lttng_kernel_event_recorder, parent);
2235
2236 if (!strncmp(event->priv->desc->event_name, event_name, LTTNG_KERNEL_ABI_SYM_NAME_LEN - 1)
2237 && event_recorder->chan == event_recorder_enabler->chan)
2238 return true;
2239 else
2240 return false;
2241 }
2242 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
2243 {
2244 struct lttng_event_notifier_enabler *event_notifier_enabler =
2245 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
2246 struct lttng_kernel_event_notifier *event_notifier =
2247 container_of(event, struct lttng_kernel_event_notifier, parent);
2248
2249 if (!strncmp(event->priv->desc->event_name, event_name, LTTNG_KERNEL_ABI_SYM_NAME_LEN - 1)
2250 && event_notifier->priv->group == event_notifier_enabler->group
2251 && event->priv->user_token == event_enabler->user_token)
2252 return true;
2253 else
2254 return false;
2255 }
2256 default:
2257 WARN_ON_ONCE(1);
2258 return false;
2259 }
2260 }
2261
2262 static
2263 struct lttng_enabler_ref *lttng_enabler_ref(
2264 struct list_head *enablers_ref_list,
2265 struct lttng_event_enabler_common *enabler)
2266 {
2267 struct lttng_enabler_ref *enabler_ref;
2268
2269 list_for_each_entry(enabler_ref, enablers_ref_list, node) {
2270 if (enabler_ref->ref == enabler)
2271 return enabler_ref;
2272 }
2273 return NULL;
2274 }
2275
2276 static
2277 void lttng_create_tracepoint_event_if_missing(struct lttng_event_enabler_common *event_enabler)
2278 {
2279 struct lttng_event_ht *events_ht = lttng_get_event_ht_from_enabler(event_enabler);
2280 struct lttng_kernel_probe_desc *probe_desc;
2281 const struct lttng_kernel_event_desc *desc;
2282 struct list_head *probe_list;
2283 int i;
2284
2285 probe_list = lttng_get_probe_list_head();
2286 /*
2287 * For each registered probe event that matches our enabler,
2288 * create an associated lttng_event if it is not already
2289 * present.
2290 */
2291 list_for_each_entry(probe_desc, probe_list, head) {
2292 for (i = 0; i < probe_desc->nr_events; i++) {
2293 int found = 0;
2294 struct hlist_head *head;
2295 struct lttng_kernel_event_common *event;
2296 struct lttng_kernel_event_common_private *event_priv;
2297
2298 desc = probe_desc->event_desc[i];
2299 if (!lttng_desc_match_enabler(desc, event_enabler))
2300 continue;
2301
2302 /*
2303 * Check if already created.
2304 */
2305 head = utils_borrow_hash_table_bucket(events_ht->table, LTTNG_EVENT_HT_SIZE, desc->event_name);
2306 lttng_hlist_for_each_entry(event_priv, head, hlist_node) {
2307 if (lttng_event_enabler_desc_match_event(event_enabler, desc, event_priv->pub))
2308 found = 1;
2309 }
2310 if (found)
2311 continue;
2312
2313 /*
2314 * We need to create an event for this event probe.
2315 */
2316 event = _lttng_kernel_event_create(event_enabler, desc);
2317 if (!event) {
2318 printk(KERN_INFO "LTTng: Unable to create event %s\n",
2319 probe_desc->event_desc[i]->event_name);
2320 }
2321 }
2322 }
2323 }
2324
2325 static
2326 void lttng_create_syscall_event_if_missing(struct lttng_event_enabler_common *event_enabler)
2327 {
2328 int ret;
2329
2330 ret = lttng_syscalls_register_event(event_enabler);
2331 WARN_ON_ONCE(ret);
2332 }
2333
2334 /*
2335 * Create event if it is missing and present in the list of tracepoint probes.
2336 * Should be called with sessions mutex held.
2337 */
2338 static
2339 void lttng_create_event_if_missing(struct lttng_event_enabler_common *event_enabler)
2340 {
2341 switch (event_enabler->event_param.instrumentation) {
2342 case LTTNG_KERNEL_ABI_TRACEPOINT:
2343 lttng_create_tracepoint_event_if_missing(event_enabler);
2344 break;
2345
2346 case LTTNG_KERNEL_ABI_SYSCALL:
2347 lttng_create_syscall_event_if_missing(event_enabler);
2348 break;
2349
2350 default:
2351 WARN_ON_ONCE(1);
2352 break;
2353 }
2354 }
2355
2356 static
2357 void lttng_event_enabler_init_event_filter(struct lttng_event_enabler_common *event_enabler,
2358 struct lttng_kernel_event_common *event)
2359 {
2360 /* Link filter bytecodes if not linked yet. */
2361 lttng_enabler_link_bytecode(event->priv->desc, lttng_static_ctx,
2362 &event->priv->filter_bytecode_runtime_head, &event_enabler->filter_bytecode_head);
2363 }
2364
2365 static
2366 void lttng_event_enabler_init_event_capture(struct lttng_event_enabler_common *event_enabler,
2367 struct lttng_kernel_event_common *event)
2368 {
2369 switch (event_enabler->enabler_type) {
2370 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
2371 break;
2372 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
2373 {
2374 struct lttng_event_notifier_enabler *event_notifier_enabler =
2375 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
2376 struct lttng_kernel_event_notifier *event_notifier =
2377 container_of(event, struct lttng_kernel_event_notifier, parent);
2378
2379 /* Link capture bytecodes if not linked yet. */
2380 lttng_enabler_link_bytecode(event->priv->desc,
2381 lttng_static_ctx, &event_notifier->priv->capture_bytecode_runtime_head,
2382 &event_notifier_enabler->capture_bytecode_head);
2383 event_notifier->priv->num_captures = event_notifier_enabler->num_captures;
2384 break;
2385 }
2386 default:
2387 WARN_ON_ONCE(1);
2388 }
2389 }
2390
2391 /*
2392 * Create events associated with an event_enabler (if not already present),
2393 * and add backward reference from the event to the enabler.
2394 * Should be called with sessions mutex held.
2395 */
2396 static
2397 int lttng_event_enabler_ref_events(struct lttng_event_enabler_common *event_enabler)
2398 {
2399 struct list_head *event_list_head = lttng_get_event_list_head_from_enabler(event_enabler);
2400 struct lttng_kernel_event_common_private *event_priv;
2401
2402 lttng_syscall_table_set_wildcard_all(event_enabler);
2403
2404 /* First ensure that probe events are created for this enabler. */
2405 lttng_create_event_if_missing(event_enabler);
2406
2407 /* Link each created event with its associated enabler. */
2408 list_for_each_entry(event_priv, event_list_head, node) {
2409 struct lttng_kernel_event_common *event = event_priv->pub;
2410 struct lttng_enabler_ref *enabler_ref;
2411
2412 if (!lttng_event_enabler_match_event(event_enabler, event))
2413 continue;
2414
2415 enabler_ref = lttng_enabler_ref(&event_priv->enablers_ref_head, event_enabler);
2416 if (!enabler_ref) {
2417 /*
2418 * If no backward reference exists yet, create it:
2419 * add a backward ref from the event to its enabler.
2420 */
2421 enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
2422 if (!enabler_ref)
2423 return -ENOMEM;
2424
2425 enabler_ref->ref = event_enabler;
2426 list_add(&enabler_ref->node, &event_priv->enablers_ref_head);
2427 }
2428
2429 lttng_event_enabler_init_event_filter(event_enabler, event);
2430 lttng_event_enabler_init_event_capture(event_enabler, event);
2431 }
2432 return 0;
2433 }
2434
2435 /*
2436 * Called at module load: connect the probe on all enablers matching
2437 * this event.
2438 * Called with sessions lock held.
2439 */
2440 int lttng_fix_pending_events(void)
2441 {
2442 struct lttng_kernel_session_private *session_priv;
2443
2444 list_for_each_entry(session_priv, &sessions, list)
2445 lttng_session_lazy_sync_event_enablers(session_priv->pub);
2446 return 0;
2447 }
2448
2449 static bool lttng_event_notifier_group_has_active_event_notifiers(
2450 struct lttng_event_notifier_group *event_notifier_group)
2451 {
2452 struct lttng_event_enabler_common *event_enabler;
2453
2454 list_for_each_entry(event_enabler, &event_notifier_group->enablers_head, node) {
2455 if (event_enabler->enabled)
2456 return true;
2457 }
2458 return false;
2459 }
2460
2461 bool lttng_event_notifier_active(void)
2462 {
2463 struct lttng_event_notifier_group *event_notifier_group;
2464
2465 list_for_each_entry(event_notifier_group, &event_notifier_groups, node) {
2466 if (lttng_event_notifier_group_has_active_event_notifiers(event_notifier_group))
2467 return true;
2468 }
2469 return false;
2470 }
2471
2472 int lttng_fix_pending_event_notifiers(void)
2473 {
2474 struct lttng_event_notifier_group *event_notifier_group;
2475
2476 list_for_each_entry(event_notifier_group, &event_notifier_groups, node)
2477 lttng_event_notifier_group_sync_enablers(event_notifier_group);
2478 return 0;
2479 }
2480
2481 struct lttng_event_recorder_enabler *lttng_event_recorder_enabler_create(
2482 enum lttng_enabler_format_type format_type,
2483 struct lttng_kernel_abi_event *event_param,
2484 struct lttng_kernel_channel_buffer *chan)
2485 {
2486 struct lttng_event_recorder_enabler *event_enabler;
2487
2488 event_enabler = kzalloc(sizeof(*event_enabler), GFP_KERNEL);
2489 if (!event_enabler)
2490 return NULL;
2491 event_enabler->parent.enabler_type = LTTNG_EVENT_ENABLER_TYPE_RECORDER;
2492 event_enabler->parent.format_type = format_type;
2493 INIT_LIST_HEAD(&event_enabler->parent.filter_bytecode_head);
2494 memcpy(&event_enabler->parent.event_param, event_param,
2495 sizeof(event_enabler->parent.event_param));
2496 event_enabler->chan = chan;
2497 /* ctx left NULL */
2498 event_enabler->parent.enabled = 0;
2499 return event_enabler;
2500 }
2501
2502 void lttng_event_enabler_session_add(struct lttng_kernel_session *session,
2503 struct lttng_event_recorder_enabler *event_enabler)
2504 {
2505 mutex_lock(&sessions_mutex);
2506 list_add(&event_enabler->parent.node, &session->priv->enablers_head);
2507 event_enabler->parent.published = true;
2508 lttng_session_lazy_sync_event_enablers(session);
2509 mutex_unlock(&sessions_mutex);
2510 }
2511
2512 int lttng_event_enabler_enable(struct lttng_event_enabler_common *event_enabler)
2513 {
2514 mutex_lock(&sessions_mutex);
2515 event_enabler->enabled = 1;
2516 lttng_event_enabler_sync(event_enabler);
2517 mutex_unlock(&sessions_mutex);
2518 return 0;
2519 }
2520
2521 int lttng_event_enabler_disable(struct lttng_event_enabler_common *event_enabler)
2522 {
2523 mutex_lock(&sessions_mutex);
2524 event_enabler->enabled = 0;
2525 lttng_event_enabler_sync(event_enabler);
2526 mutex_unlock(&sessions_mutex);
2527 return 0;
2528 }
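
/*
 * Minimal usage sketch tying together the enabler helpers above
 * (hypothetical caller with an existing session and channel; the actual
 * callers are expected to live in the ABI layer):
 *
 *	struct lttng_kernel_abi_event ev_param = {
 *		.instrumentation = LTTNG_KERNEL_ABI_TRACEPOINT,
 *		.name = "sched_*",
 *	};
 *	struct lttng_event_recorder_enabler *enabler;
 *
 *	enabler = lttng_event_recorder_enabler_create(
 *			LTTNG_ENABLER_FORMAT_STAR_GLOB, &ev_param, chan);
 *	if (enabler) {
 *		lttng_event_enabler_session_add(session, enabler);
 *		lttng_event_enabler_enable(&enabler->parent);
 *	}
 */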
2529
2530 static
2531 int lttng_enabler_attach_filter_bytecode(struct lttng_event_enabler_common *enabler,
2532 struct lttng_kernel_abi_filter_bytecode __user *bytecode)
2533 {
2534 struct lttng_kernel_bytecode_node *bytecode_node;
2535 uint32_t bytecode_len;
2536 int ret;
2537
2538 ret = get_user(bytecode_len, &bytecode->len);
2539 if (ret)
2540 return ret;
2541 bytecode_node = lttng_kvzalloc(sizeof(*bytecode_node) + bytecode_len,
2542 GFP_KERNEL);
2543 if (!bytecode_node)
2544 return -ENOMEM;
2545 ret = copy_from_user(&bytecode_node->bc, bytecode,
2546 sizeof(*bytecode) + bytecode_len);
2547 if (ret)
2548 goto error_free;
2549
2550 bytecode_node->type = LTTNG_KERNEL_BYTECODE_TYPE_FILTER;
2551 bytecode_node->enabler = enabler;
2552 /* Enforce length based on allocated size */
2553 bytecode_node->bc.len = bytecode_len;
2554 list_add_tail(&bytecode_node->node, &enabler->filter_bytecode_head);
2555
2556 return 0;
2557
2558 error_free:
2559 lttng_kvfree(bytecode_node);
2560 return ret;
2561 }
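
/*
 * Note: copy_from_user() above copies sizeof(*bytecode) + bytecode_len bytes
 * in one go, i.e. the ABI header immediately followed by the bytecode
 * payload, which is assumed to be contiguous in user memory.
 */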
2562
2563 int lttng_event_enabler_attach_filter_bytecode(struct lttng_event_enabler_common *event_enabler,
2564 struct lttng_kernel_abi_filter_bytecode __user *bytecode)
2565 {
2566 int ret;
2567 ret = lttng_enabler_attach_filter_bytecode(event_enabler, bytecode);
2568 if (ret)
2569 goto error;
2570 lttng_event_enabler_sync(event_enabler);
2571 return 0;
2572
2573 error:
2574 return ret;
2575 }
2576
2577 int lttng_event_add_callsite(struct lttng_kernel_event_common *event,
2578 struct lttng_kernel_abi_event_callsite __user *callsite)
2579 {
2580
2581 switch (event->priv->instrumentation) {
2582 case LTTNG_KERNEL_ABI_UPROBE:
2583 return lttng_uprobes_event_add_callsite(event, callsite);
2584 default:
2585 return -EINVAL;
2586 }
2587 }
2588
2589 static
2590 void lttng_enabler_destroy(struct lttng_event_enabler_common *enabler)
2591 {
2592 struct lttng_kernel_bytecode_node *filter_node, *tmp_filter_node;
2593
2594 /* Destroy filter bytecode */
2595 list_for_each_entry_safe(filter_node, tmp_filter_node,
2596 &enabler->filter_bytecode_head, node) {
2597 lttng_kvfree(filter_node);
2598 }
2599 }
2600
2601 void lttng_event_enabler_destroy(struct lttng_event_enabler_common *event_enabler)
2602 {
2603 lttng_enabler_destroy(event_enabler);
2604 if (event_enabler->published)
2605 list_del(&event_enabler->node);
2606
2607 switch (event_enabler->enabler_type) {
2608 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
2609 {
2610 struct lttng_event_recorder_enabler *event_recorder_enabler =
2611 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
2612
2613 kfree(event_recorder_enabler);
2614 break;
2615 }
2616 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
2617 {
2618 struct lttng_event_notifier_enabler *event_notifier_enabler =
2619 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
2620
2621 kfree(event_notifier_enabler);
2622 break;
2623 }
2624 default:
2625 WARN_ON_ONCE(1);
2626 }
2627 }
2628
2629 struct lttng_event_notifier_enabler *lttng_event_notifier_enabler_create(
2630 enum lttng_enabler_format_type format_type,
2631 struct lttng_kernel_abi_event_notifier *event_notifier_param,
2632 struct lttng_event_notifier_group *event_notifier_group)
2633 {
2634 struct lttng_event_notifier_enabler *event_notifier_enabler;
2635
2636 event_notifier_enabler = kzalloc(sizeof(*event_notifier_enabler), GFP_KERNEL);
2637 if (!event_notifier_enabler)
2638 return NULL;
2639
2640 event_notifier_enabler->parent.enabler_type = LTTNG_EVENT_ENABLER_TYPE_NOTIFIER;
2641 event_notifier_enabler->parent.format_type = format_type;
2642 INIT_LIST_HEAD(&event_notifier_enabler->parent.filter_bytecode_head);
2643 INIT_LIST_HEAD(&event_notifier_enabler->capture_bytecode_head);
2644
2645 event_notifier_enabler->error_counter_index = event_notifier_param->error_counter_index;
2646 event_notifier_enabler->num_captures = 0;
2647
2648 memcpy(&event_notifier_enabler->parent.event_param, &event_notifier_param->event,
2649 sizeof(event_notifier_enabler->parent.event_param));
2650
2651 event_notifier_enabler->parent.enabled = 0;
2652 event_notifier_enabler->parent.user_token = event_notifier_param->event.token;
2653 event_notifier_enabler->group = event_notifier_group;
2654 return event_notifier_enabler;
2655 }
2656
2657 void lttng_event_notifier_enabler_group_add(struct lttng_event_notifier_group *event_notifier_group,
2658 struct lttng_event_notifier_enabler *event_notifier_enabler)
2659 {
2660 mutex_lock(&sessions_mutex);
2661 list_add(&event_notifier_enabler->parent.node, &event_notifier_enabler->group->enablers_head);
2662 event_notifier_enabler->parent.published = true;
2663 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2664 mutex_unlock(&sessions_mutex);
2665 }
2666
2667 int lttng_event_notifier_enabler_enable(
2668 struct lttng_event_notifier_enabler *event_notifier_enabler)
2669 {
2670 mutex_lock(&sessions_mutex);
2671 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 1;
2672 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2673 mutex_unlock(&sessions_mutex);
2674 return 0;
2675 }
2676
2677 int lttng_event_notifier_enabler_disable(
2678 struct lttng_event_notifier_enabler *event_notifier_enabler)
2679 {
2680 mutex_lock(&sessions_mutex);
2681 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 0;
2682 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2683 mutex_unlock(&sessions_mutex);
2684 return 0;
2685 }
2686
2687 int lttng_event_notifier_enabler_attach_capture_bytecode(
2688 struct lttng_event_notifier_enabler *event_notifier_enabler,
2689 struct lttng_kernel_abi_capture_bytecode __user *bytecode)
2690 {
2691 struct lttng_kernel_bytecode_node *bytecode_node;
2692 struct lttng_event_enabler_common *enabler =
2693 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler);
2694 uint32_t bytecode_len;
2695 int ret;
2696
2697 ret = get_user(bytecode_len, &bytecode->len);
2698 if (ret)
2699 return ret;
2700
2701 bytecode_node = lttng_kvzalloc(sizeof(*bytecode_node) + bytecode_len,
2702 GFP_KERNEL);
2703 if (!bytecode_node)
2704 return -ENOMEM;
2705
2706 ret = copy_from_user(&bytecode_node->bc, bytecode,
2707 sizeof(*bytecode) + bytecode_len);
2708 if (ret)
2709 goto error_free;
2710
2711 bytecode_node->type = LTTNG_KERNEL_BYTECODE_TYPE_CAPTURE;
2712 bytecode_node->enabler = enabler;
2713
2714 /* Enforce length based on allocated size */
2715 bytecode_node->bc.len = bytecode_len;
2716 list_add_tail(&bytecode_node->node, &event_notifier_enabler->capture_bytecode_head);
2717
2718 event_notifier_enabler->num_captures++;
2719
2720 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2721 goto end;
2722
2723 error_free:
2724 lttng_kvfree(bytecode_node);
2725 end:
2726 return ret;
2727 }
2728
2729 static
2730 void lttng_event_sync_filter_state(struct lttng_kernel_event_common *event)
2731 {
2732 int has_enablers_without_filter_bytecode = 0, nr_filters = 0;
2733 struct lttng_kernel_bytecode_runtime *runtime;
2734 struct lttng_enabler_ref *enabler_ref;
2735
2736 /* Check whether the event has enabled enablers without filter bytecode. */
2737 list_for_each_entry(enabler_ref, &event->priv->enablers_ref_head, node) {
2738 if (enabler_ref->ref->enabled
2739 && list_empty(&enabler_ref->ref->filter_bytecode_head)) {
2740 has_enablers_without_filter_bytecode = 1;
2741 break;
2742 }
2743 }
2744 event->priv->has_enablers_without_filter_bytecode = has_enablers_without_filter_bytecode;
2745
2746 /* Enable filters */
2747 list_for_each_entry(runtime, &event->priv->filter_bytecode_runtime_head, node) {
2748 lttng_bytecode_sync_state(runtime);
2749 nr_filters++;
2750 }
2751 WRITE_ONCE(event->eval_filter, !(has_enablers_without_filter_bytecode || !nr_filters));
2752 }
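
/*
 * Net effect of the expression above: eval_filter is set only when at least
 * one filter runtime is attached and every enabled enabler contributes a
 * filter; a single enabled enabler without bytecode disables filter
 * evaluation for the event.
 */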
2753
2754 static
2755 void lttng_event_sync_capture_state(struct lttng_kernel_event_common *event)
2756 {
2757 switch (event->type) {
2758 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
2759 break;
2760 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
2761 {
2762 struct lttng_kernel_event_notifier *event_notifier =
2763 container_of(event, struct lttng_kernel_event_notifier, parent);
2764 struct lttng_kernel_bytecode_runtime *runtime;
2765 int nr_captures = 0;
2766
2767 /* Enable captures */
2768 list_for_each_entry(runtime, &event_notifier->priv->capture_bytecode_runtime_head, node) {
2769 lttng_bytecode_sync_state(runtime);
2770 nr_captures++;
2771 }
2772 WRITE_ONCE(event_notifier->eval_capture, !!nr_captures);
2773 break;
2774 }
2775 default:
2776 WARN_ON_ONCE(1);
2777 }
2778 }
2779
2780 static
2781 bool lttng_get_event_enabled_state(struct lttng_kernel_event_common *event)
2782 {
2783 struct lttng_enabler_ref *enabler_ref;
2784 bool enabled = false;
2785
2786 switch (event->priv->instrumentation) {
2787 case LTTNG_KERNEL_ABI_TRACEPOINT:
2788 lttng_fallthrough;
2789 case LTTNG_KERNEL_ABI_SYSCALL:
2790 /* An event is enabled if at least one of its enablers is enabled. */
2791 list_for_each_entry(enabler_ref, &event->priv->enablers_ref_head, node) {
2792 if (enabler_ref->ref->enabled) {
2793 enabled = true;
2794 break;
2795 }
2796 }
2797 break;
2798 default:
2799 WARN_ON_ONCE(1);
2800 return false;
2801 }
2802
2803 switch (event->type) {
2804 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
2805 {
2806 struct lttng_kernel_event_recorder *event_recorder =
2807 container_of(event, struct lttng_kernel_event_recorder, parent);
2808
2809 /*
2810 * Enabled state is based on union of enablers, with
2811 * intersection of session and channel transient enable
2812 * states.
2813 */
2814 return enabled && event_recorder->chan->parent.session->priv->tstate && event_recorder->chan->priv->parent.tstate;
2815 }
2816 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
2817 return enabled;
2818 default:
2819 WARN_ON_ONCE(1);
2820 return false;
2821 }
2822 }
2823
2824 static
2825 bool lttng_event_is_lazy_sync(struct lttng_kernel_event_common *event)
2826 {
2827 switch (event->priv->instrumentation) {
2828 case LTTNG_KERNEL_ABI_TRACEPOINT:
2829 lttng_fallthrough;
2830 case LTTNG_KERNEL_ABI_SYSCALL:
2831 return true;
2832
2833 default:
2834 /* Not handled with lazy sync. */
2835 return false;
2836 }
2837 }
2838
2839 /*
2840 * Should be called with sessions mutex held.
2841 */
2842 static
2843 void lttng_sync_event_list(struct list_head *event_enabler_list,
2844 struct list_head *event_list)
2845 {
2846 struct lttng_kernel_event_common_private *event_priv;
2847 struct lttng_event_enabler_common *event_enabler;
2848
2849 list_for_each_entry(event_enabler, event_enabler_list, node)
2850 lttng_event_enabler_ref_events(event_enabler);
2851
2852 /*
2853 * For each event, if at least one of its enablers is enabled,
2854 * and its channel and session transient states are enabled, we
2855 * enable the event, else we disable it.
2856 */
2857 list_for_each_entry(event_priv, event_list, node) {
2858 struct lttng_kernel_event_common *event = event_priv->pub;
2859 bool enabled;
2860
2861 if (!lttng_event_is_lazy_sync(event))
2862 continue;
2863
2864 enabled = lttng_get_event_enabled_state(event);
2865 WRITE_ONCE(event->enabled, enabled);
2866 /*
2867 * Sync tracepoint registration with event enabled state.
2868 */
2869 if (enabled) {
2870 register_event(event);
2871 } else {
2872 _lttng_event_unregister(event);
2873 }
2874
2875 lttng_event_sync_filter_state(event);
2876 lttng_event_sync_capture_state(event);
2877 }
2878 }
2879
2880 /*
2881 * lttng_session_sync_event_enablers should be called just before starting a
2882 * session.
2883 */
2884 static
2885 void lttng_session_sync_event_enablers(struct lttng_kernel_session *session)
2886 {
2887 lttng_sync_event_list(&session->priv->enablers_head, &session->priv->events);
2888 }
2889
2890 /*
2891 * Apply enablers to session events, adding events to the session if
2892 * needed. This is required after each modification applied to an active
2893 * session, and right before session "start".
2894 * "lazy" sync means we only sync if required.
2895 * Should be called with sessions mutex held.
2896 */
2897 static
2898 void lttng_session_lazy_sync_event_enablers(struct lttng_kernel_session *session)
2899 {
2900 /* We can skip if session is not active */
2901 if (!session->active)
2902 return;
2903 lttng_session_sync_event_enablers(session);
2904 }
2905
2906 static
2907 void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group)
2908 {
2909 lttng_sync_event_list(&event_notifier_group->enablers_head, &event_notifier_group->event_notifiers_head);
2910 }
2911
2912 static
2913 void lttng_event_enabler_sync(struct lttng_event_enabler_common *event_enabler)
2914 {
2915 switch (event_enabler->enabler_type) {
2916 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
2917 {
2918 struct lttng_event_recorder_enabler *event_recorder_enabler =
2919 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
2920 lttng_session_lazy_sync_event_enablers(event_recorder_enabler->chan->parent.session);
2921 break;
2922 }
2923 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
2924 {
2925 struct lttng_event_notifier_enabler *event_notifier_enabler =
2926 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
2927 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2928 break;
2929 }
2930 default:
2931 WARN_ON_ONCE(1);
2932 }
2933 }
2934
2935 /*
2936 * Serialize at most one packet worth of metadata into a metadata
2937 * channel.
2938 * We grab the metadata cache mutex to get exclusive access to our metadata
2939 * buffer and to the metadata cache. Exclusive access to the metadata buffer
2940 * allows us to do racy operations such as looking for remaining space left in
2941 * packet and write, since mutual exclusion protects us from concurrent writes.
2942 * Mutual exclusion on the metadata cache allows us to read the cache content
2943 * without racing against reallocation of the cache by updates.
2944 * Returns the number of bytes written to the channel, 0 if no data
2945 * was written and a negative value on error.
2946 */
2947 int lttng_metadata_output_channel(struct lttng_metadata_stream *stream,
2948 struct lttng_kernel_ring_buffer_channel *chan, bool *coherent)
2949 {
2950 struct lttng_kernel_ring_buffer_ctx ctx;
2951 int ret = 0;
2952 size_t len, reserve_len;
2953
2954 /*
2955 * Ensure we support multiple get_next / put sequences followed by
2956 * put_next. The metadata cache lock protects reading the metadata
2957 * cache. It can indeed be read concurrently by "get_next_subbuf" and
2958 * "flush" operations on the buffer invoked by different processes.
2959 * Moreover, since the metadata cache memory can be reallocated, we
2960 * need to have exclusive access against updates even though we only
2961 * read it.
2962 */
2963 mutex_lock(&stream->metadata_cache->lock);
2964 WARN_ON(stream->metadata_in < stream->metadata_out);
2965 if (stream->metadata_in != stream->metadata_out)
2966 goto end;
2967
2968 /* Metadata regenerated, change the version. */
2969 if (stream->metadata_cache->version != stream->version)
2970 stream->version = stream->metadata_cache->version;
2971
2972 len = stream->metadata_cache->metadata_written -
2973 stream->metadata_in;
2974 if (!len)
2975 goto end;
2976 reserve_len = min_t(size_t,
2977 stream->transport->ops.priv->packet_avail_size(chan),
2978 len);
2979 lib_ring_buffer_ctx_init(&ctx, chan, reserve_len,
2980 sizeof(char), NULL);
2981 /*
2982 * If reservation failed, return an error to the caller.
2983 */
2984 ret = stream->transport->ops.event_reserve(&ctx);
2985 if (ret != 0) {
2986 printk(KERN_WARNING "LTTng: Metadata event reservation failed\n");
2987 stream->coherent = false;
2988 goto end;
2989 }
2990 stream->transport->ops.event_write(&ctx,
2991 stream->metadata_cache->data + stream->metadata_in,
2992 reserve_len, 1);
2993 stream->transport->ops.event_commit(&ctx);
2994 stream->metadata_in += reserve_len;
2995 if (reserve_len < len)
2996 stream->coherent = false;
2997 else
2998 stream->coherent = true;
2999 ret = reserve_len;
3000
3001 end:
3002 if (coherent)
3003 *coherent = stream->coherent;
3004 mutex_unlock(&stream->metadata_cache->lock);
3005 return ret;
3006 }
3007
3008 static
3009 void lttng_metadata_begin(struct lttng_kernel_session *session)
3010 {
3011 if (atomic_inc_return(&session->priv->metadata_cache->producing) == 1)
3012 mutex_lock(&session->priv->metadata_cache->lock);
3013 }
3014
3015 static
3016 void lttng_metadata_end(struct lttng_kernel_session *session)
3017 {
3018 WARN_ON_ONCE(!atomic_read(&session->priv->metadata_cache->producing));
3019 if (atomic_dec_return(&session->priv->metadata_cache->producing) == 0) {
3020 struct lttng_metadata_stream *stream;
3021
3022 list_for_each_entry(stream, &session->priv->metadata_cache->metadata_stream, list)
3023 wake_up_interruptible(&stream->read_wait);
3024 mutex_unlock(&session->priv->metadata_cache->lock);
3025 }
3026 }
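
/*
 * Typical transaction pattern used by the statedump helpers below (sketch
 * only; error handling elided):
 *
 *	lttng_metadata_begin(session);
 *	ret = lttng_metadata_printf(session, "...");
 *	lttng_metadata_end(session);
 *
 * Nesting is allowed: the "producing" refcount ensures only the outermost
 * begin/end pair takes and releases the cache lock and wakes up the metadata
 * stream readers.
 */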
3027
3028 /*
3029 * Write the metadata to the metadata cache.
3030 * Must be called with sessions_mutex held.
3031 * The metadata cache lock protects us from concurrent read access from the
3032 * thread outputting metadata content to the ring buffer.
3033 * The content of the printf is printed as a single atomic metadata
3034 * transaction.
3035 */
3036 int lttng_metadata_printf(struct lttng_kernel_session *session,
3037 const char *fmt, ...)
3038 {
3039 char *str;
3040 size_t len;
3041 va_list ap;
3042
3043 WARN_ON_ONCE(!LTTNG_READ_ONCE(session->active));
3044
3045 va_start(ap, fmt);
3046 str = kvasprintf(GFP_KERNEL, fmt, ap);
3047 va_end(ap);
3048 if (!str)
3049 return -ENOMEM;
3050
3051 len = strlen(str);
3052 WARN_ON_ONCE(!atomic_read(&session->priv->metadata_cache->producing));
3053 if (session->priv->metadata_cache->metadata_written + len >
3054 session->priv->metadata_cache->cache_alloc) {
3055 char *tmp_cache_realloc;
3056 unsigned int tmp_cache_alloc_size;
3057
3058 tmp_cache_alloc_size = max_t(unsigned int,
3059 session->priv->metadata_cache->cache_alloc + len,
3060 session->priv->metadata_cache->cache_alloc << 1);
3061 tmp_cache_realloc = vzalloc(tmp_cache_alloc_size);
3062 if (!tmp_cache_realloc)
3063 goto err;
3064 if (session->priv->metadata_cache->data) {
3065 memcpy(tmp_cache_realloc,
3066 session->priv->metadata_cache->data,
3067 session->priv->metadata_cache->cache_alloc);
3068 vfree(session->priv->metadata_cache->data);
3069 }
3070
3071 session->priv->metadata_cache->cache_alloc = tmp_cache_alloc_size;
3072 session->priv->metadata_cache->data = tmp_cache_realloc;
3073 }
3074 memcpy(session->priv->metadata_cache->data +
3075 session->priv->metadata_cache->metadata_written,
3076 str, len);
3077 session->priv->metadata_cache->metadata_written += len;
3078 kfree(str);
3079
3080 return 0;
3081
3082 err:
3083 kfree(str);
3084 return -ENOMEM;
3085 }
3086
3087 static
3088 int print_tabs(struct lttng_kernel_session *session, size_t nesting)
3089 {
3090 size_t i;
3091
3092 for (i = 0; i < nesting; i++) {
3093 int ret;
3094
3095 ret = lttng_metadata_printf(session, " ");
3096 if (ret) {
3097 return ret;
3098 }
3099 }
3100 return 0;
3101 }
3102
3103 static
3104 int lttng_field_name_statedump(struct lttng_kernel_session *session,
3105 const struct lttng_kernel_event_field *field,
3106 size_t nesting)
3107 {
3108 return lttng_metadata_printf(session, " _%s;\n", field->name);
3109 }
3110
3111 static
3112 int _lttng_integer_type_statedump(struct lttng_kernel_session *session,
3113 const struct lttng_kernel_type_integer *type,
3114 enum lttng_kernel_string_encoding parent_encoding,
3115 size_t nesting)
3116 {
3117 int ret;
3118
3119 ret = print_tabs(session, nesting);
3120 if (ret)
3121 return ret;
3122 ret = lttng_metadata_printf(session,
3123 "integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s }",
3124 type->size,
3125 type->alignment,
3126 type->signedness,
3127 (parent_encoding == lttng_kernel_string_encoding_none)
3128 ? "none"
3129 : (parent_encoding == lttng_kernel_string_encoding_UTF8)
3130 ? "UTF8"
3131 : "ASCII",
3132 type->base,
3133 #if __BYTE_ORDER == __BIG_ENDIAN
3134 type->reverse_byte_order ? " byte_order = le;" : ""
3135 #else
3136 type->reverse_byte_order ? " byte_order = be;" : ""
3137 #endif
3138 );
3139 return ret;
3140 }
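
/*
 * Example of the TSDL emitted above for a 32-bit signed host-endian integer
 * (illustrative values):
 *
 *	integer { size = 32; align = 8; signed = 1; encoding = none; base = 10; }
 */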
3141
3142 /*
3143 * Must be called with sessions_mutex held.
3144 */
3145 static
3146 int _lttng_struct_type_statedump(struct lttng_kernel_session *session,
3147 const struct lttng_kernel_type_struct *type,
3148 size_t nesting)
3149 {
3150 const char *prev_field_name = NULL;
3151 int ret;
3152 uint32_t i, nr_fields;
3153 unsigned int alignment;
3154
3155 ret = print_tabs(session, nesting);
3156 if (ret)
3157 return ret;
3158 ret = lttng_metadata_printf(session,
3159 "struct {\n");
3160 if (ret)
3161 return ret;
3162 nr_fields = type->nr_fields;
3163 for (i = 0; i < nr_fields; i++) {
3164 const struct lttng_kernel_event_field *iter_field;
3165
3166 iter_field = type->fields[i];
3167 ret = _lttng_field_statedump(session, iter_field, nesting + 1, &prev_field_name);
3168 if (ret)
3169 return ret;
3170 }
3171 ret = print_tabs(session, nesting);
3172 if (ret)
3173 return ret;
3174 alignment = type->alignment;
3175 if (alignment) {
3176 ret = lttng_metadata_printf(session,
3177 "} align(%u)",
3178 alignment);
3179 } else {
3180 ret = lttng_metadata_printf(session,
3181 "}");
3182 }
3183 return ret;
3184 }
3185
3186 /*
3187 * Must be called with sessions_mutex held.
3188 */
3189 static
3190 int _lttng_struct_field_statedump(struct lttng_kernel_session *session,
3191 const struct lttng_kernel_event_field *field,
3192 size_t nesting)
3193 {
3194 int ret;
3195
3196 ret = _lttng_struct_type_statedump(session,
3197 lttng_kernel_get_type_struct(field->type), nesting);
3198 if (ret)
3199 return ret;
3200 return lttng_field_name_statedump(session, field, nesting);
3201 }
3202
3203 /*
3204 * Must be called with sessions_mutex held.
3205 */
3206 static
3207 int _lttng_variant_type_statedump(struct lttng_kernel_session *session,
3208 const struct lttng_kernel_type_variant *type,
3209 size_t nesting,
3210 const char *prev_field_name)
3211 {
3212 const char *tag_name;
3213 int ret;
3214 uint32_t i, nr_choices;
3215
3216 tag_name = type->tag_name;
3217 if (!tag_name)
3218 tag_name = prev_field_name;
3219 if (!tag_name)
3220 return -EINVAL;
3221 /*
3222 * CTF 1.8 does not allow expressing nonzero variant alignment in a nestable way.
3223 */
3224 if (type->alignment != 0)
3225 return -EINVAL;
3226 ret = print_tabs(session, nesting);
3227 if (ret)
3228 return ret;
3229 ret = lttng_metadata_printf(session,
3230 "variant <_%s> {\n",
3231 tag_name);
3232 if (ret)
3233 return ret;
3234 nr_choices = type->nr_choices;
3235 for (i = 0; i < nr_choices; i++) {
3236 const struct lttng_kernel_event_field *iter_field;
3237
3238 iter_field = type->choices[i];
3239 ret = _lttng_field_statedump(session, iter_field, nesting + 1, NULL);
3240 if (ret)
3241 return ret;
3242 }
3243 ret = print_tabs(session, nesting);
3244 if (ret)
3245 return ret;
3246 ret = lttng_metadata_printf(session,
3247 "}");
3248 return ret;
3249 }
3250
3251 /*
3252 * Must be called with sessions_mutex held.
3253 */
3254 static
3255 int _lttng_variant_field_statedump(struct lttng_kernel_session *session,
3256 const struct lttng_kernel_event_field *field,
3257 size_t nesting,
3258 const char *prev_field_name)
3259 {
3260 int ret;
3261
3262 ret = _lttng_variant_type_statedump(session,
3263 lttng_kernel_get_type_variant(field->type), nesting,
3264 prev_field_name);
3265 if (ret)
3266 return ret;
3267 return lttng_field_name_statedump(session, field, nesting);
3268 }
3269
3270 /*
3271 * Must be called with sessions_mutex held.
3272 */
3273 static
3274 int _lttng_array_field_statedump(struct lttng_kernel_session *session,
3275 const struct lttng_kernel_event_field *field,
3276 size_t nesting)
3277 {
3278 int ret;
3279 const struct lttng_kernel_type_array *array_type;
3280 const struct lttng_kernel_type_common *elem_type;
3281
3282 array_type = lttng_kernel_get_type_array(field->type);
3283 WARN_ON_ONCE(!array_type);
3284
3285 if (array_type->alignment) {
3286 ret = print_tabs(session, nesting);
3287 if (ret)
3288 return ret;
3289 ret = lttng_metadata_printf(session,
3290 "struct { } align(%u) _%s_padding;\n",
3291 array_type->alignment * CHAR_BIT,
3292 field->name);
3293 if (ret)
3294 return ret;
3295 }
3296 /*
3297 * Nested compound types: only arrays of structures and variants
3298 * are currently supported.
3299 */
3300 elem_type = array_type->elem_type;
3301 switch (elem_type->type) {
3302 case lttng_kernel_type_integer:
3303 case lttng_kernel_type_struct:
3304 case lttng_kernel_type_variant:
3305 ret = _lttng_type_statedump(session, elem_type,
3306 array_type->encoding, nesting);
3307 if (ret)
3308 return ret;
3309 break;
3310
3311 default:
3312 return -EINVAL;
3313 }
3314 ret = lttng_metadata_printf(session,
3315 " _%s[%u];\n",
3316 field->name,
3317 array_type->length);
3318 return ret;
3319 }
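
/*
 * Example of the declaration emitted above for a 16-byte array field named
 * "data" with no extra alignment padding (illustrative):
 *
 *	integer { size = 8; align = 8; signed = 0; encoding = none; base = 10; } _data[16];
 */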
3320
3321 /*
3322 * Must be called with sessions_mutex held.
3323 */
3324 static
3325 int _lttng_sequence_field_statedump(struct lttng_kernel_session *session,
3326 const struct lttng_kernel_event_field *field,
3327 size_t nesting,
3328 const char *prev_field_name)
3329 {
3330 int ret;
3331 const char *length_name;
3332 const struct lttng_kernel_type_sequence *sequence_type;
3333 const struct lttng_kernel_type_common *elem_type;
3334
3335 sequence_type = lttng_kernel_get_type_sequence(field->type);
3336 WARN_ON_ONCE(!sequence_type);
3337
3338 length_name = sequence_type->length_name;
3339 if (!length_name)
3340 length_name = prev_field_name;
3341 if (!length_name)
3342 return -EINVAL;
3343
3344 if (sequence_type->alignment) {
3345 ret = print_tabs(session, nesting);
3346 if (ret)
3347 return ret;
3348 ret = lttng_metadata_printf(session,
3349 "struct { } align(%u) _%s_padding;\n",
3350 sequence_type->alignment * CHAR_BIT,
3351 field->name);
3352 if (ret)
3353 return ret;
3354 }
3355
3356 /*
3357 * Nested compound types: only arrays of structures and variants
3358 * are currently supported.
3359 */
3360 elem_type = sequence_type->elem_type;
3361 switch (elem_type->type) {
3362 case lttng_kernel_type_integer:
3363 case lttng_kernel_type_struct:
3364 case lttng_kernel_type_variant:
3365 ret = _lttng_type_statedump(session, elem_type,
3366 sequence_type->encoding, nesting);
3367 if (ret)
3368 return ret;
3369 break;
3370
3371 default:
3372 return -EINVAL;
3373 }
3374 ret = lttng_metadata_printf(session,
3375 " _%s[ _%s ];\n",
3376 field->name,
3377 length_name);
3378 return ret;
3379 }
3380
3381 /*
3382 * Must be called with sessions_mutex held.
3383 */
3384 static
3385 int _lttng_enum_type_statedump(struct lttng_kernel_session *session,
3386 const struct lttng_kernel_type_enum *type,
3387 size_t nesting)
3388 {
3389 const struct lttng_kernel_enum_desc *enum_desc;
3390 const struct lttng_kernel_type_common *container_type;
3391 int ret;
3392 unsigned int i, nr_entries;
3393
3394 container_type = type->container_type;
3395 if (container_type->type != lttng_kernel_type_integer) {
3396 ret = -EINVAL;
3397 goto end;
3398 }
3399 enum_desc = type->desc;
3400 nr_entries = enum_desc->nr_entries;
3401
3402 ret = print_tabs(session, nesting);
3403 if (ret)
3404 goto end;
3405 ret = lttng_metadata_printf(session, "enum : ");
3406 if (ret)
3407 goto end;
3408 ret = _lttng_integer_type_statedump(session, lttng_kernel_get_type_integer(container_type),
3409 lttng_kernel_string_encoding_none, 0);
3410 if (ret)
3411 goto end;
3412 ret = lttng_metadata_printf(session, " {\n");
3413 if (ret)
3414 goto end;
3415 /* Dump all entries */
3416 for (i = 0; i < nr_entries; i++) {
3417 const struct lttng_kernel_enum_entry *entry = enum_desc->entries[i];
3418 int j, len;
3419
3420 ret = print_tabs(session, nesting + 1);
3421 if (ret)
3422 goto end;
3423 ret = lttng_metadata_printf(session,
3424 "\"");
3425 if (ret)
3426 goto end;
3427 len = strlen(entry->string);
3428 /* Escape the character '"' */
3429 for (j = 0; j < len; j++) {
3430 char c = entry->string[j];
3431
3432 switch (c) {
3433 case '"':
3434 ret = lttng_metadata_printf(session,
3435 "\\\"");
3436 break;
3437 case '\\':
3438 ret = lttng_metadata_printf(session,
3439 "\\\\");
3440 break;
3441 default:
3442 ret = lttng_metadata_printf(session,
3443 "%c", c);
3444 break;
3445 }
3446 if (ret)
3447 goto end;
3448 }
3449 ret = lttng_metadata_printf(session, "\"");
3450 if (ret)
3451 goto end;
3452
3453 if (entry->options.is_auto) {
3454 ret = lttng_metadata_printf(session, ",\n");
3455 if (ret)
3456 goto end;
3457 } else {
3458 ret = lttng_metadata_printf(session,
3459 " = ");
3460 if (ret)
3461 goto end;
3462 if (entry->start.signedness)
3463 ret = lttng_metadata_printf(session,
3464 "%lld", (long long) entry->start.value);
3465 else
3466 ret = lttng_metadata_printf(session,
3467 "%llu", entry->start.value);
3468 if (ret)
3469 goto end;
3470 if (entry->start.signedness == entry->end.signedness &&
3471 entry->start.value
3472 == entry->end.value) {
3473 ret = lttng_metadata_printf(session,
3474 ",\n");
3475 } else {
3476 if (entry->end.signedness) {
3477 ret = lttng_metadata_printf(session,
3478 " ... %lld,\n",
3479 (long long) entry->end.value);
3480 } else {
3481 ret = lttng_metadata_printf(session,
3482 " ... %llu,\n",
3483 entry->end.value);
3484 }
3485 }
3486 if (ret)
3487 goto end;
3488 }
3489 }
3490 ret = print_tabs(session, nesting);
3491 if (ret)
3492 goto end;
3493 ret = lttng_metadata_printf(session, "}");
3494 end:
3495 return ret;
3496 }
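
/*
 * Example of the enumeration emitted above (illustrative mappings):
 *
 *	enum : integer { size = 32; align = 8; signed = 1; encoding = none; base = 10; } {
 *		"ZERO" = 0,
 *		"RANGE" = 1 ... 3,
 *	}
 */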
3497
3498 /*
3499 * Must be called with sessions_mutex held.
3500 */
3501 static
3502 int _lttng_enum_field_statedump(struct lttng_kernel_session *session,
3503 const struct lttng_kernel_event_field *field,
3504 size_t nesting)
3505 {
3506 int ret;
3507 const struct lttng_kernel_type_enum *enum_type;
3508
3509 enum_type = lttng_kernel_get_type_enum(field->type);
3510 WARN_ON_ONCE(!enum_type);
3511 ret = _lttng_enum_type_statedump(session, enum_type, nesting);
3512 if (ret)
3513 return ret;
3514 return lttng_field_name_statedump(session, field, nesting);
3515 }
3516
3517 static
3518 int _lttng_integer_field_statedump(struct lttng_kernel_session *session,
3519 const struct lttng_kernel_event_field *field,
3520 size_t nesting)
3521 {
3522 int ret;
3523
3524 ret = _lttng_integer_type_statedump(session, lttng_kernel_get_type_integer(field->type),
3525 lttng_kernel_string_encoding_none, nesting);
3526 if (ret)
3527 return ret;
3528 return lttng_field_name_statedump(session, field, nesting);
3529 }
3530
3531 static
3532 int _lttng_string_type_statedump(struct lttng_kernel_session *session,
3533 const struct lttng_kernel_type_string *type,
3534 size_t nesting)
3535 {
3536 int ret;
3537
3538 /* Default encoding is UTF8 */
3539 ret = print_tabs(session, nesting);
3540 if (ret)
3541 return ret;
3542 ret = lttng_metadata_printf(session,
3543 "string%s",
3544 type->encoding == lttng_kernel_string_encoding_ASCII ?
3545 " { encoding = ASCII; }" : "");
3546 return ret;
3547 }
3548
3549 static
3550 int _lttng_string_field_statedump(struct lttng_kernel_session *session,
3551 const struct lttng_kernel_event_field *field,
3552 size_t nesting)
3553 {
3554 const struct lttng_kernel_type_string *string_type;
3555 int ret;
3556
3557 string_type = lttng_kernel_get_type_string(field->type);
3558 WARN_ON_ONCE(!string_type);
3559 ret = _lttng_string_type_statedump(session, string_type, nesting);
3560 if (ret)
3561 return ret;
3562 return lttng_field_name_statedump(session, field, nesting);
3563 }
3564
3565 /*
3566 * Must be called with sessions_mutex held.
3567 */
3568 static
3569 int _lttng_type_statedump(struct lttng_kernel_session *session,
3570 const struct lttng_kernel_type_common *type,
3571 enum lttng_kernel_string_encoding parent_encoding,
3572 size_t nesting)
3573 {
3574 int ret = 0;
3575
3576 switch (type->type) {
3577 case lttng_kernel_type_integer:
3578 ret = _lttng_integer_type_statedump(session,
3579 lttng_kernel_get_type_integer(type),
3580 parent_encoding, nesting);
3581 break;
3582 case lttng_kernel_type_enum:
3583 ret = _lttng_enum_type_statedump(session,
3584 lttng_kernel_get_type_enum(type),
3585 nesting);
3586 break;
3587 case lttng_kernel_type_string:
3588 ret = _lttng_string_type_statedump(session,
3589 lttng_kernel_get_type_string(type),
3590 nesting);
3591 break;
3592 case lttng_kernel_type_struct:
3593 ret = _lttng_struct_type_statedump(session,
3594 lttng_kernel_get_type_struct(type),
3595 nesting);
3596 break;
3597 case lttng_kernel_type_variant:
3598 ret = _lttng_variant_type_statedump(session,
3599 lttng_kernel_get_type_variant(type),
3600 nesting, NULL);
3601 break;
3602
3603 /* Nested arrays and sequences are not supported yet. */
3604 case lttng_kernel_type_array:
3605 case lttng_kernel_type_sequence:
3606 default:
3607 WARN_ON_ONCE(1);
3608 return -EINVAL;
3609 }
3610 return ret;
3611 }
3612
3613 /*
3614 * Must be called with sessions_mutex held.
3615 */
3616 static
3617 int _lttng_field_statedump(struct lttng_kernel_session *session,
3618 const struct lttng_kernel_event_field *field,
3619 size_t nesting,
3620 const char **prev_field_name_p)
3621 {
3622 const char *prev_field_name = NULL;
3623 int ret = 0;
3624
3625 if (prev_field_name_p)
3626 prev_field_name = *prev_field_name_p;
3627 switch (field->type->type) {
3628 case lttng_kernel_type_integer:
3629 ret = _lttng_integer_field_statedump(session, field, nesting);
3630 break;
3631 case lttng_kernel_type_enum:
3632 ret = _lttng_enum_field_statedump(session, field, nesting);
3633 break;
3634 case lttng_kernel_type_string:
3635 ret = _lttng_string_field_statedump(session, field, nesting);
3636 break;
3637 case lttng_kernel_type_struct:
3638 ret = _lttng_struct_field_statedump(session, field, nesting);
3639 break;
3640 case lttng_kernel_type_array:
3641 ret = _lttng_array_field_statedump(session, field, nesting);
3642 break;
3643 case lttng_kernel_type_sequence:
3644 ret = _lttng_sequence_field_statedump(session, field, nesting, prev_field_name);
3645 break;
3646 case lttng_kernel_type_variant:
3647 ret = _lttng_variant_field_statedump(session, field, nesting, prev_field_name);
3648 break;
3649
3650 default:
3651 WARN_ON_ONCE(1);
3652 return -EINVAL;
3653 }
3654 if (prev_field_name_p)
3655 *prev_field_name_p = field->name;
3656 return ret;
3657 }
3658
3659 static
3660 int _lttng_context_metadata_statedump(struct lttng_kernel_session *session,
3661 struct lttng_kernel_ctx *ctx)
3662 {
3663 const char *prev_field_name = NULL;
3664 int ret = 0;
3665 int i;
3666
3667 if (!ctx)
3668 return 0;
3669 for (i = 0; i < ctx->nr_fields; i++) {
3670 const struct lttng_kernel_ctx_field *field = &ctx->fields[i];
3671
3672 ret = _lttng_field_statedump(session, field->event_field, 2, &prev_field_name);
3673 if (ret)
3674 return ret;
3675 }
3676 return ret;
3677 }
3678
3679 static
3680 int _lttng_fields_metadata_statedump(struct lttng_kernel_session *session,
3681 struct lttng_kernel_event_recorder *event_recorder)
3682 {
3683 const char *prev_field_name = NULL;
3684 const struct lttng_kernel_event_desc *desc = event_recorder->priv->parent.desc;
3685 int ret = 0;
3686 int i;
3687
3688 for (i = 0; i < desc->tp_class->nr_fields; i++) {
3689 const struct lttng_kernel_event_field *field = desc->tp_class->fields[i];
3690
3691 ret = _lttng_field_statedump(session, field, 2, &prev_field_name);
3692 if (ret)
3693 return ret;
3694 }
3695 return ret;
3696 }
3697
3698 /*
3699 * Must be called with sessions_mutex held.
3700 * The entire event metadata is printed as a single atomic metadata
3701 * transaction.
3702 */
3703 static
3704 int _lttng_event_recorder_metadata_statedump(struct lttng_kernel_event_common *event)
3705 {
3706 struct lttng_kernel_event_recorder *event_recorder;
3707 struct lttng_kernel_channel_buffer *chan;
3708 struct lttng_kernel_session *session;
3709 int ret = 0;
3710
3711 if (event->type != LTTNG_KERNEL_EVENT_TYPE_RECORDER)
3712 return 0;
3713 event_recorder = container_of(event, struct lttng_kernel_event_recorder, parent);
3714 chan = event_recorder->chan;
3715 session = chan->parent.session;
3716
3717 if (event_recorder->priv->metadata_dumped || !LTTNG_READ_ONCE(session->active))
3718 return 0;
3719 if (chan->priv->channel_type == METADATA_CHANNEL)
3720 return 0;
3721
3722 lttng_metadata_begin(session);
3723
3724 ret = lttng_metadata_printf(session,
3725 "event {\n"
3726 " name = \"%s\";\n"
3727 " id = %u;\n"
3728 " stream_id = %u;\n",
3729 event_recorder->priv->parent.desc->event_name,
3730 event_recorder->priv->id,
3731 event_recorder->chan->priv->id);
3732 if (ret)
3733 goto end;
3734
3735 ret = lttng_metadata_printf(session,
3736 " fields := struct {\n"
3737 );
3738 if (ret)
3739 goto end;
3740
3741 ret = _lttng_fields_metadata_statedump(session, event_recorder);
3742 if (ret)
3743 goto end;
3744
3745 /*
3746 * LTTng space reservation can only reserve multiples of the
3747 * byte size.
3748 */
3749 ret = lttng_metadata_printf(session,
3750 " };\n"
3751 "};\n\n");
3752 if (ret)
3753 goto end;
3754
3755 event_recorder->priv->metadata_dumped = 1;
3756 end:
3757 lttng_metadata_end(session);
3758 return ret;
3760 }
3761
3762 /*
3763 * Must be called with sessions_mutex held.
3764 * The entire channel metadata is printed as a single atomic metadata
3765 * transaction.
3766 */
3767 static
3768 int _lttng_channel_metadata_statedump(struct lttng_kernel_session *session,
3769 struct lttng_kernel_channel_buffer *chan)
3770 {
3771 int ret = 0;
3772
3773 if (chan->priv->metadata_dumped || !LTTNG_READ_ONCE(session->active))
3774 return 0;
3775
3776 if (chan->priv->channel_type == METADATA_CHANNEL)
3777 return 0;
3778
3779 lttng_metadata_begin(session);
3780
3781 WARN_ON_ONCE(!chan->priv->header_type);
3782 ret = lttng_metadata_printf(session,
3783 "stream {\n"
3784 " id = %u;\n"
3785 " event.header := %s;\n"
3786 " packet.context := struct packet_context;\n",
3787 chan->priv->id,
3788 chan->priv->header_type == 1 ? "struct event_header_compact" :
3789 "struct event_header_large");
3790 if (ret)
3791 goto end;
3792
3793 if (chan->priv->ctx) {
3794 ret = lttng_metadata_printf(session,
3795 " event.context := struct {\n");
3796 if (ret)
3797 goto end;
3798 }
3799 ret = _lttng_context_metadata_statedump(session, chan->priv->ctx);
3800 if (ret)
3801 goto end;
3802 if (chan->priv->ctx) {
3803 ret = lttng_metadata_printf(session,
3804 " };\n");
3805 if (ret)
3806 goto end;
3807 }
3808
3809 ret = lttng_metadata_printf(session,
3810 "};\n\n");
if (ret)
goto end;
3811 
3812 chan->priv->metadata_dumped = 1;
3813 end:
3814 lttng_metadata_end(session);
3815 return ret;
3816 }
3817
3818 /*
3819 * Must be called with sessions_mutex held.
3820 */
3821 static
3822 int _lttng_stream_packet_context_declare(struct lttng_kernel_session *session)
3823 {
3824 return lttng_metadata_printf(session,
3825 "struct packet_context {\n"
3826 " uint64_clock_monotonic_t timestamp_begin;\n"
3827 " uint64_clock_monotonic_t timestamp_end;\n"
3828 " uint64_t content_size;\n"
3829 " uint64_t packet_size;\n"
3830 " uint64_t packet_seq_num;\n"
3831 " unsigned long events_discarded;\n"
3832 " uint32_t cpu_id;\n"
3833 "};\n\n"
3834 );
3835 }
3836
3837 /*
3838 * Compact header:
3839 * id range: 0 - 30.
3840 * id 31 is reserved to indicate an extended header.
3841 *
3842 * Large header:
3843 * id range: 0 - 65534.
3844 * id 65535 is reserved to indicate an extended header.
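*
* The extended variant is used when an event id does not fit in the compact
* range, or when the timestamp cannot be represented in the compact number of
* bits (e.g. after an overflow of the 27-bit compact timestamp).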
3845 *
3846 * Must be called with sessions_mutex held.
3847 */
3848 static
3849 int _lttng_event_header_declare(struct lttng_kernel_session *session)
3850 {
3851 return lttng_metadata_printf(session,
3852 "struct event_header_compact {\n"
3853 " enum : uint5_t { compact = 0 ... 30, extended = 31 } id;\n"
3854 " variant <id> {\n"
3855 " struct {\n"
3856 " uint27_clock_monotonic_t timestamp;\n"
3857 " } compact;\n"
3858 " struct {\n"
3859 " uint32_t id;\n"
3860 " uint64_clock_monotonic_t timestamp;\n"
3861 " } extended;\n"
3862 " } v;\n"
3863 "} align(%u);\n"
3864 "\n"
3865 "struct event_header_large {\n"
3866 " enum : uint16_t { compact = 0 ... 65534, extended = 65535 } id;\n"
3867 " variant <id> {\n"
3868 " struct {\n"
3869 " uint32_clock_monotonic_t timestamp;\n"
3870 " } compact;\n"
3871 " struct {\n"
3872 " uint32_t id;\n"
3873 " uint64_clock_monotonic_t timestamp;\n"
3874 " } extended;\n"
3875 " } v;\n"
3876 "} align(%u);\n\n",
3877 lttng_alignof(uint32_t) * CHAR_BIT,
3878 lttng_alignof(uint16_t) * CHAR_BIT
3879 );
3880 }
3881
3882 /*
3883 * Approximation of NTP time of day to clock monotonic correlation,
3884 * taken at start of trace.
3885 * Yes, this is only an approximation. Yes, we can (and will) do better
3886 * in future versions.
3887 * This function may return a negative offset; this can happen if the
3888 * system sets the REALTIME clock to 0 after boot.
3889 *
3890 * Use the 64-bit timespec on kernels that have it: this makes 32-bit
3891 * architectures y2038 compliant.
3892 */
3893 static
3894 int64_t measure_clock_offset(void)
3895 {
3896 uint64_t monotonic_avg, monotonic[2], realtime;
3897 uint64_t tcf = trace_clock_freq();
3898 int64_t offset;
3899 unsigned long flags;
3900 #ifdef LTTNG_KERNEL_HAS_TIMESPEC64
3901 struct timespec64 rts = { 0, 0 };
3902 #else
3903 struct timespec rts = { 0, 0 };
3904 #endif
3905
3906 /* Disable interrupts to increase correlation precision. */
3907 local_irq_save(flags);
3908 monotonic[0] = trace_clock_read64();
3909 #ifdef LTTNG_KERNEL_HAS_TIMESPEC64
3910 ktime_get_real_ts64(&rts);
3911 #else
3912 getnstimeofday(&rts);
3913 #endif
3914 monotonic[1] = trace_clock_read64();
3915 local_irq_restore(flags);
3916
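/*
* Average the two monotonic reads to approximate the instant of the REALTIME
* read, then convert the REALTIME value to trace clock units: seconds scale
* directly by the clock frequency, and nanoseconds are scaled by
* freq / NSEC_PER_SEC when the trace clock does not tick in nanoseconds.
*/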
3917 monotonic_avg = (monotonic[0] + monotonic[1]) >> 1;
3918 realtime = (uint64_t) rts.tv_sec * tcf;
3919 if (tcf == NSEC_PER_SEC) {
3920 realtime += rts.tv_nsec;
3921 } else {
3922 uint64_t n = rts.tv_nsec * tcf;
3923
3924 do_div(n, NSEC_PER_SEC);
3925 realtime += n;
3926 }
3927 offset = (int64_t) realtime - monotonic_avg;
3928 return offset;
3929 }
3930
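/*
* Print a string as the body of a CTF metadata string literal, emitting
* newline as "\n" and prefixing '\' and '"' with a backslash.
* Must be called with sessions_mutex held.
*/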
3931 static
3932 int print_escaped_ctf_string(struct lttng_kernel_session *session, const char *string)
3933 {
3934 int ret = 0;
3935 size_t i;
3936 char cur;
3937
3938 i = 0;
3939 cur = string[i];
3940 while (cur != '\0') {
3941 switch (cur) {
3942 case '\n':
3943 ret = lttng_metadata_printf(session, "%s", "\\n");
3944 break;
3945 case '\\':
3946 case '"':
3947 ret = lttng_metadata_printf(session, "%c", '\\');
3948 if (ret)
3949 goto error;
3950 /* We still print the current char */
3951 lttng_fallthrough;
3952 default:
3953 ret = lttng_metadata_printf(session, "%c", cur);
3954 break;
3955 }
3956
3957 if (ret)
3958 goto error;
3959
3960 cur = string[++i];
3961 }
3962 error:
3963 return ret;
3964 }
3965
3966 static
3967 int print_metadata_escaped_field(struct lttng_kernel_session *session, const char *field,
3968 const char *field_value)
3969 {
3970 int ret;
3971
3972 ret = lttng_metadata_printf(session, " %s = \"", field);
3973 if (ret)
3974 goto error;
3975
3976 ret = print_escaped_ctf_string(session, field_value);
3977 if (ret)
3978 goto error;
3979
3980 ret = lttng_metadata_printf(session, "\";\n");
3981
3982 error:
3983 return ret;
3984 }
3985
3986 /*
3987 * Output metadata into this session's metadata buffers.
3988 * Must be called with sessions_mutex held.
3989 */
3990 static
3991 int _lttng_session_metadata_statedump(struct lttng_kernel_session *session)
3992 {
3993 unsigned char *uuid_c = session->priv->uuid.b;
3994 unsigned char uuid_s[37], clock_uuid_s[BOOT_ID_LEN];
3995 const char *product_uuid;
3996 struct lttng_kernel_channel_buffer_private *chan_priv;
3997 struct lttng_kernel_event_recorder_private *event_recorder_priv;
3998 int ret = 0;
3999
4000 if (!LTTNG_READ_ONCE(session->active))
4001 return 0;
4002
4003 lttng_metadata_begin(session);
4004
4005 if (session->priv->metadata_dumped)
4006 goto skip_session;
4007
4008 snprintf(uuid_s, sizeof(uuid_s),
4009 "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
4010 uuid_c[0], uuid_c[1], uuid_c[2], uuid_c[3],
4011 uuid_c[4], uuid_c[5], uuid_c[6], uuid_c[7],
4012 uuid_c[8], uuid_c[9], uuid_c[10], uuid_c[11],
4013 uuid_c[12], uuid_c[13], uuid_c[14], uuid_c[15]);
4014
4015 ret = lttng_metadata_printf(session,
4016 "typealias integer { size = 8; align = %u; signed = false; } := uint8_t;\n"
4017 "typealias integer { size = 16; align = %u; signed = false; } := uint16_t;\n"
4018 "typealias integer { size = 32; align = %u; signed = false; } := uint32_t;\n"
4019 "typealias integer { size = 64; align = %u; signed = false; } := uint64_t;\n"
4020 "typealias integer { size = %u; align = %u; signed = false; } := unsigned long;\n"
4021 "typealias integer { size = 5; align = 1; signed = false; } := uint5_t;\n"
4022 "typealias integer { size = 27; align = 1; signed = false; } := uint27_t;\n"
4023 "\n"
4024 "trace {\n"
4025 " major = %u;\n"
4026 " minor = %u;\n"
4027 " uuid = \"%s\";\n"
4028 " byte_order = %s;\n"
4029 " packet.header := struct {\n"
4030 " uint32_t magic;\n"
4031 " uint8_t uuid[16];\n"
4032 " uint32_t stream_id;\n"
4033 " uint64_t stream_instance_id;\n"
4034 " };\n"
4035 "};\n\n",
4036 lttng_alignof(uint8_t) * CHAR_BIT,
4037 lttng_alignof(uint16_t) * CHAR_BIT,
4038 lttng_alignof(uint32_t) * CHAR_BIT,
4039 lttng_alignof(uint64_t) * CHAR_BIT,
4040 sizeof(unsigned long) * CHAR_BIT,
4041 lttng_alignof(unsigned long) * CHAR_BIT,
4042 CTF_SPEC_MAJOR,
4043 CTF_SPEC_MINOR,
4044 uuid_s,
4045 #if __BYTE_ORDER == __BIG_ENDIAN
4046 "be"
4047 #else
4048 "le"
4049 #endif
4050 );
4051 if (ret)
4052 goto end;
4053
4054 ret = lttng_metadata_printf(session,
4055 "env {\n"
4056 " hostname = \"%s\";\n"
4057 " domain = \"kernel\";\n"
4058 " sysname = \"%s\";\n"
4059 " kernel_release = \"%s\";\n"
4060 " kernel_version = \"%s\";\n"
4061 " tracer_name = \"lttng-modules\";\n"
4062 " tracer_major = %d;\n"
4063 " tracer_minor = %d;\n"
4064 " tracer_patchlevel = %d;\n"
4065 " trace_buffering_scheme = \"global\";\n",
4066 current->nsproxy->uts_ns->name.nodename,
4067 utsname()->sysname,
4068 utsname()->release,
4069 utsname()->version,
4070 LTTNG_MODULES_MAJOR_VERSION,
4071 LTTNG_MODULES_MINOR_VERSION,
4072 LTTNG_MODULES_PATCHLEVEL_VERSION
4073 );
4074 if (ret)
4075 goto end;
4076
4077 ret = print_metadata_escaped_field(session, "trace_name", session->priv->name);
4078 if (ret)
4079 goto end;
4080 ret = print_metadata_escaped_field(session, "trace_creation_datetime",
4081 session->priv->creation_time);
4082 if (ret)
4083 goto end;
4084
4085 /* Add the product UUID to the 'env' section */
4086 product_uuid = dmi_get_system_info(DMI_PRODUCT_UUID);
4087 if (product_uuid) {
4088 ret = lttng_metadata_printf(session,
4089 " product_uuid = \"%s\";\n",
4090 product_uuid
4091 );
4092 if (ret)
4093 goto end;
4094 }
4095
4096 /* Close the 'env' section */
4097 ret = lttng_metadata_printf(session, "};\n\n");
4098 if (ret)
4099 goto end;
4100
4101 ret = lttng_metadata_printf(session,
4102 "clock {\n"
4103 " name = \"%s\";\n",
4104 trace_clock_name()
4105 );
4106 if (ret)
4107 goto end;
4108
4109 if (!trace_clock_uuid(clock_uuid_s)) {
4110 ret = lttng_metadata_printf(session,
4111 " uuid = \"%s\";\n",
4112 clock_uuid_s
4113 );
4114 if (ret)
4115 goto end;
4116 }
4117
4118 ret = lttng_metadata_printf(session,
4119 " description = \"%s\";\n"
4120 " freq = %llu; /* Frequency, in Hz */\n"
4121 " /* clock value offset from Epoch is: offset * (1/freq) */\n"
4122 " offset = %lld;\n"
4123 "};\n\n",
4124 trace_clock_description(),
4125 (unsigned long long) trace_clock_freq(),
4126 (long long) measure_clock_offset()
4127 );
4128 if (ret)
4129 goto end;
4130
4131 ret = lttng_metadata_printf(session,
4132 "typealias integer {\n"
4133 " size = 27; align = 1; signed = false;\n"
4134 " map = clock.%s.value;\n"
4135 "} := uint27_clock_monotonic_t;\n"
4136 "\n"
4137 "typealias integer {\n"
4138 " size = 32; align = %u; signed = false;\n"
4139 " map = clock.%s.value;\n"
4140 "} := uint32_clock_monotonic_t;\n"
4141 "\n"
4142 "typealias integer {\n"
4143 " size = 64; align = %u; signed = false;\n"
4144 " map = clock.%s.value;\n"
4145 "} := uint64_clock_monotonic_t;\n\n",
4146 trace_clock_name(),
4147 lttng_alignof(uint32_t) * CHAR_BIT,
4148 trace_clock_name(),
4149 lttng_alignof(uint64_t) * CHAR_BIT,
4150 trace_clock_name()
4151 );
4152 if (ret)
4153 goto end;
4154
4155 ret = _lttng_stream_packet_context_declare(session);
4156 if (ret)
4157 goto end;
4158
4159 ret = _lttng_event_header_declare(session);
4160 if (ret)
4161 goto end;
4162
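/*
* Channels and events are walked on every statedump, even when the
* session-level preamble has already been emitted, so that objects created
* afterwards get their metadata dumped; each of them checks its own
* metadata_dumped flag.
*/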
4163 skip_session:
4164 list_for_each_entry(chan_priv, &session->priv->chan, node) {
4165 ret = _lttng_channel_metadata_statedump(session, chan_priv->pub);
4166 if (ret)
4167 goto end;
4168 }
4169
4170 list_for_each_entry(event_recorder_priv, &session->priv->events, parent.node) {
4171 ret = _lttng_event_recorder_metadata_statedump(&event_recorder_priv->pub->parent);
4172 if (ret)
4173 goto end;
4174 }
4175 session->priv->metadata_dumped = 1;
4176 end:
4177 lttng_metadata_end(session);
4178 return ret;
4179 }
4180
4181 /**
4182 * lttng_transport_register - LTTng transport registration
4183 * @transport: transport structure
4184 *
4185 * Registers a transport which can be used as output to extract the data out of
4186 * LTTng. The module calling this registration function must ensure that no
4187 * trap-inducing code will be executed by the transport functions. E.g.
4188 * vmalloc_sync_mappings() must be called between a vmalloc() and the moment
4189 * the memory is made visible to the transport function. This registration acts
4190 * as a vmalloc_sync_mappings(), so a module only needs to synchronize the
4191 * mappings itself if it allocates virtual memory after its registration.
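*
* Minimal usage sketch (hypothetical client; the transport name is made up
* and the ring buffer .ops callbacks, which a real client must provide, are
* omitted):
*
*	static struct lttng_transport my_transport = {
*		.name = "relay-example",
*		.owner = THIS_MODULE,
*	};
*
*	static int __init my_client_init(void)
*	{
*		lttng_transport_register(&my_transport);
*		return 0;
*	}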
4192 */
4193 void lttng_transport_register(struct lttng_transport *transport)
4194 {
4195 /*
4196 * Make sure no page fault can be triggered by the module about to be
4197 * registered. We deal with this here so we don't have to call
4198 * vmalloc_sync_mappings() in each module's init.
4199 */
4200 wrapper_vmalloc_sync_mappings();
4201
4202 mutex_lock(&sessions_mutex);
4203 list_add_tail(&transport->node, &lttng_transport_list);
4204 mutex_unlock(&sessions_mutex);
4205 }
4206 EXPORT_SYMBOL_GPL(lttng_transport_register);
4207
4208 /**
4209 * lttng_transport_unregister - LTTng transport unregistration
4210 * @transport: transport structure
4211 */
4212 void lttng_transport_unregister(struct lttng_transport *transport)
4213 {
4214 mutex_lock(&sessions_mutex);
4215 list_del(&transport->node);
4216 mutex_unlock(&sessions_mutex);
4217 }
4218 EXPORT_SYMBOL_GPL(lttng_transport_unregister);
4219
4220 void lttng_counter_transport_register(struct lttng_counter_transport *transport)
4221 {
4222 /*
4223 * Make sure no page fault can be triggered by the module about to be
4224 * registered. We deal with this here so we don't have to call
4225 * vmalloc_sync_mappings() in each module's init.
4226 */
4227 wrapper_vmalloc_sync_mappings();
4228
4229 mutex_lock(&sessions_mutex);
4230 list_add_tail(&transport->node, &lttng_counter_transport_list);
4231 mutex_unlock(&sessions_mutex);
4232 }
4233 EXPORT_SYMBOL_GPL(lttng_counter_transport_register);
4234
4235 void lttng_counter_transport_unregister(struct lttng_counter_transport *transport)
4236 {
4237 mutex_lock(&sessions_mutex);
4238 list_del(&transport->node);
4239 mutex_unlock(&sessions_mutex);
4240 }
4241 EXPORT_SYMBOL_GPL(lttng_counter_transport_unregister);
4242
4243 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
4244
4245 enum cpuhp_state lttng_hp_prepare;
4246 enum cpuhp_state lttng_hp_online;
4247
4248 static int lttng_hotplug_prepare(unsigned int cpu, struct hlist_node *node)
4249 {
4250 struct lttng_cpuhp_node *lttng_node;
4251
4252 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4253 switch (lttng_node->component) {
4254 case LTTNG_RING_BUFFER_FRONTEND:
4255 return 0;
4256 case LTTNG_RING_BUFFER_BACKEND:
4257 return lttng_cpuhp_rb_backend_prepare(cpu, lttng_node);
4258 case LTTNG_RING_BUFFER_ITER:
4259 return 0;
4260 case LTTNG_CONTEXT_PERF_COUNTERS:
4261 return 0;
4262 default:
4263 return -EINVAL;
4264 }
4265 }
4266
4267 static int lttng_hotplug_dead(unsigned int cpu, struct hlist_node *node)
4268 {
4269 struct lttng_cpuhp_node *lttng_node;
4270
4271 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4272 switch (lttng_node->component) {
4273 case LTTNG_RING_BUFFER_FRONTEND:
4274 return lttng_cpuhp_rb_frontend_dead(cpu, lttng_node);
4275 case LTTNG_RING_BUFFER_BACKEND:
4276 return 0;
4277 case LTTNG_RING_BUFFER_ITER:
4278 return 0;
4279 case LTTNG_CONTEXT_PERF_COUNTERS:
4280 return lttng_cpuhp_perf_counter_dead(cpu, lttng_node);
4281 default:
4282 return -EINVAL;
4283 }
4284 }
4285
4286 static int lttng_hotplug_online(unsigned int cpu, struct hlist_node *node)
4287 {
4288 struct lttng_cpuhp_node *lttng_node;
4289
4290 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4291 switch (lttng_node->component) {
4292 case LTTNG_RING_BUFFER_FRONTEND:
4293 return lttng_cpuhp_rb_frontend_online(cpu, lttng_node);
4294 case LTTNG_RING_BUFFER_BACKEND:
4295 return 0;
4296 case LTTNG_RING_BUFFER_ITER:
4297 return lttng_cpuhp_rb_iter_online(cpu, lttng_node);
4298 case LTTNG_CONTEXT_PERF_COUNTERS:
4299 return lttng_cpuhp_perf_counter_online(cpu, lttng_node);
4300 default:
4301 return -EINVAL;
4302 }
4303 }
4304
4305 static int lttng_hotplug_offline(unsigned int cpu, struct hlist_node *node)
4306 {
4307 struct lttng_cpuhp_node *lttng_node;
4308
4309 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4310 switch (lttng_node->component) {
4311 case LTTNG_RING_BUFFER_FRONTEND:
4312 return lttng_cpuhp_rb_frontend_offline(cpu, lttng_node);
4313 case LTTNG_RING_BUFFER_BACKEND:
4314 return 0;
4315 case LTTNG_RING_BUFFER_ITER:
4316 return 0;
4317 case LTTNG_CONTEXT_PERF_COUNTERS:
4318 return 0;
4319 default:
4320 return -EINVAL;
4321 }
4322 }
4323
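/*
* Set up two dynamically allocated multi-instance hotplug states: a
* prepare/dead pair (CPUHP_BP_PREPARE_DYN) and an online/offline pair
* (CPUHP_AP_ONLINE_DYN). The returned state numbers are published to the ring
* buffer through lttng_rb_set_hp_prepare()/lttng_rb_set_hp_online() so that
* per-object instances can later be attached to these states.
*/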
4324 static int __init lttng_init_cpu_hotplug(void)
4325 {
4326 int ret;
4327
4328 ret = cpuhp_setup_state_multi(CPUHP_BP_PREPARE_DYN, "lttng:prepare",
4329 lttng_hotplug_prepare,
4330 lttng_hotplug_dead);
4331 if (ret < 0) {
4332 return ret;
4333 }
4334 lttng_hp_prepare = ret;
4335 lttng_rb_set_hp_prepare(ret);
4336
4337 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "lttng:online",
4338 lttng_hotplug_online,
4339 lttng_hotplug_offline);
4340 if (ret < 0) {
4341 cpuhp_remove_multi_state(lttng_hp_prepare);
4342 lttng_hp_prepare = 0;
4343 return ret;
4344 }
4345 lttng_hp_online = ret;
4346 lttng_rb_set_hp_online(ret);
4347
4348 return 0;
4349 }
4350
4351 static void __exit lttng_exit_cpu_hotplug(void)
4352 {
4353 lttng_rb_set_hp_online(0);
4354 cpuhp_remove_multi_state(lttng_hp_online);
4355 lttng_rb_set_hp_prepare(0);
4356 cpuhp_remove_multi_state(lttng_hp_prepare);
4357 }
4358
4359 #else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
4360 static int lttng_init_cpu_hotplug(void)
4361 {
4362 return 0;
4363 }
4364 static void lttng_exit_cpu_hotplug(void)
4365 {
4366 }
4367 #endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
4368
4369 static int __init lttng_events_init(void)
4370 {
4371 int ret;
4372
4373 ret = wrapper_lttng_fixup_sig(THIS_MODULE);
4374 if (ret)
4375 return ret;
4376 ret = wrapper_get_pfnblock_flags_mask_init();
4377 if (ret)
4378 return ret;
4379 ret = wrapper_get_pageblock_flags_mask_init();
4380 if (ret)
4381 return ret;
4382 ret = lttng_probes_init();
4383 if (ret)
4384 return ret;
4385 ret = lttng_context_init();
4386 if (ret)
4387 return ret;
4388 ret = lttng_tracepoint_init();
4389 if (ret)
4390 goto error_tp;
4391 event_recorder_cache = KMEM_CACHE(lttng_kernel_event_recorder, 0);
4392 if (!event_recorder_cache) {
4393 ret = -ENOMEM;
4394 goto error_kmem_event_recorder;
4395 }
4396 event_recorder_private_cache = KMEM_CACHE(lttng_kernel_event_recorder_private, 0);
4397 if (!event_recorder_private_cache) {
4398 ret = -ENOMEM;
4399 goto error_kmem_event_recorder_private;
4400 }
4401 event_notifier_cache = KMEM_CACHE(lttng_kernel_event_notifier, 0);
4402 if (!event_notifier_cache) {
4403 ret = -ENOMEM;
4404 goto error_kmem_event_notifier;
4405 }
4406 event_notifier_private_cache = KMEM_CACHE(lttng_kernel_event_notifier_private, 0);
4407 if (!event_notifier_private_cache) {
4408 ret = -ENOMEM;
4409 goto error_kmem_event_notifier_private;
4410 }
4411 ret = lttng_abi_init();
4412 if (ret)
4413 goto error_abi;
4414 ret = lttng_logger_init();
4415 if (ret)
4416 goto error_logger;
4417 ret = lttng_init_cpu_hotplug();
4418 if (ret)
4419 goto error_hotplug;
4420 printk(KERN_NOTICE "LTTng: Loaded modules v%s.%s.%s%s (%s)%s%s\n",
4421 __stringify(LTTNG_MODULES_MAJOR_VERSION),
4422 __stringify(LTTNG_MODULES_MINOR_VERSION),
4423 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
4424 LTTNG_MODULES_EXTRAVERSION,
4425 LTTNG_VERSION_NAME,
4426 #ifdef LTTNG_EXTRA_VERSION_GIT
4427 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
4428 #else
4429 "",
4430 #endif
4431 #ifdef LTTNG_EXTRA_VERSION_NAME
4432 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
4433 #else
4434 "");
4435 #endif
4436 #ifdef CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM
4437 printk(KERN_NOTICE "LTTng: Experimental bitwise enum enabled.\n");
4438 #endif /* CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM */
4439 return 0;
4440
4441 error_hotplug:
4442 lttng_logger_exit();
4443 error_logger:
4444 lttng_abi_exit();
4445 error_abi:
4446 kmem_cache_destroy(event_notifier_private_cache);
4447 error_kmem_event_notifier_private:
4448 kmem_cache_destroy(event_notifier_cache);
4449 error_kmem_event_notifier:
4450 kmem_cache_destroy(event_recorder_private_cache);
4451 error_kmem_event_recorder_private:
4452 kmem_cache_destroy(event_recorder_cache);
4453 error_kmem_event_recorder:
4454 lttng_tracepoint_exit();
4455 error_tp:
4456 lttng_context_exit();
4457 printk(KERN_NOTICE "LTTng: Failed to load modules v%s.%s.%s%s (%s)%s%s\n",
4458 __stringify(LTTNG_MODULES_MAJOR_VERSION),
4459 __stringify(LTTNG_MODULES_MINOR_VERSION),
4460 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
4461 LTTNG_MODULES_EXTRAVERSION,
4462 LTTNG_VERSION_NAME,
4463 #ifdef LTTNG_EXTRA_VERSION_GIT
4464 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
4465 #else
4466 "",
4467 #endif
4468 #ifdef LTTNG_EXTRA_VERSION_NAME
4469 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
4470 #else
4471 "");
4472 #endif
4473 return ret;
4474 }
4475
4476 module_init(lttng_events_init);
4477
4478 static void __exit lttng_events_exit(void)
4479 {
4480 struct lttng_kernel_session_private *session_priv, *tmpsession_priv;
4481
4482 lttng_exit_cpu_hotplug();
4483 lttng_logger_exit();
4484 lttng_abi_exit();
4485 list_for_each_entry_safe(session_priv, tmpsession_priv, &sessions, list)
4486 lttng_session_destroy(session_priv->pub);
4487 kmem_cache_destroy(event_recorder_cache);
4488 kmem_cache_destroy(event_recorder_private_cache);
4489 kmem_cache_destroy(event_notifier_cache);
4490 kmem_cache_destroy(event_notifier_private_cache);
4491 lttng_tracepoint_exit();
4492 lttng_context_exit();
4493 printk(KERN_NOTICE "LTTng: Unloaded modules v%s.%s.%s%s (%s)%s%s\n",
4494 __stringify(LTTNG_MODULES_MAJOR_VERSION),
4495 __stringify(LTTNG_MODULES_MINOR_VERSION),
4496 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
4497 LTTNG_MODULES_EXTRAVERSION,
4498 LTTNG_VERSION_NAME,
4499 #ifdef LTTNG_EXTRA_VERSION_GIT
4500 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
4501 #else
4502 "",
4503 #endif
4504 #ifdef LTTNG_EXTRA_VERSION_NAME
4505 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
4506 #else
4507 "");
4508 #endif
4509 }
4510
4511 module_exit(lttng_events_exit);
4512
4513 #include <generated/patches.h>
4514 #ifdef LTTNG_EXTRA_VERSION_GIT
4515 MODULE_INFO(extra_version_git, LTTNG_EXTRA_VERSION_GIT);
4516 #endif
4517 #ifdef LTTNG_EXTRA_VERSION_NAME
4518 MODULE_INFO(extra_version_name, LTTNG_EXTRA_VERSION_NAME);
4519 #endif
4520 MODULE_LICENSE("GPL and additional rights");
4521 MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
4522 MODULE_DESCRIPTION("LTTng tracer");
4523 MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
4524 __stringify(LTTNG_MODULES_MINOR_VERSION) "."
4525 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
4526 LTTNG_MODULES_EXTRAVERSION);