1 /* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
2 *
3 * lttng-events.c
4 *
5 * Holds LTTng per-session event registry.
6 *
7 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 */
9
10 /*
11 * This page_alloc.h wrapper needs to be included before gfpflags.h because it
12 * overrides a function with a define.
13 */
14 #include "wrapper/page_alloc.h"
15
16 #include <linux/module.h>
17 #include <linux/mutex.h>
18 #include <linux/sched.h>
19 #include <linux/slab.h>
20 #include <linux/jiffies.h>
21 #include <linux/utsname.h>
22 #include <linux/err.h>
23 #include <linux/seq_file.h>
24 #include <linux/file.h>
25 #include <linux/anon_inodes.h>
26 #include <wrapper/file.h>
27 #include <linux/uaccess.h>
28 #include <linux/vmalloc.h>
29 #include <linux/dmi.h>
30
31 #include <wrapper/compiler_attributes.h>
32 #include <wrapper/uuid.h>
33 #include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_mappings() */
34 #include <wrapper/random.h>
35 #include <wrapper/tracepoint.h>
36 #include <wrapper/list.h>
37 #include <wrapper/types.h>
38 #include <lttng/kernel-version.h>
39 #include <lttng/events.h>
40 #include <lttng/events-internal.h>
41 #include <lttng/lttng-bytecode.h>
42 #include <lttng/tracer.h>
43 #include <lttng/event-notifier-notification.h>
44 #include <lttng/abi-old.h>
45 #include <lttng/endian.h>
46 #include <lttng/string-utils.h>
47 #include <lttng/utils.h>
48 #include <ringbuffer/backend.h>
49 #include <ringbuffer/frontend.h>
50 #include <wrapper/time.h>
51
52 #define METADATA_CACHE_DEFAULT_SIZE 4096
53
54 static LIST_HEAD(sessions);
55 static LIST_HEAD(event_notifier_groups);
56 static LIST_HEAD(lttng_transport_list);
57 static LIST_HEAD(lttng_counter_transport_list);
58 /*
59 * Protect the sessions and metadata caches.
60 */
61 static DEFINE_MUTEX(sessions_mutex);
62 static struct kmem_cache *event_recorder_cache;
63 static struct kmem_cache *event_recorder_private_cache;
64 static struct kmem_cache *event_notifier_cache;
65 static struct kmem_cache *event_notifier_private_cache;
66
67 static void lttng_session_lazy_sync_event_enablers(struct lttng_kernel_session *session);
68 static void lttng_session_sync_event_enablers(struct lttng_kernel_session *session);
69 static void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group);
70 static void lttng_event_enabler_sync(struct lttng_event_enabler_common *event_enabler);
71
72 static void _lttng_event_destroy(struct lttng_kernel_event_common *event);
73 static void _lttng_channel_destroy(struct lttng_kernel_channel_buffer *chan);
74 static int _lttng_event_unregister(struct lttng_kernel_event_common *event);
75 static
76 int _lttng_event_recorder_metadata_statedump(struct lttng_kernel_event_common *event);
77 static
78 int _lttng_session_metadata_statedump(struct lttng_kernel_session *session);
79 static
80 void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream);
81 static
82 int _lttng_type_statedump(struct lttng_kernel_session *session,
83 const struct lttng_kernel_type_common *type,
84 enum lttng_kernel_string_encoding parent_encoding,
85 size_t nesting);
86 static
87 int _lttng_field_statedump(struct lttng_kernel_session *session,
88 const struct lttng_kernel_event_field *field,
89 size_t nesting, const char **prev_field_name_p);
90
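/*
 * Wait for any tracing probe currently executing to finish. Depending on the
 * kernel version and preemption model, probes may run under preempt-off
 * (sched RCU) or regular RCU read-side critical sections, so the matching
 * grace period(s) are awaited below.
 */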
91 void synchronize_trace(void)
92 {
93 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,1,0) || \
94 LTTNG_RHEL_KERNEL_RANGE(4,18,0,193,0,0, 4,19,0,0,0,0))
95 synchronize_rcu();
96 #else
97 synchronize_sched();
98 #endif
99
100 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0))
101 #ifdef CONFIG_PREEMPT_RT_FULL
102 synchronize_rcu();
103 #endif
104 #else /* (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0)) */
105 #ifdef CONFIG_PREEMPT_RT
106 synchronize_rcu();
107 #endif
108 #endif /* (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0)) */
109 }
110
111 void lttng_lock_sessions(void)
112 {
113 mutex_lock(&sessions_mutex);
114 }
115
116 void lttng_unlock_sessions(void)
117 {
118 mutex_unlock(&sessions_mutex);
119 }
120
121 static struct lttng_transport *lttng_transport_find(const char *name)
122 {
123 struct lttng_transport *transport;
124
125 list_for_each_entry(transport, &lttng_transport_list, node) {
126 if (!strcmp(transport->name, name))
127 return transport;
128 }
129 return NULL;
130 }
131
132 /*
133 * Called with sessions lock held.
134 */
135 int lttng_session_active(void)
136 {
137 struct lttng_kernel_session_private *iter;
138
139 list_for_each_entry(iter, &sessions, list) {
140 if (iter->pub->active)
141 return 1;
142 }
143 return 0;
144 }
145
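/*
 * Create a tracing session: allocate the public/private session structures,
 * the metadata cache and the ID trackers, then add the session to the global
 * session list. Returns NULL on allocation or tracker setup failure.
 */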
146 struct lttng_kernel_session *lttng_session_create(void)
147 {
148 struct lttng_kernel_session *session;
149 struct lttng_kernel_session_private *session_priv;
150 struct lttng_metadata_cache *metadata_cache;
151 int i;
152
153 mutex_lock(&sessions_mutex);
154 session = lttng_kvzalloc(sizeof(*session), GFP_KERNEL);
155 if (!session)
156 goto err;
157 session_priv = lttng_kvzalloc(sizeof(*session_priv), GFP_KERNEL);
158 if (!session_priv)
159 goto err_free_session;
160 session->priv = session_priv;
161 session_priv->pub = session;
162
163 INIT_LIST_HEAD(&session_priv->chan);
164 INIT_LIST_HEAD(&session_priv->events);
165 lttng_guid_gen(&session_priv->uuid);
166
167 metadata_cache = kzalloc(sizeof(struct lttng_metadata_cache),
168 GFP_KERNEL);
169 if (!metadata_cache)
170 goto err_free_session_private;
171 metadata_cache->data = vzalloc(METADATA_CACHE_DEFAULT_SIZE);
172 if (!metadata_cache->data)
173 goto err_free_cache;
174 metadata_cache->cache_alloc = METADATA_CACHE_DEFAULT_SIZE;
175 kref_init(&metadata_cache->refcount);
176 mutex_init(&metadata_cache->lock);
177 session_priv->metadata_cache = metadata_cache;
178 INIT_LIST_HEAD(&metadata_cache->metadata_stream);
179 memcpy(&metadata_cache->uuid, &session_priv->uuid,
180 sizeof(metadata_cache->uuid));
181 INIT_LIST_HEAD(&session_priv->enablers_head);
182 for (i = 0; i < LTTNG_EVENT_HT_SIZE; i++)
183 INIT_HLIST_HEAD(&session_priv->events_ht.table[i]);
184 list_add(&session_priv->list, &sessions);
185
186 if (lttng_id_tracker_init(&session->pid_tracker, session, TRACKER_PID))
187 goto tracker_alloc_error;
188 if (lttng_id_tracker_init(&session->vpid_tracker, session, TRACKER_VPID))
189 goto tracker_alloc_error;
190 if (lttng_id_tracker_init(&session->uid_tracker, session, TRACKER_UID))
191 goto tracker_alloc_error;
192 if (lttng_id_tracker_init(&session->vuid_tracker, session, TRACKER_VUID))
193 goto tracker_alloc_error;
194 if (lttng_id_tracker_init(&session->gid_tracker, session, TRACKER_GID))
195 goto tracker_alloc_error;
196 if (lttng_id_tracker_init(&session->vgid_tracker, session, TRACKER_VGID))
197 goto tracker_alloc_error;
198
199 mutex_unlock(&sessions_mutex);
200
201 return session;
202
203 tracker_alloc_error:
204 lttng_id_tracker_fini(&session->pid_tracker);
205 lttng_id_tracker_fini(&session->vpid_tracker);
206 lttng_id_tracker_fini(&session->uid_tracker);
207 lttng_id_tracker_fini(&session->vuid_tracker);
208 lttng_id_tracker_fini(&session->gid_tracker);
209 lttng_id_tracker_fini(&session->vgid_tracker);
210 err_free_cache:
211 kfree(metadata_cache);
212 err_free_session_private:
213 lttng_kvfree(session_priv);
214 err_free_session:
215 lttng_kvfree(session);
216 err:
217 mutex_unlock(&sessions_mutex);
218 return NULL;
219 }
220
221 static
222 struct lttng_counter_transport *lttng_counter_transport_find(const char *name)
223 {
224 struct lttng_counter_transport *transport;
225
226 list_for_each_entry(transport, &lttng_counter_transport_list, node) {
227 if (!strcmp(transport->name, name))
228 return transport;
229 }
230 return NULL;
231 }
232
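/*
 * Create a counter backed by the named counter transport. The transport
 * module is pinned with try_module_get() for the counter's lifetime and
 * released on the error paths below.
 */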
233 struct lttng_counter *lttng_kernel_counter_create(
234 const char *counter_transport_name,
235 size_t number_dimensions, const size_t *dimensions_sizes)
236 {
237 struct lttng_counter *counter = NULL;
238 struct lttng_counter_transport *counter_transport = NULL;
239
240 counter_transport = lttng_counter_transport_find(counter_transport_name);
241 if (!counter_transport) {
242 printk(KERN_WARNING "LTTng: counter transport %s not found.\n",
243 counter_transport_name);
244 goto notransport;
245 }
246 if (!try_module_get(counter_transport->owner)) {
247 printk(KERN_WARNING "LTTng: Can't lock counter transport module.\n");
248 goto notransport;
249 }
250
251 counter = lttng_kvzalloc(sizeof(struct lttng_counter), GFP_KERNEL);
252 if (!counter)
253 goto nomem;
254
255 /* Create event notifier error counter. */
256 counter->ops = &counter_transport->ops;
257 counter->transport = counter_transport;
258
259 counter->counter = counter->ops->counter_create(
260 number_dimensions, dimensions_sizes, 0);
261 if (!counter->counter) {
262 goto create_error;
263 }
264
265 return counter;
266
267 create_error:
268 lttng_kvfree(counter);
269 nomem:
270 if (counter_transport)
271 module_put(counter_transport->owner);
272 notransport:
273 return NULL;
274 }
275
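/*
 * Create an event notifier group and the ring buffer channel used to carry
 * its notifications. Sub-buffer size and count are currently hard-coded
 * (see the TODOs below).
 */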
276 struct lttng_event_notifier_group *lttng_event_notifier_group_create(void)
277 {
278 struct lttng_transport *transport = NULL;
279 struct lttng_event_notifier_group *event_notifier_group;
280 const char *transport_name = "relay-event-notifier";
281 size_t subbuf_size = 4096; //TODO
282 size_t num_subbuf = 16; //TODO
283 unsigned int switch_timer_interval = 0;
284 unsigned int read_timer_interval = 0;
285 int i;
286
287 mutex_lock(&sessions_mutex);
288
289 transport = lttng_transport_find(transport_name);
290 if (!transport) {
291 printk(KERN_WARNING "LTTng: transport %s not found\n",
292 transport_name);
293 goto notransport;
294 }
295 if (!try_module_get(transport->owner)) {
296 printk(KERN_WARNING "LTTng: Can't lock transport %s module.\n",
297 transport_name);
298 goto notransport;
299 }
300
301 event_notifier_group = lttng_kvzalloc(sizeof(struct lttng_event_notifier_group),
302 GFP_KERNEL);
303 if (!event_notifier_group)
304 goto nomem;
305
306 /*
307 * Initialize the ring buffer used to store event notifier
308 * notifications.
309 */
310 event_notifier_group->ops = &transport->ops;
311 event_notifier_group->chan = transport->ops.priv->channel_create(
312 transport_name, event_notifier_group, NULL,
313 subbuf_size, num_subbuf, switch_timer_interval,
314 read_timer_interval);
315 if (!event_notifier_group->chan)
316 goto create_error;
317
318 event_notifier_group->transport = transport;
319
320 INIT_LIST_HEAD(&event_notifier_group->enablers_head);
321 INIT_LIST_HEAD(&event_notifier_group->event_notifiers_head);
322 for (i = 0; i < LTTNG_EVENT_HT_SIZE; i++)
323 INIT_HLIST_HEAD(&event_notifier_group->events_ht.table[i]);
324
325 list_add(&event_notifier_group->node, &event_notifier_groups);
326
327 mutex_unlock(&sessions_mutex);
328
329 return event_notifier_group;
330
331 create_error:
332 lttng_kvfree(event_notifier_group);
333 nomem:
334 if (transport)
335 module_put(transport->owner);
336 notransport:
337 mutex_unlock(&sessions_mutex);
338 return NULL;
339 }
340
341 void metadata_cache_destroy(struct kref *kref)
342 {
343 struct lttng_metadata_cache *cache =
344 container_of(kref, struct lttng_metadata_cache, refcount);
345 vfree(cache->data);
346 kfree(cache);
347 }
348
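/*
 * Tear down a session: unregister events and syscall tables, wait for
 * in-flight probes with synchronize_trace(), then destroy enablers, events,
 * channels and ID trackers, and drop the metadata cache reference.
 */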
349 void lttng_session_destroy(struct lttng_kernel_session *session)
350 {
351 struct lttng_kernel_channel_buffer_private *chan_priv, *tmpchan_priv;
352 struct lttng_kernel_event_recorder_private *event_recorder_priv, *tmpevent_recorder_priv;
353 struct lttng_metadata_stream *metadata_stream;
354 struct lttng_event_enabler_common *event_enabler, *tmp_event_enabler;
355 int ret;
356
357 mutex_lock(&sessions_mutex);
358 WRITE_ONCE(session->active, 0);
359 list_for_each_entry(chan_priv, &session->priv->chan, node) {
360 ret = lttng_syscalls_unregister_syscall_table(&chan_priv->parent.syscall_table);
361 WARN_ON(ret);
362 }
363 list_for_each_entry(event_recorder_priv, &session->priv->events, parent.node) {
364 ret = _lttng_event_unregister(&event_recorder_priv->pub->parent);
365 WARN_ON(ret);
366 }
367 synchronize_trace(); /* Wait for in-flight events to complete */
368 list_for_each_entry(chan_priv, &session->priv->chan, node) {
369 ret = lttng_syscalls_destroy_syscall_table(&chan_priv->parent.syscall_table);
370 WARN_ON(ret);
371 }
372 list_for_each_entry_safe(event_enabler, tmp_event_enabler, &session->priv->enablers_head, node)
373 lttng_event_enabler_destroy(event_enabler);
374 list_for_each_entry_safe(event_recorder_priv, tmpevent_recorder_priv, &session->priv->events, parent.node)
375 _lttng_event_destroy(&event_recorder_priv->pub->parent);
376 list_for_each_entry_safe(chan_priv, tmpchan_priv, &session->priv->chan, node) {
377 BUG_ON(chan_priv->channel_type == METADATA_CHANNEL);
378 _lttng_channel_destroy(chan_priv->pub);
379 }
380 mutex_lock(&session->priv->metadata_cache->lock);
381 list_for_each_entry(metadata_stream, &session->priv->metadata_cache->metadata_stream, list)
382 _lttng_metadata_channel_hangup(metadata_stream);
383 mutex_unlock(&session->priv->metadata_cache->lock);
384 lttng_id_tracker_fini(&session->pid_tracker);
385 lttng_id_tracker_fini(&session->vpid_tracker);
386 lttng_id_tracker_fini(&session->uid_tracker);
387 lttng_id_tracker_fini(&session->vuid_tracker);
388 lttng_id_tracker_fini(&session->gid_tracker);
389 lttng_id_tracker_fini(&session->vgid_tracker);
390 kref_put(&session->priv->metadata_cache->refcount, metadata_cache_destroy);
391 list_del(&session->priv->list);
392 mutex_unlock(&sessions_mutex);
393 lttng_kvfree(session->priv);
394 lttng_kvfree(session);
395 }
396
397 void lttng_event_notifier_group_destroy(
398 struct lttng_event_notifier_group *event_notifier_group)
399 {
400 struct lttng_event_enabler_common *event_enabler, *tmp_event_enabler;
401 struct lttng_kernel_event_notifier_private *event_notifier_priv, *tmpevent_notifier_priv;
402 int ret;
403
404 if (!event_notifier_group)
405 return;
406
407 mutex_lock(&sessions_mutex);
408
409 ret = lttng_syscalls_unregister_syscall_table(&event_notifier_group->syscall_table);
410 WARN_ON(ret);
411
412 list_for_each_entry_safe(event_notifier_priv, tmpevent_notifier_priv,
413 &event_notifier_group->event_notifiers_head, parent.node) {
414 ret = _lttng_event_unregister(&event_notifier_priv->pub->parent);
415 WARN_ON(ret);
416 }
417
418 /* Wait for in-flight event notifiers to complete */
419 synchronize_trace();
420
421 irq_work_sync(&event_notifier_group->wakeup_pending);
422
423 ret = lttng_syscalls_destroy_syscall_table(&event_notifier_group->syscall_table);
424 WARN_ON(ret);
425
426 list_for_each_entry_safe(event_enabler, tmp_event_enabler,
427 &event_notifier_group->enablers_head, node)
428 lttng_event_enabler_destroy(event_enabler);
429
430 list_for_each_entry_safe(event_notifier_priv, tmpevent_notifier_priv,
431 &event_notifier_group->event_notifiers_head, parent.node)
432 _lttng_event_destroy(&event_notifier_priv->pub->parent);
433
434 if (event_notifier_group->error_counter) {
435 struct lttng_counter *error_counter = event_notifier_group->error_counter;
436
437 error_counter->ops->counter_destroy(error_counter->counter);
438 module_put(error_counter->transport->owner);
439 lttng_kvfree(error_counter);
440 event_notifier_group->error_counter = NULL;
441 }
442
443 event_notifier_group->ops->priv->channel_destroy(event_notifier_group->chan);
444 module_put(event_notifier_group->transport->owner);
445 list_del(&event_notifier_group->node);
446
447 mutex_unlock(&sessions_mutex);
448 lttng_kvfree(event_notifier_group);
449 }
450
451 int lttng_session_statedump(struct lttng_kernel_session *session)
452 {
453 int ret;
454
455 mutex_lock(&sessions_mutex);
456 ret = lttng_statedump_start(session);
457 mutex_unlock(&sessions_mutex);
458 return ret;
459 }
460
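/*
 * Activate a session: synchronize enablers, choose the event header type per
 * channel from the number of allocated event IDs, clear the streams'
 * quiescent state, then emit the metadata and kernel state dump.
 */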
461 int lttng_session_enable(struct lttng_kernel_session *session)
462 {
463 int ret = 0;
464 struct lttng_kernel_channel_buffer_private *chan_priv;
465
466 mutex_lock(&sessions_mutex);
467 if (session->active) {
468 ret = -EBUSY;
469 goto end;
470 }
471
472 /* Set transient enabler state to "enabled" */
473 session->priv->tstate = 1;
474
475 /* We need to sync enablers with session before activation. */
476 lttng_session_sync_event_enablers(session);
477
478 /*
479 * Snapshot the number of events per channel to know the type of header
480 * we need to use.
481 */
482 list_for_each_entry(chan_priv, &session->priv->chan, node) {
483 if (chan_priv->header_type)
484 continue; /* don't change it if session stop/restart */
485 if (chan_priv->free_event_id < 31)
486 chan_priv->header_type = 1; /* compact */
487 else
488 chan_priv->header_type = 2; /* large */
489 }
490
491 /* Clear each stream's quiescent state. */
492 list_for_each_entry(chan_priv, &session->priv->chan, node) {
493 if (chan_priv->channel_type != METADATA_CHANNEL)
494 lib_ring_buffer_clear_quiescent_channel(chan_priv->rb_chan);
495 }
496
497 WRITE_ONCE(session->active, 1);
498 WRITE_ONCE(session->priv->been_active, 1);
499 ret = _lttng_session_metadata_statedump(session);
500 if (ret) {
501 WRITE_ONCE(session->active, 0);
502 goto end;
503 }
504 ret = lttng_statedump_start(session);
505 if (ret)
506 WRITE_ONCE(session->active, 0);
507 end:
508 mutex_unlock(&sessions_mutex);
509 return ret;
510 }
511
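/*
 * Deactivate a session: mark it inactive, synchronize enablers with the
 * "disabled" transient state, and set each data stream quiescent.
 */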
512 int lttng_session_disable(struct lttng_kernel_session *session)
513 {
514 int ret = 0;
515 struct lttng_kernel_channel_buffer_private *chan_priv;
516
517 mutex_lock(&sessions_mutex);
518 if (!session->active) {
519 ret = -EBUSY;
520 goto end;
521 }
522 WRITE_ONCE(session->active, 0);
523
524 /* Set transient enabler state to "disabled" */
525 session->priv->tstate = 0;
526 lttng_session_sync_event_enablers(session);
527
528 /* Set each stream's quiescent state. */
529 list_for_each_entry(chan_priv, &session->priv->chan, node) {
530 if (chan_priv->channel_type != METADATA_CHANNEL)
531 lib_ring_buffer_set_quiescent_channel(chan_priv->rb_chan);
532 }
533 end:
534 mutex_unlock(&sessions_mutex);
535 return ret;
536 }
537
538 int lttng_session_metadata_regenerate(struct lttng_kernel_session *session)
539 {
540 int ret = 0;
541 struct lttng_kernel_channel_buffer_private *chan_priv;
542 struct lttng_kernel_event_recorder_private *event_recorder_priv;
543 struct lttng_metadata_cache *cache = session->priv->metadata_cache;
544 struct lttng_metadata_stream *stream;
545
546 mutex_lock(&sessions_mutex);
547 if (!session->active) {
548 ret = -EBUSY;
549 goto end;
550 }
551
552 mutex_lock(&cache->lock);
553 memset(cache->data, 0, cache->cache_alloc);
554 cache->metadata_written = 0;
555 cache->version++;
556 list_for_each_entry(stream, &session->priv->metadata_cache->metadata_stream, list) {
557 stream->metadata_out = 0;
558 stream->metadata_in = 0;
559 }
560 mutex_unlock(&cache->lock);
561
562 session->priv->metadata_dumped = 0;
563 list_for_each_entry(chan_priv, &session->priv->chan, node) {
564 chan_priv->metadata_dumped = 0;
565 }
566
567 list_for_each_entry(event_recorder_priv, &session->priv->events, parent.node) {
568 event_recorder_priv->metadata_dumped = 0;
569 }
570
571 ret = _lttng_session_metadata_statedump(session);
572
573 end:
574 mutex_unlock(&sessions_mutex);
575 return ret;
576 }
577
578 static
579 bool is_channel_buffer_metadata(struct lttng_kernel_channel_common *channel)
580 {
581 struct lttng_kernel_channel_buffer *chan_buf;
582
583 if (channel->type != LTTNG_KERNEL_CHANNEL_TYPE_BUFFER)
584 return false;
585 chan_buf = container_of(channel, struct lttng_kernel_channel_buffer, parent);
586 if (chan_buf->priv->channel_type == METADATA_CHANNEL)
587 return true;
588 return false;
589 }
590
591 int lttng_channel_enable(struct lttng_kernel_channel_common *channel)
592 {
593 int ret = 0;
594
595 mutex_lock(&sessions_mutex);
596 if (is_channel_buffer_metadata(channel)) {
597 ret = -EPERM;
598 goto end;
599 }
600 if (channel->enabled) {
601 ret = -EEXIST;
602 goto end;
603 }
604 /* Set transient enabler state to "enabled" */
605 channel->priv->tstate = 1;
606 lttng_session_sync_event_enablers(channel->session);
607 /* Set atomically the state to "enabled" */
608 WRITE_ONCE(channel->enabled, 1);
609 end:
610 mutex_unlock(&sessions_mutex);
611 return ret;
612 }
613
614 int lttng_channel_disable(struct lttng_kernel_channel_common *channel)
615 {
616 int ret = 0;
617
618 mutex_lock(&sessions_mutex);
619 if (is_channel_buffer_metadata(channel)) {
620 ret = -EPERM;
621 goto end;
622 }
623 if (!channel->enabled) {
624 ret = -EEXIST;
625 goto end;
626 }
627 /* Set atomically the state to "disabled" */
628 WRITE_ONCE(channel->enabled, 0);
629 /* Set transient enabler state to "disabled" */
630 channel->priv->tstate = 0;
631 lttng_session_sync_event_enablers(channel->session);
632 end:
633 mutex_unlock(&sessions_mutex);
634 return ret;
635 }
636
637 int lttng_event_enable(struct lttng_kernel_event_common *event)
638 {
639 int ret = 0;
640
641 mutex_lock(&sessions_mutex);
642 switch (event->type) {
643 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
644 {
645 struct lttng_kernel_event_recorder *event_recorder =
646 container_of(event, struct lttng_kernel_event_recorder, parent);
647
648 if (event_recorder->chan->priv->channel_type == METADATA_CHANNEL) {
649 ret = -EPERM;
650 goto end;
651 }
652 break;
653 }
654 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
655 switch (event->priv->instrumentation) {
656 case LTTNG_KERNEL_ABI_KRETPROBE:
657 ret = -EINVAL;
658 goto end;
659 default:
660 break;
661 }
662 break;
663 default:
664 break;
665 }
666
667 if (event->enabled) {
668 ret = -EEXIST;
669 goto end;
670 }
671 switch (event->priv->instrumentation) {
672 case LTTNG_KERNEL_ABI_TRACEPOINT:
673 lttng_fallthrough;
674 case LTTNG_KERNEL_ABI_SYSCALL:
675 ret = -EINVAL;
676 break;
677
678 case LTTNG_KERNEL_ABI_KPROBE:
679 lttng_fallthrough;
680 case LTTNG_KERNEL_ABI_UPROBE:
681 WRITE_ONCE(event->enabled, 1);
682 break;
683
684 case LTTNG_KERNEL_ABI_KRETPROBE:
685 ret = lttng_kretprobes_event_enable_state(event, 1);
686 break;
687
688 case LTTNG_KERNEL_ABI_FUNCTION:
689 lttng_fallthrough;
690 case LTTNG_KERNEL_ABI_NOOP:
691 lttng_fallthrough;
692 default:
693 WARN_ON_ONCE(1);
694 ret = -EINVAL;
695 }
696 end:
697 mutex_unlock(&sessions_mutex);
698 return ret;
699 }
700
701 int lttng_event_disable(struct lttng_kernel_event_common *event)
702 {
703 int ret = 0;
704
705 mutex_lock(&sessions_mutex);
706 switch (event->type) {
707 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
708 {
709 struct lttng_kernel_event_recorder *event_recorder =
710 container_of(event, struct lttng_kernel_event_recorder, parent);
711
712 if (event_recorder->chan->priv->channel_type == METADATA_CHANNEL) {
713 ret = -EPERM;
714 goto end;
715 }
716 break;
717 }
718 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
719 switch (event->priv->instrumentation) {
720 case LTTNG_KERNEL_ABI_KRETPROBE:
721 ret = -EINVAL;
722 goto end;
723 default:
724 break;
725 }
726 break;
727 default:
728 break;
729 }
730
731 if (!event->enabled) {
732 ret = -EEXIST;
733 goto end;
734 }
735 switch (event->priv->instrumentation) {
736 case LTTNG_KERNEL_ABI_TRACEPOINT:
737 lttng_fallthrough;
738 case LTTNG_KERNEL_ABI_SYSCALL:
739 ret = -EINVAL;
740 break;
741
742 case LTTNG_KERNEL_ABI_KPROBE:
743 lttng_fallthrough;
744 case LTTNG_KERNEL_ABI_UPROBE:
745 WRITE_ONCE(event->enabled, 0);
746 break;
747
748 case LTTNG_KERNEL_ABI_KRETPROBE:
749 ret = lttng_kretprobes_event_enable_state(event, 0);
750 break;
751
752 case LTTNG_KERNEL_ABI_FUNCTION:
753 lttng_fallthrough;
754 case LTTNG_KERNEL_ABI_NOOP:
755 lttng_fallthrough;
756 default:
757 WARN_ON_ONCE(1);
758 ret = -EINVAL;
759 }
760 end:
761 mutex_unlock(&sessions_mutex);
762 return ret;
763 }
764
765 struct lttng_kernel_channel_buffer *lttng_channel_buffer_create(struct lttng_kernel_session *session,
766 const char *transport_name,
767 void *buf_addr,
768 size_t subbuf_size, size_t num_subbuf,
769 unsigned int switch_timer_interval,
770 unsigned int read_timer_interval,
771 enum channel_type channel_type)
772 {
773 struct lttng_kernel_channel_buffer *chan;
774 struct lttng_kernel_channel_buffer_private *chan_priv;
775 struct lttng_transport *transport = NULL;
776
777 mutex_lock(&sessions_mutex);
778 if (session->priv->been_active && channel_type != METADATA_CHANNEL)
779 goto active; /* Refuse to add channel to active session */
780 transport = lttng_transport_find(transport_name);
781 if (!transport) {
782 printk(KERN_WARNING "LTTng: transport %s not found\n",
783 transport_name);
784 goto notransport;
785 }
786 if (!try_module_get(transport->owner)) {
787 printk(KERN_WARNING "LTTng: Can't lock transport module.\n");
788 goto notransport;
789 }
790 chan = kzalloc(sizeof(struct lttng_kernel_channel_buffer), GFP_KERNEL);
791 if (!chan)
792 goto nomem;
793 chan_priv = kzalloc(sizeof(struct lttng_kernel_channel_buffer_private), GFP_KERNEL);
794 if (!chan_priv)
795 goto nomem_priv;
796 chan->priv = chan_priv;
797 chan_priv->pub = chan;
798 chan->parent.type = LTTNG_KERNEL_CHANNEL_TYPE_BUFFER;
799 chan->parent.session = session;
800 chan->priv->id = session->priv->free_chan_id++;
801 chan->ops = &transport->ops;
802 /*
803 * Note: the channel creation op already writes into the packet
804 * headers. Therefore the "chan" information used as input
805 * should be already accessible.
806 */
807 chan->priv->rb_chan = transport->ops.priv->channel_create(transport_name,
808 chan, buf_addr, subbuf_size, num_subbuf,
809 switch_timer_interval, read_timer_interval);
810 if (!chan->priv->rb_chan)
811 goto create_error;
812 chan->priv->parent.tstate = 1;
813 chan->parent.enabled = 1;
814 chan->priv->transport = transport;
815 chan->priv->channel_type = channel_type;
816 list_add(&chan->priv->node, &session->priv->chan);
817 mutex_unlock(&sessions_mutex);
818 return chan;
819
820 create_error:
821 kfree(chan_priv);
822 nomem_priv:
823 kfree(chan);
824 nomem:
825 if (transport)
826 module_put(transport->owner);
827 notransport:
828 active:
829 mutex_unlock(&sessions_mutex);
830 return NULL;
831 }
832
833 /*
834 * Only used internally at session destruction for per-cpu channels, and
835 * when the metadata channel is released.
836 * Needs to be called with sessions mutex held.
837 */
838 static
839 void _lttng_channel_destroy(struct lttng_kernel_channel_buffer *chan)
840 {
841 chan->ops->priv->channel_destroy(chan->priv->rb_chan);
842 module_put(chan->priv->transport->owner);
843 list_del(&chan->priv->node);
844 lttng_kernel_destroy_context(chan->priv->ctx);
845 kfree(chan->priv);
846 kfree(chan);
847 }
848
849 void lttng_metadata_channel_destroy(struct lttng_kernel_channel_buffer *chan)
850 {
851 BUG_ON(chan->priv->channel_type != METADATA_CHANNEL);
852
853 /* Protect the metadata cache with the sessions_mutex. */
854 mutex_lock(&sessions_mutex);
855 _lttng_channel_destroy(chan);
856 mutex_unlock(&sessions_mutex);
857 }
858 EXPORT_SYMBOL_GPL(lttng_metadata_channel_destroy);
859
860 static
861 void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream)
862 {
863 stream->finalized = 1;
864 wake_up_interruptible(&stream->read_wait);
865 }
866
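/*
 * Check whether the target channel still has free event IDs for the
 * instrumentation type requested by the enabler. Event notifiers do not
 * consume channel event IDs, so the check always succeeds for them.
 */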
867 static
868 bool lttng_kernel_event_id_available(struct lttng_event_enabler_common *event_enabler)
869 {
870 struct lttng_kernel_abi_event *event_param = &event_enabler->event_param;
871 enum lttng_kernel_abi_instrumentation itype = event_param->instrumentation;
872
873 switch (event_enabler->enabler_type) {
874 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
875 {
876 struct lttng_event_recorder_enabler *event_recorder_enabler =
877 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
878 struct lttng_kernel_channel_buffer *chan = event_recorder_enabler->chan;
879
880 switch (itype) {
881 case LTTNG_KERNEL_ABI_TRACEPOINT:
882 lttng_fallthrough;
883 case LTTNG_KERNEL_ABI_KPROBE:
884 lttng_fallthrough;
885 case LTTNG_KERNEL_ABI_SYSCALL:
886 lttng_fallthrough;
887 case LTTNG_KERNEL_ABI_UPROBE:
888 if (chan->priv->free_event_id == -1U)
889 return false;
890 return true;
891 case LTTNG_KERNEL_ABI_KRETPROBE:
892 /* kretprobes require 2 event IDs. */
893 if (chan->priv->free_event_id >= -2U)
894 return false;
895 return true;
896 default:
897 WARN_ON_ONCE(1);
898 return false;
899 }
900 }
901 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
902 return true;
903 default:
904 WARN_ON_ONCE(1);
905 return false;
906 }
907 }
908
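/*
 * Allocate and minimally initialize an event object (recorder or notifier)
 * from its dedicated kmem caches, linking the public and private structures.
 * Instrumentation-specific registration is left to the caller.
 */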
909 static
910 struct lttng_kernel_event_common *lttng_kernel_event_alloc(struct lttng_event_enabler_common *event_enabler)
911 {
912 struct lttng_kernel_abi_event *event_param = &event_enabler->event_param;
913 enum lttng_kernel_abi_instrumentation itype = event_param->instrumentation;
914
915 switch (event_enabler->enabler_type) {
916 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
917 {
918 struct lttng_event_recorder_enabler *event_recorder_enabler =
919 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
920 struct lttng_kernel_event_recorder *event_recorder;
921 struct lttng_kernel_event_recorder_private *event_recorder_priv;
922 struct lttng_kernel_channel_buffer *chan = event_recorder_enabler->chan;
923
924 event_recorder = kmem_cache_zalloc(event_recorder_cache, GFP_KERNEL);
925 if (!event_recorder)
926 return NULL;
927 event_recorder_priv = kmem_cache_zalloc(event_recorder_private_cache, GFP_KERNEL);
928 if (!event_recorder_priv) {
929 kmem_cache_free(event_recorder_cache, event_recorder);
930 return NULL;
931 }
932 event_recorder_priv->pub = event_recorder;
933 event_recorder_priv->parent.pub = &event_recorder->parent;
934 event_recorder->priv = event_recorder_priv;
935 event_recorder->parent.priv = &event_recorder_priv->parent;
936
937 event_recorder->parent.type = LTTNG_KERNEL_EVENT_TYPE_RECORDER;
938 event_recorder->parent.run_filter = lttng_kernel_interpret_event_filter;
939 event_recorder->priv->parent.instrumentation = itype;
940 INIT_LIST_HEAD(&event_recorder->priv->parent.filter_bytecode_runtime_head);
941 INIT_LIST_HEAD(&event_recorder->priv->parent.enablers_ref_head);
942
943 event_recorder->chan = chan;
944 event_recorder->priv->id = chan->priv->free_event_id++;
945 return &event_recorder->parent;
946 }
947 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
948 {
949 struct lttng_event_notifier_enabler *event_notifier_enabler =
950 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
951 struct lttng_kernel_event_notifier *event_notifier;
952 struct lttng_kernel_event_notifier_private *event_notifier_priv;
953
954 event_notifier = kmem_cache_zalloc(event_notifier_cache, GFP_KERNEL);
955 if (!event_notifier)
956 return NULL;
957 event_notifier_priv = kmem_cache_zalloc(event_notifier_private_cache, GFP_KERNEL);
958 if (!event_notifier_priv) {
959 kmem_cache_free(event_notifier_cache, event_notifier);
960 return NULL;
961 }
962 event_notifier_priv->pub = event_notifier;
963 event_notifier_priv->parent.pub = &event_notifier->parent;
964 event_notifier->priv = event_notifier_priv;
965 event_notifier->parent.priv = &event_notifier_priv->parent;
966
967 event_notifier->parent.type = LTTNG_KERNEL_EVENT_TYPE_NOTIFIER;
968 event_notifier->parent.run_filter = lttng_kernel_interpret_event_filter;
969 event_notifier->priv->parent.instrumentation = itype;
970 event_notifier->priv->parent.user_token = event_enabler->user_token;
971 INIT_LIST_HEAD(&event_notifier->priv->parent.filter_bytecode_runtime_head);
972 INIT_LIST_HEAD(&event_notifier->priv->parent.enablers_ref_head);
973
974 event_notifier->priv->group = event_notifier_enabler->group;
975 event_notifier->priv->error_counter_index = event_notifier_enabler->error_counter_index;
976 event_notifier->priv->num_captures = 0;
977 event_notifier->notification_send = lttng_event_notifier_notification_send;
978 INIT_LIST_HEAD(&event_notifier->priv->capture_bytecode_runtime_head);
979 return &event_notifier->parent;
980 }
981 default:
982 return NULL;
983 }
984 }
985
986 static
987 void lttng_kernel_event_free(struct lttng_kernel_event_common *event)
988 {
989 switch (event->type) {
990 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
991 {
992 struct lttng_kernel_event_recorder *event_recorder =
993 container_of(event, struct lttng_kernel_event_recorder, parent);
994
995 kmem_cache_free(event_recorder_private_cache, event_recorder->priv);
996 kmem_cache_free(event_recorder_cache, event_recorder);
997 break;
998 }
999 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
1000 {
1001 struct lttng_kernel_event_notifier *event_notifier =
1002 container_of(event, struct lttng_kernel_event_notifier, parent);
1003
1004 kmem_cache_free(event_notifier_private_cache, event_notifier->priv);
1005 kmem_cache_free(event_notifier_cache, event_notifier);
1006 break;
1007 }
1008 default:
1009 WARN_ON_ONCE(1);
1010 }
1011 }
1012
1013 static
1014 int lttng_kernel_event_notifier_clear_error_counter(struct lttng_kernel_event_common *event)
1015 {
1016 switch (event->type) {
1017 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
1018 return 0;
1019 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
1020 {
1021 struct lttng_kernel_event_notifier *event_notifier =
1022 container_of(event, struct lttng_kernel_event_notifier, parent);
1023 struct lttng_counter *error_counter;
1024 struct lttng_event_notifier_group *event_notifier_group = event_notifier->priv->group;
1025 size_t dimension_index[1];
1026 int ret;
1027
1028 /*
1029 * Clear the error counter bucket. The sessiond keeps track of which
1030 * bucket is currently in use. We trust it. The session lock
1031 * synchronizes against concurrent creation of the error
1032 * counter.
1033 */
1034 error_counter = event_notifier_group->error_counter;
1035 if (!error_counter)
1036 return 0;
1037 /*
1038 * Check that the index is within the boundary of the counter.
1039 */
1040 if (event_notifier->priv->error_counter_index >= event_notifier_group->error_counter_len) {
1041 printk(KERN_INFO "LTTng: event_notifier: Error counter index out-of-bound: counter-len=%zu, index=%llu\n",
1042 event_notifier_group->error_counter_len, event_notifier->priv->error_counter_index);
1043 return -EINVAL;
1044 }
1045
1046 dimension_index[0] = event_notifier->priv->error_counter_index;
1047 ret = error_counter->ops->counter_clear(error_counter->counter, dimension_index);
1048 if (ret) {
1049 printk(KERN_INFO "LTTng: event_notifier: Unable to clear error counter bucket %llu\n",
1050 event_notifier->priv->error_counter_index);
1051 return -EINVAL;
1052 }
1053 return 0;
1054 }
1055 default:
1056 return -EINVAL;
1057 }
1058 }
1059
1060 /*
1061 * Supports event creation while tracing session is active.
1062 * Needs to be called with sessions mutex held.
1063 */
1064 struct lttng_kernel_event_common *_lttng_kernel_event_create(struct lttng_event_enabler_common *event_enabler,
1065 const struct lttng_kernel_event_desc *event_desc)
1066 {
1067 struct lttng_event_ht *events_ht = lttng_get_event_ht_from_enabler(event_enabler);
1068 struct list_head *event_list_head = lttng_get_event_list_head_from_enabler(event_enabler);
1069 struct lttng_kernel_abi_event *event_param = &event_enabler->event_param;
1070 enum lttng_kernel_abi_instrumentation itype = event_param->instrumentation;
1071 struct lttng_kernel_event_common_private *event_priv;
1072 struct lttng_kernel_event_common *event;
1073 const char *event_name;
1074 struct hlist_head *head;
1075 int ret;
1076
1077 if (!lttng_kernel_event_id_available(event_enabler)) {
1078 ret = -EMFILE;
1079 goto full;
1080 }
1081
1082 switch (itype) {
1083 case LTTNG_KERNEL_ABI_TRACEPOINT:
1084 event_name = event_desc->event_name;
1085 break;
1086
1087 case LTTNG_KERNEL_ABI_KPROBE:
1088 lttng_fallthrough;
1089 case LTTNG_KERNEL_ABI_UPROBE:
1090 lttng_fallthrough;
1091 case LTTNG_KERNEL_ABI_KRETPROBE:
1092 lttng_fallthrough;
1093 case LTTNG_KERNEL_ABI_SYSCALL:
1094 event_name = event_param->name;
1095 break;
1096
1097 case LTTNG_KERNEL_ABI_FUNCTION:
1098 lttng_fallthrough;
1099 case LTTNG_KERNEL_ABI_NOOP:
1100 lttng_fallthrough;
1101 default:
1102 WARN_ON_ONCE(1);
1103 ret = -EINVAL;
1104 goto type_error;
1105 }
1106
1107 head = utils_borrow_hash_table_bucket(events_ht->table, LTTNG_EVENT_HT_SIZE, event_name);
1108 lttng_hlist_for_each_entry(event_priv, head, hlist_node) {
1109 if (lttng_event_enabler_event_name_match_event(event_enabler, event_name, event_priv->pub)) {
1110 ret = -EEXIST;
1111 goto exist;
1112 }
1113 }
1114
1115 event = lttng_kernel_event_alloc(event_enabler);
1116 if (!event) {
1117 ret = -ENOMEM;
1118 goto alloc_error;
1119 }
1120
1121 switch (itype) {
1122 case LTTNG_KERNEL_ABI_TRACEPOINT:
1123 /* Event will be enabled by enabler sync. */
1124 event->enabled = 0;
1125 event->priv->registered = 0;
1126 event->priv->desc = lttng_event_desc_get(event_name);
1127 if (!event->priv->desc) {
1128 ret = -ENOENT;
1129 goto register_error;
1130 }
1131 /* Populate lttng_event structure before event registration. */
1132 smp_wmb();
1133 break;
1134
1135 case LTTNG_KERNEL_ABI_KPROBE:
1136 /*
1137 * Needs to be explicitly enabled after creation, since
1138 * we may want to apply filters.
1139 */
1140 event->enabled = 0;
1141 event->priv->registered = 1;
1142 /*
1143 * Populate lttng_event structure before event
1144 * registration.
1145 */
1146 smp_wmb();
1147 ret = lttng_kprobes_register_event(event_name,
1148 event_param->u.kprobe.symbol_name,
1149 event_param->u.kprobe.offset,
1150 event_param->u.kprobe.addr,
1151 event);
1152 if (ret) {
1153 ret = -EINVAL;
1154 goto register_error;
1155 }
1156 ret = try_module_get(event->priv->desc->owner);
1157 WARN_ON_ONCE(!ret);
1158 break;
1159
1160 case LTTNG_KERNEL_ABI_KRETPROBE:
1161 {
1162 struct lttng_kernel_event_common *event_return;
1163
1164 /* kretprobe defines 2 events */
1165 /*
1166 * Needs to be explicitly enabled after creation, since
1167 * we may want to apply filters.
1168 */
1169 event->enabled = 0;
1170 event->priv->registered = 1;
1171
1172 event_return = lttng_kernel_event_alloc(event_enabler);
1173 if (!event_return) {
1174 ret = -ENOMEM;
1175 goto alloc_error;
1176 }
1177
1178 event_return->enabled = 0;
1179 event_return->priv->registered = 1;
1180
1181 /*
1182 * Populate lttng_event structure before kretprobe registration.
1183 */
1184 smp_wmb();
1185 ret = lttng_kretprobes_register(event_name,
1186 event_param->u.kretprobe.symbol_name,
1187 event_param->u.kretprobe.offset,
1188 event_param->u.kretprobe.addr,
1189 event, event_return);
1190 if (ret) {
1191 lttng_kernel_event_free(event_return);
1192 ret = -EINVAL;
1193 goto register_error;
1194 }
1195 /* Take 2 refs on the module: one per event. */
1196 ret = try_module_get(event->priv->desc->owner);
1197 WARN_ON_ONCE(!ret);
1198 ret = try_module_get(event_return->priv->desc->owner);
1199 WARN_ON_ONCE(!ret);
1200 ret = _lttng_event_recorder_metadata_statedump(event_return);
1201 WARN_ON_ONCE(ret > 0);
1202 if (ret) {
1203 lttng_kernel_event_free(event_return);
1204 module_put(event_return->priv->desc->owner);
1205 module_put(event->priv->desc->owner);
1206 goto statedump_error;
1207 }
1208 list_add(&event_return->priv->node, event_list_head);
1209 break;
1210 }
1211
1212 case LTTNG_KERNEL_ABI_SYSCALL:
1213 /*
1214 * Needs to be explicitly enabled after creation, since
1215 * we may want to apply filters.
1216 */
1217 event->enabled = 0;
1218 event->priv->registered = 0;
1219 event->priv->desc = event_desc;
1220 switch (event_param->u.syscall.entryexit) {
1221 case LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT:
1222 ret = -EINVAL;
1223 goto register_error;
1224 case LTTNG_KERNEL_ABI_SYSCALL_ENTRY:
1225 event->priv->u.syscall.entryexit = LTTNG_SYSCALL_ENTRY;
1226 break;
1227 case LTTNG_KERNEL_ABI_SYSCALL_EXIT:
1228 event->priv->u.syscall.entryexit = LTTNG_SYSCALL_EXIT;
1229 break;
1230 }
1231 switch (event_param->u.syscall.abi) {
1232 case LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL:
1233 ret = -EINVAL;
1234 goto register_error;
1235 case LTTNG_KERNEL_ABI_SYSCALL_ABI_NATIVE:
1236 event->priv->u.syscall.abi = LTTNG_SYSCALL_ABI_NATIVE;
1237 break;
1238 case LTTNG_KERNEL_ABI_SYSCALL_ABI_COMPAT:
1239 event->priv->u.syscall.abi = LTTNG_SYSCALL_ABI_COMPAT;
1240 break;
1241 }
1242 if (!event->priv->desc) {
1243 ret = -EINVAL;
1244 goto register_error;
1245 }
1246 break;
1247
1248 case LTTNG_KERNEL_ABI_UPROBE:
1249 /*
1250 * Needs to be explicitly enabled after creation, since
1251 * we may want to apply filters.
1252 */
1253 event->enabled = 0;
1254 event->priv->registered = 1;
1255
1256 /*
1257 * Populate lttng_event structure before event
1258 * registration.
1259 */
1260 smp_wmb();
1261
1262 ret = lttng_uprobes_register_event(event_param->name,
1263 event_param->u.uprobe.fd,
1264 event);
1265 if (ret)
1266 goto register_error;
1267 ret = try_module_get(event->priv->desc->owner);
1268 WARN_ON_ONCE(!ret);
1269 break;
1270
1271 case LTTNG_KERNEL_ABI_FUNCTION:
1272 lttng_fallthrough;
1273 case LTTNG_KERNEL_ABI_NOOP:
1274 lttng_fallthrough;
1275 default:
1276 WARN_ON_ONCE(1);
1277 ret = -EINVAL;
1278 goto register_error;
1279 }
1280
1281 ret = _lttng_event_recorder_metadata_statedump(event);
1282 WARN_ON_ONCE(ret > 0);
1283 if (ret) {
1284 goto statedump_error;
1285 }
1286
1287 ret = lttng_kernel_event_notifier_clear_error_counter(event);
1288 if (ret)
1289 goto register_error;
1290
1291 hlist_add_head(&event->priv->hlist_node, head);
1292 list_add(&event->priv->node, event_list_head);
1293
1294 return event;
1295
1296 statedump_error:
1297 /* If a statedump error occurs, events will not be readable. */
1298 register_error:
1299 lttng_kernel_event_free(event);
1300 alloc_error:
1301 exist:
1302 type_error:
1303 full:
1304 return ERR_PTR(ret);
1305 }
1306
1307 struct lttng_kernel_event_common *lttng_kernel_event_create(struct lttng_event_enabler_common *event_enabler,
1308 const struct lttng_kernel_event_desc *event_desc)
1309 {
1310 struct lttng_kernel_event_common *event;
1311
1312 mutex_lock(&sessions_mutex);
1313 event = _lttng_kernel_event_create(event_enabler, event_desc);
1314 mutex_unlock(&sessions_mutex);
1315 return event;
1316 }
1317
1318 int lttng_kernel_counter_read(struct lttng_counter *counter,
1319 const size_t *dim_indexes, int32_t cpu,
1320 int64_t *val, bool *overflow, bool *underflow)
1321 {
1322 return counter->ops->counter_read(counter->counter, dim_indexes,
1323 cpu, val, overflow, underflow);
1324 }
1325
1326 int lttng_kernel_counter_aggregate(struct lttng_counter *counter,
1327 const size_t *dim_indexes, int64_t *val,
1328 bool *overflow, bool *underflow)
1329 {
1330 return counter->ops->counter_aggregate(counter->counter, dim_indexes,
1331 val, overflow, underflow);
1332 }
1333
1334 int lttng_kernel_counter_clear(struct lttng_counter *counter,
1335 const size_t *dim_indexes)
1336 {
1337 return counter->ops->counter_clear(counter->counter, dim_indexes);
1338 }
1339
1340 /* Only used for tracepoints for now. */
1341 static
1342 void register_event(struct lttng_kernel_event_common *event)
1343 {
1344 const struct lttng_kernel_event_desc *desc;
1345 int ret = -EINVAL;
1346
1347 if (event->priv->registered)
1348 return;
1349
1350 desc = event->priv->desc;
1351 switch (event->priv->instrumentation) {
1352 case LTTNG_KERNEL_ABI_TRACEPOINT:
1353 ret = lttng_wrapper_tracepoint_probe_register(desc->event_kname,
1354 desc->tp_class->probe_callback,
1355 event);
1356 break;
1357
1358 case LTTNG_KERNEL_ABI_SYSCALL:
1359 ret = lttng_syscall_filter_enable_event(event);
1360 break;
1361
1362 case LTTNG_KERNEL_ABI_KPROBE:
1363 lttng_fallthrough;
1364 case LTTNG_KERNEL_ABI_UPROBE:
1365 ret = 0;
1366 break;
1367
1368 case LTTNG_KERNEL_ABI_KRETPROBE:
1369 switch (event->type) {
1370 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
1371 ret = 0;
1372 break;
1373 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
1374 WARN_ON_ONCE(1);
1375 break;
1376 }
1377 break;
1378
1379 case LTTNG_KERNEL_ABI_FUNCTION:
1380 lttng_fallthrough;
1381 case LTTNG_KERNEL_ABI_NOOP:
1382 lttng_fallthrough;
1383 default:
1384 WARN_ON_ONCE(1);
1385 }
1386 if (!ret)
1387 event->priv->registered = 1;
1388 }
1389
1390 int _lttng_event_unregister(struct lttng_kernel_event_common *event)
1391 {
1392 struct lttng_kernel_event_common_private *event_priv = event->priv;
1393 const struct lttng_kernel_event_desc *desc;
1394 int ret = -EINVAL;
1395
1396 if (!event_priv->registered)
1397 return 0;
1398
1399 desc = event_priv->desc;
1400 switch (event_priv->instrumentation) {
1401 case LTTNG_KERNEL_ABI_TRACEPOINT:
1402 ret = lttng_wrapper_tracepoint_probe_unregister(event_priv->desc->event_kname,
1403 event_priv->desc->tp_class->probe_callback,
1404 event);
1405 break;
1406
1407 case LTTNG_KERNEL_ABI_KPROBE:
1408 lttng_kprobes_unregister_event(event);
1409 ret = 0;
1410 break;
1411
1412 case LTTNG_KERNEL_ABI_KRETPROBE:
1413 switch (event->type) {
1414 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
1415 lttng_kretprobes_unregister(event);
1416 ret = 0;
1417 break;
1418 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
1419 WARN_ON_ONCE(1);
1420 break;
1421 }
1422 break;
1423
1424 case LTTNG_KERNEL_ABI_SYSCALL:
1425 ret = lttng_syscall_filter_disable_event(event);
1426 break;
1427
1428 case LTTNG_KERNEL_ABI_NOOP:
1429 switch (event->type) {
1430 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
1431 ret = 0;
1432 break;
1433 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
1434 WARN_ON_ONCE(1);
1435 break;
1436 }
1437 break;
1438
1439 case LTTNG_KERNEL_ABI_UPROBE:
1440 lttng_uprobes_unregister_event(event);
1441 ret = 0;
1442 break;
1443
1444 case LTTNG_KERNEL_ABI_FUNCTION:
1445 lttng_fallthrough;
1446 default:
1447 WARN_ON_ONCE(1);
1448 }
1449 if (!ret)
1450 event_priv->registered = 0;
1451 return ret;
1452 }
1453
1454 /*
1455 * Only used internally at session destruction.
1456 */
1457 static
1458 void _lttng_event_destroy(struct lttng_kernel_event_common *event)
1459 {
1460 struct lttng_kernel_event_common_private *event_priv = event->priv;
1461 struct lttng_enabler_ref *enabler_ref, *tmp_enabler_ref;
1462
1463 lttng_free_event_filter_runtime(event);
1464 /* Free event enabler refs */
1465 list_for_each_entry_safe(enabler_ref, tmp_enabler_ref,
1466 &event_priv->enablers_ref_head, node)
1467 kfree(enabler_ref);
1468
1469 switch (event->type) {
1470 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
1471 {
1472 struct lttng_kernel_event_recorder *event_recorder =
1473 container_of(event, struct lttng_kernel_event_recorder, parent);
1474
1475 switch (event_priv->instrumentation) {
1476 case LTTNG_KERNEL_ABI_TRACEPOINT:
1477 lttng_event_desc_put(event_priv->desc);
1478 break;
1479
1480 case LTTNG_KERNEL_ABI_KPROBE:
1481 module_put(event_priv->desc->owner);
1482 lttng_kprobes_destroy_event_private(&event_recorder->parent);
1483 break;
1484
1485 case LTTNG_KERNEL_ABI_KRETPROBE:
1486 module_put(event_priv->desc->owner);
1487 lttng_kretprobes_destroy_private(&event_recorder->parent);
1488 break;
1489
1490 case LTTNG_KERNEL_ABI_SYSCALL:
1491 break;
1492
1493 case LTTNG_KERNEL_ABI_UPROBE:
1494 module_put(event_priv->desc->owner);
1495 lttng_uprobes_destroy_event_private(&event_recorder->parent);
1496 break;
1497
1498 case LTTNG_KERNEL_ABI_FUNCTION:
1499 lttng_fallthrough;
1500 case LTTNG_KERNEL_ABI_NOOP:
1501 lttng_fallthrough;
1502 default:
1503 WARN_ON_ONCE(1);
1504 }
1505 list_del(&event_recorder->priv->parent.node);
1506 kmem_cache_free(event_recorder_private_cache, event_recorder->priv);
1507 kmem_cache_free(event_recorder_cache, event_recorder);
1508 break;
1509 }
1510 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
1511 {
1512 struct lttng_kernel_event_notifier *event_notifier =
1513 container_of(event, struct lttng_kernel_event_notifier, parent);
1514
1515 switch (event_notifier->priv->parent.instrumentation) {
1516 case LTTNG_KERNEL_ABI_TRACEPOINT:
1517 lttng_event_desc_put(event_notifier->priv->parent.desc);
1518 break;
1519
1520 case LTTNG_KERNEL_ABI_KPROBE:
1521 module_put(event_notifier->priv->parent.desc->owner);
1522 lttng_kprobes_destroy_event_private(&event_notifier->parent);
1523 break;
1524
1525 case LTTNG_KERNEL_ABI_SYSCALL:
1526 break;
1527
1528 case LTTNG_KERNEL_ABI_UPROBE:
1529 module_put(event_notifier->priv->parent.desc->owner);
1530 lttng_uprobes_destroy_event_private(&event_notifier->parent);
1531 break;
1532
1533 case LTTNG_KERNEL_ABI_KRETPROBE:
1534 lttng_fallthrough;
1535 case LTTNG_KERNEL_ABI_FUNCTION:
1536 lttng_fallthrough;
1537 case LTTNG_KERNEL_ABI_NOOP:
1538 lttng_fallthrough;
1539 default:
1540 WARN_ON_ONCE(1);
1541 }
1542 list_del(&event_notifier->priv->parent.node);
1543 kmem_cache_free(event_notifier_private_cache, event_notifier->priv);
1544 kmem_cache_free(event_notifier_cache, event_notifier);
1545 break;
1546 }
1547 default:
1548 WARN_ON_ONCE(1);
1549 }
1550 }
1551
1552 struct lttng_kernel_id_tracker *get_tracker(struct lttng_kernel_session *session,
1553 enum tracker_type tracker_type)
1554 {
1555 switch (tracker_type) {
1556 case TRACKER_PID:
1557 return &session->pid_tracker;
1558 case TRACKER_VPID:
1559 return &session->vpid_tracker;
1560 case TRACKER_UID:
1561 return &session->uid_tracker;
1562 case TRACKER_VUID:
1563 return &session->vuid_tracker;
1564 case TRACKER_GID:
1565 return &session->gid_tracker;
1566 case TRACKER_VGID:
1567 return &session->vgid_tracker;
1568 default:
1569 WARN_ON_ONCE(1);
1570 return NULL;
1571 }
1572 }
1573
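/*
 * Add an ID to the given tracker. Passing id == -1 destroys the tracker,
 * which means "track all IDs" for that tracker type.
 */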
1574 int lttng_session_track_id(struct lttng_kernel_session *session,
1575 enum tracker_type tracker_type, int id)
1576 {
1577 struct lttng_kernel_id_tracker *tracker;
1578 int ret;
1579
1580 tracker = get_tracker(session, tracker_type);
1581 if (!tracker)
1582 return -EINVAL;
1583 if (id < -1)
1584 return -EINVAL;
1585 mutex_lock(&sessions_mutex);
1586 if (id == -1) {
1587 /* track all ids: destroy tracker. */
1588 lttng_id_tracker_destroy(tracker, true);
1589 ret = 0;
1590 } else {
1591 ret = lttng_id_tracker_add(tracker, id);
1592 }
1593 mutex_unlock(&sessions_mutex);
1594 return ret;
1595 }
1596
1597 int lttng_session_untrack_id(struct lttng_kernel_session *session,
1598 enum tracker_type tracker_type, int id)
1599 {
1600 struct lttng_kernel_id_tracker *tracker;
1601 int ret;
1602
1603 tracker = get_tracker(session, tracker_type);
1604 if (!tracker)
1605 return -EINVAL;
1606 if (id < -1)
1607 return -EINVAL;
1608 mutex_lock(&sessions_mutex);
1609 if (id == -1) {
1610 /* untrack all ids: replace by empty tracker. */
1611 ret = lttng_id_tracker_empty_set(tracker);
1612 } else {
1613 ret = lttng_id_tracker_del(tracker, id);
1614 }
1615 mutex_unlock(&sessions_mutex);
1616 return ret;
1617 }
1618
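/*
 * seq_file iterator over the entries of an ID tracker. A disabled tracker
 * (no hash table) is reported as a single wildcard entry with id = -1.
 */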
1619 static
1620 void *id_list_start(struct seq_file *m, loff_t *pos)
1621 {
1622 struct lttng_kernel_id_tracker *id_tracker = m->private;
1623 struct lttng_kernel_id_tracker_rcu *id_tracker_p = id_tracker->p;
1624 struct lttng_id_hash_node *e;
1625 int iter = 0, i;
1626
1627 mutex_lock(&sessions_mutex);
1628 if (id_tracker_p) {
1629 for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
1630 struct hlist_head *head = &id_tracker_p->id_hash[i];
1631
1632 lttng_hlist_for_each_entry(e, head, hlist) {
1633 if (iter++ >= *pos)
1634 return e;
1635 }
1636 }
1637 } else {
1638 /* ID tracker disabled. */
1639 if (iter >= *pos && iter == 0) {
1640 return id_tracker_p; /* empty tracker */
1641 }
1642 iter++;
1643 }
1644 /* End of list */
1645 return NULL;
1646 }
1647
1648 /* Called with sessions_mutex held. */
1649 static
1650 void *id_list_next(struct seq_file *m, void *p, loff_t *ppos)
1651 {
1652 struct lttng_kernel_id_tracker *id_tracker = m->private;
1653 struct lttng_kernel_id_tracker_rcu *id_tracker_p = id_tracker->p;
1654 struct lttng_id_hash_node *e;
1655 int iter = 0, i;
1656
1657 (*ppos)++;
1658 if (id_tracker_p) {
1659 for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
1660 struct hlist_head *head = &id_tracker_p->id_hash[i];
1661
1662 lttng_hlist_for_each_entry(e, head, hlist) {
1663 if (iter++ >= *ppos)
1664 return e;
1665 }
1666 }
1667 } else {
1668 /* ID tracker disabled. */
1669 if (iter >= *ppos && iter == 0)
1670 return p; /* empty tracker */
1671 iter++;
1672 }
1673
1674 /* End of list */
1675 return NULL;
1676 }
1677
1678 static
1679 void id_list_stop(struct seq_file *m, void *p)
1680 {
1681 mutex_unlock(&sessions_mutex);
1682 }
1683
1684 static
1685 int id_list_show(struct seq_file *m, void *p)
1686 {
1687 struct lttng_kernel_id_tracker *id_tracker = m->private;
1688 struct lttng_kernel_id_tracker_rcu *id_tracker_p = id_tracker->p;
1689 int id;
1690
1691 if (p == id_tracker_p) {
1692 /* Tracker disabled. */
1693 id = -1;
1694 } else {
1695 const struct lttng_id_hash_node *e = p;
1696
1697 id = lttng_id_tracker_get_node_id(e);
1698 }
1699 switch (id_tracker->priv->tracker_type) {
1700 case TRACKER_PID:
1701 seq_printf(m, "process { pid = %d; };\n", id);
1702 break;
1703 case TRACKER_VPID:
1704 seq_printf(m, "process { vpid = %d; };\n", id);
1705 break;
1706 case TRACKER_UID:
1707 seq_printf(m, "user { uid = %d; };\n", id);
1708 break;
1709 case TRACKER_VUID:
1710 seq_printf(m, "user { vuid = %d; };\n", id);
1711 break;
1712 case TRACKER_GID:
1713 seq_printf(m, "group { gid = %d; };\n", id);
1714 break;
1715 case TRACKER_VGID:
1716 seq_printf(m, "group { vgid = %d; };\n", id);
1717 break;
1718 default:
1719 seq_printf(m, "UNKNOWN { field = %d };\n", id);
1720 }
1721 return 0;
1722 }
1723
1724 static
1725 const struct seq_operations lttng_tracker_ids_list_seq_ops = {
1726 .start = id_list_start,
1727 .next = id_list_next,
1728 .stop = id_list_stop,
1729 .show = id_list_show,
1730 };
1731
1732 static
1733 int lttng_tracker_ids_list_open(struct inode *inode, struct file *file)
1734 {
1735 return seq_open(file, &lttng_tracker_ids_list_seq_ops);
1736 }
1737
1738 static
1739 int lttng_tracker_ids_list_release(struct inode *inode, struct file *file)
1740 {
1741 struct seq_file *m = file->private_data;
1742 struct lttng_kernel_id_tracker *id_tracker = m->private;
1743 int ret;
1744
1745 WARN_ON_ONCE(!id_tracker);
1746 ret = seq_release(inode, file);
1747 if (!ret)
1748 fput(id_tracker->priv->session->priv->file);
1749 return ret;
1750 }
1751
1752 const struct file_operations lttng_tracker_ids_list_fops = {
1753 .owner = THIS_MODULE,
1754 .open = lttng_tracker_ids_list_open,
1755 .read = seq_read,
1756 .llseek = seq_lseek,
1757 .release = lttng_tracker_ids_list_release,
1758 };
1759
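/*
 * Create an anonymous file descriptor exposing the tracker content through
 * the seq_file operations above. A reference is taken on the session file so
 * the session outlives the returned descriptor.
 */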
1760 int lttng_session_list_tracker_ids(struct lttng_kernel_session *session,
1761 enum tracker_type tracker_type)
1762 {
1763 struct file *tracker_ids_list_file;
1764 struct seq_file *m;
1765 int file_fd, ret;
1766
1767 file_fd = lttng_get_unused_fd();
1768 if (file_fd < 0) {
1769 ret = file_fd;
1770 goto fd_error;
1771 }
1772
1773 tracker_ids_list_file = anon_inode_getfile("[lttng_tracker_ids_list]",
1774 &lttng_tracker_ids_list_fops,
1775 NULL, O_RDWR);
1776 if (IS_ERR(tracker_ids_list_file)) {
1777 ret = PTR_ERR(tracker_ids_list_file);
1778 goto file_error;
1779 }
1780 if (!atomic_long_add_unless(&session->priv->file->f_count, 1, LONG_MAX)) {
1781 ret = -EOVERFLOW;
1782 goto refcount_error;
1783 }
1784 ret = lttng_tracker_ids_list_fops.open(NULL, tracker_ids_list_file);
1785 if (ret < 0)
1786 goto open_error;
1787 m = tracker_ids_list_file->private_data;
1788
1789 m->private = get_tracker(session, tracker_type);
1790 BUG_ON(!m->private);
1791 fd_install(file_fd, tracker_ids_list_file);
1792
1793 return file_fd;
1794
1795 open_error:
1796 atomic_long_dec(&session->priv->file->f_count);
1797 refcount_error:
1798 fput(tracker_ids_list_file);
1799 file_error:
1800 put_unused_fd(file_fd);
1801 fd_error:
1802 return ret;
1803 }
1804
1805 /*
1806 * Enabler management.
1807 */
1808 static
1809 int lttng_match_enabler_star_glob(const char *desc_name,
1810 const char *pattern)
1811 {
1812 if (!strutils_star_glob_match(pattern, LTTNG_SIZE_MAX,
1813 desc_name, LTTNG_SIZE_MAX))
1814 return 0;
1815 return 1;
1816 }
1817
1818 static
1819 int lttng_match_enabler_name(const char *desc_name,
1820 const char *name)
1821 {
1822 if (strcmp(desc_name, name))
1823 return 0;
1824 return 1;
1825 }
1826
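/*
 * Match an event description against an enabler. For syscalls, the
 * "compat_", "syscall_entry_" and "syscall_exit_" prefixes are stripped from
 * the description name before matching the enabler name or glob pattern, and
 * the entry/exit and ABI selectors are checked as well.
 */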
1827 static
1828 int lttng_desc_match_enabler_check(const struct lttng_kernel_event_desc *desc,
1829 struct lttng_event_enabler_common *enabler)
1830 {
1831 const char *desc_name, *enabler_name;
1832 bool compat = false, entry = false;
1833
1834 enabler_name = enabler->event_param.name;
1835 switch (enabler->event_param.instrumentation) {
1836 case LTTNG_KERNEL_ABI_TRACEPOINT:
1837 desc_name = desc->event_name;
1838 switch (enabler->format_type) {
1839 case LTTNG_ENABLER_FORMAT_STAR_GLOB:
1840 return lttng_match_enabler_star_glob(desc_name, enabler_name);
1841 case LTTNG_ENABLER_FORMAT_NAME:
1842 return lttng_match_enabler_name(desc_name, enabler_name);
1843 default:
1844 return -EINVAL;
1845 }
1846 break;
1847
1848 case LTTNG_KERNEL_ABI_SYSCALL:
1849 desc_name = desc->event_name;
1850 if (!strncmp(desc_name, "compat_", strlen("compat_"))) {
1851 desc_name += strlen("compat_");
1852 compat = true;
1853 }
1854 if (!strncmp(desc_name, "syscall_exit_",
1855 strlen("syscall_exit_"))) {
1856 desc_name += strlen("syscall_exit_");
1857 } else if (!strncmp(desc_name, "syscall_entry_",
1858 strlen("syscall_entry_"))) {
1859 desc_name += strlen("syscall_entry_");
1860 entry = true;
1861 } else {
1862 WARN_ON_ONCE(1);
1863 return -EINVAL;
1864 }
1865 switch (enabler->event_param.u.syscall.entryexit) {
1866 case LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT:
1867 break;
1868 case LTTNG_KERNEL_ABI_SYSCALL_ENTRY:
1869 if (!entry)
1870 return 0;
1871 break;
1872 case LTTNG_KERNEL_ABI_SYSCALL_EXIT:
1873 if (entry)
1874 return 0;
1875 break;
1876 default:
1877 return -EINVAL;
1878 }
1879 switch (enabler->event_param.u.syscall.abi) {
1880 case LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL:
1881 break;
1882 case LTTNG_KERNEL_ABI_SYSCALL_ABI_NATIVE:
1883 if (compat)
1884 return 0;
1885 break;
1886 case LTTNG_KERNEL_ABI_SYSCALL_ABI_COMPAT:
1887 if (!compat)
1888 return 0;
1889 break;
1890 default:
1891 return -EINVAL;
1892 }
1893 switch (enabler->event_param.u.syscall.match) {
1894 case LTTNG_KERNEL_ABI_SYSCALL_MATCH_NAME:
1895 switch (enabler->format_type) {
1896 case LTTNG_ENABLER_FORMAT_STAR_GLOB:
1897 return lttng_match_enabler_star_glob(desc_name, enabler_name);
1898 case LTTNG_ENABLER_FORMAT_NAME:
1899 return lttng_match_enabler_name(desc_name, enabler_name);
1900 default:
1901 return -EINVAL;
1902 }
1903 break;
1904 case LTTNG_KERNEL_ABI_SYSCALL_MATCH_NR:
1905 return -EINVAL; /* Not implemented. */
1906 default:
1907 return -EINVAL;
1908 }
1909 break;
1910
1911 default:
1912 WARN_ON_ONCE(1);
1913 return -EINVAL;
1914 }
1915 }
1916
1917 bool lttng_desc_match_enabler(const struct lttng_kernel_event_desc *desc,
1918 struct lttng_event_enabler_common *enabler)
1919 {
1920 int ret;
1921
1922 ret = lttng_desc_match_enabler_check(desc, enabler);
1923 if (ret < 0) {
1924 WARN_ON_ONCE(1);
1925 return false;
1926 }
1927 return ret;
1928 }
1929
1930 bool lttng_event_enabler_match_event(struct lttng_event_enabler_common *event_enabler,
1931 struct lttng_kernel_event_common *event)
1932 {
1933 if (event_enabler->event_param.instrumentation != event->priv->instrumentation)
1934 return false;
1935
1936 switch (event_enabler->enabler_type) {
1937 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
1938 {
1939 struct lttng_event_recorder_enabler *event_recorder_enabler =
1940 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
1941 struct lttng_kernel_event_recorder *event_recorder =
1942 container_of(event, struct lttng_kernel_event_recorder, parent);
1943
1944 if (lttng_desc_match_enabler(event->priv->desc, event_enabler)
1945 && event_recorder->chan == event_recorder_enabler->chan)
1946 return true;
1947 else
1948 return false;
1949 }
1950 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
1951 {
1952 struct lttng_event_notifier_enabler *event_notifier_enabler =
1953 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
1954 struct lttng_kernel_event_notifier *event_notifier =
1955 container_of(event, struct lttng_kernel_event_notifier, parent);
1956
1957 if (lttng_desc_match_enabler(event->priv->desc, event_enabler)
1958 && event_notifier->priv->group == event_notifier_enabler->group
1959 && event->priv->user_token == event_enabler->user_token)
1960 return true;
1961 else
1962 return false;
1963 }
1964 default:
1965 WARN_ON_ONCE(1);
1966 return false;
1967 }
1968 }
1969
1970 bool lttng_event_enabler_desc_match_event(struct lttng_event_enabler_common *event_enabler,
1971 const struct lttng_kernel_event_desc *desc,
1972 struct lttng_kernel_event_common *event)
1973 {
1974 if (event_enabler->event_param.instrumentation != event->priv->instrumentation)
1975 return false;
1976
1977 switch (event_enabler->enabler_type) {
1978 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
1979 {
1980 struct lttng_event_recorder_enabler *event_recorder_enabler =
1981 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
1982 struct lttng_kernel_event_recorder *event_recorder =
1983 container_of(event, struct lttng_kernel_event_recorder, parent);
1984
1985 if (event->priv->desc == desc && event_recorder->chan == event_recorder_enabler->chan)
1986 return true;
1987 else
1988 return false;
1989 }
1990 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
1991 {
1992 struct lttng_event_notifier_enabler *event_notifier_enabler =
1993 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
1994 struct lttng_kernel_event_notifier *event_notifier =
1995 container_of(event, struct lttng_kernel_event_notifier, parent);
1996
1997 if (event->priv->desc == desc
1998 && event_notifier->priv->group == event_notifier_enabler->group
1999 && event->priv->user_token == event_enabler->user_token)
2000 return true;
2001 else
2002 return false;
2003 }
2004 default:
2005 WARN_ON_ONCE(1);
2006 return false;
2007 }
2008 }
2009
2010 bool lttng_event_enabler_event_name_match_event(struct lttng_event_enabler_common *event_enabler,
2011 const char *event_name,
2012 struct lttng_kernel_event_common *event)
2013 {
2014 if (event_enabler->event_param.instrumentation != event->priv->instrumentation)
2015 return false;
2016
2017 switch (event_enabler->enabler_type) {
2018 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
2019 {
2020 struct lttng_event_recorder_enabler *event_recorder_enabler =
2021 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
2022 struct lttng_kernel_event_recorder *event_recorder =
2023 container_of(event, struct lttng_kernel_event_recorder, parent);
2024
2025 if (!strncmp(event->priv->desc->event_name, event_name, LTTNG_KERNEL_ABI_SYM_NAME_LEN - 1)
2026 && event_recorder->chan == event_recorder_enabler->chan)
2027 return true;
2028 else
2029 return false;
2030 }
2031 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
2032 {
2033 struct lttng_event_notifier_enabler *event_notifier_enabler =
2034 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
2035 struct lttng_kernel_event_notifier *event_notifier =
2036 container_of(event, struct lttng_kernel_event_notifier, parent);
2037
2038 if (!strncmp(event->priv->desc->event_name, event_name, LTTNG_KERNEL_ABI_SYM_NAME_LEN - 1)
2039 && event_notifier->priv->group == event_notifier_enabler->group
2040 && event->priv->user_token == event_enabler->user_token)
2041 return true;
2042 else
2043 return false;
2044 }
2045 default:
2046 WARN_ON_ONCE(1);
2047 return false;
2048 }
2049 }
2050
2051 static
2052 struct lttng_enabler_ref *lttng_enabler_ref(
2053 struct list_head *enablers_ref_list,
2054 struct lttng_event_enabler_common *enabler)
2055 {
2056 struct lttng_enabler_ref *enabler_ref;
2057
2058 list_for_each_entry(enabler_ref, enablers_ref_list, node) {
2059 if (enabler_ref->ref == enabler)
2060 return enabler_ref;
2061 }
2062 return NULL;
2063 }
2064
2065 static
2066 void lttng_event_enabler_create_tracepoint_events_if_missing(struct lttng_event_enabler_common *event_enabler)
2067 {
2068 struct lttng_event_ht *events_ht = lttng_get_event_ht_from_enabler(event_enabler);
2069 struct lttng_kernel_probe_desc *probe_desc;
2070 const struct lttng_kernel_event_desc *desc;
2071 struct list_head *probe_list;
2072 int i;
2073
2074 probe_list = lttng_get_probe_list_head();
2075 /*
2076 	 * For each probe event, if the probe event matches our
2077 	 * enabler, create an associated lttng_event if not
2078 	 * already present.
2079 */
2080 list_for_each_entry(probe_desc, probe_list, head) {
2081 for (i = 0; i < probe_desc->nr_events; i++) {
2082 bool found = false;
2083 struct hlist_head *head;
2084 struct lttng_kernel_event_common *event;
2085 struct lttng_kernel_event_common_private *event_priv;
2086
2087 desc = probe_desc->event_desc[i];
2088 if (!lttng_desc_match_enabler(desc, event_enabler))
2089 continue;
2090
2091 /*
2092 * Check if already created.
2093 */
2094 head = utils_borrow_hash_table_bucket(events_ht->table, LTTNG_EVENT_HT_SIZE, desc->event_name);
2095 lttng_hlist_for_each_entry(event_priv, head, hlist_node) {
2096 if (lttng_event_enabler_desc_match_event(event_enabler, desc, event_priv->pub)) {
2097 found = true;
2098 break;
2099 }
2100 }
2101 if (found)
2102 continue;
2103
2104 /*
2105 * We need to create an event for this event probe.
2106 */
2107 event = _lttng_kernel_event_create(event_enabler, desc);
2108 if (IS_ERR(event)) {
2109 printk(KERN_INFO "LTTng: Unable to create event %s\n",
2110 probe_desc->event_desc[i]->event_name);
2111 }
2112 }
2113 }
2114 }
2115
2116 /*
2117  * Create the event if it is missing and present in the list of tracepoint probes.
2118 * Should be called with sessions mutex held.
2119 */
2120 static
2121 void lttng_event_enabler_create_events_if_missing(struct lttng_event_enabler_common *event_enabler)
2122 {
2123 int ret;
2124
2125 switch (event_enabler->event_param.instrumentation) {
2126 case LTTNG_KERNEL_ABI_TRACEPOINT:
2127 lttng_event_enabler_create_tracepoint_events_if_missing(event_enabler);
2128 break;
2129
2130 case LTTNG_KERNEL_ABI_SYSCALL:
2131 ret = lttng_event_enabler_create_syscall_events_if_missing(event_enabler);
2132 WARN_ON_ONCE(ret);
2133 break;
2134
2135 default:
2136 WARN_ON_ONCE(1);
2137 break;
2138 }
2139 }
2140
2141 static
2142 void lttng_event_enabler_init_event_filter(struct lttng_event_enabler_common *event_enabler,
2143 struct lttng_kernel_event_common *event)
2144 {
2145 /* Link filter bytecodes if not linked yet. */
2146 lttng_enabler_link_bytecode(event->priv->desc, lttng_static_ctx,
2147 &event->priv->filter_bytecode_runtime_head, &event_enabler->filter_bytecode_head);
2148 }
2149
2150 static
2151 void lttng_event_enabler_init_event_capture(struct lttng_event_enabler_common *event_enabler,
2152 struct lttng_kernel_event_common *event)
2153 {
2154 switch (event_enabler->enabler_type) {
2155 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
2156 break;
2157 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
2158 {
2159 struct lttng_event_notifier_enabler *event_notifier_enabler =
2160 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
2161 struct lttng_kernel_event_notifier *event_notifier =
2162 container_of(event, struct lttng_kernel_event_notifier, parent);
2163
2164 /* Link capture bytecodes if not linked yet. */
2165 lttng_enabler_link_bytecode(event->priv->desc,
2166 lttng_static_ctx, &event_notifier->priv->capture_bytecode_runtime_head,
2167 &event_notifier_enabler->capture_bytecode_head);
2168 event_notifier->priv->num_captures = event_notifier_enabler->num_captures;
2169 break;
2170 }
2171 default:
2172 WARN_ON_ONCE(1);
2173 }
2174 }
2175
2176 /*
2177 * Create events associated with an event_enabler (if not already present),
2178 * and add backward reference from the event to the enabler.
2179 * Should be called with sessions mutex held.
2180 */
2181 static
2182 int lttng_event_enabler_ref_events(struct lttng_event_enabler_common *event_enabler)
2183 {
2184 struct list_head *event_list_head = lttng_get_event_list_head_from_enabler(event_enabler);
2185 struct lttng_kernel_event_common_private *event_priv;
2186
2187 lttng_syscall_table_set_wildcard_all(event_enabler);
2188
2189 /* First ensure that probe events are created for this enabler. */
2190 lttng_event_enabler_create_events_if_missing(event_enabler);
2191
2192 	/* Link each created event with its associated enabler. */
2193 list_for_each_entry(event_priv, event_list_head, node) {
2194 struct lttng_kernel_event_common *event = event_priv->pub;
2195 struct lttng_enabler_ref *enabler_ref;
2196
2197 if (!lttng_event_enabler_match_event(event_enabler, event))
2198 continue;
2199
2200 enabler_ref = lttng_enabler_ref(&event_priv->enablers_ref_head, event_enabler);
2201 if (!enabler_ref) {
2202 /*
2203 * If no backward ref, create it.
2204 			 * Add backward ref from the event to the enabler.
2205 */
2206 enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
2207 if (!enabler_ref)
2208 return -ENOMEM;
2209
2210 enabler_ref->ref = event_enabler;
2211 list_add(&enabler_ref->node, &event_priv->enablers_ref_head);
2212 }
2213
2214 lttng_event_enabler_init_event_filter(event_enabler, event);
2215 lttng_event_enabler_init_event_capture(event_enabler, event);
2216 }
2217 return 0;
2218 }
2219
2220 /*
2221  * Called at module load: connect the probes of the newly loaded module
2222  * to all matching enablers.
2223 * Called with sessions lock held.
2224 */
2225 int lttng_fix_pending_events(void)
2226 {
2227 struct lttng_kernel_session_private *session_priv;
2228
2229 list_for_each_entry(session_priv, &sessions, list)
2230 lttng_session_lazy_sync_event_enablers(session_priv->pub);
2231 return 0;
2232 }
2233
2234 static bool lttng_event_notifier_group_has_active_event_notifiers(
2235 struct lttng_event_notifier_group *event_notifier_group)
2236 {
2237 struct lttng_event_enabler_common *event_enabler;
2238
2239 list_for_each_entry(event_enabler, &event_notifier_group->enablers_head, node) {
2240 if (event_enabler->enabled)
2241 return true;
2242 }
2243 return false;
2244 }
2245
2246 bool lttng_event_notifier_active(void)
2247 {
2248 struct lttng_event_notifier_group *event_notifier_group;
2249
2250 list_for_each_entry(event_notifier_group, &event_notifier_groups, node) {
2251 if (lttng_event_notifier_group_has_active_event_notifiers(event_notifier_group))
2252 return true;
2253 }
2254 return false;
2255 }
2256
2257 int lttng_fix_pending_event_notifiers(void)
2258 {
2259 struct lttng_event_notifier_group *event_notifier_group;
2260
2261 list_for_each_entry(event_notifier_group, &event_notifier_groups, node)
2262 lttng_event_notifier_group_sync_enablers(event_notifier_group);
2263 return 0;
2264 }
2265
2266 struct lttng_event_recorder_enabler *lttng_event_recorder_enabler_create(
2267 enum lttng_enabler_format_type format_type,
2268 struct lttng_kernel_abi_event *event_param,
2269 struct lttng_kernel_channel_buffer *chan)
2270 {
2271 struct lttng_event_recorder_enabler *event_enabler;
2272
2273 event_enabler = kzalloc(sizeof(*event_enabler), GFP_KERNEL);
2274 if (!event_enabler)
2275 return NULL;
2276 event_enabler->parent.enabler_type = LTTNG_EVENT_ENABLER_TYPE_RECORDER;
2277 event_enabler->parent.format_type = format_type;
2278 INIT_LIST_HEAD(&event_enabler->parent.filter_bytecode_head);
2279 memcpy(&event_enabler->parent.event_param, event_param,
2280 sizeof(event_enabler->parent.event_param));
2281 event_enabler->chan = chan;
2282 /* ctx left NULL */
2283 event_enabler->parent.enabled = 0;
2284 return event_enabler;
2285 }
2286
2287 void lttng_event_enabler_session_add(struct lttng_kernel_session *session,
2288 struct lttng_event_recorder_enabler *event_enabler)
2289 {
2290 mutex_lock(&sessions_mutex);
2291 list_add(&event_enabler->parent.node, &session->priv->enablers_head);
2292 event_enabler->parent.published = true;
2293 lttng_session_lazy_sync_event_enablers(session);
2294 mutex_unlock(&sessions_mutex);
2295 }
2296
2297 int lttng_event_enabler_enable(struct lttng_event_enabler_common *event_enabler)
2298 {
2299 mutex_lock(&sessions_mutex);
2300 event_enabler->enabled = 1;
2301 lttng_event_enabler_sync(event_enabler);
2302 mutex_unlock(&sessions_mutex);
2303 return 0;
2304 }
2305
2306 int lttng_event_enabler_disable(struct lttng_event_enabler_common *event_enabler)
2307 {
2308 mutex_lock(&sessions_mutex);
2309 event_enabler->enabled = 0;
2310 lttng_event_enabler_sync(event_enabler);
2311 mutex_unlock(&sessions_mutex);
2312 return 0;
2313 }
2314
2315 static
2316 int lttng_enabler_attach_filter_bytecode(struct lttng_event_enabler_common *enabler,
2317 struct lttng_kernel_abi_filter_bytecode __user *bytecode)
2318 {
2319 struct lttng_kernel_bytecode_node *bytecode_node;
2320 uint32_t bytecode_len;
2321 int ret;
2322
2323 ret = get_user(bytecode_len, &bytecode->len);
2324 if (ret)
2325 return ret;
2326 bytecode_node = lttng_kvzalloc(sizeof(*bytecode_node) + bytecode_len,
2327 GFP_KERNEL);
2328 if (!bytecode_node)
2329 return -ENOMEM;
2330 ret = copy_from_user(&bytecode_node->bc, bytecode,
2331 sizeof(*bytecode) + bytecode_len);
2332 if (ret)
2333 goto error_free;
2334
2335 bytecode_node->type = LTTNG_KERNEL_BYTECODE_TYPE_FILTER;
2336 bytecode_node->enabler = enabler;
2337 /* Enforce length based on allocated size */
2338 bytecode_node->bc.len = bytecode_len;
2339 list_add_tail(&bytecode_node->node, &enabler->filter_bytecode_head);
2340
2341 return 0;
2342
2343 error_free:
2344 lttng_kvfree(bytecode_node);
2345 return ret;
2346 }
2347
2348 int lttng_event_enabler_attach_filter_bytecode(struct lttng_event_enabler_common *event_enabler,
2349 struct lttng_kernel_abi_filter_bytecode __user *bytecode)
2350 {
2351 int ret;
2352 ret = lttng_enabler_attach_filter_bytecode(event_enabler, bytecode);
2353 if (ret)
2354 goto error;
2355 lttng_event_enabler_sync(event_enabler);
2356 return 0;
2357
2358 error:
2359 return ret;
2360 }
2361
2362 int lttng_event_add_callsite(struct lttng_kernel_event_common *event,
2363 struct lttng_kernel_abi_event_callsite __user *callsite)
2364 {
2365
2366 switch (event->priv->instrumentation) {
2367 case LTTNG_KERNEL_ABI_UPROBE:
2368 return lttng_uprobes_event_add_callsite(event, callsite);
2369 default:
2370 return -EINVAL;
2371 }
2372 }
2373
2374 static
2375 void lttng_enabler_destroy(struct lttng_event_enabler_common *enabler)
2376 {
2377 struct lttng_kernel_bytecode_node *filter_node, *tmp_filter_node;
2378
2379 /* Destroy filter bytecode */
2380 list_for_each_entry_safe(filter_node, tmp_filter_node,
2381 &enabler->filter_bytecode_head, node) {
2382 lttng_kvfree(filter_node);
2383 }
2384 }
2385
2386 void lttng_event_enabler_destroy(struct lttng_event_enabler_common *event_enabler)
2387 {
2388 lttng_enabler_destroy(event_enabler);
2389 if (event_enabler->published)
2390 list_del(&event_enabler->node);
2391
2392 switch (event_enabler->enabler_type) {
2393 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
2394 {
2395 struct lttng_event_recorder_enabler *event_recorder_enabler =
2396 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
2397
2398 kfree(event_recorder_enabler);
2399 break;
2400 }
2401 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
2402 {
2403 struct lttng_event_notifier_enabler *event_notifier_enabler =
2404 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
2405
2406 kfree(event_notifier_enabler);
2407 break;
2408 }
2409 default:
2410 WARN_ON_ONCE(1);
2411 }
2412 }
2413
2414 struct lttng_event_notifier_enabler *lttng_event_notifier_enabler_create(
2415 enum lttng_enabler_format_type format_type,
2416 struct lttng_kernel_abi_event_notifier *event_notifier_param,
2417 struct lttng_event_notifier_group *event_notifier_group)
2418 {
2419 struct lttng_event_notifier_enabler *event_notifier_enabler;
2420
2421 event_notifier_enabler = kzalloc(sizeof(*event_notifier_enabler), GFP_KERNEL);
2422 if (!event_notifier_enabler)
2423 return NULL;
2424
2425 event_notifier_enabler->parent.enabler_type = LTTNG_EVENT_ENABLER_TYPE_NOTIFIER;
2426 event_notifier_enabler->parent.format_type = format_type;
2427 INIT_LIST_HEAD(&event_notifier_enabler->parent.filter_bytecode_head);
2428 INIT_LIST_HEAD(&event_notifier_enabler->capture_bytecode_head);
2429
2430 event_notifier_enabler->error_counter_index = event_notifier_param->error_counter_index;
2431 event_notifier_enabler->num_captures = 0;
2432
2433 memcpy(&event_notifier_enabler->parent.event_param, &event_notifier_param->event,
2434 sizeof(event_notifier_enabler->parent.event_param));
2435
2436 event_notifier_enabler->parent.enabled = 0;
2437 event_notifier_enabler->parent.user_token = event_notifier_param->event.token;
2438 event_notifier_enabler->group = event_notifier_group;
2439 return event_notifier_enabler;
2440 }
2441
2442 void lttng_event_notifier_enabler_group_add(struct lttng_event_notifier_group *event_notifier_group,
2443 struct lttng_event_notifier_enabler *event_notifier_enabler)
2444 {
2445 mutex_lock(&sessions_mutex);
2446 list_add(&event_notifier_enabler->parent.node, &event_notifier_enabler->group->enablers_head);
2447 event_notifier_enabler->parent.published = true;
2448 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2449 mutex_unlock(&sessions_mutex);
2450 }
2451
2452 int lttng_event_notifier_enabler_enable(
2453 struct lttng_event_notifier_enabler *event_notifier_enabler)
2454 {
2455 mutex_lock(&sessions_mutex);
2456 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 1;
2457 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2458 mutex_unlock(&sessions_mutex);
2459 return 0;
2460 }
2461
2462 int lttng_event_notifier_enabler_disable(
2463 struct lttng_event_notifier_enabler *event_notifier_enabler)
2464 {
2465 mutex_lock(&sessions_mutex);
2466 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 0;
2467 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2468 mutex_unlock(&sessions_mutex);
2469 return 0;
2470 }
2471
2472 int lttng_event_notifier_enabler_attach_capture_bytecode(
2473 struct lttng_event_notifier_enabler *event_notifier_enabler,
2474 struct lttng_kernel_abi_capture_bytecode __user *bytecode)
2475 {
2476 struct lttng_kernel_bytecode_node *bytecode_node;
2477 struct lttng_event_enabler_common *enabler =
2478 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler);
2479 uint32_t bytecode_len;
2480 int ret;
2481
2482 ret = get_user(bytecode_len, &bytecode->len);
2483 if (ret)
2484 return ret;
2485
2486 bytecode_node = lttng_kvzalloc(sizeof(*bytecode_node) + bytecode_len,
2487 GFP_KERNEL);
2488 if (!bytecode_node)
2489 return -ENOMEM;
2490
2491 ret = copy_from_user(&bytecode_node->bc, bytecode,
2492 sizeof(*bytecode) + bytecode_len);
2493 if (ret)
2494 goto error_free;
2495
2496 bytecode_node->type = LTTNG_KERNEL_BYTECODE_TYPE_CAPTURE;
2497 bytecode_node->enabler = enabler;
2498
2499 /* Enforce length based on allocated size */
2500 bytecode_node->bc.len = bytecode_len;
2501 list_add_tail(&bytecode_node->node, &event_notifier_enabler->capture_bytecode_head);
2502
2503 event_notifier_enabler->num_captures++;
2504
2505 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2506 goto end;
2507
2508 error_free:
2509 lttng_kvfree(bytecode_node);
2510 end:
2511 return ret;
2512 }
2513
2514 static
2515 void lttng_event_sync_filter_state(struct lttng_kernel_event_common *event)
2516 {
2517 int has_enablers_without_filter_bytecode = 0, nr_filters = 0;
2518 struct lttng_kernel_bytecode_runtime *runtime;
2519 struct lttng_enabler_ref *enabler_ref;
2520
2521 	/* Check if the event has any enabled enabler without filter bytecode. */
2522 list_for_each_entry(enabler_ref, &event->priv->enablers_ref_head, node) {
2523 if (enabler_ref->ref->enabled
2524 && list_empty(&enabler_ref->ref->filter_bytecode_head)) {
2525 has_enablers_without_filter_bytecode = 1;
2526 break;
2527 }
2528 }
2529 event->priv->has_enablers_without_filter_bytecode = has_enablers_without_filter_bytecode;
2530
2531 /* Enable filters */
2532 list_for_each_entry(runtime, &event->priv->filter_bytecode_runtime_head, node) {
2533 lttng_bytecode_sync_state(runtime);
2534 nr_filters++;
2535 }
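	/*
	 * The filter only needs to be evaluated when every enabled enabler has
	 * filter bytecode attached and at least one bytecode runtime is linked
	 * to the event.
	 */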
2536 WRITE_ONCE(event->eval_filter, !(has_enablers_without_filter_bytecode || !nr_filters));
2537 }
2538
2539 static
2540 void lttng_event_sync_capture_state(struct lttng_kernel_event_common *event)
2541 {
2542 switch (event->type) {
2543 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
2544 break;
2545 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
2546 {
2547 struct lttng_kernel_event_notifier *event_notifier =
2548 container_of(event, struct lttng_kernel_event_notifier, parent);
2549 struct lttng_kernel_bytecode_runtime *runtime;
2550 int nr_captures = 0;
2551
2552 /* Enable captures */
2553 list_for_each_entry(runtime, &event_notifier->priv->capture_bytecode_runtime_head, node) {
2554 lttng_bytecode_sync_state(runtime);
2555 nr_captures++;
2556 }
2557 WRITE_ONCE(event_notifier->eval_capture, !!nr_captures);
2558 break;
2559 }
2560 default:
2561 WARN_ON_ONCE(1);
2562 }
2563 }
2564
2565 static
2566 bool lttng_get_event_enabled_state(struct lttng_kernel_event_common *event)
2567 {
2568 struct lttng_enabler_ref *enabler_ref;
2569 bool enabled = false;
2570
2571 switch (event->priv->instrumentation) {
2572 case LTTNG_KERNEL_ABI_TRACEPOINT:
2573 lttng_fallthrough;
2574 case LTTNG_KERNEL_ABI_SYSCALL:
2575 /* Enable events */
2576 list_for_each_entry(enabler_ref, &event->priv->enablers_ref_head, node) {
2577 if (enabler_ref->ref->enabled) {
2578 enabled = true;
2579 break;
2580 }
2581 }
2582 break;
2583 default:
2584 WARN_ON_ONCE(1);
2585 return false;
2586 }
2587
2588 switch (event->type) {
2589 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
2590 {
2591 struct lttng_kernel_event_recorder *event_recorder =
2592 container_of(event, struct lttng_kernel_event_recorder, parent);
2593
2594 /*
2595 * Enabled state is based on union of enablers, with
2596 * intersection of session and channel transient enable
2597 * states.
2598 */
2599 return enabled && event_recorder->chan->parent.session->priv->tstate && event_recorder->chan->priv->parent.tstate;
2600 }
2601 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
2602 return enabled;
2603 default:
2604 WARN_ON_ONCE(1);
2605 return false;
2606 }
2607 }
2608
2609 static
2610 bool lttng_event_is_lazy_sync(struct lttng_kernel_event_common *event)
2611 {
2612 switch (event->priv->instrumentation) {
2613 case LTTNG_KERNEL_ABI_TRACEPOINT:
2614 lttng_fallthrough;
2615 case LTTNG_KERNEL_ABI_SYSCALL:
2616 return true;
2617
2618 default:
2619 /* Not handled with lazy sync. */
2620 return false;
2621 }
2622 }
2623
2624 /*
2625 * Should be called with sessions mutex held.
2626 */
2627 static
2628 void lttng_sync_event_list(struct list_head *event_enabler_list,
2629 struct list_head *event_list)
2630 {
2631 struct lttng_kernel_event_common_private *event_priv;
2632 struct lttng_event_enabler_common *event_enabler;
2633
2634 list_for_each_entry(event_enabler, event_enabler_list, node)
2635 lttng_event_enabler_ref_events(event_enabler);
2636
2637 /*
2638 	 * For each event, if at least one of its enablers is enabled and,
2639 	 * for event recorders, its channel and session transient states are
2640 	 * enabled, we enable the event, else we disable it.
2641 */
2642 list_for_each_entry(event_priv, event_list, node) {
2643 struct lttng_kernel_event_common *event = event_priv->pub;
2644 bool enabled;
2645
2646 if (!lttng_event_is_lazy_sync(event))
2647 continue;
2648
2649 enabled = lttng_get_event_enabled_state(event);
2650 WRITE_ONCE(event->enabled, enabled);
2651 /*
2652 * Sync tracepoint registration with event enabled state.
2653 */
2654 if (enabled) {
2655 register_event(event);
2656 } else {
2657 _lttng_event_unregister(event);
2658 }
2659
2660 lttng_event_sync_filter_state(event);
2661 lttng_event_sync_capture_state(event);
2662 }
2663 }
2664
2665 /*
2666 * lttng_session_sync_event_enablers should be called just before starting a
2667 * session.
2668 */
2669 static
2670 void lttng_session_sync_event_enablers(struct lttng_kernel_session *session)
2671 {
2672 lttng_sync_event_list(&session->priv->enablers_head, &session->priv->events);
2673 }
2674
2675 /*
2676 * Apply enablers to session events, adding events to session if need
2677 * be. It is required after each modification applied to an active
2678 * session, and right before session "start".
2679 * "lazy" sync means we only sync if required.
2680 * Should be called with sessions mutex held.
2681 */
2682 static
2683 void lttng_session_lazy_sync_event_enablers(struct lttng_kernel_session *session)
2684 {
2685 	/* We can skip if the session is not active. */
2686 if (!session->active)
2687 return;
2688 lttng_session_sync_event_enablers(session);
2689 }
2690
2691 static
2692 void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group)
2693 {
2694 lttng_sync_event_list(&event_notifier_group->enablers_head, &event_notifier_group->event_notifiers_head);
2695 }
2696
2697 static
2698 void lttng_event_enabler_sync(struct lttng_event_enabler_common *event_enabler)
2699 {
2700 switch (event_enabler->enabler_type) {
2701 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
2702 {
2703 struct lttng_event_recorder_enabler *event_recorder_enabler =
2704 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
2705 lttng_session_lazy_sync_event_enablers(event_recorder_enabler->chan->parent.session);
2706 break;
2707 }
2708 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
2709 {
2710 struct lttng_event_notifier_enabler *event_notifier_enabler =
2711 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
2712 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2713 break;
2714 }
2715 default:
2716 WARN_ON_ONCE(1);
2717 }
2718 }
2719
2720 /*
2721 * Serialize at most one packet worth of metadata into a metadata
2722 * channel.
2723 * We grab the metadata cache mutex to get exclusive access to our metadata
2724 * buffer and to the metadata cache. Exclusive access to the metadata buffer
2725  * allows us to do otherwise racy operations, such as checking the remaining
2726  * space left in the packet and writing to it, since mutual exclusion protects
2727  * us from concurrent writers. Mutual exclusion on the metadata cache allows us
2728  * to read the cache content without racing against reallocation of the cache
2729  * by updates.
2730  * Returns the number of bytes written to the channel, 0 if no data was written,
2731  * and a negative value on error.
2731 */
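/*
 * Illustrative example, assuming roughly 4 kB of payload space per packet:
 * with 10 kB of metadata pending in the cache, three calls (each followed by
 * the consumer reading the produced packet) would return about 4096, 4096 and
 * 2048 bytes; the first two leave the stream marked as not coherent because
 * part of the cache content did not fit in the packet.
 */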
2732 int lttng_metadata_output_channel(struct lttng_metadata_stream *stream,
2733 struct lttng_kernel_ring_buffer_channel *chan, bool *coherent)
2734 {
2735 struct lttng_kernel_ring_buffer_ctx ctx;
2736 int ret = 0;
2737 size_t len, reserve_len;
2738
2739 /*
2740 	 * Ensure we support multiple get_next / put sequences followed by
2741 * put_next. The metadata cache lock protects reading the metadata
2742 * cache. It can indeed be read concurrently by "get_next_subbuf" and
2743 * "flush" operations on the buffer invoked by different processes.
2744 * Moreover, since the metadata cache memory can be reallocated, we
2745 * need to have exclusive access against updates even though we only
2746 * read it.
2747 */
2748 mutex_lock(&stream->metadata_cache->lock);
2749 WARN_ON(stream->metadata_in < stream->metadata_out);
2750 if (stream->metadata_in != stream->metadata_out)
2751 goto end;
2752
2753 /* Metadata regenerated, change the version. */
2754 if (stream->metadata_cache->version != stream->version)
2755 stream->version = stream->metadata_cache->version;
2756
2757 len = stream->metadata_cache->metadata_written -
2758 stream->metadata_in;
2759 if (!len)
2760 goto end;
2761 reserve_len = min_t(size_t,
2762 stream->transport->ops.priv->packet_avail_size(chan),
2763 len);
2764 lib_ring_buffer_ctx_init(&ctx, chan, reserve_len,
2765 sizeof(char), NULL);
2766 /*
2767 * If reservation failed, return an error to the caller.
2768 */
2769 ret = stream->transport->ops.event_reserve(&ctx);
2770 if (ret != 0) {
2771 printk(KERN_WARNING "LTTng: Metadata event reservation failed\n");
2772 stream->coherent = false;
2773 goto end;
2774 }
2775 stream->transport->ops.event_write(&ctx,
2776 stream->metadata_cache->data + stream->metadata_in,
2777 reserve_len, 1);
2778 stream->transport->ops.event_commit(&ctx);
2779 stream->metadata_in += reserve_len;
2780 if (reserve_len < len)
2781 stream->coherent = false;
2782 else
2783 stream->coherent = true;
2784 ret = reserve_len;
2785
2786 end:
2787 if (coherent)
2788 *coherent = stream->coherent;
2789 mutex_unlock(&stream->metadata_cache->lock);
2790 return ret;
2791 }
2792
2793 static
2794 void lttng_metadata_begin(struct lttng_kernel_session *session)
2795 {
2796 if (atomic_inc_return(&session->priv->metadata_cache->producing) == 1)
2797 mutex_lock(&session->priv->metadata_cache->lock);
2798 }
2799
2800 static
2801 void lttng_metadata_end(struct lttng_kernel_session *session)
2802 {
2803 WARN_ON_ONCE(!atomic_read(&session->priv->metadata_cache->producing));
2804 if (atomic_dec_return(&session->priv->metadata_cache->producing) == 0) {
2805 struct lttng_metadata_stream *stream;
2806
2807 list_for_each_entry(stream, &session->priv->metadata_cache->metadata_stream, list)
2808 wake_up_interruptible(&stream->read_wait);
2809 mutex_unlock(&session->priv->metadata_cache->lock);
2810 }
2811 }
2812
2813 /*
2814 * Write the metadata to the metadata cache.
2815 * Must be called with sessions_mutex held.
2816  * The metadata cache lock protects us from concurrent read access by the
2817  * thread outputting metadata content to the ring buffer.
2818 * The content of the printf is printed as a single atomic metadata
2819 * transaction.
2820 */
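/*
 * Minimal usage sketch: callers bracket a group of related printf calls with
 * lttng_metadata_begin()/lttng_metadata_end() so readers see them as a single
 * transaction, e.g.:
 *
 *	lttng_metadata_begin(session);
 *	ret = lttng_metadata_printf(session, "	id = %u;\n", id);
 *	lttng_metadata_end(session);
 */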
2821 int lttng_metadata_printf(struct lttng_kernel_session *session,
2822 const char *fmt, ...)
2823 {
2824 char *str;
2825 size_t len;
2826 va_list ap;
2827
2828 WARN_ON_ONCE(!LTTNG_READ_ONCE(session->active));
2829
2830 va_start(ap, fmt);
2831 str = kvasprintf(GFP_KERNEL, fmt, ap);
2832 va_end(ap);
2833 if (!str)
2834 return -ENOMEM;
2835
2836 len = strlen(str);
2837 WARN_ON_ONCE(!atomic_read(&session->priv->metadata_cache->producing));
2838 if (session->priv->metadata_cache->metadata_written + len >
2839 session->priv->metadata_cache->cache_alloc) {
2840 char *tmp_cache_realloc;
2841 unsigned int tmp_cache_alloc_size;
2842
2843 tmp_cache_alloc_size = max_t(unsigned int,
2844 session->priv->metadata_cache->cache_alloc + len,
2845 session->priv->metadata_cache->cache_alloc << 1);
2846 tmp_cache_realloc = vzalloc(tmp_cache_alloc_size);
2847 if (!tmp_cache_realloc)
2848 goto err;
2849 if (session->priv->metadata_cache->data) {
2850 memcpy(tmp_cache_realloc,
2851 session->priv->metadata_cache->data,
2852 session->priv->metadata_cache->cache_alloc);
2853 vfree(session->priv->metadata_cache->data);
2854 }
2855
2856 session->priv->metadata_cache->cache_alloc = tmp_cache_alloc_size;
2857 session->priv->metadata_cache->data = tmp_cache_realloc;
2858 }
2859 memcpy(session->priv->metadata_cache->data +
2860 session->priv->metadata_cache->metadata_written,
2861 str, len);
2862 session->priv->metadata_cache->metadata_written += len;
2863 kfree(str);
2864
2865 return 0;
2866
2867 err:
2868 kfree(str);
2869 return -ENOMEM;
2870 }
2871
2872 static
2873 int print_tabs(struct lttng_kernel_session *session, size_t nesting)
2874 {
2875 size_t i;
2876
2877 for (i = 0; i < nesting; i++) {
2878 int ret;
2879
2880 ret = lttng_metadata_printf(session, " ");
2881 if (ret) {
2882 return ret;
2883 }
2884 }
2885 return 0;
2886 }
2887
2888 static
2889 int lttng_field_name_statedump(struct lttng_kernel_session *session,
2890 const struct lttng_kernel_event_field *field,
2891 size_t nesting)
2892 {
2893 return lttng_metadata_printf(session, " _%s;\n", field->name);
2894 }
2895
2896 static
2897 int _lttng_integer_type_statedump(struct lttng_kernel_session *session,
2898 const struct lttng_kernel_type_integer *type,
2899 enum lttng_kernel_string_encoding parent_encoding,
2900 size_t nesting)
2901 {
2902 int ret;
2903
2904 ret = print_tabs(session, nesting);
2905 if (ret)
2906 return ret;
2907 ret = lttng_metadata_printf(session,
2908 "integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s }",
2909 type->size,
2910 type->alignment,
2911 type->signedness,
2912 (parent_encoding == lttng_kernel_string_encoding_none)
2913 ? "none"
2914 : (parent_encoding == lttng_kernel_string_encoding_UTF8)
2915 ? "UTF8"
2916 : "ASCII",
2917 type->base,
2918 #if __BYTE_ORDER == __BIG_ENDIAN
2919 type->reverse_byte_order ? " byte_order = le;" : ""
2920 #else
2921 type->reverse_byte_order ? " byte_order = be;" : ""
2922 #endif
2923 );
2924 return ret;
2925 }
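/*
 * For instance, a native-endian 32-bit unsigned field typically emits:
 *
 *	integer { size = 32; align = 32; signed = 0; encoding = none; base = 10; }
 */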
2926
2927 /*
2928 * Must be called with sessions_mutex held.
2929 */
2930 static
2931 int _lttng_struct_type_statedump(struct lttng_kernel_session *session,
2932 const struct lttng_kernel_type_struct *type,
2933 size_t nesting)
2934 {
2935 const char *prev_field_name = NULL;
2936 int ret;
2937 uint32_t i, nr_fields;
2938 unsigned int alignment;
2939
2940 ret = print_tabs(session, nesting);
2941 if (ret)
2942 return ret;
2943 ret = lttng_metadata_printf(session,
2944 "struct {\n");
2945 if (ret)
2946 return ret;
2947 nr_fields = type->nr_fields;
2948 for (i = 0; i < nr_fields; i++) {
2949 const struct lttng_kernel_event_field *iter_field;
2950
2951 iter_field = type->fields[i];
2952 ret = _lttng_field_statedump(session, iter_field, nesting + 1, &prev_field_name);
2953 if (ret)
2954 return ret;
2955 }
2956 ret = print_tabs(session, nesting);
2957 if (ret)
2958 return ret;
2959 alignment = type->alignment;
2960 if (alignment) {
2961 ret = lttng_metadata_printf(session,
2962 "} align(%u)",
2963 alignment);
2964 } else {
2965 ret = lttng_metadata_printf(session,
2966 "}");
2967 }
2968 return ret;
2969 }
2970
2971 /*
2972 * Must be called with sessions_mutex held.
2973 */
2974 static
2975 int _lttng_struct_field_statedump(struct lttng_kernel_session *session,
2976 const struct lttng_kernel_event_field *field,
2977 size_t nesting)
2978 {
2979 int ret;
2980
2981 ret = _lttng_struct_type_statedump(session,
2982 lttng_kernel_get_type_struct(field->type), nesting);
2983 if (ret)
2984 return ret;
2985 return lttng_field_name_statedump(session, field, nesting);
2986 }
2987
2988 /*
2989 * Must be called with sessions_mutex held.
2990 */
2991 static
2992 int _lttng_variant_type_statedump(struct lttng_kernel_session *session,
2993 const struct lttng_kernel_type_variant *type,
2994 size_t nesting,
2995 const char *prev_field_name)
2996 {
2997 const char *tag_name;
2998 int ret;
2999 uint32_t i, nr_choices;
3000
3001 tag_name = type->tag_name;
3002 if (!tag_name)
3003 tag_name = prev_field_name;
3004 if (!tag_name)
3005 return -EINVAL;
3006 /*
3007 * CTF 1.8 does not allow expressing nonzero variant alignment in a nestable way.
3008 */
3009 if (type->alignment != 0)
3010 return -EINVAL;
3011 ret = print_tabs(session, nesting);
3012 if (ret)
3013 return ret;
3014 ret = lttng_metadata_printf(session,
3015 "variant <_%s> {\n",
3016 tag_name);
3017 if (ret)
3018 return ret;
3019 nr_choices = type->nr_choices;
3020 for (i = 0; i < nr_choices; i++) {
3021 const struct lttng_kernel_event_field *iter_field;
3022
3023 iter_field = type->choices[i];
3024 ret = _lttng_field_statedump(session, iter_field, nesting + 1, NULL);
3025 if (ret)
3026 return ret;
3027 }
3028 ret = print_tabs(session, nesting);
3029 if (ret)
3030 return ret;
3031 ret = lttng_metadata_printf(session,
3032 "}");
3033 return ret;
3034 }
3035
3036 /*
3037 * Must be called with sessions_mutex held.
3038 */
3039 static
3040 int _lttng_variant_field_statedump(struct lttng_kernel_session *session,
3041 const struct lttng_kernel_event_field *field,
3042 size_t nesting,
3043 const char *prev_field_name)
3044 {
3045 int ret;
3046
3047 ret = _lttng_variant_type_statedump(session,
3048 lttng_kernel_get_type_variant(field->type), nesting,
3049 prev_field_name);
3050 if (ret)
3051 return ret;
3052 return lttng_field_name_statedump(session, field, nesting);
3053 }
3054
3055 /*
3056 * Must be called with sessions_mutex held.
3057 */
3058 static
3059 int _lttng_array_field_statedump(struct lttng_kernel_session *session,
3060 const struct lttng_kernel_event_field *field,
3061 size_t nesting)
3062 {
3063 int ret;
3064 const struct lttng_kernel_type_array *array_type;
3065 const struct lttng_kernel_type_common *elem_type;
3066
3067 array_type = lttng_kernel_get_type_array(field->type);
3068 WARN_ON_ONCE(!array_type);
3069
3070 if (array_type->alignment) {
3071 ret = print_tabs(session, nesting);
3072 if (ret)
3073 return ret;
3074 ret = lttng_metadata_printf(session,
3075 "struct { } align(%u) _%s_padding;\n",
3076 array_type->alignment * CHAR_BIT,
3077 field->name);
3078 if (ret)
3079 return ret;
3080 }
3081 /*
3082 * Nested compound types: Only array of structures and variants are
3083 * currently supported.
3084 */
3085 elem_type = array_type->elem_type;
3086 switch (elem_type->type) {
3087 case lttng_kernel_type_integer:
3088 case lttng_kernel_type_struct:
3089 case lttng_kernel_type_variant:
3090 ret = _lttng_type_statedump(session, elem_type,
3091 array_type->encoding, nesting);
3092 if (ret)
3093 return ret;
3094 break;
3095
3096 default:
3097 return -EINVAL;
3098 }
3099 ret = lttng_metadata_printf(session,
3100 " _%s[%u];\n",
3101 field->name,
3102 array_type->length);
3103 return ret;
3104 }
3105
3106 /*
3107 * Must be called with sessions_mutex held.
3108 */
3109 static
3110 int _lttng_sequence_field_statedump(struct lttng_kernel_session *session,
3111 const struct lttng_kernel_event_field *field,
3112 size_t nesting,
3113 const char *prev_field_name)
3114 {
3115 int ret;
3116 const char *length_name;
3117 const struct lttng_kernel_type_sequence *sequence_type;
3118 const struct lttng_kernel_type_common *elem_type;
3119
3120 sequence_type = lttng_kernel_get_type_sequence(field->type);
3121 WARN_ON_ONCE(!sequence_type);
3122
3123 length_name = sequence_type->length_name;
3124 if (!length_name)
3125 length_name = prev_field_name;
3126 if (!length_name)
3127 return -EINVAL;
3128
3129 if (sequence_type->alignment) {
3130 ret = print_tabs(session, nesting);
3131 if (ret)
3132 return ret;
3133 ret = lttng_metadata_printf(session,
3134 "struct { } align(%u) _%s_padding;\n",
3135 sequence_type->alignment * CHAR_BIT,
3136 field->name);
3137 if (ret)
3138 return ret;
3139 }
3140
3141 /*
3142 * Nested compound types: Only array of structures and variants are
3143 * currently supported.
3144 */
3145 elem_type = sequence_type->elem_type;
3146 switch (elem_type->type) {
3147 case lttng_kernel_type_integer:
3148 case lttng_kernel_type_struct:
3149 case lttng_kernel_type_variant:
3150 ret = _lttng_type_statedump(session, elem_type,
3151 sequence_type->encoding, nesting);
3152 if (ret)
3153 return ret;
3154 break;
3155
3156 default:
3157 return -EINVAL;
3158 }
3159 ret = lttng_metadata_printf(session,
3160 " _%s[ _%s ];\n",
3161 field->name,
3162 length_name);
3163 return ret;
3164 }
3165
3166 /*
3167 * Must be called with sessions_mutex held.
3168 */
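/*
 * Illustrative shape of the emitted metadata for a two-entry enumeration over
 * a 32-bit container (values are hypothetical):
 *
 *	enum : integer { size = 32; align = 32; signed = 0; encoding = none; base = 10; } {
 *		"ZERO" = 0,
 *		"RANGE" = 1 ... 3,
 *	}
 */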
3169 static
3170 int _lttng_enum_type_statedump(struct lttng_kernel_session *session,
3171 const struct lttng_kernel_type_enum *type,
3172 size_t nesting)
3173 {
3174 const struct lttng_kernel_enum_desc *enum_desc;
3175 const struct lttng_kernel_type_common *container_type;
3176 int ret;
3177 unsigned int i, nr_entries;
3178
3179 container_type = type->container_type;
3180 if (container_type->type != lttng_kernel_type_integer) {
3181 ret = -EINVAL;
3182 goto end;
3183 }
3184 enum_desc = type->desc;
3185 nr_entries = enum_desc->nr_entries;
3186
3187 ret = print_tabs(session, nesting);
3188 if (ret)
3189 goto end;
3190 ret = lttng_metadata_printf(session, "enum : ");
3191 if (ret)
3192 goto end;
3193 ret = _lttng_integer_type_statedump(session, lttng_kernel_get_type_integer(container_type),
3194 lttng_kernel_string_encoding_none, 0);
3195 if (ret)
3196 goto end;
3197 ret = lttng_metadata_printf(session, " {\n");
3198 if (ret)
3199 goto end;
3200 /* Dump all entries */
3201 for (i = 0; i < nr_entries; i++) {
3202 const struct lttng_kernel_enum_entry *entry = enum_desc->entries[i];
3203 int j, len;
3204
3205 ret = print_tabs(session, nesting + 1);
3206 if (ret)
3207 goto end;
3208 ret = lttng_metadata_printf(session,
3209 "\"");
3210 if (ret)
3211 goto end;
3212 len = strlen(entry->string);
3213 		/* Escape the characters '"' and '\\'. */
3214 for (j = 0; j < len; j++) {
3215 char c = entry->string[j];
3216
3217 switch (c) {
3218 case '"':
3219 ret = lttng_metadata_printf(session,
3220 "\\\"");
3221 break;
3222 case '\\':
3223 ret = lttng_metadata_printf(session,
3224 "\\\\");
3225 break;
3226 default:
3227 ret = lttng_metadata_printf(session,
3228 "%c", c);
3229 break;
3230 }
3231 if (ret)
3232 goto end;
3233 }
3234 ret = lttng_metadata_printf(session, "\"");
3235 if (ret)
3236 goto end;
3237
3238 if (entry->options.is_auto) {
3239 ret = lttng_metadata_printf(session, ",\n");
3240 if (ret)
3241 goto end;
3242 } else {
3243 ret = lttng_metadata_printf(session,
3244 " = ");
3245 if (ret)
3246 goto end;
3247 if (entry->start.signedness)
3248 ret = lttng_metadata_printf(session,
3249 "%lld", (long long) entry->start.value);
3250 else
3251 ret = lttng_metadata_printf(session,
3252 "%llu", entry->start.value);
3253 if (ret)
3254 goto end;
3255 if (entry->start.signedness == entry->end.signedness &&
3256 entry->start.value
3257 == entry->end.value) {
3258 ret = lttng_metadata_printf(session,
3259 ",\n");
3260 } else {
3261 if (entry->end.signedness) {
3262 ret = lttng_metadata_printf(session,
3263 " ... %lld,\n",
3264 (long long) entry->end.value);
3265 } else {
3266 ret = lttng_metadata_printf(session,
3267 " ... %llu,\n",
3268 entry->end.value);
3269 }
3270 }
3271 if (ret)
3272 goto end;
3273 }
3274 }
3275 ret = print_tabs(session, nesting);
3276 if (ret)
3277 goto end;
3278 ret = lttng_metadata_printf(session, "}");
3279 end:
3280 return ret;
3281 }
3282
3283 /*
3284 * Must be called with sessions_mutex held.
3285 */
3286 static
3287 int _lttng_enum_field_statedump(struct lttng_kernel_session *session,
3288 const struct lttng_kernel_event_field *field,
3289 size_t nesting)
3290 {
3291 int ret;
3292 const struct lttng_kernel_type_enum *enum_type;
3293
3294 enum_type = lttng_kernel_get_type_enum(field->type);
3295 WARN_ON_ONCE(!enum_type);
3296 ret = _lttng_enum_type_statedump(session, enum_type, nesting);
3297 if (ret)
3298 return ret;
3299 return lttng_field_name_statedump(session, field, nesting);
3300 }
3301
3302 static
3303 int _lttng_integer_field_statedump(struct lttng_kernel_session *session,
3304 const struct lttng_kernel_event_field *field,
3305 size_t nesting)
3306 {
3307 int ret;
3308
3309 ret = _lttng_integer_type_statedump(session, lttng_kernel_get_type_integer(field->type),
3310 lttng_kernel_string_encoding_none, nesting);
3311 if (ret)
3312 return ret;
3313 return lttng_field_name_statedump(session, field, nesting);
3314 }
3315
3316 static
3317 int _lttng_string_type_statedump(struct lttng_kernel_session *session,
3318 const struct lttng_kernel_type_string *type,
3319 size_t nesting)
3320 {
3321 int ret;
3322
3323 /* Default encoding is UTF8 */
3324 ret = print_tabs(session, nesting);
3325 if (ret)
3326 return ret;
3327 ret = lttng_metadata_printf(session,
3328 "string%s",
3329 type->encoding == lttng_kernel_string_encoding_ASCII ?
3330 " { encoding = ASCII; }" : "");
3331 return ret;
3332 }
3333
3334 static
3335 int _lttng_string_field_statedump(struct lttng_kernel_session *session,
3336 const struct lttng_kernel_event_field *field,
3337 size_t nesting)
3338 {
3339 const struct lttng_kernel_type_string *string_type;
3340 int ret;
3341
3342 string_type = lttng_kernel_get_type_string(field->type);
3343 WARN_ON_ONCE(!string_type);
3344 ret = _lttng_string_type_statedump(session, string_type, nesting);
3345 if (ret)
3346 return ret;
3347 return lttng_field_name_statedump(session, field, nesting);
3348 }
3349
3350 /*
3351 * Must be called with sessions_mutex held.
3352 */
3353 static
3354 int _lttng_type_statedump(struct lttng_kernel_session *session,
3355 const struct lttng_kernel_type_common *type,
3356 enum lttng_kernel_string_encoding parent_encoding,
3357 size_t nesting)
3358 {
3359 int ret = 0;
3360
3361 switch (type->type) {
3362 case lttng_kernel_type_integer:
3363 ret = _lttng_integer_type_statedump(session,
3364 lttng_kernel_get_type_integer(type),
3365 parent_encoding, nesting);
3366 break;
3367 case lttng_kernel_type_enum:
3368 ret = _lttng_enum_type_statedump(session,
3369 lttng_kernel_get_type_enum(type),
3370 nesting);
3371 break;
3372 case lttng_kernel_type_string:
3373 ret = _lttng_string_type_statedump(session,
3374 lttng_kernel_get_type_string(type),
3375 nesting);
3376 break;
3377 case lttng_kernel_type_struct:
3378 ret = _lttng_struct_type_statedump(session,
3379 lttng_kernel_get_type_struct(type),
3380 nesting);
3381 break;
3382 case lttng_kernel_type_variant:
3383 ret = _lttng_variant_type_statedump(session,
3384 lttng_kernel_get_type_variant(type),
3385 nesting, NULL);
3386 break;
3387
3388 /* Nested arrays and sequences are not supported yet. */
3389 case lttng_kernel_type_array:
3390 case lttng_kernel_type_sequence:
3391 default:
3392 WARN_ON_ONCE(1);
3393 return -EINVAL;
3394 }
3395 return ret;
3396 }
3397
3398 /*
3399 * Must be called with sessions_mutex held.
3400 */
3401 static
3402 int _lttng_field_statedump(struct lttng_kernel_session *session,
3403 const struct lttng_kernel_event_field *field,
3404 size_t nesting,
3405 const char **prev_field_name_p)
3406 {
3407 const char *prev_field_name = NULL;
3408 int ret = 0;
3409
3410 if (prev_field_name_p)
3411 prev_field_name = *prev_field_name_p;
3412 switch (field->type->type) {
3413 case lttng_kernel_type_integer:
3414 ret = _lttng_integer_field_statedump(session, field, nesting);
3415 break;
3416 case lttng_kernel_type_enum:
3417 ret = _lttng_enum_field_statedump(session, field, nesting);
3418 break;
3419 case lttng_kernel_type_string:
3420 ret = _lttng_string_field_statedump(session, field, nesting);
3421 break;
3422 case lttng_kernel_type_struct:
3423 ret = _lttng_struct_field_statedump(session, field, nesting);
3424 break;
3425 case lttng_kernel_type_array:
3426 ret = _lttng_array_field_statedump(session, field, nesting);
3427 break;
3428 case lttng_kernel_type_sequence:
3429 ret = _lttng_sequence_field_statedump(session, field, nesting, prev_field_name);
3430 break;
3431 case lttng_kernel_type_variant:
3432 ret = _lttng_variant_field_statedump(session, field, nesting, prev_field_name);
3433 break;
3434
3435 default:
3436 WARN_ON_ONCE(1);
3437 return -EINVAL;
3438 }
3439 if (prev_field_name_p)
3440 *prev_field_name_p = field->name;
3441 return ret;
3442 }
3443
3444 static
3445 int _lttng_context_metadata_statedump(struct lttng_kernel_session *session,
3446 struct lttng_kernel_ctx *ctx)
3447 {
3448 const char *prev_field_name = NULL;
3449 int ret = 0;
3450 int i;
3451
3452 if (!ctx)
3453 return 0;
3454 for (i = 0; i < ctx->nr_fields; i++) {
3455 const struct lttng_kernel_ctx_field *field = &ctx->fields[i];
3456
3457 ret = _lttng_field_statedump(session, field->event_field, 2, &prev_field_name);
3458 if (ret)
3459 return ret;
3460 }
3461 return ret;
3462 }
3463
3464 static
3465 int _lttng_fields_metadata_statedump(struct lttng_kernel_session *session,
3466 struct lttng_kernel_event_recorder *event_recorder)
3467 {
3468 const char *prev_field_name = NULL;
3469 const struct lttng_kernel_event_desc *desc = event_recorder->priv->parent.desc;
3470 int ret = 0;
3471 int i;
3472
3473 for (i = 0; i < desc->tp_class->nr_fields; i++) {
3474 const struct lttng_kernel_event_field *field = desc->tp_class->fields[i];
3475
3476 ret = _lttng_field_statedump(session, field, 2, &prev_field_name);
3477 if (ret)
3478 return ret;
3479 }
3480 return ret;
3481 }
3482
3483 /*
3484 * Must be called with sessions_mutex held.
3485 * The entire event metadata is printed as a single atomic metadata
3486 * transaction.
3487 */
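/*
 * Illustrative shape of the emitted metadata for one event (name and ids are
 * hypothetical):
 *
 *	event {
 *		name = "sched_switch";
 *		id = 3;
 *		stream_id = 0;
 *		fields := struct {
 *			...
 *		};
 *	};
 */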
3488 static
3489 int _lttng_event_recorder_metadata_statedump(struct lttng_kernel_event_common *event)
3490 {
3491 struct lttng_kernel_event_recorder *event_recorder;
3492 struct lttng_kernel_channel_buffer *chan;
3493 struct lttng_kernel_session *session;
3494 int ret = 0;
3495
3496 if (event->type != LTTNG_KERNEL_EVENT_TYPE_RECORDER)
3497 return 0;
3498 event_recorder = container_of(event, struct lttng_kernel_event_recorder, parent);
3499 chan = event_recorder->chan;
3500 session = chan->parent.session;
3501
3502 if (event_recorder->priv->metadata_dumped || !LTTNG_READ_ONCE(session->active))
3503 return 0;
3504 if (chan->priv->channel_type == METADATA_CHANNEL)
3505 return 0;
3506
3507 lttng_metadata_begin(session);
3508
3509 ret = lttng_metadata_printf(session,
3510 "event {\n"
3511 " name = \"%s\";\n"
3512 " id = %u;\n"
3513 " stream_id = %u;\n",
3514 event_recorder->priv->parent.desc->event_name,
3515 event_recorder->priv->id,
3516 event_recorder->chan->priv->id);
3517 if (ret)
3518 goto end;
3519
3520 ret = lttng_metadata_printf(session,
3521 " fields := struct {\n"
3522 );
3523 if (ret)
3524 goto end;
3525
3526 ret = _lttng_fields_metadata_statedump(session, event_recorder);
3527 if (ret)
3528 goto end;
3529
3530 /*
3531 * LTTng space reservation can only reserve multiples of the
3532 * byte size.
3533 */
3534 ret = lttng_metadata_printf(session,
3535 " };\n"
3536 "};\n\n");
3537 if (ret)
3538 goto end;
3539
3540 event_recorder->priv->metadata_dumped = 1;
3541 end:
3542 lttng_metadata_end(session);
3543 return ret;
3544
3545 }
3546
3547 /*
3548 * Must be called with sessions_mutex held.
3549 * The entire channel metadata is printed as a single atomic metadata
3550 * transaction.
3551 */
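/*
 * Illustrative shape of the emitted stream declaration (id is hypothetical):
 *
 *	stream {
 *		id = 0;
 *		event.header := struct event_header_compact;
 *		packet.context := struct packet_context;
 *	};
 */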
3552 static
3553 int _lttng_channel_metadata_statedump(struct lttng_kernel_session *session,
3554 struct lttng_kernel_channel_buffer *chan)
3555 {
3556 int ret = 0;
3557
3558 if (chan->priv->metadata_dumped || !LTTNG_READ_ONCE(session->active))
3559 return 0;
3560
3561 if (chan->priv->channel_type == METADATA_CHANNEL)
3562 return 0;
3563
3564 lttng_metadata_begin(session);
3565
3566 WARN_ON_ONCE(!chan->priv->header_type);
3567 ret = lttng_metadata_printf(session,
3568 "stream {\n"
3569 " id = %u;\n"
3570 " event.header := %s;\n"
3571 " packet.context := struct packet_context;\n",
3572 chan->priv->id,
3573 chan->priv->header_type == 1 ? "struct event_header_compact" :
3574 "struct event_header_large");
3575 if (ret)
3576 goto end;
3577
3578 if (chan->priv->ctx) {
3579 ret = lttng_metadata_printf(session,
3580 " event.context := struct {\n");
3581 if (ret)
3582 goto end;
3583 }
3584 ret = _lttng_context_metadata_statedump(session, chan->priv->ctx);
3585 if (ret)
3586 goto end;
3587 if (chan->priv->ctx) {
3588 ret = lttng_metadata_printf(session,
3589 " };\n");
3590 if (ret)
3591 goto end;
3592 }
3593
3594 ret = lttng_metadata_printf(session,
3595 "};\n\n");
3596
3597 chan->priv->metadata_dumped = 1;
3598 end:
3599 lttng_metadata_end(session);
3600 return ret;
3601 }
3602
3603 /*
3604 * Must be called with sessions_mutex held.
3605 */
3606 static
3607 int _lttng_stream_packet_context_declare(struct lttng_kernel_session *session)
3608 {
3609 return lttng_metadata_printf(session,
3610 "struct packet_context {\n"
3611 " uint64_clock_monotonic_t timestamp_begin;\n"
3612 " uint64_clock_monotonic_t timestamp_end;\n"
3613 " uint64_t content_size;\n"
3614 " uint64_t packet_size;\n"
3615 " uint64_t packet_seq_num;\n"
3616 " unsigned long events_discarded;\n"
3617 " uint32_t cpu_id;\n"
3618 "};\n\n"
3619 );
3620 }
3621
3622 /*
3623 * Compact header:
3624 * id: range: 0 - 30.
3625 * id 31 is reserved to indicate an extended header.
3626 *
3627 * Large header:
3628 * id: range: 0 - 65534.
3629 * id 65535 is reserved to indicate an extended header.
3630 *
3631 * Must be called with sessions_mutex held.
3632 */
3633 static
3634 int _lttng_event_header_declare(struct lttng_kernel_session *session)
3635 {
3636 return lttng_metadata_printf(session,
3637 "struct event_header_compact {\n"
3638 " enum : uint5_t { compact = 0 ... 30, extended = 31 } id;\n"
3639 " variant <id> {\n"
3640 " struct {\n"
3641 " uint27_clock_monotonic_t timestamp;\n"
3642 " } compact;\n"
3643 " struct {\n"
3644 " uint32_t id;\n"
3645 " uint64_clock_monotonic_t timestamp;\n"
3646 " } extended;\n"
3647 " } v;\n"
3648 "} align(%u);\n"
3649 "\n"
3650 "struct event_header_large {\n"
3651 " enum : uint16_t { compact = 0 ... 65534, extended = 65535 } id;\n"
3652 " variant <id> {\n"
3653 " struct {\n"
3654 " uint32_clock_monotonic_t timestamp;\n"
3655 " } compact;\n"
3656 " struct {\n"
3657 " uint32_t id;\n"
3658 " uint64_clock_monotonic_t timestamp;\n"
3659 " } extended;\n"
3660 " } v;\n"
3661 "} align(%u);\n\n",
3662 lttng_alignof(uint32_t) * CHAR_BIT,
3663 lttng_alignof(uint16_t) * CHAR_BIT
3664 );
3665 }
3666
3667 /*
3668 * Approximation of NTP time of day to clock monotonic correlation,
3669 * taken at start of trace.
3670 * Yes, this is only an approximation. Yes, we can (and will) do better
3671 * in future versions.
3672 * This function may return a negative offset. It may happen if the
3673 * system sets the REALTIME clock to 0 after boot.
3674 *
3675  * Use a 64-bit timespec on kernels that have it, which makes 32-bit
3676  * architectures y2038 compliant.
3677 */
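/*
 * Worked example, assuming trace_clock_freq() == NSEC_PER_SEC: if the two
 * monotonic reads average 2,000,000,000 ns and the realtime clock reads
 * 1700000000 s + 500000000 ns, then
 *	realtime = 1700000000 * 1000000000 + 500000000
 * and the returned offset is realtime - 2,000,000,000.
 */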
3678 static
3679 int64_t measure_clock_offset(void)
3680 {
3681 uint64_t monotonic_avg, monotonic[2], realtime;
3682 uint64_t tcf = trace_clock_freq();
3683 int64_t offset;
3684 unsigned long flags;
3685 #ifdef LTTNG_KERNEL_HAS_TIMESPEC64
3686 struct timespec64 rts = { 0, 0 };
3687 #else
3688 struct timespec rts = { 0, 0 };
3689 #endif
3690
3691 /* Disable interrupts to increase correlation precision. */
3692 local_irq_save(flags);
3693 monotonic[0] = trace_clock_read64();
3694 #ifdef LTTNG_KERNEL_HAS_TIMESPEC64
3695 ktime_get_real_ts64(&rts);
3696 #else
3697 getnstimeofday(&rts);
3698 #endif
3699 monotonic[1] = trace_clock_read64();
3700 local_irq_restore(flags);
3701
3702 monotonic_avg = (monotonic[0] + monotonic[1]) >> 1;
3703 realtime = (uint64_t) rts.tv_sec * tcf;
3704 if (tcf == NSEC_PER_SEC) {
3705 realtime += rts.tv_nsec;
3706 } else {
3707 uint64_t n = rts.tv_nsec * tcf;
3708
3709 do_div(n, NSEC_PER_SEC);
3710 realtime += n;
3711 }
3712 offset = (int64_t) realtime - monotonic_avg;
3713 return offset;
3714 }
3715
3716 static
3717 int print_escaped_ctf_string(struct lttng_kernel_session *session, const char *string)
3718 {
3719 int ret = 0;
3720 size_t i;
3721 char cur;
3722
3723 i = 0;
3724 cur = string[i];
3725 while (cur != '\0') {
3726 switch (cur) {
3727 case '\n':
3728 ret = lttng_metadata_printf(session, "%s", "\\n");
3729 break;
3730 case '\\':
3731 case '"':
3732 ret = lttng_metadata_printf(session, "%c", '\\');
3733 if (ret)
3734 goto error;
3735 /* We still print the current char */
3736 lttng_fallthrough;
3737 default:
3738 ret = lttng_metadata_printf(session, "%c", cur);
3739 break;
3740 }
3741
3742 if (ret)
3743 goto error;
3744
3745 cur = string[++i];
3746 }
3747 error:
3748 return ret;
3749 }
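
/*
 * Example (hypothetical input): the string
 *	say "hi" \o/
 * is emitted to the metadata stream as
 *	say \"hi\" \\o/
 * and an embedded newline becomes the two characters '\' and 'n'.
 */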
3750
3751 static
3752 int print_metadata_escaped_field(struct lttng_kernel_session *session, const char *field,
3753 const char *field_value)
3754 {
3755 int ret;
3756
3757 ret = lttng_metadata_printf(session, " %s = \"", field);
3758 if (ret)
3759 goto error;
3760
3761 ret = print_escaped_ctf_string(session, field_value);
3762 if (ret)
3763 goto error;
3764
3765 ret = lttng_metadata_printf(session, "\";\n");
3766
3767 error:
3768 return ret;
3769 }
3770
3771 /*
3772 * Output metadata into this session's metadata buffers.
3773 * Must be called with sessions_mutex held.
3774 */
3775 static
3776 int _lttng_session_metadata_statedump(struct lttng_kernel_session *session)
3777 {
3778 unsigned char *uuid_c = session->priv->uuid.b;
3779 unsigned char uuid_s[37], clock_uuid_s[BOOT_ID_LEN];
3780 const char *product_uuid;
3781 struct lttng_kernel_channel_buffer_private *chan_priv;
3782 struct lttng_kernel_event_recorder_private *event_recorder_priv;
3783 int ret = 0;
3784
3785 if (!LTTNG_READ_ONCE(session->active))
3786 return 0;
3787
3788 lttng_metadata_begin(session);
3789
3790 if (session->priv->metadata_dumped)
3791 goto skip_session;
3792
3793 snprintf(uuid_s, sizeof(uuid_s),
3794 "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
3795 uuid_c[0], uuid_c[1], uuid_c[2], uuid_c[3],
3796 uuid_c[4], uuid_c[5], uuid_c[6], uuid_c[7],
3797 uuid_c[8], uuid_c[9], uuid_c[10], uuid_c[11],
3798 uuid_c[12], uuid_c[13], uuid_c[14], uuid_c[15]);
3799
3800 ret = lttng_metadata_printf(session,
3801 "typealias integer { size = 8; align = %u; signed = false; } := uint8_t;\n"
3802 "typealias integer { size = 16; align = %u; signed = false; } := uint16_t;\n"
3803 "typealias integer { size = 32; align = %u; signed = false; } := uint32_t;\n"
3804 "typealias integer { size = 64; align = %u; signed = false; } := uint64_t;\n"
3805 "typealias integer { size = %u; align = %u; signed = false; } := unsigned long;\n"
3806 "typealias integer { size = 5; align = 1; signed = false; } := uint5_t;\n"
3807 "typealias integer { size = 27; align = 1; signed = false; } := uint27_t;\n"
3808 "\n"
3809 "trace {\n"
3810 " major = %u;\n"
3811 " minor = %u;\n"
3812 " uuid = \"%s\";\n"
3813 " byte_order = %s;\n"
3814 " packet.header := struct {\n"
3815 " uint32_t magic;\n"
3816 " uint8_t uuid[16];\n"
3817 " uint32_t stream_id;\n"
3818 " uint64_t stream_instance_id;\n"
3819 " };\n"
3820 "};\n\n",
3821 lttng_alignof(uint8_t) * CHAR_BIT,
3822 lttng_alignof(uint16_t) * CHAR_BIT,
3823 lttng_alignof(uint32_t) * CHAR_BIT,
3824 lttng_alignof(uint64_t) * CHAR_BIT,
3825 sizeof(unsigned long) * CHAR_BIT,
3826 lttng_alignof(unsigned long) * CHAR_BIT,
3827 CTF_SPEC_MAJOR,
3828 CTF_SPEC_MINOR,
3829 uuid_s,
3830 #if __BYTE_ORDER == __BIG_ENDIAN
3831 "be"
3832 #else
3833 "le"
3834 #endif
3835 );
3836 if (ret)
3837 goto end;
3838
3839 ret = lttng_metadata_printf(session,
3840 "env {\n"
3841 " hostname = \"%s\";\n"
3842 " domain = \"kernel\";\n"
3843 " sysname = \"%s\";\n"
3844 " kernel_release = \"%s\";\n"
3845 " kernel_version = \"%s\";\n"
3846 " tracer_name = \"lttng-modules\";\n"
3847 " tracer_major = %d;\n"
3848 " tracer_minor = %d;\n"
3849 " tracer_patchlevel = %d;\n"
3850 " trace_buffering_scheme = \"global\";\n",
3851 current->nsproxy->uts_ns->name.nodename,
3852 utsname()->sysname,
3853 utsname()->release,
3854 utsname()->version,
3855 LTTNG_MODULES_MAJOR_VERSION,
3856 LTTNG_MODULES_MINOR_VERSION,
3857 LTTNG_MODULES_PATCHLEVEL_VERSION
3858 );
3859 if (ret)
3860 goto end;
3861
3862 ret = print_metadata_escaped_field(session, "trace_name", session->priv->name);
3863 if (ret)
3864 goto end;
3865 ret = print_metadata_escaped_field(session, "trace_creation_datetime",
3866 session->priv->creation_time);
3867 if (ret)
3868 goto end;
3869
3870 /* Add the product UUID to the 'env' section */
3871 product_uuid = dmi_get_system_info(DMI_PRODUCT_UUID);
3872 if (product_uuid) {
3873 ret = lttng_metadata_printf(session,
3874 " product_uuid = \"%s\";\n",
3875 product_uuid
3876 );
3877 if (ret)
3878 goto end;
3879 }
3880
3881 /* Close the 'env' section */
3882 ret = lttng_metadata_printf(session, "};\n\n");
3883 if (ret)
3884 goto end;
3885
3886 ret = lttng_metadata_printf(session,
3887 "clock {\n"
3888 " name = \"%s\";\n",
3889 trace_clock_name()
3890 );
3891 if (ret)
3892 goto end;
3893
3894 if (!trace_clock_uuid(clock_uuid_s)) {
3895 ret = lttng_metadata_printf(session,
3896 " uuid = \"%s\";\n",
3897 clock_uuid_s
3898 );
3899 if (ret)
3900 goto end;
3901 }
3902
3903 ret = lttng_metadata_printf(session,
3904 " description = \"%s\";\n"
3905 " freq = %llu; /* Frequency, in Hz */\n"
3906 " /* clock value offset from Epoch is: offset * (1/freq) */\n"
3907 " offset = %lld;\n"
3908 "};\n\n",
3909 trace_clock_description(),
3910 (unsigned long long) trace_clock_freq(),
3911 (long long) measure_clock_offset()
3912 );
3913 if (ret)
3914 goto end;
3915
3916 ret = lttng_metadata_printf(session,
3917 "typealias integer {\n"
3918 " size = 27; align = 1; signed = false;\n"
3919 " map = clock.%s.value;\n"
3920 "} := uint27_clock_monotonic_t;\n"
3921 "\n"
3922 "typealias integer {\n"
3923 " size = 32; align = %u; signed = false;\n"
3924 " map = clock.%s.value;\n"
3925 "} := uint32_clock_monotonic_t;\n"
3926 "\n"
3927 "typealias integer {\n"
3928 " size = 64; align = %u; signed = false;\n"
3929 " map = clock.%s.value;\n"
3930 "} := uint64_clock_monotonic_t;\n\n",
3931 trace_clock_name(),
3932 lttng_alignof(uint32_t) * CHAR_BIT,
3933 trace_clock_name(),
3934 lttng_alignof(uint64_t) * CHAR_BIT,
3935 trace_clock_name()
3936 );
3937 if (ret)
3938 goto end;
3939
3940 ret = _lttng_stream_packet_context_declare(session);
3941 if (ret)
3942 goto end;
3943
3944 ret = _lttng_event_header_declare(session);
3945 if (ret)
3946 goto end;
3947
3948 skip_session:
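	/*
	 * The per-session preamble above is emitted only once. Channels and
	 * events, however, are walked on every statedump: each carries its own
	 * metadata_dumped flag, so only those created since the previous dump
	 * actually produce output below.
	 */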
3949 list_for_each_entry(chan_priv, &session->priv->chan, node) {
3950 ret = _lttng_channel_metadata_statedump(session, chan_priv->pub);
3951 if (ret)
3952 goto end;
3953 }
3954
3955 list_for_each_entry(event_recorder_priv, &session->priv->events, parent.node) {
3956 ret = _lttng_event_recorder_metadata_statedump(&event_recorder_priv->pub->parent);
3957 if (ret)
3958 goto end;
3959 }
3960 session->priv->metadata_dumped = 1;
3961 end:
3962 lttng_metadata_end(session);
3963 return ret;
3964 }
3965
3966 /**
3967 * lttng_transport_register - LTT transport registration
3968 * @transport: transport structure
3969 *
3970 * Registers a transport which can be used as output to extract the data out of
3971 * LTTng. The module calling this registration function must ensure that no
3972 * trap-inducing code will be executed by the transport functions. E.g.
3973 * vmalloc_sync_mappings() must be called between a vmalloc and the moment the memory
3974 * is made visible to the transport function. This registration itself acts as a
3975 * vmalloc_sync_mappings(); therefore, the module only needs to synchronize the
3976 * TLBs if it allocates virtual memory after its registration.
3977 */
3978 void lttng_transport_register(struct lttng_transport *transport)
3979 {
3980 /*
3981 * Make sure no page fault can be triggered by the module about to be
3982 * registered. We deal with this here so we don't have to call
3983 * vmalloc_sync_mappings() in each module's init.
3984 */
3985 wrapper_vmalloc_sync_mappings();
3986
3987 mutex_lock(&sessions_mutex);
3988 list_add_tail(&transport->node, &lttng_transport_list);
3989 mutex_unlock(&sessions_mutex);
3990 }
3991 EXPORT_SYMBOL_GPL(lttng_transport_register);
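
/*
 * Sketch of a caller, with hypothetical names: a ring buffer client module
 * fills a struct lttng_transport (name, owner, ops) and registers it from its
 * module init, relying on the vmalloc_sync_mappings() performed here.
 *
 *	static struct lttng_transport lttng_relay_transport_example = {
 *		.name = "relay-example",
 *		.owner = THIS_MODULE,
 *		.ops = { ... },
 *	};
 *
 *	static int __init lttng_ring_buffer_client_example_init(void)
 *	{
 *		lttng_transport_register(&lttng_relay_transport_example);
 *		return 0;
 *	}
 */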
3992
3993 /**
3994 * lttng_transport_unregister - LTT transport unregistration
3995 * @transport: transport structure
3996 */
3997 void lttng_transport_unregister(struct lttng_transport *transport)
3998 {
3999 mutex_lock(&sessions_mutex);
4000 list_del(&transport->node);
4001 mutex_unlock(&sessions_mutex);
4002 }
4003 EXPORT_SYMBOL_GPL(lttng_transport_unregister);
4004
4005 void lttng_counter_transport_register(struct lttng_counter_transport *transport)
4006 {
4007 /*
4008 * Make sure no page fault can be triggered by the module about to be
4009 * registered. We deal with this here so we don't have to call
4010 * vmalloc_sync_mappings() in each module's init.
4011 */
4012 wrapper_vmalloc_sync_mappings();
4013
4014 mutex_lock(&sessions_mutex);
4015 list_add_tail(&transport->node, &lttng_counter_transport_list);
4016 mutex_unlock(&sessions_mutex);
4017 }
4018 EXPORT_SYMBOL_GPL(lttng_counter_transport_register);
4019
4020 void lttng_counter_transport_unregister(struct lttng_counter_transport *transport)
4021 {
4022 mutex_lock(&sessions_mutex);
4023 list_del(&transport->node);
4024 mutex_unlock(&sessions_mutex);
4025 }
4026 EXPORT_SYMBOL_GPL(lttng_counter_transport_unregister);
4027
4028 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
4029
4030 enum cpuhp_state lttng_hp_prepare;
4031 enum cpuhp_state lttng_hp_online;
4032
4033 static int lttng_hotplug_prepare(unsigned int cpu, struct hlist_node *node)
4034 {
4035 struct lttng_cpuhp_node *lttng_node;
4036
4037 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4038 switch (lttng_node->component) {
4039 case LTTNG_RING_BUFFER_FRONTEND:
4040 return 0;
4041 case LTTNG_RING_BUFFER_BACKEND:
4042 return lttng_cpuhp_rb_backend_prepare(cpu, lttng_node);
4043 case LTTNG_RING_BUFFER_ITER:
4044 return 0;
4045 case LTTNG_CONTEXT_PERF_COUNTERS:
4046 return 0;
4047 default:
4048 return -EINVAL;
4049 }
4050 }
4051
4052 static int lttng_hotplug_dead(unsigned int cpu, struct hlist_node *node)
4053 {
4054 struct lttng_cpuhp_node *lttng_node;
4055
4056 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4057 switch (lttng_node->component) {
4058 case LTTNG_RING_BUFFER_FRONTEND:
4059 return lttng_cpuhp_rb_frontend_dead(cpu, lttng_node);
4060 case LTTNG_RING_BUFFER_BACKEND:
4061 return 0;
4062 case LTTNG_RING_BUFFER_ITER:
4063 return 0;
4064 case LTTNG_CONTEXT_PERF_COUNTERS:
4065 return lttng_cpuhp_perf_counter_dead(cpu, lttng_node);
4066 default:
4067 return -EINVAL;
4068 }
4069 }
4070
4071 static int lttng_hotplug_online(unsigned int cpu, struct hlist_node *node)
4072 {
4073 struct lttng_cpuhp_node *lttng_node;
4074
4075 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4076 switch (lttng_node->component) {
4077 case LTTNG_RING_BUFFER_FRONTEND:
4078 return lttng_cpuhp_rb_frontend_online(cpu, lttng_node);
4079 case LTTNG_RING_BUFFER_BACKEND:
4080 return 0;
4081 case LTTNG_RING_BUFFER_ITER:
4082 return lttng_cpuhp_rb_iter_online(cpu, lttng_node);
4083 case LTTNG_CONTEXT_PERF_COUNTERS:
4084 return lttng_cpuhp_perf_counter_online(cpu, lttng_node);
4085 default:
4086 return -EINVAL;
4087 }
4088 }
4089
4090 static int lttng_hotplug_offline(unsigned int cpu, struct hlist_node *node)
4091 {
4092 struct lttng_cpuhp_node *lttng_node;
4093
4094 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4095 switch (lttng_node->component) {
4096 case LTTNG_RING_BUFFER_FRONTEND:
4097 return lttng_cpuhp_rb_frontend_offline(cpu, lttng_node);
4098 case LTTNG_RING_BUFFER_BACKEND:
4099 return 0;
4100 case LTTNG_RING_BUFFER_ITER:
4101 return 0;
4102 case LTTNG_CONTEXT_PERF_COUNTERS:
4103 return 0;
4104 default:
4105 return -EINVAL;
4106 }
4107 }
4108
4109 static int __init lttng_init_cpu_hotplug(void)
4110 {
4111 int ret;
4112
4113 ret = cpuhp_setup_state_multi(CPUHP_BP_PREPARE_DYN, "lttng:prepare",
4114 lttng_hotplug_prepare,
4115 lttng_hotplug_dead);
4116 if (ret < 0) {
4117 return ret;
4118 }
4119 lttng_hp_prepare = ret;
4120 lttng_rb_set_hp_prepare(ret);
4121
4122 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "lttng:online",
4123 lttng_hotplug_online,
4124 lttng_hotplug_offline);
4125 if (ret < 0) {
4126 cpuhp_remove_multi_state(lttng_hp_prepare);
4127 lttng_hp_prepare = 0;
4128 return ret;
4129 }
4130 lttng_hp_online = ret;
4131 lttng_rb_set_hp_online(ret);
4132
4133 return 0;
4134 }
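
/*
 * Both states are dynamically allocated multi-instance states: individual
 * ring buffers and perf contexts later attach their own struct
 * lttng_cpuhp_node to them (e.g. via cpuhp_state_add_instance()), which is
 * why the callbacks above dispatch on lttng_node->component.
 */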
4135
4136 static void __exit lttng_exit_cpu_hotplug(void)
4137 {
4138 lttng_rb_set_hp_online(0);
4139 cpuhp_remove_multi_state(lttng_hp_online);
4140 lttng_rb_set_hp_prepare(0);
4141 cpuhp_remove_multi_state(lttng_hp_prepare);
4142 }
4143
4144 #else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
4145 static int lttng_init_cpu_hotplug(void)
4146 {
4147 return 0;
4148 }
4149 static void lttng_exit_cpu_hotplug(void)
4150 {
4151 }
4152 #endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
4153
4154 static int __init lttng_events_init(void)
4155 {
4156 int ret;
4157
4158 ret = wrapper_lttng_fixup_sig(THIS_MODULE);
4159 if (ret)
4160 return ret;
4161 ret = wrapper_get_pfnblock_flags_mask_init();
4162 if (ret)
4163 return ret;
4164 ret = wrapper_get_pageblock_flags_mask_init();
4165 if (ret)
4166 return ret;
4167 ret = lttng_probes_init();
4168 if (ret)
4169 return ret;
4170 ret = lttng_context_init();
4171 if (ret)
4172 return ret;
4173 ret = lttng_tracepoint_init();
4174 if (ret)
4175 goto error_tp;
4176 event_recorder_cache = KMEM_CACHE(lttng_kernel_event_recorder, 0);
4177 if (!event_recorder_cache) {
4178 ret = -ENOMEM;
4179 goto error_kmem_event_recorder;
4180 }
4181 event_recorder_private_cache = KMEM_CACHE(lttng_kernel_event_recorder_private, 0);
4182 if (!event_recorder_private_cache) {
4183 ret = -ENOMEM;
4184 goto error_kmem_event_recorder_private;
4185 }
4186 event_notifier_cache = KMEM_CACHE(lttng_kernel_event_notifier, 0);
4187 if (!event_notifier_cache) {
4188 ret = -ENOMEM;
4189 goto error_kmem_event_notifier;
4190 }
4191 event_notifier_private_cache = KMEM_CACHE(lttng_kernel_event_notifier_private, 0);
4192 if (!event_notifier_private_cache) {
4193 ret = -ENOMEM;
4194 goto error_kmem_event_notifier_private;
4195 }
4196 ret = lttng_abi_init();
4197 if (ret)
4198 goto error_abi;
4199 ret = lttng_logger_init();
4200 if (ret)
4201 goto error_logger;
4202 ret = lttng_init_cpu_hotplug();
4203 if (ret)
4204 goto error_hotplug;
4205 printk(KERN_NOTICE "LTTng: Loaded modules v%s.%s.%s%s (%s)%s%s\n",
4206 __stringify(LTTNG_MODULES_MAJOR_VERSION),
4207 __stringify(LTTNG_MODULES_MINOR_VERSION),
4208 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
4209 LTTNG_MODULES_EXTRAVERSION,
4210 LTTNG_VERSION_NAME,
4211 #ifdef LTTNG_EXTRA_VERSION_GIT
4212 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
4213 #else
4214 "",
4215 #endif
4216 #ifdef LTTNG_EXTRA_VERSION_NAME
4217 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
4218 #else
4219 "");
4220 #endif
4221 #ifdef CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM
4222 printk(KERN_NOTICE "LTTng: Experimental bitwise enum enabled.\n");
4223 #endif /* CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM */
4224 return 0;
4225
4226 error_hotplug:
4227 lttng_logger_exit();
4228 error_logger:
4229 lttng_abi_exit();
4230 error_abi:
4231 kmem_cache_destroy(event_notifier_private_cache);
4232 error_kmem_event_notifier_private:
4233 kmem_cache_destroy(event_notifier_cache);
4234 error_kmem_event_notifier:
4235 kmem_cache_destroy(event_recorder_private_cache);
4236 error_kmem_event_recorder_private:
4237 kmem_cache_destroy(event_recorder_cache);
4238 error_kmem_event_recorder:
4239 lttng_tracepoint_exit();
4240 error_tp:
4241 lttng_context_exit();
4242 printk(KERN_NOTICE "LTTng: Failed to load modules v%s.%s.%s%s (%s)%s%s\n",
4243 __stringify(LTTNG_MODULES_MAJOR_VERSION),
4244 __stringify(LTTNG_MODULES_MINOR_VERSION),
4245 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
4246 LTTNG_MODULES_EXTRAVERSION,
4247 LTTNG_VERSION_NAME,
4248 #ifdef LTTNG_EXTRA_VERSION_GIT
4249 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
4250 #else
4251 "",
4252 #endif
4253 #ifdef LTTNG_EXTRA_VERSION_NAME
4254 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
4255 #else
4256 "");
4257 #endif
4258 return ret;
4259 }
4260
4261 module_init(lttng_events_init);
4262
4263 static void __exit lttng_events_exit(void)
4264 {
4265 struct lttng_kernel_session_private *session_priv, *tmpsession_priv;
4266
4267 lttng_exit_cpu_hotplug();
4268 lttng_logger_exit();
4269 lttng_abi_exit();
4270 list_for_each_entry_safe(session_priv, tmpsession_priv, &sessions, list)
4271 lttng_session_destroy(session_priv->pub);
4272 kmem_cache_destroy(event_recorder_cache);
4273 kmem_cache_destroy(event_recorder_private_cache);
4274 kmem_cache_destroy(event_notifier_cache);
4275 kmem_cache_destroy(event_notifier_private_cache);
4276 lttng_tracepoint_exit();
4277 lttng_context_exit();
4278 printk(KERN_NOTICE "LTTng: Unloaded modules v%s.%s.%s%s (%s)%s%s\n",
4279 __stringify(LTTNG_MODULES_MAJOR_VERSION),
4280 __stringify(LTTNG_MODULES_MINOR_VERSION),
4281 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
4282 LTTNG_MODULES_EXTRAVERSION,
4283 LTTNG_VERSION_NAME,
4284 #ifdef LTTNG_EXTRA_VERSION_GIT
4285 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
4286 #else
4287 "",
4288 #endif
4289 #ifdef LTTNG_EXTRA_VERSION_NAME
4290 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
4291 #else
4292 "");
4293 #endif
4294 }
4295
4296 module_exit(lttng_events_exit);
4297
4298 #include <generated/patches.h>
4299 #ifdef LTTNG_EXTRA_VERSION_GIT
4300 MODULE_INFO(extra_version_git, LTTNG_EXTRA_VERSION_GIT);
4301 #endif
4302 #ifdef LTTNG_EXTRA_VERSION_NAME
4303 MODULE_INFO(extra_version_name, LTTNG_EXTRA_VERSION_NAME);
4304 #endif
4305 MODULE_LICENSE("GPL and additional rights");
4306 MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
4307 MODULE_DESCRIPTION("LTTng tracer");
4308 MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
4309 __stringify(LTTNG_MODULES_MINOR_VERSION) "."
4310 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
4311 LTTNG_MODULES_EXTRAVERSION);