Fix: _lttng_kernel_event_create never returns NULL
1 /* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
2 *
3 * lttng-events.c
4 *
5 * Holds LTTng per-session event registry.
6 *
7 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 */
9
10 /*
11 * This page_alloc.h wrapper needs to be included before gfpflags.h because it
12 * overrides a function with a define.
13 */
14 #include "wrapper/page_alloc.h"
15
16 #include <linux/module.h>
17 #include <linux/mutex.h>
18 #include <linux/sched.h>
19 #include <linux/slab.h>
20 #include <linux/jiffies.h>
21 #include <linux/utsname.h>
22 #include <linux/err.h>
23 #include <linux/seq_file.h>
24 #include <linux/file.h>
25 #include <linux/anon_inodes.h>
26 #include <wrapper/file.h>
27 #include <linux/uaccess.h>
28 #include <linux/vmalloc.h>
29 #include <linux/dmi.h>
30
31 #include <wrapper/compiler_attributes.h>
32 #include <wrapper/uuid.h>
33 #include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_mappings() */
34 #include <wrapper/random.h>
35 #include <wrapper/tracepoint.h>
36 #include <wrapper/list.h>
37 #include <wrapper/types.h>
38 #include <lttng/kernel-version.h>
39 #include <lttng/events.h>
40 #include <lttng/events-internal.h>
41 #include <lttng/lttng-bytecode.h>
42 #include <lttng/tracer.h>
43 #include <lttng/event-notifier-notification.h>
44 #include <lttng/abi-old.h>
45 #include <lttng/endian.h>
46 #include <lttng/string-utils.h>
47 #include <lttng/utils.h>
48 #include <ringbuffer/backend.h>
49 #include <ringbuffer/frontend.h>
50 #include <wrapper/time.h>
51
52 #define METADATA_CACHE_DEFAULT_SIZE 4096
53
54 static LIST_HEAD(sessions);
55 static LIST_HEAD(event_notifier_groups);
56 static LIST_HEAD(lttng_transport_list);
57 static LIST_HEAD(lttng_counter_transport_list);
58 /*
59 * Protect the sessions and metadata caches.
60 */
61 static DEFINE_MUTEX(sessions_mutex);
62 static struct kmem_cache *event_recorder_cache;
63 static struct kmem_cache *event_recorder_private_cache;
64 static struct kmem_cache *event_notifier_cache;
65 static struct kmem_cache *event_notifier_private_cache;
66
67 static void lttng_session_lazy_sync_event_enablers(struct lttng_kernel_session *session);
68 static void lttng_session_sync_event_enablers(struct lttng_kernel_session *session);
69 static void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group);
70 static void lttng_event_enabler_sync(struct lttng_event_enabler_common *event_enabler);
71
72 static void _lttng_event_destroy(struct lttng_kernel_event_common *event);
73 static void _lttng_channel_destroy(struct lttng_kernel_channel_buffer *chan);
74 static int _lttng_event_unregister(struct lttng_kernel_event_common *event);
75 static
76 int _lttng_event_recorder_metadata_statedump(struct lttng_kernel_event_common *event);
77 static
78 int _lttng_session_metadata_statedump(struct lttng_kernel_session *session);
79 static
80 void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream);
81 static
82 int _lttng_type_statedump(struct lttng_kernel_session *session,
83 const struct lttng_kernel_type_common *type,
84 enum lttng_kernel_string_encoding parent_encoding,
85 size_t nesting);
86 static
87 int _lttng_field_statedump(struct lttng_kernel_session *session,
88 const struct lttng_kernel_event_field *field,
89 size_t nesting, const char **prev_field_name_p);
90
91 void synchronize_trace(void)
92 {
93 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,1,0) || \
94 LTTNG_RHEL_KERNEL_RANGE(4,18,0,193,0,0, 4,19,0,0,0,0))
95 synchronize_rcu();
96 #else
97 synchronize_sched();
98 #endif
99
100 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0))
101 #ifdef CONFIG_PREEMPT_RT_FULL
102 synchronize_rcu();
103 #endif
104 #else /* (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0)) */
105 #ifdef CONFIG_PREEMPT_RT
106 synchronize_rcu();
107 #endif
108 #endif /* (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0)) */
109 }
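/*
 * Note on synchronize_trace() above: the RCU flavors were consolidated in
 * kernel 5.1, so synchronize_rcu() is sufficient there (and on the matching
 * RHEL ranges), while older kernels still need synchronize_sched() to cover
 * probes invoked with preemption disabled. The additional synchronize_rcu()
 * under CONFIG_PREEMPT_RT(_FULL) presumably accounts for probes running in
 * preemptible context on those kernels.
 */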
110
111 void lttng_lock_sessions(void)
112 {
113 mutex_lock(&sessions_mutex);
114 }
115
116 void lttng_unlock_sessions(void)
117 {
118 mutex_unlock(&sessions_mutex);
119 }
120
121 static struct lttng_transport *lttng_transport_find(const char *name)
122 {
123 struct lttng_transport *transport;
124
125 list_for_each_entry(transport, &lttng_transport_list, node) {
126 if (!strcmp(transport->name, name))
127 return transport;
128 }
129 return NULL;
130 }
131
132 /*
133 * Called with sessions lock held.
134 */
135 int lttng_session_active(void)
136 {
137 struct lttng_kernel_session_private *iter;
138
139 list_for_each_entry(iter, &sessions, list) {
140 if (iter->pub->active)
141 return 1;
142 }
143 return 0;
144 }
145
146 struct lttng_kernel_session *lttng_session_create(void)
147 {
148 struct lttng_kernel_session *session;
149 struct lttng_kernel_session_private *session_priv;
150 struct lttng_metadata_cache *metadata_cache;
151 int i;
152
153 mutex_lock(&sessions_mutex);
154 session = lttng_kvzalloc(sizeof(*session), GFP_KERNEL);
155 if (!session)
156 goto err;
157 session_priv = lttng_kvzalloc(sizeof(*session_priv), GFP_KERNEL);
158 if (!session_priv)
159 goto err_free_session;
160 session->priv = session_priv;
161 session_priv->pub = session;
162
163 INIT_LIST_HEAD(&session_priv->chan);
164 INIT_LIST_HEAD(&session_priv->events);
165 lttng_guid_gen(&session_priv->uuid);
166
167 metadata_cache = kzalloc(sizeof(struct lttng_metadata_cache),
168 GFP_KERNEL);
169 if (!metadata_cache)
170 goto err_free_session_private;
171 metadata_cache->data = vzalloc(METADATA_CACHE_DEFAULT_SIZE);
172 if (!metadata_cache->data)
173 goto err_free_cache;
174 metadata_cache->cache_alloc = METADATA_CACHE_DEFAULT_SIZE;
175 kref_init(&metadata_cache->refcount);
176 mutex_init(&metadata_cache->lock);
177 session_priv->metadata_cache = metadata_cache;
178 INIT_LIST_HEAD(&metadata_cache->metadata_stream);
179 memcpy(&metadata_cache->uuid, &session_priv->uuid,
180 sizeof(metadata_cache->uuid));
181 INIT_LIST_HEAD(&session_priv->enablers_head);
182 for (i = 0; i < LTTNG_EVENT_HT_SIZE; i++)
183 INIT_HLIST_HEAD(&session_priv->events_ht.table[i]);
184 list_add(&session_priv->list, &sessions);
185
186 if (lttng_id_tracker_init(&session->pid_tracker, session, TRACKER_PID))
187 goto tracker_alloc_error;
188 if (lttng_id_tracker_init(&session->vpid_tracker, session, TRACKER_VPID))
189 goto tracker_alloc_error;
190 if (lttng_id_tracker_init(&session->uid_tracker, session, TRACKER_UID))
191 goto tracker_alloc_error;
192 if (lttng_id_tracker_init(&session->vuid_tracker, session, TRACKER_VUID))
193 goto tracker_alloc_error;
194 if (lttng_id_tracker_init(&session->gid_tracker, session, TRACKER_GID))
195 goto tracker_alloc_error;
196 if (lttng_id_tracker_init(&session->vgid_tracker, session, TRACKER_VGID))
197 goto tracker_alloc_error;
198
199 mutex_unlock(&sessions_mutex);
200
201 return session;
202
203 tracker_alloc_error:
204 lttng_id_tracker_fini(&session->pid_tracker);
205 lttng_id_tracker_fini(&session->vpid_tracker);
206 lttng_id_tracker_fini(&session->uid_tracker);
207 lttng_id_tracker_fini(&session->vuid_tracker);
208 lttng_id_tracker_fini(&session->gid_tracker);
209 lttng_id_tracker_fini(&session->vgid_tracker);
210 err_free_cache:
211 kfree(metadata_cache);
212 err_free_session_private:
213 lttng_kvfree(session_priv);
214 err_free_session:
215 lttng_kvfree(session);
216 err:
217 mutex_unlock(&sessions_mutex);
218 return NULL;
219 }
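/*
 * Illustrative lifecycle (sketch only; real callers go through the lttng-abi
 * ioctl layer, which also manages file references):
 *
 *	struct lttng_kernel_session *s = lttng_session_create();
 *	if (!s)
 *		return -ENOMEM;
 *	// ... create channels and events through their respective APIs ...
 *	lttng_session_enable(s);
 *	// ... trace ...
 *	lttng_session_disable(s);
 *	lttng_session_destroy(s);
 */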
220
221 static
222 struct lttng_counter_transport *lttng_counter_transport_find(const char *name)
223 {
224 struct lttng_counter_transport *transport;
225
226 list_for_each_entry(transport, &lttng_counter_transport_list, node) {
227 if (!strcmp(transport->name, name))
228 return transport;
229 }
230 return NULL;
231 }
232
233 struct lttng_counter *lttng_kernel_counter_create(
234 const char *counter_transport_name,
235 size_t number_dimensions, const size_t *dimensions_sizes)
236 {
237 struct lttng_counter *counter = NULL;
238 struct lttng_counter_transport *counter_transport = NULL;
239
240 counter_transport = lttng_counter_transport_find(counter_transport_name);
241 if (!counter_transport) {
242 printk(KERN_WARNING "LTTng: counter transport %s not found.\n",
243 counter_transport_name);
244 goto notransport;
245 }
246 if (!try_module_get(counter_transport->owner)) {
247 printk(KERN_WARNING "LTTng: Can't lock counter transport module.\n");
248 goto notransport;
249 }
250
251 counter = lttng_kvzalloc(sizeof(struct lttng_counter), GFP_KERNEL);
252 if (!counter)
253 goto nomem;
254
255 /* Create event notifier error counter. */
256 counter->ops = &counter_transport->ops;
257 counter->transport = counter_transport;
258
259 counter->counter = counter->ops->counter_create(
260 number_dimensions, dimensions_sizes, 0);
261 if (!counter->counter) {
262 goto create_error;
263 }
264
265 return counter;
266
267 create_error:
268 lttng_kvfree(counter);
269 nomem:
270 if (counter_transport)
271 module_put(counter_transport->owner);
272 notransport:
273 return NULL;
274 }
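/*
 * The error handling above follows the usual goto-ladder convention: each
 * label undoes only what was set up before the failure point (module
 * reference, then counter allocation). Unlike _lttng_kernel_event_create()
 * below, this helper reports failure with a plain NULL rather than an
 * ERR_PTR, so callers simply check for NULL.
 */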
275
276 struct lttng_event_notifier_group *lttng_event_notifier_group_create(void)
277 {
278 struct lttng_transport *transport = NULL;
279 struct lttng_event_notifier_group *event_notifier_group;
280 const char *transport_name = "relay-event-notifier";
281 size_t subbuf_size = 4096; //TODO
282 size_t num_subbuf = 16; //TODO
283 unsigned int switch_timer_interval = 0;
284 unsigned int read_timer_interval = 0;
285 int i;
286
287 mutex_lock(&sessions_mutex);
288
289 transport = lttng_transport_find(transport_name);
290 if (!transport) {
291 printk(KERN_WARNING "LTTng: transport %s not found\n",
292 transport_name);
293 goto notransport;
294 }
295 if (!try_module_get(transport->owner)) {
296 printk(KERN_WARNING "LTTng: Can't lock transport %s module.\n",
297 transport_name);
298 goto notransport;
299 }
300
301 event_notifier_group = lttng_kvzalloc(sizeof(struct lttng_event_notifier_group),
302 GFP_KERNEL);
303 if (!event_notifier_group)
304 goto nomem;
305
306 /*
307 * Initialize the ring buffer used to store event notifier
308 * notifications.
309 */
310 event_notifier_group->ops = &transport->ops;
311 event_notifier_group->chan = transport->ops.priv->channel_create(
312 transport_name, event_notifier_group, NULL,
313 subbuf_size, num_subbuf, switch_timer_interval,
314 read_timer_interval);
315 if (!event_notifier_group->chan)
316 goto create_error;
317
318 event_notifier_group->transport = transport;
319
320 INIT_LIST_HEAD(&event_notifier_group->enablers_head);
321 INIT_LIST_HEAD(&event_notifier_group->event_notifiers_head);
322 for (i = 0; i < LTTNG_EVENT_HT_SIZE; i++)
323 INIT_HLIST_HEAD(&event_notifier_group->events_ht.table[i]);
324
325 list_add(&event_notifier_group->node, &event_notifier_groups);
326
327 mutex_unlock(&sessions_mutex);
328
329 return event_notifier_group;
330
331 create_error:
332 lttng_kvfree(event_notifier_group);
333 nomem:
334 if (transport)
335 module_put(transport->owner);
336 notransport:
337 mutex_unlock(&sessions_mutex);
338 return NULL;
339 }
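/*
 * Sketch of what the group above provides: notifications produced when an
 * event notifier fires are serialized into the ring-buffer channel created
 * from the "relay-event-notifier" transport (geometry currently hard-coded,
 * see the TODOs), while the enablers list, event notifiers list and events
 * hash table mirror the per-session structures used for event recorders.
 */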
340
341 void metadata_cache_destroy(struct kref *kref)
342 {
343 struct lttng_metadata_cache *cache =
344 container_of(kref, struct lttng_metadata_cache, refcount);
345 vfree(cache->data);
346 kfree(cache);
347 }
348
349 void lttng_session_destroy(struct lttng_kernel_session *session)
350 {
351 struct lttng_kernel_channel_buffer_private *chan_priv, *tmpchan_priv;
352 struct lttng_kernel_event_recorder_private *event_recorder_priv, *tmpevent_recorder_priv;
353 struct lttng_metadata_stream *metadata_stream;
354 struct lttng_event_enabler_common *event_enabler, *tmp_event_enabler;
355 int ret;
356
357 mutex_lock(&sessions_mutex);
358 WRITE_ONCE(session->active, 0);
359 list_for_each_entry(chan_priv, &session->priv->chan, node) {
360 ret = lttng_syscalls_unregister_syscall_table(&chan_priv->parent.syscall_table);
361 WARN_ON(ret);
362 }
363 list_for_each_entry(event_recorder_priv, &session->priv->events, parent.node) {
364 ret = _lttng_event_unregister(&event_recorder_priv->pub->parent);
365 WARN_ON(ret);
366 }
367 synchronize_trace(); /* Wait for in-flight events to complete */
368 list_for_each_entry(chan_priv, &session->priv->chan, node) {
369 ret = lttng_syscalls_destroy_syscall_table(&chan_priv->parent.syscall_table);
370 WARN_ON(ret);
371 }
372 list_for_each_entry_safe(event_enabler, tmp_event_enabler, &session->priv->enablers_head, node)
373 lttng_event_enabler_destroy(event_enabler);
374 list_for_each_entry_safe(event_recorder_priv, tmpevent_recorder_priv, &session->priv->events, parent.node)
375 _lttng_event_destroy(&event_recorder_priv->pub->parent);
376 list_for_each_entry_safe(chan_priv, tmpchan_priv, &session->priv->chan, node) {
377 BUG_ON(chan_priv->channel_type == METADATA_CHANNEL);
378 _lttng_channel_destroy(chan_priv->pub);
379 }
380 mutex_lock(&session->priv->metadata_cache->lock);
381 list_for_each_entry(metadata_stream, &session->priv->metadata_cache->metadata_stream, list)
382 _lttng_metadata_channel_hangup(metadata_stream);
383 mutex_unlock(&session->priv->metadata_cache->lock);
384 lttng_id_tracker_fini(&session->pid_tracker);
385 lttng_id_tracker_fini(&session->vpid_tracker);
386 lttng_id_tracker_fini(&session->uid_tracker);
387 lttng_id_tracker_fini(&session->vuid_tracker);
388 lttng_id_tracker_fini(&session->gid_tracker);
389 lttng_id_tracker_fini(&session->vgid_tracker);
390 kref_put(&session->priv->metadata_cache->refcount, metadata_cache_destroy);
391 list_del(&session->priv->list);
392 mutex_unlock(&sessions_mutex);
393 lttng_kvfree(session->priv);
394 lttng_kvfree(session);
395 }
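/*
 * Teardown ordering in lttng_session_destroy() matters: events are first
 * unregistered from their instrumentation sources, synchronize_trace() then
 * waits out any probe still executing, and only afterwards are the event and
 * channel objects freed. Freeing before the grace period would let in-flight
 * probes dereference freed memory.
 */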
396
397 void lttng_event_notifier_group_destroy(
398 struct lttng_event_notifier_group *event_notifier_group)
399 {
400 struct lttng_event_enabler_common *event_enabler, *tmp_event_enabler;
401 struct lttng_kernel_event_notifier_private *event_notifier_priv, *tmpevent_notifier_priv;
402 int ret;
403
404 if (!event_notifier_group)
405 return;
406
407 mutex_lock(&sessions_mutex);
408
409 ret = lttng_syscalls_unregister_syscall_table(&event_notifier_group->syscall_table);
410 WARN_ON(ret);
411
412 list_for_each_entry_safe(event_notifier_priv, tmpevent_notifier_priv,
413 &event_notifier_group->event_notifiers_head, parent.node) {
414 ret = _lttng_event_unregister(&event_notifier_priv->pub->parent);
415 WARN_ON(ret);
416 }
417
418 /* Wait for in-flight event notifiers to complete */
419 synchronize_trace();
420
421 irq_work_sync(&event_notifier_group->wakeup_pending);
422
423 ret = lttng_syscalls_destroy_syscall_table(&event_notifier_group->syscall_table);
424 WARN_ON(ret);
425
426 list_for_each_entry_safe(event_enabler, tmp_event_enabler,
427 &event_notifier_group->enablers_head, node)
428 lttng_event_enabler_destroy(event_enabler);
429
430 list_for_each_entry_safe(event_notifier_priv, tmpevent_notifier_priv,
431 &event_notifier_group->event_notifiers_head, parent.node)
432 _lttng_event_destroy(&event_notifier_priv->pub->parent);
433
434 if (event_notifier_group->error_counter) {
435 struct lttng_counter *error_counter = event_notifier_group->error_counter;
436
437 error_counter->ops->counter_destroy(error_counter->counter);
438 module_put(error_counter->transport->owner);
439 lttng_kvfree(error_counter);
440 event_notifier_group->error_counter = NULL;
441 }
442
443 event_notifier_group->ops->priv->channel_destroy(event_notifier_group->chan);
444 module_put(event_notifier_group->transport->owner);
445 list_del(&event_notifier_group->node);
446
447 mutex_unlock(&sessions_mutex);
448 lttng_kvfree(event_notifier_group);
449 }
450
451 int lttng_session_statedump(struct lttng_kernel_session *session)
452 {
453 int ret;
454
455 mutex_lock(&sessions_mutex);
456 ret = lttng_statedump_start(session);
457 mutex_unlock(&sessions_mutex);
458 return ret;
459 }
460
461 int lttng_session_enable(struct lttng_kernel_session *session)
462 {
463 int ret = 0;
464 struct lttng_kernel_channel_buffer_private *chan_priv;
465
466 mutex_lock(&sessions_mutex);
467 if (session->active) {
468 ret = -EBUSY;
469 goto end;
470 }
471
472 /* Set transient enabler state to "enabled" */
473 session->priv->tstate = 1;
474
475 /* We need to sync enablers with session before activation. */
476 lttng_session_sync_event_enablers(session);
477
478 /*
479 * Snapshot the number of events per channel to know the type of header
480 * we need to use.
481 */
482 list_for_each_entry(chan_priv, &session->priv->chan, node) {
483 if (chan_priv->header_type)
484 continue; /* don't change it if session stop/restart */
485 if (chan_priv->free_event_id < 31)
486 chan_priv->header_type = 1; /* compact */
487 else
488 chan_priv->header_type = 2; /* large */
489 }
490
491 /* Clear each stream's quiescent state. */
492 list_for_each_entry(chan_priv, &session->priv->chan, node) {
493 if (chan_priv->channel_type != METADATA_CHANNEL)
494 lib_ring_buffer_clear_quiescent_channel(chan_priv->rb_chan);
495 }
496
497 WRITE_ONCE(session->active, 1);
498 WRITE_ONCE(session->priv->been_active, 1);
499 ret = _lttng_session_metadata_statedump(session);
500 if (ret) {
501 WRITE_ONCE(session->active, 0);
502 goto end;
503 }
504 ret = lttng_statedump_start(session);
505 if (ret)
506 WRITE_ONCE(session->active, 0);
507 end:
508 mutex_unlock(&sessions_mutex);
509 return ret;
510 }
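/*
 * The header_type snapshot above selects the CTF event header layout per
 * channel: fewer than 31 event IDs fits the "compact" header (whose small id
 * field appears to reserve one value as an escape to the extended form),
 * otherwise the "large" header is used. The choice is kept across session
 * stop/restart so packets already written stay consistent.
 */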
511
512 int lttng_session_disable(struct lttng_kernel_session *session)
513 {
514 int ret = 0;
515 struct lttng_kernel_channel_buffer_private *chan_priv;
516
517 mutex_lock(&sessions_mutex);
518 if (!session->active) {
519 ret = -EBUSY;
520 goto end;
521 }
522 WRITE_ONCE(session->active, 0);
523
524 /* Set transient enabler state to "disabled" */
525 session->priv->tstate = 0;
526 lttng_session_sync_event_enablers(session);
527
528 /* Set each stream's quiescent state. */
529 list_for_each_entry(chan_priv, &session->priv->chan, node) {
530 if (chan_priv->channel_type != METADATA_CHANNEL)
531 lib_ring_buffer_set_quiescent_channel(chan_priv->rb_chan);
532 }
533 end:
534 mutex_unlock(&sessions_mutex);
535 return ret;
536 }
537
538 int lttng_session_metadata_regenerate(struct lttng_kernel_session *session)
539 {
540 int ret = 0;
541 struct lttng_kernel_channel_buffer_private *chan_priv;
542 struct lttng_kernel_event_recorder_private *event_recorder_priv;
543 struct lttng_metadata_cache *cache = session->priv->metadata_cache;
544 struct lttng_metadata_stream *stream;
545
546 mutex_lock(&sessions_mutex);
547 if (!session->active) {
548 ret = -EBUSY;
549 goto end;
550 }
551
552 mutex_lock(&cache->lock);
553 memset(cache->data, 0, cache->cache_alloc);
554 cache->metadata_written = 0;
555 cache->version++;
556 list_for_each_entry(stream, &session->priv->metadata_cache->metadata_stream, list) {
557 stream->metadata_out = 0;
558 stream->metadata_in = 0;
559 }
560 mutex_unlock(&cache->lock);
561
562 session->priv->metadata_dumped = 0;
563 list_for_each_entry(chan_priv, &session->priv->chan, node) {
564 chan_priv->metadata_dumped = 0;
565 }
566
567 list_for_each_entry(event_recorder_priv, &session->priv->events, parent.node) {
568 event_recorder_priv->metadata_dumped = 0;
569 }
570
571 ret = _lttng_session_metadata_statedump(session);
572
573 end:
574 mutex_unlock(&sessions_mutex);
575 return ret;
576 }
577
578 static
579 bool is_channel_buffer_metadata(struct lttng_kernel_channel_common *channel)
580 {
581 struct lttng_kernel_channel_buffer *chan_buf;
582
583 if (channel->type != LTTNG_KERNEL_CHANNEL_TYPE_BUFFER)
584 return false;
585 chan_buf = container_of(channel, struct lttng_kernel_channel_buffer, parent);
586 if (chan_buf->priv->channel_type == METADATA_CHANNEL)
587 return true;
588 return false;
589 }
590
591 int lttng_channel_enable(struct lttng_kernel_channel_common *channel)
592 {
593 int ret = 0;
594
595 mutex_lock(&sessions_mutex);
596 if (is_channel_buffer_metadata(channel)) {
597 ret = -EPERM;
598 goto end;
599 }
600 if (channel->enabled) {
601 ret = -EEXIST;
602 goto end;
603 }
604 /* Set transient enabler state to "enabled" */
605 channel->priv->tstate = 1;
606 lttng_session_sync_event_enablers(channel->session);
607 /* Set atomically the state to "enabled" */
608 WRITE_ONCE(channel->enabled, 1);
609 end:
610 mutex_unlock(&sessions_mutex);
611 return ret;
612 }
613
614 int lttng_channel_disable(struct lttng_kernel_channel_common *channel)
615 {
616 int ret = 0;
617
618 mutex_lock(&sessions_mutex);
619 if (is_channel_buffer_metadata(channel)) {
620 ret = -EPERM;
621 goto end;
622 }
623 if (!channel->enabled) {
624 ret = -EEXIST;
625 goto end;
626 }
627 /* Set atomically the state to "disabled" */
628 WRITE_ONCE(channel->enabled, 0);
629 /* Set transient enabler state to "disabled" */
630 channel->priv->tstate = 0;
631 lttng_session_sync_event_enablers(channel->session);
632 end:
633 mutex_unlock(&sessions_mutex);
634 return ret;
635 }
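/*
 * Note the asymmetry with lttng_channel_enable(): on enable, the enablers are
 * synced before the 'enabled' flag is set, so anything observing enabled == 1
 * sees fully synced state; on disable, the flag is cleared first, so
 * recording stops before the enabler state is torn down.
 */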
636
637 int lttng_event_enable(struct lttng_kernel_event_common *event)
638 {
639 int ret = 0;
640
641 mutex_lock(&sessions_mutex);
642 switch (event->type) {
643 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
644 {
645 struct lttng_kernel_event_recorder *event_recorder =
646 container_of(event, struct lttng_kernel_event_recorder, parent);
647
648 if (event_recorder->chan->priv->channel_type == METADATA_CHANNEL) {
649 ret = -EPERM;
650 goto end;
651 }
652 break;
653 }
654 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
655 switch (event->priv->instrumentation) {
656 case LTTNG_KERNEL_ABI_KRETPROBE:
657 ret = -EINVAL;
658 goto end;
659 default:
660 break;
661 }
662 break;
663 default:
664 break;
665 }
666
667 if (event->enabled) {
668 ret = -EEXIST;
669 goto end;
670 }
671 switch (event->priv->instrumentation) {
672 case LTTNG_KERNEL_ABI_TRACEPOINT:
673 lttng_fallthrough;
674 case LTTNG_KERNEL_ABI_SYSCALL:
675 ret = -EINVAL;
676 break;
677
678 case LTTNG_KERNEL_ABI_KPROBE:
679 lttng_fallthrough;
680 case LTTNG_KERNEL_ABI_UPROBE:
681 WRITE_ONCE(event->enabled, 1);
682 break;
683
684 case LTTNG_KERNEL_ABI_KRETPROBE:
685 ret = lttng_kretprobes_event_enable_state(event, 1);
686 break;
687
688 case LTTNG_KERNEL_ABI_FUNCTION:
689 lttng_fallthrough;
690 case LTTNG_KERNEL_ABI_NOOP:
691 lttng_fallthrough;
692 default:
693 WARN_ON_ONCE(1);
694 ret = -EINVAL;
695 }
696 end:
697 mutex_unlock(&sessions_mutex);
698 return ret;
699 }
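/*
 * Tracepoint and syscall events return -EINVAL above, presumably because
 * their enabled state is driven by the enabler synchronization logic
 * (lttng_session_sync_event_enablers() and friends) rather than toggled
 * directly on the event object, whereas kprobe, uprobe and kretprobe events
 * can be flipped individually.
 */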
700
701 int lttng_event_disable(struct lttng_kernel_event_common *event)
702 {
703 int ret = 0;
704
705 mutex_lock(&sessions_mutex);
706 switch (event->type) {
707 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
708 {
709 struct lttng_kernel_event_recorder *event_recorder =
710 container_of(event, struct lttng_kernel_event_recorder, parent);
711
712 if (event_recorder->chan->priv->channel_type == METADATA_CHANNEL) {
713 ret = -EPERM;
714 goto end;
715 }
716 break;
717 }
718 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
719 switch (event->priv->instrumentation) {
720 case LTTNG_KERNEL_ABI_KRETPROBE:
721 ret = -EINVAL;
722 goto end;
723 default:
724 break;
725 }
726 break;
727 default:
728 break;
729 }
730
731 if (!event->enabled) {
732 ret = -EEXIST;
733 goto end;
734 }
735 switch (event->priv->instrumentation) {
736 case LTTNG_KERNEL_ABI_TRACEPOINT:
737 lttng_fallthrough;
738 case LTTNG_KERNEL_ABI_SYSCALL:
739 ret = -EINVAL;
740 break;
741
742 case LTTNG_KERNEL_ABI_KPROBE:
743 lttng_fallthrough;
744 case LTTNG_KERNEL_ABI_UPROBE:
745 WRITE_ONCE(event->enabled, 0);
746 break;
747
748 case LTTNG_KERNEL_ABI_KRETPROBE:
749 ret = lttng_kretprobes_event_enable_state(event, 0);
750 break;
751
752 case LTTNG_KERNEL_ABI_FUNCTION:
753 lttng_fallthrough;
754 case LTTNG_KERNEL_ABI_NOOP:
755 lttng_fallthrough;
756 default:
757 WARN_ON_ONCE(1);
758 ret = -EINVAL;
759 }
760 end:
761 mutex_unlock(&sessions_mutex);
762 return ret;
763 }
764
765 struct lttng_kernel_channel_buffer *lttng_channel_buffer_create(struct lttng_kernel_session *session,
766 const char *transport_name,
767 void *buf_addr,
768 size_t subbuf_size, size_t num_subbuf,
769 unsigned int switch_timer_interval,
770 unsigned int read_timer_interval,
771 enum channel_type channel_type)
772 {
773 struct lttng_kernel_channel_buffer *chan;
774 struct lttng_kernel_channel_buffer_private *chan_priv;
775 struct lttng_transport *transport = NULL;
776
777 mutex_lock(&sessions_mutex);
778 if (session->priv->been_active && channel_type != METADATA_CHANNEL)
779 goto active; /* Refuse to add channel to active session */
780 transport = lttng_transport_find(transport_name);
781 if (!transport) {
782 printk(KERN_WARNING "LTTng: transport %s not found\n",
783 transport_name);
784 goto notransport;
785 }
786 if (!try_module_get(transport->owner)) {
787 printk(KERN_WARNING "LTTng: Can't lock transport module.\n");
788 goto notransport;
789 }
790 chan = kzalloc(sizeof(struct lttng_kernel_channel_buffer), GFP_KERNEL);
791 if (!chan)
792 goto nomem;
793 chan_priv = kzalloc(sizeof(struct lttng_kernel_channel_buffer_private), GFP_KERNEL);
794 if (!chan_priv)
795 goto nomem_priv;
796 chan->priv = chan_priv;
797 chan_priv->pub = chan;
798 chan->parent.type = LTTNG_KERNEL_CHANNEL_TYPE_BUFFER;
799 chan->parent.session = session;
800 chan->priv->id = session->priv->free_chan_id++;
801 chan->ops = &transport->ops;
802 /*
803 * Note: the channel creation op already writes into the packet
804 * headers. Therefore the "chan" information used as input
805 * should be already accessible.
806 */
807 chan->priv->rb_chan = transport->ops.priv->channel_create(transport_name,
808 chan, buf_addr, subbuf_size, num_subbuf,
809 switch_timer_interval, read_timer_interval);
810 if (!chan->priv->rb_chan)
811 goto create_error;
812 chan->priv->parent.tstate = 1;
813 chan->parent.enabled = 1;
814 chan->priv->transport = transport;
815 chan->priv->channel_type = channel_type;
816 list_add(&chan->priv->node, &session->priv->chan);
817 mutex_unlock(&sessions_mutex);
818 return chan;
819
820 create_error:
821 kfree(chan_priv);
822 nomem_priv:
823 kfree(chan);
824 nomem:
825 if (transport)
826 module_put(transport->owner);
827 notransport:
828 active:
829 mutex_unlock(&sessions_mutex);
830 return NULL;
831 }
832
833 /*
834 * Only used internally at session destruction for per-cpu channels, and
835 * when metadata channel is released.
836 * Needs to be called with sessions mutex held.
837 */
838 static
839 void _lttng_channel_destroy(struct lttng_kernel_channel_buffer *chan)
840 {
841 chan->ops->priv->channel_destroy(chan->priv->rb_chan);
842 module_put(chan->priv->transport->owner);
843 list_del(&chan->priv->node);
844 lttng_kernel_destroy_context(chan->priv->ctx);
845 kfree(chan->priv);
846 kfree(chan);
847 }
848
849 void lttng_metadata_channel_destroy(struct lttng_kernel_channel_buffer *chan)
850 {
851 BUG_ON(chan->priv->channel_type != METADATA_CHANNEL);
852
853 /* Protect the metadata cache with the sessions_mutex. */
854 mutex_lock(&sessions_mutex);
855 _lttng_channel_destroy(chan);
856 mutex_unlock(&sessions_mutex);
857 }
858 EXPORT_SYMBOL_GPL(lttng_metadata_channel_destroy);
859
860 static
861 void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream)
862 {
863 stream->finalized = 1;
864 wake_up_interruptible(&stream->read_wait);
865 }
866
867 static
868 bool lttng_kernel_event_id_available(struct lttng_event_enabler_common *event_enabler)
869 {
870 struct lttng_kernel_abi_event *event_param = &event_enabler->event_param;
871 enum lttng_kernel_abi_instrumentation itype = event_param->instrumentation;
872
873 switch (event_enabler->enabler_type) {
874 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
875 {
876 struct lttng_event_recorder_enabler *event_recorder_enabler =
877 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
878 struct lttng_kernel_channel_buffer *chan = event_recorder_enabler->chan;
879
880 switch (itype) {
881 case LTTNG_KERNEL_ABI_TRACEPOINT:
882 lttng_fallthrough;
883 case LTTNG_KERNEL_ABI_KPROBE:
884 lttng_fallthrough;
885 case LTTNG_KERNEL_ABI_SYSCALL:
886 lttng_fallthrough;
887 case LTTNG_KERNEL_ABI_UPROBE:
888 if (chan->priv->free_event_id == -1U)
889 return false;
890 return true;
891 case LTTNG_KERNEL_ABI_KRETPROBE:
892 /* kretprobes require 2 event IDs. */
893 if (chan->priv->free_event_id >= -2U)
894 return false;
895 return true;
896 default:
897 WARN_ON_ONCE(1);
898 return false;
899 }
900 }
901 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
902 return true;
903 default:
904 WARN_ON_ONCE(1);
905 return false;
906 }
907 }
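/*
 * The unsigned comparisons above encode "room left" checks on the 32-bit
 * event ID counter: free_event_id == -1U (0xffffffff) means the ID space is
 * exhausted, and free_event_id >= -2U means fewer than two IDs remain, which
 * is not enough for a kretprobe since it consumes one ID for the entry event
 * and one for the return event.
 */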
908
909 static
910 struct lttng_kernel_event_common *lttng_kernel_event_alloc(struct lttng_event_enabler_common *event_enabler)
911 {
912 struct lttng_kernel_abi_event *event_param = &event_enabler->event_param;
913 enum lttng_kernel_abi_instrumentation itype = event_param->instrumentation;
914
915 switch (event_enabler->enabler_type) {
916 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
917 {
918 struct lttng_event_recorder_enabler *event_recorder_enabler =
919 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
920 struct lttng_kernel_event_recorder *event_recorder;
921 struct lttng_kernel_event_recorder_private *event_recorder_priv;
922 struct lttng_kernel_channel_buffer *chan = event_recorder_enabler->chan;
923
924 event_recorder = kmem_cache_zalloc(event_recorder_cache, GFP_KERNEL);
925 if (!event_recorder)
926 return NULL;
927 event_recorder_priv = kmem_cache_zalloc(event_recorder_private_cache, GFP_KERNEL);
928 if (!event_recorder_priv) {
929 kmem_cache_free(event_recorder_cache, event_recorder);
930 return NULL;
931 }
932 event_recorder_priv->pub = event_recorder;
933 event_recorder_priv->parent.pub = &event_recorder->parent;
934 event_recorder->priv = event_recorder_priv;
935 event_recorder->parent.priv = &event_recorder_priv->parent;
936
937 event_recorder->parent.type = LTTNG_KERNEL_EVENT_TYPE_RECORDER;
938 event_recorder->parent.run_filter = lttng_kernel_interpret_event_filter;
939 event_recorder->priv->parent.instrumentation = itype;
940 INIT_LIST_HEAD(&event_recorder->priv->parent.filter_bytecode_runtime_head);
941 INIT_LIST_HEAD(&event_recorder->priv->parent.enablers_ref_head);
942
943 event_recorder->chan = chan;
944 event_recorder->priv->id = chan->priv->free_event_id++;
945 return &event_recorder->parent;
946 }
947 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
948 {
949 struct lttng_event_notifier_enabler *event_notifier_enabler =
950 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
951 struct lttng_kernel_event_notifier *event_notifier;
952 struct lttng_kernel_event_notifier_private *event_notifier_priv;
953
954 event_notifier = kmem_cache_zalloc(event_notifier_cache, GFP_KERNEL);
955 if (!event_notifier)
956 return NULL;
957 event_notifier_priv = kmem_cache_zalloc(event_notifier_private_cache, GFP_KERNEL);
958 if (!event_notifier_priv) {
959 kmem_cache_free(event_notifier_cache, event_notifier);
960 return NULL;
961 }
962 event_notifier_priv->pub = event_notifier;
963 event_notifier_priv->parent.pub = &event_notifier->parent;
964 event_notifier->priv = event_notifier_priv;
965 event_notifier->parent.priv = &event_notifier_priv->parent;
966
967 event_notifier->parent.type = LTTNG_KERNEL_EVENT_TYPE_NOTIFIER;
968 event_notifier->parent.run_filter = lttng_kernel_interpret_event_filter;
969 event_notifier->priv->parent.instrumentation = itype;
970 event_notifier->priv->parent.user_token = event_enabler->user_token;
971 INIT_LIST_HEAD(&event_notifier->priv->parent.filter_bytecode_runtime_head);
972 INIT_LIST_HEAD(&event_notifier->priv->parent.enablers_ref_head);
973
974 event_notifier->priv->group = event_notifier_enabler->group;
975 event_notifier->priv->error_counter_index = event_notifier_enabler->error_counter_index;
976 event_notifier->priv->num_captures = 0;
977 event_notifier->notification_send = lttng_event_notifier_notification_send;
978 INIT_LIST_HEAD(&event_notifier->priv->capture_bytecode_runtime_head);
979 return &event_notifier->parent;
980 }
981 default:
982 return NULL;
983 }
984 }
985
986 static
987 void lttng_kernel_event_free(struct lttng_kernel_event_common *event)
988 {
989 switch (event->type) {
990 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
991 {
992 struct lttng_kernel_event_recorder *event_recorder =
993 container_of(event, struct lttng_kernel_event_recorder, parent);
994
995 kmem_cache_free(event_recorder_private_cache, event_recorder->priv);
996 kmem_cache_free(event_recorder_cache, event_recorder);
997 break;
998 }
999 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
1000 {
1001 struct lttng_kernel_event_notifier *event_notifier =
1002 container_of(event, struct lttng_kernel_event_notifier, parent);
1003
1004 kmem_cache_free(event_notifier_private_cache, event_notifier->priv);
1005 kmem_cache_free(event_notifier_cache, event_notifier);
1006 break;
1007 }
1008 default:
1009 WARN_ON_ONCE(1);
1010 }
1011 }
1012
1013 static
1014 int lttng_kernel_event_notifier_clear_error_counter(struct lttng_kernel_event_common *event)
1015 {
1016 switch (event->type) {
1017 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
1018 return 0;
1019 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
1020 {
1021 struct lttng_kernel_event_notifier *event_notifier =
1022 container_of(event, struct lttng_kernel_event_notifier, parent);
1023 struct lttng_counter *error_counter;
1024 struct lttng_event_notifier_group *event_notifier_group = event_notifier->priv->group;
1025 size_t dimension_index[1];
1026 int ret;
1027
1028 /*
1029 * Clear the error counter bucket. The sessiond keeps track of which
1030 * bucket is currently in use. We trust it. The session lock
1031 * synchronizes against concurrent creation of the error
1032 * counter.
1033 */
1034 error_counter = event_notifier_group->error_counter;
1035 if (!error_counter)
1036 return 0;
1037 /*
1038 * Check that the index is within the boundary of the counter.
1039 */
1040 if (event_notifier->priv->error_counter_index >= event_notifier_group->error_counter_len) {
1041 printk(KERN_INFO "LTTng: event_notifier: Error counter index out-of-bound: counter-len=%zu, index=%llu\n",
1042 event_notifier_group->error_counter_len, event_notifier->priv->error_counter_index);
1043 return -EINVAL;
1044 }
1045
1046 dimension_index[0] = event_notifier->priv->error_counter_index;
1047 ret = error_counter->ops->counter_clear(error_counter->counter, dimension_index);
1048 if (ret) {
1049 printk(KERN_INFO "LTTng: event_notifier: Unable to clear error counter bucket %llu\n",
1050 event_notifier->priv->error_counter_index);
1051 return -EINVAL;
1052 }
1053 return 0;
1054 }
1055 default:
1056 return -EINVAL;
1057 }
1058 }
1059
1060 /*
1061 * Supports event creation while tracing session is active.
1062 * Needs to be called with sessions mutex held.
1063 */
1064 struct lttng_kernel_event_common *_lttng_kernel_event_create(struct lttng_event_enabler_common *event_enabler,
1065 const struct lttng_kernel_event_desc *event_desc)
1066 {
1067 struct lttng_event_ht *events_ht = lttng_get_event_ht_from_enabler(event_enabler);
1068 struct list_head *event_list_head = lttng_get_event_list_head_from_enabler(event_enabler);
1069 struct lttng_kernel_abi_event *event_param = &event_enabler->event_param;
1070 enum lttng_kernel_abi_instrumentation itype = event_param->instrumentation;
1071 struct lttng_kernel_event_common_private *event_priv;
1072 struct lttng_kernel_event_common *event;
1073 const char *event_name;
1074 struct hlist_head *head;
1075 int ret;
1076
1077 if (!lttng_kernel_event_id_available(event_enabler)) {
1078 ret = -EMFILE;
1079 goto full;
1080 }
1081
1082 switch (itype) {
1083 case LTTNG_KERNEL_ABI_TRACEPOINT:
1084 event_name = event_desc->event_name;
1085 break;
1086
1087 case LTTNG_KERNEL_ABI_KPROBE:
1088 lttng_fallthrough;
1089 case LTTNG_KERNEL_ABI_UPROBE:
1090 lttng_fallthrough;
1091 case LTTNG_KERNEL_ABI_KRETPROBE:
1092 lttng_fallthrough;
1093 case LTTNG_KERNEL_ABI_SYSCALL:
1094 event_name = event_param->name;
1095 break;
1096
1097 case LTTNG_KERNEL_ABI_FUNCTION:
1098 lttng_fallthrough;
1099 case LTTNG_KERNEL_ABI_NOOP:
1100 lttng_fallthrough;
1101 default:
1102 WARN_ON_ONCE(1);
1103 ret = -EINVAL;
1104 goto type_error;
1105 }
1106
1107 head = utils_borrow_hash_table_bucket(events_ht->table, LTTNG_EVENT_HT_SIZE, event_name);
1108 lttng_hlist_for_each_entry(event_priv, head, hlist_node) {
1109 if (lttng_event_enabler_event_name_match_event(event_enabler, event_name, event_priv->pub)) {
1110 ret = -EEXIST;
1111 goto exist;
1112 }
1113 }
1114
1115 event = lttng_kernel_event_alloc(event_enabler);
1116 if (!event) {
1117 ret = -ENOMEM;
1118 goto alloc_error;
1119 }
1120
1121 switch (itype) {
1122 case LTTNG_KERNEL_ABI_TRACEPOINT:
1123 /* Event will be enabled by enabler sync. */
1124 event->enabled = 0;
1125 event->priv->registered = 0;
1126 event->priv->desc = lttng_event_desc_get(event_name);
1127 if (!event->priv->desc) {
1128 ret = -ENOENT;
1129 goto register_error;
1130 }
1131 /* Populate lttng_event structure before event registration. */
1132 smp_wmb();
1133 break;
1134
1135 case LTTNG_KERNEL_ABI_KPROBE:
1136 /*
1137 * Needs to be explicitly enabled after creation, since
1138 * we may want to apply filters.
1139 */
1140 event->enabled = 0;
1141 event->priv->registered = 1;
1142 /*
1143 * Populate lttng_event structure before event
1144 * registration.
1145 */
1146 smp_wmb();
1147 ret = lttng_kprobes_register_event(event_name,
1148 event_param->u.kprobe.symbol_name,
1149 event_param->u.kprobe.offset,
1150 event_param->u.kprobe.addr,
1151 event);
1152 if (ret) {
1153 ret = -EINVAL;
1154 goto register_error;
1155 }
1156 ret = try_module_get(event->priv->desc->owner);
1157 WARN_ON_ONCE(!ret);
1158 break;
1159
1160 case LTTNG_KERNEL_ABI_KRETPROBE:
1161 {
1162 struct lttng_kernel_event_common *event_return;
1163
1164 /* kretprobe defines 2 events */
1165 /*
1166 * Needs to be explicitly enabled after creation, since
1167 * we may want to apply filters.
1168 */
1169 event->enabled = 0;
1170 event->priv->registered = 1;
1171
1172 event_return = lttng_kernel_event_alloc(event_enabler);
1173 if (!event_return) {
1174 ret = -ENOMEM;
1175 goto alloc_error;
1176 }
1177
1178 event_return->enabled = 0;
1179 event_return->priv->registered = 1;
1180
1181 /*
1182 * Populate lttng_event structure before kretprobe registration.
1183 */
1184 smp_wmb();
1185 ret = lttng_kretprobes_register(event_name,
1186 event_param->u.kretprobe.symbol_name,
1187 event_param->u.kretprobe.offset,
1188 event_param->u.kretprobe.addr,
1189 event, event_return);
1190 if (ret) {
1191 lttng_kernel_event_free(event_return);
1192 ret = -EINVAL;
1193 goto register_error;
1194 }
1195 /* Take 2 refs on the module: one per event. */
1196 ret = try_module_get(event->priv->desc->owner);
1197 WARN_ON_ONCE(!ret);
1198 ret = try_module_get(event_return->priv->desc->owner);
1199 WARN_ON_ONCE(!ret);
1200 ret = _lttng_event_recorder_metadata_statedump(event_return);
1201 WARN_ON_ONCE(ret > 0);
1202 if (ret) {
1203 module_put(event_return->priv->desc->owner);
1204 module_put(event->priv->desc->owner);
1205 lttng_kernel_event_free(event_return);
1206 goto statedump_error;
1207 }
1208 list_add(&event_return->priv->node, event_list_head);
1209 break;
1210 }
1211
1212 case LTTNG_KERNEL_ABI_SYSCALL:
1213 /*
1214 * Needs to be explicitly enabled after creation, since
1215 * we may want to apply filters.
1216 */
1217 event->enabled = 0;
1218 event->priv->registered = 0;
1219 event->priv->desc = event_desc;
1220 switch (event_param->u.syscall.entryexit) {
1221 case LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT:
1222 ret = -EINVAL;
1223 goto register_error;
1224 case LTTNG_KERNEL_ABI_SYSCALL_ENTRY:
1225 event->priv->u.syscall.entryexit = LTTNG_SYSCALL_ENTRY;
1226 break;
1227 case LTTNG_KERNEL_ABI_SYSCALL_EXIT:
1228 event->priv->u.syscall.entryexit = LTTNG_SYSCALL_EXIT;
1229 break;
1230 }
1231 switch (event_param->u.syscall.abi) {
1232 case LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL:
1233 ret = -EINVAL;
1234 goto register_error;
1235 case LTTNG_KERNEL_ABI_SYSCALL_ABI_NATIVE:
1236 event->priv->u.syscall.abi = LTTNG_SYSCALL_ABI_NATIVE;
1237 break;
1238 case LTTNG_KERNEL_ABI_SYSCALL_ABI_COMPAT:
1239 event->priv->u.syscall.abi = LTTNG_SYSCALL_ABI_COMPAT;
1240 break;
1241 }
1242 if (!event->priv->desc) {
1243 ret = -EINVAL;
1244 goto register_error;
1245 }
1246 break;
1247
1248 case LTTNG_KERNEL_ABI_UPROBE:
1249 /*
1250 * Needs to be explicitly enabled after creation, since
1251 * we may want to apply filters.
1252 */
1253 event->enabled = 0;
1254 event->priv->registered = 1;
1255
1256 /*
1257 * Populate lttng_event structure before event
1258 * registration.
1259 */
1260 smp_wmb();
1261
1262 ret = lttng_uprobes_register_event(event_param->name,
1263 event_param->u.uprobe.fd,
1264 event);
1265 if (ret)
1266 goto register_error;
1267 ret = try_module_get(event->priv->desc->owner);
1268 WARN_ON_ONCE(!ret);
1269 break;
1270
1271 case LTTNG_KERNEL_ABI_FUNCTION:
1272 lttng_fallthrough;
1273 case LTTNG_KERNEL_ABI_NOOP:
1274 lttng_fallthrough;
1275 default:
1276 WARN_ON_ONCE(1);
1277 ret = -EINVAL;
1278 goto register_error;
1279 }
1280
1281 ret = _lttng_event_recorder_metadata_statedump(event);
1282 WARN_ON_ONCE(ret > 0);
1283 if (ret) {
1284 goto statedump_error;
1285 }
1286
1287 ret = lttng_kernel_event_notifier_clear_error_counter(event);
1288 if (ret)
1289 goto register_error;
1290
1291 hlist_add_head(&event->priv->hlist_node, head);
1292 list_add(&event->priv->node, event_list_head);
1293
1294 return event;
1295
1296 statedump_error:
1297 /* If a statedump error occurs, events will not be readable. */
1298 register_error:
1299 lttng_kernel_event_free(event);
1300 alloc_error:
1301 exist:
1302 type_error:
1303 full:
1304 return ERR_PTR(ret);
1305 }
1306
1307 struct lttng_kernel_event_common *lttng_kernel_event_create(struct lttng_event_enabler_common *event_enabler,
1308 const struct lttng_kernel_event_desc *event_desc)
1309 {
1310 struct lttng_kernel_event_common *event;
1311
1312 mutex_lock(&sessions_mutex);
1313 event = _lttng_kernel_event_create(event_enabler, event_desc);
1314 mutex_unlock(&sessions_mutex);
1315 return event;
1316 }
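/*
 * As per the commit subject, _lttng_kernel_event_create() (and therefore this
 * locked wrapper) never returns NULL: every failure path funnels into
 * "return ERR_PTR(ret)". Callers therefore check the result with IS_ERR(),
 * e.g. (sketch):
 *
 *	event = lttng_kernel_event_create(enabler, desc);
 *	if (IS_ERR(event))
 *		return PTR_ERR(event);
 */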
1317
1318 int lttng_kernel_counter_read(struct lttng_counter *counter,
1319 const size_t *dim_indexes, int32_t cpu,
1320 int64_t *val, bool *overflow, bool *underflow)
1321 {
1322 return counter->ops->counter_read(counter->counter, dim_indexes,
1323 cpu, val, overflow, underflow);
1324 }
1325
1326 int lttng_kernel_counter_aggregate(struct lttng_counter *counter,
1327 const size_t *dim_indexes, int64_t *val,
1328 bool *overflow, bool *underflow)
1329 {
1330 return counter->ops->counter_aggregate(counter->counter, dim_indexes,
1331 val, overflow, underflow);
1332 }
1333
1334 int lttng_kernel_counter_clear(struct lttng_counter *counter,
1335 const size_t *dim_indexes)
1336 {
1337 return counter->ops->counter_clear(counter->counter, dim_indexes);
1338 }
1339
1340 /* Only used for tracepoints for now. */
1341 static
1342 void register_event(struct lttng_kernel_event_common *event)
1343 {
1344 const struct lttng_kernel_event_desc *desc;
1345 int ret = -EINVAL;
1346
1347 if (event->priv->registered)
1348 return;
1349
1350 desc = event->priv->desc;
1351 switch (event->priv->instrumentation) {
1352 case LTTNG_KERNEL_ABI_TRACEPOINT:
1353 ret = lttng_wrapper_tracepoint_probe_register(desc->event_kname,
1354 desc->tp_class->probe_callback,
1355 event);
1356 break;
1357
1358 case LTTNG_KERNEL_ABI_SYSCALL:
1359 ret = lttng_syscall_filter_enable_event(event);
1360 break;
1361
1362 case LTTNG_KERNEL_ABI_KPROBE:
1363 lttng_fallthrough;
1364 case LTTNG_KERNEL_ABI_UPROBE:
1365 ret = 0;
1366 break;
1367
1368 case LTTNG_KERNEL_ABI_KRETPROBE:
1369 switch (event->type) {
1370 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
1371 ret = 0;
1372 break;
1373 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
1374 WARN_ON_ONCE(1);
1375 break;
1376 }
1377 break;
1378
1379 case LTTNG_KERNEL_ABI_FUNCTION:
1380 lttng_fallthrough;
1381 case LTTNG_KERNEL_ABI_NOOP:
1382 lttng_fallthrough;
1383 default:
1384 WARN_ON_ONCE(1);
1385 }
1386 if (!ret)
1387 event->priv->registered = 1;
1388 }
1389
1390 int _lttng_event_unregister(struct lttng_kernel_event_common *event)
1391 {
1392 struct lttng_kernel_event_common_private *event_priv = event->priv;
1393 const struct lttng_kernel_event_desc *desc;
1394 int ret = -EINVAL;
1395
1396 if (!event_priv->registered)
1397 return 0;
1398
1399 desc = event_priv->desc;
1400 switch (event_priv->instrumentation) {
1401 case LTTNG_KERNEL_ABI_TRACEPOINT:
1402 ret = lttng_wrapper_tracepoint_probe_unregister(event_priv->desc->event_kname,
1403 event_priv->desc->tp_class->probe_callback,
1404 event);
1405 break;
1406
1407 case LTTNG_KERNEL_ABI_KPROBE:
1408 lttng_kprobes_unregister_event(event);
1409 ret = 0;
1410 break;
1411
1412 case LTTNG_KERNEL_ABI_KRETPROBE:
1413 switch (event->type) {
1414 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
1415 lttng_kretprobes_unregister(event);
1416 ret = 0;
1417 break;
1418 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
1419 WARN_ON_ONCE(1);
1420 break;
1421 }
1422 break;
1423
1424 case LTTNG_KERNEL_ABI_SYSCALL:
1425 ret = lttng_syscall_filter_disable_event(event);
1426 break;
1427
1428 case LTTNG_KERNEL_ABI_NOOP:
1429 switch (event->type) {
1430 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
1431 ret = 0;
1432 break;
1433 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
1434 WARN_ON_ONCE(1);
1435 break;
1436 }
1437 break;
1438
1439 case LTTNG_KERNEL_ABI_UPROBE:
1440 lttng_uprobes_unregister_event(event);
1441 ret = 0;
1442 break;
1443
1444 case LTTNG_KERNEL_ABI_FUNCTION:
1445 lttng_fallthrough;
1446 default:
1447 WARN_ON_ONCE(1);
1448 }
1449 if (!ret)
1450 event_priv->registered = 0;
1451 return ret;
1452 }
1453
1454 /*
1455 * Only used internally at session destruction.
1456 */
1457 static
1458 void _lttng_event_destroy(struct lttng_kernel_event_common *event)
1459 {
1460 struct lttng_kernel_event_common_private *event_priv = event->priv;
1461 struct lttng_enabler_ref *enabler_ref, *tmp_enabler_ref;
1462
1463 lttng_free_event_filter_runtime(event);
1464 /* Free event enabler refs */
1465 list_for_each_entry_safe(enabler_ref, tmp_enabler_ref,
1466 &event_priv->enablers_ref_head, node)
1467 kfree(enabler_ref);
1468
1469 switch (event->type) {
1470 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
1471 {
1472 struct lttng_kernel_event_recorder *event_recorder =
1473 container_of(event, struct lttng_kernel_event_recorder, parent);
1474
1475 switch (event_priv->instrumentation) {
1476 case LTTNG_KERNEL_ABI_TRACEPOINT:
1477 lttng_event_desc_put(event_priv->desc);
1478 break;
1479
1480 case LTTNG_KERNEL_ABI_KPROBE:
1481 module_put(event_priv->desc->owner);
1482 lttng_kprobes_destroy_event_private(&event_recorder->parent);
1483 break;
1484
1485 case LTTNG_KERNEL_ABI_KRETPROBE:
1486 module_put(event_priv->desc->owner);
1487 lttng_kretprobes_destroy_private(&event_recorder->parent);
1488 break;
1489
1490 case LTTNG_KERNEL_ABI_SYSCALL:
1491 break;
1492
1493 case LTTNG_KERNEL_ABI_UPROBE:
1494 module_put(event_priv->desc->owner);
1495 lttng_uprobes_destroy_event_private(&event_recorder->parent);
1496 break;
1497
1498 case LTTNG_KERNEL_ABI_FUNCTION:
1499 lttng_fallthrough;
1500 case LTTNG_KERNEL_ABI_NOOP:
1501 lttng_fallthrough;
1502 default:
1503 WARN_ON_ONCE(1);
1504 }
1505 list_del(&event_recorder->priv->parent.node);
1506 kmem_cache_free(event_recorder_private_cache, event_recorder->priv);
1507 kmem_cache_free(event_recorder_cache, event_recorder);
1508 break;
1509 }
1510 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
1511 {
1512 struct lttng_kernel_event_notifier *event_notifier =
1513 container_of(event, struct lttng_kernel_event_notifier, parent);
1514
1515 switch (event_notifier->priv->parent.instrumentation) {
1516 case LTTNG_KERNEL_ABI_TRACEPOINT:
1517 lttng_event_desc_put(event_notifier->priv->parent.desc);
1518 break;
1519
1520 case LTTNG_KERNEL_ABI_KPROBE:
1521 module_put(event_notifier->priv->parent.desc->owner);
1522 lttng_kprobes_destroy_event_private(&event_notifier->parent);
1523 break;
1524
1525 case LTTNG_KERNEL_ABI_SYSCALL:
1526 break;
1527
1528 case LTTNG_KERNEL_ABI_UPROBE:
1529 module_put(event_notifier->priv->parent.desc->owner);
1530 lttng_uprobes_destroy_event_private(&event_notifier->parent);
1531 break;
1532
1533 case LTTNG_KERNEL_ABI_KRETPROBE:
1534 lttng_fallthrough;
1535 case LTTNG_KERNEL_ABI_FUNCTION:
1536 lttng_fallthrough;
1537 case LTTNG_KERNEL_ABI_NOOP:
1538 lttng_fallthrough;
1539 default:
1540 WARN_ON_ONCE(1);
1541 }
1542 list_del(&event_notifier->priv->parent.node);
1543 kmem_cache_free(event_notifier_private_cache, event_notifier->priv);
1544 kmem_cache_free(event_notifier_cache, event_notifier);
1545 break;
1546 }
1547 default:
1548 WARN_ON_ONCE(1);
1549 }
1550 }
1551
1552 struct lttng_kernel_id_tracker *get_tracker(struct lttng_kernel_session *session,
1553 enum tracker_type tracker_type)
1554 {
1555 switch (tracker_type) {
1556 case TRACKER_PID:
1557 return &session->pid_tracker;
1558 case TRACKER_VPID:
1559 return &session->vpid_tracker;
1560 case TRACKER_UID:
1561 return &session->uid_tracker;
1562 case TRACKER_VUID:
1563 return &session->vuid_tracker;
1564 case TRACKER_GID:
1565 return &session->gid_tracker;
1566 case TRACKER_VGID:
1567 return &session->vgid_tracker;
1568 default:
1569 WARN_ON_ONCE(1);
1570 return NULL;
1571 }
1572 }
1573
1574 int lttng_session_track_id(struct lttng_kernel_session *session,
1575 enum tracker_type tracker_type, int id)
1576 {
1577 struct lttng_kernel_id_tracker *tracker;
1578 int ret;
1579
1580 tracker = get_tracker(session, tracker_type);
1581 if (!tracker)
1582 return -EINVAL;
1583 if (id < -1)
1584 return -EINVAL;
1585 mutex_lock(&sessions_mutex);
1586 if (id == -1) {
1587 /* track all ids: destroy tracker. */
1588 lttng_id_tracker_destroy(tracker, true);
1589 ret = 0;
1590 } else {
1591 ret = lttng_id_tracker_add(tracker, id);
1592 }
1593 mutex_unlock(&sessions_mutex);
1594 return ret;
1595 }
1596
1597 int lttng_session_untrack_id(struct lttng_kernel_session *session,
1598 enum tracker_type tracker_type, int id)
1599 {
1600 struct lttng_kernel_id_tracker *tracker;
1601 int ret;
1602
1603 tracker = get_tracker(session, tracker_type);
1604 if (!tracker)
1605 return -EINVAL;
1606 if (id < -1)
1607 return -EINVAL;
1608 mutex_lock(&sessions_mutex);
1609 if (id == -1) {
1610 /* untrack all ids: replace by empty tracker. */
1611 ret = lttng_id_tracker_empty_set(tracker);
1612 } else {
1613 ret = lttng_id_tracker_del(tracker, id);
1614 }
1615 mutex_unlock(&sessions_mutex);
1616 return ret;
1617 }
1618
1619 static
1620 void *id_list_start(struct seq_file *m, loff_t *pos)
1621 {
1622 struct lttng_kernel_id_tracker *id_tracker = m->private;
1623 struct lttng_kernel_id_tracker_rcu *id_tracker_p = id_tracker->p;
1624 struct lttng_id_hash_node *e;
1625 int iter = 0, i;
1626
1627 mutex_lock(&sessions_mutex);
1628 if (id_tracker_p) {
1629 for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
1630 struct hlist_head *head = &id_tracker_p->id_hash[i];
1631
1632 lttng_hlist_for_each_entry(e, head, hlist) {
1633 if (iter++ >= *pos)
1634 return e;
1635 }
1636 }
1637 } else {
1638 /* ID tracker disabled. */
1639 if (iter >= *pos && iter == 0) {
1640 return id_tracker_p; /* empty tracker */
1641 }
1642 iter++;
1643 }
1644 /* End of list */
1645 return NULL;
1646 }
1647
1648 /* Called with sessions_mutex held. */
1649 static
1650 void *id_list_next(struct seq_file *m, void *p, loff_t *ppos)
1651 {
1652 struct lttng_kernel_id_tracker *id_tracker = m->private;
1653 struct lttng_kernel_id_tracker_rcu *id_tracker_p = id_tracker->p;
1654 struct lttng_id_hash_node *e;
1655 int iter = 0, i;
1656
1657 (*ppos)++;
1658 if (id_tracker_p) {
1659 for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
1660 struct hlist_head *head = &id_tracker_p->id_hash[i];
1661
1662 lttng_hlist_for_each_entry(e, head, hlist) {
1663 if (iter++ >= *ppos)
1664 return e;
1665 }
1666 }
1667 } else {
1668 /* ID tracker disabled. */
1669 if (iter >= *ppos && iter == 0)
1670 return p; /* empty tracker */
1671 iter++;
1672 }
1673
1674 /* End of list */
1675 return NULL;
1676 }
1677
1678 static
1679 void id_list_stop(struct seq_file *m, void *p)
1680 {
1681 mutex_unlock(&sessions_mutex);
1682 }
1683
1684 static
1685 int id_list_show(struct seq_file *m, void *p)
1686 {
1687 struct lttng_kernel_id_tracker *id_tracker = m->private;
1688 struct lttng_kernel_id_tracker_rcu *id_tracker_p = id_tracker->p;
1689 int id;
1690
1691 if (p == id_tracker_p) {
1692 /* Tracker disabled. */
1693 id = -1;
1694 } else {
1695 const struct lttng_id_hash_node *e = p;
1696
1697 id = lttng_id_tracker_get_node_id(e);
1698 }
1699 switch (id_tracker->priv->tracker_type) {
1700 case TRACKER_PID:
1701 seq_printf(m, "process { pid = %d; };\n", id);
1702 break;
1703 case TRACKER_VPID:
1704 seq_printf(m, "process { vpid = %d; };\n", id);
1705 break;
1706 case TRACKER_UID:
1707 seq_printf(m, "user { uid = %d; };\n", id);
1708 break;
1709 case TRACKER_VUID:
1710 seq_printf(m, "user { vuid = %d; };\n", id);
1711 break;
1712 case TRACKER_GID:
1713 seq_printf(m, "group { gid = %d; };\n", id);
1714 break;
1715 case TRACKER_VGID:
1716 seq_printf(m, "group { vgid = %d; };\n", id);
1717 break;
1718 default:
1719 seq_printf(m, "UNKNOWN { field = %d };\n", id);
1720 }
1721 return 0;
1722 }
1723
1724 static
1725 const struct seq_operations lttng_tracker_ids_list_seq_ops = {
1726 .start = id_list_start,
1727 .next = id_list_next,
1728 .stop = id_list_stop,
1729 .show = id_list_show,
1730 };
1731
1732 static
1733 int lttng_tracker_ids_list_open(struct inode *inode, struct file *file)
1734 {
1735 return seq_open(file, &lttng_tracker_ids_list_seq_ops);
1736 }
1737
1738 static
1739 int lttng_tracker_ids_list_release(struct inode *inode, struct file *file)
1740 {
1741 struct seq_file *m = file->private_data;
1742 struct lttng_kernel_id_tracker *id_tracker = m->private;
1743 int ret;
1744
1745 WARN_ON_ONCE(!id_tracker);
1746 ret = seq_release(inode, file);
1747 if (!ret)
1748 fput(id_tracker->priv->session->priv->file);
1749 return ret;
1750 }
1751
1752 const struct file_operations lttng_tracker_ids_list_fops = {
1753 .owner = THIS_MODULE,
1754 .open = lttng_tracker_ids_list_open,
1755 .read = seq_read,
1756 .llseek = seq_lseek,
1757 .release = lttng_tracker_ids_list_release,
1758 };
1759
1760 int lttng_session_list_tracker_ids(struct lttng_kernel_session *session,
1761 enum tracker_type tracker_type)
1762 {
1763 struct file *tracker_ids_list_file;
1764 struct seq_file *m;
1765 int file_fd, ret;
1766
1767 file_fd = lttng_get_unused_fd();
1768 if (file_fd < 0) {
1769 ret = file_fd;
1770 goto fd_error;
1771 }
1772
1773 tracker_ids_list_file = anon_inode_getfile("[lttng_tracker_ids_list]",
1774 &lttng_tracker_ids_list_fops,
1775 NULL, O_RDWR);
1776 if (IS_ERR(tracker_ids_list_file)) {
1777 ret = PTR_ERR(tracker_ids_list_file);
1778 goto file_error;
1779 }
1780 if (!atomic_long_add_unless(&session->priv->file->f_count, 1, LONG_MAX)) {
1781 ret = -EOVERFLOW;
1782 goto refcount_error;
1783 }
1784 ret = lttng_tracker_ids_list_fops.open(NULL, tracker_ids_list_file);
1785 if (ret < 0)
1786 goto open_error;
1787 m = tracker_ids_list_file->private_data;
1788
1789 m->private = get_tracker(session, tracker_type);
1790 BUG_ON(!m->private);
1791 fd_install(file_fd, tracker_ids_list_file);
1792
1793 return file_fd;
1794
1795 open_error:
1796 atomic_long_dec(&session->priv->file->f_count);
1797 refcount_error:
1798 fput(tracker_ids_list_file);
1799 file_error:
1800 put_unused_fd(file_fd);
1801 fd_error:
1802 return ret;
1803 }
1804
1805 /*
1806 * Enabler management.
1807 */
1808 static
1809 int lttng_match_enabler_star_glob(const char *desc_name,
1810 const char *pattern)
1811 {
1812 if (!strutils_star_glob_match(pattern, LTTNG_SIZE_MAX,
1813 desc_name, LTTNG_SIZE_MAX))
1814 return 0;
1815 return 1;
1816 }
1817
1818 static
1819 int lttng_match_enabler_name(const char *desc_name,
1820 const char *name)
1821 {
1822 if (strcmp(desc_name, name))
1823 return 0;
1824 return 1;
1825 }
1826
1827 static
1828 int lttng_desc_match_enabler_check(const struct lttng_kernel_event_desc *desc,
1829 struct lttng_event_enabler_common *enabler)
1830 {
1831 const char *desc_name, *enabler_name;
1832 bool compat = false, entry = false;
1833
1834 enabler_name = enabler->event_param.name;
1835 switch (enabler->event_param.instrumentation) {
1836 case LTTNG_KERNEL_ABI_TRACEPOINT:
1837 desc_name = desc->event_name;
1838 switch (enabler->format_type) {
1839 case LTTNG_ENABLER_FORMAT_STAR_GLOB:
1840 return lttng_match_enabler_star_glob(desc_name, enabler_name);
1841 case LTTNG_ENABLER_FORMAT_NAME:
1842 return lttng_match_enabler_name(desc_name, enabler_name);
1843 default:
1844 return -EINVAL;
1845 }
1846 break;
1847
1848 case LTTNG_KERNEL_ABI_SYSCALL:
1849 desc_name = desc->event_name;
1850 if (!strncmp(desc_name, "compat_", strlen("compat_"))) {
1851 desc_name += strlen("compat_");
1852 compat = true;
1853 }
1854 if (!strncmp(desc_name, "syscall_exit_",
1855 strlen("syscall_exit_"))) {
1856 desc_name += strlen("syscall_exit_");
1857 } else if (!strncmp(desc_name, "syscall_entry_",
1858 strlen("syscall_entry_"))) {
1859 desc_name += strlen("syscall_entry_");
1860 entry = true;
1861 } else {
1862 WARN_ON_ONCE(1);
1863 return -EINVAL;
1864 }
1865 switch (enabler->event_param.u.syscall.entryexit) {
1866 case LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT:
1867 break;
1868 case LTTNG_KERNEL_ABI_SYSCALL_ENTRY:
1869 if (!entry)
1870 return 0;
1871 break;
1872 case LTTNG_KERNEL_ABI_SYSCALL_EXIT:
1873 if (entry)
1874 return 0;
1875 break;
1876 default:
1877 return -EINVAL;
1878 }
1879 switch (enabler->event_param.u.syscall.abi) {
1880 case LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL:
1881 break;
1882 case LTTNG_KERNEL_ABI_SYSCALL_ABI_NATIVE:
1883 if (compat)
1884 return 0;
1885 break;
1886 case LTTNG_KERNEL_ABI_SYSCALL_ABI_COMPAT:
1887 if (!compat)
1888 return 0;
1889 break;
1890 default:
1891 return -EINVAL;
1892 }
1893 switch (enabler->event_param.u.syscall.match) {
1894 case LTTNG_KERNEL_ABI_SYSCALL_MATCH_NAME:
1895 switch (enabler->format_type) {
1896 case LTTNG_ENABLER_FORMAT_STAR_GLOB:
1897 return lttng_match_enabler_star_glob(desc_name, enabler_name);
1898 case LTTNG_ENABLER_FORMAT_NAME:
1899 return lttng_match_enabler_name(desc_name, enabler_name);
1900 default:
1901 return -EINVAL;
1902 }
1903 break;
1904 case LTTNG_KERNEL_ABI_SYSCALL_MATCH_NR:
1905 return -EINVAL; /* Not implemented. */
1906 default:
1907 return -EINVAL;
1908 }
1909 break;
1910
1911 default:
1912 WARN_ON_ONCE(1);
1913 return -EINVAL;
1914 }
1915 }
1916
1917 bool lttng_desc_match_enabler(const struct lttng_kernel_event_desc *desc,
1918 struct lttng_event_enabler_common *enabler)
1919 {
1920 int ret;
1921
1922 ret = lttng_desc_match_enabler_check(desc, enabler);
1923 if (ret < 0) {
1924 WARN_ON_ONCE(1);
1925 return false;
1926 }
1927 return ret;
1928 }
1929
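/*
 * Check whether an enabler matches an existing event: the instrumentation
 * type must agree, the event descriptor must match the enabler pattern, and
 * the event must belong to the enabler's channel (recorder) or to the
 * enabler's group with the same user token (notifier).
 */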
1930 bool lttng_event_enabler_match_event(struct lttng_event_enabler_common *event_enabler,
1931 struct lttng_kernel_event_common *event)
1932 {
1933 if (event_enabler->event_param.instrumentation != event->priv->instrumentation)
1934 return false;
1935
1936 switch (event_enabler->enabler_type) {
1937 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
1938 {
1939 struct lttng_event_recorder_enabler *event_recorder_enabler =
1940 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
1941 struct lttng_kernel_event_recorder *event_recorder =
1942 container_of(event, struct lttng_kernel_event_recorder, parent);
1943
1944 if (lttng_desc_match_enabler(event->priv->desc, event_enabler)
1945 && event_recorder->chan == event_recorder_enabler->chan)
1946 return true;
1947 else
1948 return false;
1949 }
1950 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
1951 {
1952 struct lttng_event_notifier_enabler *event_notifier_enabler =
1953 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
1954 struct lttng_kernel_event_notifier *event_notifier =
1955 container_of(event, struct lttng_kernel_event_notifier, parent);
1956
1957 if (lttng_desc_match_enabler(event->priv->desc, event_enabler)
1958 && event_notifier->priv->group == event_notifier_enabler->group
1959 && event->priv->user_token == event_enabler->user_token)
1960 return true;
1961 else
1962 return false;
1963 }
1964 default:
1965 WARN_ON_ONCE(1);
1966 return false;
1967 }
1968 }
1969
1970 bool lttng_event_enabler_desc_match_event(struct lttng_event_enabler_common *event_enabler,
1971 const struct lttng_kernel_event_desc *desc,
1972 struct lttng_kernel_event_common *event)
1973 {
1974 if (event_enabler->event_param.instrumentation != event->priv->instrumentation)
1975 return false;
1976
1977 switch (event_enabler->enabler_type) {
1978 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
1979 {
1980 struct lttng_event_recorder_enabler *event_recorder_enabler =
1981 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
1982 struct lttng_kernel_event_recorder *event_recorder =
1983 container_of(event, struct lttng_kernel_event_recorder, parent);
1984
1985 if (event->priv->desc == desc && event_recorder->chan == event_recorder_enabler->chan)
1986 return true;
1987 else
1988 return false;
1989 }
1990 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
1991 {
1992 struct lttng_event_notifier_enabler *event_notifier_enabler =
1993 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
1994 struct lttng_kernel_event_notifier *event_notifier =
1995 container_of(event, struct lttng_kernel_event_notifier, parent);
1996
1997 if (event->priv->desc == desc
1998 && event_notifier->priv->group == event_notifier_enabler->group
1999 && event->priv->user_token == event_enabler->user_token)
2000 return true;
2001 else
2002 return false;
2003 }
2004 default:
2005 WARN_ON_ONCE(1);
2006 return false;
2007 }
2008 }
2009
2010 bool lttng_event_enabler_event_name_match_event(struct lttng_event_enabler_common *event_enabler,
2011 const char *event_name,
2012 struct lttng_kernel_event_common *event)
2013 {
2014 if (event_enabler->event_param.instrumentation != event->priv->instrumentation)
2015 return false;
2016
2017 switch (event_enabler->enabler_type) {
2018 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
2019 {
2020 struct lttng_event_recorder_enabler *event_recorder_enabler =
2021 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
2022 struct lttng_kernel_event_recorder *event_recorder =
2023 container_of(event, struct lttng_kernel_event_recorder, parent);
2024
2025 if (!strncmp(event->priv->desc->event_name, event_name, LTTNG_KERNEL_ABI_SYM_NAME_LEN - 1)
2026 && event_recorder->chan == event_recorder_enabler->chan)
2027 return true;
2028 else
2029 return false;
2030 }
2031 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
2032 {
2033 struct lttng_event_notifier_enabler *event_notifier_enabler =
2034 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
2035 struct lttng_kernel_event_notifier *event_notifier =
2036 container_of(event, struct lttng_kernel_event_notifier, parent);
2037
2038 if (!strncmp(event->priv->desc->event_name, event_name, LTTNG_KERNEL_ABI_SYM_NAME_LEN - 1)
2039 && event_notifier->priv->group == event_notifier_enabler->group
2040 && event->priv->user_token == event_enabler->user_token)
2041 return true;
2042 else
2043 return false;
2044 }
2045 default:
2046 WARN_ON_ONCE(1);
2047 return false;
2048 }
2049 }
2050
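/*
 * Look up the backward reference from an event to a given enabler in the
 * event's enabler reference list. Returns NULL if the event is not yet
 * linked to this enabler.
 */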
2051 static
2052 struct lttng_enabler_ref *lttng_enabler_ref(
2053 struct list_head *enablers_ref_list,
2054 struct lttng_event_enabler_common *enabler)
2055 {
2056 struct lttng_enabler_ref *enabler_ref;
2057
2058 list_for_each_entry(enabler_ref, enablers_ref_list, node) {
2059 if (enabler_ref->ref == enabler)
2060 return enabler_ref;
2061 }
2062 return NULL;
2063 }
2064
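/*
 * Walk the registered probe descriptors and, for each tracepoint matching
 * the enabler, create the corresponding event if it is not already present
 * in the enabler's event hash table.
 */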
2065 static
2066 void lttng_event_enabler_create_tracepoint_events_if_missing(struct lttng_event_enabler_common *event_enabler)
2067 {
2068 struct lttng_event_ht *events_ht = lttng_get_event_ht_from_enabler(event_enabler);
2069 struct lttng_kernel_probe_desc *probe_desc;
2070 const struct lttng_kernel_event_desc *desc;
2071 struct list_head *probe_list;
2072 int i;
2073
2074 probe_list = lttng_get_probe_list_head();
2075 /*
2076	 * For each probe event that matches our enabler, create the
2077	 * associated lttng_event if it is not already present in the
2078	 * event hash table.
2079 */
2080 list_for_each_entry(probe_desc, probe_list, head) {
2081 for (i = 0; i < probe_desc->nr_events; i++) {
2082 int found = 0;
2083 struct hlist_head *head;
2084 struct lttng_kernel_event_common *event;
2085 struct lttng_kernel_event_common_private *event_priv;
2086
2087 desc = probe_desc->event_desc[i];
2088 if (!lttng_desc_match_enabler(desc, event_enabler))
2089 continue;
2090
2091 /*
2092 * Check if already created.
2093 */
2094 head = utils_borrow_hash_table_bucket(events_ht->table, LTTNG_EVENT_HT_SIZE, desc->event_name);
2095 lttng_hlist_for_each_entry(event_priv, head, hlist_node) {
2096 if (lttng_event_enabler_desc_match_event(event_enabler, desc, event_priv->pub))
2097 found = 1;
2098 }
2099 if (found)
2100 continue;
2101
2102 /*
2103 * We need to create an event for this event probe.
2104 */
2105 event = _lttng_kernel_event_create(event_enabler, desc);
2106 if (IS_ERR(event)) {
2107 printk(KERN_INFO "LTTng: Unable to create event %s\n",
2108 probe_desc->event_desc[i]->event_name);
2109 }
2110 }
2111 }
2112 }
2113
2114 /*
2115 * Create event if it is missing and present in the list of tracepoint probes.
2116 * Should be called with sessions mutex held.
2117 */
2118 static
2119 void lttng_event_enabler_create_events_if_missing(struct lttng_event_enabler_common *event_enabler)
2120 {
2121 int ret;
2122
2123 switch (event_enabler->event_param.instrumentation) {
2124 case LTTNG_KERNEL_ABI_TRACEPOINT:
2125 lttng_event_enabler_create_tracepoint_events_if_missing(event_enabler);
2126 break;
2127
2128 case LTTNG_KERNEL_ABI_SYSCALL:
2129 ret = lttng_event_enabler_create_syscall_events_if_missing(event_enabler);
2130 WARN_ON_ONCE(ret);
2131 break;
2132
2133 default:
2134 WARN_ON_ONCE(1);
2135 break;
2136 }
2137 }
2138
2139 static
2140 void lttng_event_enabler_init_event_filter(struct lttng_event_enabler_common *event_enabler,
2141 struct lttng_kernel_event_common *event)
2142 {
2143 /* Link filter bytecodes if not linked yet. */
2144 lttng_enabler_link_bytecode(event->priv->desc, lttng_static_ctx,
2145 &event->priv->filter_bytecode_runtime_head, &event_enabler->filter_bytecode_head);
2146 }
2147
2148 static
2149 void lttng_event_enabler_init_event_capture(struct lttng_event_enabler_common *event_enabler,
2150 struct lttng_kernel_event_common *event)
2151 {
2152 switch (event_enabler->enabler_type) {
2153 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
2154 break;
2155 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
2156 {
2157 struct lttng_event_notifier_enabler *event_notifier_enabler =
2158 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
2159 struct lttng_kernel_event_notifier *event_notifier =
2160 container_of(event, struct lttng_kernel_event_notifier, parent);
2161
2162 /* Link capture bytecodes if not linked yet. */
2163 lttng_enabler_link_bytecode(event->priv->desc,
2164 lttng_static_ctx, &event_notifier->priv->capture_bytecode_runtime_head,
2165 &event_notifier_enabler->capture_bytecode_head);
2166 event_notifier->priv->num_captures = event_notifier_enabler->num_captures;
2167 break;
2168 }
2169 default:
2170 WARN_ON_ONCE(1);
2171 }
2172 }
2173
2174 /*
2175 * Create events associated with an event_enabler (if not already present),
2176 * and add backward reference from the event to the enabler.
2177 * Should be called with sessions mutex held.
2178 */
2179 static
2180 int lttng_event_enabler_ref_events(struct lttng_event_enabler_common *event_enabler)
2181 {
2182 struct list_head *event_list_head = lttng_get_event_list_head_from_enabler(event_enabler);
2183 struct lttng_kernel_event_common_private *event_priv;
2184
2185 lttng_syscall_table_set_wildcard_all(event_enabler);
2186
2187 /* First ensure that probe events are created for this enabler. */
2188 lttng_event_enabler_create_events_if_missing(event_enabler);
2189
2190	/* Link each matching event with its associated enabler. */
2191 list_for_each_entry(event_priv, event_list_head, node) {
2192 struct lttng_kernel_event_common *event = event_priv->pub;
2193 struct lttng_enabler_ref *enabler_ref;
2194
2195 if (!lttng_event_enabler_match_event(event_enabler, event))
2196 continue;
2197
2198 enabler_ref = lttng_enabler_ref(&event_priv->enablers_ref_head, event_enabler);
2199 if (!enabler_ref) {
2200 /*
2201 * If no backward ref, create it.
2202			 * Add backward ref from the event to the enabler.
2203 */
2204 enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
2205 if (!enabler_ref)
2206 return -ENOMEM;
2207
2208 enabler_ref->ref = event_enabler;
2209 list_add(&enabler_ref->node, &event_priv->enablers_ref_head);
2210 }
2211
2212 lttng_event_enabler_init_event_filter(event_enabler, event);
2213 lttng_event_enabler_init_event_capture(event_enabler, event);
2214 }
2215 return 0;
2216 }
2217
2218 /*
2219 * Called at module load: connect the probe on all enablers matching
2220 * this event.
2221 * Called with sessions lock held.
2222 */
2223 int lttng_fix_pending_events(void)
2224 {
2225 struct lttng_kernel_session_private *session_priv;
2226
2227 list_for_each_entry(session_priv, &sessions, list)
2228 lttng_session_lazy_sync_event_enablers(session_priv->pub);
2229 return 0;
2230 }
2231
2232 static bool lttng_event_notifier_group_has_active_event_notifiers(
2233 struct lttng_event_notifier_group *event_notifier_group)
2234 {
2235 struct lttng_event_enabler_common *event_enabler;
2236
2237 list_for_each_entry(event_enabler, &event_notifier_group->enablers_head, node) {
2238 if (event_enabler->enabled)
2239 return true;
2240 }
2241 return false;
2242 }
2243
2244 bool lttng_event_notifier_active(void)
2245 {
2246 struct lttng_event_notifier_group *event_notifier_group;
2247
2248 list_for_each_entry(event_notifier_group, &event_notifier_groups, node) {
2249 if (lttng_event_notifier_group_has_active_event_notifiers(event_notifier_group))
2250 return true;
2251 }
2252 return false;
2253 }
2254
2255 int lttng_fix_pending_event_notifiers(void)
2256 {
2257 struct lttng_event_notifier_group *event_notifier_group;
2258
2259 list_for_each_entry(event_notifier_group, &event_notifier_groups, node)
2260 lttng_event_notifier_group_sync_enablers(event_notifier_group);
2261 return 0;
2262 }
2263
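/*
 * Allocate and initialize an event recorder enabler bound to a buffer
 * channel. The enabler starts disabled and is not published to the session
 * until lttng_event_enabler_session_add() is called.
 */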
2264 struct lttng_event_recorder_enabler *lttng_event_recorder_enabler_create(
2265 enum lttng_enabler_format_type format_type,
2266 struct lttng_kernel_abi_event *event_param,
2267 struct lttng_kernel_channel_buffer *chan)
2268 {
2269 struct lttng_event_recorder_enabler *event_enabler;
2270
2271 event_enabler = kzalloc(sizeof(*event_enabler), GFP_KERNEL);
2272 if (!event_enabler)
2273 return NULL;
2274 event_enabler->parent.enabler_type = LTTNG_EVENT_ENABLER_TYPE_RECORDER;
2275 event_enabler->parent.format_type = format_type;
2276 INIT_LIST_HEAD(&event_enabler->parent.filter_bytecode_head);
2277 memcpy(&event_enabler->parent.event_param, event_param,
2278 sizeof(event_enabler->parent.event_param));
2279 event_enabler->chan = chan;
2280 /* ctx left NULL */
2281 event_enabler->parent.enabled = 0;
2282 return event_enabler;
2283 }
2284
2285 void lttng_event_enabler_session_add(struct lttng_kernel_session *session,
2286 struct lttng_event_recorder_enabler *event_enabler)
2287 {
2288 mutex_lock(&sessions_mutex);
2289 list_add(&event_enabler->parent.node, &session->priv->enablers_head);
2290 event_enabler->parent.published = true;
2291 lttng_session_lazy_sync_event_enablers(session);
2292 mutex_unlock(&sessions_mutex);
2293 }
2294
2295 int lttng_event_enabler_enable(struct lttng_event_enabler_common *event_enabler)
2296 {
2297 mutex_lock(&sessions_mutex);
2298 event_enabler->enabled = 1;
2299 lttng_event_enabler_sync(event_enabler);
2300 mutex_unlock(&sessions_mutex);
2301 return 0;
2302 }
2303
2304 int lttng_event_enabler_disable(struct lttng_event_enabler_common *event_enabler)
2305 {
2306 mutex_lock(&sessions_mutex);
2307 event_enabler->enabled = 0;
2308 lttng_event_enabler_sync(event_enabler);
2309 mutex_unlock(&sessions_mutex);
2310 return 0;
2311 }
2312
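/*
 * Copy a filter bytecode program from user space and append it to the
 * enabler's list of filter bytecode nodes. The stored bytecode length is
 * set from the length used for the allocation.
 */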
2313 static
2314 int lttng_enabler_attach_filter_bytecode(struct lttng_event_enabler_common *enabler,
2315 struct lttng_kernel_abi_filter_bytecode __user *bytecode)
2316 {
2317 struct lttng_kernel_bytecode_node *bytecode_node;
2318 uint32_t bytecode_len;
2319 int ret;
2320
2321 ret = get_user(bytecode_len, &bytecode->len);
2322 if (ret)
2323 return ret;
2324 bytecode_node = lttng_kvzalloc(sizeof(*bytecode_node) + bytecode_len,
2325 GFP_KERNEL);
2326 if (!bytecode_node)
2327 return -ENOMEM;
2328 ret = copy_from_user(&bytecode_node->bc, bytecode,
2329 sizeof(*bytecode) + bytecode_len);
2330 if (ret)
2331 goto error_free;
2332
2333 bytecode_node->type = LTTNG_KERNEL_BYTECODE_TYPE_FILTER;
2334 bytecode_node->enabler = enabler;
2335 /* Enforce length based on allocated size */
2336 bytecode_node->bc.len = bytecode_len;
2337 list_add_tail(&bytecode_node->node, &enabler->filter_bytecode_head);
2338
2339 return 0;
2340
2341 error_free:
2342 lttng_kvfree(bytecode_node);
2343 return ret;
2344 }
2345
2346 int lttng_event_enabler_attach_filter_bytecode(struct lttng_event_enabler_common *event_enabler,
2347 struct lttng_kernel_abi_filter_bytecode __user *bytecode)
2348 {
2349 int ret;
2350 ret = lttng_enabler_attach_filter_bytecode(event_enabler, bytecode);
2351 if (ret)
2352 goto error;
2353 lttng_event_enabler_sync(event_enabler);
2354 return 0;
2355
2356 error:
2357 return ret;
2358 }
2359
2360 int lttng_event_add_callsite(struct lttng_kernel_event_common *event,
2361 struct lttng_kernel_abi_event_callsite __user *callsite)
2362 {
2363
2364 switch (event->priv->instrumentation) {
2365 case LTTNG_KERNEL_ABI_UPROBE:
2366 return lttng_uprobes_event_add_callsite(event, callsite);
2367 default:
2368 return -EINVAL;
2369 }
2370 }
2371
2372 static
2373 void lttng_enabler_destroy(struct lttng_event_enabler_common *enabler)
2374 {
2375 struct lttng_kernel_bytecode_node *filter_node, *tmp_filter_node;
2376
2377 /* Destroy filter bytecode */
2378 list_for_each_entry_safe(filter_node, tmp_filter_node,
2379 &enabler->filter_bytecode_head, node) {
2380 lttng_kvfree(filter_node);
2381 }
2382 }
2383
2384 void lttng_event_enabler_destroy(struct lttng_event_enabler_common *event_enabler)
2385 {
2386 lttng_enabler_destroy(event_enabler);
2387 if (event_enabler->published)
2388 list_del(&event_enabler->node);
2389
2390 switch (event_enabler->enabler_type) {
2391 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
2392 {
2393 struct lttng_event_recorder_enabler *event_recorder_enabler =
2394 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
2395
2396 kfree(event_recorder_enabler);
2397 break;
2398 }
2399 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
2400 {
2401 struct lttng_event_notifier_enabler *event_notifier_enabler =
2402 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
2403
2404 kfree(event_notifier_enabler);
2405 break;
2406 }
2407 default:
2408 WARN_ON_ONCE(1);
2409 }
2410 }
2411
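/*
 * Allocate and initialize an event notifier enabler bound to an event
 * notifier group. The enabler starts disabled and records the user token
 * and error counter index provided by the ABI parameters.
 */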
2412 struct lttng_event_notifier_enabler *lttng_event_notifier_enabler_create(
2413 enum lttng_enabler_format_type format_type,
2414 struct lttng_kernel_abi_event_notifier *event_notifier_param,
2415 struct lttng_event_notifier_group *event_notifier_group)
2416 {
2417 struct lttng_event_notifier_enabler *event_notifier_enabler;
2418
2419 event_notifier_enabler = kzalloc(sizeof(*event_notifier_enabler), GFP_KERNEL);
2420 if (!event_notifier_enabler)
2421 return NULL;
2422
2423 event_notifier_enabler->parent.enabler_type = LTTNG_EVENT_ENABLER_TYPE_NOTIFIER;
2424 event_notifier_enabler->parent.format_type = format_type;
2425 INIT_LIST_HEAD(&event_notifier_enabler->parent.filter_bytecode_head);
2426 INIT_LIST_HEAD(&event_notifier_enabler->capture_bytecode_head);
2427
2428 event_notifier_enabler->error_counter_index = event_notifier_param->error_counter_index;
2429 event_notifier_enabler->num_captures = 0;
2430
2431 memcpy(&event_notifier_enabler->parent.event_param, &event_notifier_param->event,
2432 sizeof(event_notifier_enabler->parent.event_param));
2433
2434 event_notifier_enabler->parent.enabled = 0;
2435 event_notifier_enabler->parent.user_token = event_notifier_param->event.token;
2436 event_notifier_enabler->group = event_notifier_group;
2437 return event_notifier_enabler;
2438 }
2439
2440 void lttng_event_notifier_enabler_group_add(struct lttng_event_notifier_group *event_notifier_group,
2441 struct lttng_event_notifier_enabler *event_notifier_enabler)
2442 {
2443 mutex_lock(&sessions_mutex);
2444 list_add(&event_notifier_enabler->parent.node, &event_notifier_enabler->group->enablers_head);
2445 event_notifier_enabler->parent.published = true;
2446 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2447 mutex_unlock(&sessions_mutex);
2448 }
2449
2450 int lttng_event_notifier_enabler_enable(
2451 struct lttng_event_notifier_enabler *event_notifier_enabler)
2452 {
2453 mutex_lock(&sessions_mutex);
2454 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 1;
2455 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2456 mutex_unlock(&sessions_mutex);
2457 return 0;
2458 }
2459
2460 int lttng_event_notifier_enabler_disable(
2461 struct lttng_event_notifier_enabler *event_notifier_enabler)
2462 {
2463 mutex_lock(&sessions_mutex);
2464 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 0;
2465 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2466 mutex_unlock(&sessions_mutex);
2467 return 0;
2468 }
2469
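/*
 * Copy a capture bytecode program from user space, append it to the event
 * notifier enabler's capture list, and resynchronize the notifier group so
 * the new capture is linked to matching event notifiers.
 */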
2470 int lttng_event_notifier_enabler_attach_capture_bytecode(
2471 struct lttng_event_notifier_enabler *event_notifier_enabler,
2472 struct lttng_kernel_abi_capture_bytecode __user *bytecode)
2473 {
2474 struct lttng_kernel_bytecode_node *bytecode_node;
2475 struct lttng_event_enabler_common *enabler =
2476 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler);
2477 uint32_t bytecode_len;
2478 int ret;
2479
2480 ret = get_user(bytecode_len, &bytecode->len);
2481 if (ret)
2482 return ret;
2483
2484 bytecode_node = lttng_kvzalloc(sizeof(*bytecode_node) + bytecode_len,
2485 GFP_KERNEL);
2486 if (!bytecode_node)
2487 return -ENOMEM;
2488
2489 ret = copy_from_user(&bytecode_node->bc, bytecode,
2490 sizeof(*bytecode) + bytecode_len);
2491 if (ret)
2492 goto error_free;
2493
2494 bytecode_node->type = LTTNG_KERNEL_BYTECODE_TYPE_CAPTURE;
2495 bytecode_node->enabler = enabler;
2496
2497 /* Enforce length based on allocated size */
2498 bytecode_node->bc.len = bytecode_len;
2499 list_add_tail(&bytecode_node->node, &event_notifier_enabler->capture_bytecode_head);
2500
2501 event_notifier_enabler->num_captures++;
2502
2503 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2504 goto end;
2505
2506 error_free:
2507 lttng_kvfree(bytecode_node);
2508 end:
2509 return ret;
2510 }
2511
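/*
 * Recompute the filter evaluation state of an event: the filter is only
 * evaluated when every enabled enabler attached to the event provides
 * filter bytecode and at least one filter runtime is present.
 */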
2512 static
2513 void lttng_event_sync_filter_state(struct lttng_kernel_event_common *event)
2514 {
2515 int has_enablers_without_filter_bytecode = 0, nr_filters = 0;
2516 struct lttng_kernel_bytecode_runtime *runtime;
2517 struct lttng_enabler_ref *enabler_ref;
2518
2519	/* Check whether the event has enabled enablers without filter bytecode. */
2520 list_for_each_entry(enabler_ref, &event->priv->enablers_ref_head, node) {
2521 if (enabler_ref->ref->enabled
2522 && list_empty(&enabler_ref->ref->filter_bytecode_head)) {
2523 has_enablers_without_filter_bytecode = 1;
2524 break;
2525 }
2526 }
2527 event->priv->has_enablers_without_filter_bytecode = has_enablers_without_filter_bytecode;
2528
2529 /* Enable filters */
2530 list_for_each_entry(runtime, &event->priv->filter_bytecode_runtime_head, node) {
2531 lttng_bytecode_sync_state(runtime);
2532 nr_filters++;
2533 }
2534 WRITE_ONCE(event->eval_filter, !(has_enablers_without_filter_bytecode || !nr_filters));
2535 }
2536
2537 static
2538 void lttng_event_sync_capture_state(struct lttng_kernel_event_common *event)
2539 {
2540 switch (event->type) {
2541 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
2542 break;
2543 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
2544 {
2545 struct lttng_kernel_event_notifier *event_notifier =
2546 container_of(event, struct lttng_kernel_event_notifier, parent);
2547 struct lttng_kernel_bytecode_runtime *runtime;
2548 int nr_captures = 0;
2549
2550 /* Enable captures */
2551 list_for_each_entry(runtime, &event_notifier->priv->capture_bytecode_runtime_head, node) {
2552 lttng_bytecode_sync_state(runtime);
2553 nr_captures++;
2554 }
2555 WRITE_ONCE(event_notifier->eval_capture, !!nr_captures);
2556 break;
2557 }
2558 default:
2559 WARN_ON_ONCE(1);
2560 }
2561 }
2562
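/*
 * Compute the enabled state of an event from the union of its enabler
 * states; recorder events are further gated by the session and channel
 * transient enable states.
 */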
2563 static
2564 bool lttng_get_event_enabled_state(struct lttng_kernel_event_common *event)
2565 {
2566 struct lttng_enabler_ref *enabler_ref;
2567 bool enabled = false;
2568
2569 switch (event->priv->instrumentation) {
2570 case LTTNG_KERNEL_ABI_TRACEPOINT:
2571 lttng_fallthrough;
2572 case LTTNG_KERNEL_ABI_SYSCALL:
2573 /* Enable events */
2574 list_for_each_entry(enabler_ref, &event->priv->enablers_ref_head, node) {
2575 if (enabler_ref->ref->enabled) {
2576 enabled = true;
2577 break;
2578 }
2579 }
2580 break;
2581 default:
2582 WARN_ON_ONCE(1);
2583 return false;
2584 }
2585
2586 switch (event->type) {
2587 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
2588 {
2589 struct lttng_kernel_event_recorder *event_recorder =
2590 container_of(event, struct lttng_kernel_event_recorder, parent);
2591
2592 /*
2593 * Enabled state is based on union of enablers, with
2594 * intersection of session and channel transient enable
2595 * states.
2596 */
2597 return enabled && event_recorder->chan->parent.session->priv->tstate && event_recorder->chan->priv->parent.tstate;
2598 }
2599 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
2600 return enabled;
2601 default:
2602 WARN_ON_ONCE(1);
2603 return false;
2604 }
2605 }
2606
2607 static
2608 bool lttng_event_is_lazy_sync(struct lttng_kernel_event_common *event)
2609 {
2610 switch (event->priv->instrumentation) {
2611 case LTTNG_KERNEL_ABI_TRACEPOINT:
2612 lttng_fallthrough;
2613 case LTTNG_KERNEL_ABI_SYSCALL:
2614 return true;
2615
2616 default:
2617 /* Not handled with lazy sync. */
2618 return false;
2619 }
2620 }
2621
2622 /*
2623 * Should be called with sessions mutex held.
2624 */
2625 static
2626 void lttng_sync_event_list(struct list_head *event_enabler_list,
2627 struct list_head *event_list)
2628 {
2629 struct lttng_kernel_event_common_private *event_priv;
2630 struct lttng_event_enabler_common *event_enabler;
2631
2632 list_for_each_entry(event_enabler, event_enabler_list, node)
2633 lttng_event_enabler_ref_events(event_enabler);
2634
2635 /*
2636 * For each event, if at least one of its enablers is enabled,
2637 * and its channel and session transient states are enabled, we
2638 * enable the event, else we disable it.
2639 */
2640 list_for_each_entry(event_priv, event_list, node) {
2641 struct lttng_kernel_event_common *event = event_priv->pub;
2642 bool enabled;
2643
2644 if (!lttng_event_is_lazy_sync(event))
2645 continue;
2646
2647 enabled = lttng_get_event_enabled_state(event);
2648 WRITE_ONCE(event->enabled, enabled);
2649 /*
2650 * Sync tracepoint registration with event enabled state.
2651 */
2652 if (enabled) {
2653 register_event(event);
2654 } else {
2655 _lttng_event_unregister(event);
2656 }
2657
2658 lttng_event_sync_filter_state(event);
2659 lttng_event_sync_capture_state(event);
2660 }
2661 }
2662
2663 /*
2664 * lttng_session_sync_event_enablers should be called just before starting a
2665 * session.
2666 */
2667 static
2668 void lttng_session_sync_event_enablers(struct lttng_kernel_session *session)
2669 {
2670 lttng_sync_event_list(&session->priv->enablers_head, &session->priv->events);
2671 }
2672
2673 /*
2674 * Apply enablers to session events, adding events to the session if
2675 * need be. It is required after each modification applied to an active
2676 * session, and right before session "start".
2677 * "lazy" sync means we only sync if required.
2678 * Should be called with sessions mutex held.
2679 */
2680 static
2681 void lttng_session_lazy_sync_event_enablers(struct lttng_kernel_session *session)
2682 {
2683 /* We can skip if session is not active */
2684 if (!session->active)
2685 return;
2686 lttng_session_sync_event_enablers(session);
2687 }
2688
2689 static
2690 void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group)
2691 {
2692 lttng_sync_event_list(&event_notifier_group->enablers_head, &event_notifier_group->event_notifiers_head);
2693 }
2694
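/*
 * Synchronize the owner of an enabler: the session for a recorder enabler,
 * or the event notifier group for a notifier enabler.
 */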
2695 static
2696 void lttng_event_enabler_sync(struct lttng_event_enabler_common *event_enabler)
2697 {
2698 switch (event_enabler->enabler_type) {
2699 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
2700 {
2701 struct lttng_event_recorder_enabler *event_recorder_enabler =
2702 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
2703 lttng_session_lazy_sync_event_enablers(event_recorder_enabler->chan->parent.session);
2704 break;
2705 }
2706 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
2707 {
2708 struct lttng_event_notifier_enabler *event_notifier_enabler =
2709 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
2710 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2711 break;
2712 }
2713 default:
2714 WARN_ON_ONCE(1);
2715 }
2716 }
2717
2718 /*
2719 * Serialize at most one packet worth of metadata into a metadata
2720 * channel.
2721 * We grab the metadata cache mutex to get exclusive access to our metadata
2722 * buffer and to the metadata cache. Exclusive access to the metadata buffer
2723 * allows us to do racy operations such as looking for remaining space left in
2724 * packet and write, since mutual exclusion protects us from concurrent writes.
2725 * Mutual exclusion on the metadata cache allows us to read the cache content
2726 * without racing against reallocation of the cache by updates.
2727 * Returns the number of bytes written to the channel, 0 if no data
2728 * was written, and a negative value on error.
2729 */
2730 int lttng_metadata_output_channel(struct lttng_metadata_stream *stream,
2731 struct lttng_kernel_ring_buffer_channel *chan, bool *coherent)
2732 {
2733 struct lttng_kernel_ring_buffer_ctx ctx;
2734 int ret = 0;
2735 size_t len, reserve_len;
2736
2737 /*
2738	 * Ensure we support multiple get_next / put sequences followed by
2739 * put_next. The metadata cache lock protects reading the metadata
2740 * cache. It can indeed be read concurrently by "get_next_subbuf" and
2741 * "flush" operations on the buffer invoked by different processes.
2742 * Moreover, since the metadata cache memory can be reallocated, we
2743 * need to have exclusive access against updates even though we only
2744 * read it.
2745 */
2746 mutex_lock(&stream->metadata_cache->lock);
2747 WARN_ON(stream->metadata_in < stream->metadata_out);
2748 if (stream->metadata_in != stream->metadata_out)
2749 goto end;
2750
2751 /* Metadata regenerated, change the version. */
2752 if (stream->metadata_cache->version != stream->version)
2753 stream->version = stream->metadata_cache->version;
2754
2755 len = stream->metadata_cache->metadata_written -
2756 stream->metadata_in;
2757 if (!len)
2758 goto end;
2759 reserve_len = min_t(size_t,
2760 stream->transport->ops.priv->packet_avail_size(chan),
2761 len);
2762 lib_ring_buffer_ctx_init(&ctx, chan, reserve_len,
2763 sizeof(char), NULL);
2764 /*
2765 * If reservation failed, return an error to the caller.
2766 */
2767 ret = stream->transport->ops.event_reserve(&ctx);
2768 if (ret != 0) {
2769 printk(KERN_WARNING "LTTng: Metadata event reservation failed\n");
2770 stream->coherent = false;
2771 goto end;
2772 }
2773 stream->transport->ops.event_write(&ctx,
2774 stream->metadata_cache->data + stream->metadata_in,
2775 reserve_len, 1);
2776 stream->transport->ops.event_commit(&ctx);
2777 stream->metadata_in += reserve_len;
2778 if (reserve_len < len)
2779 stream->coherent = false;
2780 else
2781 stream->coherent = true;
2782 ret = reserve_len;
2783
2784 end:
2785 if (coherent)
2786 *coherent = stream->coherent;
2787 mutex_unlock(&stream->metadata_cache->lock);
2788 return ret;
2789 }
2790
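/*
 * Open a metadata transaction: the metadata cache lock is taken when the
 * outermost producer enters, so nested begin/end pairs are serialized as a
 * single transaction.
 */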
2791 static
2792 void lttng_metadata_begin(struct lttng_kernel_session *session)
2793 {
2794 if (atomic_inc_return(&session->priv->metadata_cache->producing) == 1)
2795 mutex_lock(&session->priv->metadata_cache->lock);
2796 }
2797
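/*
 * Close a metadata transaction: when the outermost producer leaves, wake up
 * the metadata stream readers and release the metadata cache lock.
 */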
2798 static
2799 void lttng_metadata_end(struct lttng_kernel_session *session)
2800 {
2801 WARN_ON_ONCE(!atomic_read(&session->priv->metadata_cache->producing));
2802 if (atomic_dec_return(&session->priv->metadata_cache->producing) == 0) {
2803 struct lttng_metadata_stream *stream;
2804
2805 list_for_each_entry(stream, &session->priv->metadata_cache->metadata_stream, list)
2806 wake_up_interruptible(&stream->read_wait);
2807 mutex_unlock(&session->priv->metadata_cache->lock);
2808 }
2809 }
2810
2811 /*
2812 * Write the metadata to the metadata cache.
2813 * Must be called with sessions_mutex held.
2814 * The metadata cache lock protects us from concurrent read access by the
2815 * thread outputting metadata content to the ring buffer.
2816 * The content of the printf is printed as a single atomic metadata
2817 * transaction.
2818 */
2819 int lttng_metadata_printf(struct lttng_kernel_session *session,
2820 const char *fmt, ...)
2821 {
2822 char *str;
2823 size_t len;
2824 va_list ap;
2825
2826 WARN_ON_ONCE(!LTTNG_READ_ONCE(session->active));
2827
2828 va_start(ap, fmt);
2829 str = kvasprintf(GFP_KERNEL, fmt, ap);
2830 va_end(ap);
2831 if (!str)
2832 return -ENOMEM;
2833
2834 len = strlen(str);
2835 WARN_ON_ONCE(!atomic_read(&session->priv->metadata_cache->producing));
2836 if (session->priv->metadata_cache->metadata_written + len >
2837 session->priv->metadata_cache->cache_alloc) {
2838 char *tmp_cache_realloc;
2839 unsigned int tmp_cache_alloc_size;
2840
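		/*
		 * At least double the cache allocation, growing further if
		 * needed to fit the new metadata.
		 */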
2841 tmp_cache_alloc_size = max_t(unsigned int,
2842 session->priv->metadata_cache->cache_alloc + len,
2843 session->priv->metadata_cache->cache_alloc << 1);
2844 tmp_cache_realloc = vzalloc(tmp_cache_alloc_size);
2845 if (!tmp_cache_realloc)
2846 goto err;
2847 if (session->priv->metadata_cache->data) {
2848 memcpy(tmp_cache_realloc,
2849 session->priv->metadata_cache->data,
2850 session->priv->metadata_cache->cache_alloc);
2851 vfree(session->priv->metadata_cache->data);
2852 }
2853
2854 session->priv->metadata_cache->cache_alloc = tmp_cache_alloc_size;
2855 session->priv->metadata_cache->data = tmp_cache_realloc;
2856 }
2857 memcpy(session->priv->metadata_cache->data +
2858 session->priv->metadata_cache->metadata_written,
2859 str, len);
2860 session->priv->metadata_cache->metadata_written += len;
2861 kfree(str);
2862
2863 return 0;
2864
2865 err:
2866 kfree(str);
2867 return -ENOMEM;
2868 }
2869
2870 static
2871 int print_tabs(struct lttng_kernel_session *session, size_t nesting)
2872 {
2873 size_t i;
2874
2875 for (i = 0; i < nesting; i++) {
2876 int ret;
2877
2878 ret = lttng_metadata_printf(session, " ");
2879 if (ret) {
2880 return ret;
2881 }
2882 }
2883 return 0;
2884 }
2885
2886 static
2887 int lttng_field_name_statedump(struct lttng_kernel_session *session,
2888 const struct lttng_kernel_event_field *field,
2889 size_t nesting)
2890 {
2891 return lttng_metadata_printf(session, " _%s;\n", field->name);
2892 }
2893
2894 static
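/*
 * Serialize a CTF 1.8 integer type declaration (size, alignment, signedness,
 * encoding, base and byte order) into the metadata cache.
 */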
2895 int _lttng_integer_type_statedump(struct lttng_kernel_session *session,
2896 const struct lttng_kernel_type_integer *type,
2897 enum lttng_kernel_string_encoding parent_encoding,
2898 size_t nesting)
2899 {
2900 int ret;
2901
2902 ret = print_tabs(session, nesting);
2903 if (ret)
2904 return ret;
2905 ret = lttng_metadata_printf(session,
2906 "integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s }",
2907 type->size,
2908 type->alignment,
2909 type->signedness,
2910 (parent_encoding == lttng_kernel_string_encoding_none)
2911 ? "none"
2912 : (parent_encoding == lttng_kernel_string_encoding_UTF8)
2913 ? "UTF8"
2914 : "ASCII",
2915 type->base,
2916 #if __BYTE_ORDER == __BIG_ENDIAN
2917 type->reverse_byte_order ? " byte_order = le;" : ""
2918 #else
2919 type->reverse_byte_order ? " byte_order = be;" : ""
2920 #endif
2921 );
2922 return ret;
2923 }
2924
2925 /*
2926 * Must be called with sessions_mutex held.
2927 */
2928 static
2929 int _lttng_struct_type_statedump(struct lttng_kernel_session *session,
2930 const struct lttng_kernel_type_struct *type,
2931 size_t nesting)
2932 {
2933 const char *prev_field_name = NULL;
2934 int ret;
2935 uint32_t i, nr_fields;
2936 unsigned int alignment;
2937
2938 ret = print_tabs(session, nesting);
2939 if (ret)
2940 return ret;
2941 ret = lttng_metadata_printf(session,
2942 "struct {\n");
2943 if (ret)
2944 return ret;
2945 nr_fields = type->nr_fields;
2946 for (i = 0; i < nr_fields; i++) {
2947 const struct lttng_kernel_event_field *iter_field;
2948
2949 iter_field = type->fields[i];
2950 ret = _lttng_field_statedump(session, iter_field, nesting + 1, &prev_field_name);
2951 if (ret)
2952 return ret;
2953 }
2954 ret = print_tabs(session, nesting);
2955 if (ret)
2956 return ret;
2957 alignment = type->alignment;
2958 if (alignment) {
2959 ret = lttng_metadata_printf(session,
2960 "} align(%u)",
2961 alignment);
2962 } else {
2963 ret = lttng_metadata_printf(session,
2964 "}");
2965 }
2966 return ret;
2967 }
2968
2969 /*
2970 * Must be called with sessions_mutex held.
2971 */
2972 static
2973 int _lttng_struct_field_statedump(struct lttng_kernel_session *session,
2974 const struct lttng_kernel_event_field *field,
2975 size_t nesting)
2976 {
2977 int ret;
2978
2979 ret = _lttng_struct_type_statedump(session,
2980 lttng_kernel_get_type_struct(field->type), nesting);
2981 if (ret)
2982 return ret;
2983 return lttng_field_name_statedump(session, field, nesting);
2984 }
2985
2986 /*
2987 * Must be called with sessions_mutex held.
2988 */
2989 static
2990 int _lttng_variant_type_statedump(struct lttng_kernel_session *session,
2991 const struct lttng_kernel_type_variant *type,
2992 size_t nesting,
2993 const char *prev_field_name)
2994 {
2995 const char *tag_name;
2996 int ret;
2997 uint32_t i, nr_choices;
2998
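	/*
	 * When the variant type does not name its tag explicitly, fall back
	 * to the previous field name as the selector.
	 */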
2999 tag_name = type->tag_name;
3000 if (!tag_name)
3001 tag_name = prev_field_name;
3002 if (!tag_name)
3003 return -EINVAL;
3004 /*
3005 * CTF 1.8 does not allow expressing nonzero variant alignment in a nestable way.
3006 */
3007 if (type->alignment != 0)
3008 return -EINVAL;
3009 ret = print_tabs(session, nesting);
3010 if (ret)
3011 return ret;
3012 ret = lttng_metadata_printf(session,
3013 "variant <_%s> {\n",
3014 tag_name);
3015 if (ret)
3016 return ret;
3017 nr_choices = type->nr_choices;
3018 for (i = 0; i < nr_choices; i++) {
3019 const struct lttng_kernel_event_field *iter_field;
3020
3021 iter_field = type->choices[i];
3022 ret = _lttng_field_statedump(session, iter_field, nesting + 1, NULL);
3023 if (ret)
3024 return ret;
3025 }
3026 ret = print_tabs(session, nesting);
3027 if (ret)
3028 return ret;
3029 ret = lttng_metadata_printf(session,
3030 "}");
3031 return ret;
3032 }
3033
3034 /*
3035 * Must be called with sessions_mutex held.
3036 */
3037 static
3038 int _lttng_variant_field_statedump(struct lttng_kernel_session *session,
3039 const struct lttng_kernel_event_field *field,
3040 size_t nesting,
3041 const char *prev_field_name)
3042 {
3043 int ret;
3044
3045 ret = _lttng_variant_type_statedump(session,
3046 lttng_kernel_get_type_variant(field->type), nesting,
3047 prev_field_name);
3048 if (ret)
3049 return ret;
3050 return lttng_field_name_statedump(session, field, nesting);
3051 }
3052
3053 /*
3054 * Must be called with sessions_mutex held.
3055 */
3056 static
3057 int _lttng_array_field_statedump(struct lttng_kernel_session *session,
3058 const struct lttng_kernel_event_field *field,
3059 size_t nesting)
3060 {
3061 int ret;
3062 const struct lttng_kernel_type_array *array_type;
3063 const struct lttng_kernel_type_common *elem_type;
3064
3065 array_type = lttng_kernel_get_type_array(field->type);
3066 WARN_ON_ONCE(!array_type);
3067
3068 if (array_type->alignment) {
3069 ret = print_tabs(session, nesting);
3070 if (ret)
3071 return ret;
3072 ret = lttng_metadata_printf(session,
3073 "struct { } align(%u) _%s_padding;\n",
3074 array_type->alignment * CHAR_BIT,
3075 field->name);
3076 if (ret)
3077 return ret;
3078 }
3079 /*
3080	 * Nested compound types: only arrays of structures and variants are
3081	 * currently supported.
3082 */
3083 elem_type = array_type->elem_type;
3084 switch (elem_type->type) {
3085 case lttng_kernel_type_integer:
3086 case lttng_kernel_type_struct:
3087 case lttng_kernel_type_variant:
3088 ret = _lttng_type_statedump(session, elem_type,
3089 array_type->encoding, nesting);
3090 if (ret)
3091 return ret;
3092 break;
3093
3094 default:
3095 return -EINVAL;
3096 }
3097 ret = lttng_metadata_printf(session,
3098 " _%s[%u];\n",
3099 field->name,
3100 array_type->length);
3101 return ret;
3102 }
3103
3104 /*
3105 * Must be called with sessions_mutex held.
3106 */
3107 static
3108 int _lttng_sequence_field_statedump(struct lttng_kernel_session *session,
3109 const struct lttng_kernel_event_field *field,
3110 size_t nesting,
3111 const char *prev_field_name)
3112 {
3113 int ret;
3114 const char *length_name;
3115 const struct lttng_kernel_type_sequence *sequence_type;
3116 const struct lttng_kernel_type_common *elem_type;
3117
3118 sequence_type = lttng_kernel_get_type_sequence(field->type);
3119 WARN_ON_ONCE(!sequence_type);
3120
3121 length_name = sequence_type->length_name;
3122 if (!length_name)
3123 length_name = prev_field_name;
3124 if (!length_name)
3125 return -EINVAL;
3126
3127 if (sequence_type->alignment) {
3128 ret = print_tabs(session, nesting);
3129 if (ret)
3130 return ret;
3131 ret = lttng_metadata_printf(session,
3132 "struct { } align(%u) _%s_padding;\n",
3133 sequence_type->alignment * CHAR_BIT,
3134 field->name);
3135 if (ret)
3136 return ret;
3137 }
3138
3139 /*
3140	 * Nested compound types: only arrays of structures and variants are
3141	 * currently supported.
3142 */
3143 elem_type = sequence_type->elem_type;
3144 switch (elem_type->type) {
3145 case lttng_kernel_type_integer:
3146 case lttng_kernel_type_struct:
3147 case lttng_kernel_type_variant:
3148 ret = _lttng_type_statedump(session, elem_type,
3149 sequence_type->encoding, nesting);
3150 if (ret)
3151 return ret;
3152 break;
3153
3154 default:
3155 return -EINVAL;
3156 }
3157 ret = lttng_metadata_printf(session,
3158 " _%s[ _%s ];\n",
3159 field->name,
3160 length_name);
3161 return ret;
3162 }
3163
3164 /*
3165 * Must be called with sessions_mutex held.
3166 */
3167 static
3168 int _lttng_enum_type_statedump(struct lttng_kernel_session *session,
3169 const struct lttng_kernel_type_enum *type,
3170 size_t nesting)
3171 {
3172 const struct lttng_kernel_enum_desc *enum_desc;
3173 const struct lttng_kernel_type_common *container_type;
3174 int ret;
3175 unsigned int i, nr_entries;
3176
3177 container_type = type->container_type;
3178 if (container_type->type != lttng_kernel_type_integer) {
3179 ret = -EINVAL;
3180 goto end;
3181 }
3182 enum_desc = type->desc;
3183 nr_entries = enum_desc->nr_entries;
3184
3185 ret = print_tabs(session, nesting);
3186 if (ret)
3187 goto end;
3188 ret = lttng_metadata_printf(session, "enum : ");
3189 if (ret)
3190 goto end;
3191 ret = _lttng_integer_type_statedump(session, lttng_kernel_get_type_integer(container_type),
3192 lttng_kernel_string_encoding_none, 0);
3193 if (ret)
3194 goto end;
3195 ret = lttng_metadata_printf(session, " {\n");
3196 if (ret)
3197 goto end;
3198 /* Dump all entries */
3199 for (i = 0; i < nr_entries; i++) {
3200 const struct lttng_kernel_enum_entry *entry = enum_desc->entries[i];
3201 int j, len;
3202
3203 ret = print_tabs(session, nesting + 1);
3204 if (ret)
3205 goto end;
3206 ret = lttng_metadata_printf(session,
3207 "\"");
3208 if (ret)
3209 goto end;
3210 len = strlen(entry->string);
3211		/* Escape the characters '"' and '\\'. */
3212 for (j = 0; j < len; j++) {
3213 char c = entry->string[j];
3214
3215 switch (c) {
3216 case '"':
3217 ret = lttng_metadata_printf(session,
3218 "\\\"");
3219 break;
3220 case '\\':
3221 ret = lttng_metadata_printf(session,
3222 "\\\\");
3223 break;
3224 default:
3225 ret = lttng_metadata_printf(session,
3226 "%c", c);
3227 break;
3228 }
3229 if (ret)
3230 goto end;
3231 }
3232 ret = lttng_metadata_printf(session, "\"");
3233 if (ret)
3234 goto end;
3235
3236 if (entry->options.is_auto) {
3237 ret = lttng_metadata_printf(session, ",\n");
3238 if (ret)
3239 goto end;
3240 } else {
3241 ret = lttng_metadata_printf(session,
3242 " = ");
3243 if (ret)
3244 goto end;
3245 if (entry->start.signedness)
3246 ret = lttng_metadata_printf(session,
3247 "%lld", (long long) entry->start.value);
3248 else
3249 ret = lttng_metadata_printf(session,
3250 "%llu", entry->start.value);
3251 if (ret)
3252 goto end;
3253 if (entry->start.signedness == entry->end.signedness &&
3254 entry->start.value
3255 == entry->end.value) {
3256 ret = lttng_metadata_printf(session,
3257 ",\n");
3258 } else {
3259 if (entry->end.signedness) {
3260 ret = lttng_metadata_printf(session,
3261 " ... %lld,\n",
3262 (long long) entry->end.value);
3263 } else {
3264 ret = lttng_metadata_printf(session,
3265 " ... %llu,\n",
3266 entry->end.value);
3267 }
3268 }
3269 if (ret)
3270 goto end;
3271 }
3272 }
3273 ret = print_tabs(session, nesting);
3274 if (ret)
3275 goto end;
3276 ret = lttng_metadata_printf(session, "}");
3277 end:
3278 return ret;
3279 }
3280
3281 /*
3282 * Must be called with sessions_mutex held.
3283 */
3284 static
3285 int _lttng_enum_field_statedump(struct lttng_kernel_session *session,
3286 const struct lttng_kernel_event_field *field,
3287 size_t nesting)
3288 {
3289 int ret;
3290 const struct lttng_kernel_type_enum *enum_type;
3291
3292 enum_type = lttng_kernel_get_type_enum(field->type);
3293 WARN_ON_ONCE(!enum_type);
3294 ret = _lttng_enum_type_statedump(session, enum_type, nesting);
3295 if (ret)
3296 return ret;
3297 return lttng_field_name_statedump(session, field, nesting);
3298 }
3299
3300 static
3301 int _lttng_integer_field_statedump(struct lttng_kernel_session *session,
3302 const struct lttng_kernel_event_field *field,
3303 size_t nesting)
3304 {
3305 int ret;
3306
3307 ret = _lttng_integer_type_statedump(session, lttng_kernel_get_type_integer(field->type),
3308 lttng_kernel_string_encoding_none, nesting);
3309 if (ret)
3310 return ret;
3311 return lttng_field_name_statedump(session, field, nesting);
3312 }
3313
3314 static
3315 int _lttng_string_type_statedump(struct lttng_kernel_session *session,
3316 const struct lttng_kernel_type_string *type,
3317 size_t nesting)
3318 {
3319 int ret;
3320
3321 /* Default encoding is UTF8 */
3322 ret = print_tabs(session, nesting);
3323 if (ret)
3324 return ret;
3325 ret = lttng_metadata_printf(session,
3326 "string%s",
3327 type->encoding == lttng_kernel_string_encoding_ASCII ?
3328 " { encoding = ASCII; }" : "");
3329 return ret;
3330 }
3331
3332 static
3333 int _lttng_string_field_statedump(struct lttng_kernel_session *session,
3334 const struct lttng_kernel_event_field *field,
3335 size_t nesting)
3336 {
3337 const struct lttng_kernel_type_string *string_type;
3338 int ret;
3339
3340 string_type = lttng_kernel_get_type_string(field->type);
3341 WARN_ON_ONCE(!string_type);
3342 ret = _lttng_string_type_statedump(session, string_type, nesting);
3343 if (ret)
3344 return ret;
3345 return lttng_field_name_statedump(session, field, nesting);
3346 }
3347
3348 /*
3349 * Must be called with sessions_mutex held.
3350 */
3351 static
3352 int _lttng_type_statedump(struct lttng_kernel_session *session,
3353 const struct lttng_kernel_type_common *type,
3354 enum lttng_kernel_string_encoding parent_encoding,
3355 size_t nesting)
3356 {
3357 int ret = 0;
3358
3359 switch (type->type) {
3360 case lttng_kernel_type_integer:
3361 ret = _lttng_integer_type_statedump(session,
3362 lttng_kernel_get_type_integer(type),
3363 parent_encoding, nesting);
3364 break;
3365 case lttng_kernel_type_enum:
3366 ret = _lttng_enum_type_statedump(session,
3367 lttng_kernel_get_type_enum(type),
3368 nesting);
3369 break;
3370 case lttng_kernel_type_string:
3371 ret = _lttng_string_type_statedump(session,
3372 lttng_kernel_get_type_string(type),
3373 nesting);
3374 break;
3375 case lttng_kernel_type_struct:
3376 ret = _lttng_struct_type_statedump(session,
3377 lttng_kernel_get_type_struct(type),
3378 nesting);
3379 break;
3380 case lttng_kernel_type_variant:
3381 ret = _lttng_variant_type_statedump(session,
3382 lttng_kernel_get_type_variant(type),
3383 nesting, NULL);
3384 break;
3385
3386 /* Nested arrays and sequences are not supported yet. */
3387 case lttng_kernel_type_array:
3388 case lttng_kernel_type_sequence:
3389 default:
3390 WARN_ON_ONCE(1);
3391 return -EINVAL;
3392 }
3393 return ret;
3394 }
3395
3396 /*
3397 * Must be called with sessions_mutex held.
3398 */
3399 static
3400 int _lttng_field_statedump(struct lttng_kernel_session *session,
3401 const struct lttng_kernel_event_field *field,
3402 size_t nesting,
3403 const char **prev_field_name_p)
3404 {
3405 const char *prev_field_name = NULL;
3406 int ret = 0;
3407
3408 if (prev_field_name_p)
3409 prev_field_name = *prev_field_name_p;
3410 switch (field->type->type) {
3411 case lttng_kernel_type_integer:
3412 ret = _lttng_integer_field_statedump(session, field, nesting);
3413 break;
3414 case lttng_kernel_type_enum:
3415 ret = _lttng_enum_field_statedump(session, field, nesting);
3416 break;
3417 case lttng_kernel_type_string:
3418 ret = _lttng_string_field_statedump(session, field, nesting);
3419 break;
3420 case lttng_kernel_type_struct:
3421 ret = _lttng_struct_field_statedump(session, field, nesting);
3422 break;
3423 case lttng_kernel_type_array:
3424 ret = _lttng_array_field_statedump(session, field, nesting);
3425 break;
3426 case lttng_kernel_type_sequence:
3427 ret = _lttng_sequence_field_statedump(session, field, nesting, prev_field_name);
3428 break;
3429 case lttng_kernel_type_variant:
3430 ret = _lttng_variant_field_statedump(session, field, nesting, prev_field_name);
3431 break;
3432
3433 default:
3434 WARN_ON_ONCE(1);
3435 return -EINVAL;
3436 }
3437 if (prev_field_name_p)
3438 *prev_field_name_p = field->name;
3439 return ret;
3440 }
3441
3442 static
3443 int _lttng_context_metadata_statedump(struct lttng_kernel_session *session,
3444 struct lttng_kernel_ctx *ctx)
3445 {
3446 const char *prev_field_name = NULL;
3447 int ret = 0;
3448 int i;
3449
3450 if (!ctx)
3451 return 0;
3452 for (i = 0; i < ctx->nr_fields; i++) {
3453 const struct lttng_kernel_ctx_field *field = &ctx->fields[i];
3454
3455 ret = _lttng_field_statedump(session, field->event_field, 2, &prev_field_name);
3456 if (ret)
3457 return ret;
3458 }
3459 return ret;
3460 }
3461
3462 static
3463 int _lttng_fields_metadata_statedump(struct lttng_kernel_session *session,
3464 struct lttng_kernel_event_recorder *event_recorder)
3465 {
3466 const char *prev_field_name = NULL;
3467 const struct lttng_kernel_event_desc *desc = event_recorder->priv->parent.desc;
3468 int ret = 0;
3469 int i;
3470
3471 for (i = 0; i < desc->tp_class->nr_fields; i++) {
3472 const struct lttng_kernel_event_field *field = desc->tp_class->fields[i];
3473
3474 ret = _lttng_field_statedump(session, field, 2, &prev_field_name);
3475 if (ret)
3476 return ret;
3477 }
3478 return ret;
3479 }
3480
3481 /*
3482 * Must be called with sessions_mutex held.
3483 * The entire event metadata is printed as a single atomic metadata
3484 * transaction.
3485 */
3486 static
3487 int _lttng_event_recorder_metadata_statedump(struct lttng_kernel_event_common *event)
3488 {
3489 struct lttng_kernel_event_recorder *event_recorder;
3490 struct lttng_kernel_channel_buffer *chan;
3491 struct lttng_kernel_session *session;
3492 int ret = 0;
3493
3494 if (event->type != LTTNG_KERNEL_EVENT_TYPE_RECORDER)
3495 return 0;
3496 event_recorder = container_of(event, struct lttng_kernel_event_recorder, parent);
3497 chan = event_recorder->chan;
3498 session = chan->parent.session;
3499
3500 if (event_recorder->priv->metadata_dumped || !LTTNG_READ_ONCE(session->active))
3501 return 0;
3502 if (chan->priv->channel_type == METADATA_CHANNEL)
3503 return 0;
3504
3505 lttng_metadata_begin(session);
3506
3507 ret = lttng_metadata_printf(session,
3508 "event {\n"
3509 " name = \"%s\";\n"
3510 " id = %u;\n"
3511 " stream_id = %u;\n",
3512 event_recorder->priv->parent.desc->event_name,
3513 event_recorder->priv->id,
3514 event_recorder->chan->priv->id);
3515 if (ret)
3516 goto end;
3517
3518 ret = lttng_metadata_printf(session,
3519 " fields := struct {\n"
3520 );
3521 if (ret)
3522 goto end;
3523
3524 ret = _lttng_fields_metadata_statedump(session, event_recorder);
3525 if (ret)
3526 goto end;
3527
3528 /*
3529 * LTTng space reservation can only reserve multiples of the
3530 * byte size.
3531 */
3532 ret = lttng_metadata_printf(session,
3533 " };\n"
3534 "};\n\n");
3535 if (ret)
3536 goto end;
3537
3538 event_recorder->priv->metadata_dumped = 1;
3539 end:
3540 lttng_metadata_end(session);
3541 return ret;
3542
3543 }
3544
3545 /*
3546 * Must be called with sessions_mutex held.
3547 * The entire channel metadata is printed as a single atomic metadata
3548 * transaction.
3549 */
3550 static
3551 int _lttng_channel_metadata_statedump(struct lttng_kernel_session *session,
3552 struct lttng_kernel_channel_buffer *chan)
3553 {
3554 int ret = 0;
3555
3556 if (chan->priv->metadata_dumped || !LTTNG_READ_ONCE(session->active))
3557 return 0;
3558
3559 if (chan->priv->channel_type == METADATA_CHANNEL)
3560 return 0;
3561
3562 lttng_metadata_begin(session);
3563
3564 WARN_ON_ONCE(!chan->priv->header_type);
3565 ret = lttng_metadata_printf(session,
3566 "stream {\n"
3567 " id = %u;\n"
3568 " event.header := %s;\n"
3569 " packet.context := struct packet_context;\n",
3570 chan->priv->id,
3571 chan->priv->header_type == 1 ? "struct event_header_compact" :
3572 "struct event_header_large");
3573 if (ret)
3574 goto end;
3575
3576 if (chan->priv->ctx) {
3577 ret = lttng_metadata_printf(session,
3578 " event.context := struct {\n");
3579 if (ret)
3580 goto end;
3581 }
3582 ret = _lttng_context_metadata_statedump(session, chan->priv->ctx);
3583 if (ret)
3584 goto end;
3585 if (chan->priv->ctx) {
3586 ret = lttng_metadata_printf(session,
3587 " };\n");
3588 if (ret)
3589 goto end;
3590 }
3591
3592 ret = lttng_metadata_printf(session,
3593 "};\n\n");
3594
3595 chan->priv->metadata_dumped = 1;
3596 end:
3597 lttng_metadata_end(session);
3598 return ret;
3599 }
3600
3601 /*
3602 * Must be called with sessions_mutex held.
3603 */
3604 static
3605 int _lttng_stream_packet_context_declare(struct lttng_kernel_session *session)
3606 {
3607 return lttng_metadata_printf(session,
3608 "struct packet_context {\n"
3609 " uint64_clock_monotonic_t timestamp_begin;\n"
3610 " uint64_clock_monotonic_t timestamp_end;\n"
3611 " uint64_t content_size;\n"
3612 " uint64_t packet_size;\n"
3613 " uint64_t packet_seq_num;\n"
3614 " unsigned long events_discarded;\n"
3615 " uint32_t cpu_id;\n"
3616 "};\n\n"
3617 );
3618 }
3619
3620 /*
3621 * Compact header:
3622 * id: range: 0 - 30.
3623 * id 31 is reserved to indicate an extended header.
3624 *
3625 * Large header:
3626 * id: range: 0 - 65534.
3627 * id 65535 is reserved to indicate an extended header.
3628 *
3629 * Must be called with sessions_mutex held.
3630 */
3631 static
3632 int _lttng_event_header_declare(struct lttng_kernel_session *session)
3633 {
3634 return lttng_metadata_printf(session,
3635 "struct event_header_compact {\n"
3636 " enum : uint5_t { compact = 0 ... 30, extended = 31 } id;\n"
3637 " variant <id> {\n"
3638 " struct {\n"
3639 " uint27_clock_monotonic_t timestamp;\n"
3640 " } compact;\n"
3641 " struct {\n"
3642 " uint32_t id;\n"
3643 " uint64_clock_monotonic_t timestamp;\n"
3644 " } extended;\n"
3645 " } v;\n"
3646 "} align(%u);\n"
3647 "\n"
3648 "struct event_header_large {\n"
3649 " enum : uint16_t { compact = 0 ... 65534, extended = 65535 } id;\n"
3650 " variant <id> {\n"
3651 " struct {\n"
3652 " uint32_clock_monotonic_t timestamp;\n"
3653 " } compact;\n"
3654 " struct {\n"
3655 " uint32_t id;\n"
3656 " uint64_clock_monotonic_t timestamp;\n"
3657 " } extended;\n"
3658 " } v;\n"
3659 "} align(%u);\n\n",
3660 lttng_alignof(uint32_t) * CHAR_BIT,
3661 lttng_alignof(uint16_t) * CHAR_BIT
3662 );
3663 }
3664
3665 /*
3666 * Approximation of NTP time of day to clock monotonic correlation,
3667 * taken at start of trace.
3668 * Yes, this is only an approximation. Yes, we can (and will) do better
3669 * in future versions.
3670 * This function may return a negative offset. It may happen if the
3671 * system sets the REALTIME clock to 0 after boot.
3672 *
3673 * Use the 64-bit timespec on kernels that have it, which makes 32-bit
3674 * architectures y2038 compliant.
3675 */
3676 static
3677 int64_t measure_clock_offset(void)
3678 {
3679 uint64_t monotonic_avg, monotonic[2], realtime;
3680 uint64_t tcf = trace_clock_freq();
3681 int64_t offset;
3682 unsigned long flags;
3683 #ifdef LTTNG_KERNEL_HAS_TIMESPEC64
3684 struct timespec64 rts = { 0, 0 };
3685 #else
3686 struct timespec rts = { 0, 0 };
3687 #endif
3688
3689 /* Disable interrupts to increase correlation precision. */
3690 local_irq_save(flags);
3691 monotonic[0] = trace_clock_read64();
3692 #ifdef LTTNG_KERNEL_HAS_TIMESPEC64
3693 ktime_get_real_ts64(&rts);
3694 #else
3695 getnstimeofday(&rts);
3696 #endif
3697 monotonic[1] = trace_clock_read64();
3698 local_irq_restore(flags);
3699
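	/*
	 * Use the midpoint of the two monotonic reads as the instant at which
	 * the realtime clock was sampled, then convert the realtime value to
	 * trace clock units before computing the offset.
	 */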
3700 monotonic_avg = (monotonic[0] + monotonic[1]) >> 1;
3701 realtime = (uint64_t) rts.tv_sec * tcf;
3702 if (tcf == NSEC_PER_SEC) {
3703 realtime += rts.tv_nsec;
3704 } else {
3705 uint64_t n = rts.tv_nsec * tcf;
3706
3707 do_div(n, NSEC_PER_SEC);
3708 realtime += n;
3709 }
3710 offset = (int64_t) realtime - monotonic_avg;
3711 return offset;
3712 }
3713
3714 static
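/*
 * Emit a string into the metadata, escaping backslashes, double quotes and
 * newlines so the result remains a valid CTF string literal.
 */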
3715 int print_escaped_ctf_string(struct lttng_kernel_session *session, const char *string)
3716 {
3717 int ret = 0;
3718 size_t i;
3719 char cur;
3720
3721 i = 0;
3722 cur = string[i];
3723 while (cur != '\0') {
3724 switch (cur) {
3725 case '\n':
3726 ret = lttng_metadata_printf(session, "%s", "\\n");
3727 break;
3728 case '\\':
3729 case '"':
3730 ret = lttng_metadata_printf(session, "%c", '\\');
3731 if (ret)
3732 goto error;
3733 /* We still print the current char */
3734 lttng_fallthrough;
3735 default:
3736 ret = lttng_metadata_printf(session, "%c", cur);
3737 break;
3738 }
3739
3740 if (ret)
3741 goto error;
3742
3743 cur = string[++i];
3744 }
3745 error:
3746 return ret;
3747 }
3748
3749 static
3750 int print_metadata_escaped_field(struct lttng_kernel_session *session, const char *field,
3751 const char *field_value)
3752 {
3753 int ret;
3754
3755 ret = lttng_metadata_printf(session, " %s = \"", field);
3756 if (ret)
3757 goto error;
3758
3759 ret = print_escaped_ctf_string(session, field_value);
3760 if (ret)
3761 goto error;
3762
3763 ret = lttng_metadata_printf(session, "\";\n");
3764
3765 error:
3766 return ret;
3767 }
3768
3769 /*
3770 * Output metadata into this session's metadata buffers.
3771 * Must be called with sessions_mutex held.
3772 */
3773 static
3774 int _lttng_session_metadata_statedump(struct lttng_kernel_session *session)
3775 {
3776 unsigned char *uuid_c = session->priv->uuid.b;
3777 unsigned char uuid_s[37], clock_uuid_s[BOOT_ID_LEN];
3778 const char *product_uuid;
3779 struct lttng_kernel_channel_buffer_private *chan_priv;
3780 struct lttng_kernel_event_recorder_private *event_recorder_priv;
3781 int ret = 0;
3782
3783 if (!LTTNG_READ_ONCE(session->active))
3784 return 0;
3785
3786 lttng_metadata_begin(session);
3787
3788 if (session->priv->metadata_dumped)
3789 goto skip_session;
3790
3791 snprintf(uuid_s, sizeof(uuid_s),
3792 "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
3793 uuid_c[0], uuid_c[1], uuid_c[2], uuid_c[3],
3794 uuid_c[4], uuid_c[5], uuid_c[6], uuid_c[7],
3795 uuid_c[8], uuid_c[9], uuid_c[10], uuid_c[11],
3796 uuid_c[12], uuid_c[13], uuid_c[14], uuid_c[15]);
3797
3798 ret = lttng_metadata_printf(session,
3799 "typealias integer { size = 8; align = %u; signed = false; } := uint8_t;\n"
3800 "typealias integer { size = 16; align = %u; signed = false; } := uint16_t;\n"
3801 "typealias integer { size = 32; align = %u; signed = false; } := uint32_t;\n"
3802 "typealias integer { size = 64; align = %u; signed = false; } := uint64_t;\n"
3803 "typealias integer { size = %u; align = %u; signed = false; } := unsigned long;\n"
3804 "typealias integer { size = 5; align = 1; signed = false; } := uint5_t;\n"
3805 "typealias integer { size = 27; align = 1; signed = false; } := uint27_t;\n"
3806 "\n"
3807 "trace {\n"
3808 " major = %u;\n"
3809 " minor = %u;\n"
3810 " uuid = \"%s\";\n"
3811 " byte_order = %s;\n"
3812 " packet.header := struct {\n"
3813 " uint32_t magic;\n"
3814 " uint8_t uuid[16];\n"
3815 " uint32_t stream_id;\n"
3816 " uint64_t stream_instance_id;\n"
3817 " };\n"
3818 "};\n\n",
3819 lttng_alignof(uint8_t) * CHAR_BIT,
3820 lttng_alignof(uint16_t) * CHAR_BIT,
3821 lttng_alignof(uint32_t) * CHAR_BIT,
3822 lttng_alignof(uint64_t) * CHAR_BIT,
3823 sizeof(unsigned long) * CHAR_BIT,
3824 lttng_alignof(unsigned long) * CHAR_BIT,
3825 CTF_SPEC_MAJOR,
3826 CTF_SPEC_MINOR,
3827 uuid_s,
3828 #if __BYTE_ORDER == __BIG_ENDIAN
3829 "be"
3830 #else
3831 "le"
3832 #endif
3833 );
3834 if (ret)
3835 goto end;
3836
3837 ret = lttng_metadata_printf(session,
3838 "env {\n"
3839 " hostname = \"%s\";\n"
3840 " domain = \"kernel\";\n"
3841 " sysname = \"%s\";\n"
3842 " kernel_release = \"%s\";\n"
3843 " kernel_version = \"%s\";\n"
3844 " tracer_name = \"lttng-modules\";\n"
3845 " tracer_major = %d;\n"
3846 " tracer_minor = %d;\n"
3847 " tracer_patchlevel = %d;\n"
3848 " trace_buffering_scheme = \"global\";\n",
3849 current->nsproxy->uts_ns->name.nodename,
3850 utsname()->sysname,
3851 utsname()->release,
3852 utsname()->version,
3853 LTTNG_MODULES_MAJOR_VERSION,
3854 LTTNG_MODULES_MINOR_VERSION,
3855 LTTNG_MODULES_PATCHLEVEL_VERSION
3856 );
3857 if (ret)
3858 goto end;
3859
3860 ret = print_metadata_escaped_field(session, "trace_name", session->priv->name);
3861 if (ret)
3862 goto end;
3863 ret = print_metadata_escaped_field(session, "trace_creation_datetime",
3864 session->priv->creation_time);
3865 if (ret)
3866 goto end;
3867
3868 /* Add the product UUID to the 'env' section */
3869 product_uuid = dmi_get_system_info(DMI_PRODUCT_UUID);
3870 if (product_uuid) {
3871 ret = lttng_metadata_printf(session,
3872 " product_uuid = \"%s\";\n",
3873 product_uuid
3874 );
3875 if (ret)
3876 goto end;
3877 }
3878
3879 /* Close the 'env' section */
3880 ret = lttng_metadata_printf(session, "};\n\n");
3881 if (ret)
3882 goto end;
3883
3884 ret = lttng_metadata_printf(session,
3885 "clock {\n"
3886 " name = \"%s\";\n",
3887 trace_clock_name()
3888 );
3889 if (ret)
3890 goto end;
3891
3892 if (!trace_clock_uuid(clock_uuid_s)) {
3893 ret = lttng_metadata_printf(session,
3894 " uuid = \"%s\";\n",
3895 clock_uuid_s
3896 );
3897 if (ret)
3898 goto end;
3899 }
3900
3901 ret = lttng_metadata_printf(session,
3902 " description = \"%s\";\n"
3903 " freq = %llu; /* Frequency, in Hz */\n"
3904 " /* clock value offset from Epoch is: offset * (1/freq) */\n"
3905 " offset = %lld;\n"
3906 "};\n\n",
3907 trace_clock_description(),
3908 (unsigned long long) trace_clock_freq(),
3909 (long long) measure_clock_offset()
3910 );
3911 if (ret)
3912 goto end;
3913
3914 ret = lttng_metadata_printf(session,
3915 "typealias integer {\n"
3916 " size = 27; align = 1; signed = false;\n"
3917 " map = clock.%s.value;\n"
3918 "} := uint27_clock_monotonic_t;\n"
3919 "\n"
3920 "typealias integer {\n"
3921 " size = 32; align = %u; signed = false;\n"
3922 " map = clock.%s.value;\n"
3923 "} := uint32_clock_monotonic_t;\n"
3924 "\n"
3925 "typealias integer {\n"
3926 " size = 64; align = %u; signed = false;\n"
3927 " map = clock.%s.value;\n"
3928 "} := uint64_clock_monotonic_t;\n\n",
3929 trace_clock_name(),
3930 lttng_alignof(uint32_t) * CHAR_BIT,
3931 trace_clock_name(),
3932 lttng_alignof(uint64_t) * CHAR_BIT,
3933 trace_clock_name()
3934 );
3935 if (ret)
3936 goto end;
3937
3938 ret = _lttng_stream_packet_context_declare(session);
3939 if (ret)
3940 goto end;
3941
3942 ret = _lttng_event_header_declare(session);
3943 if (ret)
3944 goto end;
3945
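/*
 * Channel and event metadata are walked on every statedump, even when the
 * session-level declarations above were already emitted: each channel and
 * event tracks its own metadata_dumped flag, so only newly added ones
 * produce output here.
 */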
3946 skip_session:
3947 list_for_each_entry(chan_priv, &session->priv->chan, node) {
3948 ret = _lttng_channel_metadata_statedump(session, chan_priv->pub);
3949 if (ret)
3950 goto end;
3951 }
3952
3953 list_for_each_entry(event_recorder_priv, &session->priv->events, parent.node) {
3954 ret = _lttng_event_recorder_metadata_statedump(&event_recorder_priv->pub->parent);
3955 if (ret)
3956 goto end;
3957 }
3958 session->priv->metadata_dumped = 1;
3959 end:
3960 lttng_metadata_end(session);
3961 return ret;
3962 }
3963
3964 /**
3965 * lttng_transport_register - LTTng transport registration
3966 * @transport: transport structure
3967 *
3968 * Registers a transport which can be used as output to extract the data out of
3969 * LTTng. The module calling this registration function must ensure that no
3970 * trap-inducing code will be executed by the transport functions. E.g.
3971 * vmalloc_sync_mappings() must be called between a vmalloc and the moment the
3972 * memory is made visible to the transport functions. This registration acts as
3973 * a vmalloc_sync_mappings(), so a module only needs to synchronize the TLBs
3974 * itself if it allocates virtual memory after its registration.
3975 */
3976 void lttng_transport_register(struct lttng_transport *transport)
3977 {
3978 /*
3979 * Make sure no page fault can be triggered by the module about to be
3980 * registered. We deal with this here so we don't have to call
3981 * vmalloc_sync_mappings() in each module's init.
3982 */
3983 wrapper_vmalloc_sync_mappings();
3984
3985 mutex_lock(&sessions_mutex);
3986 list_add_tail(&transport->node, &lttng_transport_list);
3987 mutex_unlock(&sessions_mutex);
3988 }
3989 EXPORT_SYMBOL_GPL(lttng_transport_register);
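/*
 * Minimal usage sketch for a transport module (the transport name and the
 * exact set of initialized fields are hypothetical). Memory vmalloc'd before
 * this call is covered by the implicit wrapper_vmalloc_sync_mappings()
 * performed by the registration; only vmalloc done after registration needs
 * an explicit synchronization.
 *
 *	static struct lttng_transport example_transport = {
 *		.name = "relay-example",
 *		.owner = THIS_MODULE,
 *	};
 *
 *	static int __init example_transport_init(void)
 *	{
 *		lttng_transport_register(&example_transport);
 *		return 0;
 *	}
 */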
3990
3991 /**
3992 * lttng_transport_unregister - LTTng transport unregistration
3993 * @transport: transport structure
3994 */
3995 void lttng_transport_unregister(struct lttng_transport *transport)
3996 {
3997 mutex_lock(&sessions_mutex);
3998 list_del(&transport->node);
3999 mutex_unlock(&sessions_mutex);
4000 }
4001 EXPORT_SYMBOL_GPL(lttng_transport_unregister);
4002
4003 void lttng_counter_transport_register(struct lttng_counter_transport *transport)
4004 {
4005 /*
4006 * Make sure no page fault can be triggered by the module about to be
4007 * registered. We deal with this here so we don't have to call
4008 * vmalloc_sync_mappings() in each module's init.
4009 */
4010 wrapper_vmalloc_sync_mappings();
4011
4012 mutex_lock(&sessions_mutex);
4013 list_add_tail(&transport->node, &lttng_counter_transport_list);
4014 mutex_unlock(&sessions_mutex);
4015 }
4016 EXPORT_SYMBOL_GPL(lttng_counter_transport_register);
4017
4018 void lttng_counter_transport_unregister(struct lttng_counter_transport *transport)
4019 {
4020 mutex_lock(&sessions_mutex);
4021 list_del(&transport->node);
4022 mutex_unlock(&sessions_mutex);
4023 }
4024 EXPORT_SYMBOL_GPL(lttng_counter_transport_unregister);
4025
4026 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
4027
4028 enum cpuhp_state lttng_hp_prepare;
4029 enum cpuhp_state lttng_hp_online;
4030
4031 static int lttng_hotplug_prepare(unsigned int cpu, struct hlist_node *node)
4032 {
4033 struct lttng_cpuhp_node *lttng_node;
4034
4035 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4036 switch (lttng_node->component) {
4037 case LTTNG_RING_BUFFER_FRONTEND:
4038 return 0;
4039 case LTTNG_RING_BUFFER_BACKEND:
4040 return lttng_cpuhp_rb_backend_prepare(cpu, lttng_node);
4041 case LTTNG_RING_BUFFER_ITER:
4042 return 0;
4043 case LTTNG_CONTEXT_PERF_COUNTERS:
4044 return 0;
4045 default:
4046 return -EINVAL;
4047 }
4048 }
4049
4050 static int lttng_hotplug_dead(unsigned int cpu, struct hlist_node *node)
4051 {
4052 struct lttng_cpuhp_node *lttng_node;
4053
4054 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4055 switch (lttng_node->component) {
4056 case LTTNG_RING_BUFFER_FRONTEND:
4057 return lttng_cpuhp_rb_frontend_dead(cpu, lttng_node);
4058 case LTTNG_RING_BUFFER_BACKEND:
4059 return 0;
4060 case LTTNG_RING_BUFFER_ITER:
4061 return 0;
4062 case LTTNG_CONTEXT_PERF_COUNTERS:
4063 return lttng_cpuhp_perf_counter_dead(cpu, lttng_node);
4064 default:
4065 return -EINVAL;
4066 }
4067 }
4068
4069 static int lttng_hotplug_online(unsigned int cpu, struct hlist_node *node)
4070 {
4071 struct lttng_cpuhp_node *lttng_node;
4072
4073 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4074 switch (lttng_node->component) {
4075 case LTTNG_RING_BUFFER_FRONTEND:
4076 return lttng_cpuhp_rb_frontend_online(cpu, lttng_node);
4077 case LTTNG_RING_BUFFER_BACKEND:
4078 return 0;
4079 case LTTNG_RING_BUFFER_ITER:
4080 return lttng_cpuhp_rb_iter_online(cpu, lttng_node);
4081 case LTTNG_CONTEXT_PERF_COUNTERS:
4082 return lttng_cpuhp_perf_counter_online(cpu, lttng_node);
4083 default:
4084 return -EINVAL;
4085 }
4086 }
4087
4088 static int lttng_hotplug_offline(unsigned int cpu, struct hlist_node *node)
4089 {
4090 struct lttng_cpuhp_node *lttng_node;
4091
4092 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4093 switch (lttng_node->component) {
4094 case LTTNG_RING_BUFFER_FRONTEND:
4095 return lttng_cpuhp_rb_frontend_offline(cpu, lttng_node);
4096 case LTTNG_RING_BUFFER_BACKEND:
4097 return 0;
4098 case LTTNG_RING_BUFFER_ITER:
4099 return 0;
4100 case LTTNG_CONTEXT_PERF_COUNTERS:
4101 return 0;
4102 default:
4103 return -EINVAL;
4104 }
4105 }
4106
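/*
 * Register two dynamic multi-instance CPU hotplug states: a PREPARE-level
 * state (prepare/dead callbacks) and an ONLINE-level state (online/offline
 * callbacks). Each callback dispatches on the lttng_cpuhp_node component,
 * so the ring buffer and perf-counter contexts share the same state slots.
 */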
4107 static int __init lttng_init_cpu_hotplug(void)
4108 {
4109 int ret;
4110
4111 ret = cpuhp_setup_state_multi(CPUHP_BP_PREPARE_DYN, "lttng:prepare",
4112 lttng_hotplug_prepare,
4113 lttng_hotplug_dead);
4114 if (ret < 0) {
4115 return ret;
4116 }
4117 lttng_hp_prepare = ret;
4118 lttng_rb_set_hp_prepare(ret);
4119
4120 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "lttng:online",
4121 lttng_hotplug_online,
4122 lttng_hotplug_offline);
4123 if (ret < 0) {
4124 cpuhp_remove_multi_state(lttng_hp_prepare);
4125 lttng_hp_prepare = 0;
4126 return ret;
4127 }
4128 lttng_hp_online = ret;
4129 lttng_rb_set_hp_online(ret);
4130
4131 return 0;
4132 }
4133
4134 static void __exit lttng_exit_cpu_hotplug(void)
4135 {
4136 lttng_rb_set_hp_online(0);
4137 cpuhp_remove_multi_state(lttng_hp_online);
4138 lttng_rb_set_hp_prepare(0);
4139 cpuhp_remove_multi_state(lttng_hp_prepare);
4140 }
4141
4142 #else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
4143 static int lttng_init_cpu_hotplug(void)
4144 {
4145 return 0;
4146 }
4147 static void lttng_exit_cpu_hotplug(void)
4148 {
4149 }
4150 #endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
4151
4152 static int __init lttng_events_init(void)
4153 {
4154 int ret;
4155
4156 ret = wrapper_lttng_fixup_sig(THIS_MODULE);
4157 if (ret)
4158 return ret;
4159 ret = wrapper_get_pfnblock_flags_mask_init();
4160 if (ret)
4161 return ret;
4162 ret = wrapper_get_pageblock_flags_mask_init();
4163 if (ret)
4164 return ret;
4165 ret = lttng_probes_init();
4166 if (ret)
4167 return ret;
4168 ret = lttng_context_init();
4169 if (ret)
4170 return ret;
4171 ret = lttng_tracepoint_init();
4172 if (ret)
4173 goto error_tp;
4174 event_recorder_cache = KMEM_CACHE(lttng_kernel_event_recorder, 0);
4175 if (!event_recorder_cache) {
4176 ret = -ENOMEM;
4177 goto error_kmem_event_recorder;
4178 }
4179 event_recorder_private_cache = KMEM_CACHE(lttng_kernel_event_recorder_private, 0);
4180 if (!event_recorder_private_cache) {
4181 ret = -ENOMEM;
4182 goto error_kmem_event_recorder_private;
4183 }
4184 event_notifier_cache = KMEM_CACHE(lttng_kernel_event_notifier, 0);
4185 if (!event_notifier_cache) {
4186 ret = -ENOMEM;
4187 goto error_kmem_event_notifier;
4188 }
4189 event_notifier_private_cache = KMEM_CACHE(lttng_kernel_event_notifier_private, 0);
4190 if (!event_notifier_private_cache) {
4191 ret = -ENOMEM;
4192 goto error_kmem_event_notifier_private;
4193 }
4194 ret = lttng_abi_init();
4195 if (ret)
4196 goto error_abi;
4197 ret = lttng_logger_init();
4198 if (ret)
4199 goto error_logger;
4200 ret = lttng_init_cpu_hotplug();
4201 if (ret)
4202 goto error_hotplug;
4203 printk(KERN_NOTICE "LTTng: Loaded modules v%s.%s.%s%s (%s)%s%s\n",
4204 __stringify(LTTNG_MODULES_MAJOR_VERSION),
4205 __stringify(LTTNG_MODULES_MINOR_VERSION),
4206 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
4207 LTTNG_MODULES_EXTRAVERSION,
4208 LTTNG_VERSION_NAME,
4209 #ifdef LTTNG_EXTRA_VERSION_GIT
4210 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
4211 #else
4212 "",
4213 #endif
4214 #ifdef LTTNG_EXTRA_VERSION_NAME
4215 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
4216 #else
4217 "");
4218 #endif
4219 #ifdef CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM
4220 printk(KERN_NOTICE "LTTng: Experimental bitwise enum enabled.\n");
4221 #endif /* CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM */
4222 return 0;
4223
4224 error_hotplug:
4225 lttng_logger_exit();
4226 error_logger:
4227 lttng_abi_exit();
4228 error_abi:
4229 kmem_cache_destroy(event_notifier_private_cache);
4230 error_kmem_event_notifier_private:
4231 kmem_cache_destroy(event_notifier_cache);
4232 error_kmem_event_notifier:
4233 kmem_cache_destroy(event_recorder_private_cache);
4234 error_kmem_event_recorder_private:
4235 kmem_cache_destroy(event_recorder_cache);
4236 error_kmem_event_recorder:
4237 lttng_tracepoint_exit();
4238 error_tp:
4239 lttng_context_exit();
4240 printk(KERN_NOTICE "LTTng: Failed to load modules v%s.%s.%s%s (%s)%s%s\n",
4241 __stringify(LTTNG_MODULES_MAJOR_VERSION),
4242 __stringify(LTTNG_MODULES_MINOR_VERSION),
4243 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
4244 LTTNG_MODULES_EXTRAVERSION,
4245 LTTNG_VERSION_NAME,
4246 #ifdef LTTNG_EXTRA_VERSION_GIT
4247 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
4248 #else
4249 "",
4250 #endif
4251 #ifdef LTTNG_EXTRA_VERSION_NAME
4252 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
4253 #else
4254 "");
4255 #endif
4256 return ret;
4257 }
4258
4259 module_init(lttng_events_init);
4260
4261 static void __exit lttng_events_exit(void)
4262 {
4263 struct lttng_kernel_session_private *session_priv, *tmpsession_priv;
4264
4265 lttng_exit_cpu_hotplug();
4266 lttng_logger_exit();
4267 lttng_abi_exit();
4268 list_for_each_entry_safe(session_priv, tmpsession_priv, &sessions, list)
4269 lttng_session_destroy(session_priv->pub);
4270 kmem_cache_destroy(event_recorder_cache);
4271 kmem_cache_destroy(event_recorder_private_cache);
4272 kmem_cache_destroy(event_notifier_cache);
4273 kmem_cache_destroy(event_notifier_private_cache);
4274 lttng_tracepoint_exit();
4275 lttng_context_exit();
4276 printk(KERN_NOTICE "LTTng: Unloaded modules v%s.%s.%s%s (%s)%s%s\n",
4277 __stringify(LTTNG_MODULES_MAJOR_VERSION),
4278 __stringify(LTTNG_MODULES_MINOR_VERSION),
4279 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
4280 LTTNG_MODULES_EXTRAVERSION,
4281 LTTNG_VERSION_NAME,
4282 #ifdef LTTNG_EXTRA_VERSION_GIT
4283 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
4284 #else
4285 "",
4286 #endif
4287 #ifdef LTTNG_EXTRA_VERSION_NAME
4288 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
4289 #else
4290 "");
4291 #endif
4292 }
4293
4294 module_exit(lttng_events_exit);
4295
4296 #include <generated/patches.h>
4297 #ifdef LTTNG_EXTRA_VERSION_GIT
4298 MODULE_INFO(extra_version_git, LTTNG_EXTRA_VERSION_GIT);
4299 #endif
4300 #ifdef LTTNG_EXTRA_VERSION_NAME
4301 MODULE_INFO(extra_version_name, LTTNG_EXTRA_VERSION_NAME);
4302 #endif
4303 MODULE_LICENSE("GPL and additional rights");
4304 MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
4305 MODULE_DESCRIPTION("LTTng tracer");
4306 MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
4307 __stringify(LTTNG_MODULES_MINOR_VERSION) "."
4308 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
4309 LTTNG_MODULES_EXTRAVERSION);