Refactoring: remove unused event_recorder_return
lttng-modules.git: src/lttng-events.c
1 /* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
2 *
3 * lttng-events.c
4 *
5 * Holds LTTng per-session event registry.
6 *
7 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 */
9
10 /*
11 * This page_alloc.h wrapper needs to be included before gfpflags.h because it
12 * overrides a function with a define.
13 */
14 #include "wrapper/page_alloc.h"
15
16 #include <linux/module.h>
17 #include <linux/mutex.h>
18 #include <linux/sched.h>
19 #include <linux/slab.h>
20 #include <linux/jiffies.h>
21 #include <linux/utsname.h>
22 #include <linux/err.h>
23 #include <linux/seq_file.h>
24 #include <linux/file.h>
25 #include <linux/anon_inodes.h>
26 #include <wrapper/file.h>
27 #include <linux/uaccess.h>
28 #include <linux/vmalloc.h>
29 #include <linux/dmi.h>
30
31 #include <wrapper/compiler_attributes.h>
32 #include <wrapper/uuid.h>
33 #include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_mappings() */
34 #include <wrapper/random.h>
35 #include <wrapper/tracepoint.h>
36 #include <wrapper/list.h>
37 #include <wrapper/types.h>
38 #include <lttng/kernel-version.h>
39 #include <lttng/events.h>
40 #include <lttng/events-internal.h>
41 #include <lttng/lttng-bytecode.h>
42 #include <lttng/tracer.h>
43 #include <lttng/event-notifier-notification.h>
44 #include <lttng/abi-old.h>
45 #include <lttng/endian.h>
46 #include <lttng/string-utils.h>
47 #include <lttng/utils.h>
48 #include <ringbuffer/backend.h>
49 #include <ringbuffer/frontend.h>
50 #include <wrapper/time.h>
51
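/* Initial allocation size (bytes) of a session's metadata cache buffer; see lttng_session_create() below. */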
52 #define METADATA_CACHE_DEFAULT_SIZE 4096
53
54 static LIST_HEAD(sessions);
55 static LIST_HEAD(event_notifier_groups);
56 static LIST_HEAD(lttng_transport_list);
57 static LIST_HEAD(lttng_counter_transport_list);
58 /*
59 * Protect the sessions and metadata caches.
60 */
61 static DEFINE_MUTEX(sessions_mutex);
62 static struct kmem_cache *event_recorder_cache;
63 static struct kmem_cache *event_recorder_private_cache;
64 static struct kmem_cache *event_notifier_cache;
65 static struct kmem_cache *event_notifier_private_cache;
66
67 static void lttng_session_lazy_sync_event_enablers(struct lttng_kernel_session *session);
68 static void lttng_session_sync_event_enablers(struct lttng_kernel_session *session);
69 static void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group);
70 static void lttng_event_enabler_sync(struct lttng_event_enabler_common *event_enabler);
71
72 static void _lttng_event_destroy(struct lttng_kernel_event_common *event);
73 static void _lttng_channel_destroy(struct lttng_kernel_channel_buffer *chan);
74 static int _lttng_event_unregister(struct lttng_kernel_event_common *event);
75 static
76 int _lttng_event_recorder_metadata_statedump(struct lttng_kernel_event_common *event);
77 static
78 int _lttng_session_metadata_statedump(struct lttng_kernel_session *session);
79 static
80 void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream);
81 static
82 int _lttng_type_statedump(struct lttng_kernel_session *session,
83 const struct lttng_kernel_type_common *type,
84 enum lttng_kernel_string_encoding parent_encoding,
85 size_t nesting);
86 static
87 int _lttng_field_statedump(struct lttng_kernel_session *session,
88 const struct lttng_kernel_event_field *field,
89 size_t nesting, const char **prev_field_name_p);
90
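/*
 * Wait for all currently executing probe handlers to complete before
 * tearing events down. The RCU flavour used matches how probes are
 * invoked on this kernel version (sched RCU on older kernels, plus an
 * additional synchronize_rcu() on PREEMPT_RT configurations).
 */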
91 void synchronize_trace(void)
92 {
93 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,1,0) || \
94 LTTNG_RHEL_KERNEL_RANGE(4,18,0,193,0,0, 4,19,0,0,0,0))
95 synchronize_rcu();
96 #else
97 synchronize_sched();
98 #endif
99
100 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0))
101 #ifdef CONFIG_PREEMPT_RT_FULL
102 synchronize_rcu();
103 #endif
104 #else /* (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0)) */
105 #ifdef CONFIG_PREEMPT_RT
106 synchronize_rcu();
107 #endif
108 #endif /* (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0)) */
109 }
110
111 void lttng_lock_sessions(void)
112 {
113 mutex_lock(&sessions_mutex);
114 }
115
116 void lttng_unlock_sessions(void)
117 {
118 mutex_unlock(&sessions_mutex);
119 }
120
121 static struct lttng_transport *lttng_transport_find(const char *name)
122 {
123 struct lttng_transport *transport;
124
125 list_for_each_entry(transport, &lttng_transport_list, node) {
126 if (!strcmp(transport->name, name))
127 return transport;
128 }
129 return NULL;
130 }
131
132 /*
133 * Called with sessions lock held.
134 */
135 int lttng_session_active(void)
136 {
137 struct lttng_kernel_session_private *iter;
138
139 list_for_each_entry(iter, &sessions, list) {
140 if (iter->pub->active)
141 return 1;
142 }
143 return 0;
144 }
145
146 struct lttng_kernel_session *lttng_session_create(void)
147 {
148 struct lttng_kernel_session *session;
149 struct lttng_kernel_session_private *session_priv;
150 struct lttng_metadata_cache *metadata_cache;
151 int i;
152
153 mutex_lock(&sessions_mutex);
154 session = lttng_kvzalloc(sizeof(*session), GFP_KERNEL);
155 if (!session)
156 goto err;
157 session_priv = lttng_kvzalloc(sizeof(*session_priv), GFP_KERNEL);
158 if (!session_priv)
159 goto err_free_session;
160 session->priv = session_priv;
161 session_priv->pub = session;
162
163 INIT_LIST_HEAD(&session_priv->chan);
164 INIT_LIST_HEAD(&session_priv->events);
165 lttng_guid_gen(&session_priv->uuid);
166
167 metadata_cache = kzalloc(sizeof(struct lttng_metadata_cache),
168 GFP_KERNEL);
169 if (!metadata_cache)
170 goto err_free_session_private;
171 metadata_cache->data = vzalloc(METADATA_CACHE_DEFAULT_SIZE);
172 if (!metadata_cache->data)
173 goto err_free_cache;
174 metadata_cache->cache_alloc = METADATA_CACHE_DEFAULT_SIZE;
175 kref_init(&metadata_cache->refcount);
176 mutex_init(&metadata_cache->lock);
177 session_priv->metadata_cache = metadata_cache;
178 INIT_LIST_HEAD(&metadata_cache->metadata_stream);
179 memcpy(&metadata_cache->uuid, &session_priv->uuid,
180 sizeof(metadata_cache->uuid));
181 INIT_LIST_HEAD(&session_priv->enablers_head);
182 for (i = 0; i < LTTNG_EVENT_HT_SIZE; i++)
183 INIT_HLIST_HEAD(&session_priv->events_ht.table[i]);
184 list_add(&session_priv->list, &sessions);
185
186 if (lttng_id_tracker_init(&session->pid_tracker, session, TRACKER_PID))
187 goto tracker_alloc_error;
188 if (lttng_id_tracker_init(&session->vpid_tracker, session, TRACKER_VPID))
189 goto tracker_alloc_error;
190 if (lttng_id_tracker_init(&session->uid_tracker, session, TRACKER_UID))
191 goto tracker_alloc_error;
192 if (lttng_id_tracker_init(&session->vuid_tracker, session, TRACKER_VUID))
193 goto tracker_alloc_error;
194 if (lttng_id_tracker_init(&session->gid_tracker, session, TRACKER_GID))
195 goto tracker_alloc_error;
196 if (lttng_id_tracker_init(&session->vgid_tracker, session, TRACKER_VGID))
197 goto tracker_alloc_error;
198
199 mutex_unlock(&sessions_mutex);
200
201 return session;
202
203 tracker_alloc_error:
204 lttng_id_tracker_fini(&session->pid_tracker);
205 lttng_id_tracker_fini(&session->vpid_tracker);
206 lttng_id_tracker_fini(&session->uid_tracker);
207 lttng_id_tracker_fini(&session->vuid_tracker);
208 lttng_id_tracker_fini(&session->gid_tracker);
209 lttng_id_tracker_fini(&session->vgid_tracker);
210 err_free_cache:
211 kfree(metadata_cache);
212 err_free_session_private:
213 lttng_kvfree(session_priv);
214 err_free_session:
215 lttng_kvfree(session);
216 err:
217 mutex_unlock(&sessions_mutex);
218 return NULL;
219 }
220
221 static
222 struct lttng_counter_transport *lttng_counter_transport_find(const char *name)
223 {
224 struct lttng_counter_transport *transport;
225
226 list_for_each_entry(transport, &lttng_counter_transport_list, node) {
227 if (!strcmp(transport->name, name))
228 return transport;
229 }
230 return NULL;
231 }
232
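/*
 * Look up the requested counter transport, pin its owner module, and
 * create the counter backend. Returns NULL on any failure.
 */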
233 struct lttng_counter *lttng_kernel_counter_create(
234 const char *counter_transport_name,
235 size_t number_dimensions, const size_t *dimensions_sizes)
236 {
237 struct lttng_counter *counter = NULL;
238 struct lttng_counter_transport *counter_transport = NULL;
239
240 counter_transport = lttng_counter_transport_find(counter_transport_name);
241 if (!counter_transport) {
242 printk(KERN_WARNING "LTTng: counter transport %s not found.\n",
243 counter_transport_name);
244 goto notransport;
245 }
246 if (!try_module_get(counter_transport->owner)) {
247 printk(KERN_WARNING "LTTng: Can't lock counter transport module.\n");
248 goto notransport;
249 }
250
251 counter = lttng_kvzalloc(sizeof(struct lttng_counter), GFP_KERNEL);
252 if (!counter)
253 goto nomem;
254
255 /* Create event notifier error counter. */
256 counter->ops = &counter_transport->ops;
257 counter->transport = counter_transport;
258
259 counter->counter = counter->ops->counter_create(
260 number_dimensions, dimensions_sizes, 0);
261 if (!counter->counter) {
262 goto create_error;
263 }
264
265 return counter;
266
267 create_error:
268 lttng_kvfree(counter);
269 nomem:
270 if (counter_transport)
271 module_put(counter_transport->owner);
272 notransport:
273 return NULL;
274 }
275
276 struct lttng_event_notifier_group *lttng_event_notifier_group_create(void)
277 {
278 struct lttng_transport *transport = NULL;
279 struct lttng_event_notifier_group *event_notifier_group;
280 const char *transport_name = "relay-event-notifier";
281 size_t subbuf_size = 4096; //TODO
282 size_t num_subbuf = 16; //TODO
283 unsigned int switch_timer_interval = 0;
284 unsigned int read_timer_interval = 0;
285 int i;
286
287 mutex_lock(&sessions_mutex);
288
289 transport = lttng_transport_find(transport_name);
290 if (!transport) {
291 printk(KERN_WARNING "LTTng: transport %s not found\n",
292 transport_name);
293 goto notransport;
294 }
295 if (!try_module_get(transport->owner)) {
296 printk(KERN_WARNING "LTTng: Can't lock transport %s module.\n",
297 transport_name);
298 goto notransport;
299 }
300
301 event_notifier_group = lttng_kvzalloc(sizeof(struct lttng_event_notifier_group),
302 GFP_KERNEL);
303 if (!event_notifier_group)
304 goto nomem;
305
306 /*
307 * Initialize the ring buffer used to store event notifier
308 * notifications.
309 */
310 event_notifier_group->ops = &transport->ops;
311 event_notifier_group->chan = transport->ops.priv->channel_create(
312 transport_name, event_notifier_group, NULL,
313 subbuf_size, num_subbuf, switch_timer_interval,
314 read_timer_interval);
315 if (!event_notifier_group->chan)
316 goto create_error;
317
318 event_notifier_group->transport = transport;
319
320 INIT_LIST_HEAD(&event_notifier_group->enablers_head);
321 INIT_LIST_HEAD(&event_notifier_group->event_notifiers_head);
322 for (i = 0; i < LTTNG_EVENT_HT_SIZE; i++)
323 INIT_HLIST_HEAD(&event_notifier_group->events_ht.table[i]);
324
325 list_add(&event_notifier_group->node, &event_notifier_groups);
326
327 mutex_unlock(&sessions_mutex);
328
329 return event_notifier_group;
330
331 create_error:
332 lttng_kvfree(event_notifier_group);
333 nomem:
334 if (transport)
335 module_put(transport->owner);
336 notransport:
337 mutex_unlock(&sessions_mutex);
338 return NULL;
339 }
340
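/* kref release handler: frees the metadata cache buffer and the cache structure itself. */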
341 void metadata_cache_destroy(struct kref *kref)
342 {
343 struct lttng_metadata_cache *cache =
344 container_of(kref, struct lttng_metadata_cache, refcount);
345 vfree(cache->data);
346 kfree(cache);
347 }
348
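/*
 * Tear-down order matters: unregister all probes first, wait for
 * in-flight probe handlers with synchronize_trace(), then destroy
 * enablers, events and channels, and finally hang up the metadata
 * streams before dropping the metadata cache reference.
 */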
349 void lttng_session_destroy(struct lttng_kernel_session *session)
350 {
351 struct lttng_kernel_channel_buffer_private *chan_priv, *tmpchan_priv;
352 struct lttng_kernel_event_recorder_private *event_recorder_priv, *tmpevent_recorder_priv;
353 struct lttng_metadata_stream *metadata_stream;
354 struct lttng_event_enabler_common *event_enabler, *tmp_event_enabler;
355 int ret;
356
357 mutex_lock(&sessions_mutex);
358 WRITE_ONCE(session->active, 0);
359 list_for_each_entry(chan_priv, &session->priv->chan, node) {
360 ret = lttng_syscalls_unregister_syscall_table(&chan_priv->parent.syscall_table);
361 WARN_ON(ret);
362 }
363 list_for_each_entry(event_recorder_priv, &session->priv->events, parent.node) {
364 ret = _lttng_event_unregister(&event_recorder_priv->pub->parent);
365 WARN_ON(ret);
366 }
367 synchronize_trace(); /* Wait for in-flight events to complete */
368 list_for_each_entry(chan_priv, &session->priv->chan, node) {
369 ret = lttng_syscalls_destroy_syscall_table(&chan_priv->parent.syscall_table);
370 WARN_ON(ret);
371 }
372 list_for_each_entry_safe(event_enabler, tmp_event_enabler, &session->priv->enablers_head, node)
373 lttng_event_enabler_destroy(event_enabler);
374 list_for_each_entry_safe(event_recorder_priv, tmpevent_recorder_priv, &session->priv->events, parent.node)
375 _lttng_event_destroy(&event_recorder_priv->pub->parent);
376 list_for_each_entry_safe(chan_priv, tmpchan_priv, &session->priv->chan, node) {
377 BUG_ON(chan_priv->channel_type == METADATA_CHANNEL);
378 _lttng_channel_destroy(chan_priv->pub);
379 }
380 mutex_lock(&session->priv->metadata_cache->lock);
381 list_for_each_entry(metadata_stream, &session->priv->metadata_cache->metadata_stream, list)
382 _lttng_metadata_channel_hangup(metadata_stream);
383 mutex_unlock(&session->priv->metadata_cache->lock);
384 lttng_id_tracker_fini(&session->pid_tracker);
385 lttng_id_tracker_fini(&session->vpid_tracker);
386 lttng_id_tracker_fini(&session->uid_tracker);
387 lttng_id_tracker_fini(&session->vuid_tracker);
388 lttng_id_tracker_fini(&session->gid_tracker);
389 lttng_id_tracker_fini(&session->vgid_tracker);
390 kref_put(&session->priv->metadata_cache->refcount, metadata_cache_destroy);
391 list_del(&session->priv->list);
392 mutex_unlock(&sessions_mutex);
393 lttng_kvfree(session->priv);
394 lttng_kvfree(session);
395 }
396
397 void lttng_event_notifier_group_destroy(
398 struct lttng_event_notifier_group *event_notifier_group)
399 {
400 struct lttng_event_enabler_common *event_enabler, *tmp_event_enabler;
401 struct lttng_kernel_event_notifier_private *event_notifier_priv, *tmpevent_notifier_priv;
402 int ret;
403
404 if (!event_notifier_group)
405 return;
406
407 mutex_lock(&sessions_mutex);
408
409 ret = lttng_syscalls_unregister_syscall_table(&event_notifier_group->syscall_table);
410 WARN_ON(ret);
411
412 list_for_each_entry_safe(event_notifier_priv, tmpevent_notifier_priv,
413 &event_notifier_group->event_notifiers_head, parent.node) {
414 ret = _lttng_event_unregister(&event_notifier_priv->pub->parent);
415 WARN_ON(ret);
416 }
417
418 /* Wait for in-flight event notifier to complete */
419 synchronize_trace();
420
421 irq_work_sync(&event_notifier_group->wakeup_pending);
422
423 ret = lttng_syscalls_destroy_syscall_table(&event_notifier_group->syscall_table);
424 WARN_ON(ret);
425
426 list_for_each_entry_safe(event_enabler, tmp_event_enabler,
427 &event_notifier_group->enablers_head, node)
428 lttng_event_enabler_destroy(event_enabler);
429
430 list_for_each_entry_safe(event_notifier_priv, tmpevent_notifier_priv,
431 &event_notifier_group->event_notifiers_head, parent.node)
432 _lttng_event_destroy(&event_notifier_priv->pub->parent);
433
434 if (event_notifier_group->error_counter) {
435 struct lttng_counter *error_counter = event_notifier_group->error_counter;
436
437 error_counter->ops->counter_destroy(error_counter->counter);
438 module_put(error_counter->transport->owner);
439 lttng_kvfree(error_counter);
440 event_notifier_group->error_counter = NULL;
441 }
442
443 event_notifier_group->ops->priv->channel_destroy(event_notifier_group->chan);
444 module_put(event_notifier_group->transport->owner);
445 list_del(&event_notifier_group->node);
446
447 mutex_unlock(&sessions_mutex);
448 lttng_kvfree(event_notifier_group);
449 }
450
451 int lttng_session_statedump(struct lttng_kernel_session *session)
452 {
453 int ret;
454
455 mutex_lock(&sessions_mutex);
456 ret = lttng_statedump_start(session);
457 mutex_unlock(&sessions_mutex);
458 return ret;
459 }
460
461 int lttng_session_enable(struct lttng_kernel_session *session)
462 {
463 int ret = 0;
464 struct lttng_kernel_channel_buffer_private *chan_priv;
465
466 mutex_lock(&sessions_mutex);
467 if (session->active) {
468 ret = -EBUSY;
469 goto end;
470 }
471
472 /* Set transient enabler state to "enabled" */
473 session->priv->tstate = 1;
474
475 /* We need to sync enablers with session before activation. */
476 lttng_session_sync_event_enablers(session);
477
478 /*
479 * Snapshot the number of events per channel to know the type of header
480 * we need to use.
481 */
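/* Up to 31 events fit the compact event header (5-bit event ID, with 31 reserved as the escape value to the larger header), hence the threshold below. */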
482 list_for_each_entry(chan_priv, &session->priv->chan, node) {
483 if (chan_priv->header_type)
484 continue; /* don't change it if session stop/restart */
485 if (chan_priv->free_event_id < 31)
486 chan_priv->header_type = 1; /* compact */
487 else
488 chan_priv->header_type = 2; /* large */
489 }
490
491 /* Clear each stream's quiescent state. */
492 list_for_each_entry(chan_priv, &session->priv->chan, node) {
493 if (chan_priv->channel_type != METADATA_CHANNEL)
494 lib_ring_buffer_clear_quiescent_channel(chan_priv->rb_chan);
495 }
496
497 WRITE_ONCE(session->active, 1);
498 WRITE_ONCE(session->priv->been_active, 1);
499 ret = _lttng_session_metadata_statedump(session);
500 if (ret) {
501 WRITE_ONCE(session->active, 0);
502 goto end;
503 }
504 ret = lttng_statedump_start(session);
505 if (ret)
506 WRITE_ONCE(session->active, 0);
507 end:
508 mutex_unlock(&sessions_mutex);
509 return ret;
510 }
511
512 int lttng_session_disable(struct lttng_kernel_session *session)
513 {
514 int ret = 0;
515 struct lttng_kernel_channel_buffer_private *chan_priv;
516
517 mutex_lock(&sessions_mutex);
518 if (!session->active) {
519 ret = -EBUSY;
520 goto end;
521 }
522 WRITE_ONCE(session->active, 0);
523
524 /* Set transient enabler state to "disabled" */
525 session->priv->tstate = 0;
526 lttng_session_sync_event_enablers(session);
527
528 /* Set each stream's quiescent state. */
529 list_for_each_entry(chan_priv, &session->priv->chan, node) {
530 if (chan_priv->channel_type != METADATA_CHANNEL)
531 lib_ring_buffer_set_quiescent_channel(chan_priv->rb_chan);
532 }
533 end:
534 mutex_unlock(&sessions_mutex);
535 return ret;
536 }
537
538 int lttng_session_metadata_regenerate(struct lttng_kernel_session *session)
539 {
540 int ret = 0;
541 struct lttng_kernel_channel_buffer_private *chan_priv;
542 struct lttng_kernel_event_recorder_private *event_recorder_priv;
543 struct lttng_metadata_cache *cache = session->priv->metadata_cache;
544 struct lttng_metadata_stream *stream;
545
546 mutex_lock(&sessions_mutex);
547 if (!session->active) {
548 ret = -EBUSY;
549 goto end;
550 }
551
552 mutex_lock(&cache->lock);
553 memset(cache->data, 0, cache->cache_alloc);
554 cache->metadata_written = 0;
555 cache->version++;
556 list_for_each_entry(stream, &session->priv->metadata_cache->metadata_stream, list) {
557 stream->metadata_out = 0;
558 stream->metadata_in = 0;
559 }
560 mutex_unlock(&cache->lock);
561
562 session->priv->metadata_dumped = 0;
563 list_for_each_entry(chan_priv, &session->priv->chan, node) {
564 chan_priv->metadata_dumped = 0;
565 }
566
567 list_for_each_entry(event_recorder_priv, &session->priv->events, parent.node) {
568 event_recorder_priv->metadata_dumped = 0;
569 }
570
571 ret = _lttng_session_metadata_statedump(session);
572
573 end:
574 mutex_unlock(&sessions_mutex);
575 return ret;
576 }
577
578 static
579 bool is_channel_buffer_metadata(struct lttng_kernel_channel_common *channel)
580 {
581 struct lttng_kernel_channel_buffer *chan_buf;
582
583 if (channel->type != LTTNG_KERNEL_CHANNEL_TYPE_BUFFER)
584 return false;
585 chan_buf = container_of(channel, struct lttng_kernel_channel_buffer, parent);
586 if (chan_buf->priv->channel_type == METADATA_CHANNEL)
587 return true;
588 return false;
589 }
590
591 int lttng_channel_enable(struct lttng_kernel_channel_common *channel)
592 {
593 int ret = 0;
594
595 mutex_lock(&sessions_mutex);
596 if (is_channel_buffer_metadata(channel)) {
597 ret = -EPERM;
598 goto end;
599 }
600 if (channel->enabled) {
601 ret = -EEXIST;
602 goto end;
603 }
604 /* Set transient enabler state to "enabled" */
605 channel->priv->tstate = 1;
606 lttng_session_sync_event_enablers(channel->session);
607 /* Set atomically the state to "enabled" */
608 WRITE_ONCE(channel->enabled, 1);
609 end:
610 mutex_unlock(&sessions_mutex);
611 return ret;
612 }
613
614 int lttng_channel_disable(struct lttng_kernel_channel_common *channel)
615 {
616 int ret = 0;
617
618 mutex_lock(&sessions_mutex);
619 if (is_channel_buffer_metadata(channel)) {
620 ret = -EPERM;
621 goto end;
622 }
623 if (!channel->enabled) {
624 ret = -EEXIST;
625 goto end;
626 }
627 /* Set atomically the state to "disabled" */
628 WRITE_ONCE(channel->enabled, 0);
629 /* Set transient enabler state to "disabled" */
630 channel->priv->tstate = 0;
631 lttng_session_sync_event_enablers(channel->session);
632 end:
633 mutex_unlock(&sessions_mutex);
634 return ret;
635 }
636
637 int lttng_event_enable(struct lttng_kernel_event_common *event)
638 {
639 int ret = 0;
640
641 mutex_lock(&sessions_mutex);
642 switch (event->type) {
643 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
644 {
645 struct lttng_kernel_event_recorder *event_recorder =
646 container_of(event, struct lttng_kernel_event_recorder, parent);
647
648 if (event_recorder->chan->priv->channel_type == METADATA_CHANNEL) {
649 ret = -EPERM;
650 goto end;
651 }
652 break;
653 }
654 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
655 switch (event->priv->instrumentation) {
656 case LTTNG_KERNEL_ABI_KRETPROBE:
657 ret = -EINVAL;
658 goto end;
659 default:
660 break;
661 }
662 break;
663 default:
664 break;
665 }
666
667 if (event->enabled) {
668 ret = -EEXIST;
669 goto end;
670 }
671 switch (event->priv->instrumentation) {
672 case LTTNG_KERNEL_ABI_TRACEPOINT:
673 lttng_fallthrough;
674 case LTTNG_KERNEL_ABI_SYSCALL:
675 ret = -EINVAL;
676 break;
677
678 case LTTNG_KERNEL_ABI_KPROBE:
679 lttng_fallthrough;
680 case LTTNG_KERNEL_ABI_UPROBE:
681 WRITE_ONCE(event->enabled, 1);
682 break;
683
684 case LTTNG_KERNEL_ABI_KRETPROBE:
685 ret = lttng_kretprobes_event_enable_state(event, 1);
686 break;
687
688 case LTTNG_KERNEL_ABI_FUNCTION:
689 lttng_fallthrough;
690 case LTTNG_KERNEL_ABI_NOOP:
691 lttng_fallthrough;
692 default:
693 WARN_ON_ONCE(1);
694 ret = -EINVAL;
695 }
696 end:
697 mutex_unlock(&sessions_mutex);
698 return ret;
699 }
700
701 int lttng_event_disable(struct lttng_kernel_event_common *event)
702 {
703 int ret = 0;
704
705 mutex_lock(&sessions_mutex);
706 switch (event->type) {
707 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
708 {
709 struct lttng_kernel_event_recorder *event_recorder =
710 container_of(event, struct lttng_kernel_event_recorder, parent);
711
712 if (event_recorder->chan->priv->channel_type == METADATA_CHANNEL) {
713 ret = -EPERM;
714 goto end;
715 }
716 break;
717 }
718 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
719 switch (event->priv->instrumentation) {
720 case LTTNG_KERNEL_ABI_KRETPROBE:
721 ret = -EINVAL;
722 goto end;
723 default:
724 break;
725 }
726 break;
727 default:
728 break;
729 }
730
731 if (!event->enabled) {
732 ret = -EEXIST;
733 goto end;
734 }
735 switch (event->priv->instrumentation) {
736 case LTTNG_KERNEL_ABI_TRACEPOINT:
737 lttng_fallthrough;
738 case LTTNG_KERNEL_ABI_SYSCALL:
739 ret = -EINVAL;
740 break;
741
742 case LTTNG_KERNEL_ABI_KPROBE:
743 lttng_fallthrough;
744 case LTTNG_KERNEL_ABI_UPROBE:
745 WRITE_ONCE(event->enabled, 0);
746 break;
747
748 case LTTNG_KERNEL_ABI_KRETPROBE:
749 ret = lttng_kretprobes_event_enable_state(event, 0);
750 break;
751
752 case LTTNG_KERNEL_ABI_FUNCTION:
753 lttng_fallthrough;
754 case LTTNG_KERNEL_ABI_NOOP:
755 lttng_fallthrough;
756 default:
757 WARN_ON_ONCE(1);
758 ret = -EINVAL;
759 }
760 end:
761 mutex_unlock(&sessions_mutex);
762 return ret;
763 }
764
765 struct lttng_kernel_channel_buffer *lttng_channel_buffer_create(struct lttng_kernel_session *session,
766 const char *transport_name,
767 void *buf_addr,
768 size_t subbuf_size, size_t num_subbuf,
769 unsigned int switch_timer_interval,
770 unsigned int read_timer_interval,
771 enum channel_type channel_type)
772 {
773 struct lttng_kernel_channel_buffer *chan;
774 struct lttng_kernel_channel_buffer_private *chan_priv;
775 struct lttng_transport *transport = NULL;
776
777 mutex_lock(&sessions_mutex);
778 if (session->priv->been_active && channel_type != METADATA_CHANNEL)
779 goto active; /* Refuse to add channel to active session */
780 transport = lttng_transport_find(transport_name);
781 if (!transport) {
782 printk(KERN_WARNING "LTTng: transport %s not found\n",
783 transport_name);
784 goto notransport;
785 }
786 if (!try_module_get(transport->owner)) {
787 printk(KERN_WARNING "LTTng: Can't lock transport module.\n");
788 goto notransport;
789 }
790 chan = kzalloc(sizeof(struct lttng_kernel_channel_buffer), GFP_KERNEL);
791 if (!chan)
792 goto nomem;
793 chan_priv = kzalloc(sizeof(struct lttng_kernel_channel_buffer_private), GFP_KERNEL);
794 if (!chan_priv)
795 goto nomem_priv;
796 chan->priv = chan_priv;
797 chan_priv->pub = chan;
798 chan->parent.type = LTTNG_KERNEL_CHANNEL_TYPE_BUFFER;
799 chan->parent.session = session;
800 chan->priv->id = session->priv->free_chan_id++;
801 chan->ops = &transport->ops;
802 /*
803 * Note: the channel creation op already writes into the packet
804 * headers. Therefore the "chan" information used as input
805 * should be already accessible.
806 */
807 chan->priv->rb_chan = transport->ops.priv->channel_create(transport_name,
808 chan, buf_addr, subbuf_size, num_subbuf,
809 switch_timer_interval, read_timer_interval);
810 if (!chan->priv->rb_chan)
811 goto create_error;
812 chan->priv->parent.tstate = 1;
813 chan->parent.enabled = 1;
814 chan->priv->transport = transport;
815 chan->priv->channel_type = channel_type;
816 list_add(&chan->priv->node, &session->priv->chan);
817 mutex_unlock(&sessions_mutex);
818 return chan;
819
820 create_error:
821 kfree(chan_priv);
822 nomem_priv:
823 kfree(chan);
824 nomem:
825 if (transport)
826 module_put(transport->owner);
827 notransport:
828 active:
829 mutex_unlock(&sessions_mutex);
830 return NULL;
831 }
832
833 /*
834 * Only used internally at session destruction for per-cpu channels, and
835 * when metadata channel is released.
836 * Needs to be called with sessions mutex held.
837 */
838 static
839 void _lttng_channel_destroy(struct lttng_kernel_channel_buffer *chan)
840 {
841 chan->ops->priv->channel_destroy(chan->priv->rb_chan);
842 module_put(chan->priv->transport->owner);
843 list_del(&chan->priv->node);
844 lttng_kernel_destroy_context(chan->priv->ctx);
845 kfree(chan->priv);
846 kfree(chan);
847 }
848
849 void lttng_metadata_channel_destroy(struct lttng_kernel_channel_buffer *chan)
850 {
851 BUG_ON(chan->priv->channel_type != METADATA_CHANNEL);
852
853 /* Protect the metadata cache with the sessions_mutex. */
854 mutex_lock(&sessions_mutex);
855 _lttng_channel_destroy(chan);
856 mutex_unlock(&sessions_mutex);
857 }
858 EXPORT_SYMBOL_GPL(lttng_metadata_channel_destroy);
859
860 static
861 void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream)
862 {
863 stream->finalized = 1;
864 wake_up_interruptible(&stream->read_wait);
865 }
866
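/*
 * Allocate the public/private event pair from the kmem caches matching
 * the enabler type and wire the cross-pointers. On later error paths the
 * caller releases the pair with lttng_kernel_event_free().
 */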
867 static
868 struct lttng_kernel_event_common *lttng_kernel_event_alloc(struct lttng_event_enabler_common *event_enabler)
869 {
870 struct lttng_kernel_abi_event *event_param = &event_enabler->event_param;
871 enum lttng_kernel_abi_instrumentation itype = event_param->instrumentation;
872
873 switch (event_enabler->enabler_type) {
874 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
875 {
876 struct lttng_event_recorder_enabler *event_recorder_enabler =
877 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
878 struct lttng_kernel_event_recorder *event_recorder;
879 struct lttng_kernel_event_recorder_private *event_recorder_priv;
880 struct lttng_kernel_channel_buffer *chan = event_recorder_enabler->chan;
881
882 event_recorder = kmem_cache_zalloc(event_recorder_cache, GFP_KERNEL);
883 if (!event_recorder)
884 return NULL;
885 event_recorder_priv = kmem_cache_zalloc(event_recorder_private_cache, GFP_KERNEL);
886 if (!event_recorder_priv) {
887 kmem_cache_free(event_recorder_cache, event_recorder);
888 return NULL;
889 }
890 event_recorder_priv->pub = event_recorder;
891 event_recorder_priv->parent.pub = &event_recorder->parent;
892 event_recorder->priv = event_recorder_priv;
893 event_recorder->parent.priv = &event_recorder_priv->parent;
894
895 event_recorder->parent.type = LTTNG_KERNEL_EVENT_TYPE_RECORDER;
896 event_recorder->parent.run_filter = lttng_kernel_interpret_event_filter;
897 event_recorder->priv->parent.instrumentation = itype;
898 INIT_LIST_HEAD(&event_recorder->priv->parent.filter_bytecode_runtime_head);
899 INIT_LIST_HEAD(&event_recorder->priv->parent.enablers_ref_head);
900
901 event_recorder->chan = chan;
902 event_recorder->priv->id = chan->priv->free_event_id++;
903 return &event_recorder->parent;
904 }
905 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
906 {
907 struct lttng_event_notifier_enabler *event_notifier_enabler =
908 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
909 struct lttng_kernel_event_notifier *event_notifier;
910 struct lttng_kernel_event_notifier_private *event_notifier_priv;
911
912 event_notifier = kmem_cache_zalloc(event_notifier_cache, GFP_KERNEL);
913 if (!event_notifier)
914 return NULL;
915 event_notifier_priv = kmem_cache_zalloc(event_notifier_private_cache, GFP_KERNEL);
916 if (!event_notifier_priv) {
917 kmem_cache_free(event_notifier_private_cache, event_notifier);
918 return NULL;
919 }
920 event_notifier_priv->pub = event_notifier;
921 event_notifier_priv->parent.pub = &event_notifier->parent;
922 event_notifier->priv = event_notifier_priv;
923 event_notifier->parent.priv = &event_notifier_priv->parent;
924
925 event_notifier->parent.type = LTTNG_KERNEL_EVENT_TYPE_NOTIFIER;
926 event_notifier->parent.run_filter = lttng_kernel_interpret_event_filter;
927 event_notifier->priv->parent.instrumentation = itype;
928 event_notifier->priv->parent.user_token = event_enabler->user_token;
929 INIT_LIST_HEAD(&event_notifier->priv->parent.filter_bytecode_runtime_head);
930 INIT_LIST_HEAD(&event_notifier->priv->parent.enablers_ref_head);
931
932 event_notifier->priv->group = event_notifier_enabler->group;
933 event_notifier->priv->error_counter_index = event_notifier_enabler->error_counter_index;
934 event_notifier->priv->num_captures = 0;
935 event_notifier->notification_send = lttng_event_notifier_notification_send;
936 INIT_LIST_HEAD(&event_notifier->priv->capture_bytecode_runtime_head);
937 return &event_notifier->parent;
938 }
939 default:
940 return NULL;
941 }
942 }
943
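/* Release the public/private pair allocated by lttng_kernel_event_alloc(). */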
944 static
945 void lttng_kernel_event_free(struct lttng_kernel_event_common *event)
946 {
947 switch (event->type) {
948 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
949 {
950 struct lttng_kernel_event_recorder *event_recorder =
951 container_of(event, struct lttng_kernel_event_recorder, parent);
952
953 kmem_cache_free(event_recorder_private_cache, event_recorder->priv);
954 kmem_cache_free(event_recorder_cache, event_recorder);
955 break;
956 }
957 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
958 {
959 struct lttng_kernel_event_notifier *event_notifier =
960 container_of(event, struct lttng_kernel_event_notifier, parent);
961
962 kmem_cache_free(event_notifier_private_cache, event_notifier->priv);
963 kmem_cache_free(event_notifier_cache, event_notifier);
964 break;
965 }
966 default:
967 WARN_ON_ONCE(1);
968 }
969 }
970
971 static
972 int lttng_kernel_event_notifier_clear_error_counter(struct lttng_kernel_event_common *event)
973 {
974 switch (event->type) {
975 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
976 return 0;
977 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
978 {
979 struct lttng_kernel_event_notifier *event_notifier =
980 container_of(event, struct lttng_kernel_event_notifier, parent);
981 struct lttng_counter *error_counter;
982 struct lttng_event_notifier_group *event_notifier_group = event_notifier->priv->group;
983 size_t dimension_index[1];
984 int ret;
985
986 /*
987 * Clear the error counter bucket. The sessiond keeps track of which
988 * bucket is currently in use. We trust it. The session lock
989 * synchronizes against concurrent creation of the error
990 * counter.
991 */
992 error_counter = event_notifier_group->error_counter;
993 if (!error_counter)
994 return 0;
995 /*
996 * Check that the index is within the boundary of the counter.
997 */
998 if (event_notifier->priv->error_counter_index >= event_notifier_group->error_counter_len) {
999 printk(KERN_INFO "LTTng: event_notifier: Error counter index out-of-bound: counter-len=%zu, index=%llu\n",
1000 event_notifier_group->error_counter_len, event_notifier->priv->error_counter_index);
1001 return -EINVAL;
1002 }
1003
1004 dimension_index[0] = event_notifier->priv->error_counter_index;
1005 ret = error_counter->ops->counter_clear(error_counter->counter, dimension_index);
1006 if (ret) {
1007 printk(KERN_INFO "LTTng: event_notifier: Unable to clear error counter bucket %llu\n",
1008 event_notifier->priv->error_counter_index);
1009 return -EINVAL;
1010 }
1011 return 0;
1012 }
1013 default:
1014 return -EINVAL;
1015 }
1016 }
1017
1018 /*
1019 * Supports event creation while tracing session is active.
1020 * Needs to be called with sessions mutex held.
1021 */
1022 static
1023 struct lttng_kernel_event_recorder *_lttng_kernel_event_recorder_create(struct lttng_event_recorder_enabler *event_enabler,
1024 const struct lttng_kernel_event_desc *event_desc)
1025 {
1026 struct lttng_event_ht *events_ht = lttng_get_event_ht_from_enabler(&event_enabler->parent);
1027 struct list_head *event_list_head = lttng_get_event_list_head_from_enabler(&event_enabler->parent);
1028 struct lttng_kernel_channel_buffer *chan = event_enabler->chan;
1029 struct lttng_kernel_abi_event *event_param = &event_enabler->parent.event_param;
1030 enum lttng_kernel_abi_instrumentation itype = event_param->instrumentation;
1031 struct lttng_kernel_event_common_private *event_priv;
1032 struct lttng_kernel_event_common *event;
1033 struct lttng_kernel_event_recorder *event_recorder;
1034 const char *event_name;
1035 struct hlist_head *head;
1036 int ret;
1037
1038 if (chan->priv->free_event_id == -1U) {
1039 ret = -EMFILE;
1040 goto full;
1041 }
1042
1043 switch (itype) {
1044 case LTTNG_KERNEL_ABI_TRACEPOINT:
1045 event_name = event_desc->event_name;
1046 break;
1047
1048 case LTTNG_KERNEL_ABI_KPROBE:
1049 lttng_fallthrough;
1050 case LTTNG_KERNEL_ABI_UPROBE:
1051 lttng_fallthrough;
1052 case LTTNG_KERNEL_ABI_KRETPROBE:
1053 lttng_fallthrough;
1054 case LTTNG_KERNEL_ABI_SYSCALL:
1055 event_name = event_param->name;
1056 break;
1057
1058 case LTTNG_KERNEL_ABI_FUNCTION:
1059 lttng_fallthrough;
1060 case LTTNG_KERNEL_ABI_NOOP:
1061 lttng_fallthrough;
1062 default:
1063 WARN_ON_ONCE(1);
1064 ret = -EINVAL;
1065 goto type_error;
1066 }
1067
1068 head = utils_borrow_hash_table_bucket(events_ht->table, LTTNG_EVENT_HT_SIZE, event_name);
1069 lttng_hlist_for_each_entry(event_priv, head, hlist_node) {
1070 if (lttng_event_enabler_event_name_match_event(&event_enabler->parent, event_name, event_priv->pub)) {
1071 ret = -EEXIST;
1072 goto exist;
1073 }
1074 }
1075
1076 event = lttng_kernel_event_alloc(&event_enabler->parent);
1077 if (!event) {
1078 ret = -ENOMEM;
1079 goto alloc_error;
1080 }
1081 event_recorder = container_of(event, struct lttng_kernel_event_recorder, parent);
1082
1083 switch (itype) {
1084 case LTTNG_KERNEL_ABI_TRACEPOINT:
1085 /* Event will be enabled by enabler sync. */
1086 event->enabled = 0;
1087 event->priv->registered = 0;
1088 event->priv->desc = lttng_event_desc_get(event_name);
1089 if (!event->priv->desc) {
1090 ret = -ENOENT;
1091 goto register_error;
1092 }
1093 /* Populate lttng_event structure before event registration. */
1094 smp_wmb();
1095 break;
1096
1097 case LTTNG_KERNEL_ABI_KPROBE:
1098 /*
1099 * Needs to be explicitly enabled after creation, since
1100 * we may want to apply filters.
1101 */
1102 event->enabled = 0;
1103 event->priv->registered = 1;
1104 /*
1105 * Populate lttng_event structure before event
1106 * registration.
1107 */
1108 smp_wmb();
1109 ret = lttng_kprobes_register_event(event_name,
1110 event_param->u.kprobe.symbol_name,
1111 event_param->u.kprobe.offset,
1112 event_param->u.kprobe.addr,
1113 event);
1114 if (ret) {
1115 ret = -EINVAL;
1116 goto register_error;
1117 }
1118 ret = try_module_get(event->priv->desc->owner);
1119 WARN_ON_ONCE(!ret);
1120 break;
1121
1122 case LTTNG_KERNEL_ABI_KRETPROBE:
1123 {
1124 struct lttng_kernel_event_common *event_return;
1125
1126 /* kretprobe defines 2 events */
1127 /*
1128 * Needs to be explicitly enabled after creation, since
1129 * we may want to apply filters.
1130 */
1131 event->enabled = 0;
1132 event->priv->registered = 1;
1133
1134 event_return = lttng_kernel_event_alloc(&event_enabler->parent);
1135 if (!event_return) {
1136 ret = -ENOMEM;
1137 goto alloc_error;
1138 }
1139
1140 event_return->enabled = 0;
1141 event_return->priv->registered = 1;
1142
1143 /*
1144 * Populate lttng_event structure before kretprobe registration.
1145 */
1146 smp_wmb();
1147 ret = lttng_kretprobes_register(event_name,
1148 event_param->u.kretprobe.symbol_name,
1149 event_param->u.kretprobe.offset,
1150 event_param->u.kretprobe.addr,
1151 event, event_return);
1152 if (ret) {
1153 lttng_kernel_event_free(event_return);
1154 ret = -EINVAL;
1155 goto register_error;
1156 }
1157 /* Take 2 refs on the module: one per event. */
1158 ret = try_module_get(event->priv->desc->owner);
1159 WARN_ON_ONCE(!ret);
1160 ret = try_module_get(event_return->priv->desc->owner);
1161 WARN_ON_ONCE(!ret);
1162 ret = _lttng_event_recorder_metadata_statedump(event_return);
1163 WARN_ON_ONCE(ret > 0);
1164 if (ret) {
1165 lttng_kernel_event_free(event_return);
1166 module_put(event_return->priv->desc->owner);
1167 module_put(event->priv->desc->owner);
1168 goto statedump_error;
1169 }
1170 list_add(&event_return->priv->node, event_list_head);
1171 break;
1172 }
1173
1174 case LTTNG_KERNEL_ABI_SYSCALL:
1175 /*
1176 * Needs to be explicitly enabled after creation, since
1177 * we may want to apply filters.
1178 */
1179 event->enabled = 0;
1180 event->priv->registered = 0;
1181 event->priv->desc = event_desc;
1182 switch (event_param->u.syscall.entryexit) {
1183 case LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT:
1184 ret = -EINVAL;
1185 goto register_error;
1186 case LTTNG_KERNEL_ABI_SYSCALL_ENTRY:
1187 event->priv->u.syscall.entryexit = LTTNG_SYSCALL_ENTRY;
1188 break;
1189 case LTTNG_KERNEL_ABI_SYSCALL_EXIT:
1190 event->priv->u.syscall.entryexit = LTTNG_SYSCALL_EXIT;
1191 break;
1192 }
1193 switch (event_param->u.syscall.abi) {
1194 case LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL:
1195 ret = -EINVAL;
1196 goto register_error;
1197 case LTTNG_KERNEL_ABI_SYSCALL_ABI_NATIVE:
1198 event->priv->u.syscall.abi = LTTNG_SYSCALL_ABI_NATIVE;
1199 break;
1200 case LTTNG_KERNEL_ABI_SYSCALL_ABI_COMPAT:
1201 event->priv->u.syscall.abi = LTTNG_SYSCALL_ABI_COMPAT;
1202 break;
1203 }
1204 if (!event->priv->desc) {
1205 ret = -EINVAL;
1206 goto register_error;
1207 }
1208 break;
1209
1210 case LTTNG_KERNEL_ABI_UPROBE:
1211 /*
1212 * Needs to be explicitly enabled after creation, since
1213 * we may want to apply filters.
1214 */
1215 event->enabled = 0;
1216 event->priv->registered = 1;
1217
1218 /*
1219 * Populate lttng_event structure before event
1220 * registration.
1221 */
1222 smp_wmb();
1223
1224 ret = lttng_uprobes_register_event(event_param->name,
1225 event_param->u.uprobe.fd,
1226 event);
1227 if (ret)
1228 goto register_error;
1229 ret = try_module_get(event->priv->desc->owner);
1230 WARN_ON_ONCE(!ret);
1231 break;
1232
1233 case LTTNG_KERNEL_ABI_FUNCTION:
1234 lttng_fallthrough;
1235 case LTTNG_KERNEL_ABI_NOOP:
1236 lttng_fallthrough;
1237 default:
1238 WARN_ON_ONCE(1);
1239 ret = -EINVAL;
1240 goto register_error;
1241 }
1242 ret = _lttng_event_recorder_metadata_statedump(event);
1243 WARN_ON_ONCE(ret > 0);
1244 if (ret) {
1245 goto statedump_error;
1246 }
1247 hlist_add_head(&event->priv->hlist_node, head);
1248 list_add(&event->priv->node, event_list_head);
1249 return event_recorder;
1250
1251 statedump_error:
1252 /* If a statedump error occurs, events will not be readable. */
1253 register_error:
1254 lttng_kernel_event_free(event);
1255 alloc_error:
1256 exist:
1257 type_error:
1258 full:
1259 return ERR_PTR(ret);
1260 }
1261
1262 static
1263 struct lttng_kernel_event_notifier *_lttng_kernel_event_notifier_create(struct lttng_event_notifier_enabler *event_enabler,
1264 const struct lttng_kernel_event_desc *event_desc)
1265 {
1266 struct lttng_event_ht *events_ht = lttng_get_event_ht_from_enabler(&event_enabler->parent);
1267 struct list_head *event_list_head = lttng_get_event_list_head_from_enabler(&event_enabler->parent);
1268 struct lttng_kernel_abi_event *event_param = &event_enabler->parent.event_param;
1269 enum lttng_kernel_abi_instrumentation itype = event_param->instrumentation;
1270 struct lttng_kernel_event_common_private *event_priv;
1271 struct lttng_kernel_event_common *event;
1272 struct lttng_kernel_event_notifier *event_notifier;
1273 const char *event_name;
1274 struct hlist_head *head;
1275 int ret;
1276
1277 switch (itype) {
1278 case LTTNG_KERNEL_ABI_TRACEPOINT:
1279 event_name = event_desc->event_name;
1280 break;
1281
1282 case LTTNG_KERNEL_ABI_KPROBE:
1283 lttng_fallthrough;
1284 case LTTNG_KERNEL_ABI_UPROBE:
1285 lttng_fallthrough;
1286 case LTTNG_KERNEL_ABI_SYSCALL:
1287 event_name = event_param->name;
1288 break;
1289
1290 case LTTNG_KERNEL_ABI_KRETPROBE:
1291 lttng_fallthrough;
1292 case LTTNG_KERNEL_ABI_FUNCTION:
1293 lttng_fallthrough;
1294 case LTTNG_KERNEL_ABI_NOOP:
1295 lttng_fallthrough;
1296 default:
1297 WARN_ON_ONCE(1);
1298 ret = -EINVAL;
1299 goto type_error;
1300 }
1301
1302 head = utils_borrow_hash_table_bucket(events_ht->table, LTTNG_EVENT_HT_SIZE, event_name);
1303 lttng_hlist_for_each_entry(event_priv, head, hlist_node) {
1304 if (lttng_event_enabler_event_name_match_event(&event_enabler->parent, event_name, event_priv->pub)) {
1305 ret = -EEXIST;
1306 goto exist;
1307 }
1308 }
1309
1310 event = lttng_kernel_event_alloc(&event_enabler->parent);
1311 if (!event) {
1312 ret = -ENOMEM;
1313 goto alloc_error;
1314 }
1315 event_notifier = container_of(event, struct lttng_kernel_event_notifier, parent);
1316
1317 switch (itype) {
1318 case LTTNG_KERNEL_ABI_TRACEPOINT:
1319 /* Event will be enabled by enabler sync. */
1320 event->enabled = 0;
1321 event->priv->registered = 0;
1322 event->priv->desc = lttng_event_desc_get(event_name);
1323 if (!event->priv->desc) {
1324 ret = -ENOENT;
1325 goto register_error;
1326 }
1327 /* Populate lttng_event_notifier structure before event registration. */
1328 smp_wmb();
1329 break;
1330
1331 case LTTNG_KERNEL_ABI_KPROBE:
1332 /*
1333 * Needs to be explicitly enabled after creation, since
1334 * we may want to apply filters.
1335 */
1336 event->enabled = 0;
1337 event->priv->registered = 1;
1338 /*
1339 * Populate lttng_event_notifier structure before event
1340 * registration.
1341 */
1342 smp_wmb();
1343 ret = lttng_kprobes_register_event(event_param->u.kprobe.symbol_name,
1344 event_param->u.kprobe.symbol_name,
1345 event_param->u.kprobe.offset,
1346 event_param->u.kprobe.addr,
1347 event);
1348 if (ret) {
1349 ret = -EINVAL;
1350 goto register_error;
1351 }
1352 ret = try_module_get(event->priv->desc->owner);
1353 WARN_ON_ONCE(!ret);
1354 break;
1355
1356 case LTTNG_KERNEL_ABI_SYSCALL:
1357 /*
1358 * Needs to be explicitly enabled after creation, since
1359 * we may want to apply filters.
1360 */
1361 event->enabled = 0;
1362 event->priv->registered = 0;
1363 event->priv->desc = event_desc;
1364 switch (event_param->u.syscall.entryexit) {
1365 case LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT:
1366 ret = -EINVAL;
1367 goto register_error;
1368 case LTTNG_KERNEL_ABI_SYSCALL_ENTRY:
1369 event->priv->u.syscall.entryexit = LTTNG_SYSCALL_ENTRY;
1370 break;
1371 case LTTNG_KERNEL_ABI_SYSCALL_EXIT:
1372 event->priv->u.syscall.entryexit = LTTNG_SYSCALL_EXIT;
1373 break;
1374 }
1375 switch (event_param->u.syscall.abi) {
1376 case LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL:
1377 ret = -EINVAL;
1378 goto register_error;
1379 case LTTNG_KERNEL_ABI_SYSCALL_ABI_NATIVE:
1380 event->priv->u.syscall.abi = LTTNG_SYSCALL_ABI_NATIVE;
1381 break;
1382 case LTTNG_KERNEL_ABI_SYSCALL_ABI_COMPAT:
1383 event->priv->u.syscall.abi = LTTNG_SYSCALL_ABI_COMPAT;
1384 break;
1385 }
1386
1387 if (!event->priv->desc) {
1388 ret = -EINVAL;
1389 goto register_error;
1390 }
1391 break;
1392
1393 case LTTNG_KERNEL_ABI_UPROBE:
1394 /*
1395 * Needs to be explicitly enabled after creation, since
1396 * we may want to apply filters.
1397 */
1398 event->enabled = 0;
1399 event->priv->registered = 1;
1400
1401 /*
1402 * Populate lttng_event_notifier structure before
1403 * event_notifier registration.
1404 */
1405 smp_wmb();
1406
1407 ret = lttng_uprobes_register_event(event_param->name,
1408 event_param->u.uprobe.fd,
1409 event);
1410 if (ret)
1411 goto register_error;
1412 ret = try_module_get(event->priv->desc->owner);
1413 WARN_ON_ONCE(!ret);
1414 break;
1415
1416 case LTTNG_KERNEL_ABI_KRETPROBE:
1417 lttng_fallthrough;
1418 case LTTNG_KERNEL_ABI_FUNCTION:
1419 lttng_fallthrough;
1420 case LTTNG_KERNEL_ABI_NOOP:
1421 lttng_fallthrough;
1422 default:
1423 WARN_ON_ONCE(1);
1424 ret = -EINVAL;
1425 goto register_error;
1426 }
1427
1428 list_add(&event->priv->node, event_list_head);
1429 hlist_add_head(&event->priv->hlist_node, head);
1430
1431 ret = lttng_kernel_event_notifier_clear_error_counter(event);
1432 if (ret)
1433 goto register_error;
1434 return event_notifier;
1435
1436 register_error:
1437 lttng_kernel_event_free(event);
1438 alloc_error:
1439 exist:
1440 type_error:
1441 return ERR_PTR(ret);
1442 }
1443
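/*
 * Dispatch event creation to the recorder or notifier path according to
 * the enabler type. Called with sessions mutex held; see
 * lttng_kernel_event_create() for the locked wrapper.
 */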
1444 struct lttng_kernel_event_common *_lttng_kernel_event_create(struct lttng_event_enabler_common *event_enabler,
1445 const struct lttng_kernel_event_desc *event_desc)
1446 {
1447 switch (event_enabler->enabler_type) {
1448 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
1449 {
1450 struct lttng_event_recorder_enabler *event_recorder_enabler =
1451 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
1452 struct lttng_kernel_event_recorder *event_recorder;
1453
1454 event_recorder = _lttng_kernel_event_recorder_create(event_recorder_enabler, event_desc);
1455 if (!event_recorder)
1456 return NULL;
1457 return &event_recorder->parent;
1458 }
1459 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
1460 {
1461 struct lttng_event_notifier_enabler *event_notifier_enabler =
1462 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
1463 struct lttng_kernel_event_notifier *event_notifier;
1464
1465 event_notifier = _lttng_kernel_event_notifier_create(event_notifier_enabler, event_desc);
1466 if (!event_notifier)
1467 return NULL;
1468 return &event_notifier->parent;
1469 }
1470 default:
1471 return NULL;
1472 }
1473 }
1474
1475 struct lttng_kernel_event_common *lttng_kernel_event_create(struct lttng_event_enabler_common *event_enabler,
1476 const struct lttng_kernel_event_desc *event_desc)
1477 {
1478 struct lttng_kernel_event_common *event;
1479
1480 mutex_lock(&sessions_mutex);
1481 event = _lttng_kernel_event_create(event_enabler, event_desc);
1482 mutex_unlock(&sessions_mutex);
1483 return event;
1484 }
1485
1486
1487
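/*
 * The counter accessors below simply forward to the counter transport
 * ops: read a single per-cpu bucket, aggregate across cpus, or clear a
 * bucket.
 */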
1488 int lttng_kernel_counter_read(struct lttng_counter *counter,
1489 const size_t *dim_indexes, int32_t cpu,
1490 int64_t *val, bool *overflow, bool *underflow)
1491 {
1492 return counter->ops->counter_read(counter->counter, dim_indexes,
1493 cpu, val, overflow, underflow);
1494 }
1495
1496 int lttng_kernel_counter_aggregate(struct lttng_counter *counter,
1497 const size_t *dim_indexes, int64_t *val,
1498 bool *overflow, bool *underflow)
1499 {
1500 return counter->ops->counter_aggregate(counter->counter, dim_indexes,
1501 val, overflow, underflow);
1502 }
1503
1504 int lttng_kernel_counter_clear(struct lttng_counter *counter,
1505 const size_t *dim_indexes)
1506 {
1507 return counter->ops->counter_clear(counter->counter, dim_indexes);
1508 }
1509
1510 /* Only used for tracepoints for now. */
1511 static
1512 void register_event(struct lttng_kernel_event_common *event)
1513 {
1514 const struct lttng_kernel_event_desc *desc;
1515 int ret = -EINVAL;
1516
1517 if (event->priv->registered)
1518 return;
1519
1520 desc = event->priv->desc;
1521 switch (event->priv->instrumentation) {
1522 case LTTNG_KERNEL_ABI_TRACEPOINT:
1523 ret = lttng_wrapper_tracepoint_probe_register(desc->event_kname,
1524 desc->tp_class->probe_callback,
1525 event);
1526 break;
1527
1528 case LTTNG_KERNEL_ABI_SYSCALL:
1529 ret = lttng_syscall_filter_enable_event(event);
1530 break;
1531
1532 case LTTNG_KERNEL_ABI_KPROBE:
1533 lttng_fallthrough;
1534 case LTTNG_KERNEL_ABI_UPROBE:
1535 ret = 0;
1536 break;
1537
1538 case LTTNG_KERNEL_ABI_KRETPROBE:
1539 switch (event->type) {
1540 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
1541 ret = 0;
1542 break;
1543 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
1544 WARN_ON_ONCE(1);
1545 break;
1546 }
1547 break;
1548
1549 case LTTNG_KERNEL_ABI_FUNCTION:
1550 lttng_fallthrough;
1551 case LTTNG_KERNEL_ABI_NOOP:
1552 lttng_fallthrough;
1553 default:
1554 WARN_ON_ONCE(1);
1555 }
1556 if (!ret)
1557 event->priv->registered = 1;
1558 }
1559
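/*
 * Unregister the probe backing this event (tracepoint, kprobe, kretprobe,
 * syscall filter, uprobe). Called with sessions mutex held; on success
 * the event is marked as unregistered.
 */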
1560 int _lttng_event_unregister(struct lttng_kernel_event_common *event)
1561 {
1562 struct lttng_kernel_event_common_private *event_priv = event->priv;
1563 const struct lttng_kernel_event_desc *desc;
1564 int ret = -EINVAL;
1565
1566 if (!event_priv->registered)
1567 return 0;
1568
1569 desc = event_priv->desc;
1570 switch (event_priv->instrumentation) {
1571 case LTTNG_KERNEL_ABI_TRACEPOINT:
1572 ret = lttng_wrapper_tracepoint_probe_unregister(event_priv->desc->event_kname,
1573 event_priv->desc->tp_class->probe_callback,
1574 event);
1575 break;
1576
1577 case LTTNG_KERNEL_ABI_KPROBE:
1578 lttng_kprobes_unregister_event(event);
1579 ret = 0;
1580 break;
1581
1582 case LTTNG_KERNEL_ABI_KRETPROBE:
1583 switch (event->type) {
1584 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
1585 lttng_kretprobes_unregister(event);
1586 ret = 0;
1587 break;
1588 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
1589 WARN_ON_ONCE(1);
1590 break;
1591 }
1592 break;
1593
1594 case LTTNG_KERNEL_ABI_SYSCALL:
1595 ret = lttng_syscall_filter_disable_event(event);
1596 break;
1597
1598 case LTTNG_KERNEL_ABI_NOOP:
1599 switch (event->type) {
1600 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
1601 ret = 0;
1602 break;
1603 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
1604 WARN_ON_ONCE(1);
1605 break;
1606 }
1607 break;
1608
1609 case LTTNG_KERNEL_ABI_UPROBE:
1610 lttng_uprobes_unregister_event(event);
1611 ret = 0;
1612 break;
1613
1614 case LTTNG_KERNEL_ABI_FUNCTION:
1615 lttng_fallthrough;
1616 default:
1617 WARN_ON_ONCE(1);
1618 }
1619 if (!ret)
1620 event_priv->registered = 0;
1621 return ret;
1622 }
1623
1624 /*
1625 * Only used internally at session destruction.
1626 */
1627 static
1628 void _lttng_event_destroy(struct lttng_kernel_event_common *event)
1629 {
1630 struct lttng_kernel_event_common_private *event_priv = event->priv;
1631 struct lttng_enabler_ref *enabler_ref, *tmp_enabler_ref;
1632
1633 lttng_free_event_filter_runtime(event);
1634 /* Free event enabler refs */
1635 list_for_each_entry_safe(enabler_ref, tmp_enabler_ref,
1636 &event_priv->enablers_ref_head, node)
1637 kfree(enabler_ref);
1638
1639 switch (event->type) {
1640 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
1641 {
1642 struct lttng_kernel_event_recorder *event_recorder =
1643 container_of(event, struct lttng_kernel_event_recorder, parent);
1644
1645 switch (event_priv->instrumentation) {
1646 case LTTNG_KERNEL_ABI_TRACEPOINT:
1647 lttng_event_desc_put(event_priv->desc);
1648 break;
1649
1650 case LTTNG_KERNEL_ABI_KPROBE:
1651 module_put(event_priv->desc->owner);
1652 lttng_kprobes_destroy_event_private(&event_recorder->parent);
1653 break;
1654
1655 case LTTNG_KERNEL_ABI_KRETPROBE:
1656 module_put(event_priv->desc->owner);
1657 lttng_kretprobes_destroy_private(&event_recorder->parent);
1658 break;
1659
1660 case LTTNG_KERNEL_ABI_SYSCALL:
1661 break;
1662
1663 case LTTNG_KERNEL_ABI_UPROBE:
1664 module_put(event_priv->desc->owner);
1665 lttng_uprobes_destroy_event_private(&event_recorder->parent);
1666 break;
1667
1668 case LTTNG_KERNEL_ABI_FUNCTION:
1669 lttng_fallthrough;
1670 case LTTNG_KERNEL_ABI_NOOP:
1671 lttng_fallthrough;
1672 default:
1673 WARN_ON_ONCE(1);
1674 }
1675 list_del(&event_recorder->priv->parent.node);
1676 kmem_cache_free(event_recorder_private_cache, event_recorder->priv);
1677 kmem_cache_free(event_recorder_cache, event_recorder);
1678 break;
1679 }
1680 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
1681 {
1682 struct lttng_kernel_event_notifier *event_notifier =
1683 container_of(event, struct lttng_kernel_event_notifier, parent);
1684
1685 switch (event_notifier->priv->parent.instrumentation) {
1686 case LTTNG_KERNEL_ABI_TRACEPOINT:
1687 lttng_event_desc_put(event_notifier->priv->parent.desc);
1688 break;
1689
1690 case LTTNG_KERNEL_ABI_KPROBE:
1691 module_put(event_notifier->priv->parent.desc->owner);
1692 lttng_kprobes_destroy_event_private(&event_notifier->parent);
1693 break;
1694
1695 case LTTNG_KERNEL_ABI_SYSCALL:
1696 break;
1697
1698 case LTTNG_KERNEL_ABI_UPROBE:
1699 module_put(event_notifier->priv->parent.desc->owner);
1700 lttng_uprobes_destroy_event_private(&event_notifier->parent);
1701 break;
1702
1703 case LTTNG_KERNEL_ABI_KRETPROBE:
1704 lttng_fallthrough;
1705 case LTTNG_KERNEL_ABI_FUNCTION:
1706 lttng_fallthrough;
1707 case LTTNG_KERNEL_ABI_NOOP:
1708 lttng_fallthrough;
1709 default:
1710 WARN_ON_ONCE(1);
1711 }
1712 list_del(&event_notifier->priv->parent.node);
1713 kmem_cache_free(event_notifier_private_cache, event_notifier->priv);
1714 kmem_cache_free(event_notifier_cache, event_notifier);
1715 break;
1716 }
1717 default:
1718 WARN_ON_ONCE(1);
1719 }
1720 }
1721
1722 struct lttng_kernel_id_tracker *get_tracker(struct lttng_kernel_session *session,
1723 enum tracker_type tracker_type)
1724 {
1725 switch (tracker_type) {
1726 case TRACKER_PID:
1727 return &session->pid_tracker;
1728 case TRACKER_VPID:
1729 return &session->vpid_tracker;
1730 case TRACKER_UID:
1731 return &session->uid_tracker;
1732 case TRACKER_VUID:
1733 return &session->vuid_tracker;
1734 case TRACKER_GID:
1735 return &session->gid_tracker;
1736 case TRACKER_VGID:
1737 return &session->vgid_tracker;
1738 default:
1739 WARN_ON_ONCE(1);
1740 return NULL;
1741 }
1742 }
1743
1744 int lttng_session_track_id(struct lttng_kernel_session *session,
1745 enum tracker_type tracker_type, int id)
1746 {
1747 struct lttng_kernel_id_tracker *tracker;
1748 int ret;
1749
1750 tracker = get_tracker(session, tracker_type);
1751 if (!tracker)
1752 return -EINVAL;
1753 if (id < -1)
1754 return -EINVAL;
1755 mutex_lock(&sessions_mutex);
1756 if (id == -1) {
1757 /* track all ids: destroy tracker. */
1758 lttng_id_tracker_destroy(tracker, true);
1759 ret = 0;
1760 } else {
1761 ret = lttng_id_tracker_add(tracker, id);
1762 }
1763 mutex_unlock(&sessions_mutex);
1764 return ret;
1765 }
1766
1767 int lttng_session_untrack_id(struct lttng_kernel_session *session,
1768 enum tracker_type tracker_type, int id)
1769 {
1770 struct lttng_kernel_id_tracker *tracker;
1771 int ret;
1772
1773 tracker = get_tracker(session, tracker_type);
1774 if (!tracker)
1775 return -EINVAL;
1776 if (id < -1)
1777 return -EINVAL;
1778 mutex_lock(&sessions_mutex);
1779 if (id == -1) {
1780 /* untrack all ids: replace by empty tracker. */
1781 ret = lttng_id_tracker_empty_set(tracker);
1782 } else {
1783 ret = lttng_id_tracker_del(tracker, id);
1784 }
1785 mutex_unlock(&sessions_mutex);
1786 return ret;
1787 }
1788
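/* seq_file start operation: takes sessions_mutex, released in id_list_stop(). */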
1789 static
1790 void *id_list_start(struct seq_file *m, loff_t *pos)
1791 {
1792 struct lttng_kernel_id_tracker *id_tracker = m->private;
1793 struct lttng_kernel_id_tracker_rcu *id_tracker_p = id_tracker->p;
1794 struct lttng_id_hash_node *e;
1795 int iter = 0, i;
1796
1797 mutex_lock(&sessions_mutex);
1798 if (id_tracker_p) {
1799 for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
1800 struct hlist_head *head = &id_tracker_p->id_hash[i];
1801
1802 lttng_hlist_for_each_entry(e, head, hlist) {
1803 if (iter++ >= *pos)
1804 return e;
1805 }
1806 }
1807 } else {
1808 /* ID tracker disabled. */
1809 if (iter >= *pos && iter == 0) {
1810 return id_tracker_p; /* empty tracker */
1811 }
1812 iter++;
1813 }
1814 /* End of list */
1815 return NULL;
1816 }
1817
1818 /* Called with sessions_mutex held. */
1819 static
1820 void *id_list_next(struct seq_file *m, void *p, loff_t *ppos)
1821 {
1822 struct lttng_kernel_id_tracker *id_tracker = m->private;
1823 struct lttng_kernel_id_tracker_rcu *id_tracker_p = id_tracker->p;
1824 struct lttng_id_hash_node *e;
1825 int iter = 0, i;
1826
1827 (*ppos)++;
1828 if (id_tracker_p) {
1829 for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
1830 struct hlist_head *head = &id_tracker_p->id_hash[i];
1831
1832 lttng_hlist_for_each_entry(e, head, hlist) {
1833 if (iter++ >= *ppos)
1834 return e;
1835 }
1836 }
1837 } else {
1838 /* ID tracker disabled. */
1839 if (iter >= *ppos && iter == 0)
1840 return p; /* empty tracker */
1841 iter++;
1842 }
1843
1844 /* End of list */
1845 return NULL;
1846 }
1847
1848 static
1849 void id_list_stop(struct seq_file *m, void *p)
1850 {
1851 mutex_unlock(&sessions_mutex);
1852 }
1853
1854 static
1855 int id_list_show(struct seq_file *m, void *p)
1856 {
1857 struct lttng_kernel_id_tracker *id_tracker = m->private;
1858 struct lttng_kernel_id_tracker_rcu *id_tracker_p = id_tracker->p;
1859 int id;
1860
1861 if (p == id_tracker_p) {
1862 /* Tracker disabled. */
1863 id = -1;
1864 } else {
1865 const struct lttng_id_hash_node *e = p;
1866
1867 id = lttng_id_tracker_get_node_id(e);
1868 }
1869 switch (id_tracker->priv->tracker_type) {
1870 case TRACKER_PID:
1871 seq_printf(m, "process { pid = %d; };\n", id);
1872 break;
1873 case TRACKER_VPID:
1874 seq_printf(m, "process { vpid = %d; };\n", id);
1875 break;
1876 case TRACKER_UID:
1877 seq_printf(m, "user { uid = %d; };\n", id);
1878 break;
1879 case TRACKER_VUID:
1880 seq_printf(m, "user { vuid = %d; };\n", id);
1881 break;
1882 case TRACKER_GID:
1883 seq_printf(m, "group { gid = %d; };\n", id);
1884 break;
1885 case TRACKER_VGID:
1886 seq_printf(m, "group { vgid = %d; };\n", id);
1887 break;
1888 default:
1889 seq_printf(m, "UNKNOWN { field = %d };\n", id);
1890 }
1891 return 0;
1892 }
1893
1894 static
1895 const struct seq_operations lttng_tracker_ids_list_seq_ops = {
1896 .start = id_list_start,
1897 .next = id_list_next,
1898 .stop = id_list_stop,
1899 .show = id_list_show,
1900 };
1901
1902 static
1903 int lttng_tracker_ids_list_open(struct inode *inode, struct file *file)
1904 {
1905 return seq_open(file, &lttng_tracker_ids_list_seq_ops);
1906 }
1907
1908 static
1909 int lttng_tracker_ids_list_release(struct inode *inode, struct file *file)
1910 {
1911 struct seq_file *m = file->private_data;
1912 struct lttng_kernel_id_tracker *id_tracker = m->private;
1913 int ret;
1914
1915 WARN_ON_ONCE(!id_tracker);
1916 ret = seq_release(inode, file);
1917 if (!ret)
1918 fput(id_tracker->priv->session->priv->file);
1919 return ret;
1920 }
1921
1922 const struct file_operations lttng_tracker_ids_list_fops = {
1923 .owner = THIS_MODULE,
1924 .open = lttng_tracker_ids_list_open,
1925 .read = seq_read,
1926 .llseek = seq_lseek,
1927 .release = lttng_tracker_ids_list_release,
1928 };
1929
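/*
 * Create an anonymous file descriptor listing the ids tracked for the given
 * tracker type. A reference is taken on the session file so the session
 * outlives the returned file descriptor.
 */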
1930 int lttng_session_list_tracker_ids(struct lttng_kernel_session *session,
1931 enum tracker_type tracker_type)
1932 {
1933 struct file *tracker_ids_list_file;
1934 struct seq_file *m;
1935 int file_fd, ret;
1936
1937 file_fd = lttng_get_unused_fd();
1938 if (file_fd < 0) {
1939 ret = file_fd;
1940 goto fd_error;
1941 }
1942
1943 tracker_ids_list_file = anon_inode_getfile("[lttng_tracker_ids_list]",
1944 &lttng_tracker_ids_list_fops,
1945 NULL, O_RDWR);
1946 if (IS_ERR(tracker_ids_list_file)) {
1947 ret = PTR_ERR(tracker_ids_list_file);
1948 goto file_error;
1949 }
1950 if (!atomic_long_add_unless(&session->priv->file->f_count, 1, LONG_MAX)) {
1951 ret = -EOVERFLOW;
1952 goto refcount_error;
1953 }
1954 ret = lttng_tracker_ids_list_fops.open(NULL, tracker_ids_list_file);
1955 if (ret < 0)
1956 goto open_error;
1957 m = tracker_ids_list_file->private_data;
1958
1959 m->private = get_tracker(session, tracker_type);
1960 BUG_ON(!m->private);
1961 fd_install(file_fd, tracker_ids_list_file);
1962
1963 return file_fd;
1964
1965 open_error:
1966 atomic_long_dec(&session->priv->file->f_count);
1967 refcount_error:
1968 fput(tracker_ids_list_file);
1969 file_error:
1970 put_unused_fd(file_fd);
1971 fd_error:
1972 return ret;
1973 }
1974
1975 /*
1976 * Enabler management.
1977 */
1978 static
1979 int lttng_match_enabler_star_glob(const char *desc_name,
1980 const char *pattern)
1981 {
1982 if (!strutils_star_glob_match(pattern, LTTNG_SIZE_MAX,
1983 desc_name, LTTNG_SIZE_MAX))
1984 return 0;
1985 return 1;
1986 }
1987
1988 static
1989 int lttng_match_enabler_name(const char *desc_name,
1990 const char *name)
1991 {
1992 if (strcmp(desc_name, name))
1993 return 0;
1994 return 1;
1995 }
1996
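/*
 * Check whether an event description matches an enabler. Returns 1 on match,
 * 0 on mismatch, or a negative error code. For syscalls, the "compat_",
 * "syscall_entry_" and "syscall_exit_" prefixes of the description name are
 * stripped before matching against the enabler name or glob pattern, e.g. a
 * LTTNG_ENABLER_FORMAT_STAR_GLOB enabler with pattern "sched_*" matches the
 * "sched_switch" tracepoint description.
 */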
1997 static
1998 int lttng_desc_match_enabler_check(const struct lttng_kernel_event_desc *desc,
1999 struct lttng_event_enabler_common *enabler)
2000 {
2001 const char *desc_name, *enabler_name;
2002 bool compat = false, entry = false;
2003
2004 enabler_name = enabler->event_param.name;
2005 switch (enabler->event_param.instrumentation) {
2006 case LTTNG_KERNEL_ABI_TRACEPOINT:
2007 desc_name = desc->event_name;
2008 switch (enabler->format_type) {
2009 case LTTNG_ENABLER_FORMAT_STAR_GLOB:
2010 return lttng_match_enabler_star_glob(desc_name, enabler_name);
2011 case LTTNG_ENABLER_FORMAT_NAME:
2012 return lttng_match_enabler_name(desc_name, enabler_name);
2013 default:
2014 return -EINVAL;
2015 }
2016 break;
2017
2018 case LTTNG_KERNEL_ABI_SYSCALL:
2019 desc_name = desc->event_name;
2020 if (!strncmp(desc_name, "compat_", strlen("compat_"))) {
2021 desc_name += strlen("compat_");
2022 compat = true;
2023 }
2024 if (!strncmp(desc_name, "syscall_exit_",
2025 strlen("syscall_exit_"))) {
2026 desc_name += strlen("syscall_exit_");
2027 } else if (!strncmp(desc_name, "syscall_entry_",
2028 strlen("syscall_entry_"))) {
2029 desc_name += strlen("syscall_entry_");
2030 entry = true;
2031 } else {
2032 WARN_ON_ONCE(1);
2033 return -EINVAL;
2034 }
2035 switch (enabler->event_param.u.syscall.entryexit) {
2036 case LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT:
2037 break;
2038 case LTTNG_KERNEL_ABI_SYSCALL_ENTRY:
2039 if (!entry)
2040 return 0;
2041 break;
2042 case LTTNG_KERNEL_ABI_SYSCALL_EXIT:
2043 if (entry)
2044 return 0;
2045 break;
2046 default:
2047 return -EINVAL;
2048 }
2049 switch (enabler->event_param.u.syscall.abi) {
2050 case LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL:
2051 break;
2052 case LTTNG_KERNEL_ABI_SYSCALL_ABI_NATIVE:
2053 if (compat)
2054 return 0;
2055 break;
2056 case LTTNG_KERNEL_ABI_SYSCALL_ABI_COMPAT:
2057 if (!compat)
2058 return 0;
2059 break;
2060 default:
2061 return -EINVAL;
2062 }
2063 switch (enabler->event_param.u.syscall.match) {
2064 case LTTNG_KERNEL_ABI_SYSCALL_MATCH_NAME:
2065 switch (enabler->format_type) {
2066 case LTTNG_ENABLER_FORMAT_STAR_GLOB:
2067 return lttng_match_enabler_star_glob(desc_name, enabler_name);
2068 case LTTNG_ENABLER_FORMAT_NAME:
2069 return lttng_match_enabler_name(desc_name, enabler_name);
2070 default:
2071 return -EINVAL;
2072 }
2073 break;
2074 case LTTNG_KERNEL_ABI_SYSCALL_MATCH_NR:
2075 return -EINVAL; /* Not implemented. */
2076 default:
2077 return -EINVAL;
2078 }
2079 break;
2080
2081 default:
2082 WARN_ON_ONCE(1);
2083 return -EINVAL;
2084 }
2085 }
2086
2087 bool lttng_desc_match_enabler(const struct lttng_kernel_event_desc *desc,
2088 struct lttng_event_enabler_common *enabler)
2089 {
2090 int ret;
2091
2092 ret = lttng_desc_match_enabler_check(desc, enabler);
2093 if (ret < 0) {
2094 WARN_ON_ONCE(1);
2095 return false;
2096 }
2097 return ret;
2098 }
2099
2100 bool lttng_event_enabler_match_event(struct lttng_event_enabler_common *event_enabler,
2101 struct lttng_kernel_event_common *event)
2102 {
2103 if (event_enabler->event_param.instrumentation != event->priv->instrumentation)
2104 return false;
2105
2106 switch (event_enabler->enabler_type) {
2107 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
2108 {
2109 struct lttng_event_recorder_enabler *event_recorder_enabler =
2110 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
2111 struct lttng_kernel_event_recorder *event_recorder =
2112 container_of(event, struct lttng_kernel_event_recorder, parent);
2113
2114 if (lttng_desc_match_enabler(event->priv->desc, event_enabler)
2115 && event_recorder->chan == event_recorder_enabler->chan)
2116 return true;
2117 else
2118 return false;
2119 }
2120 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
2121 {
2122 struct lttng_event_notifier_enabler *event_notifier_enabler =
2123 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
2124 struct lttng_kernel_event_notifier *event_notifier =
2125 container_of(event, struct lttng_kernel_event_notifier, parent);
2126
2127 if (lttng_desc_match_enabler(event->priv->desc, event_enabler)
2128 && event_notifier->priv->group == event_notifier_enabler->group
2129 && event->priv->user_token == event_enabler->user_token)
2130 return true;
2131 else
2132 return false;
2133 }
2134 default:
2135 WARN_ON_ONCE(1);
2136 return false;
2137 }
2138 }
2139
2140 bool lttng_event_enabler_desc_match_event(struct lttng_event_enabler_common *event_enabler,
2141 const struct lttng_kernel_event_desc *desc,
2142 struct lttng_kernel_event_common *event)
2143 {
2144 if (event_enabler->event_param.instrumentation != event->priv->instrumentation)
2145 return false;
2146
2147 switch (event_enabler->enabler_type) {
2148 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
2149 {
2150 struct lttng_event_recorder_enabler *event_recorder_enabler =
2151 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
2152 struct lttng_kernel_event_recorder *event_recorder =
2153 container_of(event, struct lttng_kernel_event_recorder, parent);
2154
2155 if (event->priv->desc == desc && event_recorder->chan == event_recorder_enabler->chan)
2156 return true;
2157 else
2158 return false;
2159 }
2160 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
2161 {
2162 struct lttng_event_notifier_enabler *event_notifier_enabler =
2163 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
2164 struct lttng_kernel_event_notifier *event_notifier =
2165 container_of(event, struct lttng_kernel_event_notifier, parent);
2166
2167 if (event->priv->desc == desc
2168 && event_notifier->priv->group == event_notifier_enabler->group
2169 && event->priv->user_token == event_enabler->user_token)
2170 return true;
2171 else
2172 return false;
2173 }
2174 default:
2175 WARN_ON_ONCE(1);
2176 return false;
2177 }
2178 }
2179
2180 bool lttng_event_enabler_event_name_match_event(struct lttng_event_enabler_common *event_enabler,
2181 const char *event_name,
2182 struct lttng_kernel_event_common *event)
2183 {
2184 if (event_enabler->event_param.instrumentation != event->priv->instrumentation)
2185 return false;
2186
2187 switch (event_enabler->enabler_type) {
2188 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
2189 {
2190 struct lttng_event_recorder_enabler *event_recorder_enabler =
2191 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
2192 struct lttng_kernel_event_recorder *event_recorder =
2193 container_of(event, struct lttng_kernel_event_recorder, parent);
2194
2195 if (!strncmp(event->priv->desc->event_name, event_name, LTTNG_KERNEL_ABI_SYM_NAME_LEN - 1)
2196 && event_recorder->chan == event_recorder_enabler->chan)
2197 return true;
2198 else
2199 return false;
2200 }
2201 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
2202 {
2203 struct lttng_event_notifier_enabler *event_notifier_enabler =
2204 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
2205 struct lttng_kernel_event_notifier *event_notifier =
2206 container_of(event, struct lttng_kernel_event_notifier, parent);
2207
2208 if (!strncmp(event->priv->desc->event_name, event_name, LTTNG_KERNEL_ABI_SYM_NAME_LEN - 1)
2209 && event_notifier->priv->group == event_notifier_enabler->group
2210 && event->priv->user_token == event_enabler->user_token)
2211 return true;
2212 else
2213 return false;
2214 }
2215 default:
2216 WARN_ON_ONCE(1);
2217 return false;
2218 }
2219 }
2220
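/*
 * Look up the existing backward reference from an event to the given
 * enabler, if any.
 */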
2221 static
2222 struct lttng_enabler_ref *lttng_enabler_ref(
2223 struct list_head *enablers_ref_list,
2224 struct lttng_event_enabler_common *enabler)
2225 {
2226 struct lttng_enabler_ref *enabler_ref;
2227
2228 list_for_each_entry(enabler_ref, enablers_ref_list, node) {
2229 if (enabler_ref->ref == enabler)
2230 return enabler_ref;
2231 }
2232 return NULL;
2233 }
2234
2235 static
2236 void lttng_create_tracepoint_event_if_missing(struct lttng_event_enabler_common *event_enabler)
2237 {
2238 struct lttng_event_ht *events_ht = lttng_get_event_ht_from_enabler(event_enabler);
2239 struct lttng_kernel_probe_desc *probe_desc;
2240 const struct lttng_kernel_event_desc *desc;
2241 struct list_head *probe_list;
2242 int i;
2243
2244 probe_list = lttng_get_probe_list_head();
2245 /*
2246	 * For each event description provided by the probes, if it matches
2247	 * our enabler, create an associated lttng_event if not
2248	 * already present.
2249 */
2250 list_for_each_entry(probe_desc, probe_list, head) {
2251 for (i = 0; i < probe_desc->nr_events; i++) {
2252 int found = 0;
2253 struct hlist_head *head;
2254 struct lttng_kernel_event_common *event;
2255 struct lttng_kernel_event_common_private *event_priv;
2256
2257 desc = probe_desc->event_desc[i];
2258 if (!lttng_desc_match_enabler(desc, event_enabler))
2259 continue;
2260
2261 /*
2262 * Check if already created.
2263 */
2264 head = utils_borrow_hash_table_bucket(events_ht->table, LTTNG_EVENT_HT_SIZE, desc->event_name);
2265 lttng_hlist_for_each_entry(event_priv, head, hlist_node) {
2266 if (lttng_event_enabler_desc_match_event(event_enabler, desc, event_priv->pub))
2267 found = 1;
2268 }
2269 if (found)
2270 continue;
2271
2272 /*
2273 * We need to create an event for this event probe.
2274 */
2275 event = _lttng_kernel_event_create(event_enabler, desc);
2276 if (!event) {
2277 printk(KERN_INFO "LTTng: Unable to create event %s\n",
2278 probe_desc->event_desc[i]->event_name);
2279 }
2280 }
2281 }
2282 }
2283
2284 static
2285 void lttng_create_syscall_event_if_missing(struct lttng_event_enabler_common *event_enabler)
2286 {
2287 int ret;
2288
2289 ret = lttng_syscalls_register_event(event_enabler);
2290 WARN_ON_ONCE(ret);
2291 }
2292
2293 /*
2294 * Create event if it is missing and present in the list of tracepoint probes.
2295 * Should be called with sessions mutex held.
2296 */
2297 static
2298 void lttng_create_event_if_missing(struct lttng_event_enabler_common *event_enabler)
2299 {
2300 switch (event_enabler->event_param.instrumentation) {
2301 case LTTNG_KERNEL_ABI_TRACEPOINT:
2302 lttng_create_tracepoint_event_if_missing(event_enabler);
2303 break;
2304
2305 case LTTNG_KERNEL_ABI_SYSCALL:
2306 lttng_create_syscall_event_if_missing(event_enabler);
2307 break;
2308
2309 default:
2310 WARN_ON_ONCE(1);
2311 break;
2312 }
2313 }
2314
2315 static
2316 void lttng_event_enabler_init_event_filter(struct lttng_event_enabler_common *event_enabler,
2317 struct lttng_kernel_event_common *event)
2318 {
2319 /* Link filter bytecodes if not linked yet. */
2320 lttng_enabler_link_bytecode(event->priv->desc, lttng_static_ctx,
2321 &event->priv->filter_bytecode_runtime_head, &event_enabler->filter_bytecode_head);
2322 }
2323
2324 static
2325 void lttng_event_enabler_init_event_capture(struct lttng_event_enabler_common *event_enabler,
2326 struct lttng_kernel_event_common *event)
2327 {
2328 switch (event_enabler->enabler_type) {
2329 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
2330 break;
2331 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
2332 {
2333 struct lttng_event_notifier_enabler *event_notifier_enabler =
2334 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
2335 struct lttng_kernel_event_notifier *event_notifier =
2336 container_of(event, struct lttng_kernel_event_notifier, parent);
2337
2338 /* Link capture bytecodes if not linked yet. */
2339 lttng_enabler_link_bytecode(event->priv->desc,
2340 lttng_static_ctx, &event_notifier->priv->capture_bytecode_runtime_head,
2341 &event_notifier_enabler->capture_bytecode_head);
2342 event_notifier->priv->num_captures = event_notifier_enabler->num_captures;
2343 break;
2344 }
2345 default:
2346 WARN_ON_ONCE(1);
2347 }
2348 }
2349
2350 /*
2351 * Create events associated with an event_enabler (if not already present),
2352 * and add backward reference from the event to the enabler.
2353 * Should be called with sessions mutex held.
2354 */
2355 static
2356 int lttng_event_enabler_ref_events(struct lttng_event_enabler_common *event_enabler)
2357 {
2358 struct list_head *event_list_head = lttng_get_event_list_head_from_enabler(event_enabler);
2359 struct lttng_kernel_event_common_private *event_priv;
2360
2361 lttng_syscall_table_set_wildcard_all(event_enabler);
2362
2363 /* First ensure that probe events are created for this enabler. */
2364 lttng_create_event_if_missing(event_enabler);
2365
2366 /* Link the created event with its associated enabler. */
2367 list_for_each_entry(event_priv, event_list_head, node) {
2368 struct lttng_kernel_event_common *event = event_priv->pub;
2369 struct lttng_enabler_ref *enabler_ref;
2370
2371 if (!lttng_event_enabler_match_event(event_enabler, event))
2372 continue;
2373
2374 enabler_ref = lttng_enabler_ref(&event_priv->enablers_ref_head, event_enabler);
2375 if (!enabler_ref) {
2376 /*
2377 * If no backward ref, create it.
2378	 * Add backward ref from event to enabler.
2379 */
2380 enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
2381 if (!enabler_ref)
2382 return -ENOMEM;
2383
2384 enabler_ref->ref = event_enabler;
2385 list_add(&enabler_ref->node, &event_priv->enablers_ref_head);
2386 }
2387
2388 lttng_event_enabler_init_event_filter(event_enabler, event);
2389 lttng_event_enabler_init_event_capture(event_enabler, event);
2390 }
2391 return 0;
2392 }
2393
2394 /*
2395 * Called at module load: connect the probe on all enablers matching
2396 * this event.
2397 * Called with sessions lock held.
2398 */
2399 int lttng_fix_pending_events(void)
2400 {
2401 struct lttng_kernel_session_private *session_priv;
2402
2403 list_for_each_entry(session_priv, &sessions, list)
2404 lttng_session_lazy_sync_event_enablers(session_priv->pub);
2405 return 0;
2406 }
2407
2408 static bool lttng_event_notifier_group_has_active_event_notifiers(
2409 struct lttng_event_notifier_group *event_notifier_group)
2410 {
2411 struct lttng_event_enabler_common *event_enabler;
2412
2413 list_for_each_entry(event_enabler, &event_notifier_group->enablers_head, node) {
2414 if (event_enabler->enabled)
2415 return true;
2416 }
2417 return false;
2418 }
2419
2420 bool lttng_event_notifier_active(void)
2421 {
2422 struct lttng_event_notifier_group *event_notifier_group;
2423
2424 list_for_each_entry(event_notifier_group, &event_notifier_groups, node) {
2425 if (lttng_event_notifier_group_has_active_event_notifiers(event_notifier_group))
2426 return true;
2427 }
2428 return false;
2429 }
2430
2431 int lttng_fix_pending_event_notifiers(void)
2432 {
2433 struct lttng_event_notifier_group *event_notifier_group;
2434
2435 list_for_each_entry(event_notifier_group, &event_notifier_groups, node)
2436 lttng_event_notifier_group_sync_enablers(event_notifier_group);
2437 return 0;
2438 }
2439
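/*
 * Allocate and initialize an event recorder enabler targeting the given
 * channel. The enabler is created disabled and is only published to the
 * session by lttng_event_enabler_session_add().
 */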
2440 struct lttng_event_recorder_enabler *lttng_event_recorder_enabler_create(
2441 enum lttng_enabler_format_type format_type,
2442 struct lttng_kernel_abi_event *event_param,
2443 struct lttng_kernel_channel_buffer *chan)
2444 {
2445 struct lttng_event_recorder_enabler *event_enabler;
2446
2447 event_enabler = kzalloc(sizeof(*event_enabler), GFP_KERNEL);
2448 if (!event_enabler)
2449 return NULL;
2450 event_enabler->parent.enabler_type = LTTNG_EVENT_ENABLER_TYPE_RECORDER;
2451 event_enabler->parent.format_type = format_type;
2452 INIT_LIST_HEAD(&event_enabler->parent.filter_bytecode_head);
2453 memcpy(&event_enabler->parent.event_param, event_param,
2454 sizeof(event_enabler->parent.event_param));
2455 event_enabler->chan = chan;
2456 /* ctx left NULL */
2457 event_enabler->parent.enabled = 0;
2458 return event_enabler;
2459 }
2460
2461 void lttng_event_enabler_session_add(struct lttng_kernel_session *session,
2462 struct lttng_event_recorder_enabler *event_enabler)
2463 {
2464 mutex_lock(&sessions_mutex);
2465 list_add(&event_enabler->parent.node, &session->priv->enablers_head);
2466 event_enabler->parent.published = true;
2467 lttng_session_lazy_sync_event_enablers(session);
2468 mutex_unlock(&sessions_mutex);
2469 }
2470
2471 int lttng_event_enabler_enable(struct lttng_event_enabler_common *event_enabler)
2472 {
2473 mutex_lock(&sessions_mutex);
2474 event_enabler->enabled = 1;
2475 lttng_event_enabler_sync(event_enabler);
2476 mutex_unlock(&sessions_mutex);
2477 return 0;
2478 }
2479
2480 int lttng_event_enabler_disable(struct lttng_event_enabler_common *event_enabler)
2481 {
2482 mutex_lock(&sessions_mutex);
2483 event_enabler->enabled = 0;
2484 lttng_event_enabler_sync(event_enabler);
2485 mutex_unlock(&sessions_mutex);
2486 return 0;
2487 }
2488
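/*
 * Copy a filter bytecode program from user space and append it to the
 * enabler's list of filter bytecodes. The recorded bytecode length is
 * forced to the length used for the allocation.
 */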
2489 static
2490 int lttng_enabler_attach_filter_bytecode(struct lttng_event_enabler_common *enabler,
2491 struct lttng_kernel_abi_filter_bytecode __user *bytecode)
2492 {
2493 struct lttng_kernel_bytecode_node *bytecode_node;
2494 uint32_t bytecode_len;
2495 int ret;
2496
2497 ret = get_user(bytecode_len, &bytecode->len);
2498 if (ret)
2499 return ret;
2500 bytecode_node = lttng_kvzalloc(sizeof(*bytecode_node) + bytecode_len,
2501 GFP_KERNEL);
2502 if (!bytecode_node)
2503 return -ENOMEM;
2504 ret = copy_from_user(&bytecode_node->bc, bytecode,
2505 sizeof(*bytecode) + bytecode_len);
2506 if (ret)
2507 goto error_free;
2508
2509 bytecode_node->type = LTTNG_KERNEL_BYTECODE_TYPE_FILTER;
2510 bytecode_node->enabler = enabler;
2511 /* Enforce length based on allocated size */
2512 bytecode_node->bc.len = bytecode_len;
2513 list_add_tail(&bytecode_node->node, &enabler->filter_bytecode_head);
2514
2515 return 0;
2516
2517 error_free:
2518 lttng_kvfree(bytecode_node);
2519 return ret;
2520 }
2521
2522 int lttng_event_enabler_attach_filter_bytecode(struct lttng_event_enabler_common *event_enabler,
2523 struct lttng_kernel_abi_filter_bytecode __user *bytecode)
2524 {
2525 int ret;
2526 ret = lttng_enabler_attach_filter_bytecode(event_enabler, bytecode);
2527 if (ret)
2528 goto error;
2529 lttng_event_enabler_sync(event_enabler);
2530 return 0;
2531
2532 error:
2533 return ret;
2534 }
2535
2536 int lttng_event_add_callsite(struct lttng_kernel_event_common *event,
2537 struct lttng_kernel_abi_event_callsite __user *callsite)
2538 {
2539
2540 switch (event->priv->instrumentation) {
2541 case LTTNG_KERNEL_ABI_UPROBE:
2542 return lttng_uprobes_event_add_callsite(event, callsite);
2543 default:
2544 return -EINVAL;
2545 }
2546 }
2547
2548 static
2549 void lttng_enabler_destroy(struct lttng_event_enabler_common *enabler)
2550 {
2551 struct lttng_kernel_bytecode_node *filter_node, *tmp_filter_node;
2552
2553 /* Destroy filter bytecode */
2554 list_for_each_entry_safe(filter_node, tmp_filter_node,
2555 &enabler->filter_bytecode_head, node) {
2556 lttng_kvfree(filter_node);
2557 }
2558 }
2559
2560 void lttng_event_enabler_destroy(struct lttng_event_enabler_common *event_enabler)
2561 {
2562 lttng_enabler_destroy(event_enabler);
2563 if (event_enabler->published)
2564 list_del(&event_enabler->node);
2565
2566 switch (event_enabler->enabler_type) {
2567 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
2568 {
2569 struct lttng_event_recorder_enabler *event_recorder_enabler =
2570 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
2571
2572 kfree(event_recorder_enabler);
2573 break;
2574 }
2575 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
2576 {
2577 struct lttng_event_notifier_enabler *event_notifier_enabler =
2578 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
2579
2580 kfree(event_notifier_enabler);
2581 break;
2582 }
2583 default:
2584 WARN_ON_ONCE(1);
2585 }
2586 }
2587
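/*
 * Allocate and initialize an event notifier enabler for the given event
 * notifier group. The enabler is created disabled and is only published to
 * the group by lttng_event_notifier_enabler_group_add().
 */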
2588 struct lttng_event_notifier_enabler *lttng_event_notifier_enabler_create(
2589 enum lttng_enabler_format_type format_type,
2590 struct lttng_kernel_abi_event_notifier *event_notifier_param,
2591 struct lttng_event_notifier_group *event_notifier_group)
2592 {
2593 struct lttng_event_notifier_enabler *event_notifier_enabler;
2594
2595 event_notifier_enabler = kzalloc(sizeof(*event_notifier_enabler), GFP_KERNEL);
2596 if (!event_notifier_enabler)
2597 return NULL;
2598
2599 event_notifier_enabler->parent.enabler_type = LTTNG_EVENT_ENABLER_TYPE_NOTIFIER;
2600 event_notifier_enabler->parent.format_type = format_type;
2601 INIT_LIST_HEAD(&event_notifier_enabler->parent.filter_bytecode_head);
2602 INIT_LIST_HEAD(&event_notifier_enabler->capture_bytecode_head);
2603
2604 event_notifier_enabler->error_counter_index = event_notifier_param->error_counter_index;
2605 event_notifier_enabler->num_captures = 0;
2606
2607 memcpy(&event_notifier_enabler->parent.event_param, &event_notifier_param->event,
2608 sizeof(event_notifier_enabler->parent.event_param));
2609
2610 event_notifier_enabler->parent.enabled = 0;
2611 event_notifier_enabler->parent.user_token = event_notifier_param->event.token;
2612 event_notifier_enabler->group = event_notifier_group;
2613 return event_notifier_enabler;
2614 }
2615
2616 void lttng_event_notifier_enabler_group_add(struct lttng_event_notifier_group *event_notifier_group,
2617 struct lttng_event_notifier_enabler *event_notifier_enabler)
2618 {
2619 mutex_lock(&sessions_mutex);
2620 list_add(&event_notifier_enabler->parent.node, &event_notifier_enabler->group->enablers_head);
2621 event_notifier_enabler->parent.published = true;
2622 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2623 mutex_unlock(&sessions_mutex);
2624 }
2625
2626 int lttng_event_notifier_enabler_enable(
2627 struct lttng_event_notifier_enabler *event_notifier_enabler)
2628 {
2629 mutex_lock(&sessions_mutex);
2630 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 1;
2631 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2632 mutex_unlock(&sessions_mutex);
2633 return 0;
2634 }
2635
2636 int lttng_event_notifier_enabler_disable(
2637 struct lttng_event_notifier_enabler *event_notifier_enabler)
2638 {
2639 mutex_lock(&sessions_mutex);
2640 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 0;
2641 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2642 mutex_unlock(&sessions_mutex);
2643 return 0;
2644 }
2645
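/*
 * Copy a capture bytecode program from user space, append it to the event
 * notifier enabler's list of capture bytecodes, and resync the enablers of
 * the group.
 */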
2646 int lttng_event_notifier_enabler_attach_capture_bytecode(
2647 struct lttng_event_notifier_enabler *event_notifier_enabler,
2648 struct lttng_kernel_abi_capture_bytecode __user *bytecode)
2649 {
2650 struct lttng_kernel_bytecode_node *bytecode_node;
2651 struct lttng_event_enabler_common *enabler =
2652 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler);
2653 uint32_t bytecode_len;
2654 int ret;
2655
2656 ret = get_user(bytecode_len, &bytecode->len);
2657 if (ret)
2658 return ret;
2659
2660 bytecode_node = lttng_kvzalloc(sizeof(*bytecode_node) + bytecode_len,
2661 GFP_KERNEL);
2662 if (!bytecode_node)
2663 return -ENOMEM;
2664
2665 ret = copy_from_user(&bytecode_node->bc, bytecode,
2666 sizeof(*bytecode) + bytecode_len);
2667 if (ret)
2668 goto error_free;
2669
2670 bytecode_node->type = LTTNG_KERNEL_BYTECODE_TYPE_CAPTURE;
2671 bytecode_node->enabler = enabler;
2672
2673 /* Enforce length based on allocated size */
2674 bytecode_node->bc.len = bytecode_len;
2675 list_add_tail(&bytecode_node->node, &event_notifier_enabler->capture_bytecode_head);
2676
2677 event_notifier_enabler->num_captures++;
2678
2679 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2680 goto end;
2681
2682 error_free:
2683 lttng_kvfree(bytecode_node);
2684 end:
2685 return ret;
2686 }
2687
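/*
 * Synchronize the filter state of an event: sync the link state of each
 * filter bytecode runtime, and set eval_filter only when at least one
 * filter runtime is linked and no enabled enabler attached to the event is
 * missing a filter bytecode.
 */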
2688 static
2689 void lttng_event_sync_filter_state(struct lttng_kernel_event_common *event)
2690 {
2691 int has_enablers_without_filter_bytecode = 0, nr_filters = 0;
2692 struct lttng_kernel_bytecode_runtime *runtime;
2693 struct lttng_enabler_ref *enabler_ref;
2694
2695	/* Check whether the event has enabled enablers without filter bytecode. */
2696 list_for_each_entry(enabler_ref, &event->priv->enablers_ref_head, node) {
2697 if (enabler_ref->ref->enabled
2698 && list_empty(&enabler_ref->ref->filter_bytecode_head)) {
2699 has_enablers_without_filter_bytecode = 1;
2700 break;
2701 }
2702 }
2703 event->priv->has_enablers_without_filter_bytecode = has_enablers_without_filter_bytecode;
2704
2705 /* Enable filters */
2706 list_for_each_entry(runtime, &event->priv->filter_bytecode_runtime_head, node) {
2707 lttng_bytecode_sync_state(runtime);
2708 nr_filters++;
2709 }
2710 WRITE_ONCE(event->eval_filter, !(has_enablers_without_filter_bytecode || !nr_filters));
2711 }
2712
2713 static
2714 void lttng_event_sync_capture_state(struct lttng_kernel_event_common *event)
2715 {
2716 switch (event->type) {
2717 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
2718 break;
2719 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
2720 {
2721 struct lttng_kernel_event_notifier *event_notifier =
2722 container_of(event, struct lttng_kernel_event_notifier, parent);
2723 struct lttng_kernel_bytecode_runtime *runtime;
2724 int nr_captures = 0;
2725
2726 /* Enable captures */
2727 list_for_each_entry(runtime, &event_notifier->priv->capture_bytecode_runtime_head, node) {
2728 lttng_bytecode_sync_state(runtime);
2729 nr_captures++;
2730 }
2731 WRITE_ONCE(event_notifier->eval_capture, !!nr_captures);
2732 break;
2733 }
2734 default:
2735 WARN_ON_ONCE(1);
2736 }
2737 }
2738
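/*
 * Compute the enabled state of an event as the union of its enablers'
 * enabled states. For event recorders, this is further intersected with the
 * session and channel transient enable states.
 */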
2739 static
2740 bool lttng_get_event_enabled_state(struct lttng_kernel_event_common *event)
2741 {
2742 struct lttng_enabler_ref *enabler_ref;
2743 bool enabled = false;
2744
2745 switch (event->priv->instrumentation) {
2746 case LTTNG_KERNEL_ABI_TRACEPOINT:
2747 lttng_fallthrough;
2748 case LTTNG_KERNEL_ABI_SYSCALL:
2749 /* Enable events */
2750 list_for_each_entry(enabler_ref, &event->priv->enablers_ref_head, node) {
2751 if (enabler_ref->ref->enabled) {
2752 enabled = true;
2753 break;
2754 }
2755 }
2756 break;
2757 default:
2758 WARN_ON_ONCE(1);
2759 return false;
2760 }
2761
2762 switch (event->type) {
2763 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
2764 {
2765 struct lttng_kernel_event_recorder *event_recorder =
2766 container_of(event, struct lttng_kernel_event_recorder, parent);
2767
2768 /*
2769 * Enabled state is based on union of enablers, with
2770 * intersection of session and channel transient enable
2771 * states.
2772 */
2773 return enabled && event_recorder->chan->parent.session->priv->tstate && event_recorder->chan->priv->parent.tstate;
2774 }
2775 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
2776 return enabled;
2777 default:
2778 WARN_ON_ONCE(1);
2779 return false;
2780 }
2781 }
2782
2783 static
2784 bool lttng_event_is_lazy_sync(struct lttng_kernel_event_common *event)
2785 {
2786 switch (event->priv->instrumentation) {
2787 case LTTNG_KERNEL_ABI_TRACEPOINT:
2788 lttng_fallthrough;
2789 case LTTNG_KERNEL_ABI_SYSCALL:
2790 return true;
2791
2792 default:
2793 /* Not handled with lazy sync. */
2794 return false;
2795 }
2796 }
2797
2798 /*
2799 * Should be called with sessions mutex held.
2800 */
2801 static
2802 void lttng_sync_event_list(struct list_head *event_enabler_list,
2803 struct list_head *event_list)
2804 {
2805 struct lttng_kernel_event_common_private *event_priv;
2806 struct lttng_event_enabler_common *event_enabler;
2807
2808 list_for_each_entry(event_enabler, event_enabler_list, node)
2809 lttng_event_enabler_ref_events(event_enabler);
2810
2811 /*
2812 * For each event, if at least one of its enablers is enabled,
2813 * and its channel and session transient states are enabled, we
2814 * enable the event, else we disable it.
2815 */
2816 list_for_each_entry(event_priv, event_list, node) {
2817 struct lttng_kernel_event_common *event = event_priv->pub;
2818 bool enabled;
2819
2820 if (!lttng_event_is_lazy_sync(event))
2821 continue;
2822
2823 enabled = lttng_get_event_enabled_state(event);
2824 WRITE_ONCE(event->enabled, enabled);
2825 /*
2826 * Sync tracepoint registration with event enabled state.
2827 */
2828 if (enabled) {
2829 register_event(event);
2830 } else {
2831 _lttng_event_unregister(event);
2832 }
2833
2834 lttng_event_sync_filter_state(event);
2835 lttng_event_sync_capture_state(event);
2836 }
2837 }
2838
2839 /*
2840 * lttng_session_sync_event_enablers should be called just before starting a
2841 * session.
2842 */
2843 static
2844 void lttng_session_sync_event_enablers(struct lttng_kernel_session *session)
2845 {
2846 lttng_sync_event_list(&session->priv->enablers_head, &session->priv->events);
2847 }
2848
2849 /*
2850 * Apply enablers to session events, adding events to session if need
2851 * be. It is required after each modification applied to an active
2852 * session, and right before session "start".
2853 * "lazy" sync means we only sync if required.
2854 * Should be called with sessions mutex held.
2855 */
2856 static
2857 void lttng_session_lazy_sync_event_enablers(struct lttng_kernel_session *session)
2858 {
2859 /* We can skip if session is not active */
2860 if (!session->active)
2861 return;
2862 lttng_session_sync_event_enablers(session);
2863 }
2864
2865 static
2866 void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group)
2867 {
2868 lttng_sync_event_list(&event_notifier_group->enablers_head, &event_notifier_group->event_notifiers_head);
2869 }
2870
2871 static
2872 void lttng_event_enabler_sync(struct lttng_event_enabler_common *event_enabler)
2873 {
2874 switch (event_enabler->enabler_type) {
2875 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
2876 {
2877 struct lttng_event_recorder_enabler *event_recorder_enabler =
2878 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
2879 lttng_session_lazy_sync_event_enablers(event_recorder_enabler->chan->parent.session);
2880 break;
2881 }
2882 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
2883 {
2884 struct lttng_event_notifier_enabler *event_notifier_enabler =
2885 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
2886 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2887 break;
2888 }
2889 default:
2890 WARN_ON_ONCE(1);
2891 }
2892 }
2893
2894 /*
2895 * Serialize at most one packet worth of metadata into a metadata
2896 * channel.
2897 * We grab the metadata cache mutex to get exclusive access to our metadata
2898 * buffer and to the metadata cache. Exclusive access to the metadata buffer
2899	 * allows us to do otherwise-racy operations such as checking for remaining
2900	 * space left in the packet and writing, since mutual exclusion protects us from concurrent writes.
2901	 * Mutual exclusion on the metadata cache allows us to read the cache content
2902 * without racing against reallocation of the cache by updates.
2903 * Returns the number of bytes written in the channel, 0 if no data
2904 * was written and a negative value on error.
2905 */
2906 int lttng_metadata_output_channel(struct lttng_metadata_stream *stream,
2907 struct lttng_kernel_ring_buffer_channel *chan, bool *coherent)
2908 {
2909 struct lttng_kernel_ring_buffer_ctx ctx;
2910 int ret = 0;
2911 size_t len, reserve_len;
2912
2913 /*
2914	 * Ensure we support multiple get_next / put sequences followed by
2915 * put_next. The metadata cache lock protects reading the metadata
2916 * cache. It can indeed be read concurrently by "get_next_subbuf" and
2917 * "flush" operations on the buffer invoked by different processes.
2918 * Moreover, since the metadata cache memory can be reallocated, we
2919 * need to have exclusive access against updates even though we only
2920 * read it.
2921 */
2922 mutex_lock(&stream->metadata_cache->lock);
2923 WARN_ON(stream->metadata_in < stream->metadata_out);
2924 if (stream->metadata_in != stream->metadata_out)
2925 goto end;
2926
2927 /* Metadata regenerated, change the version. */
2928 if (stream->metadata_cache->version != stream->version)
2929 stream->version = stream->metadata_cache->version;
2930
2931 len = stream->metadata_cache->metadata_written -
2932 stream->metadata_in;
2933 if (!len)
2934 goto end;
2935 reserve_len = min_t(size_t,
2936 stream->transport->ops.priv->packet_avail_size(chan),
2937 len);
2938 lib_ring_buffer_ctx_init(&ctx, chan, reserve_len,
2939 sizeof(char), NULL);
2940 /*
2941 * If reservation failed, return an error to the caller.
2942 */
2943 ret = stream->transport->ops.event_reserve(&ctx);
2944 if (ret != 0) {
2945 printk(KERN_WARNING "LTTng: Metadata event reservation failed\n");
2946 stream->coherent = false;
2947 goto end;
2948 }
2949 stream->transport->ops.event_write(&ctx,
2950 stream->metadata_cache->data + stream->metadata_in,
2951 reserve_len, 1);
2952 stream->transport->ops.event_commit(&ctx);
2953 stream->metadata_in += reserve_len;
2954 if (reserve_len < len)
2955 stream->coherent = false;
2956 else
2957 stream->coherent = true;
2958 ret = reserve_len;
2959
2960 end:
2961 if (coherent)
2962 *coherent = stream->coherent;
2963 mutex_unlock(&stream->metadata_cache->lock);
2964 return ret;
2965 }
2966
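/*
 * Begin a metadata transaction: the metadata cache lock is taken on the
 * first nested call (tracked by the "producing" refcount) and released by
 * the matching lttng_metadata_end(), which also wakes up metadata stream
 * readers.
 */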
2967 static
2968 void lttng_metadata_begin(struct lttng_kernel_session *session)
2969 {
2970 if (atomic_inc_return(&session->priv->metadata_cache->producing) == 1)
2971 mutex_lock(&session->priv->metadata_cache->lock);
2972 }
2973
2974 static
2975 void lttng_metadata_end(struct lttng_kernel_session *session)
2976 {
2977 WARN_ON_ONCE(!atomic_read(&session->priv->metadata_cache->producing));
2978 if (atomic_dec_return(&session->priv->metadata_cache->producing) == 0) {
2979 struct lttng_metadata_stream *stream;
2980
2981 list_for_each_entry(stream, &session->priv->metadata_cache->metadata_stream, list)
2982 wake_up_interruptible(&stream->read_wait);
2983 mutex_unlock(&session->priv->metadata_cache->lock);
2984 }
2985 }
2986
2987 /*
2988 * Write the metadata to the metadata cache.
2989 * Must be called with sessions_mutex held.
2990	 * The metadata cache lock protects us from concurrent read access from a
2991	 * thread outputting metadata content to the ring buffer.
2992 * The content of the printf is printed as a single atomic metadata
2993 * transaction.
2994 */
2995 int lttng_metadata_printf(struct lttng_kernel_session *session,
2996 const char *fmt, ...)
2997 {
2998 char *str;
2999 size_t len;
3000 va_list ap;
3001
3002 WARN_ON_ONCE(!LTTNG_READ_ONCE(session->active));
3003
3004 va_start(ap, fmt);
3005 str = kvasprintf(GFP_KERNEL, fmt, ap);
3006 va_end(ap);
3007 if (!str)
3008 return -ENOMEM;
3009
3010 len = strlen(str);
3011 WARN_ON_ONCE(!atomic_read(&session->priv->metadata_cache->producing));
3012 if (session->priv->metadata_cache->metadata_written + len >
3013 session->priv->metadata_cache->cache_alloc) {
3014 char *tmp_cache_realloc;
3015 unsigned int tmp_cache_alloc_size;
3016
3017 tmp_cache_alloc_size = max_t(unsigned int,
3018 session->priv->metadata_cache->cache_alloc + len,
3019 session->priv->metadata_cache->cache_alloc << 1);
3020 tmp_cache_realloc = vzalloc(tmp_cache_alloc_size);
3021 if (!tmp_cache_realloc)
3022 goto err;
3023 if (session->priv->metadata_cache->data) {
3024 memcpy(tmp_cache_realloc,
3025 session->priv->metadata_cache->data,
3026 session->priv->metadata_cache->cache_alloc);
3027 vfree(session->priv->metadata_cache->data);
3028 }
3029
3030 session->priv->metadata_cache->cache_alloc = tmp_cache_alloc_size;
3031 session->priv->metadata_cache->data = tmp_cache_realloc;
3032 }
3033 memcpy(session->priv->metadata_cache->data +
3034 session->priv->metadata_cache->metadata_written,
3035 str, len);
3036 session->priv->metadata_cache->metadata_written += len;
3037 kfree(str);
3038
3039 return 0;
3040
3041 err:
3042 kfree(str);
3043 return -ENOMEM;
3044 }
3045
3046 static
3047 int print_tabs(struct lttng_kernel_session *session, size_t nesting)
3048 {
3049 size_t i;
3050
3051 for (i = 0; i < nesting; i++) {
3052 int ret;
3053
3054 ret = lttng_metadata_printf(session, " ");
3055 if (ret) {
3056 return ret;
3057 }
3058 }
3059 return 0;
3060 }
3061
3062 static
3063 int lttng_field_name_statedump(struct lttng_kernel_session *session,
3064 const struct lttng_kernel_event_field *field,
3065 size_t nesting)
3066 {
3067 return lttng_metadata_printf(session, " _%s;\n", field->name);
3068 }
3069
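/*
 * Must be called with sessions_mutex held.
 */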
3070 static
3071 int _lttng_integer_type_statedump(struct lttng_kernel_session *session,
3072 const struct lttng_kernel_type_integer *type,
3073 enum lttng_kernel_string_encoding parent_encoding,
3074 size_t nesting)
3075 {
3076 int ret;
3077
3078 ret = print_tabs(session, nesting);
3079 if (ret)
3080 return ret;
3081 ret = lttng_metadata_printf(session,
3082 "integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s }",
3083 type->size,
3084 type->alignment,
3085 type->signedness,
3086 (parent_encoding == lttng_kernel_string_encoding_none)
3087 ? "none"
3088 : (parent_encoding == lttng_kernel_string_encoding_UTF8)
3089 ? "UTF8"
3090 : "ASCII",
3091 type->base,
3092 #if __BYTE_ORDER == __BIG_ENDIAN
3093 type->reverse_byte_order ? " byte_order = le;" : ""
3094 #else
3095 type->reverse_byte_order ? " byte_order = be;" : ""
3096 #endif
3097 );
3098 return ret;
3099 }
3100
3101 /*
3102 * Must be called with sessions_mutex held.
3103 */
3104 static
3105 int _lttng_struct_type_statedump(struct lttng_kernel_session *session,
3106 const struct lttng_kernel_type_struct *type,
3107 size_t nesting)
3108 {
3109 const char *prev_field_name = NULL;
3110 int ret;
3111 uint32_t i, nr_fields;
3112 unsigned int alignment;
3113
3114 ret = print_tabs(session, nesting);
3115 if (ret)
3116 return ret;
3117 ret = lttng_metadata_printf(session,
3118 "struct {\n");
3119 if (ret)
3120 return ret;
3121 nr_fields = type->nr_fields;
3122 for (i = 0; i < nr_fields; i++) {
3123 const struct lttng_kernel_event_field *iter_field;
3124
3125 iter_field = type->fields[i];
3126 ret = _lttng_field_statedump(session, iter_field, nesting + 1, &prev_field_name);
3127 if (ret)
3128 return ret;
3129 }
3130 ret = print_tabs(session, nesting);
3131 if (ret)
3132 return ret;
3133 alignment = type->alignment;
3134 if (alignment) {
3135 ret = lttng_metadata_printf(session,
3136 "} align(%u)",
3137 alignment);
3138 } else {
3139 ret = lttng_metadata_printf(session,
3140 "}");
3141 }
3142 return ret;
3143 }
3144
3145 /*
3146 * Must be called with sessions_mutex held.
3147 */
3148 static
3149 int _lttng_struct_field_statedump(struct lttng_kernel_session *session,
3150 const struct lttng_kernel_event_field *field,
3151 size_t nesting)
3152 {
3153 int ret;
3154
3155 ret = _lttng_struct_type_statedump(session,
3156 lttng_kernel_get_type_struct(field->type), nesting);
3157 if (ret)
3158 return ret;
3159 return lttng_field_name_statedump(session, field, nesting);
3160 }
3161
3162 /*
3163 * Must be called with sessions_mutex held.
3164 */
3165 static
3166 int _lttng_variant_type_statedump(struct lttng_kernel_session *session,
3167 const struct lttng_kernel_type_variant *type,
3168 size_t nesting,
3169 const char *prev_field_name)
3170 {
3171 const char *tag_name;
3172 int ret;
3173 uint32_t i, nr_choices;
3174
3175 tag_name = type->tag_name;
3176 if (!tag_name)
3177 tag_name = prev_field_name;
3178 if (!tag_name)
3179 return -EINVAL;
3180 /*
3181 * CTF 1.8 does not allow expressing nonzero variant alignment in a nestable way.
3182 */
3183 if (type->alignment != 0)
3184 return -EINVAL;
3185 ret = print_tabs(session, nesting);
3186 if (ret)
3187 return ret;
3188 ret = lttng_metadata_printf(session,
3189 "variant <_%s> {\n",
3190 tag_name);
3191 if (ret)
3192 return ret;
3193 nr_choices = type->nr_choices;
3194 for (i = 0; i < nr_choices; i++) {
3195 const struct lttng_kernel_event_field *iter_field;
3196
3197 iter_field = type->choices[i];
3198 ret = _lttng_field_statedump(session, iter_field, nesting + 1, NULL);
3199 if (ret)
3200 return ret;
3201 }
3202 ret = print_tabs(session, nesting);
3203 if (ret)
3204 return ret;
3205 ret = lttng_metadata_printf(session,
3206 "}");
3207 return ret;
3208 }
3209
3210 /*
3211 * Must be called with sessions_mutex held.
3212 */
3213 static
3214 int _lttng_variant_field_statedump(struct lttng_kernel_session *session,
3215 const struct lttng_kernel_event_field *field,
3216 size_t nesting,
3217 const char *prev_field_name)
3218 {
3219 int ret;
3220
3221 ret = _lttng_variant_type_statedump(session,
3222 lttng_kernel_get_type_variant(field->type), nesting,
3223 prev_field_name);
3224 if (ret)
3225 return ret;
3226 return lttng_field_name_statedump(session, field, nesting);
3227 }
3228
3229 /*
3230 * Must be called with sessions_mutex held.
3231 */
3232 static
3233 int _lttng_array_field_statedump(struct lttng_kernel_session *session,
3234 const struct lttng_kernel_event_field *field,
3235 size_t nesting)
3236 {
3237 int ret;
3238 const struct lttng_kernel_type_array *array_type;
3239 const struct lttng_kernel_type_common *elem_type;
3240
3241 array_type = lttng_kernel_get_type_array(field->type);
3242 WARN_ON_ONCE(!array_type);
3243
3244 if (array_type->alignment) {
3245 ret = print_tabs(session, nesting);
3246 if (ret)
3247 return ret;
3248 ret = lttng_metadata_printf(session,
3249 "struct { } align(%u) _%s_padding;\n",
3250 array_type->alignment * CHAR_BIT,
3251 field->name);
3252 if (ret)
3253 return ret;
3254 }
3255 /*
3256	 * Nested compound types: Only arrays of structures and variants are
3257 * currently supported.
3258 */
3259 elem_type = array_type->elem_type;
3260 switch (elem_type->type) {
3261 case lttng_kernel_type_integer:
3262 case lttng_kernel_type_struct:
3263 case lttng_kernel_type_variant:
3264 ret = _lttng_type_statedump(session, elem_type,
3265 array_type->encoding, nesting);
3266 if (ret)
3267 return ret;
3268 break;
3269
3270 default:
3271 return -EINVAL;
3272 }
3273 ret = lttng_metadata_printf(session,
3274 " _%s[%u];\n",
3275 field->name,
3276 array_type->length);
3277 return ret;
3278 }
3279
3280 /*
3281 * Must be called with sessions_mutex held.
3282 */
3283 static
3284 int _lttng_sequence_field_statedump(struct lttng_kernel_session *session,
3285 const struct lttng_kernel_event_field *field,
3286 size_t nesting,
3287 const char *prev_field_name)
3288 {
3289 int ret;
3290 const char *length_name;
3291 const struct lttng_kernel_type_sequence *sequence_type;
3292 const struct lttng_kernel_type_common *elem_type;
3293
3294 sequence_type = lttng_kernel_get_type_sequence(field->type);
3295 WARN_ON_ONCE(!sequence_type);
3296
3297 length_name = sequence_type->length_name;
3298 if (!length_name)
3299 length_name = prev_field_name;
3300 if (!length_name)
3301 return -EINVAL;
3302
3303 if (sequence_type->alignment) {
3304 ret = print_tabs(session, nesting);
3305 if (ret)
3306 return ret;
3307 ret = lttng_metadata_printf(session,
3308 "struct { } align(%u) _%s_padding;\n",
3309 sequence_type->alignment * CHAR_BIT,
3310 field->name);
3311 if (ret)
3312 return ret;
3313 }
3314
3315 /*
3316	 * Nested compound types: Only sequences of structures and variants are
3317 * currently supported.
3318 */
3319 elem_type = sequence_type->elem_type;
3320 switch (elem_type->type) {
3321 case lttng_kernel_type_integer:
3322 case lttng_kernel_type_struct:
3323 case lttng_kernel_type_variant:
3324 ret = _lttng_type_statedump(session, elem_type,
3325 sequence_type->encoding, nesting);
3326 if (ret)
3327 return ret;
3328 break;
3329
3330 default:
3331 return -EINVAL;
3332 }
3333 ret = lttng_metadata_printf(session,
3334 " _%s[ _%s ];\n",
3335 field->name,
3336 length_name);
3337 return ret;
3338 }
3339
3340 /*
3341 * Must be called with sessions_mutex held.
3342 */
3343 static
3344 int _lttng_enum_type_statedump(struct lttng_kernel_session *session,
3345 const struct lttng_kernel_type_enum *type,
3346 size_t nesting)
3347 {
3348 const struct lttng_kernel_enum_desc *enum_desc;
3349 const struct lttng_kernel_type_common *container_type;
3350 int ret;
3351 unsigned int i, nr_entries;
3352
3353 container_type = type->container_type;
3354 if (container_type->type != lttng_kernel_type_integer) {
3355 ret = -EINVAL;
3356 goto end;
3357 }
3358 enum_desc = type->desc;
3359 nr_entries = enum_desc->nr_entries;
3360
3361 ret = print_tabs(session, nesting);
3362 if (ret)
3363 goto end;
3364 ret = lttng_metadata_printf(session, "enum : ");
3365 if (ret)
3366 goto end;
3367 ret = _lttng_integer_type_statedump(session, lttng_kernel_get_type_integer(container_type),
3368 lttng_kernel_string_encoding_none, 0);
3369 if (ret)
3370 goto end;
3371 ret = lttng_metadata_printf(session, " {\n");
3372 if (ret)
3373 goto end;
3374 /* Dump all entries */
3375 for (i = 0; i < nr_entries; i++) {
3376 const struct lttng_kernel_enum_entry *entry = enum_desc->entries[i];
3377 int j, len;
3378
3379 ret = print_tabs(session, nesting + 1);
3380 if (ret)
3381 goto end;
3382 ret = lttng_metadata_printf(session,
3383 "\"");
3384 if (ret)
3385 goto end;
3386 len = strlen(entry->string);
3387 /* Escape the character '"' */
3388 for (j = 0; j < len; j++) {
3389 char c = entry->string[j];
3390
3391 switch (c) {
3392 case '"':
3393 ret = lttng_metadata_printf(session,
3394 "\\\"");
3395 break;
3396 case '\\':
3397 ret = lttng_metadata_printf(session,
3398 "\\\\");
3399 break;
3400 default:
3401 ret = lttng_metadata_printf(session,
3402 "%c", c);
3403 break;
3404 }
3405 if (ret)
3406 goto end;
3407 }
3408 ret = lttng_metadata_printf(session, "\"");
3409 if (ret)
3410 goto end;
3411
3412 if (entry->options.is_auto) {
3413 ret = lttng_metadata_printf(session, ",\n");
3414 if (ret)
3415 goto end;
3416 } else {
3417 ret = lttng_metadata_printf(session,
3418 " = ");
3419 if (ret)
3420 goto end;
3421 if (entry->start.signedness)
3422 ret = lttng_metadata_printf(session,
3423 "%lld", (long long) entry->start.value);
3424 else
3425 ret = lttng_metadata_printf(session,
3426 "%llu", entry->start.value);
3427 if (ret)
3428 goto end;
3429 if (entry->start.signedness == entry->end.signedness &&
3430 entry->start.value
3431 == entry->end.value) {
3432 ret = lttng_metadata_printf(session,
3433 ",\n");
3434 } else {
3435 if (entry->end.signedness) {
3436 ret = lttng_metadata_printf(session,
3437 " ... %lld,\n",
3438 (long long) entry->end.value);
3439 } else {
3440 ret = lttng_metadata_printf(session,
3441 " ... %llu,\n",
3442 entry->end.value);
3443 }
3444 }
3445 if (ret)
3446 goto end;
3447 }
3448 }
3449 ret = print_tabs(session, nesting);
3450 if (ret)
3451 goto end;
3452 ret = lttng_metadata_printf(session, "}");
3453 end:
3454 return ret;
3455 }
3456
3457 /*
3458 * Must be called with sessions_mutex held.
3459 */
3460 static
3461 int _lttng_enum_field_statedump(struct lttng_kernel_session *session,
3462 const struct lttng_kernel_event_field *field,
3463 size_t nesting)
3464 {
3465 int ret;
3466 const struct lttng_kernel_type_enum *enum_type;
3467
3468 enum_type = lttng_kernel_get_type_enum(field->type);
3469 WARN_ON_ONCE(!enum_type);
3470 ret = _lttng_enum_type_statedump(session, enum_type, nesting);
3471 if (ret)
3472 return ret;
3473 return lttng_field_name_statedump(session, field, nesting);
3474 }
3475
3476 static
3477 int _lttng_integer_field_statedump(struct lttng_kernel_session *session,
3478 const struct lttng_kernel_event_field *field,
3479 size_t nesting)
3480 {
3481 int ret;
3482
3483 ret = _lttng_integer_type_statedump(session, lttng_kernel_get_type_integer(field->type),
3484 lttng_kernel_string_encoding_none, nesting);
3485 if (ret)
3486 return ret;
3487 return lttng_field_name_statedump(session, field, nesting);
3488 }
3489
3490 static
3491 int _lttng_string_type_statedump(struct lttng_kernel_session *session,
3492 const struct lttng_kernel_type_string *type,
3493 size_t nesting)
3494 {
3495 int ret;
3496
3497 /* Default encoding is UTF8 */
3498 ret = print_tabs(session, nesting);
3499 if (ret)
3500 return ret;
3501 ret = lttng_metadata_printf(session,
3502 "string%s",
3503 type->encoding == lttng_kernel_string_encoding_ASCII ?
3504 " { encoding = ASCII; }" : "");
3505 return ret;
3506 }
3507
3508 static
3509 int _lttng_string_field_statedump(struct lttng_kernel_session *session,
3510 const struct lttng_kernel_event_field *field,
3511 size_t nesting)
3512 {
3513 const struct lttng_kernel_type_string *string_type;
3514 int ret;
3515
3516 string_type = lttng_kernel_get_type_string(field->type);
3517 WARN_ON_ONCE(!string_type);
3518 ret = _lttng_string_type_statedump(session, string_type, nesting);
3519 if (ret)
3520 return ret;
3521 return lttng_field_name_statedump(session, field, nesting);
3522 }
3523
3524 /*
3525 * Must be called with sessions_mutex held.
3526 */
3527 static
3528 int _lttng_type_statedump(struct lttng_kernel_session *session,
3529 const struct lttng_kernel_type_common *type,
3530 enum lttng_kernel_string_encoding parent_encoding,
3531 size_t nesting)
3532 {
3533 int ret = 0;
3534
3535 switch (type->type) {
3536 case lttng_kernel_type_integer:
3537 ret = _lttng_integer_type_statedump(session,
3538 lttng_kernel_get_type_integer(type),
3539 parent_encoding, nesting);
3540 break;
3541 case lttng_kernel_type_enum:
3542 ret = _lttng_enum_type_statedump(session,
3543 lttng_kernel_get_type_enum(type),
3544 nesting);
3545 break;
3546 case lttng_kernel_type_string:
3547 ret = _lttng_string_type_statedump(session,
3548 lttng_kernel_get_type_string(type),
3549 nesting);
3550 break;
3551 case lttng_kernel_type_struct:
3552 ret = _lttng_struct_type_statedump(session,
3553 lttng_kernel_get_type_struct(type),
3554 nesting);
3555 break;
3556 case lttng_kernel_type_variant:
3557 ret = _lttng_variant_type_statedump(session,
3558 lttng_kernel_get_type_variant(type),
3559 nesting, NULL);
3560 break;
3561
3562 /* Nested arrays and sequences are not supported yet. */
3563 case lttng_kernel_type_array:
3564 case lttng_kernel_type_sequence:
3565 default:
3566 WARN_ON_ONCE(1);
3567 return -EINVAL;
3568 }
3569 return ret;
3570 }
3571
3572 /*
3573 * Must be called with sessions_mutex held.
3574 */
3575 static
3576 int _lttng_field_statedump(struct lttng_kernel_session *session,
3577 const struct lttng_kernel_event_field *field,
3578 size_t nesting,
3579 const char **prev_field_name_p)
3580 {
3581 const char *prev_field_name = NULL;
3582 int ret = 0;
3583
3584 if (prev_field_name_p)
3585 prev_field_name = *prev_field_name_p;
3586 switch (field->type->type) {
3587 case lttng_kernel_type_integer:
3588 ret = _lttng_integer_field_statedump(session, field, nesting);
3589 break;
3590 case lttng_kernel_type_enum:
3591 ret = _lttng_enum_field_statedump(session, field, nesting);
3592 break;
3593 case lttng_kernel_type_string:
3594 ret = _lttng_string_field_statedump(session, field, nesting);
3595 break;
3596 case lttng_kernel_type_struct:
3597 ret = _lttng_struct_field_statedump(session, field, nesting);
3598 break;
3599 case lttng_kernel_type_array:
3600 ret = _lttng_array_field_statedump(session, field, nesting);
3601 break;
3602 case lttng_kernel_type_sequence:
3603 ret = _lttng_sequence_field_statedump(session, field, nesting, prev_field_name);
3604 break;
3605 case lttng_kernel_type_variant:
3606 ret = _lttng_variant_field_statedump(session, field, nesting, prev_field_name);
3607 break;
3608
3609 default:
3610 WARN_ON_ONCE(1);
3611 return -EINVAL;
3612 }
3613 if (prev_field_name_p)
3614 *prev_field_name_p = field->name;
3615 return ret;
3616 }
3617
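/*
 * Must be called with sessions_mutex held.
 * Dump the field declarations of a context.
 */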
3618 static
3619 int _lttng_context_metadata_statedump(struct lttng_kernel_session *session,
3620 struct lttng_kernel_ctx *ctx)
3621 {
3622 const char *prev_field_name = NULL;
3623 int ret = 0;
3624 int i;
3625
3626 if (!ctx)
3627 return 0;
3628 for (i = 0; i < ctx->nr_fields; i++) {
3629 const struct lttng_kernel_ctx_field *field = &ctx->fields[i];
3630
3631 ret = _lttng_field_statedump(session, field->event_field, 2, &prev_field_name);
3632 if (ret)
3633 return ret;
3634 }
3635 return ret;
3636 }
3637
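/*
 * Must be called with sessions_mutex held.
 * Dump the payload field declarations of an event recorder.
 */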
3638 static
3639 int _lttng_fields_metadata_statedump(struct lttng_kernel_session *session,
3640 struct lttng_kernel_event_recorder *event_recorder)
3641 {
3642 const char *prev_field_name = NULL;
3643 const struct lttng_kernel_event_desc *desc = event_recorder->priv->parent.desc;
3644 int ret = 0;
3645 int i;
3646
3647 for (i = 0; i < desc->tp_class->nr_fields; i++) {
3648 const struct lttng_kernel_event_field *field = desc->tp_class->fields[i];
3649
3650 ret = _lttng_field_statedump(session, field, 2, &prev_field_name);
3651 if (ret)
3652 return ret;
3653 }
3654 return ret;
3655 }
3656
3657 /*
3658 * Must be called with sessions_mutex held.
3659 * The entire event metadata is printed as a single atomic metadata
3660 * transaction.
3661 */
3662 static
3663 int _lttng_event_recorder_metadata_statedump(struct lttng_kernel_event_common *event)
3664 {
3665 struct lttng_kernel_event_recorder *event_recorder;
3666 struct lttng_kernel_channel_buffer *chan;
3667 struct lttng_kernel_session *session;
3668 int ret = 0;
3669
3670 if (event->type != LTTNG_KERNEL_EVENT_TYPE_RECORDER)
3671 return 0;
3672 event_recorder = container_of(event, struct lttng_kernel_event_recorder, parent);
3673 chan = event_recorder->chan;
3674 session = chan->parent.session;
3675
3676 if (event_recorder->priv->metadata_dumped || !LTTNG_READ_ONCE(session->active))
3677 return 0;
3678 if (chan->priv->channel_type == METADATA_CHANNEL)
3679 return 0;
3680
3681 lttng_metadata_begin(session);
3682
3683 ret = lttng_metadata_printf(session,
3684 "event {\n"
3685 " name = \"%s\";\n"
3686 " id = %u;\n"
3687 " stream_id = %u;\n",
3688 event_recorder->priv->parent.desc->event_name,
3689 event_recorder->priv->id,
3690 event_recorder->chan->priv->id);
3691 if (ret)
3692 goto end;
3693
3694 ret = lttng_metadata_printf(session,
3695 " fields := struct {\n"
3696 );
3697 if (ret)
3698 goto end;
3699
3700 ret = _lttng_fields_metadata_statedump(session, event_recorder);
3701 if (ret)
3702 goto end;
3703
3704 /*
3705 * LTTng space reservation can only reserve multiples of the
3706 * byte size.
3707 */
3708 ret = lttng_metadata_printf(session,
3709 " };\n"
3710 "};\n\n");
3711 if (ret)
3712 goto end;
3713
3714 event_recorder->priv->metadata_dumped = 1;
3715 end:
3716 lttng_metadata_end(session);
3717 return ret;
3718
3719 }
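
/*
 * Example of the metadata block emitted above for a hypothetical event
 * (illustrative only; the id and stream_id values are made up, and the
 * field declarations come from _lttng_fields_metadata_statedump()):
 *
 *	event {
 *		name = "sched_switch";
 *		id = 3;
 *		stream_id = 0;
 *		fields := struct {
 *			... one declaration per event field ...
 *		};
 *	};
 */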
3720
3721 /*
3722 * Must be called with sessions_mutex held.
3723 * The entire channel metadata is printed as a single atomic metadata
3724 * transaction.
3725 */
3726 static
3727 int _lttng_channel_metadata_statedump(struct lttng_kernel_session *session,
3728 struct lttng_kernel_channel_buffer *chan)
3729 {
3730 int ret = 0;
3731
3732 if (chan->priv->metadata_dumped || !LTTNG_READ_ONCE(session->active))
3733 return 0;
3734
3735 if (chan->priv->channel_type == METADATA_CHANNEL)
3736 return 0;
3737
3738 lttng_metadata_begin(session);
3739
3740 WARN_ON_ONCE(!chan->priv->header_type);
3741 ret = lttng_metadata_printf(session,
3742 "stream {\n"
3743 " id = %u;\n"
3744 " event.header := %s;\n"
3745 " packet.context := struct packet_context;\n",
3746 chan->priv->id,
3747 chan->priv->header_type == 1 ? "struct event_header_compact" :
3748 "struct event_header_large");
3749 if (ret)
3750 goto end;
3751
3752 if (chan->priv->ctx) {
3753 ret = lttng_metadata_printf(session,
3754 " event.context := struct {\n");
3755 if (ret)
3756 goto end;
3757 }
3758 ret = _lttng_context_metadata_statedump(session, chan->priv->ctx);
3759 if (ret)
3760 goto end;
3761 if (chan->priv->ctx) {
3762 ret = lttng_metadata_printf(session,
3763 " };\n");
3764 if (ret)
3765 goto end;
3766 }
3767
3768 ret = lttng_metadata_printf(session,
3769 "};\n\n");
3770
3771 chan->priv->metadata_dumped = 1;
3772 end:
3773 lttng_metadata_end(session);
3774 return ret;
3775 }
3776
3777 /*
3778 * Must be called with sessions_mutex held.
3779 */
3780 static
3781 int _lttng_stream_packet_context_declare(struct lttng_kernel_session *session)
3782 {
3783 return lttng_metadata_printf(session,
3784 "struct packet_context {\n"
3785 " uint64_clock_monotonic_t timestamp_begin;\n"
3786 " uint64_clock_monotonic_t timestamp_end;\n"
3787 " uint64_t content_size;\n"
3788 " uint64_t packet_size;\n"
3789 " uint64_t packet_seq_num;\n"
3790 " unsigned long events_discarded;\n"
3791 " uint32_t cpu_id;\n"
3792 "};\n\n"
3793 );
3794 }
3795
3796 /*
3797 * Compact header:
3798 * id: range: 0 - 30.
3799 * id 31 is reserved to indicate an extended header.
3800 *
3801 * Large header:
3802 * id: range: 0 - 65534.
3803 * id 65535 is reserved to indicate an extended header.
3804 *
3805 * Must be called with sessions_mutex held.
3806 */
3807 static
3808 int _lttng_event_header_declare(struct lttng_kernel_session *session)
3809 {
3810 return lttng_metadata_printf(session,
3811 "struct event_header_compact {\n"
3812 " enum : uint5_t { compact = 0 ... 30, extended = 31 } id;\n"
3813 " variant <id> {\n"
3814 " struct {\n"
3815 " uint27_clock_monotonic_t timestamp;\n"
3816 " } compact;\n"
3817 " struct {\n"
3818 " uint32_t id;\n"
3819 " uint64_clock_monotonic_t timestamp;\n"
3820 " } extended;\n"
3821 " } v;\n"
3822 "} align(%u);\n"
3823 "\n"
3824 "struct event_header_large {\n"
3825 " enum : uint16_t { compact = 0 ... 65534, extended = 65535 } id;\n"
3826 " variant <id> {\n"
3827 " struct {\n"
3828 " uint32_clock_monotonic_t timestamp;\n"
3829 " } compact;\n"
3830 " struct {\n"
3831 " uint32_t id;\n"
3832 " uint64_clock_monotonic_t timestamp;\n"
3833 " } extended;\n"
3834 " } v;\n"
3835 "} align(%u);\n\n",
3836 lttng_alignof(uint32_t) * CHAR_BIT,
3837 lttng_alignof(uint16_t) * CHAR_BIT
3838 );
3839 }
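
/*
 * Illustrative sketch (not part of this module's build): how a tracer
 * client could pick between the two layouts declared above when writing
 * an event header. Ids 0-30 fit in the compact form (5-bit id plus a
 * 27-bit timestamp); id 31 flags the extended form, which carries a full
 * 32-bit id and a 64-bit timestamp. The emit_*() helpers are hypothetical;
 * the real layout logic lives in the lttng ring-buffer client code.
 *
 *	static void write_compact_event_header(uint32_t event_id, uint64_t ts)
 *	{
 *		if (event_id <= 30) {
 *			emit_u5(event_id);
 *			emit_u27((uint32_t) (ts & 0x7ffffff));
 *		} else {
 *			emit_u5(31);
 *			emit_u32(event_id);
 *			emit_u64(ts);
 *		}
 *	}
 */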
3840
3841 /*
3842 * Approximation of NTP time of day to clock monotonic correlation,
3843 * taken at start of trace.
3844 * Yes, this is only an approximation. Yes, we can (and will) do better
3845 * in future versions.
3846 * This function may return a negative offset; this can happen if the
3847 * system sets the REALTIME clock to 0 after boot.
3848 *
3849 * Use a 64-bit timespec on kernels that provide it; this makes 32-bit
3850 * architectures y2038 compliant.
3851 */
3852 static
3853 int64_t measure_clock_offset(void)
3854 {
3855 uint64_t monotonic_avg, monotonic[2], realtime;
3856 uint64_t tcf = trace_clock_freq();
3857 int64_t offset;
3858 unsigned long flags;
3859 #ifdef LTTNG_KERNEL_HAS_TIMESPEC64
3860 struct timespec64 rts = { 0, 0 };
3861 #else
3862 struct timespec rts = { 0, 0 };
3863 #endif
3864
3865 /* Disable interrupts to increase correlation precision. */
3866 local_irq_save(flags);
3867 monotonic[0] = trace_clock_read64();
3868 #ifdef LTTNG_KERNEL_HAS_TIMESPEC64
3869 ktime_get_real_ts64(&rts);
3870 #else
3871 getnstimeofday(&rts);
3872 #endif
3873 monotonic[1] = trace_clock_read64();
3874 local_irq_restore(flags);
3875
3876 monotonic_avg = (monotonic[0] + monotonic[1]) >> 1;
3877 realtime = (uint64_t) rts.tv_sec * tcf;
3878 if (tcf == NSEC_PER_SEC) {
3879 realtime += rts.tv_nsec;
3880 } else {
3881 uint64_t n = rts.tv_nsec * tcf;
3882
3883 do_div(n, NSEC_PER_SEC);
3884 realtime += n;
3885 }
3886 offset = (int64_t) realtime - monotonic_avg;
3887 return offset;
3888 }
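
/*
 * Illustrative sketch (not part of this module's build): how a trace
 * reader could use the freq/offset pair emitted in the clock {} metadata
 * block to turn a raw trace-clock value into wall-clock nanoseconds.
 * This assumes freq == NSEC_PER_SEC; other frequencies need the same
 * scaling as the realtime computation above.
 *
 *	static uint64_t trace_clock_to_realtime_ns(uint64_t clock_value,
 *			int64_t offset)
 *	{
 *		return (uint64_t) ((int64_t) clock_value + offset);
 *	}
 *
 * offset may be negative (see the comment above), hence the signed
 * intermediate.
 */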
3889
3890 static
3891 int print_escaped_ctf_string(struct lttng_kernel_session *session, const char *string)
3892 {
3893 int ret = 0;
3894 size_t i;
3895 char cur;
3896
3897 i = 0;
3898 cur = string[i];
3899 while (cur != '\0') {
3900 switch (cur) {
3901 case '\n':
3902 ret = lttng_metadata_printf(session, "%s", "\\n");
3903 break;
3904 case '\\':
3905 case '"':
3906 ret = lttng_metadata_printf(session, "%c", '\\');
3907 if (ret)
3908 goto error;
3909 /* We still print the current char */
3910 lttng_fallthrough;
3911 default:
3912 ret = lttng_metadata_printf(session, "%c", cur);
3913 break;
3914 }
3915
3916 if (ret)
3917 goto error;
3918
3919 cur = string[++i];
3920 }
3921 error:
3922 return ret;
3923 }
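
/*
 * Illustrative userspace sketch (not part of this module's build) of the
 * same escaping rules, which may help when eyeballing the expected
 * metadata output: newlines become the two characters backslash-n, and
 * backslash or double-quote characters are prefixed with a backslash.
 *
 *	#include <stdio.h>
 *
 *	static void print_escaped(const char *s)
 *	{
 *		for (; *s != '\0'; s++) {
 *			if (*s == '\n')
 *				fputs("\\n", stdout);
 *			else if (*s == '\\' || *s == '"')
 *				printf("\\%c", *s);
 *			else
 *				fputc(*s, stdout);
 *		}
 *	}
 */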
3924
3925 static
3926 int print_metadata_escaped_field(struct lttng_kernel_session *session, const char *field,
3927 const char *field_value)
3928 {
3929 int ret;
3930
3931 ret = lttng_metadata_printf(session, " %s = \"", field);
3932 if (ret)
3933 goto error;
3934
3935 ret = print_escaped_ctf_string(session, field_value);
3936 if (ret)
3937 goto error;
3938
3939 ret = lttng_metadata_printf(session, "\";\n");
3940
3941 error:
3942 return ret;
3943 }
3944
3945 /*
3946 * Output metadata into this session's metadata buffers.
3947 * Must be called with sessions_mutex held.
3948 */
3949 static
3950 int _lttng_session_metadata_statedump(struct lttng_kernel_session *session)
3951 {
3952 unsigned char *uuid_c = session->priv->uuid.b;
3953 unsigned char uuid_s[37], clock_uuid_s[BOOT_ID_LEN];
3954 const char *product_uuid;
3955 struct lttng_kernel_channel_buffer_private *chan_priv;
3956 struct lttng_kernel_event_recorder_private *event_recorder_priv;
3957 int ret = 0;
3958
3959 if (!LTTNG_READ_ONCE(session->active))
3960 return 0;
3961
3962 lttng_metadata_begin(session);
3963
3964 if (session->priv->metadata_dumped)
3965 goto skip_session;
3966
3967 snprintf(uuid_s, sizeof(uuid_s),
3968 "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
3969 uuid_c[0], uuid_c[1], uuid_c[2], uuid_c[3],
3970 uuid_c[4], uuid_c[5], uuid_c[6], uuid_c[7],
3971 uuid_c[8], uuid_c[9], uuid_c[10], uuid_c[11],
3972 uuid_c[12], uuid_c[13], uuid_c[14], uuid_c[15]);
3973
3974 ret = lttng_metadata_printf(session,
3975 "typealias integer { size = 8; align = %u; signed = false; } := uint8_t;\n"
3976 "typealias integer { size = 16; align = %u; signed = false; } := uint16_t;\n"
3977 "typealias integer { size = 32; align = %u; signed = false; } := uint32_t;\n"
3978 "typealias integer { size = 64; align = %u; signed = false; } := uint64_t;\n"
3979 "typealias integer { size = %u; align = %u; signed = false; } := unsigned long;\n"
3980 "typealias integer { size = 5; align = 1; signed = false; } := uint5_t;\n"
3981 "typealias integer { size = 27; align = 1; signed = false; } := uint27_t;\n"
3982 "\n"
3983 "trace {\n"
3984 " major = %u;\n"
3985 " minor = %u;\n"
3986 " uuid = \"%s\";\n"
3987 " byte_order = %s;\n"
3988 " packet.header := struct {\n"
3989 " uint32_t magic;\n"
3990 " uint8_t uuid[16];\n"
3991 " uint32_t stream_id;\n"
3992 " uint64_t stream_instance_id;\n"
3993 " };\n"
3994 "};\n\n",
3995 lttng_alignof(uint8_t) * CHAR_BIT,
3996 lttng_alignof(uint16_t) * CHAR_BIT,
3997 lttng_alignof(uint32_t) * CHAR_BIT,
3998 lttng_alignof(uint64_t) * CHAR_BIT,
3999 sizeof(unsigned long) * CHAR_BIT,
4000 lttng_alignof(unsigned long) * CHAR_BIT,
4001 CTF_SPEC_MAJOR,
4002 CTF_SPEC_MINOR,
4003 uuid_s,
4004 #if __BYTE_ORDER == __BIG_ENDIAN
4005 "be"
4006 #else
4007 "le"
4008 #endif
4009 );
4010 if (ret)
4011 goto end;
4012
4013 ret = lttng_metadata_printf(session,
4014 "env {\n"
4015 " hostname = \"%s\";\n"
4016 " domain = \"kernel\";\n"
4017 " sysname = \"%s\";\n"
4018 " kernel_release = \"%s\";\n"
4019 " kernel_version = \"%s\";\n"
4020 " tracer_name = \"lttng-modules\";\n"
4021 " tracer_major = %d;\n"
4022 " tracer_minor = %d;\n"
4023 " tracer_patchlevel = %d;\n"
4024 " trace_buffering_scheme = \"global\";\n",
4025 current->nsproxy->uts_ns->name.nodename,
4026 utsname()->sysname,
4027 utsname()->release,
4028 utsname()->version,
4029 LTTNG_MODULES_MAJOR_VERSION,
4030 LTTNG_MODULES_MINOR_VERSION,
4031 LTTNG_MODULES_PATCHLEVEL_VERSION
4032 );
4033 if (ret)
4034 goto end;
4035
4036 ret = print_metadata_escaped_field(session, "trace_name", session->priv->name);
4037 if (ret)
4038 goto end;
4039 ret = print_metadata_escaped_field(session, "trace_creation_datetime",
4040 session->priv->creation_time);
4041 if (ret)
4042 goto end;
4043
4044 /* Add the product UUID to the 'env' section */
4045 product_uuid = dmi_get_system_info(DMI_PRODUCT_UUID);
4046 if (product_uuid) {
4047 ret = lttng_metadata_printf(session,
4048 " product_uuid = \"%s\";\n",
4049 product_uuid
4050 );
4051 if (ret)
4052 goto end;
4053 }
4054
4055 /* Close the 'env' section */
4056 ret = lttng_metadata_printf(session, "};\n\n");
4057 if (ret)
4058 goto end;
4059
4060 ret = lttng_metadata_printf(session,
4061 "clock {\n"
4062 " name = \"%s\";\n",
4063 trace_clock_name()
4064 );
4065 if (ret)
4066 goto end;
4067
4068 if (!trace_clock_uuid(clock_uuid_s)) {
4069 ret = lttng_metadata_printf(session,
4070 " uuid = \"%s\";\n",
4071 clock_uuid_s
4072 );
4073 if (ret)
4074 goto end;
4075 }
4076
4077 ret = lttng_metadata_printf(session,
4078 " description = \"%s\";\n"
4079 " freq = %llu; /* Frequency, in Hz */\n"
4080 " /* clock value offset from Epoch is: offset * (1/freq) */\n"
4081 " offset = %lld;\n"
4082 "};\n\n",
4083 trace_clock_description(),
4084 (unsigned long long) trace_clock_freq(),
4085 (long long) measure_clock_offset()
4086 );
4087 if (ret)
4088 goto end;
4089
4090 ret = lttng_metadata_printf(session,
4091 "typealias integer {\n"
4092 " size = 27; align = 1; signed = false;\n"
4093 " map = clock.%s.value;\n"
4094 "} := uint27_clock_monotonic_t;\n"
4095 "\n"
4096 "typealias integer {\n"
4097 " size = 32; align = %u; signed = false;\n"
4098 " map = clock.%s.value;\n"
4099 "} := uint32_clock_monotonic_t;\n"
4100 "\n"
4101 "typealias integer {\n"
4102 " size = 64; align = %u; signed = false;\n"
4103 " map = clock.%s.value;\n"
4104 "} := uint64_clock_monotonic_t;\n\n",
4105 trace_clock_name(),
4106 lttng_alignof(uint32_t) * CHAR_BIT,
4107 trace_clock_name(),
4108 lttng_alignof(uint64_t) * CHAR_BIT,
4109 trace_clock_name()
4110 );
4111 if (ret)
4112 goto end;
4113
4114 ret = _lttng_stream_packet_context_declare(session);
4115 if (ret)
4116 goto end;
4117
4118 ret = _lttng_event_header_declare(session);
4119 if (ret)
4120 goto end;
4121
4122 skip_session:
4123 list_for_each_entry(chan_priv, &session->priv->chan, node) {
4124 ret = _lttng_channel_metadata_statedump(session, chan_priv->pub);
4125 if (ret)
4126 goto end;
4127 }
4128
4129 list_for_each_entry(event_recorder_priv, &session->priv->events, parent.node) {
4130 ret = _lttng_event_recorder_metadata_statedump(&event_recorder_priv->pub->parent);
4131 if (ret)
4132 goto end;
4133 }
4134 session->priv->metadata_dumped = 1;
4135 end:
4136 lttng_metadata_end(session);
4137 return ret;
4138 }
4139
4140 /**
4141 * lttng_transport_register - LTT transport registration
4142 * @transport: transport structure
4143 *
4144 * Registers a transport which can be used as output to extract the data out of
4145 * LTTng. The module calling this registration function must ensure that no
4146 * trap-inducing code will be executed by the transport functions. E.g.
4147 * vmalloc_sync_mappings() must be called between a vmalloc and the moment the memory
4148 * is made visible to the transport function. This registration acts as a
4149 * vmalloc_sync_mappings. Therefore, only if the module allocates virtual memory
4150 * after its registration must it synchronize the TLBs.
4151 */
4152 void lttng_transport_register(struct lttng_transport *transport)
4153 {
4154 /*
4155 * Make sure no page fault can be triggered by the module about to be
4156 * registered. We deal with this here so we don't have to call
4157 * vmalloc_sync_mappings() in each module's init.
4158 */
4159 wrapper_vmalloc_sync_mappings();
4160
4161 mutex_lock(&sessions_mutex);
4162 list_add_tail(&transport->node, &lttng_transport_list);
4163 mutex_unlock(&sessions_mutex);
4164 }
4165 EXPORT_SYMBOL_GPL(lttng_transport_register);
4166
4167 /**
4168 * lttng_transport_unregister - LTT transport unregistration
4169 * @transport: transport structure
4170 */
4171 void lttng_transport_unregister(struct lttng_transport *transport)
4172 {
4173 mutex_lock(&sessions_mutex);
4174 list_del(&transport->node);
4175 mutex_unlock(&sessions_mutex);
4176 }
4177 EXPORT_SYMBOL_GPL(lttng_transport_unregister);
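
/*
 * Example usage (illustrative sketch): a ring-buffer client module
 * typically defines its transport statically and registers it from its
 * module init, relying on the wrapper_vmalloc_sync_mappings() performed
 * at registration time. Only the .node member is touched by this file;
 * the .name and .owner members shown below follow the usual client
 * layout and are assumptions, not requirements checked here.
 *
 *	static struct lttng_transport lttng_relay_transport_example = {
 *		.name = "relay-example",
 *		.owner = THIS_MODULE,
 *	};
 *
 *	static int __init lttng_client_init(void)
 *	{
 *		lttng_transport_register(&lttng_relay_transport_example);
 *		return 0;
 *	}
 *
 *	static void __exit lttng_client_exit(void)
 *	{
 *		lttng_transport_unregister(&lttng_relay_transport_example);
 *	}
 */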
4178
4179 void lttng_counter_transport_register(struct lttng_counter_transport *transport)
4180 {
4181 /*
4182 * Make sure no page fault can be triggered by the module about to be
4183 * registered. We deal with this here so we don't have to call
4184 * vmalloc_sync_mappings() in each module's init.
4185 */
4186 wrapper_vmalloc_sync_mappings();
4187
4188 mutex_lock(&sessions_mutex);
4189 list_add_tail(&transport->node, &lttng_counter_transport_list);
4190 mutex_unlock(&sessions_mutex);
4191 }
4192 EXPORT_SYMBOL_GPL(lttng_counter_transport_register);
4193
4194 void lttng_counter_transport_unregister(struct lttng_counter_transport *transport)
4195 {
4196 mutex_lock(&sessions_mutex);
4197 list_del(&transport->node);
4198 mutex_unlock(&sessions_mutex);
4199 }
4200 EXPORT_SYMBOL_GPL(lttng_counter_transport_unregister);
4201
4202 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
4203
4204 enum cpuhp_state lttng_hp_prepare;
4205 enum cpuhp_state lttng_hp_online;
4206
4207 static int lttng_hotplug_prepare(unsigned int cpu, struct hlist_node *node)
4208 {
4209 struct lttng_cpuhp_node *lttng_node;
4210
4211 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4212 switch (lttng_node->component) {
4213 case LTTNG_RING_BUFFER_FRONTEND:
4214 return 0;
4215 case LTTNG_RING_BUFFER_BACKEND:
4216 return lttng_cpuhp_rb_backend_prepare(cpu, lttng_node);
4217 case LTTNG_RING_BUFFER_ITER:
4218 return 0;
4219 case LTTNG_CONTEXT_PERF_COUNTERS:
4220 return 0;
4221 default:
4222 return -EINVAL;
4223 }
4224 }
4225
4226 static int lttng_hotplug_dead(unsigned int cpu, struct hlist_node *node)
4227 {
4228 struct lttng_cpuhp_node *lttng_node;
4229
4230 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4231 switch (lttng_node->component) {
4232 case LTTNG_RING_BUFFER_FRONTEND:
4233 return lttng_cpuhp_rb_frontend_dead(cpu, lttng_node);
4234 case LTTNG_RING_BUFFER_BACKEND:
4235 return 0;
4236 case LTTNG_RING_BUFFER_ITER:
4237 return 0;
4238 case LTTNG_CONTEXT_PERF_COUNTERS:
4239 return lttng_cpuhp_perf_counter_dead(cpu, lttng_node);
4240 default:
4241 return -EINVAL;
4242 }
4243 }
4244
4245 static int lttng_hotplug_online(unsigned int cpu, struct hlist_node *node)
4246 {
4247 struct lttng_cpuhp_node *lttng_node;
4248
4249 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4250 switch (lttng_node->component) {
4251 case LTTNG_RING_BUFFER_FRONTEND:
4252 return lttng_cpuhp_rb_frontend_online(cpu, lttng_node);
4253 case LTTNG_RING_BUFFER_BACKEND:
4254 return 0;
4255 case LTTNG_RING_BUFFER_ITER:
4256 return lttng_cpuhp_rb_iter_online(cpu, lttng_node);
4257 case LTTNG_CONTEXT_PERF_COUNTERS:
4258 return lttng_cpuhp_perf_counter_online(cpu, lttng_node);
4259 default:
4260 return -EINVAL;
4261 }
4262 }
4263
4264 static int lttng_hotplug_offline(unsigned int cpu, struct hlist_node *node)
4265 {
4266 struct lttng_cpuhp_node *lttng_node;
4267
4268 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4269 switch (lttng_node->component) {
4270 case LTTNG_RING_BUFFER_FRONTEND:
4271 return lttng_cpuhp_rb_frontend_offline(cpu, lttng_node);
4272 case LTTNG_RING_BUFFER_BACKEND:
4273 return 0;
4274 case LTTNG_RING_BUFFER_ITER:
4275 return 0;
4276 case LTTNG_CONTEXT_PERF_COUNTERS:
4277 return 0;
4278 default:
4279 return -EINVAL;
4280 }
4281 }
4282
4283 static int __init lttng_init_cpu_hotplug(void)
4284 {
4285 int ret;
4286
4287 ret = cpuhp_setup_state_multi(CPUHP_BP_PREPARE_DYN, "lttng:prepare",
4288 lttng_hotplug_prepare,
4289 lttng_hotplug_dead);
4290 if (ret < 0) {
4291 return ret;
4292 }
4293 lttng_hp_prepare = ret;
4294 lttng_rb_set_hp_prepare(ret);
4295
4296 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "lttng:online",
4297 lttng_hotplug_online,
4298 lttng_hotplug_offline);
4299 if (ret < 0) {
4300 cpuhp_remove_multi_state(lttng_hp_prepare);
4301 lttng_hp_prepare = 0;
4302 return ret;
4303 }
4304 lttng_hp_online = ret;
4305 lttng_rb_set_hp_online(ret);
4306
4307 return 0;
4308 }
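
/*
 * The two dynamic states allocated above are multi-instance: each
 * component enrolls its own lttng_cpuhp_node against them. Illustrative
 * sketch (cpuhp_state_add_instance()/cpuhp_state_remove_instance() are
 * the regular kernel API; where exactly the node lives is up to the
 * component):
 *
 *	node->component = LTTNG_RING_BUFFER_BACKEND;
 *	ret = cpuhp_state_add_instance(lttng_hp_prepare, &node->node);
 *	if (ret)
 *		return ret;
 *	...
 *	cpuhp_state_remove_instance(lttng_hp_prepare, &node->node);
 */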
4309
4310 static void __exit lttng_exit_cpu_hotplug(void)
4311 {
4312 lttng_rb_set_hp_online(0);
4313 cpuhp_remove_multi_state(lttng_hp_online);
4314 lttng_rb_set_hp_prepare(0);
4315 cpuhp_remove_multi_state(lttng_hp_prepare);
4316 }
4317
4318 #else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
4319 static int lttng_init_cpu_hotplug(void)
4320 {
4321 return 0;
4322 }
4323 static void lttng_exit_cpu_hotplug(void)
4324 {
4325 }
4326 #endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
4327
4328 static int __init lttng_events_init(void)
4329 {
4330 int ret;
4331
4332 ret = wrapper_lttng_fixup_sig(THIS_MODULE);
4333 if (ret)
4334 return ret;
4335 ret = wrapper_get_pfnblock_flags_mask_init();
4336 if (ret)
4337 return ret;
4338 ret = wrapper_get_pageblock_flags_mask_init();
4339 if (ret)
4340 return ret;
4341 ret = lttng_probes_init();
4342 if (ret)
4343 return ret;
4344 ret = lttng_context_init();
4345 if (ret)
4346 return ret;
4347 ret = lttng_tracepoint_init();
4348 if (ret)
4349 goto error_tp;
4350 event_recorder_cache = KMEM_CACHE(lttng_kernel_event_recorder, 0);
4351 if (!event_recorder_cache) {
4352 ret = -ENOMEM;
4353 goto error_kmem_event_recorder;
4354 }
4355 event_recorder_private_cache = KMEM_CACHE(lttng_kernel_event_recorder_private, 0);
4356 if (!event_recorder_private_cache) {
4357 ret = -ENOMEM;
4358 goto error_kmem_event_recorder_private;
4359 }
4360 event_notifier_cache = KMEM_CACHE(lttng_kernel_event_notifier, 0);
4361 if (!event_notifier_cache) {
4362 ret = -ENOMEM;
4363 goto error_kmem_event_notifier;
4364 }
4365 event_notifier_private_cache = KMEM_CACHE(lttng_kernel_event_notifier_private, 0);
4366 if (!event_notifier_private_cache) {
4367 ret = -ENOMEM;
4368 goto error_kmem_event_notifier_private;
4369 }
4370 ret = lttng_abi_init();
4371 if (ret)
4372 goto error_abi;
4373 ret = lttng_logger_init();
4374 if (ret)
4375 goto error_logger;
4376 ret = lttng_init_cpu_hotplug();
4377 if (ret)
4378 goto error_hotplug;
4379 printk(KERN_NOTICE "LTTng: Loaded modules v%s.%s.%s%s (%s)%s%s\n",
4380 __stringify(LTTNG_MODULES_MAJOR_VERSION),
4381 __stringify(LTTNG_MODULES_MINOR_VERSION),
4382 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
4383 LTTNG_MODULES_EXTRAVERSION,
4384 LTTNG_VERSION_NAME,
4385 #ifdef LTTNG_EXTRA_VERSION_GIT
4386 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
4387 #else
4388 "",
4389 #endif
4390 #ifdef LTTNG_EXTRA_VERSION_NAME
4391 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
4392 #else
4393 "");
4394 #endif
4395 #ifdef CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM
4396 printk(KERN_NOTICE "LTTng: Experimental bitwise enum enabled.\n");
4397 #endif /* CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM */
4398 return 0;
4399
4400 error_hotplug:
4401 lttng_logger_exit();
4402 error_logger:
4403 lttng_abi_exit();
4404 error_abi:
4405 kmem_cache_destroy(event_notifier_private_cache);
4406 error_kmem_event_notifier_private:
4407 kmem_cache_destroy(event_notifier_cache);
4408 error_kmem_event_notifier:
4409 kmem_cache_destroy(event_recorder_private_cache);
4410 error_kmem_event_recorder_private:
4411 kmem_cache_destroy(event_recorder_cache);
4412 error_kmem_event_recorder:
4413 lttng_tracepoint_exit();
4414 error_tp:
4415 lttng_context_exit();
4416 printk(KERN_NOTICE "LTTng: Failed to load modules v%s.%s.%s%s (%s)%s%s\n",
4417 __stringify(LTTNG_MODULES_MAJOR_VERSION),
4418 __stringify(LTTNG_MODULES_MINOR_VERSION),
4419 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
4420 LTTNG_MODULES_EXTRAVERSION,
4421 LTTNG_VERSION_NAME,
4422 #ifdef LTTNG_EXTRA_VERSION_GIT
4423 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
4424 #else
4425 "",
4426 #endif
4427 #ifdef LTTNG_EXTRA_VERSION_NAME
4428 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
4429 #else
4430 "");
4431 #endif
4432 return ret;
4433 }
4434
4435 module_init(lttng_events_init);
4436
4437 static void __exit lttng_events_exit(void)
4438 {
4439 struct lttng_kernel_session_private *session_priv, *tmpsession_priv;
4440
4441 lttng_exit_cpu_hotplug();
4442 lttng_logger_exit();
4443 lttng_abi_exit();
4444 list_for_each_entry_safe(session_priv, tmpsession_priv, &sessions, list)
4445 lttng_session_destroy(session_priv->pub);
4446 kmem_cache_destroy(event_recorder_cache);
4447 kmem_cache_destroy(event_recorder_private_cache);
4448 kmem_cache_destroy(event_notifier_cache);
4449 kmem_cache_destroy(event_notifier_private_cache);
4450 lttng_tracepoint_exit();
4451 lttng_context_exit();
4452 printk(KERN_NOTICE "LTTng: Unloaded modules v%s.%s.%s%s (%s)%s%s\n",
4453 __stringify(LTTNG_MODULES_MAJOR_VERSION),
4454 __stringify(LTTNG_MODULES_MINOR_VERSION),
4455 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
4456 LTTNG_MODULES_EXTRAVERSION,
4457 LTTNG_VERSION_NAME,
4458 #ifdef LTTNG_EXTRA_VERSION_GIT
4459 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
4460 #else
4461 "",
4462 #endif
4463 #ifdef LTTNG_EXTRA_VERSION_NAME
4464 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
4465 #else
4466 "");
4467 #endif
4468 }
4469
4470 module_exit(lttng_events_exit);
4471
4472 #include <generated/patches.h>
4473 #ifdef LTTNG_EXTRA_VERSION_GIT
4474 MODULE_INFO(extra_version_git, LTTNG_EXTRA_VERSION_GIT);
4475 #endif
4476 #ifdef LTTNG_EXTRA_VERSION_NAME
4477 MODULE_INFO(extra_version_name, LTTNG_EXTRA_VERSION_NAME);
4478 #endif
4479 MODULE_LICENSE("GPL and additional rights");
4480 MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
4481 MODULE_DESCRIPTION("LTTng tracer");
4482 MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
4483 __stringify(LTTNG_MODULES_MINOR_VERSION) "."
4484 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
4485 LTTNG_MODULES_EXTRAVERSION);