1 /* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
2 *
3 * lttng-events.c
4 *
5 * Holds LTTng per-session event registry.
6 *
7 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 */
9
10 /*
11 * This page_alloc.h wrapper needs to be included before gfpflags.h because it
12 * overrides a function with a define.
13 */
14 #include "wrapper/page_alloc.h"
15
16 #include <linux/module.h>
17 #include <linux/mutex.h>
18 #include <linux/sched.h>
19 #include <linux/slab.h>
20 #include <linux/jiffies.h>
21 #include <linux/utsname.h>
22 #include <linux/err.h>
23 #include <linux/seq_file.h>
24 #include <linux/file.h>
25 #include <linux/anon_inodes.h>
26 #include <wrapper/file.h>
27 #include <linux/uaccess.h>
28 #include <linux/vmalloc.h>
29 #include <linux/dmi.h>
30
31 #include <wrapper/compiler_attributes.h>
32 #include <wrapper/uuid.h>
33 #include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_mappings() */
34 #include <wrapper/random.h>
35 #include <wrapper/tracepoint.h>
36 #include <wrapper/list.h>
37 #include <wrapper/types.h>
38 #include <lttng/kernel-version.h>
39 #include <lttng/events.h>
40 #include <lttng/events-internal.h>
41 #include <lttng/lttng-bytecode.h>
42 #include <lttng/tracer.h>
43 #include <lttng/event-notifier-notification.h>
44 #include <lttng/abi-old.h>
45 #include <lttng/endian.h>
46 #include <lttng/string-utils.h>
47 #include <lttng/utils.h>
48 #include <ringbuffer/backend.h>
49 #include <ringbuffer/frontend.h>
50 #include <wrapper/time.h>
51
52 #define METADATA_CACHE_DEFAULT_SIZE 4096
53
54 static LIST_HEAD(sessions);
55 static LIST_HEAD(event_notifier_groups);
56 static LIST_HEAD(lttng_transport_list);
57 static LIST_HEAD(lttng_counter_transport_list);
58 /*
59 * Protect the sessions and metadata caches.
60 */
61 static DEFINE_MUTEX(sessions_mutex);
62 static struct kmem_cache *event_recorder_cache;
63 static struct kmem_cache *event_recorder_private_cache;
64 static struct kmem_cache *event_notifier_cache;
65 static struct kmem_cache *event_notifier_private_cache;
66
67 static void lttng_session_lazy_sync_event_enablers(struct lttng_kernel_session *session);
68 static void lttng_session_sync_event_enablers(struct lttng_kernel_session *session);
69 static void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group);
70 static void lttng_event_enabler_sync(struct lttng_event_enabler_common *event_enabler);
71
72 static void _lttng_event_destroy(struct lttng_kernel_event_common *event);
73 static void _lttng_channel_destroy(struct lttng_kernel_channel_buffer *chan);
74 static int _lttng_event_unregister(struct lttng_kernel_event_common *event);
75 static
76 int _lttng_event_metadata_statedump(struct lttng_kernel_session *session,
77 struct lttng_kernel_channel_buffer *chan,
78 struct lttng_kernel_event_recorder *event);
79 static
80 int _lttng_session_metadata_statedump(struct lttng_kernel_session *session);
81 static
82 void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream);
83 static
84 int _lttng_type_statedump(struct lttng_kernel_session *session,
85 const struct lttng_kernel_type_common *type,
86 enum lttng_kernel_string_encoding parent_encoding,
87 size_t nesting);
88 static
89 int _lttng_field_statedump(struct lttng_kernel_session *session,
90 const struct lttng_kernel_event_field *field,
91 size_t nesting, const char **prev_field_name_p);
92
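/*
 * Descriptive note (not upstream commentary): synchronize_trace() waits for a
 * grace period covering the RCU flavour(s) that probe callbacks may run
 * under.  Roughly: before the RCU flavour consolidation around Linux 5.1,
 * probes ran under sched-RCU, hence synchronize_sched(); on 5.1+ (and the
 * listed RHEL backport range) synchronize_rcu() covers all flavours.  The
 * additional synchronize_rcu() on PREEMPT_RT(_FULL) kernels accounts for
 * preemptible probe contexts on real-time configurations.
 */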
93 void synchronize_trace(void)
94 {
95 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,1,0) || \
96 LTTNG_RHEL_KERNEL_RANGE(4,18,0,193,0,0, 4,19,0,0,0,0))
97 synchronize_rcu();
98 #else
99 synchronize_sched();
100 #endif
101
102 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0))
103 #ifdef CONFIG_PREEMPT_RT_FULL
104 synchronize_rcu();
105 #endif
106 #else /* (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0)) */
107 #ifdef CONFIG_PREEMPT_RT
108 synchronize_rcu();
109 #endif
110 #endif /* (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0)) */
111 }
112
113 void lttng_lock_sessions(void)
114 {
115 mutex_lock(&sessions_mutex);
116 }
117
118 void lttng_unlock_sessions(void)
119 {
120 mutex_unlock(&sessions_mutex);
121 }
122
123 static struct lttng_transport *lttng_transport_find(const char *name)
124 {
125 struct lttng_transport *transport;
126
127 list_for_each_entry(transport, &lttng_transport_list, node) {
128 if (!strcmp(transport->name, name))
129 return transport;
130 }
131 return NULL;
132 }
133
134 /*
135 * Called with sessions lock held.
136 */
137 int lttng_session_active(void)
138 {
139 struct lttng_kernel_session_private *iter;
140
141 list_for_each_entry(iter, &sessions, list) {
142 if (iter->pub->active)
143 return 1;
144 }
145 return 0;
146 }
147
148 struct lttng_kernel_session *lttng_session_create(void)
149 {
150 struct lttng_kernel_session *session;
151 struct lttng_kernel_session_private *session_priv;
152 struct lttng_metadata_cache *metadata_cache;
153 int i;
154
155 mutex_lock(&sessions_mutex);
156 session = lttng_kvzalloc(sizeof(*session), GFP_KERNEL);
157 if (!session)
158 goto err;
159 session_priv = lttng_kvzalloc(sizeof(*session_priv), GFP_KERNEL);
160 if (!session_priv)
161 goto err_free_session;
162 session->priv = session_priv;
163 session_priv->pub = session;
164
165 INIT_LIST_HEAD(&session_priv->chan);
166 INIT_LIST_HEAD(&session_priv->events);
167 lttng_guid_gen(&session_priv->uuid);
168
169 metadata_cache = kzalloc(sizeof(struct lttng_metadata_cache),
170 GFP_KERNEL);
171 if (!metadata_cache)
172 goto err_free_session_private;
173 metadata_cache->data = vzalloc(METADATA_CACHE_DEFAULT_SIZE);
174 if (!metadata_cache->data)
175 goto err_free_cache;
176 metadata_cache->cache_alloc = METADATA_CACHE_DEFAULT_SIZE;
177 kref_init(&metadata_cache->refcount);
178 mutex_init(&metadata_cache->lock);
179 session_priv->metadata_cache = metadata_cache;
180 INIT_LIST_HEAD(&metadata_cache->metadata_stream);
181 memcpy(&metadata_cache->uuid, &session_priv->uuid,
182 sizeof(metadata_cache->uuid));
183 INIT_LIST_HEAD(&session_priv->enablers_head);
184 for (i = 0; i < LTTNG_EVENT_HT_SIZE; i++)
185 INIT_HLIST_HEAD(&session_priv->events_ht.table[i]);
186 list_add(&session_priv->list, &sessions);
187
188 if (lttng_id_tracker_init(&session->pid_tracker, session, TRACKER_PID))
189 goto tracker_alloc_error;
190 if (lttng_id_tracker_init(&session->vpid_tracker, session, TRACKER_VPID))
191 goto tracker_alloc_error;
192 if (lttng_id_tracker_init(&session->uid_tracker, session, TRACKER_UID))
193 goto tracker_alloc_error;
194 if (lttng_id_tracker_init(&session->vuid_tracker, session, TRACKER_VUID))
195 goto tracker_alloc_error;
196 if (lttng_id_tracker_init(&session->gid_tracker, session, TRACKER_GID))
197 goto tracker_alloc_error;
198 if (lttng_id_tracker_init(&session->vgid_tracker, session, TRACKER_VGID))
199 goto tracker_alloc_error;
200
201 mutex_unlock(&sessions_mutex);
202
203 return session;
204
205 tracker_alloc_error:
206 lttng_id_tracker_fini(&session->pid_tracker);
207 lttng_id_tracker_fini(&session->vpid_tracker);
208 lttng_id_tracker_fini(&session->uid_tracker);
209 lttng_id_tracker_fini(&session->vuid_tracker);
210 lttng_id_tracker_fini(&session->gid_tracker);
211 lttng_id_tracker_fini(&session->vgid_tracker);
212 err_free_cache:
213 kfree(metadata_cache);
214 err_free_session_private:
215 lttng_kvfree(session_priv);
216 err_free_session:
217 lttng_kvfree(session);
218 err:
219 mutex_unlock(&sessions_mutex);
220 return NULL;
221 }
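/*
 * Typical session lifecycle, sketched for reference only (the real callers
 * are the ioctl handlers in lttng-abi.c):
 *
 *	session = lttng_session_create();
 *	... create channels and events, attach enablers ...
 *	lttng_session_enable(session);
 *	... tracing ...
 *	lttng_session_disable(session);
 *	lttng_session_destroy(session);
 *
 * All of these helpers take sessions_mutex internally, so callers must not
 * hold it when invoking them.
 */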
222
223 static
224 struct lttng_counter_transport *lttng_counter_transport_find(const char *name)
225 {
226 struct lttng_counter_transport *transport;
227
228 list_for_each_entry(transport, &lttng_counter_transport_list, node) {
229 if (!strcmp(transport->name, name))
230 return transport;
231 }
232 return NULL;
233 }
234
235 struct lttng_counter *lttng_kernel_counter_create(
236 const char *counter_transport_name,
237 size_t number_dimensions, const size_t *dimensions_sizes)
238 {
239 struct lttng_counter *counter = NULL;
240 struct lttng_counter_transport *counter_transport = NULL;
241
242 counter_transport = lttng_counter_transport_find(counter_transport_name);
243 if (!counter_transport) {
244 printk(KERN_WARNING "LTTng: counter transport %s not found.\n",
245 counter_transport_name);
246 goto notransport;
247 }
248 if (!try_module_get(counter_transport->owner)) {
249 printk(KERN_WARNING "LTTng: Can't lock counter transport module.\n");
250 goto notransport;
251 }
252
253 counter = lttng_kvzalloc(sizeof(struct lttng_counter), GFP_KERNEL);
254 if (!counter)
255 goto nomem;
256
257 /* Create event notifier error counter. */
258 counter->ops = &counter_transport->ops;
259 counter->transport = counter_transport;
260
261 counter->counter = counter->ops->counter_create(
262 number_dimensions, dimensions_sizes, 0);
263 if (!counter->counter) {
264 goto create_error;
265 }
266
267 return counter;
268
269 create_error:
270 lttng_kvfree(counter);
271 nomem:
272 if (counter_transport)
273 module_put(counter_transport->owner);
274 notransport:
275 return NULL;
276 }
277
278 struct lttng_event_notifier_group *lttng_event_notifier_group_create(void)
279 {
280 struct lttng_transport *transport = NULL;
281 struct lttng_event_notifier_group *event_notifier_group;
282 const char *transport_name = "relay-event-notifier";
283 size_t subbuf_size = 4096; //TODO
284 size_t num_subbuf = 16; //TODO
285 unsigned int switch_timer_interval = 0;
286 unsigned int read_timer_interval = 0;
287 int i;
288
289 mutex_lock(&sessions_mutex);
290
291 transport = lttng_transport_find(transport_name);
292 if (!transport) {
293 printk(KERN_WARNING "LTTng: transport %s not found\n",
294 transport_name);
295 goto notransport;
296 }
297 if (!try_module_get(transport->owner)) {
298 printk(KERN_WARNING "LTTng: Can't lock transport %s module.\n",
299 transport_name);
300 goto notransport;
301 }
302
303 event_notifier_group = lttng_kvzalloc(sizeof(struct lttng_event_notifier_group),
304 GFP_KERNEL);
305 if (!event_notifier_group)
306 goto nomem;
307
308 /*
309 * Initialize the ring buffer used to store event notifier
310 * notifications.
311 */
312 event_notifier_group->ops = &transport->ops;
313 event_notifier_group->chan = transport->ops.priv->channel_create(
314 transport_name, event_notifier_group, NULL,
315 subbuf_size, num_subbuf, switch_timer_interval,
316 read_timer_interval);
317 if (!event_notifier_group->chan)
318 goto create_error;
319
320 event_notifier_group->transport = transport;
321
322 INIT_LIST_HEAD(&event_notifier_group->enablers_head);
323 INIT_LIST_HEAD(&event_notifier_group->event_notifiers_head);
324 for (i = 0; i < LTTNG_EVENT_HT_SIZE; i++)
325 INIT_HLIST_HEAD(&event_notifier_group->events_ht.table[i]);
326
327 list_add(&event_notifier_group->node, &event_notifier_groups);
328
329 mutex_unlock(&sessions_mutex);
330
331 return event_notifier_group;
332
333 create_error:
334 lttng_kvfree(event_notifier_group);
335 nomem:
336 if (transport)
337 module_put(transport->owner);
338 notransport:
339 mutex_unlock(&sessions_mutex);
340 return NULL;
341 }
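/*
 * The event notifier notification channel is currently created with a fixed
 * geometry (16 sub-buffers of 4 kB, no switch/read timers); the TODO markers
 * above suggest these values are meant to become configurable eventually.
 */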
342
343 void metadata_cache_destroy(struct kref *kref)
344 {
345 struct lttng_metadata_cache *cache =
346 container_of(kref, struct lttng_metadata_cache, refcount);
347 vfree(cache->data);
348 kfree(cache);
349 }
350
351 void lttng_session_destroy(struct lttng_kernel_session *session)
352 {
353 struct lttng_kernel_channel_buffer_private *chan_priv, *tmpchan_priv;
354 struct lttng_kernel_event_recorder_private *event_recorder_priv, *tmpevent_recorder_priv;
355 struct lttng_metadata_stream *metadata_stream;
356 struct lttng_event_enabler_common *event_enabler, *tmp_event_enabler;
357 int ret;
358
359 mutex_lock(&sessions_mutex);
360 WRITE_ONCE(session->active, 0);
361 list_for_each_entry(chan_priv, &session->priv->chan, node) {
362 ret = lttng_syscalls_unregister_syscall_table(&chan_priv->parent.syscall_table);
363 WARN_ON(ret);
364 }
365 list_for_each_entry(event_recorder_priv, &session->priv->events, parent.node) {
366 ret = _lttng_event_unregister(&event_recorder_priv->pub->parent);
367 WARN_ON(ret);
368 }
369 synchronize_trace(); /* Wait for in-flight events to complete */
370 list_for_each_entry(chan_priv, &session->priv->chan, node) {
371 ret = lttng_syscalls_destroy_syscall_table(&chan_priv->parent.syscall_table);
372 WARN_ON(ret);
373 }
374 list_for_each_entry_safe(event_enabler, tmp_event_enabler, &session->priv->enablers_head, node)
375 lttng_event_enabler_destroy(event_enabler);
376 list_for_each_entry_safe(event_recorder_priv, tmpevent_recorder_priv, &session->priv->events, parent.node)
377 _lttng_event_destroy(&event_recorder_priv->pub->parent);
378 list_for_each_entry_safe(chan_priv, tmpchan_priv, &session->priv->chan, node) {
379 BUG_ON(chan_priv->channel_type == METADATA_CHANNEL);
380 _lttng_channel_destroy(chan_priv->pub);
381 }
382 mutex_lock(&session->priv->metadata_cache->lock);
383 list_for_each_entry(metadata_stream, &session->priv->metadata_cache->metadata_stream, list)
384 _lttng_metadata_channel_hangup(metadata_stream);
385 mutex_unlock(&session->priv->metadata_cache->lock);
386 lttng_id_tracker_fini(&session->pid_tracker);
387 lttng_id_tracker_fini(&session->vpid_tracker);
388 lttng_id_tracker_fini(&session->uid_tracker);
389 lttng_id_tracker_fini(&session->vuid_tracker);
390 lttng_id_tracker_fini(&session->gid_tracker);
391 lttng_id_tracker_fini(&session->vgid_tracker);
392 kref_put(&session->priv->metadata_cache->refcount, metadata_cache_destroy);
393 list_del(&session->priv->list);
394 mutex_unlock(&sessions_mutex);
395 lttng_kvfree(session->priv);
396 lttng_kvfree(session);
397 }
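/*
 * Teardown ordering in lttng_session_destroy() matters: probes are first
 * unregistered (syscall tables and per-event probes), then synchronize_trace()
 * waits for any probe callback still in flight, and only then are events,
 * enablers and channels freed.  Freeing before the grace period would let
 * in-flight callbacks touch freed memory.
 */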
398
399 void lttng_event_notifier_group_destroy(
400 struct lttng_event_notifier_group *event_notifier_group)
401 {
402 struct lttng_event_enabler_common *event_enabler, *tmp_event_enabler;
403 struct lttng_kernel_event_notifier_private *event_notifier_priv, *tmpevent_notifier_priv;
404 int ret;
405
406 if (!event_notifier_group)
407 return;
408
409 mutex_lock(&sessions_mutex);
410
411 ret = lttng_syscalls_unregister_syscall_table(&event_notifier_group->syscall_table);
412 WARN_ON(ret);
413
414 list_for_each_entry_safe(event_notifier_priv, tmpevent_notifier_priv,
415 &event_notifier_group->event_notifiers_head, parent.node) {
416 ret = _lttng_event_unregister(&event_notifier_priv->pub->parent);
417 WARN_ON(ret);
418 }
419
420 /* Wait for in-flight event notifiers to complete */
421 synchronize_trace();
422
423 irq_work_sync(&event_notifier_group->wakeup_pending);
424
425 ret = lttng_syscalls_destroy_syscall_table(&event_notifier_group->syscall_table);
426 WARN_ON(ret);
427
428 list_for_each_entry_safe(event_enabler, tmp_event_enabler,
429 &event_notifier_group->enablers_head, node)
430 lttng_event_enabler_destroy(event_enabler);
431
432 list_for_each_entry_safe(event_notifier_priv, tmpevent_notifier_priv,
433 &event_notifier_group->event_notifiers_head, parent.node)
434 _lttng_event_destroy(&event_notifier_priv->pub->parent);
435
436 if (event_notifier_group->error_counter) {
437 struct lttng_counter *error_counter = event_notifier_group->error_counter;
438
439 error_counter->ops->counter_destroy(error_counter->counter);
440 module_put(error_counter->transport->owner);
441 lttng_kvfree(error_counter);
442 event_notifier_group->error_counter = NULL;
443 }
444
445 event_notifier_group->ops->priv->channel_destroy(event_notifier_group->chan);
446 module_put(event_notifier_group->transport->owner);
447 list_del(&event_notifier_group->node);
448
449 mutex_unlock(&sessions_mutex);
450 lttng_kvfree(event_notifier_group);
451 }
452
453 int lttng_session_statedump(struct lttng_kernel_session *session)
454 {
455 int ret;
456
457 mutex_lock(&sessions_mutex);
458 ret = lttng_statedump_start(session);
459 mutex_unlock(&sessions_mutex);
460 return ret;
461 }
462
463 int lttng_session_enable(struct lttng_kernel_session *session)
464 {
465 int ret = 0;
466 struct lttng_kernel_channel_buffer_private *chan_priv;
467
468 mutex_lock(&sessions_mutex);
469 if (session->active) {
470 ret = -EBUSY;
471 goto end;
472 }
473
474 /* Set transient enabler state to "enabled" */
475 session->priv->tstate = 1;
476
477 /* We need to sync enablers with session before activation. */
478 lttng_session_sync_event_enablers(session);
479
480 /*
481 * Snapshot the number of events per channel to know the type of header
482 * we need to use.
483 */
484 list_for_each_entry(chan_priv, &session->priv->chan, node) {
485 if (chan_priv->header_type)
486 continue; /* don't change it on session stop/restart */
487 if (chan_priv->free_event_id < 31)
488 chan_priv->header_type = 1; /* compact */
489 else
490 chan_priv->header_type = 2; /* large */
491 }
492
493 /* Clear each stream's quiescent state. */
494 list_for_each_entry(chan_priv, &session->priv->chan, node) {
495 if (chan_priv->channel_type != METADATA_CHANNEL)
496 lib_ring_buffer_clear_quiescent_channel(chan_priv->rb_chan);
497 }
498
499 WRITE_ONCE(session->active, 1);
500 WRITE_ONCE(session->priv->been_active, 1);
501 ret = _lttng_session_metadata_statedump(session);
502 if (ret) {
503 WRITE_ONCE(session->active, 0);
504 goto end;
505 }
506 ret = lttng_statedump_start(session);
507 if (ret)
508 WRITE_ONCE(session->active, 0);
509 end:
510 mutex_unlock(&sessions_mutex);
511 return ret;
512 }
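/*
 * Event-header selection note: a channel that allocated fewer than 31 event
 * IDs can use the "compact" event header, otherwise the "large" header is
 * used.  (In the ring-buffer client's CTF layout the compact header encodes
 * the event ID on 5 bits, with the top value reserved as an escape to the
 * larger encoding, hence the 31 threshold.)  The choice is made once, at
 * first activation, and kept across stop/restart so the trace stays
 * self-consistent.
 */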
513
514 int lttng_session_disable(struct lttng_kernel_session *session)
515 {
516 int ret = 0;
517 struct lttng_kernel_channel_buffer_private *chan_priv;
518
519 mutex_lock(&sessions_mutex);
520 if (!session->active) {
521 ret = -EBUSY;
522 goto end;
523 }
524 WRITE_ONCE(session->active, 0);
525
526 /* Set transient enabler state to "disabled" */
527 session->priv->tstate = 0;
528 lttng_session_sync_event_enablers(session);
529
530 /* Set each stream's quiescent state. */
531 list_for_each_entry(chan_priv, &session->priv->chan, node) {
532 if (chan_priv->channel_type != METADATA_CHANNEL)
533 lib_ring_buffer_set_quiescent_channel(chan_priv->rb_chan);
534 }
535 end:
536 mutex_unlock(&sessions_mutex);
537 return ret;
538 }
539
540 int lttng_session_metadata_regenerate(struct lttng_kernel_session *session)
541 {
542 int ret = 0;
543 struct lttng_kernel_channel_buffer_private *chan_priv;
544 struct lttng_kernel_event_recorder_private *event_recorder_priv;
545 struct lttng_metadata_cache *cache = session->priv->metadata_cache;
546 struct lttng_metadata_stream *stream;
547
548 mutex_lock(&sessions_mutex);
549 if (!session->active) {
550 ret = -EBUSY;
551 goto end;
552 }
553
554 mutex_lock(&cache->lock);
555 memset(cache->data, 0, cache->cache_alloc);
556 cache->metadata_written = 0;
557 cache->version++;
558 list_for_each_entry(stream, &session->priv->metadata_cache->metadata_stream, list) {
559 stream->metadata_out = 0;
560 stream->metadata_in = 0;
561 }
562 mutex_unlock(&cache->lock);
563
564 session->priv->metadata_dumped = 0;
565 list_for_each_entry(chan_priv, &session->priv->chan, node) {
566 chan_priv->metadata_dumped = 0;
567 }
568
569 list_for_each_entry(event_recorder_priv, &session->priv->events, parent.node) {
570 event_recorder_priv->metadata_dumped = 0;
571 }
572
573 ret = _lttng_session_metadata_statedump(session);
574
575 end:
576 mutex_unlock(&sessions_mutex);
577 return ret;
578 }
579
580 static
581 bool is_channel_buffer_metadata(struct lttng_kernel_channel_common *channel)
582 {
583 struct lttng_kernel_channel_buffer *chan_buf;
584
585 if (channel->type != LTTNG_KERNEL_CHANNEL_TYPE_BUFFER)
586 return false;
587 chan_buf = container_of(channel, struct lttng_kernel_channel_buffer, parent);
588 if (chan_buf->priv->channel_type == METADATA_CHANNEL)
589 return true;
590 return false;
591 }
592
593 int lttng_channel_enable(struct lttng_kernel_channel_common *channel)
594 {
595 int ret = 0;
596
597 mutex_lock(&sessions_mutex);
598 if (is_channel_buffer_metadata(channel)) {
599 ret = -EPERM;
600 goto end;
601 }
602 if (channel->enabled) {
603 ret = -EEXIST;
604 goto end;
605 }
606 /* Set transient enabler state to "enabled" */
607 channel->priv->tstate = 1;
608 lttng_session_sync_event_enablers(channel->session);
609 /* Atomically set the state to "enabled" */
610 WRITE_ONCE(channel->enabled, 1);
611 end:
612 mutex_unlock(&sessions_mutex);
613 return ret;
614 }
615
616 int lttng_channel_disable(struct lttng_kernel_channel_common *channel)
617 {
618 int ret = 0;
619
620 mutex_lock(&sessions_mutex);
621 if (is_channel_buffer_metadata(channel)) {
622 ret = -EPERM;
623 goto end;
624 }
625 if (!channel->enabled) {
626 ret = -EEXIST;
627 goto end;
628 }
629 /* Atomically set the state to "disabled" */
630 WRITE_ONCE(channel->enabled, 0);
631 /* Set transient enabler state to "disabled" */
632 channel->priv->tstate = 0;
633 lttng_session_sync_event_enablers(channel->session);
634 end:
635 mutex_unlock(&sessions_mutex);
636 return ret;
637 }
638
639 int lttng_event_enable(struct lttng_kernel_event_common *event)
640 {
641 int ret = 0;
642
643 mutex_lock(&sessions_mutex);
644 switch (event->type) {
645 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
646 {
647 struct lttng_kernel_event_recorder *event_recorder =
648 container_of(event, struct lttng_kernel_event_recorder, parent);
649
650 if (event_recorder->chan->priv->channel_type == METADATA_CHANNEL) {
651 ret = -EPERM;
652 goto end;
653 }
654 break;
655 }
656 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
657 switch (event->priv->instrumentation) {
658 case LTTNG_KERNEL_ABI_KRETPROBE:
659 ret = -EINVAL;
660 goto end;
661 default:
662 break;
663 }
664 break;
665 default:
666 break;
667 }
668
669 if (event->enabled) {
670 ret = -EEXIST;
671 goto end;
672 }
673 switch (event->priv->instrumentation) {
674 case LTTNG_KERNEL_ABI_TRACEPOINT:
675 lttng_fallthrough;
676 case LTTNG_KERNEL_ABI_SYSCALL:
677 ret = -EINVAL;
678 break;
679
680 case LTTNG_KERNEL_ABI_KPROBE:
681 lttng_fallthrough;
682 case LTTNG_KERNEL_ABI_UPROBE:
683 WRITE_ONCE(event->enabled, 1);
684 break;
685
686 case LTTNG_KERNEL_ABI_KRETPROBE:
687 ret = lttng_kretprobes_event_enable_state(event, 1);
688 break;
689
690 case LTTNG_KERNEL_ABI_FUNCTION:
691 lttng_fallthrough;
692 case LTTNG_KERNEL_ABI_NOOP:
693 lttng_fallthrough;
694 default:
695 WARN_ON_ONCE(1);
696 ret = -EINVAL;
697 }
698 end:
699 mutex_unlock(&sessions_mutex);
700 return ret;
701 }
702
703 int lttng_event_disable(struct lttng_kernel_event_common *event)
704 {
705 int ret = 0;
706
707 mutex_lock(&sessions_mutex);
708 switch (event->type) {
709 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
710 {
711 struct lttng_kernel_event_recorder *event_recorder =
712 container_of(event, struct lttng_kernel_event_recorder, parent);
713
714 if (event_recorder->chan->priv->channel_type == METADATA_CHANNEL) {
715 ret = -EPERM;
716 goto end;
717 }
718 break;
719 }
720 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
721 switch (event->priv->instrumentation) {
722 case LTTNG_KERNEL_ABI_KRETPROBE:
723 ret = -EINVAL;
724 goto end;
725 default:
726 break;
727 }
728 break;
729 default:
730 break;
731 }
732
733 if (!event->enabled) {
734 ret = -EEXIST;
735 goto end;
736 }
737 switch (event->priv->instrumentation) {
738 case LTTNG_KERNEL_ABI_TRACEPOINT:
739 lttng_fallthrough;
740 case LTTNG_KERNEL_ABI_SYSCALL:
741 ret = -EINVAL;
742 break;
743
744 case LTTNG_KERNEL_ABI_KPROBE:
745 lttng_fallthrough;
746 case LTTNG_KERNEL_ABI_UPROBE:
747 WRITE_ONCE(event->enabled, 0);
748 break;
749
750 case LTTNG_KERNEL_ABI_KRETPROBE:
751 ret = lttng_kretprobes_event_enable_state(event, 0);
752 break;
753
754 case LTTNG_KERNEL_ABI_FUNCTION:
755 lttng_fallthrough;
756 case LTTNG_KERNEL_ABI_NOOP:
757 lttng_fallthrough;
758 default:
759 WARN_ON_ONCE(1);
760 ret = -EINVAL;
761 }
762 end:
763 mutex_unlock(&sessions_mutex);
764 return ret;
765 }
766
767 struct lttng_kernel_channel_buffer *lttng_channel_buffer_create(struct lttng_kernel_session *session,
768 const char *transport_name,
769 void *buf_addr,
770 size_t subbuf_size, size_t num_subbuf,
771 unsigned int switch_timer_interval,
772 unsigned int read_timer_interval,
773 enum channel_type channel_type)
774 {
775 struct lttng_kernel_channel_buffer *chan;
776 struct lttng_kernel_channel_buffer_private *chan_priv;
777 struct lttng_transport *transport = NULL;
778
779 mutex_lock(&sessions_mutex);
780 if (session->priv->been_active && channel_type != METADATA_CHANNEL)
781 goto active; /* Refuse to add channel to active session */
782 transport = lttng_transport_find(transport_name);
783 if (!transport) {
784 printk(KERN_WARNING "LTTng: transport %s not found\n",
785 transport_name);
786 goto notransport;
787 }
788 if (!try_module_get(transport->owner)) {
789 printk(KERN_WARNING "LTTng: Can't lock transport module.\n");
790 goto notransport;
791 }
792 chan = kzalloc(sizeof(struct lttng_kernel_channel_buffer), GFP_KERNEL);
793 if (!chan)
794 goto nomem;
795 chan_priv = kzalloc(sizeof(struct lttng_kernel_channel_buffer_private), GFP_KERNEL);
796 if (!chan_priv)
797 goto nomem_priv;
798 chan->priv = chan_priv;
799 chan_priv->pub = chan;
800 chan->parent.type = LTTNG_KERNEL_CHANNEL_TYPE_BUFFER;
801 chan->parent.session = session;
802 chan->priv->id = session->priv->free_chan_id++;
803 chan->ops = &transport->ops;
804 /*
805 * Note: the channel creation op already writes into the packet
806 * headers. Therefore the "chan" information used as input
807 * should be already accessible.
808 */
809 chan->priv->rb_chan = transport->ops.priv->channel_create(transport_name,
810 chan, buf_addr, subbuf_size, num_subbuf,
811 switch_timer_interval, read_timer_interval);
812 if (!chan->priv->rb_chan)
813 goto create_error;
814 chan->priv->parent.tstate = 1;
815 chan->parent.enabled = 1;
816 chan->priv->transport = transport;
817 chan->priv->channel_type = channel_type;
818 list_add(&chan->priv->node, &session->priv->chan);
819 mutex_unlock(&sessions_mutex);
820 return chan;
821
822 create_error:
823 kfree(chan_priv);
824 nomem_priv:
825 kfree(chan);
826 nomem:
827 if (transport)
828 module_put(transport->owner);
829 notransport:
830 active:
831 mutex_unlock(&sessions_mutex);
832 return NULL;
833 }
834
835 /*
836 * Only used internally at session destruction for per-cpu channels, and
837 * when metadata channel is released.
838 * Needs to be called with sessions mutex held.
839 */
840 static
841 void _lttng_channel_destroy(struct lttng_kernel_channel_buffer *chan)
842 {
843 chan->ops->priv->channel_destroy(chan->priv->rb_chan);
844 module_put(chan->priv->transport->owner);
845 list_del(&chan->priv->node);
846 lttng_kernel_destroy_context(chan->priv->ctx);
847 kfree(chan->priv);
848 kfree(chan);
849 }
850
851 void lttng_metadata_channel_destroy(struct lttng_kernel_channel_buffer *chan)
852 {
853 BUG_ON(chan->priv->channel_type != METADATA_CHANNEL);
854
855 /* Protect the metadata cache with the sessions_mutex. */
856 mutex_lock(&sessions_mutex);
857 _lttng_channel_destroy(chan);
858 mutex_unlock(&sessions_mutex);
859 }
860 EXPORT_SYMBOL_GPL(lttng_metadata_channel_destroy);
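/*
 * Metadata channels are not torn down by lttng_session_destroy() (note the
 * BUG_ON there); they are destroyed through this exported helper when the
 * metadata channel is released, which is why it only needs to wrap
 * _lttng_channel_destroy() with sessions_mutex.
 */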
861
862 static
863 void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream)
864 {
865 stream->finalized = 1;
866 wake_up_interruptible(&stream->read_wait);
867 }
868
869
870 /*
871 * Supports event creation while tracing session is active.
872 * Needs to be called with sessions mutex held.
873 */
874 static
875 struct lttng_kernel_event_recorder *_lttng_kernel_event_recorder_create(struct lttng_event_recorder_enabler *event_enabler,
876 const struct lttng_kernel_event_desc *event_desc)
877 {
878 struct lttng_event_ht *events_ht = lttng_get_event_ht_from_enabler(&event_enabler->parent);
879 struct lttng_kernel_channel_buffer *chan = event_enabler->chan;
880 struct lttng_kernel_abi_event *event_param = &event_enabler->parent.event_param;
881 enum lttng_kernel_abi_instrumentation itype = event_param->instrumentation;
882 struct lttng_kernel_event_recorder *event_recorder;
883 struct lttng_kernel_event_recorder_private *event_recorder_priv;
884 struct lttng_kernel_event_common_private *event_priv;
885 const char *event_name;
886 struct hlist_head *head;
887 int ret;
888
889 if (chan->priv->free_event_id == -1U) {
890 ret = -EMFILE;
891 goto full;
892 }
893
894 switch (itype) {
895 case LTTNG_KERNEL_ABI_TRACEPOINT:
896 event_name = event_desc->event_name;
897 break;
898
899 case LTTNG_KERNEL_ABI_KPROBE:
900 lttng_fallthrough;
901 case LTTNG_KERNEL_ABI_UPROBE:
902 lttng_fallthrough;
903 case LTTNG_KERNEL_ABI_KRETPROBE:
904 lttng_fallthrough;
905 case LTTNG_KERNEL_ABI_SYSCALL:
906 event_name = event_param->name;
907 break;
908
909 case LTTNG_KERNEL_ABI_FUNCTION:
910 lttng_fallthrough;
911 case LTTNG_KERNEL_ABI_NOOP:
912 lttng_fallthrough;
913 default:
914 WARN_ON_ONCE(1);
915 ret = -EINVAL;
916 goto type_error;
917 }
918
919 head = utils_borrow_hash_table_bucket(events_ht->table, LTTNG_EVENT_HT_SIZE, event_name);
920 lttng_hlist_for_each_entry(event_priv, head, hlist_node) {
921 event_recorder_priv = container_of(event_priv, struct lttng_kernel_event_recorder_private, parent);
922
923 WARN_ON_ONCE(!event_priv->desc);
924 if (!strncmp(event_priv->desc->event_name, event_name,
925 LTTNG_KERNEL_ABI_SYM_NAME_LEN - 1)
926 && chan == event_recorder_priv->pub->chan) {
927 ret = -EEXIST;
928 goto exist;
929 }
930 }
931
932 event_recorder = kmem_cache_zalloc(event_recorder_cache, GFP_KERNEL);
933 if (!event_recorder) {
934 ret = -ENOMEM;
935 goto cache_error;
936 }
937 event_recorder_priv = kmem_cache_zalloc(event_recorder_private_cache, GFP_KERNEL);
938 if (!event_recorder_priv) {
939 ret = -ENOMEM;
940 goto cache_private_error;
941 }
942 event_recorder_priv->pub = event_recorder;
943 event_recorder_priv->parent.pub = &event_recorder->parent;
944 event_recorder->priv = event_recorder_priv;
945 event_recorder->parent.priv = &event_recorder_priv->parent;
946 event_recorder->parent.type = LTTNG_KERNEL_EVENT_TYPE_RECORDER;
947
948 event_recorder->parent.run_filter = lttng_kernel_interpret_event_filter;
949 event_recorder->chan = chan;
950 event_recorder->priv->id = chan->priv->free_event_id++;
951 event_recorder->priv->parent.instrumentation = itype;
952 INIT_LIST_HEAD(&event_recorder->priv->parent.filter_bytecode_runtime_head);
953 INIT_LIST_HEAD(&event_recorder->priv->parent.enablers_ref_head);
954
955 switch (itype) {
956 case LTTNG_KERNEL_ABI_TRACEPOINT:
957 /* Event will be enabled by enabler sync. */
958 event_recorder->parent.enabled = 0;
959 event_recorder->priv->parent.registered = 0;
960 event_recorder->priv->parent.desc = lttng_event_desc_get(event_name);
961 if (!event_recorder->priv->parent.desc) {
962 ret = -ENOENT;
963 goto register_error;
964 }
965 /* Populate lttng_event structure before event registration. */
966 smp_wmb();
967 break;
968
969 case LTTNG_KERNEL_ABI_KPROBE:
970 /*
971 * Needs to be explicitly enabled after creation, since
972 * we may want to apply filters.
973 */
974 event_recorder->parent.enabled = 0;
975 event_recorder->priv->parent.registered = 1;
976 /*
977 * Populate lttng_event structure before event
978 * registration.
979 */
980 smp_wmb();
981 ret = lttng_kprobes_register_event(event_name,
982 event_param->u.kprobe.symbol_name,
983 event_param->u.kprobe.offset,
984 event_param->u.kprobe.addr,
985 &event_recorder->parent);
986 if (ret) {
987 ret = -EINVAL;
988 goto register_error;
989 }
990 ret = try_module_get(event_recorder->priv->parent.desc->owner);
991 WARN_ON_ONCE(!ret);
992 break;
993
994 case LTTNG_KERNEL_ABI_KRETPROBE:
995 {
996 struct lttng_kernel_event_recorder *event_recorder_return;
997 struct lttng_kernel_event_recorder_private *event_recorder_return_priv;
998
999 /* kretprobe defines 2 events */
1000 /*
1001 * Needs to be explicitly enabled after creation, since
1002 * we may want to apply filters.
1003 */
1004 event_recorder->parent.enabled = 0;
1005 event_recorder->priv->parent.registered = 1;
1006
1007 event_recorder_return = kmem_cache_zalloc(event_recorder_cache, GFP_KERNEL);
1008 if (!event_recorder_return) {
1009 ret = -ENOMEM;
1010 goto register_error;
1011 }
1012 event_recorder_return_priv = kmem_cache_zalloc(event_recorder_private_cache, GFP_KERNEL);
1013 if (!event_recorder_return_priv) {
1014 kmem_cache_free(event_recorder_cache, event_recorder_return);
1015 ret = -ENOMEM;
1016 goto register_error;
1017 }
1018 event_recorder_return_priv->pub = event_recorder_return;
1019 event_recorder_return_priv->parent.pub = &event_recorder_return->parent;
1020 event_recorder_return->priv = event_recorder_return_priv;
1021 event_recorder_return->parent.priv = &event_recorder_return_priv->parent;
1022 event_recorder_return->parent.type = LTTNG_KERNEL_EVENT_TYPE_RECORDER;
1023
1024 event_recorder_return->parent.run_filter = lttng_kernel_interpret_event_filter;
1025 event_recorder_return->chan = chan;
1026 event_recorder_return->priv->id = chan->priv->free_event_id++;
1027 event_recorder_return->priv->parent.instrumentation = itype;
1028 event_recorder_return->parent.enabled = 0;
1029 event_recorder_return->priv->parent.registered = 1;
1030 INIT_LIST_HEAD(&event_recorder_return->priv->parent.filter_bytecode_runtime_head);
1031 INIT_LIST_HEAD(&event_recorder_return->priv->parent.enablers_ref_head);
1032 /*
1033 * Populate lttng_event structure before kretprobe registration.
1034 */
1035 smp_wmb();
1036 ret = lttng_kretprobes_register(event_name,
1037 event_param->u.kretprobe.symbol_name,
1038 event_param->u.kretprobe.offset,
1039 event_param->u.kretprobe.addr,
1040 &event_recorder->parent, &event_recorder_return->parent);
1041 if (ret) {
1042 kmem_cache_free(event_recorder_private_cache, event_recorder_return_priv);
1043 kmem_cache_free(event_recorder_cache, event_recorder_return);
1044 ret = -EINVAL;
1045 goto register_error;
1046 }
1047 /* Take 2 refs on the module: one per event. */
1048 ret = try_module_get(event_recorder->priv->parent.desc->owner);
1049 WARN_ON_ONCE(!ret);
1050 ret = try_module_get(event_recorder_return->priv->parent.desc->owner);
1051 WARN_ON_ONCE(!ret);
1052 ret = _lttng_event_metadata_statedump(chan->parent.session, chan,
1053 event_recorder_return);
1054 WARN_ON_ONCE(ret > 0);
1055 if (ret) {
1056 kmem_cache_free(event_recorder_private_cache, event_recorder_return_priv);
1057 kmem_cache_free(event_recorder_cache, event_recorder_return);
1058 module_put(event_recorder_return->priv->parent.desc->owner);
1059 module_put(event_recorder->priv->parent.desc->owner);
1060 goto statedump_error;
1061 }
1062 list_add(&event_recorder_return->priv->parent.node, &chan->parent.session->priv->events);
1063 break;
1064 }
1065
1066 case LTTNG_KERNEL_ABI_SYSCALL:
1067 /*
1068 * Needs to be explicitly enabled after creation, since
1069 * we may want to apply filters.
1070 */
1071 event_recorder->parent.enabled = 0;
1072 event_recorder->priv->parent.registered = 0;
1073 event_recorder->priv->parent.desc = event_desc;
1074 switch (event_param->u.syscall.entryexit) {
1075 case LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT:
1076 ret = -EINVAL;
1077 goto register_error;
1078 case LTTNG_KERNEL_ABI_SYSCALL_ENTRY:
1079 event_recorder->priv->parent.u.syscall.entryexit = LTTNG_SYSCALL_ENTRY;
1080 break;
1081 case LTTNG_KERNEL_ABI_SYSCALL_EXIT:
1082 event_recorder->priv->parent.u.syscall.entryexit = LTTNG_SYSCALL_EXIT;
1083 break;
1084 }
1085 switch (event_param->u.syscall.abi) {
1086 case LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL:
1087 ret = -EINVAL;
1088 goto register_error;
1089 case LTTNG_KERNEL_ABI_SYSCALL_ABI_NATIVE:
1090 event_recorder->priv->parent.u.syscall.abi = LTTNG_SYSCALL_ABI_NATIVE;
1091 break;
1092 case LTTNG_KERNEL_ABI_SYSCALL_ABI_COMPAT:
1093 event_recorder->priv->parent.u.syscall.abi = LTTNG_SYSCALL_ABI_COMPAT;
1094 break;
1095 }
1096 if (!event_recorder->priv->parent.desc) {
1097 ret = -EINVAL;
1098 goto register_error;
1099 }
1100 break;
1101
1102 case LTTNG_KERNEL_ABI_UPROBE:
1103 /*
1104 * Needs to be explicitly enabled after creation, since
1105 * we may want to apply filters.
1106 */
1107 event_recorder->parent.enabled = 0;
1108 event_recorder->priv->parent.registered = 1;
1109
1110 /*
1111 * Populate lttng_event structure before event
1112 * registration.
1113 */
1114 smp_wmb();
1115
1116 ret = lttng_uprobes_register_event(event_param->name,
1117 event_param->u.uprobe.fd,
1118 &event_recorder->parent);
1119 if (ret)
1120 goto register_error;
1121 ret = try_module_get(event_recorder->priv->parent.desc->owner);
1122 WARN_ON_ONCE(!ret);
1123 break;
1124
1125 case LTTNG_KERNEL_ABI_FUNCTION:
1126 lttng_fallthrough;
1127 case LTTNG_KERNEL_ABI_NOOP:
1128 lttng_fallthrough;
1129 default:
1130 WARN_ON_ONCE(1);
1131 ret = -EINVAL;
1132 goto register_error;
1133 }
1134 ret = _lttng_event_metadata_statedump(chan->parent.session, chan, event_recorder);
1135 WARN_ON_ONCE(ret > 0);
1136 if (ret) {
1137 goto statedump_error;
1138 }
1139 hlist_add_head(&event_recorder->priv->parent.hlist_node, head);
1140 list_add(&event_recorder->priv->parent.node, &chan->parent.session->priv->events);
1141 return event_recorder;
1142
1143 statedump_error:
1144 /* If a statedump error occurs, events will not be readable. */
1145 register_error:
1146 kmem_cache_free(event_recorder_private_cache, event_recorder_priv);
1147 cache_private_error:
1148 kmem_cache_free(event_recorder_cache, event_recorder);
1149 cache_error:
1150 exist:
1151 type_error:
1152 full:
1153 return ERR_PTR(ret);
1154 }
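/*
 * Flow summary for _lttng_kernel_event_recorder_create(): duplicates are
 * rejected by a lookup in the per-session hash table keyed on event name and
 * channel; the instrumentation type then decides whether the probe is
 * registered immediately (kprobe, kretprobe, uprobe) or deferred to the
 * enabler sync (tracepoint, syscall); finally the event metadata is dumped
 * and the event is inserted in both the hash table and the session's event
 * list.  A kretprobe additionally creates a second "return" event that shares
 * the same registration.
 */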
1155
1156 static
1157 struct lttng_kernel_event_notifier *_lttng_kernel_event_notifier_create(struct lttng_event_notifier_enabler *event_enabler,
1158 const struct lttng_kernel_event_desc *event_desc)
1159 {
1160 struct lttng_event_ht *events_ht = lttng_get_event_ht_from_enabler(&event_enabler->parent);
1161 struct lttng_event_notifier_group *event_notifier_group = event_enabler->group;
1162 struct lttng_kernel_abi_event *event_param = &event_enabler->parent.event_param;
1163 uint64_t token = event_enabler->parent.user_token;
1164 enum lttng_kernel_abi_instrumentation itype = event_param->instrumentation;
1165 struct lttng_kernel_event_notifier *event_notifier;
1166 struct lttng_kernel_event_notifier_private *event_notifier_priv;
1167 struct lttng_kernel_event_common_private *event_priv;
1168 struct lttng_counter *error_counter;
1169 const char *event_name;
1170 struct hlist_head *head;
1171 int ret;
1172
1173 switch (itype) {
1174 case LTTNG_KERNEL_ABI_TRACEPOINT:
1175 event_name = event_desc->event_name;
1176 break;
1177
1178 case LTTNG_KERNEL_ABI_KPROBE:
1179 lttng_fallthrough;
1180 case LTTNG_KERNEL_ABI_UPROBE:
1181 lttng_fallthrough;
1182 case LTTNG_KERNEL_ABI_SYSCALL:
1183 event_name = event_param->name;
1184 break;
1185
1186 case LTTNG_KERNEL_ABI_KRETPROBE:
1187 lttng_fallthrough;
1188 case LTTNG_KERNEL_ABI_FUNCTION:
1189 lttng_fallthrough;
1190 case LTTNG_KERNEL_ABI_NOOP:
1191 lttng_fallthrough;
1192 default:
1193 WARN_ON_ONCE(1);
1194 ret = -EINVAL;
1195 goto type_error;
1196 }
1197
1198 head = utils_borrow_hash_table_bucket(events_ht->table, LTTNG_EVENT_HT_SIZE, event_name);
1199 lttng_hlist_for_each_entry(event_priv, head, hlist_node) {
1200 event_notifier_priv = container_of(event_priv, struct lttng_kernel_event_notifier_private, parent);
1201
1202 WARN_ON_ONCE(!event_priv->desc);
1203 if (!strncmp(event_priv->desc->event_name, event_name,
1204 LTTNG_KERNEL_ABI_SYM_NAME_LEN - 1)
1205 && event_notifier_group == event_notifier_priv->group
1206 && token == event_priv->user_token) {
1207 ret = -EEXIST;
1208 goto exist;
1209 }
1210 }
1211
1212 event_notifier = kmem_cache_zalloc(event_notifier_cache, GFP_KERNEL);
1213 if (!event_notifier) {
1214 ret = -ENOMEM;
1215 goto cache_error;
1216 }
1217 event_notifier_priv = kmem_cache_zalloc(event_notifier_private_cache, GFP_KERNEL);
1218 if (!event_notifier_priv) {
1219 ret = -ENOMEM;
1220 goto cache_private_error;
1221 }
1222 event_notifier_priv->pub = event_notifier;
1223 event_notifier_priv->parent.pub = &event_notifier->parent;
1224 event_notifier->priv = event_notifier_priv;
1225 event_notifier->parent.priv = &event_notifier_priv->parent;
1226 event_notifier->parent.type = LTTNG_KERNEL_EVENT_TYPE_NOTIFIER;
1227
1228 event_notifier->priv->group = event_notifier_group;
1229 event_notifier->priv->parent.user_token = event_enabler->parent.user_token;
1230 event_notifier->priv->error_counter_index = event_enabler->error_counter_index;
1231 event_notifier->priv->num_captures = 0;
1232 event_notifier->priv->parent.instrumentation = itype;
1233 event_notifier->notification_send = lttng_event_notifier_notification_send;
1234 INIT_LIST_HEAD(&event_notifier->priv->parent.filter_bytecode_runtime_head);
1235 INIT_LIST_HEAD(&event_notifier->priv->parent.enablers_ref_head);
1236 INIT_LIST_HEAD(&event_notifier->priv->capture_bytecode_runtime_head);
1237 event_notifier->parent.run_filter = lttng_kernel_interpret_event_filter;
1238
1239 switch (itype) {
1240 case LTTNG_KERNEL_ABI_TRACEPOINT:
1241 /* Event will be enabled by enabler sync. */
1242 event_notifier->parent.enabled = 0;
1243 event_notifier->priv->parent.registered = 0;
1244 event_notifier->priv->parent.desc = lttng_event_desc_get(event_name);
1245 if (!event_notifier->priv->parent.desc) {
1246 ret = -ENOENT;
1247 goto register_error;
1248 }
1249 /* Populate lttng_event_notifier structure before event registration. */
1250 smp_wmb();
1251 break;
1252
1253 case LTTNG_KERNEL_ABI_KPROBE:
1254 /*
1255 * Needs to be explicitly enabled after creation, since
1256 * we may want to apply filters.
1257 */
1258 event_notifier->parent.enabled = 0;
1259 event_notifier->priv->parent.registered = 1;
1260 /*
1261 * Populate lttng_event_notifier structure before event
1262 * registration.
1263 */
1264 smp_wmb();
1265 ret = lttng_kprobes_register_event(event_param->u.kprobe.symbol_name,
1266 event_param->u.kprobe.symbol_name,
1267 event_param->u.kprobe.offset,
1268 event_param->u.kprobe.addr,
1269 &event_notifier->parent);
1270 if (ret) {
1271 ret = -EINVAL;
1272 goto register_error;
1273 }
1274 ret = try_module_get(event_notifier->priv->parent.desc->owner);
1275 WARN_ON_ONCE(!ret);
1276 break;
1277
1278 case LTTNG_KERNEL_ABI_SYSCALL:
1279 /*
1280 * Needs to be explicitly enabled after creation, since
1281 * we may want to apply filters.
1282 */
1283 event_notifier->parent.enabled = 0;
1284 event_notifier->priv->parent.registered = 0;
1285 event_notifier->priv->parent.desc = event_desc;
1286 switch (event_param->u.syscall.entryexit) {
1287 case LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT:
1288 ret = -EINVAL;
1289 goto register_error;
1290 case LTTNG_KERNEL_ABI_SYSCALL_ENTRY:
1291 event_notifier->priv->parent.u.syscall.entryexit = LTTNG_SYSCALL_ENTRY;
1292 break;
1293 case LTTNG_KERNEL_ABI_SYSCALL_EXIT:
1294 event_notifier->priv->parent.u.syscall.entryexit = LTTNG_SYSCALL_EXIT;
1295 break;
1296 }
1297 switch (event_param->u.syscall.abi) {
1298 case LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL:
1299 ret = -EINVAL;
1300 goto register_error;
1301 case LTTNG_KERNEL_ABI_SYSCALL_ABI_NATIVE:
1302 event_notifier->priv->parent.u.syscall.abi = LTTNG_SYSCALL_ABI_NATIVE;
1303 break;
1304 case LTTNG_KERNEL_ABI_SYSCALL_ABI_COMPAT:
1305 event_notifier->priv->parent.u.syscall.abi = LTTNG_SYSCALL_ABI_COMPAT;
1306 break;
1307 }
1308
1309 if (!event_notifier->priv->parent.desc) {
1310 ret = -EINVAL;
1311 goto register_error;
1312 }
1313 break;
1314
1315 case LTTNG_KERNEL_ABI_UPROBE:
1316 /*
1317 * Needs to be explicitly enabled after creation, since
1318 * we may want to apply filters.
1319 */
1320 event_notifier->parent.enabled = 0;
1321 event_notifier->priv->parent.registered = 1;
1322
1323 /*
1324 * Populate lttng_event_notifier structure before
1325 * event_notifier registration.
1326 */
1327 smp_wmb();
1328
1329 ret = lttng_uprobes_register_event(event_param->name,
1330 event_param->u.uprobe.fd,
1331 &event_notifier->parent);
1332 if (ret)
1333 goto register_error;
1334 ret = try_module_get(event_notifier->priv->parent.desc->owner);
1335 WARN_ON_ONCE(!ret);
1336 break;
1337
1338 case LTTNG_KERNEL_ABI_KRETPROBE:
1339 lttng_fallthrough;
1340 case LTTNG_KERNEL_ABI_FUNCTION:
1341 lttng_fallthrough;
1342 case LTTNG_KERNEL_ABI_NOOP:
1343 lttng_fallthrough;
1344 default:
1345 WARN_ON_ONCE(1);
1346 ret = -EINVAL;
1347 goto register_error;
1348 }
1349
1350 list_add(&event_notifier->priv->parent.node, &event_notifier_group->event_notifiers_head);
1351 hlist_add_head(&event_notifier->priv->parent.hlist_node, head);
1352
1353 /*
1354 * Clear the error counter bucket. The sessiond keeps track of which
1355 * bucket is currently in use. We trust it. The session lock
1356 * synchronizes against concurrent creation of the error
1357 * counter.
1358 */
1359 error_counter = event_notifier_group->error_counter;
1360 if (error_counter) {
1361 size_t dimension_index[1];
1362
1363 /*
1364 * Check that the index is within the boundary of the counter.
1365 */
1366 if (event_notifier->priv->error_counter_index >= event_notifier_group->error_counter_len) {
1367 printk(KERN_INFO "LTTng: event_notifier: Error counter index out-of-bound: counter-len=%zu, index=%llu\n",
1368 event_notifier_group->error_counter_len, event_notifier->priv->error_counter_index);
1369 ret = -EINVAL;
1370 goto register_error;
1371 }
1372
1373 dimension_index[0] = event_notifier->priv->error_counter_index;
1374 ret = error_counter->ops->counter_clear(error_counter->counter, dimension_index);
1375 if (ret) {
1376 printk(KERN_INFO "LTTng: event_notifier: Unable to clear error counter bucket %llu\n",
1377 event_notifier->priv->error_counter_index);
1378 goto register_error;
1379 }
1380 }
1381
1382 return event_notifier;
1383
1384 register_error:
1385 kmem_cache_free(event_notifier_private_cache, event_notifier_priv);
1386 cache_private_error:
1387 kmem_cache_free(event_notifier_cache, event_notifier);
1388 cache_error:
1389 exist:
1390 type_error:
1391 return ERR_PTR(ret);
1392 }
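/*
 * _lttng_kernel_event_notifier_create() mirrors the recorder path: duplicate
 * detection is done on (event name, group, user token), probes are registered
 * immediately for kprobe/uprobe and deferred for tracepoint/syscall, and, if
 * the group has an error counter, the bucket designated by
 * error_counter_index is bounds-checked and cleared so the session daemon
 * starts counting from a known value.
 */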
1393
1394 struct lttng_kernel_event_common *_lttng_kernel_event_create(struct lttng_event_enabler_common *event_enabler,
1395 const struct lttng_kernel_event_desc *event_desc)
1396 {
1397 switch (event_enabler->enabler_type) {
1398 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
1399 {
1400 struct lttng_event_recorder_enabler *event_recorder_enabler =
1401 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
1402 struct lttng_kernel_event_recorder *event_recorder;
1403
1404 event_recorder = _lttng_kernel_event_recorder_create(event_recorder_enabler, event_desc);
1405 if (!event_recorder)
1406 return NULL;
1407 return &event_recorder->parent;
1408 }
1409 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
1410 {
1411 struct lttng_event_notifier_enabler *event_notifier_enabler =
1412 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
1413 struct lttng_kernel_event_notifier *event_notifier;
1414
1415 event_notifier = _lttng_kernel_event_notifier_create(event_notifier_enabler, event_desc);
1416 if (!event_notifier)
1417 return NULL;
1418 return &event_notifier->parent;
1419 }
1420 default:
1421 return NULL;
1422 }
1423 }
1424
1425 struct lttng_kernel_event_common *lttng_kernel_event_create(struct lttng_event_enabler_common *event_enabler,
1426 const struct lttng_kernel_event_desc *event_desc)
1427 {
1428 struct lttng_kernel_event_common *event;
1429
1430 mutex_lock(&sessions_mutex);
1431 event = _lttng_kernel_event_create(event_enabler, event_desc);
1432 mutex_unlock(&sessions_mutex);
1433 return event;
1434 }
1435
1436
1437
1438 int lttng_kernel_counter_read(struct lttng_counter *counter,
1439 const size_t *dim_indexes, int32_t cpu,
1440 int64_t *val, bool *overflow, bool *underflow)
1441 {
1442 return counter->ops->counter_read(counter->counter, dim_indexes,
1443 cpu, val, overflow, underflow);
1444 }
1445
1446 int lttng_kernel_counter_aggregate(struct lttng_counter *counter,
1447 const size_t *dim_indexes, int64_t *val,
1448 bool *overflow, bool *underflow)
1449 {
1450 return counter->ops->counter_aggregate(counter->counter, dim_indexes,
1451 val, overflow, underflow);
1452 }
1453
1454 int lttng_kernel_counter_clear(struct lttng_counter *counter,
1455 const size_t *dim_indexes)
1456 {
1457 return counter->ops->counter_clear(counter->counter, dim_indexes);
1458 }
1459
1460 /* Only used for tracepoints for now. */
1461 static
1462 void register_event(struct lttng_kernel_event_common *event)
1463 {
1464 const struct lttng_kernel_event_desc *desc;
1465 int ret = -EINVAL;
1466
1467 if (event->priv->registered)
1468 return;
1469
1470 desc = event->priv->desc;
1471 switch (event->priv->instrumentation) {
1472 case LTTNG_KERNEL_ABI_TRACEPOINT:
1473 ret = lttng_wrapper_tracepoint_probe_register(desc->event_kname,
1474 desc->tp_class->probe_callback,
1475 event);
1476 break;
1477
1478 case LTTNG_KERNEL_ABI_SYSCALL:
1479 ret = lttng_syscall_filter_enable_event(event);
1480 break;
1481
1482 case LTTNG_KERNEL_ABI_KPROBE:
1483 lttng_fallthrough;
1484 case LTTNG_KERNEL_ABI_UPROBE:
1485 ret = 0;
1486 break;
1487
1488 case LTTNG_KERNEL_ABI_KRETPROBE:
1489 switch (event->type) {
1490 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
1491 ret = 0;
1492 break;
1493 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
1494 WARN_ON_ONCE(1);
1495 break;
1496 }
1497 break;
1498
1499 case LTTNG_KERNEL_ABI_FUNCTION:
1500 lttng_fallthrough;
1501 case LTTNG_KERNEL_ABI_NOOP:
1502 lttng_fallthrough;
1503 default:
1504 WARN_ON_ONCE(1);
1505 }
1506 if (!ret)
1507 event->priv->registered = 1;
1508 }
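/*
 * register_event() and _lttng_event_unregister() only toggle the "lazy"
 * instrumentation types: tracepoints are (un)attached through the tracepoint
 * probe API and syscalls through the syscall filter tables.  Kprobes,
 * kretprobes and uprobes were already registered when the event was created,
 * so they simply report success in register_event() and are actually
 * unregistered in _lttng_event_unregister() and freed in
 * _lttng_event_destroy().
 */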
1509
1510 int _lttng_event_unregister(struct lttng_kernel_event_common *event)
1511 {
1512 struct lttng_kernel_event_common_private *event_priv = event->priv;
1513 const struct lttng_kernel_event_desc *desc;
1514 int ret = -EINVAL;
1515
1516 if (!event_priv->registered)
1517 return 0;
1518
1519 desc = event_priv->desc;
1520 switch (event_priv->instrumentation) {
1521 case LTTNG_KERNEL_ABI_TRACEPOINT:
1522 ret = lttng_wrapper_tracepoint_probe_unregister(event_priv->desc->event_kname,
1523 event_priv->desc->tp_class->probe_callback,
1524 event);
1525 break;
1526
1527 case LTTNG_KERNEL_ABI_KPROBE:
1528 lttng_kprobes_unregister_event(event);
1529 ret = 0;
1530 break;
1531
1532 case LTTNG_KERNEL_ABI_KRETPROBE:
1533 switch (event->type) {
1534 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
1535 lttng_kretprobes_unregister(event);
1536 ret = 0;
1537 break;
1538 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
1539 WARN_ON_ONCE(1);
1540 break;
1541 }
1542 break;
1543
1544 case LTTNG_KERNEL_ABI_SYSCALL:
1545 ret = lttng_syscall_filter_disable_event(event);
1546 break;
1547
1548 case LTTNG_KERNEL_ABI_NOOP:
1549 switch (event->type) {
1550 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
1551 ret = 0;
1552 break;
1553 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
1554 WARN_ON_ONCE(1);
1555 break;
1556 }
1557 break;
1558
1559 case LTTNG_KERNEL_ABI_UPROBE:
1560 lttng_uprobes_unregister_event(event);
1561 ret = 0;
1562 break;
1563
1564 case LTTNG_KERNEL_ABI_FUNCTION:
1565 lttng_fallthrough;
1566 default:
1567 WARN_ON_ONCE(1);
1568 }
1569 if (!ret)
1570 event_priv->registered = 0;
1571 return ret;
1572 }
1573
1574 /*
1575 * Only used internally at session destruction.
1576 */
1577 static
1578 void _lttng_event_destroy(struct lttng_kernel_event_common *event)
1579 {
1580 struct lttng_kernel_event_common_private *event_priv = event->priv;
1581 struct lttng_enabler_ref *enabler_ref, *tmp_enabler_ref;
1582
1583 lttng_free_event_filter_runtime(event);
1584 /* Free event enabler refs */
1585 list_for_each_entry_safe(enabler_ref, tmp_enabler_ref,
1586 &event_priv->enablers_ref_head, node)
1587 kfree(enabler_ref);
1588
1589 switch (event->type) {
1590 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
1591 {
1592 struct lttng_kernel_event_recorder *event_recorder =
1593 container_of(event, struct lttng_kernel_event_recorder, parent);
1594
1595 switch (event_priv->instrumentation) {
1596 case LTTNG_KERNEL_ABI_TRACEPOINT:
1597 lttng_event_desc_put(event_priv->desc);
1598 break;
1599
1600 case LTTNG_KERNEL_ABI_KPROBE:
1601 module_put(event_priv->desc->owner);
1602 lttng_kprobes_destroy_event_private(&event_recorder->parent);
1603 break;
1604
1605 case LTTNG_KERNEL_ABI_KRETPROBE:
1606 module_put(event_priv->desc->owner);
1607 lttng_kretprobes_destroy_private(&event_recorder->parent);
1608 break;
1609
1610 case LTTNG_KERNEL_ABI_SYSCALL:
1611 break;
1612
1613 case LTTNG_KERNEL_ABI_UPROBE:
1614 module_put(event_priv->desc->owner);
1615 lttng_uprobes_destroy_event_private(&event_recorder->parent);
1616 break;
1617
1618 case LTTNG_KERNEL_ABI_FUNCTION:
1619 lttng_fallthrough;
1620 case LTTNG_KERNEL_ABI_NOOP:
1621 lttng_fallthrough;
1622 default:
1623 WARN_ON_ONCE(1);
1624 }
1625 list_del(&event_recorder->priv->parent.node);
1626 kmem_cache_free(event_recorder_private_cache, event_recorder->priv);
1627 kmem_cache_free(event_recorder_cache, event_recorder);
1628 break;
1629 }
1630 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
1631 {
1632 struct lttng_kernel_event_notifier *event_notifier =
1633 container_of(event, struct lttng_kernel_event_notifier, parent);
1634
1635 switch (event_notifier->priv->parent.instrumentation) {
1636 case LTTNG_KERNEL_ABI_TRACEPOINT:
1637 lttng_event_desc_put(event_notifier->priv->parent.desc);
1638 break;
1639
1640 case LTTNG_KERNEL_ABI_KPROBE:
1641 module_put(event_notifier->priv->parent.desc->owner);
1642 lttng_kprobes_destroy_event_private(&event_notifier->parent);
1643 break;
1644
1645 case LTTNG_KERNEL_ABI_SYSCALL:
1646 break;
1647
1648 case LTTNG_KERNEL_ABI_UPROBE:
1649 module_put(event_notifier->priv->parent.desc->owner);
1650 lttng_uprobes_destroy_event_private(&event_notifier->parent);
1651 break;
1652
1653 case LTTNG_KERNEL_ABI_KRETPROBE:
1654 lttng_fallthrough;
1655 case LTTNG_KERNEL_ABI_FUNCTION:
1656 lttng_fallthrough;
1657 case LTTNG_KERNEL_ABI_NOOP:
1658 lttng_fallthrough;
1659 default:
1660 WARN_ON_ONCE(1);
1661 }
1662 list_del(&event_notifier->priv->parent.node);
1663 kmem_cache_free(event_notifier_private_cache, event_notifier->priv);
1664 kmem_cache_free(event_notifier_cache, event_notifier);
1665 break;
1666 }
1667 default:
1668 WARN_ON_ONCE(1);
1669 }
1670 }
1671
1672 struct lttng_kernel_id_tracker *get_tracker(struct lttng_kernel_session *session,
1673 enum tracker_type tracker_type)
1674 {
1675 switch (tracker_type) {
1676 case TRACKER_PID:
1677 return &session->pid_tracker;
1678 case TRACKER_VPID:
1679 return &session->vpid_tracker;
1680 case TRACKER_UID:
1681 return &session->uid_tracker;
1682 case TRACKER_VUID:
1683 return &session->vuid_tracker;
1684 case TRACKER_GID:
1685 return &session->gid_tracker;
1686 case TRACKER_VGID:
1687 return &session->vgid_tracker;
1688 default:
1689 WARN_ON_ONCE(1);
1690 return NULL;
1691 }
1692 }
1693
1694 int lttng_session_track_id(struct lttng_kernel_session *session,
1695 enum tracker_type tracker_type, int id)
1696 {
1697 struct lttng_kernel_id_tracker *tracker;
1698 int ret;
1699
1700 tracker = get_tracker(session, tracker_type);
1701 if (!tracker)
1702 return -EINVAL;
1703 if (id < -1)
1704 return -EINVAL;
1705 mutex_lock(&sessions_mutex);
1706 if (id == -1) {
1707 /* track all ids: destroy tracker. */
1708 lttng_id_tracker_destroy(tracker, true);
1709 ret = 0;
1710 } else {
1711 ret = lttng_id_tracker_add(tracker, id);
1712 }
1713 mutex_unlock(&sessions_mutex);
1714 return ret;
1715 }
1716
1717 int lttng_session_untrack_id(struct lttng_kernel_session *session,
1718 enum tracker_type tracker_type, int id)
1719 {
1720 struct lttng_kernel_id_tracker *tracker;
1721 int ret;
1722
1723 tracker = get_tracker(session, tracker_type);
1724 if (!tracker)
1725 return -EINVAL;
1726 if (id < -1)
1727 return -EINVAL;
1728 mutex_lock(&sessions_mutex);
1729 if (id == -1) {
1730 /* untrack all ids: replace by empty tracker. */
1731 ret = lttng_id_tracker_empty_set(tracker);
1732 } else {
1733 ret = lttng_id_tracker_del(tracker, id);
1734 }
1735 mutex_unlock(&sessions_mutex);
1736 return ret;
1737 }
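/*
 * ID tracker semantics: passing id == -1 to lttng_session_track_id() means
 * "track everything" and is implemented by destroying the tracker (no
 * filtering), whereas id == -1 passed to lttng_session_untrack_id() means
 * "track nothing" and installs an empty tracker.  Any other id adds or
 * removes that single entry.
 */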
1738
1739 static
1740 void *id_list_start(struct seq_file *m, loff_t *pos)
1741 {
1742 struct lttng_kernel_id_tracker *id_tracker = m->private;
1743 struct lttng_kernel_id_tracker_rcu *id_tracker_p = id_tracker->p;
1744 struct lttng_id_hash_node *e;
1745 int iter = 0, i;
1746
1747 mutex_lock(&sessions_mutex);
1748 if (id_tracker_p) {
1749 for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
1750 struct hlist_head *head = &id_tracker_p->id_hash[i];
1751
1752 lttng_hlist_for_each_entry(e, head, hlist) {
1753 if (iter++ >= *pos)
1754 return e;
1755 }
1756 }
1757 } else {
1758 /* ID tracker disabled. */
1759 if (iter >= *pos && iter == 0) {
1760 return id_tracker_p; /* empty tracker */
1761 }
1762 iter++;
1763 }
1764 /* End of list */
1765 return NULL;
1766 }
1767
1768 /* Called with sessions_mutex held. */
1769 static
1770 void *id_list_next(struct seq_file *m, void *p, loff_t *ppos)
1771 {
1772 struct lttng_kernel_id_tracker *id_tracker = m->private;
1773 struct lttng_kernel_id_tracker_rcu *id_tracker_p = id_tracker->p;
1774 struct lttng_id_hash_node *e;
1775 int iter = 0, i;
1776
1777 (*ppos)++;
1778 if (id_tracker_p) {
1779 for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
1780 struct hlist_head *head = &id_tracker_p->id_hash[i];
1781
1782 lttng_hlist_for_each_entry(e, head, hlist) {
1783 if (iter++ >= *ppos)
1784 return e;
1785 }
1786 }
1787 } else {
1788 /* ID tracker disabled. */
1789 if (iter >= *ppos && iter == 0)
1790 return p; /* empty tracker */
1791 iter++;
1792 }
1793
1794 /* End of list */
1795 return NULL;
1796 }
1797
1798 static
1799 void id_list_stop(struct seq_file *m, void *p)
1800 {
1801 mutex_unlock(&sessions_mutex);
1802 }
1803
1804 static
1805 int id_list_show(struct seq_file *m, void *p)
1806 {
1807 struct lttng_kernel_id_tracker *id_tracker = m->private;
1808 struct lttng_kernel_id_tracker_rcu *id_tracker_p = id_tracker->p;
1809 int id;
1810
1811 if (p == id_tracker_p) {
1812 /* Tracker disabled. */
1813 id = -1;
1814 } else {
1815 const struct lttng_id_hash_node *e = p;
1816
1817 id = lttng_id_tracker_get_node_id(e);
1818 }
1819 switch (id_tracker->priv->tracker_type) {
1820 case TRACKER_PID:
1821 seq_printf(m, "process { pid = %d; };\n", id);
1822 break;
1823 case TRACKER_VPID:
1824 seq_printf(m, "process { vpid = %d; };\n", id);
1825 break;
1826 case TRACKER_UID:
1827 seq_printf(m, "user { uid = %d; };\n", id);
1828 break;
1829 case TRACKER_VUID:
1830 seq_printf(m, "user { vuid = %d; };\n", id);
1831 break;
1832 case TRACKER_GID:
1833 seq_printf(m, "group { gid = %d; };\n", id);
1834 break;
1835 case TRACKER_VGID:
1836 seq_printf(m, "group { vgid = %d; };\n", id);
1837 break;
1838 default:
1839 seq_printf(m, "UNKNOWN { field = %d };\n", id);
1840 }
1841 return 0;
1842 }
1843
1844 static
1845 const struct seq_operations lttng_tracker_ids_list_seq_ops = {
1846 .start = id_list_start,
1847 .next = id_list_next,
1848 .stop = id_list_stop,
1849 .show = id_list_show,
1850 };
1851
1852 static
1853 int lttng_tracker_ids_list_open(struct inode *inode, struct file *file)
1854 {
1855 return seq_open(file, &lttng_tracker_ids_list_seq_ops);
1856 }
1857
1858 static
1859 int lttng_tracker_ids_list_release(struct inode *inode, struct file *file)
1860 {
1861 struct seq_file *m = file->private_data;
1862 struct lttng_kernel_id_tracker *id_tracker = m->private;
1863 int ret;
1864
1865 WARN_ON_ONCE(!id_tracker);
1866 ret = seq_release(inode, file);
1867 if (!ret)
1868 fput(id_tracker->priv->session->priv->file);
1869 return ret;
1870 }
1871
1872 const struct file_operations lttng_tracker_ids_list_fops = {
1873 .owner = THIS_MODULE,
1874 .open = lttng_tracker_ids_list_open,
1875 .read = seq_read,
1876 .llseek = seq_lseek,
1877 .release = lttng_tracker_ids_list_release,
1878 };
1879
1880 int lttng_session_list_tracker_ids(struct lttng_kernel_session *session,
1881 enum tracker_type tracker_type)
1882 {
1883 struct file *tracker_ids_list_file;
1884 struct seq_file *m;
1885 int file_fd, ret;
1886
1887 file_fd = lttng_get_unused_fd();
1888 if (file_fd < 0) {
1889 ret = file_fd;
1890 goto fd_error;
1891 }
1892
1893 tracker_ids_list_file = anon_inode_getfile("[lttng_tracker_ids_list]",
1894 &lttng_tracker_ids_list_fops,
1895 NULL, O_RDWR);
1896 if (IS_ERR(tracker_ids_list_file)) {
1897 ret = PTR_ERR(tracker_ids_list_file);
1898 goto file_error;
1899 }
1900 if (!atomic_long_add_unless(&session->priv->file->f_count, 1, LONG_MAX)) {
1901 ret = -EOVERFLOW;
1902 goto refcount_error;
1903 }
1904 ret = lttng_tracker_ids_list_fops.open(NULL, tracker_ids_list_file);
1905 if (ret < 0)
1906 goto open_error;
1907 m = tracker_ids_list_file->private_data;
1908
1909 m->private = get_tracker(session, tracker_type);
1910 BUG_ON(!m->private);
1911 fd_install(file_fd, tracker_ids_list_file);
1912
1913 return file_fd;
1914
1915 open_error:
1916 atomic_long_dec(&session->priv->file->f_count);
1917 refcount_error:
1918 fput(tracker_ids_list_file);
1919 file_error:
1920 put_unused_fd(file_fd);
1921 fd_error:
1922 return ret;
1923 }
1924
1925 /*
1926 * Enabler management.
1927 */
1928 static
1929 int lttng_match_enabler_star_glob(const char *desc_name,
1930 const char *pattern)
1931 {
1932 if (!strutils_star_glob_match(pattern, LTTNG_SIZE_MAX,
1933 desc_name, LTTNG_SIZE_MAX))
1934 return 0;
1935 return 1;
1936 }
1937
1938 static
1939 int lttng_match_enabler_name(const char *desc_name,
1940 const char *name)
1941 {
1942 if (strcmp(desc_name, name))
1943 return 0;
1944 return 1;
1945 }
1946
1947 static
1948 int lttng_desc_match_enabler_check(const struct lttng_kernel_event_desc *desc,
1949 struct lttng_event_enabler_common *enabler)
1950 {
1951 const char *desc_name, *enabler_name;
1952 bool compat = false, entry = false;
1953
1954 enabler_name = enabler->event_param.name;
1955 switch (enabler->event_param.instrumentation) {
1956 case LTTNG_KERNEL_ABI_TRACEPOINT:
1957 desc_name = desc->event_name;
1958 switch (enabler->format_type) {
1959 case LTTNG_ENABLER_FORMAT_STAR_GLOB:
1960 return lttng_match_enabler_star_glob(desc_name, enabler_name);
1961 case LTTNG_ENABLER_FORMAT_NAME:
1962 return lttng_match_enabler_name(desc_name, enabler_name);
1963 default:
1964 return -EINVAL;
1965 }
1966 break;
1967
1968 case LTTNG_KERNEL_ABI_SYSCALL:
1969 desc_name = desc->event_name;
1970 if (!strncmp(desc_name, "compat_", strlen("compat_"))) {
1971 desc_name += strlen("compat_");
1972 compat = true;
1973 }
1974 if (!strncmp(desc_name, "syscall_exit_",
1975 strlen("syscall_exit_"))) {
1976 desc_name += strlen("syscall_exit_");
1977 } else if (!strncmp(desc_name, "syscall_entry_",
1978 strlen("syscall_entry_"))) {
1979 desc_name += strlen("syscall_entry_");
1980 entry = true;
1981 } else {
1982 WARN_ON_ONCE(1);
1983 return -EINVAL;
1984 }
1985 switch (enabler->event_param.u.syscall.entryexit) {
1986 case LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT:
1987 break;
1988 case LTTNG_KERNEL_ABI_SYSCALL_ENTRY:
1989 if (!entry)
1990 return 0;
1991 break;
1992 case LTTNG_KERNEL_ABI_SYSCALL_EXIT:
1993 if (entry)
1994 return 0;
1995 break;
1996 default:
1997 return -EINVAL;
1998 }
1999 switch (enabler->event_param.u.syscall.abi) {
2000 case LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL:
2001 break;
2002 case LTTNG_KERNEL_ABI_SYSCALL_ABI_NATIVE:
2003 if (compat)
2004 return 0;
2005 break;
2006 case LTTNG_KERNEL_ABI_SYSCALL_ABI_COMPAT:
2007 if (!compat)
2008 return 0;
2009 break;
2010 default:
2011 return -EINVAL;
2012 }
2013 switch (enabler->event_param.u.syscall.match) {
2014 case LTTNG_KERNEL_ABI_SYSCALL_MATCH_NAME:
2015 switch (enabler->format_type) {
2016 case LTTNG_ENABLER_FORMAT_STAR_GLOB:
2017 return lttng_match_enabler_star_glob(desc_name, enabler_name);
2018 case LTTNG_ENABLER_FORMAT_NAME:
2019 return lttng_match_enabler_name(desc_name, enabler_name);
2020 default:
2021 return -EINVAL;
2022 }
2023 break;
2024 case LTTNG_KERNEL_ABI_SYSCALL_MATCH_NR:
2025 return -EINVAL; /* Not implemented. */
2026 default:
2027 return -EINVAL;
2028 }
2029 break;
2030
2031 default:
2032 WARN_ON_ONCE(1);
2033 return -EINVAL;
2034 }
2035 }
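/*
 * Worked example for the syscall matching above (illustrative only): a
 * descriptor named "compat_syscall_entry_openat" first has its "compat_"
 * prefix stripped (compat = true), then its "syscall_entry_" prefix stripped
 * (entry = true), leaving "openat" to be compared against the enabler name or
 * glob pattern. It therefore matches an enabler named "openat" with
 * entryexit = ENTRYEXIT or ENTRY and abi = ABI_ALL or ABI_COMPAT, and is
 * rejected by EXIT-only or NATIVE-only enablers.
 */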
2036
2037 bool lttng_desc_match_enabler(const struct lttng_kernel_event_desc *desc,
2038 struct lttng_event_enabler_common *enabler)
2039 {
2040 int ret;
2041
2042 ret = lttng_desc_match_enabler_check(desc, enabler);
2043 if (ret < 0) {
2044 WARN_ON_ONCE(1);
2045 return false;
2046 }
2047 return ret;
2048 }
2049
2050 bool lttng_event_enabler_match_event(struct lttng_event_enabler_common *event_enabler,
2051 struct lttng_kernel_event_common *event)
2052 {
2053 if (event_enabler->event_param.instrumentation != event->priv->instrumentation)
2054 return false;
2055
2056 switch (event_enabler->enabler_type) {
2057 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
2058 {
2059 struct lttng_event_recorder_enabler *event_recorder_enabler =
2060 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
2061 struct lttng_kernel_event_recorder *event_recorder =
2062 container_of(event, struct lttng_kernel_event_recorder, parent);
2063
2064 if (lttng_desc_match_enabler(event->priv->desc, event_enabler)
2065 && event_recorder->chan == event_recorder_enabler->chan)
2066 return true;
2067 else
2068 return false;
2069 }
2070 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
2071 {
2072 struct lttng_event_notifier_enabler *event_notifier_enabler =
2073 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
2074 struct lttng_kernel_event_notifier *event_notifier =
2075 container_of(event, struct lttng_kernel_event_notifier, parent);
2076
2077 if (lttng_desc_match_enabler(event->priv->desc, event_enabler)
2078 && event_notifier->priv->group == event_notifier_enabler->group
2079 && event->priv->user_token == event_enabler->user_token)
2080 return true;
2081 else
2082 return false;
2083 }
2084 default:
2085 WARN_ON_ONCE(1);
2086 return false;
2087 }
2088 }
2089
2090 bool lttng_event_enabler_desc_match_event(struct lttng_event_enabler_common *event_enabler,
2091 const struct lttng_kernel_event_desc *desc,
2092 struct lttng_kernel_event_common *event)
2093 {
2094 if (event_enabler->event_param.instrumentation != event->priv->instrumentation)
2095 return false;
2096
2097 switch (event_enabler->enabler_type) {
2098 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
2099 {
2100 struct lttng_event_recorder_enabler *event_recorder_enabler =
2101 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
2102 struct lttng_kernel_event_recorder *event_recorder =
2103 container_of(event, struct lttng_kernel_event_recorder, parent);
2104
2105 if (event->priv->desc == desc && event_recorder->chan == event_recorder_enabler->chan)
2106 return true;
2107 else
2108 return false;
2109 }
2110 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
2111 {
2112 struct lttng_event_notifier_enabler *event_notifier_enabler =
2113 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
2114 struct lttng_kernel_event_notifier *event_notifier =
2115 container_of(event, struct lttng_kernel_event_notifier, parent);
2116
2117 if (event->priv->desc == desc
2118 && event_notifier->priv->group == event_notifier_enabler->group
2119 && event->priv->user_token == event_enabler->user_token)
2120 return true;
2121 else
2122 return false;
2123 }
2124 default:
2125 WARN_ON_ONCE(1);
2126 return false;
2127 }
2128 }
2129
2130 static
2131 struct lttng_enabler_ref *lttng_enabler_ref(
2132 struct list_head *enablers_ref_list,
2133 struct lttng_event_enabler_common *enabler)
2134 {
2135 struct lttng_enabler_ref *enabler_ref;
2136
2137 list_for_each_entry(enabler_ref, enablers_ref_list, node) {
2138 if (enabler_ref->ref == enabler)
2139 return enabler_ref;
2140 }
2141 return NULL;
2142 }
2143
2144 static
2145 void lttng_create_tracepoint_event_if_missing(struct lttng_event_enabler_common *event_enabler)
2146 {
2147 struct lttng_event_ht *events_ht = lttng_get_event_ht_from_enabler(event_enabler);
2148 struct lttng_kernel_probe_desc *probe_desc;
2149 const struct lttng_kernel_event_desc *desc;
2150 struct list_head *probe_list;
2151 int i;
2152
2153 probe_list = lttng_get_probe_list_head();
2154 /*
2155 * For each probe event, if it matches our
2156 * enabler, create an associated lttng_event if not
2157 * already present.
2158 */
2159 list_for_each_entry(probe_desc, probe_list, head) {
2160 for (i = 0; i < probe_desc->nr_events; i++) {
2161 int found = 0;
2162 struct hlist_head *head;
2163 struct lttng_kernel_event_common *event;
2164 struct lttng_kernel_event_common_private *event_priv;
2165
2166 desc = probe_desc->event_desc[i];
2167 if (!lttng_desc_match_enabler(desc, event_enabler))
2168 continue;
2169
2170 /*
2171 * Check if already created.
2172 */
2173 head = utils_borrow_hash_table_bucket(events_ht->table, LTTNG_EVENT_HT_SIZE, desc->event_name);
2174 lttng_hlist_for_each_entry(event_priv, head, hlist_node) {
2175 if (lttng_event_enabler_desc_match_event(event_enabler, desc, event_priv->pub))
2176 found = 1;
2177 }
2178 if (found)
2179 continue;
2180
2181 /*
2182 * We need to create an event for this event probe.
2183 */
2184 event = _lttng_kernel_event_create(event_enabler, desc);
2185 if (!event) {
2186 printk(KERN_INFO "LTTng: Unable to create event %s\n",
2187 probe_desc->event_desc[i]->event_name);
2188 }
2189 }
2190 }
2191 }
2192
2193 static
2194 void lttng_create_syscall_event_if_missing(struct lttng_event_enabler_common *event_enabler)
2195 {
2196 int ret;
2197
2198 ret = lttng_syscalls_register_event(event_enabler);
2199 WARN_ON_ONCE(ret);
2200 }
2201
2202 /*
2203 * Create event if it is missing and present in the list of tracepoint probes.
2204 * Should be called with sessions mutex held.
2205 */
2206 static
2207 void lttng_create_event_if_missing(struct lttng_event_enabler_common *event_enabler)
2208 {
2209 switch (event_enabler->event_param.instrumentation) {
2210 case LTTNG_KERNEL_ABI_TRACEPOINT:
2211 lttng_create_tracepoint_event_if_missing(event_enabler);
2212 break;
2213
2214 case LTTNG_KERNEL_ABI_SYSCALL:
2215 lttng_create_syscall_event_if_missing(event_enabler);
2216 break;
2217
2218 default:
2219 WARN_ON_ONCE(1);
2220 break;
2221 }
2222 }
2223
2224 static
2225 void lttng_event_enabler_init_event_filter(struct lttng_event_enabler_common *event_enabler,
2226 struct lttng_kernel_event_common *event)
2227 {
2228 /* Link filter bytecodes if not linked yet. */
2229 lttng_enabler_link_bytecode(event->priv->desc, lttng_static_ctx,
2230 &event->priv->filter_bytecode_runtime_head, &event_enabler->filter_bytecode_head);
2231 }
2232
2233 static
2234 void lttng_event_enabler_init_event_capture(struct lttng_event_enabler_common *event_enabler,
2235 struct lttng_kernel_event_common *event)
2236 {
2237 switch (event_enabler->enabler_type) {
2238 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
2239 break;
2240 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
2241 {
2242 struct lttng_event_notifier_enabler *event_notifier_enabler =
2243 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
2244 struct lttng_kernel_event_notifier *event_notifier =
2245 container_of(event, struct lttng_kernel_event_notifier, parent);
2246
2247 /* Link capture bytecodes if not linked yet. */
2248 lttng_enabler_link_bytecode(event->priv->desc,
2249 lttng_static_ctx, &event_notifier->priv->capture_bytecode_runtime_head,
2250 &event_notifier_enabler->capture_bytecode_head);
2251 event_notifier->priv->num_captures = event_notifier_enabler->num_captures;
2252 break;
2253 }
2254 default:
2255 WARN_ON_ONCE(1);
2256 }
2257 }
2258
2259 /*
2260 * Create events associated with an event_enabler (if not already present),
2261 * and add a backward reference from each event to the enabler.
2262 * Should be called with sessions mutex held.
2263 */
2264 static
2265 int lttng_event_enabler_ref_events(struct lttng_event_enabler_common *event_enabler)
2266 {
2267 struct list_head *event_list_head = lttng_get_event_list_head_from_enabler(event_enabler);
2268 struct lttng_kernel_event_common_private *event_priv;
2269
2270 lttng_syscall_table_set_wildcard_all(event_enabler);
2271
2272 /* First ensure that probe events are created for this enabler. */
2273 lttng_create_event_if_missing(event_enabler);
2274
2275 /* Link the created event with its associated enabler. */
2276 list_for_each_entry(event_priv, event_list_head, node) {
2277 struct lttng_kernel_event_common *event = event_priv->pub;
2278 struct lttng_enabler_ref *enabler_ref;
2279
2280 if (!lttng_event_enabler_match_event(event_enabler, event))
2281 continue;
2282
2283 enabler_ref = lttng_enabler_ref(&event_priv->enablers_ref_head, event_enabler);
2284 if (!enabler_ref) {
2285 /*
2286 * If no backward ref, create it.
2287 * Add backward ref from the event to the enabler.
2288 */
2289 enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
2290 if (!enabler_ref)
2291 return -ENOMEM;
2292
2293 enabler_ref->ref = event_enabler;
2294 list_add(&enabler_ref->node, &event_priv->enablers_ref_head);
2295 }
2296
2297 lttng_event_enabler_init_event_filter(event_enabler, event);
2298 lttng_event_enabler_init_event_capture(event_enabler, event);
2299 }
2300 return 0;
2301 }
2302
2303 /*
2304 * Called at module load: connect the probe on all enablers matching
2305 * this event.
2306 * Called with sessions lock held.
2307 */
2308 int lttng_fix_pending_events(void)
2309 {
2310 struct lttng_kernel_session_private *session_priv;
2311
2312 list_for_each_entry(session_priv, &sessions, list)
2313 lttng_session_lazy_sync_event_enablers(session_priv->pub);
2314 return 0;
2315 }
2316
2317 static bool lttng_event_notifier_group_has_active_event_notifiers(
2318 struct lttng_event_notifier_group *event_notifier_group)
2319 {
2320 struct lttng_event_enabler_common *event_enabler;
2321
2322 list_for_each_entry(event_enabler, &event_notifier_group->enablers_head, node) {
2323 if (event_enabler->enabled)
2324 return true;
2325 }
2326 return false;
2327 }
2328
2329 bool lttng_event_notifier_active(void)
2330 {
2331 struct lttng_event_notifier_group *event_notifier_group;
2332
2333 list_for_each_entry(event_notifier_group, &event_notifier_groups, node) {
2334 if (lttng_event_notifier_group_has_active_event_notifiers(event_notifier_group))
2335 return true;
2336 }
2337 return false;
2338 }
2339
2340 int lttng_fix_pending_event_notifiers(void)
2341 {
2342 struct lttng_event_notifier_group *event_notifier_group;
2343
2344 list_for_each_entry(event_notifier_group, &event_notifier_groups, node)
2345 lttng_event_notifier_group_sync_enablers(event_notifier_group);
2346 return 0;
2347 }
2348
2349 struct lttng_event_recorder_enabler *lttng_event_recorder_enabler_create(
2350 enum lttng_enabler_format_type format_type,
2351 struct lttng_kernel_abi_event *event_param,
2352 struct lttng_kernel_channel_buffer *chan)
2353 {
2354 struct lttng_event_recorder_enabler *event_enabler;
2355
2356 event_enabler = kzalloc(sizeof(*event_enabler), GFP_KERNEL);
2357 if (!event_enabler)
2358 return NULL;
2359 event_enabler->parent.enabler_type = LTTNG_EVENT_ENABLER_TYPE_RECORDER;
2360 event_enabler->parent.format_type = format_type;
2361 INIT_LIST_HEAD(&event_enabler->parent.filter_bytecode_head);
2362 memcpy(&event_enabler->parent.event_param, event_param,
2363 sizeof(event_enabler->parent.event_param));
2364 event_enabler->chan = chan;
2365 /* ctx left NULL */
2366 event_enabler->parent.enabled = 0;
2367 return event_enabler;
2368 }
2369
2370 void lttng_event_enabler_session_add(struct lttng_kernel_session *session,
2371 struct lttng_event_recorder_enabler *event_enabler)
2372 {
2373 mutex_lock(&sessions_mutex);
2374 list_add(&event_enabler->parent.node, &session->priv->enablers_head);
2375 event_enabler->parent.published = true;
2376 lttng_session_lazy_sync_event_enablers(session);
2377 mutex_unlock(&sessions_mutex);
2378 }
2379
2380 int lttng_event_enabler_enable(struct lttng_event_enabler_common *event_enabler)
2381 {
2382 mutex_lock(&sessions_mutex);
2383 event_enabler->enabled = 1;
2384 lttng_event_enabler_sync(event_enabler);
2385 mutex_unlock(&sessions_mutex);
2386 return 0;
2387 }
2388
2389 int lttng_event_enabler_disable(struct lttng_event_enabler_common *event_enabler)
2390 {
2391 mutex_lock(&sessions_mutex);
2392 event_enabler->enabled = 0;
2393 lttng_event_enabler_sync(event_enabler);
2394 mutex_unlock(&sessions_mutex);
2395 return 0;
2396 }
2397
2398 static
2399 int lttng_enabler_attach_filter_bytecode(struct lttng_event_enabler_common *enabler,
2400 struct lttng_kernel_abi_filter_bytecode __user *bytecode)
2401 {
2402 struct lttng_kernel_bytecode_node *bytecode_node;
2403 uint32_t bytecode_len;
2404 int ret;
2405
2406 ret = get_user(bytecode_len, &bytecode->len);
2407 if (ret)
2408 return ret;
2409 bytecode_node = lttng_kvzalloc(sizeof(*bytecode_node) + bytecode_len,
2410 GFP_KERNEL);
2411 if (!bytecode_node)
2412 return -ENOMEM;
2413 ret = copy_from_user(&bytecode_node->bc, bytecode,
2414 sizeof(*bytecode) + bytecode_len);
2415 if (ret)
2416 goto error_free;
2417
2418 bytecode_node->type = LTTNG_KERNEL_BYTECODE_TYPE_FILTER;
2419 bytecode_node->enabler = enabler;
2420 /* Enforce length based on allocated size */
2421 bytecode_node->bc.len = bytecode_len;
2422 list_add_tail(&bytecode_node->node, &enabler->filter_bytecode_head);
2423
2424 return 0;
2425
2426 error_free:
2427 lttng_kvfree(bytecode_node);
2428 return ret;
2429 }
2430
2431 int lttng_event_enabler_attach_filter_bytecode(struct lttng_event_enabler_common *event_enabler,
2432 struct lttng_kernel_abi_filter_bytecode __user *bytecode)
2433 {
2434 int ret;
2435 ret = lttng_enabler_attach_filter_bytecode(event_enabler, bytecode);
2436 if (ret)
2437 goto error;
2438 lttng_event_enabler_sync(event_enabler);
2439 return 0;
2440
2441 error:
2442 return ret;
2443 }
2444
2445 int lttng_event_add_callsite(struct lttng_kernel_event_common *event,
2446 struct lttng_kernel_abi_event_callsite __user *callsite)
2447 {
2448
2449 switch (event->priv->instrumentation) {
2450 case LTTNG_KERNEL_ABI_UPROBE:
2451 return lttng_uprobes_event_add_callsite(event, callsite);
2452 default:
2453 return -EINVAL;
2454 }
2455 }
2456
2457 static
2458 void lttng_enabler_destroy(struct lttng_event_enabler_common *enabler)
2459 {
2460 struct lttng_kernel_bytecode_node *filter_node, *tmp_filter_node;
2461
2462 /* Destroy filter bytecode */
2463 list_for_each_entry_safe(filter_node, tmp_filter_node,
2464 &enabler->filter_bytecode_head, node) {
2465 lttng_kvfree(filter_node);
2466 }
2467 }
2468
2469 void lttng_event_enabler_destroy(struct lttng_event_enabler_common *event_enabler)
2470 {
2471 lttng_enabler_destroy(event_enabler);
2472 if (event_enabler->published)
2473 list_del(&event_enabler->node);
2474
2475 switch (event_enabler->enabler_type) {
2476 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
2477 {
2478 struct lttng_event_recorder_enabler *event_recorder_enabler =
2479 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
2480
2481 kfree(event_recorder_enabler);
2482 break;
2483 }
2484 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
2485 {
2486 struct lttng_event_notifier_enabler *event_notifier_enabler =
2487 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
2488
2489 kfree(event_notifier_enabler);
2490 break;
2491 }
2492 default:
2493 WARN_ON_ONCE(1);
2494 }
2495 }
2496
2497 struct lttng_event_notifier_enabler *lttng_event_notifier_enabler_create(
2498 enum lttng_enabler_format_type format_type,
2499 struct lttng_kernel_abi_event_notifier *event_notifier_param,
2500 struct lttng_event_notifier_group *event_notifier_group)
2501 {
2502 struct lttng_event_notifier_enabler *event_notifier_enabler;
2503
2504 event_notifier_enabler = kzalloc(sizeof(*event_notifier_enabler), GFP_KERNEL);
2505 if (!event_notifier_enabler)
2506 return NULL;
2507
2508 event_notifier_enabler->parent.enabler_type = LTTNG_EVENT_ENABLER_TYPE_NOTIFIER;
2509 event_notifier_enabler->parent.format_type = format_type;
2510 INIT_LIST_HEAD(&event_notifier_enabler->parent.filter_bytecode_head);
2511 INIT_LIST_HEAD(&event_notifier_enabler->capture_bytecode_head);
2512
2513 event_notifier_enabler->error_counter_index = event_notifier_param->error_counter_index;
2514 event_notifier_enabler->num_captures = 0;
2515
2516 memcpy(&event_notifier_enabler->parent.event_param, &event_notifier_param->event,
2517 sizeof(event_notifier_enabler->parent.event_param));
2518
2519 event_notifier_enabler->parent.enabled = 0;
2520 event_notifier_enabler->parent.user_token = event_notifier_param->event.token;
2521 event_notifier_enabler->group = event_notifier_group;
2522 return event_notifier_enabler;
2523 }
2524
2525 void lttng_event_notifier_enabler_group_add(struct lttng_event_notifier_group *event_notifier_group,
2526 struct lttng_event_notifier_enabler *event_notifier_enabler)
2527 {
2528 mutex_lock(&sessions_mutex);
2529 list_add(&event_notifier_enabler->parent.node, &event_notifier_enabler->group->enablers_head);
2530 event_notifier_enabler->parent.published = true;
2531 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2532 mutex_unlock(&sessions_mutex);
2533 }
2534
2535 int lttng_event_notifier_enabler_enable(
2536 struct lttng_event_notifier_enabler *event_notifier_enabler)
2537 {
2538 mutex_lock(&sessions_mutex);
2539 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 1;
2540 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2541 mutex_unlock(&sessions_mutex);
2542 return 0;
2543 }
2544
2545 int lttng_event_notifier_enabler_disable(
2546 struct lttng_event_notifier_enabler *event_notifier_enabler)
2547 {
2548 mutex_lock(&sessions_mutex);
2549 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 0;
2550 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2551 mutex_unlock(&sessions_mutex);
2552 return 0;
2553 }
2554
2555 int lttng_event_notifier_enabler_attach_capture_bytecode(
2556 struct lttng_event_notifier_enabler *event_notifier_enabler,
2557 struct lttng_kernel_abi_capture_bytecode __user *bytecode)
2558 {
2559 struct lttng_kernel_bytecode_node *bytecode_node;
2560 struct lttng_event_enabler_common *enabler =
2561 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler);
2562 uint32_t bytecode_len;
2563 int ret;
2564
2565 ret = get_user(bytecode_len, &bytecode->len);
2566 if (ret)
2567 return ret;
2568
2569 bytecode_node = lttng_kvzalloc(sizeof(*bytecode_node) + bytecode_len,
2570 GFP_KERNEL);
2571 if (!bytecode_node)
2572 return -ENOMEM;
2573
2574 ret = copy_from_user(&bytecode_node->bc, bytecode,
2575 sizeof(*bytecode) + bytecode_len);
2576 if (ret)
2577 goto error_free;
2578
2579 bytecode_node->type = LTTNG_KERNEL_BYTECODE_TYPE_CAPTURE;
2580 bytecode_node->enabler = enabler;
2581
2582 /* Enforce length based on allocated size */
2583 bytecode_node->bc.len = bytecode_len;
2584 list_add_tail(&bytecode_node->node, &event_notifier_enabler->capture_bytecode_head);
2585
2586 event_notifier_enabler->num_captures++;
2587
2588 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2589 goto end;
2590
2591 error_free:
2592 lttng_kvfree(bytecode_node);
2593 end:
2594 return ret;
2595 }
2596
2597 static
2598 void lttng_event_sync_filter_state(struct lttng_kernel_event_common *event)
2599 {
2600 int has_enablers_without_filter_bytecode = 0, nr_filters = 0;
2601 struct lttng_kernel_bytecode_runtime *runtime;
2602 struct lttng_enabler_ref *enabler_ref;
2603
2604 /* Check if has enablers without bytecode enabled */
2605 list_for_each_entry(enabler_ref, &event->priv->enablers_ref_head, node) {
2606 if (enabler_ref->ref->enabled
2607 && list_empty(&enabler_ref->ref->filter_bytecode_head)) {
2608 has_enablers_without_filter_bytecode = 1;
2609 break;
2610 }
2611 }
2612 event->priv->has_enablers_without_filter_bytecode = has_enablers_without_filter_bytecode;
2613
2614 /* Enable filters */
2615 list_for_each_entry(runtime, &event->priv->filter_bytecode_runtime_head, node) {
2616 lttng_bytecode_sync_state(runtime);
2617 nr_filters++;
2618 }
2619 WRITE_ONCE(event->eval_filter, !(has_enablers_without_filter_bytecode || !nr_filters));
2620 }
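/*
 * Descriptive note on the resulting eval_filter state: filter bytecode is
 * evaluated at tracing time only when at least one filter runtime is linked
 * and every enabled enabler referencing the event has filter bytecode
 * attached. If any enabled enabler carries no filter, the event must fire
 * unconditionally for that enabler, so eval_filter is left false and the
 * filters are bypassed.
 */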
2621
2622 static
2623 void lttng_event_sync_capture_state(struct lttng_kernel_event_common *event)
2624 {
2625 switch (event->type) {
2626 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
2627 break;
2628 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
2629 {
2630 struct lttng_kernel_event_notifier *event_notifier =
2631 container_of(event, struct lttng_kernel_event_notifier, parent);
2632 struct lttng_kernel_bytecode_runtime *runtime;
2633 int nr_captures = 0;
2634
2635 /* Enable captures */
2636 list_for_each_entry(runtime, &event_notifier->priv->capture_bytecode_runtime_head, node) {
2637 lttng_bytecode_sync_state(runtime);
2638 nr_captures++;
2639 }
2640 WRITE_ONCE(event_notifier->eval_capture, !!nr_captures);
2641 break;
2642 }
2643 default:
2644 WARN_ON_ONCE(1);
2645 }
2646 }
2647
2648 static
2649 bool lttng_get_event_enabled_state(struct lttng_kernel_event_common *event)
2650 {
2651 struct lttng_enabler_ref *enabler_ref;
2652 bool enabled = false;
2653
2654 switch (event->priv->instrumentation) {
2655 case LTTNG_KERNEL_ABI_TRACEPOINT:
2656 lttng_fallthrough;
2657 case LTTNG_KERNEL_ABI_SYSCALL:
2658 /* Enable events */
2659 list_for_each_entry(enabler_ref, &event->priv->enablers_ref_head, node) {
2660 if (enabler_ref->ref->enabled) {
2661 enabled = true;
2662 break;
2663 }
2664 }
2665 break;
2666 default:
2667 WARN_ON_ONCE(1);
2668 return false;
2669 }
2670
2671 switch (event->type) {
2672 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
2673 {
2674 struct lttng_kernel_event_recorder *event_recorder =
2675 container_of(event, struct lttng_kernel_event_recorder, parent);
2676
2677 /*
2678 * Enabled state is based on union of enablers, with
2679 * intersection of session and channel transient enable
2680 * states.
2681 */
2682 return enabled && event_recorder->chan->parent.session->priv->tstate && event_recorder->chan->priv->parent.tstate;
2683 }
2684 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
2685 return enabled;
2686 default:
2687 WARN_ON_ONCE(1);
2688 return false;
2689 }
2690 }
2691
2692 static
2693 bool lttng_event_is_lazy_sync(struct lttng_kernel_event_common *event)
2694 {
2695 switch (event->priv->instrumentation) {
2696 case LTTNG_KERNEL_ABI_TRACEPOINT:
2697 lttng_fallthrough;
2698 case LTTNG_KERNEL_ABI_SYSCALL:
2699 return true;
2700
2701 default:
2702 /* Not handled with lazy sync. */
2703 return false;
2704 }
2705 }
2706
2707 /*
2708 * Should be called with sessions mutex held.
2709 */
2710 static
2711 void lttng_sync_event_list(struct list_head *event_enabler_list,
2712 struct list_head *event_list)
2713 {
2714 struct lttng_kernel_event_common_private *event_priv;
2715 struct lttng_event_enabler_common *event_enabler;
2716
2717 list_for_each_entry(event_enabler, event_enabler_list, node)
2718 lttng_event_enabler_ref_events(event_enabler);
2719
2720 /*
2721 * For each event, if at least one of its enablers is enabled,
2722 * and its channel and session transient states are enabled, we
2723 * enable the event; otherwise we disable it.
2724 */
2725 list_for_each_entry(event_priv, event_list, node) {
2726 struct lttng_kernel_event_common *event = event_priv->pub;
2727 bool enabled;
2728
2729 if (!lttng_event_is_lazy_sync(event))
2730 continue;
2731
2732 enabled = lttng_get_event_enabled_state(event);
2733 WRITE_ONCE(event->enabled, enabled);
2734 /*
2735 * Sync tracepoint registration with event enabled state.
2736 */
2737 if (enabled) {
2738 register_event(event);
2739 } else {
2740 _lttng_event_unregister(event);
2741 }
2742
2743 lttng_event_sync_filter_state(event);
2744 lttng_event_sync_capture_state(event);
2745 }
2746 }
2747
2748 /*
2749 * lttng_session_sync_event_enablers should be called just before starting a
2750 * session.
2751 */
2752 static
2753 void lttng_session_sync_event_enablers(struct lttng_kernel_session *session)
2754 {
2755 lttng_sync_event_list(&session->priv->enablers_head, &session->priv->events);
2756 }
2757
2758 /*
2759 * Apply enablers to session events, adding events to session if need
2760 * be. It is required after each modification applied to an active
2761 * session, and right before session "start".
2762 * "lazy" sync means we only sync if required.
2763 * Should be called with sessions mutex held.
2764 */
2765 static
2766 void lttng_session_lazy_sync_event_enablers(struct lttng_kernel_session *session)
2767 {
2768 /* We can skip if session is not active */
2769 if (!session->active)
2770 return;
2771 lttng_session_sync_event_enablers(session);
2772 }
2773
2774 static
2775 void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group)
2776 {
2777 lttng_sync_event_list(&event_notifier_group->enablers_head, &event_notifier_group->event_notifiers_head);
2778 }
2779
2780 static
2781 void lttng_event_enabler_sync(struct lttng_event_enabler_common *event_enabler)
2782 {
2783 switch (event_enabler->enabler_type) {
2784 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
2785 {
2786 struct lttng_event_recorder_enabler *event_recorder_enabler =
2787 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
2788 lttng_session_lazy_sync_event_enablers(event_recorder_enabler->chan->parent.session);
2789 break;
2790 }
2791 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
2792 {
2793 struct lttng_event_notifier_enabler *event_notifier_enabler =
2794 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
2795 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2796 break;
2797 }
2798 default:
2799 WARN_ON_ONCE(1);
2800 }
2801 }
2802
2803 /*
2804 * Serialize at most one packet's worth of metadata into a metadata
2805 * channel.
2806 * We grab the metadata cache mutex to get exclusive access to our metadata
2807 * buffer and to the metadata cache. Exclusive access to the metadata buffer
2808 * allows us to do otherwise racy operations such as checking the remaining
2809 * space left in a packet and writing to it, since mutual exclusion protects us from concurrent writes.
2810 * Mutual exclusion on the metadata cache allows us to read the cache content
2811 * without racing against reallocation of the cache by updates.
2812 * Returns the number of bytes written to the channel, 0 if no data
2813 * was written, and a negative value on error.
2814 */
2815 int lttng_metadata_output_channel(struct lttng_metadata_stream *stream,
2816 struct lttng_kernel_ring_buffer_channel *chan, bool *coherent)
2817 {
2818 struct lttng_kernel_ring_buffer_ctx ctx;
2819 int ret = 0;
2820 size_t len, reserve_len;
2821
2822 /*
2823 * Ensure we support multiple get_next / put sequences followed by
2824 * put_next. The metadata cache lock protects reading the metadata
2825 * cache. It can indeed be read concurrently by "get_next_subbuf" and
2826 * "flush" operations on the buffer invoked by different processes.
2827 * Moreover, since the metadata cache memory can be reallocated, we
2828 * need to have exclusive access against updates even though we only
2829 * read it.
2830 */
2831 mutex_lock(&stream->metadata_cache->lock);
2832 WARN_ON(stream->metadata_in < stream->metadata_out);
2833 if (stream->metadata_in != stream->metadata_out)
2834 goto end;
2835
2836 /* Metadata regenerated, change the version. */
2837 if (stream->metadata_cache->version != stream->version)
2838 stream->version = stream->metadata_cache->version;
2839
2840 len = stream->metadata_cache->metadata_written -
2841 stream->metadata_in;
2842 if (!len)
2843 goto end;
2844 reserve_len = min_t(size_t,
2845 stream->transport->ops.priv->packet_avail_size(chan),
2846 len);
2847 lib_ring_buffer_ctx_init(&ctx, chan, reserve_len,
2848 sizeof(char), NULL);
2849 /*
2850 * If reservation failed, return an error to the caller.
2851 */
2852 ret = stream->transport->ops.event_reserve(&ctx);
2853 if (ret != 0) {
2854 printk(KERN_WARNING "LTTng: Metadata event reservation failed\n");
2855 stream->coherent = false;
2856 goto end;
2857 }
2858 stream->transport->ops.event_write(&ctx,
2859 stream->metadata_cache->data + stream->metadata_in,
2860 reserve_len, 1);
2861 stream->transport->ops.event_commit(&ctx);
2862 stream->metadata_in += reserve_len;
2863 if (reserve_len < len)
2864 stream->coherent = false;
2865 else
2866 stream->coherent = true;
2867 ret = reserve_len;
2868
2869 end:
2870 if (coherent)
2871 *coherent = stream->coherent;
2872 mutex_unlock(&stream->metadata_cache->lock);
2873 return ret;
2874 }
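/*
 * Descriptive note on the coherency flag computed above: reserve_len is
 * capped to the space available in one packet, so when the remaining cached
 * metadata does not fit in a single packet, stream->coherent is cleared to
 * indicate that the metadata shipped so far ends in the middle of a
 * transaction. It is set back to true once the cache content has been fully
 * copied out.
 */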
2875
2876 static
2877 void lttng_metadata_begin(struct lttng_kernel_session *session)
2878 {
2879 if (atomic_inc_return(&session->priv->metadata_cache->producing) == 1)
2880 mutex_lock(&session->priv->metadata_cache->lock);
2881 }
2882
2883 static
2884 void lttng_metadata_end(struct lttng_kernel_session *session)
2885 {
2886 WARN_ON_ONCE(!atomic_read(&session->priv->metadata_cache->producing));
2887 if (atomic_dec_return(&session->priv->metadata_cache->producing) == 0) {
2888 struct lttng_metadata_stream *stream;
2889
2890 list_for_each_entry(stream, &session->priv->metadata_cache->metadata_stream, list)
2891 wake_up_interruptible(&stream->read_wait);
2892 mutex_unlock(&session->priv->metadata_cache->lock);
2893 }
2894 }
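/*
 * The begin/end pair above brackets one atomic metadata transaction: the
 * outermost lttng_metadata_begin() takes the cache lock, nested calls only
 * increment the producing count, and the matching lttng_metadata_end() wakes
 * up the metadata stream readers before releasing the lock. The statedump
 * helpers below follow this pattern, e.g. (sketch of the existing usage):
 *
 *	lttng_metadata_begin(session);
 *	ret = lttng_metadata_printf(session, "event {\n ... };\n\n", ...);
 *	lttng_metadata_end(session);
 */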
2895
2896 /*
2897 * Write the metadata to the metadata cache.
2898 * Must be called with sessions_mutex held.
2899 * The metadata cache lock protects us from concurrent read access by the
2900 * thread outputting metadata content to the ring buffer.
2901 * The content of the printf is printed as a single atomic metadata
2902 * transaction.
2903 */
2904 int lttng_metadata_printf(struct lttng_kernel_session *session,
2905 const char *fmt, ...)
2906 {
2907 char *str;
2908 size_t len;
2909 va_list ap;
2910
2911 WARN_ON_ONCE(!LTTNG_READ_ONCE(session->active));
2912
2913 va_start(ap, fmt);
2914 str = kvasprintf(GFP_KERNEL, fmt, ap);
2915 va_end(ap);
2916 if (!str)
2917 return -ENOMEM;
2918
2919 len = strlen(str);
2920 WARN_ON_ONCE(!atomic_read(&session->priv->metadata_cache->producing));
2921 if (session->priv->metadata_cache->metadata_written + len >
2922 session->priv->metadata_cache->cache_alloc) {
2923 char *tmp_cache_realloc;
2924 unsigned int tmp_cache_alloc_size;
2925
2926 tmp_cache_alloc_size = max_t(unsigned int,
2927 session->priv->metadata_cache->cache_alloc + len,
2928 session->priv->metadata_cache->cache_alloc << 1);
2929 tmp_cache_realloc = vzalloc(tmp_cache_alloc_size);
2930 if (!tmp_cache_realloc)
2931 goto err;
2932 if (session->priv->metadata_cache->data) {
2933 memcpy(tmp_cache_realloc,
2934 session->priv->metadata_cache->data,
2935 session->priv->metadata_cache->cache_alloc);
2936 vfree(session->priv->metadata_cache->data);
2937 }
2938
2939 session->priv->metadata_cache->cache_alloc = tmp_cache_alloc_size;
2940 session->priv->metadata_cache->data = tmp_cache_realloc;
2941 }
2942 memcpy(session->priv->metadata_cache->data +
2943 session->priv->metadata_cache->metadata_written,
2944 str, len);
2945 session->priv->metadata_cache->metadata_written += len;
2946 kfree(str);
2947
2948 return 0;
2949
2950 err:
2951 kfree(str);
2952 return -ENOMEM;
2953 }
2954
2955 static
2956 int print_tabs(struct lttng_kernel_session *session, size_t nesting)
2957 {
2958 size_t i;
2959
2960 for (i = 0; i < nesting; i++) {
2961 int ret;
2962
2963 ret = lttng_metadata_printf(session, " ");
2964 if (ret) {
2965 return ret;
2966 }
2967 }
2968 return 0;
2969 }
2970
2971 static
2972 int lttng_field_name_statedump(struct lttng_kernel_session *session,
2973 const struct lttng_kernel_event_field *field,
2974 size_t nesting)
2975 {
2976 return lttng_metadata_printf(session, " _%s;\n", field->name);
2977 }
2978
2979 static
2980 int _lttng_integer_type_statedump(struct lttng_kernel_session *session,
2981 const struct lttng_kernel_type_integer *type,
2982 enum lttng_kernel_string_encoding parent_encoding,
2983 size_t nesting)
2984 {
2985 int ret;
2986
2987 ret = print_tabs(session, nesting);
2988 if (ret)
2989 return ret;
2990 ret = lttng_metadata_printf(session,
2991 "integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s }",
2992 type->size,
2993 type->alignment,
2994 type->signedness,
2995 (parent_encoding == lttng_kernel_string_encoding_none)
2996 ? "none"
2997 : (parent_encoding == lttng_kernel_string_encoding_UTF8)
2998 ? "UTF8"
2999 : "ASCII",
3000 type->base,
3001 #if __BYTE_ORDER == __BIG_ENDIAN
3002 type->reverse_byte_order ? " byte_order = le;" : ""
3003 #else
3004 type->reverse_byte_order ? " byte_order = be;" : ""
3005 #endif
3006 );
3007 return ret;
3008 }
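/*
 * Illustrative output of the integer statedump above: a naturally aligned
 * 32-bit signed host-endian integer printed in base 10 with no encoding
 * would be emitted as the TSDL fragment:
 *
 *	integer { size = 32; align = 32; signed = 1; encoding = none; base = 10; }
 *
 * The byte_order attribute is only appended when the field byte order
 * differs from the native byte order.
 */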
3009
3010 /*
3011 * Must be called with sessions_mutex held.
3012 */
3013 static
3014 int _lttng_struct_type_statedump(struct lttng_kernel_session *session,
3015 const struct lttng_kernel_type_struct *type,
3016 size_t nesting)
3017 {
3018 const char *prev_field_name = NULL;
3019 int ret;
3020 uint32_t i, nr_fields;
3021 unsigned int alignment;
3022
3023 ret = print_tabs(session, nesting);
3024 if (ret)
3025 return ret;
3026 ret = lttng_metadata_printf(session,
3027 "struct {\n");
3028 if (ret)
3029 return ret;
3030 nr_fields = type->nr_fields;
3031 for (i = 0; i < nr_fields; i++) {
3032 const struct lttng_kernel_event_field *iter_field;
3033
3034 iter_field = type->fields[i];
3035 ret = _lttng_field_statedump(session, iter_field, nesting + 1, &prev_field_name);
3036 if (ret)
3037 return ret;
3038 }
3039 ret = print_tabs(session, nesting);
3040 if (ret)
3041 return ret;
3042 alignment = type->alignment;
3043 if (alignment) {
3044 ret = lttng_metadata_printf(session,
3045 "} align(%u)",
3046 alignment);
3047 } else {
3048 ret = lttng_metadata_printf(session,
3049 "}");
3050 }
3051 return ret;
3052 }
3053
3054 /*
3055 * Must be called with sessions_mutex held.
3056 */
3057 static
3058 int _lttng_struct_field_statedump(struct lttng_kernel_session *session,
3059 const struct lttng_kernel_event_field *field,
3060 size_t nesting)
3061 {
3062 int ret;
3063
3064 ret = _lttng_struct_type_statedump(session,
3065 lttng_kernel_get_type_struct(field->type), nesting);
3066 if (ret)
3067 return ret;
3068 return lttng_field_name_statedump(session, field, nesting);
3069 }
3070
3071 /*
3072 * Must be called with sessions_mutex held.
3073 */
3074 static
3075 int _lttng_variant_type_statedump(struct lttng_kernel_session *session,
3076 const struct lttng_kernel_type_variant *type,
3077 size_t nesting,
3078 const char *prev_field_name)
3079 {
3080 const char *tag_name;
3081 int ret;
3082 uint32_t i, nr_choices;
3083
3084 tag_name = type->tag_name;
3085 if (!tag_name)
3086 tag_name = prev_field_name;
3087 if (!tag_name)
3088 return -EINVAL;
3089 /*
3090 * CTF 1.8 does not allow expressing nonzero variant alignment in a nestable way.
3091 */
3092 if (type->alignment != 0)
3093 return -EINVAL;
3094 ret = print_tabs(session, nesting);
3095 if (ret)
3096 return ret;
3097 ret = lttng_metadata_printf(session,
3098 "variant <_%s> {\n",
3099 tag_name);
3100 if (ret)
3101 return ret;
3102 nr_choices = type->nr_choices;
3103 for (i = 0; i < nr_choices; i++) {
3104 const struct lttng_kernel_event_field *iter_field;
3105
3106 iter_field = type->choices[i];
3107 ret = _lttng_field_statedump(session, iter_field, nesting + 1, NULL);
3108 if (ret)
3109 return ret;
3110 }
3111 ret = print_tabs(session, nesting);
3112 if (ret)
3113 return ret;
3114 ret = lttng_metadata_printf(session,
3115 "}");
3116 return ret;
3117 }
3118
3119 /*
3120 * Must be called with sessions_mutex held.
3121 */
3122 static
3123 int _lttng_variant_field_statedump(struct lttng_kernel_session *session,
3124 const struct lttng_kernel_event_field *field,
3125 size_t nesting,
3126 const char *prev_field_name)
3127 {
3128 int ret;
3129
3130 ret = _lttng_variant_type_statedump(session,
3131 lttng_kernel_get_type_variant(field->type), nesting,
3132 prev_field_name);
3133 if (ret)
3134 return ret;
3135 return lttng_field_name_statedump(session, field, nesting);
3136 }
3137
3138 /*
3139 * Must be called with sessions_mutex held.
3140 */
3141 static
3142 int _lttng_array_field_statedump(struct lttng_kernel_session *session,
3143 const struct lttng_kernel_event_field *field,
3144 size_t nesting)
3145 {
3146 int ret;
3147 const struct lttng_kernel_type_array *array_type;
3148 const struct lttng_kernel_type_common *elem_type;
3149
3150 array_type = lttng_kernel_get_type_array(field->type);
3151 WARN_ON_ONCE(!array_type);
3152
3153 if (array_type->alignment) {
3154 ret = print_tabs(session, nesting);
3155 if (ret)
3156 return ret;
3157 ret = lttng_metadata_printf(session,
3158 "struct { } align(%u) _%s_padding;\n",
3159 array_type->alignment * CHAR_BIT,
3160 field->name);
3161 if (ret)
3162 return ret;
3163 }
3164 /*
3165 * Nested compound types: Only arrays of structures and variants are
3166 * currently supported.
3167 */
3168 elem_type = array_type->elem_type;
3169 switch (elem_type->type) {
3170 case lttng_kernel_type_integer:
3171 case lttng_kernel_type_struct:
3172 case lttng_kernel_type_variant:
3173 ret = _lttng_type_statedump(session, elem_type,
3174 array_type->encoding, nesting);
3175 if (ret)
3176 return ret;
3177 break;
3178
3179 default:
3180 return -EINVAL;
3181 }
3182 ret = lttng_metadata_printf(session,
3183 " _%s[%u];\n",
3184 field->name,
3185 array_type->length);
3186 return ret;
3187 }
3188
3189 /*
3190 * Must be called with sessions_mutex held.
3191 */
3192 static
3193 int _lttng_sequence_field_statedump(struct lttng_kernel_session *session,
3194 const struct lttng_kernel_event_field *field,
3195 size_t nesting,
3196 const char *prev_field_name)
3197 {
3198 int ret;
3199 const char *length_name;
3200 const struct lttng_kernel_type_sequence *sequence_type;
3201 const struct lttng_kernel_type_common *elem_type;
3202
3203 sequence_type = lttng_kernel_get_type_sequence(field->type);
3204 WARN_ON_ONCE(!sequence_type);
3205
3206 length_name = sequence_type->length_name;
3207 if (!length_name)
3208 length_name = prev_field_name;
3209 if (!length_name)
3210 return -EINVAL;
3211
3212 if (sequence_type->alignment) {
3213 ret = print_tabs(session, nesting);
3214 if (ret)
3215 return ret;
3216 ret = lttng_metadata_printf(session,
3217 "struct { } align(%u) _%s_padding;\n",
3218 sequence_type->alignment * CHAR_BIT,
3219 field->name);
3220 if (ret)
3221 return ret;
3222 }
3223
3224 /*
3225 * Nested compound types: Only arrays of structures and variants are
3226 * currently supported.
3227 */
3228 elem_type = sequence_type->elem_type;
3229 switch (elem_type->type) {
3230 case lttng_kernel_type_integer:
3231 case lttng_kernel_type_struct:
3232 case lttng_kernel_type_variant:
3233 ret = _lttng_type_statedump(session, elem_type,
3234 sequence_type->encoding, nesting);
3235 if (ret)
3236 return ret;
3237 break;
3238
3239 default:
3240 return -EINVAL;
3241 }
3242 ret = lttng_metadata_printf(session,
3243 " _%s[ _%s ];\n",
3244 field->name,
3245 length_name);
3246 return ret;
3247 }
3248
3249 /*
3250 * Must be called with sessions_mutex held.
3251 */
3252 static
3253 int _lttng_enum_type_statedump(struct lttng_kernel_session *session,
3254 const struct lttng_kernel_type_enum *type,
3255 size_t nesting)
3256 {
3257 const struct lttng_kernel_enum_desc *enum_desc;
3258 const struct lttng_kernel_type_common *container_type;
3259 int ret;
3260 unsigned int i, nr_entries;
3261
3262 container_type = type->container_type;
3263 if (container_type->type != lttng_kernel_type_integer) {
3264 ret = -EINVAL;
3265 goto end;
3266 }
3267 enum_desc = type->desc;
3268 nr_entries = enum_desc->nr_entries;
3269
3270 ret = print_tabs(session, nesting);
3271 if (ret)
3272 goto end;
3273 ret = lttng_metadata_printf(session, "enum : ");
3274 if (ret)
3275 goto end;
3276 ret = _lttng_integer_type_statedump(session, lttng_kernel_get_type_integer(container_type),
3277 lttng_kernel_string_encoding_none, 0);
3278 if (ret)
3279 goto end;
3280 ret = lttng_metadata_printf(session, " {\n");
3281 if (ret)
3282 goto end;
3283 /* Dump all entries */
3284 for (i = 0; i < nr_entries; i++) {
3285 const struct lttng_kernel_enum_entry *entry = enum_desc->entries[i];
3286 int j, len;
3287
3288 ret = print_tabs(session, nesting + 1);
3289 if (ret)
3290 goto end;
3291 ret = lttng_metadata_printf(session,
3292 "\"");
3293 if (ret)
3294 goto end;
3295 len = strlen(entry->string);
3296 /* Escape the character '"' */
3297 for (j = 0; j < len; j++) {
3298 char c = entry->string[j];
3299
3300 switch (c) {
3301 case '"':
3302 ret = lttng_metadata_printf(session,
3303 "\\\"");
3304 break;
3305 case '\\':
3306 ret = lttng_metadata_printf(session,
3307 "\\\\");
3308 break;
3309 default:
3310 ret = lttng_metadata_printf(session,
3311 "%c", c);
3312 break;
3313 }
3314 if (ret)
3315 goto end;
3316 }
3317 ret = lttng_metadata_printf(session, "\"");
3318 if (ret)
3319 goto end;
3320
3321 if (entry->options.is_auto) {
3322 ret = lttng_metadata_printf(session, ",\n");
3323 if (ret)
3324 goto end;
3325 } else {
3326 ret = lttng_metadata_printf(session,
3327 " = ");
3328 if (ret)
3329 goto end;
3330 if (entry->start.signedness)
3331 ret = lttng_metadata_printf(session,
3332 "%lld", (long long) entry->start.value);
3333 else
3334 ret = lttng_metadata_printf(session,
3335 "%llu", entry->start.value);
3336 if (ret)
3337 goto end;
3338 if (entry->start.signedness == entry->end.signedness &&
3339 entry->start.value
3340 == entry->end.value) {
3341 ret = lttng_metadata_printf(session,
3342 ",\n");
3343 } else {
3344 if (entry->end.signedness) {
3345 ret = lttng_metadata_printf(session,
3346 " ... %lld,\n",
3347 (long long) entry->end.value);
3348 } else {
3349 ret = lttng_metadata_printf(session,
3350 " ... %llu,\n",
3351 entry->end.value);
3352 }
3353 }
3354 if (ret)
3355 goto end;
3356 }
3357 }
3358 ret = print_tabs(session, nesting);
3359 if (ret)
3360 goto end;
3361 ret = lttng_metadata_printf(session, "}");
3362 end:
3363 return ret;
3364 }
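/*
 * Illustrative output of the enum statedump above: an enumeration with a
 * 32-bit unsigned container, one auto-assigned entry and one explicit range
 * would be emitted roughly as:
 *
 *	enum : integer { size = 32; align = 8; signed = 0; encoding = none; base = 10; } {
 *		"RUNNING",
 *		"WAITING" = 1 ... 3,
 *	}
 *
 * Double quotes and backslashes in entry names are escaped before being
 * written to the metadata cache.
 */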
3365
3366 /*
3367 * Must be called with sessions_mutex held.
3368 */
3369 static
3370 int _lttng_enum_field_statedump(struct lttng_kernel_session *session,
3371 const struct lttng_kernel_event_field *field,
3372 size_t nesting)
3373 {
3374 int ret;
3375 const struct lttng_kernel_type_enum *enum_type;
3376
3377 enum_type = lttng_kernel_get_type_enum(field->type);
3378 WARN_ON_ONCE(!enum_type);
3379 ret = _lttng_enum_type_statedump(session, enum_type, nesting);
3380 if (ret)
3381 return ret;
3382 return lttng_field_name_statedump(session, field, nesting);
3383 }
3384
3385 static
3386 int _lttng_integer_field_statedump(struct lttng_kernel_session *session,
3387 const struct lttng_kernel_event_field *field,
3388 size_t nesting)
3389 {
3390 int ret;
3391
3392 ret = _lttng_integer_type_statedump(session, lttng_kernel_get_type_integer(field->type),
3393 lttng_kernel_string_encoding_none, nesting);
3394 if (ret)
3395 return ret;
3396 return lttng_field_name_statedump(session, field, nesting);
3397 }
3398
3399 static
3400 int _lttng_string_type_statedump(struct lttng_kernel_session *session,
3401 const struct lttng_kernel_type_string *type,
3402 size_t nesting)
3403 {
3404 int ret;
3405
3406 /* Default encoding is UTF8 */
3407 ret = print_tabs(session, nesting);
3408 if (ret)
3409 return ret;
3410 ret = lttng_metadata_printf(session,
3411 "string%s",
3412 type->encoding == lttng_kernel_string_encoding_ASCII ?
3413 " { encoding = ASCII; }" : "");
3414 return ret;
3415 }
3416
3417 static
3418 int _lttng_string_field_statedump(struct lttng_kernel_session *session,
3419 const struct lttng_kernel_event_field *field,
3420 size_t nesting)
3421 {
3422 const struct lttng_kernel_type_string *string_type;
3423 int ret;
3424
3425 string_type = lttng_kernel_get_type_string(field->type);
3426 WARN_ON_ONCE(!string_type);
3427 ret = _lttng_string_type_statedump(session, string_type, nesting);
3428 if (ret)
3429 return ret;
3430 return lttng_field_name_statedump(session, field, nesting);
3431 }
3432
3433 /*
3434 * Must be called with sessions_mutex held.
3435 */
3436 static
3437 int _lttng_type_statedump(struct lttng_kernel_session *session,
3438 const struct lttng_kernel_type_common *type,
3439 enum lttng_kernel_string_encoding parent_encoding,
3440 size_t nesting)
3441 {
3442 int ret = 0;
3443
3444 switch (type->type) {
3445 case lttng_kernel_type_integer:
3446 ret = _lttng_integer_type_statedump(session,
3447 lttng_kernel_get_type_integer(type),
3448 parent_encoding, nesting);
3449 break;
3450 case lttng_kernel_type_enum:
3451 ret = _lttng_enum_type_statedump(session,
3452 lttng_kernel_get_type_enum(type),
3453 nesting);
3454 break;
3455 case lttng_kernel_type_string:
3456 ret = _lttng_string_type_statedump(session,
3457 lttng_kernel_get_type_string(type),
3458 nesting);
3459 break;
3460 case lttng_kernel_type_struct:
3461 ret = _lttng_struct_type_statedump(session,
3462 lttng_kernel_get_type_struct(type),
3463 nesting);
3464 break;
3465 case lttng_kernel_type_variant:
3466 ret = _lttng_variant_type_statedump(session,
3467 lttng_kernel_get_type_variant(type),
3468 nesting, NULL);
3469 break;
3470
3471 /* Nested arrays and sequences are not supported yet. */
3472 case lttng_kernel_type_array:
3473 case lttng_kernel_type_sequence:
3474 default:
3475 WARN_ON_ONCE(1);
3476 return -EINVAL;
3477 }
3478 return ret;
3479 }
3480
3481 /*
3482 * Must be called with sessions_mutex held.
3483 */
3484 static
3485 int _lttng_field_statedump(struct lttng_kernel_session *session,
3486 const struct lttng_kernel_event_field *field,
3487 size_t nesting,
3488 const char **prev_field_name_p)
3489 {
3490 const char *prev_field_name = NULL;
3491 int ret = 0;
3492
3493 if (prev_field_name_p)
3494 prev_field_name = *prev_field_name_p;
3495 switch (field->type->type) {
3496 case lttng_kernel_type_integer:
3497 ret = _lttng_integer_field_statedump(session, field, nesting);
3498 break;
3499 case lttng_kernel_type_enum:
3500 ret = _lttng_enum_field_statedump(session, field, nesting);
3501 break;
3502 case lttng_kernel_type_string:
3503 ret = _lttng_string_field_statedump(session, field, nesting);
3504 break;
3505 case lttng_kernel_type_struct:
3506 ret = _lttng_struct_field_statedump(session, field, nesting);
3507 break;
3508 case lttng_kernel_type_array:
3509 ret = _lttng_array_field_statedump(session, field, nesting);
3510 break;
3511 case lttng_kernel_type_sequence:
3512 ret = _lttng_sequence_field_statedump(session, field, nesting, prev_field_name);
3513 break;
3514 case lttng_kernel_type_variant:
3515 ret = _lttng_variant_field_statedump(session, field, nesting, prev_field_name);
3516 break;
3517
3518 default:
3519 WARN_ON_ONCE(1);
3520 return -EINVAL;
3521 }
3522 if (prev_field_name_p)
3523 *prev_field_name_p = field->name;
3524 return ret;
3525 }
3526
3527 static
3528 int _lttng_context_metadata_statedump(struct lttng_kernel_session *session,
3529 struct lttng_kernel_ctx *ctx)
3530 {
3531 const char *prev_field_name = NULL;
3532 int ret = 0;
3533 int i;
3534
3535 if (!ctx)
3536 return 0;
3537 for (i = 0; i < ctx->nr_fields; i++) {
3538 const struct lttng_kernel_ctx_field *field = &ctx->fields[i];
3539
3540 ret = _lttng_field_statedump(session, field->event_field, 2, &prev_field_name);
3541 if (ret)
3542 return ret;
3543 }
3544 return ret;
3545 }
3546
3547 static
3548 int _lttng_fields_metadata_statedump(struct lttng_kernel_session *session,
3549 struct lttng_kernel_event_recorder *event_recorder)
3550 {
3551 const char *prev_field_name = NULL;
3552 const struct lttng_kernel_event_desc *desc = event_recorder->priv->parent.desc;
3553 int ret = 0;
3554 int i;
3555
3556 for (i = 0; i < desc->tp_class->nr_fields; i++) {
3557 const struct lttng_kernel_event_field *field = desc->tp_class->fields[i];
3558
3559 ret = _lttng_field_statedump(session, field, 2, &prev_field_name);
3560 if (ret)
3561 return ret;
3562 }
3563 return ret;
3564 }
3565
3566 /*
3567 * Must be called with sessions_mutex held.
3568 * The entire event metadata is printed as a single atomic metadata
3569 * transaction.
3570 */
3571 static
3572 int _lttng_event_metadata_statedump(struct lttng_kernel_session *session,
3573 struct lttng_kernel_channel_buffer *chan,
3574 struct lttng_kernel_event_recorder *event_recorder)
3575 {
3576 int ret = 0;
3577
3578 if (event_recorder->priv->metadata_dumped || !LTTNG_READ_ONCE(session->active))
3579 return 0;
3580 if (chan->priv->channel_type == METADATA_CHANNEL)
3581 return 0;
3582
3583 lttng_metadata_begin(session);
3584
3585 ret = lttng_metadata_printf(session,
3586 "event {\n"
3587 " name = \"%s\";\n"
3588 " id = %u;\n"
3589 " stream_id = %u;\n",
3590 event_recorder->priv->parent.desc->event_name,
3591 event_recorder->priv->id,
3592 event_recorder->chan->priv->id);
3593 if (ret)
3594 goto end;
3595
3596 ret = lttng_metadata_printf(session,
3597 " fields := struct {\n"
3598 );
3599 if (ret)
3600 goto end;
3601
3602 ret = _lttng_fields_metadata_statedump(session, event_recorder);
3603 if (ret)
3604 goto end;
3605
3606 /*
3607 * LTTng space reservation can only reserve multiples of the
3608 * byte size.
3609 */
3610 ret = lttng_metadata_printf(session,
3611 " };\n"
3612 "};\n\n");
3613 if (ret)
3614 goto end;
3615
3616 event_recorder->priv->metadata_dumped = 1;
3617 end:
3618 lttng_metadata_end(session);
3619 return ret;
3620
3621 }
3622
3623 /*
3624 * Must be called with sessions_mutex held.
3625 * The entire channel metadata is printed as a single atomic metadata
3626 * transaction.
3627 */
3628 static
3629 int _lttng_channel_metadata_statedump(struct lttng_kernel_session *session,
3630 struct lttng_kernel_channel_buffer *chan)
3631 {
3632 int ret = 0;
3633
3634 if (chan->priv->metadata_dumped || !LTTNG_READ_ONCE(session->active))
3635 return 0;
3636
3637 if (chan->priv->channel_type == METADATA_CHANNEL)
3638 return 0;
3639
3640 lttng_metadata_begin(session);
3641
3642 WARN_ON_ONCE(!chan->priv->header_type);
3643 ret = lttng_metadata_printf(session,
3644 "stream {\n"
3645 " id = %u;\n"
3646 " event.header := %s;\n"
3647 " packet.context := struct packet_context;\n",
3648 chan->priv->id,
3649 chan->priv->header_type == 1 ? "struct event_header_compact" :
3650 "struct event_header_large");
3651 if (ret)
3652 goto end;
3653
3654 if (chan->priv->ctx) {
3655 ret = lttng_metadata_printf(session,
3656 " event.context := struct {\n");
3657 if (ret)
3658 goto end;
3659 }
3660 ret = _lttng_context_metadata_statedump(session, chan->priv->ctx);
3661 if (ret)
3662 goto end;
3663 if (chan->priv->ctx) {
3664 ret = lttng_metadata_printf(session,
3665 " };\n");
3666 if (ret)
3667 goto end;
3668 }
3669
3670 ret = lttng_metadata_printf(session,
3671 "};\n\n");
3672 	if (ret)
		goto end;

3673 chan->priv->metadata_dumped = 1;
3674 end:
3675 lttng_metadata_end(session);
3676 return ret;
3677 }
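
/*
 * Illustrative sketch only (hypothetical channel id): for a channel with
 * id 0 using the compact event header and no event context, the transaction
 * above would emit roughly:
 *
 *	stream {
 *		id = 0;
 *		event.header := struct event_header_compact;
 *		packet.context := struct packet_context;
 *	};
 */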
3678
3679 /*
3680 * Must be called with sessions_mutex held.
3681 */
3682 static
3683 int _lttng_stream_packet_context_declare(struct lttng_kernel_session *session)
3684 {
3685 return lttng_metadata_printf(session,
3686 "struct packet_context {\n"
3687 " uint64_clock_monotonic_t timestamp_begin;\n"
3688 " uint64_clock_monotonic_t timestamp_end;\n"
3689 " uint64_t content_size;\n"
3690 " uint64_t packet_size;\n"
3691 " uint64_t packet_seq_num;\n"
3692 " unsigned long events_discarded;\n"
3693 " uint32_t cpu_id;\n"
3694 "};\n\n"
3695 );
3696 }
3697
3698 /*
3699 * Compact header:
3700 * id: range: 0 - 30.
3701 * id 31 is reserved to indicate an extended header.
3702 *
3703 * Large header:
3704 * id: range: 0 - 65534.
3705 * id 65535 is reserved to indicate an extended header.
3706 *
3707 * Must be called with sessions_mutex held.
3708 */
3709 static
3710 int _lttng_event_header_declare(struct lttng_kernel_session *session)
3711 {
3712 return lttng_metadata_printf(session,
3713 "struct event_header_compact {\n"
3714 " enum : uint5_t { compact = 0 ... 30, extended = 31 } id;\n"
3715 " variant <id> {\n"
3716 " struct {\n"
3717 " uint27_clock_monotonic_t timestamp;\n"
3718 " } compact;\n"
3719 " struct {\n"
3720 " uint32_t id;\n"
3721 " uint64_clock_monotonic_t timestamp;\n"
3722 " } extended;\n"
3723 " } v;\n"
3724 "} align(%u);\n"
3725 "\n"
3726 "struct event_header_large {\n"
3727 " enum : uint16_t { compact = 0 ... 65534, extended = 65535 } id;\n"
3728 " variant <id> {\n"
3729 " struct {\n"
3730 " uint32_clock_monotonic_t timestamp;\n"
3731 " } compact;\n"
3732 " struct {\n"
3733 " uint32_t id;\n"
3734 " uint64_clock_monotonic_t timestamp;\n"
3735 " } extended;\n"
3736 " } v;\n"
3737 "} align(%u);\n\n",
3738 lttng_alignof(uint32_t) * CHAR_BIT,
3739 lttng_alignof(uint16_t) * CHAR_BIT
3740 );
3741 }
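
/*
 * Illustrative reading of the declarations above (a sketch, not a normative
 * layout description): with the compact header, an event with id 7 is
 * encoded as the 5-bit value 7 followed by a 27-bit timestamp, whereas an
 * event whose id does not fit in 0..30 is encoded as the 5-bit value 31
 * (extended) followed by a 32-bit id and a 64-bit timestamp. The large
 * header works analogously, with a 16-bit id, the value 65535 marking the
 * extended form, and a 32-bit timestamp in the compact case.
 */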
3742
3743 /*
3744 * Approximation of the correlation between NTP time of day and the
3745 * monotonic clock, taken at the start of the trace.
3746 * Yes, this is only an approximation. Yes, we can (and will) do better
3747 * in future versions.
3748 * This function may return a negative offset, e.g. if the system sets the
3749 * REALTIME clock to 0 after boot.
3750 *
3751 * Use a 64-bit timespec on kernels that have it; this makes 32-bit
3752 * architectures y2038-compliant.
3753 */
3754 static
3755 int64_t measure_clock_offset(void)
3756 {
3757 uint64_t monotonic_avg, monotonic[2], realtime;
3758 uint64_t tcf = trace_clock_freq();
3759 int64_t offset;
3760 unsigned long flags;
3761 #ifdef LTTNG_KERNEL_HAS_TIMESPEC64
3762 struct timespec64 rts = { 0, 0 };
3763 #else
3764 struct timespec rts = { 0, 0 };
3765 #endif
3766
3767 /* Disable interrupts to increase correlation precision. */
3768 local_irq_save(flags);
3769 monotonic[0] = trace_clock_read64();
3770 #ifdef LTTNG_KERNEL_HAS_TIMESPEC64
3771 ktime_get_real_ts64(&rts);
3772 #else
3773 getnstimeofday(&rts);
3774 #endif
3775 monotonic[1] = trace_clock_read64();
3776 local_irq_restore(flags);
3777
3778 monotonic_avg = (monotonic[0] + monotonic[1]) >> 1;
3779 realtime = (uint64_t) rts.tv_sec * tcf;
3780 if (tcf == NSEC_PER_SEC) {
3781 realtime += rts.tv_nsec;
3782 } else {
3783 uint64_t n = rts.tv_nsec * tcf;
3784
3785 do_div(n, NSEC_PER_SEC);
3786 realtime += n;
3787 }
3788 offset = (int64_t) realtime - monotonic_avg;
3789 return offset;
3790 }
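
/*
 * Worked example (sketch, assuming a trace clock running at 1 GHz, i.e.
 * tcf == NSEC_PER_SEC): with rts = { .tv_sec = 100, .tv_nsec = 500000000 }
 * and monotonic_avg == 30000000000, realtime is
 * 100 * 1000000000 + 500000000 = 100500000000, so the returned offset is
 * 100500000000 - 30000000000 = 70500000000 clock ticks (here nanoseconds,
 * since the clock runs at 1 GHz).
 */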
3791
3792 static
3793 int print_escaped_ctf_string(struct lttng_kernel_session *session, const char *string)
3794 {
3795 int ret = 0;
3796 size_t i;
3797 char cur;
3798
3799 i = 0;
3800 cur = string[i];
3801 while (cur != '\0') {
3802 switch (cur) {
3803 case '\n':
3804 ret = lttng_metadata_printf(session, "%s", "\\n");
3805 break;
3806 case '\\':
3807 case '"':
3808 ret = lttng_metadata_printf(session, "%c", '\\');
3809 if (ret)
3810 goto error;
3811 /* We still print the current char */
3812 lttng_fallthrough;
3813 default:
3814 ret = lttng_metadata_printf(session, "%c", cur);
3815 break;
3816 }
3817
3818 if (ret)
3819 goto error;
3820
3821 cur = string[++i];
3822 }
3823 error:
3824 return ret;
3825 }
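
/*
 * Example (illustrative input): for the string
 *	say "hi"<newline>bye
 * this emits
 *	say \"hi\"\nbye
 * into the metadata: double quotes and backslashes are prefixed with a
 * backslash, and a newline becomes the two characters '\' and 'n'.
 */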
3826
3827 static
3828 int print_metadata_escaped_field(struct lttng_kernel_session *session, const char *field,
3829 const char *field_value)
3830 {
3831 int ret;
3832
3833 ret = lttng_metadata_printf(session, " %s = \"", field);
3834 if (ret)
3835 goto error;
3836
3837 ret = print_escaped_ctf_string(session, field_value);
3838 if (ret)
3839 goto error;
3840
3841 ret = lttng_metadata_printf(session, "\";\n");
3842
3843 error:
3844 return ret;
3845 }
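
/*
 * Illustrative call (hypothetical values):
 *	print_metadata_escaped_field(session, "trace_name", "my \"run\"");
 * would emit the line
 *	trace_name = "my \"run\"";
 * into the current metadata section.
 */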
3846
3847 /*
3848 * Output metadata into this session's metadata buffers.
3849 * Must be called with sessions_mutex held.
3850 */
3851 static
3852 int _lttng_session_metadata_statedump(struct lttng_kernel_session *session)
3853 {
3854 unsigned char *uuid_c = session->priv->uuid.b;
3855 unsigned char uuid_s[37], clock_uuid_s[BOOT_ID_LEN];
3856 const char *product_uuid;
3857 struct lttng_kernel_channel_buffer_private *chan_priv;
3858 struct lttng_kernel_event_recorder_private *event_recorder_priv;
3859 int ret = 0;
3860
3861 if (!LTTNG_READ_ONCE(session->active))
3862 return 0;
3863
3864 lttng_metadata_begin(session);
3865
3866 if (session->priv->metadata_dumped)
3867 goto skip_session;
3868
3869 snprintf(uuid_s, sizeof(uuid_s),
3870 "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
3871 uuid_c[0], uuid_c[1], uuid_c[2], uuid_c[3],
3872 uuid_c[4], uuid_c[5], uuid_c[6], uuid_c[7],
3873 uuid_c[8], uuid_c[9], uuid_c[10], uuid_c[11],
3874 uuid_c[12], uuid_c[13], uuid_c[14], uuid_c[15]);
3875
3876 ret = lttng_metadata_printf(session,
3877 "typealias integer { size = 8; align = %u; signed = false; } := uint8_t;\n"
3878 "typealias integer { size = 16; align = %u; signed = false; } := uint16_t;\n"
3879 "typealias integer { size = 32; align = %u; signed = false; } := uint32_t;\n"
3880 "typealias integer { size = 64; align = %u; signed = false; } := uint64_t;\n"
3881 "typealias integer { size = %u; align = %u; signed = false; } := unsigned long;\n"
3882 "typealias integer { size = 5; align = 1; signed = false; } := uint5_t;\n"
3883 "typealias integer { size = 27; align = 1; signed = false; } := uint27_t;\n"
3884 "\n"
3885 "trace {\n"
3886 " major = %u;\n"
3887 " minor = %u;\n"
3888 " uuid = \"%s\";\n"
3889 " byte_order = %s;\n"
3890 " packet.header := struct {\n"
3891 " uint32_t magic;\n"
3892 " uint8_t uuid[16];\n"
3893 " uint32_t stream_id;\n"
3894 " uint64_t stream_instance_id;\n"
3895 " };\n"
3896 "};\n\n",
3897 lttng_alignof(uint8_t) * CHAR_BIT,
3898 lttng_alignof(uint16_t) * CHAR_BIT,
3899 lttng_alignof(uint32_t) * CHAR_BIT,
3900 lttng_alignof(uint64_t) * CHAR_BIT,
3901 sizeof(unsigned long) * CHAR_BIT,
3902 lttng_alignof(unsigned long) * CHAR_BIT,
3903 CTF_SPEC_MAJOR,
3904 CTF_SPEC_MINOR,
3905 uuid_s,
3906 #if __BYTE_ORDER == __BIG_ENDIAN
3907 "be"
3908 #else
3909 "le"
3910 #endif
3911 );
3912 if (ret)
3913 goto end;
3914
3915 ret = lttng_metadata_printf(session,
3916 "env {\n"
3917 " hostname = \"%s\";\n"
3918 " domain = \"kernel\";\n"
3919 " sysname = \"%s\";\n"
3920 " kernel_release = \"%s\";\n"
3921 " kernel_version = \"%s\";\n"
3922 " tracer_name = \"lttng-modules\";\n"
3923 " tracer_major = %d;\n"
3924 " tracer_minor = %d;\n"
3925 " tracer_patchlevel = %d;\n"
3926 " trace_buffering_scheme = \"global\";\n",
3927 current->nsproxy->uts_ns->name.nodename,
3928 utsname()->sysname,
3929 utsname()->release,
3930 utsname()->version,
3931 LTTNG_MODULES_MAJOR_VERSION,
3932 LTTNG_MODULES_MINOR_VERSION,
3933 LTTNG_MODULES_PATCHLEVEL_VERSION
3934 );
3935 if (ret)
3936 goto end;
3937
3938 ret = print_metadata_escaped_field(session, "trace_name", session->priv->name);
3939 if (ret)
3940 goto end;
3941 ret = print_metadata_escaped_field(session, "trace_creation_datetime",
3942 session->priv->creation_time);
3943 if (ret)
3944 goto end;
3945
3946 /* Add the product UUID to the 'env' section */
3947 product_uuid = dmi_get_system_info(DMI_PRODUCT_UUID);
3948 if (product_uuid) {
3949 ret = lttng_metadata_printf(session,
3950 " product_uuid = \"%s\";\n",
3951 product_uuid
3952 );
3953 if (ret)
3954 goto end;
3955 }
3956
3957 /* Close the 'env' section */
3958 ret = lttng_metadata_printf(session, "};\n\n");
3959 if (ret)
3960 goto end;
3961
3962 ret = lttng_metadata_printf(session,
3963 "clock {\n"
3964 " name = \"%s\";\n",
3965 trace_clock_name()
3966 );
3967 if (ret)
3968 goto end;
3969
3970 if (!trace_clock_uuid(clock_uuid_s)) {
3971 ret = lttng_metadata_printf(session,
3972 " uuid = \"%s\";\n",
3973 clock_uuid_s
3974 );
3975 if (ret)
3976 goto end;
3977 }
3978
3979 ret = lttng_metadata_printf(session,
3980 " description = \"%s\";\n"
3981 " freq = %llu; /* Frequency, in Hz */\n"
3982 " /* clock value offset from Epoch is: offset * (1/freq) */\n"
3983 " offset = %lld;\n"
3984 "};\n\n",
3985 trace_clock_description(),
3986 (unsigned long long) trace_clock_freq(),
3987 (long long) measure_clock_offset()
3988 );
3989 if (ret)
3990 goto end;
3991
3992 ret = lttng_metadata_printf(session,
3993 "typealias integer {\n"
3994 " size = 27; align = 1; signed = false;\n"
3995 " map = clock.%s.value;\n"
3996 "} := uint27_clock_monotonic_t;\n"
3997 "\n"
3998 "typealias integer {\n"
3999 " size = 32; align = %u; signed = false;\n"
4000 " map = clock.%s.value;\n"
4001 "} := uint32_clock_monotonic_t;\n"
4002 "\n"
4003 "typealias integer {\n"
4004 " size = 64; align = %u; signed = false;\n"
4005 " map = clock.%s.value;\n"
4006 "} := uint64_clock_monotonic_t;\n\n",
4007 trace_clock_name(),
4008 lttng_alignof(uint32_t) * CHAR_BIT,
4009 trace_clock_name(),
4010 lttng_alignof(uint64_t) * CHAR_BIT,
4011 trace_clock_name()
4012 );
4013 if (ret)
4014 goto end;
4015
4016 ret = _lttng_stream_packet_context_declare(session);
4017 if (ret)
4018 goto end;
4019
4020 ret = _lttng_event_header_declare(session);
4021 if (ret)
4022 goto end;
4023
4024 skip_session:
4025 list_for_each_entry(chan_priv, &session->priv->chan, node) {
4026 ret = _lttng_channel_metadata_statedump(session, chan_priv->pub);
4027 if (ret)
4028 goto end;
4029 }
4030
4031 list_for_each_entry(event_recorder_priv, &session->priv->events, parent.node) {
4032 ret = _lttng_event_metadata_statedump(session, event_recorder_priv->pub->chan,
4033 event_recorder_priv->pub);
4034 if (ret)
4035 goto end;
4036 }
4037 session->priv->metadata_dumped = 1;
4038 end:
4039 lttng_metadata_end(session);
4040 return ret;
4041 }
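
/*
 * Note on ordering (descriptive summary of the function above): the
 * session-level declarations (integer typealiases, trace block, env block,
 * clock block, clock-mapped integer types, packet context and event header
 * structures) are emitted once, then each channel and each event gets its
 * own statedump. The per-object metadata_dumped flags make the whole
 * sequence safe to re-run when new channels or events appear later.
 */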
4042
4043 /**
4044 * lttng_transport_register - LTTng transport registration
4045 * @transport: transport structure
4046 *
4047 * Registers a transport which can be used as output to extract the data out of
4048 * LTTng. The module calling this registration function must ensure that no
4049 * trap-inducing code will be executed by the transport functions, e.g.
4050 * vmalloc_sync_mappings() must be called between a vmalloc() and the moment the
4051 * memory is made visible to the transport functions. This registration itself
4052 * acts as a vmalloc_sync_mappings(); a module therefore only needs to sync
4053 * the TLBs itself if it allocates virtual memory after its registration.
4054 */
4055 void lttng_transport_register(struct lttng_transport *transport)
4056 {
4057 /*
4058 * Make sure no page fault can be triggered by the module about to be
4059 * registered. We deal with this here so we don't have to call
4060 * vmalloc_sync_mappings() in each module's init.
4061 */
4062 wrapper_vmalloc_sync_mappings();
4063
4064 mutex_lock(&sessions_mutex);
4065 list_add_tail(&transport->node, &lttng_transport_list);
4066 mutex_unlock(&sessions_mutex);
4067 }
4068 EXPORT_SYMBOL_GPL(lttng_transport_register);
4069
4070 /**
4071 * lttng_transport_unregister - LTTng transport unregistration
4072 * @transport: transport structure
4073 */
4074 void lttng_transport_unregister(struct lttng_transport *transport)
4075 {
4076 mutex_lock(&sessions_mutex);
4077 list_del(&transport->node);
4078 mutex_unlock(&sessions_mutex);
4079 }
4080 EXPORT_SYMBOL_GPL(lttng_transport_unregister);
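
/*
 * Usage sketch (illustrative only, hypothetical symbol names): a ring buffer
 * client module would typically register its transport from its module init
 * and unregister it on exit, e.g.:
 *
 *	static struct lttng_transport lttng_relay_transport = {
 *		.name = "relay-example",
 *		.owner = THIS_MODULE,
 *		...
 *	};
 *
 *	static int __init lttng_ring_buffer_client_init(void)
 *	{
 *		lttng_transport_register(&lttng_relay_transport);
 *		return 0;
 *	}
 *
 *	static void __exit lttng_ring_buffer_client_exit(void)
 *	{
 *		lttng_transport_unregister(&lttng_relay_transport);
 *	}
 */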
4081
4082 void lttng_counter_transport_register(struct lttng_counter_transport *transport)
4083 {
4084 /*
4085 * Make sure no page fault can be triggered by the module about to be
4086 * registered. We deal with this here so we don't have to call
4087 * vmalloc_sync_mappings() in each module's init.
4088 */
4089 wrapper_vmalloc_sync_mappings();
4090
4091 mutex_lock(&sessions_mutex);
4092 list_add_tail(&transport->node, &lttng_counter_transport_list);
4093 mutex_unlock(&sessions_mutex);
4094 }
4095 EXPORT_SYMBOL_GPL(lttng_counter_transport_register);
4096
4097 void lttng_counter_transport_unregister(struct lttng_counter_transport *transport)
4098 {
4099 mutex_lock(&sessions_mutex);
4100 list_del(&transport->node);
4101 mutex_unlock(&sessions_mutex);
4102 }
4103 EXPORT_SYMBOL_GPL(lttng_counter_transport_unregister);
4104
4105 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
4106
4107 enum cpuhp_state lttng_hp_prepare;
4108 enum cpuhp_state lttng_hp_online;
4109
4110 static int lttng_hotplug_prepare(unsigned int cpu, struct hlist_node *node)
4111 {
4112 struct lttng_cpuhp_node *lttng_node;
4113
4114 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4115 switch (lttng_node->component) {
4116 case LTTNG_RING_BUFFER_FRONTEND:
4117 return 0;
4118 case LTTNG_RING_BUFFER_BACKEND:
4119 return lttng_cpuhp_rb_backend_prepare(cpu, lttng_node);
4120 case LTTNG_RING_BUFFER_ITER:
4121 return 0;
4122 case LTTNG_CONTEXT_PERF_COUNTERS:
4123 return 0;
4124 default:
4125 return -EINVAL;
4126 }
4127 }
4128
4129 static int lttng_hotplug_dead(unsigned int cpu, struct hlist_node *node)
4130 {
4131 struct lttng_cpuhp_node *lttng_node;
4132
4133 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4134 switch (lttng_node->component) {
4135 case LTTNG_RING_BUFFER_FRONTEND:
4136 return lttng_cpuhp_rb_frontend_dead(cpu, lttng_node);
4137 case LTTNG_RING_BUFFER_BACKEND:
4138 return 0;
4139 case LTTNG_RING_BUFFER_ITER:
4140 return 0;
4141 case LTTNG_CONTEXT_PERF_COUNTERS:
4142 return lttng_cpuhp_perf_counter_dead(cpu, lttng_node);
4143 default:
4144 return -EINVAL;
4145 }
4146 }
4147
4148 static int lttng_hotplug_online(unsigned int cpu, struct hlist_node *node)
4149 {
4150 struct lttng_cpuhp_node *lttng_node;
4151
4152 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4153 switch (lttng_node->component) {
4154 case LTTNG_RING_BUFFER_FRONTEND:
4155 return lttng_cpuhp_rb_frontend_online(cpu, lttng_node);
4156 case LTTNG_RING_BUFFER_BACKEND:
4157 return 0;
4158 case LTTNG_RING_BUFFER_ITER:
4159 return lttng_cpuhp_rb_iter_online(cpu, lttng_node);
4160 case LTTNG_CONTEXT_PERF_COUNTERS:
4161 return lttng_cpuhp_perf_counter_online(cpu, lttng_node);
4162 default:
4163 return -EINVAL;
4164 }
4165 }
4166
4167 static int lttng_hotplug_offline(unsigned int cpu, struct hlist_node *node)
4168 {
4169 struct lttng_cpuhp_node *lttng_node;
4170
4171 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4172 switch (lttng_node->component) {
4173 case LTTNG_RING_BUFFER_FRONTEND:
4174 return lttng_cpuhp_rb_frontend_offline(cpu, lttng_node);
4175 case LTTNG_RING_BUFFER_BACKEND:
4176 return 0;
4177 case LTTNG_RING_BUFFER_ITER:
4178 return 0;
4179 case LTTNG_CONTEXT_PERF_COUNTERS:
4180 return 0;
4181 default:
4182 return -EINVAL;
4183 }
4184 }
4185
4186 static int __init lttng_init_cpu_hotplug(void)
4187 {
4188 int ret;
4189
4190 ret = cpuhp_setup_state_multi(CPUHP_BP_PREPARE_DYN, "lttng:prepare",
4191 lttng_hotplug_prepare,
4192 lttng_hotplug_dead);
4193 if (ret < 0) {
4194 return ret;
4195 }
4196 lttng_hp_prepare = ret;
4197 lttng_rb_set_hp_prepare(ret);
4198
4199 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "lttng:online",
4200 lttng_hotplug_online,
4201 lttng_hotplug_offline);
4202 if (ret < 0) {
4203 cpuhp_remove_multi_state(lttng_hp_prepare);
4204 lttng_hp_prepare = 0;
4205 return ret;
4206 }
4207 lttng_hp_online = ret;
4208 lttng_rb_set_hp_online(ret);
4209
4210 return 0;
4211 }
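
/*
 * Usage sketch (illustrative only): a component owning a struct
 * lttng_cpuhp_node hooks into these dynamic states through the standard
 * multi-instance cpuhp API, e.g.:
 *
 *	node->component = LTTNG_RING_BUFFER_BACKEND;
 *	ret = cpuhp_state_add_instance(lttng_hp_prepare, &node->node);
 *	if (ret)
 *		return ret;
 *	...
 *	cpuhp_state_remove_instance(lttng_hp_prepare, &node->node);
 */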
4212
4213 static void __exit lttng_exit_cpu_hotplug(void)
4214 {
4215 lttng_rb_set_hp_online(0);
4216 cpuhp_remove_multi_state(lttng_hp_online);
4217 lttng_rb_set_hp_prepare(0);
4218 cpuhp_remove_multi_state(lttng_hp_prepare);
4219 }
4220
4221 #else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
4222 static int lttng_init_cpu_hotplug(void)
4223 {
4224 return 0;
4225 }
4226 static void lttng_exit_cpu_hotplug(void)
4227 {
4228 }
4229 #endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
4230
4231 static int __init lttng_events_init(void)
4232 {
4233 int ret;
4234
4235 ret = wrapper_lttng_fixup_sig(THIS_MODULE);
4236 if (ret)
4237 return ret;
4238 ret = wrapper_get_pfnblock_flags_mask_init();
4239 if (ret)
4240 return ret;
4241 ret = wrapper_get_pageblock_flags_mask_init();
4242 if (ret)
4243 return ret;
4244 ret = lttng_probes_init();
4245 if (ret)
4246 return ret;
4247 ret = lttng_context_init();
4248 if (ret)
4249 return ret;
4250 ret = lttng_tracepoint_init();
4251 if (ret)
4252 goto error_tp;
4253 event_recorder_cache = KMEM_CACHE(lttng_kernel_event_recorder, 0);
4254 if (!event_recorder_cache) {
4255 ret = -ENOMEM;
4256 goto error_kmem_event_recorder;
4257 }
4258 event_recorder_private_cache = KMEM_CACHE(lttng_kernel_event_recorder_private, 0);
4259 if (!event_recorder_private_cache) {
4260 ret = -ENOMEM;
4261 goto error_kmem_event_recorder_private;
4262 }
4263 event_notifier_cache = KMEM_CACHE(lttng_kernel_event_notifier, 0);
4264 if (!event_notifier_cache) {
4265 ret = -ENOMEM;
4266 goto error_kmem_event_notifier;
4267 }
4268 event_notifier_private_cache = KMEM_CACHE(lttng_kernel_event_notifier_private, 0);
4269 if (!event_notifier_private_cache) {
4270 ret = -ENOMEM;
4271 goto error_kmem_event_notifier_private;
4272 }
4273 ret = lttng_abi_init();
4274 if (ret)
4275 goto error_abi;
4276 ret = lttng_logger_init();
4277 if (ret)
4278 goto error_logger;
4279 ret = lttng_init_cpu_hotplug();
4280 if (ret)
4281 goto error_hotplug;
4282 printk(KERN_NOTICE "LTTng: Loaded modules v%s.%s.%s%s (%s)%s%s\n",
4283 __stringify(LTTNG_MODULES_MAJOR_VERSION),
4284 __stringify(LTTNG_MODULES_MINOR_VERSION),
4285 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
4286 LTTNG_MODULES_EXTRAVERSION,
4287 LTTNG_VERSION_NAME,
4288 #ifdef LTTNG_EXTRA_VERSION_GIT
4289 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
4290 #else
4291 "",
4292 #endif
4293 #ifdef LTTNG_EXTRA_VERSION_NAME
4294 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
4295 #else
4296 "");
4297 #endif
4298 #ifdef CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM
4299 printk(KERN_NOTICE "LTTng: Experimental bitwise enum enabled.\n");
4300 #endif /* CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM */
4301 return 0;
4302
4303 error_hotplug:
4304 lttng_logger_exit();
4305 error_logger:
4306 lttng_abi_exit();
4307 error_abi:
4308 kmem_cache_destroy(event_notifier_private_cache);
4309 error_kmem_event_notifier_private:
4310 kmem_cache_destroy(event_notifier_cache);
4311 error_kmem_event_notifier:
4312 kmem_cache_destroy(event_recorder_private_cache);
4313 error_kmem_event_recorder_private:
4314 kmem_cache_destroy(event_recorder_cache);
4315 error_kmem_event_recorder:
4316 lttng_tracepoint_exit();
4317 error_tp:
4318 lttng_context_exit();
4319 printk(KERN_NOTICE "LTTng: Failed to load modules v%s.%s.%s%s (%s)%s%s\n",
4320 __stringify(LTTNG_MODULES_MAJOR_VERSION),
4321 __stringify(LTTNG_MODULES_MINOR_VERSION),
4322 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
4323 LTTNG_MODULES_EXTRAVERSION,
4324 LTTNG_VERSION_NAME,
4325 #ifdef LTTNG_EXTRA_VERSION_GIT
4326 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
4327 #else
4328 "",
4329 #endif
4330 #ifdef LTTNG_EXTRA_VERSION_NAME
4331 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
4332 #else
4333 "");
4334 #endif
4335 return ret;
4336 }
4337
4338 module_init(lttng_events_init);
4339
4340 static void __exit lttng_events_exit(void)
4341 {
4342 struct lttng_kernel_session_private *session_priv, *tmpsession_priv;
4343
4344 lttng_exit_cpu_hotplug();
4345 lttng_logger_exit();
4346 lttng_abi_exit();
4347 list_for_each_entry_safe(session_priv, tmpsession_priv, &sessions, list)
4348 lttng_session_destroy(session_priv->pub);
4349 kmem_cache_destroy(event_recorder_cache);
4350 kmem_cache_destroy(event_recorder_private_cache);
4351 kmem_cache_destroy(event_notifier_cache);
4352 kmem_cache_destroy(event_notifier_private_cache);
4353 lttng_tracepoint_exit();
4354 lttng_context_exit();
4355 printk(KERN_NOTICE "LTTng: Unloaded modules v%s.%s.%s%s (%s)%s%s\n",
4356 __stringify(LTTNG_MODULES_MAJOR_VERSION),
4357 __stringify(LTTNG_MODULES_MINOR_VERSION),
4358 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
4359 LTTNG_MODULES_EXTRAVERSION,
4360 LTTNG_VERSION_NAME,
4361 #ifdef LTTNG_EXTRA_VERSION_GIT
4362 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
4363 #else
4364 "",
4365 #endif
4366 #ifdef LTTNG_EXTRA_VERSION_NAME
4367 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
4368 #else
4369 "");
4370 #endif
4371 }
4372
4373 module_exit(lttng_events_exit);
4374
4375 #include <generated/patches.h>
4376 #ifdef LTTNG_EXTRA_VERSION_GIT
4377 MODULE_INFO(extra_version_git, LTTNG_EXTRA_VERSION_GIT);
4378 #endif
4379 #ifdef LTTNG_EXTRA_VERSION_NAME
4380 MODULE_INFO(extra_version_name, LTTNG_EXTRA_VERSION_NAME);
4381 #endif
4382 MODULE_LICENSE("GPL and additional rights");
4383 MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
4384 MODULE_DESCRIPTION("LTTng tracer");
4385 MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
4386 __stringify(LTTNG_MODULES_MINOR_VERSION) "."
4387 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
4388 LTTNG_MODULES_EXTRAVERSION);