Refactoring: introduce lttng_event_enabler_event_name_match_event
1 /* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
2 *
3 * lttng-events.c
4 *
5 * Holds LTTng per-session event registry.
6 *
7 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 */
9
10 /*
11 * This page_alloc.h wrapper needs to be included before gfpflags.h because it
12 * overrides a function with a define.
13 */
14 #include "wrapper/page_alloc.h"
15
16 #include <linux/module.h>
17 #include <linux/mutex.h>
18 #include <linux/sched.h>
19 #include <linux/slab.h>
20 #include <linux/jiffies.h>
21 #include <linux/utsname.h>
22 #include <linux/err.h>
23 #include <linux/seq_file.h>
24 #include <linux/file.h>
25 #include <linux/anon_inodes.h>
26 #include <wrapper/file.h>
27 #include <linux/uaccess.h>
28 #include <linux/vmalloc.h>
29 #include <linux/dmi.h>
30
31 #include <wrapper/compiler_attributes.h>
32 #include <wrapper/uuid.h>
33 #include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_mappings() */
34 #include <wrapper/random.h>
35 #include <wrapper/tracepoint.h>
36 #include <wrapper/list.h>
37 #include <wrapper/types.h>
38 #include <lttng/kernel-version.h>
39 #include <lttng/events.h>
40 #include <lttng/events-internal.h>
41 #include <lttng/lttng-bytecode.h>
42 #include <lttng/tracer.h>
43 #include <lttng/event-notifier-notification.h>
44 #include <lttng/abi-old.h>
45 #include <lttng/endian.h>
46 #include <lttng/string-utils.h>
47 #include <lttng/utils.h>
48 #include <ringbuffer/backend.h>
49 #include <ringbuffer/frontend.h>
50 #include <wrapper/time.h>
51
52 #define METADATA_CACHE_DEFAULT_SIZE 4096
53
54 static LIST_HEAD(sessions);
55 static LIST_HEAD(event_notifier_groups);
56 static LIST_HEAD(lttng_transport_list);
57 static LIST_HEAD(lttng_counter_transport_list);
58 /*
59 * Protect the sessions and metadata caches.
60 */
61 static DEFINE_MUTEX(sessions_mutex);
62 static struct kmem_cache *event_recorder_cache;
63 static struct kmem_cache *event_recorder_private_cache;
64 static struct kmem_cache *event_notifier_cache;
65 static struct kmem_cache *event_notifier_private_cache;
66
67 static void lttng_session_lazy_sync_event_enablers(struct lttng_kernel_session *session);
68 static void lttng_session_sync_event_enablers(struct lttng_kernel_session *session);
69 static void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group);
70 static void lttng_event_enabler_sync(struct lttng_event_enabler_common *event_enabler);
71
72 static void _lttng_event_destroy(struct lttng_kernel_event_common *event);
73 static void _lttng_channel_destroy(struct lttng_kernel_channel_buffer *chan);
74 static int _lttng_event_unregister(struct lttng_kernel_event_common *event);
75 static
76 int _lttng_event_metadata_statedump(struct lttng_kernel_session *session,
77 struct lttng_kernel_channel_buffer *chan,
78 struct lttng_kernel_event_recorder *event);
79 static
80 int _lttng_session_metadata_statedump(struct lttng_kernel_session *session);
81 static
82 void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream);
83 static
84 int _lttng_type_statedump(struct lttng_kernel_session *session,
85 const struct lttng_kernel_type_common *type,
86 enum lttng_kernel_string_encoding parent_encoding,
87 size_t nesting);
88 static
89 int _lttng_field_statedump(struct lttng_kernel_session *session,
90 const struct lttng_kernel_event_field *field,
91 size_t nesting, const char **prev_field_name_p);
92
93 void synchronize_trace(void)
94 {
95 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,1,0) || \
96 LTTNG_RHEL_KERNEL_RANGE(4,18,0,193,0,0, 4,19,0,0,0,0))
97 synchronize_rcu();
98 #else
99 synchronize_sched();
100 #endif
101
102 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0))
103 #ifdef CONFIG_PREEMPT_RT_FULL
104 synchronize_rcu();
105 #endif
106 #else /* (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0)) */
107 #ifdef CONFIG_PREEMPT_RT
108 synchronize_rcu();
109 #endif
110 #endif /* (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0)) */
111 }
112
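/*
 * Example (illustrative sketch only, not part of the original file): the
 * tear-down ordering used throughout this file. Probes are unregistered
 * first, synchronize_trace() waits for in-flight probe handlers to drain,
 * and only then is the event memory reclaimed. The helper below is
 * hypothetical; the real code paths are lttng_session_destroy() and
 * lttng_event_notifier_group_destroy() further down, which additionally hold
 * the sessions mutex.
 */
#if 0	/* illustrative only, not compiled */
static void example_teardown_one_event(struct lttng_kernel_event_common *event)
{
	int ret;

	ret = _lttng_event_unregister(event);	/* stop new probe invocations */
	WARN_ON(ret);
	synchronize_trace();			/* wait for in-flight handlers */
	_lttng_event_destroy(event);		/* now safe to free */
}
#endif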
113 void lttng_lock_sessions(void)
114 {
115 mutex_lock(&sessions_mutex);
116 }
117
118 void lttng_unlock_sessions(void)
119 {
120 mutex_unlock(&sessions_mutex);
121 }
122
123 static struct lttng_transport *lttng_transport_find(const char *name)
124 {
125 struct lttng_transport *transport;
126
127 list_for_each_entry(transport, &lttng_transport_list, node) {
128 if (!strcmp(transport->name, name))
129 return transport;
130 }
131 return NULL;
132 }
133
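/*
 * Example (illustrative sketch only): looking up a transport by name and
 * pinning its owner module before use, mirroring what
 * lttng_channel_buffer_create() does below. Real callers hold the sessions
 * mutex while walking the transport list; the transport name used here is an
 * assumption for illustration.
 */
#if 0	/* illustrative only, not compiled */
static int example_pin_transport(void)
{
	struct lttng_transport *transport;

	transport = lttng_transport_find("relay-discard");	/* name assumed */
	if (!transport)
		return -EINVAL;
	if (!try_module_get(transport->owner))
		return -EBUSY;
	/* ... use transport->ops ... */
	module_put(transport->owner);
	return 0;
}
#endif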
134 /*
135 * Called with sessions lock held.
136 */
137 int lttng_session_active(void)
138 {
139 struct lttng_kernel_session_private *iter;
140
141 list_for_each_entry(iter, &sessions, list) {
142 if (iter->pub->active)
143 return 1;
144 }
145 return 0;
146 }
147
148 struct lttng_kernel_session *lttng_session_create(void)
149 {
150 struct lttng_kernel_session *session;
151 struct lttng_kernel_session_private *session_priv;
152 struct lttng_metadata_cache *metadata_cache;
153 int i;
154
155 mutex_lock(&sessions_mutex);
156 session = lttng_kvzalloc(sizeof(*session), GFP_KERNEL);
157 if (!session)
158 goto err;
159 session_priv = lttng_kvzalloc(sizeof(*session_priv), GFP_KERNEL);
160 if (!session_priv)
161 goto err_free_session;
162 session->priv = session_priv;
163 session_priv->pub = session;
164
165 INIT_LIST_HEAD(&session_priv->chan);
166 INIT_LIST_HEAD(&session_priv->events);
167 lttng_guid_gen(&session_priv->uuid);
168
169 metadata_cache = kzalloc(sizeof(struct lttng_metadata_cache),
170 GFP_KERNEL);
171 if (!metadata_cache)
172 goto err_free_session_private;
173 metadata_cache->data = vzalloc(METADATA_CACHE_DEFAULT_SIZE);
174 if (!metadata_cache->data)
175 goto err_free_cache;
176 metadata_cache->cache_alloc = METADATA_CACHE_DEFAULT_SIZE;
177 kref_init(&metadata_cache->refcount);
178 mutex_init(&metadata_cache->lock);
179 session_priv->metadata_cache = metadata_cache;
180 INIT_LIST_HEAD(&metadata_cache->metadata_stream);
181 memcpy(&metadata_cache->uuid, &session_priv->uuid,
182 sizeof(metadata_cache->uuid));
183 INIT_LIST_HEAD(&session_priv->enablers_head);
184 for (i = 0; i < LTTNG_EVENT_HT_SIZE; i++)
185 INIT_HLIST_HEAD(&session_priv->events_ht.table[i]);
186 list_add(&session_priv->list, &sessions);
187
188 if (lttng_id_tracker_init(&session->pid_tracker, session, TRACKER_PID))
189 goto tracker_alloc_error;
190 if (lttng_id_tracker_init(&session->vpid_tracker, session, TRACKER_VPID))
191 goto tracker_alloc_error;
192 if (lttng_id_tracker_init(&session->uid_tracker, session, TRACKER_UID))
193 goto tracker_alloc_error;
194 if (lttng_id_tracker_init(&session->vuid_tracker, session, TRACKER_VUID))
195 goto tracker_alloc_error;
196 if (lttng_id_tracker_init(&session->gid_tracker, session, TRACKER_GID))
197 goto tracker_alloc_error;
198 if (lttng_id_tracker_init(&session->vgid_tracker, session, TRACKER_VGID))
199 goto tracker_alloc_error;
200
201 mutex_unlock(&sessions_mutex);
202
203 return session;
204
205 tracker_alloc_error:
206 lttng_id_tracker_fini(&session->pid_tracker);
207 lttng_id_tracker_fini(&session->vpid_tracker);
208 lttng_id_tracker_fini(&session->uid_tracker);
209 lttng_id_tracker_fini(&session->vuid_tracker);
210 lttng_id_tracker_fini(&session->gid_tracker);
211 lttng_id_tracker_fini(&session->vgid_tracker);
212 err_free_cache:
213 kfree(metadata_cache);
214 err_free_session_private:
215 lttng_kvfree(session_priv);
216 err_free_session:
217 lttng_kvfree(session);
218 err:
219 mutex_unlock(&sessions_mutex);
220 return NULL;
221 }
222
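/*
 * Example (illustrative sketch only): typical session life cycle as driven
 * by the ABI layer. All entry points used below are defined in this file;
 * error handling is elided.
 */
#if 0	/* illustrative only, not compiled */
static void example_session_lifecycle(void)
{
	struct lttng_kernel_session *session;

	session = lttng_session_create();
	if (!session)
		return;
	/* ... create channels, attach event enablers ... */
	if (lttng_session_enable(session))	/* syncs enablers, starts tracing */
		goto out;
	/* ... tracing is active ... */
	lttng_session_disable(session);		/* quiesce streams */
out:
	lttng_session_destroy(session);
}
#endif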
223 static
224 struct lttng_counter_transport *lttng_counter_transport_find(const char *name)
225 {
226 struct lttng_counter_transport *transport;
227
228 list_for_each_entry(transport, &lttng_counter_transport_list, node) {
229 if (!strcmp(transport->name, name))
230 return transport;
231 }
232 return NULL;
233 }
234
235 struct lttng_counter *lttng_kernel_counter_create(
236 const char *counter_transport_name,
237 size_t number_dimensions, const size_t *dimensions_sizes)
238 {
239 struct lttng_counter *counter = NULL;
240 struct lttng_counter_transport *counter_transport = NULL;
241
242 counter_transport = lttng_counter_transport_find(counter_transport_name);
243 if (!counter_transport) {
244 printk(KERN_WARNING "LTTng: counter transport %s not found.\n",
245 counter_transport_name);
246 goto notransport;
247 }
248 if (!try_module_get(counter_transport->owner)) {
249 printk(KERN_WARNING "LTTng: Can't lock counter transport module.\n");
250 goto notransport;
251 }
252
253 counter = lttng_kvzalloc(sizeof(struct lttng_counter), GFP_KERNEL);
254 if (!counter)
255 goto nomem;
256
257 /* Create event notifier error counter. */
258 counter->ops = &counter_transport->ops;
259 counter->transport = counter_transport;
260
261 counter->counter = counter->ops->counter_create(
262 number_dimensions, dimensions_sizes, 0);
263 if (!counter->counter) {
264 goto create_error;
265 }
266
267 return counter;
268
269 create_error:
270 lttng_kvfree(counter);
271 nomem:
272 if (counter_transport)
273 module_put(counter_transport->owner);
274 notransport:
275 return NULL;
276 }
277
278 struct lttng_event_notifier_group *lttng_event_notifier_group_create(void)
279 {
280 struct lttng_transport *transport = NULL;
281 struct lttng_event_notifier_group *event_notifier_group;
282 const char *transport_name = "relay-event-notifier";
283 size_t subbuf_size = 4096; //TODO
284 size_t num_subbuf = 16; //TODO
285 unsigned int switch_timer_interval = 0;
286 unsigned int read_timer_interval = 0;
287 int i;
288
289 mutex_lock(&sessions_mutex);
290
291 transport = lttng_transport_find(transport_name);
292 if (!transport) {
293 printk(KERN_WARNING "LTTng: transport %s not found\n",
294 transport_name);
295 goto notransport;
296 }
297 if (!try_module_get(transport->owner)) {
298 printk(KERN_WARNING "LTTng: Can't lock transport %s module.\n",
299 transport_name);
300 goto notransport;
301 }
302
303 event_notifier_group = lttng_kvzalloc(sizeof(struct lttng_event_notifier_group),
304 GFP_KERNEL);
305 if (!event_notifier_group)
306 goto nomem;
307
308 /*
309 * Initialize the ring buffer used to store event notifier
310 * notifications.
311 */
312 event_notifier_group->ops = &transport->ops;
313 event_notifier_group->chan = transport->ops.priv->channel_create(
314 transport_name, event_notifier_group, NULL,
315 subbuf_size, num_subbuf, switch_timer_interval,
316 read_timer_interval);
317 if (!event_notifier_group->chan)
318 goto create_error;
319
320 event_notifier_group->transport = transport;
321
322 INIT_LIST_HEAD(&event_notifier_group->enablers_head);
323 INIT_LIST_HEAD(&event_notifier_group->event_notifiers_head);
324 for (i = 0; i < LTTNG_EVENT_HT_SIZE; i++)
325 INIT_HLIST_HEAD(&event_notifier_group->events_ht.table[i]);
326
327 list_add(&event_notifier_group->node, &event_notifier_groups);
328
329 mutex_unlock(&sessions_mutex);
330
331 return event_notifier_group;
332
333 create_error:
334 lttng_kvfree(event_notifier_group);
335 nomem:
336 if (transport)
337 module_put(transport->owner);
338 notransport:
339 mutex_unlock(&sessions_mutex);
340 return NULL;
341 }
342
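/*
 * Example (illustrative sketch only): an event notifier group is created
 * with its notification ring buffer, used to attach event notifier enablers,
 * and torn down with lttng_event_notifier_group_destroy() (defined below).
 */
#if 0	/* illustrative only, not compiled */
static void example_event_notifier_group_lifecycle(void)
{
	struct lttng_event_notifier_group *group;

	group = lttng_event_notifier_group_create();
	if (!group)
		return;
	/* ... attach event notifier enablers, optionally an error counter ... */
	lttng_event_notifier_group_destroy(group);
}
#endif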
343 void metadata_cache_destroy(struct kref *kref)
344 {
345 struct lttng_metadata_cache *cache =
346 container_of(kref, struct lttng_metadata_cache, refcount);
347 vfree(cache->data);
348 kfree(cache);
349 }
350
351 void lttng_session_destroy(struct lttng_kernel_session *session)
352 {
353 struct lttng_kernel_channel_buffer_private *chan_priv, *tmpchan_priv;
354 struct lttng_kernel_event_recorder_private *event_recorder_priv, *tmpevent_recorder_priv;
355 struct lttng_metadata_stream *metadata_stream;
356 struct lttng_event_enabler_common *event_enabler, *tmp_event_enabler;
357 int ret;
358
359 mutex_lock(&sessions_mutex);
360 WRITE_ONCE(session->active, 0);
361 list_for_each_entry(chan_priv, &session->priv->chan, node) {
362 ret = lttng_syscalls_unregister_syscall_table(&chan_priv->parent.syscall_table);
363 WARN_ON(ret);
364 }
365 list_for_each_entry(event_recorder_priv, &session->priv->events, parent.node) {
366 ret = _lttng_event_unregister(&event_recorder_priv->pub->parent);
367 WARN_ON(ret);
368 }
369 synchronize_trace(); /* Wait for in-flight events to complete */
370 list_for_each_entry(chan_priv, &session->priv->chan, node) {
371 ret = lttng_syscalls_destroy_syscall_table(&chan_priv->parent.syscall_table);
372 WARN_ON(ret);
373 }
374 list_for_each_entry_safe(event_enabler, tmp_event_enabler, &session->priv->enablers_head, node)
375 lttng_event_enabler_destroy(event_enabler);
376 list_for_each_entry_safe(event_recorder_priv, tmpevent_recorder_priv, &session->priv->events, parent.node)
377 _lttng_event_destroy(&event_recorder_priv->pub->parent);
378 list_for_each_entry_safe(chan_priv, tmpchan_priv, &session->priv->chan, node) {
379 BUG_ON(chan_priv->channel_type == METADATA_CHANNEL);
380 _lttng_channel_destroy(chan_priv->pub);
381 }
382 mutex_lock(&session->priv->metadata_cache->lock);
383 list_for_each_entry(metadata_stream, &session->priv->metadata_cache->metadata_stream, list)
384 _lttng_metadata_channel_hangup(metadata_stream);
385 mutex_unlock(&session->priv->metadata_cache->lock);
386 lttng_id_tracker_fini(&session->pid_tracker);
387 lttng_id_tracker_fini(&session->vpid_tracker);
388 lttng_id_tracker_fini(&session->uid_tracker);
389 lttng_id_tracker_fini(&session->vuid_tracker);
390 lttng_id_tracker_fini(&session->gid_tracker);
391 lttng_id_tracker_fini(&session->vgid_tracker);
392 kref_put(&session->priv->metadata_cache->refcount, metadata_cache_destroy);
393 list_del(&session->priv->list);
394 mutex_unlock(&sessions_mutex);
395 lttng_kvfree(session->priv);
396 lttng_kvfree(session);
397 }
398
399 void lttng_event_notifier_group_destroy(
400 struct lttng_event_notifier_group *event_notifier_group)
401 {
402 struct lttng_event_enabler_common *event_enabler, *tmp_event_enabler;
403 struct lttng_kernel_event_notifier_private *event_notifier_priv, *tmpevent_notifier_priv;
404 int ret;
405
406 if (!event_notifier_group)
407 return;
408
409 mutex_lock(&sessions_mutex);
410
411 ret = lttng_syscalls_unregister_syscall_table(&event_notifier_group->syscall_table);
412 WARN_ON(ret);
413
414 list_for_each_entry_safe(event_notifier_priv, tmpevent_notifier_priv,
415 &event_notifier_group->event_notifiers_head, parent.node) {
416 ret = _lttng_event_unregister(&event_notifier_priv->pub->parent);
417 WARN_ON(ret);
418 }
419
420 /* Wait for in-flight event notifier to complete */
421 synchronize_trace();
422
423 irq_work_sync(&event_notifier_group->wakeup_pending);
424
425 ret = lttng_syscalls_destroy_syscall_table(&event_notifier_group->syscall_table);
426 WARN_ON(ret);
427
428 list_for_each_entry_safe(event_enabler, tmp_event_enabler,
429 &event_notifier_group->enablers_head, node)
430 lttng_event_enabler_destroy(event_enabler);
431
432 list_for_each_entry_safe(event_notifier_priv, tmpevent_notifier_priv,
433 &event_notifier_group->event_notifiers_head, parent.node)
434 _lttng_event_destroy(&event_notifier_priv->pub->parent);
435
436 if (event_notifier_group->error_counter) {
437 struct lttng_counter *error_counter = event_notifier_group->error_counter;
438
439 error_counter->ops->counter_destroy(error_counter->counter);
440 module_put(error_counter->transport->owner);
441 lttng_kvfree(error_counter);
442 event_notifier_group->error_counter = NULL;
443 }
444
445 event_notifier_group->ops->priv->channel_destroy(event_notifier_group->chan);
446 module_put(event_notifier_group->transport->owner);
447 list_del(&event_notifier_group->node);
448
449 mutex_unlock(&sessions_mutex);
450 lttng_kvfree(event_notifier_group);
451 }
452
453 int lttng_session_statedump(struct lttng_kernel_session *session)
454 {
455 int ret;
456
457 mutex_lock(&sessions_mutex);
458 ret = lttng_statedump_start(session);
459 mutex_unlock(&sessions_mutex);
460 return ret;
461 }
462
463 int lttng_session_enable(struct lttng_kernel_session *session)
464 {
465 int ret = 0;
466 struct lttng_kernel_channel_buffer_private *chan_priv;
467
468 mutex_lock(&sessions_mutex);
469 if (session->active) {
470 ret = -EBUSY;
471 goto end;
472 }
473
474 /* Set transient enabler state to "enabled" */
475 session->priv->tstate = 1;
476
477 /* We need to sync enablers with session before activation. */
478 lttng_session_sync_event_enablers(session);
479
480 /*
481 * Snapshot the number of events per channel to know the type of header
482 * we need to use.
483 */
484 list_for_each_entry(chan_priv, &session->priv->chan, node) {
485 if (chan_priv->header_type)
486 continue; /* don't change it if session stop/restart */
487 if (chan_priv->free_event_id < 31)
488 chan_priv->header_type = 1; /* compact */
489 else
490 chan_priv->header_type = 2; /* large */
491 }
492
493 /* Clear each stream's quiescent state. */
494 list_for_each_entry(chan_priv, &session->priv->chan, node) {
495 if (chan_priv->channel_type != METADATA_CHANNEL)
496 lib_ring_buffer_clear_quiescent_channel(chan_priv->rb_chan);
497 }
498
499 WRITE_ONCE(session->active, 1);
500 WRITE_ONCE(session->priv->been_active, 1);
501 ret = _lttng_session_metadata_statedump(session);
502 if (ret) {
503 WRITE_ONCE(session->active, 0);
504 goto end;
505 }
506 ret = lttng_statedump_start(session);
507 if (ret)
508 WRITE_ONCE(session->active, 0);
509 end:
510 mutex_unlock(&sessions_mutex);
511 return ret;
512 }
513
514 int lttng_session_disable(struct lttng_kernel_session *session)
515 {
516 int ret = 0;
517 struct lttng_kernel_channel_buffer_private *chan_priv;
518
519 mutex_lock(&sessions_mutex);
520 if (!session->active) {
521 ret = -EBUSY;
522 goto end;
523 }
524 WRITE_ONCE(session->active, 0);
525
526 /* Set transient enabler state to "disabled" */
527 session->priv->tstate = 0;
528 lttng_session_sync_event_enablers(session);
529
530 /* Set each stream's quiescent state. */
531 list_for_each_entry(chan_priv, &session->priv->chan, node) {
532 if (chan_priv->channel_type != METADATA_CHANNEL)
533 lib_ring_buffer_set_quiescent_channel(chan_priv->rb_chan);
534 }
535 end:
536 mutex_unlock(&sessions_mutex);
537 return ret;
538 }
539
540 int lttng_session_metadata_regenerate(struct lttng_kernel_session *session)
541 {
542 int ret = 0;
543 struct lttng_kernel_channel_buffer_private *chan_priv;
544 struct lttng_kernel_event_recorder_private *event_recorder_priv;
545 struct lttng_metadata_cache *cache = session->priv->metadata_cache;
546 struct lttng_metadata_stream *stream;
547
548 mutex_lock(&sessions_mutex);
549 if (!session->active) {
550 ret = -EBUSY;
551 goto end;
552 }
553
554 mutex_lock(&cache->lock);
555 memset(cache->data, 0, cache->cache_alloc);
556 cache->metadata_written = 0;
557 cache->version++;
558 list_for_each_entry(stream, &session->priv->metadata_cache->metadata_stream, list) {
559 stream->metadata_out = 0;
560 stream->metadata_in = 0;
561 }
562 mutex_unlock(&cache->lock);
563
564 session->priv->metadata_dumped = 0;
565 list_for_each_entry(chan_priv, &session->priv->chan, node) {
566 chan_priv->metadata_dumped = 0;
567 }
568
569 list_for_each_entry(event_recorder_priv, &session->priv->events, parent.node) {
570 event_recorder_priv->metadata_dumped = 0;
571 }
572
573 ret = _lttng_session_metadata_statedump(session);
574
575 end:
576 mutex_unlock(&sessions_mutex);
577 return ret;
578 }
579
580 static
581 bool is_channel_buffer_metadata(struct lttng_kernel_channel_common *channel)
582 {
583 struct lttng_kernel_channel_buffer *chan_buf;
584
585 if (channel->type != LTTNG_KERNEL_CHANNEL_TYPE_BUFFER)
586 return false;
587 chan_buf = container_of(channel, struct lttng_kernel_channel_buffer, parent);
588 if (chan_buf->priv->channel_type == METADATA_CHANNEL)
589 return true;
590 return false;
591 }
592
593 int lttng_channel_enable(struct lttng_kernel_channel_common *channel)
594 {
595 int ret = 0;
596
597 mutex_lock(&sessions_mutex);
598 if (is_channel_buffer_metadata(channel)) {
599 ret = -EPERM;
600 goto end;
601 }
602 if (channel->enabled) {
603 ret = -EEXIST;
604 goto end;
605 }
606 /* Set transient enabler state to "enabled" */
607 channel->priv->tstate = 1;
608 lttng_session_sync_event_enablers(channel->session);
609 /* Set atomically the state to "enabled" */
610 WRITE_ONCE(channel->enabled, 1);
611 end:
612 mutex_unlock(&sessions_mutex);
613 return ret;
614 }
615
616 int lttng_channel_disable(struct lttng_kernel_channel_common *channel)
617 {
618 int ret = 0;
619
620 mutex_lock(&sessions_mutex);
621 if (is_channel_buffer_metadata(channel)) {
622 ret = -EPERM;
623 goto end;
624 }
625 if (!channel->enabled) {
626 ret = -EEXIST;
627 goto end;
628 }
629 /* Set atomically the state to "disabled" */
630 WRITE_ONCE(channel->enabled, 0);
631 /* Set transient enabler state to "disabled" */
632 channel->priv->tstate = 0;
633 lttng_session_sync_event_enablers(channel->session);
634 end:
635 mutex_unlock(&sessions_mutex);
636 return ret;
637 }
638
639 int lttng_event_enable(struct lttng_kernel_event_common *event)
640 {
641 int ret = 0;
642
643 mutex_lock(&sessions_mutex);
644 switch (event->type) {
645 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
646 {
647 struct lttng_kernel_event_recorder *event_recorder =
648 container_of(event, struct lttng_kernel_event_recorder, parent);
649
650 if (event_recorder->chan->priv->channel_type == METADATA_CHANNEL) {
651 ret = -EPERM;
652 goto end;
653 }
654 break;
655 }
656 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
657 switch (event->priv->instrumentation) {
658 case LTTNG_KERNEL_ABI_KRETPROBE:
659 ret = -EINVAL;
660 goto end;
661 default:
662 break;
663 }
664 break;
665 default:
666 break;
667 }
668
669 if (event->enabled) {
670 ret = -EEXIST;
671 goto end;
672 }
673 switch (event->priv->instrumentation) {
674 case LTTNG_KERNEL_ABI_TRACEPOINT:
675 lttng_fallthrough;
676 case LTTNG_KERNEL_ABI_SYSCALL:
677 ret = -EINVAL;
678 break;
679
680 case LTTNG_KERNEL_ABI_KPROBE:
681 lttng_fallthrough;
682 case LTTNG_KERNEL_ABI_UPROBE:
683 WRITE_ONCE(event->enabled, 1);
684 break;
685
686 case LTTNG_KERNEL_ABI_KRETPROBE:
687 ret = lttng_kretprobes_event_enable_state(event, 1);
688 break;
689
690 case LTTNG_KERNEL_ABI_FUNCTION:
691 lttng_fallthrough;
692 case LTTNG_KERNEL_ABI_NOOP:
693 lttng_fallthrough;
694 default:
695 WARN_ON_ONCE(1);
696 ret = -EINVAL;
697 }
698 end:
699 mutex_unlock(&sessions_mutex);
700 return ret;
701 }
702
703 int lttng_event_disable(struct lttng_kernel_event_common *event)
704 {
705 int ret = 0;
706
707 mutex_lock(&sessions_mutex);
708 switch (event->type) {
709 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
710 {
711 struct lttng_kernel_event_recorder *event_recorder =
712 container_of(event, struct lttng_kernel_event_recorder, parent);
713
714 if (event_recorder->chan->priv->channel_type == METADATA_CHANNEL) {
715 ret = -EPERM;
716 goto end;
717 }
718 break;
719 }
720 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
721 switch (event->priv->instrumentation) {
722 case LTTNG_KERNEL_ABI_KRETPROBE:
723 ret = -EINVAL;
724 goto end;
725 default:
726 break;
727 }
728 break;
729 default:
730 break;
731 }
732
733 if (!event->enabled) {
734 ret = -EEXIST;
735 goto end;
736 }
737 switch (event->priv->instrumentation) {
738 case LTTNG_KERNEL_ABI_TRACEPOINT:
739 lttng_fallthrough;
740 case LTTNG_KERNEL_ABI_SYSCALL:
741 ret = -EINVAL;
742 break;
743
744 case LTTNG_KERNEL_ABI_KPROBE:
745 lttng_fallthrough;
746 case LTTNG_KERNEL_ABI_UPROBE:
747 WRITE_ONCE(event->enabled, 0);
748 break;
749
750 case LTTNG_KERNEL_ABI_KRETPROBE:
751 ret = lttng_kretprobes_event_enable_state(event, 0);
752 break;
753
754 case LTTNG_KERNEL_ABI_FUNCTION:
755 lttng_fallthrough;
756 case LTTNG_KERNEL_ABI_NOOP:
757 lttng_fallthrough;
758 default:
759 WARN_ON_ONCE(1);
760 ret = -EINVAL;
761 }
762 end:
763 mutex_unlock(&sessions_mutex);
764 return ret;
765 }
766
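/*
 * Note on the enable/disable return values above: -EPERM is returned for
 * events belonging to a metadata channel (their state cannot be toggled),
 * -EEXIST when the event is already in the requested state, and -EINVAL for
 * instrumentation types whose state is controlled exclusively through
 * enabler synchronization (tracepoints and syscalls) or that cannot be
 * toggled for the given event type (e.g. kretprobe event notifiers).
 */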
767 struct lttng_kernel_channel_buffer *lttng_channel_buffer_create(struct lttng_kernel_session *session,
768 const char *transport_name,
769 void *buf_addr,
770 size_t subbuf_size, size_t num_subbuf,
771 unsigned int switch_timer_interval,
772 unsigned int read_timer_interval,
773 enum channel_type channel_type)
774 {
775 struct lttng_kernel_channel_buffer *chan;
776 struct lttng_kernel_channel_buffer_private *chan_priv;
777 struct lttng_transport *transport = NULL;
778
779 mutex_lock(&sessions_mutex);
780 if (session->priv->been_active && channel_type != METADATA_CHANNEL)
781 goto active; /* Refuse to add channel to active session */
782 transport = lttng_transport_find(transport_name);
783 if (!transport) {
784 printk(KERN_WARNING "LTTng: transport %s not found\n",
785 transport_name);
786 goto notransport;
787 }
788 if (!try_module_get(transport->owner)) {
789 printk(KERN_WARNING "LTTng: Can't lock transport module.\n");
790 goto notransport;
791 }
792 chan = kzalloc(sizeof(struct lttng_kernel_channel_buffer), GFP_KERNEL);
793 if (!chan)
794 goto nomem;
795 chan_priv = kzalloc(sizeof(struct lttng_kernel_channel_buffer_private), GFP_KERNEL);
796 if (!chan_priv)
797 goto nomem_priv;
798 chan->priv = chan_priv;
799 chan_priv->pub = chan;
800 chan->parent.type = LTTNG_KERNEL_CHANNEL_TYPE_BUFFER;
801 chan->parent.session = session;
802 chan->priv->id = session->priv->free_chan_id++;
803 chan->ops = &transport->ops;
804 /*
805 * Note: the channel creation op already writes into the packet
806 * headers. Therefore the "chan" information used as input
807 * should be already accessible.
808 */
809 chan->priv->rb_chan = transport->ops.priv->channel_create(transport_name,
810 chan, buf_addr, subbuf_size, num_subbuf,
811 switch_timer_interval, read_timer_interval);
812 if (!chan->priv->rb_chan)
813 goto create_error;
814 chan->priv->parent.tstate = 1;
815 chan->parent.enabled = 1;
816 chan->priv->transport = transport;
817 chan->priv->channel_type = channel_type;
818 list_add(&chan->priv->node, &session->priv->chan);
819 mutex_unlock(&sessions_mutex);
820 return chan;
821
822 create_error:
823 kfree(chan_priv);
824 nomem_priv:
825 kfree(chan);
826 nomem:
827 if (transport)
828 module_put(transport->owner);
829 notransport:
830 active:
831 mutex_unlock(&sessions_mutex);
832 return NULL;
833 }
834
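/*
 * Example (illustrative sketch only): creating a per-cpu ring-buffer channel
 * in a session. The transport name, buffer geometry and channel type
 * constant below are assumptions for illustration; the real values come from
 * the session daemon through the ABI layer.
 */
#if 0	/* illustrative only, not compiled */
static struct lttng_kernel_channel_buffer *example_create_channel(
		struct lttng_kernel_session *session)
{
	return lttng_channel_buffer_create(session,
			"relay-discard",	/* transport name assumed */
			NULL,			/* buf_addr: kernel-allocated */
			4096,			/* subbuf_size */
			8,			/* num_subbuf */
			0,			/* switch_timer_interval */
			0,			/* read_timer_interval */
			PER_CPU_CHANNEL);	/* channel type constant assumed */
}
#endif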
835 /*
836 * Only used internally at session destruction for per-cpu channels, and
837 * when metadata channel is released.
838 * Needs to be called with sessions mutex held.
839 */
840 static
841 void _lttng_channel_destroy(struct lttng_kernel_channel_buffer *chan)
842 {
843 chan->ops->priv->channel_destroy(chan->priv->rb_chan);
844 module_put(chan->priv->transport->owner);
845 list_del(&chan->priv->node);
846 lttng_kernel_destroy_context(chan->priv->ctx);
847 kfree(chan->priv);
848 kfree(chan);
849 }
850
851 void lttng_metadata_channel_destroy(struct lttng_kernel_channel_buffer *chan)
852 {
853 BUG_ON(chan->priv->channel_type != METADATA_CHANNEL);
854
855 /* Protect the metadata cache with the sessions_mutex. */
856 mutex_lock(&sessions_mutex);
857 _lttng_channel_destroy(chan);
858 mutex_unlock(&sessions_mutex);
859 }
860 EXPORT_SYMBOL_GPL(lttng_metadata_channel_destroy);
861
862 static
863 void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream)
864 {
865 stream->finalized = 1;
866 wake_up_interruptible(&stream->read_wait);
867 }
868
869 static
870 struct lttng_kernel_event_common *lttng_kernel_event_alloc(struct lttng_event_enabler_common *event_enabler)
871 {
872 struct lttng_kernel_abi_event *event_param = &event_enabler->event_param;
873 enum lttng_kernel_abi_instrumentation itype = event_param->instrumentation;
874
875 switch (event_enabler->enabler_type) {
876 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
877 {
878 struct lttng_event_recorder_enabler *event_recorder_enabler =
879 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
880 struct lttng_kernel_event_recorder *event_recorder;
881 struct lttng_kernel_event_recorder_private *event_recorder_priv;
882 struct lttng_kernel_channel_buffer *chan = event_recorder_enabler->chan;
883
884 event_recorder = kmem_cache_zalloc(event_recorder_cache, GFP_KERNEL);
885 if (!event_recorder)
886 return NULL;
887 event_recorder_priv = kmem_cache_zalloc(event_recorder_private_cache, GFP_KERNEL);
888 if (!event_recorder_priv) {
889 kmem_cache_free(event_recorder_cache, event_recorder);
890 return NULL;
891 }
892 event_recorder_priv->pub = event_recorder;
893 event_recorder_priv->parent.pub = &event_recorder->parent;
894 event_recorder->priv = event_recorder_priv;
895 event_recorder->parent.priv = &event_recorder_priv->parent;
896
897 event_recorder->parent.type = LTTNG_KERNEL_EVENT_TYPE_RECORDER;
898 event_recorder->parent.run_filter = lttng_kernel_interpret_event_filter;
899 event_recorder->priv->parent.instrumentation = itype;
900 INIT_LIST_HEAD(&event_recorder->priv->parent.filter_bytecode_runtime_head);
901 INIT_LIST_HEAD(&event_recorder->priv->parent.enablers_ref_head);
902
903 event_recorder->chan = chan;
904 event_recorder->priv->id = chan->priv->free_event_id++;
905 return &event_recorder->parent;
906 }
907 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
908 {
909 struct lttng_event_notifier_enabler *event_notifier_enabler =
910 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
911 struct lttng_kernel_event_notifier *event_notifier;
912 struct lttng_kernel_event_notifier_private *event_notifier_priv;
913
914 event_notifier = kmem_cache_zalloc(event_notifier_cache, GFP_KERNEL);
915 if (!event_notifier)
916 return NULL;
917 event_notifier_priv = kmem_cache_zalloc(event_notifier_private_cache, GFP_KERNEL);
918 if (!event_notifier_priv) {
919 kmem_cache_free(event_notifier_cache, event_notifier);
920 return NULL;
921 }
922 event_notifier_priv->pub = event_notifier;
923 event_notifier_priv->parent.pub = &event_notifier->parent;
924 event_notifier->priv = event_notifier_priv;
925 event_notifier->parent.priv = &event_notifier_priv->parent;
926
927 event_notifier->parent.type = LTTNG_KERNEL_EVENT_TYPE_NOTIFIER;
928 event_notifier->parent.run_filter = lttng_kernel_interpret_event_filter;
929 event_notifier->priv->parent.instrumentation = itype;
930 event_notifier->priv->parent.user_token = event_enabler->user_token;
931 INIT_LIST_HEAD(&event_notifier->priv->parent.filter_bytecode_runtime_head);
932 INIT_LIST_HEAD(&event_notifier->priv->parent.enablers_ref_head);
933
934 event_notifier->priv->group = event_notifier_enabler->group;
935 event_notifier->priv->error_counter_index = event_notifier_enabler->error_counter_index;
936 event_notifier->priv->num_captures = 0;
937 event_notifier->notification_send = lttng_event_notifier_notification_send;
938 INIT_LIST_HEAD(&event_notifier->priv->capture_bytecode_runtime_head);
939 return &event_notifier->parent;
940 }
941 default:
942 return NULL;
943 }
944 }
945
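/*
 * Note on the allocation scheme above: every event is split into a public
 * struct (allocated from event_recorder_cache / event_notifier_cache) and a
 * private struct (allocated from the corresponding *_private_cache). The two
 * halves are cross-linked through their pub/priv pointers, both for the
 * derived type and for the embedded common parent, so that either side can
 * be reached from the other. lttng_kernel_event_free() below releases both
 * halves to their respective caches.
 */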
946 static
947 void lttng_kernel_event_free(struct lttng_kernel_event_common *event)
948 {
949 switch (event->type) {
950 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
951 {
952 struct lttng_kernel_event_recorder *event_recorder =
953 container_of(event, struct lttng_kernel_event_recorder, parent);
954
955 kmem_cache_free(event_recorder_private_cache, event_recorder->priv);
956 kmem_cache_free(event_recorder_cache, event_recorder);
957 break;
958 }
959 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
960 {
961 struct lttng_kernel_event_notifier *event_notifier =
962 container_of(event, struct lttng_kernel_event_notifier, parent);
963
964 kmem_cache_free(event_notifier_private_cache, event_notifier->priv);
965 kmem_cache_free(event_notifier_cache, event_notifier);
966 break;
967 }
968 default:
969 WARN_ON_ONCE(1);
970 }
971 }
972
973 /*
974 * Supports event creation while tracing session is active.
975 * Needs to be called with sessions mutex held.
976 */
977 static
978 struct lttng_kernel_event_recorder *_lttng_kernel_event_recorder_create(struct lttng_event_recorder_enabler *event_enabler,
979 const struct lttng_kernel_event_desc *event_desc)
980 {
981 struct lttng_event_ht *events_ht = lttng_get_event_ht_from_enabler(&event_enabler->parent);
982 struct lttng_kernel_channel_buffer *chan = event_enabler->chan;
983 struct lttng_kernel_abi_event *event_param = &event_enabler->parent.event_param;
984 enum lttng_kernel_abi_instrumentation itype = event_param->instrumentation;
985 struct lttng_kernel_event_common_private *event_priv;
986 struct lttng_kernel_event_common *event;
987 struct lttng_kernel_event_recorder *event_recorder;
988 const char *event_name;
989 struct hlist_head *head;
990 int ret;
991
992 if (chan->priv->free_event_id == -1U) {
993 ret = -EMFILE;
994 goto full;
995 }
996
997 switch (itype) {
998 case LTTNG_KERNEL_ABI_TRACEPOINT:
999 event_name = event_desc->event_name;
1000 break;
1001
1002 case LTTNG_KERNEL_ABI_KPROBE:
1003 lttng_fallthrough;
1004 case LTTNG_KERNEL_ABI_UPROBE:
1005 lttng_fallthrough;
1006 case LTTNG_KERNEL_ABI_KRETPROBE:
1007 lttng_fallthrough;
1008 case LTTNG_KERNEL_ABI_SYSCALL:
1009 event_name = event_param->name;
1010 break;
1011
1012 case LTTNG_KERNEL_ABI_FUNCTION:
1013 lttng_fallthrough;
1014 case LTTNG_KERNEL_ABI_NOOP:
1015 lttng_fallthrough;
1016 default:
1017 WARN_ON_ONCE(1);
1018 ret = -EINVAL;
1019 goto type_error;
1020 }
1021
1022 head = utils_borrow_hash_table_bucket(events_ht->table, LTTNG_EVENT_HT_SIZE, event_name);
1023 lttng_hlist_for_each_entry(event_priv, head, hlist_node) {
1024 if (lttng_event_enabler_event_name_match_event(&event_enabler->parent, event_name, event_priv->pub)) {
1025 ret = -EEXIST;
1026 goto exist;
1027 }
1028 }
1029
1030 event = lttng_kernel_event_alloc(&event_enabler->parent);
1031 if (!event) {
1032 ret = -ENOMEM;
1033 goto alloc_error;
1034 }
1035 event_recorder = container_of(event, struct lttng_kernel_event_recorder, parent);
1036
1037 switch (itype) {
1038 case LTTNG_KERNEL_ABI_TRACEPOINT:
1039 /* Event will be enabled by enabler sync. */
1040 event->enabled = 0;
1041 event->priv->registered = 0;
1042 event->priv->desc = lttng_event_desc_get(event_name);
1043 if (!event->priv->desc) {
1044 ret = -ENOENT;
1045 goto register_error;
1046 }
1047 /* Populate lttng_event structure before event registration. */
1048 smp_wmb();
1049 break;
1050
1051 case LTTNG_KERNEL_ABI_KPROBE:
1052 /*
1053 * Needs to be explicitly enabled after creation, since
1054 * we may want to apply filters.
1055 */
1056 event->enabled = 0;
1057 event->priv->registered = 1;
1058 /*
1059 * Populate lttng_event structure before event
1060 * registration.
1061 */
1062 smp_wmb();
1063 ret = lttng_kprobes_register_event(event_name,
1064 event_param->u.kprobe.symbol_name,
1065 event_param->u.kprobe.offset,
1066 event_param->u.kprobe.addr,
1067 event);
1068 if (ret) {
1069 ret = -EINVAL;
1070 goto register_error;
1071 }
1072 ret = try_module_get(event->priv->desc->owner);
1073 WARN_ON_ONCE(!ret);
1074 break;
1075
1076 case LTTNG_KERNEL_ABI_KRETPROBE:
1077 {
1078 struct lttng_kernel_event_common *event_return;
1079 struct lttng_kernel_event_recorder *event_recorder_return;
1080
1081 /* kretprobe defines 2 events */
1082 /*
1083 * Needs to be explicitly enabled after creation, since
1084 * we may want to apply filters.
1085 */
1086 event->enabled = 0;
1087 event->priv->registered = 1;
1088
1089 event_return = lttng_kernel_event_alloc(&event_enabler->parent);
1090 if (!event_return) {
1091 ret = -ENOMEM;
1092 goto alloc_error;
1093 }
1094 event_recorder_return = container_of(event_return, struct lttng_kernel_event_recorder, parent);
1095
1096 event_return->enabled = 0;
1097 event_return->priv->registered = 1;
1098
1099 /*
1100 * Populate lttng_event structure before kretprobe registration.
1101 */
1102 smp_wmb();
1103 ret = lttng_kretprobes_register(event_name,
1104 event_param->u.kretprobe.symbol_name,
1105 event_param->u.kretprobe.offset,
1106 event_param->u.kretprobe.addr,
1107 event, event_return);
1108 if (ret) {
1109 lttng_kernel_event_free(event_return);
1110 ret = -EINVAL;
1111 goto register_error;
1112 }
1113 /* Take 2 refs on the module: one per event. */
1114 ret = try_module_get(event->priv->desc->owner);
1115 WARN_ON_ONCE(!ret);
1116 ret = try_module_get(event_return->priv->desc->owner);
1117 WARN_ON_ONCE(!ret);
1118 ret = _lttng_event_metadata_statedump(chan->parent.session, chan, event_recorder_return);
1119 WARN_ON_ONCE(ret > 0);
1120 if (ret) {
1121 module_put(event_return->priv->desc->owner);
1122 module_put(event->priv->desc->owner);
1123 lttng_kernel_event_free(event_return);
1124 goto statedump_error;
1125 }
1126 list_add(&event_return->priv->node, &chan->parent.session->priv->events);
1127 break;
1128 }
1129
1130 case LTTNG_KERNEL_ABI_SYSCALL:
1131 /*
1132 * Needs to be explicitly enabled after creation, since
1133 * we may want to apply filters.
1134 */
1135 event->enabled = 0;
1136 event->priv->registered = 0;
1137 event->priv->desc = event_desc;
1138 switch (event_param->u.syscall.entryexit) {
1139 case LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT:
1140 ret = -EINVAL;
1141 goto register_error;
1142 case LTTNG_KERNEL_ABI_SYSCALL_ENTRY:
1143 event->priv->u.syscall.entryexit = LTTNG_SYSCALL_ENTRY;
1144 break;
1145 case LTTNG_KERNEL_ABI_SYSCALL_EXIT:
1146 event->priv->u.syscall.entryexit = LTTNG_SYSCALL_EXIT;
1147 break;
1148 }
1149 switch (event_param->u.syscall.abi) {
1150 case LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL:
1151 ret = -EINVAL;
1152 goto register_error;
1153 case LTTNG_KERNEL_ABI_SYSCALL_ABI_NATIVE:
1154 event->priv->u.syscall.abi = LTTNG_SYSCALL_ABI_NATIVE;
1155 break;
1156 case LTTNG_KERNEL_ABI_SYSCALL_ABI_COMPAT:
1157 event->priv->u.syscall.abi = LTTNG_SYSCALL_ABI_COMPAT;
1158 break;
1159 }
1160 if (!event->priv->desc) {
1161 ret = -EINVAL;
1162 goto register_error;
1163 }
1164 break;
1165
1166 case LTTNG_KERNEL_ABI_UPROBE:
1167 /*
1168 * Needs to be explicitly enabled after creation, since
1169 * we may want to apply filters.
1170 */
1171 event->enabled = 0;
1172 event->priv->registered = 1;
1173
1174 /*
1175 * Populate lttng_event structure before event
1176 * registration.
1177 */
1178 smp_wmb();
1179
1180 ret = lttng_uprobes_register_event(event_param->name,
1181 event_param->u.uprobe.fd,
1182 event);
1183 if (ret)
1184 goto register_error;
1185 ret = try_module_get(event->priv->desc->owner);
1186 WARN_ON_ONCE(!ret);
1187 break;
1188
1189 case LTTNG_KERNEL_ABI_FUNCTION:
1190 lttng_fallthrough;
1191 case LTTNG_KERNEL_ABI_NOOP:
1192 lttng_fallthrough;
1193 default:
1194 WARN_ON_ONCE(1);
1195 ret = -EINVAL;
1196 goto register_error;
1197 }
1198 ret = _lttng_event_metadata_statedump(chan->parent.session, chan, event_recorder);
1199 WARN_ON_ONCE(ret > 0);
1200 if (ret) {
1201 goto statedump_error;
1202 }
1203 hlist_add_head(&event->priv->hlist_node, head);
1204 list_add(&event->priv->node, &chan->parent.session->priv->events);
1205 return event_recorder;
1206
1207 statedump_error:
1208 /* If a statedump error occurs, events will not be readable. */
1209 register_error:
1210 lttng_kernel_event_free(event);
1211 alloc_error:
1212 exist:
1213 type_error:
1214 full:
1215 return ERR_PTR(ret);
1216 }
1217
1218 static
1219 struct lttng_kernel_event_notifier *_lttng_kernel_event_notifier_create(struct lttng_event_notifier_enabler *event_enabler,
1220 const struct lttng_kernel_event_desc *event_desc)
1221 {
1222 struct lttng_event_ht *events_ht = lttng_get_event_ht_from_enabler(&event_enabler->parent);
1223 struct lttng_event_notifier_group *event_notifier_group = event_enabler->group;
1224 struct lttng_kernel_abi_event *event_param = &event_enabler->parent.event_param;
1225 enum lttng_kernel_abi_instrumentation itype = event_param->instrumentation;
1226 struct lttng_kernel_event_common_private *event_priv;
1227 struct lttng_kernel_event_common *event;
1228 struct lttng_kernel_event_notifier *event_notifier;
1229 struct lttng_counter *error_counter;
1230 const char *event_name;
1231 struct hlist_head *head;
1232 int ret;
1233
1234 switch (itype) {
1235 case LTTNG_KERNEL_ABI_TRACEPOINT:
1236 event_name = event_desc->event_name;
1237 break;
1238
1239 case LTTNG_KERNEL_ABI_KPROBE:
1240 lttng_fallthrough;
1241 case LTTNG_KERNEL_ABI_UPROBE:
1242 lttng_fallthrough;
1243 case LTTNG_KERNEL_ABI_SYSCALL:
1244 event_name = event_param->name;
1245 break;
1246
1247 case LTTNG_KERNEL_ABI_KRETPROBE:
1248 lttng_fallthrough;
1249 case LTTNG_KERNEL_ABI_FUNCTION:
1250 lttng_fallthrough;
1251 case LTTNG_KERNEL_ABI_NOOP:
1252 lttng_fallthrough;
1253 default:
1254 WARN_ON_ONCE(1);
1255 ret = -EINVAL;
1256 goto type_error;
1257 }
1258
1259 head = utils_borrow_hash_table_bucket(events_ht->table, LTTNG_EVENT_HT_SIZE, event_name);
1260 lttng_hlist_for_each_entry(event_priv, head, hlist_node) {
1261 if (lttng_event_enabler_event_name_match_event(&event_enabler->parent, event_name, event_priv->pub)) {
1262 ret = -EEXIST;
1263 goto exist;
1264 }
1265 }
1266
1267 event = lttng_kernel_event_alloc(&event_enabler->parent);
1268 if (!event) {
1269 ret = -ENOMEM;
1270 goto alloc_error;
1271 }
1272 event_notifier = container_of(event, struct lttng_kernel_event_notifier, parent);
1273
1274 switch (itype) {
1275 case LTTNG_KERNEL_ABI_TRACEPOINT:
1276 /* Event will be enabled by enabler sync. */
1277 event->enabled = 0;
1278 event->priv->registered = 0;
1279 event->priv->desc = lttng_event_desc_get(event_name);
1280 if (!event->priv->desc) {
1281 ret = -ENOENT;
1282 goto register_error;
1283 }
1284 /* Populate lttng_event_notifier structure before event registration. */
1285 smp_wmb();
1286 break;
1287
1288 case LTTNG_KERNEL_ABI_KPROBE:
1289 /*
1290 * Needs to be explicitly enabled after creation, since
1291 * we may want to apply filters.
1292 */
1293 event->enabled = 0;
1294 event->priv->registered = 1;
1295 /*
1296 * Populate lttng_event_notifier structure before event
1297 * registration.
1298 */
1299 smp_wmb();
1300 ret = lttng_kprobes_register_event(event_param->u.kprobe.symbol_name,
1301 event_param->u.kprobe.symbol_name,
1302 event_param->u.kprobe.offset,
1303 event_param->u.kprobe.addr,
1304 event);
1305 if (ret) {
1306 ret = -EINVAL;
1307 goto register_error;
1308 }
1309 ret = try_module_get(event->priv->desc->owner);
1310 WARN_ON_ONCE(!ret);
1311 break;
1312
1313 case LTTNG_KERNEL_ABI_SYSCALL:
1314 /*
1315 * Needs to be explicitly enabled after creation, since
1316 * we may want to apply filters.
1317 */
1318 event->enabled = 0;
1319 event->priv->registered = 0;
1320 event->priv->desc = event_desc;
1321 switch (event_param->u.syscall.entryexit) {
1322 case LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT:
1323 ret = -EINVAL;
1324 goto register_error;
1325 case LTTNG_KERNEL_ABI_SYSCALL_ENTRY:
1326 event->priv->u.syscall.entryexit = LTTNG_SYSCALL_ENTRY;
1327 break;
1328 case LTTNG_KERNEL_ABI_SYSCALL_EXIT:
1329 event->priv->u.syscall.entryexit = LTTNG_SYSCALL_EXIT;
1330 break;
1331 }
1332 switch (event_param->u.syscall.abi) {
1333 case LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL:
1334 ret = -EINVAL;
1335 goto register_error;
1336 case LTTNG_KERNEL_ABI_SYSCALL_ABI_NATIVE:
1337 event->priv->u.syscall.abi = LTTNG_SYSCALL_ABI_NATIVE;
1338 break;
1339 case LTTNG_KERNEL_ABI_SYSCALL_ABI_COMPAT:
1340 event->priv->u.syscall.abi = LTTNG_SYSCALL_ABI_COMPAT;
1341 break;
1342 }
1343
1344 if (!event->priv->desc) {
1345 ret = -EINVAL;
1346 goto register_error;
1347 }
1348 break;
1349
1350 case LTTNG_KERNEL_ABI_UPROBE:
1351 /*
1352 * Needs to be explicitly enabled after creation, since
1353 * we may want to apply filters.
1354 */
1355 event->enabled = 0;
1356 event->priv->registered = 1;
1357
1358 /*
1359 * Populate lttng_event_notifier structure before
1360 * event_notifier registration.
1361 */
1362 smp_wmb();
1363
1364 ret = lttng_uprobes_register_event(event_param->name,
1365 event_param->u.uprobe.fd,
1366 event);
1367 if (ret)
1368 goto register_error;
1369 ret = try_module_get(event->priv->desc->owner);
1370 WARN_ON_ONCE(!ret);
1371 break;
1372
1373 case LTTNG_KERNEL_ABI_KRETPROBE:
1374 lttng_fallthrough;
1375 case LTTNG_KERNEL_ABI_FUNCTION:
1376 lttng_fallthrough;
1377 case LTTNG_KERNEL_ABI_NOOP:
1378 lttng_fallthrough;
1379 default:
1380 WARN_ON_ONCE(1);
1381 ret = -EINVAL;
1382 goto register_error;
1383 }
1384
1385 list_add(&event->priv->node, &event_notifier_group->event_notifiers_head);
1386 hlist_add_head(&event->priv->hlist_node, head);
1387
1388 /*
1389 * Clear the error counter bucket. The sessiond keeps track of which
1390 * bucket is currently in use. We trust it. The session lock
1391 * synchronizes against concurrent creation of the error
1392 * counter.
1393 */
1394 error_counter = event_notifier_group->error_counter;
1395 if (error_counter) {
1396 size_t dimension_index[1];
1397
1398 /*
1399 * Check that the index is within the boundary of the counter.
1400 */
1401 if (event_notifier->priv->error_counter_index >= event_notifier_group->error_counter_len) {
1402 printk(KERN_INFO "LTTng: event_notifier: Error counter index out-of-bound: counter-len=%zu, index=%llu\n",
1403 event_notifier_group->error_counter_len, event_notifier->priv->error_counter_index);
1404 ret = -EINVAL;
1405 goto register_error;
1406 }
1407
1408 dimension_index[0] = event_notifier->priv->error_counter_index;
1409 ret = error_counter->ops->counter_clear(error_counter->counter, dimension_index);
1410 if (ret) {
1411 printk(KERN_INFO "LTTng: event_notifier: Unable to clear error counter bucket %llu\n",
1412 event_notifier->priv->error_counter_index);
1413 goto register_error;
1414 }
1415 }
1416
1417 return event_notifier;
1418
1419 register_error:
1420 lttng_kernel_event_free(event);
1421 alloc_error:
1422 exist:
1423 type_error:
1424 return ERR_PTR(ret);
1425 }
1426
1427 struct lttng_kernel_event_common *_lttng_kernel_event_create(struct lttng_event_enabler_common *event_enabler,
1428 const struct lttng_kernel_event_desc *event_desc)
1429 {
1430 switch (event_enabler->enabler_type) {
1431 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
1432 {
1433 struct lttng_event_recorder_enabler *event_recorder_enabler =
1434 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
1435 struct lttng_kernel_event_recorder *event_recorder;
1436
1437 event_recorder = _lttng_kernel_event_recorder_create(event_recorder_enabler, event_desc);
1438 if (!event_recorder)
1439 return NULL;
1440 return &event_recorder->parent;
1441 }
1442 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
1443 {
1444 struct lttng_event_notifier_enabler *event_notifier_enabler =
1445 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
1446 struct lttng_kernel_event_notifier *event_notifier;
1447
1448 event_notifier = _lttng_kernel_event_notifier_create(event_notifier_enabler, event_desc);
1449 if (!event_notifier)
1450 return NULL;
1451 return &event_notifier->parent;
1452 }
1453 default:
1454 return NULL;
1455 }
1456 }
1457
1458 struct lttng_kernel_event_common *lttng_kernel_event_create(struct lttng_event_enabler_common *event_enabler,
1459 const struct lttng_kernel_event_desc *event_desc)
1460 {
1461 struct lttng_kernel_event_common *event;
1462
1463 mutex_lock(&sessions_mutex);
1464 event = _lttng_kernel_event_create(event_enabler, event_desc);
1465 mutex_unlock(&sessions_mutex);
1466 return event;
1467 }
1468
1471 int lttng_kernel_counter_read(struct lttng_counter *counter,
1472 const size_t *dim_indexes, int32_t cpu,
1473 int64_t *val, bool *overflow, bool *underflow)
1474 {
1475 return counter->ops->counter_read(counter->counter, dim_indexes,
1476 cpu, val, overflow, underflow);
1477 }
1478
1479 int lttng_kernel_counter_aggregate(struct lttng_counter *counter,
1480 const size_t *dim_indexes, int64_t *val,
1481 bool *overflow, bool *underflow)
1482 {
1483 return counter->ops->counter_aggregate(counter->counter, dim_indexes,
1484 val, overflow, underflow);
1485 }
1486
1487 int lttng_kernel_counter_clear(struct lttng_counter *counter,
1488 const size_t *dim_indexes)
1489 {
1490 return counter->ops->counter_clear(counter->counter, dim_indexes);
1491 }
1492
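/*
 * Example (illustrative sketch only): creating a one-dimensional counter and
 * exercising the read/aggregate/clear helpers above. The counter transport
 * name below is an assumption for illustration.
 */
#if 0	/* illustrative only, not compiled */
static void example_counter_usage(void)
{
	const size_t dimension_sizes[1] = { 128 };
	const size_t index[1] = { 7 };
	struct lttng_counter *counter;
	bool overflow, underflow;
	int64_t value;

	counter = lttng_kernel_counter_create("counter-per-cpu-64-modular",	/* name assumed */
			1, dimension_sizes);
	if (!counter)
		return;
	lttng_kernel_counter_clear(counter, index);
	/* Per-CPU read of bucket 7 on CPU 0. */
	lttng_kernel_counter_read(counter, index, 0, &value, &overflow, &underflow);
	/* Sum of bucket 7 across all CPUs. */
	lttng_kernel_counter_aggregate(counter, index, &value, &overflow, &underflow);
}
#endif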
1493 /* Only used for tracepoints for now. */
1494 static
1495 void register_event(struct lttng_kernel_event_common *event)
1496 {
1497 const struct lttng_kernel_event_desc *desc;
1498 int ret = -EINVAL;
1499
1500 if (event->priv->registered)
1501 return;
1502
1503 desc = event->priv->desc;
1504 switch (event->priv->instrumentation) {
1505 case LTTNG_KERNEL_ABI_TRACEPOINT:
1506 ret = lttng_wrapper_tracepoint_probe_register(desc->event_kname,
1507 desc->tp_class->probe_callback,
1508 event);
1509 break;
1510
1511 case LTTNG_KERNEL_ABI_SYSCALL:
1512 ret = lttng_syscall_filter_enable_event(event);
1513 break;
1514
1515 case LTTNG_KERNEL_ABI_KPROBE:
1516 lttng_fallthrough;
1517 case LTTNG_KERNEL_ABI_UPROBE:
1518 ret = 0;
1519 break;
1520
1521 case LTTNG_KERNEL_ABI_KRETPROBE:
1522 switch (event->type) {
1523 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
1524 ret = 0;
1525 break;
1526 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
1527 WARN_ON_ONCE(1);
1528 break;
1529 }
1530 break;
1531
1532 case LTTNG_KERNEL_ABI_FUNCTION:
1533 lttng_fallthrough;
1534 case LTTNG_KERNEL_ABI_NOOP:
1535 lttng_fallthrough;
1536 default:
1537 WARN_ON_ONCE(1);
1538 }
1539 if (!ret)
1540 event->priv->registered = 1;
1541 }
1542
1543 int _lttng_event_unregister(struct lttng_kernel_event_common *event)
1544 {
1545 struct lttng_kernel_event_common_private *event_priv = event->priv;
1546 const struct lttng_kernel_event_desc *desc;
1547 int ret = -EINVAL;
1548
1549 if (!event_priv->registered)
1550 return 0;
1551
1552 desc = event_priv->desc;
1553 switch (event_priv->instrumentation) {
1554 case LTTNG_KERNEL_ABI_TRACEPOINT:
1555 ret = lttng_wrapper_tracepoint_probe_unregister(event_priv->desc->event_kname,
1556 event_priv->desc->tp_class->probe_callback,
1557 event);
1558 break;
1559
1560 case LTTNG_KERNEL_ABI_KPROBE:
1561 lttng_kprobes_unregister_event(event);
1562 ret = 0;
1563 break;
1564
1565 case LTTNG_KERNEL_ABI_KRETPROBE:
1566 switch (event->type) {
1567 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
1568 lttng_kretprobes_unregister(event);
1569 ret = 0;
1570 break;
1571 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
1572 WARN_ON_ONCE(1);
1573 break;
1574 }
1575 break;
1576
1577 case LTTNG_KERNEL_ABI_SYSCALL:
1578 ret = lttng_syscall_filter_disable_event(event);
1579 break;
1580
1581 case LTTNG_KERNEL_ABI_NOOP:
1582 switch (event->type) {
1583 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
1584 ret = 0;
1585 break;
1586 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
1587 WARN_ON_ONCE(1);
1588 break;
1589 }
1590 break;
1591
1592 case LTTNG_KERNEL_ABI_UPROBE:
1593 lttng_uprobes_unregister_event(event);
1594 ret = 0;
1595 break;
1596
1597 case LTTNG_KERNEL_ABI_FUNCTION:
1598 lttng_fallthrough;
1599 default:
1600 WARN_ON_ONCE(1);
1601 }
1602 if (!ret)
1603 event_priv->registered = 0;
1604 return ret;
1605 }
1606
1607 /*
1608 * Only used internally at session destruction.
1609 */
1610 static
1611 void _lttng_event_destroy(struct lttng_kernel_event_common *event)
1612 {
1613 struct lttng_kernel_event_common_private *event_priv = event->priv;
1614 struct lttng_enabler_ref *enabler_ref, *tmp_enabler_ref;
1615
1616 lttng_free_event_filter_runtime(event);
1617 /* Free event enabler refs */
1618 list_for_each_entry_safe(enabler_ref, tmp_enabler_ref,
1619 &event_priv->enablers_ref_head, node)
1620 kfree(enabler_ref);
1621
1622 switch (event->type) {
1623 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
1624 {
1625 struct lttng_kernel_event_recorder *event_recorder =
1626 container_of(event, struct lttng_kernel_event_recorder, parent);
1627
1628 switch (event_priv->instrumentation) {
1629 case LTTNG_KERNEL_ABI_TRACEPOINT:
1630 lttng_event_desc_put(event_priv->desc);
1631 break;
1632
1633 case LTTNG_KERNEL_ABI_KPROBE:
1634 module_put(event_priv->desc->owner);
1635 lttng_kprobes_destroy_event_private(&event_recorder->parent);
1636 break;
1637
1638 case LTTNG_KERNEL_ABI_KRETPROBE:
1639 module_put(event_priv->desc->owner);
1640 lttng_kretprobes_destroy_private(&event_recorder->parent);
1641 break;
1642
1643 case LTTNG_KERNEL_ABI_SYSCALL:
1644 break;
1645
1646 case LTTNG_KERNEL_ABI_UPROBE:
1647 module_put(event_priv->desc->owner);
1648 lttng_uprobes_destroy_event_private(&event_recorder->parent);
1649 break;
1650
1651 case LTTNG_KERNEL_ABI_FUNCTION:
1652 lttng_fallthrough;
1653 case LTTNG_KERNEL_ABI_NOOP:
1654 lttng_fallthrough;
1655 default:
1656 WARN_ON_ONCE(1);
1657 }
1658 list_del(&event_recorder->priv->parent.node);
1659 kmem_cache_free(event_recorder_private_cache, event_recorder->priv);
1660 kmem_cache_free(event_recorder_cache, event_recorder);
1661 break;
1662 }
1663 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
1664 {
1665 struct lttng_kernel_event_notifier *event_notifier =
1666 container_of(event, struct lttng_kernel_event_notifier, parent);
1667
1668 switch (event_notifier->priv->parent.instrumentation) {
1669 case LTTNG_KERNEL_ABI_TRACEPOINT:
1670 lttng_event_desc_put(event_notifier->priv->parent.desc);
1671 break;
1672
1673 case LTTNG_KERNEL_ABI_KPROBE:
1674 module_put(event_notifier->priv->parent.desc->owner);
1675 lttng_kprobes_destroy_event_private(&event_notifier->parent);
1676 break;
1677
1678 case LTTNG_KERNEL_ABI_SYSCALL:
1679 break;
1680
1681 case LTTNG_KERNEL_ABI_UPROBE:
1682 module_put(event_notifier->priv->parent.desc->owner);
1683 lttng_uprobes_destroy_event_private(&event_notifier->parent);
1684 break;
1685
1686 case LTTNG_KERNEL_ABI_KRETPROBE:
1687 lttng_fallthrough;
1688 case LTTNG_KERNEL_ABI_FUNCTION:
1689 lttng_fallthrough;
1690 case LTTNG_KERNEL_ABI_NOOP:
1691 lttng_fallthrough;
1692 default:
1693 WARN_ON_ONCE(1);
1694 }
1695 list_del(&event_notifier->priv->parent.node);
1696 kmem_cache_free(event_notifier_private_cache, event_notifier->priv);
1697 kmem_cache_free(event_notifier_cache, event_notifier);
1698 break;
1699 }
1700 default:
1701 WARN_ON_ONCE(1);
1702 }
1703 }
1704
1705 struct lttng_kernel_id_tracker *get_tracker(struct lttng_kernel_session *session,
1706 enum tracker_type tracker_type)
1707 {
1708 switch (tracker_type) {
1709 case TRACKER_PID:
1710 return &session->pid_tracker;
1711 case TRACKER_VPID:
1712 return &session->vpid_tracker;
1713 case TRACKER_UID:
1714 return &session->uid_tracker;
1715 case TRACKER_VUID:
1716 return &session->vuid_tracker;
1717 case TRACKER_GID:
1718 return &session->gid_tracker;
1719 case TRACKER_VGID:
1720 return &session->vgid_tracker;
1721 default:
1722 WARN_ON_ONCE(1);
1723 return NULL;
1724 }
1725 }
1726
1727 int lttng_session_track_id(struct lttng_kernel_session *session,
1728 enum tracker_type tracker_type, int id)
1729 {
1730 struct lttng_kernel_id_tracker *tracker;
1731 int ret;
1732
1733 tracker = get_tracker(session, tracker_type);
1734 if (!tracker)
1735 return -EINVAL;
1736 if (id < -1)
1737 return -EINVAL;
1738 mutex_lock(&sessions_mutex);
1739 if (id == -1) {
1740 /* track all ids: destroy tracker. */
1741 lttng_id_tracker_destroy(tracker, true);
1742 ret = 0;
1743 } else {
1744 ret = lttng_id_tracker_add(tracker, id);
1745 }
1746 mutex_unlock(&sessions_mutex);
1747 return ret;
1748 }
1749
1750 int lttng_session_untrack_id(struct lttng_kernel_session *session,
1751 enum tracker_type tracker_type, int id)
1752 {
1753 struct lttng_kernel_id_tracker *tracker;
1754 int ret;
1755
1756 tracker = get_tracker(session, tracker_type);
1757 if (!tracker)
1758 return -EINVAL;
1759 if (id < -1)
1760 return -EINVAL;
1761 mutex_lock(&sessions_mutex);
1762 if (id == -1) {
1763 /* untrack all ids: replace by empty tracker. */
1764 ret = lttng_id_tracker_empty_set(tracker);
1765 } else {
1766 ret = lttng_id_tracker_del(tracker, id);
1767 }
1768 mutex_unlock(&sessions_mutex);
1769 return ret;
1770 }
1771
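/*
 * Example (illustrative sketch only): ID tracker semantics as implemented
 * above. Passing id == -1 to lttng_session_track_id() destroys the tracker
 * (track everything), while passing id == -1 to lttng_session_untrack_id()
 * installs an empty tracker (track nothing).
 */
#if 0	/* illustrative only, not compiled */
static void example_vpid_tracking(struct lttng_kernel_session *session)
{
	/* Start from an empty set, then track a single vpid. */
	lttng_session_untrack_id(session, TRACKER_VPID, -1);
	lttng_session_track_id(session, TRACKER_VPID, 1234);

	/* Revert to tracking all vpids. */
	lttng_session_track_id(session, TRACKER_VPID, -1);
}
#endif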
1772 static
1773 void *id_list_start(struct seq_file *m, loff_t *pos)
1774 {
1775 struct lttng_kernel_id_tracker *id_tracker = m->private;
1776 struct lttng_kernel_id_tracker_rcu *id_tracker_p = id_tracker->p;
1777 struct lttng_id_hash_node *e;
1778 int iter = 0, i;
1779
1780 mutex_lock(&sessions_mutex);
1781 if (id_tracker_p) {
1782 for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
1783 struct hlist_head *head = &id_tracker_p->id_hash[i];
1784
1785 lttng_hlist_for_each_entry(e, head, hlist) {
1786 if (iter++ >= *pos)
1787 return e;
1788 }
1789 }
1790 } else {
1791 /* ID tracker disabled. */
1792 if (iter >= *pos && iter == 0) {
1793 return id_tracker_p; /* empty tracker */
1794 }
1795 iter++;
1796 }
1797 /* End of list */
1798 return NULL;
1799 }
1800
1801 /* Called with sessions_mutex held. */
1802 static
1803 void *id_list_next(struct seq_file *m, void *p, loff_t *ppos)
1804 {
1805 struct lttng_kernel_id_tracker *id_tracker = m->private;
1806 struct lttng_kernel_id_tracker_rcu *id_tracker_p = id_tracker->p;
1807 struct lttng_id_hash_node *e;
1808 int iter = 0, i;
1809
1810 (*ppos)++;
1811 if (id_tracker_p) {
1812 for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
1813 struct hlist_head *head = &id_tracker_p->id_hash[i];
1814
1815 lttng_hlist_for_each_entry(e, head, hlist) {
1816 if (iter++ >= *ppos)
1817 return e;
1818 }
1819 }
1820 } else {
1821 /* ID tracker disabled. */
1822 if (iter >= *ppos && iter == 0)
1823 return p; /* empty tracker */
1824 iter++;
1825 }
1826
1827 /* End of list */
1828 return NULL;
1829 }
1830
1831 static
1832 void id_list_stop(struct seq_file *m, void *p)
1833 {
1834 mutex_unlock(&sessions_mutex);
1835 }
1836
1837 static
1838 int id_list_show(struct seq_file *m, void *p)
1839 {
1840 struct lttng_kernel_id_tracker *id_tracker = m->private;
1841 struct lttng_kernel_id_tracker_rcu *id_tracker_p = id_tracker->p;
1842 int id;
1843
1844 if (p == id_tracker_p) {
1845 /* Tracker disabled. */
1846 id = -1;
1847 } else {
1848 const struct lttng_id_hash_node *e = p;
1849
1850 id = lttng_id_tracker_get_node_id(e);
1851 }
1852 switch (id_tracker->priv->tracker_type) {
1853 case TRACKER_PID:
1854 seq_printf(m, "process { pid = %d; };\n", id);
1855 break;
1856 case TRACKER_VPID:
1857 seq_printf(m, "process { vpid = %d; };\n", id);
1858 break;
1859 case TRACKER_UID:
1860 seq_printf(m, "user { uid = %d; };\n", id);
1861 break;
1862 case TRACKER_VUID:
1863 seq_printf(m, "user { vuid = %d; };\n", id);
1864 break;
1865 case TRACKER_GID:
1866 seq_printf(m, "group { gid = %d; };\n", id);
1867 break;
1868 case TRACKER_VGID:
1869 seq_printf(m, "group { vgid = %d; };\n", id);
1870 break;
1871 default:
1872 seq_printf(m, "UNKNOWN { field = %d };\n", id);
1873 }
1874 return 0;
1875 }
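/*
 * Illustrative example of the seq_file output (hypothetical tracker content):
 * with a PID tracker containing PIDs 42 and 314, reading the listing file
 * yields:
 *
 *   process { pid = 42; };
 *   process { pid = 314; };
 *
 * A printed id of -1 denotes a disabled tracker (all IDs are tracked).
 */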
1876
1877 static
1878 const struct seq_operations lttng_tracker_ids_list_seq_ops = {
1879 .start = id_list_start,
1880 .next = id_list_next,
1881 .stop = id_list_stop,
1882 .show = id_list_show,
1883 };
1884
1885 static
1886 int lttng_tracker_ids_list_open(struct inode *inode, struct file *file)
1887 {
1888 return seq_open(file, &lttng_tracker_ids_list_seq_ops);
1889 }
1890
1891 static
1892 int lttng_tracker_ids_list_release(struct inode *inode, struct file *file)
1893 {
1894 struct seq_file *m = file->private_data;
1895 struct lttng_kernel_id_tracker *id_tracker = m->private;
1896 int ret;
1897
1898 WARN_ON_ONCE(!id_tracker);
1899 ret = seq_release(inode, file);
1900 if (!ret)
1901 fput(id_tracker->priv->session->priv->file);
1902 return ret;
1903 }
1904
1905 const struct file_operations lttng_tracker_ids_list_fops = {
1906 .owner = THIS_MODULE,
1907 .open = lttng_tracker_ids_list_open,
1908 .read = seq_read,
1909 .llseek = seq_lseek,
1910 .release = lttng_tracker_ids_list_release,
1911 };
1912
1913 int lttng_session_list_tracker_ids(struct lttng_kernel_session *session,
1914 enum tracker_type tracker_type)
1915 {
1916 struct file *tracker_ids_list_file;
1917 struct seq_file *m;
1918 int file_fd, ret;
1919
1920 file_fd = lttng_get_unused_fd();
1921 if (file_fd < 0) {
1922 ret = file_fd;
1923 goto fd_error;
1924 }
1925
1926 tracker_ids_list_file = anon_inode_getfile("[lttng_tracker_ids_list]",
1927 &lttng_tracker_ids_list_fops,
1928 NULL, O_RDWR);
1929 if (IS_ERR(tracker_ids_list_file)) {
1930 ret = PTR_ERR(tracker_ids_list_file);
1931 goto file_error;
1932 }
1933 if (!atomic_long_add_unless(&session->priv->file->f_count, 1, LONG_MAX)) {
1934 ret = -EOVERFLOW;
1935 goto refcount_error;
1936 }
1937 ret = lttng_tracker_ids_list_fops.open(NULL, tracker_ids_list_file);
1938 if (ret < 0)
1939 goto open_error;
1940 m = tracker_ids_list_file->private_data;
1941
1942 m->private = get_tracker(session, tracker_type);
1943 BUG_ON(!m->private);
1944 fd_install(file_fd, tracker_ids_list_file);
1945
1946 return file_fd;
1947
1948 open_error:
1949 atomic_long_dec(&session->priv->file->f_count);
1950 refcount_error:
1951 fput(tracker_ids_list_file);
1952 file_error:
1953 put_unused_fd(file_fd);
1954 fd_error:
1955 return ret;
1956 }
1957
1958 /*
1959 * Enabler management.
1960 */
1961 static
1962 int lttng_match_enabler_star_glob(const char *desc_name,
1963 const char *pattern)
1964 {
1965 if (!strutils_star_glob_match(pattern, LTTNG_SIZE_MAX,
1966 desc_name, LTTNG_SIZE_MAX))
1967 return 0;
1968 return 1;
1969 }
1970
1971 static
1972 int lttng_match_enabler_name(const char *desc_name,
1973 const char *name)
1974 {
1975 if (strcmp(desc_name, name))
1976 return 0;
1977 return 1;
1978 }
1979
1980 static
1981 int lttng_desc_match_enabler_check(const struct lttng_kernel_event_desc *desc,
1982 struct lttng_event_enabler_common *enabler)
1983 {
1984 const char *desc_name, *enabler_name;
1985 bool compat = false, entry = false;
1986
1987 enabler_name = enabler->event_param.name;
1988 switch (enabler->event_param.instrumentation) {
1989 case LTTNG_KERNEL_ABI_TRACEPOINT:
1990 desc_name = desc->event_name;
1991 switch (enabler->format_type) {
1992 case LTTNG_ENABLER_FORMAT_STAR_GLOB:
1993 return lttng_match_enabler_star_glob(desc_name, enabler_name);
1994 case LTTNG_ENABLER_FORMAT_NAME:
1995 return lttng_match_enabler_name(desc_name, enabler_name);
1996 default:
1997 return -EINVAL;
1998 }
1999 break;
2000
2001 case LTTNG_KERNEL_ABI_SYSCALL:
2002 desc_name = desc->event_name;
2003 if (!strncmp(desc_name, "compat_", strlen("compat_"))) {
2004 desc_name += strlen("compat_");
2005 compat = true;
2006 }
2007 if (!strncmp(desc_name, "syscall_exit_",
2008 strlen("syscall_exit_"))) {
2009 desc_name += strlen("syscall_exit_");
2010 } else if (!strncmp(desc_name, "syscall_entry_",
2011 strlen("syscall_entry_"))) {
2012 desc_name += strlen("syscall_entry_");
2013 entry = true;
2014 } else {
2015 WARN_ON_ONCE(1);
2016 return -EINVAL;
2017 }
2018 switch (enabler->event_param.u.syscall.entryexit) {
2019 case LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT:
2020 break;
2021 case LTTNG_KERNEL_ABI_SYSCALL_ENTRY:
2022 if (!entry)
2023 return 0;
2024 break;
2025 case LTTNG_KERNEL_ABI_SYSCALL_EXIT:
2026 if (entry)
2027 return 0;
2028 break;
2029 default:
2030 return -EINVAL;
2031 }
2032 switch (enabler->event_param.u.syscall.abi) {
2033 case LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL:
2034 break;
2035 case LTTNG_KERNEL_ABI_SYSCALL_ABI_NATIVE:
2036 if (compat)
2037 return 0;
2038 break;
2039 case LTTNG_KERNEL_ABI_SYSCALL_ABI_COMPAT:
2040 if (!compat)
2041 return 0;
2042 break;
2043 default:
2044 return -EINVAL;
2045 }
2046 switch (enabler->event_param.u.syscall.match) {
2047 case LTTNG_KERNEL_ABI_SYSCALL_MATCH_NAME:
2048 switch (enabler->format_type) {
2049 case LTTNG_ENABLER_FORMAT_STAR_GLOB:
2050 return lttng_match_enabler_star_glob(desc_name, enabler_name);
2051 case LTTNG_ENABLER_FORMAT_NAME:
2052 return lttng_match_enabler_name(desc_name, enabler_name);
2053 default:
2054 return -EINVAL;
2055 }
2056 break;
2057 case LTTNG_KERNEL_ABI_SYSCALL_MATCH_NR:
2058 return -EINVAL; /* Not implemented. */
2059 default:
2060 return -EINVAL;
2061 }
2062 break;
2063
2064 default:
2065 WARN_ON_ONCE(1);
2066 return -EINVAL;
2067 }
2068 }
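/*
 * Illustrative examples of the matching rules implemented above (event and
 * pattern names are hypothetical):
 * - A tracepoint enabler using the star-glob pattern "sched_*" matches the
 *   descriptors "sched_switch" and "sched_wakeup", but not "irq_handler_entry".
 * - For syscalls, the "compat_", "syscall_entry_" and "syscall_exit_" prefixes
 *   are stripped from the descriptor name before matching, so the pattern
 *   "openat" can match both "syscall_entry_openat" and "syscall_exit_openat",
 *   subject to the entry/exit and ABI filters carried by the enabler.
 */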
2069
2070 bool lttng_desc_match_enabler(const struct lttng_kernel_event_desc *desc,
2071 struct lttng_event_enabler_common *enabler)
2072 {
2073 int ret;
2074
2075 ret = lttng_desc_match_enabler_check(desc, enabler);
2076 if (ret < 0) {
2077 WARN_ON_ONCE(1);
2078 return false;
2079 }
2080 return ret;
2081 }
2082
2083 bool lttng_event_enabler_match_event(struct lttng_event_enabler_common *event_enabler,
2084 struct lttng_kernel_event_common *event)
2085 {
2086 if (event_enabler->event_param.instrumentation != event->priv->instrumentation)
2087 return false;
2088
2089 switch (event_enabler->enabler_type) {
2090 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
2091 {
2092 struct lttng_event_recorder_enabler *event_recorder_enabler =
2093 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
2094 struct lttng_kernel_event_recorder *event_recorder =
2095 container_of(event, struct lttng_kernel_event_recorder, parent);
2096
2097 if (lttng_desc_match_enabler(event->priv->desc, event_enabler)
2098 && event_recorder->chan == event_recorder_enabler->chan)
2099 return true;
2100 else
2101 return false;
2102 }
2103 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
2104 {
2105 struct lttng_event_notifier_enabler *event_notifier_enabler =
2106 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
2107 struct lttng_kernel_event_notifier *event_notifier =
2108 container_of(event, struct lttng_kernel_event_notifier, parent);
2109
2110 if (lttng_desc_match_enabler(event->priv->desc, event_enabler)
2111 && event_notifier->priv->group == event_notifier_enabler->group
2112 && event->priv->user_token == event_enabler->user_token)
2113 return true;
2114 else
2115 return false;
2116 }
2117 default:
2118 WARN_ON_ONCE(1);
2119 return false;
2120 }
2121 }
2122
2123 bool lttng_event_enabler_desc_match_event(struct lttng_event_enabler_common *event_enabler,
2124 const struct lttng_kernel_event_desc *desc,
2125 struct lttng_kernel_event_common *event)
2126 {
2127 if (event_enabler->event_param.instrumentation != event->priv->instrumentation)
2128 return false;
2129
2130 switch (event_enabler->enabler_type) {
2131 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
2132 {
2133 struct lttng_event_recorder_enabler *event_recorder_enabler =
2134 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
2135 struct lttng_kernel_event_recorder *event_recorder =
2136 container_of(event, struct lttng_kernel_event_recorder, parent);
2137
2138 if (event->priv->desc == desc && event_recorder->chan == event_recorder_enabler->chan)
2139 return true;
2140 else
2141 return false;
2142 }
2143 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
2144 {
2145 struct lttng_event_notifier_enabler *event_notifier_enabler =
2146 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
2147 struct lttng_kernel_event_notifier *event_notifier =
2148 container_of(event, struct lttng_kernel_event_notifier, parent);
2149
2150 if (event->priv->desc == desc
2151 && event_notifier->priv->group == event_notifier_enabler->group
2152 && event->priv->user_token == event_enabler->user_token)
2153 return true;
2154 else
2155 return false;
2156 }
2157 default:
2158 WARN_ON_ONCE(1);
2159 return false;
2160 }
2161 }
2162
2163 bool lttng_event_enabler_event_name_match_event(struct lttng_event_enabler_common *event_enabler,
2164 const char *event_name,
2165 struct lttng_kernel_event_common *event)
2166 {
2167 if (event_enabler->event_param.instrumentation != event->priv->instrumentation)
2168 return false;
2169
2170 switch (event_enabler->enabler_type) {
2171 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
2172 {
2173 struct lttng_event_recorder_enabler *event_recorder_enabler =
2174 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
2175 struct lttng_kernel_event_recorder *event_recorder =
2176 container_of(event, struct lttng_kernel_event_recorder, parent);
2177
2178 if (!strncmp(event->priv->desc->event_name, event_name, LTTNG_KERNEL_ABI_SYM_NAME_LEN - 1)
2179 && event_recorder->chan == event_recorder_enabler->chan)
2180 return true;
2181 else
2182 return false;
2183 }
2184 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
2185 {
2186 struct lttng_event_notifier_enabler *event_notifier_enabler =
2187 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
2188 struct lttng_kernel_event_notifier *event_notifier =
2189 container_of(event, struct lttng_kernel_event_notifier, parent);
2190
2191 if (!strncmp(event->priv->desc->event_name, event_name, LTTNG_KERNEL_ABI_SYM_NAME_LEN - 1)
2192 && event_notifier->priv->group == event_notifier_enabler->group
2193 && event->priv->user_token == event_enabler->user_token)
2194 return true;
2195 else
2196 return false;
2197 }
2198 default:
2199 WARN_ON_ONCE(1);
2200 return false;
2201 }
2202 }
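/*
 * Sketch of the expected call pattern for the helper above (illustrative
 * only; the variable names are hypothetical and actual callers live in the
 * event and syscall creation paths):
 *
 *   head = utils_borrow_hash_table_bucket(events_ht->table,
 *           LTTNG_EVENT_HT_SIZE, event_name);
 *   lttng_hlist_for_each_entry(event_priv, head, hlist_node) {
 *           if (lttng_event_enabler_event_name_match_event(event_enabler,
 *                           event_name, event_priv->pub))
 *                   found = true;
 *   }
 */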
2203
2204 static
2205 struct lttng_enabler_ref *lttng_enabler_ref(
2206 struct list_head *enablers_ref_list,
2207 struct lttng_event_enabler_common *enabler)
2208 {
2209 struct lttng_enabler_ref *enabler_ref;
2210
2211 list_for_each_entry(enabler_ref, enablers_ref_list, node) {
2212 if (enabler_ref->ref == enabler)
2213 return enabler_ref;
2214 }
2215 return NULL;
2216 }
2217
2218 static
2219 void lttng_create_tracepoint_event_if_missing(struct lttng_event_enabler_common *event_enabler)
2220 {
2221 struct lttng_event_ht *events_ht = lttng_get_event_ht_from_enabler(event_enabler);
2222 struct lttng_kernel_probe_desc *probe_desc;
2223 const struct lttng_kernel_event_desc *desc;
2224 struct list_head *probe_list;
2225 int i;
2226
2227 probe_list = lttng_get_probe_list_head();
2228 /*
2229 * For each event provided by a registered probe, if the event matches
2230 * our enabler, create an associated lttng_event if not
2231 * already present.
2232 */
2233 list_for_each_entry(probe_desc, probe_list, head) {
2234 for (i = 0; i < probe_desc->nr_events; i++) {
2235 int found = 0;
2236 struct hlist_head *head;
2237 struct lttng_kernel_event_common *event;
2238 struct lttng_kernel_event_common_private *event_priv;
2239
2240 desc = probe_desc->event_desc[i];
2241 if (!lttng_desc_match_enabler(desc, event_enabler))
2242 continue;
2243
2244 /*
2245 * Check if already created.
2246 */
2247 head = utils_borrow_hash_table_bucket(events_ht->table, LTTNG_EVENT_HT_SIZE, desc->event_name);
2248 lttng_hlist_for_each_entry(event_priv, head, hlist_node) {
2249 if (lttng_event_enabler_desc_match_event(event_enabler, desc, event_priv->pub))
2250 found = 1;
2251 }
2252 if (found)
2253 continue;
2254
2255 /*
2256 * We need to create an event for this event probe.
2257 */
2258 event = _lttng_kernel_event_create(event_enabler, desc);
2259 if (!event) {
2260 printk(KERN_INFO "LTTng: Unable to create event %s\n",
2261 probe_desc->event_desc[i]->event_name);
2262 }
2263 }
2264 }
2265 }
2266
2267 static
2268 void lttng_create_syscall_event_if_missing(struct lttng_event_enabler_common *event_enabler)
2269 {
2270 int ret;
2271
2272 ret = lttng_syscalls_register_event(event_enabler);
2273 WARN_ON_ONCE(ret);
2274 }
2275
2276 /*
2277 * Create the event if it is missing and it is present in the list of tracepoint probes.
2278 * Should be called with sessions mutex held.
2279 */
2280 static
2281 void lttng_create_event_if_missing(struct lttng_event_enabler_common *event_enabler)
2282 {
2283 switch (event_enabler->event_param.instrumentation) {
2284 case LTTNG_KERNEL_ABI_TRACEPOINT:
2285 lttng_create_tracepoint_event_if_missing(event_enabler);
2286 break;
2287
2288 case LTTNG_KERNEL_ABI_SYSCALL:
2289 lttng_create_syscall_event_if_missing(event_enabler);
2290 break;
2291
2292 default:
2293 WARN_ON_ONCE(1);
2294 break;
2295 }
2296 }
2297
2298 static
2299 void lttng_event_enabler_init_event_filter(struct lttng_event_enabler_common *event_enabler,
2300 struct lttng_kernel_event_common *event)
2301 {
2302 /* Link filter bytecodes if not linked yet. */
2303 lttng_enabler_link_bytecode(event->priv->desc, lttng_static_ctx,
2304 &event->priv->filter_bytecode_runtime_head, &event_enabler->filter_bytecode_head);
2305 }
2306
2307 static
2308 void lttng_event_enabler_init_event_capture(struct lttng_event_enabler_common *event_enabler,
2309 struct lttng_kernel_event_common *event)
2310 {
2311 switch (event_enabler->enabler_type) {
2312 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
2313 break;
2314 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
2315 {
2316 struct lttng_event_notifier_enabler *event_notifier_enabler =
2317 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
2318 struct lttng_kernel_event_notifier *event_notifier =
2319 container_of(event, struct lttng_kernel_event_notifier, parent);
2320
2321 /* Link capture bytecodes if not linked yet. */
2322 lttng_enabler_link_bytecode(event->priv->desc,
2323 lttng_static_ctx, &event_notifier->priv->capture_bytecode_runtime_head,
2324 &event_notifier_enabler->capture_bytecode_head);
2325 event_notifier->priv->num_captures = event_notifier_enabler->num_captures;
2326 break;
2327 }
2328 default:
2329 WARN_ON_ONCE(1);
2330 }
2331 }
2332
2333 /*
2334 * Create events associated with an event_enabler (if not already present),
2335 * and add backward reference from the event to the enabler.
2336 * Should be called with sessions mutex held.
2337 */
2338 static
2339 int lttng_event_enabler_ref_events(struct lttng_event_enabler_common *event_enabler)
2340 {
2341 struct list_head *event_list_head = lttng_get_event_list_head_from_enabler(event_enabler);
2342 struct lttng_kernel_event_common_private *event_priv;
2343
2344 lttng_syscall_table_set_wildcard_all(event_enabler);
2345
2346 /* First ensure that probe events are created for this enabler. */
2347 lttng_create_event_if_missing(event_enabler);
2348
2349 /* Link the created event with its associated enabler. */
2350 list_for_each_entry(event_priv, event_list_head, node) {
2351 struct lttng_kernel_event_common *event = event_priv->pub;
2352 struct lttng_enabler_ref *enabler_ref;
2353
2354 if (!lttng_event_enabler_match_event(event_enabler, event))
2355 continue;
2356
2357 enabler_ref = lttng_enabler_ref(&event_priv->enablers_ref_head, event_enabler);
2358 if (!enabler_ref) {
2359 /*
2360 * If no backward ref, create it.
2361 * Add backward ref from the event to the enabler.
2362 */
2363 enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
2364 if (!enabler_ref)
2365 return -ENOMEM;
2366
2367 enabler_ref->ref = event_enabler;
2368 list_add(&enabler_ref->node, &event_priv->enablers_ref_head);
2369 }
2370
2371 lttng_event_enabler_init_event_filter(event_enabler, event);
2372 lttng_event_enabler_init_event_capture(event_enabler, event);
2373 }
2374 return 0;
2375 }
2376
2377 /*
2378 * Called at probe module load: connect each newly loaded probe to all
2379 * enablers matching its events.
2380 * Called with sessions lock held.
2381 */
2382 int lttng_fix_pending_events(void)
2383 {
2384 struct lttng_kernel_session_private *session_priv;
2385
2386 list_for_each_entry(session_priv, &sessions, list)
2387 lttng_session_lazy_sync_event_enablers(session_priv->pub);
2388 return 0;
2389 }
2390
2391 static bool lttng_event_notifier_group_has_active_event_notifiers(
2392 struct lttng_event_notifier_group *event_notifier_group)
2393 {
2394 struct lttng_event_enabler_common *event_enabler;
2395
2396 list_for_each_entry(event_enabler, &event_notifier_group->enablers_head, node) {
2397 if (event_enabler->enabled)
2398 return true;
2399 }
2400 return false;
2401 }
2402
2403 bool lttng_event_notifier_active(void)
2404 {
2405 struct lttng_event_notifier_group *event_notifier_group;
2406
2407 list_for_each_entry(event_notifier_group, &event_notifier_groups, node) {
2408 if (lttng_event_notifier_group_has_active_event_notifiers(event_notifier_group))
2409 return true;
2410 }
2411 return false;
2412 }
2413
2414 int lttng_fix_pending_event_notifiers(void)
2415 {
2416 struct lttng_event_notifier_group *event_notifier_group;
2417
2418 list_for_each_entry(event_notifier_group, &event_notifier_groups, node)
2419 lttng_event_notifier_group_sync_enablers(event_notifier_group);
2420 return 0;
2421 }
2422
2423 struct lttng_event_recorder_enabler *lttng_event_recorder_enabler_create(
2424 enum lttng_enabler_format_type format_type,
2425 struct lttng_kernel_abi_event *event_param,
2426 struct lttng_kernel_channel_buffer *chan)
2427 {
2428 struct lttng_event_recorder_enabler *event_enabler;
2429
2430 event_enabler = kzalloc(sizeof(*event_enabler), GFP_KERNEL);
2431 if (!event_enabler)
2432 return NULL;
2433 event_enabler->parent.enabler_type = LTTNG_EVENT_ENABLER_TYPE_RECORDER;
2434 event_enabler->parent.format_type = format_type;
2435 INIT_LIST_HEAD(&event_enabler->parent.filter_bytecode_head);
2436 memcpy(&event_enabler->parent.event_param, event_param,
2437 sizeof(event_enabler->parent.event_param));
2438 event_enabler->chan = chan;
2439 /* ctx left NULL */
2440 event_enabler->parent.enabled = 0;
2441 return event_enabler;
2442 }
2443
2444 void lttng_event_enabler_session_add(struct lttng_kernel_session *session,
2445 struct lttng_event_recorder_enabler *event_enabler)
2446 {
2447 mutex_lock(&sessions_mutex);
2448 list_add(&event_enabler->parent.node, &session->priv->enablers_head);
2449 event_enabler->parent.published = true;
2450 lttng_session_lazy_sync_event_enablers(session);
2451 mutex_unlock(&sessions_mutex);
2452 }
2453
2454 int lttng_event_enabler_enable(struct lttng_event_enabler_common *event_enabler)
2455 {
2456 mutex_lock(&sessions_mutex);
2457 event_enabler->enabled = 1;
2458 lttng_event_enabler_sync(event_enabler);
2459 mutex_unlock(&sessions_mutex);
2460 return 0;
2461 }
2462
2463 int lttng_event_enabler_disable(struct lttng_event_enabler_common *event_enabler)
2464 {
2465 mutex_lock(&sessions_mutex);
2466 event_enabler->enabled = 0;
2467 lttng_event_enabler_sync(event_enabler);
2468 mutex_unlock(&sessions_mutex);
2469 return 0;
2470 }
2471
2472 static
2473 int lttng_enabler_attach_filter_bytecode(struct lttng_event_enabler_common *enabler,
2474 struct lttng_kernel_abi_filter_bytecode __user *bytecode)
2475 {
2476 struct lttng_kernel_bytecode_node *bytecode_node;
2477 uint32_t bytecode_len;
2478 int ret;
2479
2480 ret = get_user(bytecode_len, &bytecode->len);
2481 if (ret)
2482 return ret;
2483 bytecode_node = lttng_kvzalloc(sizeof(*bytecode_node) + bytecode_len,
2484 GFP_KERNEL);
2485 if (!bytecode_node)
2486 return -ENOMEM;
2487 ret = copy_from_user(&bytecode_node->bc, bytecode,
2488 sizeof(*bytecode) + bytecode_len);
2489 if (ret) {
/* copy_from_user() returns the number of bytes left to copy; report -EFAULT. */
ret = -EFAULT;
goto error_free;
}
2491
2492 bytecode_node->type = LTTNG_KERNEL_BYTECODE_TYPE_FILTER;
2493 bytecode_node->enabler = enabler;
2494 /* Enforce length based on allocated size */
2495 bytecode_node->bc.len = bytecode_len;
2496 list_add_tail(&bytecode_node->node, &enabler->filter_bytecode_head);
2497
2498 return 0;
2499
2500 error_free:
2501 lttng_kvfree(bytecode_node);
2502 return ret;
2503 }
2504
2505 int lttng_event_enabler_attach_filter_bytecode(struct lttng_event_enabler_common *event_enabler,
2506 struct lttng_kernel_abi_filter_bytecode __user *bytecode)
2507 {
2508 int ret;
2509 ret = lttng_enabler_attach_filter_bytecode(event_enabler, bytecode);
2510 if (ret)
2511 goto error;
2512 lttng_event_enabler_sync(event_enabler);
2513 return 0;
2514
2515 error:
2516 return ret;
2517 }
2518
2519 int lttng_event_add_callsite(struct lttng_kernel_event_common *event,
2520 struct lttng_kernel_abi_event_callsite __user *callsite)
2521 {
2522
2523 switch (event->priv->instrumentation) {
2524 case LTTNG_KERNEL_ABI_UPROBE:
2525 return lttng_uprobes_event_add_callsite(event, callsite);
2526 default:
2527 return -EINVAL;
2528 }
2529 }
2530
2531 static
2532 void lttng_enabler_destroy(struct lttng_event_enabler_common *enabler)
2533 {
2534 struct lttng_kernel_bytecode_node *filter_node, *tmp_filter_node;
2535
2536 /* Destroy filter bytecode */
2537 list_for_each_entry_safe(filter_node, tmp_filter_node,
2538 &enabler->filter_bytecode_head, node) {
2539 lttng_kvfree(filter_node);
2540 }
2541 }
2542
2543 void lttng_event_enabler_destroy(struct lttng_event_enabler_common *event_enabler)
2544 {
2545 lttng_enabler_destroy(event_enabler);
2546 if (event_enabler->published)
2547 list_del(&event_enabler->node);
2548
2549 switch (event_enabler->enabler_type) {
2550 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
2551 {
2552 struct lttng_event_recorder_enabler *event_recorder_enabler =
2553 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
2554
2555 kfree(event_recorder_enabler);
2556 break;
2557 }
2558 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
2559 {
2560 struct lttng_event_notifier_enabler *event_notifier_enabler =
2561 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
2562
2563 kfree(event_notifier_enabler);
2564 break;
2565 }
2566 default:
2567 WARN_ON_ONCE(1);
2568 }
2569 }
2570
2571 struct lttng_event_notifier_enabler *lttng_event_notifier_enabler_create(
2572 enum lttng_enabler_format_type format_type,
2573 struct lttng_kernel_abi_event_notifier *event_notifier_param,
2574 struct lttng_event_notifier_group *event_notifier_group)
2575 {
2576 struct lttng_event_notifier_enabler *event_notifier_enabler;
2577
2578 event_notifier_enabler = kzalloc(sizeof(*event_notifier_enabler), GFP_KERNEL);
2579 if (!event_notifier_enabler)
2580 return NULL;
2581
2582 event_notifier_enabler->parent.enabler_type = LTTNG_EVENT_ENABLER_TYPE_NOTIFIER;
2583 event_notifier_enabler->parent.format_type = format_type;
2584 INIT_LIST_HEAD(&event_notifier_enabler->parent.filter_bytecode_head);
2585 INIT_LIST_HEAD(&event_notifier_enabler->capture_bytecode_head);
2586
2587 event_notifier_enabler->error_counter_index = event_notifier_param->error_counter_index;
2588 event_notifier_enabler->num_captures = 0;
2589
2590 memcpy(&event_notifier_enabler->parent.event_param, &event_notifier_param->event,
2591 sizeof(event_notifier_enabler->parent.event_param));
2592
2593 event_notifier_enabler->parent.enabled = 0;
2594 event_notifier_enabler->parent.user_token = event_notifier_param->event.token;
2595 event_notifier_enabler->group = event_notifier_group;
2596 return event_notifier_enabler;
2597 }
2598
2599 void lttng_event_notifier_enabler_group_add(struct lttng_event_notifier_group *event_notifier_group,
2600 struct lttng_event_notifier_enabler *event_notifier_enabler)
2601 {
2602 mutex_lock(&sessions_mutex);
2603 list_add(&event_notifier_enabler->parent.node, &event_notifier_enabler->group->enablers_head);
2604 event_notifier_enabler->parent.published = true;
2605 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2606 mutex_unlock(&sessions_mutex);
2607 }
2608
2609 int lttng_event_notifier_enabler_enable(
2610 struct lttng_event_notifier_enabler *event_notifier_enabler)
2611 {
2612 mutex_lock(&sessions_mutex);
2613 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 1;
2614 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2615 mutex_unlock(&sessions_mutex);
2616 return 0;
2617 }
2618
2619 int lttng_event_notifier_enabler_disable(
2620 struct lttng_event_notifier_enabler *event_notifier_enabler)
2621 {
2622 mutex_lock(&sessions_mutex);
2623 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 0;
2624 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2625 mutex_unlock(&sessions_mutex);
2626 return 0;
2627 }
2628
2629 int lttng_event_notifier_enabler_attach_capture_bytecode(
2630 struct lttng_event_notifier_enabler *event_notifier_enabler,
2631 struct lttng_kernel_abi_capture_bytecode __user *bytecode)
2632 {
2633 struct lttng_kernel_bytecode_node *bytecode_node;
2634 struct lttng_event_enabler_common *enabler =
2635 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler);
2636 uint32_t bytecode_len;
2637 int ret;
2638
2639 ret = get_user(bytecode_len, &bytecode->len);
2640 if (ret)
2641 return ret;
2642
2643 bytecode_node = lttng_kvzalloc(sizeof(*bytecode_node) + bytecode_len,
2644 GFP_KERNEL);
2645 if (!bytecode_node)
2646 return -ENOMEM;
2647
2648 ret = copy_from_user(&bytecode_node->bc, bytecode,
2649 sizeof(*bytecode) + bytecode_len);
2650 if (ret) {
/* copy_from_user() returns the number of bytes left to copy; report -EFAULT. */
ret = -EFAULT;
goto error_free;
}
2652
2653 bytecode_node->type = LTTNG_KERNEL_BYTECODE_TYPE_CAPTURE;
2654 bytecode_node->enabler = enabler;
2655
2656 /* Enforce length based on allocated size */
2657 bytecode_node->bc.len = bytecode_len;
2658 list_add_tail(&bytecode_node->node, &event_notifier_enabler->capture_bytecode_head);
2659
2660 event_notifier_enabler->num_captures++;
2661
2662 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2663 goto end;
2664
2665 error_free:
2666 lttng_kvfree(bytecode_node);
2667 end:
2668 return ret;
2669 }
2670
2671 static
2672 void lttng_event_sync_filter_state(struct lttng_kernel_event_common *event)
2673 {
2674 int has_enablers_without_filter_bytecode = 0, nr_filters = 0;
2675 struct lttng_kernel_bytecode_runtime *runtime;
2676 struct lttng_enabler_ref *enabler_ref;
2677
2678 /* Check if the event has enabled enablers without filter bytecode. */
2679 list_for_each_entry(enabler_ref, &event->priv->enablers_ref_head, node) {
2680 if (enabler_ref->ref->enabled
2681 && list_empty(&enabler_ref->ref->filter_bytecode_head)) {
2682 has_enablers_without_filter_bytecode = 1;
2683 break;
2684 }
2685 }
2686 event->priv->has_enablers_without_filter_bytecode = has_enablers_without_filter_bytecode;
2687
2688 /* Enable filters */
2689 list_for_each_entry(runtime, &event->priv->filter_bytecode_runtime_head, node) {
2690 lttng_bytecode_sync_state(runtime);
2691 nr_filters++;
2692 }
2693 WRITE_ONCE(event->eval_filter, !(has_enablers_without_filter_bytecode || !nr_filters));
2694 }
2695
2696 static
2697 void lttng_event_sync_capture_state(struct lttng_kernel_event_common *event)
2698 {
2699 switch (event->type) {
2700 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
2701 break;
2702 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
2703 {
2704 struct lttng_kernel_event_notifier *event_notifier =
2705 container_of(event, struct lttng_kernel_event_notifier, parent);
2706 struct lttng_kernel_bytecode_runtime *runtime;
2707 int nr_captures = 0;
2708
2709 /* Enable captures */
2710 list_for_each_entry(runtime, &event_notifier->priv->capture_bytecode_runtime_head, node) {
2711 lttng_bytecode_sync_state(runtime);
2712 nr_captures++;
2713 }
2714 WRITE_ONCE(event_notifier->eval_capture, !!nr_captures);
2715 break;
2716 }
2717 default:
2718 WARN_ON_ONCE(1);
2719 }
2720 }
2721
2722 static
2723 bool lttng_get_event_enabled_state(struct lttng_kernel_event_common *event)
2724 {
2725 struct lttng_enabler_ref *enabler_ref;
2726 bool enabled = false;
2727
2728 switch (event->priv->instrumentation) {
2729 case LTTNG_KERNEL_ABI_TRACEPOINT:
2730 lttng_fallthrough;
2731 case LTTNG_KERNEL_ABI_SYSCALL:
2732 /* Enable events */
2733 list_for_each_entry(enabler_ref, &event->priv->enablers_ref_head, node) {
2734 if (enabler_ref->ref->enabled) {
2735 enabled = true;
2736 break;
2737 }
2738 }
2739 break;
2740 default:
2741 WARN_ON_ONCE(1);
2742 return false;
2743 }
2744
2745 switch (event->type) {
2746 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
2747 {
2748 struct lttng_kernel_event_recorder *event_recorder =
2749 container_of(event, struct lttng_kernel_event_recorder, parent);
2750
2751 /*
2752 * The enabled state is the union of the enablers' states,
2753 * intersected with the session and channel transient enable
2754 * states.
2755 */
2756 return enabled && event_recorder->chan->parent.session->priv->tstate && event_recorder->chan->priv->parent.tstate;
2757 }
2758 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
2759 return enabled;
2760 default:
2761 WARN_ON_ONCE(1);
2762 return false;
2763 }
2764 }
2765
2766 static
2767 bool lttng_event_is_lazy_sync(struct lttng_kernel_event_common *event)
2768 {
2769 switch (event->priv->instrumentation) {
2770 case LTTNG_KERNEL_ABI_TRACEPOINT:
2771 lttng_fallthrough;
2772 case LTTNG_KERNEL_ABI_SYSCALL:
2773 return true;
2774
2775 default:
2776 /* Not handled with lazy sync. */
2777 return false;
2778 }
2779 }
2780
2781 /*
2782 * Should be called with sessions mutex held.
2783 */
2784 static
2785 void lttng_sync_event_list(struct list_head *event_enabler_list,
2786 struct list_head *event_list)
2787 {
2788 struct lttng_kernel_event_common_private *event_priv;
2789 struct lttng_event_enabler_common *event_enabler;
2790
2791 list_for_each_entry(event_enabler, event_enabler_list, node)
2792 lttng_event_enabler_ref_events(event_enabler);
2793
2794 /*
2795 * For each event, if at least one of its enablers is enabled,
2796 * and its channel and session transient states are enabled, we
2797 * enable the event, else we disable it.
2798 */
2799 list_for_each_entry(event_priv, event_list, node) {
2800 struct lttng_kernel_event_common *event = event_priv->pub;
2801 bool enabled;
2802
2803 if (!lttng_event_is_lazy_sync(event))
2804 continue;
2805
2806 enabled = lttng_get_event_enabled_state(event);
2807 WRITE_ONCE(event->enabled, enabled);
2808 /*
2809 * Sync tracepoint registration with event enabled state.
2810 */
2811 if (enabled) {
2812 register_event(event);
2813 } else {
2814 _lttng_event_unregister(event);
2815 }
2816
2817 lttng_event_sync_filter_state(event);
2818 lttng_event_sync_capture_state(event);
2819 }
2820 }
2821
2822 /*
2823 * lttng_session_sync_event_enablers should be called just before starting a
2824 * session.
2825 */
2826 static
2827 void lttng_session_sync_event_enablers(struct lttng_kernel_session *session)
2828 {
2829 lttng_sync_event_list(&session->priv->enablers_head, &session->priv->events);
2830 }
2831
2832 /*
2833 * Apply enablers to session events, adding events to the session if need
2834 * be. It is required after each modification applied to an active
2835 * session, and right before session "start".
2836 * "lazy" sync means we only sync if required.
2837 * Should be called with sessions mutex held.
2838 */
2839 static
2840 void lttng_session_lazy_sync_event_enablers(struct lttng_kernel_session *session)
2841 {
2842 /* We can skip if session is not active */
2843 if (!session->active)
2844 return;
2845 lttng_session_sync_event_enablers(session);
2846 }
2847
2848 static
2849 void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group)
2850 {
2851 lttng_sync_event_list(&event_notifier_group->enablers_head, &event_notifier_group->event_notifiers_head);
2852 }
2853
2854 static
2855 void lttng_event_enabler_sync(struct lttng_event_enabler_common *event_enabler)
2856 {
2857 switch (event_enabler->enabler_type) {
2858 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
2859 {
2860 struct lttng_event_recorder_enabler *event_recorder_enabler =
2861 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
2862 lttng_session_lazy_sync_event_enablers(event_recorder_enabler->chan->parent.session);
2863 break;
2864 }
2865 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
2866 {
2867 struct lttng_event_notifier_enabler *event_notifier_enabler =
2868 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
2869 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2870 break;
2871 }
2872 default:
2873 WARN_ON_ONCE(1);
2874 }
2875 }
2876
2877 /*
2878 * Serialize at most one packet worth of metadata into a metadata
2879 * channel.
2880 * We grab the metadata cache mutex to get exclusive access to our metadata
2881 * buffer and to the metadata cache. Exclusive access to the metadata buffer
2882 * allows us to perform otherwise racy operations, such as checking the remaining
2883 * space left in a packet and then writing to it, since mutual exclusion protects
2884 * us from concurrent writes. Mutual exclusion on the metadata cache allows us to
2885 * read the cache content without racing against reallocation of the cache by updates.
2886 * Returns the number of bytes written to the channel, 0 if no data
2887 * was written and a negative value on error.
2888 */
2889 int lttng_metadata_output_channel(struct lttng_metadata_stream *stream,
2890 struct lttng_kernel_ring_buffer_channel *chan, bool *coherent)
2891 {
2892 struct lttng_kernel_ring_buffer_ctx ctx;
2893 int ret = 0;
2894 size_t len, reserve_len;
2895
2896 /*
2897 * Ensure we support multiple get_next / put sequences followed by
2898 * put_next. The metadata cache lock protects reading the metadata
2899 * cache. It can indeed be read concurrently by "get_next_subbuf" and
2900 * "flush" operations on the buffer invoked by different processes.
2901 * Moreover, since the metadata cache memory can be reallocated, we
2902 * need to have exclusive access against updates even though we only
2903 * read it.
2904 */
2905 mutex_lock(&stream->metadata_cache->lock);
2906 WARN_ON(stream->metadata_in < stream->metadata_out);
2907 if (stream->metadata_in != stream->metadata_out)
2908 goto end;
2909
2910 /* Metadata regenerated, change the version. */
2911 if (stream->metadata_cache->version != stream->version)
2912 stream->version = stream->metadata_cache->version;
2913
2914 len = stream->metadata_cache->metadata_written -
2915 stream->metadata_in;
2916 if (!len)
2917 goto end;
2918 reserve_len = min_t(size_t,
2919 stream->transport->ops.priv->packet_avail_size(chan),
2920 len);
2921 lib_ring_buffer_ctx_init(&ctx, chan, reserve_len,
2922 sizeof(char), NULL);
2923 /*
2924 * If reservation failed, return an error to the caller.
2925 */
2926 ret = stream->transport->ops.event_reserve(&ctx);
2927 if (ret != 0) {
2928 printk(KERN_WARNING "LTTng: Metadata event reservation failed\n");
2929 stream->coherent = false;
2930 goto end;
2931 }
2932 stream->transport->ops.event_write(&ctx,
2933 stream->metadata_cache->data + stream->metadata_in,
2934 reserve_len, 1);
2935 stream->transport->ops.event_commit(&ctx);
2936 stream->metadata_in += reserve_len;
2937 if (reserve_len < len)
2938 stream->coherent = false;
2939 else
2940 stream->coherent = true;
2941 ret = reserve_len;
2942
2943 end:
2944 if (coherent)
2945 *coherent = stream->coherent;
2946 mutex_unlock(&stream->metadata_cache->lock);
2947 return ret;
2948 }
2949
2950 static
2951 void lttng_metadata_begin(struct lttng_kernel_session *session)
2952 {
2953 if (atomic_inc_return(&session->priv->metadata_cache->producing) == 1)
2954 mutex_lock(&session->priv->metadata_cache->lock);
2955 }
2956
2957 static
2958 void lttng_metadata_end(struct lttng_kernel_session *session)
2959 {
2960 WARN_ON_ONCE(!atomic_read(&session->priv->metadata_cache->producing));
2961 if (atomic_dec_return(&session->priv->metadata_cache->producing) == 0) {
2962 struct lttng_metadata_stream *stream;
2963
2964 list_for_each_entry(stream, &session->priv->metadata_cache->metadata_stream, list)
2965 wake_up_interruptible(&stream->read_wait);
2966 mutex_unlock(&session->priv->metadata_cache->lock);
2967 }
2968 }
2969
2970 /*
2971 * Write the metadata to the metadata cache.
2972 * Must be called with sessions_mutex held.
2973 * The metadata cache lock protects us from concurrent read access from
2974 * a thread outputting metadata content to the ring buffer.
2975 * The content of the printf is printed as a single atomic metadata
2976 * transaction.
2977 */
2978 int lttng_metadata_printf(struct lttng_kernel_session *session,
2979 const char *fmt, ...)
2980 {
2981 char *str;
2982 size_t len;
2983 va_list ap;
2984
2985 WARN_ON_ONCE(!LTTNG_READ_ONCE(session->active));
2986
2987 va_start(ap, fmt);
2988 str = kvasprintf(GFP_KERNEL, fmt, ap);
2989 va_end(ap);
2990 if (!str)
2991 return -ENOMEM;
2992
2993 len = strlen(str);
2994 WARN_ON_ONCE(!atomic_read(&session->priv->metadata_cache->producing));
2995 if (session->priv->metadata_cache->metadata_written + len >
2996 session->priv->metadata_cache->cache_alloc) {
2997 char *tmp_cache_realloc;
2998 unsigned int tmp_cache_alloc_size;
2999
3000 tmp_cache_alloc_size = max_t(unsigned int,
3001 session->priv->metadata_cache->cache_alloc + len,
3002 session->priv->metadata_cache->cache_alloc << 1);
3003 tmp_cache_realloc = vzalloc(tmp_cache_alloc_size);
3004 if (!tmp_cache_realloc)
3005 goto err;
3006 if (session->priv->metadata_cache->data) {
3007 memcpy(tmp_cache_realloc,
3008 session->priv->metadata_cache->data,
3009 session->priv->metadata_cache->cache_alloc);
3010 vfree(session->priv->metadata_cache->data);
3011 }
3012
3013 session->priv->metadata_cache->cache_alloc = tmp_cache_alloc_size;
3014 session->priv->metadata_cache->data = tmp_cache_realloc;
3015 }
3016 memcpy(session->priv->metadata_cache->data +
3017 session->priv->metadata_cache->metadata_written,
3018 str, len);
3019 session->priv->metadata_cache->metadata_written += len;
3020 kfree(str);
3021
3022 return 0;
3023
3024 err:
3025 kfree(str);
3026 return -ENOMEM;
3027 }
3028
3029 static
3030 int print_tabs(struct lttng_kernel_session *session, size_t nesting)
3031 {
3032 size_t i;
3033
3034 for (i = 0; i < nesting; i++) {
3035 int ret;
3036
3037 ret = lttng_metadata_printf(session, " ");
3038 if (ret) {
3039 return ret;
3040 }
3041 }
3042 return 0;
3043 }
3044
3045 static
3046 int lttng_field_name_statedump(struct lttng_kernel_session *session,
3047 const struct lttng_kernel_event_field *field,
3048 size_t nesting)
3049 {
3050 return lttng_metadata_printf(session, " _%s;\n", field->name);
3051 }
3052
3053 static
3054 int _lttng_integer_type_statedump(struct lttng_kernel_session *session,
3055 const struct lttng_kernel_type_integer *type,
3056 enum lttng_kernel_string_encoding parent_encoding,
3057 size_t nesting)
3058 {
3059 int ret;
3060
3061 ret = print_tabs(session, nesting);
3062 if (ret)
3063 return ret;
3064 ret = lttng_metadata_printf(session,
3065 "integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s }",
3066 type->size,
3067 type->alignment,
3068 type->signedness,
3069 (parent_encoding == lttng_kernel_string_encoding_none)
3070 ? "none"
3071 : (parent_encoding == lttng_kernel_string_encoding_UTF8)
3072 ? "UTF8"
3073 : "ASCII",
3074 type->base,
3075 #if __BYTE_ORDER == __BIG_ENDIAN
3076 type->reverse_byte_order ? " byte_order = le;" : ""
3077 #else
3078 type->reverse_byte_order ? " byte_order = be;" : ""
3079 #endif
3080 );
3081 return ret;
3082 }
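/*
 * Example of the TSDL emitted by _lttng_integer_type_statedump() (illustrative
 * values): a native-endian, 32-bit, 32-bit-aligned, signed integer with no
 * encoding and base 10 is dumped as:
 *
 *   integer { size = 32; align = 32; signed = 1; encoding = none; base = 10; }
 */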
3083
3084 /*
3085 * Must be called with sessions_mutex held.
3086 */
3087 static
3088 int _lttng_struct_type_statedump(struct lttng_kernel_session *session,
3089 const struct lttng_kernel_type_struct *type,
3090 size_t nesting)
3091 {
3092 const char *prev_field_name = NULL;
3093 int ret;
3094 uint32_t i, nr_fields;
3095 unsigned int alignment;
3096
3097 ret = print_tabs(session, nesting);
3098 if (ret)
3099 return ret;
3100 ret = lttng_metadata_printf(session,
3101 "struct {\n");
3102 if (ret)
3103 return ret;
3104 nr_fields = type->nr_fields;
3105 for (i = 0; i < nr_fields; i++) {
3106 const struct lttng_kernel_event_field *iter_field;
3107
3108 iter_field = type->fields[i];
3109 ret = _lttng_field_statedump(session, iter_field, nesting + 1, &prev_field_name);
3110 if (ret)
3111 return ret;
3112 }
3113 ret = print_tabs(session, nesting);
3114 if (ret)
3115 return ret;
3116 alignment = type->alignment;
3117 if (alignment) {
3118 ret = lttng_metadata_printf(session,
3119 "} align(%u)",
3120 alignment);
3121 } else {
3122 ret = lttng_metadata_printf(session,
3123 "}");
3124 }
3125 return ret;
3126 }
3127
3128 /*
3129 * Must be called with sessions_mutex held.
3130 */
3131 static
3132 int _lttng_struct_field_statedump(struct lttng_kernel_session *session,
3133 const struct lttng_kernel_event_field *field,
3134 size_t nesting)
3135 {
3136 int ret;
3137
3138 ret = _lttng_struct_type_statedump(session,
3139 lttng_kernel_get_type_struct(field->type), nesting);
3140 if (ret)
3141 return ret;
3142 return lttng_field_name_statedump(session, field, nesting);
3143 }
3144
3145 /*
3146 * Must be called with sessions_mutex held.
3147 */
3148 static
3149 int _lttng_variant_type_statedump(struct lttng_kernel_session *session,
3150 const struct lttng_kernel_type_variant *type,
3151 size_t nesting,
3152 const char *prev_field_name)
3153 {
3154 const char *tag_name;
3155 int ret;
3156 uint32_t i, nr_choices;
3157
3158 tag_name = type->tag_name;
3159 if (!tag_name)
3160 tag_name = prev_field_name;
3161 if (!tag_name)
3162 return -EINVAL;
3163 /*
3164 * CTF 1.8 does not allow expressing nonzero variant alignment in a nestable way.
3165 */
3166 if (type->alignment != 0)
3167 return -EINVAL;
3168 ret = print_tabs(session, nesting);
3169 if (ret)
3170 return ret;
3171 ret = lttng_metadata_printf(session,
3172 "variant <_%s> {\n",
3173 tag_name);
3174 if (ret)
3175 return ret;
3176 nr_choices = type->nr_choices;
3177 for (i = 0; i < nr_choices; i++) {
3178 const struct lttng_kernel_event_field *iter_field;
3179
3180 iter_field = type->choices[i];
3181 ret = _lttng_field_statedump(session, iter_field, nesting + 1, NULL);
3182 if (ret)
3183 return ret;
3184 }
3185 ret = print_tabs(session, nesting);
3186 if (ret)
3187 return ret;
3188 ret = lttng_metadata_printf(session,
3189 "}");
3190 return ret;
3191 }
3192
3193 /*
3194 * Must be called with sessions_mutex held.
3195 */
3196 static
3197 int _lttng_variant_field_statedump(struct lttng_kernel_session *session,
3198 const struct lttng_kernel_event_field *field,
3199 size_t nesting,
3200 const char *prev_field_name)
3201 {
3202 int ret;
3203
3204 ret = _lttng_variant_type_statedump(session,
3205 lttng_kernel_get_type_variant(field->type), nesting,
3206 prev_field_name);
3207 if (ret)
3208 return ret;
3209 return lttng_field_name_statedump(session, field, nesting);
3210 }
3211
3212 /*
3213 * Must be called with sessions_mutex held.
3214 */
3215 static
3216 int _lttng_array_field_statedump(struct lttng_kernel_session *session,
3217 const struct lttng_kernel_event_field *field,
3218 size_t nesting)
3219 {
3220 int ret;
3221 const struct lttng_kernel_type_array *array_type;
3222 const struct lttng_kernel_type_common *elem_type;
3223
3224 array_type = lttng_kernel_get_type_array(field->type);
3225 WARN_ON_ONCE(!array_type);
3226
3227 if (array_type->alignment) {
3228 ret = print_tabs(session, nesting);
3229 if (ret)
3230 return ret;
3231 ret = lttng_metadata_printf(session,
3232 "struct { } align(%u) _%s_padding;\n",
3233 array_type->alignment * CHAR_BIT,
3234 field->name);
3235 if (ret)
3236 return ret;
3237 }
3238 /*
3239 * Nested compound types: only arrays of structures and variants are
3240 * currently supported.
3241 */
3242 elem_type = array_type->elem_type;
3243 switch (elem_type->type) {
3244 case lttng_kernel_type_integer:
3245 case lttng_kernel_type_struct:
3246 case lttng_kernel_type_variant:
3247 ret = _lttng_type_statedump(session, elem_type,
3248 array_type->encoding, nesting);
3249 if (ret)
3250 return ret;
3251 break;
3252
3253 default:
3254 return -EINVAL;
3255 }
3256 ret = lttng_metadata_printf(session,
3257 " _%s[%u];\n",
3258 field->name,
3259 array_type->length);
3260 return ret;
3261 }
3262
3263 /*
3264 * Must be called with sessions_mutex held.
3265 */
3266 static
3267 int _lttng_sequence_field_statedump(struct lttng_kernel_session *session,
3268 const struct lttng_kernel_event_field *field,
3269 size_t nesting,
3270 const char *prev_field_name)
3271 {
3272 int ret;
3273 const char *length_name;
3274 const struct lttng_kernel_type_sequence *sequence_type;
3275 const struct lttng_kernel_type_common *elem_type;
3276
3277 sequence_type = lttng_kernel_get_type_sequence(field->type);
3278 WARN_ON_ONCE(!sequence_type);
3279
3280 length_name = sequence_type->length_name;
3281 if (!length_name)
3282 length_name = prev_field_name;
3283 if (!length_name)
3284 return -EINVAL;
3285
3286 if (sequence_type->alignment) {
3287 ret = print_tabs(session, nesting);
3288 if (ret)
3289 return ret;
3290 ret = lttng_metadata_printf(session,
3291 "struct { } align(%u) _%s_padding;\n",
3292 sequence_type->alignment * CHAR_BIT,
3293 field->name);
3294 if (ret)
3295 return ret;
3296 }
3297
3298 /*
3299 * Nested compound types: only sequences of structures and variants are
3300 * currently supported.
3301 */
3302 elem_type = sequence_type->elem_type;
3303 switch (elem_type->type) {
3304 case lttng_kernel_type_integer:
3305 case lttng_kernel_type_struct:
3306 case lttng_kernel_type_variant:
3307 ret = _lttng_type_statedump(session, elem_type,
3308 sequence_type->encoding, nesting);
3309 if (ret)
3310 return ret;
3311 break;
3312
3313 default:
3314 return -EINVAL;
3315 }
3316 ret = lttng_metadata_printf(session,
3317 " _%s[ _%s ];\n",
3318 field->name,
3319 length_name);
3320 return ret;
3321 }
3322
3323 /*
3324 * Must be called with sessions_mutex held.
3325 */
3326 static
3327 int _lttng_enum_type_statedump(struct lttng_kernel_session *session,
3328 const struct lttng_kernel_type_enum *type,
3329 size_t nesting)
3330 {
3331 const struct lttng_kernel_enum_desc *enum_desc;
3332 const struct lttng_kernel_type_common *container_type;
3333 int ret;
3334 unsigned int i, nr_entries;
3335
3336 container_type = type->container_type;
3337 if (container_type->type != lttng_kernel_type_integer) {
3338 ret = -EINVAL;
3339 goto end;
3340 }
3341 enum_desc = type->desc;
3342 nr_entries = enum_desc->nr_entries;
3343
3344 ret = print_tabs(session, nesting);
3345 if (ret)
3346 goto end;
3347 ret = lttng_metadata_printf(session, "enum : ");
3348 if (ret)
3349 goto end;
3350 ret = _lttng_integer_type_statedump(session, lttng_kernel_get_type_integer(container_type),
3351 lttng_kernel_string_encoding_none, 0);
3352 if (ret)
3353 goto end;
3354 ret = lttng_metadata_printf(session, " {\n");
3355 if (ret)
3356 goto end;
3357 /* Dump all entries */
3358 for (i = 0; i < nr_entries; i++) {
3359 const struct lttng_kernel_enum_entry *entry = enum_desc->entries[i];
3360 int j, len;
3361
3362 ret = print_tabs(session, nesting + 1);
3363 if (ret)
3364 goto end;
3365 ret = lttng_metadata_printf(session,
3366 "\"");
3367 if (ret)
3368 goto end;
3369 len = strlen(entry->string);
3370 /* Escape the character '"' */
3371 for (j = 0; j < len; j++) {
3372 char c = entry->string[j];
3373
3374 switch (c) {
3375 case '"':
3376 ret = lttng_metadata_printf(session,
3377 "\\\"");
3378 break;
3379 case '\\':
3380 ret = lttng_metadata_printf(session,
3381 "\\\\");
3382 break;
3383 default:
3384 ret = lttng_metadata_printf(session,
3385 "%c", c);
3386 break;
3387 }
3388 if (ret)
3389 goto end;
3390 }
3391 ret = lttng_metadata_printf(session, "\"");
3392 if (ret)
3393 goto end;
3394
3395 if (entry->options.is_auto) {
3396 ret = lttng_metadata_printf(session, ",\n");
3397 if (ret)
3398 goto end;
3399 } else {
3400 ret = lttng_metadata_printf(session,
3401 " = ");
3402 if (ret)
3403 goto end;
3404 if (entry->start.signedness)
3405 ret = lttng_metadata_printf(session,
3406 "%lld", (long long) entry->start.value);
3407 else
3408 ret = lttng_metadata_printf(session,
3409 "%llu", entry->start.value);
3410 if (ret)
3411 goto end;
3412 if (entry->start.signedness == entry->end.signedness &&
3413 entry->start.value
3414 == entry->end.value) {
3415 ret = lttng_metadata_printf(session,
3416 ",\n");
3417 } else {
3418 if (entry->end.signedness) {
3419 ret = lttng_metadata_printf(session,
3420 " ... %lld,\n",
3421 (long long) entry->end.value);
3422 } else {
3423 ret = lttng_metadata_printf(session,
3424 " ... %llu,\n",
3425 entry->end.value);
3426 }
3427 }
3428 if (ret)
3429 goto end;
3430 }
3431 }
3432 ret = print_tabs(session, nesting);
3433 if (ret)
3434 goto end;
3435 ret = lttng_metadata_printf(session, "}");
3436 end:
3437 return ret;
3438 }
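/*
 * Illustrative TSDL output for a hypothetical two-entry enumeration backed by
 * an unsigned 32-bit container:
 *
 *   enum : integer { size = 32; align = 32; signed = 0; encoding = none; base = 10; } {
 *           "RUNNING" = 0,
 *           "BLOCKED" = 1 ... 3,
 *   }
 *
 * The trailing field name and ";" are added by the caller through
 * lttng_field_name_statedump().
 */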
3439
3440 /*
3441 * Must be called with sessions_mutex held.
3442 */
3443 static
3444 int _lttng_enum_field_statedump(struct lttng_kernel_session *session,
3445 const struct lttng_kernel_event_field *field,
3446 size_t nesting)
3447 {
3448 int ret;
3449 const struct lttng_kernel_type_enum *enum_type;
3450
3451 enum_type = lttng_kernel_get_type_enum(field->type);
3452 WARN_ON_ONCE(!enum_type);
3453 ret = _lttng_enum_type_statedump(session, enum_type, nesting);
3454 if (ret)
3455 return ret;
3456 return lttng_field_name_statedump(session, field, nesting);
3457 }
3458
3459 static
3460 int _lttng_integer_field_statedump(struct lttng_kernel_session *session,
3461 const struct lttng_kernel_event_field *field,
3462 size_t nesting)
3463 {
3464 int ret;
3465
3466 ret = _lttng_integer_type_statedump(session, lttng_kernel_get_type_integer(field->type),
3467 lttng_kernel_string_encoding_none, nesting);
3468 if (ret)
3469 return ret;
3470 return lttng_field_name_statedump(session, field, nesting);
3471 }
3472
3473 static
3474 int _lttng_string_type_statedump(struct lttng_kernel_session *session,
3475 const struct lttng_kernel_type_string *type,
3476 size_t nesting)
3477 {
3478 int ret;
3479
3480 /* Default encoding is UTF8 */
3481 ret = print_tabs(session, nesting);
3482 if (ret)
3483 return ret;
3484 ret = lttng_metadata_printf(session,
3485 "string%s",
3486 type->encoding == lttng_kernel_string_encoding_ASCII ?
3487 " { encoding = ASCII; }" : "");
3488 return ret;
3489 }
3490
3491 static
3492 int _lttng_string_field_statedump(struct lttng_kernel_session *session,
3493 const struct lttng_kernel_event_field *field,
3494 size_t nesting)
3495 {
3496 const struct lttng_kernel_type_string *string_type;
3497 int ret;
3498
3499 string_type = lttng_kernel_get_type_string(field->type);
3500 WARN_ON_ONCE(!string_type);
3501 ret = _lttng_string_type_statedump(session, string_type, nesting);
3502 if (ret)
3503 return ret;
3504 return lttng_field_name_statedump(session, field, nesting);
3505 }
3506
3507 /*
3508 * Must be called with sessions_mutex held.
3509 */
3510 static
3511 int _lttng_type_statedump(struct lttng_kernel_session *session,
3512 const struct lttng_kernel_type_common *type,
3513 enum lttng_kernel_string_encoding parent_encoding,
3514 size_t nesting)
3515 {
3516 int ret = 0;
3517
3518 switch (type->type) {
3519 case lttng_kernel_type_integer:
3520 ret = _lttng_integer_type_statedump(session,
3521 lttng_kernel_get_type_integer(type),
3522 parent_encoding, nesting);
3523 break;
3524 case lttng_kernel_type_enum:
3525 ret = _lttng_enum_type_statedump(session,
3526 lttng_kernel_get_type_enum(type),
3527 nesting);
3528 break;
3529 case lttng_kernel_type_string:
3530 ret = _lttng_string_type_statedump(session,
3531 lttng_kernel_get_type_string(type),
3532 nesting);
3533 break;
3534 case lttng_kernel_type_struct:
3535 ret = _lttng_struct_type_statedump(session,
3536 lttng_kernel_get_type_struct(type),
3537 nesting);
3538 break;
3539 case lttng_kernel_type_variant:
3540 ret = _lttng_variant_type_statedump(session,
3541 lttng_kernel_get_type_variant(type),
3542 nesting, NULL);
3543 break;
3544
3545 /* Nested arrays and sequences are not supported yet. */
3546 case lttng_kernel_type_array:
3547 case lttng_kernel_type_sequence:
3548 default:
3549 WARN_ON_ONCE(1);
3550 return -EINVAL;
3551 }
3552 return ret;
3553 }
3554
3555 /*
3556 * Must be called with sessions_mutex held.
3557 */
3558 static
3559 int _lttng_field_statedump(struct lttng_kernel_session *session,
3560 const struct lttng_kernel_event_field *field,
3561 size_t nesting,
3562 const char **prev_field_name_p)
3563 {
3564 const char *prev_field_name = NULL;
3565 int ret = 0;
3566
3567 if (prev_field_name_p)
3568 prev_field_name = *prev_field_name_p;
3569 switch (field->type->type) {
3570 case lttng_kernel_type_integer:
3571 ret = _lttng_integer_field_statedump(session, field, nesting);
3572 break;
3573 case lttng_kernel_type_enum:
3574 ret = _lttng_enum_field_statedump(session, field, nesting);
3575 break;
3576 case lttng_kernel_type_string:
3577 ret = _lttng_string_field_statedump(session, field, nesting);
3578 break;
3579 case lttng_kernel_type_struct:
3580 ret = _lttng_struct_field_statedump(session, field, nesting);
3581 break;
3582 case lttng_kernel_type_array:
3583 ret = _lttng_array_field_statedump(session, field, nesting);
3584 break;
3585 case lttng_kernel_type_sequence:
3586 ret = _lttng_sequence_field_statedump(session, field, nesting, prev_field_name);
3587 break;
3588 case lttng_kernel_type_variant:
3589 ret = _lttng_variant_field_statedump(session, field, nesting, prev_field_name);
3590 break;
3591
3592 default:
3593 WARN_ON_ONCE(1);
3594 return -EINVAL;
3595 }
3596 if (prev_field_name_p)
3597 *prev_field_name_p = field->name;
3598 return ret;
3599 }
3600
3601 static
3602 int _lttng_context_metadata_statedump(struct lttng_kernel_session *session,
3603 struct lttng_kernel_ctx *ctx)
3604 {
3605 const char *prev_field_name = NULL;
3606 int ret = 0;
3607 int i;
3608
3609 if (!ctx)
3610 return 0;
3611 for (i = 0; i < ctx->nr_fields; i++) {
3612 const struct lttng_kernel_ctx_field *field = &ctx->fields[i];
3613
3614 ret = _lttng_field_statedump(session, field->event_field, 2, &prev_field_name);
3615 if (ret)
3616 return ret;
3617 }
3618 return ret;
3619 }
3620
3621 static
3622 int _lttng_fields_metadata_statedump(struct lttng_kernel_session *session,
3623 struct lttng_kernel_event_recorder *event_recorder)
3624 {
3625 const char *prev_field_name = NULL;
3626 const struct lttng_kernel_event_desc *desc = event_recorder->priv->parent.desc;
3627 int ret = 0;
3628 int i;
3629
3630 for (i = 0; i < desc->tp_class->nr_fields; i++) {
3631 const struct lttng_kernel_event_field *field = desc->tp_class->fields[i];
3632
3633 ret = _lttng_field_statedump(session, field, 2, &prev_field_name);
3634 if (ret)
3635 return ret;
3636 }
3637 return ret;
3638 }
3639
3640 /*
3641 * Must be called with sessions_mutex held.
3642 * The entire event metadata is printed as a single atomic metadata
3643 * transaction.
3644 */
3645 static
3646 int _lttng_event_metadata_statedump(struct lttng_kernel_session *session,
3647 struct lttng_kernel_channel_buffer *chan,
3648 struct lttng_kernel_event_recorder *event_recorder)
3649 {
3650 int ret = 0;
3651
3652 if (event_recorder->priv->metadata_dumped || !LTTNG_READ_ONCE(session->active))
3653 return 0;
3654 if (chan->priv->channel_type == METADATA_CHANNEL)
3655 return 0;
3656
3657 lttng_metadata_begin(session);
3658
3659 ret = lttng_metadata_printf(session,
3660 "event {\n"
3661 " name = \"%s\";\n"
3662 " id = %u;\n"
3663 " stream_id = %u;\n",
3664 event_recorder->priv->parent.desc->event_name,
3665 event_recorder->priv->id,
3666 event_recorder->chan->priv->id);
3667 if (ret)
3668 goto end;
3669
3670 ret = lttng_metadata_printf(session,
3671 " fields := struct {\n"
3672 );
3673 if (ret)
3674 goto end;
3675
3676 ret = _lttng_fields_metadata_statedump(session, event_recorder);
3677 if (ret)
3678 goto end;
3679
3680 /*
3681 * LTTng space reservation can only reserve multiples of the
3682 * byte size.
3683 */
3684 ret = lttng_metadata_printf(session,
3685 " };\n"
3686 "};\n\n");
3687 if (ret)
3688 goto end;
3689
3690 event_recorder->priv->metadata_dumped = 1;
3691 end:
3692 lttng_metadata_end(session);
3693 return ret;
3695 }
3696
3697 /*
3698 * Must be called with sessions_mutex held.
3699 * The entire channel metadata is printed as a single atomic metadata
3700 * transaction.
3701 */
3702 static
3703 int _lttng_channel_metadata_statedump(struct lttng_kernel_session *session,
3704 struct lttng_kernel_channel_buffer *chan)
3705 {
3706 int ret = 0;
3707
3708 if (chan->priv->metadata_dumped || !LTTNG_READ_ONCE(session->active))
3709 return 0;
3710
3711 if (chan->priv->channel_type == METADATA_CHANNEL)
3712 return 0;
3713
3714 lttng_metadata_begin(session);
3715
3716 WARN_ON_ONCE(!chan->priv->header_type);
3717 ret = lttng_metadata_printf(session,
3718 "stream {\n"
3719 " id = %u;\n"
3720 " event.header := %s;\n"
3721 " packet.context := struct packet_context;\n",
3722 chan->priv->id,
3723 chan->priv->header_type == 1 ? "struct event_header_compact" :
3724 "struct event_header_large");
3725 if (ret)
3726 goto end;
3727
3728 if (chan->priv->ctx) {
3729 ret = lttng_metadata_printf(session,
3730 " event.context := struct {\n");
3731 if (ret)
3732 goto end;
3733 }
3734 ret = _lttng_context_metadata_statedump(session, chan->priv->ctx);
3735 if (ret)
3736 goto end;
3737 if (chan->priv->ctx) {
3738 ret = lttng_metadata_printf(session,
3739 " };\n");
3740 if (ret)
3741 goto end;
3742 }
3743
3744 ret = lttng_metadata_printf(session,
3745 "};\n\n");
3746
3747 chan->priv->metadata_dumped = 1;
3748 end:
3749 lttng_metadata_end(session);
3750 return ret;
3751 }
3752
3753 /*
3754 * Must be called with sessions_mutex held.
3755 */
3756 static
3757 int _lttng_stream_packet_context_declare(struct lttng_kernel_session *session)
3758 {
3759 return lttng_metadata_printf(session,
3760 "struct packet_context {\n"
3761 " uint64_clock_monotonic_t timestamp_begin;\n"
3762 " uint64_clock_monotonic_t timestamp_end;\n"
3763 " uint64_t content_size;\n"
3764 " uint64_t packet_size;\n"
3765 " uint64_t packet_seq_num;\n"
3766 " unsigned long events_discarded;\n"
3767 " uint32_t cpu_id;\n"
3768 "};\n\n"
3769 );
3770 }
3771
3772 /*
3773 * Compact header:
3774 * id: range: 0 - 30.
3775 * id 31 is reserved to indicate an extended header.
3776 *
3777 * Large header:
3778 * id: range: 0 - 65534.
3779 * id 65535 is reserved to indicate an extended header.
3780 *
3781 * Must be called with sessions_mutex held.
3782 */
3783 static
3784 int _lttng_event_header_declare(struct lttng_kernel_session *session)
3785 {
3786 return lttng_metadata_printf(session,
3787 "struct event_header_compact {\n"
3788 " enum : uint5_t { compact = 0 ... 30, extended = 31 } id;\n"
3789 " variant <id> {\n"
3790 " struct {\n"
3791 " uint27_clock_monotonic_t timestamp;\n"
3792 " } compact;\n"
3793 " struct {\n"
3794 " uint32_t id;\n"
3795 " uint64_clock_monotonic_t timestamp;\n"
3796 " } extended;\n"
3797 " } v;\n"
3798 "} align(%u);\n"
3799 "\n"
3800 "struct event_header_large {\n"
3801 " enum : uint16_t { compact = 0 ... 65534, extended = 65535 } id;\n"
3802 " variant <id> {\n"
3803 " struct {\n"
3804 " uint32_clock_monotonic_t timestamp;\n"
3805 " } compact;\n"
3806 " struct {\n"
3807 " uint32_t id;\n"
3808 " uint64_clock_monotonic_t timestamp;\n"
3809 " } extended;\n"
3810 " } v;\n"
3811 "} align(%u);\n\n",
3812 lttng_alignof(uint32_t) * CHAR_BIT,
3813 lttng_alignof(uint16_t) * CHAR_BIT
3814 );
3815 }
3816
3817 /*
3818 * Approximation of NTP time of day to clock monotonic correlation,
3819 * taken at start of trace.
3820 * Yes, this is only an approximation. Yes, we can (and will) do better
3821 * in future versions.
3822  * This function may return a negative offset, which can happen if the
3823  * system sets the REALTIME clock to 0 after boot.
3824  *
3825  * Use a 64-bit timespec on kernels that provide it; this makes 32-bit
3826  * architectures y2038 compliant.
3827 */
3828 static
3829 int64_t measure_clock_offset(void)
3830 {
3831 uint64_t monotonic_avg, monotonic[2], realtime;
3832 uint64_t tcf = trace_clock_freq();
3833 int64_t offset;
3834 unsigned long flags;
3835 #ifdef LTTNG_KERNEL_HAS_TIMESPEC64
3836 struct timespec64 rts = { 0, 0 };
3837 #else
3838 struct timespec rts = { 0, 0 };
3839 #endif
3840
3841 /* Disable interrupts to increase correlation precision. */
3842 local_irq_save(flags);
3843 monotonic[0] = trace_clock_read64();
3844 #ifdef LTTNG_KERNEL_HAS_TIMESPEC64
3845 ktime_get_real_ts64(&rts);
3846 #else
3847 getnstimeofday(&rts);
3848 #endif
3849 monotonic[1] = trace_clock_read64();
3850 local_irq_restore(flags);
3851
3852 monotonic_avg = (monotonic[0] + monotonic[1]) >> 1;
3853 realtime = (uint64_t) rts.tv_sec * tcf;
3854 if (tcf == NSEC_PER_SEC) {
3855 realtime += rts.tv_nsec;
3856 } else {
3857 uint64_t n = rts.tv_nsec * tcf;
3858
3859 do_div(n, NSEC_PER_SEC);
3860 realtime += n;
3861 }
3862 offset = (int64_t) realtime - monotonic_avg;
3863 return offset;
3864 }
3865
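/*
 * Print a string escaped for use as a CTF metadata string literal:
 * newline is emitted as "\n", and backslash and double-quote characters
 * are prefixed with a backslash. Must be called with sessions_mutex held.
 */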
3866 static
3867 int print_escaped_ctf_string(struct lttng_kernel_session *session, const char *string)
3868 {
3869 int ret = 0;
3870 size_t i;
3871 char cur;
3872
3873 i = 0;
3874 cur = string[i];
3875 while (cur != '\0') {
3876 switch (cur) {
3877 case '\n':
3878 ret = lttng_metadata_printf(session, "%s", "\\n");
3879 break;
3880 case '\\':
3881 case '"':
3882 ret = lttng_metadata_printf(session, "%c", '\\');
3883 if (ret)
3884 goto error;
3885 /* We still print the current char */
3886 lttng_fallthrough;
3887 default:
3888 ret = lttng_metadata_printf(session, "%c", cur);
3889 break;
3890 }
3891
3892 if (ret)
3893 goto error;
3894
3895 cur = string[++i];
3896 }
3897 error:
3898 return ret;
3899 }
3900
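/*
 * Print a metadata assignment of the form: field = "value"; with the
 * value escaped as a CTF string literal. Must be called with
 * sessions_mutex held.
 */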
3901 static
3902 int print_metadata_escaped_field(struct lttng_kernel_session *session, const char *field,
3903 const char *field_value)
3904 {
3905 int ret;
3906
3907 ret = lttng_metadata_printf(session, " %s = \"", field);
3908 if (ret)
3909 goto error;
3910
3911 ret = print_escaped_ctf_string(session, field_value);
3912 if (ret)
3913 goto error;
3914
3915 ret = lttng_metadata_printf(session, "\";\n");
3916
3917 error:
3918 return ret;
3919 }
3920
3921 /*
3922 * Output metadata into this session's metadata buffers.
3923 * Must be called with sessions_mutex held.
3924 */
3925 static
3926 int _lttng_session_metadata_statedump(struct lttng_kernel_session *session)
3927 {
3928 unsigned char *uuid_c = session->priv->uuid.b;
3929 unsigned char uuid_s[37], clock_uuid_s[BOOT_ID_LEN];
3930 const char *product_uuid;
3931 struct lttng_kernel_channel_buffer_private *chan_priv;
3932 struct lttng_kernel_event_recorder_private *event_recorder_priv;
3933 int ret = 0;
3934
3935 if (!LTTNG_READ_ONCE(session->active))
3936 return 0;
3937
3938 lttng_metadata_begin(session);
3939
3940 if (session->priv->metadata_dumped)
3941 goto skip_session;
3942
3943 snprintf(uuid_s, sizeof(uuid_s),
3944 "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
3945 uuid_c[0], uuid_c[1], uuid_c[2], uuid_c[3],
3946 uuid_c[4], uuid_c[5], uuid_c[6], uuid_c[7],
3947 uuid_c[8], uuid_c[9], uuid_c[10], uuid_c[11],
3948 uuid_c[12], uuid_c[13], uuid_c[14], uuid_c[15]);
3949
3950 ret = lttng_metadata_printf(session,
3951 "typealias integer { size = 8; align = %u; signed = false; } := uint8_t;\n"
3952 "typealias integer { size = 16; align = %u; signed = false; } := uint16_t;\n"
3953 "typealias integer { size = 32; align = %u; signed = false; } := uint32_t;\n"
3954 "typealias integer { size = 64; align = %u; signed = false; } := uint64_t;\n"
3955 "typealias integer { size = %u; align = %u; signed = false; } := unsigned long;\n"
3956 "typealias integer { size = 5; align = 1; signed = false; } := uint5_t;\n"
3957 "typealias integer { size = 27; align = 1; signed = false; } := uint27_t;\n"
3958 "\n"
3959 "trace {\n"
3960 " major = %u;\n"
3961 " minor = %u;\n"
3962 " uuid = \"%s\";\n"
3963 " byte_order = %s;\n"
3964 " packet.header := struct {\n"
3965 " uint32_t magic;\n"
3966 " uint8_t uuid[16];\n"
3967 " uint32_t stream_id;\n"
3968 " uint64_t stream_instance_id;\n"
3969 " };\n"
3970 "};\n\n",
3971 lttng_alignof(uint8_t) * CHAR_BIT,
3972 lttng_alignof(uint16_t) * CHAR_BIT,
3973 lttng_alignof(uint32_t) * CHAR_BIT,
3974 lttng_alignof(uint64_t) * CHAR_BIT,
3975 sizeof(unsigned long) * CHAR_BIT,
3976 lttng_alignof(unsigned long) * CHAR_BIT,
3977 CTF_SPEC_MAJOR,
3978 CTF_SPEC_MINOR,
3979 uuid_s,
3980 #if __BYTE_ORDER == __BIG_ENDIAN
3981 "be"
3982 #else
3983 "le"
3984 #endif
3985 );
3986 if (ret)
3987 goto end;
3988
3989 ret = lttng_metadata_printf(session,
3990 "env {\n"
3991 " hostname = \"%s\";\n"
3992 " domain = \"kernel\";\n"
3993 " sysname = \"%s\";\n"
3994 " kernel_release = \"%s\";\n"
3995 " kernel_version = \"%s\";\n"
3996 " tracer_name = \"lttng-modules\";\n"
3997 " tracer_major = %d;\n"
3998 " tracer_minor = %d;\n"
3999 " tracer_patchlevel = %d;\n"
4000 " trace_buffering_scheme = \"global\";\n",
4001 current->nsproxy->uts_ns->name.nodename,
4002 utsname()->sysname,
4003 utsname()->release,
4004 utsname()->version,
4005 LTTNG_MODULES_MAJOR_VERSION,
4006 LTTNG_MODULES_MINOR_VERSION,
4007 LTTNG_MODULES_PATCHLEVEL_VERSION
4008 );
4009 if (ret)
4010 goto end;
4011
4012 ret = print_metadata_escaped_field(session, "trace_name", session->priv->name);
4013 if (ret)
4014 goto end;
4015 ret = print_metadata_escaped_field(session, "trace_creation_datetime",
4016 session->priv->creation_time);
4017 if (ret)
4018 goto end;
4019
4020 /* Add the product UUID to the 'env' section */
4021 product_uuid = dmi_get_system_info(DMI_PRODUCT_UUID);
4022 if (product_uuid) {
4023 ret = lttng_metadata_printf(session,
4024 " product_uuid = \"%s\";\n",
4025 product_uuid
4026 );
4027 if (ret)
4028 goto end;
4029 }
4030
4031 /* Close the 'env' section */
4032 ret = lttng_metadata_printf(session, "};\n\n");
4033 if (ret)
4034 goto end;
4035
4036 ret = lttng_metadata_printf(session,
4037 "clock {\n"
4038 " name = \"%s\";\n",
4039 trace_clock_name()
4040 );
4041 if (ret)
4042 goto end;
4043
4044 if (!trace_clock_uuid(clock_uuid_s)) {
4045 ret = lttng_metadata_printf(session,
4046 " uuid = \"%s\";\n",
4047 clock_uuid_s
4048 );
4049 if (ret)
4050 goto end;
4051 }
4052
4053 ret = lttng_metadata_printf(session,
4054 " description = \"%s\";\n"
4055 " freq = %llu; /* Frequency, in Hz */\n"
4056 " /* clock value offset from Epoch is: offset * (1/freq) */\n"
4057 " offset = %lld;\n"
4058 "};\n\n",
4059 trace_clock_description(),
4060 (unsigned long long) trace_clock_freq(),
4061 (long long) measure_clock_offset()
4062 );
4063 if (ret)
4064 goto end;
4065
4066 ret = lttng_metadata_printf(session,
4067 "typealias integer {\n"
4068 " size = 27; align = 1; signed = false;\n"
4069 " map = clock.%s.value;\n"
4070 "} := uint27_clock_monotonic_t;\n"
4071 "\n"
4072 "typealias integer {\n"
4073 " size = 32; align = %u; signed = false;\n"
4074 " map = clock.%s.value;\n"
4075 "} := uint32_clock_monotonic_t;\n"
4076 "\n"
4077 "typealias integer {\n"
4078 " size = 64; align = %u; signed = false;\n"
4079 " map = clock.%s.value;\n"
4080 "} := uint64_clock_monotonic_t;\n\n",
4081 trace_clock_name(),
4082 lttng_alignof(uint32_t) * CHAR_BIT,
4083 trace_clock_name(),
4084 lttng_alignof(uint64_t) * CHAR_BIT,
4085 trace_clock_name()
4086 );
4087 if (ret)
4088 goto end;
4089
4090 ret = _lttng_stream_packet_context_declare(session);
4091 if (ret)
4092 goto end;
4093
4094 ret = _lttng_event_header_declare(session);
4095 if (ret)
4096 goto end;
4097
4098 skip_session:
4099 list_for_each_entry(chan_priv, &session->priv->chan, node) {
4100 ret = _lttng_channel_metadata_statedump(session, chan_priv->pub);
4101 if (ret)
4102 goto end;
4103 }
4104
4105 list_for_each_entry(event_recorder_priv, &session->priv->events, parent.node) {
4106 ret = _lttng_event_metadata_statedump(session, event_recorder_priv->pub->chan,
4107 event_recorder_priv->pub);
4108 if (ret)
4109 goto end;
4110 }
4111 session->priv->metadata_dumped = 1;
4112 end:
4113 lttng_metadata_end(session);
4114 return ret;
4115 }
4116
4117 /**
4118 * lttng_transport_register - LTT transport registration
4119 * @transport: transport structure
4120 *
4121  * Registers a transport which can be used as an output to extract trace data
4122  * out of LTTng. The module calling this registration function must ensure that no
4123 * trap-inducing code will be executed by the transport functions. E.g.
4124 * vmalloc_sync_mappings() must be called between a vmalloc and the moment the memory
4125 * is made visible to the transport function. This registration acts as a
4126 * vmalloc_sync_mappings. Therefore, only if the module allocates virtual memory
4127 * after its registration must it synchronize the TLBs.
4128 */
4129 void lttng_transport_register(struct lttng_transport *transport)
4130 {
4131 /*
4132 * Make sure no page fault can be triggered by the module about to be
4133 * registered. We deal with this here so we don't have to call
4134 * vmalloc_sync_mappings() in each module's init.
4135 */
4136 wrapper_vmalloc_sync_mappings();
4137
4138 mutex_lock(&sessions_mutex);
4139 list_add_tail(&transport->node, &lttng_transport_list);
4140 mutex_unlock(&sessions_mutex);
4141 }
4142 EXPORT_SYMBOL_GPL(lttng_transport_register);
4143
4144 /**
4145 * lttng_transport_unregister - LTT transport unregistration
4146 * @transport: transport structure
4147 */
4148 void lttng_transport_unregister(struct lttng_transport *transport)
4149 {
4150 mutex_lock(&sessions_mutex);
4151 list_del(&transport->node);
4152 mutex_unlock(&sessions_mutex);
4153 }
4154 EXPORT_SYMBOL_GPL(lttng_transport_unregister);
4155
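/**
 * lttng_counter_transport_register - LTT counter transport registration
 * @transport: transport structure
 *
 * Registers a counter transport which can be used to back LTTng counters.
 * Like lttng_transport_register(), this registration acts as a
 * vmalloc_sync_mappings().
 */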
4156 void lttng_counter_transport_register(struct lttng_counter_transport *transport)
4157 {
4158 /*
4159 * Make sure no page fault can be triggered by the module about to be
4160 * registered. We deal with this here so we don't have to call
4161 * vmalloc_sync_mappings() in each module's init.
4162 */
4163 wrapper_vmalloc_sync_mappings();
4164
4165 mutex_lock(&sessions_mutex);
4166 list_add_tail(&transport->node, &lttng_counter_transport_list);
4167 mutex_unlock(&sessions_mutex);
4168 }
4169 EXPORT_SYMBOL_GPL(lttng_counter_transport_register);
4170
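/**
 * lttng_counter_transport_unregister - LTT counter transport unregistration
 * @transport: transport structure
 */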
4171 void lttng_counter_transport_unregister(struct lttng_counter_transport *transport)
4172 {
4173 mutex_lock(&sessions_mutex);
4174 list_del(&transport->node);
4175 mutex_unlock(&sessions_mutex);
4176 }
4177 EXPORT_SYMBOL_GPL(lttng_counter_transport_unregister);
4178
4179 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
4180
4181 enum cpuhp_state lttng_hp_prepare;
4182 enum cpuhp_state lttng_hp_online;
4183
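/*
 * CPU hotplug "prepare" callback: dispatch to the LTTng component owning
 * the hlist node. Only the ring buffer backend has prepare-stage work.
 */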
4184 static int lttng_hotplug_prepare(unsigned int cpu, struct hlist_node *node)
4185 {
4186 struct lttng_cpuhp_node *lttng_node;
4187
4188 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4189 switch (lttng_node->component) {
4190 case LTTNG_RING_BUFFER_FRONTEND:
4191 return 0;
4192 case LTTNG_RING_BUFFER_BACKEND:
4193 return lttng_cpuhp_rb_backend_prepare(cpu, lttng_node);
4194 case LTTNG_RING_BUFFER_ITER:
4195 return 0;
4196 case LTTNG_CONTEXT_PERF_COUNTERS:
4197 return 0;
4198 default:
4199 return -EINVAL;
4200 }
4201 }
4202
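/*
 * CPU hotplug "dead" callback: the ring buffer frontend and the perf
 * counter context release their per-CPU resources here.
 */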
4203 static int lttng_hotplug_dead(unsigned int cpu, struct hlist_node *node)
4204 {
4205 struct lttng_cpuhp_node *lttng_node;
4206
4207 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4208 switch (lttng_node->component) {
4209 case LTTNG_RING_BUFFER_FRONTEND:
4210 return lttng_cpuhp_rb_frontend_dead(cpu, lttng_node);
4211 case LTTNG_RING_BUFFER_BACKEND:
4212 return 0;
4213 case LTTNG_RING_BUFFER_ITER:
4214 return 0;
4215 case LTTNG_CONTEXT_PERF_COUNTERS:
4216 return lttng_cpuhp_perf_counter_dead(cpu, lttng_node);
4217 default:
4218 return -EINVAL;
4219 }
4220 }
4221
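/*
 * CPU hotplug "online" callback: dispatch to the ring buffer frontend,
 * the ring buffer iterator and the perf counter context.
 */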
4222 static int lttng_hotplug_online(unsigned int cpu, struct hlist_node *node)
4223 {
4224 struct lttng_cpuhp_node *lttng_node;
4225
4226 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4227 switch (lttng_node->component) {
4228 case LTTNG_RING_BUFFER_FRONTEND:
4229 return lttng_cpuhp_rb_frontend_online(cpu, lttng_node);
4230 case LTTNG_RING_BUFFER_BACKEND:
4231 return 0;
4232 case LTTNG_RING_BUFFER_ITER:
4233 return lttng_cpuhp_rb_iter_online(cpu, lttng_node);
4234 case LTTNG_CONTEXT_PERF_COUNTERS:
4235 return lttng_cpuhp_perf_counter_online(cpu, lttng_node);
4236 default:
4237 return -EINVAL;
4238 }
4239 }
4240
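/*
 * CPU hotplug "offline" callback: only the ring buffer frontend has work
 * to do when a CPU goes offline.
 */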
4241 static int lttng_hotplug_offline(unsigned int cpu, struct hlist_node *node)
4242 {
4243 struct lttng_cpuhp_node *lttng_node;
4244
4245 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4246 switch (lttng_node->component) {
4247 case LTTNG_RING_BUFFER_FRONTEND:
4248 return lttng_cpuhp_rb_frontend_offline(cpu, lttng_node);
4249 case LTTNG_RING_BUFFER_BACKEND:
4250 return 0;
4251 case LTTNG_RING_BUFFER_ITER:
4252 return 0;
4253 case LTTNG_CONTEXT_PERF_COUNTERS:
4254 return 0;
4255 default:
4256 return -EINVAL;
4257 }
4258 }
4259
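/*
 * Allocate the dynamic multi-instance CPU hotplug states ("lttng:prepare"
 * and "lttng:online") and share them with the ring buffer.
 */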
4260 static int __init lttng_init_cpu_hotplug(void)
4261 {
4262 int ret;
4263
4264 ret = cpuhp_setup_state_multi(CPUHP_BP_PREPARE_DYN, "lttng:prepare",
4265 lttng_hotplug_prepare,
4266 lttng_hotplug_dead);
4267 if (ret < 0) {
4268 return ret;
4269 }
4270 lttng_hp_prepare = ret;
4271 lttng_rb_set_hp_prepare(ret);
4272
4273 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "lttng:online",
4274 lttng_hotplug_online,
4275 lttng_hotplug_offline);
4276 if (ret < 0) {
4277 cpuhp_remove_multi_state(lttng_hp_prepare);
4278 lttng_hp_prepare = 0;
4279 return ret;
4280 }
4281 lttng_hp_online = ret;
4282 lttng_rb_set_hp_online(ret);
4283
4284 return 0;
4285 }
4286
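/*
 * Remove the CPU hotplug states allocated by lttng_init_cpu_hotplug().
 */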
4287 static void __exit lttng_exit_cpu_hotplug(void)
4288 {
4289 lttng_rb_set_hp_online(0);
4290 cpuhp_remove_multi_state(lttng_hp_online);
4291 lttng_rb_set_hp_prepare(0);
4292 cpuhp_remove_multi_state(lttng_hp_prepare);
4293 }
4294
4295 #else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
4296 static int lttng_init_cpu_hotplug(void)
4297 {
4298 return 0;
4299 }
4300 static void lttng_exit_cpu_hotplug(void)
4301 {
4302 }
4303 #endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
4304
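/*
 * Module entry point: initialize the wrappers, probes, contexts, tracepoint
 * support, the event caches, the LTTng ABI, the logger and CPU hotplug
 * support.
 */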
4305 static int __init lttng_events_init(void)
4306 {
4307 int ret;
4308
4309 ret = wrapper_lttng_fixup_sig(THIS_MODULE);
4310 if (ret)
4311 return ret;
4312 ret = wrapper_get_pfnblock_flags_mask_init();
4313 if (ret)
4314 return ret;
4315 ret = wrapper_get_pageblock_flags_mask_init();
4316 if (ret)
4317 return ret;
4318 ret = lttng_probes_init();
4319 if (ret)
4320 return ret;
4321 ret = lttng_context_init();
4322 if (ret)
4323 return ret;
4324 ret = lttng_tracepoint_init();
4325 if (ret)
4326 goto error_tp;
4327 event_recorder_cache = KMEM_CACHE(lttng_kernel_event_recorder, 0);
4328 if (!event_recorder_cache) {
4329 ret = -ENOMEM;
4330 goto error_kmem_event_recorder;
4331 }
4332 event_recorder_private_cache = KMEM_CACHE(lttng_kernel_event_recorder_private, 0);
4333 if (!event_recorder_private_cache) {
4334 ret = -ENOMEM;
4335 goto error_kmem_event_recorder_private;
4336 }
4337 event_notifier_cache = KMEM_CACHE(lttng_kernel_event_notifier, 0);
4338 if (!event_notifier_cache) {
4339 ret = -ENOMEM;
4340 goto error_kmem_event_notifier;
4341 }
4342 event_notifier_private_cache = KMEM_CACHE(lttng_kernel_event_notifier_private, 0);
4343 if (!event_notifier_private_cache) {
4344 ret = -ENOMEM;
4345 goto error_kmem_event_notifier_private;
4346 }
4347 ret = lttng_abi_init();
4348 if (ret)
4349 goto error_abi;
4350 ret = lttng_logger_init();
4351 if (ret)
4352 goto error_logger;
4353 ret = lttng_init_cpu_hotplug();
4354 if (ret)
4355 goto error_hotplug;
4356 printk(KERN_NOTICE "LTTng: Loaded modules v%s.%s.%s%s (%s)%s%s\n",
4357 __stringify(LTTNG_MODULES_MAJOR_VERSION),
4358 __stringify(LTTNG_MODULES_MINOR_VERSION),
4359 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
4360 LTTNG_MODULES_EXTRAVERSION,
4361 LTTNG_VERSION_NAME,
4362 #ifdef LTTNG_EXTRA_VERSION_GIT
4363 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
4364 #else
4365 "",
4366 #endif
4367 #ifdef LTTNG_EXTRA_VERSION_NAME
4368 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
4369 #else
4370 "");
4371 #endif
4372 #ifdef CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM
4373 printk(KERN_NOTICE "LTTng: Experimental bitwise enum enabled.\n");
4374 #endif /* CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM */
4375 return 0;
4376
4377 error_hotplug:
4378 lttng_logger_exit();
4379 error_logger:
4380 lttng_abi_exit();
4381 error_abi:
4382 kmem_cache_destroy(event_notifier_private_cache);
4383 error_kmem_event_notifier_private:
4384 kmem_cache_destroy(event_notifier_cache);
4385 error_kmem_event_notifier:
4386 kmem_cache_destroy(event_recorder_private_cache);
4387 error_kmem_event_recorder_private:
4388 kmem_cache_destroy(event_recorder_cache);
4389 error_kmem_event_recorder:
4390 lttng_tracepoint_exit();
4391 error_tp:
4392 lttng_context_exit();
4393 printk(KERN_NOTICE "LTTng: Failed to load modules v%s.%s.%s%s (%s)%s%s\n",
4394 __stringify(LTTNG_MODULES_MAJOR_VERSION),
4395 __stringify(LTTNG_MODULES_MINOR_VERSION),
4396 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
4397 LTTNG_MODULES_EXTRAVERSION,
4398 LTTNG_VERSION_NAME,
4399 #ifdef LTTNG_EXTRA_VERSION_GIT
4400 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
4401 #else
4402 "",
4403 #endif
4404 #ifdef LTTNG_EXTRA_VERSION_NAME
4405 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
4406 #else
4407 "");
4408 #endif
4409 return ret;
4410 }
4411
4412 module_init(lttng_events_init);
4413
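/*
 * Module exit point: tear down CPU hotplug support, the logger and the ABI,
 * destroy any remaining sessions, then release the event caches, tracepoint
 * and context support.
 */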
4414 static void __exit lttng_events_exit(void)
4415 {
4416 struct lttng_kernel_session_private *session_priv, *tmpsession_priv;
4417
4418 lttng_exit_cpu_hotplug();
4419 lttng_logger_exit();
4420 lttng_abi_exit();
4421 list_for_each_entry_safe(session_priv, tmpsession_priv, &sessions, list)
4422 lttng_session_destroy(session_priv->pub);
4423 kmem_cache_destroy(event_recorder_cache);
4424 kmem_cache_destroy(event_recorder_private_cache);
4425 kmem_cache_destroy(event_notifier_cache);
4426 kmem_cache_destroy(event_notifier_private_cache);
4427 lttng_tracepoint_exit();
4428 lttng_context_exit();
4429 printk(KERN_NOTICE "LTTng: Unloaded modules v%s.%s.%s%s (%s)%s%s\n",
4430 __stringify(LTTNG_MODULES_MAJOR_VERSION),
4431 __stringify(LTTNG_MODULES_MINOR_VERSION),
4432 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
4433 LTTNG_MODULES_EXTRAVERSION,
4434 LTTNG_VERSION_NAME,
4435 #ifdef LTTNG_EXTRA_VERSION_GIT
4436 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
4437 #else
4438 "",
4439 #endif
4440 #ifdef LTTNG_EXTRA_VERSION_NAME
4441 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
4442 #else
4443 "");
4444 #endif
4445 }
4446
4447 module_exit(lttng_events_exit);
4448
4449 #include <generated/patches.h>
4450 #ifdef LTTNG_EXTRA_VERSION_GIT
4451 MODULE_INFO(extra_version_git, LTTNG_EXTRA_VERSION_GIT);
4452 #endif
4453 #ifdef LTTNG_EXTRA_VERSION_NAME
4454 MODULE_INFO(extra_version_name, LTTNG_EXTRA_VERSION_NAME);
4455 #endif
4456 MODULE_LICENSE("GPL and additional rights");
4457 MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
4458 MODULE_DESCRIPTION("LTTng tracer");
4459 MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
4460 __stringify(LTTNG_MODULES_MINOR_VERSION) "."
4461 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
4462 LTTNG_MODULES_EXTRAVERSION);