Cleanup: implement dedicated file operations for events and enablers
[lttng-modules.git] / src / lttng-events.c
1 /* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
2 *
3 * lttng-events.c
4 *
5 * Holds LTTng per-session event registry.
6 *
7 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 */
9
10 /*
11 * This page_alloc.h wrapper needs to be included before gfpflags.h because it
12 * overrides a function with a define.
13 */
14 #include "wrapper/page_alloc.h"
15
16 #include <linux/module.h>
17 #include <linux/mutex.h>
18 #include <linux/sched.h>
19 #include <linux/slab.h>
20 #include <linux/jiffies.h>
21 #include <linux/utsname.h>
22 #include <linux/err.h>
23 #include <linux/seq_file.h>
24 #include <linux/file.h>
25 #include <linux/anon_inodes.h>
26 #include <wrapper/file.h>
27 #include <linux/uaccess.h>
28 #include <linux/vmalloc.h>
29 #include <linux/dmi.h>
30
31 #include <wrapper/uuid.h>
32 #include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_mappings() */
33 #include <wrapper/random.h>
34 #include <wrapper/tracepoint.h>
35 #include <wrapper/list.h>
36 #include <wrapper/types.h>
37 #include <lttng/kernel-version.h>
38 #include <lttng/events.h>
39 #include <lttng/events-internal.h>
40 #include <lttng/lttng-bytecode.h>
41 #include <lttng/tracer.h>
42 #include <lttng/event-notifier-notification.h>
43 #include <lttng/abi-old.h>
44 #include <lttng/endian.h>
45 #include <lttng/string-utils.h>
46 #include <lttng/utils.h>
47 #include <ringbuffer/backend.h>
48 #include <ringbuffer/frontend.h>
49 #include <wrapper/time.h>
50
51 #define METADATA_CACHE_DEFAULT_SIZE 4096
52
53 static LIST_HEAD(sessions);
54 static LIST_HEAD(event_notifier_groups);
55 static LIST_HEAD(lttng_transport_list);
56 static LIST_HEAD(lttng_counter_transport_list);
57 /*
58 * Protect the sessions and metadata caches.
59 */
60 static DEFINE_MUTEX(sessions_mutex);
61 static struct kmem_cache *event_cache;
62 static struct kmem_cache *event_notifier_cache;
63
64 static void lttng_session_lazy_sync_event_enablers(struct lttng_session *session);
65 static void lttng_session_sync_event_enablers(struct lttng_session *session);
66 static void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler);
67 static void lttng_event_notifier_enabler_destroy(struct lttng_event_notifier_enabler *event_notifier_enabler);
68 static void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group);
69
70 static void _lttng_event_destroy(struct lttng_event *event);
71 static void _lttng_event_notifier_destroy(struct lttng_event_notifier *event_notifier);
72 static void _lttng_channel_destroy(struct lttng_channel *chan);
73 static int _lttng_event_unregister(struct lttng_event *event);
74 static int _lttng_event_notifier_unregister(struct lttng_event_notifier *event_notifier);
75 static
76 int _lttng_event_metadata_statedump(struct lttng_session *session,
77 struct lttng_channel *chan,
78 struct lttng_event *event);
79 static
80 int _lttng_session_metadata_statedump(struct lttng_session *session);
81 static
82 void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream);
83 static
84 int _lttng_type_statedump(struct lttng_session *session,
85 const struct lttng_kernel_type_common *type,
86 enum lttng_kernel_string_encoding parent_encoding,
87 size_t nesting);
88 static
89 int _lttng_field_statedump(struct lttng_session *session,
90 const struct lttng_kernel_event_field *field,
91 size_t nesting);
92
/*
 * Wait for all currently-executing probe handlers to complete.
 *
 * Kernels >= 5.1 consolidated sched-RCU into regular RCU, so a single
 * synchronize_rcu() suffices there; older kernels need
 * synchronize_sched() for preempt-disabled probe contexts.
 * On PREEMPT_RT kernels (config symbol differs before/after 3.4),
 * additionally synchronize regular RCU, since RT makes formerly
 * non-preemptible sections preemptible.
 * NOTE(review): the exact RT rationale is inferred from the config
 * guards — confirm against upstream commit history.
 */
void synchronize_trace(void)
{
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,1,0))
	synchronize_rcu();
#else
	synchronize_sched();
#endif

#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0))
#ifdef CONFIG_PREEMPT_RT_FULL
	synchronize_rcu();
#endif
#else /* (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0)) */
#ifdef CONFIG_PREEMPT_RT
	synchronize_rcu();
#endif
#endif /* (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0)) */
}
111
/* Acquire the global sessions mutex (protects sessions and metadata caches). */
void lttng_lock_sessions(void)
{
	mutex_lock(&sessions_mutex);
}
116
/* Release the global sessions mutex. */
void lttng_unlock_sessions(void)
{
	mutex_unlock(&sessions_mutex);
}
121
122 static struct lttng_transport *lttng_transport_find(const char *name)
123 {
124 struct lttng_transport *transport;
125
126 list_for_each_entry(transport, &lttng_transport_list, node) {
127 if (!strcmp(transport->name, name))
128 return transport;
129 }
130 return NULL;
131 }
132
133 /*
134 * Called with sessions lock held.
135 */
136 int lttng_session_active(void)
137 {
138 struct lttng_session *iter;
139
140 list_for_each_entry(iter, &sessions, list) {
141 if (iter->active)
142 return 1;
143 }
144 return 0;
145 }
146
/*
 * Allocate and register a new tracing session.
 *
 * Allocates the session and its metadata cache, initializes the session
 * event hash table and the six id trackers, and links the session into
 * the global sessions list under the sessions mutex.
 *
 * Returns the new session, or NULL on allocation failure.
 */
struct lttng_session *lttng_session_create(void)
{
	struct lttng_session *session;
	struct lttng_metadata_cache *metadata_cache;
	int i;

	mutex_lock(&sessions_mutex);
	session = lttng_kvzalloc(sizeof(struct lttng_session), GFP_KERNEL);
	if (!session)
		goto err;
	INIT_LIST_HEAD(&session->chan);
	INIT_LIST_HEAD(&session->events);
	lttng_guid_gen(&session->uuid);

	metadata_cache = kzalloc(sizeof(struct lttng_metadata_cache),
			GFP_KERNEL);
	if (!metadata_cache)
		goto err_free_session;
	metadata_cache->data = vzalloc(METADATA_CACHE_DEFAULT_SIZE);
	if (!metadata_cache->data)
		goto err_free_cache;
	metadata_cache->cache_alloc = METADATA_CACHE_DEFAULT_SIZE;
	/* Cache is refcounted: released via metadata_cache_destroy(). */
	kref_init(&metadata_cache->refcount);
	mutex_init(&metadata_cache->lock);
	session->metadata_cache = metadata_cache;
	INIT_LIST_HEAD(&metadata_cache->metadata_stream);
	/* The cache keeps its own copy of the session uuid. */
	memcpy(&metadata_cache->uuid, &session->uuid,
		sizeof(metadata_cache->uuid));
	INIT_LIST_HEAD(&session->enablers_head);
	for (i = 0; i < LTTNG_EVENT_HT_SIZE; i++)
		INIT_HLIST_HEAD(&session->events_ht.table[i]);
	list_add(&session->list, &sessions);
	/* Each tracker records its owning session and tracked id type. */
	session->pid_tracker.session = session;
	session->pid_tracker.tracker_type = TRACKER_PID;
	session->vpid_tracker.session = session;
	session->vpid_tracker.tracker_type = TRACKER_VPID;
	session->uid_tracker.session = session;
	session->uid_tracker.tracker_type = TRACKER_UID;
	session->vuid_tracker.session = session;
	session->vuid_tracker.tracker_type = TRACKER_VUID;
	session->gid_tracker.session = session;
	session->gid_tracker.tracker_type = TRACKER_GID;
	session->vgid_tracker.session = session;
	session->vgid_tracker.tracker_type = TRACKER_VGID;
	mutex_unlock(&sessions_mutex);
	return session;

err_free_cache:
	kfree(metadata_cache);
err_free_session:
	lttng_kvfree(session);
err:
	mutex_unlock(&sessions_mutex);
	return NULL;
}
202
203 static
204 struct lttng_counter_transport *lttng_counter_transport_find(const char *name)
205 {
206 struct lttng_counter_transport *transport;
207
208 list_for_each_entry(transport, &lttng_counter_transport_list, node) {
209 if (!strcmp(transport->name, name))
210 return transport;
211 }
212 return NULL;
213 }
214
/*
 * Create a kernel counter backed by the named counter transport.
 *
 * Takes a module reference on the transport for the lifetime of the
 * counter; the reference is dropped on failure (and, presumably, by the
 * counter destruction path — not visible here).
 *
 * Returns the counter, or NULL on lookup/allocation/creation failure.
 */
struct lttng_counter *lttng_kernel_counter_create(
		const char *counter_transport_name,
		size_t number_dimensions, const size_t *dimensions_sizes)
{
	struct lttng_counter *counter = NULL;
	struct lttng_counter_transport *counter_transport = NULL;

	counter_transport = lttng_counter_transport_find(counter_transport_name);
	if (!counter_transport) {
		printk(KERN_WARNING "LTTng: counter transport %s not found.\n",
				counter_transport_name);
		goto notransport;
	}
	/* Pin the transport module while the counter exists. */
	if (!try_module_get(counter_transport->owner)) {
		printk(KERN_WARNING "LTTng: Can't lock counter transport module.\n");
		goto notransport;
	}

	counter = lttng_kvzalloc(sizeof(struct lttng_counter), GFP_KERNEL);
	if (!counter)
		goto nomem;

	/* Create event notifier error counter. */
	counter->ops = &counter_transport->ops;
	counter->transport = counter_transport;

	counter->counter = counter->ops->counter_create(
			number_dimensions, dimensions_sizes, 0);
	if (!counter->counter) {
		goto create_error;
	}

	return counter;

create_error:
	lttng_kvfree(counter);
nomem:
	if (counter_transport)
		module_put(counter_transport->owner);
notransport:
	return NULL;
}
257
/*
 * Create an event notifier group.
 *
 * Allocates the group, creates its notification ring-buffer channel on
 * the "relay-event-notifier" transport (taking a module reference on the
 * transport), initializes the enabler/notifier lists and hash table, and
 * links the group into the global list under the sessions mutex.
 *
 * Returns the group, or NULL on failure.
 */
struct lttng_event_notifier_group *lttng_event_notifier_group_create(void)
{
	struct lttng_transport *transport = NULL;
	struct lttng_event_notifier_group *event_notifier_group;
	const char *transport_name = "relay-event-notifier";
	size_t subbuf_size = 4096;	//TODO
	size_t num_subbuf = 16;		//TODO
	unsigned int switch_timer_interval = 0;
	unsigned int read_timer_interval = 0;
	int i;

	mutex_lock(&sessions_mutex);

	transport = lttng_transport_find(transport_name);
	if (!transport) {
		printk(KERN_WARNING "LTTng: transport %s not found\n",
				transport_name);
		goto notransport;
	}
	if (!try_module_get(transport->owner)) {
		printk(KERN_WARNING "LTTng: Can't lock transport %s module.\n",
				transport_name);
		goto notransport;
	}

	event_notifier_group = lttng_kvzalloc(sizeof(struct lttng_event_notifier_group),
			GFP_KERNEL);
	if (!event_notifier_group)
		goto nomem;

	/*
	 * Initialize the ring buffer used to store event notifier
	 * notifications.
	 */
	event_notifier_group->ops = &transport->ops;
	event_notifier_group->chan = transport->ops.channel_create(
			transport_name, event_notifier_group, NULL,
			subbuf_size, num_subbuf, switch_timer_interval,
			read_timer_interval);
	if (!event_notifier_group->chan)
		goto create_error;

	event_notifier_group->transport = transport;

	INIT_LIST_HEAD(&event_notifier_group->enablers_head);
	INIT_LIST_HEAD(&event_notifier_group->event_notifiers_head);
	for (i = 0; i < LTTNG_EVENT_NOTIFIER_HT_SIZE; i++)
		INIT_HLIST_HEAD(&event_notifier_group->event_notifiers_ht.table[i]);

	list_add(&event_notifier_group->node, &event_notifier_groups);

	mutex_unlock(&sessions_mutex);

	return event_notifier_group;

create_error:
	lttng_kvfree(event_notifier_group);
nomem:
	if (transport)
		module_put(transport->owner);
notransport:
	mutex_unlock(&sessions_mutex);
	return NULL;
}
322
323 void metadata_cache_destroy(struct kref *kref)
324 {
325 struct lttng_metadata_cache *cache =
326 container_of(kref, struct lttng_metadata_cache, refcount);
327 vfree(cache->data);
328 kfree(cache);
329 }
330
/*
 * Tear down a tracing session.
 *
 * Ordering matters: first deactivate and unregister all probes, wait
 * for in-flight probe handlers with synchronize_trace(), then free
 * enablers, events and channels, hang up metadata streams, destroy the
 * id trackers, drop the metadata cache reference, and finally unlink
 * and free the session.
 */
void lttng_session_destroy(struct lttng_session *session)
{
	struct lttng_channel *chan, *tmpchan;
	struct lttng_event *event, *tmpevent;
	struct lttng_metadata_stream *metadata_stream;
	struct lttng_event_enabler *event_enabler, *tmp_event_enabler;
	int ret;

	mutex_lock(&sessions_mutex);
	WRITE_ONCE(session->active, 0);
	list_for_each_entry(chan, &session->chan, list) {
		ret = lttng_syscalls_unregister_channel(chan);
		WARN_ON(ret);
	}
	list_for_each_entry(event, &session->events, list) {
		ret = _lttng_event_unregister(event);
		WARN_ON(ret);
	}
	synchronize_trace();	/* Wait for in-flight events to complete */
	list_for_each_entry(chan, &session->chan, list) {
		ret = lttng_syscalls_destroy_event(chan);
		WARN_ON(ret);
	}
	list_for_each_entry_safe(event_enabler, tmp_event_enabler,
			&session->enablers_head, node)
		lttng_event_enabler_destroy(event_enabler);
	list_for_each_entry_safe(event, tmpevent, &session->events, list)
		_lttng_event_destroy(event);
	list_for_each_entry_safe(chan, tmpchan, &session->chan, list) {
		/* Metadata channels are destroyed via their own release path. */
		BUG_ON(chan->channel_type == METADATA_CHANNEL);
		_lttng_channel_destroy(chan);
	}
	mutex_lock(&session->metadata_cache->lock);
	list_for_each_entry(metadata_stream, &session->metadata_cache->metadata_stream, list)
		_lttng_metadata_channel_hangup(metadata_stream);
	mutex_unlock(&session->metadata_cache->lock);
	lttng_id_tracker_destroy(&session->pid_tracker, false);
	lttng_id_tracker_destroy(&session->vpid_tracker, false);
	lttng_id_tracker_destroy(&session->uid_tracker, false);
	lttng_id_tracker_destroy(&session->vuid_tracker, false);
	lttng_id_tracker_destroy(&session->gid_tracker, false);
	lttng_id_tracker_destroy(&session->vgid_tracker, false);
	/* May free the cache if this was the last reference. */
	kref_put(&session->metadata_cache->refcount, metadata_cache_destroy);
	list_del(&session->list);
	mutex_unlock(&sessions_mutex);
	lttng_kvfree(session);
}
378
/*
 * Tear down an event notifier group.
 *
 * Unregisters all notifiers, waits for in-flight handlers, then frees
 * enablers, notifiers, the optional error counter, and the notification
 * channel before unlinking and freeing the group. NULL input is a no-op.
 */
void lttng_event_notifier_group_destroy(
		struct lttng_event_notifier_group *event_notifier_group)
{
	struct lttng_event_notifier_enabler *event_notifier_enabler, *tmp_event_notifier_enabler;
	struct lttng_event_notifier *event_notifier, *tmpevent_notifier;
	int ret;

	if (!event_notifier_group)
		return;

	mutex_lock(&sessions_mutex);

	ret = lttng_syscalls_unregister_event_notifier_group(event_notifier_group);
	WARN_ON(ret);

	list_for_each_entry_safe(event_notifier, tmpevent_notifier,
			&event_notifier_group->event_notifiers_head, list) {
		ret = _lttng_event_notifier_unregister(event_notifier);
		WARN_ON(ret);
	}

	/* Wait for in-flight event notifier to complete */
	synchronize_trace();

	/* Make sure no pending wakeup irq_work is still queued. */
	irq_work_sync(&event_notifier_group->wakeup_pending);

	kfree(event_notifier_group->sc_filter);

	list_for_each_entry_safe(event_notifier_enabler, tmp_event_notifier_enabler,
			&event_notifier_group->enablers_head, node)
		lttng_event_notifier_enabler_destroy(event_notifier_enabler);

	list_for_each_entry_safe(event_notifier, tmpevent_notifier,
			&event_notifier_group->event_notifiers_head, list)
		_lttng_event_notifier_destroy(event_notifier);

	if (event_notifier_group->error_counter) {
		struct lttng_counter *error_counter = event_notifier_group->error_counter;

		/* Drop counter, then the transport module ref taken at creation. */
		error_counter->ops->counter_destroy(error_counter->counter);
		module_put(error_counter->transport->owner);
		lttng_kvfree(error_counter);
		event_notifier_group->error_counter = NULL;
	}

	event_notifier_group->ops->channel_destroy(event_notifier_group->chan);
	module_put(event_notifier_group->transport->owner);
	list_del(&event_notifier_group->node);

	mutex_unlock(&sessions_mutex);
	lttng_kvfree(event_notifier_group);
}
431
/*
 * Trigger a statedump for the given session, serialized by the
 * sessions mutex. Returns the result of lttng_statedump_start().
 */
int lttng_session_statedump(struct lttng_session *session)
{
	int ret;

	mutex_lock(&sessions_mutex);
	ret = lttng_statedump_start(session);
	mutex_unlock(&sessions_mutex);
	return ret;
}
441
/*
 * Activate a tracing session.
 *
 * Syncs enablers, picks each channel's event header type based on the
 * number of allocated event ids, clears stream quiescence, then marks
 * the session active and emits the metadata/state dumps. On statedump
 * failure the session is deactivated again.
 *
 * Returns 0 on success, -EBUSY if already active, or a statedump error.
 */
int lttng_session_enable(struct lttng_session *session)
{
	int ret = 0;
	struct lttng_channel *chan;

	mutex_lock(&sessions_mutex);
	if (session->active) {
		ret = -EBUSY;
		goto end;
	}

	/* Set transient enabler state to "enabled" */
	session->tstate = 1;

	/* We need to sync enablers with session before activation. */
	lttng_session_sync_event_enablers(session);

	/*
	 * Snapshot the number of events per channel to know the type of header
	 * we need to use.
	 */
	list_for_each_entry(chan, &session->chan, list) {
		if (chan->header_type)
			continue;	/* don't change it if session stop/restart */
		/* Fewer than 31 event ids fit the compact header encoding. */
		if (chan->free_event_id < 31)
			chan->header_type = 1;	/* compact */
		else
			chan->header_type = 2;	/* large */
	}

	/* Clear each stream's quiescent state. */
	list_for_each_entry(chan, &session->chan, list) {
		if (chan->channel_type != METADATA_CHANNEL)
			lib_ring_buffer_clear_quiescent_channel(chan->chan);
	}

	WRITE_ONCE(session->active, 1);
	WRITE_ONCE(session->been_active, 1);
	ret = _lttng_session_metadata_statedump(session);
	if (ret) {
		WRITE_ONCE(session->active, 0);
		goto end;
	}
	ret = lttng_statedump_start(session);
	if (ret)
		WRITE_ONCE(session->active, 0);
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
492
/*
 * Deactivate a tracing session.
 *
 * Marks the session inactive, syncs enablers with the disabled
 * transient state, and sets each non-metadata stream quiescent.
 *
 * Returns 0 on success, -EBUSY if the session is not active.
 */
int lttng_session_disable(struct lttng_session *session)
{
	int ret = 0;
	struct lttng_channel *chan;

	mutex_lock(&sessions_mutex);
	if (!session->active) {
		ret = -EBUSY;
		goto end;
	}
	WRITE_ONCE(session->active, 0);

	/* Set transient enabler state to "disabled" */
	session->tstate = 0;
	lttng_session_sync_event_enablers(session);

	/* Set each stream's quiescent state. */
	list_for_each_entry(chan, &session->chan, list) {
		if (chan->channel_type != METADATA_CHANNEL)
			lib_ring_buffer_set_quiescent_channel(chan->chan);
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
518
/*
 * Regenerate the session's metadata from scratch.
 *
 * Wipes the metadata cache (bumping its version), resets each metadata
 * stream's in/out positions and every dumped flag, then re-emits the
 * full metadata statedump.
 *
 * Returns 0 on success, -EBUSY if the session is not active, or a
 * statedump error.
 */
int lttng_session_metadata_regenerate(struct lttng_session *session)
{
	int ret = 0;
	struct lttng_channel *chan;
	struct lttng_event *event;
	struct lttng_metadata_cache *cache = session->metadata_cache;
	struct lttng_metadata_stream *stream;

	mutex_lock(&sessions_mutex);
	if (!session->active) {
		ret = -EBUSY;
		goto end;
	}

	mutex_lock(&cache->lock);
	memset(cache->data, 0, cache->cache_alloc);
	cache->metadata_written = 0;
	cache->version++;
	list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list) {
		stream->metadata_out = 0;
		stream->metadata_in = 0;
	}
	mutex_unlock(&cache->lock);

	/* Force the next statedump to re-emit everything. */
	session->metadata_dumped = 0;
	list_for_each_entry(chan, &session->chan, list) {
		chan->metadata_dumped = 0;
	}

	list_for_each_entry(event, &session->events, list) {
		event->metadata_dumped = 0;
	}

	ret = _lttng_session_metadata_statedump(session);

end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
558
559 int lttng_channel_enable(struct lttng_channel *channel)
560 {
561 int ret = 0;
562
563 mutex_lock(&sessions_mutex);
564 if (channel->channel_type == METADATA_CHANNEL) {
565 ret = -EPERM;
566 goto end;
567 }
568 if (channel->enabled) {
569 ret = -EEXIST;
570 goto end;
571 }
572 /* Set transient enabler state to "enabled" */
573 channel->tstate = 1;
574 lttng_session_sync_event_enablers(channel->session);
575 /* Set atomically the state to "enabled" */
576 WRITE_ONCE(channel->enabled, 1);
577 end:
578 mutex_unlock(&sessions_mutex);
579 return ret;
580 }
581
/*
 * Disable a channel.
 *
 * Returns 0 on success, -EPERM for the metadata channel (not
 * user-toggleable), or -EEXIST if the channel is already disabled.
 */
int lttng_channel_disable(struct lttng_channel *channel)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (channel->channel_type == METADATA_CHANNEL) {
		ret = -EPERM;
		goto end;
	}
	if (!channel->enabled) {
		ret = -EEXIST;
		goto end;
	}
	/* Set atomically the state to "disabled" */
	WRITE_ONCE(channel->enabled, 0);
	/* Set transient enabler state to "disabled" */
	channel->tstate = 0;
	lttng_session_sync_event_enablers(channel->session);
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
604
/*
 * Enable an event.
 *
 * Tracepoint and syscall event state is owned by enabler sync, so
 * toggling them directly returns -EINVAL. kprobe/uprobe events are
 * flipped in place; kretprobe delegates to the kretprobe helper (which
 * handles the entry/return event pair).
 *
 * Returns 0 on success, -EPERM on metadata-channel events, -EEXIST if
 * already enabled, -EINVAL otherwise.
 */
int lttng_event_enable(struct lttng_event *event)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (event->chan->channel_type == METADATA_CHANNEL) {
		ret = -EPERM;
		goto end;
	}
	if (event->enabled) {
		ret = -EEXIST;
		goto end;
	}
	switch (event->instrumentation) {
	case LTTNG_KERNEL_ABI_TRACEPOINT:	/* Fall-through */
	case LTTNG_KERNEL_ABI_SYSCALL:
		ret = -EINVAL;
		break;

	case LTTNG_KERNEL_ABI_KPROBE:	/* Fall-through */
	case LTTNG_KERNEL_ABI_UPROBE:
		WRITE_ONCE(event->enabled, 1);
		break;

	case LTTNG_KERNEL_ABI_KRETPROBE:
		ret = lttng_kretprobes_event_enable_state(event, 1);
		break;

	case LTTNG_KERNEL_ABI_FUNCTION:	/* Fall-through */
	case LTTNG_KERNEL_ABI_NOOP:	/* Fall-through */
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
643
/*
 * Disable an event. Mirror of lttng_event_enable().
 *
 * Returns 0 on success, -EPERM on metadata-channel events, -EEXIST if
 * already disabled, -EINVAL for enabler-owned or unsupported
 * instrumentation types.
 */
int lttng_event_disable(struct lttng_event *event)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (event->chan->channel_type == METADATA_CHANNEL) {
		ret = -EPERM;
		goto end;
	}
	if (!event->enabled) {
		ret = -EEXIST;
		goto end;
	}
	switch (event->instrumentation) {
	case LTTNG_KERNEL_ABI_TRACEPOINT:	/* Fall-through */
	case LTTNG_KERNEL_ABI_SYSCALL:
		ret = -EINVAL;
		break;

	case LTTNG_KERNEL_ABI_KPROBE:	/* Fall-through */
	case LTTNG_KERNEL_ABI_UPROBE:
		WRITE_ONCE(event->enabled, 0);
		break;

	case LTTNG_KERNEL_ABI_KRETPROBE:

		ret = lttng_kretprobes_event_enable_state(event, 0);
		break;

	case LTTNG_KERNEL_ABI_FUNCTION:	/* Fall-through */
	case LTTNG_KERNEL_ABI_NOOP:	/* Fall-through */
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
683
684 int lttng_event_notifier_enable(struct lttng_event_notifier *event_notifier)
685 {
686 int ret = 0;
687
688 mutex_lock(&sessions_mutex);
689 if (event_notifier->enabled) {
690 ret = -EEXIST;
691 goto end;
692 }
693 switch (event_notifier->instrumentation) {
694 case LTTNG_KERNEL_ABI_TRACEPOINT: /* Fall-through */
695 case LTTNG_KERNEL_ABI_SYSCALL:
696 ret = -EINVAL;
697 break;
698
699 case LTTNG_KERNEL_ABI_KPROBE: /* Fall-through */
700 case LTTNG_KERNEL_ABI_UPROBE:
701 WRITE_ONCE(event_notifier->enabled, 1);
702 break;
703
704 case LTTNG_KERNEL_ABI_FUNCTION: /* Fall-through */
705 case LTTNG_KERNEL_ABI_KRETPROBE: /* Fall-through */
706 case LTTNG_KERNEL_ABI_NOOP: /* Fall-through */
707 default:
708 WARN_ON_ONCE(1);
709 ret = -EINVAL;
710 }
711 end:
712 mutex_unlock(&sessions_mutex);
713 return ret;
714 }
715
716 int lttng_event_notifier_disable(struct lttng_event_notifier *event_notifier)
717 {
718 int ret = 0;
719
720 mutex_lock(&sessions_mutex);
721 if (!event_notifier->enabled) {
722 ret = -EEXIST;
723 goto end;
724 }
725 switch (event_notifier->instrumentation) {
726 case LTTNG_KERNEL_ABI_TRACEPOINT: /* Fall-through */
727 case LTTNG_KERNEL_ABI_SYSCALL:
728 ret = -EINVAL;
729 break;
730
731 case LTTNG_KERNEL_ABI_KPROBE: /* Fall-through */
732 case LTTNG_KERNEL_ABI_UPROBE:
733 WRITE_ONCE(event_notifier->enabled, 0);
734 break;
735
736 case LTTNG_KERNEL_ABI_FUNCTION: /* Fall-through */
737 case LTTNG_KERNEL_ABI_KRETPROBE: /* Fall-through */
738 case LTTNG_KERNEL_ABI_NOOP: /* Fall-through */
739 default:
740 WARN_ON_ONCE(1);
741 ret = -EINVAL;
742 }
743 end:
744 mutex_unlock(&sessions_mutex);
745 return ret;
746 }
747
/*
 * Create a channel within a session.
 *
 * Refuses non-metadata channels on a session that has already been
 * active. Takes a module reference on the transport for the channel's
 * lifetime. The new channel starts enabled and is linked into the
 * session's channel list.
 *
 * Returns the channel, or NULL on failure.
 */
struct lttng_channel *lttng_channel_create(struct lttng_session *session,
				       const char *transport_name,
				       void *buf_addr,
				       size_t subbuf_size, size_t num_subbuf,
				       unsigned int switch_timer_interval,
				       unsigned int read_timer_interval,
				       enum channel_type channel_type)
{
	struct lttng_channel *chan;
	struct lttng_transport *transport = NULL;

	mutex_lock(&sessions_mutex);
	if (session->been_active && channel_type != METADATA_CHANNEL)
		goto active;	/* Refuse to add channel to active session */
	transport = lttng_transport_find(transport_name);
	if (!transport) {
		printk(KERN_WARNING "LTTng: transport %s not found\n",
		       transport_name);
		goto notransport;
	}
	if (!try_module_get(transport->owner)) {
		printk(KERN_WARNING "LTTng: Can't lock transport module.\n");
		goto notransport;
	}
	chan = kzalloc(sizeof(struct lttng_channel), GFP_KERNEL);
	if (!chan)
		goto nomem;
	chan->session = session;
	chan->id = session->free_chan_id++;
	chan->ops = &transport->ops;
	/*
	 * Note: the channel creation op already writes into the packet
	 * headers. Therefore the "chan" information used as input
	 * should be already accessible.
	 */
	chan->chan = transport->ops.channel_create(transport_name,
			chan, buf_addr, subbuf_size, num_subbuf,
			switch_timer_interval, read_timer_interval);
	if (!chan->chan)
		goto create_error;
	chan->tstate = 1;
	chan->enabled = 1;
	chan->transport = transport;
	chan->channel_type = channel_type;
	list_add(&chan->list, &session->chan);
	mutex_unlock(&sessions_mutex);
	return chan;

create_error:
	kfree(chan);
nomem:
	if (transport)
		module_put(transport->owner);
notransport:
active:
	mutex_unlock(&sessions_mutex);
	return NULL;
}
806
/*
 * Destroy a channel and release its transport module reference.
 *
 * Only used internally at session destruction for per-cpu channels, and
 * when metadata channel is released.
 * Needs to be called with sessions mutex held.
 */
static
void _lttng_channel_destroy(struct lttng_channel *chan)
{
	chan->ops->channel_destroy(chan->chan);
	module_put(chan->transport->owner);
	list_del(&chan->list);
	lttng_kernel_destroy_context(chan->ctx);
	kfree(chan);
}
821
/*
 * Destroy a metadata channel. Exported entry point for the metadata
 * channel release path; only valid for METADATA_CHANNEL channels.
 */
void lttng_metadata_channel_destroy(struct lttng_channel *chan)
{
	BUG_ON(chan->channel_type != METADATA_CHANNEL);

	/* Protect the metadata cache with the sessions_mutex. */
	mutex_lock(&sessions_mutex);
	_lttng_channel_destroy(chan);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_metadata_channel_destroy);
832
/*
 * Mark a metadata stream finalized and wake up any reader blocked on it,
 * so userspace sees the hangup.
 */
static
void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream)
{
	stream->finalized = 1;
	wake_up_interruptible(&stream->read_wait);
}
839
840
/*
 * Create an event attached to a channel.
 *
 * Supports event creation while tracing session is active.
 * Needs to be called with sessions mutex held.
 *
 * Resolves the event name from either the tracepoint descriptor or the
 * ABI parameters, rejects duplicates in the session's event hash table,
 * then performs per-instrumentation registration. kretprobe allocates a
 * second event for the return probe. Finally the event's metadata is
 * dumped and the event is published in the hash table and session list.
 *
 * Returns the event, or an ERR_PTR: -EMFILE (channel out of event ids),
 * -EEXIST (duplicate), -ENOMEM, -ENOENT (unknown tracepoint), -EINVAL.
 */
struct lttng_event *_lttng_event_create(struct lttng_channel *chan,
				struct lttng_kernel_abi_event *event_param,
				void *filter,
				const struct lttng_kernel_event_desc *event_desc,
				enum lttng_kernel_abi_instrumentation itype)
{
	struct lttng_session *session = chan->session;
	struct lttng_event *event;
	const char *event_name;
	struct hlist_head *head;
	int ret;

	if (chan->free_event_id == -1U) {
		ret = -EMFILE;
		goto full;
	}

	/* Pick the event name source depending on instrumentation type. */
	switch (itype) {
	case LTTNG_KERNEL_ABI_TRACEPOINT:
		event_name = event_desc->event_name;
		break;

	case LTTNG_KERNEL_ABI_KPROBE:	/* Fall-through */
	case LTTNG_KERNEL_ABI_UPROBE:	/* Fall-through */
	case LTTNG_KERNEL_ABI_KRETPROBE:	/* Fall-through */
	case LTTNG_KERNEL_ABI_SYSCALL:
		event_name = event_param->name;
		break;

	case LTTNG_KERNEL_ABI_FUNCTION:	/* Fall-through */
	case LTTNG_KERNEL_ABI_NOOP:	/* Fall-through */
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
		goto type_error;
	}

	/* Reject a second event with the same name on the same channel. */
	head = utils_borrow_hash_table_bucket(session->events_ht.table,
		LTTNG_EVENT_HT_SIZE, event_name);
	lttng_hlist_for_each_entry(event, head, hlist) {
		WARN_ON_ONCE(!event->desc);
		if (!strncmp(event->desc->event_name, event_name,
					LTTNG_KERNEL_ABI_SYM_NAME_LEN - 1)
				&& chan == event->chan) {
			ret = -EEXIST;
			goto exist;
		}
	}

	event = kmem_cache_zalloc(event_cache, GFP_KERNEL);
	if (!event) {
		ret = -ENOMEM;
		goto cache_error;
	}
	event->chan = chan;
	event->filter = filter;
	event->id = chan->free_event_id++;
	event->instrumentation = itype;
	INIT_LIST_HEAD(&event->filter_bytecode_runtime_head);
	INIT_LIST_HEAD(&event->enablers_ref_head);

	switch (itype) {
	case LTTNG_KERNEL_ABI_TRACEPOINT:
		/* Event will be enabled by enabler sync. */
		event->enabled = 0;
		event->registered = 0;
		event->desc = lttng_event_desc_get(event_name);
		if (!event->desc) {
			ret = -ENOENT;
			goto register_error;
		}
		/* Populate lttng_event structure before event registration. */
		smp_wmb();
		break;

	case LTTNG_KERNEL_ABI_KPROBE:
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->registered = 1;
		/*
		 * Populate lttng_event structure before event
		 * registration.
		 */
		smp_wmb();
		ret = lttng_kprobes_register_event(event_name,
				event_param->u.kprobe.symbol_name,
				event_param->u.kprobe.offset,
				event_param->u.kprobe.addr,
				event);
		if (ret) {
			ret = -EINVAL;
			goto register_error;
		}
		/* Pin the probe descriptor's owner module. */
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		break;

	case LTTNG_KERNEL_ABI_KRETPROBE:
	{
		struct lttng_event *event_return;

		/* kretprobe defines 2 events */
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->registered = 1;
		event_return =
			kmem_cache_zalloc(event_cache, GFP_KERNEL);
		if (!event_return) {
			ret = -ENOMEM;
			goto register_error;
		}
		event_return->chan = chan;
		event_return->filter = filter;
		event_return->id = chan->free_event_id++;
		event_return->enabled = 0;
		event_return->registered = 1;
		event_return->instrumentation = itype;
		INIT_LIST_HEAD(&event_return->filter_bytecode_runtime_head);
		INIT_LIST_HEAD(&event_return->enablers_ref_head);
		/*
		 * Populate lttng_event structure before kretprobe registration.
		 */
		smp_wmb();
		ret = lttng_kretprobes_register(event_name,
				event_param->u.kretprobe.symbol_name,
				event_param->u.kretprobe.offset,
				event_param->u.kretprobe.addr,
				event, event_return);
		if (ret) {
			kmem_cache_free(event_cache, event_return);
			ret = -EINVAL;
			goto register_error;
		}
		/* Take 2 refs on the module: one per event. */
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		ret = _lttng_event_metadata_statedump(chan->session, chan,
						    event_return);
		WARN_ON_ONCE(ret > 0);
		if (ret) {
			kmem_cache_free(event_cache, event_return);
			module_put(event->desc->owner);
			module_put(event->desc->owner);
			goto statedump_error;
		}
		list_add(&event_return->list, &chan->session->events);
		break;
	}

	case LTTNG_KERNEL_ABI_SYSCALL:
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->registered = 0;
		event->desc = event_desc;
		/* Only a single entry/exit direction is valid per event. */
		switch (event_param->u.syscall.entryexit) {
		case LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT:
			ret = -EINVAL;
			goto register_error;
		case LTTNG_KERNEL_ABI_SYSCALL_ENTRY:
			event->u.syscall.entryexit = LTTNG_SYSCALL_ENTRY;
			break;
		case LTTNG_KERNEL_ABI_SYSCALL_EXIT:
			event->u.syscall.entryexit = LTTNG_SYSCALL_EXIT;
			break;
		}
		/* Likewise, a single ABI (native or compat) per event. */
		switch (event_param->u.syscall.abi) {
		case LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL:
			ret = -EINVAL;
			goto register_error;
		case LTTNG_KERNEL_ABI_SYSCALL_ABI_NATIVE:
			event->u.syscall.abi = LTTNG_SYSCALL_ABI_NATIVE;
			break;
		case LTTNG_KERNEL_ABI_SYSCALL_ABI_COMPAT:
			event->u.syscall.abi = LTTNG_SYSCALL_ABI_COMPAT;
			break;
		}
		if (!event->desc) {
			ret = -EINVAL;
			goto register_error;
		}
		break;

	case LTTNG_KERNEL_ABI_UPROBE:
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->registered = 1;

		/*
		 * Populate lttng_event structure before event
		 * registration.
		 */
		smp_wmb();

		ret = lttng_uprobes_register_event(event_param->name,
				event_param->u.uprobe.fd,
				event);
		if (ret)
			goto register_error;
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		break;

	case LTTNG_KERNEL_ABI_FUNCTION:	/* Fall-through */
	case LTTNG_KERNEL_ABI_NOOP:	/* Fall-through */
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
		goto register_error;
	}
	ret = _lttng_event_metadata_statedump(chan->session, chan, event);
	WARN_ON_ONCE(ret > 0);
	if (ret) {
		goto statedump_error;
	}
	/* Publish the event: hash table lookup and session-wide list. */
	hlist_add_head(&event->hlist, head);
	list_add(&event->list, &chan->session->events);
	return event;

statedump_error:
	/* If a statedump error occurs, events will not be readable. */
register_error:
	kmem_cache_free(event_cache, event);
cache_error:
exist:
type_error:
full:
	return ERR_PTR(ret);
}
1087
/*
 * Create an event notifier within an event notifier group.
 *
 * @event_desc: probe event descriptor (used by the tracepoint and syscall
 *	instrumentation types).
 * @token: user-provided token identifying the rule that created this
 *	notifier. Part of the duplicate-detection key.
 * @error_counter_index: bucket index in the group's error counter.
 * @event_notifier_group: group owning the new notifier.
 * @event_notifier_param: ABI creation parameters (name, probe location, ...).
 * @filter: filter data attached to the notifier.
 * @itype: instrumentation type.
 *
 * Returns the new event notifier, or an ERR_PTR() value on error.
 * Called with sessions_mutex held (see lttng_event_notifier_create()).
 */
struct lttng_event_notifier *_lttng_event_notifier_create(
		const struct lttng_kernel_event_desc *event_desc,
		uint64_t token, uint64_t error_counter_index,
		struct lttng_event_notifier_group *event_notifier_group,
		struct lttng_kernel_abi_event_notifier *event_notifier_param,
		void *filter, enum lttng_kernel_abi_instrumentation itype)
{
	struct lttng_event_notifier *event_notifier;
	struct lttng_counter *error_counter;
	const char *event_name;
	struct hlist_head *head;
	int ret;

	/* Pick the name used as hash key for duplicate detection. */
	switch (itype) {
	case LTTNG_KERNEL_ABI_TRACEPOINT:
		event_name = event_desc->event_name;
		break;

	case LTTNG_KERNEL_ABI_KPROBE:	/* Fall-through */
	case LTTNG_KERNEL_ABI_UPROBE:	/* Fall-through */
	case LTTNG_KERNEL_ABI_SYSCALL:
		event_name = event_notifier_param->event.name;
		break;

	case LTTNG_KERNEL_ABI_KRETPROBE:	/* Fall-through */
	case LTTNG_KERNEL_ABI_FUNCTION:	/* Fall-through */
	case LTTNG_KERNEL_ABI_NOOP:	/* Fall-through */
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
		goto type_error;
	}

	/*
	 * Refuse duplicates: same name, same group and same user token.
	 */
	head = utils_borrow_hash_table_bucket(event_notifier_group->event_notifiers_ht.table,
		LTTNG_EVENT_NOTIFIER_HT_SIZE, event_name);
	lttng_hlist_for_each_entry(event_notifier, head, hlist) {
		WARN_ON_ONCE(!event_notifier->desc);
		if (!strncmp(event_notifier->desc->event_name, event_name,
					LTTNG_KERNEL_ABI_SYM_NAME_LEN - 1)
				&& event_notifier_group == event_notifier->group
				&& token == event_notifier->user_token) {
			ret = -EEXIST;
			goto exist;
		}
	}

	event_notifier = kmem_cache_zalloc(event_notifier_cache, GFP_KERNEL);
	if (!event_notifier) {
		ret = -ENOMEM;
		goto cache_error;
	}

	event_notifier->group = event_notifier_group;
	event_notifier->user_token = token;
	event_notifier->error_counter_index = error_counter_index;
	event_notifier->num_captures = 0;
	event_notifier->filter = filter;
	event_notifier->instrumentation = itype;
	event_notifier->send_notification = lttng_event_notifier_notification_send;
	INIT_LIST_HEAD(&event_notifier->filter_bytecode_runtime_head);
	INIT_LIST_HEAD(&event_notifier->capture_bytecode_runtime_head);
	INIT_LIST_HEAD(&event_notifier->enablers_ref_head);

	/* Per-instrumentation-type setup and (for some types) registration. */
	switch (itype) {
	case LTTNG_KERNEL_ABI_TRACEPOINT:
		/* Event will be enabled by enabler sync. */
		event_notifier->enabled = 0;
		event_notifier->registered = 0;
		event_notifier->desc = lttng_event_desc_get(event_name);
		if (!event_notifier->desc) {
			ret = -ENOENT;
			goto register_error;
		}
		/* Populate lttng_event_notifier structure before event registration. */
		smp_wmb();
		break;

	case LTTNG_KERNEL_ABI_KPROBE:
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event_notifier->enabled = 0;
		event_notifier->registered = 1;
		/*
		 * Populate lttng_event_notifier structure before event
		 * registration.
		 */
		smp_wmb();
		ret = lttng_kprobes_register_event_notifier(
				event_notifier_param->event.u.kprobe.symbol_name,
				event_notifier_param->event.u.kprobe.offset,
				event_notifier_param->event.u.kprobe.addr,
				event_notifier);
		if (ret) {
			ret = -EINVAL;
			goto register_error;
		}
		/* Pin the probe provider module while the notifier exists. */
		ret = try_module_get(event_notifier->desc->owner);
		WARN_ON_ONCE(!ret);
		break;

	case LTTNG_KERNEL_ABI_SYSCALL:
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event_notifier->enabled = 0;
		event_notifier->registered = 0;
		event_notifier->desc = event_desc;
		/* ENTRYEXIT/ABI_ALL must have been expanded by the caller. */
		switch (event_notifier_param->event.u.syscall.entryexit) {
		case LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT:
			ret = -EINVAL;
			goto register_error;
		case LTTNG_KERNEL_ABI_SYSCALL_ENTRY:
			event_notifier->u.syscall.entryexit = LTTNG_SYSCALL_ENTRY;
			break;
		case LTTNG_KERNEL_ABI_SYSCALL_EXIT:
			event_notifier->u.syscall.entryexit = LTTNG_SYSCALL_EXIT;
			break;
		}
		switch (event_notifier_param->event.u.syscall.abi) {
		case LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL:
			ret = -EINVAL;
			goto register_error;
		case LTTNG_KERNEL_ABI_SYSCALL_ABI_NATIVE:
			event_notifier->u.syscall.abi = LTTNG_SYSCALL_ABI_NATIVE;
			break;
		case LTTNG_KERNEL_ABI_SYSCALL_ABI_COMPAT:
			event_notifier->u.syscall.abi = LTTNG_SYSCALL_ABI_COMPAT;
			break;
		}

		if (!event_notifier->desc) {
			ret = -EINVAL;
			goto register_error;
		}
		break;

	case LTTNG_KERNEL_ABI_UPROBE:
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event_notifier->enabled = 0;
		event_notifier->registered = 1;

		/*
		 * Populate lttng_event_notifier structure before
		 * event_notifier registration.
		 */
		smp_wmb();

		ret = lttng_uprobes_register_event_notifier(
				event_notifier_param->event.name,
				event_notifier_param->event.u.uprobe.fd,
				event_notifier);
		if (ret)
			goto register_error;
		ret = try_module_get(event_notifier->desc->owner);
		WARN_ON_ONCE(!ret);
		break;

	case LTTNG_KERNEL_ABI_KRETPROBE:	/* Fall-through */
	case LTTNG_KERNEL_ABI_FUNCTION:	/* Fall-through */
	case LTTNG_KERNEL_ABI_NOOP:	/* Fall-through */
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
		goto register_error;
	}

	list_add(&event_notifier->list, &event_notifier_group->event_notifiers_head);
	hlist_add_head(&event_notifier->hlist, head);

	/*
	 * Clear the error counter bucket. The sessiond keeps track of which
	 * bucket is currently in use. We trust it. The session lock
	 * synchronizes against concurrent creation of the error
	 * counter.
	 */
	/*
	 * NOTE(review): for kprobe/uprobe instrumentation the probe has
	 * already been registered at this point; the register_error path
	 * below frees the notifier without unregistering the probe —
	 * confirm the goto register_error targets below cannot leak a
	 * registered probe.
	 */
	error_counter = event_notifier_group->error_counter;
	if (error_counter) {
		size_t dimension_index[1];

		/*
		 * Check that the index is within the boundary of the counter.
		 */
		if (event_notifier->error_counter_index >= event_notifier_group->error_counter_len) {
			printk(KERN_INFO "LTTng: event_notifier: Error counter index out-of-bound: counter-len=%zu, index=%llu\n",
				event_notifier_group->error_counter_len, event_notifier->error_counter_index);
			ret = -EINVAL;
			goto register_error;
		}

		dimension_index[0] = event_notifier->error_counter_index;
		ret = error_counter->ops->counter_clear(error_counter->counter, dimension_index);
		if (ret) {
			printk(KERN_INFO "LTTng: event_notifier: Unable to clear error counter bucket %llu\n",
				event_notifier->error_counter_index);
			goto register_error;
		}
	}

	return event_notifier;

register_error:
	kmem_cache_free(event_notifier_cache, event_notifier);
cache_error:
exist:
type_error:
	return ERR_PTR(ret);
}
1301
/*
 * Read one counter bucket on a given cpu.  Thin wrapper delegating to
 * the counter backend operations; returns the backend's return value.
 */
int lttng_kernel_counter_read(struct lttng_counter *counter,
		const size_t *dim_indexes, int32_t cpu,
		int64_t *val, bool *overflow, bool *underflow)
{
	return counter->ops->counter_read(counter->counter, dim_indexes,
			cpu, val, overflow, underflow);
}
1309
/*
 * Aggregate one counter bucket across cpus.  Thin wrapper delegating to
 * the counter backend operations; returns the backend's return value.
 */
int lttng_kernel_counter_aggregate(struct lttng_counter *counter,
		const size_t *dim_indexes, int64_t *val,
		bool *overflow, bool *underflow)
{
	return counter->ops->counter_aggregate(counter->counter, dim_indexes,
			val, overflow, underflow);
}
1317
/*
 * Clear one counter bucket.  Thin wrapper delegating to the counter
 * backend operations; returns the backend's return value.
 */
int lttng_kernel_counter_clear(struct lttng_counter *counter,
		const size_t *dim_indexes)
{
	return counter->ops->counter_clear(counter->counter, dim_indexes);
}
1323
1324 struct lttng_event *lttng_event_create(struct lttng_channel *chan,
1325 struct lttng_kernel_abi_event *event_param,
1326 void *filter,
1327 const struct lttng_kernel_event_desc *event_desc,
1328 enum lttng_kernel_abi_instrumentation itype)
1329 {
1330 struct lttng_event *event;
1331
1332 mutex_lock(&sessions_mutex);
1333 event = _lttng_event_create(chan, event_param, filter, event_desc,
1334 itype);
1335 mutex_unlock(&sessions_mutex);
1336 return event;
1337 }
1338
1339 struct lttng_event_notifier *lttng_event_notifier_create(
1340 const struct lttng_kernel_event_desc *event_desc,
1341 uint64_t id, uint64_t error_counter_index,
1342 struct lttng_event_notifier_group *event_notifier_group,
1343 struct lttng_kernel_abi_event_notifier *event_notifier_param,
1344 void *filter, enum lttng_kernel_abi_instrumentation itype)
1345 {
1346 struct lttng_event_notifier *event_notifier;
1347
1348 mutex_lock(&sessions_mutex);
1349 event_notifier = _lttng_event_notifier_create(event_desc, id,
1350 error_counter_index, event_notifier_group,
1351 event_notifier_param, filter, itype);
1352 mutex_unlock(&sessions_mutex);
1353 return event_notifier;
1354 }
1355
/*
 * Register the instrumentation backing @event with the kernel, if not
 * already registered, and mark it registered on success.
 * Only used for tracepoints (and syscall filter bits) for now: kprobe,
 * uprobe and kretprobe instrumentation is registered at event creation
 * time.
 */
static
void register_event(struct lttng_event *event)
{
	const struct lttng_kernel_event_desc *desc;
	int ret = -EINVAL;

	/* Idempotent: skip if already registered. */
	if (event->registered)
		return;

	desc = event->desc;
	switch (event->instrumentation) {
	case LTTNG_KERNEL_ABI_TRACEPOINT:
		/* Hook the LTTng probe callback onto the kernel tracepoint. */
		ret = lttng_wrapper_tracepoint_probe_register(desc->event_kname,
						  desc->probe_callback,
						  event);
		break;

	case LTTNG_KERNEL_ABI_SYSCALL:
		/* Enable this event in the channel's syscall filter. */
		ret = lttng_syscall_filter_enable_event(event->chan, event);
		break;

	case LTTNG_KERNEL_ABI_KPROBE:	/* Fall-through */
	case LTTNG_KERNEL_ABI_UPROBE:	/* Fall-through */
	case LTTNG_KERNEL_ABI_KRETPROBE:
		/* Those probe types were registered at event creation. */
		ret = 0;
		break;

	case LTTNG_KERNEL_ABI_FUNCTION:	/* Fall-through */
	case LTTNG_KERNEL_ABI_NOOP:	/* Fall-through */
	default:
		WARN_ON_ONCE(1);
	}
	if (!ret)
		event->registered = 1;
}
1392
1393 /*
1394 * Only used internally at session destruction.
1395 */
1396 int _lttng_event_unregister(struct lttng_event *event)
1397 {
1398 const struct lttng_kernel_event_desc *desc;
1399 int ret = -EINVAL;
1400
1401 if (!event->registered)
1402 return 0;
1403
1404 desc = event->desc;
1405 switch (event->instrumentation) {
1406 case LTTNG_KERNEL_ABI_TRACEPOINT:
1407 ret = lttng_wrapper_tracepoint_probe_unregister(event->desc->event_kname,
1408 event->desc->probe_callback,
1409 event);
1410 break;
1411
1412 case LTTNG_KERNEL_ABI_KPROBE:
1413 lttng_kprobes_unregister_event(event);
1414 ret = 0;
1415 break;
1416
1417 case LTTNG_KERNEL_ABI_KRETPROBE:
1418 lttng_kretprobes_unregister(event);
1419 ret = 0;
1420 break;
1421
1422 case LTTNG_KERNEL_ABI_SYSCALL:
1423 ret = lttng_syscall_filter_disable_event(event->chan, event);
1424 break;
1425
1426 case LTTNG_KERNEL_ABI_NOOP:
1427 ret = 0;
1428 break;
1429
1430 case LTTNG_KERNEL_ABI_UPROBE:
1431 lttng_uprobes_unregister_event(event);
1432 ret = 0;
1433 break;
1434
1435 case LTTNG_KERNEL_ABI_FUNCTION: /* Fall-through */
1436 default:
1437 WARN_ON_ONCE(1);
1438 }
1439 if (!ret)
1440 event->registered = 0;
1441 return ret;
1442 }
1443
/*
 * Register the instrumentation backing @event_notifier with the kernel,
 * if not already registered, and mark it registered on success.
 * Only used for tracepoints (and syscall filter bits) for now: kprobe
 * and uprobe instrumentation is registered at notifier creation time.
 */
static
void register_event_notifier(struct lttng_event_notifier *event_notifier)
{
	const struct lttng_kernel_event_desc *desc;
	int ret = -EINVAL;

	/* Idempotent: skip if already registered. */
	if (event_notifier->registered)
		return;

	desc = event_notifier->desc;
	switch (event_notifier->instrumentation) {
	case LTTNG_KERNEL_ABI_TRACEPOINT:
		/* Hook the notifier callback onto the kernel tracepoint. */
		ret = lttng_wrapper_tracepoint_probe_register(desc->event_kname,
						  desc->event_notifier_callback,
						  event_notifier);
		break;

	case LTTNG_KERNEL_ABI_SYSCALL:
		ret = lttng_syscall_filter_enable_event_notifier(event_notifier);
		break;

	case LTTNG_KERNEL_ABI_KPROBE:	/* Fall-through */
	case LTTNG_KERNEL_ABI_UPROBE:
		/* Those probe types were registered at notifier creation. */
		ret = 0;
		break;

	case LTTNG_KERNEL_ABI_KRETPROBE:	/* Fall-through */
	case LTTNG_KERNEL_ABI_FUNCTION:	/* Fall-through */
	case LTTNG_KERNEL_ABI_NOOP:	/* Fall-through */
	default:
		WARN_ON_ONCE(1);
	}
	if (!ret)
		event_notifier->registered = 1;
}
1480
1481 static
1482 int _lttng_event_notifier_unregister(
1483 struct lttng_event_notifier *event_notifier)
1484 {
1485 const struct lttng_kernel_event_desc *desc;
1486 int ret = -EINVAL;
1487
1488 if (!event_notifier->registered)
1489 return 0;
1490
1491 desc = event_notifier->desc;
1492 switch (event_notifier->instrumentation) {
1493 case LTTNG_KERNEL_ABI_TRACEPOINT:
1494 ret = lttng_wrapper_tracepoint_probe_unregister(event_notifier->desc->event_kname,
1495 event_notifier->desc->event_notifier_callback,
1496 event_notifier);
1497 break;
1498
1499 case LTTNG_KERNEL_ABI_KPROBE:
1500 lttng_kprobes_unregister_event_notifier(event_notifier);
1501 ret = 0;
1502 break;
1503
1504 case LTTNG_KERNEL_ABI_UPROBE:
1505 lttng_uprobes_unregister_event_notifier(event_notifier);
1506 ret = 0;
1507 break;
1508
1509 case LTTNG_KERNEL_ABI_SYSCALL:
1510 ret = lttng_syscall_filter_disable_event_notifier(event_notifier);
1511 break;
1512
1513 case LTTNG_KERNEL_ABI_KRETPROBE: /* Fall-through */
1514 case LTTNG_KERNEL_ABI_FUNCTION: /* Fall-through */
1515 case LTTNG_KERNEL_ABI_NOOP: /* Fall-through */
1516 default:
1517 WARN_ON_ONCE(1);
1518 }
1519 if (!ret)
1520 event_notifier->registered = 0;
1521 return ret;
1522 }
1523
/*
 * Release the resources owned by @event and free it: drop the references
 * taken at creation time (event descriptor and/or probe provider module),
 * unlink the event, destroy its context and filter runtime, and free the
 * enabler back-references.
 * Only used internally at session destruction.
 */
static
void _lttng_event_destroy(struct lttng_event *event)
{
	struct lttng_enabler_ref *enabler_ref, *tmp_enabler_ref;

	switch (event->instrumentation) {
	case LTTNG_KERNEL_ABI_TRACEPOINT:
		/* Matches the lttng_event_desc_get() done at creation. */
		lttng_event_desc_put(event->desc);
		break;

	case LTTNG_KERNEL_ABI_KPROBE:
		/* Matches the try_module_get() done at creation. */
		module_put(event->desc->owner);
		lttng_kprobes_destroy_event_private(event);
		break;

	case LTTNG_KERNEL_ABI_KRETPROBE:
		module_put(event->desc->owner);
		lttng_kretprobes_destroy_private(event);
		break;

	case LTTNG_KERNEL_ABI_SYSCALL:
		/* No per-event reference is held for syscall events. */
		break;

	case LTTNG_KERNEL_ABI_UPROBE:
		module_put(event->desc->owner);
		lttng_uprobes_destroy_event_private(event);
		break;

	case LTTNG_KERNEL_ABI_FUNCTION:	/* Fall-through */
	case LTTNG_KERNEL_ABI_NOOP:	/* Fall-through */
	default:
		WARN_ON_ONCE(1);
	}
	list_del(&event->list);
	lttng_kernel_destroy_context(event->ctx);
	lttng_free_event_filter_runtime(event);
	/* Free event enabler refs */
	list_for_each_entry_safe(enabler_ref, tmp_enabler_ref,
			&event->enablers_ref_head, node)
		kfree(enabler_ref);
	kmem_cache_free(event_cache, event);
}
1569
/*
 * Release the resources owned by @event_notifier and free it: drop the
 * references taken at creation time, unlink the notifier, destroy its
 * filter runtime, and free the enabler back-references.
 * Only used internally at session destruction.
 */
static
void _lttng_event_notifier_destroy(struct lttng_event_notifier *event_notifier)
{
	struct lttng_enabler_ref *enabler_ref, *tmp_enabler_ref;

	switch (event_notifier->instrumentation) {
	case LTTNG_KERNEL_ABI_TRACEPOINT:
		/* Matches the lttng_event_desc_get() done at creation. */
		lttng_event_desc_put(event_notifier->desc);
		break;

	case LTTNG_KERNEL_ABI_KPROBE:
		/* Matches the try_module_get() done at creation. */
		module_put(event_notifier->desc->owner);
		lttng_kprobes_destroy_event_notifier_private(event_notifier);
		break;

	case LTTNG_KERNEL_ABI_SYSCALL:
		/* No per-notifier reference is held for syscall notifiers. */
		break;

	case LTTNG_KERNEL_ABI_UPROBE:
		module_put(event_notifier->desc->owner);
		lttng_uprobes_destroy_event_notifier_private(event_notifier);
		break;

	case LTTNG_KERNEL_ABI_KRETPROBE:	/* Fall-through */
	case LTTNG_KERNEL_ABI_FUNCTION:	/* Fall-through */
	case LTTNG_KERNEL_ABI_NOOP:	/* Fall-through */
	default:
		WARN_ON_ONCE(1);
	}
	list_del(&event_notifier->list);
	lttng_free_event_notifier_filter_runtime(event_notifier);
	/* Free event enabler refs */
	list_for_each_entry_safe(enabler_ref, tmp_enabler_ref,
			&event_notifier->enablers_ref_head, node)
		kfree(enabler_ref);
	kmem_cache_free(event_notifier_cache, event_notifier);
}
1610
1611 struct lttng_id_tracker *get_tracker(struct lttng_session *session,
1612 enum tracker_type tracker_type)
1613 {
1614 switch (tracker_type) {
1615 case TRACKER_PID:
1616 return &session->pid_tracker;
1617 case TRACKER_VPID:
1618 return &session->vpid_tracker;
1619 case TRACKER_UID:
1620 return &session->uid_tracker;
1621 case TRACKER_VUID:
1622 return &session->vuid_tracker;
1623 case TRACKER_GID:
1624 return &session->gid_tracker;
1625 case TRACKER_VGID:
1626 return &session->vgid_tracker;
1627 default:
1628 WARN_ON_ONCE(1);
1629 return NULL;
1630 }
1631 }
1632
1633 int lttng_session_track_id(struct lttng_session *session,
1634 enum tracker_type tracker_type, int id)
1635 {
1636 struct lttng_id_tracker *tracker;
1637 int ret;
1638
1639 tracker = get_tracker(session, tracker_type);
1640 if (!tracker)
1641 return -EINVAL;
1642 if (id < -1)
1643 return -EINVAL;
1644 mutex_lock(&sessions_mutex);
1645 if (id == -1) {
1646 /* track all ids: destroy tracker. */
1647 lttng_id_tracker_destroy(tracker, true);
1648 ret = 0;
1649 } else {
1650 ret = lttng_id_tracker_add(tracker, id);
1651 }
1652 mutex_unlock(&sessions_mutex);
1653 return ret;
1654 }
1655
1656 int lttng_session_untrack_id(struct lttng_session *session,
1657 enum tracker_type tracker_type, int id)
1658 {
1659 struct lttng_id_tracker *tracker;
1660 int ret;
1661
1662 tracker = get_tracker(session, tracker_type);
1663 if (!tracker)
1664 return -EINVAL;
1665 if (id < -1)
1666 return -EINVAL;
1667 mutex_lock(&sessions_mutex);
1668 if (id == -1) {
1669 /* untrack all ids: replace by empty tracker. */
1670 ret = lttng_id_tracker_empty_set(tracker);
1671 } else {
1672 ret = lttng_id_tracker_del(tracker, id);
1673 }
1674 mutex_unlock(&sessions_mutex);
1675 return ret;
1676 }
1677
/*
 * seq_file "start" operation for the tracker id listing: take
 * sessions_mutex (released in id_list_stop()) and return the element at
 * position *pos, or NULL at end of list.
 */
static
void *id_list_start(struct seq_file *m, loff_t *pos)
{
	struct lttng_id_tracker *id_tracker = m->private;
	struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
	struct lttng_id_hash_node *e;
	int iter = 0, i;

	mutex_lock(&sessions_mutex);
	if (id_tracker_p) {
		/* Walk all hash buckets until the *pos-th node is reached. */
		for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
			struct hlist_head *head = &id_tracker_p->id_hash[i];

			lttng_hlist_for_each_entry(e, head, hlist) {
				if (iter++ >= *pos)
					return e;
			}
		}
	} else {
		/* ID tracker disabled. */
		if (iter >= *pos && iter == 0) {
			/*
			 * NOTE(review): id_tracker_p is NULL here, which is
			 * also the seq_file end-of-iteration marker — verify
			 * the disabled-tracker record is really emitted
			 * through id_list_show().
			 */
			return id_tracker_p;	/* empty tracker */
		}
		iter++;
	}
	/* End of list */
	return NULL;
}
1706
/*
 * seq_file "next" operation for the tracker id listing: advance *ppos and
 * return the element at the new position, or NULL at end of list.
 * Called with sessions_mutex held.
 */
static
void *id_list_next(struct seq_file *m, void *p, loff_t *ppos)
{
	struct lttng_id_tracker *id_tracker = m->private;
	struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
	struct lttng_id_hash_node *e;
	int iter = 0, i;

	(*ppos)++;
	if (id_tracker_p) {
		/*
		 * Rescan the hash table from the beginning to locate the
		 * (*ppos)-th node: O(n) per step, which is acceptable for
		 * these small id tables.
		 */
		for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
			struct hlist_head *head = &id_tracker_p->id_hash[i];

			lttng_hlist_for_each_entry(e, head, hlist) {
				if (iter++ >= *ppos)
					return e;
			}
		}
	} else {
		/* ID tracker disabled. */
		if (iter >= *ppos && iter == 0)
			return p;	/* empty tracker */
		iter++;
	}

	/* End of list */
	return NULL;
}
1736
/*
 * seq_file "stop" operation: release the sessions_mutex taken in
 * id_list_start().
 */
static
void id_list_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&sessions_mutex);
}
1742
/*
 * seq_file "show" operation: print one tracked id record.  A disabled
 * tracker (p == id_tracker->p sentinel from id_list_start()) is rendered
 * as id -1, meaning "track everything".
 */
static
int id_list_show(struct seq_file *m, void *p)
{
	struct lttng_id_tracker *id_tracker = m->private;
	struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
	int id;

	if (p == id_tracker_p) {
		/* Tracker disabled. */
		id = -1;
	} else {
		const struct lttng_id_hash_node *e = p;

		id = lttng_id_tracker_get_node_id(e);
	}
	/* Output format follows the session metadata declaration style. */
	switch (id_tracker->tracker_type) {
	case TRACKER_PID:
		seq_printf(m,	"process { pid = %d; };\n", id);
		break;
	case TRACKER_VPID:
		seq_printf(m,	"process { vpid = %d; };\n", id);
		break;
	case TRACKER_UID:
		seq_printf(m,	"user { uid = %d; };\n", id);
		break;
	case TRACKER_VUID:
		seq_printf(m,	"user { vuid = %d; };\n", id);
		break;
	case TRACKER_GID:
		seq_printf(m,	"group { gid = %d; };\n", id);
		break;
	case TRACKER_VGID:
		seq_printf(m,	"group { vgid = %d; };\n", id);
		break;
	default:
		seq_printf(m,	"UNKNOWN { field = %d };\n", id);
	}
	return 0;
}
1782
/* seq_file operations backing the tracker id listing file. */
static
const struct seq_operations lttng_tracker_ids_list_seq_ops = {
	.start = id_list_start,
	.next = id_list_next,
	.stop = id_list_stop,
	.show = id_list_show,
};
1790
/* Open operation for the tracker id listing file: set up the seq_file. */
static
int lttng_tracker_ids_list_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &lttng_tracker_ids_list_seq_ops);
}
1796
/*
 * Release operation for the tracker id listing file: release the
 * seq_file and, on success, drop the session file reference taken in
 * lttng_session_list_tracker_ids().
 */
static
int lttng_tracker_ids_list_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	struct lttng_id_tracker *id_tracker = m->private;
	int ret;

	WARN_ON_ONCE(!id_tracker);
	ret = seq_release(inode, file);
	if (!ret)
		fput(id_tracker->session->file);
	return ret;
}
1810
/* File operations for the per-session tracker id listing file. */
const struct file_operations lttng_tracker_ids_list_fops = {
	.owner = THIS_MODULE,
	.open = lttng_tracker_ids_list_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = lttng_tracker_ids_list_release,
};
1818
/*
 * Create and install an anonymous file descriptor listing the ids
 * currently tracked by @tracker_type for @session.
 * Returns the new fd on success, a negative error value on failure.
 */
int lttng_session_list_tracker_ids(struct lttng_session *session,
		enum tracker_type tracker_type)
{
	struct file *tracker_ids_list_file;
	struct seq_file *m;
	int file_fd, ret;

	file_fd = lttng_get_unused_fd();
	if (file_fd < 0) {
		ret = file_fd;
		goto fd_error;
	}

	tracker_ids_list_file = anon_inode_getfile("[lttng_tracker_ids_list]",
					  &lttng_tracker_ids_list_fops,
					  NULL, O_RDWR);
	if (IS_ERR(tracker_ids_list_file)) {
		ret = PTR_ERR(tracker_ids_list_file);
		goto file_error;
	}
	/*
	 * Pin the session file so it outlives the listing file; dropped in
	 * lttng_tracker_ids_list_release().
	 */
	if (!atomic_long_add_unless(&session->file->f_count, 1, LONG_MAX)) {
		ret = -EOVERFLOW;
		goto refcount_error;
	}
	ret = lttng_tracker_ids_list_fops.open(NULL, tracker_ids_list_file);
	if (ret < 0)
		goto open_error;
	m = tracker_ids_list_file->private_data;

	/* Stash the tracker for the seq_file iteration callbacks. */
	m->private = get_tracker(session, tracker_type);
	BUG_ON(!m->private);
	fd_install(file_fd, tracker_ids_list_file);

	return file_fd;

open_error:
	atomic_long_dec(&session->file->f_count);
refcount_error:
	fput(tracker_ids_list_file);
file_error:
	put_unused_fd(file_fd);
fd_error:
	return ret;
}
1863
1864 /*
1865 * Enabler management.
1866 */
1867 static
1868 int lttng_match_enabler_star_glob(const char *desc_name,
1869 const char *pattern)
1870 {
1871 if (!strutils_star_glob_match(pattern, LTTNG_SIZE_MAX,
1872 desc_name, LTTNG_SIZE_MAX))
1873 return 0;
1874 return 1;
1875 }
1876
/*
 * Match an event name against an exact-name enabler.
 * Returns 1 on exact match, 0 otherwise.
 */
static
int lttng_match_enabler_name(const char *desc_name,
		const char *name)
{
	return strcmp(desc_name, name) == 0;
}
1885
/*
 * Check whether a probe event descriptor matches an enabler.
 *
 * Returns 1 on match, 0 on non-match, negative error value on invalid
 * enabler parameters.  Only tracepoint and syscall instrumentation is
 * handled here; any other type is a caller bug (WARN + -EINVAL).
 */
int lttng_desc_match_enabler(const struct lttng_kernel_event_desc *desc,
		struct lttng_enabler *enabler)
{
	const char *desc_name, *enabler_name;
	bool compat = false, entry = false;

	enabler_name = enabler->event_param.name;
	switch (enabler->event_param.instrumentation) {
	case LTTNG_KERNEL_ABI_TRACEPOINT:
		desc_name = desc->event_name;
		switch (enabler->format_type) {
		case LTTNG_ENABLER_FORMAT_STAR_GLOB:
			return lttng_match_enabler_star_glob(desc_name, enabler_name);
		case LTTNG_ENABLER_FORMAT_NAME:
			return lttng_match_enabler_name(desc_name, enabler_name);
		default:
			return -EINVAL;
		}
		break;

	case LTTNG_KERNEL_ABI_SYSCALL:
		desc_name = desc->event_name;
		/*
		 * Syscall event descriptor names carry "compat_" and
		 * "syscall_entry_"/"syscall_exit_" prefixes: strip them and
		 * remember the ABI and entry/exit they encoded.
		 */
		if (!strncmp(desc_name, "compat_", strlen("compat_"))) {
			desc_name += strlen("compat_");
			compat = true;
		}
		if (!strncmp(desc_name, "syscall_exit_",
				strlen("syscall_exit_"))) {
			desc_name += strlen("syscall_exit_");
		} else if (!strncmp(desc_name, "syscall_entry_",
				strlen("syscall_entry_"))) {
			desc_name += strlen("syscall_entry_");
			entry = true;
		} else {
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
		/* Reject entry/exit mismatches with the enabler. */
		switch (enabler->event_param.u.syscall.entryexit) {
		case LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT:
			break;
		case LTTNG_KERNEL_ABI_SYSCALL_ENTRY:
			if (!entry)
				return 0;
			break;
		case LTTNG_KERNEL_ABI_SYSCALL_EXIT:
			if (entry)
				return 0;
			break;
		default:
			return -EINVAL;
		}
		/* Reject native/compat ABI mismatches with the enabler. */
		switch (enabler->event_param.u.syscall.abi) {
		case LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL:
			break;
		case LTTNG_KERNEL_ABI_SYSCALL_ABI_NATIVE:
			if (compat)
				return 0;
			break;
		case LTTNG_KERNEL_ABI_SYSCALL_ABI_COMPAT:
			if (!compat)
				return 0;
			break;
		default:
			return -EINVAL;
		}
		/* Finally, match by name (glob or exact). */
		switch (enabler->event_param.u.syscall.match) {
		case LTTNG_KERNEL_ABI_SYSCALL_MATCH_NAME:
			switch (enabler->format_type) {
			case LTTNG_ENABLER_FORMAT_STAR_GLOB:
				return lttng_match_enabler_star_glob(desc_name, enabler_name);
			case LTTNG_ENABLER_FORMAT_NAME:
				return lttng_match_enabler_name(desc_name, enabler_name);
			default:
				return -EINVAL;
			}
			break;
		case LTTNG_KERNEL_ABI_SYSCALL_MATCH_NR:
			return -EINVAL;	/* Not implemented. */
		default:
			return -EINVAL;
		}
		break;

	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}
}
1974
1975 static
1976 int lttng_event_enabler_match_event(struct lttng_event_enabler *event_enabler,
1977 struct lttng_event *event)
1978 {
1979 struct lttng_enabler *base_enabler = lttng_event_enabler_as_enabler(
1980 event_enabler);
1981
1982 if (base_enabler->event_param.instrumentation != event->instrumentation)
1983 return 0;
1984 if (lttng_desc_match_enabler(event->desc, base_enabler)
1985 && event->chan == event_enabler->chan)
1986 return 1;
1987 else
1988 return 0;
1989 }
1990
1991 static
1992 int lttng_event_notifier_enabler_match_event_notifier(struct lttng_event_notifier_enabler *event_notifier_enabler,
1993 struct lttng_event_notifier *event_notifier)
1994 {
1995 struct lttng_enabler *base_enabler = lttng_event_notifier_enabler_as_enabler(
1996 event_notifier_enabler);
1997
1998 if (base_enabler->event_param.instrumentation != event_notifier->instrumentation)
1999 return 0;
2000 if (lttng_desc_match_enabler(event_notifier->desc, base_enabler)
2001 && event_notifier->group == event_notifier_enabler->group
2002 && event_notifier->user_token == event_notifier_enabler->base.user_token)
2003 return 1;
2004 else
2005 return 0;
2006 }
2007
2008 static
2009 struct lttng_enabler_ref *lttng_enabler_ref(
2010 struct list_head *enablers_ref_list,
2011 struct lttng_enabler *enabler)
2012 {
2013 struct lttng_enabler_ref *enabler_ref;
2014
2015 list_for_each_entry(enabler_ref, enablers_ref_list, node) {
2016 if (enabler_ref->ref == enabler)
2017 return enabler_ref;
2018 }
2019 return NULL;
2020 }
2021
2022 static
2023 void lttng_create_tracepoint_event_if_missing(struct lttng_event_enabler *event_enabler)
2024 {
2025 struct lttng_session *session = event_enabler->chan->session;
2026 struct lttng_kernel_probe_desc *probe_desc;
2027 const struct lttng_kernel_event_desc *desc;
2028 int i;
2029 struct list_head *probe_list;
2030
2031 probe_list = lttng_get_probe_list_head();
2032 /*
2033 * For each probe event, if we find that a probe event matches
2034 * our enabler, create an associated lttng_event if not
2035 * already present.
2036 */
2037 list_for_each_entry(probe_desc, probe_list, head) {
2038 for (i = 0; i < probe_desc->nr_events; i++) {
2039 int found = 0;
2040 struct hlist_head *head;
2041 struct lttng_event *event;
2042
2043 desc = probe_desc->event_desc[i];
2044 if (!lttng_desc_match_enabler(desc,
2045 lttng_event_enabler_as_enabler(event_enabler)))
2046 continue;
2047
2048 /*
2049 * Check if already created.
2050 */
2051 head = utils_borrow_hash_table_bucket(
2052 session->events_ht.table, LTTNG_EVENT_HT_SIZE,
2053 desc->event_name);
2054 lttng_hlist_for_each_entry(event, head, hlist) {
2055 if (event->desc == desc
2056 && event->chan == event_enabler->chan)
2057 found = 1;
2058 }
2059 if (found)
2060 continue;
2061
2062 /*
2063 * We need to create an event for this
2064 * event probe.
2065 */
2066 event = _lttng_event_create(event_enabler->chan,
2067 NULL, NULL, desc,
2068 LTTNG_KERNEL_ABI_TRACEPOINT);
2069 if (!event) {
2070 printk(KERN_INFO "LTTng: Unable to create event %s\n",
2071 probe_desc->event_desc[i]->event_name);
2072 }
2073 }
2074 }
2075 }
2076
/*
 * Walk all loaded tracepoint probe descriptors and create a
 * struct lttng_event_notifier in the enabler's group for each descriptor
 * matching the enabler, unless such a notifier already exists for the
 * enabler's user token.
 * Should be called with sessions mutex held.
 */
static
void lttng_create_tracepoint_event_notifier_if_missing(struct lttng_event_notifier_enabler *event_notifier_enabler)
{
	struct lttng_event_notifier_group *event_notifier_group = event_notifier_enabler->group;
	struct lttng_kernel_probe_desc *probe_desc;
	const struct lttng_kernel_event_desc *desc;
	int i;
	struct list_head *probe_list;

	probe_list = lttng_get_probe_list_head();
	/*
	 * For each probe event, if we find that a probe event matches
	 * our enabler, create an associated lttng_event_notifier if not
	 * already present.
	 */
	list_for_each_entry(probe_desc, probe_list, head) {
		for (i = 0; i < probe_desc->nr_events; i++) {
			int found = 0;
			struct hlist_head *head;
			struct lttng_event_notifier *event_notifier;

			desc = probe_desc->event_desc[i];
			if (!lttng_desc_match_enabler(desc,
					lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)))
				continue;

			/*
			 * Check if already created.
			 */
			head = utils_borrow_hash_table_bucket(
				event_notifier_group->event_notifiers_ht.table,
				LTTNG_EVENT_NOTIFIER_HT_SIZE, desc->event_name);
			lttng_hlist_for_each_entry(event_notifier, head, hlist) {
				if (event_notifier->desc == desc
						&& event_notifier->user_token == event_notifier_enabler->base.user_token)
					found = 1;
			}
			if (found)
				continue;

			/*
			 * We need to create a event_notifier for this event probe.
			 */
			event_notifier = _lttng_event_notifier_create(desc,
				event_notifier_enabler->base.user_token,
				event_notifier_enabler->error_counter_index,
				event_notifier_group, NULL, NULL,
				LTTNG_KERNEL_ABI_TRACEPOINT);
			if (IS_ERR(event_notifier)) {
				printk(KERN_INFO "Unable to create event_notifier %s\n",
					probe_desc->event_desc[i]->event_name);
			}
		}
	}
}
2132
/*
 * Register syscall instrumentation for an event enabler; the syscall
 * layer creates the matching events.
 * Should be called with sessions mutex held.
 */
static
void lttng_create_syscall_event_if_missing(struct lttng_event_enabler *event_enabler)
{
	int ret;

	ret = lttng_syscalls_register_event(event_enabler, NULL);
	WARN_ON_ONCE(ret);
}
2141
/*
 * Register syscall instrumentation for an event notifier enabler and
 * create the matching event notifiers.
 * Should be called with sessions mutex held.
 */
static
void lttng_create_syscall_event_notifier_if_missing(struct lttng_event_notifier_enabler *event_notifier_enabler)
{
	int ret;

	ret = lttng_syscalls_register_event_notifier(event_notifier_enabler, NULL);
	WARN_ON_ONCE(ret);
	/* "syscals" spelling matches the symbol as declared elsewhere. */
	ret = lttng_syscals_create_matching_event_notifiers(event_notifier_enabler, NULL);
	WARN_ON_ONCE(ret);
}
2152
2153 /*
2154 * Create struct lttng_event if it is missing and present in the list of
2155 * tracepoint probes.
2156 * Should be called with sessions mutex held.
2157 */
2158 static
2159 void lttng_create_event_if_missing(struct lttng_event_enabler *event_enabler)
2160 {
2161 switch (event_enabler->base.event_param.instrumentation) {
2162 case LTTNG_KERNEL_ABI_TRACEPOINT:
2163 lttng_create_tracepoint_event_if_missing(event_enabler);
2164 break;
2165
2166 case LTTNG_KERNEL_ABI_SYSCALL:
2167 lttng_create_syscall_event_if_missing(event_enabler);
2168 break;
2169
2170 default:
2171 WARN_ON_ONCE(1);
2172 break;
2173 }
2174 }
2175
/*
 * Create events associated with an event_enabler (if not already present),
 * and add backward reference from the event to the enabler.
 * Should be called with sessions mutex held.
 * Returns 0 on success, -ENOMEM if a backward reference cannot be allocated.
 */
static
int lttng_event_enabler_ref_events(struct lttng_event_enabler *event_enabler)
{
	struct lttng_channel *chan = event_enabler->chan;
	struct lttng_session *session = event_enabler->chan->session;
	struct lttng_enabler *base_enabler = lttng_event_enabler_as_enabler(event_enabler);
	struct lttng_event *event;

	/*
	 * A "*" wildcard syscall enabler (all ABIs, name match) toggles the
	 * per-channel "all entry/exit" syscall state rather than matching
	 * each syscall event individually.
	 */
	if (base_enabler->event_param.instrumentation == LTTNG_KERNEL_ABI_SYSCALL &&
			base_enabler->event_param.u.syscall.abi == LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL &&
			base_enabler->event_param.u.syscall.match == LTTNG_KERNEL_ABI_SYSCALL_MATCH_NAME &&
			!strcmp(base_enabler->event_param.name, "*")) {
		int enabled = base_enabler->enabled;
		enum lttng_kernel_abi_syscall_entryexit entryexit = base_enabler->event_param.u.syscall.entryexit;

		if (entryexit == LTTNG_KERNEL_ABI_SYSCALL_ENTRY || entryexit == LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT)
			WRITE_ONCE(chan->syscall_all_entry, enabled);

		if (entryexit == LTTNG_KERNEL_ABI_SYSCALL_EXIT || entryexit == LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT)
			WRITE_ONCE(chan->syscall_all_exit, enabled);
	}

	/* First ensure that probe events are created for this enabler. */
	lttng_create_event_if_missing(event_enabler);

	/* For each event matching event_enabler in session event list. */
	list_for_each_entry(event, &session->events, list) {
		struct lttng_enabler_ref *enabler_ref;

		if (!lttng_event_enabler_match_event(event_enabler, event))
			continue;
		enabler_ref = lttng_enabler_ref(&event->enablers_ref_head,
			lttng_event_enabler_as_enabler(event_enabler));
		if (!enabler_ref) {
			/*
			 * If no backward ref, create it.
			 * Add backward ref from event to event_enabler.
			 */
			enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
			if (!enabler_ref)
				return -ENOMEM;
			enabler_ref->ref = lttng_event_enabler_as_enabler(event_enabler);
			list_add(&enabler_ref->node,
				&event->enablers_ref_head);
		}

		/*
		 * Link filter bytecodes if not linked yet.
		 */
		lttng_enabler_link_bytecode(event->desc,
			lttng_static_ctx,
			&event->filter_bytecode_runtime_head,
			&lttng_event_enabler_as_enabler(event_enabler)->filter_bytecode_head);

		/* TODO: merge event context. */
	}
	return 0;
}
2239
2240 /*
2241 * Create struct lttng_event_notifier if it is missing and present in the list of
2242 * tracepoint probes.
2243 * Should be called with sessions mutex held.
2244 */
2245 static
2246 void lttng_create_event_notifier_if_missing(struct lttng_event_notifier_enabler *event_notifier_enabler)
2247 {
2248 switch (event_notifier_enabler->base.event_param.instrumentation) {
2249 case LTTNG_KERNEL_ABI_TRACEPOINT:
2250 lttng_create_tracepoint_event_notifier_if_missing(event_notifier_enabler);
2251 break;
2252
2253 case LTTNG_KERNEL_ABI_SYSCALL:
2254 lttng_create_syscall_event_notifier_if_missing(event_notifier_enabler);
2255 break;
2256
2257 default:
2258 WARN_ON_ONCE(1);
2259 break;
2260 }
2261 }
2262
/*
 * Create event_notifiers associated with a event_notifier enabler (if not already present),
 * and add backward references from the event notifiers to the enabler.
 * Callers visible in this file invoke it with the sessions mutex held.
 * Returns 0 on success, -ENOMEM if a backward reference cannot be allocated.
 */
static
int lttng_event_notifier_enabler_ref_event_notifiers(
		struct lttng_event_notifier_enabler *event_notifier_enabler)
{
	struct lttng_event_notifier_group *event_notifier_group = event_notifier_enabler->group;
	struct lttng_enabler *base_enabler = lttng_event_notifier_enabler_as_enabler(event_notifier_enabler);
	struct lttng_event_notifier *event_notifier;

	/*
	 * A "*" wildcard syscall enabler toggles the group-wide
	 * "all entry/exit" syscall state rather than matching each
	 * syscall event notifier individually.
	 */
	if (base_enabler->event_param.instrumentation == LTTNG_KERNEL_ABI_SYSCALL &&
			base_enabler->event_param.u.syscall.abi == LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL &&
			base_enabler->event_param.u.syscall.match == LTTNG_KERNEL_ABI_SYSCALL_MATCH_NAME &&
			!strcmp(base_enabler->event_param.name, "*")) {

		int enabled = base_enabler->enabled;
		enum lttng_kernel_abi_syscall_entryexit entryexit = base_enabler->event_param.u.syscall.entryexit;

		if (entryexit == LTTNG_KERNEL_ABI_SYSCALL_ENTRY || entryexit == LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT)
			WRITE_ONCE(event_notifier_group->syscall_all_entry, enabled);

		if (entryexit == LTTNG_KERNEL_ABI_SYSCALL_EXIT || entryexit == LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT)
			WRITE_ONCE(event_notifier_group->syscall_all_exit, enabled);

	}

	/* First ensure that probe event_notifiers are created for this enabler. */
	lttng_create_event_notifier_if_missing(event_notifier_enabler);

	/* Link the created event_notifier with its associated enabler. */
	list_for_each_entry(event_notifier, &event_notifier_group->event_notifiers_head, list) {
		struct lttng_enabler_ref *enabler_ref;

		if (!lttng_event_notifier_enabler_match_event_notifier(event_notifier_enabler, event_notifier))
			continue;

		enabler_ref = lttng_enabler_ref(&event_notifier->enablers_ref_head,
			lttng_event_notifier_enabler_as_enabler(event_notifier_enabler));
		if (!enabler_ref) {
			/*
			 * If no backward ref, create it.
			 * Add backward ref from event_notifier to enabler.
			 */
			enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
			if (!enabler_ref)
				return -ENOMEM;

			enabler_ref->ref = lttng_event_notifier_enabler_as_enabler(
				event_notifier_enabler);
			list_add(&enabler_ref->node,
				&event_notifier->enablers_ref_head);
		}

		/*
		 * Link filter bytecodes if not linked yet.
		 */
		lttng_enabler_link_bytecode(event_notifier->desc,
			lttng_static_ctx, &event_notifier->filter_bytecode_runtime_head,
			&lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->filter_bytecode_head);

		/* Link capture bytecodes if not linked yet. */
		lttng_enabler_link_bytecode(event_notifier->desc,
			lttng_static_ctx, &event_notifier->capture_bytecode_runtime_head,
			&event_notifier_enabler->capture_bytecode_head);

		event_notifier->num_captures = event_notifier_enabler->num_captures;
	}
	return 0;
}
2333
2334 /*
2335 * Called at module load: connect the probe on all enablers matching
2336 * this event.
2337 * Called with sessions lock held.
2338 */
2339 int lttng_fix_pending_events(void)
2340 {
2341 struct lttng_session *session;
2342
2343 list_for_each_entry(session, &sessions, list)
2344 lttng_session_lazy_sync_event_enablers(session);
2345 return 0;
2346 }
2347
2348 static bool lttng_event_notifier_group_has_active_event_notifiers(
2349 struct lttng_event_notifier_group *event_notifier_group)
2350 {
2351 struct lttng_event_notifier_enabler *event_notifier_enabler;
2352
2353 list_for_each_entry(event_notifier_enabler, &event_notifier_group->enablers_head,
2354 node) {
2355 if (event_notifier_enabler->base.enabled)
2356 return true;
2357 }
2358 return false;
2359 }
2360
2361 bool lttng_event_notifier_active(void)
2362 {
2363 struct lttng_event_notifier_group *event_notifier_group;
2364
2365 list_for_each_entry(event_notifier_group, &event_notifier_groups, node) {
2366 if (lttng_event_notifier_group_has_active_event_notifiers(event_notifier_group))
2367 return true;
2368 }
2369 return false;
2370 }
2371
2372 int lttng_fix_pending_event_notifiers(void)
2373 {
2374 struct lttng_event_notifier_group *event_notifier_group;
2375
2376 list_for_each_entry(event_notifier_group, &event_notifier_groups, node)
2377 lttng_event_notifier_group_sync_enablers(event_notifier_group);
2378 return 0;
2379 }
2380
2381 struct lttng_event_enabler *lttng_event_enabler_create(
2382 enum lttng_enabler_format_type format_type,
2383 struct lttng_kernel_abi_event *event_param,
2384 struct lttng_channel *chan)
2385 {
2386 struct lttng_event_enabler *event_enabler;
2387
2388 event_enabler = kzalloc(sizeof(*event_enabler), GFP_KERNEL);
2389 if (!event_enabler)
2390 return NULL;
2391 event_enabler->base.format_type = format_type;
2392 INIT_LIST_HEAD(&event_enabler->base.filter_bytecode_head);
2393 memcpy(&event_enabler->base.event_param, event_param,
2394 sizeof(event_enabler->base.event_param));
2395 event_enabler->chan = chan;
2396 /* ctx left NULL */
2397 event_enabler->base.enabled = 0;
2398 mutex_lock(&sessions_mutex);
2399 list_add(&event_enabler->node, &event_enabler->chan->session->enablers_head);
2400 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
2401 mutex_unlock(&sessions_mutex);
2402 return event_enabler;
2403 }
2404
2405 int lttng_event_enabler_enable(struct lttng_event_enabler *event_enabler)
2406 {
2407 mutex_lock(&sessions_mutex);
2408 lttng_event_enabler_as_enabler(event_enabler)->enabled = 1;
2409 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
2410 mutex_unlock(&sessions_mutex);
2411 return 0;
2412 }
2413
2414 int lttng_event_enabler_disable(struct lttng_event_enabler *event_enabler)
2415 {
2416 mutex_lock(&sessions_mutex);
2417 lttng_event_enabler_as_enabler(event_enabler)->enabled = 0;
2418 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
2419 mutex_unlock(&sessions_mutex);
2420 return 0;
2421 }
2422
2423 static
2424 int lttng_enabler_attach_filter_bytecode(struct lttng_enabler *enabler,
2425 struct lttng_kernel_abi_filter_bytecode __user *bytecode)
2426 {
2427 struct lttng_bytecode_node *bytecode_node;
2428 uint32_t bytecode_len;
2429 int ret;
2430
2431 ret = get_user(bytecode_len, &bytecode->len);
2432 if (ret)
2433 return ret;
2434 bytecode_node = lttng_kvzalloc(sizeof(*bytecode_node) + bytecode_len,
2435 GFP_KERNEL);
2436 if (!bytecode_node)
2437 return -ENOMEM;
2438 ret = copy_from_user(&bytecode_node->bc, bytecode,
2439 sizeof(*bytecode) + bytecode_len);
2440 if (ret)
2441 goto error_free;
2442
2443 bytecode_node->type = LTTNG_BYTECODE_NODE_TYPE_FILTER;
2444 bytecode_node->enabler = enabler;
2445 /* Enforce length based on allocated size */
2446 bytecode_node->bc.len = bytecode_len;
2447 list_add_tail(&bytecode_node->node, &enabler->filter_bytecode_head);
2448
2449 return 0;
2450
2451 error_free:
2452 lttng_kvfree(bytecode_node);
2453 return ret;
2454 }
2455
2456 int lttng_event_enabler_attach_filter_bytecode(struct lttng_event_enabler *event_enabler,
2457 struct lttng_kernel_abi_filter_bytecode __user *bytecode)
2458 {
2459 int ret;
2460 ret = lttng_enabler_attach_filter_bytecode(
2461 lttng_event_enabler_as_enabler(event_enabler), bytecode);
2462 if (ret)
2463 goto error;
2464
2465 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
2466 return 0;
2467
2468 error:
2469 return ret;
2470 }
2471
2472 int lttng_event_add_callsite(struct lttng_event *event,
2473 struct lttng_kernel_abi_event_callsite __user *callsite)
2474 {
2475
2476 switch (event->instrumentation) {
2477 case LTTNG_KERNEL_ABI_UPROBE:
2478 return lttng_uprobes_event_add_callsite(event, callsite);
2479 default:
2480 return -EINVAL;
2481 }
2482 }
2483
2484 static
2485 void lttng_enabler_destroy(struct lttng_enabler *enabler)
2486 {
2487 struct lttng_bytecode_node *filter_node, *tmp_filter_node;
2488
2489 /* Destroy filter bytecode */
2490 list_for_each_entry_safe(filter_node, tmp_filter_node,
2491 &enabler->filter_bytecode_head, node) {
2492 lttng_kvfree(filter_node);
2493 }
2494 }
2495
/*
 * Tear down an event enabler: free its filter bytecode, destroy its
 * context, unlink it from the session's enablers list and free it.
 */
static
void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler)
{
	lttng_enabler_destroy(lttng_event_enabler_as_enabler(event_enabler));

	/* Destroy contexts */
	lttng_kernel_destroy_context(event_enabler->ctx);

	list_del(&event_enabler->node);
	kfree(event_enabler);
}
2507
2508 struct lttng_event_notifier_enabler *lttng_event_notifier_enabler_create(
2509 struct lttng_event_notifier_group *event_notifier_group,
2510 enum lttng_enabler_format_type format_type,
2511 struct lttng_kernel_abi_event_notifier *event_notifier_param)
2512 {
2513 struct lttng_event_notifier_enabler *event_notifier_enabler;
2514
2515 event_notifier_enabler = kzalloc(sizeof(*event_notifier_enabler), GFP_KERNEL);
2516 if (!event_notifier_enabler)
2517 return NULL;
2518
2519 event_notifier_enabler->base.format_type = format_type;
2520 INIT_LIST_HEAD(&event_notifier_enabler->base.filter_bytecode_head);
2521 INIT_LIST_HEAD(&event_notifier_enabler->capture_bytecode_head);
2522
2523 event_notifier_enabler->error_counter_index = event_notifier_param->error_counter_index;
2524 event_notifier_enabler->num_captures = 0;
2525
2526 memcpy(&event_notifier_enabler->base.event_param, &event_notifier_param->event,
2527 sizeof(event_notifier_enabler->base.event_param));
2528
2529 event_notifier_enabler->base.enabled = 0;
2530 event_notifier_enabler->base.user_token = event_notifier_param->event.token;
2531 event_notifier_enabler->group = event_notifier_group;
2532
2533 mutex_lock(&sessions_mutex);
2534 list_add(&event_notifier_enabler->node, &event_notifier_enabler->group->enablers_head);
2535 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2536
2537 mutex_unlock(&sessions_mutex);
2538
2539 return event_notifier_enabler;
2540 }
2541
2542 int lttng_event_notifier_enabler_enable(
2543 struct lttng_event_notifier_enabler *event_notifier_enabler)
2544 {
2545 mutex_lock(&sessions_mutex);
2546 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 1;
2547 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2548 mutex_unlock(&sessions_mutex);
2549 return 0;
2550 }
2551
2552 int lttng_event_notifier_enabler_disable(
2553 struct lttng_event_notifier_enabler *event_notifier_enabler)
2554 {
2555 mutex_lock(&sessions_mutex);
2556 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 0;
2557 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2558 mutex_unlock(&sessions_mutex);
2559 return 0;
2560 }
2561
2562 int lttng_event_notifier_enabler_attach_filter_bytecode(
2563 struct lttng_event_notifier_enabler *event_notifier_enabler,
2564 struct lttng_kernel_abi_filter_bytecode __user *bytecode)
2565 {
2566 int ret;
2567
2568 ret = lttng_enabler_attach_filter_bytecode(
2569 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler),
2570 bytecode);
2571 if (ret)
2572 goto error;
2573
2574 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2575 return 0;
2576
2577 error:
2578 return ret;
2579 }
2580
2581 int lttng_event_notifier_enabler_attach_capture_bytecode(
2582 struct lttng_event_notifier_enabler *event_notifier_enabler,
2583 struct lttng_kernel_abi_capture_bytecode __user *bytecode)
2584 {
2585 struct lttng_bytecode_node *bytecode_node;
2586 struct lttng_enabler *enabler =
2587 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler);
2588 uint32_t bytecode_len;
2589 int ret;
2590
2591 ret = get_user(bytecode_len, &bytecode->len);
2592 if (ret)
2593 return ret;
2594
2595 bytecode_node = lttng_kvzalloc(sizeof(*bytecode_node) + bytecode_len,
2596 GFP_KERNEL);
2597 if (!bytecode_node)
2598 return -ENOMEM;
2599
2600 ret = copy_from_user(&bytecode_node->bc, bytecode,
2601 sizeof(*bytecode) + bytecode_len);
2602 if (ret)
2603 goto error_free;
2604
2605 bytecode_node->type = LTTNG_BYTECODE_NODE_TYPE_CAPTURE;
2606 bytecode_node->enabler = enabler;
2607
2608 /* Enforce length based on allocated size */
2609 bytecode_node->bc.len = bytecode_len;
2610 list_add_tail(&bytecode_node->node, &event_notifier_enabler->capture_bytecode_head);
2611
2612 event_notifier_enabler->num_captures++;
2613
2614 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2615 goto end;
2616
2617 error_free:
2618 lttng_kvfree(bytecode_node);
2619 end:
2620 return ret;
2621 }
2622
2623 int lttng_event_notifier_add_callsite(struct lttng_event_notifier *event_notifier,
2624 struct lttng_kernel_abi_event_callsite __user *callsite)
2625 {
2626
2627 switch (event_notifier->instrumentation) {
2628 case LTTNG_KERNEL_ABI_UPROBE:
2629 return lttng_uprobes_event_notifier_add_callsite(event_notifier,
2630 callsite);
2631 default:
2632 return -EINVAL;
2633 }
2634 }
2635
2636 static
2637 void lttng_event_notifier_enabler_destroy(
2638 struct lttng_event_notifier_enabler *event_notifier_enabler)
2639 {
2640 if (!event_notifier_enabler) {
2641 return;
2642 }
2643
2644 list_del(&event_notifier_enabler->node);
2645
2646 lttng_enabler_destroy(lttng_event_notifier_enabler_as_enabler(event_notifier_enabler));
2647 kfree(event_notifier_enabler);
2648 }
2649
/*
 * lttng_session_sync_event_enablers should be called just before starting a
 * session.
 * Should be called with sessions mutex held.
 */
static
void lttng_session_sync_event_enablers(struct lttng_session *session)
{
	struct lttng_event_enabler *event_enabler;
	struct lttng_event *event;

	/* Create missing events and backward enabler references first. */
	list_for_each_entry(event_enabler, &session->enablers_head, node)
		lttng_event_enabler_ref_events(event_enabler);
	/*
	 * For each event, if at least one of its enablers is enabled,
	 * and its channel and session transient states are enabled, we
	 * enable the event, else we disable it.
	 */
	list_for_each_entry(event, &session->events, list) {
		struct lttng_enabler_ref *enabler_ref;
		struct lttng_bytecode_runtime *runtime;
		int enabled = 0, has_enablers_without_bytecode = 0;

		switch (event->instrumentation) {
		case LTTNG_KERNEL_ABI_TRACEPOINT:	/* Fall-through */
		case LTTNG_KERNEL_ABI_SYSCALL:
			/* Enable events */
			list_for_each_entry(enabler_ref,
					&event->enablers_ref_head, node) {
				if (enabler_ref->ref->enabled) {
					enabled = 1;
					break;
				}
			}
			break;

		default:
			/* Not handled with lazy sync. */
			continue;
		}
		/*
		 * Enabled state is based on union of enablers, with
		 * intersection of session and channel transient enable
		 * states.
		 */
		enabled = enabled && session->tstate && event->chan->tstate;

		WRITE_ONCE(event->enabled, enabled);
		/*
		 * Sync tracepoint registration with event enabled
		 * state.
		 */
		if (enabled) {
			register_event(event);
		} else {
			_lttng_event_unregister(event);
		}

		/* Check if has enablers without bytecode enabled */
		list_for_each_entry(enabler_ref,
				&event->enablers_ref_head, node) {
			if (enabler_ref->ref->enabled
					&& list_empty(&enabler_ref->ref->filter_bytecode_head)) {
				has_enablers_without_bytecode = 1;
				break;
			}
		}
		event->has_enablers_without_bytecode =
			has_enablers_without_bytecode;

		/* Enable filters */
		list_for_each_entry(runtime,
				&event->filter_bytecode_runtime_head, node)
			lttng_bytecode_filter_sync_state(runtime);
	}
}
2726
/*
 * Apply enablers to session events, adding events to session if need
 * be. It is required after each modification applied to an active
 * session, and right before session "start".
 * "lazy" sync means we only sync if required.
 * Should be called with sessions mutex held.
 */
static
void lttng_session_lazy_sync_event_enablers(struct lttng_session *session)
{
	/* We can skip if session is not active: sync happens on "start". */
	if (!session->active)
		return;
	lttng_session_sync_event_enablers(session);
}
2742
/*
 * Synchronize event notifiers of a group with their enablers: create
 * missing notifiers, compute each notifier's enabled state from the
 * union of its enablers, and sync tracepoint registration plus filter
 * and capture bytecode states accordingly.
 * Callers visible in this file invoke it with the sessions mutex held.
 */
static
void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group)
{
	struct lttng_event_notifier_enabler *event_notifier_enabler;
	struct lttng_event_notifier *event_notifier;

	list_for_each_entry(event_notifier_enabler, &event_notifier_group->enablers_head, node)
		lttng_event_notifier_enabler_ref_event_notifiers(event_notifier_enabler);

	/*
	 * For each event_notifier, if at least one of its enablers is enabled,
	 * we enable the event_notifier, else we disable it.
	 */
	list_for_each_entry(event_notifier, &event_notifier_group->event_notifiers_head, list) {
		struct lttng_enabler_ref *enabler_ref;
		struct lttng_bytecode_runtime *runtime;
		int enabled = 0, has_enablers_without_bytecode = 0;

		switch (event_notifier->instrumentation) {
		case LTTNG_KERNEL_ABI_TRACEPOINT:	/* Fall-through */
		case LTTNG_KERNEL_ABI_SYSCALL:
			/* Enable event_notifiers */
			list_for_each_entry(enabler_ref,
					&event_notifier->enablers_ref_head, node) {
				if (enabler_ref->ref->enabled) {
					enabled = 1;
					break;
				}
			}
			break;

		default:
			/* Not handled with sync. */
			continue;
		}

		WRITE_ONCE(event_notifier->enabled, enabled);
		/*
		 * Sync tracepoint registration with event_notifier enabled
		 * state.
		 */
		if (enabled) {
			if (!event_notifier->registered)
				register_event_notifier(event_notifier);
		} else {
			if (event_notifier->registered)
				_lttng_event_notifier_unregister(event_notifier);
		}

		/* Check if has enablers without bytecode enabled */
		list_for_each_entry(enabler_ref,
				&event_notifier->enablers_ref_head, node) {
			if (enabler_ref->ref->enabled
					&& list_empty(&enabler_ref->ref->filter_bytecode_head)) {
				has_enablers_without_bytecode = 1;
				break;
			}
		}
		event_notifier->has_enablers_without_bytecode =
			has_enablers_without_bytecode;

		/* Enable filters */
		list_for_each_entry(runtime,
				&event_notifier->filter_bytecode_runtime_head, node)
			lttng_bytecode_filter_sync_state(runtime);

		/* Enable captures */
		list_for_each_entry(runtime,
				&event_notifier->capture_bytecode_runtime_head, node)
			lttng_bytecode_capture_sync_state(runtime);

		/* Evaluate captures only when at least one is attached. */
		WRITE_ONCE(event_notifier->eval_capture, !!event_notifier->num_captures);
	}
}
2817
/*
 * Serialize at most one packet worth of metadata into a metadata
 * channel.
 * We grab the metadata cache mutex to get exclusive access to our metadata
 * buffer and to the metadata cache. Exclusive access to the metadata buffer
 * allows us to do racy operations such as looking for remaining space left in
 * packet and write, since mutual exclusion protects us from concurrent writes.
 * Mutual exclusion on the metadata cache allow us to read the cache content
 * without racing against reallocation of the cache by updates.
 * Returns the number of bytes written in the channel, 0 if no data
 * was written and a negative value on error.
 */
int lttng_metadata_output_channel(struct lttng_metadata_stream *stream,
		struct channel *chan, bool *coherent)
{
	struct lib_ring_buffer_ctx ctx;
	int ret = 0;
	size_t len, reserve_len;

	/*
	 * Ensure we support multiple get_next / put sequences followed by
	 * put_next. The metadata cache lock protects reading the metadata
	 * cache. It can indeed be read concurrently by "get_next_subbuf" and
	 * "flush" operations on the buffer invoked by different processes.
	 * Moreover, since the metadata cache memory can be reallocated, we
	 * need to have exclusive access against updates even though we only
	 * read it.
	 */
	mutex_lock(&stream->metadata_cache->lock);
	WARN_ON(stream->metadata_in < stream->metadata_out);
	/* Previous output not fully consumed yet: write nothing (ret = 0). */
	if (stream->metadata_in != stream->metadata_out)
		goto end;

	/* Metadata regenerated, change the version. */
	if (stream->metadata_cache->version != stream->version)
		stream->version = stream->metadata_cache->version;

	len = stream->metadata_cache->metadata_written -
		stream->metadata_in;
	if (!len)
		goto end;
	/* Cap the write to the space available in the current packet. */
	reserve_len = min_t(size_t,
			stream->transport->ops.packet_avail_size(chan),
			len);
	lib_ring_buffer_ctx_init(&ctx, chan, NULL, reserve_len,
			sizeof(char), -1);
	/*
	 * If reservation failed, return an error to the caller.
	 */
	ret = stream->transport->ops.event_reserve(&ctx, 0);
	if (ret != 0) {
		printk(KERN_WARNING "LTTng: Metadata event reservation failed\n");
		stream->coherent = false;
		goto end;
	}
	stream->transport->ops.event_write(&ctx,
			stream->metadata_cache->data + stream->metadata_in,
			reserve_len);
	stream->transport->ops.event_commit(&ctx);
	stream->metadata_in += reserve_len;
	/* Coherent only if the whole pending metadata fit in the packet. */
	if (reserve_len < len)
		stream->coherent = false;
	else
		stream->coherent = true;
	ret = reserve_len;

end:
	if (coherent)
		*coherent = stream->coherent;
	mutex_unlock(&stream->metadata_cache->lock);
	return ret;
}
2890
/*
 * Open a metadata transaction. "producing" counts nesting of
 * begin/end pairs: the cache lock is only taken on the outermost begin.
 */
static
void lttng_metadata_begin(struct lttng_session *session)
{
	if (atomic_inc_return(&session->metadata_cache->producing) == 1)
		mutex_lock(&session->metadata_cache->lock);
}
2897
/*
 * Close a metadata transaction. On the outermost end, wake up readers
 * waiting on each metadata stream and release the cache lock taken by
 * lttng_metadata_begin().
 */
static
void lttng_metadata_end(struct lttng_session *session)
{
	WARN_ON_ONCE(!atomic_read(&session->metadata_cache->producing));
	if (atomic_dec_return(&session->metadata_cache->producing) == 0) {
		struct lttng_metadata_stream *stream;

		list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list)
			wake_up_interruptible(&stream->read_wait);
		mutex_unlock(&session->metadata_cache->lock);
	}
}
2910
/*
 * Write the metadata to the metadata cache.
 * Must be called with sessions_mutex held.
 * The metadata cache lock protects us from concurrent read access from
 * thread outputting metadata content to ring buffer.
 * The content of the printf is printed as a single atomic metadata
 * transaction.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int lttng_metadata_printf(struct lttng_session *session,
			const char *fmt, ...)
{
	char *str;
	size_t len;
	va_list ap;

	WARN_ON_ONCE(!LTTNG_READ_ONCE(session->active));

	va_start(ap, fmt);
	str = kvasprintf(GFP_KERNEL, fmt, ap);
	va_end(ap);
	if (!str)
		return -ENOMEM;

	len = strlen(str);
	/* Caller must hold the cache in "producing" state (lttng_metadata_begin). */
	WARN_ON_ONCE(!atomic_read(&session->metadata_cache->producing));
	if (session->metadata_cache->metadata_written + len >
			session->metadata_cache->cache_alloc) {
		char *tmp_cache_realloc;
		unsigned int tmp_cache_alloc_size;

		/* Grow the cache: at least fit the new data, else double it. */
		tmp_cache_alloc_size = max_t(unsigned int,
				session->metadata_cache->cache_alloc + len,
				session->metadata_cache->cache_alloc << 1);
		tmp_cache_realloc = vzalloc(tmp_cache_alloc_size);
		if (!tmp_cache_realloc)
			goto err;
		if (session->metadata_cache->data) {
			memcpy(tmp_cache_realloc,
				session->metadata_cache->data,
				session->metadata_cache->cache_alloc);
			vfree(session->metadata_cache->data);
		}

		session->metadata_cache->cache_alloc = tmp_cache_alloc_size;
		session->metadata_cache->data = tmp_cache_realloc;
	}
	memcpy(session->metadata_cache->data +
			session->metadata_cache->metadata_written,
			str, len);
	session->metadata_cache->metadata_written += len;
	kfree(str);

	return 0;

err:
	kfree(str);
	return -ENOMEM;
}
2969
/*
 * Print one indentation placeholder per nesting level into the
 * session's metadata cache. Must be called with sessions_mutex held.
 * Returns 0 on success or the first error from lttng_metadata_printf().
 */
static
int print_tabs(struct lttng_session *session, size_t nesting)
{
	size_t i;

	for (i = 0; i < nesting; i++) {
		int ret;

		ret = lttng_metadata_printf(session, "	");
		if (ret) {
			return ret;
		}
	}
	return 0;
}
2985
/*
 * Emit the field name declaration (" _<name>;") that terminates a type
 * dump. The nesting parameter is unused here: the name is printed on
 * the same line as the type. Must be called with sessions_mutex held.
 */
static
int lttng_field_name_statedump(struct lttng_session *session,
		const struct lttng_kernel_event_field *field,
		size_t nesting)
{
	return lttng_metadata_printf(session, " _%s;\n", field->name);
}
2993
/*
 * Emit a CTF integer type declaration into the metadata cache.
 * The byte_order attribute is only emitted when the integer's byte
 * order differs from the build's native byte order.
 * Must be called with sessions_mutex held.
 */
static
int _lttng_integer_type_statedump(struct lttng_session *session,
		const struct lttng_kernel_type_integer *type,
		enum lttng_kernel_string_encoding parent_encoding,
		size_t nesting)
{
	int ret;

	ret = print_tabs(session, nesting);
	if (ret)
		return ret;
	ret = lttng_metadata_printf(session,
		"integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s }",
		type->size,
		type->alignment,
		type->signedness,
		(parent_encoding == lttng_kernel_string_encoding_none)
			? "none"
			: (parent_encoding == lttng_kernel_string_encoding_UTF8)
				? "UTF8"
				: "ASCII",
		type->base,
#if __BYTE_ORDER == __BIG_ENDIAN
		type->reverse_byte_order ? " byte_order = le;" : ""
#else
		type->reverse_byte_order ? " byte_order = be;" : ""
#endif
	);
	return ret;
}
3024
/*
 * Emit a CTF struct type declaration, recursing into each member field.
 * A nonzero alignment is expressed with an "align(...)" suffix.
 * Must be called with sessions_mutex held.
 */
static
int _lttng_struct_type_statedump(struct lttng_session *session,
		const struct lttng_kernel_type_struct *type,
		size_t nesting)
{
	int ret;
	uint32_t i, nr_fields;
	unsigned int alignment;

	ret = print_tabs(session, nesting);
	if (ret)
		return ret;
	ret = lttng_metadata_printf(session,
		"struct {\n");
	if (ret)
		return ret;
	nr_fields = type->nr_fields;
	for (i = 0; i < nr_fields; i++) {
		const struct lttng_kernel_event_field *iter_field;

		iter_field = type->fields[i];
		ret = _lttng_field_statedump(session, iter_field, nesting + 1);
		if (ret)
			return ret;
	}
	ret = print_tabs(session, nesting);
	if (ret)
		return ret;
	alignment = type->alignment;
	if (alignment) {
		ret = lttng_metadata_printf(session,
			"} align(%u)",
			alignment);
	} else {
		ret = lttng_metadata_printf(session,
			"}");
	}
	return ret;
}
3067
3068 /*
3069 * Must be called with sessions_mutex held.
3070 */
3071 static
3072 int _lttng_struct_field_statedump(struct lttng_session *session,
3073 const struct lttng_kernel_event_field *field,
3074 size_t nesting)
3075 {
3076 int ret;
3077
3078 ret = _lttng_struct_type_statedump(session,
3079 lttng_kernel_get_type_struct(field->type), nesting);
3080 if (ret)
3081 return ret;
3082 return lttng_field_name_statedump(session, field, nesting);
3083 }
3084
/*
 * Emit a CTF variant type declaration (tagged by the "tag_name" field),
 * recursing into each choice.
 * Must be called with sessions_mutex held.
 */
static
int _lttng_variant_type_statedump(struct lttng_session *session,
		const struct lttng_kernel_type_variant *type,
		size_t nesting)
{
	int ret;
	uint32_t i, nr_choices;

	/*
	 * CTF 1.8 does not allow expressing nonzero variant alignment in a nestable way.
	 */
	if (type->alignment != 0)
		return -EINVAL;
	ret = print_tabs(session, nesting);
	if (ret)
		return ret;
	ret = lttng_metadata_printf(session,
		"variant <_%s> {\n",
		type->tag_name);
	if (ret)
		return ret;
	nr_choices = type->nr_choices;
	for (i = 0; i < nr_choices; i++) {
		const struct lttng_kernel_event_field *iter_field;

		iter_field = type->choices[i];
		ret = _lttng_field_statedump(session, iter_field, nesting + 1);
		if (ret)
			return ret;
	}
	ret = print_tabs(session, nesting);
	if (ret)
		return ret;
	ret = lttng_metadata_printf(session,
		"}");
	return ret;
}
3125
3126 /*
3127 * Must be called with sessions_mutex held.
3128 */
3129 static
3130 int _lttng_variant_field_statedump(struct lttng_session *session,
3131 const struct lttng_kernel_event_field *field,
3132 size_t nesting)
3133 {
3134 int ret;
3135
3136 ret = _lttng_variant_type_statedump(session,
3137 lttng_kernel_get_type_variant(field->type), nesting);
3138 if (ret)
3139 return ret;
3140 return lttng_field_name_statedump(session, field, nesting);
3141 }
3142
/*
 * Emit an array-typed field declaration. A nonzero array alignment is
 * expressed as an empty padding struct preceding the array, since CTF
 * has no direct array alignment attribute.
 * Must be called with sessions_mutex held.
 */
static
int _lttng_array_field_statedump(struct lttng_session *session,
		const struct lttng_kernel_event_field *field,
		size_t nesting)
{
	int ret;
	const struct lttng_kernel_type_array *array_type;
	const struct lttng_kernel_type_common *elem_type;

	array_type = lttng_kernel_get_type_array(field->type);
	WARN_ON_ONCE(!array_type);

	if (array_type->alignment) {
		ret = print_tabs(session, nesting);
		if (ret)
			return ret;
		ret = lttng_metadata_printf(session,
		"struct { } align(%u) _%s_padding;\n",
				array_type->alignment * CHAR_BIT,
				field->name);
		if (ret)
			return ret;
	}
	/*
	 * Nested compound types: Only array of structures and variants are
	 * currently supported.
	 */
	elem_type = array_type->elem_type;
	switch (elem_type->type) {
	case lttng_kernel_type_integer:
	case lttng_kernel_type_struct:
	case lttng_kernel_type_variant:
		ret = _lttng_type_statedump(session, elem_type,
				array_type->encoding, nesting);
		if (ret)
			return ret;
		break;

	default:
		return -EINVAL;
	}
	ret = lttng_metadata_printf(session,
		" _%s[%u];\n",
		field->name,
		array_type->length);
	return ret;
}
3193
3194 /*
3195 * Must be called with sessions_mutex held.
3196 */
3197 static
3198 int _lttng_sequence_field_statedump(struct lttng_session *session,
3199 const struct lttng_kernel_event_field *field,
3200 size_t nesting)
3201 {
3202 int ret;
3203 const char *length_name;
3204 const struct lttng_kernel_type_sequence *sequence_type;
3205 const struct lttng_kernel_type_common *elem_type;
3206
3207 sequence_type = lttng_kernel_get_type_sequence(field->type);
3208 WARN_ON_ONCE(!sequence_type);
3209
3210 length_name = sequence_type->length_name;
3211
3212 if (sequence_type->alignment) {
3213 ret = print_tabs(session, nesting);
3214 if (ret)
3215 return ret;
3216 ret = lttng_metadata_printf(session,
3217 "struct { } align(%u) _%s_padding;\n",
3218 sequence_type->alignment * CHAR_BIT,
3219 field->name);
3220 if (ret)
3221 return ret;
3222 }
3223
3224 /*
3225 * Nested compound types: Only array of structures and variants are
3226 * currently supported.
3227 */
3228 elem_type = sequence_type->elem_type;
3229 switch (elem_type->type) {
3230 case lttng_kernel_type_integer:
3231 case lttng_kernel_type_struct:
3232 case lttng_kernel_type_variant:
3233 ret = _lttng_type_statedump(session, elem_type,
3234 sequence_type->encoding, nesting);
3235 if (ret)
3236 return ret;
3237 break;
3238
3239 default:
3240 return -EINVAL;
3241 }
3242 ret = lttng_metadata_printf(session,
3243 " _%s[ _%s ];\n",
3244 field->name,
3245 sequence_type->length_name);
3246 return ret;
3247 }
3248
/*
 * Statedump the metadata description of an enumeration type: the
 * integer container type followed by all entries, each with its string
 * label (with '"' and '\' escaped) and its value or value range.
 *
 * Returns 0 on success, negative error value on failure.
 *
 * Must be called with sessions_mutex held.
 */
static
int _lttng_enum_type_statedump(struct lttng_session *session,
		const struct lttng_kernel_type_enum *type,
		size_t nesting)
{
	const struct lttng_kernel_enum_desc *enum_desc;
	const struct lttng_kernel_type_common *container_type;
	int ret;
	unsigned int i, nr_entries;

	/* Only integer containers are supported for enumerations. */
	container_type = type->container_type;
	if (container_type->type != lttng_kernel_type_integer) {
		ret = -EINVAL;
		goto end;
	}
	enum_desc = type->desc;
	nr_entries = enum_desc->nr_entries;

	ret = print_tabs(session, nesting);
	if (ret)
		goto end;
	ret = lttng_metadata_printf(session, "enum : ");
	if (ret)
		goto end;
	/* Dump the container type without string encoding nor nesting. */
	ret = _lttng_integer_type_statedump(session, lttng_kernel_get_type_integer(container_type),
			lttng_kernel_string_encoding_none, 0);
	if (ret)
		goto end;
	ret = lttng_metadata_printf(session, " {\n");
	if (ret)
		goto end;
	/* Dump all entries */
	for (i = 0; i < nr_entries; i++) {
		const struct lttng_kernel_enum_entry *entry = enum_desc->entries[i];
		int j, len;

		ret = print_tabs(session, nesting + 1);
		if (ret)
			goto end;
		ret = lttng_metadata_printf(session,
				"\"");
		if (ret)
			goto end;
		len = strlen(entry->string);
		/* Escape the character '"' */
		for (j = 0; j < len; j++) {
			char c = entry->string[j];

			switch (c) {
			case '"':
				ret = lttng_metadata_printf(session,
						"\\\"");
				break;
			case '\\':
				ret = lttng_metadata_printf(session,
						"\\\\");
				break;
			default:
				ret = lttng_metadata_printf(session,
						"%c", c);
				break;
			}
			if (ret)
				goto end;
		}
		ret = lttng_metadata_printf(session, "\"");
		if (ret)
			goto end;

		/* Auto-assigned entries carry no explicit "= value" mapping. */
		if (entry->options.is_auto) {
			ret = lttng_metadata_printf(session, ",\n");
			if (ret)
				goto end;
		} else {
			ret = lttng_metadata_printf(session,
					" = ");
			if (ret)
				goto end;
			/* Print the start value with its own signedness. */
			if (entry->start.signedness)
				ret = lttng_metadata_printf(session,
						"%lld", (long long) entry->start.value);
			else
				ret = lttng_metadata_printf(session,
						"%llu", entry->start.value);
			if (ret)
				goto end;
			/* Single value, or "start ... end" range otherwise. */
			if (entry->start.signedness == entry->end.signedness &&
					entry->start.value
						== entry->end.value) {
				ret = lttng_metadata_printf(session,
						",\n");
			} else {
				if (entry->end.signedness) {
					ret = lttng_metadata_printf(session,
							" ... %lld,\n",
							(long long) entry->end.value);
				} else {
					ret = lttng_metadata_printf(session,
							" ... %llu,\n",
							entry->end.value);
				}
			}
			if (ret)
				goto end;
		}
	}
	ret = print_tabs(session, nesting);
	if (ret)
		goto end;
	ret = lttng_metadata_printf(session, "}");
end:
	return ret;
}
3365
3366 /*
3367 * Must be called with sessions_mutex held.
3368 */
3369 static
3370 int _lttng_enum_field_statedump(struct lttng_session *session,
3371 const struct lttng_kernel_event_field *field,
3372 size_t nesting)
3373 {
3374 int ret;
3375 const struct lttng_kernel_type_enum *enum_type;
3376
3377 enum_type = lttng_kernel_get_type_enum(field->type);
3378 WARN_ON_ONCE(!enum_type);
3379 ret = _lttng_enum_type_statedump(session, enum_type, nesting);
3380 if (ret)
3381 return ret;
3382 return lttng_field_name_statedump(session, field, nesting);
3383 }
3384
3385 static
3386 int _lttng_integer_field_statedump(struct lttng_session *session,
3387 const struct lttng_kernel_event_field *field,
3388 size_t nesting)
3389 {
3390 int ret;
3391
3392 ret = _lttng_integer_type_statedump(session, lttng_kernel_get_type_integer(field->type),
3393 lttng_kernel_string_encoding_none, nesting);
3394 if (ret)
3395 return ret;
3396 return lttng_field_name_statedump(session, field, nesting);
3397 }
3398
3399 static
3400 int _lttng_string_type_statedump(struct lttng_session *session,
3401 const struct lttng_kernel_type_string *type,
3402 size_t nesting)
3403 {
3404 int ret;
3405
3406 /* Default encoding is UTF8 */
3407 ret = print_tabs(session, nesting);
3408 if (ret)
3409 return ret;
3410 ret = lttng_metadata_printf(session,
3411 "string%s",
3412 type->encoding == lttng_kernel_string_encoding_ASCII ?
3413 " { encoding = ASCII; }" : "");
3414 return ret;
3415 }
3416
3417 static
3418 int _lttng_string_field_statedump(struct lttng_session *session,
3419 const struct lttng_kernel_event_field *field,
3420 size_t nesting)
3421 {
3422 const struct lttng_kernel_type_string *string_type;
3423 int ret;
3424
3425 string_type = lttng_kernel_get_type_string(field->type);
3426 WARN_ON_ONCE(!string_type);
3427 ret = _lttng_string_type_statedump(session, string_type, nesting);
3428 if (ret)
3429 return ret;
3430 return lttng_field_name_statedump(session, field, nesting);
3431 }
3432
/*
 * Statedump the metadata description of a type (without a field name),
 * dispatching on the type discriminant. Nested arrays and sequences
 * are not supported and return -EINVAL after a one-time warning.
 *
 * Must be called with sessions_mutex held.
 */
static
int _lttng_type_statedump(struct lttng_session *session,
		const struct lttng_kernel_type_common *type,
		enum lttng_kernel_string_encoding parent_encoding,
		size_t nesting)
{
	int ret = 0;

	switch (type->type) {
	case lttng_kernel_type_integer:
		/* parent_encoding only applies to integers (e.g. text array elements). */
		ret = _lttng_integer_type_statedump(session,
				lttng_kernel_get_type_integer(type),
				parent_encoding, nesting);
		break;
	case lttng_kernel_type_enum:
		ret = _lttng_enum_type_statedump(session,
				lttng_kernel_get_type_enum(type),
				nesting);
		break;
	case lttng_kernel_type_string:
		ret = _lttng_string_type_statedump(session,
				lttng_kernel_get_type_string(type),
				nesting);
		break;
	case lttng_kernel_type_struct:
		ret = _lttng_struct_type_statedump(session,
				lttng_kernel_get_type_struct(type),
				nesting);
		break;
	case lttng_kernel_type_variant:
		ret = _lttng_variant_type_statedump(session,
				lttng_kernel_get_type_variant(type),
				nesting);
		break;

	/* Nested arrays and sequences are not supported yet. */
	case lttng_kernel_type_array:
	case lttng_kernel_type_sequence:
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}
	return ret;
}
3480
/*
 * Statedump one event field (type declaration plus field name),
 * dispatching to the per-type field statedump helper. Unknown type
 * discriminants return -EINVAL after a one-time warning.
 *
 * Must be called with sessions_mutex held.
 */
static
int _lttng_field_statedump(struct lttng_session *session,
		const struct lttng_kernel_event_field *field,
		size_t nesting)
{
	int ret = 0;

	switch (field->type->type) {
	case lttng_kernel_type_integer:
		ret = _lttng_integer_field_statedump(session, field, nesting);
		break;
	case lttng_kernel_type_enum:
		ret = _lttng_enum_field_statedump(session, field, nesting);
		break;
	case lttng_kernel_type_string:
		ret = _lttng_string_field_statedump(session, field, nesting);
		break;
	case lttng_kernel_type_struct:
		ret = _lttng_struct_field_statedump(session, field, nesting);
		break;
	case lttng_kernel_type_array:
		ret = _lttng_array_field_statedump(session, field, nesting);
		break;
	case lttng_kernel_type_sequence:
		ret = _lttng_sequence_field_statedump(session, field, nesting);
		break;
	case lttng_kernel_type_variant:
		ret = _lttng_variant_field_statedump(session, field, nesting);
		break;

	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}
	return ret;
}
3520
3521 static
3522 int _lttng_context_metadata_statedump(struct lttng_session *session,
3523 struct lttng_kernel_ctx *ctx)
3524 {
3525 int ret = 0;
3526 int i;
3527
3528 if (!ctx)
3529 return 0;
3530 for (i = 0; i < ctx->nr_fields; i++) {
3531 const struct lttng_kernel_ctx_field *field = &ctx->fields[i];
3532
3533 ret = _lttng_field_statedump(session, field->event_field, 2);
3534 if (ret)
3535 return ret;
3536 }
3537 return ret;
3538 }
3539
3540 static
3541 int _lttng_fields_metadata_statedump(struct lttng_session *session,
3542 struct lttng_event *event)
3543 {
3544 const struct lttng_kernel_event_desc *desc = event->desc;
3545 int ret = 0;
3546 int i;
3547
3548 for (i = 0; i < desc->nr_fields; i++) {
3549 const struct lttng_kernel_event_field *field = desc->fields[i];
3550
3551 ret = _lttng_field_statedump(session, field, 2);
3552 if (ret)
3553 return ret;
3554 }
3555 return ret;
3556 }
3557
/*
 * Must be called with sessions_mutex held.
 * The entire event metadata is printed as a single atomic metadata
 * transaction.
 *
 * Returns 0 on success (or when there is nothing to dump), negative
 * error value on failure. Sets event->metadata_dumped on success so
 * the event is only described once.
 */
static
int _lttng_event_metadata_statedump(struct lttng_session *session,
		struct lttng_channel *chan,
		struct lttng_event *event)
{
	int ret = 0;

	/* Dump each event at most once, and only for active sessions. */
	if (event->metadata_dumped || !LTTNG_READ_ONCE(session->active))
		return 0;
	/* The metadata channel itself has no event descriptions. */
	if (chan->channel_type == METADATA_CHANNEL)
		return 0;

	lttng_metadata_begin(session);

	ret = lttng_metadata_printf(session,
		"event {\n"
		"	name = \"%s\";\n"
		"	id = %u;\n"
		"	stream_id = %u;\n",
		event->desc->event_name,
		event->id,
		event->chan->id);
	if (ret)
		goto end;

	/* Optional per-event context fields, wrapped in a struct. */
	if (event->ctx) {
		ret = lttng_metadata_printf(session,
			"	context := struct {\n");
		if (ret)
			goto end;
	}
	ret = _lttng_context_metadata_statedump(session, event->ctx);
	if (ret)
		goto end;
	if (event->ctx) {
		ret = lttng_metadata_printf(session,
			"	};\n");
		if (ret)
			goto end;
	}

	ret = lttng_metadata_printf(session,
		"	fields := struct {\n"
		);
	if (ret)
		goto end;

	ret = _lttng_fields_metadata_statedump(session, event);
	if (ret)
		goto end;

	/*
	 * LTTng space reservation can only reserve multiples of the
	 * byte size.
	 */
	ret = lttng_metadata_printf(session,
		"	};\n"
		"};\n\n");
	if (ret)
		goto end;

	/* Only mark as dumped after the whole description was written. */
	event->metadata_dumped = 1;
end:
	lttng_metadata_end(session);
	return ret;

}
3630
3631 /*
3632 * Must be called with sessions_mutex held.
3633 * The entire channel metadata is printed as a single atomic metadata
3634 * transaction.
3635 */
3636 static
3637 int _lttng_channel_metadata_statedump(struct lttng_session *session,
3638 struct lttng_channel *chan)
3639 {
3640 int ret = 0;
3641
3642 if (chan->metadata_dumped || !LTTNG_READ_ONCE(session->active))
3643 return 0;
3644
3645 if (chan->channel_type == METADATA_CHANNEL)
3646 return 0;
3647
3648 lttng_metadata_begin(session);
3649
3650 WARN_ON_ONCE(!chan->header_type);
3651 ret = lttng_metadata_printf(session,
3652 "stream {\n"
3653 " id = %u;\n"
3654 " event.header := %s;\n"
3655 " packet.context := struct packet_context;\n",
3656 chan->id,
3657 chan->header_type == 1 ? "struct event_header_compact" :
3658 "struct event_header_large");
3659 if (ret)
3660 goto end;
3661
3662 if (chan->ctx) {
3663 ret = lttng_metadata_printf(session,
3664 " event.context := struct {\n");
3665 if (ret)
3666 goto end;
3667 }
3668 ret = _lttng_context_metadata_statedump(session, chan->ctx);
3669 if (ret)
3670 goto end;
3671 if (chan->ctx) {
3672 ret = lttng_metadata_printf(session,
3673 " };\n");
3674 if (ret)
3675 goto end;
3676 }
3677
3678 ret = lttng_metadata_printf(session,
3679 "};\n\n");
3680
3681 chan->metadata_dumped = 1;
3682 end:
3683 lttng_metadata_end(session);
3684 return ret;
3685 }
3686
/*
 * Declare the "struct packet_context" type referenced by every stream
 * declaration emitted by _lttng_channel_metadata_statedump().
 *
 * Must be called with sessions_mutex held.
 */
static
int _lttng_stream_packet_context_declare(struct lttng_session *session)
{
	return lttng_metadata_printf(session,
		"struct packet_context {\n"
		"	uint64_clock_monotonic_t timestamp_begin;\n"
		"	uint64_clock_monotonic_t timestamp_end;\n"
		"	uint64_t content_size;\n"
		"	uint64_t packet_size;\n"
		"	uint64_t packet_seq_num;\n"
		"	unsigned long events_discarded;\n"
		"	uint32_t cpu_id;\n"
		"};\n\n"
	);
}
3705
/*
 * Declare the compact and large event header types used by the stream
 * declarations.
 *
 * Compact header:
 * id: range: 0 - 30.
 * id 31 is reserved to indicate an extended header.
 *
 * Large header:
 * id: range: 0 - 65534.
 * id 65535 is reserved to indicate an extended header.
 *
 * Must be called with sessions_mutex held.
 */
static
int _lttng_event_header_declare(struct lttng_session *session)
{
	/* Header alignment follows the natural alignment of the largest
	 * compact member (uint32_t resp. uint16_t). */
	return lttng_metadata_printf(session,
	"struct event_header_compact {\n"
	"	enum : uint5_t { compact = 0 ... 30, extended = 31 } id;\n"
	"	variant <id> {\n"
	"		struct {\n"
	"			uint27_clock_monotonic_t timestamp;\n"
	"		} compact;\n"
	"		struct {\n"
	"			uint32_t id;\n"
	"			uint64_clock_monotonic_t timestamp;\n"
	"		} extended;\n"
	"	} v;\n"
	"} align(%u);\n"
	"\n"
	"struct event_header_large {\n"
	"	enum : uint16_t { compact = 0 ... 65534, extended = 65535 } id;\n"
	"	variant <id> {\n"
	"		struct {\n"
	"			uint32_clock_monotonic_t timestamp;\n"
	"		} compact;\n"
	"		struct {\n"
	"			uint32_t id;\n"
	"			uint64_clock_monotonic_t timestamp;\n"
	"		} extended;\n"
	"	} v;\n"
	"} align(%u);\n\n",
	lttng_alignof(uint32_t) * CHAR_BIT,
	lttng_alignof(uint16_t) * CHAR_BIT
	);
}
3750
/*
 * Approximation of NTP time of day to clock monotonic correlation,
 * taken at start of trace.
 * Yes, this is only an approximation. Yes, we can (and will) do better
 * in future versions.
 * This function may return a negative offset. It may happen if the
 * system sets the REALTIME clock to 0 after boot.
 *
 * Use 64bit timespec on kernels that have it, this makes 32bit arch
 * y2038 compliant.
 */
static
int64_t measure_clock_offset(void)
{
	uint64_t monotonic_avg, monotonic[2], realtime;
	uint64_t tcf = trace_clock_freq();
	int64_t offset;
	unsigned long flags;
#ifdef LTTNG_KERNEL_HAS_TIMESPEC64
	struct timespec64 rts = { 0, 0 };
#else
	struct timespec rts = { 0, 0 };
#endif

	/* Disable interrupts to increase correlation precision. */
	local_irq_save(flags);
	/* Bracket the realtime read with two trace clock reads. */
	monotonic[0] = trace_clock_read64();
#ifdef LTTNG_KERNEL_HAS_TIMESPEC64
	ktime_get_real_ts64(&rts);
#else
	getnstimeofday(&rts);
#endif
	monotonic[1] = trace_clock_read64();
	local_irq_restore(flags);

	/* Correlate realtime with the midpoint of the two monotonic reads. */
	monotonic_avg = (monotonic[0] + monotonic[1]) >> 1;
	/* Convert the realtime sample to trace clock units. */
	realtime = (uint64_t) rts.tv_sec * tcf;
	if (tcf == NSEC_PER_SEC) {
		realtime += rts.tv_nsec;
	} else {
		/* Scale nanoseconds for trace clocks that do not tick at 1 GHz. */
		uint64_t n = rts.tv_nsec * tcf;

		do_div(n, NSEC_PER_SEC);
		realtime += n;
	}
	offset = (int64_t) realtime - monotonic_avg;
	return offset;
}
3799
3800 static
3801 int print_escaped_ctf_string(struct lttng_session *session, const char *string)
3802 {
3803 int ret = 0;
3804 size_t i;
3805 char cur;
3806
3807 i = 0;
3808 cur = string[i];
3809 while (cur != '\0') {
3810 switch (cur) {
3811 case '\n':
3812 ret = lttng_metadata_printf(session, "%s", "\\n");
3813 break;
3814 case '\\':
3815 case '"':
3816 ret = lttng_metadata_printf(session, "%c", '\\');
3817 if (ret)
3818 goto error;
3819 /* We still print the current char */
3820 /* Fallthrough */
3821 default:
3822 ret = lttng_metadata_printf(session, "%c", cur);
3823 break;
3824 }
3825
3826 if (ret)
3827 goto error;
3828
3829 cur = string[++i];
3830 }
3831 error:
3832 return ret;
3833 }
3834
/*
 * Emit one metadata environment entry of the form:
 *	<field> = "<escaped field_value>";
 *
 * Returns 0 on success, negative error value on failure.
 */
static
int print_metadata_escaped_field(struct lttng_session *session, const char *field,
		const char *field_value)
{
	int ret;

	ret = lttng_metadata_printf(session, "	%s = \"", field);
	if (ret)
		return ret;
	ret = print_escaped_ctf_string(session, field_value);
	if (ret)
		return ret;
	return lttng_metadata_printf(session, "\";\n");
}
3854
/*
 * Output metadata into this session's metadata buffers.
 * Must be called with sessions_mutex held.
 *
 * Emits, once per session, the CTF preamble (typealiases, trace block,
 * env block, clock description and derived clock typealiases, packet
 * context and event header declarations), then the per-channel and
 * per-event declarations. The whole dump is one atomic metadata
 * transaction. Returns 0 on success, negative error value on failure.
 */
static
int _lttng_session_metadata_statedump(struct lttng_session *session)
{
	unsigned char *uuid_c = session->uuid.b;
	/*
	 * NOTE(review): uuid_s is unsigned char but snprintf() expects
	 * char * — relies on implicit pointer conversion; confirm no
	 * -Wpointer-sign fallout on supported compilers.
	 */
	unsigned char uuid_s[37], clock_uuid_s[BOOT_ID_LEN];
	const char *product_uuid;
	struct lttng_channel *chan;
	struct lttng_event *event;
	int ret = 0;

	if (!LTTNG_READ_ONCE(session->active))
		return 0;

	lttng_metadata_begin(session);

	/* The session-wide preamble is only emitted once. */
	if (session->metadata_dumped)
		goto skip_session;

	/* Format the session UUID in canonical 8-4-4-4-12 textual form. */
	snprintf(uuid_s, sizeof(uuid_s),
		"%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
		uuid_c[0], uuid_c[1], uuid_c[2], uuid_c[3],
		uuid_c[4], uuid_c[5], uuid_c[6], uuid_c[7],
		uuid_c[8], uuid_c[9], uuid_c[10], uuid_c[11],
		uuid_c[12], uuid_c[13], uuid_c[14], uuid_c[15]);

	/* Base integer typealiases and the trace block (packet header layout). */
	ret = lttng_metadata_printf(session,
		"typealias integer { size = 8; align = %u; signed = false; } := uint8_t;\n"
		"typealias integer { size = 16; align = %u; signed = false; } := uint16_t;\n"
		"typealias integer { size = 32; align = %u; signed = false; } := uint32_t;\n"
		"typealias integer { size = 64; align = %u; signed = false; } := uint64_t;\n"
		"typealias integer { size = %u; align = %u; signed = false; } := unsigned long;\n"
		"typealias integer { size = 5; align = 1; signed = false; } := uint5_t;\n"
		"typealias integer { size = 27; align = 1; signed = false; } := uint27_t;\n"
		"\n"
		"trace {\n"
		"	major = %u;\n"
		"	minor = %u;\n"
		"	uuid = \"%s\";\n"
		"	byte_order = %s;\n"
		"	packet.header := struct {\n"
		"		uint32_t magic;\n"
		"		uint8_t uuid[16];\n"
		"		uint32_t stream_id;\n"
		"		uint64_t stream_instance_id;\n"
		"	};\n"
		"};\n\n",
		lttng_alignof(uint8_t) * CHAR_BIT,
		lttng_alignof(uint16_t) * CHAR_BIT,
		lttng_alignof(uint32_t) * CHAR_BIT,
		lttng_alignof(uint64_t) * CHAR_BIT,
		sizeof(unsigned long) * CHAR_BIT,
		lttng_alignof(unsigned long) * CHAR_BIT,
		CTF_SPEC_MAJOR,
		CTF_SPEC_MINOR,
		uuid_s,
#if __BYTE_ORDER == __BIG_ENDIAN
		"be"
#else
		"le"
#endif
		);
	if (ret)
		goto end;

	/* Environment block: host, kernel and tracer identification. */
	ret = lttng_metadata_printf(session,
		"env {\n"
		"	hostname = \"%s\";\n"
		"	domain = \"kernel\";\n"
		"	sysname = \"%s\";\n"
		"	kernel_release = \"%s\";\n"
		"	kernel_version = \"%s\";\n"
		"	tracer_name = \"lttng-modules\";\n"
		"	tracer_major = %d;\n"
		"	tracer_minor = %d;\n"
		"	tracer_patchlevel = %d;\n"
		"	trace_buffering_scheme = \"global\";\n",
		current->nsproxy->uts_ns->name.nodename,
		utsname()->sysname,
		utsname()->release,
		utsname()->version,
		LTTNG_MODULES_MAJOR_VERSION,
		LTTNG_MODULES_MINOR_VERSION,
		LTTNG_MODULES_PATCHLEVEL_VERSION
		);
	if (ret)
		goto end;

	/* Free-form strings go through CTF string escaping. */
	ret = print_metadata_escaped_field(session, "trace_name", session->name);
	if (ret)
		goto end;
	ret = print_metadata_escaped_field(session, "trace_creation_datetime",
			session->creation_time);
	if (ret)
		goto end;

	/* Add the product UUID to the 'env' section */
	product_uuid = dmi_get_system_info(DMI_PRODUCT_UUID);
	if (product_uuid) {
		ret = lttng_metadata_printf(session,
				"	product_uuid = \"%s\";\n",
				product_uuid
				);
		if (ret)
			goto end;
	}

	/* Close the 'env' section */
	ret = lttng_metadata_printf(session, "};\n\n");
	if (ret)
		goto end;

	/* Clock description block. */
	ret = lttng_metadata_printf(session,
		"clock {\n"
		"	name = \"%s\";\n",
		trace_clock_name()
		);
	if (ret)
		goto end;

	/* The clock UUID (boot id) is optional. */
	if (!trace_clock_uuid(clock_uuid_s)) {
		ret = lttng_metadata_printf(session,
			"	uuid = \"%s\";\n",
			clock_uuid_s
			);
		if (ret)
			goto end;
	}

	ret = lttng_metadata_printf(session,
		"	description = \"%s\";\n"
		"	freq = %llu; /* Frequency, in Hz */\n"
		"	/* clock value offset from Epoch is: offset * (1/freq) */\n"
		"	offset = %lld;\n"
		"};\n\n",
		trace_clock_description(),
		(unsigned long long) trace_clock_freq(),
		(long long) measure_clock_offset()
		);
	if (ret)
		goto end;

	/* Clock-mapped integer typealiases used by headers and packet context. */
	ret = lttng_metadata_printf(session,
		"typealias integer {\n"
		"	size = 27; align = 1; signed = false;\n"
		"	map = clock.%s.value;\n"
		"} := uint27_clock_monotonic_t;\n"
		"\n"
		"typealias integer {\n"
		"	size = 32; align = %u; signed = false;\n"
		"	map = clock.%s.value;\n"
		"} := uint32_clock_monotonic_t;\n"
		"\n"
		"typealias integer {\n"
		"	size = 64; align = %u; signed = false;\n"
		"	map = clock.%s.value;\n"
		"} := uint64_clock_monotonic_t;\n\n",
		trace_clock_name(),
		lttng_alignof(uint32_t) * CHAR_BIT,
		trace_clock_name(),
		lttng_alignof(uint64_t) * CHAR_BIT,
		trace_clock_name()
		);
	if (ret)
		goto end;

	ret = _lttng_stream_packet_context_declare(session);
	if (ret)
		goto end;

	ret = _lttng_event_header_declare(session);
	if (ret)
		goto end;

skip_session:
	/* Channels and events track their own metadata_dumped state. */
	list_for_each_entry(chan, &session->chan, list) {
		ret = _lttng_channel_metadata_statedump(session, chan);
		if (ret)
			goto end;
	}

	list_for_each_entry(event, &session->events, list) {
		ret = _lttng_event_metadata_statedump(session, event->chan, event);
		if (ret)
			goto end;
	}
	session->metadata_dumped = 1;
end:
	lttng_metadata_end(session);
	return ret;
}
4049
/**
 * lttng_transport_register - LTT transport registration
 * @transport: transport structure
 *
 * Registers a transport which can be used as output to extract the data out of
 * LTTng. The module calling this registration function must ensure that no
 * trap-inducing code will be executed by the transport functions. E.g.
 * vmalloc_sync_mappings() must be called between a vmalloc and the moment the memory
 * is made visible to the transport function. This registration acts as a
 * vmalloc_sync_mappings. Therefore, only if the module allocates virtual memory
 * after its registration must it synchronize the TLBs.
 */
void lttng_transport_register(struct lttng_transport *transport)
{
	/*
	 * Make sure no page fault can be triggered by the module about to be
	 * registered. We deal with this here so we don't have to call
	 * vmalloc_sync_mappings() in each module's init.
	 */
	wrapper_vmalloc_sync_mappings();

	/* The transport list is protected by sessions_mutex. */
	mutex_lock(&sessions_mutex);
	list_add_tail(&transport->node, &lttng_transport_list);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_transport_register);
4076
/**
 * lttng_transport_unregister - LTT transport unregistration
 * @transport: transport structure
 *
 * Removes the transport from the global transport list, under
 * sessions_mutex.
 */
void lttng_transport_unregister(struct lttng_transport *transport)
{
	mutex_lock(&sessions_mutex);
	list_del(&transport->node);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_transport_unregister);
4088
/**
 * lttng_counter_transport_register - LTTng counter transport registration
 * @transport: counter transport structure
 *
 * Registers a counter transport. Like lttng_transport_register(), this
 * acts as a vmalloc_sync_mappings for the registering module.
 */
void lttng_counter_transport_register(struct lttng_counter_transport *transport)
{
	/*
	 * Make sure no page fault can be triggered by the module about to be
	 * registered. We deal with this here so we don't have to call
	 * vmalloc_sync_mappings() in each module's init.
	 */
	wrapper_vmalloc_sync_mappings();

	/* The counter transport list is protected by sessions_mutex. */
	mutex_lock(&sessions_mutex);
	list_add_tail(&transport->node, &lttng_counter_transport_list);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_counter_transport_register);
4103
/**
 * lttng_counter_transport_unregister - LTTng counter transport unregistration
 * @transport: counter transport structure
 *
 * Removes the counter transport from the global list, under
 * sessions_mutex.
 */
void lttng_counter_transport_unregister(struct lttng_counter_transport *transport)
{
	mutex_lock(&sessions_mutex);
	list_del(&transport->node);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_counter_transport_unregister);
4111
4112 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
4113
4114 enum cpuhp_state lttng_hp_prepare;
4115 enum cpuhp_state lttng_hp_online;
4116
4117 static int lttng_hotplug_prepare(unsigned int cpu, struct hlist_node *node)
4118 {
4119 struct lttng_cpuhp_node *lttng_node;
4120
4121 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4122 switch (lttng_node->component) {
4123 case LTTNG_RING_BUFFER_FRONTEND:
4124 return 0;
4125 case LTTNG_RING_BUFFER_BACKEND:
4126 return lttng_cpuhp_rb_backend_prepare(cpu, lttng_node);
4127 case LTTNG_RING_BUFFER_ITER:
4128 return 0;
4129 case LTTNG_CONTEXT_PERF_COUNTERS:
4130 return 0;
4131 default:
4132 return -EINVAL;
4133 }
4134 }
4135
4136 static int lttng_hotplug_dead(unsigned int cpu, struct hlist_node *node)
4137 {
4138 struct lttng_cpuhp_node *lttng_node;
4139
4140 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4141 switch (lttng_node->component) {
4142 case LTTNG_RING_BUFFER_FRONTEND:
4143 return lttng_cpuhp_rb_frontend_dead(cpu, lttng_node);
4144 case LTTNG_RING_BUFFER_BACKEND:
4145 return 0;
4146 case LTTNG_RING_BUFFER_ITER:
4147 return 0;
4148 case LTTNG_CONTEXT_PERF_COUNTERS:
4149 return lttng_cpuhp_perf_counter_dead(cpu, lttng_node);
4150 default:
4151 return -EINVAL;
4152 }
4153 }
4154
4155 static int lttng_hotplug_online(unsigned int cpu, struct hlist_node *node)
4156 {
4157 struct lttng_cpuhp_node *lttng_node;
4158
4159 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4160 switch (lttng_node->component) {
4161 case LTTNG_RING_BUFFER_FRONTEND:
4162 return lttng_cpuhp_rb_frontend_online(cpu, lttng_node);
4163 case LTTNG_RING_BUFFER_BACKEND:
4164 return 0;
4165 case LTTNG_RING_BUFFER_ITER:
4166 return lttng_cpuhp_rb_iter_online(cpu, lttng_node);
4167 case LTTNG_CONTEXT_PERF_COUNTERS:
4168 return lttng_cpuhp_perf_counter_online(cpu, lttng_node);
4169 default:
4170 return -EINVAL;
4171 }
4172 }
4173
4174 static int lttng_hotplug_offline(unsigned int cpu, struct hlist_node *node)
4175 {
4176 struct lttng_cpuhp_node *lttng_node;
4177
4178 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4179 switch (lttng_node->component) {
4180 case LTTNG_RING_BUFFER_FRONTEND:
4181 return lttng_cpuhp_rb_frontend_offline(cpu, lttng_node);
4182 case LTTNG_RING_BUFFER_BACKEND:
4183 return 0;
4184 case LTTNG_RING_BUFFER_ITER:
4185 return 0;
4186 case LTTNG_CONTEXT_PERF_COUNTERS:
4187 return 0;
4188 default:
4189 return -EINVAL;
4190 }
4191 }
4192
/*
 * Register the LTTng CPU hotplug callbacks with the cpuhp state
 * machine (kernels >= 4.10): one dynamic multi-instance state for the
 * prepare stage and one for the online stage.
 *
 * Returns 0 on success, negative error value on failure.
 */
static int __init lttng_init_cpu_hotplug(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_BP_PREPARE_DYN, "lttng:prepare",
			lttng_hotplug_prepare,
			lttng_hotplug_dead);
	if (ret < 0) {
		return ret;
	}
	/* Publish the dynamically allocated state to the ring buffer layer. */
	lttng_hp_prepare = ret;
	lttng_rb_set_hp_prepare(ret);

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "lttng:online",
			lttng_hotplug_online,
			lttng_hotplug_offline);
	if (ret < 0) {
		/* Roll back the prepare state registration. */
		cpuhp_remove_multi_state(lttng_hp_prepare);
		lttng_hp_prepare = 0;
		return ret;
	}
	lttng_hp_online = ret;
	lttng_rb_set_hp_online(ret);

	return 0;
}
4219
/*
 * Unregister the cpuhp states in reverse registration order, clearing
 * the values published to the ring buffer layer first.
 */
static void __exit lttng_exit_cpu_hotplug(void)
{
	lttng_rb_set_hp_online(0);
	cpuhp_remove_multi_state(lttng_hp_online);
	lttng_rb_set_hp_prepare(0);
	cpuhp_remove_multi_state(lttng_hp_prepare);
}
4227
4228 #else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
/* cpuhp state machine unavailable before 4.10: nothing to set up. */
static int lttng_init_cpu_hotplug(void)
{
	return 0;
}
static void lttng_exit_cpu_hotplug(void)
{
}
4236 #endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
4237
4238
/*
 * Module init: set up wrapper symbol lookups, probe and context
 * infrastructure, tracepoint handling, the event and event-notifier
 * slab caches, the LTTng ABI, the logger and CPU hotplug callbacks.
 * On failure, previously initialized subsystems are torn down in
 * reverse order via the goto chain.
 */
static int __init lttng_events_init(void)
{
	int ret;

	ret = wrapper_lttng_fixup_sig(THIS_MODULE);
	if (ret)
		return ret;
	ret = wrapper_get_pfnblock_flags_mask_init();
	if (ret)
		return ret;
	ret = wrapper_get_pageblock_flags_mask_init();
	if (ret)
		return ret;
	ret = lttng_probes_init();
	if (ret)
		return ret;
	ret = lttng_context_init();
	if (ret)
		return ret;
	ret = lttng_tracepoint_init();
	if (ret)
		goto error_tp;
	event_cache = KMEM_CACHE(lttng_event, 0);
	if (!event_cache) {
		ret = -ENOMEM;
		goto error_kmem_event;
	}
	event_notifier_cache = KMEM_CACHE(lttng_event_notifier, 0);
	if (!event_notifier_cache) {
		ret = -ENOMEM;
		goto error_kmem_event_notifier;
	}
	ret = lttng_abi_init();
	if (ret)
		goto error_abi;
	ret = lttng_logger_init();
	if (ret)
		goto error_logger;
	ret = lttng_init_cpu_hotplug();
	if (ret)
		goto error_hotplug;
	printk(KERN_NOTICE "LTTng: Loaded modules v%s.%s.%s%s (%s)%s%s\n",
		__stringify(LTTNG_MODULES_MAJOR_VERSION),
		__stringify(LTTNG_MODULES_MINOR_VERSION),
		__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
		LTTNG_MODULES_EXTRAVERSION,
		LTTNG_VERSION_NAME,
#ifdef LTTNG_EXTRA_VERSION_GIT
		LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
#else
		"",
#endif
#ifdef LTTNG_EXTRA_VERSION_NAME
		LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
#else
		"");
#endif
	return 0;

	/* Error path: unwind in reverse initialization order. */
error_hotplug:
	lttng_logger_exit();
error_logger:
	lttng_abi_exit();
error_abi:
	kmem_cache_destroy(event_notifier_cache);
error_kmem_event_notifier:
	kmem_cache_destroy(event_cache);
error_kmem_event:
	lttng_tracepoint_exit();
error_tp:
	lttng_context_exit();
	printk(KERN_NOTICE "LTTng: Failed to load modules v%s.%s.%s%s (%s)%s%s\n",
		__stringify(LTTNG_MODULES_MAJOR_VERSION),
		__stringify(LTTNG_MODULES_MINOR_VERSION),
		__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
		LTTNG_MODULES_EXTRAVERSION,
		LTTNG_VERSION_NAME,
#ifdef LTTNG_EXTRA_VERSION_GIT
		LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
#else
		"",
#endif
#ifdef LTTNG_EXTRA_VERSION_NAME
		LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
#else
		"");
#endif
	return ret;
}
4328
4329 module_init(lttng_events_init);
4330
/*
 * Module exit point: tear down the tracer subsystems in reverse order of
 * lttng_events_init().  Any session still registered on the global
 * 'sessions' list at unload time is destroyed here; the _safe iterator is
 * required because lttng_session_destroy() unlinks the session from the
 * list.  NOTE(review): no lttng_probes_exit() call appears here --
 * presumably probe teardown happens elsewhere (e.g. per-probe module
 * unload); confirm against lttng-probes.c.
 */
static void __exit lttng_events_exit(void)
{
	struct lttng_session *session, *tmpsession;

	lttng_exit_cpu_hotplug();
	lttng_logger_exit();
	lttng_abi_exit();
	list_for_each_entry_safe(session, tmpsession, &sessions, list)
		lttng_session_destroy(session);
	kmem_cache_destroy(event_cache);
	kmem_cache_destroy(event_notifier_cache);
	lttng_tracepoint_exit();
	lttng_context_exit();
	/*
	 * Unload banner.  The trailing two "%s" arguments come from the
	 * #ifdef blocks below, mirroring the load/failure banners in
	 * lttng_events_init(): empty string or " - <extra version info>".
	 */
	printk(KERN_NOTICE "LTTng: Unloaded modules v%s.%s.%s%s (%s)%s%s\n",
		__stringify(LTTNG_MODULES_MAJOR_VERSION),
		__stringify(LTTNG_MODULES_MINOR_VERSION),
		__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
		LTTNG_MODULES_EXTRAVERSION,
		LTTNG_VERSION_NAME,
#ifdef LTTNG_EXTRA_VERSION_GIT
		LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
#else
		"",
#endif
#ifdef LTTNG_EXTRA_VERSION_NAME
		LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
#else
		"");
#endif
}

module_exit(lttng_events_exit);
4363
/*
 * Module metadata.  The generated patches.h header injects build-time
 * MODULE_INFO entries for any patches applied to this tree; the optional
 * extra_version_* entries record git/name version metadata when the
 * build provides it.
 */
#include <generated/patches.h>
#ifdef LTTNG_EXTRA_VERSION_GIT
MODULE_INFO(extra_version_git, LTTNG_EXTRA_VERSION_GIT);
#endif
#ifdef LTTNG_EXTRA_VERSION_NAME
MODULE_INFO(extra_version_name, LTTNG_EXTRA_VERSION_NAME);
#endif
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
MODULE_DESCRIPTION("LTTng tracer");
/* Version string assembled from the major.minor.patchlevel macros. */
MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
	__stringify(LTTNG_MODULES_MINOR_VERSION) "."
	__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
	LTTNG_MODULES_EXTRAVERSION);
This page took 0.150929 seconds and 4 git commands to generate.