/* lttng-modules v0.19-stable — ltt-tracer.c */
1/*
2 * ltt/ltt-tracer.c
3 *
4 * (C) Copyright 2005-2008 -
5 * Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
6 *
7 * Tracing management internal kernel API. Trace buffer allocation/free, tracing
8 * start/stop.
9 *
10 * Author:
11 * Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
12 *
13 * Inspired from LTT :
14 * Karim Yaghmour (karim@opersys.com)
15 * Tom Zanussi (zanussi@us.ibm.com)
16 * Bob Wisniewski (bob@watson.ibm.com)
17 * And from K42 :
18 * Bob Wisniewski (bob@watson.ibm.com)
19 *
20 * Changelog:
21 * 22/09/06, Move to the marker/probes mechanism.
22 * 19/10/05, Complete lockless mechanism.
23 * 27/05/05, Modular redesign and rewrite.
24 *
25 * Dual LGPL v2.1/GPL v2 license.
26 */
27
28#include <linux/time.h>
29#include <linux/module.h>
30#include <linux/string.h>
31#include <linux/slab.h>
32#include <linux/init.h>
33#include <linux/rcupdate.h>
34#include <linux/sched.h>
35#include <linux/bitops.h>
36#include <linux/fs.h>
37#include <linux/cpu.h>
38#include <linux/kref.h>
39#include <linux/delay.h>
40#include <linux/vmalloc.h>
41#include <asm/atomic.h>
42
43#include "ltt-tracer.h"
44
/*
 * Wait for all currently running tracing code to finish.  Tracing
 * read-side sections run with preemption disabled, so a sched-RCU grace
 * period suffices on mainline.  On PREEMPT_RT the read side uses
 * preemptible RCU (see async_wakeup), so also wait for a regular RCU
 * grace period there.
 */
static void synchronize_trace(void)
{
	synchronize_sched();
#ifdef CONFIG_PREEMPT_RT
	synchronize_rcu();
#endif
}
52
static void async_wakeup(unsigned long data);

/* Periodic timer delivering pending reader wakeups for all active traces. */
static DEFINE_TIMER(ltt_async_wakeup_timer, async_wakeup, 0, 0);
56
/* Default callbacks for modules */

/*
 * Default filter control callback: no-op, reports success.  Used until a
 * filter control module registers.  notrace: must not itself be traced.
 */
notrace
int ltt_filter_control_default(enum ltt_filter_control_msg msg,
		struct ltt_trace *trace)
{
	return 0;
}
64
/* Default state dump callback: no-op until a statedump module registers. */
int ltt_statedump_default(struct ltt_trace *trace)
{
	return 0;
}
69
/* Callbacks for registered modules */

/* Filter control callback and the module that owns it (NULL = default). */
int (*ltt_filter_control_functor)
	(enum ltt_filter_control_msg msg, struct ltt_trace *trace) =
					ltt_filter_control_default;
struct module *ltt_filter_control_owner;

/* These function pointers are protected by a trace activation check */
struct module *ltt_run_filter_owner;
/* State dump callback and its owning module (NULL = default). */
int (*ltt_statedump_functor)(struct ltt_trace *trace) = ltt_statedump_default;
struct module *ltt_statedump_owner;
81
82struct chan_info_struct {
83 const char *name;
84 unsigned int def_sb_size;
85 unsigned int def_n_sb;
86} chan_infos[] = {
87 [LTT_CHANNEL_METADATA] = {
88 LTT_METADATA_CHANNEL,
89 LTT_DEFAULT_SUBBUF_SIZE_LOW,
90 LTT_DEFAULT_N_SUBBUFS_LOW,
91 },
92 [LTT_CHANNEL_FD_STATE] = {
93 LTT_FD_STATE_CHANNEL,
94 LTT_DEFAULT_SUBBUF_SIZE_LOW,
95 LTT_DEFAULT_N_SUBBUFS_LOW,
96 },
97 [LTT_CHANNEL_GLOBAL_STATE] = {
98 LTT_GLOBAL_STATE_CHANNEL,
99 LTT_DEFAULT_SUBBUF_SIZE_LOW,
100 LTT_DEFAULT_N_SUBBUFS_LOW,
101 },
102 [LTT_CHANNEL_IRQ_STATE] = {
103 LTT_IRQ_STATE_CHANNEL,
104 LTT_DEFAULT_SUBBUF_SIZE_LOW,
105 LTT_DEFAULT_N_SUBBUFS_LOW,
106 },
107 [LTT_CHANNEL_MODULE_STATE] = {
108 LTT_MODULE_STATE_CHANNEL,
109 LTT_DEFAULT_SUBBUF_SIZE_LOW,
110 LTT_DEFAULT_N_SUBBUFS_LOW,
111 },
112 [LTT_CHANNEL_NETIF_STATE] = {
113 LTT_NETIF_STATE_CHANNEL,
114 LTT_DEFAULT_SUBBUF_SIZE_LOW,
115 LTT_DEFAULT_N_SUBBUFS_LOW,
116 },
117 [LTT_CHANNEL_SOFTIRQ_STATE] = {
118 LTT_SOFTIRQ_STATE_CHANNEL,
119 LTT_DEFAULT_SUBBUF_SIZE_LOW,
120 LTT_DEFAULT_N_SUBBUFS_LOW,
121 },
122 [LTT_CHANNEL_SWAP_STATE] = {
123 LTT_SWAP_STATE_CHANNEL,
124 LTT_DEFAULT_SUBBUF_SIZE_LOW,
125 LTT_DEFAULT_N_SUBBUFS_LOW,
126 },
127 [LTT_CHANNEL_SYSCALL_STATE] = {
128 LTT_SYSCALL_STATE_CHANNEL,
129 LTT_DEFAULT_SUBBUF_SIZE_LOW,
130 LTT_DEFAULT_N_SUBBUFS_LOW,
131 },
132 [LTT_CHANNEL_TASK_STATE] = {
133 LTT_TASK_STATE_CHANNEL,
134 LTT_DEFAULT_SUBBUF_SIZE_LOW,
135 LTT_DEFAULT_N_SUBBUFS_LOW,
136 },
137 [LTT_CHANNEL_VM_STATE] = {
138 LTT_VM_STATE_CHANNEL,
139 LTT_DEFAULT_SUBBUF_SIZE_MED,
140 LTT_DEFAULT_N_SUBBUFS_MED,
141 },
142 [LTT_CHANNEL_FS] = {
143 LTT_FS_CHANNEL,
144 LTT_DEFAULT_SUBBUF_SIZE_MED,
145 LTT_DEFAULT_N_SUBBUFS_MED,
146 },
147 [LTT_CHANNEL_INPUT] = {
148 LTT_INPUT_CHANNEL,
149 LTT_DEFAULT_SUBBUF_SIZE_LOW,
150 LTT_DEFAULT_N_SUBBUFS_LOW,
151 },
152 [LTT_CHANNEL_IPC] = {
153 LTT_IPC_CHANNEL,
154 LTT_DEFAULT_SUBBUF_SIZE_LOW,
155 LTT_DEFAULT_N_SUBBUFS_LOW,
156 },
157 [LTT_CHANNEL_KERNEL] = {
158 LTT_KERNEL_CHANNEL,
159 LTT_DEFAULT_SUBBUF_SIZE_HIGH,
160 LTT_DEFAULT_N_SUBBUFS_HIGH,
161 },
162 [LTT_CHANNEL_MM] = {
163 LTT_MM_CHANNEL,
164 LTT_DEFAULT_SUBBUF_SIZE_MED,
165 LTT_DEFAULT_N_SUBBUFS_MED,
166 },
167 [LTT_CHANNEL_RCU] = {
168 LTT_RCU_CHANNEL,
169 LTT_DEFAULT_SUBBUF_SIZE_MED,
170 LTT_DEFAULT_N_SUBBUFS_MED,
171 },
172 [LTT_CHANNEL_DEFAULT] = {
173 NULL,
174 LTT_DEFAULT_SUBBUF_SIZE_MED,
175 LTT_DEFAULT_N_SUBBUFS_MED,
176 },
177};
178
179static enum ltt_channels get_channel_type_from_name(const char *name)
180{
181 int i;
182
183 if (!name)
184 return LTT_CHANNEL_DEFAULT;
185
186 for (i = 0; i < ARRAY_SIZE(chan_infos); i++)
187 if (chan_infos[i].name && !strcmp(name, chan_infos[i].name))
188 return (enum ltt_channels)i;
189
190 return LTT_CHANNEL_DEFAULT;
191}
192
193/**
194 * ltt_module_register - LTT module registration
195 * @name: module type
196 * @function: callback to register
197 * @owner: module which owns the callback
198 *
199 * The module calling this registration function must ensure that no
200 * trap-inducing code will be executed by "function". E.g. vmalloc_sync_all()
201 * must be called between a vmalloc and the moment the memory is made visible to
202 * "function". This registration acts as a vmalloc_sync_all. Therefore, only if
203 * the module allocates virtual memory after its registration must it
204 * synchronize the TLBs.
205 */
206int ltt_module_register(enum ltt_module_function name, void *function,
207 struct module *owner)
208{
209 int ret = 0;
210
211 /*
212 * Make sure no page fault can be triggered by the module about to be
213 * registered. We deal with this here so we don't have to call
214 * vmalloc_sync_all() in each module's init.
215 */
216 vmalloc_sync_all();
217
218 switch (name) {
219 case LTT_FUNCTION_RUN_FILTER:
220 if (ltt_run_filter_owner != NULL) {
221 ret = -EEXIST;
222 goto end;
223 }
224 ltt_filter_register((ltt_run_filter_functor)function);
225 ltt_run_filter_owner = owner;
226 break;
227 case LTT_FUNCTION_FILTER_CONTROL:
228 if (ltt_filter_control_owner != NULL) {
229 ret = -EEXIST;
230 goto end;
231 }
232 ltt_filter_control_functor =
233 (int (*)(enum ltt_filter_control_msg,
234 struct ltt_trace *))function;
235 ltt_filter_control_owner = owner;
236 break;
237 case LTT_FUNCTION_STATEDUMP:
238 if (ltt_statedump_owner != NULL) {
239 ret = -EEXIST;
240 goto end;
241 }
242 ltt_statedump_functor =
243 (int (*)(struct ltt_trace *))function;
244 ltt_statedump_owner = owner;
245 break;
246 }
247
248end:
249
250 return ret;
251}
252EXPORT_SYMBOL_GPL(ltt_module_register);
253
/**
 * ltt_module_unregister - LTT module unregistration
 * @name: module type
 *
 * Resets the matching callback to its default.  Only the run-filter case
 * needs synchronize_trace(): tracing code may still be inside the filter
 * when it is unregistered, so wait for those sections to finish before the
 * owner module's code can safely go away.
 */
void ltt_module_unregister(enum ltt_module_function name)
{
	switch (name) {
	case LTT_FUNCTION_RUN_FILTER:
		ltt_filter_unregister();
		ltt_run_filter_owner = NULL;
		/* Wait for preempt sections to finish */
		synchronize_trace();
		break;
	case LTT_FUNCTION_FILTER_CONTROL:
		ltt_filter_control_functor = ltt_filter_control_default;
		ltt_filter_control_owner = NULL;
		break;
	case LTT_FUNCTION_STATEDUMP:
		ltt_statedump_functor = ltt_statedump_default;
		ltt_statedump_owner = NULL;
		break;
	}

}
EXPORT_SYMBOL_GPL(ltt_module_unregister);
279
/* Registered transports; protected by the traces lock. */
static LIST_HEAD(ltt_transport_list);
281
/**
 * ltt_transport_register - LTT transport registration
 * @transport: transport structure
 *
 * Registers a transport which can be used as output to extract the data out of
 * LTTng. The module calling this registration function must ensure that no
 * trap-inducing code will be executed by the transport functions. E.g.
 * vmalloc_sync_all() must be called between a vmalloc and the moment the memory
 * is made visible to the transport function. This registration acts as a
 * vmalloc_sync_all. Therefore, only if the module allocates virtual memory
 * after its registration must it synchronize the TLBs.
 */
void ltt_transport_register(struct ltt_transport *transport)
{
	/*
	 * Make sure no page fault can be triggered by the module about to be
	 * registered. We deal with this here so we don't have to call
	 * vmalloc_sync_all() in each module's init.
	 */
	vmalloc_sync_all();

	/* The transport list is protected by the traces lock. */
	ltt_lock_traces();
	list_add_tail(&transport->node, &ltt_transport_list);
	ltt_unlock_traces();
}
EXPORT_SYMBOL_GPL(ltt_transport_register);
308
/**
 * ltt_transport_unregister - LTT transport unregistration
 * @transport: transport structure
 *
 * Removes the transport from the registered list under the traces lock.
 * Traces still holding a reference on the transport keep its module pinned
 * via try_module_get() done at allocation time.
 */
void ltt_transport_unregister(struct ltt_transport *transport)
{
	ltt_lock_traces();
	list_del(&transport->node);
	ltt_unlock_traces();
}
EXPORT_SYMBOL_GPL(ltt_transport_unregister);
320
321static inline
322int is_channel_overwrite(enum ltt_channels chan, enum trace_mode mode)
323{
324 switch (mode) {
325 case LTT_TRACE_NORMAL:
326 return 0;
327 case LTT_TRACE_FLIGHT:
328 switch (chan) {
329 case LTT_CHANNEL_METADATA:
330 return 0;
331 default:
332 return 1;
333 }
334 case LTT_TRACE_HYBRID:
335 switch (chan) {
336 case LTT_CHANNEL_KERNEL:
337 case LTT_CHANNEL_FS:
338 case LTT_CHANNEL_MM:
339 case LTT_CHANNEL_RCU:
340 case LTT_CHANNEL_IPC:
341 case LTT_CHANNEL_INPUT:
342 return 1;
343 default:
344 return 0;
345 }
346 default:
347 return 0;
348 }
349}
350
/*
 * Deliver a read wakeup on every active channel of @trace.  Caller must
 * guarantee the trace cannot disappear underneath it (RCU read side, or
 * the trace already removed from the global list).
 */
static void trace_async_wakeup(struct ltt_trace *trace)
{
	int i;
	struct ltt_chan *chan;

	/* Must check each channel for pending read wakeup */
	for (i = 0; i < trace->nr_channels; i++) {
		chan = &trace->channels[i];
		if (chan->active)
			trace->ops->wakeup_channel(chan);
	}
}
363
/*
 * Timer to send async wakeups to the readers.  Walks all active traces
 * under RCU protection and re-arms itself; the timer is stopped by
 * _ltt_trace_destroy() when the last trace goes away.
 */
static void async_wakeup(unsigned long data)
{
	struct ltt_trace *trace;

	/*
	 * PREEMPT_RT does not allow spinlocks to be taken within preempt
	 * disable sections (spinlock taken in wake_up). However, mainline won't
	 * allow mutex to be taken in interrupt context. Ugly.
	 * Take a standard RCU read lock for RT kernels, which imply that we
	 * also have to synchronize_rcu() upon updates.
	 */
#ifndef CONFIG_PREEMPT_RT
	rcu_read_lock_sched();
#else
	rcu_read_lock();
#endif
	list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
		trace_async_wakeup(trace);
	}
#ifndef CONFIG_PREEMPT_RT
	rcu_read_unlock_sched();
#else
	rcu_read_unlock();
#endif

	/* Re-arm: wakeups are delivered periodically while traces exist. */
	mod_timer(&ltt_async_wakeup_timer, jiffies + LTT_PERCPU_TIMER_INTERVAL);
}
392
393/**
394 * _ltt_trace_find - find a trace by given name.
395 * trace_name: trace name
396 *
397 * Returns a pointer to the trace structure, NULL if not found.
398 */
399static struct ltt_trace *_ltt_trace_find(const char *trace_name)
400{
401 struct ltt_trace *trace;
402
403 list_for_each_entry(trace, &ltt_traces.head, list)
404 if (!strncmp(trace->trace_name, trace_name, NAME_MAX))
405 return trace;
406
407 return NULL;
408}
409
410/* _ltt_trace_find_setup :
411 * find a trace in setup list by given name.
412 *
413 * Returns a pointer to the trace structure, NULL if not found.
414 */
415struct ltt_trace *_ltt_trace_find_setup(const char *trace_name)
416{
417 struct ltt_trace *trace;
418
419 list_for_each_entry(trace, &ltt_traces.setup_head, list)
420 if (!strncmp(trace->trace_name, trace_name, NAME_MAX))
421 return trace;
422
423 return NULL;
424}
425EXPORT_SYMBOL_GPL(_ltt_trace_find_setup);
426
/**
 * ltt_release_trace - Release a LTT trace
 * @kref : reference count on the trace
 *
 * Final teardown, called when the last reference drops: removes the
 * output directories, releases the transport module reference taken in
 * ltt_trace_alloc(), and frees the channel array and the trace itself.
 */
void ltt_release_trace(struct kref *kref)
{
	struct ltt_trace *trace = container_of(kref, struct ltt_trace, kref);

	trace->ops->remove_dirs(trace);
	module_put(trace->transport->owner);
	ltt_channels_trace_free(trace->channels, trace->nr_channels);
	kfree(trace);
}
EXPORT_SYMBOL_GPL(ltt_release_trace);
441
/*
 * Normalize the requested subbuffer geometry in place: enforce a minimum
 * subbuffer size of one page and round both the size and the count up to
 * the next power of two.
 */
static inline void prepare_chan_size_num(unsigned int *subbuf_size,
					 unsigned int *n_subbufs)
{
	/* Make sure the subbuffer size is larger than a page */
	*subbuf_size = max_t(unsigned int, *subbuf_size, PAGE_SIZE);

	/* round to next power of 2 */
	*subbuf_size = 1 << get_count_order(*subbuf_size);
	*n_subbufs = 1 << get_count_order(*n_subbufs);

	/* Subbuf size and number must both be power of two */
	WARN_ON(hweight32(*subbuf_size) != 1);
	WARN_ON(hweight32(*n_subbufs) != 1);
}
456
/**
 * _ltt_trace_setup - create a trace skeleton on the setup list
 * @trace_name: unique trace name
 *
 * Called with the traces lock held.  Allocates the trace and its channel
 * descriptors, forces the metadata channel active (non-overwrite), and
 * applies the per-channel-type default subbuffer geometry from chan_infos.
 * Returns 0 on success, -EEXIST if the name is already in use (setup or
 * active list), -ENOMEM on allocation failure.
 */
int _ltt_trace_setup(const char *trace_name)
{
	int err = 0;
	struct ltt_trace *new_trace = NULL;
	int metadata_index;
	unsigned int chan;
	enum ltt_channels chantype;

	/* The name must be unique among both setup and active traces. */
	if (_ltt_trace_find_setup(trace_name)) {
		printk(KERN_ERR "LTT : Trace name %s already used.\n",
		       trace_name);
		err = -EEXIST;
		goto traces_error;
	}

	if (_ltt_trace_find(trace_name)) {
		printk(KERN_ERR "LTT : Trace name %s already used.\n",
		       trace_name);
		err = -EEXIST;
		goto traces_error;
	}

	new_trace = kzalloc(sizeof(struct ltt_trace), GFP_KERNEL);
	if (!new_trace) {
		printk(KERN_ERR
		       "LTT : Unable to allocate memory for trace %s\n",
		       trace_name);
		err = -ENOMEM;
		goto traces_error;
	}
	/*
	 * NOTE(review): strncpy() leaves trace_name unterminated when the
	 * source is NAME_MAX bytes or longer — presumably callers pass
	 * NAME_MAX-bounded names; confirm (lookups use strncmp with
	 * NAME_MAX, but printk "%s" would over-read an unterminated name).
	 */
	strncpy(new_trace->trace_name, trace_name, NAME_MAX);
	new_trace->channels = ltt_channels_trace_alloc(&new_trace->nr_channels,
						       0, 1);
	if (!new_trace->channels) {
		printk(KERN_ERR
		       "LTT : Unable to allocate memory for chaninfo  %s\n",
		       trace_name);
		err = -ENOMEM;
		goto trace_free;
	}

	/*
	 * Force metadata channel to active, no overwrite.
	 */
	metadata_index = ltt_channels_get_index_from_name("metadata");
	WARN_ON(metadata_index < 0);
	new_trace->channels[metadata_index].overwrite = 0;
	new_trace->channels[metadata_index].active = 1;

	/*
	 * Set hardcoded tracer defaults for some channels
	 */
	for (chan = 0; chan < new_trace->nr_channels; chan++) {
		if (!(new_trace->channels[chan].active))
			continue;

		chantype = get_channel_type_from_name(
			ltt_channels_get_name_from_index(chan));
		new_trace->channels[chan].a.sb_size =
			chan_infos[chantype].def_sb_size;
		new_trace->channels[chan].a.n_sb =
			chan_infos[chantype].def_n_sb;
	}

	list_add(&new_trace->list, &ltt_traces.setup_head);
	return 0;

trace_free:
	kfree(new_trace);
traces_error:
	return err;
}
EXPORT_SYMBOL_GPL(_ltt_trace_setup);
530
531
/* Locked wrapper around _ltt_trace_setup(). */
int ltt_trace_setup(const char *trace_name)
{
	int err;

	ltt_lock_traces();
	err = _ltt_trace_setup(trace_name);
	ltt_unlock_traces();

	return err;
}
EXPORT_SYMBOL_GPL(ltt_trace_setup);
541
/*
 * Free a trace still on the setup list (channels not yet allocated).
 * must be called from within a traces lock.
 */
static void _ltt_trace_free(struct ltt_trace *trace)
{
	list_del(&trace->list);
	kfree(trace);
}
548
549int ltt_trace_set_type(const char *trace_name, const char *trace_type)
550{
551 int err = 0;
552 struct ltt_trace *trace;
553 struct ltt_transport *tran_iter, *transport = NULL;
554
555 ltt_lock_traces();
556
557 trace = _ltt_trace_find_setup(trace_name);
558 if (!trace) {
559 printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
560 err = -ENOENT;
561 goto traces_error;
562 }
563
564 list_for_each_entry(tran_iter, &ltt_transport_list, node) {
565 if (!strcmp(tran_iter->name, trace_type)) {
566 transport = tran_iter;
567 break;
568 }
569 }
570 if (!transport) {
571 printk(KERN_ERR "LTT : Transport %s is not present.\n",
572 trace_type);
573 err = -EINVAL;
574 goto traces_error;
575 }
576
577 trace->transport = transport;
578
579traces_error:
580 ltt_unlock_traces();
581 return err;
582}
583EXPORT_SYMBOL_GPL(ltt_trace_set_type);
584
585int ltt_trace_set_channel_subbufsize(const char *trace_name,
586 const char *channel_name,
587 unsigned int size)
588{
589 int err = 0;
590 struct ltt_trace *trace;
591 int index;
592
593 ltt_lock_traces();
594
595 trace = _ltt_trace_find_setup(trace_name);
596 if (!trace) {
597 printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
598 err = -ENOENT;
599 goto traces_error;
600 }
601
602 index = ltt_channels_get_index_from_name(channel_name);
603 if (index < 0) {
604 printk(KERN_ERR "LTT : Channel %s not found\n", channel_name);
605 err = -ENOENT;
606 goto traces_error;
607 }
608 trace->channels[index].a.sb_size = size;
609
610traces_error:
611 ltt_unlock_traces();
612 return err;
613}
614EXPORT_SYMBOL_GPL(ltt_trace_set_channel_subbufsize);
615
616int ltt_trace_set_channel_subbufcount(const char *trace_name,
617 const char *channel_name,
618 unsigned int cnt)
619{
620 int err = 0;
621 struct ltt_trace *trace;
622 int index;
623
624 ltt_lock_traces();
625
626 trace = _ltt_trace_find_setup(trace_name);
627 if (!trace) {
628 printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
629 err = -ENOENT;
630 goto traces_error;
631 }
632
633 index = ltt_channels_get_index_from_name(channel_name);
634 if (index < 0) {
635 printk(KERN_ERR "LTT : Channel %s not found\n", channel_name);
636 err = -ENOENT;
637 goto traces_error;
638 }
639 trace->channels[index].a.n_sb = cnt;
640
641traces_error:
642 ltt_unlock_traces();
643 return err;
644}
645EXPORT_SYMBOL_GPL(ltt_trace_set_channel_subbufcount);
646
647int ltt_trace_set_channel_switch_timer(const char *trace_name,
648 const char *channel_name,
649 unsigned long interval)
650{
651 int err = 0;
652 struct ltt_trace *trace;
653 int index;
654
655 ltt_lock_traces();
656
657 trace = _ltt_trace_find_setup(trace_name);
658 if (!trace) {
659 printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
660 err = -ENOENT;
661 goto traces_error;
662 }
663
664 index = ltt_channels_get_index_from_name(channel_name);
665 if (index < 0) {
666 printk(KERN_ERR "LTT : Channel %s not found\n", channel_name);
667 err = -ENOENT;
668 goto traces_error;
669 }
670 ltt_channels_trace_set_timer(&trace->channels[index], interval);
671
672traces_error:
673 ltt_unlock_traces();
674 return err;
675}
676EXPORT_SYMBOL_GPL(ltt_trace_set_channel_switch_timer);
677
/**
 * ltt_trace_set_channel_enable - enable or disable a channel
 * @trace_name: name of a trace on the setup list
 * @channel_name: channel to configure
 * @enable: non-zero to enable, zero to disable
 *
 * The metadata channel can never be disabled.  Returns 0 on success,
 * -ENOENT if the trace or channel does not exist, -EINVAL when trying to
 * disable the metadata channel.
 */
int ltt_trace_set_channel_enable(const char *trace_name,
				 const char *channel_name, unsigned int enable)
{
	int err = 0;
	struct ltt_trace *trace;
	int index;

	ltt_lock_traces();

	trace = _ltt_trace_find_setup(trace_name);
	if (!trace) {
		printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
		err = -ENOENT;
		goto traces_error;
	}

	/*
	 * The metadata channel (marker info) is required to be able to
	 * read the trace, so it must always stay enabled.
	 */
	if (!enable && !strcmp(channel_name, "metadata")) {
		printk(KERN_ERR "LTT : Trying to disable metadata channel\n");
		err = -EINVAL;
		goto traces_error;
	}

	index = ltt_channels_get_index_from_name(channel_name);
	if (index < 0) {
		printk(KERN_ERR "LTT : Channel %s not found\n", channel_name);
		err = -ENOENT;
		goto traces_error;
	}

	trace->channels[index].active = enable;

traces_error:
	ltt_unlock_traces();
	return err;
}
EXPORT_SYMBOL_GPL(ltt_trace_set_channel_enable);
718
/**
 * ltt_trace_set_channel_overwrite - set a channel's overwrite mode
 * @trace_name: name of a trace on the setup list
 * @channel_name: channel to configure
 * @overwrite: non-zero for overwrite (flight-recorder) mode
 *
 * The metadata channel can never be put in overwrite mode.  Returns 0 on
 * success, -ENOENT if the trace or channel does not exist, -EINVAL when
 * trying to set metadata to overwrite.
 */
int ltt_trace_set_channel_overwrite(const char *trace_name,
				    const char *channel_name,
				    unsigned int overwrite)
{
	int err = 0;
	struct ltt_trace *trace;
	int index;

	ltt_lock_traces();

	trace = _ltt_trace_find_setup(trace_name);
	if (!trace) {
		printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
		err = -ENOENT;
		goto traces_error;
	}

	/*
	 * Always put the metadata channel in non-overwrite mode :
	 * This is a very low traffic channel and it can't afford to have its
	 * data overwritten : this data (marker info) is necessary to be
	 * able to read the trace.
	 */
	if (overwrite && !strcmp(channel_name, "metadata")) {
		printk(KERN_ERR "LTT : Trying to set metadata channel to "
				"overwrite mode\n");
		err = -EINVAL;
		goto traces_error;
	}

	index = ltt_channels_get_index_from_name(channel_name);
	if (index < 0) {
		printk(KERN_ERR "LTT : Channel %s not found\n", channel_name);
		err = -ENOENT;
		goto traces_error;
	}

	trace->channels[index].overwrite = overwrite;

traces_error:
	ltt_unlock_traces();
	return err;
}
EXPORT_SYMBOL_GPL(ltt_trace_set_channel_overwrite);
763
764int ltt_trace_alloc(const char *trace_name)
765{
766 int err = 0;
767 struct ltt_trace *trace;
768 int sb_size, n_sb;
769 unsigned long flags;
770 int chan;
771 const char *channel_name;
772
773 ltt_lock_traces();
774
775 trace = _ltt_trace_find_setup(trace_name);
776 if (!trace) {
777 printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
778 err = -ENOENT;
779 goto traces_error;
780 }
781
782 kref_init(&trace->kref);
783 init_waitqueue_head(&trace->kref_wq);
784 trace->active = 0;
b46ea6e8
MD
785 err = get_trace_clock();
786 if (err)
787 goto traces_error;
1c8284eb
MD
788 trace->freq_scale = trace_clock_freq_scale();
789
790 if (!trace->transport) {
791 printk(KERN_ERR "LTT : Transport is not set.\n");
792 err = -EINVAL;
793 goto transport_error;
794 }
795 if (!try_module_get(trace->transport->owner)) {
796 printk(KERN_ERR "LTT : Can't lock transport module.\n");
797 err = -ENODEV;
798 goto transport_error;
799 }
800 trace->ops = &trace->transport->ops;
801
802 err = trace->ops->create_dirs(trace);
803 if (err) {
804 printk(KERN_ERR "LTT : Can't create dir for trace %s.\n",
805 trace_name);
806 goto dirs_error;
807 }
808
809 local_irq_save(flags);
810 trace->start_freq = trace_clock_frequency();
811 trace->start_tsc = trace_clock_read64();
812 do_gettimeofday(&trace->start_time);
813 local_irq_restore(flags);
814
815 for (chan = 0; chan < trace->nr_channels; chan++) {
816 if (!(trace->channels[chan].active))
817 continue;
818
819 channel_name = ltt_channels_get_name_from_index(chan);
820 WARN_ON(!channel_name);
821 /*
822 * note: sb_size and n_sb will be overwritten with updated
823 * values by channel creation.
824 */
825 sb_size = trace->channels[chan].a.sb_size;
826 n_sb = trace->channels[chan].a.n_sb;
827 prepare_chan_size_num(&sb_size, &n_sb);
828 err = trace->ops->create_channel(channel_name,
829 &trace->channels[chan],
830 trace->dentry.trace_root,
831 sb_size, n_sb,
832 trace->channels[chan].overwrite, trace);
833 if (err != 0) {
834 printk(KERN_ERR "LTT : Can't create channel %s.\n",
835 channel_name);
836 goto create_channel_error;
837 }
838 }
839
840 list_del(&trace->list);
841 if (list_empty(&ltt_traces.head)) {
842 mod_timer(&ltt_async_wakeup_timer,
843 jiffies + LTT_PERCPU_TIMER_INTERVAL);
844 set_kernel_trace_flag_all_tasks();
845 }
846 list_add_rcu(&trace->list, &ltt_traces.head);
847 synchronize_trace();
848
849 ltt_unlock_traces();
850
851 return 0;
852
853create_channel_error:
854 for (chan--; chan >= 0; chan--) {
855 if (trace->channels[chan].active) {
856 struct ltt_chan *chanp = &trace->channels[chan];
857 trace->ops->remove_channel_files(chanp);
858 kref_put(&chanp->a.kref, trace->ops->remove_channel);
859 }
860 }
861 trace->ops->remove_dirs(trace);
862
863dirs_error:
864 module_put(trace->transport->owner);
865transport_error:
866 put_trace_clock();
867traces_error:
868 ltt_unlock_traces();
869 return err;
870}
871EXPORT_SYMBOL_GPL(ltt_trace_alloc);
872
/*
 * It is worked as a wrapper for current version of ltt_control.ko.
 * We will make a new ltt_control based on debugfs, and control each channel's
 * buffer.
 *
 * Performs setup + set_type + alloc in one call.
 * NOTE(review): mode and the subbuf size/count parameters are not used
 * here — channel geometry comes from the chan_infos defaults; confirm
 * this is intended for the ltt_control ABI.
 */
static
int ltt_trace_create(const char *trace_name, const char *trace_type,
		     enum trace_mode mode,
		     unsigned int subbuf_size_low, unsigned int n_subbufs_low,
		     unsigned int subbuf_size_med, unsigned int n_subbufs_med,
		     unsigned int subbuf_size_high, unsigned int n_subbufs_high)
{
	int err = 0;

	err = ltt_trace_setup(trace_name);
	if (IS_ERR_VALUE(err))
		return err;

	err = ltt_trace_set_type(trace_name, trace_type);
	if (IS_ERR_VALUE(err))
		return err;

	err = ltt_trace_alloc(trace_name);
	if (IS_ERR_VALUE(err))
		return err;

	return err;
}
901
/*
 * Must be called while sure that trace is in the list.
 * Non-sleeping part of trace destruction: unlinks the trace from the
 * active list under the traces lock and, when it was the last trace,
 * stops per-task trace flags and the reader wakeup timer.  The sleepable
 * teardown is done afterwards by __ltt_trace_destroy().
 */
static int _ltt_trace_destroy(struct ltt_trace *trace)
{
	int err = -EPERM;

	if (trace == NULL) {
		err = -ENOENT;
		goto traces_error;
	}
	if (trace->active) {
		/* An active tracer must be stopped before destruction. */
		printk(KERN_ERR
		       "LTT : Can't destroy trace %s : tracer is active\n",
		       trace->trace_name);
		err = -EBUSY;
		goto active_error;
	}
	/* Everything went fine */
	list_del_rcu(&trace->list);
	synchronize_trace();
	if (list_empty(&ltt_traces.head)) {
		clear_kernel_trace_flag_all_tasks();
		/*
		 * We stop the asynchronous delivery of reader wakeup, but
		 * we must make one last check for reader wakeups pending
		 * later in __ltt_trace_destroy.
		 */
		del_timer_sync(&ltt_async_wakeup_timer);
	}
	return 0;

	/* error handling */
active_error:
traces_error:
	return err;
}
937
/*
 * Sleepable part of the destroy: finish the channels, deliver the last
 * reader wakeups, release channel files/references, wait for lttd
 * readers to drop their file references, then drop the final trace
 * reference.  Called with the traces lock held; the lock is temporarily
 * released while waiting for readers.
 */
static void __ltt_trace_destroy(struct ltt_trace *trace)
{
	int i;
	struct ltt_chan *chan;

	for (i = 0; i < trace->nr_channels; i++) {
		chan = &trace->channels[i];
		if (chan->active)
			trace->ops->finish_channel(chan);
	}

	/* Let any queued channel work complete before tearing down. */
	flush_scheduled_work();

	/*
	 * The currently destroyed trace is not in the trace list anymore,
	 * so it's safe to call the async wakeup ourself. It will deliver
	 * the last subbuffers.
	 */
	trace_async_wakeup(trace);

	for (i = 0; i < trace->nr_channels; i++) {
		chan = &trace->channels[i];
		if (chan->active) {
			trace->ops->remove_channel_files(chan);
			kref_put(&chan->a.kref,
				 trace->ops->remove_channel);
		}
	}

	/*
	 * Wait for lttd readers to release the files, therefore making sure
	 * the last subbuffers have been read.
	 */
	if (atomic_read(&trace->kref.refcount) > 1) {
		int ret = 0;
		/*
		 * Unlock traces and CPU hotplug while we wait for lttd to
		 * release the files.
		 */
		ltt_unlock_traces();
		__wait_event_interruptible(trace->kref_wq,
			(atomic_read(&trace->kref.refcount) == 1), ret);
		ltt_lock_traces();
	}

	/* Last reference: frees the trace via ltt_release_trace(). */
	kref_put(&trace->kref, ltt_release_trace);
}
986
/**
 * ltt_trace_destroy - destroy a trace by name
 * @trace_name: trace to destroy
 *
 * Handles both fully allocated traces (unlink, teardown, release trace
 * clock) and traces still on the setup list (simple free).
 * Returns 0 on success, -ENOENT if no such trace, -EBUSY if still active.
 */
int ltt_trace_destroy(const char *trace_name)
{
	int err = 0;
	struct ltt_trace *trace;

	ltt_lock_traces();

	/* Allocated (active-list) trace: full teardown path. */
	trace = _ltt_trace_find(trace_name);
	if (trace) {
		err = _ltt_trace_destroy(trace);
		if (err)
			goto error;

		__ltt_trace_destroy(trace);
		ltt_unlock_traces();
		put_trace_clock();

		return 0;
	}

	/* Setup-list trace: nothing allocated yet, just free it. */
	trace = _ltt_trace_find_setup(trace_name);
	if (trace) {
		_ltt_trace_free(trace);
		ltt_unlock_traces();
		return 0;
	}

	err = -ENOENT;

	/* Error handling */
error:
	ltt_unlock_traces();
	return err;
}
EXPORT_SYMBOL_GPL(ltt_trace_destroy);
1022
1023/*
1024 * called with trace lock held.
1025 */
1026static
1027void ltt_channels_trace_start_timer(struct ltt_chan *channels,
1028 unsigned int nr_channels)
1029{
1030 int i;
1031
1032 for (i = 0; i < nr_channels; i++) {
1033 struct ltt_chan *chan = &channels[i];
e5b34c66
MD
1034
1035 if (chan->active)
1036 chan->a.trace->ops->start_switch_timer(chan);
1c8284eb
MD
1037 }
1038}
1039
1040/*
1041 * called with trace lock held.
1042 */
1043static
1044void ltt_channels_trace_stop_timer(struct ltt_chan *channels,
1045 unsigned int nr_channels)
1046{
1047 int i;
1048
1049 for (i = 0; i < nr_channels; i++) {
1050 struct ltt_chan *chan = &channels[i];
e5b34c66
MD
1051
1052 if (chan->active)
1053 chan->a.trace->ops->stop_switch_timer(chan);
1c8284eb
MD
1054 }
1055}
1056
/*
 * must be called from within a traces lock.
 * Marks the trace active: pins the filter module, starts the per-channel
 * switch timers, and bumps the global active-trace count read by the
 * tracing fast path.  Starting an already active trace only logs a notice.
 */
static int _ltt_trace_start(struct ltt_trace *trace)
{
	int err = 0;

	if (trace == NULL) {
		err = -ENOENT;
		goto traces_error;
	}
	if (trace->active)
		printk(KERN_INFO "LTT : Tracing already active for trace %s\n",
		       trace->trace_name);
	if (!try_module_get(ltt_run_filter_owner)) {
		err = -ENODEV;
		printk(KERN_ERR "LTT : Can't lock filter module.\n");
		goto get_ltt_run_filter_error;
	}
	ltt_channels_trace_start_timer(trace->channels, trace->nr_channels);
	trace->active = 1;
	/* Read by trace points without protection : be careful */
	ltt_traces.num_active_traces++;
	return err;

	/* error handling */
get_ltt_run_filter_error:
traces_error:
	return err;
}
1085
/**
 * ltt_trace_start - start tracing on a trace by name
 * @trace_name: trace to start
 *
 * Starts the trace, then dumps marker state and runs the registered
 * state dump outside the traces lock.  Returns 0 on success, -ENOENT if
 * no such trace, -ENODEV if the filter or statedump module can't be
 * pinned.
 */
int ltt_trace_start(const char *trace_name)
{
	int err = 0;
	struct ltt_trace *trace;

	ltt_lock_traces();

	trace = _ltt_trace_find(trace_name);
	err = _ltt_trace_start(trace);
	if (err)
		goto no_trace;

	ltt_unlock_traces();

	/*
	 * Call the kernel state dump.
	 * Events will be mixed with real kernel events, it's ok.
	 * Notice that there is no protection on the trace : that's exactly
	 * why we iterate on the list and check for trace equality instead of
	 * directly using this trace handle inside the logging function.
	 */

	ltt_dump_marker_state(trace);

	if (!try_module_get(ltt_statedump_owner)) {
		err = -ENODEV;
		printk(KERN_ERR
		       "LTT : Can't lock state dump module.\n");
	} else {
		ltt_statedump_functor(trace);
		module_put(ltt_statedump_owner);
	}

	return err;

	/* Error handling */
no_trace:
	ltt_unlock_traces();
	return err;
}
EXPORT_SYMBOL_GPL(ltt_trace_start);
1127
/*
 * must be called from within traces lock.
 * Marks the trace inactive, stops the per-channel switch timers, waits
 * for in-flight tracing code to finish (synchronize_trace), and releases
 * the filter module reference taken at start.  Stopping an already
 * stopped trace only logs a notice and still drops the module reference.
 */
static int _ltt_trace_stop(struct ltt_trace *trace)
{
	int err = -EPERM;

	if (trace == NULL) {
		err = -ENOENT;
		goto traces_error;
	}
	if (!trace->active)
		printk(KERN_INFO "LTT : Tracing not active for trace %s\n",
		       trace->trace_name);
	if (trace->active) {
		ltt_channels_trace_stop_timer(trace->channels,
					      trace->nr_channels);
		trace->active = 0;
		ltt_traces.num_active_traces--;
		synchronize_trace(); /* Wait for each tracing to be finished */
	}
	module_put(ltt_run_filter_owner);
	/* Everything went fine */
	return 0;

	/* Error handling */
traces_error:
	return err;
}
1155
/* Locked wrapper: look up @trace_name and stop tracing on it. */
int ltt_trace_stop(const char *trace_name)
{
	struct ltt_trace *trace;
	int err;

	ltt_lock_traces();
	trace = _ltt_trace_find(trace_name);
	err = _ltt_trace_stop(trace);
	ltt_unlock_traces();

	return err;
}
EXPORT_SYMBOL_GPL(ltt_trace_stop);
1168
/**
 * ltt_control - Trace control in-kernel API
 * @msg: Action to perform
 * @trace_name: Trace on which the action must be done
 * @trace_type: Type of trace (normal, flight, hybrid)
 * @args: Arguments specific to the action
 *
 * Dispatches to the start/stop/create/destroy entry points.
 * Returns the entry point's result, or -EPERM for an unknown message.
 */
int ltt_control(enum ltt_control_msg msg, const char *trace_name,
		const char *trace_type, union ltt_control_args args)
{
	int err = -EPERM;

	/*
	 * NOTE(review): KERN_ALERT looks like leftover debugging — the
	 * sibling messages below use KERN_DEBUG; confirm intended level.
	 */
	printk(KERN_ALERT "ltt_control : trace %s\n", trace_name);
	switch (msg) {
	case LTT_CONTROL_START:
		printk(KERN_DEBUG "Start tracing %s\n", trace_name);
		err = ltt_trace_start(trace_name);
		break;
	case LTT_CONTROL_STOP:
		printk(KERN_DEBUG "Stop tracing %s\n", trace_name);
		err = ltt_trace_stop(trace_name);
		break;
	case LTT_CONTROL_CREATE_TRACE:
		printk(KERN_DEBUG "Creating trace %s\n", trace_name);
		err = ltt_trace_create(trace_name, trace_type,
				       args.new_trace.mode,
				       args.new_trace.subbuf_size_low,
				       args.new_trace.n_subbufs_low,
				       args.new_trace.subbuf_size_med,
				       args.new_trace.n_subbufs_med,
				       args.new_trace.subbuf_size_high,
				       args.new_trace.n_subbufs_high);
		break;
	case LTT_CONTROL_DESTROY_TRACE:
		printk(KERN_DEBUG "Destroying trace %s\n", trace_name);
		err = ltt_trace_destroy(trace_name);
		break;
	}
	return err;
}
EXPORT_SYMBOL_GPL(ltt_control);
1210
/**
 * ltt_filter_control - Trace filter control in-kernel API
 * @msg: Action to perform on the filter
 * @trace_name: Trace on which the action must be done
 *
 * Proxies the request to the registered filter control module, with the
 * module pinned for the duration of the call.  Returns the functor's
 * result, -ENOENT if the trace does not exist, -ENODEV if the filter
 * control module can't be pinned, -EPERM for an unknown message.
 */
int ltt_filter_control(enum ltt_filter_control_msg msg, const char *trace_name)
{
	int err;
	struct ltt_trace *trace;

	printk(KERN_DEBUG "ltt_filter_control : trace %s\n", trace_name);
	ltt_lock_traces();
	trace = _ltt_trace_find(trace_name);
	if (trace == NULL) {
		printk(KERN_ALERT
		       "Trace does not exist. Cannot proxy control request\n");
		err = -ENOENT;
		goto trace_error;
	}
	if (!try_module_get(ltt_filter_control_owner)) {
		err = -ENODEV;
		goto get_module_error;
	}
	switch (msg) {
	case LTT_FILTER_DEFAULT_ACCEPT:
		printk(KERN_DEBUG
		       "Proxy filter default accept %s\n", trace_name);
		err = (*ltt_filter_control_functor)(msg, trace);
		break;
	case LTT_FILTER_DEFAULT_REJECT:
		printk(KERN_DEBUG
		       "Proxy filter default reject %s\n", trace_name);
		err = (*ltt_filter_control_functor)(msg, trace);
		break;
	default:
		err = -EPERM;
	}
	module_put(ltt_filter_control_owner);

get_module_error:
trace_error:
	ltt_unlock_traces();
	return err;
}
EXPORT_SYMBOL_GPL(ltt_filter_control);
1256
/*
 * Module init: synchronize vmalloc mappings so tracing code cannot fault,
 * and make the reader wakeup timer deferrable (it need not wake an idle
 * CPU just to deliver wakeups).
 */
int __init ltt_init(void)
{
	/* Make sure no page fault can be triggered by this module */
	vmalloc_sync_all();
	init_timer_deferrable(&ltt_async_wakeup_timer);
	return 0;
}

module_init(ltt_init)
1266
/*
 * Module exit: stop every trace, wait for tracing code to quiesce, then
 * destroy all allocated traces and free those still on the setup list.
 */
static void __exit ltt_exit(void)
{
	struct ltt_trace *trace;
	struct list_head *pos, *n;

	ltt_lock_traces();
	/* Stop each trace, currently being read by RCU read-side */
	list_for_each_entry_rcu(trace, &ltt_traces.head, list)
		_ltt_trace_stop(trace);
	/* Wait for quiescent state. Readers have preemption disabled. */
	synchronize_trace();
	/* Safe iteration is now permitted. It does not have to be RCU-safe
	 * because no readers are left. */
	list_for_each_safe(pos, n, &ltt_traces.head) {
		trace = container_of(pos, struct ltt_trace, list);
		/* _ltt_trace_destroy does a synchronize_trace() */
		_ltt_trace_destroy(trace);
		__ltt_trace_destroy(trace);
	}
	/* free traces in pre-alloc status */
	list_for_each_safe(pos, n, &ltt_traces.setup_head) {
		trace = container_of(pos, struct ltt_trace, list);
		_ltt_trace_free(trace);
	}

	ltt_unlock_traces();
}

module_exit(ltt_exit)
1296
1297MODULE_LICENSE("GPL and additional rights");
1298MODULE_AUTHOR("Mathieu Desnoyers");
1299MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Tracer Kernel API");