/*
 * ltt/ltt-tracer.c
 *
 * (C) Copyright 2005-2008 -
 *		Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 *
 * Tracing management internal kernel API. Trace buffer allocation/free, tracing
 * start/stop.
 *
 * Author:
 *	Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 *
 * Inspired from LTT :
 *	Karim Yaghmour (karim@opersys.com)
 *	Tom Zanussi (zanussi@us.ibm.com)
 *	Bob Wisniewski (bob@watson.ibm.com)
 * And from K42 :
 *	Bob Wisniewski (bob@watson.ibm.com)
 *
 * Changelog:
 *	22/09/06, Move to the marker/probes mechanism.
 *	19/10/05, Complete lockless mechanism.
 *	27/05/05, Modular redesign and rewrite.
 */

//ust// #include <linux/time.h>
//ust// #include <linux/ltt-tracer.h>
//ust// #include <linux/module.h>
//ust// #include <linux/string.h>
//ust// #include <linux/slab.h>
//ust// #include <linux/init.h>
//ust// #include <linux/rcupdate.h>
//ust// #include <linux/sched.h>
//ust// #include <linux/bitops.h>
//ust// #include <linux/fs.h>
//ust// #include <linux/cpu.h>
//ust// #include <linux/kref.h>
//ust// #include <linux/delay.h>
//ust// #include <linux/vmalloc.h>
//ust// #include <asm/atomic.h>
#include "tracercore.h"
#include "tracer.h"
#include "kernelcompat.h"
#include "usterr.h"

//ust// static void async_wakeup(unsigned long data);
//ust//
//ust// static DEFINE_TIMER(ltt_async_wakeup_timer, async_wakeup, 0, 0);

/* Default callbacks for modules */
notrace int ltt_filter_control_default(enum ltt_filter_control_msg msg,
		struct ltt_trace_struct *trace)
{
	return 0;
}

int ltt_statedump_default(struct ltt_trace_struct *trace)
{
	return 0;
}

/* Callbacks for registered modules */

int (*ltt_filter_control_functor)
	(enum ltt_filter_control_msg msg, struct ltt_trace_struct *trace) =
					ltt_filter_control_default;
struct module *ltt_filter_control_owner;

/* These function pointers are protected by a trace activation check */
struct module *ltt_run_filter_owner;
int (*ltt_statedump_functor)(struct ltt_trace_struct *trace) =
					ltt_statedump_default;
struct module *ltt_statedump_owner;

struct chan_info_struct {
	const char *name;
	unsigned int def_subbufsize;
	unsigned int def_subbufcount;
} chan_infos[] = {
	[LTT_CHANNEL_METADATA] = {
		LTT_METADATA_CHANNEL,
		LTT_DEFAULT_SUBBUF_SIZE_LOW,
		LTT_DEFAULT_N_SUBBUFS_LOW,
	},
	[LTT_CHANNEL_UST] = {
		LTT_UST_CHANNEL,
		LTT_DEFAULT_SUBBUF_SIZE_HIGH,
		LTT_DEFAULT_N_SUBBUFS_HIGH,
	},
};

static enum ltt_channels get_channel_type_from_name(const char *name)
{
	int i;

	if (!name)
		return LTT_CHANNEL_UST;

	for (i = 0; i < ARRAY_SIZE(chan_infos); i++)
		if (chan_infos[i].name && !strcmp(name, chan_infos[i].name))
			return (enum ltt_channels)i;

	return LTT_CHANNEL_UST;
}
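
/*
 * For example, get_channel_type_from_name("metadata") yields
 * LTT_CHANNEL_METADATA (assuming LTT_METADATA_CHANNEL expands to the string
 * "metadata", as the rest of this file suggests), while a NULL or unknown
 * name falls back to LTT_CHANNEL_UST and therefore inherits the "high"
 * default sub-buffer size and count from chan_infos[].
 */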

/**
 * ltt_module_register - LTT module registration
 * @name: module type
 * @function: callback to register
 * @owner: module which owns the callback
 *
 * The module calling this registration function must ensure that no
 * trap-inducing code will be executed by "function". E.g. vmalloc_sync_all()
 * must be called between a vmalloc and the moment the memory is made visible to
 * "function". This registration acts as a vmalloc_sync_all. Therefore, only if
 * the module allocates virtual memory after its registration must it
 * synchronize the TLBs.
 */
//ust// int ltt_module_register(enum ltt_module_function name, void *function,
//ust// 		struct module *owner)
//ust// {
//ust// 	int ret = 0;
//ust//
//ust// 	/*
//ust// 	 * Make sure no page fault can be triggered by the module about to be
//ust// 	 * registered. We deal with this here so we don't have to call
//ust// 	 * vmalloc_sync_all() in each module's init.
//ust// 	 */
//ust// 	vmalloc_sync_all();
//ust//
//ust// 	switch (name) {
//ust// 	case LTT_FUNCTION_RUN_FILTER:
//ust// 		if (ltt_run_filter_owner != NULL) {
//ust// 			ret = -EEXIST;
//ust// 			goto end;
//ust// 		}
//ust// 		ltt_filter_register((ltt_run_filter_functor)function);
//ust// 		ltt_run_filter_owner = owner;
//ust// 		break;
//ust// 	case LTT_FUNCTION_FILTER_CONTROL:
//ust// 		if (ltt_filter_control_owner != NULL) {
//ust// 			ret = -EEXIST;
//ust// 			goto end;
//ust// 		}
//ust// 		ltt_filter_control_functor =
//ust// 			(int (*)(enum ltt_filter_control_msg,
//ust// 			struct ltt_trace_struct *))function;
//ust// 		ltt_filter_control_owner = owner;
//ust// 		break;
//ust// 	case LTT_FUNCTION_STATEDUMP:
//ust// 		if (ltt_statedump_owner != NULL) {
//ust// 			ret = -EEXIST;
//ust// 			goto end;
//ust// 		}
//ust// 		ltt_statedump_functor =
//ust// 			(int (*)(struct ltt_trace_struct *))function;
//ust// 		ltt_statedump_owner = owner;
//ust// 		break;
//ust// 	}
//ust//
//ust// end:
//ust//
//ust// 	return ret;
//ust// }
//ust// EXPORT_SYMBOL_GPL(ltt_module_register);

/**
 * ltt_module_unregister - LTT module unregistration
 * @name: module type
 */
//ust// void ltt_module_unregister(enum ltt_module_function name)
//ust// {
//ust// 	switch (name) {
//ust// 	case LTT_FUNCTION_RUN_FILTER:
//ust// 		ltt_filter_unregister();
//ust// 		ltt_run_filter_owner = NULL;
//ust// 		/* Wait for preempt sections to finish */
//ust// 		synchronize_sched();
//ust// 		break;
//ust// 	case LTT_FUNCTION_FILTER_CONTROL:
//ust// 		ltt_filter_control_functor = ltt_filter_control_default;
//ust// 		ltt_filter_control_owner = NULL;
//ust// 		break;
//ust// 	case LTT_FUNCTION_STATEDUMP:
//ust// 		ltt_statedump_functor = ltt_statedump_default;
//ust// 		ltt_statedump_owner = NULL;
//ust// 		break;
//ust// 	}
//ust//
//ust// }
//ust// EXPORT_SYMBOL_GPL(ltt_module_unregister);

static LIST_HEAD(ltt_transport_list);

/**
 * ltt_transport_register - LTT transport registration
 * @transport: transport structure
 *
 * Registers a transport which can be used as output to extract the data out of
 * LTTng. The module calling this registration function must ensure that no
 * trap-inducing code will be executed by the transport functions. E.g.
 * vmalloc_sync_all() must be called between a vmalloc and the moment the memory
 * is made visible to the transport function. This registration acts as a
 * vmalloc_sync_all. Therefore, only if the module allocates virtual memory
 * after its registration must it synchronize the TLBs.
 */
//ust// void ltt_transport_register(struct ltt_transport *transport)
//ust// {
//ust// 	/*
//ust// 	 * Make sure no page fault can be triggered by the module about to be
//ust// 	 * registered. We deal with this here so we don't have to call
//ust// 	 * vmalloc_sync_all() in each module's init.
//ust// 	 */
//ust// 	vmalloc_sync_all();
//ust//
//ust// 	ltt_lock_traces();
//ust// 	list_add_tail(&transport->node, &ltt_transport_list);
//ust// 	ltt_unlock_traces();
//ust// }
//ust// EXPORT_SYMBOL_GPL(ltt_transport_register);

/**
 * ltt_transport_unregister - LTT transport unregistration
 * @transport: transport structure
 */
//ust// void ltt_transport_unregister(struct ltt_transport *transport)
//ust// {
//ust// 	ltt_lock_traces();
//ust// 	list_del(&transport->node);
//ust// 	ltt_unlock_traces();
//ust// }
//ust// EXPORT_SYMBOL_GPL(ltt_transport_unregister);

static inline int is_channel_overwrite(enum ltt_channels chan,
	enum trace_mode mode)
{
	switch (mode) {
	case LTT_TRACE_NORMAL:
		return 0;
	case LTT_TRACE_FLIGHT:
		switch (chan) {
		case LTT_CHANNEL_METADATA:
			return 0;
		default:
			return 1;
		}
	case LTT_TRACE_HYBRID:
		switch (chan) {
		case LTT_CHANNEL_METADATA:
			return 0;
		default:
			return 1;
		}
	default:
		return 0;
	}
}
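
/*
 * In short: LTT_TRACE_NORMAL never overwrites; LTT_TRACE_FLIGHT and
 * LTT_TRACE_HYBRID overwrite every channel except metadata, whose marker
 * descriptions are needed to decode the trace and must not be dropped.
 */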

/**
 * ltt_write_trace_header - Write trace header
 * @trace: Trace information
 * @header: Memory address where the information must be written to
 */
void notrace ltt_write_trace_header(struct ltt_trace_struct *trace,
		struct ltt_subbuffer_header *header)
{
	header->magic_number = LTT_TRACER_MAGIC_NUMBER;
	header->major_version = LTT_TRACER_VERSION_MAJOR;
	header->minor_version = LTT_TRACER_VERSION_MINOR;
	header->arch_size = sizeof(void *);
	header->alignment = ltt_get_alignment();
	header->start_time_sec = trace->start_time.tv_sec;
	header->start_time_usec = trace->start_time.tv_usec;
	header->start_freq = trace->start_freq;
	header->freq_scale = trace->freq_scale;
}
//ust// EXPORT_SYMBOL_GPL(ltt_write_trace_header);

static void trace_async_wakeup(struct ltt_trace_struct *trace)
{
	int i;
	struct ltt_channel_struct *chan;

	/* Must check each channel for pending read wakeup */
	for (i = 0; i < trace->nr_channels; i++) {
		chan = &trace->channels[i];
		if (chan->active)
			trace->ops->wakeup_channel(chan);
	}
}

//ust// /* Timer to send async wakeups to the readers */
//ust// static void async_wakeup(unsigned long data)
//ust// {
//ust// 	struct ltt_trace_struct *trace;
//ust//
//ust// 	/*
//ust// 	 * PREEMPT_RT does not allow spinlocks to be taken within preempt
//ust// 	 * disable sections (spinlock taken in wake_up). However, mainline won't
//ust// 	 * allow mutex to be taken in interrupt context. Ugly.
//ust// 	 * A proper way to do this would be to turn the timer into a
//ust// 	 * periodically woken up thread, but it adds to the footprint.
//ust// 	 */
//ust// #ifndef CONFIG_PREEMPT_RT
//ust// 	rcu_read_lock_sched();
//ust// #else
//ust// 	ltt_lock_traces();
//ust// #endif
//ust// 	list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
//ust// 		trace_async_wakeup(trace);
//ust// 	}
//ust// #ifndef CONFIG_PREEMPT_RT
//ust// 	rcu_read_unlock_sched();
//ust// #else
//ust// 	ltt_unlock_traces();
//ust// #endif
//ust//
//ust// 	mod_timer(&ltt_async_wakeup_timer, jiffies + LTT_PERCPU_TIMER_INTERVAL);
//ust// }

/**
 * _ltt_trace_find - find a trace by given name.
 * trace_name: trace name
 *
 * Returns a pointer to the trace structure, NULL if not found.
 */
static struct ltt_trace_struct *_ltt_trace_find(const char *trace_name)
{
	struct ltt_trace_struct *trace;

	list_for_each_entry(trace, &ltt_traces.head, list)
		if (!strncmp(trace->trace_name, trace_name, NAME_MAX))
			return trace;

	return NULL;
}

/* _ltt_trace_find_setup :
 * find a trace in setup list by given name.
 *
 * Returns a pointer to the trace structure, NULL if not found.
 */
struct ltt_trace_struct *_ltt_trace_find_setup(const char *trace_name)
{
	struct ltt_trace_struct *trace;

	list_for_each_entry(trace, &ltt_traces.setup_head, list)
		if (!strncmp(trace->trace_name, trace_name, NAME_MAX))
			return trace;

	return NULL;
}
//ust// EXPORT_SYMBOL_GPL(_ltt_trace_find_setup);

/**
 * ltt_release_transport - Release an LTT transport
 * @kref : reference count on the transport
 */
void ltt_release_transport(struct kref *kref)
{
	struct ltt_trace_struct *trace = container_of(kref,
			struct ltt_trace_struct, ltt_transport_kref);
//ust// 	trace->ops->remove_dirs(trace);
}
//ust// EXPORT_SYMBOL_GPL(ltt_release_transport);

/**
 * ltt_release_trace - Release a LTT trace
 * @kref : reference count on the trace
 */
void ltt_release_trace(struct kref *kref)
{
	struct ltt_trace_struct *trace = container_of(kref,
			struct ltt_trace_struct, kref);
	ltt_channels_trace_free(trace->channels);
	kfree(trace);
}
//ust// EXPORT_SYMBOL_GPL(ltt_release_trace);

static inline void prepare_chan_size_num(unsigned int *subbuf_size,
		unsigned int *n_subbufs)
{
	*subbuf_size = 1 << get_count_order(*subbuf_size);
	*n_subbufs = 1 << get_count_order(*n_subbufs);

	/* Subbuf size and number must both be power of two */
	WARN_ON(hweight32(*subbuf_size) != 1);
	WARN_ON(hweight32(*n_subbufs) != 1);
}
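
/*
 * For example, assuming the usual kernel semantics of get_count_order()
 * (smallest order such that 1 << order >= the argument), a requested
 * sub-buffer size of 100000 bytes is rounded up to 131072 (2^17) and a
 * count of 3 sub-buffers becomes 4.
 */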

int _ltt_trace_setup(const char *trace_name)
{
	int err = 0;
	struct ltt_trace_struct *new_trace = NULL;
	int metadata_index;
	unsigned int chan;
	enum ltt_channels chantype;

	if (_ltt_trace_find_setup(trace_name)) {
		printk(KERN_ERR "LTT : Trace name %s already used.\n",
				trace_name);
		err = -EEXIST;
		goto traces_error;
	}

	if (_ltt_trace_find(trace_name)) {
		printk(KERN_ERR "LTT : Trace name %s already used.\n",
				trace_name);
		err = -EEXIST;
		goto traces_error;
	}

	new_trace = kzalloc(sizeof(struct ltt_trace_struct), GFP_KERNEL);
	if (!new_trace) {
		printk(KERN_ERR
			"LTT : Unable to allocate memory for trace %s\n",
			trace_name);
		err = -ENOMEM;
		goto traces_error;
	}
	strncpy(new_trace->trace_name, trace_name, NAME_MAX);
	new_trace->channels = ltt_channels_trace_alloc(&new_trace->nr_channels,
						0, 1);
	if (!new_trace->channels) {
		printk(KERN_ERR
			"LTT : Unable to allocate memory for chaninfo %s\n",
			trace_name);
		err = -ENOMEM;
		goto trace_free;
	}

	/*
	 * Force metadata channel to active, no overwrite.
	 */
	metadata_index = ltt_channels_get_index_from_name("metadata");
	WARN_ON(metadata_index < 0);
	new_trace->channels[metadata_index].overwrite = 0;
	new_trace->channels[metadata_index].active = 1;

	/*
	 * Set hardcoded tracer defaults for some channels
	 */
	for (chan = 0; chan < new_trace->nr_channels; chan++) {
		if (!(new_trace->channels[chan].active))
			continue;

		chantype = get_channel_type_from_name(
			ltt_channels_get_name_from_index(chan));
		new_trace->channels[chan].subbuf_size =
			chan_infos[chantype].def_subbufsize;
		new_trace->channels[chan].subbuf_cnt =
			chan_infos[chantype].def_subbufcount;
	}

	list_add(&new_trace->list, &ltt_traces.setup_head);
	return 0;

trace_free:
	kfree(new_trace);
traces_error:
	return err;
}
//ust// EXPORT_SYMBOL_GPL(_ltt_trace_setup);


int ltt_trace_setup(const char *trace_name)
{
	int ret;
	ltt_lock_traces();
	ret = _ltt_trace_setup(trace_name);
	ltt_unlock_traces();
	return ret;
}
//ust// EXPORT_SYMBOL_GPL(ltt_trace_setup);

/* must be called from within a traces lock. */
static void _ltt_trace_free(struct ltt_trace_struct *trace)
{
	list_del(&trace->list);
	kfree(trace);
}

int ltt_trace_set_type(const char *trace_name, const char *trace_type)
{
	int err = 0;
	struct ltt_trace_struct *trace;
	struct ltt_transport *tran_iter, *transport = NULL;

	ltt_lock_traces();

	trace = _ltt_trace_find_setup(trace_name);
	if (!trace) {
		printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
		err = -ENOENT;
		goto traces_error;
	}

	list_for_each_entry(tran_iter, &ltt_transport_list, node) {
		if (!strcmp(tran_iter->name, trace_type)) {
			transport = tran_iter;
			break;
		}
	}
	if (!transport) {
		printk(KERN_ERR "LTT : Transport %s is not present.\n",
			trace_type);
		err = -EINVAL;
		goto traces_error;
	}

	trace->transport = transport;

traces_error:
	ltt_unlock_traces();
	return err;
}
//ust// EXPORT_SYMBOL_GPL(ltt_trace_set_type);

int ltt_trace_set_channel_subbufsize(const char *trace_name,
		const char *channel_name, unsigned int size)
{
	int err = 0;
	struct ltt_trace_struct *trace;
	int index;

	ltt_lock_traces();

	trace = _ltt_trace_find_setup(trace_name);
	if (!trace) {
		printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
		err = -ENOENT;
		goto traces_error;
	}

	index = ltt_channels_get_index_from_name(channel_name);
	if (index < 0) {
		printk(KERN_ERR "LTT : Channel %s not found\n", channel_name);
		err = -ENOENT;
		goto traces_error;
	}
	trace->channels[index].subbuf_size = size;

traces_error:
	ltt_unlock_traces();
	return err;
}
//ust// EXPORT_SYMBOL_GPL(ltt_trace_set_channel_subbufsize);

int ltt_trace_set_channel_subbufcount(const char *trace_name,
		const char *channel_name, unsigned int cnt)
{
	int err = 0;
	struct ltt_trace_struct *trace;
	int index;

	ltt_lock_traces();

	trace = _ltt_trace_find_setup(trace_name);
	if (!trace) {
		printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
		err = -ENOENT;
		goto traces_error;
	}

	index = ltt_channels_get_index_from_name(channel_name);
	if (index < 0) {
		printk(KERN_ERR "LTT : Channel %s not found\n", channel_name);
		err = -ENOENT;
		goto traces_error;
	}
	trace->channels[index].subbuf_cnt = cnt;

traces_error:
	ltt_unlock_traces();
	return err;
}
//ust// EXPORT_SYMBOL_GPL(ltt_trace_set_channel_subbufcount);

int ltt_trace_set_channel_enable(const char *trace_name,
		const char *channel_name, unsigned int enable)
{
	int err = 0;
	struct ltt_trace_struct *trace;
	int index;

	ltt_lock_traces();

	trace = _ltt_trace_find_setup(trace_name);
	if (!trace) {
		printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
		err = -ENOENT;
		goto traces_error;
	}

	/*
	 * The data in the metadata channel (marker info) is necessary to be
	 * able to read the trace, so we always keep this channel enabled.
	 */
	if (!enable && !strcmp(channel_name, "metadata")) {
		printk(KERN_ERR "LTT : Trying to disable metadata channel\n");
		err = -EINVAL;
		goto traces_error;
	}

	index = ltt_channels_get_index_from_name(channel_name);
	if (index < 0) {
		printk(KERN_ERR "LTT : Channel %s not found\n", channel_name);
		err = -ENOENT;
		goto traces_error;
	}

	trace->channels[index].active = enable;

traces_error:
	ltt_unlock_traces();
	return err;
}
//ust// EXPORT_SYMBOL_GPL(ltt_trace_set_channel_enable);

int ltt_trace_set_channel_overwrite(const char *trace_name,
		const char *channel_name, unsigned int overwrite)
{
	int err = 0;
	struct ltt_trace_struct *trace;
	int index;

	ltt_lock_traces();

	trace = _ltt_trace_find_setup(trace_name);
	if (!trace) {
		printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
		err = -ENOENT;
		goto traces_error;
	}

	/*
	 * Always put the metadata channel in non-overwrite mode :
	 * This is a very low traffic channel and it can't afford to have its
	 * data overwritten : this data (marker info) is necessary to be
	 * able to read the trace.
	 */
	if (overwrite && !strcmp(channel_name, "metadata")) {
		printk(KERN_ERR "LTT : Trying to set metadata channel to "
				"overwrite mode\n");
		err = -EINVAL;
		goto traces_error;
	}

	index = ltt_channels_get_index_from_name(channel_name);
	if (index < 0) {
		printk(KERN_ERR "LTT : Channel %s not found\n", channel_name);
		err = -ENOENT;
		goto traces_error;
	}

	trace->channels[index].overwrite = overwrite;

traces_error:
	ltt_unlock_traces();
	return err;
}
//ust// EXPORT_SYMBOL_GPL(ltt_trace_set_channel_overwrite);

int ltt_trace_alloc(const char *trace_name)
{
	int err = 0;
	struct ltt_trace_struct *trace;
	unsigned int subbuf_size, subbuf_cnt;
	unsigned long flags;
	int chan;
	const char *channel_name;

	ltt_lock_traces();

	trace = _ltt_trace_find_setup(trace_name);
	if (!trace) {
		printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
		err = -ENOENT;
		goto traces_error;
	}

	kref_init(&trace->kref);
	kref_init(&trace->ltt_transport_kref);
//ust// 	init_waitqueue_head(&trace->kref_wq);
	trace->active = 0;
//ust// 	get_trace_clock();
	trace->freq_scale = trace_clock_freq_scale();

	if (!trace->transport) {
		printk(KERN_ERR "LTT : Transport is not set.\n");
		err = -EINVAL;
		goto transport_error;
	}
//ust// 	if (!try_module_get(trace->transport->owner)) {
//ust// 		printk(KERN_ERR "LTT : Can't lock transport module.\n");
//ust// 		err = -ENODEV;
//ust// 		goto transport_error;
//ust// 	}
	trace->ops = &trace->transport->ops;

//ust// 	err = trace->ops->create_dirs(trace);
//ust// 	if (err) {
//ust// 		printk(KERN_ERR "LTT : Can't create dir for trace %s.\n",
//ust// 			trace_name);
//ust// 		goto dirs_error;
//ust// 	}

//ust// 	local_irq_save(flags);
	trace->start_freq = trace_clock_frequency();
	trace->start_tsc = trace_clock_read64();
	gettimeofday(&trace->start_time, NULL); //ust// changed
//ust// 	local_irq_restore(flags);

	for (chan = 0; chan < trace->nr_channels; chan++) {
		if (!(trace->channels[chan].active))
			continue;

		channel_name = ltt_channels_get_name_from_index(chan);
		WARN_ON(!channel_name);
		subbuf_size = trace->channels[chan].subbuf_size;
		subbuf_cnt = trace->channels[chan].subbuf_cnt;
		prepare_chan_size_num(&subbuf_size, &subbuf_cnt);
		err = trace->ops->create_channel(trace_name, trace,
				trace->dentry.trace_root,
				channel_name,
				&trace->channels[chan],
				subbuf_size,
				subbuf_cnt,
				trace->channels[chan].overwrite);
		if (err != 0) {
			printk(KERN_ERR "LTT : Can't create channel %s.\n",
				channel_name);
			goto create_channel_error;
		}
	}

	list_del(&trace->list);
//ust// 	if (list_empty(&ltt_traces.head)) {
//ust// 		mod_timer(&ltt_async_wakeup_timer,
//ust// 				jiffies + LTT_PERCPU_TIMER_INTERVAL);
//ust// 		set_kernel_trace_flag_all_tasks();
//ust// 	}
//ust// 	list_add_rcu(&trace->list, &ltt_traces.head);
//ust// 	synchronize_sched();

	ltt_unlock_traces();

	return 0;

create_channel_error:
	for (chan--; chan >= 0; chan--)
		if (trace->channels[chan].active)
			trace->ops->remove_channel(&trace->channels[chan]);

dirs_error:
//ust// 	module_put(trace->transport->owner);
transport_error:
//ust// 	put_trace_clock();
traces_error:
	ltt_unlock_traces();
	return err;
}
//ust// EXPORT_SYMBOL_GPL(ltt_trace_alloc);

/*
 * This works as a wrapper around the current version of ltt_control.ko.
 * We will make a new ltt_control based on debugfs, and control each channel's
 * buffer.
 */
static int ltt_trace_create(const char *trace_name, const char *trace_type,
		enum trace_mode mode,
		unsigned int subbuf_size_low, unsigned int n_subbufs_low,
		unsigned int subbuf_size_med, unsigned int n_subbufs_med,
		unsigned int subbuf_size_high, unsigned int n_subbufs_high)
{
	int err = 0;

	err = ltt_trace_setup(trace_name);
	if (IS_ERR_VALUE(err))
		return err;

	err = ltt_trace_set_type(trace_name, trace_type);
	if (IS_ERR_VALUE(err))
		return err;

	err = ltt_trace_alloc(trace_name);
	if (IS_ERR_VALUE(err))
		return err;

	return err;
}

/* Must only be called when we are sure the trace is in the list. */
static int _ltt_trace_destroy(struct ltt_trace_struct *trace)
{
	int err = -EPERM;

	if (trace == NULL) {
		err = -ENOENT;
		goto traces_error;
	}
	if (trace->active) {
		printk(KERN_ERR
			"LTT : Can't destroy trace %s : tracer is active\n",
			trace->trace_name);
		err = -EBUSY;
		goto active_error;
	}
	/* Everything went fine */
//ust// 	list_del_rcu(&trace->list);
//ust// 	synchronize_sched();
	if (list_empty(&ltt_traces.head)) {
//ust// 		clear_kernel_trace_flag_all_tasks();
		/*
		 * We stop the asynchronous delivery of reader wakeup, but
		 * we must make one last check for reader wakeups pending
		 * later in __ltt_trace_destroy.
		 */
//ust// 		del_timer_sync(&ltt_async_wakeup_timer);
	}
	return 0;

	/* error handling */
active_error:
traces_error:
	return err;
}

/* Sleepable part of the destroy */
static void __ltt_trace_destroy(struct ltt_trace_struct *trace)
{
	int i;
	struct ltt_channel_struct *chan;

	for (i = 0; i < trace->nr_channels; i++) {
		chan = &trace->channels[i];
		if (chan->active)
			trace->ops->finish_channel(chan);
	}

//ust// 	flush_scheduled_work();

	/*
	 * The trace being destroyed is not in the trace list anymore,
	 * so it is safe to call the async wakeup ourselves. It will deliver
	 * the last subbuffers.
	 */
	trace_async_wakeup(trace);

	for (i = 0; i < trace->nr_channels; i++) {
		chan = &trace->channels[i];
		if (chan->active)
			trace->ops->remove_channel(chan);
	}

	kref_put(&trace->ltt_transport_kref, ltt_release_transport);

//ust// 	module_put(trace->transport->owner);

	/*
	 * Wait for lttd readers to release the files, therefore making sure
	 * the last subbuffers have been read.
	 */
//ust// 	if (atomic_read(&trace->kref.refcount) > 1) {
//ust// 		int ret = 0;
//ust// 		__wait_event_interruptible(trace->kref_wq,
//ust// 			(atomic_read(&trace->kref.refcount) == 1), ret);
//ust// 	}
	kref_put(&trace->kref, ltt_release_trace);
}

int ltt_trace_destroy(const char *trace_name)
{
	int err = 0;
	struct ltt_trace_struct *trace;

	ltt_lock_traces();

	trace = _ltt_trace_find(trace_name);
	if (trace) {
		err = _ltt_trace_destroy(trace);
		if (err)
			goto error;

		ltt_unlock_traces();

		__ltt_trace_destroy(trace);
//ust// 		put_trace_clock();

		return 0;
	}

	trace = _ltt_trace_find_setup(trace_name);
	if (trace) {
		_ltt_trace_free(trace);
		ltt_unlock_traces();
		return 0;
	}

	err = -ENOENT;

	/* Error handling */
error:
	ltt_unlock_traces();
	return err;
}
//ust// EXPORT_SYMBOL_GPL(ltt_trace_destroy);

/* must be called from within a traces lock. */
static int _ltt_trace_start(struct ltt_trace_struct *trace)
{
	int err = 0;

	if (trace == NULL) {
		err = -ENOENT;
		goto traces_error;
	}
	if (trace->active)
		printk(KERN_INFO "LTT : Tracing already active for trace %s\n",
				trace->trace_name);
//ust// 	if (!try_module_get(ltt_run_filter_owner)) {
//ust// 		err = -ENODEV;
//ust// 		printk(KERN_ERR "LTT : Can't lock filter module.\n");
//ust// 		goto get_ltt_run_filter_error;
//ust// 	}
	trace->active = 1;
	/* Read by trace points without protection : be careful */
	ltt_traces.num_active_traces++;
	return err;

	/* error handling */
get_ltt_run_filter_error:
traces_error:
	return err;
}

int ltt_trace_start(const char *trace_name)
{
	int err = 0;
	struct ltt_trace_struct *trace;

	ltt_lock_traces();

	trace = _ltt_trace_find(trace_name);
	err = _ltt_trace_start(trace);
	if (err)
		goto no_trace;

	ltt_unlock_traces();

	/*
	 * Call the kernel state dump.
	 * Events will be mixed with real kernel events, it's ok.
	 * Notice that there is no protection on the trace : that's exactly
	 * why we iterate on the list and check for trace equality instead of
	 * directly using this trace handle inside the logging function.
	 */

//ust// 	ltt_dump_marker_state(trace);

//ust// 	if (!try_module_get(ltt_statedump_owner)) {
//ust// 		err = -ENODEV;
//ust// 		printk(KERN_ERR
//ust// 			"LTT : Can't lock state dump module.\n");
//ust// 	} else {
		ltt_statedump_functor(trace);
//ust// 		module_put(ltt_statedump_owner);
//ust// 	}

	return err;

	/* Error handling */
no_trace:
	ltt_unlock_traces();
	return err;
}
//ust// EXPORT_SYMBOL_GPL(ltt_trace_start);

/* must be called from within traces lock */
static int _ltt_trace_stop(struct ltt_trace_struct *trace)
{
	int err = -EPERM;

	if (trace == NULL) {
		err = -ENOENT;
		goto traces_error;
	}
	if (!trace->active)
		printk(KERN_INFO "LTT : Tracing not active for trace %s\n",
				trace->trace_name);
	if (trace->active) {
		trace->active = 0;
		ltt_traces.num_active_traces--;
//ust// 		synchronize_sched(); /* Wait for each tracing to be finished */
	}
//ust// 	module_put(ltt_run_filter_owner);
	/* Everything went fine */
	return 0;

	/* Error handling */
traces_error:
	return err;
}

int ltt_trace_stop(const char *trace_name)
{
	int err = 0;
	struct ltt_trace_struct *trace;

	ltt_lock_traces();
	trace = _ltt_trace_find(trace_name);
	err = _ltt_trace_stop(trace);
	ltt_unlock_traces();
	return err;
}
//ust// EXPORT_SYMBOL_GPL(ltt_trace_stop);
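
/*
 * Typical control sequence, sketched from the functions above. The trace,
 * transport and channel names below are illustrative assumptions only:
 *
 *	ltt_trace_setup("auto");
 *	ltt_trace_set_type("auto", "ustrelay");
 *	ltt_trace_set_channel_subbufsize("auto", "ust", 4096);
 *	ltt_trace_set_channel_subbufcount("auto", "ust", 8);
 *	ltt_trace_alloc("auto");
 *	ltt_trace_start("auto");
 *	... tracing runs ...
 *	ltt_trace_stop("auto");
 *	ltt_trace_destroy("auto");
 *
 * Each call returns 0 on success or a negative errno; ltt_trace_create()
 * above chains the setup, set_type and alloc steps for the simple case.
 */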

/**
 * ltt_control - Trace control in-kernel API
 * @msg: Action to perform
 * @trace_name: Trace on which the action must be done
 * @trace_type: Type of trace (normal, flight, hybrid)
 * @args: Arguments specific to the action
 */
//ust// int ltt_control(enum ltt_control_msg msg, const char *trace_name,
//ust// 		const char *trace_type, union ltt_control_args args)
//ust// {
//ust// 	int err = -EPERM;
//ust//
//ust// 	printk(KERN_ALERT "ltt_control : trace %s\n", trace_name);
//ust// 	switch (msg) {
//ust// 	case LTT_CONTROL_START:
//ust// 		printk(KERN_DEBUG "Start tracing %s\n", trace_name);
//ust// 		err = ltt_trace_start(trace_name);
//ust// 		break;
//ust// 	case LTT_CONTROL_STOP:
//ust// 		printk(KERN_DEBUG "Stop tracing %s\n", trace_name);
//ust// 		err = ltt_trace_stop(trace_name);
//ust// 		break;
//ust// 	case LTT_CONTROL_CREATE_TRACE:
//ust// 		printk(KERN_DEBUG "Creating trace %s\n", trace_name);
//ust// 		err = ltt_trace_create(trace_name, trace_type,
//ust// 			args.new_trace.mode,
//ust// 			args.new_trace.subbuf_size_low,
//ust// 			args.new_trace.n_subbufs_low,
//ust// 			args.new_trace.subbuf_size_med,
//ust// 			args.new_trace.n_subbufs_med,
//ust// 			args.new_trace.subbuf_size_high,
//ust// 			args.new_trace.n_subbufs_high);
//ust// 		break;
//ust// 	case LTT_CONTROL_DESTROY_TRACE:
//ust// 		printk(KERN_DEBUG "Destroying trace %s\n", trace_name);
//ust// 		err = ltt_trace_destroy(trace_name);
//ust// 		break;
//ust// 	}
//ust// 	return err;
//ust// }
//ust// EXPORT_SYMBOL_GPL(ltt_control);

/**
 * ltt_filter_control - Trace filter control in-kernel API
 * @msg: Action to perform on the filter
 * @trace_name: Trace on which the action must be done
 */
int ltt_filter_control(enum ltt_filter_control_msg msg, const char *trace_name)
{
	int err;
	struct ltt_trace_struct *trace;

	printk(KERN_DEBUG "ltt_filter_control : trace %s\n", trace_name);
	ltt_lock_traces();
	trace = _ltt_trace_find(trace_name);
	if (trace == NULL) {
		printk(KERN_ALERT
			"Trace does not exist. Cannot proxy control request\n");
		err = -ENOENT;
		goto trace_error;
	}
//ust// 	if (!try_module_get(ltt_filter_control_owner)) {
//ust// 		err = -ENODEV;
//ust// 		goto get_module_error;
//ust// 	}
	switch (msg) {
	case LTT_FILTER_DEFAULT_ACCEPT:
		printk(KERN_DEBUG
			"Proxy filter default accept %s\n", trace_name);
		err = (*ltt_filter_control_functor)(msg, trace);
		break;
	case LTT_FILTER_DEFAULT_REJECT:
		printk(KERN_DEBUG
			"Proxy filter default reject %s\n", trace_name);
		err = (*ltt_filter_control_functor)(msg, trace);
		break;
	default:
		err = -EPERM;
	}
//ust// 	module_put(ltt_filter_control_owner);

get_module_error:
trace_error:
	ltt_unlock_traces();
	return err;
}
//ust// EXPORT_SYMBOL_GPL(ltt_filter_control);

//ust// int __init ltt_init(void)
//ust// {
//ust// 	/* Make sure no page fault can be triggered by this module */
//ust// 	vmalloc_sync_all();
//ust// 	return 0;
//ust// }

//ust// module_init(ltt_init)

//ust// static void __exit ltt_exit(void)
//ust// {
//ust// 	struct ltt_trace_struct *trace;
//ust// 	struct list_head *pos, *n;
//ust//
//ust// 	ltt_lock_traces();
//ust// 	/* Stop each trace, currently being read by RCU read-side */
//ust// 	list_for_each_entry_rcu(trace, &ltt_traces.head, list)
//ust// 		_ltt_trace_stop(trace);
//ust// 	/* Wait for quiescent state. Readers have preemption disabled. */
//ust// 	synchronize_sched();
//ust// 	/* Safe iteration is now permitted. It does not have to be RCU-safe
//ust// 	 * because no readers are left. */
//ust// 	list_for_each_safe(pos, n, &ltt_traces.head) {
//ust// 		trace = container_of(pos, struct ltt_trace_struct, list);
//ust// 		/* _ltt_trace_destroy does a synchronize_sched() */
//ust// 		_ltt_trace_destroy(trace);
//ust// 		__ltt_trace_destroy(trace);
//ust// 	}
//ust// 	/* free traces in pre-alloc status */
//ust// 	list_for_each_safe(pos, n, &ltt_traces.setup_head) {
//ust// 		trace = container_of(pos, struct ltt_trace_struct, list);
//ust// 		_ltt_trace_free(trace);
//ust// 	}
//ust//
//ust// 	ltt_unlock_traces();
//ust// }

//ust// module_exit(ltt_exit)

//ust// MODULE_LICENSE("GPL");
//ust// MODULE_AUTHOR("Mathieu Desnoyers");
//ust// MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Tracer Kernel API");