Move kernelcompat.h to include/ust/ and share.h, usterr.h to include/
[ust.git] / libust / tracer.c
1/*
2 * ltt/ltt-tracer.c
3 *
4 * (C) Copyright 2005-2008 -
5 * Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 *
21 * Tracing management internal kernel API. Trace buffer allocation/free, tracing
22 * start/stop.
23 *
24 * Author:
25 * Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
26 *
27 * Inspired from LTT :
28 * Karim Yaghmour (karim@opersys.com)
29 * Tom Zanussi (zanussi@us.ibm.com)
30 * Bob Wisniewski (bob@watson.ibm.com)
31 * And from K42 :
32 * Bob Wisniewski (bob@watson.ibm.com)
33 *
34 * Changelog:
35 * 22/09/06, Move to the marker/probes mechanism.
36 * 19/10/05, Complete lockless mechanism.
37 * 27/05/05, Modular redesign and rewrite.
38 */
39
40//ust// #include <linux/time.h>
41//ust// #include <linux/ltt-tracer.h>
42//ust// #include <linux/module.h>
43//ust// #include <linux/string.h>
44//ust// #include <linux/slab.h>
45//ust// #include <linux/init.h>
46//ust// #include <linux/rcupdate.h>
47//ust// #include <linux/sched.h>
48//ust// #include <linux/bitops.h>
49//ust// #include <linux/fs.h>
50//ust// #include <linux/cpu.h>
51//ust// #include <linux/kref.h>
52//ust// #include <linux/delay.h>
53//ust// #include <linux/vmalloc.h>
54//ust// #include <asm/atomic.h>
 55#include <urcu/rculist.h>
 56
 57#include <ust/kernelcompat.h>
58#include "tracercore.h"
59#include "tracer.h"
60#include "usterr.h"
61
62//ust// static void async_wakeup(unsigned long data);
63//ust//
64//ust// static DEFINE_TIMER(ltt_async_wakeup_timer, async_wakeup, 0, 0);
65
66/* Default callbacks for modules */
67notrace int ltt_filter_control_default(enum ltt_filter_control_msg msg,
68 struct ltt_trace_struct *trace)
69{
70 return 0;
71}
72
73int ltt_statedump_default(struct ltt_trace_struct *trace)
74{
75 return 0;
76}
77
78/* Callbacks for registered modules */
79
80int (*ltt_filter_control_functor)
81 (enum ltt_filter_control_msg msg, struct ltt_trace_struct *trace) =
82 ltt_filter_control_default;
83struct module *ltt_filter_control_owner;
84
85/* These function pointers are protected by a trace activation check */
86struct module *ltt_run_filter_owner;
87int (*ltt_statedump_functor)(struct ltt_trace_struct *trace) =
88 ltt_statedump_default;
89struct module *ltt_statedump_owner;
90
91struct chan_info_struct {
92 const char *name;
93 unsigned int def_subbufsize;
94 unsigned int def_subbufcount;
95} chan_infos[] = {
96 [LTT_CHANNEL_METADATA] = {
97 LTT_METADATA_CHANNEL,
98 LTT_DEFAULT_SUBBUF_SIZE_LOW,
99 LTT_DEFAULT_N_SUBBUFS_LOW,
100 },
101 [LTT_CHANNEL_UST] = {
102 LTT_UST_CHANNEL,
103 LTT_DEFAULT_SUBBUF_SIZE_HIGH,
104 LTT_DEFAULT_N_SUBBUFS_HIGH,
105 },
106};
107
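/*
 * Map a channel name to its entry in chan_infos.  Unknown or NULL names fall
 * back to the LTT_CHANNEL_UST defaults.
 */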
108static enum ltt_channels get_channel_type_from_name(const char *name)
109{
110 int i;
111
112 if (!name)
 113		return LTT_CHANNEL_UST;
114
115 for (i = 0; i < ARRAY_SIZE(chan_infos); i++)
116 if (chan_infos[i].name && !strcmp(name, chan_infos[i].name))
117 return (enum ltt_channels)i;
118
 119	return LTT_CHANNEL_UST;
120}
121
122/**
123 * ltt_module_register - LTT module registration
124 * @name: module type
125 * @function: callback to register
126 * @owner: module which owns the callback
127 *
128 * The module calling this registration function must ensure that no
129 * trap-inducing code will be executed by "function". E.g. vmalloc_sync_all()
130 * must be called between a vmalloc and the moment the memory is made visible to
131 * "function". This registration acts as a vmalloc_sync_all. Therefore, only if
132 * the module allocates virtual memory after its registration must it
133 * synchronize the TLBs.
134 */
135//ust// int ltt_module_register(enum ltt_module_function name, void *function,
136//ust// struct module *owner)
137//ust// {
138//ust// int ret = 0;
139//ust//
140//ust// /*
141//ust// * Make sure no page fault can be triggered by the module about to be
142//ust// * registered. We deal with this here so we don't have to call
143//ust// * vmalloc_sync_all() in each module's init.
144//ust// */
145//ust// vmalloc_sync_all();
146//ust//
147//ust// switch (name) {
148//ust// case LTT_FUNCTION_RUN_FILTER:
149//ust// if (ltt_run_filter_owner != NULL) {
150//ust// ret = -EEXIST;
151//ust// goto end;
152//ust// }
153//ust// ltt_filter_register((ltt_run_filter_functor)function);
154//ust// ltt_run_filter_owner = owner;
155//ust// break;
156//ust// case LTT_FUNCTION_FILTER_CONTROL:
157//ust// if (ltt_filter_control_owner != NULL) {
158//ust// ret = -EEXIST;
159//ust// goto end;
160//ust// }
161//ust// ltt_filter_control_functor =
162//ust// (int (*)(enum ltt_filter_control_msg,
163//ust// struct ltt_trace_struct *))function;
164//ust// ltt_filter_control_owner = owner;
165//ust// break;
166//ust// case LTT_FUNCTION_STATEDUMP:
167//ust// if (ltt_statedump_owner != NULL) {
168//ust// ret = -EEXIST;
169//ust// goto end;
170//ust// }
171//ust// ltt_statedump_functor =
172//ust// (int (*)(struct ltt_trace_struct *))function;
173//ust// ltt_statedump_owner = owner;
174//ust// break;
175//ust// }
176//ust//
177//ust// end:
178//ust//
179//ust// return ret;
180//ust// }
181//ust// EXPORT_SYMBOL_GPL(ltt_module_register);
182
183/**
184 * ltt_module_unregister - LTT module unregistration
185 * @name: module type
186 */
187//ust// void ltt_module_unregister(enum ltt_module_function name)
188//ust// {
189//ust// switch (name) {
190//ust// case LTT_FUNCTION_RUN_FILTER:
191//ust// ltt_filter_unregister();
192//ust// ltt_run_filter_owner = NULL;
193//ust// /* Wait for preempt sections to finish */
194//ust// synchronize_sched();
195//ust// break;
196//ust// case LTT_FUNCTION_FILTER_CONTROL:
197//ust// ltt_filter_control_functor = ltt_filter_control_default;
198//ust// ltt_filter_control_owner = NULL;
199//ust// break;
200//ust// case LTT_FUNCTION_STATEDUMP:
201//ust// ltt_statedump_functor = ltt_statedump_default;
202//ust// ltt_statedump_owner = NULL;
203//ust// break;
204//ust// }
205//ust//
206//ust// }
207//ust// EXPORT_SYMBOL_GPL(ltt_module_unregister);
208
209static LIST_HEAD(ltt_transport_list);
210
211/**
212 * ltt_transport_register - LTT transport registration
213 * @transport: transport structure
214 *
215 * Registers a transport which can be used as output to extract the data out of
216 * LTTng. The module calling this registration function must ensure that no
217 * trap-inducing code will be executed by the transport functions. E.g.
218 * vmalloc_sync_all() must be called between a vmalloc and the moment the memory
219 * is made visible to the transport function. This registration acts as a
220 * vmalloc_sync_all. Therefore, only if the module allocates virtual memory
221 * after its registration must it synchronize the TLBs.
222 */
223void ltt_transport_register(struct ltt_transport *transport)
224{
225 /*
226 * Make sure no page fault can be triggered by the module about to be
227 * registered. We deal with this here so we don't have to call
228 * vmalloc_sync_all() in each module's init.
229 */
 230//ust//	vmalloc_sync_all();
231
232 ltt_lock_traces();
233 list_add_tail(&transport->node, &ltt_transport_list);
234 ltt_unlock_traces();
235}
 236//ust// EXPORT_SYMBOL_GPL(ltt_transport_register);
237
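/*
 * Registration sketch (illustrative only; "example-transport" and the ops
 * initializer are hypothetical, but the .name and .ops fields are the ones
 * this file relies on):
 *
 *	static struct ltt_transport example_transport = {
 *		.name = "example-transport",
 *		.ops = { ... },
 *	};
 *	ltt_transport_register(&example_transport);
 */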
238/**
239 * ltt_transport_unregister - LTT transport unregistration
240 * @transport: transport structure
241 */
242void ltt_transport_unregister(struct ltt_transport *transport)
243{
244 ltt_lock_traces();
245 list_del(&transport->node);
246 ltt_unlock_traces();
247}
 248//ust// EXPORT_SYMBOL_GPL(ltt_transport_unregister);
249
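/*
 * In normal mode no channel is overwritten.  In flight recorder and hybrid
 * modes every channel except metadata runs in overwrite mode, since the
 * metadata (marker info) must never be lost.
 */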
250static inline int is_channel_overwrite(enum ltt_channels chan,
251 enum trace_mode mode)
252{
253 switch (mode) {
254 case LTT_TRACE_NORMAL:
255 return 0;
256 case LTT_TRACE_FLIGHT:
257 switch (chan) {
258 case LTT_CHANNEL_METADATA:
259 return 0;
260 default:
261 return 1;
262 }
263 case LTT_TRACE_HYBRID:
264 switch (chan) {
 265		case LTT_CHANNEL_METADATA:
 266			return 0;
267 default:
268 return 1;
269 }
270 default:
271 return 0;
272 }
273}
274
275/**
276 * ltt_write_trace_header - Write trace header
277 * @trace: Trace information
278 * @header: Memory address where the information must be written to
279 */
280void notrace ltt_write_trace_header(struct ltt_trace_struct *trace,
281 struct ltt_subbuffer_header *header)
282{
283 header->magic_number = LTT_TRACER_MAGIC_NUMBER;
284 header->major_version = LTT_TRACER_VERSION_MAJOR;
285 header->minor_version = LTT_TRACER_VERSION_MINOR;
286 header->arch_size = sizeof(void *);
287 header->alignment = ltt_get_alignment();
288 header->start_time_sec = trace->start_time.tv_sec;
289 header->start_time_usec = trace->start_time.tv_usec;
290 header->start_freq = trace->start_freq;
291 header->freq_scale = trace->freq_scale;
292}
 293//ust// EXPORT_SYMBOL_GPL(ltt_write_trace_header);
294
295static void trace_async_wakeup(struct ltt_trace_struct *trace)
296{
297 int i;
298 struct ltt_channel_struct *chan;
299
300 /* Must check each channel for pending read wakeup */
301 for (i = 0; i < trace->nr_channels; i++) {
302 chan = &trace->channels[i];
303 if (chan->active)
304 trace->ops->wakeup_channel(chan);
305 }
306}
307
308//ust// /* Timer to send async wakeups to the readers */
309//ust// static void async_wakeup(unsigned long data)
310//ust// {
311//ust// struct ltt_trace_struct *trace;
312//ust//
313//ust// /*
314//ust// * PREEMPT_RT does not allow spinlocks to be taken within preempt
315//ust// * disable sections (spinlock taken in wake_up). However, mainline won't
316//ust// * allow mutex to be taken in interrupt context. Ugly.
317//ust// * A proper way to do this would be to turn the timer into a
318//ust// * periodically woken up thread, but it adds to the footprint.
319//ust// */
320//ust// #ifndef CONFIG_PREEMPT_RT
321//ust// rcu_read_lock_sched();
322//ust// #else
323//ust// ltt_lock_traces();
324//ust// #endif
325//ust// list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
326//ust// trace_async_wakeup(trace);
327//ust// }
328//ust// #ifndef CONFIG_PREEMPT_RT
329//ust// rcu_read_unlock_sched();
330//ust// #else
331//ust// ltt_unlock_traces();
332//ust// #endif
333//ust//
334//ust// mod_timer(&ltt_async_wakeup_timer, jiffies + LTT_PERCPU_TIMER_INTERVAL);
335//ust// }
336
337/**
338 * _ltt_trace_find - find a trace by given name.
 339 * @trace_name: trace name
340 *
341 * Returns a pointer to the trace structure, NULL if not found.
342 */
 343struct ltt_trace_struct *_ltt_trace_find(const char *trace_name)
344{
345 struct ltt_trace_struct *trace;
346
347 list_for_each_entry(trace, &ltt_traces.head, list)
348 if (!strncmp(trace->trace_name, trace_name, NAME_MAX))
349 return trace;
350
351 return NULL;
352}
353
354/* _ltt_trace_find_setup :
355 * find a trace in setup list by given name.
356 *
357 * Returns a pointer to the trace structure, NULL if not found.
358 */
359struct ltt_trace_struct *_ltt_trace_find_setup(const char *trace_name)
360{
361 struct ltt_trace_struct *trace;
362
363 list_for_each_entry(trace, &ltt_traces.setup_head, list)
364 if (!strncmp(trace->trace_name, trace_name, NAME_MAX))
365 return trace;
366
367 return NULL;
368}
 369//ust// EXPORT_SYMBOL_GPL(_ltt_trace_find_setup);
370
371/**
372 * ltt_release_transport - Release an LTT transport
373 * @kref : reference count on the transport
374 */
375void ltt_release_transport(struct kref *kref)
376{
377//ust// struct ltt_trace_struct *trace = container_of(kref,
378//ust// struct ltt_trace_struct, ltt_transport_kref);
379//ust// trace->ops->remove_dirs(trace);
 380}
 381//ust// EXPORT_SYMBOL_GPL(ltt_release_transport);
382
383/**
384 * ltt_release_trace - Release a LTT trace
385 * @kref : reference count on the trace
386 */
387void ltt_release_trace(struct kref *kref)
388{
389 struct ltt_trace_struct *trace = container_of(kref,
390 struct ltt_trace_struct, kref);
391 ltt_channels_trace_free(trace->channels);
392 kfree(trace);
393}
 394//ust// EXPORT_SYMBOL_GPL(ltt_release_trace);
395
396static inline void prepare_chan_size_num(unsigned int *subbuf_size,
397 unsigned int *n_subbufs)
398{
399 *subbuf_size = 1 << get_count_order(*subbuf_size);
400 *n_subbufs = 1 << get_count_order(*n_subbufs);
401
402 /* Subbuf size and number must both be power of two */
403 WARN_ON(hweight32(*subbuf_size) != 1);
404 WARN_ON(hweight32(*n_subbufs) != 1);
405}
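/*
 * Example: a requested subbuffer size of 3000 bytes with 3 subbuffers is
 * rounded up to 4096 bytes and 4 subbuffers, since get_count_order() returns
 * the order of the next power of two.
 */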
406
407int _ltt_trace_setup(const char *trace_name)
408{
409 int err = 0;
410 struct ltt_trace_struct *new_trace = NULL;
411 int metadata_index;
412 unsigned int chan;
413 enum ltt_channels chantype;
414
415 if (_ltt_trace_find_setup(trace_name)) {
416 printk(KERN_ERR "LTT : Trace name %s already used.\n",
417 trace_name);
418 err = -EEXIST;
419 goto traces_error;
420 }
421
422 if (_ltt_trace_find(trace_name)) {
423 printk(KERN_ERR "LTT : Trace name %s already used.\n",
424 trace_name);
425 err = -EEXIST;
426 goto traces_error;
427 }
428
429 new_trace = kzalloc(sizeof(struct ltt_trace_struct), GFP_KERNEL);
430 if (!new_trace) {
431 printk(KERN_ERR
432 "LTT : Unable to allocate memory for trace %s\n",
433 trace_name);
434 err = -ENOMEM;
435 goto traces_error;
436 }
437 strncpy(new_trace->trace_name, trace_name, NAME_MAX);
438 new_trace->channels = ltt_channels_trace_alloc(&new_trace->nr_channels,
439 0, 1);
440 if (!new_trace->channels) {
441 printk(KERN_ERR
442 "LTT : Unable to allocate memory for chaninfo %s\n",
443 trace_name);
444 err = -ENOMEM;
445 goto trace_free;
446 }
447
448 /*
449 * Force metadata channel to active, no overwrite.
450 */
451 metadata_index = ltt_channels_get_index_from_name("metadata");
452 WARN_ON(metadata_index < 0);
453 new_trace->channels[metadata_index].overwrite = 0;
454 new_trace->channels[metadata_index].active = 1;
455
456 /*
457 * Set hardcoded tracer defaults for some channels
458 */
459 for (chan = 0; chan < new_trace->nr_channels; chan++) {
460 if (!(new_trace->channels[chan].active))
461 continue;
462
463 chantype = get_channel_type_from_name(
464 ltt_channels_get_name_from_index(chan));
465 new_trace->channels[chan].subbuf_size =
466 chan_infos[chantype].def_subbufsize;
467 new_trace->channels[chan].subbuf_cnt =
468 chan_infos[chantype].def_subbufcount;
469 }
470
471 list_add(&new_trace->list, &ltt_traces.setup_head);
472 return 0;
473
474trace_free:
475 kfree(new_trace);
476traces_error:
477 return err;
478}
 479//ust// EXPORT_SYMBOL_GPL(_ltt_trace_setup);
480
481
482int ltt_trace_setup(const char *trace_name)
483{
484 int ret;
485 ltt_lock_traces();
486 ret = _ltt_trace_setup(trace_name);
487 ltt_unlock_traces();
488 return ret;
489}
 490//ust// EXPORT_SYMBOL_GPL(ltt_trace_setup);
491
492/* must be called from within a traces lock. */
493static void _ltt_trace_free(struct ltt_trace_struct *trace)
494{
495 list_del(&trace->list);
496 kfree(trace);
497}
498
499int ltt_trace_set_type(const char *trace_name, const char *trace_type)
500{
501 int err = 0;
502 struct ltt_trace_struct *trace;
503 struct ltt_transport *tran_iter, *transport = NULL;
504
505 ltt_lock_traces();
506
507 trace = _ltt_trace_find_setup(trace_name);
508 if (!trace) {
509 printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
510 err = -ENOENT;
511 goto traces_error;
512 }
513
514 list_for_each_entry(tran_iter, &ltt_transport_list, node) {
515 if (!strcmp(tran_iter->name, trace_type)) {
516 transport = tran_iter;
517 break;
518 }
519 }
520 if (!transport) {
521 printk(KERN_ERR "LTT : Transport %s is not present.\n",
522 trace_type);
523 err = -EINVAL;
524 goto traces_error;
525 }
526
527 trace->transport = transport;
528
529traces_error:
530 ltt_unlock_traces();
531 return err;
532}
 533//ust// EXPORT_SYMBOL_GPL(ltt_trace_set_type);
534
535int ltt_trace_set_channel_subbufsize(const char *trace_name,
536 const char *channel_name, unsigned int size)
537{
538 int err = 0;
539 struct ltt_trace_struct *trace;
540 int index;
541
542 ltt_lock_traces();
543
544 trace = _ltt_trace_find_setup(trace_name);
545 if (!trace) {
546 printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
547 err = -ENOENT;
548 goto traces_error;
549 }
550
551 index = ltt_channels_get_index_from_name(channel_name);
552 if (index < 0) {
553 printk(KERN_ERR "LTT : Channel %s not found\n", channel_name);
554 err = -ENOENT;
555 goto traces_error;
556 }
557 trace->channels[index].subbuf_size = size;
558
559traces_error:
560 ltt_unlock_traces();
561 return err;
562}
 563//ust// EXPORT_SYMBOL_GPL(ltt_trace_set_channel_subbufsize);
564
565int ltt_trace_set_channel_subbufcount(const char *trace_name,
566 const char *channel_name, unsigned int cnt)
567{
568 int err = 0;
569 struct ltt_trace_struct *trace;
570 int index;
571
572 ltt_lock_traces();
573
574 trace = _ltt_trace_find_setup(trace_name);
575 if (!trace) {
576 printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
577 err = -ENOENT;
578 goto traces_error;
579 }
580
581 index = ltt_channels_get_index_from_name(channel_name);
582 if (index < 0) {
583 printk(KERN_ERR "LTT : Channel %s not found\n", channel_name);
584 err = -ENOENT;
585 goto traces_error;
586 }
587 trace->channels[index].subbuf_cnt = cnt;
588
589traces_error:
590 ltt_unlock_traces();
591 return err;
592}
 593//ust// EXPORT_SYMBOL_GPL(ltt_trace_set_channel_subbufcount);
594
595int ltt_trace_set_channel_enable(const char *trace_name,
596 const char *channel_name, unsigned int enable)
597{
598 int err = 0;
599 struct ltt_trace_struct *trace;
600 int index;
601
602 ltt_lock_traces();
603
604 trace = _ltt_trace_find_setup(trace_name);
605 if (!trace) {
606 printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
607 err = -ENOENT;
608 goto traces_error;
609 }
610
611 /*
 612	 * Data in the metadata channel (marker info) is necessary to be able to
 613	 * read the trace, so this channel is always kept enabled.
614 */
615 if (!enable && !strcmp(channel_name, "metadata")) {
616 printk(KERN_ERR "LTT : Trying to disable metadata channel\n");
617 err = -EINVAL;
618 goto traces_error;
619 }
620
621 index = ltt_channels_get_index_from_name(channel_name);
622 if (index < 0) {
623 printk(KERN_ERR "LTT : Channel %s not found\n", channel_name);
624 err = -ENOENT;
625 goto traces_error;
626 }
627
628 trace->channels[index].active = enable;
629
630traces_error:
631 ltt_unlock_traces();
632 return err;
633}
 634//ust// EXPORT_SYMBOL_GPL(ltt_trace_set_channel_enable);
635
636int ltt_trace_set_channel_overwrite(const char *trace_name,
637 const char *channel_name, unsigned int overwrite)
638{
639 int err = 0;
640 struct ltt_trace_struct *trace;
641 int index;
642
643 ltt_lock_traces();
644
645 trace = _ltt_trace_find_setup(trace_name);
646 if (!trace) {
647 printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
648 err = -ENOENT;
649 goto traces_error;
650 }
651
652 /*
653 * Always put the metadata channel in non-overwrite mode :
654 * This is a very low traffic channel and it can't afford to have its
655 * data overwritten : this data (marker info) is necessary to be
656 * able to read the trace.
657 */
658 if (overwrite && !strcmp(channel_name, "metadata")) {
659 printk(KERN_ERR "LTT : Trying to set metadata channel to "
660 "overwrite mode\n");
661 err = -EINVAL;
662 goto traces_error;
663 }
664
665 index = ltt_channels_get_index_from_name(channel_name);
666 if (index < 0) {
667 printk(KERN_ERR "LTT : Channel %s not found\n", channel_name);
668 err = -ENOENT;
669 goto traces_error;
670 }
671
672 trace->channels[index].overwrite = overwrite;
673
674traces_error:
675 ltt_unlock_traces();
676 return err;
677}
 678//ust// EXPORT_SYMBOL_GPL(ltt_trace_set_channel_overwrite);
679
680int ltt_trace_alloc(const char *trace_name)
681{
682 int err = 0;
683 struct ltt_trace_struct *trace;
 684	unsigned int subbuf_size, subbuf_cnt;
 685//ust//	unsigned long flags;
686 int chan;
687 const char *channel_name;
688
689 ltt_lock_traces();
690
691 trace = _ltt_trace_find_setup(trace_name);
692 if (!trace) {
693 printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
694 err = -ENOENT;
695 goto traces_error;
696 }
697
698 kref_init(&trace->kref);
699 kref_init(&trace->ltt_transport_kref);
 700//ust//	init_waitqueue_head(&trace->kref_wq);
 701	trace->active = 0;
 702//ust//	get_trace_clock();
703 trace->freq_scale = trace_clock_freq_scale();
704
705 if (!trace->transport) {
706 printk(KERN_ERR "LTT : Transport is not set.\n");
707 err = -EINVAL;
708 goto transport_error;
709 }
710//ust// if (!try_module_get(trace->transport->owner)) {
711//ust// printk(KERN_ERR "LTT : Can't lock transport module.\n");
712//ust// err = -ENODEV;
713//ust// goto transport_error;
714//ust// }
715 trace->ops = &trace->transport->ops;
716
717//ust// err = trace->ops->create_dirs(trace);
718//ust// if (err) {
719//ust// printk(KERN_ERR "LTT : Can't create dir for trace %s.\n",
720//ust// trace_name);
721//ust// goto dirs_error;
722//ust// }
 723
 724//ust//	local_irq_save(flags);
725 trace->start_freq = trace_clock_frequency();
726 trace->start_tsc = trace_clock_read64();
727 gettimeofday(&trace->start_time, NULL); //ust// changed
728//ust// local_irq_restore(flags);
729
730 for (chan = 0; chan < trace->nr_channels; chan++) {
731 if (!(trace->channels[chan].active))
732 continue;
733
734 channel_name = ltt_channels_get_name_from_index(chan);
735 WARN_ON(!channel_name);
736 subbuf_size = trace->channels[chan].subbuf_size;
737 subbuf_cnt = trace->channels[chan].subbuf_cnt;
738 prepare_chan_size_num(&subbuf_size, &subbuf_cnt);
739 err = trace->ops->create_channel(trace_name, trace,
740 trace->dentry.trace_root,
741 channel_name,
742 &trace->channels[chan],
743 subbuf_size,
744 subbuf_cnt,
745 trace->channels[chan].overwrite);
746 if (err != 0) {
747 printk(KERN_ERR "LTT : Can't create channel %s.\n",
748 channel_name);
749 goto create_channel_error;
750 }
751 }
752
753 list_del(&trace->list);
754//ust// if (list_empty(&ltt_traces.head)) {
755//ust// mod_timer(&ltt_async_wakeup_timer,
756//ust// jiffies + LTT_PERCPU_TIMER_INTERVAL);
757//ust// set_kernel_trace_flag_all_tasks();
758//ust// }
 759	list_add_rcu(&trace->list, &ltt_traces.head);
 760//ust//	synchronize_sched();
761
762 ltt_unlock_traces();
763
764 return 0;
765
766create_channel_error:
767 for (chan--; chan >= 0; chan--)
768 if (trace->channels[chan].active)
769 trace->ops->remove_channel(&trace->channels[chan]);
770
 771//ust// dirs_error:
 772//ust//	module_put(trace->transport->owner);
 773transport_error:
 774//ust//	put_trace_clock();
775traces_error:
776 ltt_unlock_traces();
777 return err;
778}
 779//ust// EXPORT_SYMBOL_GPL(ltt_trace_alloc);
780
781/*
 782 * This works as a wrapper for the current version of ltt_control.ko.
 783 * We will make a new ltt_control based on debugfs and control each channel's
 784 * buffer.
785 */
786//ust// static int ltt_trace_create(const char *trace_name, const char *trace_type,
787//ust// enum trace_mode mode,
788//ust// unsigned int subbuf_size_low, unsigned int n_subbufs_low,
789//ust// unsigned int subbuf_size_med, unsigned int n_subbufs_med,
790//ust// unsigned int subbuf_size_high, unsigned int n_subbufs_high)
791//ust// {
792//ust// int err = 0;
793//ust//
794//ust// err = ltt_trace_setup(trace_name);
795//ust// if (IS_ERR_VALUE(err))
796//ust// return err;
797//ust//
798//ust// err = ltt_trace_set_type(trace_name, trace_type);
799//ust// if (IS_ERR_VALUE(err))
800//ust// return err;
801//ust//
802//ust// err = ltt_trace_alloc(trace_name);
803//ust// if (IS_ERR_VALUE(err))
804//ust// return err;
805//ust//
806//ust// return err;
807//ust// }
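/*
 * Typical control sequence using the functions in this file (sketch only;
 * "mytrace" and "example-transport" are placeholder names, and the transport
 * must have been registered beforehand with ltt_transport_register()):
 *
 *	ltt_trace_setup("mytrace");
 *	ltt_trace_set_type("mytrace", "example-transport");
 *	ltt_trace_set_channel_subbufsize("mytrace", "metadata", 4096);
 *	ltt_trace_set_channel_subbufcount("mytrace", "metadata", 2);
 *	ltt_trace_alloc("mytrace");
 *	ltt_trace_start("mytrace");
 *	...
 *	ltt_trace_stop("mytrace");
 *	ltt_trace_destroy("mytrace");
 */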
808
 809/* Must only be called when the trace is known to be in the trace list. */
 810static int _ltt_trace_destroy(struct ltt_trace_struct *trace)
811{
812 int err = -EPERM;
813
814 if (trace == NULL) {
815 err = -ENOENT;
816 goto traces_error;
817 }
818 if (trace->active) {
819 printk(KERN_ERR
820 "LTT : Can't destroy trace %s : tracer is active\n",
821 trace->trace_name);
822 err = -EBUSY;
823 goto active_error;
824 }
825 /* Everything went fine */
826 list_del_rcu(&trace->list);
827 synchronize_rcu();
 828	if (list_empty(&ltt_traces.head)) {
 829//ust//		clear_kernel_trace_flag_all_tasks();
830 /*
831 * We stop the asynchronous delivery of reader wakeup, but
832 * we must make one last check for reader wakeups pending
833 * later in __ltt_trace_destroy.
834 */
 835//ust//		del_timer_sync(&ltt_async_wakeup_timer);
836 }
837 return 0;
838
839 /* error handling */
840active_error:
841traces_error:
842 return err;
843}
844
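/*
 * Trace destruction is split in two phases: _ltt_trace_destroy() unlinks the
 * trace from the RCU list while the traces lock is held, and
 * __ltt_trace_destroy() below performs the sleepable teardown (channel
 * removal and reference drops) after the lock has been released.
 */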
845/* Sleepable part of the destroy */
846static void __ltt_trace_destroy(struct ltt_trace_struct *trace)
847{
848 int i;
849 struct ltt_channel_struct *chan;
850
851 for (i = 0; i < trace->nr_channels; i++) {
852 chan = &trace->channels[i];
853 if (chan->active)
854 trace->ops->finish_channel(chan);
855 }
856
 857	return; /* FIXME: temporary for ust */
 858//ust//	flush_scheduled_work();
859
860 /*
861 * The currently destroyed trace is not in the trace list anymore,
862 * so it's safe to call the async wakeup ourself. It will deliver
863 * the last subbuffers.
864 */
865 trace_async_wakeup(trace);
866
867 for (i = 0; i < trace->nr_channels; i++) {
868 chan = &trace->channels[i];
869 if (chan->active)
870 trace->ops->remove_channel(chan);
871 }
872
873 kref_put(&trace->ltt_transport_kref, ltt_release_transport);
874
 875//ust//	module_put(trace->transport->owner);
876
877 /*
878 * Wait for lttd readers to release the files, therefore making sure
879 * the last subbuffers have been read.
880 */
881//ust// if (atomic_read(&trace->kref.refcount) > 1) {
882//ust// int ret = 0;
883//ust// __wait_event_interruptible(trace->kref_wq,
884//ust// (atomic_read(&trace->kref.refcount) == 1), ret);
885//ust// }
886 kref_put(&trace->kref, ltt_release_trace);
887}
888
889int ltt_trace_destroy(const char *trace_name)
890{
891 int err = 0;
892 struct ltt_trace_struct *trace;
893
894 ltt_lock_traces();
895
896 trace = _ltt_trace_find(trace_name);
897 if (trace) {
898 err = _ltt_trace_destroy(trace);
899 if (err)
900 goto error;
901
902 ltt_unlock_traces();
903
904 __ltt_trace_destroy(trace);
 905//ust//		put_trace_clock();
906
907 return 0;
908 }
909
910 trace = _ltt_trace_find_setup(trace_name);
911 if (trace) {
912 _ltt_trace_free(trace);
913 ltt_unlock_traces();
914 return 0;
915 }
916
917 err = -ENOENT;
918
919 /* Error handling */
920error:
921 ltt_unlock_traces();
922 return err;
923}
 924//ust// EXPORT_SYMBOL_GPL(ltt_trace_destroy);
925
926/* must be called from within a traces lock. */
927static int _ltt_trace_start(struct ltt_trace_struct *trace)
928{
929 int err = 0;
930
931 if (trace == NULL) {
932 err = -ENOENT;
933 goto traces_error;
934 }
935 if (trace->active)
936 printk(KERN_INFO "LTT : Tracing already active for trace %s\n",
937 trace->trace_name);
938//ust// if (!try_module_get(ltt_run_filter_owner)) {
939//ust// err = -ENODEV;
940//ust// printk(KERN_ERR "LTT : Can't lock filter module.\n");
941//ust// goto get_ltt_run_filter_error;
942//ust// }
943 trace->active = 1;
944 /* Read by trace points without protection : be careful */
945 ltt_traces.num_active_traces++;
946 return err;
947
948 /* error handling */
 949//ust// get_ltt_run_filter_error:
950traces_error:
951 return err;
952}
953
954int ltt_trace_start(const char *trace_name)
955{
956 int err = 0;
957 struct ltt_trace_struct *trace;
958
959 ltt_lock_traces();
960
961 trace = _ltt_trace_find(trace_name);
962 err = _ltt_trace_start(trace);
963 if (err)
964 goto no_trace;
965
966 ltt_unlock_traces();
967
968 /*
969 * Call the kernel state dump.
970 * Events will be mixed with real kernel events, it's ok.
971 * Notice that there is no protection on the trace : that's exactly
972 * why we iterate on the list and check for trace equality instead of
973 * directly using this trace handle inside the logging function.
974 */
975
 976	ltt_dump_marker_state(trace);
 977
978//ust// if (!try_module_get(ltt_statedump_owner)) {
979//ust// err = -ENODEV;
980//ust// printk(KERN_ERR
981//ust// "LTT : Can't lock state dump module.\n");
982//ust// } else {
 983	ltt_statedump_functor(trace);
984//ust// module_put(ltt_statedump_owner);
985//ust// }
986
987 return err;
988
989 /* Error handling */
990no_trace:
991 ltt_unlock_traces();
992 return err;
993}
 994//ust// EXPORT_SYMBOL_GPL(ltt_trace_start);
995
996/* must be called from within traces lock */
997static int _ltt_trace_stop(struct ltt_trace_struct *trace)
998{
999 int err = -EPERM;
1000
1001 if (trace == NULL) {
1002 err = -ENOENT;
1003 goto traces_error;
1004 }
1005 if (!trace->active)
1006 printk(KERN_INFO "LTT : Tracing not active for trace %s\n",
1007 trace->trace_name);
1008 if (trace->active) {
1009 trace->active = 0;
1010 ltt_traces.num_active_traces--;
 1011//ust//		synchronize_sched(); /* Wait for each tracing to be finished */
 1012	}
 1013//ust//	module_put(ltt_run_filter_owner);
1014 /* Everything went fine */
1015 return 0;
1016
1017 /* Error handling */
1018traces_error:
1019 return err;
1020}
1021
1022int ltt_trace_stop(const char *trace_name)
1023{
1024 int err = 0;
1025 struct ltt_trace_struct *trace;
1026
1027 ltt_lock_traces();
1028 trace = _ltt_trace_find(trace_name);
1029 err = _ltt_trace_stop(trace);
1030 ltt_unlock_traces();
1031 return err;
1032}
 1033//ust// EXPORT_SYMBOL_GPL(ltt_trace_stop);
1034
1035/**
1036 * ltt_control - Trace control in-kernel API
1037 * @msg: Action to perform
1038 * @trace_name: Trace on which the action must be done
1039 * @trace_type: Type of trace (normal, flight, hybrid)
1040 * @args: Arguments specific to the action
1041 */
1042//ust// int ltt_control(enum ltt_control_msg msg, const char *trace_name,
1043//ust// const char *trace_type, union ltt_control_args args)
1044//ust// {
1045//ust// int err = -EPERM;
1046//ust//
1047//ust// printk(KERN_ALERT "ltt_control : trace %s\n", trace_name);
1048//ust// switch (msg) {
1049//ust// case LTT_CONTROL_START:
1050//ust// printk(KERN_DEBUG "Start tracing %s\n", trace_name);
1051//ust// err = ltt_trace_start(trace_name);
1052//ust// break;
1053//ust// case LTT_CONTROL_STOP:
1054//ust// printk(KERN_DEBUG "Stop tracing %s\n", trace_name);
1055//ust// err = ltt_trace_stop(trace_name);
1056//ust// break;
1057//ust// case LTT_CONTROL_CREATE_TRACE:
1058//ust// printk(KERN_DEBUG "Creating trace %s\n", trace_name);
1059//ust// err = ltt_trace_create(trace_name, trace_type,
1060//ust// args.new_trace.mode,
1061//ust// args.new_trace.subbuf_size_low,
1062//ust// args.new_trace.n_subbufs_low,
1063//ust// args.new_trace.subbuf_size_med,
1064//ust// args.new_trace.n_subbufs_med,
1065//ust// args.new_trace.subbuf_size_high,
1066//ust// args.new_trace.n_subbufs_high);
1067//ust// break;
1068//ust// case LTT_CONTROL_DESTROY_TRACE:
1069//ust// printk(KERN_DEBUG "Destroying trace %s\n", trace_name);
1070//ust// err = ltt_trace_destroy(trace_name);
1071//ust// break;
1072//ust// }
1073//ust// return err;
1074//ust// }
1075//ust// EXPORT_SYMBOL_GPL(ltt_control);
1076
1077/**
1078 * ltt_filter_control - Trace filter control in-kernel API
1079 * @msg: Action to perform on the filter
1080 * @trace_name: Trace on which the action must be done
1081 */
1082int ltt_filter_control(enum ltt_filter_control_msg msg, const char *trace_name)
1083{
1084 int err;
1085 struct ltt_trace_struct *trace;
1086
1087 printk(KERN_DEBUG "ltt_filter_control : trace %s\n", trace_name);
1088 ltt_lock_traces();
1089 trace = _ltt_trace_find(trace_name);
1090 if (trace == NULL) {
1091 printk(KERN_ALERT
1092 "Trace does not exist. Cannot proxy control request\n");
1093 err = -ENOENT;
1094 goto trace_error;
1095 }
1096//ust// if (!try_module_get(ltt_filter_control_owner)) {
1097//ust// err = -ENODEV;
1098//ust// goto get_module_error;
1099//ust// }
1100 switch (msg) {
1101 case LTT_FILTER_DEFAULT_ACCEPT:
1102 printk(KERN_DEBUG
1103 "Proxy filter default accept %s\n", trace_name);
1104 err = (*ltt_filter_control_functor)(msg, trace);
1105 break;
1106 case LTT_FILTER_DEFAULT_REJECT:
1107 printk(KERN_DEBUG
1108 "Proxy filter default reject %s\n", trace_name);
1109 err = (*ltt_filter_control_functor)(msg, trace);
1110 break;
1111 default:
1112 err = -EPERM;
1113 }
 1114//ust//	module_put(ltt_filter_control_owner);
 1115
 1116//ust// get_module_error:
1117trace_error:
1118 ltt_unlock_traces();
1119 return err;
1120}
1121//ust// EXPORT_SYMBOL_GPL(ltt_filter_control);
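/*
 * Example (sketch): switch the default filter policy of an existing trace.
 *
 *	ltt_filter_control(LTT_FILTER_DEFAULT_ACCEPT, "mytrace");
 */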
1122
1123//ust// int __init ltt_init(void)
1124//ust// {
1125//ust// /* Make sure no page fault can be triggered by this module */
1126//ust// vmalloc_sync_all();
1127//ust// return 0;
1128//ust// }
1129
1130//ust// module_init(ltt_init)
1131
1132//ust// static void __exit ltt_exit(void)
1133//ust// {
1134//ust// struct ltt_trace_struct *trace;
1135//ust// struct list_head *pos, *n;
1136//ust//
1137//ust// ltt_lock_traces();
1138//ust// /* Stop each trace, currently being read by RCU read-side */
1139//ust// list_for_each_entry_rcu(trace, &ltt_traces.head, list)
1140//ust// _ltt_trace_stop(trace);
1141//ust// /* Wait for quiescent state. Readers have preemption disabled. */
1142//ust// synchronize_sched();
1143//ust// /* Safe iteration is now permitted. It does not have to be RCU-safe
1144//ust// * because no readers are left. */
1145//ust// list_for_each_safe(pos, n, &ltt_traces.head) {
1146//ust// trace = container_of(pos, struct ltt_trace_struct, list);
1147//ust// /* _ltt_trace_destroy does a synchronize_sched() */
1148//ust// _ltt_trace_destroy(trace);
1149//ust// __ltt_trace_destroy(trace);
1150//ust// }
1151//ust// /* free traces in pre-alloc status */
1152//ust// list_for_each_safe(pos, n, &ltt_traces.setup_head) {
1153//ust// trace = container_of(pos, struct ltt_trace_struct, list);
1154//ust// _ltt_trace_free(trace);
1155//ust// }
1156//ust//
1157//ust// ltt_unlock_traces();
1158//ust// }
1159
1160//ust// module_exit(ltt_exit)
1161
1162//ust// MODULE_LICENSE("GPL");
1163//ust// MODULE_AUTHOR("Mathieu Desnoyers");
1164//ust// MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Tracer Kernel API");