/*
 * (C) Copyright 2005-2008 -
 *		Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Tracing management internal kernel API. Trace buffer allocation/free,
 * tracing start/stop.
 *
 * Author:
 *	Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 *
 * Inspired from LTT :
 *	Karim Yaghmour (karim@opersys.com)
 *	Tom Zanussi (zanussi@us.ibm.com)
 *	Bob Wisniewski (bob@watson.ibm.com)
 * And from K42 :
 *	Bob Wisniewski (bob@watson.ibm.com)
 *
 * Changelog:
 *	22/09/06, Move to the marker/probes mechanism.
 *	19/10/05, Complete lockless mechanism.
 *	27/05/05, Modular redesign and rewrite.
 */
//ust// #include <linux/time.h>
//ust// #include <linux/ltt-tracer.h>
//ust// #include <linux/module.h>
//ust// #include <linux/string.h>
//ust// #include <linux/slab.h>
//ust// #include <linux/init.h>
//ust// #include <linux/rcupdate.h>
//ust// #include <linux/sched.h>
//ust// #include <linux/bitops.h>
//ust// #include <linux/fs.h>
//ust// #include <linux/cpu.h>
//ust// #include <linux/kref.h>
//ust// #include <linux/delay.h>
//ust// #include <linux/vmalloc.h>
//ust// #include <asm/atomic.h>
#include <urcu/rculist.h>

#include <ust/kernelcompat.h>
#include "tracercore.h"

//ust// static void async_wakeup(unsigned long data);
//ust//
//ust// static DEFINE_TIMER(ltt_async_wakeup_timer, async_wakeup, 0, 0);
/* Default callbacks for modules */
notrace
int ltt_filter_control_default(enum ltt_filter_control_msg msg,
		struct ltt_trace_struct *trace)
{
	return 0;
}

int ltt_statedump_default(struct ltt_trace_struct *trace)
{
	return 0;
}
/* Callbacks for registered modules */

int (*ltt_filter_control_functor)
	(enum ltt_filter_control_msg msg, struct ltt_trace_struct *trace) =
					ltt_filter_control_default;
struct module *ltt_filter_control_owner;

/* These function pointers are protected by a trace activation check */
struct module *ltt_run_filter_owner;
int (*ltt_statedump_functor)(struct ltt_trace_struct *trace) =
					ltt_statedump_default;
struct module *ltt_statedump_owner;
struct chan_info_struct {
	const char *name;
	unsigned int def_subbufsize;
	unsigned int def_subbufcount;
} chan_infos[] = {
	[LTT_CHANNEL_METADATA] = {
		LTT_METADATA_CHANNEL,
		LTT_DEFAULT_SUBBUF_SIZE_LOW,
		LTT_DEFAULT_N_SUBBUFS_LOW,
	},
	[LTT_CHANNEL_UST] = {
		LTT_UST_CHANNEL,
		LTT_DEFAULT_SUBBUF_SIZE_HIGH,
		LTT_DEFAULT_N_SUBBUFS_HIGH,
	},
};
static enum ltt_channels get_channel_type_from_name(const char *name)
{
	int i;

	if (!name)
		return LTT_CHANNEL_UST;

	for (i = 0; i < ARRAY_SIZE(chan_infos); i++)
		if (chan_infos[i].name && !strcmp(name, chan_infos[i].name))
			return (enum ltt_channels)i;

	return LTT_CHANNEL_UST;
}
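
/*
 * Illustrative behaviour of the lookup above (assuming the chan_infos
 * entries are named "metadata" and "ust", as the defaults suggest):
 *
 *	get_channel_type_from_name("metadata")	-> LTT_CHANNEL_METADATA
 *	get_channel_type_from_name("unknown")	-> LTT_CHANNEL_UST (fallback)
 *	get_channel_type_from_name(NULL)	-> LTT_CHANNEL_UST (fallback)
 */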
/**
 * ltt_module_register - LTT module registration
 * @function: callback to register
 * @owner: module which owns the callback
 *
 * The module calling this registration function must ensure that no
 * trap-inducing code will be executed by "function". E.g. vmalloc_sync_all()
 * must be called between a vmalloc and the moment the memory is made visible
 * to "function". This registration acts as a vmalloc_sync_all. Therefore,
 * only if the module allocates virtual memory after its registration must it
 * synchronize the TLBs.
 */
//ust// int ltt_module_register(enum ltt_module_function name, void *function,
//ust// 		struct module *owner)
//ust// 	/*
//ust// 	 * Make sure no page fault can be triggered by the module about to be
//ust// 	 * registered. We deal with this here so we don't have to call
//ust// 	 * vmalloc_sync_all() in each module's init.
//ust// 	 */
//ust// 	vmalloc_sync_all();
//ust//
//ust// 	switch (name) {
//ust// 	case LTT_FUNCTION_RUN_FILTER:
//ust// 		if (ltt_run_filter_owner != NULL) {
//ust// 			ret = -EEXIST;
//ust// 		ltt_filter_register((ltt_run_filter_functor)function);
//ust// 		ltt_run_filter_owner = owner;
//ust// 	case LTT_FUNCTION_FILTER_CONTROL:
//ust// 		if (ltt_filter_control_owner != NULL) {
//ust// 			ret = -EEXIST;
//ust// 		ltt_filter_control_functor =
//ust// 			(int (*)(enum ltt_filter_control_msg,
//ust// 			struct ltt_trace_struct *))function;
//ust// 		ltt_filter_control_owner = owner;
//ust// 	case LTT_FUNCTION_STATEDUMP:
//ust// 		if (ltt_statedump_owner != NULL) {
//ust// 			ret = -EEXIST;
//ust// 		ltt_statedump_functor =
//ust// 			(int (*)(struct ltt_trace_struct *))function;
//ust// 		ltt_statedump_owner = owner;
//ust// EXPORT_SYMBOL_GPL(ltt_module_register);
/**
 * ltt_module_unregister - LTT module unregistration
 */
//ust// void ltt_module_unregister(enum ltt_module_function name)
//ust// 	switch (name) {
//ust// 	case LTT_FUNCTION_RUN_FILTER:
//ust// 		ltt_filter_unregister();
//ust// 		ltt_run_filter_owner = NULL;
//ust// 		/* Wait for preempt sections to finish */
//ust// 		synchronize_sched();
//ust// 	case LTT_FUNCTION_FILTER_CONTROL:
//ust// 		ltt_filter_control_functor = ltt_filter_control_default;
//ust// 		ltt_filter_control_owner = NULL;
//ust// 	case LTT_FUNCTION_STATEDUMP:
//ust// 		ltt_statedump_functor = ltt_statedump_default;
//ust// 		ltt_statedump_owner = NULL;
//ust// EXPORT_SYMBOL_GPL(ltt_module_unregister);
static LIST_HEAD(ltt_transport_list);
/**
 * ltt_transport_register - LTT transport registration
 * @transport: transport structure
 *
 * Registers a transport which can be used as output to extract the data out
 * of LTTng. The module calling this registration function must ensure that no
 * trap-inducing code will be executed by the transport functions. E.g.
 * vmalloc_sync_all() must be called between a vmalloc and the moment the
 * memory is made visible to the transport function. This registration acts as
 * a vmalloc_sync_all. Therefore, only if the module allocates virtual memory
 * after its registration must it synchronize the TLBs.
 */
void ltt_transport_register(struct ltt_transport *transport)
{
	/*
	 * Make sure no page fault can be triggered by the module about to be
	 * registered. We deal with this here so we don't have to call
	 * vmalloc_sync_all() in each module's init.
	 */
	//ust// vmalloc_sync_all();

	ltt_lock_traces();
	list_add_tail(&transport->node, &ltt_transport_list);
	ltt_unlock_traces();
}
//ust// EXPORT_SYMBOL_GPL(ltt_transport_register);
/**
 * ltt_transport_unregister - LTT transport unregistration
 * @transport: transport structure
 */
void ltt_transport_unregister(struct ltt_transport *transport)
{
	ltt_lock_traces();
	list_del(&transport->node);
	ltt_unlock_traces();
}
//ust// EXPORT_SYMBOL_GPL(ltt_transport_unregister);
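
/*
 * Illustrative sketch (not part of the original code): how a transport
 * module could register itself with the API above.  The transport name and
 * the exact set of callbacks are assumptions; the real ops layout is defined
 * by struct ltt_trace_ops in the tracer headers.
 *
 *	static struct ltt_transport example_transport = {
 *		.name = "example-relay",
 *		.ops = {
 *			.create_channel = example_create_channel,
 *			.finish_channel = example_finish_channel,
 *			.remove_channel = example_remove_channel,
 *			.wakeup_channel = example_wakeup_channel,
 *		},
 *	};
 *
 *	void example_transport_init(void)
 *	{
 *		ltt_transport_register(&example_transport);
 *	}
 *
 * A matching ltt_transport_unregister(&example_transport) removes it from
 * ltt_transport_list once no trace references it anymore.
 */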
static inline int is_channel_overwrite(enum ltt_channels chan,
	enum trace_mode mode)
{
	switch (mode) {
	case LTT_TRACE_NORMAL:
		return 0;
	case LTT_TRACE_FLIGHT:
		switch (chan) {
		case LTT_CHANNEL_METADATA:
			return 0;
		default:
			return 1;
		}
	case LTT_TRACE_HYBRID:
		switch (chan) {
		case LTT_CHANNEL_METADATA:
			return 0;
		default:
			return 1;
		}
	default:
		return 0;
	}
}
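
/*
 * Per the switch above (illustrative): in LTT_TRACE_NORMAL mode no channel
 * overwrites; in flight-recorder and hybrid modes every channel overwrites
 * except metadata, which must never lose data.
 */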
/**
 * ltt_write_trace_header - Write trace header
 * @trace: Trace information
 * @header: Memory address where the information must be written to
 */
void notrace ltt_write_trace_header(struct ltt_trace_struct *trace,
		struct ltt_subbuffer_header *header)
{
	header->magic_number = LTT_TRACER_MAGIC_NUMBER;
	header->major_version = LTT_TRACER_VERSION_MAJOR;
	header->minor_version = LTT_TRACER_VERSION_MINOR;
	header->arch_size = sizeof(void *);
	header->alignment = ltt_get_alignment();
	header->start_time_sec = trace->start_time.tv_sec;
	header->start_time_usec = trace->start_time.tv_usec;
	header->start_freq = trace->start_freq;
	header->freq_scale = trace->freq_scale;
}
//ust// EXPORT_SYMBOL_GPL(ltt_write_trace_header);
static void trace_async_wakeup(struct ltt_trace_struct *trace)
{
	int i;
	struct ltt_channel_struct *chan;

	/* Must check each channel for pending read wakeup */
	for (i = 0; i < trace->nr_channels; i++) {
		chan = &trace->channels[i];
		if (chan->active)
			trace->ops->wakeup_channel(chan);
	}
}
//ust// /* Timer to send async wakeups to the readers */
//ust// static void async_wakeup(unsigned long data)
//ust// 	struct ltt_trace_struct *trace;
//ust// 	/*
//ust// 	 * PREEMPT_RT does not allow spinlocks to be taken within preempt
//ust// 	 * disable sections (spinlock taken in wake_up). However, mainline won't
//ust// 	 * allow mutex to be taken in interrupt context. Ugly.
//ust// 	 * A proper way to do this would be to turn the timer into a
//ust// 	 * periodically woken up thread, but it adds to the footprint.
//ust// 	 */
//ust// #ifndef CONFIG_PREEMPT_RT
//ust// 	rcu_read_lock_sched();
//ust// 	ltt_lock_traces();
//ust// 	list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
//ust// 		trace_async_wakeup(trace);
//ust// #ifndef CONFIG_PREEMPT_RT
//ust// 	rcu_read_unlock_sched();
//ust// 	ltt_unlock_traces();
//ust// 	mod_timer(&ltt_async_wakeup_timer, jiffies + LTT_PERCPU_TIMER_INTERVAL);
/**
 * _ltt_trace_find - find a trace by given name.
 * trace_name: trace name
 *
 * Returns a pointer to the trace structure, NULL if not found.
 */
struct ltt_trace_struct *_ltt_trace_find(const char *trace_name)
{
	struct ltt_trace_struct *trace;

	list_for_each_entry(trace, &ltt_traces.head, list)
		if (!strncmp(trace->trace_name, trace_name, NAME_MAX))
			return trace;

	return NULL;
}
/* _ltt_trace_find_setup :
 * find a trace in setup list by given name.
 *
 * Returns a pointer to the trace structure, NULL if not found.
 */
struct ltt_trace_struct *_ltt_trace_find_setup(const char *trace_name)
{
	struct ltt_trace_struct *trace;

	list_for_each_entry(trace, &ltt_traces.setup_head, list)
		if (!strncmp(trace->trace_name, trace_name, NAME_MAX))
			return trace;

	return NULL;
}
//ust// EXPORT_SYMBOL_GPL(_ltt_trace_find_setup);
/**
 * ltt_release_transport - Release an LTT transport
 * @kref : reference count on the transport
 */
void ltt_release_transport(struct kref *kref)
{
	//ust// struct ltt_trace_struct *trace = container_of(kref,
	//ust// 		struct ltt_trace_struct, ltt_transport_kref);
	//ust// trace->ops->remove_dirs(trace);
}
//ust// EXPORT_SYMBOL_GPL(ltt_release_transport);
/**
 * ltt_release_trace - Release a LTT trace
 * @kref : reference count on the trace
 */
void ltt_release_trace(struct kref *kref)
{
	struct ltt_trace_struct *trace = container_of(kref,
			struct ltt_trace_struct, kref);
	ltt_channels_trace_free(trace->channels);
	kfree(trace);
}
//ust// EXPORT_SYMBOL_GPL(ltt_release_trace);
static inline void prepare_chan_size_num(unsigned int *subbuf_size,
		unsigned int *n_subbufs)
{
	*subbuf_size = 1 << get_count_order(*subbuf_size);
	*n_subbufs = 1 << get_count_order(*n_subbufs);

	/* Subbuf size and number must both be power of two */
	WARN_ON(hweight32(*subbuf_size) != 1);
	WARN_ON(hweight32(*n_subbufs) != 1);
}
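
/*
 * Illustrative example: prepare_chan_size_num() rounds both values up to the
 * next power of two, e.g. a requested subbuf_size of 3000 becomes
 * 1 << get_count_order(3000) = 4096 and a subbuf count of 3 becomes 4, so the
 * WARN_ON() checks above never fire for the adjusted values.
 */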
int _ltt_trace_setup(const char *trace_name)
{
	int err = 0;
	struct ltt_trace_struct *new_trace = NULL;
	int metadata_index;
	unsigned int chan;
	enum ltt_channels chantype;

	if (_ltt_trace_find_setup(trace_name)) {
		printk(KERN_ERR "LTT : Trace name %s already used.\n",
				trace_name);
		err = -EEXIST;
		goto traces_error;
	}

	if (_ltt_trace_find(trace_name)) {
		printk(KERN_ERR "LTT : Trace name %s already used.\n",
				trace_name);
		err = -EEXIST;
		goto traces_error;
	}

	new_trace = kzalloc(sizeof(struct ltt_trace_struct), GFP_KERNEL);
	if (!new_trace) {
		printk(KERN_ERR
			"LTT : Unable to allocate memory for trace %s\n",
			trace_name);
		err = -ENOMEM;
		goto traces_error;
	}
	strncpy(new_trace->trace_name, trace_name, NAME_MAX);
	new_trace->channels = ltt_channels_trace_alloc(&new_trace->nr_channels,
						       0, 1);
	if (!new_trace->channels) {
		printk(KERN_ERR
			"LTT : Unable to allocate memory for chaninfo %s\n",
			trace_name);
		err = -ENOMEM;
		goto trace_free;
	}

	/*
	 * Force metadata channel to active, no overwrite.
	 */
	metadata_index = ltt_channels_get_index_from_name("metadata");
	WARN_ON(metadata_index < 0);
	new_trace->channels[metadata_index].overwrite = 0;
	new_trace->channels[metadata_index].active = 1;

	/*
	 * Set hardcoded tracer defaults for some channels
	 */
	for (chan = 0; chan < new_trace->nr_channels; chan++) {
		if (!(new_trace->channels[chan].active))
			continue;

		chantype = get_channel_type_from_name(
			ltt_channels_get_name_from_index(chan));
		new_trace->channels[chan].subbuf_size =
			chan_infos[chantype].def_subbufsize;
		new_trace->channels[chan].subbuf_cnt =
			chan_infos[chantype].def_subbufcount;
	}

	list_add(&new_trace->list, &ltt_traces.setup_head);
	return 0;

trace_free:
	kfree(new_trace);
traces_error:
	return err;
}
//ust// EXPORT_SYMBOL_GPL(_ltt_trace_setup);
int ltt_trace_setup(const char *trace_name)
{
	int ret;

	ltt_lock_traces();
	ret = _ltt_trace_setup(trace_name);
	ltt_unlock_traces();
	return ret;
}
//ust// EXPORT_SYMBOL_GPL(ltt_trace_setup);
/* must be called from within a traces lock. */
static void _ltt_trace_free(struct ltt_trace_struct *trace)
{
	list_del(&trace->list);
	kfree(trace);
}
int ltt_trace_set_type(const char *trace_name, const char *trace_type)
{
	int err = 0;
	struct ltt_trace_struct *trace;
	struct ltt_transport *tran_iter, *transport = NULL;

	ltt_lock_traces();

	trace = _ltt_trace_find_setup(trace_name);
	if (!trace) {
		printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
		err = -ENOENT;
		goto traces_error;
	}

	list_for_each_entry(tran_iter, &ltt_transport_list, node) {
		if (!strcmp(tran_iter->name, trace_type)) {
			transport = tran_iter;
			break;
		}
	}
	if (!transport) {
		printk(KERN_ERR "LTT : Transport %s is not present.\n",
			trace_type);
		err = -EINVAL;
		goto traces_error;
	}

	trace->transport = transport;

traces_error:
	ltt_unlock_traces();
	return err;
}
//ust// EXPORT_SYMBOL_GPL(ltt_trace_set_type);
int ltt_trace_set_channel_subbufsize(const char *trace_name,
		const char *channel_name, unsigned int size)
{
	int err = 0;
	struct ltt_trace_struct *trace;
	int index;

	ltt_lock_traces();

	trace = _ltt_trace_find_setup(trace_name);
	if (!trace) {
		printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
		err = -ENOENT;
		goto traces_error;
	}

	index = ltt_channels_get_index_from_name(channel_name);
	if (index < 0) {
		printk(KERN_ERR "LTT : Channel %s not found\n", channel_name);
		err = -ENOENT;
		goto traces_error;
	}
	trace->channels[index].subbuf_size = size;

traces_error:
	ltt_unlock_traces();
	return err;
}
//ust// EXPORT_SYMBOL_GPL(ltt_trace_set_channel_subbufsize);
int ltt_trace_set_channel_subbufcount(const char *trace_name,
		const char *channel_name, unsigned int cnt)
{
	int err = 0;
	struct ltt_trace_struct *trace;
	int index;

	ltt_lock_traces();

	trace = _ltt_trace_find_setup(trace_name);
	if (!trace) {
		printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
		err = -ENOENT;
		goto traces_error;
	}

	index = ltt_channels_get_index_from_name(channel_name);
	if (index < 0) {
		printk(KERN_ERR "LTT : Channel %s not found\n", channel_name);
		err = -ENOENT;
		goto traces_error;
	}
	trace->channels[index].subbuf_cnt = cnt;

traces_error:
	ltt_unlock_traces();
	return err;
}
//ust// EXPORT_SYMBOL_GPL(ltt_trace_set_channel_subbufcount);
int ltt_trace_set_channel_enable(const char *trace_name,
		const char *channel_name, unsigned int enable)
{
	int err = 0;
	struct ltt_trace_struct *trace;
	int index;

	ltt_lock_traces();

	trace = _ltt_trace_find_setup(trace_name);
	if (!trace) {
		printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
		err = -ENOENT;
		goto traces_error;
	}

	/*
	 * Data in the metadata channel (marker info) is necessary to be
	 * able to read the trace; always keep this channel enabled.
	 */
	if (!enable && !strcmp(channel_name, "metadata")) {
		printk(KERN_ERR "LTT : Trying to disable metadata channel\n");
		err = -EPERM;
		goto traces_error;
	}

	index = ltt_channels_get_index_from_name(channel_name);
	if (index < 0) {
		printk(KERN_ERR "LTT : Channel %s not found\n", channel_name);
		err = -ENOENT;
		goto traces_error;
	}

	trace->channels[index].active = enable;

traces_error:
	ltt_unlock_traces();
	return err;
}
//ust// EXPORT_SYMBOL_GPL(ltt_trace_set_channel_enable);
int ltt_trace_set_channel_overwrite(const char *trace_name,
		const char *channel_name, unsigned int overwrite)
{
	int err = 0;
	struct ltt_trace_struct *trace;
	int index;

	ltt_lock_traces();

	trace = _ltt_trace_find_setup(trace_name);
	if (!trace) {
		printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
		err = -ENOENT;
		goto traces_error;
	}

	/*
	 * Always put the metadata channel in non-overwrite mode: it is a
	 * very low traffic channel and it can't afford to have its data
	 * overwritten, since this data (marker info) is necessary to be
	 * able to read the trace.
	 */
	if (overwrite && !strcmp(channel_name, "metadata")) {
		printk(KERN_ERR "LTT : Trying to set metadata channel to "
				"overwrite mode\n");
		err = -EPERM;
		goto traces_error;
	}

	index = ltt_channels_get_index_from_name(channel_name);
	if (index < 0) {
		printk(KERN_ERR "LTT : Channel %s not found\n", channel_name);
		err = -ENOENT;
		goto traces_error;
	}

	trace->channels[index].overwrite = overwrite;

traces_error:
	ltt_unlock_traces();
	return err;
}
//ust// EXPORT_SYMBOL_GPL(ltt_trace_set_channel_overwrite);
int ltt_trace_alloc(const char *trace_name)
{
	int err = 0;
	struct ltt_trace_struct *trace;
	unsigned int subbuf_size, subbuf_cnt;
	//ust//	unsigned long flags;
	int chan;
	const char *channel_name;

	ltt_lock_traces();

	trace = _ltt_trace_find_setup(trace_name);
	if (!trace) {
		printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
		err = -ENOENT;
		goto traces_error;
	}

	kref_init(&trace->kref);
	kref_init(&trace->ltt_transport_kref);
	//ust// init_waitqueue_head(&trace->kref_wq);

	//ust// get_trace_clock();
	trace->freq_scale = trace_clock_freq_scale();

	if (!trace->transport) {
		printk(KERN_ERR "LTT : Transport is not set.\n");
		err = -EINVAL;
		goto transport_error;
	}
	//ust// if (!try_module_get(trace->transport->owner)) {
	//ust// 	printk(KERN_ERR "LTT : Can't lock transport module.\n");
	//ust// 	err = -ENODEV;
	//ust// 	goto transport_error;
	//ust// }
	trace->ops = &trace->transport->ops;

	//ust// err = trace->ops->create_dirs(trace);
	//ust// if (err) {
	//ust// 	printk(KERN_ERR "LTT : Can't create dir for trace %s.\n",
	//ust// 		trace_name);
	//ust// 	goto dirs_error;
	//ust// }

	//ust// local_irq_save(flags);
	trace->start_freq = trace_clock_frequency();
	trace->start_tsc = trace_clock_read64();
	gettimeofday(&trace->start_time, NULL); //ust// changed
	//ust// local_irq_restore(flags);

	for (chan = 0; chan < trace->nr_channels; chan++) {
		if (!(trace->channels[chan].active))
			continue;

		channel_name = ltt_channels_get_name_from_index(chan);
		WARN_ON(!channel_name);
		subbuf_size = trace->channels[chan].subbuf_size;
		subbuf_cnt = trace->channels[chan].subbuf_cnt;
		prepare_chan_size_num(&subbuf_size, &subbuf_cnt);
		err = trace->ops->create_channel(trace_name, trace,
				trace->dentry.trace_root,
				channel_name,
				&trace->channels[chan],
				subbuf_size,
				subbuf_cnt,
				trace->channels[chan].overwrite);
		if (err != 0) {
			printk(KERN_ERR "LTT : Can't create channel %s.\n",
				channel_name);
			goto create_channel_error;
		}
	}

	list_del(&trace->list);
	//ust// if (list_empty(&ltt_traces.head)) {
	//ust// 	mod_timer(&ltt_async_wakeup_timer,
	//ust// 		jiffies + LTT_PERCPU_TIMER_INTERVAL);
	//ust// 	set_kernel_trace_flag_all_tasks();
	//ust// }
	list_add_rcu(&trace->list, &ltt_traces.head);
	//ust// synchronize_sched();

	ltt_unlock_traces();

	return 0;

create_channel_error:
	for (chan--; chan >= 0; chan--)
		if (trace->channels[chan].active)
			trace->ops->remove_channel(&trace->channels[chan]);

	//ust// dirs_error:
	//ust// module_put(trace->transport->owner);
transport_error:
	//ust// put_trace_clock();
traces_error:
	ltt_unlock_traces();
	return err;
}
//ust// EXPORT_SYMBOL_GPL(ltt_trace_alloc);
/*
 * This works as a wrapper for the current version of ltt_control.ko.
 * We will make a new ltt_control based on debugfs, and control each channel's
 * buffer.
 */
//ust// static int ltt_trace_create(const char *trace_name, const char *trace_type,
//ust// 		enum trace_mode mode,
//ust// 		unsigned int subbuf_size_low, unsigned int n_subbufs_low,
//ust// 		unsigned int subbuf_size_med, unsigned int n_subbufs_med,
//ust// 		unsigned int subbuf_size_high, unsigned int n_subbufs_high)
//ust// 	err = ltt_trace_setup(trace_name);
//ust// 	if (IS_ERR_VALUE(err))
//ust// 		return err;
//ust//
//ust// 	err = ltt_trace_set_type(trace_name, trace_type);
//ust// 	if (IS_ERR_VALUE(err))
//ust// 		return err;
//ust//
//ust// 	err = ltt_trace_alloc(trace_name);
//ust// 	if (IS_ERR_VALUE(err))
//ust// 		return err;
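
/*
 * Illustrative usage sketch (not part of the original code): a controller
 * would typically drive the setup/start/stop API in this order.  The trace
 * name "auto" and transport name "ustrelay" are placeholders.
 *
 *	ltt_trace_setup("auto");
 *	ltt_trace_set_type("auto", "ustrelay");
 *	ltt_trace_set_channel_subbufsize("auto", "ust", 4096);
 *	ltt_trace_set_channel_subbufcount("auto", "ust", 8);
 *	ltt_trace_alloc("auto");
 *	ltt_trace_start("auto");
 *	...
 *	ltt_trace_stop("auto");
 *	ltt_trace_destroy("auto");
 */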
/* Must be called while sure that trace is in the list. */
static int _ltt_trace_destroy(struct ltt_trace_struct *trace)
{
	int err = -EPERM;

	if (trace == NULL) {
		err = -ENOENT;
		goto traces_error;
	}
	if (trace->active) {
		printk(KERN_ERR
			"LTT : Can't destroy trace %s : tracer is active\n",
			trace->trace_name);
		err = -EBUSY;
		goto active_error;
	}
	/* Everything went fine */
	list_del_rcu(&trace->list);
	if (list_empty(&ltt_traces.head)) {
		//ust// clear_kernel_trace_flag_all_tasks();
		/*
		 * We stop the asynchronous delivery of reader wakeup, but
		 * we must make one last check for reader wakeups pending
		 * later in __ltt_trace_destroy.
		 */
		//ust// del_timer_sync(&ltt_async_wakeup_timer);
	}
	return 0;

	/* error handling */
active_error:
traces_error:
	return err;
}
/* Sleepable part of the destroy */
static void __ltt_trace_destroy(struct ltt_trace_struct *trace)
{
	int i;
	struct ltt_channel_struct *chan;

	for (i = 0; i < trace->nr_channels; i++) {
		chan = &trace->channels[i];
		if (chan->active)
			trace->ops->finish_channel(chan);
	}

	return; /* FIXME: temporary for ust */
	//ust// flush_scheduled_work();

	/*
	 * The currently destroyed trace is not in the trace list anymore,
	 * so it's safe to call the async wakeup ourself. It will deliver
	 * the last subbuffers.
	 */
	trace_async_wakeup(trace);

	for (i = 0; i < trace->nr_channels; i++) {
		chan = &trace->channels[i];
		if (chan->active)
			trace->ops->remove_channel(chan);
	}

	kref_put(&trace->ltt_transport_kref, ltt_release_transport);

	//ust// module_put(trace->transport->owner);

	/*
	 * Wait for lttd readers to release the files, therefore making sure
	 * the last subbuffers have been read.
	 */
	//ust// if (atomic_read(&trace->kref.refcount) > 1) {
	//ust// 	__wait_event_interruptible(trace->kref_wq,
	//ust// 		(atomic_read(&trace->kref.refcount) == 1), ret);
	//ust// }
	kref_put(&trace->kref, ltt_release_trace);
}
int ltt_trace_destroy(const char *trace_name)
{
	int err = 0;
	struct ltt_trace_struct *trace;

	ltt_lock_traces();

	trace = _ltt_trace_find(trace_name);
	if (trace) {
		err = _ltt_trace_destroy(trace);
		if (err)
			goto error;

		ltt_unlock_traces();

		__ltt_trace_destroy(trace);
		//ust// put_trace_clock();

		return 0;
	}

	trace = _ltt_trace_find_setup(trace_name);
	if (trace) {
		_ltt_trace_free(trace);
		ltt_unlock_traces();
		return 0;
	}

	err = -ENOENT;

	/* Error handling */
error:
	ltt_unlock_traces();
	return err;
}
//ust// EXPORT_SYMBOL_GPL(ltt_trace_destroy);
/* must be called from within a traces lock. */
static int _ltt_trace_start(struct ltt_trace_struct *trace)
{
	int err = 0;

	if (trace == NULL) {
		err = -ENOENT;
		goto traces_error;
	}
	if (trace->active)
		printk(KERN_INFO "LTT : Tracing already active for trace %s\n",
				trace->trace_name);
	//ust// if (!try_module_get(ltt_run_filter_owner)) {
	//ust// 	err = -ENODEV;
	//ust// 	printk(KERN_ERR "LTT : Can't lock filter module.\n");
	//ust// 	goto get_ltt_run_filter_error;
	//ust// }
	trace->active = 1;
	/* Read by trace points without protection : be careful */
	ltt_traces.num_active_traces++;
	return err;

	/* error handling */
//ust// get_ltt_run_filter_error:
traces_error:
	return err;
}
int ltt_trace_start(const char *trace_name)
{
	int err = 0;
	struct ltt_trace_struct *trace;

	ltt_lock_traces();

	trace = _ltt_trace_find(trace_name);
	err = _ltt_trace_start(trace);
	if (err)
		goto no_trace;

	ltt_unlock_traces();

	/*
	 * Call the kernel state dump.
	 * Events will be mixed with real kernel events, it's ok.
	 * Notice that there is no protection on the trace : that's exactly
	 * why we iterate on the list and check for trace equality instead of
	 * directly using this trace handle inside the logging function.
	 */
	ltt_dump_marker_state(trace);

	//ust// if (!try_module_get(ltt_statedump_owner)) {
	//ust// 	err = -ENODEV;
	//ust// 	printk(KERN_ERR
	//ust// 		"LTT : Can't lock state dump module.\n");
	//ust// }
	ltt_statedump_functor(trace);
	//ust// module_put(ltt_statedump_owner);

	return err;

	/* Error handling */
no_trace:
	ltt_unlock_traces();
	return err;
}
//ust// EXPORT_SYMBOL_GPL(ltt_trace_start);
/* must be called from within traces lock */
static int _ltt_trace_stop(struct ltt_trace_struct *trace)
{
	int err = -EPERM;

	if (trace == NULL) {
		err = -ENOENT;
		goto traces_error;
	}
	if (!trace->active)
		printk(KERN_INFO "LTT : Tracing not active for trace %s\n",
				trace->trace_name);
	if (trace->active) {
		trace->active = 0;
		ltt_traces.num_active_traces--;
		//ust// synchronize_sched(); /* Wait for each tracing to be finished */
	}
	//ust// module_put(ltt_run_filter_owner);
	/* Everything went fine */
	err = 0;
	return err;

	/* Error handling */
traces_error:
	return err;
}
int ltt_trace_stop(const char *trace_name)
{
	int err = 0;
	struct ltt_trace_struct *trace;

	ltt_lock_traces();
	trace = _ltt_trace_find(trace_name);
	err = _ltt_trace_stop(trace);
	ltt_unlock_traces();
	return err;
}
//ust// EXPORT_SYMBOL_GPL(ltt_trace_stop);
/**
 * ltt_control - Trace control in-kernel API
 * @msg: Action to perform
 * @trace_name: Trace on which the action must be done
 * @trace_type: Type of trace (normal, flight, hybrid)
 * @args: Arguments specific to the action
 */
//ust// int ltt_control(enum ltt_control_msg msg, const char *trace_name,
//ust// 		const char *trace_type, union ltt_control_args args)
//ust// 	int err = -EPERM;
//ust//
//ust// 	printk(KERN_ALERT "ltt_control : trace %s\n", trace_name);
//ust// 	switch (msg) {
//ust// 	case LTT_CONTROL_START:
//ust// 		printk(KERN_DEBUG "Start tracing %s\n", trace_name);
//ust// 		err = ltt_trace_start(trace_name);
//ust// 	case LTT_CONTROL_STOP:
//ust// 		printk(KERN_DEBUG "Stop tracing %s\n", trace_name);
//ust// 		err = ltt_trace_stop(trace_name);
//ust// 	case LTT_CONTROL_CREATE_TRACE:
//ust// 		printk(KERN_DEBUG "Creating trace %s\n", trace_name);
//ust// 		err = ltt_trace_create(trace_name, trace_type,
//ust// 			args.new_trace.mode,
//ust// 			args.new_trace.subbuf_size_low,
//ust// 			args.new_trace.n_subbufs_low,
//ust// 			args.new_trace.subbuf_size_med,
//ust// 			args.new_trace.n_subbufs_med,
//ust// 			args.new_trace.subbuf_size_high,
//ust// 			args.new_trace.n_subbufs_high);
//ust// 	case LTT_CONTROL_DESTROY_TRACE:
//ust// 		printk(KERN_DEBUG "Destroying trace %s\n", trace_name);
//ust// 		err = ltt_trace_destroy(trace_name);
//ust// EXPORT_SYMBOL_GPL(ltt_control);
/**
 * ltt_filter_control - Trace filter control in-kernel API
 * @msg: Action to perform on the filter
 * @trace_name: Trace on which the action must be done
 */
int ltt_filter_control(enum ltt_filter_control_msg msg, const char *trace_name)
{
	int err;
	struct ltt_trace_struct *trace;

	printk(KERN_DEBUG "ltt_filter_control : trace %s\n", trace_name);
	ltt_lock_traces();
	trace = _ltt_trace_find(trace_name);
	if (trace == NULL) {
		printk(KERN_ALERT
			"Trace does not exist. Cannot proxy control request\n");
		err = -ENOENT;
		goto trace_error;
	}
	//ust// if (!try_module_get(ltt_filter_control_owner)) {
	//ust// 	err = -ENODEV;
	//ust// 	goto get_module_error;
	//ust// }
	switch (msg) {
	case LTT_FILTER_DEFAULT_ACCEPT:
		printk(KERN_DEBUG
			"Proxy filter default accept %s\n", trace_name);
		err = (*ltt_filter_control_functor)(msg, trace);
		break;
	case LTT_FILTER_DEFAULT_REJECT:
		printk(KERN_DEBUG
			"Proxy filter default reject %s\n", trace_name);
		err = (*ltt_filter_control_functor)(msg, trace);
		break;
	default:
		err = -EPERM;
	}
	//ust// module_put(ltt_filter_control_owner);

	//ust// get_module_error:
trace_error:
	ltt_unlock_traces();
	return err;
}
//ust// EXPORT_SYMBOL_GPL(ltt_filter_control);
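
/*
 * Illustrative call (trace name is a placeholder):
 *
 *	err = ltt_filter_control(LTT_FILTER_DEFAULT_ACCEPT, "auto");
 *
 * which proxies the request to ltt_filter_control_functor for that trace.
 */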
//ust// int __init ltt_init(void)
//ust// 	/* Make sure no page fault can be triggered by this module */
//ust// 	vmalloc_sync_all();
//ust// module_init(ltt_init)

//ust// static void __exit ltt_exit(void)
//ust// 	struct ltt_trace_struct *trace;
//ust// 	struct list_head *pos, *n;
//ust//
//ust// 	ltt_lock_traces();
//ust// 	/* Stop each trace, currently being read by RCU read-side */
//ust// 	list_for_each_entry_rcu(trace, &ltt_traces.head, list)
//ust// 		_ltt_trace_stop(trace);
//ust// 	/* Wait for quiescent state. Readers have preemption disabled. */
//ust// 	synchronize_sched();
//ust// 	/* Safe iteration is now permitted. It does not have to be RCU-safe
//ust// 	 * because no readers are left. */
//ust// 	list_for_each_safe(pos, n, &ltt_traces.head) {
//ust// 		trace = container_of(pos, struct ltt_trace_struct, list);
//ust// 		/* _ltt_trace_destroy does a synchronize_sched() */
//ust// 		_ltt_trace_destroy(trace);
//ust// 		__ltt_trace_destroy(trace);
//ust// 	/* free traces in pre-alloc status */
//ust// 	list_for_each_safe(pos, n, &ltt_traces.setup_head) {
//ust// 		trace = container_of(pos, struct ltt_trace_struct, list);
//ust// 		_ltt_trace_free(trace);
//ust// 	ltt_unlock_traces();
//ust// module_exit(ltt_exit)

//ust// MODULE_LICENSE("GPL");
//ust// MODULE_AUTHOR("Mathieu Desnoyers");
//ust// MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Tracer Kernel API");