libust/tracer.c (ust.git)
1 /*
2 * ltt/ltt-tracer.c
3 *
4 * (C) Copyright 2005-2008 -
5 * Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 *
21 * Tracing management internal kernel API. Trace buffer allocation/free, tracing
22 * start/stop.
23 *
24 * Author:
25 * Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
26 *
27 * Inspired from LTT :
28 * Karim Yaghmour (karim@opersys.com)
29 * Tom Zanussi (zanussi@us.ibm.com)
30 * Bob Wisniewski (bob@watson.ibm.com)
31 * And from K42 :
32 * Bob Wisniewski (bob@watson.ibm.com)
33 *
34 * Changelog:
35 * 22/09/06, Move to the marker/probes mechanism.
36 * 19/10/05, Complete lockless mechanism.
37 * 27/05/05, Modular redesign and rewrite.
38 */
39
40 //ust// #include <linux/time.h>
41 //ust// #include <linux/ltt-tracer.h>
42 //ust// #include <linux/module.h>
43 //ust// #include <linux/string.h>
44 //ust// #include <linux/slab.h>
45 //ust// #include <linux/init.h>
46 //ust// #include <linux/rcupdate.h>
47 //ust// #include <linux/sched.h>
48 //ust// #include <linux/bitops.h>
49 //ust// #include <linux/fs.h>
50 //ust// #include <linux/cpu.h>
51 //ust// #include <linux/kref.h>
52 //ust// #include <linux/delay.h>
53 //ust// #include <linux/vmalloc.h>
54 //ust// #include <asm/atomic.h>
55 #include <urcu-bp.h>
56 #include <urcu/rculist.h>
57
58 #include <ust/kernelcompat.h>
59 #include "tracercore.h"
60 #include "tracer.h"
61 #include "usterr.h"
62
63 //ust// static void async_wakeup(unsigned long data);
64 //ust//
65 //ust// static DEFINE_TIMER(ltt_async_wakeup_timer, async_wakeup, 0, 0);
66
67 /* Default callbacks for modules */
68 notrace int ltt_filter_control_default(enum ltt_filter_control_msg msg,
69 struct ltt_trace_struct *trace)
70 {
71 return 0;
72 }
73
74 int ltt_statedump_default(struct ltt_trace_struct *trace)
75 {
76 return 0;
77 }
78
79 /* Callbacks for registered modules */
80
81 int (*ltt_filter_control_functor)
82 (enum ltt_filter_control_msg msg, struct ltt_trace_struct *trace) =
83 ltt_filter_control_default;
84 struct module *ltt_filter_control_owner;
85
86 /* These function pointers are protected by a trace activation check */
87 struct module *ltt_run_filter_owner;
88 int (*ltt_statedump_functor)(struct ltt_trace_struct *trace) =
89 ltt_statedump_default;
90 struct module *ltt_statedump_owner;
91
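/*
 * Per-channel-type defaults: the metadata channel gets the small (LOW)
 * sub-buffer geometry, the main ust channel the large (HIGH) one. These
 * are only starting values; they can be overridden per trace through
 * ltt_trace_set_channel_subbufsize()/ltt_trace_set_channel_subbufcount()
 * below.
 */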
92 struct chan_info_struct {
93 const char *name;
94 unsigned int def_subbufsize;
95 unsigned int def_subbufcount;
96 } chan_infos[] = {
97 [LTT_CHANNEL_METADATA] = {
98 LTT_METADATA_CHANNEL,
99 LTT_DEFAULT_SUBBUF_SIZE_LOW,
100 LTT_DEFAULT_N_SUBBUFS_LOW,
101 },
102 [LTT_CHANNEL_UST] = {
103 LTT_UST_CHANNEL,
104 LTT_DEFAULT_SUBBUF_SIZE_HIGH,
105 LTT_DEFAULT_N_SUBBUFS_HIGH,
106 },
107 };
108
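/*
 * Map a channel name to its channel type. A name matching
 * LTT_METADATA_CHANNEL yields LTT_CHANNEL_METADATA; a NULL or unknown
 * name falls back to LTT_CHANNEL_UST.
 */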
109 static enum ltt_channels get_channel_type_from_name(const char *name)
110 {
111 int i;
112
113 if (!name)
114 return LTT_CHANNEL_UST;
115
116 for (i = 0; i < ARRAY_SIZE(chan_infos); i++)
117 if (chan_infos[i].name && !strcmp(name, chan_infos[i].name))
118 return (enum ltt_channels)i;
119
120 return LTT_CHANNEL_UST;
121 }
122
123 /**
124 * ltt_module_register - LTT module registration
125 * @name: module type
126 * @function: callback to register
127 * @owner: module which owns the callback
128 *
129 * The module calling this registration function must ensure that no
130 * trap-inducing code will be executed by "function". E.g. vmalloc_sync_all()
131 * must be called between a vmalloc and the moment the memory is made visible to
132 * "function". This registration acts as a vmalloc_sync_all. Therefore, only if
133 * the module allocates virtual memory after its registration must it
134 * synchronize the TLBs.
135 */
136 //ust// int ltt_module_register(enum ltt_module_function name, void *function,
137 //ust// struct module *owner)
138 //ust// {
139 //ust// int ret = 0;
140 //ust//
141 //ust// /*
142 //ust// * Make sure no page fault can be triggered by the module about to be
143 //ust// * registered. We deal with this here so we don't have to call
144 //ust// * vmalloc_sync_all() in each module's init.
145 //ust// */
146 //ust// vmalloc_sync_all();
147 //ust//
148 //ust// switch (name) {
149 //ust// case LTT_FUNCTION_RUN_FILTER:
150 //ust// if (ltt_run_filter_owner != NULL) {
151 //ust// ret = -EEXIST;
152 //ust// goto end;
153 //ust// }
154 //ust// ltt_filter_register((ltt_run_filter_functor)function);
155 //ust// ltt_run_filter_owner = owner;
156 //ust// break;
157 //ust// case LTT_FUNCTION_FILTER_CONTROL:
158 //ust// if (ltt_filter_control_owner != NULL) {
159 //ust// ret = -EEXIST;
160 //ust// goto end;
161 //ust// }
162 //ust// ltt_filter_control_functor =
163 //ust// (int (*)(enum ltt_filter_control_msg,
164 //ust// struct ltt_trace_struct *))function;
165 //ust// ltt_filter_control_owner = owner;
166 //ust// break;
167 //ust// case LTT_FUNCTION_STATEDUMP:
168 //ust// if (ltt_statedump_owner != NULL) {
169 //ust// ret = -EEXIST;
170 //ust// goto end;
171 //ust// }
172 //ust// ltt_statedump_functor =
173 //ust// (int (*)(struct ltt_trace_struct *))function;
174 //ust// ltt_statedump_owner = owner;
175 //ust// break;
176 //ust// }
177 //ust//
178 //ust// end:
179 //ust//
180 //ust// return ret;
181 //ust// }
182 //ust// EXPORT_SYMBOL_GPL(ltt_module_register);
183
184 /**
185 * ltt_module_unregister - LTT module unregistration
186 * @name: module type
187 */
188 //ust// void ltt_module_unregister(enum ltt_module_function name)
189 //ust// {
190 //ust// switch (name) {
191 //ust// case LTT_FUNCTION_RUN_FILTER:
192 //ust// ltt_filter_unregister();
193 //ust// ltt_run_filter_owner = NULL;
194 //ust// /* Wait for preempt sections to finish */
195 //ust// synchronize_sched();
196 //ust// break;
197 //ust// case LTT_FUNCTION_FILTER_CONTROL:
198 //ust// ltt_filter_control_functor = ltt_filter_control_default;
199 //ust// ltt_filter_control_owner = NULL;
200 //ust// break;
201 //ust// case LTT_FUNCTION_STATEDUMP:
202 //ust// ltt_statedump_functor = ltt_statedump_default;
203 //ust// ltt_statedump_owner = NULL;
204 //ust// break;
205 //ust// }
206 //ust//
207 //ust// }
208 //ust// EXPORT_SYMBOL_GPL(ltt_module_unregister);
209
210 static LIST_HEAD(ltt_transport_list);
211
212 /**
213 * ltt_transport_register - LTT transport registration
214 * @transport: transport structure
215 *
216 * Registers a transport which can be used as output to extract the data out of
217 * LTTng. The module calling this registration function must ensure that no
218 * trap-inducing code will be executed by the transport functions. E.g.
219 * vmalloc_sync_all() must be called between a vmalloc and the moment the memory
220 * is made visible to the transport function. This registration acts as a
221 * vmalloc_sync_all. Therefore, only if the module allocates virtual memory
222 * after its registration must it synchronize the TLBs.
223 */
224 void ltt_transport_register(struct ltt_transport *transport)
225 {
226 /*
227 * Make sure no page fault can be triggered by the module about to be
228 * registered. We deal with this here so we don't have to call
229 * vmalloc_sync_all() in each module's init.
230 */
231 //ust// vmalloc_sync_all();
232
233 ltt_lock_traces();
234 list_add_tail(&transport->node, &ltt_transport_list);
235 ltt_unlock_traces();
236 }
237 //ust// EXPORT_SYMBOL_GPL(ltt_transport_register);
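/*
 * Hypothetical usage sketch (not part of this file): a buffering back-end
 * describes itself in a struct ltt_transport and registers it once at
 * init time, e.g.:
 *
 *	static struct ltt_transport my_transport = {
 *		.name = "my-relay",
 *		.ops = {
 *			.create_channel = ...,
 *			.wakeup_channel = ...,
 *			.finish_channel = ...,
 *			.remove_channel = ...,
 *		},
 *	};
 *
 *	ltt_transport_register(&my_transport);
 *
 * The transport name and the exact layout of struct ltt_transport are
 * assumptions; only .name, .node, .ops and the callbacks used in this
 * file (create_channel, wakeup_channel, finish_channel, remove_channel)
 * are implied by the code here.
 */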
238
239 /**
240 * ltt_transport_unregister - LTT transport unregistration
241 * @transport: transport structure
242 */
243 void ltt_transport_unregister(struct ltt_transport *transport)
244 {
245 ltt_lock_traces();
246 list_del(&transport->node);
247 ltt_unlock_traces();
248 }
249 //ust// EXPORT_SYMBOL_GPL(ltt_transport_unregister);
250
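/*
 * Channel overwrite policy per trace mode: in flight-recorder and hybrid
 * modes every channel except metadata runs in overwrite (flight-recorder)
 * mode; the metadata channel always stays in normal, non-overwrite mode
 * so the trace remains readable.
 */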
251 static inline int is_channel_overwrite(enum ltt_channels chan,
252 enum trace_mode mode)
253 {
254 switch (mode) {
255 case LTT_TRACE_NORMAL:
256 return 0;
257 case LTT_TRACE_FLIGHT:
258 switch (chan) {
259 case LTT_CHANNEL_METADATA:
260 return 0;
261 default:
262 return 1;
263 }
264 case LTT_TRACE_HYBRID:
265 switch (chan) {
266 case LTT_CHANNEL_METADATA:
267 return 0;
268 default:
269 return 1;
270 }
271 default:
272 return 0;
273 }
274 }
275
276 /**
277 * ltt_write_trace_header - Write trace header
278 * @trace: Trace information
279 * @header: Memory address where the information must be written to
280 */
281 void notrace ltt_write_trace_header(struct ltt_trace_struct *trace,
282 struct ltt_subbuffer_header *header)
283 {
284 header->magic_number = LTT_TRACER_MAGIC_NUMBER;
285 header->major_version = LTT_TRACER_VERSION_MAJOR;
286 header->minor_version = LTT_TRACER_VERSION_MINOR;
287 header->arch_size = sizeof(void *);
288 header->alignment = ltt_get_alignment();
289 header->start_time_sec = trace->start_time.tv_sec;
290 header->start_time_usec = trace->start_time.tv_usec;
291 header->start_freq = trace->start_freq;
292 header->freq_scale = trace->freq_scale;
293 }
294 //ust// EXPORT_SYMBOL_GPL(ltt_write_trace_header);
295
296 static void trace_async_wakeup(struct ltt_trace_struct *trace)
297 {
298 int i;
299 struct ust_channel *chan;
300
301 /* Must check each channel for pending read wakeup */
302 for (i = 0; i < trace->nr_channels; i++) {
303 chan = &trace->channels[i];
304 if (chan->active)
305 trace->ops->wakeup_channel(chan);
306 }
307 }
308
309 //ust// /* Timer to send async wakeups to the readers */
310 //ust// static void async_wakeup(unsigned long data)
311 //ust// {
312 //ust// struct ltt_trace_struct *trace;
313 //ust//
314 //ust// /*
315 //ust// * PREEMPT_RT does not allow spinlocks to be taken within preempt
316 //ust// * disable sections (spinlock taken in wake_up). However, mainline won't
317 //ust// * allow mutex to be taken in interrupt context. Ugly.
318 //ust// * A proper way to do this would be to turn the timer into a
319 //ust// * periodically woken up thread, but it adds to the footprint.
320 //ust// */
321 //ust// #ifndef CONFIG_PREEMPT_RT
322 //ust// rcu_read_lock_sched();
323 //ust// #else
324 //ust// ltt_lock_traces();
325 //ust// #endif
326 //ust// list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
327 //ust// trace_async_wakeup(trace);
328 //ust// }
329 //ust// #ifndef CONFIG_PREEMPT_RT
330 //ust// rcu_read_unlock_sched();
331 //ust// #else
332 //ust// ltt_unlock_traces();
333 //ust// #endif
334 //ust//
335 //ust// mod_timer(&ltt_async_wakeup_timer, jiffies + LTT_PERCPU_TIMER_INTERVAL);
336 //ust// }
337
338 /**
 339  * _ltt_trace_find - find a trace by its name.
 340  * @trace_name: trace name
341 *
342 * Returns a pointer to the trace structure, NULL if not found.
343 */
344 struct ltt_trace_struct *_ltt_trace_find(const char *trace_name)
345 {
346 struct ltt_trace_struct *trace;
347
348 list_for_each_entry(trace, &ltt_traces.head, list)
349 if (!strncmp(trace->trace_name, trace_name, NAME_MAX))
350 return trace;
351
352 return NULL;
353 }
354
 355 /**
 356  * _ltt_trace_find_setup - find a trace in the setup list by name.
 357  * @trace_name: trace name
 358  * Returns a pointer to the trace structure, NULL if not found.
 359  */
360 struct ltt_trace_struct *_ltt_trace_find_setup(const char *trace_name)
361 {
362 struct ltt_trace_struct *trace;
363
364 list_for_each_entry(trace, &ltt_traces.setup_head, list)
365 if (!strncmp(trace->trace_name, trace_name, NAME_MAX))
366 return trace;
367
368 return NULL;
369 }
370 //ust// EXPORT_SYMBOL_GPL(_ltt_trace_find_setup);
371
372 /**
373 * ltt_release_transport - Release an LTT transport
374 * @kref : reference count on the transport
375 */
376 void ltt_release_transport(struct kref *kref)
377 {
378 //ust// struct ltt_trace_struct *trace = container_of(kref,
379 //ust// struct ltt_trace_struct, ltt_transport_kref);
380 //ust// trace->ops->remove_dirs(trace);
381 }
382 //ust// EXPORT_SYMBOL_GPL(ltt_release_transport);
383
384 /**
 385  * ltt_release_trace - Release an LTT trace
386 * @kref : reference count on the trace
387 */
388 void ltt_release_trace(struct kref *kref)
389 {
390 struct ltt_trace_struct *trace = container_of(kref,
391 struct ltt_trace_struct, kref);
392 ltt_channels_trace_free(trace->channels);
393 kfree(trace);
394 }
395 //ust// EXPORT_SYMBOL_GPL(ltt_release_trace);
396
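/*
 * Round the requested sub-buffer size and count up to powers of two.
 * For example, a requested size of 3000 bytes with 3 sub-buffers becomes
 * 4096 bytes and 4 sub-buffers; values that are already powers of two
 * are left unchanged.
 */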
397 static inline void prepare_chan_size_num(unsigned int *subbuf_size,
398 unsigned int *n_subbufs)
399 {
400 *subbuf_size = 1 << get_count_order(*subbuf_size);
401 *n_subbufs = 1 << get_count_order(*n_subbufs);
402
 403 	/* Subbuf size and number must both be powers of two */
404 WARN_ON(hweight32(*subbuf_size) != 1);
405 WARN_ON(hweight32(*n_subbufs) != 1);
406 }
407
408 int _ltt_trace_setup(const char *trace_name)
409 {
410 int err = 0;
411 struct ltt_trace_struct *new_trace = NULL;
412 int metadata_index;
413 unsigned int chan;
414 enum ltt_channels chantype;
415
416 if (_ltt_trace_find_setup(trace_name)) {
417 printk(KERN_ERR "LTT : Trace name %s already used.\n",
418 trace_name);
419 err = -EEXIST;
420 goto traces_error;
421 }
422
423 if (_ltt_trace_find(trace_name)) {
424 printk(KERN_ERR "LTT : Trace name %s already used.\n",
425 trace_name);
426 err = -EEXIST;
427 goto traces_error;
428 }
429
430 new_trace = kzalloc(sizeof(struct ltt_trace_struct), GFP_KERNEL);
431 if (!new_trace) {
432 printk(KERN_ERR
433 "LTT : Unable to allocate memory for trace %s\n",
434 trace_name);
435 err = -ENOMEM;
436 goto traces_error;
437 }
438 strncpy(new_trace->trace_name, trace_name, NAME_MAX);
439 new_trace->channels = ltt_channels_trace_alloc(&new_trace->nr_channels,
440 0, 1);
441 if (!new_trace->channels) {
442 printk(KERN_ERR
443 "LTT : Unable to allocate memory for chaninfo %s\n",
444 trace_name);
445 err = -ENOMEM;
446 goto trace_free;
447 }
448
449 /*
450 * Force metadata channel to active, no overwrite.
451 */
452 metadata_index = ltt_channels_get_index_from_name("metadata");
453 WARN_ON(metadata_index < 0);
454 new_trace->channels[metadata_index].overwrite = 0;
455 new_trace->channels[metadata_index].active = 1;
456
457 /*
458 * Set hardcoded tracer defaults for some channels
459 */
460 for (chan = 0; chan < new_trace->nr_channels; chan++) {
461 if (!(new_trace->channels[chan].active))
462 continue;
463
464 chantype = get_channel_type_from_name(
465 ltt_channels_get_name_from_index(chan));
466 new_trace->channels[chan].subbuf_size =
467 chan_infos[chantype].def_subbufsize;
468 new_trace->channels[chan].subbuf_cnt =
469 chan_infos[chantype].def_subbufcount;
470 }
471
472 list_add(&new_trace->list, &ltt_traces.setup_head);
473 return 0;
474
475 trace_free:
476 kfree(new_trace);
477 traces_error:
478 return err;
479 }
480 //ust// EXPORT_SYMBOL_GPL(_ltt_trace_setup);
481
482
483 int ltt_trace_setup(const char *trace_name)
484 {
485 int ret;
486 ltt_lock_traces();
487 ret = _ltt_trace_setup(trace_name);
488 ltt_unlock_traces();
489 return ret;
490 }
491 //ust// EXPORT_SYMBOL_GPL(ltt_trace_setup);
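/*
 * Illustrative control sequence (a sketch; the trace, transport and
 * channel names below are only examples): a tracing session is normally
 * driven by calling, in order,
 *
 *	ltt_trace_setup("auto");
 *	ltt_trace_set_type("auto", "my-relay");
 *	ltt_trace_set_channel_subbufsize("auto", "ust", 4096);	(optional)
 *	ltt_trace_set_channel_subbufcount("auto", "ust", 8);	(optional)
 *	ltt_trace_alloc("auto");
 *	ltt_trace_start("auto");
 *	...
 *	ltt_trace_stop("auto");
 *	ltt_trace_destroy("auto");
 *
 * The commented-out ltt_trace_create() further down shows the same
 * setup/set_type/alloc sequence.
 */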
492
 493 /* Must be called with the traces lock held. */
494 static void _ltt_trace_free(struct ltt_trace_struct *trace)
495 {
496 list_del(&trace->list);
497 kfree(trace);
498 }
499
500 int ltt_trace_set_type(const char *trace_name, const char *trace_type)
501 {
502 int err = 0;
503 struct ltt_trace_struct *trace;
504 struct ltt_transport *tran_iter, *transport = NULL;
505
506 ltt_lock_traces();
507
508 trace = _ltt_trace_find_setup(trace_name);
509 if (!trace) {
510 printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
511 err = -ENOENT;
512 goto traces_error;
513 }
514
515 list_for_each_entry(tran_iter, &ltt_transport_list, node) {
516 if (!strcmp(tran_iter->name, trace_type)) {
517 transport = tran_iter;
518 break;
519 }
520 }
521 if (!transport) {
522 printk(KERN_ERR "LTT : Transport %s is not present.\n",
523 trace_type);
524 err = -EINVAL;
525 goto traces_error;
526 }
527
528 trace->transport = transport;
529
530 traces_error:
531 ltt_unlock_traces();
532 return err;
533 }
534 //ust// EXPORT_SYMBOL_GPL(ltt_trace_set_type);
535
536 int ltt_trace_set_channel_subbufsize(const char *trace_name,
537 const char *channel_name, unsigned int size)
538 {
539 int err = 0;
540 struct ltt_trace_struct *trace;
541 int index;
542
543 ltt_lock_traces();
544
545 trace = _ltt_trace_find_setup(trace_name);
546 if (!trace) {
547 printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
548 err = -ENOENT;
549 goto traces_error;
550 }
551
552 index = ltt_channels_get_index_from_name(channel_name);
553 if (index < 0) {
554 printk(KERN_ERR "LTT : Channel %s not found\n", channel_name);
555 err = -ENOENT;
556 goto traces_error;
557 }
558 trace->channels[index].subbuf_size = size;
559
560 traces_error:
561 ltt_unlock_traces();
562 return err;
563 }
564 //ust// EXPORT_SYMBOL_GPL(ltt_trace_set_channel_subbufsize);
565
566 int ltt_trace_set_channel_subbufcount(const char *trace_name,
567 const char *channel_name, unsigned int cnt)
568 {
569 int err = 0;
570 struct ltt_trace_struct *trace;
571 int index;
572
573 ltt_lock_traces();
574
575 trace = _ltt_trace_find_setup(trace_name);
576 if (!trace) {
577 printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
578 err = -ENOENT;
579 goto traces_error;
580 }
581
582 index = ltt_channels_get_index_from_name(channel_name);
583 if (index < 0) {
584 printk(KERN_ERR "LTT : Channel %s not found\n", channel_name);
585 err = -ENOENT;
586 goto traces_error;
587 }
588 trace->channels[index].subbuf_cnt = cnt;
589
590 traces_error:
591 ltt_unlock_traces();
592 return err;
593 }
594 //ust// EXPORT_SYMBOL_GPL(ltt_trace_set_channel_subbufcount);
595
596 int ltt_trace_set_channel_enable(const char *trace_name,
597 const char *channel_name, unsigned int enable)
598 {
599 int err = 0;
600 struct ltt_trace_struct *trace;
601 int index;
602
603 ltt_lock_traces();
604
605 trace = _ltt_trace_find_setup(trace_name);
606 if (!trace) {
607 printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
608 err = -ENOENT;
609 goto traces_error;
610 }
611
 612 	/*
 613 	 * Data in the metadata channel (marker info) is required to be able
 614 	 * to read the trace, so this channel is always kept enabled.
 615 	 */
616 if (!enable && !strcmp(channel_name, "metadata")) {
617 printk(KERN_ERR "LTT : Trying to disable metadata channel\n");
618 err = -EINVAL;
619 goto traces_error;
620 }
621
622 index = ltt_channels_get_index_from_name(channel_name);
623 if (index < 0) {
624 printk(KERN_ERR "LTT : Channel %s not found\n", channel_name);
625 err = -ENOENT;
626 goto traces_error;
627 }
628
629 trace->channels[index].active = enable;
630
631 traces_error:
632 ltt_unlock_traces();
633 return err;
634 }
635 //ust// EXPORT_SYMBOL_GPL(ltt_trace_set_channel_enable);
636
637 int ltt_trace_set_channel_overwrite(const char *trace_name,
638 const char *channel_name, unsigned int overwrite)
639 {
640 int err = 0;
641 struct ltt_trace_struct *trace;
642 int index;
643
644 ltt_lock_traces();
645
646 trace = _ltt_trace_find_setup(trace_name);
647 if (!trace) {
648 printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
649 err = -ENOENT;
650 goto traces_error;
651 }
652
 653 	/*
 654 	 * Always keep the metadata channel in non-overwrite mode:
 655 	 * it is a very low-traffic channel and it cannot afford to have its
 656 	 * data overwritten, since that data (marker info) is required to be
 657 	 * able to read the trace.
 658 	 */
659 if (overwrite && !strcmp(channel_name, "metadata")) {
660 printk(KERN_ERR "LTT : Trying to set metadata channel to "
661 "overwrite mode\n");
662 err = -EINVAL;
663 goto traces_error;
664 }
665
666 index = ltt_channels_get_index_from_name(channel_name);
667 if (index < 0) {
668 printk(KERN_ERR "LTT : Channel %s not found\n", channel_name);
669 err = -ENOENT;
670 goto traces_error;
671 }
672
673 trace->channels[index].overwrite = overwrite;
674
675 traces_error:
676 ltt_unlock_traces();
677 return err;
678 }
679 //ust// EXPORT_SYMBOL_GPL(ltt_trace_set_channel_overwrite);
680
681 int ltt_trace_alloc(const char *trace_name)
682 {
683 int err = 0;
684 struct ltt_trace_struct *trace;
685 unsigned int subbuf_size, subbuf_cnt;
686 //ust// unsigned long flags;
687 int chan;
688 const char *channel_name;
689
690 ltt_lock_traces();
691
692 trace = _ltt_trace_find_setup(trace_name);
693 if (!trace) {
694 printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
695 err = -ENOENT;
696 goto traces_error;
697 }
698
699 kref_init(&trace->kref);
700 kref_init(&trace->ltt_transport_kref);
701 //ust// init_waitqueue_head(&trace->kref_wq);
702 trace->active = 0;
703 //ust// get_trace_clock();
704 trace->freq_scale = trace_clock_freq_scale();
705
706 if (!trace->transport) {
707 printk(KERN_ERR "LTT : Transport is not set.\n");
708 err = -EINVAL;
709 goto transport_error;
710 }
711 //ust// if (!try_module_get(trace->transport->owner)) {
712 //ust// printk(KERN_ERR "LTT : Can't lock transport module.\n");
713 //ust// err = -ENODEV;
714 //ust// goto transport_error;
715 //ust// }
716 trace->ops = &trace->transport->ops;
717
718 //ust// err = trace->ops->create_dirs(trace);
719 //ust// if (err) {
720 //ust// printk(KERN_ERR "LTT : Can't create dir for trace %s.\n",
721 //ust// trace_name);
722 //ust// goto dirs_error;
723 //ust// }
724
725 //ust// local_irq_save(flags);
726 trace->start_freq = trace_clock_frequency();
727 trace->start_tsc = trace_clock_read64();
728 gettimeofday(&trace->start_time, NULL); //ust// changed
729 //ust// local_irq_restore(flags);
730
731 for (chan = 0; chan < trace->nr_channels; chan++) {
732 if (!(trace->channels[chan].active))
733 continue;
734
735 channel_name = ltt_channels_get_name_from_index(chan);
736 WARN_ON(!channel_name);
737 subbuf_size = trace->channels[chan].subbuf_size;
738 subbuf_cnt = trace->channels[chan].subbuf_cnt;
739 prepare_chan_size_num(&subbuf_size, &subbuf_cnt);
740 err = trace->ops->create_channel(trace_name, trace,
741 channel_name,
742 &trace->channels[chan],
743 subbuf_size,
744 subbuf_cnt,
745 trace->channels[chan].overwrite);
746 if (err != 0) {
747 printk(KERN_ERR "LTT : Can't create channel %s.\n",
748 channel_name);
749 goto create_channel_error;
750 }
751 }
752
753 list_del(&trace->list);
754 //ust// if (list_empty(&ltt_traces.head)) {
755 //ust// mod_timer(&ltt_async_wakeup_timer,
756 //ust// jiffies + LTT_PERCPU_TIMER_INTERVAL);
757 //ust// set_kernel_trace_flag_all_tasks();
758 //ust// }
759 list_add_rcu(&trace->list, &ltt_traces.head);
760 //ust// synchronize_sched();
761
762 ltt_unlock_traces();
763
764 return 0;
765
766 create_channel_error:
767 for (chan--; chan >= 0; chan--)
768 if (trace->channels[chan].active)
769 trace->ops->remove_channel(&trace->channels[chan]);
770
771 //ust// dirs_error:
772 //ust// module_put(trace->transport->owner);
773 transport_error:
774 //ust// put_trace_clock();
775 traces_error:
776 ltt_unlock_traces();
777 return err;
778 }
779 //ust// EXPORT_SYMBOL_GPL(ltt_trace_alloc);
780
 781 /*
 782  * This works as a wrapper for the current version of ltt_control.ko.
 783  * A new debugfs-based ltt_control will eventually control each channel's
 784  * buffer individually.
 785  */
786 //ust// static int ltt_trace_create(const char *trace_name, const char *trace_type,
787 //ust// enum trace_mode mode,
788 //ust// unsigned int subbuf_size_low, unsigned int n_subbufs_low,
789 //ust// unsigned int subbuf_size_med, unsigned int n_subbufs_med,
790 //ust// unsigned int subbuf_size_high, unsigned int n_subbufs_high)
791 //ust// {
792 //ust// int err = 0;
793 //ust//
794 //ust// err = ltt_trace_setup(trace_name);
795 //ust// if (IS_ERR_VALUE(err))
796 //ust// return err;
797 //ust//
798 //ust// err = ltt_trace_set_type(trace_name, trace_type);
799 //ust// if (IS_ERR_VALUE(err))
800 //ust// return err;
801 //ust//
802 //ust// err = ltt_trace_alloc(trace_name);
803 //ust// if (IS_ERR_VALUE(err))
804 //ust// return err;
805 //ust//
806 //ust// return err;
807 //ust// }
808
 809 /* Must only be called when the trace is known to be in the trace list. */
810 static int _ltt_trace_destroy(struct ltt_trace_struct *trace)
811 {
812 int err = -EPERM;
813
814 if (trace == NULL) {
815 err = -ENOENT;
816 goto traces_error;
817 }
818 if (trace->active) {
819 printk(KERN_ERR
820 "LTT : Can't destroy trace %s : tracer is active\n",
821 trace->trace_name);
822 err = -EBUSY;
823 goto active_error;
824 }
825 /* Everything went fine */
826 list_del_rcu(&trace->list);
827 synchronize_rcu();
828 if (list_empty(&ltt_traces.head)) {
829 //ust// clear_kernel_trace_flag_all_tasks();
830 /*
831 * We stop the asynchronous delivery of reader wakeup, but
832 * we must make one last check for reader wakeups pending
833 * later in __ltt_trace_destroy.
834 */
835 //ust// del_timer_sync(&ltt_async_wakeup_timer);
836 }
837 return 0;
838
839 /* error handling */
840 active_error:
841 traces_error:
842 return err;
843 }
844
845 /* Sleepable part of the destroy */
846 static void __ltt_trace_destroy(struct ltt_trace_struct *trace)
847 {
848 int i;
849 struct ust_channel *chan;
850
851 for (i = 0; i < trace->nr_channels; i++) {
852 chan = &trace->channels[i];
853 if (chan->active)
854 trace->ops->finish_channel(chan);
855 }
856
857 return; /* FIXME: temporary for ust */
858 //ust// flush_scheduled_work();
859
 860 	/*
 861 	 * The trace being destroyed is no longer in the trace list, so it is
 862 	 * safe to call the async wakeup ourselves. It will deliver the last
 863 	 * subbuffers.
 864 	 */
865 trace_async_wakeup(trace);
866
867 for (i = 0; i < trace->nr_channels; i++) {
868 chan = &trace->channels[i];
869 if (chan->active)
870 trace->ops->remove_channel(chan);
871 }
872
873 kref_put(&trace->ltt_transport_kref, ltt_release_transport);
874
875 //ust// module_put(trace->transport->owner);
876
877 /*
878 * Wait for lttd readers to release the files, therefore making sure
879 * the last subbuffers have been read.
880 */
881 //ust// if (atomic_read(&trace->kref.refcount) > 1) {
882 //ust// int ret = 0;
883 //ust// __wait_event_interruptible(trace->kref_wq,
884 //ust// (atomic_read(&trace->kref.refcount) == 1), ret);
885 //ust// }
886 kref_put(&trace->kref, ltt_release_trace);
887 }
888
889 int ltt_trace_destroy(const char *trace_name)
890 {
891 int err = 0;
892 struct ltt_trace_struct *trace;
893
894 ltt_lock_traces();
895
896 trace = _ltt_trace_find(trace_name);
897 if (trace) {
898 err = _ltt_trace_destroy(trace);
899 if (err)
900 goto error;
901
902 ltt_unlock_traces();
903
904 __ltt_trace_destroy(trace);
905 //ust// put_trace_clock();
906
907 return 0;
908 }
909
910 trace = _ltt_trace_find_setup(trace_name);
911 if (trace) {
912 _ltt_trace_free(trace);
913 ltt_unlock_traces();
914 return 0;
915 }
916
917 err = -ENOENT;
918
919 /* Error handling */
920 error:
921 ltt_unlock_traces();
922 return err;
923 }
924 //ust// EXPORT_SYMBOL_GPL(ltt_trace_destroy);
925
 926 /* Must be called with the traces lock held. */
927 static int _ltt_trace_start(struct ltt_trace_struct *trace)
928 {
929 int err = 0;
930
931 if (trace == NULL) {
932 err = -ENOENT;
933 goto traces_error;
934 }
935 if (trace->active)
936 printk(KERN_INFO "LTT : Tracing already active for trace %s\n",
937 trace->trace_name);
938 //ust// if (!try_module_get(ltt_run_filter_owner)) {
939 //ust// err = -ENODEV;
940 //ust// printk(KERN_ERR "LTT : Can't lock filter module.\n");
941 //ust// goto get_ltt_run_filter_error;
942 //ust// }
943 trace->active = 1;
 944 	/* Read by tracepoints without protection: be careful */
945 ltt_traces.num_active_traces++;
946 return err;
947
948 /* error handling */
949 //ust// get_ltt_run_filter_error:
950 traces_error:
951 return err;
952 }
953
954 int ltt_trace_start(const char *trace_name)
955 {
956 int err = 0;
957 struct ltt_trace_struct *trace;
958
959 ltt_lock_traces();
960
961 trace = _ltt_trace_find(trace_name);
962 err = _ltt_trace_start(trace);
963 if (err)
964 goto no_trace;
965
966 ltt_unlock_traces();
967
 968 	/*
 969 	 * Call the kernel state dump.
 970 	 * Its events will be mixed with real kernel events; that is fine.
 971 	 * Note that there is no protection on the trace: that is exactly
 972 	 * why we iterate on the list and check for trace equality instead of
 973 	 * using this trace handle directly inside the logging function.
 974 	 */
975
976 ltt_dump_marker_state(trace);
977
978 //ust// if (!try_module_get(ltt_statedump_owner)) {
979 //ust// err = -ENODEV;
980 //ust// printk(KERN_ERR
981 //ust// "LTT : Can't lock state dump module.\n");
982 //ust// } else {
983 ltt_statedump_functor(trace);
984 //ust// module_put(ltt_statedump_owner);
985 //ust// }
986
987 return err;
988
989 /* Error handling */
990 no_trace:
991 ltt_unlock_traces();
992 return err;
993 }
994 //ust// EXPORT_SYMBOL_GPL(ltt_trace_start);
995
 996 /* Must be called with the traces lock held. */
997 static int _ltt_trace_stop(struct ltt_trace_struct *trace)
998 {
999 int err = -EPERM;
1000
1001 if (trace == NULL) {
1002 err = -ENOENT;
1003 goto traces_error;
1004 }
1005 if (!trace->active)
1006 printk(KERN_INFO "LTT : Tracing not active for trace %s\n",
1007 trace->trace_name);
1008 if (trace->active) {
1009 trace->active = 0;
1010 ltt_traces.num_active_traces--;
1011 //ust// synchronize_sched(); /* Wait for each tracing to be finished */
1012 }
1013 //ust// module_put(ltt_run_filter_owner);
1014 /* Everything went fine */
1015 return 0;
1016
1017 /* Error handling */
1018 traces_error:
1019 return err;
1020 }
1021
1022 int ltt_trace_stop(const char *trace_name)
1023 {
1024 int err = 0;
1025 struct ltt_trace_struct *trace;
1026
1027 ltt_lock_traces();
1028 trace = _ltt_trace_find(trace_name);
1029 err = _ltt_trace_stop(trace);
1030 ltt_unlock_traces();
1031 return err;
1032 }
1033 //ust// EXPORT_SYMBOL_GPL(ltt_trace_stop);
1034
1035 /**
1036 * ltt_control - Trace control in-kernel API
1037 * @msg: Action to perform
1038 * @trace_name: Trace on which the action must be done
1039 * @trace_type: Type of trace (normal, flight, hybrid)
1040 * @args: Arguments specific to the action
1041 */
1042 //ust// int ltt_control(enum ltt_control_msg msg, const char *trace_name,
1043 //ust// const char *trace_type, union ltt_control_args args)
1044 //ust// {
1045 //ust// int err = -EPERM;
1046 //ust//
1047 //ust// printk(KERN_ALERT "ltt_control : trace %s\n", trace_name);
1048 //ust// switch (msg) {
1049 //ust// case LTT_CONTROL_START:
1050 //ust// printk(KERN_DEBUG "Start tracing %s\n", trace_name);
1051 //ust// err = ltt_trace_start(trace_name);
1052 //ust// break;
1053 //ust// case LTT_CONTROL_STOP:
1054 //ust// printk(KERN_DEBUG "Stop tracing %s\n", trace_name);
1055 //ust// err = ltt_trace_stop(trace_name);
1056 //ust// break;
1057 //ust// case LTT_CONTROL_CREATE_TRACE:
1058 //ust// printk(KERN_DEBUG "Creating trace %s\n", trace_name);
1059 //ust// err = ltt_trace_create(trace_name, trace_type,
1060 //ust// args.new_trace.mode,
1061 //ust// args.new_trace.subbuf_size_low,
1062 //ust// args.new_trace.n_subbufs_low,
1063 //ust// args.new_trace.subbuf_size_med,
1064 //ust// args.new_trace.n_subbufs_med,
1065 //ust// args.new_trace.subbuf_size_high,
1066 //ust// args.new_trace.n_subbufs_high);
1067 //ust// break;
1068 //ust// case LTT_CONTROL_DESTROY_TRACE:
1069 //ust// printk(KERN_DEBUG "Destroying trace %s\n", trace_name);
1070 //ust// err = ltt_trace_destroy(trace_name);
1071 //ust// break;
1072 //ust// }
1073 //ust// return err;
1074 //ust// }
1075 //ust// EXPORT_SYMBOL_GPL(ltt_control);
1076
1077 /**
1078 * ltt_filter_control - Trace filter control in-kernel API
1079 * @msg: Action to perform on the filter
1080 * @trace_name: Trace on which the action must be done
1081 */
1082 int ltt_filter_control(enum ltt_filter_control_msg msg, const char *trace_name)
1083 {
1084 int err;
1085 struct ltt_trace_struct *trace;
1086
1087 printk(KERN_DEBUG "ltt_filter_control : trace %s\n", trace_name);
1088 ltt_lock_traces();
1089 trace = _ltt_trace_find(trace_name);
1090 if (trace == NULL) {
1091 printk(KERN_ALERT
1092 "Trace does not exist. Cannot proxy control request\n");
1093 err = -ENOENT;
1094 goto trace_error;
1095 }
1096 //ust// if (!try_module_get(ltt_filter_control_owner)) {
1097 //ust// err = -ENODEV;
1098 //ust// goto get_module_error;
1099 //ust// }
1100 switch (msg) {
1101 case LTT_FILTER_DEFAULT_ACCEPT:
1102 printk(KERN_DEBUG
1103 "Proxy filter default accept %s\n", trace_name);
1104 err = (*ltt_filter_control_functor)(msg, trace);
1105 break;
1106 case LTT_FILTER_DEFAULT_REJECT:
1107 printk(KERN_DEBUG
1108 "Proxy filter default reject %s\n", trace_name);
1109 err = (*ltt_filter_control_functor)(msg, trace);
1110 break;
1111 default:
1112 err = -EPERM;
1113 }
1114 //ust// module_put(ltt_filter_control_owner);
1115
1116 //ust// get_module_error:
1117 trace_error:
1118 ltt_unlock_traces();
1119 return err;
1120 }
1121 //ust// EXPORT_SYMBOL_GPL(ltt_filter_control);
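/*
 * Illustrative call (hypothetical trace name): switch an existing trace
 * to the default-reject filter policy:
 *
 *	err = ltt_filter_control(LTT_FILTER_DEFAULT_REJECT, "auto");
 *
 * Any message other than LTT_FILTER_DEFAULT_ACCEPT/REJECT is refused
 * with -EPERM.
 */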
1122
1123 //ust// int __init ltt_init(void)
1124 //ust// {
1125 //ust// /* Make sure no page fault can be triggered by this module */
1126 //ust// vmalloc_sync_all();
1127 //ust// return 0;
1128 //ust// }
1129
1130 //ust// module_init(ltt_init)
1131
1132 //ust// static void __exit ltt_exit(void)
1133 //ust// {
1134 //ust// struct ltt_trace_struct *trace;
1135 //ust// struct list_head *pos, *n;
1136 //ust//
1137 //ust// ltt_lock_traces();
1138 //ust// /* Stop each trace, currently being read by RCU read-side */
1139 //ust// list_for_each_entry_rcu(trace, &ltt_traces.head, list)
1140 //ust// _ltt_trace_stop(trace);
1141 //ust// /* Wait for quiescent state. Readers have preemption disabled. */
1142 //ust// synchronize_sched();
1143 //ust// /* Safe iteration is now permitted. It does not have to be RCU-safe
1144 //ust// * because no readers are left. */
1145 //ust// list_for_each_safe(pos, n, &ltt_traces.head) {
1146 //ust// trace = container_of(pos, struct ltt_trace_struct, list);
1147 //ust// /* _ltt_trace_destroy does a synchronize_sched() */
1148 //ust// _ltt_trace_destroy(trace);
1149 //ust// __ltt_trace_destroy(trace);
1150 //ust// }
1151 //ust// /* free traces in pre-alloc status */
1152 //ust// list_for_each_safe(pos, n, &ltt_traces.setup_head) {
1153 //ust// trace = container_of(pos, struct ltt_trace_struct, list);
1154 //ust// _ltt_trace_free(trace);
1155 //ust// }
1156 //ust//
1157 //ust// ltt_unlock_traces();
1158 //ust// }
1159
1160 //ust// module_exit(ltt_exit)
1161
1162 //ust// MODULE_LICENSE("GPL");
1163 //ust// MODULE_AUTHOR("Mathieu Desnoyers");
1164 //ust// MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Tracer Kernel API");