add support for channel overwrite and non-collection
[ust.git] / libust / tracer.c
1 /*
2 * tracer.c
3 *
4 * (C) Copyright 2005-2008 -
5 * Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 *
21 * Inspired from LTT :
22 * Karim Yaghmour (karim@opersys.com)
23 * Tom Zanussi (zanussi@us.ibm.com)
24 * Bob Wisniewski (bob@watson.ibm.com)
25 * And from K42 :
26 * Bob Wisniewski (bob@watson.ibm.com)
27 *
28 * Changelog:
29 * 22/09/06, Move to the marker/probes mechanism.
30 * 19/10/05, Complete lockless mechanism.
31 * 27/05/05, Modular redesign and rewrite.
32 */
33
34 #include <urcu-bp.h>
35 #include <urcu/rculist.h>
36
37 #include <ust/kernelcompat.h>
38 #include "tracercore.h"
39 #include "tracer.h"
40 #include "usterr.h"
41
42 //ust// static void async_wakeup(unsigned long data);
43 //ust//
44 //ust// static DEFINE_TIMER(ltt_async_wakeup_timer, async_wakeup, 0, 0);
45
46 /* Default callbacks for modules */
/*
 * Default filter-control callback: accepts every message and does nothing.
 * Installed until a filter module registers its own handler.
 */
notrace int ltt_filter_control_default(enum ltt_filter_control_msg msg,
		struct ust_trace *trace)
{
	return 0;
}
52
/*
 * Default state-dump callback: a no-op until a state-dump module
 * registers its own implementation.
 */
int ltt_statedump_default(struct ust_trace *trace)
{
	return 0;
}
57
58 /* Callbacks for registered modules */
59
/* Filter-control callback, swappable at runtime; starts as the no-op default. */
int (*ltt_filter_control_functor)
	(enum ltt_filter_control_msg msg, struct ust_trace *trace) =
					ltt_filter_control_default;
struct module *ltt_filter_control_owner;

/* These function pointers are protected by a trace activation check */
struct module *ltt_run_filter_owner;
/* State-dump callback; starts as the no-op default. */
int (*ltt_statedump_functor)(struct ust_trace *trace) =
					ltt_statedump_default;
struct module *ltt_statedump_owner;
70
71 struct chan_info_struct {
72 const char *name;
73 unsigned int def_subbufsize;
74 unsigned int def_subbufcount;
75 } chan_infos[] = {
76 [LTT_CHANNEL_METADATA] = {
77 LTT_METADATA_CHANNEL,
78 LTT_DEFAULT_SUBBUF_SIZE_LOW,
79 LTT_DEFAULT_N_SUBBUFS_LOW,
80 },
81 [LTT_CHANNEL_UST] = {
82 LTT_UST_CHANNEL,
83 LTT_DEFAULT_SUBBUF_SIZE_HIGH,
84 LTT_DEFAULT_N_SUBBUFS_HIGH,
85 },
86 };
87
88 static enum ltt_channels get_channel_type_from_name(const char *name)
89 {
90 int i;
91
92 if (!name)
93 return LTT_CHANNEL_UST;
94
95 for (i = 0; i < ARRAY_SIZE(chan_infos); i++)
96 if (chan_infos[i].name && !strcmp(name, chan_infos[i].name))
97 return (enum ltt_channels)i;
98
99 return LTT_CHANNEL_UST;
100 }
101
102 /**
103 * ltt_module_register - LTT module registration
104 * @name: module type
105 * @function: callback to register
106 * @owner: module which owns the callback
107 *
108 * The module calling this registration function must ensure that no
109 * trap-inducing code will be executed by "function". E.g. vmalloc_sync_all()
110 * must be called between a vmalloc and the moment the memory is made visible to
111 * "function". This registration acts as a vmalloc_sync_all. Therefore, only if
112 * the module allocates virtual memory after its registration must it
113 * synchronize the TLBs.
114 */
115 //ust// int ltt_module_register(enum ltt_module_function name, void *function,
116 //ust// struct module *owner)
117 //ust// {
118 //ust// int ret = 0;
119 //ust//
120 //ust// /*
121 //ust// * Make sure no page fault can be triggered by the module about to be
122 //ust// * registered. We deal with this here so we don't have to call
123 //ust// * vmalloc_sync_all() in each module's init.
124 //ust// */
125 //ust// vmalloc_sync_all();
126 //ust//
127 //ust// switch (name) {
128 //ust// case LTT_FUNCTION_RUN_FILTER:
129 //ust// if (ltt_run_filter_owner != NULL) {
130 //ust// ret = -EEXIST;
131 //ust// goto end;
132 //ust// }
133 //ust// ltt_filter_register((ltt_run_filter_functor)function);
134 //ust// ltt_run_filter_owner = owner;
135 //ust// break;
136 //ust// case LTT_FUNCTION_FILTER_CONTROL:
137 //ust// if (ltt_filter_control_owner != NULL) {
138 //ust// ret = -EEXIST;
139 //ust// goto end;
140 //ust// }
141 //ust// ltt_filter_control_functor =
142 //ust// (int (*)(enum ltt_filter_control_msg,
143 //ust// struct ust_trace *))function;
144 //ust// ltt_filter_control_owner = owner;
145 //ust// break;
146 //ust// case LTT_FUNCTION_STATEDUMP:
147 //ust// if (ltt_statedump_owner != NULL) {
148 //ust// ret = -EEXIST;
149 //ust// goto end;
150 //ust// }
151 //ust// ltt_statedump_functor =
152 //ust// (int (*)(struct ust_trace *))function;
153 //ust// ltt_statedump_owner = owner;
154 //ust// break;
155 //ust// }
156 //ust//
157 //ust// end:
158 //ust//
159 //ust// return ret;
160 //ust// }
161
162 /**
163 * ltt_module_unregister - LTT module unregistration
164 * @name: module type
165 */
166 //ust// void ltt_module_unregister(enum ltt_module_function name)
167 //ust// {
168 //ust// switch (name) {
169 //ust// case LTT_FUNCTION_RUN_FILTER:
170 //ust// ltt_filter_unregister();
171 //ust// ltt_run_filter_owner = NULL;
172 //ust// /* Wait for preempt sections to finish */
173 //ust// synchronize_sched();
174 //ust// break;
175 //ust// case LTT_FUNCTION_FILTER_CONTROL:
176 //ust// ltt_filter_control_functor = ltt_filter_control_default;
177 //ust// ltt_filter_control_owner = NULL;
178 //ust// break;
179 //ust// case LTT_FUNCTION_STATEDUMP:
180 //ust// ltt_statedump_functor = ltt_statedump_default;
181 //ust// ltt_statedump_owner = NULL;
182 //ust// break;
183 //ust// }
184 //ust//
185 //ust// }
186
187 static LIST_HEAD(ltt_transport_list);
188
189 /**
190 * ltt_transport_register - LTT transport registration
191 * @transport: transport structure
192 *
193 * Registers a transport which can be used as output to extract the data out of
194 * LTTng. The module calling this registration function must ensure that no
195 * trap-inducing code will be executed by the transport functions. E.g.
196 * vmalloc_sync_all() must be called between a vmalloc and the moment the memory
197 * is made visible to the transport function. This registration acts as a
198 * vmalloc_sync_all. Therefore, only if the module allocates virtual memory
199 * after its registration must it synchronize the TLBs.
200 */
void ltt_transport_register(struct ltt_transport *transport)
{
	/*
	 * Make sure no page fault can be triggered by the module about to be
	 * registered. We deal with this here so we don't have to call
	 * vmalloc_sync_all() in each module's init.
	 */
//ust//	vmalloc_sync_all();

	/* The traces lock also guards the transport list. */
	ltt_lock_traces();
	list_add_tail(&transport->node, &ltt_transport_list);
	ltt_unlock_traces();
}
214
215 /**
216 * ltt_transport_unregister - LTT transport unregistration
217 * @transport: transport structure
218 */
void ltt_transport_unregister(struct ltt_transport *transport)
{
	/* Unlink under the traces lock so no new trace can select it. */
	ltt_lock_traces();
	list_del(&transport->node);
	ltt_unlock_traces();
}
225
226 static inline int is_channel_overwrite(enum ltt_channels chan,
227 enum trace_mode mode)
228 {
229 switch (mode) {
230 case LTT_TRACE_NORMAL:
231 return 0;
232 case LTT_TRACE_FLIGHT:
233 switch (chan) {
234 case LTT_CHANNEL_METADATA:
235 return 0;
236 default:
237 return 1;
238 }
239 case LTT_TRACE_HYBRID:
240 switch (chan) {
241 case LTT_CHANNEL_METADATA:
242 return 0;
243 default:
244 return 1;
245 }
246 default:
247 return 0;
248 }
249 }
250
251 static void trace_async_wakeup(struct ust_trace *trace)
252 {
253 int i;
254 struct ust_channel *chan;
255
256 /* Must check each channel for pending read wakeup */
257 for (i = 0; i < trace->nr_channels; i++) {
258 chan = &trace->channels[i];
259 if (chan->active)
260 trace->ops->wakeup_channel(chan);
261 }
262 }
263
264 //ust// /* Timer to send async wakeups to the readers */
265 //ust// static void async_wakeup(unsigned long data)
266 //ust// {
267 //ust// struct ust_trace *trace;
268 //ust//
269 //ust// /*
270 //ust// * PREEMPT_RT does not allow spinlocks to be taken within preempt
271 //ust// * disable sections (spinlock taken in wake_up). However, mainline won't
272 //ust// * allow mutex to be taken in interrupt context. Ugly.
273 //ust// * A proper way to do this would be to turn the timer into a
274 //ust// * periodically woken up thread, but it adds to the footprint.
275 //ust// */
276 //ust// #ifndef CONFIG_PREEMPT_RT
277 //ust// rcu_read_lock_sched();
278 //ust// #else
279 //ust// ltt_lock_traces();
280 //ust// #endif
281 //ust// list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
282 //ust// trace_async_wakeup(trace);
283 //ust// }
284 //ust// #ifndef CONFIG_PREEMPT_RT
285 //ust// rcu_read_unlock_sched();
286 //ust// #else
287 //ust// ltt_unlock_traces();
288 //ust// #endif
289 //ust//
290 //ust// mod_timer(&ltt_async_wakeup_timer, jiffies + LTT_PERCPU_TIMER_INTERVAL);
291 //ust// }
292
293 /**
294 * _ltt_trace_find - find a trace by given name.
295 * trace_name: trace name
296 *
297 * Returns a pointer to the trace structure, NULL if not found.
298 */
299 struct ust_trace *_ltt_trace_find(const char *trace_name)
300 {
301 struct ust_trace *trace;
302
303 list_for_each_entry(trace, &ltt_traces.head, list)
304 if (!strncmp(trace->trace_name, trace_name, NAME_MAX))
305 return trace;
306
307 return NULL;
308 }
309
310 /* _ltt_trace_find_setup :
311 * find a trace in setup list by given name.
312 *
313 * Returns a pointer to the trace structure, NULL if not found.
314 */
315 struct ust_trace *_ltt_trace_find_setup(const char *trace_name)
316 {
317 struct ust_trace *trace;
318
319 list_for_each_entry(trace, &ltt_traces.setup_head, list)
320 if (!strncmp(trace->trace_name, trace_name, NAME_MAX))
321 return trace;
322
323 return NULL;
324 }
325
326 /**
327 * ltt_release_transport - Release an LTT transport
328 * @kref : reference count on the transport
329 */
void ltt_release_transport(struct kref *kref)
{
	/* No-op in UST: directory removal only applies to the kernel tracer. */
//ust//	struct ust_trace *trace = container_of(kref,
//ust//			struct ust_trace, ltt_transport_kref);
//ust//	trace->ops->remove_dirs(trace);
}
336
337 /**
338 * ltt_release_trace - Release a LTT trace
339 * @kref : reference count on the trace
340 */
void ltt_release_trace(struct kref *kref)
{
	struct ust_trace *trace = container_of(kref,
			struct ust_trace, kref);
	/* Release the channel array before the trace structure itself. */
	ltt_channels_trace_free(trace->channels);
	free(trace);
}
348
/*
 * Normalize the requested subbuffer geometry in place: enforce a
 * minimum of one page for the subbuffer size, then round both the size
 * and the count up to the next power of two, as required by the
 * lockless buffering scheme.
 */
static inline void prepare_chan_size_num(unsigned int *subbuf_size,
		unsigned int *n_subbufs)
{
	/* Make sure the subbuffer size is larger than a page */
	*subbuf_size = max_t(unsigned int, *subbuf_size, PAGE_SIZE);

	/* round to next power of 2 */
	*subbuf_size = 1 << get_count_order(*subbuf_size);
	*n_subbufs = 1 << get_count_order(*n_subbufs);

	/* Subbuf size and number must both be power of two */
	WARN_ON(hweight32(*subbuf_size) != 1);
	WARN_ON(hweight32(*n_subbufs) != 1);
}
363
364 int _ltt_trace_setup(const char *trace_name)
365 {
366 int err = 0;
367 struct ust_trace *new_trace = NULL;
368 int metadata_index;
369 unsigned int chan;
370 enum ltt_channels chantype;
371
372 if (_ltt_trace_find_setup(trace_name)) {
373 ERR("Trace name %s already used", trace_name);
374 err = -EEXIST;
375 goto traces_error;
376 }
377
378 if (_ltt_trace_find(trace_name)) {
379 ERR("Trace name %s already used", trace_name);
380 err = -EEXIST;
381 goto traces_error;
382 }
383
384 new_trace = zmalloc(sizeof(struct ust_trace));
385 if (!new_trace) {
386 ERR("Unable to allocate memory for trace %s", trace_name);
387 err = -ENOMEM;
388 goto traces_error;
389 }
390 strncpy(new_trace->trace_name, trace_name, NAME_MAX);
391 new_trace->channels = ltt_channels_trace_alloc(&new_trace->nr_channels,
392 ust_channels_overwrite_by_default,
393 ust_channels_request_collection_by_default, 1);
394 if (!new_trace->channels) {
395 ERR("Unable to allocate memory for chaninfo %s\n", trace_name);
396 err = -ENOMEM;
397 goto trace_free;
398 }
399
400 /*
401 * Force metadata channel to active, no overwrite.
402 */
403 metadata_index = ltt_channels_get_index_from_name("metadata");
404 WARN_ON(metadata_index < 0);
405 new_trace->channels[metadata_index].overwrite = 0;
406 new_trace->channels[metadata_index].active = 1;
407
408 /*
409 * Set hardcoded tracer defaults for some channels
410 */
411 for (chan = 0; chan < new_trace->nr_channels; chan++) {
412 if (!(new_trace->channels[chan].active))
413 continue;
414
415 chantype = get_channel_type_from_name(
416 ltt_channels_get_name_from_index(chan));
417 new_trace->channels[chan].subbuf_size =
418 chan_infos[chantype].def_subbufsize;
419 new_trace->channels[chan].subbuf_cnt =
420 chan_infos[chantype].def_subbufcount;
421 }
422
423 list_add(&new_trace->list, &ltt_traces.setup_head);
424 return 0;
425
426 trace_free:
427 free(new_trace);
428 traces_error:
429 return err;
430 }
431
432
/*
 * Public entry point: take the traces lock and create a trace in the
 * setup list.  See _ltt_trace_setup() for return values.
 */
int ltt_trace_setup(const char *trace_name)
{
	int ret;
	ltt_lock_traces();
	ret = _ltt_trace_setup(trace_name);
	ltt_unlock_traces();
	return ret;
}
441
/* must be called from within a traces lock. */
static void _ltt_trace_free(struct ust_trace *trace)
{
	/* Unlink from the setup list and release the trace structure. */
	list_del(&trace->list);
	free(trace);
}
448
449 int ltt_trace_set_type(const char *trace_name, const char *trace_type)
450 {
451 int err = 0;
452 struct ust_trace *trace;
453 struct ltt_transport *tran_iter, *transport = NULL;
454
455 ltt_lock_traces();
456
457 trace = _ltt_trace_find_setup(trace_name);
458 if (!trace) {
459 ERR("Trace not found %s", trace_name);
460 err = -ENOENT;
461 goto traces_error;
462 }
463
464 list_for_each_entry(tran_iter, &ltt_transport_list, node) {
465 if (!strcmp(tran_iter->name, trace_type)) {
466 transport = tran_iter;
467 break;
468 }
469 }
470 if (!transport) {
471 ERR("Transport %s is not present", trace_type);
472 err = -EINVAL;
473 goto traces_error;
474 }
475
476 trace->transport = transport;
477
478 traces_error:
479 ltt_unlock_traces();
480 return err;
481 }
482
483 int ltt_trace_set_channel_subbufsize(const char *trace_name,
484 const char *channel_name, unsigned int size)
485 {
486 int err = 0;
487 struct ust_trace *trace;
488 int index;
489
490 ltt_lock_traces();
491
492 trace = _ltt_trace_find_setup(trace_name);
493 if (!trace) {
494 ERR("Trace not found %s", trace_name);
495 err = -ENOENT;
496 goto traces_error;
497 }
498
499 index = ltt_channels_get_index_from_name(channel_name);
500 if (index < 0) {
501 ERR("Channel %s not found", channel_name);
502 err = -ENOENT;
503 goto traces_error;
504 }
505 trace->channels[index].subbuf_size = size;
506
507 traces_error:
508 ltt_unlock_traces();
509 return err;
510 }
511
512 int ltt_trace_set_channel_subbufcount(const char *trace_name,
513 const char *channel_name, unsigned int cnt)
514 {
515 int err = 0;
516 struct ust_trace *trace;
517 int index;
518
519 ltt_lock_traces();
520
521 trace = _ltt_trace_find_setup(trace_name);
522 if (!trace) {
523 ERR("Trace not found %s", trace_name);
524 err = -ENOENT;
525 goto traces_error;
526 }
527
528 index = ltt_channels_get_index_from_name(channel_name);
529 if (index < 0) {
530 ERR("Channel %s not found", channel_name);
531 err = -ENOENT;
532 goto traces_error;
533 }
534 trace->channels[index].subbuf_cnt = cnt;
535
536 traces_error:
537 ltt_unlock_traces();
538 return err;
539 }
540
541 int ltt_trace_set_channel_enable(const char *trace_name,
542 const char *channel_name, unsigned int enable)
543 {
544 int err = 0;
545 struct ust_trace *trace;
546 int index;
547
548 ltt_lock_traces();
549
550 trace = _ltt_trace_find_setup(trace_name);
551 if (!trace) {
552 ERR("Trace not found %s", trace_name);
553 err = -ENOENT;
554 goto traces_error;
555 }
556
557 /*
558 * Datas in metadata channel(marker info) is necessary to be able to
559 * read the trace, we always enable this channel.
560 */
561 if (!enable && !strcmp(channel_name, "metadata")) {
562 ERR("Trying to disable metadata channel");
563 err = -EINVAL;
564 goto traces_error;
565 }
566
567 index = ltt_channels_get_index_from_name(channel_name);
568 if (index < 0) {
569 ERR("Channel %s not found", channel_name);
570 err = -ENOENT;
571 goto traces_error;
572 }
573
574 trace->channels[index].active = enable;
575
576 traces_error:
577 ltt_unlock_traces();
578 return err;
579 }
580
581 int ltt_trace_set_channel_overwrite(const char *trace_name,
582 const char *channel_name, unsigned int overwrite)
583 {
584 int err = 0;
585 struct ust_trace *trace;
586 int index;
587
588 ltt_lock_traces();
589
590 trace = _ltt_trace_find_setup(trace_name);
591 if (!trace) {
592 ERR("Trace not found %s", trace_name);
593 err = -ENOENT;
594 goto traces_error;
595 }
596
597 /*
598 * Always put the metadata channel in non-overwrite mode :
599 * This is a very low traffic channel and it can't afford to have its
600 * data overwritten : this data (marker info) is necessary to be
601 * able to read the trace.
602 */
603 if (overwrite && !strcmp(channel_name, "metadata")) {
604 ERR("Trying to set metadata channel to overwrite mode");
605 err = -EINVAL;
606 goto traces_error;
607 }
608
609 index = ltt_channels_get_index_from_name(channel_name);
610 if (index < 0) {
611 ERR("Channel %s not found", channel_name);
612 err = -ENOENT;
613 goto traces_error;
614 }
615
616 trace->channels[index].overwrite = overwrite;
617
618 traces_error:
619 ltt_unlock_traces();
620 return err;
621 }
622
/*
 * Allocate the buffers of a trace that has been set up and move it from
 * the setup list to the active trace list.  Returns 0 on success, 1 if
 * the trace was already allocated, or a negative errno on failure.
 */
int ltt_trace_alloc(const char *trace_name)
{
	int err = 0;
	struct ust_trace *trace;
	unsigned int subbuf_size, subbuf_cnt;
//ust//	unsigned long flags;
	int chan;
	const char *channel_name;

	ltt_lock_traces();

	if (_ltt_trace_find(trace_name)) { /* Trace already allocated */
		err = 1;
		goto traces_error;
	}

	trace = _ltt_trace_find_setup(trace_name);
	if (!trace) {
		ERR("Trace not found %s", trace_name);
		err = -ENOENT;
		goto traces_error;
	}

	kref_init(&trace->kref);
	kref_init(&trace->ltt_transport_kref);
//ust//	init_waitqueue_head(&trace->kref_wq);
	trace->active = 0;
//ust//	get_trace_clock();
	trace->freq_scale = trace_clock_freq_scale();

	/* A transport must have been selected via ltt_trace_set_type(). */
	if (!trace->transport) {
		ERR("Transport is not set");
		err = -EINVAL;
		goto transport_error;
	}
//ust//	if (!try_module_get(trace->transport->owner)) {
//ust//		ERR("Can't lock transport module");
//ust//		err = -ENODEV;
//ust//		goto transport_error;
//ust//	}
	trace->ops = &trace->transport->ops;

//ust//	err = trace->ops->create_dirs(trace);
//ust//	if (err) {
//ust//		ERR("Can't create dir for trace %s", trace_name);
//ust//		goto dirs_error;
//ust//	}

//ust//	local_irq_save(flags);
	/* Record the clock state at trace start time. */
	trace->start_freq = trace_clock_frequency();
	trace->start_tsc = trace_clock_read64();
	gettimeofday(&trace->start_time, NULL); //ust// changed /* FIXME: is this ok? */
//ust//	local_irq_restore(flags);

	/* Create the buffers of every active channel. */
	for (chan = 0; chan < trace->nr_channels; chan++) {
		if (!(trace->channels[chan].active))
			continue;

		channel_name = ltt_channels_get_name_from_index(chan);
		WARN_ON(!channel_name);
		subbuf_size = trace->channels[chan].subbuf_size;
		subbuf_cnt = trace->channels[chan].subbuf_cnt;
		prepare_chan_size_num(&subbuf_size, &subbuf_cnt);
		err = trace->ops->create_channel(trace_name, trace,
				channel_name,
				&trace->channels[chan],
				subbuf_size,
				subbuf_cnt,
				trace->channels[chan].overwrite);
		if (err != 0) {
			ERR("Cannot create channel %s", channel_name);
			goto create_channel_error;
		}
	}

	/* Success: migrate the trace from the setup list to the active list. */
	list_del(&trace->list);
//ust//	if (list_empty(&ltt_traces.head)) {
//ust//		mod_timer(&ltt_async_wakeup_timer,
//ust//				jiffies + LTT_PERCPU_TIMER_INTERVAL);
//ust//		set_kernel_trace_flag_all_tasks();
//ust//	}
	list_add_rcu(&trace->list, &ltt_traces.head);
//ust//	synchronize_sched();

	ltt_unlock_traces();

	return 0;

create_channel_error:
	/* Tear down the channels already created, in reverse order. */
	for (chan--; chan >= 0; chan--)
		if (trace->channels[chan].active)
			trace->ops->remove_channel(&trace->channels[chan]);

//ust//	dirs_error:
//ust//	module_put(trace->transport->owner);
transport_error:
//ust//	put_trace_clock();
traces_error:
	ltt_unlock_traces();
	return err;
}
724
725 /*
726 * It is worked as a wrapper for current version of ltt_control.ko.
727 * We will make a new ltt_control based on debugfs, and control each channel's
728 * buffer.
729 */
730 //ust// static int ltt_trace_create(const char *trace_name, const char *trace_type,
731 //ust// enum trace_mode mode,
732 //ust// unsigned int subbuf_size_low, unsigned int n_subbufs_low,
733 //ust// unsigned int subbuf_size_med, unsigned int n_subbufs_med,
734 //ust// unsigned int subbuf_size_high, unsigned int n_subbufs_high)
735 //ust// {
736 //ust// int err = 0;
737 //ust//
738 //ust// err = ltt_trace_setup(trace_name);
739 //ust// if (IS_ERR_VALUE(err))
740 //ust// return err;
741 //ust//
742 //ust// err = ltt_trace_set_type(trace_name, trace_type);
743 //ust// if (IS_ERR_VALUE(err))
744 //ust// return err;
745 //ust//
746 //ust// err = ltt_trace_alloc(trace_name);
747 //ust// if (IS_ERR_VALUE(err))
748 //ust// return err;
749 //ust//
750 //ust// return err;
751 //ust// }
752
/* Must be called while sure that trace is in the list. */
static int _ltt_trace_destroy(struct ust_trace *trace)
{
	int err = -EPERM;

	if (trace == NULL) {
		err = -ENOENT;
		goto traces_error;
	}
	/* An active trace must be stopped before it can be destroyed. */
	if (trace->active) {
		ERR("Can't destroy trace %s : tracer is active", trace->trace_name);
		err = -EBUSY;
		goto active_error;
	}
	/* Everything went fine */
	list_del_rcu(&trace->list);
	/* Wait for in-flight tracepoint readers before teardown proceeds. */
	synchronize_rcu();
	if (list_empty(&ltt_traces.head)) {
//ust//		clear_kernel_trace_flag_all_tasks();
		/*
		 * We stop the asynchronous delivery of reader wakeup, but
		 * we must make one last check for reader wakeups pending
		 * later in __ltt_trace_destroy.
		 */
//ust//		del_timer_sync(&ltt_async_wakeup_timer);
	}
	return 0;

	/* error handling */
active_error:
traces_error:
	return err;
}
786
/* Sleepable part of the destroy */
static void __ltt_trace_destroy(struct ust_trace *trace, int drop)
{
	int i;
	struct ust_channel *chan;

	/* Unless dropping, flush each active channel's pending data. */
	if(!drop) {
		for (i = 0; i < trace->nr_channels; i++) {
			chan = &trace->channels[i];
			if (chan->active)
				trace->ops->finish_channel(chan);
		}
	}

	/* NOTE(review): everything below this early return is dead code,
	 * kept from the kernel-tracer teardown path. */
	return; /* FIXME: temporary for ust */
//ust//	flush_scheduled_work();

	/*
	 * The currently destroyed trace is not in the trace list anymore,
	 * so it's safe to call the async wakeup ourself. It will deliver
	 * the last subbuffers.
	 */
	trace_async_wakeup(trace);

	for (i = 0; i < trace->nr_channels; i++) {
		chan = &trace->channels[i];
		if (chan->active)
			trace->ops->remove_channel(chan);
	}

	kref_put(&trace->ltt_transport_kref, ltt_release_transport);

//ust//	module_put(trace->transport->owner);

	/*
	 * Wait for lttd readers to release the files, therefore making sure
	 * the last subbuffers have been read.
	 */
//ust//	if (atomic_read(&trace->kref.refcount) > 1) {
//ust//		int ret = 0;
//ust//		__wait_event_interruptible(trace->kref_wq,
//ust//			(atomic_read(&trace->kref.refcount) == 1), ret);
//ust//	}
	kref_put(&trace->kref, ltt_release_trace);
}
832
/*
 * Destroy a trace by name.  Handles both fully-allocated traces
 * (unlinked under the lock, then torn down sleepably) and traces still
 * in the setup list (simply freed).  @drop skips flushing buffered
 * data.  Returns 0 on success, -ENOENT if no such trace exists.
 */
int ltt_trace_destroy(const char *trace_name, int drop)
{
	int err = 0;
	struct ust_trace *trace;

	ltt_lock_traces();

	trace = _ltt_trace_find(trace_name);
	if (trace) {
		err = _ltt_trace_destroy(trace);
		if (err)
			goto error;

		/* Drop the lock: the sleepable teardown must run unlocked. */
		ltt_unlock_traces();

		__ltt_trace_destroy(trace, drop);
//ust//		put_trace_clock();

		return 0;
	}

	trace = _ltt_trace_find_setup(trace_name);
	if (trace) {
		_ltt_trace_free(trace);
		ltt_unlock_traces();
		return 0;
	}

	err = -ENOENT;

	/* Error handling */
error:
	ltt_unlock_traces();
	return err;
}
868
869 /* must be called from within a traces lock. */
870 static int _ltt_trace_start(struct ust_trace *trace)
871 {
872 int err = 0;
873
874 if (trace == NULL) {
875 err = -ENOENT;
876 goto traces_error;
877 }
878 if (trace->active)
879 DBG("Tracing already active for trace %s", trace->trace_name);
880 //ust// if (!try_module_get(ltt_run_filter_owner)) {
881 //ust// err = -ENODEV;
882 //ust// ERR("Cannot lock filter module");
883 //ust// goto get_ltt_run_filter_error;
884 //ust// }
885 trace->active = 1;
886 /* Read by trace points without protection : be careful */
887 ltt_traces.num_active_traces++;
888 return err;
889
890 /* error handling */
891 //ust// get_ltt_run_filter_error:
892 traces_error:
893 return err;
894 }
895
/*
 * Activate tracing for a trace, then dump marker and program state so
 * the resulting trace is self-describing.  Returns 0 on success,
 * -ENOENT if the trace does not exist.
 */
int ltt_trace_start(const char *trace_name)
{
	int err = 0;
	struct ust_trace *trace;

	ltt_lock_traces();

	trace = _ltt_trace_find(trace_name);
	err = _ltt_trace_start(trace);
	if (err)
		goto no_trace;

	/* Unlock before the dumps: they log through the normal trace path. */
	ltt_unlock_traces();

	/*
	 * Call the kernel state dump.
	 * Events will be mixed with real kernel events, it's ok.
	 * Notice that there is no protection on the trace : that's exactly
	 * why we iterate on the list and check for trace equality instead of
	 * directly using this trace handle inside the logging function.
	 */

	ltt_dump_marker_state(trace);

//ust//	if (!try_module_get(ltt_statedump_owner)) {
//ust//		err = -ENODEV;
//ust//		ERR("Cannot lock state dump module");
//ust//	} else {
		ltt_statedump_functor(trace);
//ust//		module_put(ltt_statedump_owner);
//ust//	}

	return err;

	/* Error handling */
no_trace:
	ltt_unlock_traces();
	return err;
}
935
936 /* must be called from within traces lock */
937 static int _ltt_trace_stop(struct ust_trace *trace)
938 {
939 int err = -EPERM;
940
941 if (trace == NULL) {
942 err = -ENOENT;
943 goto traces_error;
944 }
945 if (!trace->active)
946 DBG("LTT : Tracing not active for trace %s", trace->trace_name);
947 if (trace->active) {
948 trace->active = 0;
949 ltt_traces.num_active_traces--;
950 //ust// synchronize_sched(); /* Wait for each tracing to be finished */
951 }
952 //ust// module_put(ltt_run_filter_owner);
953 /* Everything went fine */
954 return 0;
955
956 /* Error handling */
957 traces_error:
958 return err;
959 }
960
/*
 * Stop tracing for a trace by name.  Returns 0 on success, -ENOENT if
 * the trace is not in the active list.
 */
int ltt_trace_stop(const char *trace_name)
{
	int err = 0;
	struct ust_trace *trace;

	ltt_lock_traces();
	trace = _ltt_trace_find(trace_name);
	err = _ltt_trace_stop(trace);
	ltt_unlock_traces();
	return err;
}
972
973 /**
974 * ltt_filter_control - Trace filter control in-kernel API
975 * @msg: Action to perform on the filter
976 * @trace_name: Trace on which the action must be done
977 */
978 int ltt_filter_control(enum ltt_filter_control_msg msg, const char *trace_name)
979 {
980 int err;
981 struct ust_trace *trace;
982
983 DBG("ltt_filter_control : trace %s", trace_name);
984 ltt_lock_traces();
985 trace = _ltt_trace_find(trace_name);
986 if (trace == NULL) {
987 ERR("Trace does not exist. Cannot proxy control request");
988 err = -ENOENT;
989 goto trace_error;
990 }
991 //ust// if (!try_module_get(ltt_filter_control_owner)) {
992 //ust// err = -ENODEV;
993 //ust// goto get_module_error;
994 //ust// }
995 switch (msg) {
996 case LTT_FILTER_DEFAULT_ACCEPT:
997 DBG("Proxy filter default accept %s", trace_name);
998 err = (*ltt_filter_control_functor)(msg, trace);
999 break;
1000 case LTT_FILTER_DEFAULT_REJECT:
1001 DBG("Proxy filter default reject %s", trace_name);
1002 err = (*ltt_filter_control_functor)(msg, trace);
1003 break;
1004 default:
1005 err = -EPERM;
1006 }
1007 //ust// module_put(ltt_filter_control_owner);
1008
1009 //ust// get_module_error:
1010 trace_error:
1011 ltt_unlock_traces();
1012 return err;
1013 }
This page took 0.050147 seconds and 4 git commands to generate.