update README
[ust.git] / libust / tracer.c
... / ...
CommitLineData
1/*
2 * tracer.c
3 *
4 * (C) Copyright 2005-2008 -
5 * Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 *
21 * Inspired from LTT :
22 * Karim Yaghmour (karim@opersys.com)
23 * Tom Zanussi (zanussi@us.ibm.com)
24 * Bob Wisniewski (bob@watson.ibm.com)
25 * And from K42 :
26 * Bob Wisniewski (bob@watson.ibm.com)
27 *
28 * Changelog:
29 * 22/09/06, Move to the marker/probes mechanism.
30 * 19/10/05, Complete lockless mechanism.
31 * 27/05/05, Modular redesign and rewrite.
32 */
33
34#include <urcu-bp.h>
35#include <urcu/rculist.h>
36
37#include <ust/clock.h>
38
39#include "tracercore.h"
40#include "tracer.h"
41#include "usterr.h"
42
43//ust// static void async_wakeup(unsigned long data);
44//ust//
45//ust// static DEFINE_TIMER(ltt_async_wakeup_timer, async_wakeup, 0, 0);
46
47/* Default callbacks for modules */
/*
 * Default filter-control callback, used while no filter module has
 * registered its own functor: accept every message by doing nothing.
 */
notrace int ltt_filter_control_default(enum ltt_filter_control_msg msg,
		struct ust_trace *trace)
{
	return 0;
}
53
/*
 * Default state-dump callback: a no-op until a statedump module
 * registers a real implementation via ltt_statedump_functor.
 */
int ltt_statedump_default(struct ust_trace *trace)
{
	return 0;
}
58
/* Callbacks for registered modules */

/* Filter-control entry point; swapped in when a filter module registers. */
int (*ltt_filter_control_functor)
	(enum ltt_filter_control_msg msg, struct ust_trace *trace) =
					ltt_filter_control_default;
struct module *ltt_filter_control_owner;

/* These function pointers are protected by a trace activation check */
struct module *ltt_run_filter_owner;
/* State-dump entry point; swapped in when a statedump module registers. */
int (*ltt_statedump_functor)(struct ust_trace *trace) =
					ltt_statedump_default;
struct module *ltt_statedump_owner;
71
72struct chan_info_struct {
73 const char *name;
74 unsigned int def_subbufsize;
75 unsigned int def_subbufcount;
76} chan_infos[] = {
77 [LTT_CHANNEL_METADATA] = {
78 LTT_METADATA_CHANNEL,
79 LTT_DEFAULT_SUBBUF_SIZE_LOW,
80 LTT_DEFAULT_N_SUBBUFS_LOW,
81 },
82 [LTT_CHANNEL_UST] = {
83 LTT_UST_CHANNEL,
84 LTT_DEFAULT_SUBBUF_SIZE_HIGH,
85 LTT_DEFAULT_N_SUBBUFS_HIGH,
86 },
87};
88
89static enum ltt_channels get_channel_type_from_name(const char *name)
90{
91 int i;
92
93 if (!name)
94 return LTT_CHANNEL_UST;
95
96 for (i = 0; i < ARRAY_SIZE(chan_infos); i++)
97 if (chan_infos[i].name && !strcmp(name, chan_infos[i].name))
98 return (enum ltt_channels)i;
99
100 return LTT_CHANNEL_UST;
101}
102
103/**
104 * ltt_module_register - LTT module registration
105 * @name: module type
106 * @function: callback to register
107 * @owner: module which owns the callback
108 *
109 * The module calling this registration function must ensure that no
110 * trap-inducing code will be executed by "function". E.g. vmalloc_sync_all()
111 * must be called between a vmalloc and the moment the memory is made visible to
112 * "function". This registration acts as a vmalloc_sync_all. Therefore, only if
113 * the module allocates virtual memory after its registration must it
114 * synchronize the TLBs.
115 */
116//ust// int ltt_module_register(enum ltt_module_function name, void *function,
117//ust// struct module *owner)
118//ust// {
119//ust// int ret = 0;
120//ust//
121//ust// /*
122//ust// * Make sure no page fault can be triggered by the module about to be
123//ust// * registered. We deal with this here so we don't have to call
124//ust// * vmalloc_sync_all() in each module's init.
125//ust// */
126//ust// vmalloc_sync_all();
127//ust//
128//ust// switch (name) {
129//ust// case LTT_FUNCTION_RUN_FILTER:
130//ust// if (ltt_run_filter_owner != NULL) {
131//ust// ret = -EEXIST;
132//ust// goto end;
133//ust// }
134//ust// ltt_filter_register((ltt_run_filter_functor)function);
135//ust// ltt_run_filter_owner = owner;
136//ust// break;
137//ust// case LTT_FUNCTION_FILTER_CONTROL:
138//ust// if (ltt_filter_control_owner != NULL) {
139//ust// ret = -EEXIST;
140//ust// goto end;
141//ust// }
142//ust// ltt_filter_control_functor =
143//ust// (int (*)(enum ltt_filter_control_msg,
144//ust// struct ust_trace *))function;
145//ust// ltt_filter_control_owner = owner;
146//ust// break;
147//ust// case LTT_FUNCTION_STATEDUMP:
148//ust// if (ltt_statedump_owner != NULL) {
149//ust// ret = -EEXIST;
150//ust// goto end;
151//ust// }
152//ust// ltt_statedump_functor =
153//ust// (int (*)(struct ust_trace *))function;
154//ust// ltt_statedump_owner = owner;
155//ust// break;
156//ust// }
157//ust//
158//ust// end:
159//ust//
160//ust// return ret;
161//ust// }
162
163/**
164 * ltt_module_unregister - LTT module unregistration
165 * @name: module type
166 */
167//ust// void ltt_module_unregister(enum ltt_module_function name)
168//ust// {
169//ust// switch (name) {
170//ust// case LTT_FUNCTION_RUN_FILTER:
171//ust// ltt_filter_unregister();
172//ust// ltt_run_filter_owner = NULL;
173//ust// /* Wait for preempt sections to finish */
174//ust// synchronize_sched();
175//ust// break;
176//ust// case LTT_FUNCTION_FILTER_CONTROL:
177//ust// ltt_filter_control_functor = ltt_filter_control_default;
178//ust// ltt_filter_control_owner = NULL;
179//ust// break;
180//ust// case LTT_FUNCTION_STATEDUMP:
181//ust// ltt_statedump_functor = ltt_statedump_default;
182//ust// ltt_statedump_owner = NULL;
183//ust// break;
184//ust// }
185//ust//
186//ust// }
187
188static LIST_HEAD(ltt_transport_list);
189
190/**
191 * ltt_transport_register - LTT transport registration
192 * @transport: transport structure
193 *
194 * Registers a transport which can be used as output to extract the data out of
195 * LTTng. The module calling this registration function must ensure that no
196 * trap-inducing code will be executed by the transport functions. E.g.
197 * vmalloc_sync_all() must be called between a vmalloc and the moment the memory
198 * is made visible to the transport function. This registration acts as a
199 * vmalloc_sync_all. Therefore, only if the module allocates virtual memory
200 * after its registration must it synchronize the TLBs.
201 */
void ltt_transport_register(struct ltt_transport *transport)
{
	/*
	 * Make sure no page fault can be triggered by the module about to be
	 * registered. We deal with this here so we don't have to call
	 * vmalloc_sync_all() in each module's init.
	 */
//ust//	vmalloc_sync_all();

	/* Serialize against trace setup/alloc walking the transport list. */
	ltt_lock_traces();
	list_add_tail(&transport->node, &ltt_transport_list);
	ltt_unlock_traces();
}
215
216/**
217 * ltt_transport_unregister - LTT transport unregistration
218 * @transport: transport structure
219 */
void ltt_transport_unregister(struct ltt_transport *transport)
{
	/* Unlink under the traces lock so concurrent lookups stay safe. */
	ltt_lock_traces();
	list_del(&transport->node);
	ltt_unlock_traces();
}
226
227static inline int is_channel_overwrite(enum ltt_channels chan,
228 enum trace_mode mode)
229{
230 switch (mode) {
231 case LTT_TRACE_NORMAL:
232 return 0;
233 case LTT_TRACE_FLIGHT:
234 switch (chan) {
235 case LTT_CHANNEL_METADATA:
236 return 0;
237 default:
238 return 1;
239 }
240 case LTT_TRACE_HYBRID:
241 switch (chan) {
242 case LTT_CHANNEL_METADATA:
243 return 0;
244 default:
245 return 1;
246 }
247 default:
248 return 0;
249 }
250}
251
252static void trace_async_wakeup(struct ust_trace *trace)
253{
254 int i;
255 struct ust_channel *chan;
256
257 /* Must check each channel for pending read wakeup */
258 for (i = 0; i < trace->nr_channels; i++) {
259 chan = &trace->channels[i];
260 if (chan->active)
261 trace->ops->wakeup_channel(chan);
262 }
263}
264
265//ust// /* Timer to send async wakeups to the readers */
266//ust// static void async_wakeup(unsigned long data)
267//ust// {
268//ust// struct ust_trace *trace;
269//ust//
270//ust// /*
271//ust// * PREEMPT_RT does not allow spinlocks to be taken within preempt
272//ust// * disable sections (spinlock taken in wake_up). However, mainline won't
273//ust// * allow mutex to be taken in interrupt context. Ugly.
274//ust// * A proper way to do this would be to turn the timer into a
275//ust// * periodically woken up thread, but it adds to the footprint.
276//ust// */
277//ust// #ifndef CONFIG_PREEMPT_RT
278//ust// rcu_read_lock_sched();
279//ust// #else
280//ust// ltt_lock_traces();
281//ust// #endif
282//ust// list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
283//ust// trace_async_wakeup(trace);
284//ust// }
285//ust// #ifndef CONFIG_PREEMPT_RT
286//ust// rcu_read_unlock_sched();
287//ust// #else
288//ust// ltt_unlock_traces();
289//ust// #endif
290//ust//
291//ust// mod_timer(&ltt_async_wakeup_timer, jiffies + LTT_PERCPU_TIMER_INTERVAL);
292//ust// }
293
294/**
295 * _ltt_trace_find - find a trace by given name.
296 * trace_name: trace name
297 *
298 * Returns a pointer to the trace structure, NULL if not found.
299 */
300struct ust_trace *_ltt_trace_find(const char *trace_name)
301{
302 struct ust_trace *trace;
303
304 list_for_each_entry(trace, &ltt_traces.head, list)
305 if (!strncmp(trace->trace_name, trace_name, NAME_MAX))
306 return trace;
307
308 return NULL;
309}
310
311/* _ltt_trace_find_setup :
312 * find a trace in setup list by given name.
313 *
314 * Returns a pointer to the trace structure, NULL if not found.
315 */
316struct ust_trace *_ltt_trace_find_setup(const char *trace_name)
317{
318 struct ust_trace *trace;
319
320 list_for_each_entry(trace, &ltt_traces.setup_head, list)
321 if (!strncmp(trace->trace_name, trace_name, NAME_MAX))
322 return trace;
323
324 return NULL;
325}
326
327/**
328 * ltt_release_transport - Release an LTT transport
329 * @kref : reference count on the transport
330 */
void ltt_release_transport(struct kref *kref)
{
	/*
	 * Intentionally a no-op in UST: the kernel version removed the
	 * trace's debugfs directories here (see disabled code below).
	 */
//ust//	struct ust_trace *trace = container_of(kref,
//ust//			struct ust_trace, ltt_transport_kref);
//ust//	trace->ops->remove_dirs(trace);
}
337
338/**
339 * ltt_release_trace - Release a LTT trace
340 * @kref : reference count on the trace
341 */
void ltt_release_trace(struct kref *kref)
{
	/* Last reference dropped: free the channel array, then the trace. */
	struct ust_trace *trace = container_of(kref,
			struct ust_trace, kref);
	ltt_channels_trace_free(trace->channels);
	free(trace);
}
349
/*
 * Normalize a channel's requested geometry in place: clamp the subbuffer
 * size to at least one page and round both size and count up to the next
 * power of two, as the buffering scheme requires.
 */
static inline void prepare_chan_size_num(unsigned int *subbuf_size,
		unsigned int *n_subbufs)
{
	/* Make sure the subbuffer size is larger than a page */
	*subbuf_size = max_t(unsigned int, *subbuf_size, PAGE_SIZE);

	/* round to next power of 2 */
	*subbuf_size = 1 << get_count_order(*subbuf_size);
	*n_subbufs = 1 << get_count_order(*n_subbufs);

	/* Subbuf size and number must both be power of two */
	WARN_ON(hweight32(*subbuf_size) != 1);
	WARN_ON(hweight32(*n_subbufs) != 1);
}
364
365int _ltt_trace_setup(const char *trace_name)
366{
367 int err = 0;
368 struct ust_trace *new_trace = NULL;
369 int metadata_index;
370 unsigned int chan;
371 enum ltt_channels chantype;
372
373 if (_ltt_trace_find_setup(trace_name)) {
374 ERR("Trace name %s already used", trace_name);
375 err = -EEXIST;
376 goto traces_error;
377 }
378
379 if (_ltt_trace_find(trace_name)) {
380 ERR("Trace name %s already used", trace_name);
381 err = -EEXIST;
382 goto traces_error;
383 }
384
385 new_trace = zmalloc(sizeof(struct ust_trace));
386 if (!new_trace) {
387 ERR("Unable to allocate memory for trace %s", trace_name);
388 err = -ENOMEM;
389 goto traces_error;
390 }
391 strncpy(new_trace->trace_name, trace_name, NAME_MAX);
392 new_trace->channels = ltt_channels_trace_alloc(&new_trace->nr_channels,
393 ust_channels_overwrite_by_default,
394 ust_channels_request_collection_by_default, 1);
395 if (!new_trace->channels) {
396 ERR("Unable to allocate memory for chaninfo %s\n", trace_name);
397 err = -ENOMEM;
398 goto trace_free;
399 }
400
401 /*
402 * Force metadata channel to active, no overwrite.
403 */
404 metadata_index = ltt_channels_get_index_from_name("metadata");
405 WARN_ON(metadata_index < 0);
406 new_trace->channels[metadata_index].overwrite = 0;
407 new_trace->channels[metadata_index].active = 1;
408
409 /*
410 * Set hardcoded tracer defaults for some channels
411 */
412 for (chan = 0; chan < new_trace->nr_channels; chan++) {
413 if (!(new_trace->channels[chan].active))
414 continue;
415
416 chantype = get_channel_type_from_name(
417 ltt_channels_get_name_from_index(chan));
418 new_trace->channels[chan].subbuf_size =
419 chan_infos[chantype].def_subbufsize;
420 new_trace->channels[chan].subbuf_cnt =
421 chan_infos[chantype].def_subbufcount;
422 }
423
424 list_add(&new_trace->list, &ltt_traces.setup_head);
425 return 0;
426
427trace_free:
428 free(new_trace);
429traces_error:
430 return err;
431}
432
433
/* Locked wrapper around _ltt_trace_setup(). */
int ltt_trace_setup(const char *trace_name)
{
	int ret;
	ltt_lock_traces();
	ret = _ltt_trace_setup(trace_name);
	ltt_unlock_traces();
	return ret;
}
442
/* must be called from within a traces lock. */
static void _ltt_trace_free(struct ust_trace *trace)
{
	/* Only for traces still on the setup list (channels freed elsewhere). */
	list_del(&trace->list);
	free(trace);
}
449
450int ltt_trace_set_type(const char *trace_name, const char *trace_type)
451{
452 int err = 0;
453 struct ust_trace *trace;
454 struct ltt_transport *tran_iter, *transport = NULL;
455
456 ltt_lock_traces();
457
458 trace = _ltt_trace_find_setup(trace_name);
459 if (!trace) {
460 ERR("Trace not found %s", trace_name);
461 err = -ENOENT;
462 goto traces_error;
463 }
464
465 list_for_each_entry(tran_iter, &ltt_transport_list, node) {
466 if (!strcmp(tran_iter->name, trace_type)) {
467 transport = tran_iter;
468 break;
469 }
470 }
471 if (!transport) {
472 ERR("Transport %s is not present", trace_type);
473 err = -EINVAL;
474 goto traces_error;
475 }
476
477 trace->transport = transport;
478
479traces_error:
480 ltt_unlock_traces();
481 return err;
482}
483
484int ltt_trace_set_channel_subbufsize(const char *trace_name,
485 const char *channel_name, unsigned int size)
486{
487 int err = 0;
488 struct ust_trace *trace;
489 int index;
490
491 ltt_lock_traces();
492
493 trace = _ltt_trace_find_setup(trace_name);
494 if (!trace) {
495 ERR("Trace not found %s", trace_name);
496 err = -ENOENT;
497 goto traces_error;
498 }
499
500 index = ltt_channels_get_index_from_name(channel_name);
501 if (index < 0) {
502 ERR("Channel %s not found", channel_name);
503 err = -ENOENT;
504 goto traces_error;
505 }
506 trace->channels[index].subbuf_size = size;
507
508traces_error:
509 ltt_unlock_traces();
510 return err;
511}
512
513int ltt_trace_set_channel_subbufcount(const char *trace_name,
514 const char *channel_name, unsigned int cnt)
515{
516 int err = 0;
517 struct ust_trace *trace;
518 int index;
519
520 ltt_lock_traces();
521
522 trace = _ltt_trace_find_setup(trace_name);
523 if (!trace) {
524 ERR("Trace not found %s", trace_name);
525 err = -ENOENT;
526 goto traces_error;
527 }
528
529 index = ltt_channels_get_index_from_name(channel_name);
530 if (index < 0) {
531 ERR("Channel %s not found", channel_name);
532 err = -ENOENT;
533 goto traces_error;
534 }
535 trace->channels[index].subbuf_cnt = cnt;
536
537traces_error:
538 ltt_unlock_traces();
539 return err;
540}
541
542int ltt_trace_set_channel_enable(const char *trace_name,
543 const char *channel_name, unsigned int enable)
544{
545 int err = 0;
546 struct ust_trace *trace;
547 int index;
548
549 ltt_lock_traces();
550
551 trace = _ltt_trace_find_setup(trace_name);
552 if (!trace) {
553 ERR("Trace not found %s", trace_name);
554 err = -ENOENT;
555 goto traces_error;
556 }
557
558 /*
559 * Datas in metadata channel(marker info) is necessary to be able to
560 * read the trace, we always enable this channel.
561 */
562 if (!enable && !strcmp(channel_name, "metadata")) {
563 ERR("Trying to disable metadata channel");
564 err = -EINVAL;
565 goto traces_error;
566 }
567
568 index = ltt_channels_get_index_from_name(channel_name);
569 if (index < 0) {
570 ERR("Channel %s not found", channel_name);
571 err = -ENOENT;
572 goto traces_error;
573 }
574
575 trace->channels[index].active = enable;
576
577traces_error:
578 ltt_unlock_traces();
579 return err;
580}
581
582int ltt_trace_set_channel_overwrite(const char *trace_name,
583 const char *channel_name, unsigned int overwrite)
584{
585 int err = 0;
586 struct ust_trace *trace;
587 int index;
588
589 ltt_lock_traces();
590
591 trace = _ltt_trace_find_setup(trace_name);
592 if (!trace) {
593 ERR("Trace not found %s", trace_name);
594 err = -ENOENT;
595 goto traces_error;
596 }
597
598 /*
599 * Always put the metadata channel in non-overwrite mode :
600 * This is a very low traffic channel and it can't afford to have its
601 * data overwritten : this data (marker info) is necessary to be
602 * able to read the trace.
603 */
604 if (overwrite && !strcmp(channel_name, "metadata")) {
605 ERR("Trying to set metadata channel to overwrite mode");
606 err = -EINVAL;
607 goto traces_error;
608 }
609
610 index = ltt_channels_get_index_from_name(channel_name);
611 if (index < 0) {
612 ERR("Channel %s not found", channel_name);
613 err = -ENOENT;
614 goto traces_error;
615 }
616
617 trace->channels[index].overwrite = overwrite;
618
619traces_error:
620 ltt_unlock_traces();
621 return err;
622}
623
/*
 * ltt_trace_alloc - allocate buffers for a trace set up earlier.
 * @trace_name: name of a trace on the setup list
 *
 * Initializes refcounts and clock information, creates one buffer per
 * active channel through the bound transport, then moves the trace from
 * the setup list to the active traces list (RCU).
 *
 * Returns 0 on success, 1 if the trace is already allocated, -ENOENT if
 * it was never set up, -EINVAL if no transport was bound, or the
 * transport's create_channel() error.
 */
int ltt_trace_alloc(const char *trace_name)
{
	int err = 0;
	struct ust_trace *trace;
	unsigned int subbuf_size, subbuf_cnt;
//ust//	unsigned long flags;
	int chan;
	const char *channel_name;

	ltt_lock_traces();

	if (_ltt_trace_find(trace_name)) {	/* Trace already allocated */
		err = 1;
		goto traces_error;
	}

	trace = _ltt_trace_find_setup(trace_name);
	if (!trace) {
		ERR("Trace not found %s", trace_name);
		err = -ENOENT;
		goto traces_error;
	}

	kref_init(&trace->kref);
	kref_init(&trace->ltt_transport_kref);
//ust//	init_waitqueue_head(&trace->kref_wq);
	trace->active = 0;
//ust//	get_trace_clock();
	trace->freq_scale = trace_clock_freq_scale();

	if (!trace->transport) {
		ERR("Transport is not set");
		err = -EINVAL;
		goto transport_error;
	}
//ust//	if (!try_module_get(trace->transport->owner)) {
//ust//		ERR("Can't lock transport module");
//ust//		err = -ENODEV;
//ust//		goto transport_error;
//ust//	}
	trace->ops = &trace->transport->ops;

//ust//	err = trace->ops->create_dirs(trace);
//ust//	if (err) {
//ust//		ERR("Can't create dir for trace %s", trace_name);
//ust//		goto dirs_error;
//ust//	}

	/* Record the starting clock state for timestamp interpretation. */
//ust//	local_irq_save(flags);
	trace->start_freq = trace_clock_frequency();
	trace->start_tsc = trace_clock_read64();
	gettimeofday(&trace->start_time, NULL); //ust// changed /* FIXME: is this ok? */
//ust//	local_irq_restore(flags);

	/* Create a buffer for every active channel. */
	for (chan = 0; chan < trace->nr_channels; chan++) {
		if (!(trace->channels[chan].active))
			continue;

		channel_name = ltt_channels_get_name_from_index(chan);
		WARN_ON(!channel_name);
		subbuf_size = trace->channels[chan].subbuf_size;
		subbuf_cnt = trace->channels[chan].subbuf_cnt;
		prepare_chan_size_num(&subbuf_size, &subbuf_cnt);
		err = trace->ops->create_channel(trace_name, trace,
				channel_name,
				&trace->channels[chan],
				subbuf_size,
				subbuf_cnt,
				trace->channels[chan].overwrite);
		if (err != 0) {
			ERR("Cannot create channel %s", channel_name);
			goto create_channel_error;
		}
	}

	/* Move from the setup list to the RCU-protected active list. */
	list_del(&trace->list);
//ust//	if (list_empty(&ltt_traces.head)) {
//ust//		mod_timer(&ltt_async_wakeup_timer,
//ust//				jiffies + LTT_PERCPU_TIMER_INTERVAL);
//ust//		set_kernel_trace_flag_all_tasks();
//ust//	}
	list_add_rcu(&trace->list, &ltt_traces.head);
//ust//	synchronize_sched();

	ltt_unlock_traces();

	return 0;

create_channel_error:
	/* Tear down the channels created before the failure, in reverse. */
	for (chan--; chan >= 0; chan--)
		if (trace->channels[chan].active)
			trace->ops->remove_channel(&trace->channels[chan]);

//ust//	dirs_error:
//ust//	module_put(trace->transport->owner);
transport_error:
//ust//	put_trace_clock();
traces_error:
	ltt_unlock_traces();
	return err;
}
725
726/*
727 * It is worked as a wrapper for current version of ltt_control.ko.
728 * We will make a new ltt_control based on debugfs, and control each channel's
729 * buffer.
730 */
731//ust// static int ltt_trace_create(const char *trace_name, const char *trace_type,
732//ust// enum trace_mode mode,
733//ust// unsigned int subbuf_size_low, unsigned int n_subbufs_low,
734//ust// unsigned int subbuf_size_med, unsigned int n_subbufs_med,
735//ust// unsigned int subbuf_size_high, unsigned int n_subbufs_high)
736//ust// {
737//ust// int err = 0;
738//ust//
739//ust// err = ltt_trace_setup(trace_name);
740//ust// if (IS_ERR_VALUE(err))
741//ust// return err;
742//ust//
743//ust// err = ltt_trace_set_type(trace_name, trace_type);
744//ust// if (IS_ERR_VALUE(err))
745//ust// return err;
746//ust//
747//ust// err = ltt_trace_alloc(trace_name);
748//ust// if (IS_ERR_VALUE(err))
749//ust// return err;
750//ust//
751//ust// return err;
752//ust// }
753
/* Must be called while sure that trace is in the list. */
static int _ltt_trace_destroy(struct ust_trace *trace)
{
	/*
	 * Unlink the trace from the active list under RCU.  The sleepable
	 * teardown (buffer removal, refcount drop) happens afterwards in
	 * __ltt_trace_destroy().  Returns 0 on success, -ENOENT for a NULL
	 * trace, -EBUSY if tracing is still active.
	 */
	int err = -EPERM;

	if (trace == NULL) {
		err = -ENOENT;
		goto traces_error;
	}
	if (trace->active) {
		ERR("Can't destroy trace %s : tracer is active", trace->trace_name);
		err = -EBUSY;
		goto active_error;
	}
	/* Everything went fine */
	list_del_rcu(&trace->list);
	/* Wait for readers traversing the list before teardown. */
	synchronize_rcu();
	if (list_empty(&ltt_traces.head)) {
//ust//		clear_kernel_trace_flag_all_tasks();
		/*
		 * We stop the asynchronous delivery of reader wakeup, but
		 * we must make one last check for reader wakeups pending
		 * later in __ltt_trace_destroy.
		 */
//ust//		del_timer_sync(&ltt_async_wakeup_timer);
	}
	return 0;

	/* error handling */
active_error:
traces_error:
	return err;
}
787
/* Sleepable part of the destroy */
static void __ltt_trace_destroy(struct ust_trace *trace, int drop)
{
	/*
	 * @drop nonzero means discard buffered data instead of flushing it
	 * to readers first.
	 */
	int i;
	struct ust_channel *chan;

	if(!drop) {
		/* Flush each active channel so readers see the final data. */
		for (i = 0; i < trace->nr_channels; i++) {
			chan = &trace->channels[i];
			if (chan->active)
				trace->ops->finish_channel(chan);
		}
	}

	/*
	 * NOTE(review): the early return below deliberately skips the
	 * remaining kernel-era teardown (wakeup, remove_channel, kref
	 * drops) — everything past it is currently dead code in UST.
	 */
	return; /* FIXME: temporary for ust */
//ust//	flush_scheduled_work();

	/*
	 * The currently destroyed trace is not in the trace list anymore,
	 * so it's safe to call the async wakeup ourself. It will deliver
	 * the last subbuffers.
	 */
	trace_async_wakeup(trace);

	for (i = 0; i < trace->nr_channels; i++) {
		chan = &trace->channels[i];
		if (chan->active)
			trace->ops->remove_channel(chan);
	}

	kref_put(&trace->ltt_transport_kref, ltt_release_transport);

//ust//	module_put(trace->transport->owner);

	/*
	 * Wait for lttd readers to release the files, therefore making sure
	 * the last subbuffers have been read.
	 */
//ust//	if (atomic_read(&trace->kref.refcount) > 1) {
//ust//		int ret = 0;
//ust//		__wait_event_interruptible(trace->kref_wq,
//ust//			(atomic_read(&trace->kref.refcount) == 1), ret);
//ust//	}
	kref_put(&trace->kref, ltt_release_trace);
}
833
/*
 * ltt_trace_destroy - destroy a trace by name.
 * @trace_name: name of the trace to tear down
 * @drop: nonzero to discard buffered data instead of flushing it
 *
 * Handles both allocated traces (unlinked under the lock, then torn down
 * with the lock released since teardown may sleep) and traces still on
 * the setup list (simply freed).  Returns 0 on success, -ENOENT if no
 * trace with that name exists, or _ltt_trace_destroy()'s error.
 */
int ltt_trace_destroy(const char *trace_name, int drop)
{
	int err = 0;
	struct ust_trace *trace;

	ltt_lock_traces();

	trace = _ltt_trace_find(trace_name);
	if (trace) {
		err = _ltt_trace_destroy(trace);
		if (err)
			goto error;

		/* Drop the lock: the sleepable teardown must run unlocked. */
		ltt_unlock_traces();

		__ltt_trace_destroy(trace, drop);
//ust//	put_trace_clock();

		return 0;
	}

	/* Not allocated yet: maybe it is still on the setup list. */
	trace = _ltt_trace_find_setup(trace_name);
	if (trace) {
		_ltt_trace_free(trace);
		ltt_unlock_traces();
		return 0;
	}

	err = -ENOENT;

	/* Error handling */
error:
	ltt_unlock_traces();
	return err;
}
869
/* must be called from within a traces lock. */
static int _ltt_trace_start(struct ust_trace *trace)
{
	/*
	 * Mark @trace active and bump the global active-trace count.
	 * Starting an already-active trace only logs a debug message.
	 * Returns 0 on success, -ENOENT for a NULL trace.
	 */
	int err = 0;

	if (trace == NULL) {
		err = -ENOENT;
		goto traces_error;
	}
	if (trace->active)
		DBG("Tracing already active for trace %s", trace->trace_name);
//ust//	if (!try_module_get(ltt_run_filter_owner)) {
//ust//		err = -ENODEV;
//ust//		ERR("Cannot lock filter module");
//ust//		goto get_ltt_run_filter_error;
//ust//	}
	trace->active = 1;
	/* Read by trace points without protection : be careful */
	ltt_traces.num_active_traces++;
	return err;

	/* error handling */
//ust//	get_ltt_run_filter_error:
traces_error:
	return err;
}
896
/*
 * ltt_trace_start - activate tracing on the named trace.
 * @trace_name: name of an allocated trace
 *
 * Activates the trace under the traces lock, then — with the lock
 * released — dumps marker state and runs the statedump functor so the
 * trace starts with the metadata needed to decode it.
 * Returns 0 on success or _ltt_trace_start()'s error.
 */
int ltt_trace_start(const char *trace_name)
{
	int err = 0;
	struct ust_trace *trace;

	ltt_lock_traces();

	trace = _ltt_trace_find(trace_name);
	err = _ltt_trace_start(trace);
	if (err)
		goto no_trace;

	ltt_unlock_traces();

	/*
	 * Call the kernel state dump.
	 * Events will be mixed with real kernel events, it's ok.
	 * Notice that there is no protection on the trace : that's exactly
	 * why we iterate on the list and check for trace equality instead of
	 * directly using this trace handle inside the logging function.
	 */

	ltt_dump_marker_state(trace);

//ust//	if (!try_module_get(ltt_statedump_owner)) {
//ust//		err = -ENODEV;
//ust//		ERR("Cannot lock state dump module");
//ust//	} else {
		ltt_statedump_functor(trace);
//ust//		module_put(ltt_statedump_owner);
//ust//	}

	return err;

	/* Error handling */
no_trace:
	ltt_unlock_traces();
	return err;
}
936
/* must be called from within traces lock */
static int _ltt_trace_stop(struct ust_trace *trace)
{
	/*
	 * Clear @trace's active flag and decrement the global active-trace
	 * count.  Stopping an inactive trace only logs a debug message.
	 * Returns 0 on success, -ENOENT for a NULL trace.
	 */
	int err = -EPERM;

	if (trace == NULL) {
		err = -ENOENT;
		goto traces_error;
	}
	if (!trace->active)
		DBG("LTT : Tracing not active for trace %s", trace->trace_name);
	if (trace->active) {
		trace->active = 0;
		ltt_traces.num_active_traces--;
//ust//		synchronize_sched(); /* Wait for each tracing to be finished */
	}
//ust//	module_put(ltt_run_filter_owner);
	/* Everything went fine */
	return 0;

	/* Error handling */
traces_error:
	return err;
}
961
/*
 * ltt_trace_stop - deactivate tracing on the named trace.
 * @trace_name: name of an allocated trace
 *
 * Locked wrapper around _ltt_trace_stop(); returns its result.
 */
int ltt_trace_stop(const char *trace_name)
{
	struct ust_trace *trace;
	int ret;

	ltt_lock_traces();
	trace = _ltt_trace_find(trace_name);
	ret = _ltt_trace_stop(trace);
	ltt_unlock_traces();

	return ret;
}
973
/**
 * ltt_filter_control - Trace filter control in-kernel API
 * @msg: Action to perform on the filter
 * @trace_name: Trace on which the action must be done
 *
 * Looks up the trace under the traces lock and forwards supported filter
 * messages to the registered filter-control functor.  Returns the
 * functor's result, -ENOENT if the trace does not exist, or -EPERM for
 * an unsupported message.
 */
int ltt_filter_control(enum ltt_filter_control_msg msg, const char *trace_name)
{
	int err;
	struct ust_trace *trace;

	DBG("ltt_filter_control : trace %s", trace_name);
	ltt_lock_traces();
	trace = _ltt_trace_find(trace_name);
	if (trace == NULL) {
		ERR("Trace does not exist. Cannot proxy control request");
		err = -ENOENT;
		goto trace_error;
	}
//ust//	if (!try_module_get(ltt_filter_control_owner)) {
//ust//		err = -ENODEV;
//ust//		goto get_module_error;
//ust//	}
	switch (msg) {
	case LTT_FILTER_DEFAULT_ACCEPT:
		DBG("Proxy filter default accept %s", trace_name);
		err = (*ltt_filter_control_functor)(msg, trace);
		break;
	case LTT_FILTER_DEFAULT_REJECT:
		DBG("Proxy filter default reject %s", trace_name);
		err = (*ltt_filter_control_functor)(msg, trace);
		break;
	default:
		err = -EPERM;
	}
//ust//	module_put(ltt_filter_control_owner);

//ust//	get_module_error:
trace_error:
	ltt_unlock_traces();
	return err;
}
This page took 0.024839 seconds and 4 git commands to generate.