quick and dirty fix for message maximum length bug
libust/tracer.c
/*
 * tracer.c
 *
 * (C) Copyright 2005-2008 -
 * 		Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Inspired from LTT :
 * 	Karim Yaghmour (karim@opersys.com)
 * 	Tom Zanussi (zanussi@us.ibm.com)
 * 	Bob Wisniewski (bob@watson.ibm.com)
 * And from K42 :
 * 	Bob Wisniewski (bob@watson.ibm.com)
 *
 * Changelog:
 * 	22/09/06, Move to the marker/probes mechanism.
 * 	19/10/05, Complete lockless mechanism.
 * 	27/05/05, Modular redesign and rewrite.
 */

#include <urcu-bp.h>
#include <urcu/rculist.h>

#include <ust/kernelcompat.h>
#include "tracercore.h"
#include "tracer.h"
#include "usterr.h"

//ust// static void async_wakeup(unsigned long data);
//ust//
//ust// static DEFINE_TIMER(ltt_async_wakeup_timer, async_wakeup, 0, 0);

/* Default callbacks for modules */
notrace int ltt_filter_control_default(enum ltt_filter_control_msg msg,
		struct ltt_trace_struct *trace)
{
	return 0;
}

int ltt_statedump_default(struct ltt_trace_struct *trace)
{
	return 0;
}

/* Callbacks for registered modules */

int (*ltt_filter_control_functor)
	(enum ltt_filter_control_msg msg, struct ltt_trace_struct *trace) =
	ltt_filter_control_default;
struct module *ltt_filter_control_owner;

/* These function pointers are protected by a trace activation check */
struct module *ltt_run_filter_owner;
int (*ltt_statedump_functor)(struct ltt_trace_struct *trace) =
	ltt_statedump_default;
struct module *ltt_statedump_owner;

struct chan_info_struct {
	const char *name;
	unsigned int def_subbufsize;
	unsigned int def_subbufcount;
} chan_infos[] = {
	[LTT_CHANNEL_METADATA] = {
		LTT_METADATA_CHANNEL,
		LTT_DEFAULT_SUBBUF_SIZE_LOW,
		LTT_DEFAULT_N_SUBBUFS_LOW,
	},
	[LTT_CHANNEL_UST] = {
		LTT_UST_CHANNEL,
		LTT_DEFAULT_SUBBUF_SIZE_HIGH,
		LTT_DEFAULT_N_SUBBUFS_HIGH,
	},
};

static enum ltt_channels get_channel_type_from_name(const char *name)
{
	int i;

	if (!name)
		return LTT_CHANNEL_UST;

	for (i = 0; i < ARRAY_SIZE(chan_infos); i++)
		if (chan_infos[i].name && !strcmp(name, chan_infos[i].name))
			return (enum ltt_channels)i;

	return LTT_CHANNEL_UST;
}

/**
 * ltt_module_register - LTT module registration
 * @name: module type
 * @function: callback to register
 * @owner: module which owns the callback
 *
 * The module calling this registration function must ensure that no
 * trap-inducing code will be executed by "function". E.g. vmalloc_sync_all()
 * must be called between a vmalloc and the moment the memory is made visible to
 * "function". This registration acts as a vmalloc_sync_all. Therefore, only if
 * the module allocates virtual memory after its registration must it
 * synchronize the TLBs.
 */
//ust// int ltt_module_register(enum ltt_module_function name, void *function,
//ust//		struct module *owner)
//ust// {
//ust//	int ret = 0;
//ust//
//ust//	/*
//ust//	 * Make sure no page fault can be triggered by the module about to be
//ust//	 * registered. We deal with this here so we don't have to call
//ust//	 * vmalloc_sync_all() in each module's init.
//ust//	 */
//ust//	vmalloc_sync_all();
//ust//
//ust//	switch (name) {
//ust//	case LTT_FUNCTION_RUN_FILTER:
//ust//		if (ltt_run_filter_owner != NULL) {
//ust//			ret = -EEXIST;
//ust//			goto end;
//ust//		}
//ust//		ltt_filter_register((ltt_run_filter_functor)function);
//ust//		ltt_run_filter_owner = owner;
//ust//		break;
//ust//	case LTT_FUNCTION_FILTER_CONTROL:
//ust//		if (ltt_filter_control_owner != NULL) {
//ust//			ret = -EEXIST;
//ust//			goto end;
//ust//		}
//ust//		ltt_filter_control_functor =
//ust//			(int (*)(enum ltt_filter_control_msg,
//ust//			struct ltt_trace_struct *))function;
//ust//		ltt_filter_control_owner = owner;
//ust//		break;
//ust//	case LTT_FUNCTION_STATEDUMP:
//ust//		if (ltt_statedump_owner != NULL) {
//ust//			ret = -EEXIST;
//ust//			goto end;
//ust//		}
//ust//		ltt_statedump_functor =
//ust//			(int (*)(struct ltt_trace_struct *))function;
//ust//		ltt_statedump_owner = owner;
//ust//		break;
//ust//	}
//ust//
//ust// end:
//ust//
//ust//	return ret;
//ust// }

/**
 * ltt_module_unregister - LTT module unregistration
 * @name: module type
 */
//ust// void ltt_module_unregister(enum ltt_module_function name)
//ust// {
//ust//	switch (name) {
//ust//	case LTT_FUNCTION_RUN_FILTER:
//ust//		ltt_filter_unregister();
//ust//		ltt_run_filter_owner = NULL;
//ust//		/* Wait for preempt sections to finish */
//ust//		synchronize_sched();
//ust//		break;
//ust//	case LTT_FUNCTION_FILTER_CONTROL:
//ust//		ltt_filter_control_functor = ltt_filter_control_default;
//ust//		ltt_filter_control_owner = NULL;
//ust//		break;
//ust//	case LTT_FUNCTION_STATEDUMP:
//ust//		ltt_statedump_functor = ltt_statedump_default;
//ust//		ltt_statedump_owner = NULL;
//ust//		break;
//ust//	}
//ust//
//ust// }

static LIST_HEAD(ltt_transport_list);

/**
 * ltt_transport_register - LTT transport registration
 * @transport: transport structure
 *
 * Registers a transport which can be used as output to extract the data out of
 * LTTng. The module calling this registration function must ensure that no
 * trap-inducing code will be executed by the transport functions. E.g.
 * vmalloc_sync_all() must be called between a vmalloc and the moment the memory
 * is made visible to the transport function. This registration acts as a
 * vmalloc_sync_all. Therefore, only if the module allocates virtual memory
 * after its registration must it synchronize the TLBs.
 */
void ltt_transport_register(struct ltt_transport *transport)
{
	/*
	 * Make sure no page fault can be triggered by the module about to be
	 * registered. We deal with this here so we don't have to call
	 * vmalloc_sync_all() in each module's init.
	 */
//ust//	vmalloc_sync_all();

	ltt_lock_traces();
	list_add_tail(&transport->node, &ltt_transport_list);
	ltt_unlock_traces();
}
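
/*
 * Example (editor's sketch, not part of the original file): a transport
 * backend would typically define a struct ltt_transport once and register it
 * at startup. Only the .name, .ops and .node members are known from this
 * file; the initializer below is otherwise illustrative.
 *
 *	static struct ltt_transport my_transport = {
 *		.name = "my-relay",	// looked up by ltt_trace_set_type()
 *		.ops = { ... },		// create_channel, wakeup_channel, etc.
 *	};
 *
 *	ltt_transport_register(&my_transport);
 *	...
 *	ltt_transport_unregister(&my_transport);
 */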

/**
 * ltt_transport_unregister - LTT transport unregistration
 * @transport: transport structure
 */
void ltt_transport_unregister(struct ltt_transport *transport)
{
	ltt_lock_traces();
	list_del(&transport->node);
	ltt_unlock_traces();
}

static inline int is_channel_overwrite(enum ltt_channels chan,
	enum trace_mode mode)
{
	switch (mode) {
	case LTT_TRACE_NORMAL:
		return 0;
	case LTT_TRACE_FLIGHT:
		switch (chan) {
		case LTT_CHANNEL_METADATA:
			return 0;
		default:
			return 1;
		}
	case LTT_TRACE_HYBRID:
		switch (chan) {
		case LTT_CHANNEL_METADATA:
			return 0;
		default:
			return 1;
		}
	default:
		return 0;
	}
}

/**
 * ltt_write_trace_header - Write trace header
 * @trace: Trace information
 * @header: Memory address where the information must be written to
 */
void notrace ltt_write_trace_header(struct ltt_trace_struct *trace,
		struct ltt_subbuffer_header *header)
{
	header->magic_number = LTT_TRACER_MAGIC_NUMBER;
	header->major_version = LTT_TRACER_VERSION_MAJOR;
	header->minor_version = LTT_TRACER_VERSION_MINOR;
	header->arch_size = sizeof(void *);
	header->alignment = ltt_get_alignment();
	header->start_time_sec = trace->start_time.tv_sec;
	header->start_time_usec = trace->start_time.tv_usec;
	header->start_freq = trace->start_freq;
	header->freq_scale = trace->freq_scale;
}

static void trace_async_wakeup(struct ltt_trace_struct *trace)
{
	int i;
	struct ust_channel *chan;

	/* Must check each channel for pending read wakeup */
	for (i = 0; i < trace->nr_channels; i++) {
		chan = &trace->channels[i];
		if (chan->active)
			trace->ops->wakeup_channel(chan);
	}
}

//ust// /* Timer to send async wakeups to the readers */
//ust// static void async_wakeup(unsigned long data)
//ust// {
//ust//	struct ltt_trace_struct *trace;
//ust//
//ust//	/*
//ust//	 * PREEMPT_RT does not allow spinlocks to be taken within preempt
//ust//	 * disable sections (spinlock taken in wake_up). However, mainline won't
//ust//	 * allow mutex to be taken in interrupt context. Ugly.
//ust//	 * A proper way to do this would be to turn the timer into a
//ust//	 * periodically woken up thread, but it adds to the footprint.
//ust//	 */
//ust// #ifndef CONFIG_PREEMPT_RT
//ust//	rcu_read_lock_sched();
//ust// #else
//ust//	ltt_lock_traces();
//ust// #endif
//ust//	list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
//ust//		trace_async_wakeup(trace);
//ust//	}
//ust// #ifndef CONFIG_PREEMPT_RT
//ust//	rcu_read_unlock_sched();
//ust// #else
//ust//	ltt_unlock_traces();
//ust// #endif
//ust//
//ust//	mod_timer(&ltt_async_wakeup_timer, jiffies + LTT_PERCPU_TIMER_INTERVAL);
//ust// }

/**
 * _ltt_trace_find - find a trace by given name.
 * trace_name: trace name
 *
 * Returns a pointer to the trace structure, NULL if not found.
 */
struct ltt_trace_struct *_ltt_trace_find(const char *trace_name)
{
	struct ltt_trace_struct *trace;

	list_for_each_entry(trace, &ltt_traces.head, list)
		if (!strncmp(trace->trace_name, trace_name, NAME_MAX))
			return trace;

	return NULL;
}

/* _ltt_trace_find_setup :
 * find a trace in setup list by given name.
 *
 * Returns a pointer to the trace structure, NULL if not found.
 */
struct ltt_trace_struct *_ltt_trace_find_setup(const char *trace_name)
{
	struct ltt_trace_struct *trace;

	list_for_each_entry(trace, &ltt_traces.setup_head, list)
		if (!strncmp(trace->trace_name, trace_name, NAME_MAX))
			return trace;

	return NULL;
}

/**
 * ltt_release_transport - Release an LTT transport
 * @kref : reference count on the transport
 */
void ltt_release_transport(struct kref *kref)
{
//ust//	struct ltt_trace_struct *trace = container_of(kref,
//ust//			struct ltt_trace_struct, ltt_transport_kref);
//ust//	trace->ops->remove_dirs(trace);
}

/**
 * ltt_release_trace - Release a LTT trace
 * @kref : reference count on the trace
 */
void ltt_release_trace(struct kref *kref)
{
	struct ltt_trace_struct *trace = container_of(kref,
			struct ltt_trace_struct, kref);
	ltt_channels_trace_free(trace->channels);
	kfree(trace);
}

static inline void prepare_chan_size_num(unsigned int *subbuf_size,
		unsigned int *n_subbufs)
{
	*subbuf_size = 1 << get_count_order(*subbuf_size);
	*n_subbufs = 1 << get_count_order(*n_subbufs);

	/* Subbuf size and number must both be power of two */
	WARN_ON(hweight32(*subbuf_size) != 1);
	WARN_ON(hweight32(*n_subbufs) != 1);
}
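
/*
 * Editor's note (illustrative numbers, not from the original file): the
 * rounding above always goes up to the next power of two, so a requested
 * subbuffer size of 10000 bytes becomes 1 << get_count_order(10000) = 16384
 * and a requested count of 3 subbuffers becomes 4.
 */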

int _ltt_trace_setup(const char *trace_name)
{
	int err = 0;
	struct ltt_trace_struct *new_trace = NULL;
	int metadata_index;
	unsigned int chan;
	enum ltt_channels chantype;

	if (_ltt_trace_find_setup(trace_name)) {
		ERR("Trace name %s already used", trace_name);
		err = -EEXIST;
		goto traces_error;
	}

	if (_ltt_trace_find(trace_name)) {
		ERR("Trace name %s already used", trace_name);
		err = -EEXIST;
		goto traces_error;
	}

	new_trace = kzalloc(sizeof(struct ltt_trace_struct), GFP_KERNEL);
	if (!new_trace) {
		ERR("Unable to allocate memory for trace %s", trace_name);
		err = -ENOMEM;
		goto traces_error;
	}
	strncpy(new_trace->trace_name, trace_name, NAME_MAX);
	new_trace->channels = ltt_channels_trace_alloc(&new_trace->nr_channels,
						       0, 1);
	if (!new_trace->channels) {
		ERR("Unable to allocate memory for chaninfo %s\n", trace_name);
		err = -ENOMEM;
		goto trace_free;
	}

	/*
	 * Force metadata channel to active, no overwrite.
	 */
	metadata_index = ltt_channels_get_index_from_name("metadata");
	WARN_ON(metadata_index < 0);
	new_trace->channels[metadata_index].overwrite = 0;
	new_trace->channels[metadata_index].active = 1;

	/*
	 * Set hardcoded tracer defaults for some channels
	 */
	for (chan = 0; chan < new_trace->nr_channels; chan++) {
		if (!(new_trace->channels[chan].active))
			continue;

		chantype = get_channel_type_from_name(
			ltt_channels_get_name_from_index(chan));
		new_trace->channels[chan].subbuf_size =
			chan_infos[chantype].def_subbufsize;
		new_trace->channels[chan].subbuf_cnt =
			chan_infos[chantype].def_subbufcount;
	}

	list_add(&new_trace->list, &ltt_traces.setup_head);
	return 0;

trace_free:
	kfree(new_trace);
traces_error:
	return err;
}


int ltt_trace_setup(const char *trace_name)
{
	int ret;
	ltt_lock_traces();
	ret = _ltt_trace_setup(trace_name);
	ltt_unlock_traces();
	return ret;
}

/* must be called from within a traces lock. */
static void _ltt_trace_free(struct ltt_trace_struct *trace)
{
	list_del(&trace->list);
	kfree(trace);
}

int ltt_trace_set_type(const char *trace_name, const char *trace_type)
{
	int err = 0;
	struct ltt_trace_struct *trace;
	struct ltt_transport *tran_iter, *transport = NULL;

	ltt_lock_traces();

	trace = _ltt_trace_find_setup(trace_name);
	if (!trace) {
		ERR("Trace not found %s", trace_name);
		err = -ENOENT;
		goto traces_error;
	}

	list_for_each_entry(tran_iter, &ltt_transport_list, node) {
		if (!strcmp(tran_iter->name, trace_type)) {
			transport = tran_iter;
			break;
		}
	}
	if (!transport) {
		ERR("Transport %s is not present", trace_type);
		err = -EINVAL;
		goto traces_error;
	}

	trace->transport = transport;

traces_error:
	ltt_unlock_traces();
	return err;
}

int ltt_trace_set_channel_subbufsize(const char *trace_name,
		const char *channel_name, unsigned int size)
{
	int err = 0;
	struct ltt_trace_struct *trace;
	int index;

	ltt_lock_traces();

	trace = _ltt_trace_find_setup(trace_name);
	if (!trace) {
		ERR("Trace not found %s", trace_name);
		err = -ENOENT;
		goto traces_error;
	}

	index = ltt_channels_get_index_from_name(channel_name);
	if (index < 0) {
		ERR("Channel %s not found", channel_name);
		err = -ENOENT;
		goto traces_error;
	}
	trace->channels[index].subbuf_size = size;

traces_error:
	ltt_unlock_traces();
	return err;
}

int ltt_trace_set_channel_subbufcount(const char *trace_name,
		const char *channel_name, unsigned int cnt)
{
	int err = 0;
	struct ltt_trace_struct *trace;
	int index;

	ltt_lock_traces();

	trace = _ltt_trace_find_setup(trace_name);
	if (!trace) {
		ERR("Trace not found %s", trace_name);
		err = -ENOENT;
		goto traces_error;
	}

	index = ltt_channels_get_index_from_name(channel_name);
	if (index < 0) {
		ERR("Channel %s not found", channel_name);
		err = -ENOENT;
		goto traces_error;
	}
	trace->channels[index].subbuf_cnt = cnt;

traces_error:
	ltt_unlock_traces();
	return err;
}

int ltt_trace_set_channel_enable(const char *trace_name,
		const char *channel_name, unsigned int enable)
{
	int err = 0;
	struct ltt_trace_struct *trace;
	int index;

	ltt_lock_traces();

	trace = _ltt_trace_find_setup(trace_name);
	if (!trace) {
		ERR("Trace not found %s", trace_name);
		err = -ENOENT;
		goto traces_error;
	}

	/*
	 * The data in the metadata channel (marker info) is necessary to be
	 * able to read the trace, so we always keep this channel enabled.
	 */
	if (!enable && !strcmp(channel_name, "metadata")) {
		ERR("Trying to disable metadata channel");
		err = -EINVAL;
		goto traces_error;
	}

	index = ltt_channels_get_index_from_name(channel_name);
	if (index < 0) {
		ERR("Channel %s not found", channel_name);
		err = -ENOENT;
		goto traces_error;
	}

	trace->channels[index].active = enable;

traces_error:
	ltt_unlock_traces();
	return err;
}

int ltt_trace_set_channel_overwrite(const char *trace_name,
		const char *channel_name, unsigned int overwrite)
{
	int err = 0;
	struct ltt_trace_struct *trace;
	int index;

	ltt_lock_traces();

	trace = _ltt_trace_find_setup(trace_name);
	if (!trace) {
		ERR("Trace not found %s", trace_name);
		err = -ENOENT;
		goto traces_error;
	}

	/*
	 * Always keep the metadata channel in non-overwrite mode: it is a
	 * very low-traffic channel and it cannot afford to have its data
	 * overwritten, since this data (marker info) is necessary to be able
	 * to read the trace.
	 */
	if (overwrite && !strcmp(channel_name, "metadata")) {
		ERR("Trying to set metadata channel to overwrite mode");
		err = -EINVAL;
		goto traces_error;
	}

	index = ltt_channels_get_index_from_name(channel_name);
	if (index < 0) {
		ERR("Channel %s not found", channel_name);
		err = -ENOENT;
		goto traces_error;
	}

	trace->channels[index].overwrite = overwrite;

traces_error:
	ltt_unlock_traces();
	return err;
}
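
/*
 * Example (editor's sketch, not part of the original file): channel geometry
 * and flags can only be changed while the trace is still on the setup list,
 * i.e. after ltt_trace_setup() and before ltt_trace_alloc(). Trace name,
 * channel name and sizes below are illustrative only.
 *
 *	ltt_trace_set_channel_subbufsize("mytrace", "ust", 65536);
 *	ltt_trace_set_channel_subbufcount("mytrace", "ust", 8);
 *	ltt_trace_set_channel_overwrite("mytrace", "ust", 1);
 *	ltt_trace_set_channel_enable("mytrace", "ust", 1);
 */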

int ltt_trace_alloc(const char *trace_name)
{
	int err = 0;
	struct ltt_trace_struct *trace;
	unsigned int subbuf_size, subbuf_cnt;
//ust//	unsigned long flags;
	int chan;
	const char *channel_name;

	ltt_lock_traces();

	trace = _ltt_trace_find_setup(trace_name);
	if (!trace) {
		ERR("Trace not found %s", trace_name);
		err = -ENOENT;
		goto traces_error;
	}

	kref_init(&trace->kref);
	kref_init(&trace->ltt_transport_kref);
//ust//	init_waitqueue_head(&trace->kref_wq);
	trace->active = 0;
//ust//	get_trace_clock();
	trace->freq_scale = trace_clock_freq_scale();

	if (!trace->transport) {
		ERR("Transport is not set");
		err = -EINVAL;
		goto transport_error;
	}
//ust//	if (!try_module_get(trace->transport->owner)) {
//ust//		ERR("Can't lock transport module");
//ust//		err = -ENODEV;
//ust//		goto transport_error;
//ust//	}
	trace->ops = &trace->transport->ops;

//ust//	err = trace->ops->create_dirs(trace);
//ust//	if (err) {
//ust//		ERR("Can't create dir for trace %s", trace_name);
//ust//		goto dirs_error;
//ust//	}

//ust//	local_irq_save(flags);
	trace->start_freq = trace_clock_frequency();
	trace->start_tsc = trace_clock_read64();
	gettimeofday(&trace->start_time, NULL); //ust// changed /* FIXME: is this ok? */
//ust//	local_irq_restore(flags);

	for (chan = 0; chan < trace->nr_channels; chan++) {
		if (!(trace->channels[chan].active))
			continue;

		channel_name = ltt_channels_get_name_from_index(chan);
		WARN_ON(!channel_name);
		subbuf_size = trace->channels[chan].subbuf_size;
		subbuf_cnt = trace->channels[chan].subbuf_cnt;
		prepare_chan_size_num(&subbuf_size, &subbuf_cnt);
		err = trace->ops->create_channel(trace_name, trace,
				channel_name,
				&trace->channels[chan],
				subbuf_size,
				subbuf_cnt,
				trace->channels[chan].overwrite);
		if (err != 0) {
			ERR("Cannot create channel %s", channel_name);
			goto create_channel_error;
		}
	}

	list_del(&trace->list);
//ust//	if (list_empty(&ltt_traces.head)) {
//ust//		mod_timer(&ltt_async_wakeup_timer,
//ust//				jiffies + LTT_PERCPU_TIMER_INTERVAL);
//ust//		set_kernel_trace_flag_all_tasks();
//ust//	}
	list_add_rcu(&trace->list, &ltt_traces.head);
//ust//	synchronize_sched();

	ltt_unlock_traces();

	return 0;

create_channel_error:
	for (chan--; chan >= 0; chan--)
		if (trace->channels[chan].active)
			trace->ops->remove_channel(&trace->channels[chan]);

//ust// dirs_error:
//ust//	module_put(trace->transport->owner);
transport_error:
//ust//	put_trace_clock();
traces_error:
	ltt_unlock_traces();
	return err;
}

/*
 * This works as a wrapper for the current version of ltt_control.ko.
 * We will make a new ltt_control based on debugfs, and control each channel's
 * buffer.
 */
//ust// static int ltt_trace_create(const char *trace_name, const char *trace_type,
//ust//		enum trace_mode mode,
//ust//		unsigned int subbuf_size_low, unsigned int n_subbufs_low,
//ust//		unsigned int subbuf_size_med, unsigned int n_subbufs_med,
//ust//		unsigned int subbuf_size_high, unsigned int n_subbufs_high)
//ust// {
//ust//	int err = 0;
//ust//
//ust//	err = ltt_trace_setup(trace_name);
//ust//	if (IS_ERR_VALUE(err))
//ust//		return err;
//ust//
//ust//	err = ltt_trace_set_type(trace_name, trace_type);
//ust//	if (IS_ERR_VALUE(err))
//ust//		return err;
//ust//
//ust//	err = ltt_trace_alloc(trace_name);
//ust//	if (IS_ERR_VALUE(err))
//ust//		return err;
//ust//
//ust//	return err;
//ust// }
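
/*
 * Example (editor's sketch, not part of the original file): the complete life
 * cycle of a trace, following the same setup/set_type/alloc sequence as the
 * disabled ltt_trace_create() wrapper above, then starting, stopping and
 * tearing the trace down. Trace and transport names are illustrative and
 * error handling is omitted.
 *
 *	ltt_trace_setup("mytrace");
 *	ltt_trace_set_type("mytrace", "some-transport");
 *	ltt_trace_alloc("mytrace");
 *	ltt_trace_start("mytrace");
 *	...
 *	ltt_trace_stop("mytrace");
 *	ltt_trace_destroy("mytrace");
 */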

/* Must be called while sure that trace is in the list. */
static int _ltt_trace_destroy(struct ltt_trace_struct *trace)
{
	int err = -EPERM;

	if (trace == NULL) {
		err = -ENOENT;
		goto traces_error;
	}
	if (trace->active) {
		ERR("Can't destroy trace %s : tracer is active", trace->trace_name);
		err = -EBUSY;
		goto active_error;
	}
	/* Everything went fine */
	list_del_rcu(&trace->list);
	synchronize_rcu();
	if (list_empty(&ltt_traces.head)) {
//ust//		clear_kernel_trace_flag_all_tasks();
		/*
		 * We stop the asynchronous delivery of reader wakeup, but
		 * we must make one last check for reader wakeups pending
		 * later in __ltt_trace_destroy.
		 */
//ust//		del_timer_sync(&ltt_async_wakeup_timer);
	}
	return 0;

	/* error handling */
active_error:
traces_error:
	return err;
}

/* Sleepable part of the destroy */
static void __ltt_trace_destroy(struct ltt_trace_struct *trace)
{
	int i;
	struct ust_channel *chan;

	for (i = 0; i < trace->nr_channels; i++) {
		chan = &trace->channels[i];
		if (chan->active)
			trace->ops->finish_channel(chan);
	}

	return; /* FIXME: temporary for ust */
//ust//	flush_scheduled_work();

	/*
	 * The currently destroyed trace is not in the trace list anymore,
	 * so it's safe to call the async wakeup ourself. It will deliver
	 * the last subbuffers.
	 */
	trace_async_wakeup(trace);

	for (i = 0; i < trace->nr_channels; i++) {
		chan = &trace->channels[i];
		if (chan->active)
			trace->ops->remove_channel(chan);
	}

	kref_put(&trace->ltt_transport_kref, ltt_release_transport);

//ust//	module_put(trace->transport->owner);

	/*
	 * Wait for lttd readers to release the files, therefore making sure
	 * the last subbuffers have been read.
	 */
//ust//	if (atomic_read(&trace->kref.refcount) > 1) {
//ust//		int ret = 0;
//ust//		__wait_event_interruptible(trace->kref_wq,
//ust//			(atomic_read(&trace->kref.refcount) == 1), ret);
//ust//	}
	kref_put(&trace->kref, ltt_release_trace);
}

int ltt_trace_destroy(const char *trace_name)
{
	int err = 0;
	struct ltt_trace_struct *trace;

	ltt_lock_traces();

	trace = _ltt_trace_find(trace_name);
	if (trace) {
		err = _ltt_trace_destroy(trace);
		if (err)
			goto error;

		ltt_unlock_traces();

		__ltt_trace_destroy(trace);
//ust//		put_trace_clock();

		return 0;
	}

	trace = _ltt_trace_find_setup(trace_name);
	if (trace) {
		_ltt_trace_free(trace);
		ltt_unlock_traces();
		return 0;
	}

	err = -ENOENT;

	/* Error handling */
error:
	ltt_unlock_traces();
	return err;
}

/* must be called from within a traces lock. */
static int _ltt_trace_start(struct ltt_trace_struct *trace)
{
	int err = 0;

	if (trace == NULL) {
		err = -ENOENT;
		goto traces_error;
	}
	if (trace->active)
		DBG("Tracing already active for trace %s", trace->trace_name);
//ust//	if (!try_module_get(ltt_run_filter_owner)) {
//ust//		err = -ENODEV;
//ust//		ERR("Cannot lock filter module");
//ust//		goto get_ltt_run_filter_error;
//ust//	}
	trace->active = 1;
	/* Read by trace points without protection : be careful */
	ltt_traces.num_active_traces++;
	return err;

	/* error handling */
//ust// get_ltt_run_filter_error:
traces_error:
	return err;
}

int ltt_trace_start(const char *trace_name)
{
	int err = 0;
	struct ltt_trace_struct *trace;

	ltt_lock_traces();

	trace = _ltt_trace_find(trace_name);
	err = _ltt_trace_start(trace);
	if (err)
		goto no_trace;

	ltt_unlock_traces();

	/*
	 * Call the kernel state dump.
	 * Events will be mixed with real kernel events, it's ok.
	 * Notice that there is no protection on the trace : that's exactly
	 * why we iterate on the list and check for trace equality instead of
	 * directly using this trace handle inside the logging function.
	 */

	ltt_dump_marker_state(trace);

//ust//	if (!try_module_get(ltt_statedump_owner)) {
//ust//		err = -ENODEV;
//ust//		ERR("Cannot lock state dump module");
//ust//	} else {
		ltt_statedump_functor(trace);
//ust//		module_put(ltt_statedump_owner);
//ust//	}

	return err;

	/* Error handling */
no_trace:
	ltt_unlock_traces();
	return err;
}

/* must be called from within traces lock */
static int _ltt_trace_stop(struct ltt_trace_struct *trace)
{
	int err = -EPERM;

	if (trace == NULL) {
		err = -ENOENT;
		goto traces_error;
	}
	if (!trace->active)
		DBG("LTT : Tracing not active for trace %s", trace->trace_name);
	if (trace->active) {
		trace->active = 0;
		ltt_traces.num_active_traces--;
//ust//		synchronize_sched(); /* Wait for each tracing to be finished */
	}
//ust//	module_put(ltt_run_filter_owner);
	/* Everything went fine */
	return 0;

	/* Error handling */
traces_error:
	return err;
}

int ltt_trace_stop(const char *trace_name)
{
	int err = 0;
	struct ltt_trace_struct *trace;

	ltt_lock_traces();
	trace = _ltt_trace_find(trace_name);
	err = _ltt_trace_stop(trace);
	ltt_unlock_traces();
	return err;
}

/**
 * ltt_filter_control - Trace filter control in-kernel API
 * @msg: Action to perform on the filter
 * @trace_name: Trace on which the action must be done
 */
int ltt_filter_control(enum ltt_filter_control_msg msg, const char *trace_name)
{
	int err;
	struct ltt_trace_struct *trace;

	DBG("ltt_filter_control : trace %s", trace_name);
	ltt_lock_traces();
	trace = _ltt_trace_find(trace_name);
	if (trace == NULL) {
		ERR("Trace does not exist. Cannot proxy control request");
		err = -ENOENT;
		goto trace_error;
	}
//ust//	if (!try_module_get(ltt_filter_control_owner)) {
//ust//		err = -ENODEV;
//ust//		goto get_module_error;
//ust//	}
	switch (msg) {
	case LTT_FILTER_DEFAULT_ACCEPT:
		DBG("Proxy filter default accept %s", trace_name);
		err = (*ltt_filter_control_functor)(msg, trace);
		break;
	case LTT_FILTER_DEFAULT_REJECT:
		DBG("Proxy filter default reject %s", trace_name);
		err = (*ltt_filter_control_functor)(msg, trace);
		break;
	default:
		err = -EPERM;
	}
//ust//	module_put(ltt_filter_control_owner);

//ust// get_module_error:
trace_error:
	ltt_unlock_traces();
	return err;
}
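
/*
 * Example (editor's sketch, not part of the original file): switching the
 * default filter policy of an existing trace. With the default
 * ltt_filter_control_default() callback this simply returns 0; the trace name
 * is illustrative.
 *
 *	ltt_filter_control(LTT_FILTER_DEFAULT_ACCEPT, "mytrace");
 */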