4 * (C) Copyright 2005-2008 -
5 * Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 * Karim Yaghmour (karim@opersys.com)
23 * Tom Zanussi (zanussi@us.ibm.com)
24 * Bob Wisniewski (bob@watson.ibm.com)
26 * Bob Wisniewski (bob@watson.ibm.com)
29 * 22/09/06, Move to the marker/probes mechanism.
30 * 19/10/05, Complete lockless mechanism.
31 * 27/05/05, Modular redesign and rewrite.
35 #include <urcu/rculist.h>
37 #include <ust/clock.h>
39 #include "tracercore.h"
43 //ust// static void async_wakeup(unsigned long data);
45 //ust// static DEFINE_TIMER(ltt_async_wakeup_timer, async_wakeup, 0, 0);
47 /* Default callbacks for modules */
48 notrace
int ltt_filter_control_default(enum ltt_filter_control_msg msg
,
49 struct ust_trace
*trace
)
/*
 * Default state dump callback: no-op placeholder used until a state dump
 * module registers its own functor via ltt_statedump_functor.
 */
int ltt_statedump_default(struct ust_trace *trace)
{
	return 0;
}
59 /* Callbacks for registered modules */
61 int (*ltt_filter_control_functor
)
62 (enum ltt_filter_control_msg msg
, struct ust_trace
*trace
) =
63 ltt_filter_control_default
;
64 struct module
*ltt_filter_control_owner
;
66 /* These function pointers are protected by a trace activation check */
67 struct module
*ltt_run_filter_owner
;
68 int (*ltt_statedump_functor
)(struct ust_trace
*trace
) =
69 ltt_statedump_default
;
70 struct module
*ltt_statedump_owner
;
72 struct chan_info_struct chan_infos
[] = {
73 [LTT_CHANNEL_METADATA
] = {
75 LTT_DEFAULT_SUBBUF_SIZE_LOW
,
76 LTT_DEFAULT_N_SUBBUFS_LOW
,
80 LTT_DEFAULT_SUBBUF_SIZE_HIGH
,
81 LTT_DEFAULT_N_SUBBUFS_HIGH
,
85 static enum ltt_channels
get_channel_type_from_name(const char *name
)
90 return LTT_CHANNEL_UST
;
92 for (i
= 0; i
< ARRAY_SIZE(chan_infos
); i
++)
93 if (chan_infos
[i
].name
&& !strcmp(name
, chan_infos
[i
].name
))
94 return (enum ltt_channels
)i
;
96 return LTT_CHANNEL_UST
;
100 * ltt_module_register - LTT module registration
102 * @function: callback to register
103 * @owner: module which owns the callback
105 * The module calling this registration function must ensure that no
106 * trap-inducing code will be executed by "function". E.g. vmalloc_sync_all()
107 * must be called between a vmalloc and the moment the memory is made visible to
108 * "function". This registration acts as a vmalloc_sync_all. Therefore, only if
109 * the module allocates virtual memory after its registration must it
110 * synchronize the TLBs.
112 //ust// int ltt_module_register(enum ltt_module_function name, void *function,
113 //ust// struct module *owner)
118 //ust// * Make sure no page fault can be triggered by the module about to be
119 //ust// * registered. We deal with this here so we don't have to call
120 //ust// * vmalloc_sync_all() in each module's init.
122 //ust// vmalloc_sync_all();
124 //ust// switch (name) {
125 //ust// case LTT_FUNCTION_RUN_FILTER:
126 //ust// if (ltt_run_filter_owner != NULL) {
127 //ust// ret = -EEXIST;
130 //ust// ltt_filter_register((ltt_run_filter_functor)function);
131 //ust// ltt_run_filter_owner = owner;
133 //ust// case LTT_FUNCTION_FILTER_CONTROL:
134 //ust// if (ltt_filter_control_owner != NULL) {
135 //ust// ret = -EEXIST;
138 //ust// ltt_filter_control_functor =
139 //ust// (int (*)(enum ltt_filter_control_msg,
140 //ust// struct ust_trace *))function;
141 //ust// ltt_filter_control_owner = owner;
143 //ust// case LTT_FUNCTION_STATEDUMP:
144 //ust// if (ltt_statedump_owner != NULL) {
145 //ust// ret = -EEXIST;
148 //ust// ltt_statedump_functor =
149 //ust// (int (*)(struct ust_trace *))function;
150 //ust// ltt_statedump_owner = owner;
160 * ltt_module_unregister - LTT module unregistration
163 //ust// void ltt_module_unregister(enum ltt_module_function name)
165 //ust// switch (name) {
166 //ust// case LTT_FUNCTION_RUN_FILTER:
167 //ust// ltt_filter_unregister();
168 //ust// ltt_run_filter_owner = NULL;
169 //ust// /* Wait for preempt sections to finish */
170 //ust// synchronize_sched();
172 //ust// case LTT_FUNCTION_FILTER_CONTROL:
173 //ust// ltt_filter_control_functor = ltt_filter_control_default;
174 //ust// ltt_filter_control_owner = NULL;
176 //ust// case LTT_FUNCTION_STATEDUMP:
177 //ust// ltt_statedump_functor = ltt_statedump_default;
178 //ust// ltt_statedump_owner = NULL;
/* List of registered transports (output backends). */
static CDS_LIST_HEAD(ltt_transport_list);
/* transport mutex, nests inside traces mutex (ltt_lock_traces) */
static DEFINE_MUTEX(ltt_transport_mutex);
188 * ltt_transport_register - LTT transport registration
189 * @transport: transport structure
191 * Registers a transport which can be used as output to extract the data out of
192 * LTTng. The module calling this registration function must ensure that no
193 * trap-inducing code will be executed by the transport functions. E.g.
194 * vmalloc_sync_all() must be called between a vmalloc and the moment the memory
195 * is made visible to the transport function. This registration acts as a
196 * vmalloc_sync_all. Therefore, only if the module allocates virtual memory
197 * after its registration must it synchronize the TLBs.
199 void ltt_transport_register(struct ltt_transport
*transport
)
202 * Make sure no page fault can be triggered by the module about to be
203 * registered. We deal with this here so we don't have to call
204 * vmalloc_sync_all() in each module's init.
206 //ust// vmalloc_sync_all();
208 pthread_mutex_lock(<t_transport_mutex
);
209 cds_list_add_tail(&transport
->node
, <t_transport_list
);
210 pthread_mutex_unlock(<t_transport_mutex
);
214 * ltt_transport_unregister - LTT transport unregistration
215 * @transport: transport structure
217 void ltt_transport_unregister(struct ltt_transport
*transport
)
219 pthread_mutex_lock(<t_transport_mutex
);
220 cds_list_del(&transport
->node
);
221 pthread_mutex_unlock(<t_transport_mutex
);
224 static inline int is_channel_overwrite(enum ltt_channels chan
,
225 enum trace_mode mode
)
228 case LTT_TRACE_NORMAL
:
230 case LTT_TRACE_FLIGHT
:
232 case LTT_CHANNEL_METADATA
:
237 case LTT_TRACE_HYBRID
:
239 case LTT_CHANNEL_METADATA
:
249 static void trace_async_wakeup(struct ust_trace
*trace
)
252 struct ust_channel
*chan
;
254 /* Must check each channel for pending read wakeup */
255 for (i
= 0; i
< trace
->nr_channels
; i
++) {
256 chan
= &trace
->channels
[i
];
258 trace
->ops
->wakeup_channel(chan
);
262 //ust// /* Timer to send async wakeups to the readers */
263 //ust// static void async_wakeup(unsigned long data)
265 //ust// struct ust_trace *trace;
268 //ust// * PREEMPT_RT does not allow spinlocks to be taken within preempt
269 //ust// * disable sections (spinlock taken in wake_up). However, mainline won't
270 //ust// * allow mutex to be taken in interrupt context. Ugly.
271 //ust// * A proper way to do this would be to turn the timer into a
272 //ust// * periodically woken up thread, but it adds to the footprint.
274 //ust// #ifndef CONFIG_PREEMPT_RT
275 //ust// rcu_read_lock_sched();
277 //ust// ltt_lock_traces();
279 //ust// cds_list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
280 //ust// trace_async_wakeup(trace);
282 //ust// #ifndef CONFIG_PREEMPT_RT
283 //ust// rcu_read_unlock_sched();
285 //ust// ltt_unlock_traces();
288 //ust// mod_timer(&ltt_async_wakeup_timer, jiffies + LTT_PERCPU_TIMER_INTERVAL);
292 * _ltt_trace_find - find a trace by given name.
293 * trace_name: trace name
295 * Returns a pointer to the trace structure, NULL if not found.
297 struct ust_trace
*_ltt_trace_find(const char *trace_name
)
299 struct ust_trace
*trace
;
301 cds_list_for_each_entry(trace
, <t_traces
.head
, list
)
302 if (!strncmp(trace
->trace_name
, trace_name
, NAME_MAX
))
308 /* _ltt_trace_find_setup :
309 * find a trace in setup list by given name.
311 * Returns a pointer to the trace structure, NULL if not found.
313 struct ust_trace
*_ltt_trace_find_setup(const char *trace_name
)
315 struct ust_trace
*trace
;
317 cds_list_for_each_entry(trace
, <t_traces
.setup_head
, list
)
318 if (!strncmp(trace
->trace_name
, trace_name
, NAME_MAX
))
/**
 * ltt_release_transport - Release an LTT transport
 * @urcu_ref : reference count on the transport
 *
 * Kernel-era directory teardown is disabled in the userspace port;
 * the body is intentionally empty.
 */
void ltt_release_transport(struct urcu_ref *urcu_ref)
{
//ust//	struct ust_trace *trace = container_of(kref,
//ust//			struct ust_trace, ltt_transport_kref);
//ust//	trace->ops->remove_dirs(trace);
}
336 * ltt_release_trace - Release a LTT trace
337 * @kref : reference count on the trace
339 void ltt_release_trace(struct urcu_ref
*urcu_ref
)
341 struct ust_trace
*trace
= _ust_container_of(urcu_ref
,
342 struct ust_trace
, urcu_ref
);
343 ltt_channels_trace_free(trace
->channels
);
347 static inline void prepare_chan_size_num(unsigned int *subbuf_size
,
348 unsigned int *n_subbufs
)
350 /* Make sure the subbuffer size is larger than a page */
351 *subbuf_size
= max_t(unsigned int, *subbuf_size
, PAGE_SIZE
);
353 /* round to next power of 2 */
354 *subbuf_size
= 1 << get_count_order(*subbuf_size
);
355 *n_subbufs
= 1 << get_count_order(*n_subbufs
);
357 /* Subbuf size and number must both be power of two */
358 WARN_ON(hweight32(*subbuf_size
) != 1);
359 WARN_ON(hweight32(*n_subbufs
) != 1);
362 int _ltt_trace_setup(const char *trace_name
)
365 struct ust_trace
*new_trace
= NULL
;
368 enum ltt_channels chantype
;
370 if (_ltt_trace_find_setup(trace_name
)) {
371 ERR("Trace name %s already used", trace_name
);
376 if (_ltt_trace_find(trace_name
)) {
377 ERR("Trace name %s already used", trace_name
);
382 new_trace
= zmalloc(sizeof(struct ust_trace
));
384 ERR("Unable to allocate memory for trace %s", trace_name
);
388 strncpy(new_trace
->trace_name
, trace_name
, NAME_MAX
);
389 new_trace
->channels
= ltt_channels_trace_alloc(&new_trace
->nr_channels
,
390 ust_channels_overwrite_by_default
,
391 ust_channels_request_collection_by_default
, 1);
392 if (!new_trace
->channels
) {
393 ERR("Unable to allocate memory for chaninfo %s\n", trace_name
);
399 * Force metadata channel to active, no overwrite.
401 metadata_index
= ltt_channels_get_index_from_name("metadata");
402 WARN_ON(metadata_index
< 0);
403 new_trace
->channels
[metadata_index
].overwrite
= 0;
404 new_trace
->channels
[metadata_index
].active
= 1;
407 * Set hardcoded tracer defaults for some channels
409 for (chan
= 0; chan
< new_trace
->nr_channels
; chan
++) {
410 if (!(new_trace
->channels
[chan
].active
))
413 chantype
= get_channel_type_from_name(
414 ltt_channels_get_name_from_index(chan
));
415 new_trace
->channels
[chan
].subbuf_size
=
416 chan_infos
[chantype
].def_subbufsize
;
417 new_trace
->channels
[chan
].subbuf_cnt
=
418 chan_infos
[chantype
].def_subbufcount
;
421 cds_list_add(&new_trace
->list
, <t_traces
.setup_head
);
/*
 * Public entry point for trace setup: takes the traces lock around
 * _ltt_trace_setup().
 */
int ltt_trace_setup(const char *trace_name)
{
	int ret;

	ltt_lock_traces();
	ret = _ltt_trace_setup(trace_name);
	ltt_unlock_traces();

	return ret;
}
440 /* must be called from within a traces lock. */
441 static void _ltt_trace_free(struct ust_trace
*trace
)
443 cds_list_del(&trace
->list
);
447 int ltt_trace_set_type(const char *trace_name
, const char *trace_type
)
450 struct ust_trace
*trace
;
451 struct ltt_transport
*tran_iter
, *transport
= NULL
;
455 trace
= _ltt_trace_find_setup(trace_name
);
457 ERR("Trace not found %s", trace_name
);
462 pthread_mutex_lock(<t_transport_mutex
);
463 cds_list_for_each_entry(tran_iter
, <t_transport_list
, node
) {
464 if (!strcmp(tran_iter
->name
, trace_type
)) {
465 transport
= tran_iter
;
469 pthread_mutex_unlock(<t_transport_mutex
);
472 ERR("Transport %s is not present", trace_type
);
477 trace
->transport
= transport
;
484 int ltt_trace_set_channel_subbufsize(const char *trace_name
,
485 const char *channel_name
, unsigned int size
)
488 struct ust_trace
*trace
;
493 trace
= _ltt_trace_find_setup(trace_name
);
495 ERR("Trace not found %s", trace_name
);
500 index
= ltt_channels_get_index_from_name(channel_name
);
502 ERR("Channel %s not found", channel_name
);
506 trace
->channels
[index
].subbuf_size
= size
;
513 int ltt_trace_set_channel_subbufcount(const char *trace_name
,
514 const char *channel_name
, unsigned int cnt
)
517 struct ust_trace
*trace
;
522 trace
= _ltt_trace_find_setup(trace_name
);
524 ERR("Trace not found %s", trace_name
);
529 index
= ltt_channels_get_index_from_name(channel_name
);
531 ERR("Channel %s not found", channel_name
);
535 trace
->channels
[index
].subbuf_cnt
= cnt
;
542 int ltt_trace_set_channel_enable(const char *trace_name
,
543 const char *channel_name
, unsigned int enable
)
546 struct ust_trace
*trace
;
551 trace
= _ltt_trace_find_setup(trace_name
);
553 ERR("Trace not found %s", trace_name
);
559 * Datas in metadata channel(marker info) is necessary to be able to
560 * read the trace, we always enable this channel.
562 if (!enable
&& !strcmp(channel_name
, "metadata")) {
563 ERR("Trying to disable metadata channel");
568 index
= ltt_channels_get_index_from_name(channel_name
);
570 ERR("Channel %s not found", channel_name
);
575 trace
->channels
[index
].active
= enable
;
582 int ltt_trace_set_channel_overwrite(const char *trace_name
,
583 const char *channel_name
, unsigned int overwrite
)
586 struct ust_trace
*trace
;
591 trace
= _ltt_trace_find_setup(trace_name
);
593 ERR("Trace not found %s", trace_name
);
599 * Always put the metadata channel in non-overwrite mode :
600 * This is a very low traffic channel and it can't afford to have its
601 * data overwritten : this data (marker info) is necessary to be
602 * able to read the trace.
604 if (overwrite
&& !strcmp(channel_name
, "metadata")) {
605 ERR("Trying to set metadata channel to overwrite mode");
610 index
= ltt_channels_get_index_from_name(channel_name
);
612 ERR("Channel %s not found", channel_name
);
617 trace
->channels
[index
].overwrite
= overwrite
;
624 int ltt_trace_alloc(const char *trace_name
)
627 struct ust_trace
*trace
;
628 unsigned int subbuf_size
, subbuf_cnt
;
629 //ust// unsigned long flags;
631 const char *channel_name
;
635 if (_ltt_trace_find(trace_name
)) { /* Trace already allocated */
640 trace
= _ltt_trace_find_setup(trace_name
);
642 ERR("Trace not found %s", trace_name
);
647 urcu_ref_init(&trace
->urcu_ref
);
648 urcu_ref_init(&trace
->ltt_transport_urcu_ref
);
649 //ust// init_waitqueue_head(&trace->urcu_ref_wq);
651 //ust// get_trace_clock();
652 trace
->freq_scale
= trace_clock_freq_scale();
654 if (!trace
->transport
) {
655 ERR("Transport is not set");
657 goto transport_error
;
659 //ust// if (!try_module_get(trace->transport->owner)) {
660 //ust// ERR("Can't lock transport module");
661 //ust// err = -ENODEV;
662 //ust// goto transport_error;
664 trace
->ops
= &trace
->transport
->ops
;
666 //ust// err = trace->ops->create_dirs(trace);
668 //ust// ERR("Can't create dir for trace %s", trace_name);
669 //ust// goto dirs_error;
672 //ust// local_irq_save(flags);
673 trace
->start_freq
= trace_clock_frequency();
674 trace
->start_tsc
= trace_clock_read64();
675 gettimeofday(&trace
->start_time
, NULL
); //ust// changed /* FIXME: is this ok? */
676 //ust// local_irq_restore(flags);
678 for (chan
= 0; chan
< trace
->nr_channels
; chan
++) {
679 if (!(trace
->channels
[chan
].active
))
682 channel_name
= ltt_channels_get_name_from_index(chan
);
683 WARN_ON(!channel_name
);
684 subbuf_size
= trace
->channels
[chan
].subbuf_size
;
685 subbuf_cnt
= trace
->channels
[chan
].subbuf_cnt
;
686 prepare_chan_size_num(&subbuf_size
, &subbuf_cnt
);
687 err
= trace
->ops
->create_channel(trace_name
, trace
,
689 &trace
->channels
[chan
],
692 trace
->channels
[chan
].overwrite
);
694 ERR("Cannot create channel %s", channel_name
);
695 goto create_channel_error
;
699 cds_list_del(&trace
->list
);
700 //ust// if (cds_list_empty(<t_traces.head)) {
701 //ust// mod_timer(<t_async_wakeup_timer,
702 //ust// jiffies + LTT_PERCPU_TIMER_INTERVAL);
703 //ust// set_kernel_trace_flag_all_tasks();
705 cds_list_add_rcu(&trace
->list
, <t_traces
.head
);
706 //ust// synchronize_sched();
712 create_channel_error
:
713 for (chan
--; chan
>= 0; chan
--)
714 if (trace
->channels
[chan
].active
)
715 trace
->ops
->remove_channel(&trace
->channels
[chan
]);
718 //ust// module_put(trace->transport->owner);
720 //ust// put_trace_clock();
727 * It is worked as a wrapper for current version of ltt_control.ko.
728 * We will make a new ltt_control based on debugfs, and control each channel's
731 //ust// static int ltt_trace_create(const char *trace_name, const char *trace_type,
732 //ust// enum trace_mode mode,
733 //ust// unsigned int subbuf_size_low, unsigned int n_subbufs_low,
734 //ust// unsigned int subbuf_size_med, unsigned int n_subbufs_med,
735 //ust// unsigned int subbuf_size_high, unsigned int n_subbufs_high)
739 //ust// err = ltt_trace_setup(trace_name);
740 //ust// if (IS_ERR_VALUE(err))
743 //ust// err = ltt_trace_set_type(trace_name, trace_type);
744 //ust// if (IS_ERR_VALUE(err))
747 //ust// err = ltt_trace_alloc(trace_name);
748 //ust// if (IS_ERR_VALUE(err))
754 /* Must be called while sure that trace is in the list. */
755 static int _ltt_trace_destroy(struct ust_trace
*trace
)
764 ERR("Can't destroy trace %s : tracer is active", trace
->trace_name
);
768 /* Everything went fine */
769 cds_list_del_rcu(&trace
->list
);
771 if (cds_list_empty(<t_traces
.head
)) {
772 //ust// clear_kernel_trace_flag_all_tasks();
774 * We stop the asynchronous delivery of reader wakeup, but
775 * we must make one last check for reader wakeups pending
776 * later in __ltt_trace_destroy.
778 //ust// del_timer_sync(<t_async_wakeup_timer);
788 /* Sleepable part of the destroy */
789 static void __ltt_trace_destroy(struct ust_trace
*trace
, int drop
)
792 struct ust_channel
*chan
;
795 for (i
= 0; i
< trace
->nr_channels
; i
++) {
796 chan
= &trace
->channels
[i
];
798 trace
->ops
->finish_channel(chan
);
803 * The currently destroyed trace is not in the trace list anymore,
804 * so it's safe to call the async wakeup ourself. It will deliver
805 * the last subbuffers.
807 trace_async_wakeup(trace
);
809 for (i
= 0; i
< trace
->nr_channels
; i
++) {
810 chan
= &trace
->channels
[i
];
812 trace
->ops
->remove_channel(chan
);
815 urcu_ref_put(&trace
->ltt_transport_urcu_ref
, ltt_release_transport
);
817 //ust// module_put(trace->transport->owner);
820 * Wait for lttd readers to release the files, therefore making sure
821 * the last subbuffers have been read.
823 //ust// if (atomic_read(&trace->kref.refcount) > 1) {
825 //ust// __wait_event_interruptible(trace->kref_wq,
826 //ust// (atomic_read(&trace->kref.refcount) == 1), ret);
828 urcu_ref_put(&trace
->urcu_ref
, ltt_release_trace
);
/*
 * Destroy a trace by name: if it is on the active list, unlink it under the
 * traces lock then perform the sleepable teardown outside the lock; if it is
 * still on the setup list, simply free it. Returns 0 on success, negative
 * errno on failure.
 */
int ltt_trace_destroy(const char *trace_name, int drop)
{
	int err = 0;
	struct ust_trace *trace;

	ltt_lock_traces();

	trace = _ltt_trace_find(trace_name);
	if (trace) {
		err = _ltt_trace_destroy(trace);
		if (err)
			goto error;

		ltt_unlock_traces();

		__ltt_trace_destroy(trace, drop);
//ust//		put_trace_clock();

		return 0;
	}

	trace = _ltt_trace_find_setup(trace_name);
	if (trace) {
		_ltt_trace_free(trace);
		ltt_unlock_traces();
		return 0;
	}

	err = -ENOENT;

	/* Error handling */
error:
	ltt_unlock_traces();
	return err;
}
867 /* must be called from within a traces lock. */
868 static int _ltt_trace_start(struct ust_trace
*trace
)
877 DBG("Tracing already active for trace %s", trace
->trace_name
);
878 //ust// if (!try_module_get(ltt_run_filter_owner)) {
879 //ust// err = -ENODEV;
880 //ust// ERR("Cannot lock filter module");
881 //ust// goto get_ltt_run_filter_error;
884 /* Read by trace points without protection : be careful */
885 ltt_traces
.num_active_traces
++;
889 //ust// get_ltt_run_filter_error:
/*
 * Activate tracing for a named trace, then run the marker state dump and the
 * registered state dump functor so the trace is self-describing.
 * Returns 0 on success, negative errno on failure.
 */
int ltt_trace_start(const char *trace_name)
{
	int err = 0;
	struct ust_trace *trace;

	ltt_lock_traces();

	trace = _ltt_trace_find(trace_name);
	err = _ltt_trace_start(trace);
	if (err)
		goto no_trace;

	ltt_unlock_traces();

	/*
	 * Call the process-wide state dump.
	 * Notice that there is no protection on the trace : that's exactly
	 * why we iterate on the list and check for trace equality instead of
	 * directly using this trace handle inside the logging function: we want
	 * to record events only in a single trace in the trace session list.
	 */

	ltt_dump_marker_state(trace);

//ust//	if (!try_module_get(ltt_statedump_owner)) {
//ust//		err = -ENODEV;
//ust//		ERR("Cannot lock state dump module");
//ust//	} else {
	ltt_statedump_functor(trace);
//ust//		module_put(ltt_statedump_owner);
//ust//	}

	return err;

	/* Error handling */
no_trace:
	ltt_unlock_traces();
	return err;
}
934 /* must be called from within traces lock */
935 static int _ltt_trace_stop(struct ust_trace
*trace
)
944 DBG("LTT : Tracing not active for trace %s", trace
->trace_name
);
947 ltt_traces
.num_active_traces
--;
948 //ust// synchronize_sched(); /* Wait for each tracing to be finished */
950 //ust// module_put(ltt_run_filter_owner);
951 /* Everything went fine */
/*
 * Deactivate tracing for a named trace under the traces lock.
 * Returns 0 on success, negative errno on failure.
 */
int ltt_trace_stop(const char *trace_name)
{
	int err = 0;
	struct ust_trace *trace;

	ltt_lock_traces();
	trace = _ltt_trace_find(trace_name);
	err = _ltt_trace_stop(trace);
	ltt_unlock_traces();

	return err;
}
972 * ltt_filter_control - Trace filter control in-kernel API
973 * @msg: Action to perform on the filter
974 * @trace_name: Trace on which the action must be done
976 int ltt_filter_control(enum ltt_filter_control_msg msg
, const char *trace_name
)
979 struct ust_trace
*trace
;
981 DBG("ltt_filter_control : trace %s", trace_name
);
983 trace
= _ltt_trace_find(trace_name
);
985 ERR("Trace does not exist. Cannot proxy control request");
989 //ust// if (!try_module_get(ltt_filter_control_owner)) {
990 //ust// err = -ENODEV;
991 //ust// goto get_module_error;
994 case LTT_FILTER_DEFAULT_ACCEPT
:
995 DBG("Proxy filter default accept %s", trace_name
);
996 err
= (*ltt_filter_control_functor
)(msg
, trace
);
998 case LTT_FILTER_DEFAULT_REJECT
:
999 DBG("Proxy filter default reject %s", trace_name
);
1000 err
= (*ltt_filter_control_functor
)(msg
, trace
);
1005 //ust// module_put(ltt_filter_control_owner);
1007 //ust// get_module_error:
1009 ltt_unlock_traces();