X-Git-Url: https://git.lttng.org/?a=blobdiff_plain;f=libust%2Ftracer.c;h=ffcc2e74926996257ef7531fa622c1ee9179c0c9;hb=8d6300d3b3cb0219e1109e931a2219dbd812b24d;hp=60fc8b7a182270110e9408ec7d96fd72e655d8e0;hpb=8649cd59b1c15b8d4097cf63f733e8007d76ad13;p=ust.git

diff --git a/libust/tracer.c b/libust/tracer.c
index 60fc8b7..ffcc2e7 100644
--- a/libust/tracer.c
+++ b/libust/tracer.c
@@ -34,7 +34,8 @@
 #include
 #include
 
-#include
+#include
+
 #include "tracercore.h"
 #include "tracer.h"
 #include "usterr.h"
@@ -68,11 +69,7 @@ int (*ltt_statedump_functor)(struct ust_trace *trace) = ltt_statedump_default;
 struct module *ltt_statedump_owner;
 
-struct chan_info_struct {
-	const char *name;
-	unsigned int def_subbufsize;
-	unsigned int def_subbufcount;
-} chan_infos[] = {
+struct chan_info_struct chan_infos[] = {
 	[LTT_CHANNEL_METADATA] = {
 		LTT_METADATA_CHANNEL,
 		LTT_DEFAULT_SUBBUF_SIZE_LOW,
@@ -184,8 +181,9 @@ static enum ltt_channels get_channel_type_from_name(const char *name)
 //ust//
 //ust// }
 
-static LIST_HEAD(ltt_transport_list);
-
+static CDS_LIST_HEAD(ltt_transport_list);
+/* transport mutex, nests inside traces mutex (ltt_lock_traces) */
+static DEFINE_MUTEX(ltt_transport_mutex);
 /**
  * ltt_transport_register - LTT transport registration
  * @transport: transport structure
@@ -207,9 +205,9 @@ void ltt_transport_register(struct ltt_transport *transport)
 	 */
 //ust//	vmalloc_sync_all();
 
-	ltt_lock_traces();
-	list_add_tail(&transport->node, &ltt_transport_list);
-	ltt_unlock_traces();
+	pthread_mutex_lock(&ltt_transport_mutex);
+	cds_list_add_tail(&transport->node, &ltt_transport_list);
+	pthread_mutex_unlock(&ltt_transport_mutex);
 }
 
 /**
@@ -218,9 +216,9 @@ void ltt_transport_register(struct ltt_transport *transport)
  */
 void ltt_transport_unregister(struct ltt_transport *transport)
 {
-	ltt_lock_traces();
-	list_del(&transport->node);
-	ltt_unlock_traces();
+	pthread_mutex_lock(&ltt_transport_mutex);
+	cds_list_del(&transport->node);
+	pthread_mutex_unlock(&ltt_transport_mutex);
 }
 
 static inline int is_channel_overwrite(enum ltt_channels chan,
@@ -278,7 +276,7 @@ static void trace_async_wakeup(struct ust_trace *trace)
 //ust// #else
 //ust// 	ltt_lock_traces();
 //ust// #endif
-//ust// 	list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
+//ust// 	cds_list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
 //ust// 		trace_async_wakeup(trace);
 //ust// 	}
 //ust// #ifndef CONFIG_PREEMPT_RT
@@ -300,7 +298,7 @@ struct ust_trace *_ltt_trace_find(const char *trace_name)
 {
 	struct ust_trace *trace;
 
-	list_for_each_entry(trace, &ltt_traces.head, list)
+	cds_list_for_each_entry(trace, &ltt_traces.head, list)
 		if (!strncmp(trace->trace_name, trace_name, NAME_MAX))
 			return trace;
 
@@ -316,7 +314,7 @@ struct ust_trace *_ltt_trace_find_setup(const char *trace_name)
 {
 	struct ust_trace *trace;
 
-	list_for_each_entry(trace, &ltt_traces.setup_head, list)
+	cds_list_for_each_entry(trace, &ltt_traces.setup_head, list)
 		if (!strncmp(trace->trace_name, trace_name, NAME_MAX))
 			return trace;
 
@@ -327,7 +325,7 @@ struct ust_trace *_ltt_trace_find_setup(const char *trace_name)
  * ltt_release_transport - Release an LTT transport
 * @kref : reference count on the transport
 */
-void ltt_release_transport(struct kref *kref)
+void ltt_release_transport(struct urcu_ref *urcu_ref)
 {
 //ust//	struct ust_trace *trace = container_of(kref,
 //ust//			struct ust_trace, ltt_transport_kref);
@@ -338,10 +336,10 @@ void ltt_release_transport(struct kref *kref)
 * ltt_release_trace - Release a LTT trace
 * @kref : reference count on the trace
 */
-void ltt_release_trace(struct kref *kref)
+void ltt_release_trace(struct urcu_ref *urcu_ref)
 {
-	struct ust_trace *trace = container_of(kref,
-			struct ust_trace, kref);
+	struct ust_trace *trace = _ust_container_of(urcu_ref,
+			struct ust_trace, urcu_ref);
 	ltt_channels_trace_free(trace->channels);
 	free(trace);
 }
@@ -420,7 +418,7 @@ int _ltt_trace_setup(const char *trace_name)
 			chan_infos[chantype].def_subbufcount;
 	}
 
-	list_add(&new_trace->list, &ltt_traces.setup_head);
+	cds_list_add(&new_trace->list, &ltt_traces.setup_head);
 	return 0;
 
 trace_free:
@@ -442,7 +440,7 @@ int ltt_trace_setup(const char *trace_name)
 /* must be called from within a traces lock. */
 static void _ltt_trace_free(struct ust_trace *trace)
 {
-	list_del(&trace->list);
+	cds_list_del(&trace->list);
 	free(trace);
 }
 
@@ -461,12 +459,15 @@ int ltt_trace_set_type(const char *trace_name, const char *trace_type)
 		goto traces_error;
 	}
 
-	list_for_each_entry(tran_iter, &ltt_transport_list, node) {
+	pthread_mutex_lock(&ltt_transport_mutex);
+	cds_list_for_each_entry(tran_iter, &ltt_transport_list, node) {
 		if (!strcmp(tran_iter->name, trace_type)) {
 			transport = tran_iter;
 			break;
 		}
 	}
+	pthread_mutex_unlock(&ltt_transport_mutex);
+
 	if (!transport) {
 		ERR("Transport %s is not present", trace_type);
 		err = -EINVAL;
@@ -643,9 +644,9 @@ int ltt_trace_alloc(const char *trace_name)
 		goto traces_error;
 	}
 
-	kref_init(&trace->kref);
-	kref_init(&trace->ltt_transport_kref);
-//ust//	init_waitqueue_head(&trace->kref_wq);
+	urcu_ref_init(&trace->urcu_ref);
+	urcu_ref_init(&trace->ltt_transport_urcu_ref);
+//ust//	init_waitqueue_head(&trace->urcu_ref_wq);
 	trace->active = 0;
 //ust//	get_trace_clock();
 	trace->freq_scale = trace_clock_freq_scale();
@@ -695,13 +696,13 @@ int ltt_trace_alloc(const char *trace_name)
 		}
 	}
 
-	list_del(&trace->list);
-//ust//	if (list_empty(&ltt_traces.head)) {
+	cds_list_del(&trace->list);
+//ust//	if (cds_list_empty(&ltt_traces.head)) {
 //ust//		mod_timer(&ltt_async_wakeup_timer,
 //ust//			jiffies + LTT_PERCPU_TIMER_INTERVAL);
 //ust//		set_kernel_trace_flag_all_tasks();
 //ust//	}
-	list_add_rcu(&trace->list, &ltt_traces.head);
+	cds_list_add_rcu(&trace->list, &ltt_traces.head);
 //ust//	synchronize_sched();
 
 	ltt_unlock_traces();
@@ -765,9 +766,9 @@ static int _ltt_trace_destroy(struct ust_trace *trace)
 		goto active_error;
 	}
 	/* Everything went fine */
-	list_del_rcu(&trace->list);
+	cds_list_del_rcu(&trace->list);
 	synchronize_rcu();
-	if (list_empty(&ltt_traces.head)) {
+	if (cds_list_empty(&ltt_traces.head)) {
 //ust//		clear_kernel_trace_flag_all_tasks();
 	/*
 	 * We stop the asynchronous delivery of reader wakeup, but
 	 *
@@ -798,9 +799,6 @@ static void __ltt_trace_destroy(struct ust_trace *trace, int drop)
 		}
 	}
 
-	return; /* FIXME: temporary for ust */
-//ust//	flush_scheduled_work();
-
 	/*
 	 * The currently destroyed trace is not in the trace list anymore,
 	 * so it's safe to call the async wakeup ourself. It will deliver
@@ -814,7 +812,7 @@ static void __ltt_trace_destroy(struct ust_trace *trace, int drop)
 		trace->ops->remove_channel(chan);
 	}
 
-	kref_put(&trace->ltt_transport_kref, ltt_release_transport);
+	urcu_ref_put(&trace->ltt_transport_urcu_ref, ltt_release_transport);
 
 //ust//	module_put(trace->transport->owner);
 
@@ -827,7 +825,7 @@ static void __ltt_trace_destroy(struct ust_trace *trace, int drop)
 //ust//		__wait_event_interruptible(trace->kref_wq,
 //ust//			(atomic_read(&trace->kref.refcount) == 1), ret);
 //ust//	}
-	kref_put(&trace->kref, ltt_release_trace);
+	urcu_ref_put(&trace->urcu_ref, ltt_release_trace);
 }
 
 int ltt_trace_destroy(const char *trace_name, int drop)
@@ -908,11 +906,11 @@ int ltt_trace_start(const char *trace_name)
 	ltt_unlock_traces();
 
 	/*
-	 * Call the kernel state dump.
-	 * Events will be mixed with real kernel events, it's ok.
+	 * Call the process-wide state dump.
 	 * Notice that there is no protection on the trace : that's exactly
 	 * why we iterate on the list and check for trace equality instead of
-	 * directly using this trace handle inside the logging function.
+	 * directly using this trace handle inside the logging function: we want
+	 * to record events only in a single trace in the trace session list.
 	 */
 	ltt_dump_marker_state(trace);
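
The patch is a mechanical move from Linux-kernel-only primitives to their userspace counterparts: LIST_HEAD()/list_*() become liburcu's CDS_LIST_HEAD()/cds_list_*(), struct kref with kref_init()/kref_put() becomes struct urcu_ref with urcu_ref_init()/urcu_ref_put(), and the transport list gets its own mutex (locked with pthread_mutex_lock() and documented as nesting inside the traces mutex) instead of reusing ltt_lock_traces(). The standalone sketch below shows the same liburcu/pthread idioms in isolation; the "transport"/"trace" structures, the function names and the main() driver are invented for illustration and are not UST code.

/*
 * Sketch only: liburcu list + refcount idioms as used by the patch above.
 * Requires the liburcu headers; build roughly as: gcc sketch.c -lpthread
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <urcu/list.h>		/* CDS_LIST_HEAD, cds_list_add_tail, ... */
#include <urcu/ref.h>		/* struct urcu_ref, urcu_ref_init, urcu_ref_put */
#include <urcu/compiler.h>	/* caa_container_of */

/* Hypothetical stand-in for struct ltt_transport. */
struct transport {
	const char *name;
	struct cds_list_head node;
};

/* Registration list plus its own mutex, as in ltt_transport_register(). */
static CDS_LIST_HEAD(transport_list);
static pthread_mutex_t transport_mutex = PTHREAD_MUTEX_INITIALIZER;

static void transport_register(struct transport *t)
{
	pthread_mutex_lock(&transport_mutex);
	cds_list_add_tail(&t->node, &transport_list);
	pthread_mutex_unlock(&transport_mutex);
}

static struct transport *transport_find(const char *name)
{
	struct transport *iter, *found = NULL;

	pthread_mutex_lock(&transport_mutex);
	cds_list_for_each_entry(iter, &transport_list, node) {
		if (!strcmp(iter->name, name)) {
			found = iter;
			break;
		}
	}
	pthread_mutex_unlock(&transport_mutex);
	return found;
}

/* Hypothetical stand-in for struct ust_trace: urcu_ref replaces kref. */
struct trace {
	struct urcu_ref ref;
	char name[64];
};

/* Release callback passed to urcu_ref_put(), like ltt_release_trace(). */
static void trace_release(struct urcu_ref *ref)
{
	struct trace *t = caa_container_of(ref, struct trace, ref);

	printf("releasing trace %s\n", t->name);
	free(t);
}

int main(void)
{
	static struct transport tp = { .name = "example-relay" };
	struct trace *t = calloc(1, sizeof(*t));

	if (!t)
		return 1;
	transport_register(&tp);
	printf("found transport: %s\n", transport_find("example-relay")->name);

	strncpy(t->name, "auto", sizeof(t->name) - 1);
	urcu_ref_init(&t->ref);			/* refcount starts at 1 */
	urcu_ref_put(&t->ref, trace_release);	/* drops to 0 -> trace_release() */
	return 0;
}

One design point visible in the hunk around ltt_trace_set_type(): once the transport list has its own narrower mutex, every walk of that list (the cds_list_for_each_entry() lookup) takes it explicitly, and the comment added next to DEFINE_MUTEX(ltt_transport_mutex) records the nesting order relative to the traces mutex so the two locks are always taken in a consistent order.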