libust: New transport mutex v2
diff --git a/libust/tracer.c b/libust/tracer.c
index 52928b836ba290c9d67d1ce45cc63377b32ba693..ffcc2e74926996257ef7531fa622c1ee9179c0c9 100644
--- a/libust/tracer.c
+++ b/libust/tracer.c
@@ -34,7 +34,8 @@
 #include <urcu-bp.h>
 #include <urcu/rculist.h>
 
-#include <ust/kernelcompat.h>
+#include <ust/clock.h>
+
 #include "tracercore.h"
 #include "tracer.h"
 #include "usterr.h"
@@ -68,11 +69,7 @@ int (*ltt_statedump_functor)(struct ust_trace *trace) =
                                        ltt_statedump_default;
 struct module *ltt_statedump_owner;
 
-struct chan_info_struct {
-       const char *name;
-       unsigned int def_subbufsize;
-       unsigned int def_subbufcount;
-} chan_infos[] = {
+struct chan_info_struct chan_infos[] = {
        [LTT_CHANNEL_METADATA] = {
                LTT_METADATA_CHANNEL,
                LTT_DEFAULT_SUBBUF_SIZE_LOW,
@@ -184,8 +181,9 @@ static enum ltt_channels get_channel_type_from_name(const char *name)
 //ust// 
 //ust// }
 
-static LIST_HEAD(ltt_transport_list);
-
+static CDS_LIST_HEAD(ltt_transport_list);
+/* transport mutex, nests inside traces mutex (ltt_lock_traces) */
+static DEFINE_MUTEX(ltt_transport_mutex);
 /**
  * ltt_transport_register - LTT transport registration
  * @transport: transport structure
@@ -207,9 +205,9 @@ void ltt_transport_register(struct ltt_transport *transport)
         */
 //ust//        vmalloc_sync_all();
 
-       ltt_lock_traces();
-       list_add_tail(&transport->node, &ltt_transport_list);
-       ltt_unlock_traces();
+       pthread_mutex_lock(&ltt_transport_mutex);
+       cds_list_add_tail(&transport->node, &ltt_transport_list);
+       pthread_mutex_unlock(&ltt_transport_mutex);
 }
 
 /**
@@ -218,9 +216,9 @@ void ltt_transport_register(struct ltt_transport *transport)
  */
 void ltt_transport_unregister(struct ltt_transport *transport)
 {
-       ltt_lock_traces();
-       list_del(&transport->node);
-       ltt_unlock_traces();
+       pthread_mutex_lock(&ltt_transport_mutex);
+       cds_list_del(&transport->node);
+       pthread_mutex_unlock(&ltt_transport_mutex);
 }
 
 static inline int is_channel_overwrite(enum ltt_channels chan,
@@ -278,7 +276,7 @@ static void trace_async_wakeup(struct ust_trace *trace)
 //ust// #else
 //ust//        ltt_lock_traces();
 //ust// #endif
-//ust//        list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
+//ust//        cds_list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
 //ust//                trace_async_wakeup(trace);
 //ust//        }
 //ust// #ifndef CONFIG_PREEMPT_RT
@@ -300,7 +298,7 @@ struct ust_trace *_ltt_trace_find(const char *trace_name)
 {
        struct ust_trace *trace;
 
-       list_for_each_entry(trace, &ltt_traces.head, list)
+       cds_list_for_each_entry(trace, &ltt_traces.head, list)
                if (!strncmp(trace->trace_name, trace_name, NAME_MAX))
                        return trace;
 
@@ -316,7 +314,7 @@ struct ust_trace *_ltt_trace_find_setup(const char *trace_name)
 {
        struct ust_trace *trace;
 
-       list_for_each_entry(trace, &ltt_traces.setup_head, list)
+       cds_list_for_each_entry(trace, &ltt_traces.setup_head, list)
                if (!strncmp(trace->trace_name, trace_name, NAME_MAX))
                        return trace;
 
@@ -327,7 +325,7 @@ struct ust_trace *_ltt_trace_find_setup(const char *trace_name)
  * ltt_release_transport - Release an LTT transport
  * @kref : reference count on the transport
  */
-void ltt_release_transport(struct kref *kref)
+void ltt_release_transport(struct urcu_ref *urcu_ref)
 {
 //ust//        struct ust_trace *trace = container_of(kref,
 //ust//                        struct ust_trace, ltt_transport_kref);
@@ -338,12 +336,12 @@ void ltt_release_transport(struct kref *kref)
  * ltt_release_trace - Release a LTT trace
  * @kref : reference count on the trace
  */
-void ltt_release_trace(struct kref *kref)
+void ltt_release_trace(struct urcu_ref *urcu_ref)
 {
-       struct ust_trace *trace = container_of(kref,
-                       struct ust_trace, kref);
+       struct ust_trace *trace = _ust_container_of(urcu_ref,
+                       struct ust_trace, urcu_ref);
        ltt_channels_trace_free(trace->channels);
-       kfree(trace);
+       free(trace);
 }
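
The hunk above replaces the kernel-style kref with liburcu's urcu_ref counter and its release callback. As a rough, self-contained illustration of that API, assuming a made-up my_trace type rather than the real struct ust_trace, the pattern looks roughly like this:

#include <stdio.h>
#include <stdlib.h>
#include <urcu/ref.h>       /* struct urcu_ref, urcu_ref_init(), urcu_ref_put() */
#include <urcu/compiler.h>  /* caa_container_of() */

struct my_trace {                    /* made-up type, not the real ust_trace */
        struct urcu_ref ref;
        int id;
};

/* Release callback run by urcu_ref_put() when the count drops to zero. */
static void my_trace_release(struct urcu_ref *ref)
{
        struct my_trace *t = caa_container_of(ref, struct my_trace, ref);

        printf("releasing trace %d\n", t->id);
        free(t);
}

int main(void)
{
        struct my_trace *t = calloc(1, sizeof(*t));

        if (!t)
                return 1;
        t->id = 1;
        urcu_ref_init(&t->ref);                  /* refcount starts at 1 */
        urcu_ref_put(&t->ref, my_trace_release); /* last ref: callback frees it */
        return 0;
}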
 
 static inline void prepare_chan_size_num(unsigned int *subbuf_size,
@@ -381,7 +379,7 @@ int _ltt_trace_setup(const char *trace_name)
                goto traces_error;
        }
 
-       new_trace = kzalloc(sizeof(struct ust_trace), GFP_KERNEL);
+       new_trace = zmalloc(sizeof(struct ust_trace));
        if (!new_trace) {
                ERR("Unable to allocate memory for trace %s", trace_name);
                err = -ENOMEM;
@@ -389,7 +387,8 @@ int _ltt_trace_setup(const char *trace_name)
        }
        strncpy(new_trace->trace_name, trace_name, NAME_MAX);
        new_trace->channels = ltt_channels_trace_alloc(&new_trace->nr_channels,
-                                                      0, 1);
+                               ust_channels_overwrite_by_default,
+                               ust_channels_request_collection_by_default, 1);
        if (!new_trace->channels) {
                ERR("Unable to allocate memory for chaninfo  %s\n", trace_name);
                err = -ENOMEM;
@@ -419,11 +418,11 @@ int _ltt_trace_setup(const char *trace_name)
                        chan_infos[chantype].def_subbufcount;
        }
 
-       list_add(&new_trace->list, &ltt_traces.setup_head);
+       cds_list_add(&new_trace->list, &ltt_traces.setup_head);
        return 0;
 
 trace_free:
-       kfree(new_trace);
+       free(new_trace);
 traces_error:
        return err;
 }
@@ -441,8 +440,8 @@ int ltt_trace_setup(const char *trace_name)
 /* must be called from within a traces lock. */
 static void _ltt_trace_free(struct ust_trace *trace)
 {
-       list_del(&trace->list);
-       kfree(trace);
+       cds_list_del(&trace->list);
+       free(trace);
 }
 
 int ltt_trace_set_type(const char *trace_name, const char *trace_type)
@@ -460,12 +459,15 @@ int ltt_trace_set_type(const char *trace_name, const char *trace_type)
                goto traces_error;
        }
 
-       list_for_each_entry(tran_iter, &ltt_transport_list, node) {
+       pthread_mutex_lock(&ltt_transport_mutex);
+       cds_list_for_each_entry(tran_iter, &ltt_transport_list, node) {
                if (!strcmp(tran_iter->name, trace_type)) {
                        transport = tran_iter;
                        break;
                }
        }
+       pthread_mutex_unlock(&ltt_transport_mutex);
+
        if (!transport) {
                ERR("Transport %s is not present", trace_type);
                err = -EINVAL;
@@ -642,9 +644,9 @@ int ltt_trace_alloc(const char *trace_name)
                goto traces_error;
        }
 
-       kref_init(&trace->kref);
-       kref_init(&trace->ltt_transport_kref);
-//ust//        init_waitqueue_head(&trace->kref_wq);
+       urcu_ref_init(&trace->urcu_ref);
+       urcu_ref_init(&trace->ltt_transport_urcu_ref);
+//ust//        init_waitqueue_head(&trace->urcu_ref_wq);
        trace->active = 0;
 //ust//        get_trace_clock();
        trace->freq_scale = trace_clock_freq_scale();
@@ -694,13 +696,13 @@ int ltt_trace_alloc(const char *trace_name)
                }
        }
 
-       list_del(&trace->list);
-//ust//        if (list_empty(&ltt_traces.head)) {
+       cds_list_del(&trace->list);
+//ust//        if (cds_list_empty(&ltt_traces.head)) {
 //ust//                mod_timer(&ltt_async_wakeup_timer,
 //ust//                                jiffies + LTT_PERCPU_TIMER_INTERVAL);
 //ust//                set_kernel_trace_flag_all_tasks();
 //ust//        }
-       list_add_rcu(&trace->list, &ltt_traces.head);
+       cds_list_add_rcu(&trace->list, &ltt_traces.head);
 //ust//        synchronize_sched();
 
        ltt_unlock_traces();
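
The trace is published into ltt_traces.head through the userspace RCU list API: writers add and remove entries under the traces lock, while readers walk the list inside an RCU read-side critical section. A minimal sketch of that pattern with the urcu-bp flavour, using illustrative names (item, items, publish, retire) rather than the actual tracer structures:

#include <pthread.h>
#include <stdio.h>
#include <urcu-bp.h>        /* rcu_read_lock()/rcu_read_unlock(), synchronize_rcu() */
#include <urcu/rculist.h>   /* cds_list_add_rcu(), cds_list_for_each_entry_rcu() */

struct item {                           /* illustrative element type */
        int value;
        struct cds_list_head list;
};

static CDS_LIST_HEAD(items);            /* RCU-protected list head */
static pthread_mutex_t items_mutex = PTHREAD_MUTEX_INITIALIZER;

void publish(struct item *it)
{
        pthread_mutex_lock(&items_mutex);       /* writers serialize on a mutex */
        cds_list_add_rcu(&it->list, &items);
        pthread_mutex_unlock(&items_mutex);
}

void retire(struct item *it)
{
        pthread_mutex_lock(&items_mutex);
        cds_list_del_rcu(&it->list);
        pthread_mutex_unlock(&items_mutex);
        synchronize_rcu();                      /* wait out readers before freeing */
}

void dump_items(void)
{
        struct item *it;

        rcu_read_lock();                        /* readers need no mutex, only RCU */
        cds_list_for_each_entry_rcu(it, &items, list)
                printf("%d\n", it->value);
        rcu_read_unlock();
}

The urcu-bp flavour registers reader threads automatically, which is why it suits a tracing library loaded into arbitrary processes.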
@@ -764,9 +766,9 @@ static int _ltt_trace_destroy(struct ust_trace *trace)
                goto active_error;
        }
        /* Everything went fine */
-       list_del_rcu(&trace->list);
+       cds_list_del_rcu(&trace->list);
        synchronize_rcu();
-       if (list_empty(&ltt_traces.head)) {
+       if (cds_list_empty(&ltt_traces.head)) {
 //ust//                clear_kernel_trace_flag_all_tasks();
                /*
                 * We stop the asynchronous delivery of reader wakeup, but
@@ -784,20 +786,19 @@ traces_error:
 }
 
 /* Sleepable part of the destroy */
-static void __ltt_trace_destroy(struct ust_trace *trace)
+static void __ltt_trace_destroy(struct ust_trace *trace, int drop)
 {
        int i;
        struct ust_channel *chan;
 
-       for (i = 0; i < trace->nr_channels; i++) {
-               chan = &trace->channels[i];
-               if (chan->active)
-                       trace->ops->finish_channel(chan);
+       if(!drop) {
+               for (i = 0; i < trace->nr_channels; i++) {
+                       chan = &trace->channels[i];
+                       if (chan->active)
+                               trace->ops->finish_channel(chan);
+               }
        }
 
-       return; /* FIXME: temporary for ust */
-//ust//        flush_scheduled_work();
-
        /*
         * The currently destroyed trace is not in the trace list anymore,
         * so it's safe to call the async wakeup ourself. It will deliver
@@ -811,7 +812,7 @@ static void __ltt_trace_destroy(struct ust_trace *trace)
                        trace->ops->remove_channel(chan);
        }
 
-       kref_put(&trace->ltt_transport_kref, ltt_release_transport);
+       urcu_ref_put(&trace->ltt_transport_urcu_ref, ltt_release_transport);
 
 //ust//        module_put(trace->transport->owner);
 
@@ -824,10 +825,10 @@ static void __ltt_trace_destroy(struct ust_trace *trace)
 //ust//                __wait_event_interruptible(trace->kref_wq,
 //ust//                        (atomic_read(&trace->kref.refcount) == 1), ret);
 //ust//        }
-       kref_put(&trace->kref, ltt_release_trace);
+       urcu_ref_put(&trace->urcu_ref, ltt_release_trace);
 }
 
-int ltt_trace_destroy(const char *trace_name)
+int ltt_trace_destroy(const char *trace_name, int drop)
 {
        int err = 0;
        struct ust_trace *trace;
@@ -842,7 +843,7 @@ int ltt_trace_destroy(const char *trace_name)
 
                ltt_unlock_traces();
 
-               __ltt_trace_destroy(trace);
+               __ltt_trace_destroy(trace, drop);
 //ust//                put_trace_clock();
 
                return 0;
@@ -905,11 +906,11 @@ int ltt_trace_start(const char *trace_name)
        ltt_unlock_traces();
 
        /*
-        * Call the kernel state dump.
-        * Events will be mixed with real kernel events, it's ok.
+        * Call the process-wide state dump.
         * Notice that there is no protection on the trace : that's exactly
         * why we iterate on the list and check for trace equality instead of
-        * directly using this trace handle inside the logging function.
+        * directly using this trace handle inside the logging function: we want
+        * to record events only in a single trace in the trace session list.
         */
 
        ltt_dump_marker_state(trace);
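
A closing note on the locking introduced by this patch: the transport list now has its own mutex, documented above to nest inside the traces mutex (ltt_lock_traces), so code already holding the traces lock may take the transport mutex, never the reverse. A condensed sketch of that ordering, with placeholder names (transport, traces_mutex, transport_mutex, transport_find) standing in for the real helpers:

#include <pthread.h>
#include <string.h>
#include <urcu/list.h>   /* CDS_LIST_HEAD(), cds_list_add_tail(), cds_list_for_each_entry() */

struct transport {                      /* stand-in for struct ltt_transport */
        const char *name;
        struct cds_list_head node;
};

static CDS_LIST_HEAD(transport_list);
/* Lock order: traces_mutex (outer, stand-in for ltt_lock_traces()), then transport_mutex. */
static pthread_mutex_t traces_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t transport_mutex = PTHREAD_MUTEX_INITIALIZER;

void transport_register(struct transport *t)
{
        /* Registration only needs the inner lock. */
        pthread_mutex_lock(&transport_mutex);
        cds_list_add_tail(&t->node, &transport_list);
        pthread_mutex_unlock(&transport_mutex);
}

struct transport *transport_find(const char *name)
{
        struct transport *iter, *found = NULL;

        /* Lookup from trace-setup code: outer lock first, then the inner one. */
        pthread_mutex_lock(&traces_mutex);
        pthread_mutex_lock(&transport_mutex);
        cds_list_for_each_entry(iter, &transport_list, node) {
                if (!strcmp(iter->name, name)) {
                        found = iter;
                        break;
                }
        }
        pthread_mutex_unlock(&transport_mutex);
        pthread_mutex_unlock(&traces_mutex);
        return found;
}

Keeping that acquisition order fixed is what prevents deadlock between transport registration and the transport lookup done while setting a trace type.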