ust: add kernel versions for tracer.c (ltt-tracer.c) tracer.h (ltt-tracer.h) and...
authorPierre-Marc Fournier <pierre-marc.fournier@polymtl.ca>
Mon, 9 Feb 2009 20:25:23 +0000 (15:25 -0500)
committerPierre-Marc Fournier <pierre-marc.fournier@polymtl.ca>
Mon, 9 Feb 2009 20:25:23 +0000 (15:25 -0500)
libtracing/tracer.c [new file with mode: 0644]
libtracing/tracer.h [new file with mode: 0644]
libtracing/tracercore.c [new file with mode: 0644]

diff --git a/libtracing/tracer.c b/libtracing/tracer.c
new file mode 100644 (file)
index 0000000..8c3bcd8
--- /dev/null
@@ -0,0 +1,1228 @@
+/*
+ * ltt/ltt-tracer.c
+ *
+ * (C) Copyright       2005-2008 -
+ *             Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
+ *
+ * Tracing management internal kernel API. Trace buffer allocation/free, tracing
+ * start/stop.
+ *
+ * Author:
+ *     Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
+ *
+ * Inspired from LTT :
+ *  Karim Yaghmour (karim@opersys.com)
+ *  Tom Zanussi (zanussi@us.ibm.com)
+ *  Bob Wisniewski (bob@watson.ibm.com)
+ * And from K42 :
+ *  Bob Wisniewski (bob@watson.ibm.com)
+ *
+ * Changelog:
+ *  22/09/06, Move to the marker/probes mechanism.
+ *  19/10/05, Complete lockless mechanism.
+ *  27/05/05, Modular redesign and rewrite.
+ */
+
+#include <linux/time.h>
+#include <linux/ltt-tracer.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/rcupdate.h>
+#include <linux/sched.h>
+#include <linux/bitops.h>
+#include <linux/fs.h>
+#include <linux/cpu.h>
+#include <linux/kref.h>
+#include <linux/delay.h>
+#include <linux/vmalloc.h>
+#include <asm/atomic.h>
+
+static void async_wakeup(unsigned long data);
+
+/* Periodic timer that wakes sleeping readers; re-armed by async_wakeup(). */
+static DEFINE_TIMER(ltt_async_wakeup_timer, async_wakeup, 0, 0);
+
+/* Default callbacks for modules */
+/* Default filter-control callback: no filter module loaded, accept and do nothing. */
+notrace int ltt_filter_control_default(enum ltt_filter_control_msg msg,
+               struct ltt_trace_struct *trace)
+{
+       return 0;
+}
+
+/* Default state-dump callback: no statedump module loaded, report success. */
+int ltt_statedump_default(struct ltt_trace_struct *trace)
+{
+       return 0;
+}
+
+/* Callbacks for registered modules */
+
+/* Filter control entry point; replaced by ltt_module_register(). */
+int (*ltt_filter_control_functor)
+       (enum ltt_filter_control_msg msg, struct ltt_trace_struct *trace) =
+                                       ltt_filter_control_default;
+/* Module owning the filter-control callback; NULL when unset. */
+struct module *ltt_filter_control_owner;
+
+/* These function pointers are protected by a trace activation check */
+struct module *ltt_run_filter_owner;
+/* State dump entry point; replaced by ltt_module_register(). */
+int (*ltt_statedump_functor)(struct ltt_trace_struct *trace) =
+                                       ltt_statedump_default;
+struct module *ltt_statedump_owner;
+
+/*
+ * Per-channel-type tracer defaults, indexed by enum ltt_channels.
+ * High-traffic channels (kernel, fs, mm, rcu) get larger/more sub-buffers.
+ */
+struct chan_info_struct {
+       const char *name;               /* canonical channel name, NULL for default */
+       unsigned int def_subbufsize;    /* default sub-buffer size, in bytes */
+       unsigned int def_subbufcount;   /* default number of sub-buffers */
+} chan_infos[] = {
+       [LTT_CHANNEL_METADATA] = {
+               LTT_METADATA_CHANNEL,
+               LTT_DEFAULT_SUBBUF_SIZE_LOW,
+               LTT_DEFAULT_N_SUBBUFS_LOW,
+       },
+       [LTT_CHANNEL_FD_STATE] = {
+               LTT_FD_STATE_CHANNEL,
+               LTT_DEFAULT_SUBBUF_SIZE_LOW,
+               LTT_DEFAULT_N_SUBBUFS_LOW,
+       },
+       [LTT_CHANNEL_GLOBAL_STATE] = {
+               LTT_GLOBAL_STATE_CHANNEL,
+               LTT_DEFAULT_SUBBUF_SIZE_LOW,
+               LTT_DEFAULT_N_SUBBUFS_LOW,
+       },
+       [LTT_CHANNEL_IRQ_STATE] = {
+               LTT_IRQ_STATE_CHANNEL,
+               LTT_DEFAULT_SUBBUF_SIZE_LOW,
+               LTT_DEFAULT_N_SUBBUFS_LOW,
+       },
+       [LTT_CHANNEL_MODULE_STATE] = {
+               LTT_MODULE_STATE_CHANNEL,
+               LTT_DEFAULT_SUBBUF_SIZE_LOW,
+               LTT_DEFAULT_N_SUBBUFS_LOW,
+       },
+       [LTT_CHANNEL_NETIF_STATE] = {
+               LTT_NETIF_STATE_CHANNEL,
+               LTT_DEFAULT_SUBBUF_SIZE_LOW,
+               LTT_DEFAULT_N_SUBBUFS_LOW,
+       },
+       [LTT_CHANNEL_SOFTIRQ_STATE] = {
+               LTT_SOFTIRQ_STATE_CHANNEL,
+               LTT_DEFAULT_SUBBUF_SIZE_LOW,
+               LTT_DEFAULT_N_SUBBUFS_LOW,
+       },
+       [LTT_CHANNEL_SWAP_STATE] = {
+               LTT_SWAP_STATE_CHANNEL,
+               LTT_DEFAULT_SUBBUF_SIZE_LOW,
+               LTT_DEFAULT_N_SUBBUFS_LOW,
+       },
+       [LTT_CHANNEL_SYSCALL_STATE] = {
+               LTT_SYSCALL_STATE_CHANNEL,
+               LTT_DEFAULT_SUBBUF_SIZE_LOW,
+               LTT_DEFAULT_N_SUBBUFS_LOW,
+       },
+       [LTT_CHANNEL_TASK_STATE] = {
+               LTT_TASK_STATE_CHANNEL,
+               LTT_DEFAULT_SUBBUF_SIZE_LOW,
+               LTT_DEFAULT_N_SUBBUFS_LOW,
+       },
+       [LTT_CHANNEL_VM_STATE] = {
+               LTT_VM_STATE_CHANNEL,
+               LTT_DEFAULT_SUBBUF_SIZE_LOW,
+               LTT_DEFAULT_N_SUBBUFS_LOW,
+       },
+       [LTT_CHANNEL_FS] = {
+               LTT_FS_CHANNEL,
+               LTT_DEFAULT_SUBBUF_SIZE_MED,
+               LTT_DEFAULT_N_SUBBUFS_MED,
+       },
+       [LTT_CHANNEL_INPUT] = {
+               LTT_INPUT_CHANNEL,
+               LTT_DEFAULT_SUBBUF_SIZE_LOW,
+               LTT_DEFAULT_N_SUBBUFS_LOW,
+       },
+       [LTT_CHANNEL_IPC] = {
+               LTT_IPC_CHANNEL,
+               LTT_DEFAULT_SUBBUF_SIZE_LOW,
+               LTT_DEFAULT_N_SUBBUFS_LOW,
+       },
+       [LTT_CHANNEL_KERNEL] = {
+               LTT_KERNEL_CHANNEL,
+               LTT_DEFAULT_SUBBUF_SIZE_HIGH,
+               LTT_DEFAULT_N_SUBBUFS_HIGH,
+       },
+       [LTT_CHANNEL_MM] = {
+               LTT_MM_CHANNEL,
+               LTT_DEFAULT_SUBBUF_SIZE_MED,
+               LTT_DEFAULT_N_SUBBUFS_MED,
+       },
+       [LTT_CHANNEL_RCU] = {
+               LTT_RCU_CHANNEL,
+               LTT_DEFAULT_SUBBUF_SIZE_MED,
+               LTT_DEFAULT_N_SUBBUFS_MED,
+       },
+       [LTT_CHANNEL_DEFAULT] = {
+               NULL,
+               LTT_DEFAULT_SUBBUF_SIZE_MED,
+               LTT_DEFAULT_N_SUBBUFS_MED,
+       },
+};
+
+/*
+ * Map a channel name to its enum ltt_channels entry.
+ * A NULL or unknown name falls back to LTT_CHANNEL_DEFAULT.
+ */
+static enum ltt_channels get_channel_type_from_name(const char *name)
+{
+       int idx;
+
+       if (name) {
+               for (idx = 0; idx < ARRAY_SIZE(chan_infos); idx++) {
+                       const char *cur = chan_infos[idx].name;
+
+                       if (cur && strcmp(cur, name) == 0)
+                               return (enum ltt_channels)idx;
+               }
+       }
+
+       return LTT_CHANNEL_DEFAULT;
+}
+
+/**
+ * ltt_module_register - LTT module registration
+ * @name: module type
+ * @function: callback to register
+ * @owner: module which owns the callback
+ *
+ * The module calling this registration function must ensure that no
+ * trap-inducing code will be executed by "function". E.g. vmalloc_sync_all()
+ * must be called between a vmalloc and the moment the memory is made visible to
+ * "function". This registration acts as a vmalloc_sync_all. Therefore, only if
+ * the module allocates virtual memory after its registration must it
+ * synchronize the TLBs.
+ */
+int ltt_module_register(enum ltt_module_function name, void *function,
+               struct module *owner)
+{
+       int ret = 0;
+
+       /*
+        * Make sure no page fault can be triggered by the module about to be
+        * registered. We deal with this here so we don't have to call
+        * vmalloc_sync_all() in each module's init.
+        */
+       vmalloc_sync_all();
+
+       switch (name) {
+       case LTT_FUNCTION_RUN_FILTER:
+               if (ltt_run_filter_owner != NULL) {
+                       ret = -EEXIST;
+                       goto end;
+               }
+               ltt_filter_register((ltt_run_filter_functor)function);
+               ltt_run_filter_owner = owner;
+               break;
+       case LTT_FUNCTION_FILTER_CONTROL:
+               if (ltt_filter_control_owner != NULL) {
+                       ret = -EEXIST;
+                       goto end;
+               }
+               ltt_filter_control_functor =
+                       (int (*)(enum ltt_filter_control_msg,
+                       struct ltt_trace_struct *))function;
+               ltt_filter_control_owner = owner;
+               break;
+       case LTT_FUNCTION_STATEDUMP:
+               if (ltt_statedump_owner != NULL) {
+                       ret = -EEXIST;
+                       goto end;
+               }
+               ltt_statedump_functor =
+                       (int (*)(struct ltt_trace_struct *))function;
+               ltt_statedump_owner = owner;
+               break;
+       default:
+               /*
+                * Unknown callback type: reject it instead of silently
+                * reporting success without registering anything.
+                */
+               ret = -EINVAL;
+               break;
+       }
+
+end:
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(ltt_module_register);
+
+/**
+ * ltt_module_unregister - LTT module unregistration
+ * @name: module type
+ */
+void ltt_module_unregister(enum ltt_module_function name)
+{
+       switch (name) {
+       case LTT_FUNCTION_RUN_FILTER:
+               ltt_filter_unregister();
+               ltt_run_filter_owner = NULL;
+               /* Wait for preempt sections to finish */
+               synchronize_sched();
+               break;
+       case LTT_FUNCTION_FILTER_CONTROL:
+               /* Fall back to the built-in accept-everything default. */
+               ltt_filter_control_functor = ltt_filter_control_default;
+               ltt_filter_control_owner = NULL;
+               break;
+       case LTT_FUNCTION_STATEDUMP:
+               /* Fall back to the built-in no-op default. */
+               ltt_statedump_functor = ltt_statedump_default;
+               ltt_statedump_owner = NULL;
+               break;
+       }
+
+}
+EXPORT_SYMBOL_GPL(ltt_module_unregister);
+
+/* Registered output transports; additions/removals hold the traces lock. */
+static LIST_HEAD(ltt_transport_list);
+
+/**
+ * ltt_transport_register - LTT transport registration
+ * @transport: transport structure
+ *
+ * Registers a transport which can be used as output to extract the data out of
+ * LTTng. The module calling this registration function must ensure that no
+ * trap-inducing code will be executed by the transport functions. E.g.
+ * vmalloc_sync_all() must be called between a vmalloc and the moment the memory
+ * is made visible to the transport function. This registration acts as a
+ * vmalloc_sync_all. Therefore, only if the module allocates virtual memory
+ * after its registration must it synchronize the TLBs.
+ */
+void ltt_transport_register(struct ltt_transport *transport)
+{
+       /*
+        * Make sure no page fault can be triggered by the module about to be
+        * registered. We deal with this here so we don't have to call
+        * vmalloc_sync_all() in each module's init.
+        */
+       vmalloc_sync_all();
+
+       /* Publish the transport under the traces lock. */
+       ltt_lock_traces();
+       list_add_tail(&transport->node, &ltt_transport_list);
+       ltt_unlock_traces();
+}
+EXPORT_SYMBOL_GPL(ltt_transport_register);
+
+/**
+ * ltt_transport_unregister - LTT transport unregistration
+ * @transport: transport structure
+ */
+void ltt_transport_unregister(struct ltt_transport *transport)
+{
+       /* Removal is serialized against lookups by the traces lock. */
+       ltt_lock_traces();
+       list_del(&transport->node);
+       ltt_unlock_traces();
+}
+EXPORT_SYMBOL_GPL(ltt_transport_unregister);
+
+/*
+ * Decide whether a channel runs in overwrite (flight-recorder) mode for
+ * the requested trace mode:
+ *  - normal tracing never overwrites;
+ *  - flight mode overwrites everything except the metadata channel;
+ *  - hybrid mode overwrites only the high-throughput channels.
+ */
+static inline int is_channel_overwrite(enum ltt_channels chan,
+       enum trace_mode mode)
+{
+       if (mode == LTT_TRACE_FLIGHT)
+               return chan != LTT_CHANNEL_METADATA;
+
+       if (mode == LTT_TRACE_HYBRID) {
+               switch (chan) {
+               case LTT_CHANNEL_KERNEL:
+               case LTT_CHANNEL_FS:
+               case LTT_CHANNEL_MM:
+               case LTT_CHANNEL_RCU:
+               case LTT_CHANNEL_IPC:
+               case LTT_CHANNEL_INPUT:
+                       return 1;
+               default:
+                       return 0;
+               }
+       }
+
+       /* LTT_TRACE_NORMAL and any unknown mode: no overwrite. */
+       return 0;
+}
+
+/**
+ * ltt_write_trace_header - Write trace header
+ * @trace: Trace information
+ * @header: Memory address where the information must be written to
+ */
+void notrace ltt_write_trace_header(struct ltt_trace_struct *trace,
+               struct ltt_subbuffer_header *header)
+{
+       /* Identify the format and the tracer version that produced it. */
+       header->magic_number = LTT_TRACER_MAGIC_NUMBER;
+       header->major_version = LTT_TRACER_VERSION_MAJOR;
+       header->minor_version = LTT_TRACER_VERSION_MINOR;
+       /* Record producer word size and event alignment for the reader. */
+       header->arch_size = sizeof(void *);
+       header->alignment = ltt_get_alignment();
+       /* Wall-clock start time and clock frequency for timestamp decoding. */
+       header->start_time_sec = trace->start_time.tv_sec;
+       header->start_time_usec = trace->start_time.tv_usec;
+       header->start_freq = trace->start_freq;
+       header->freq_scale = trace->freq_scale;
+}
+EXPORT_SYMBOL_GPL(ltt_write_trace_header);
+
+/* Wake up readers of every active channel of @trace. */
+static void trace_async_wakeup(struct ltt_trace_struct *trace)
+{
+       int i;
+       struct ltt_channel_struct *chan;
+
+       /* Must check each channel for pending read wakeup */
+       for (i = 0; i < trace->nr_channels; i++) {
+               chan = &trace->channels[i];
+               if (chan->active)
+                       trace->ops->wakeup_channel(chan);
+       }
+}
+
+/* Timer to send async wakeups to the readers */
+/* Timer to send async wakeups to the readers */
+static void async_wakeup(unsigned long data)
+{
+       struct ltt_trace_struct *trace;
+
+       /*
+        * PREEMPT_RT does not allow spinlocks to be taken within preempt
+        * disable sections (spinlock taken in wake_up). However, mainline won't
+        * allow mutex to be taken in interrupt context. Ugly.
+        * A proper way to do this would be to turn the timer into a
+        * periodically woken up thread, but it adds to the footprint.
+        */
+#ifndef CONFIG_PREEMPT_RT
+       rcu_read_lock_sched();
+#else
+       ltt_lock_traces();
+#endif
+       list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
+               trace_async_wakeup(trace);
+       }
+#ifndef CONFIG_PREEMPT_RT
+       rcu_read_unlock_sched();
+#else
+       ltt_unlock_traces();
+#endif
+
+       /* Re-arm: this timer runs for as long as any trace is allocated. */
+       mod_timer(&ltt_async_wakeup_timer, jiffies + LTT_PERCPU_TIMER_INTERVAL);
+}
+
+/**
+ * _ltt_trace_find - find a trace by given name.
+ * trace_name: trace name
+ *
+ * Returns a pointer to the trace structure, NULL if not found.
+ */
+/*
+ * Look up an allocated trace by name on the active list.
+ * Caller must hold the traces lock.  Returns NULL when absent.
+ */
+static struct ltt_trace_struct *_ltt_trace_find(const char *trace_name)
+{
+       struct ltt_trace_struct *iter;
+
+       list_for_each_entry(iter, &ltt_traces.head, list) {
+               if (strncmp(iter->trace_name, trace_name, NAME_MAX) == 0)
+                       return iter;
+       }
+
+       return NULL;
+}
+
+/* _ltt_trace_find_setup :
+ * find a trace in setup list by given name.
+ *
+ * Returns a pointer to the trace structure, NULL if not found.
+ */
+/*
+ * Look up a not-yet-allocated trace by name on the setup list.
+ * Caller must hold the traces lock.  Returns NULL when absent.
+ */
+struct ltt_trace_struct *_ltt_trace_find_setup(const char *trace_name)
+{
+       struct ltt_trace_struct *iter;
+
+       list_for_each_entry(iter, &ltt_traces.setup_head, list) {
+               if (strncmp(iter->trace_name, trace_name, NAME_MAX) == 0)
+                       return iter;
+       }
+
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(_ltt_trace_find_setup);
+
+/**
+ * ltt_release_transport - Release an LTT transport
+ * @kref : reference count on the transport
+ */
+void ltt_release_transport(struct kref *kref)
+{
+       struct ltt_trace_struct *trace = container_of(kref,
+                       struct ltt_trace_struct, ltt_transport_kref);
+       /* Last transport reference dropped: tear down the trace directories. */
+       trace->ops->remove_dirs(trace);
+}
+EXPORT_SYMBOL_GPL(ltt_release_transport);
+
+/**
+ * ltt_release_trace - Release a LTT trace
+ * @kref : reference count on the trace
+ */
+void ltt_release_trace(struct kref *kref)
+{
+       struct ltt_trace_struct *trace = container_of(kref,
+                       struct ltt_trace_struct, kref);
+       /* Free per-channel state first, then the trace structure itself. */
+       ltt_channels_trace_free(trace->channels);
+       kfree(trace);
+}
+EXPORT_SYMBOL_GPL(ltt_release_trace);
+
+/*
+ * Round the sub-buffer size and count up to the next power of two, as
+ * required by the lockless buffering scheme.
+ */
+static inline void prepare_chan_size_num(unsigned int *subbuf_size,
+                                        unsigned int *n_subbufs)
+{
+       /* get_count_order() is ceil(log2(x)), so 1 << order rounds up. */
+       *subbuf_size = 1 << get_count_order(*subbuf_size);
+       *n_subbufs = 1 << get_count_order(*n_subbufs);
+
+       /* Subbuf size and number must both be power of two */
+       WARN_ON(hweight32(*subbuf_size) != 1);
+       WARN_ON(hweight32(*n_subbufs) != 1);
+}
+
+/*
+ * _ltt_trace_setup - create a trace and put it on the setup list.
+ * @trace_name: unique name of the new trace
+ *
+ * Allocates the trace structure and its channel array, forces the
+ * metadata channel active/non-overwrite and applies per-channel-type
+ * sub-buffer defaults.  Must be called with the traces lock held.
+ * Returns 0 on success, -EEXIST if the name is in use, -ENOMEM on
+ * allocation failure.
+ */
+int _ltt_trace_setup(const char *trace_name)
+{
+       int err = 0;
+       struct ltt_trace_struct *new_trace = NULL;
+       int metadata_index;
+       unsigned int chan;
+       enum ltt_channels chantype;
+
+       if (_ltt_trace_find_setup(trace_name)) {
+               printk(KERN_ERR "LTT : Trace name %s already used.\n",
+                               trace_name);
+               err = -EEXIST;
+               goto traces_error;
+       }
+
+       if (_ltt_trace_find(trace_name)) {
+               printk(KERN_ERR "LTT : Trace name %s already used.\n",
+                               trace_name);
+               err = -EEXIST;
+               goto traces_error;
+       }
+
+       new_trace = kzalloc(sizeof(struct ltt_trace_struct), GFP_KERNEL);
+       if (!new_trace) {
+               printk(KERN_ERR
+                       "LTT : Unable to allocate memory for trace %s\n",
+                       trace_name);
+               err = -ENOMEM;
+               goto traces_error;
+       }
+       strncpy(new_trace->trace_name, trace_name, NAME_MAX);
+       /*
+        * strncpy() does not NUL-terminate when the source reaches the
+        * limit; force termination (trace_name is assumed NAME_MAX bytes —
+        * confirm against the struct definition in the header).
+        */
+       new_trace->trace_name[NAME_MAX - 1] = '\0';
+       new_trace->channels = ltt_channels_trace_alloc(&new_trace->nr_channels,
+                                                      0, 1);
+       if (!new_trace->channels) {
+               printk(KERN_ERR
+                       "LTT : Unable to allocate memory for chaninfo  %s\n",
+                       trace_name);
+               err = -ENOMEM;
+               goto trace_free;
+       }
+
+       /*
+        * Force metadata channel to active, no overwrite.
+        */
+       metadata_index = ltt_channels_get_index_from_name("metadata");
+       WARN_ON(metadata_index < 0);
+       new_trace->channels[metadata_index].overwrite = 0;
+       new_trace->channels[metadata_index].active = 1;
+
+       /*
+        * Set hardcoded tracer defaults for some channels
+        */
+       for (chan = 0; chan < new_trace->nr_channels; chan++) {
+               if (!(new_trace->channels[chan].active))
+                       continue;
+
+               chantype = get_channel_type_from_name(
+                       ltt_channels_get_name_from_index(chan));
+               new_trace->channels[chan].subbuf_size =
+                       chan_infos[chantype].def_subbufsize;
+               new_trace->channels[chan].subbuf_cnt =
+                       chan_infos[chantype].def_subbufcount;
+       }
+
+       list_add(&new_trace->list, &ltt_traces.setup_head);
+       return 0;
+
+trace_free:
+       kfree(new_trace);
+traces_error:
+       return err;
+}
+EXPORT_SYMBOL_GPL(_ltt_trace_setup);
+
+
+/* Locked wrapper around _ltt_trace_setup(). */
+int ltt_trace_setup(const char *trace_name)
+{
+       int err;
+
+       ltt_lock_traces();
+       err = _ltt_trace_setup(trace_name);
+       ltt_unlock_traces();
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(ltt_trace_setup);
+
+/* must be called from within a traces lock. */
+/* Unlink a setup-list trace and free it; caller holds the traces lock. */
+static void _ltt_trace_free(struct ltt_trace_struct *trace)
+{
+       list_del(&trace->list);
+       kfree(trace);
+}
+
+/*
+ * Select the output transport for a trace that is still being set up.
+ * Returns -ENOENT if the trace is unknown, -EINVAL if no transport with
+ * that name has been registered.
+ */
+int ltt_trace_set_type(const char *trace_name, const char *trace_type)
+{
+       struct ltt_trace_struct *trace;
+       struct ltt_transport *transport = NULL;
+       struct ltt_transport *iter;
+       int err = 0;
+
+       ltt_lock_traces();
+
+       trace = _ltt_trace_find_setup(trace_name);
+       if (trace == NULL) {
+               printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
+               err = -ENOENT;
+               goto unlock;
+       }
+
+       /* Search the registered transports for a matching name. */
+       list_for_each_entry(iter, &ltt_transport_list, node) {
+               if (strcmp(iter->name, trace_type) == 0) {
+                       transport = iter;
+                       break;
+               }
+       }
+       if (transport == NULL) {
+               printk(KERN_ERR "LTT : Transport %s is not present.\n",
+                       trace_type);
+               err = -EINVAL;
+               goto unlock;
+       }
+
+       trace->transport = transport;
+
+unlock:
+       ltt_unlock_traces();
+       return err;
+}
+
+/*
+ * Set the sub-buffer size of one channel of a trace being set up.
+ * The value is rounded to a power of two later, at allocation time.
+ */
+int ltt_trace_set_channel_subbufsize(const char *trace_name,
+               const char *channel_name, unsigned int size)
+{
+       struct ltt_trace_struct *trace;
+       int chan_index;
+       int err = 0;
+
+       ltt_lock_traces();
+
+       trace = _ltt_trace_find_setup(trace_name);
+       if (trace == NULL) {
+               printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
+               err = -ENOENT;
+               goto unlock;
+       }
+
+       chan_index = ltt_channels_get_index_from_name(channel_name);
+       if (chan_index < 0) {
+               printk(KERN_ERR "LTT : Channel %s not found\n", channel_name);
+               err = -ENOENT;
+               goto unlock;
+       }
+
+       trace->channels[chan_index].subbuf_size = size;
+
+unlock:
+       ltt_unlock_traces();
+       return err;
+}
+
+/*
+ * Set the number of sub-buffers of one channel of a trace being set up.
+ * The value is rounded to a power of two later, at allocation time.
+ */
+int ltt_trace_set_channel_subbufcount(const char *trace_name,
+               const char *channel_name, unsigned int cnt)
+{
+       struct ltt_trace_struct *trace;
+       int chan_index;
+       int err = 0;
+
+       ltt_lock_traces();
+
+       trace = _ltt_trace_find_setup(trace_name);
+       if (trace == NULL) {
+               printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
+               err = -ENOENT;
+               goto unlock;
+       }
+
+       chan_index = ltt_channels_get_index_from_name(channel_name);
+       if (chan_index < 0) {
+               printk(KERN_ERR "LTT : Channel %s not found\n", channel_name);
+               err = -ENOENT;
+               goto unlock;
+       }
+
+       trace->channels[chan_index].subbuf_cnt = cnt;
+
+unlock:
+       ltt_unlock_traces();
+       return err;
+}
+
+/*
+ * Enable or disable one channel of a trace being set up.  Disabling the
+ * metadata channel is refused: marker info is required to read the trace.
+ */
+int ltt_trace_set_channel_enable(const char *trace_name,
+               const char *channel_name, unsigned int enable)
+{
+       struct ltt_trace_struct *trace;
+       int chan_index;
+       int err = 0;
+
+       ltt_lock_traces();
+
+       trace = _ltt_trace_find_setup(trace_name);
+       if (trace == NULL) {
+               printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
+               err = -ENOENT;
+               goto unlock;
+       }
+
+       /*
+        * Datas in metadata channel(marker info) is necessary to be able to
+        * read the trace, we always enable this channel.
+        */
+       if (!enable && strcmp(channel_name, "metadata") == 0) {
+               printk(KERN_ERR "LTT : Trying to disable metadata channel\n");
+               err = -EINVAL;
+               goto unlock;
+       }
+
+       chan_index = ltt_channels_get_index_from_name(channel_name);
+       if (chan_index < 0) {
+               printk(KERN_ERR "LTT : Channel %s not found\n", channel_name);
+               err = -ENOENT;
+               goto unlock;
+       }
+
+       trace->channels[chan_index].active = enable;
+
+unlock:
+       ltt_unlock_traces();
+       return err;
+}
+
+/*
+ * Set the overwrite (flight-recorder) mode of one channel of a trace
+ * being set up.  The metadata channel may never overwrite: its marker
+ * info is required to read the trace at all.
+ */
+int ltt_trace_set_channel_overwrite(const char *trace_name,
+               const char *channel_name, unsigned int overwrite)
+{
+       struct ltt_trace_struct *trace;
+       int chan_index;
+       int err = 0;
+
+       ltt_lock_traces();
+
+       trace = _ltt_trace_find_setup(trace_name);
+       if (trace == NULL) {
+               printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
+               err = -ENOENT;
+               goto unlock;
+       }
+
+       /*
+        * Always put the metadata channel in non-overwrite mode :
+        * This is a very low traffic channel and it can't afford to have its
+        * data overwritten : this data (marker info) is necessary to be
+        * able to read the trace.
+        */
+       if (overwrite && strcmp(channel_name, "metadata") == 0) {
+               printk(KERN_ERR "LTT : Trying to set metadata channel to "
+                               "overwrite mode\n");
+               err = -EINVAL;
+               goto unlock;
+       }
+
+       chan_index = ltt_channels_get_index_from_name(channel_name);
+       if (chan_index < 0) {
+               printk(KERN_ERR "LTT : Channel %s not found\n", channel_name);
+               err = -ENOENT;
+               goto unlock;
+       }
+
+       trace->channels[chan_index].overwrite = overwrite;
+
+unlock:
+       ltt_unlock_traces();
+       return err;
+}
+
+/*
+ * ltt_trace_alloc - allocate buffers for a trace and make it live.
+ * @trace_name: name of a trace previously created with ltt_trace_setup()
+ *
+ * Pins the transport module, creates the trace directories and one buffer
+ * per active channel, records the start time/clock, then moves the trace
+ * from the setup list to the RCU-protected active list.  On the first
+ * allocated trace the async wakeup timer is armed and the per-task trace
+ * flag is set.  Returns 0 on success or a negative errno.
+ */
+int ltt_trace_alloc(const char *trace_name)
+{
+       int err = 0;
+       struct ltt_trace_struct *trace;
+       /*
+        * Must be unsigned int: passed by address to prepare_chan_size_num(),
+        * which takes unsigned int * (the original int locals caused an
+        * incompatible-pointer-type mismatch).
+        */
+       unsigned int subbuf_size, subbuf_cnt;
+       unsigned long flags;
+       int chan;
+       const char *channel_name;
+
+       ltt_lock_traces();
+
+       trace = _ltt_trace_find_setup(trace_name);
+       if (!trace) {
+               printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
+               err = -ENOENT;
+               goto traces_error;
+       }
+
+       kref_init(&trace->kref);
+       kref_init(&trace->ltt_transport_kref);
+       init_waitqueue_head(&trace->kref_wq);
+       trace->active = 0;
+       get_trace_clock();
+       trace->freq_scale = trace_clock_freq_scale();
+
+       if (!trace->transport) {
+               printk(KERN_ERR "LTT : Transport is not set.\n");
+               err = -EINVAL;
+               goto transport_error;
+       }
+       if (!try_module_get(trace->transport->owner)) {
+               printk(KERN_ERR "LTT : Can't lock transport module.\n");
+               err = -ENODEV;
+               goto transport_error;
+       }
+       trace->ops = &trace->transport->ops;
+
+       err = trace->ops->create_dirs(trace);
+       if (err) {
+               printk(KERN_ERR "LTT : Can't create dir for trace %s.\n",
+                       trace_name);
+               goto dirs_error;
+       }
+
+       /* Sample start time and clock atomically w.r.t. local interrupts. */
+       local_irq_save(flags);
+       trace->start_freq = trace_clock_frequency();
+       trace->start_tsc = trace_clock_read64();
+       do_gettimeofday(&trace->start_time);
+       local_irq_restore(flags);
+
+       for (chan = 0; chan < trace->nr_channels; chan++) {
+               if (!(trace->channels[chan].active))
+                       continue;
+
+               channel_name = ltt_channels_get_name_from_index(chan);
+               WARN_ON(!channel_name);
+               subbuf_size = trace->channels[chan].subbuf_size;
+               subbuf_cnt = trace->channels[chan].subbuf_cnt;
+               /* Round both values up to powers of two before allocation. */
+               prepare_chan_size_num(&subbuf_size, &subbuf_cnt);
+               err = trace->ops->create_channel(trace_name, trace,
+                               trace->dentry.trace_root,
+                               channel_name,
+                               &trace->channels[chan],
+                               subbuf_size,
+                               subbuf_cnt,
+                               trace->channels[chan].overwrite);
+               if (err != 0) {
+                       printk(KERN_ERR "LTT : Can't create channel %s.\n",
+                               channel_name);
+                       goto create_channel_error;
+               }
+       }
+
+       list_del(&trace->list);
+       if (list_empty(&ltt_traces.head)) {
+               /* First trace: arm the reader wakeup timer and flag tasks. */
+               mod_timer(&ltt_async_wakeup_timer,
+                               jiffies + LTT_PERCPU_TIMER_INTERVAL);
+               set_kernel_trace_flag_all_tasks();
+       }
+       list_add_rcu(&trace->list, &ltt_traces.head);
+       synchronize_sched();
+
+       ltt_unlock_traces();
+
+       return 0;
+
+create_channel_error:
+       /* Unwind only the channels created before the failure. */
+       for (chan--; chan >= 0; chan--)
+               if (trace->channels[chan].active)
+                       trace->ops->remove_channel(&trace->channels[chan]);
+
+dirs_error:
+       module_put(trace->transport->owner);
+transport_error:
+       put_trace_clock();
+traces_error:
+       ltt_unlock_traces();
+       return err;
+}
+EXPORT_SYMBOL_GPL(ltt_trace_alloc);
+
+/*
+ * It is worked as a wrapper for current version of ltt_control.ko.
+ * We will make a new ltt_control based on debugfs, and control each channel's
+ * buffer.
+ */
+/*
+ * It is worked as a wrapper for current version of ltt_control.ko.
+ * We will make a new ltt_control based on debugfs, and control each channel's
+ * buffer.
+ *
+ * Runs setup, transport selection and allocation in sequence, stopping at
+ * the first failure.  The mode and sub-buffer size/count parameters are
+ * currently unused and kept only for the existing caller interface.
+ */
+static int ltt_trace_create(const char *trace_name, const char *trace_type,
+               enum trace_mode mode,
+               unsigned int subbuf_size_low, unsigned int n_subbufs_low,
+               unsigned int subbuf_size_med, unsigned int n_subbufs_med,
+               unsigned int subbuf_size_high, unsigned int n_subbufs_high)
+{
+       int err;
+
+       /*
+        * All three helpers return 0 or a negative errno, so a plain test
+        * is the right check; IS_ERR_VALUE() is meant for pointers encoded
+        * in an unsigned long, not for int error codes.
+        */
+       err = ltt_trace_setup(trace_name);
+       if (err)
+               return err;
+
+       err = ltt_trace_set_type(trace_name, trace_type);
+       if (err)
+               return err;
+
+       return ltt_trace_alloc(trace_name);
+}
+
+/* Must be called while sure that trace is in the list. */
+/* Must be called while sure that trace is in the list. */
+static int _ltt_trace_destroy(struct ltt_trace_struct  *trace)
+{
+       int err = -EPERM;
+
+       if (trace == NULL) {
+               err = -ENOENT;
+               goto traces_error;
+       }
+       /* An active trace must be stopped before it can be destroyed. */
+       if (trace->active) {
+               printk(KERN_ERR
+                       "LTT : Can't destroy trace %s : tracer is active\n",
+                       trace->trace_name);
+               err = -EBUSY;
+               goto active_error;
+       }
+       /* Everything went fine */
+       list_del_rcu(&trace->list);
+       /* Wait for all RCU readers of the trace list to drain. */
+       synchronize_sched();
+       if (list_empty(&ltt_traces.head)) {
+               clear_kernel_trace_flag_all_tasks();
+               /*
+                * We stop the asynchronous delivery of reader wakeup, but
+                * we must make one last check for reader wakeups pending
+                * later in __ltt_trace_destroy.
+                */
+               del_timer_sync(&ltt_async_wakeup_timer);
+       }
+       return 0;
+
+       /* error handling */
+active_error:
+traces_error:
+       return err;
+}
+
+/* Sleepable part of the destroy */
+/* Sleepable part of the destroy */
+static void __ltt_trace_destroy(struct ltt_trace_struct        *trace)
+{
+       int i;
+       struct ltt_channel_struct *chan;
+
+       /* Flush/finalize every active channel before removal. */
+       for (i = 0; i < trace->nr_channels; i++) {
+               chan = &trace->channels[i];
+               if (chan->active)
+                       trace->ops->finish_channel(chan);
+       }
+
+       flush_scheduled_work();
+
+       /*
+        * The currently destroyed trace is not in the trace list anymore,
+        * so it's safe to call the async wakeup ourself. It will deliver
+        * the last subbuffers.
+        */
+       trace_async_wakeup(trace);
+
+       for (i = 0; i < trace->nr_channels; i++) {
+               chan = &trace->channels[i];
+               if (chan->active)
+                       trace->ops->remove_channel(chan);
+       }
+
+       /* Drop the directory reference taken when the trace was allocated. */
+       kref_put(&trace->ltt_transport_kref, ltt_release_transport);
+
+       module_put(trace->transport->owner);
+
+       /*
+        * Wait for lttd readers to release the files, therefore making sure
+        * the last subbuffers have been read.
+        */
+       if (atomic_read(&trace->kref.refcount) > 1) {
+               int ret = 0;
+               __wait_event_interruptible(trace->kref_wq,
+                       (atomic_read(&trace->kref.refcount) == 1), ret);
+       }
+       /* Final reference: frees the channels and the trace structure. */
+       kref_put(&trace->kref, ltt_release_trace);
+}
+
+/*
+ * Destroy a trace by name: a fully-allocated trace goes through the
+ * non-sleepable unlink (_ltt_trace_destroy) and the sleepable teardown
+ * (__ltt_trace_destroy); a trace still on the setup list is simply freed.
+ */
+int ltt_trace_destroy(const char *trace_name)
+{
+       int err = 0;
+       struct ltt_trace_struct *trace;
+
+       ltt_lock_traces();
+
+       trace = _ltt_trace_find(trace_name);
+       if (trace) {
+               err = _ltt_trace_destroy(trace);
+               if (err)
+                       goto error;
+
+               /* Sleepable teardown must run outside the traces lock. */
+               ltt_unlock_traces();
+
+               __ltt_trace_destroy(trace);
+               /* Balances the get_trace_clock() done in ltt_trace_alloc(). */
+               put_trace_clock();
+
+               return 0;
+       }
+
+       trace = _ltt_trace_find_setup(trace_name);
+       if (trace) {
+               _ltt_trace_free(trace);
+               ltt_unlock_traces();
+               return 0;
+       }
+
+       err = -ENOENT;
+
+       /* Error handling */
+error:
+       ltt_unlock_traces();
+       return err;
+}
+EXPORT_SYMBOL_GPL(ltt_trace_destroy);
+
+/* must be called from within a traces lock. */
+static int _ltt_trace_start(struct ltt_trace_struct *trace)
+{
+       int err = 0;
+
+       if (trace == NULL) {
+               err = -ENOENT;
+               goto traces_error;
+       }
+       if (trace->active)
+               printk(KERN_INFO "LTT : Tracing already active for trace %s\n",
+                               trace->trace_name);
+       if (!try_module_get(ltt_run_filter_owner)) {
+               err = -ENODEV;
+               printk(KERN_ERR "LTT : Can't lock filter module.\n");
+               goto get_ltt_run_filter_error;
+       }
+       trace->active = 1;
+       /* Read by trace points without protection : be careful */
+       ltt_traces.num_active_traces++;
+       return err;
+
+       /* error handling */
+get_ltt_run_filter_error:
+traces_error:
+       return err;
+}
+
+int ltt_trace_start(const char *trace_name)
+{
+       int err = 0;
+       struct ltt_trace_struct *trace;
+
+       ltt_lock_traces();
+
+       trace = _ltt_trace_find(trace_name);
+       err = _ltt_trace_start(trace);
+       if (err)
+               goto no_trace;
+
+       ltt_unlock_traces();
+
+       /*
+        * Call the kernel state dump.
+        * Events will be mixed with real kernel events, it's ok.
+        * Notice that there is no protection on the trace : that's exactly
+        * why we iterate on the list and check for trace equality instead of
+        * directly using this trace handle inside the logging function.
+        */
+
+       ltt_dump_marker_state(trace);
+
+       if (!try_module_get(ltt_statedump_owner)) {
+               err = -ENODEV;
+               printk(KERN_ERR
+                       "LTT : Can't lock state dump module.\n");
+       } else {
+               ltt_statedump_functor(trace);
+               module_put(ltt_statedump_owner);
+       }
+
+       return err;
+
+       /* Error handling */
+no_trace:
+       ltt_unlock_traces();
+       return err;
+}
+EXPORT_SYMBOL_GPL(ltt_trace_start);
+
+/* must be called from within traces lock */
+static int _ltt_trace_stop(struct ltt_trace_struct *trace)
+{
+       int err = -EPERM;
+
+       if (trace == NULL) {
+               err = -ENOENT;
+               goto traces_error;
+       }
+       if (!trace->active)
+               printk(KERN_INFO "LTT : Tracing not active for trace %s\n",
+                               trace->trace_name);
+       if (trace->active) {
+               trace->active = 0;
+               ltt_traces.num_active_traces--;
+               synchronize_sched(); /* Wait for each tracing to be finished */
+       }
+       module_put(ltt_run_filter_owner);
+       /* Everything went fine */
+       return 0;
+
+       /* Error handling */
+traces_error:
+       return err;
+}
+
+int ltt_trace_stop(const char *trace_name)
+{
+       int err = 0;
+       struct ltt_trace_struct *trace;
+
+       ltt_lock_traces();
+       trace = _ltt_trace_find(trace_name);
+       err = _ltt_trace_stop(trace);
+       ltt_unlock_traces();
+       return err;
+}
+EXPORT_SYMBOL_GPL(ltt_trace_stop);
+
+/**
+ * ltt_control - Trace control in-kernel API
+ * @msg: Action to perform
+ * @trace_name: Trace on which the action must be done
+ * @trace_type: Type of trace (normal, flight, hybrid)
+ * @args: Arguments specific to the action
+ */
+int ltt_control(enum ltt_control_msg msg, const char *trace_name,
+               const char *trace_type, union ltt_control_args args)
+{
+       int err = -EPERM;
+
+       printk(KERN_ALERT "ltt_control : trace %s\n", trace_name);
+       switch (msg) {
+       case LTT_CONTROL_START:
+               printk(KERN_DEBUG "Start tracing %s\n", trace_name);
+               err = ltt_trace_start(trace_name);
+               break;
+       case LTT_CONTROL_STOP:
+               printk(KERN_DEBUG "Stop tracing %s\n", trace_name);
+               err = ltt_trace_stop(trace_name);
+               break;
+       case LTT_CONTROL_CREATE_TRACE:
+               printk(KERN_DEBUG "Creating trace %s\n", trace_name);
+               err = ltt_trace_create(trace_name, trace_type,
+                       args.new_trace.mode,
+                       args.new_trace.subbuf_size_low,
+                       args.new_trace.n_subbufs_low,
+                       args.new_trace.subbuf_size_med,
+                       args.new_trace.n_subbufs_med,
+                       args.new_trace.subbuf_size_high,
+                       args.new_trace.n_subbufs_high);
+               break;
+       case LTT_CONTROL_DESTROY_TRACE:
+               printk(KERN_DEBUG "Destroying trace %s\n", trace_name);
+               err = ltt_trace_destroy(trace_name);
+               break;
+       }
+       return err;
+}
+EXPORT_SYMBOL_GPL(ltt_control);
+
+/**
+ * ltt_filter_control - Trace filter control in-kernel API
+ * @msg: Action to perform on the filter
+ * @trace_name: Trace on which the action must be done
+ */
+int ltt_filter_control(enum ltt_filter_control_msg msg, const char *trace_name)
+{
+       int err;
+       struct ltt_trace_struct *trace;
+
+       printk(KERN_DEBUG "ltt_filter_control : trace %s\n", trace_name);
+       ltt_lock_traces();
+       trace = _ltt_trace_find(trace_name);
+       if (trace == NULL) {
+               printk(KERN_ALERT
+                       "Trace does not exist. Cannot proxy control request\n");
+               err = -ENOENT;
+               goto trace_error;
+       }
+       if (!try_module_get(ltt_filter_control_owner)) {
+               err = -ENODEV;
+               goto get_module_error;
+       }
+       switch (msg) {
+       case LTT_FILTER_DEFAULT_ACCEPT:
+               printk(KERN_DEBUG
+                       "Proxy filter default accept %s\n", trace_name);
+               err = (*ltt_filter_control_functor)(msg, trace);
+               break;
+       case LTT_FILTER_DEFAULT_REJECT:
+               printk(KERN_DEBUG
+                       "Proxy filter default reject %s\n", trace_name);
+               err = (*ltt_filter_control_functor)(msg, trace);
+               break;
+       default:
+               err = -EPERM;
+       }
+       module_put(ltt_filter_control_owner);
+
+get_module_error:
+trace_error:
+       ltt_unlock_traces();
+       return err;
+}
+EXPORT_SYMBOL_GPL(ltt_filter_control);
+
+int __init ltt_init(void)
+{
+       /* Make sure no page fault can be triggered by this module */
+       vmalloc_sync_all();
+       return 0;
+}
+
+module_init(ltt_init)
+
+static void __exit ltt_exit(void)
+{
+       struct ltt_trace_struct *trace;
+       struct list_head *pos, *n;
+
+       ltt_lock_traces();
+       /* Stop each trace, currently being read by RCU read-side */
+       list_for_each_entry_rcu(trace, &ltt_traces.head, list)
+               _ltt_trace_stop(trace);
+       /* Wait for quiescent state. Readers have preemption disabled. */
+       synchronize_sched();
+       /* Safe iteration is now permitted. It does not have to be RCU-safe
+        * because no readers are left. */
+       list_for_each_safe(pos, n, &ltt_traces.head) {
+               trace = container_of(pos, struct ltt_trace_struct, list);
+               /* _ltt_trace_destroy does a synchronize_sched() */
+               _ltt_trace_destroy(trace);
+               __ltt_trace_destroy(trace);
+       }
+       /* free traces in pre-alloc status */
+       list_for_each_safe(pos, n, &ltt_traces.setup_head) {
+               trace = container_of(pos, struct ltt_trace_struct, list);
+               _ltt_trace_free(trace);
+       }
+
+       ltt_unlock_traces();
+}
+
+module_exit(ltt_exit)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mathieu Desnoyers");
+MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Tracer Kernel API");
diff --git a/libtracing/tracer.h b/libtracing/tracer.h
new file mode 100644 (file)
index 0000000..822311e
--- /dev/null
@@ -0,0 +1,716 @@
+/*
+ * Copyright (C) 2005,2006,2008 Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
+ *
+ * This contains the definitions for the Linux Trace Toolkit tracer.
+ */
+
+#ifndef _LTT_TRACER_H
+#define _LTT_TRACER_H
+
+#include <stdarg.h>
+#include <linux/types.h>
+#include <linux/limits.h>
+#include <linux/list.h>
+#include <linux/cache.h>
+#include <linux/kernel.h>
+#include <linux/timex.h>
+#include <linux/wait.h>
+#include <linux/ltt-relay.h>
+#include <linux/ltt-channels.h>
+#include <linux/ltt-core.h>
+#include <linux/marker.h>
+#include <linux/trace-clock.h>
+#include <asm/atomic.h>
+#include <asm/local.h>
+
+/* Number of bytes to log with a read/write event */
+#define LTT_LOG_RW_SIZE                        32L
+
+/* Interval (in jiffies) at which the LTT per-CPU timer fires */
+#define LTT_PERCPU_TIMER_INTERVAL      1
+
+#ifndef LTT_ARCH_TYPE
+#define LTT_ARCH_TYPE                  LTT_ARCH_TYPE_UNDEFINED
+#endif
+
+#ifndef LTT_ARCH_VARIANT
+#define LTT_ARCH_VARIANT               LTT_ARCH_VARIANT_NONE
+#endif
+
+struct ltt_active_marker;
+
+/* Maximum number of callbacks per marker */
+#define LTT_NR_CALLBACKS       10
+
+struct ltt_serialize_closure;
+struct ltt_probe_private_data;
+
+/* Serialization callback '%k' */
+typedef size_t (*ltt_serialize_cb)(struct rchan_buf *buf, size_t buf_offset,
+                       struct ltt_serialize_closure *closure,
+                       void *serialize_private, int *largest_align,
+                       const char *fmt, va_list *args);
+
+struct ltt_serialize_closure {
+       ltt_serialize_cb *callbacks;
+       long cb_args[LTT_NR_CALLBACKS];
+       unsigned int cb_idx;
+};
+
+size_t ltt_serialize_data(struct rchan_buf *buf, size_t buf_offset,
+                       struct ltt_serialize_closure *closure,
+                       void *serialize_private,
+                       int *largest_align, const char *fmt, va_list *args);
+
+struct ltt_available_probe {
+       const char *name;               /* probe name */
+       const char *format;
+       marker_probe_func *probe_func;
+       ltt_serialize_cb callbacks[LTT_NR_CALLBACKS];
+       struct list_head node;          /* registered probes list */
+};
+
+struct ltt_probe_private_data {
+       struct ltt_trace_struct *trace; /*
+                                        * Target trace, for metadata
+                                        * or statedump.
+                                        */
+       ltt_serialize_cb serializer;    /*
+                                        * Serialization function override.
+                                        */
+       void *serialize_private;        /*
+                                        * Private data for serialization
+                                        * functions.
+                                        */
+};
+
+enum ltt_channels {
+       LTT_CHANNEL_METADATA,
+       LTT_CHANNEL_FD_STATE,
+       LTT_CHANNEL_GLOBAL_STATE,
+       LTT_CHANNEL_IRQ_STATE,
+       LTT_CHANNEL_MODULE_STATE,
+       LTT_CHANNEL_NETIF_STATE,
+       LTT_CHANNEL_SOFTIRQ_STATE,
+       LTT_CHANNEL_SWAP_STATE,
+       LTT_CHANNEL_SYSCALL_STATE,
+       LTT_CHANNEL_TASK_STATE,
+       LTT_CHANNEL_VM_STATE,
+       LTT_CHANNEL_FS,
+       LTT_CHANNEL_INPUT,
+       LTT_CHANNEL_IPC,
+       LTT_CHANNEL_KERNEL,
+       LTT_CHANNEL_MM,
+       LTT_CHANNEL_RCU,
+       LTT_CHANNEL_DEFAULT,
+};
+
+struct ltt_active_marker {
+       struct list_head node;          /* active markers list */
+       const char *channel;
+       const char *name;
+       const char *format;
+       struct ltt_available_probe *probe;
+};
+
+extern void ltt_vtrace(const struct marker *mdata, void *probe_data,
+       void *call_data, const char *fmt, va_list *args);
+extern void ltt_trace(const struct marker *mdata, void *probe_data,
+       void *call_data, const char *fmt, ...);
+
+/*
+ * Unique ID assigned to each registered probe.
+ */
+enum marker_id {
+       MARKER_ID_SET_MARKER_ID = 0,    /* Static IDs available (range 0-7) */
+       MARKER_ID_SET_MARKER_FORMAT,
+       MARKER_ID_COMPACT,              /* Compact IDs (range: 8-127)       */
+       MARKER_ID_DYNAMIC,              /* Dynamic IDs (range: 128-65535)   */
+};
+
+/* static ids 0-1 reserved for internal use. */
+#define MARKER_CORE_IDS                2
+static inline enum marker_id marker_id_type(uint16_t id)
+{
+       if (id < MARKER_CORE_IDS)
+               return (enum marker_id)id;
+       else
+               return MARKER_ID_DYNAMIC;
+}
+
+#ifdef CONFIG_LTT
+
+struct user_dbg_data {
+       unsigned long avail_size;
+       unsigned long write;
+       unsigned long read;
+};
+
+struct ltt_trace_ops {
+       /* First 32 bytes cache-hot cacheline */
+       int (*reserve_slot) (struct ltt_trace_struct *trace,
+                               struct ltt_channel_struct *channel,
+                               void **transport_data, size_t data_size,
+                               size_t *slot_size, long *buf_offset, u64 *tsc,
+                               unsigned int *rflags,
+                               int largest_align,
+                               int cpu);
+       void (*commit_slot) (struct ltt_channel_struct *channel,
+                               void **transport_data, long buf_offset,
+                               size_t slot_size);
+       void (*wakeup_channel) (struct ltt_channel_struct *ltt_channel);
+       int (*user_blocking) (struct ltt_trace_struct *trace,
+                               unsigned int index, size_t data_size,
+                               struct user_dbg_data *dbg);
+       /* End of first 32 bytes cacheline */
+       int (*create_dirs) (struct ltt_trace_struct *new_trace);
+       void (*remove_dirs) (struct ltt_trace_struct *new_trace);
+       int (*create_channel) (const char *trace_name,
+                               struct ltt_trace_struct *trace,
+                               struct dentry *dir, const char *channel_name,
+                               struct ltt_channel_struct *ltt_chan,
+                               unsigned int subbuf_size,
+                               unsigned int n_subbufs, int overwrite);
+       void (*finish_channel) (struct ltt_channel_struct *channel);
+       void (*remove_channel) (struct ltt_channel_struct *channel);
+       void (*user_errors) (struct ltt_trace_struct *trace,
+                               unsigned int index, size_t data_size,
+                               struct user_dbg_data *dbg, int cpu);
+#ifdef CONFIG_HOTPLUG_CPU
+       int (*handle_cpuhp) (struct notifier_block *nb,
+                               unsigned long action, void *hcpu,
+                               struct ltt_trace_struct *trace);
+#endif
+} ____cacheline_aligned;
+
+struct ltt_transport {
+       char *name;
+       struct module *owner;
+       struct list_head node;
+       struct ltt_trace_ops ops;
+};
+
+enum trace_mode { LTT_TRACE_NORMAL, LTT_TRACE_FLIGHT, LTT_TRACE_HYBRID };
+
+#define CHANNEL_FLAG_ENABLE    (1U<<0)
+#define CHANNEL_FLAG_OVERWRITE (1U<<1)
+
+/* Per-trace information - each trace/flight recorder represented by one */
+struct ltt_trace_struct {
+       /* First 32 bytes cache-hot cacheline */
+       struct list_head list;
+       struct ltt_trace_ops *ops;
+       int active;
+       /* Second 32 bytes cache-hot cacheline */
+       struct ltt_channel_struct *channels;
+       unsigned int nr_channels;
+       u32 freq_scale;
+       u64 start_freq;
+       u64 start_tsc;
+       unsigned long long start_monotonic;
+       struct timeval          start_time;
+       struct ltt_channel_setting *settings;
+       struct {
+               struct dentry                   *trace_root;
+       } dentry;
+       struct rchan_callbacks callbacks;
+       struct kref kref; /* Each channel has a kref of the trace struct */
+       struct ltt_transport *transport;
+       struct kref ltt_transport_kref;
+       wait_queue_head_t kref_wq; /* Place for ltt_trace_destroy to sleep */
+       char trace_name[NAME_MAX];
+} ____cacheline_aligned;
+
+/* Hardcoded event headers
+ *
+ * event header for a trace with active heartbeat : 27 bits timestamps
+ *
+ * headers are 32-bits aligned. In order to ensure such alignment, a dynamic per
+ * trace alignment value must be done.
+ *
+ * Remember that the C compiler does align each member on the boundary
+ * equivalent to their own size.
+ *
+ * As relay subbuffers are aligned on pages, we are sure that they are 4 and 8
+ * bytes aligned, so the buffer header and trace header are aligned.
+ *
+ * Event headers are aligned depending on the trace alignment option.
+ *
+ * Note using C structure bitfields for cross-endianness and portability
+ * concerns.
+ */
+
+#define LTT_RESERVED_EVENTS    3
+#define LTT_EVENT_BITS         5
+#define LTT_FREE_EVENTS                ((1 << LTT_EVENT_BITS) - LTT_RESERVED_EVENTS)
+#define LTT_TSC_BITS           27
+#define LTT_TSC_MASK           ((1 << LTT_TSC_BITS) - 1)
+
+struct ltt_event_header {
+       u32 id_time;            /* 5 bits event id (MSB); 27 bits time (LSB) */
+};
+
+/* Reservation flags */
+#define        LTT_RFLAG_ID                    (1 << 0)
+#define        LTT_RFLAG_ID_SIZE               (1 << 1)
+#define        LTT_RFLAG_ID_SIZE_TSC           (1 << 2)
+
+/*
+ * We use asm/timex.h : cpu_khz/HZ variable in here : we might have to deal
+ * specifically with CPU frequency scaling someday, so using an interpolation
+ * between the start and end of buffer values is not flexible enough. Using an
+ * immediate frequency value permits to calculate directly the times for parts
+ * of a buffer that would be before a frequency change.
+ *
+ * Keep the natural field alignment for _each field_ within this structure if
+ * you ever add/remove a field from this header. Packed attribute is not used
+ * because gcc generates poor code on at least powerpc and mips. Don't ever
+ * let gcc add padding between the structure elements.
+ */
+struct ltt_subbuffer_header {
+       uint64_t cycle_count_begin;     /* Cycle count at subbuffer start */
+       uint64_t cycle_count_end;       /* Cycle count at subbuffer end */
+       uint32_t magic_number;          /*
+                                        * Trace magic number.
+                                        * contains endianness information.
+                                        */
+       uint8_t major_version;
+       uint8_t minor_version;
+       uint8_t arch_size;              /* Architecture pointer size */
+       uint8_t alignment;              /* LTT data alignment */
+       uint64_t start_time_sec;        /* NTP-corrected start time */
+       uint64_t start_time_usec;
+       uint64_t start_freq;            /*
+                                        * Frequency at trace start,
+                                        * used all along the trace.
+                                        */
+       uint32_t freq_scale;            /* Frequency scaling (divisor) */
+       uint32_t lost_size;             /* Size unused at end of subbuffer */
+       uint32_t buf_size;              /* Size of this subbuffer */
+       uint32_t events_lost;           /*
+                                        * Events lost in this subbuffer since
+                                        * the beginning of the trace.
+                                        * (may overflow)
+                                        */
+       uint32_t subbuf_corrupt;        /*
+                                        * Corrupted (lost) subbuffers since
+                                        * the beginning of the trace.
+                                        * (may overflow)
+                                        */
+       uint8_t header_end[0];          /* End of header */
+};
+
+/**
+ * ltt_subbuffer_header_size - called on buffer-switch to a new sub-buffer
+ *
+ * Return header size without padding after the structure. Don't use packed
+ * structure because gcc generates inefficient code on some architectures
+ * (powerpc, mips..)
+ */
+static inline size_t ltt_subbuffer_header_size(void)
+{
+       return offsetof(struct ltt_subbuffer_header, header_end);
+}
+
+/*
+ * ltt_get_header_size
+ *
+ * Calculate alignment offset to 32-bits. This is the alignment offset of the
+ * event header.
+ *
+ * Important note :
+ * The event header must be 32-bits. The total offset calculated here :
+ *
+ * Alignment of header struct on 32 bits (min arch size, header size)
+ * + sizeof(header struct)  (32-bits)
+ * + (opt) u16 (ext. event id)
+ * + (opt) u16 (event_size) (if event_size == 0xFFFFUL, has ext. event size)
+ * + (opt) u32 (ext. event size)
+ * + (opt) u64 full TSC (aligned on min(64-bits, arch size))
+ *
+ * The payload must itself determine its own alignment from the biggest type it
+ * contains.
+ * */
+static inline unsigned char ltt_get_header_size(
+               struct ltt_channel_struct *channel,
+               size_t offset,
+               size_t data_size,
+               size_t *before_hdr_pad,
+               unsigned int rflags)
+{
+       size_t orig_offset = offset;
+       size_t padding;
+
+       BUILD_BUG_ON(sizeof(struct ltt_event_header) != sizeof(u32));
+
+       padding = ltt_align(offset, sizeof(struct ltt_event_header));
+       offset += padding;
+       offset += sizeof(struct ltt_event_header);
+
+       switch (rflags) {
+       case LTT_RFLAG_ID_SIZE_TSC:
+               offset += sizeof(u16) + sizeof(u16);
+               if (data_size >= 0xFFFFU)
+                       offset += sizeof(u32);
+               offset += ltt_align(offset, sizeof(u64));
+               offset += sizeof(u64);
+               break;
+       case LTT_RFLAG_ID_SIZE:
+               offset += sizeof(u16) + sizeof(u16);
+               if (data_size >= 0xFFFFU)
+                       offset += sizeof(u32);
+               break;
+       case LTT_RFLAG_ID:
+               offset += sizeof(u16);
+               break;
+       }
+
+       *before_hdr_pad = padding;
+       return offset - orig_offset;
+}
+
+/*
+ * ltt_write_event_header
+ *
+ * Writes the event header to the offset (already aligned on 32-bits).
+ *
+ * @trace : trace to write to.
+ * @channel : pointer to the channel structure..
+ * @buf : buffer to write to.
+ * @buf_offset : buffer offset to write to (aligned on 32 bits).
+ * @eID : event ID
+ * @event_size : size of the event, excluding the event header.
+ * @tsc : time stamp counter.
+ * @rflags : reservation flags.
+ *
+ * returns : offset where the event data must be written.
+ */
+static inline size_t ltt_write_event_header(struct ltt_trace_struct *trace,
+               struct ltt_channel_struct *channel,
+               struct rchan_buf *buf, long buf_offset,
+               u16 eID, size_t event_size,
+               u64 tsc, unsigned int rflags)
+{
+       struct ltt_event_header header;
+       size_t small_size;
+
+       switch (rflags) {
+       case LTT_RFLAG_ID_SIZE_TSC:
+               header.id_time = 29 << LTT_TSC_BITS;
+               break;
+       case LTT_RFLAG_ID_SIZE:
+               header.id_time = 30 << LTT_TSC_BITS;
+               break;
+       case LTT_RFLAG_ID:
+               header.id_time = 31 << LTT_TSC_BITS;
+               break;
+       default:
+               header.id_time = eID << LTT_TSC_BITS;
+               break;
+       }
+       header.id_time |= (u32)tsc & LTT_TSC_MASK;
+       ltt_relay_write(buf, buf_offset, &header, sizeof(header));
+       buf_offset += sizeof(header);
+
+       switch (rflags) {
+       case LTT_RFLAG_ID_SIZE_TSC:
+               small_size = min_t(size_t, event_size, 0xFFFFU);
+               ltt_relay_write(buf, buf_offset,
+                       (u16[]){ (u16)eID }, sizeof(u16));
+               buf_offset += sizeof(u16);
+               ltt_relay_write(buf, buf_offset,
+                       (u16[]){ (u16)small_size }, sizeof(u16));
+               buf_offset += sizeof(u16);
+               if (small_size == 0xFFFFU) {
+                       ltt_relay_write(buf, buf_offset,
+                               (u32[]){ (u32)event_size }, sizeof(u32));
+                       buf_offset += sizeof(u32);
+               }
+               buf_offset += ltt_align(buf_offset, sizeof(u64));
+               ltt_relay_write(buf, buf_offset,
+                       (u64[]){ (u64)tsc }, sizeof(u64));
+               buf_offset += sizeof(u64);
+               break;
+       case LTT_RFLAG_ID_SIZE:
+               small_size = min_t(size_t, event_size, 0xFFFFU);
+               ltt_relay_write(buf, buf_offset,
+                       (u16[]){ (u16)eID }, sizeof(u16));
+               buf_offset += sizeof(u16);
+               ltt_relay_write(buf, buf_offset,
+                       (u16[]){ (u16)small_size }, sizeof(u16));
+               buf_offset += sizeof(u16);
+               if (small_size == 0xFFFFU) {
+                       ltt_relay_write(buf, buf_offset,
+                               (u32[]){ (u32)event_size }, sizeof(u32));
+                       buf_offset += sizeof(u32);
+               }
+               break;
+       case LTT_RFLAG_ID:
+               ltt_relay_write(buf, buf_offset,
+                       (u16[]){ (u16)eID }, sizeof(u16));
+               buf_offset += sizeof(u16);
+               break;
+       default:
+               break;
+       }
+
+       return buf_offset;
+}
+
+/* Lockless LTTng */
+
+/* Buffer offset macros */
+
+/*
+ * BUFFER_TRUNC zeroes the subbuffer offset and the subbuffer number parts of
+ * the offset, which leaves only the buffer number.
+ */
+#define BUFFER_TRUNC(offset, chan) \
+       ((offset) & (~((chan)->alloc_size-1)))
+#define BUFFER_OFFSET(offset, chan) ((offset) & ((chan)->alloc_size - 1))
+#define SUBBUF_OFFSET(offset, chan) ((offset) & ((chan)->subbuf_size - 1))
+#define SUBBUF_ALIGN(offset, chan) \
+       (((offset) + (chan)->subbuf_size) & (~((chan)->subbuf_size - 1)))
+#define SUBBUF_TRUNC(offset, chan) \
+       ((offset) & (~((chan)->subbuf_size - 1)))
+#define SUBBUF_INDEX(offset, chan) \
+       (BUFFER_OFFSET((offset), chan) >> (chan)->subbuf_size_order)
+
+/*
+ * ltt_reserve_slot
+ *
+ * Atomic slot reservation in a LTTng buffer. It will take care of
+ * sub-buffer switching.
+ *
+ * Parameters:
+ *
+ * @trace : the trace structure to log to.
+ * @channel : the channel to reserve space into.
+ * @transport_data : specific transport data.
+ * @data_size : size of the variable length data to log.
+ * @slot_size : pointer to total size of the slot (out)
+ * @buf_offset : pointer to reserve offset (out)
+ * @tsc : pointer to the tsc at the slot reservation (out)
+ * @rflags : reservation flags (header specificity)
+ * @cpu : cpu id
+ *
+ * Return : -ENOSPC if not enough space, else 0.
+ */
+static inline int ltt_reserve_slot(
+               struct ltt_trace_struct *trace,
+               struct ltt_channel_struct *channel,
+               void **transport_data,
+               size_t data_size,
+               size_t *slot_size,
+               long *buf_offset,
+               u64 *tsc,
+               unsigned int *rflags,
+               int largest_align,
+               int cpu)
+{
+       return trace->ops->reserve_slot(trace, channel, transport_data,
+                       data_size, slot_size, buf_offset, tsc, rflags,
+                       largest_align, cpu);
+}
+
+
+/*
+ * ltt_commit_slot
+ *
+ * Atomic unordered slot commit. Increments the commit count in the
+ * specified sub-buffer, and delivers it if necessary.
+ *
+ * Parameters:
+ *
+ * @channel : the channel to reserve space into.
+ * @transport_data : specific transport data.
+ * @buf_offset : offset of beginning of reserved slot
+ * @slot_size : size of the reserved slot.
+ */
+static inline void ltt_commit_slot(
+               struct ltt_channel_struct *channel,
+               void **transport_data,
+               long buf_offset,
+               size_t slot_size)
+{
+       struct ltt_trace_struct *trace = channel->trace;
+
+       trace->ops->commit_slot(channel, transport_data, buf_offset, slot_size);
+}
+
+/*
+ * Control channels :
+ * control/metadata
+ * control/interrupts
+ * control/...
+ *
+ * cpu channel :
+ * cpu
+ */
+#define LTT_RELAY_ROOT         "ltt"
+#define LTT_RELAY_LOCKED_ROOT  "ltt-locked"
+
+#define LTT_METADATA_CHANNEL           "metadata_state"
+#define LTT_FD_STATE_CHANNEL           "fd_state"
+#define LTT_GLOBAL_STATE_CHANNEL       "global_state"
+#define LTT_IRQ_STATE_CHANNEL          "irq_state"
+#define LTT_MODULE_STATE_CHANNEL       "module_state"
+#define LTT_NETIF_STATE_CHANNEL                "netif_state"
+#define LTT_SOFTIRQ_STATE_CHANNEL      "softirq_state"
+#define LTT_SWAP_STATE_CHANNEL         "swap_state"
+#define LTT_SYSCALL_STATE_CHANNEL      "syscall_state"
+#define LTT_TASK_STATE_CHANNEL         "task_state"
+#define LTT_VM_STATE_CHANNEL           "vm_state"
+#define LTT_FS_CHANNEL                 "fs"
+#define LTT_INPUT_CHANNEL              "input"
+#define LTT_IPC_CHANNEL                        "ipc"
+#define LTT_KERNEL_CHANNEL             "kernel"
+#define LTT_MM_CHANNEL                 "mm"
+#define LTT_RCU_CHANNEL                        "rcu"
+
+#define LTT_FLIGHT_PREFIX      "flight-"
+
+/* Tracer properties */
+#define LTT_DEFAULT_SUBBUF_SIZE_LOW    65536
+#define LTT_DEFAULT_N_SUBBUFS_LOW      2
+#define LTT_DEFAULT_SUBBUF_SIZE_MED    262144
+#define LTT_DEFAULT_N_SUBBUFS_MED      2
+#define LTT_DEFAULT_SUBBUF_SIZE_HIGH   1048576
+#define LTT_DEFAULT_N_SUBBUFS_HIGH     2
+#define LTT_TRACER_MAGIC_NUMBER                0x00D6B7ED
+#define LTT_TRACER_VERSION_MAJOR       2
+#define LTT_TRACER_VERSION_MINOR       3
+
+/*
+ * Size reserved for high priority events (interrupts, NMI, BH) at the end of a
+ * nearly full buffer. User space won't use this last amount of space when in
+ * blocking mode. This space also includes the event header that would be
+ * written by this user space event.
+ */
+#define LTT_RESERVE_CRITICAL           4096
+
+/* Register and unregister function pointers */
+
+enum ltt_module_function {
+       LTT_FUNCTION_RUN_FILTER,
+       LTT_FUNCTION_FILTER_CONTROL,
+       LTT_FUNCTION_STATEDUMP
+};
+
+extern int ltt_module_register(enum ltt_module_function name, void *function,
+               struct module *owner);
+extern void ltt_module_unregister(enum ltt_module_function name);
+
+void ltt_transport_register(struct ltt_transport *transport);
+void ltt_transport_unregister(struct ltt_transport *transport);
+
+/* Exported control function */
+
+enum ltt_control_msg {
+       LTT_CONTROL_START,
+       LTT_CONTROL_STOP,
+       LTT_CONTROL_CREATE_TRACE,
+       LTT_CONTROL_DESTROY_TRACE
+};
+
+union ltt_control_args {
+       struct {
+               enum trace_mode mode;
+               unsigned int subbuf_size_low;
+               unsigned int n_subbufs_low;
+               unsigned int subbuf_size_med;
+               unsigned int n_subbufs_med;
+               unsigned int subbuf_size_high;
+               unsigned int n_subbufs_high;
+       } new_trace;
+};
+
+int _ltt_trace_setup(const char *trace_name);
+int ltt_trace_setup(const char *trace_name);
+struct ltt_trace_struct *_ltt_trace_find_setup(const char *trace_name);
+int ltt_trace_set_type(const char *trace_name, const char *trace_type);
+int ltt_trace_set_channel_subbufsize(const char *trace_name,
+               const char *channel_name, unsigned int size);
+int ltt_trace_set_channel_subbufcount(const char *trace_name,
+               const char *channel_name, unsigned int cnt);
+int ltt_trace_set_channel_enable(const char *trace_name,
+               const char *channel_name, unsigned int enable);
+int ltt_trace_set_channel_overwrite(const char *trace_name,
+               const char *channel_name, unsigned int overwrite);
+int ltt_trace_alloc(const char *trace_name);
+int ltt_trace_destroy(const char *trace_name);
+int ltt_trace_start(const char *trace_name);
+int ltt_trace_stop(const char *trace_name);
+
+extern int ltt_control(enum ltt_control_msg msg, const char *trace_name,
+               const char *trace_type, union ltt_control_args args);
+
+enum ltt_filter_control_msg {
+       LTT_FILTER_DEFAULT_ACCEPT,
+       LTT_FILTER_DEFAULT_REJECT
+};
+
+extern int ltt_filter_control(enum ltt_filter_control_msg msg,
+               const char *trace_name);
+
+extern struct dentry *get_filter_root(void);
+
+void ltt_write_trace_header(struct ltt_trace_struct *trace,
+               struct ltt_subbuffer_header *header);
+extern void ltt_buffer_destroy(struct ltt_channel_struct *ltt_chan);
+
+void ltt_core_register(int (*function)(u8, void *));
+
+void ltt_core_unregister(void);
+
+void ltt_release_trace(struct kref *kref);
+void ltt_release_transport(struct kref *kref);
+
+extern int ltt_probe_register(struct ltt_available_probe *pdata);
+extern int ltt_probe_unregister(struct ltt_available_probe *pdata);
+extern int ltt_marker_connect(const char *channel, const char *mname,
+               const char *pname);
+extern int ltt_marker_disconnect(const char *channel, const char *mname,
+               const char *pname);
+extern void ltt_dump_marker_state(struct ltt_trace_struct *trace);
+
+void ltt_lock_traces(void);
+void ltt_unlock_traces(void);
+
+extern void ltt_dump_softirq_vec(void *call_data);
+
+#ifdef CONFIG_HAVE_LTT_DUMP_TABLES
+extern void ltt_dump_sys_call_table(void *call_data);
+extern void ltt_dump_idt_table(void *call_data);
+#else
+static inline void ltt_dump_sys_call_table(void *call_data)
+{
+}
+
+static inline void ltt_dump_idt_table(void *call_data)
+{
+}
+#endif
+
+#ifdef CONFIG_LTT_KPROBES
+extern void ltt_dump_kprobes_table(void *call_data);
+#else
+static inline void ltt_dump_kprobes_table(void *call_data)
+{
+}
+#endif
+
+/* Relay IOCTL */
+
+/* Get the next sub buffer that can be read. */
+#define RELAY_GET_SUBBUF               _IOR(0xF5, 0x00, __u32)
+/* Release the oldest reserved (by "get") sub buffer. */
+#define RELAY_PUT_SUBBUF               _IOW(0xF5, 0x01, __u32)
+/* returns the number of sub buffers in the per cpu channel. */
+#define RELAY_GET_N_SUBBUFS            _IOR(0xF5, 0x02, __u32)
+/* returns the size of the sub buffers. */
+#define RELAY_GET_SUBBUF_SIZE          _IOR(0xF5, 0x03, __u32)
+
+#endif /* CONFIG_LTT */
+
+#endif /* _LTT_TRACER_H */
diff --git a/libtracing/tracercore.c b/libtracing/tracercore.c
new file mode 100644 (file)
index 0000000..18db91d
--- /dev/null
@@ -0,0 +1,71 @@
+/*
+ * LTT core in-kernel infrastructure.
+ *
+ * Copyright 2006 - Mathieu Desnoyers mathieu.desnoyers@polymtl.ca
+ *
+ * Distributed under the GPL license
+ */
+
+#include <linux/ltt-core.h>
+#include <linux/percpu.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+
+/* Traces structures */
+struct ltt_traces ltt_traces = {
+       .setup_head = LIST_HEAD_INIT(ltt_traces.setup_head),
+       .head = LIST_HEAD_INIT(ltt_traces.head),
+};
+EXPORT_SYMBOL(ltt_traces);
+
+/* Traces list writer locking */
+static DEFINE_MUTEX(ltt_traces_mutex);
+
+/* dentry of ltt's root dir */
+static struct dentry *ltt_root_dentry;
+struct dentry *get_ltt_root(void)
+{
+       if (!ltt_root_dentry) {
+               ltt_root_dentry = debugfs_create_dir(LTT_ROOT, NULL);
+               if (!ltt_root_dentry)
+                       printk(KERN_ERR "LTT : create ltt root dir failed\n");
+       }
+       return ltt_root_dentry;
+}
+EXPORT_SYMBOL_GPL(get_ltt_root);
+
+void ltt_lock_traces(void)
+{
+       mutex_lock(&ltt_traces_mutex);
+}
+EXPORT_SYMBOL_GPL(ltt_lock_traces);
+
+void ltt_unlock_traces(void)
+{
+       mutex_unlock(&ltt_traces_mutex);
+}
+EXPORT_SYMBOL_GPL(ltt_unlock_traces);
+
+DEFINE_PER_CPU(unsigned int, ltt_nesting);
+EXPORT_PER_CPU_SYMBOL(ltt_nesting);
+
+int ltt_run_filter_default(void *trace, uint16_t eID)
+{
+       return 1;
+}
+
+/* This function pointer is protected by a trace activation check */
+ltt_run_filter_functor ltt_run_filter = ltt_run_filter_default;
+EXPORT_SYMBOL_GPL(ltt_run_filter);
+
+void ltt_filter_register(ltt_run_filter_functor func)
+{
+       ltt_run_filter = func;
+}
+EXPORT_SYMBOL_GPL(ltt_filter_register);
+
+void ltt_filter_unregister(void)
+{
+       ltt_run_filter = ltt_run_filter_default;
+}
+EXPORT_SYMBOL_GPL(ltt_filter_unregister);
This page took 0.042098 seconds and 4 git commands to generate.