Clean up leftover UST comments, add comments about current implementation limitations
author     Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
           Fri, 20 May 2011 17:58:06 +0000 (13:58 -0400)
committer  Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
           Fri, 20 May 2011 17:58:06 +0000 (13:58 -0400)
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
libust/buffers.c
libust/buffers.h
libust/channels.c
libust/marker-control.c
libust/serialize.c
libust/tracectl.c
libust/tracercore.c

diff --git a/libust/buffers.c b/libust/buffers.c
index b2a949db454a97247ec25e90a6f2ad607d917131..c8976c7841834e82bd8fae53e0dbe8ca2783d51b 100644
@@ -3,7 +3,7 @@
  * LTTng userspace tracer buffering system
  *
  * Copyright (C) 2009 - Pierre-Marc Fournier (pierre-marc dot fournier at polymtl dot ca)
- * Copyright (C) 2008 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
+ * Copyright (C) 2008-2011 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
  *
  * This library is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA
  */
 
+/*
+ * Note: this code does not support the ref/noref flag and reader-owned
+ * subbuffer scheme needed for flight recorder mode.
+ */
+
 #include <unistd.h>
 #include <sys/mman.h>
 #include <sys/ipc.h>
@@ -45,6 +50,9 @@ struct ltt_reserve_switch_offsets {
 static DEFINE_MUTEX(ust_buffers_channels_mutex);
 static CDS_LIST_HEAD(ust_buffers_channels);
 
+static void ltt_force_switch(struct ust_buffer *buf,
+               enum force_switch_mode mode);
+
 static int get_n_cpus(void)
 {
        int result;
@@ -332,10 +340,14 @@ static void close_channel(struct ust_channel *chan)
                return;
 
        pthread_mutex_lock(&ust_buffers_channels_mutex);
-       for(i=0; i<chan->n_cpus; i++) {
-       /* FIXME: if we make it here, then all buffers were necessarily allocated. Moreover, we don't
-        * initialize to NULL so we cannot use this check. Should we? */
-//ust//                if (chan->buf[i])
+       /*
+        * Checking for chan->buf[i] being NULL or not is useless in
+        * practice because we allocate buffers for all possible cpus.
+        * However, should we decide to change this and only allocate
+        * for online cpus, this check becomes useful.
+        */
+       for (i=0; i<chan->n_cpus; i++) {
+               if (chan->buf[i])
                        close_buf(chan->buf[i]);
        }
 
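
The comment above distinguishes buffers allocated for "all possible cpus" from buffers allocated only for online cpus. For reference only (this is not part of the patch), the two CPU counts a Linux process can query look roughly like this; the program below is purely illustrative:

#include <stdio.h>
#include <unistd.h>

/* Illustration: configured ("possible") vs. currently online CPU counts. */
int main(void)
{
	long possible = sysconf(_SC_NPROCESSORS_CONF);	/* CPUs configured on the system */
	long online = sysconf(_SC_NPROCESSORS_ONLN);	/* CPUs online right now */

	printf("possible: %ld, online: %ld\n", possible, online);
	return 0;
}

If allocation were ever restricted to online CPUs, close_channel() would indeed have to rely on the chan->buf[i] NULL check reinstated above.
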
@@ -344,11 +356,6 @@ static void close_channel(struct ust_channel *chan)
        pthread_mutex_unlock(&ust_buffers_channels_mutex);
 }
 
-static void ltt_force_switch(struct ust_buffer *buf,
-               enum force_switch_mode mode);
-
-
-
 /*
  * offset is assumed to never be 0 here : never deliver a completely empty
  * subbuffer. The lost size is between 0 and subbuf_size-1.
@@ -412,51 +419,13 @@ int ust_buffers_get_subbuf(struct ust_buffer *buf, long *consumed)
         * data and the write offset. Correct consumed offset ordering
         * wrt commit count is insured by the use of cmpxchg to update
         * the consumed offset.
-        * smp_call_function_single can fail if the remote CPU is offline,
-        * this is OK because then there is no wmb to execute there.
-        * If our thread is executing on the same CPU as the on the buffers
-        * belongs to, we don't have to synchronize it at all. If we are
-        * migrated, the scheduler will take care of the memory cmm_barriers.
-        * Normally, smp_call_function_single() should ensure program order when
-        * executing the remote function, which implies that it surrounds the
-        * function execution with :
-        * smp_mb()
-        * send IPI
-        * csd_lock_wait
-        *                recv IPI
-        *                smp_mb()
-        *                exec. function
-        *                smp_mb()
-        *                csd unlock
-        * smp_mb()
-        *
-        * However, smp_call_function_single() does not seem to clearly execute
-        * such barriers. It depends on spinlock semantic to provide the barrier
-        * before executing the IPI and, when busy-looping, csd_lock_wait only
-        * executes smp_mb() when it has to wait for the other CPU.
-        *
-        * I don't trust this code. Therefore, let's add the smp_mb() sequence
-        * required ourself, even if duplicated. It has no performance impact
-        * anyway.
-        *
-        * smp_mb() is needed because cmm_smp_rmb() and cmm_smp_wmb() only order read vs
-        * read and write vs write. They do not ensure core synchronization. We
-        * really have to ensure total order between the 3 cmm_barriers running on
-        * the 2 CPUs.
         */
-//ust// #ifdef LTT_NO_IPI_BARRIER
+
        /*
         * Local rmb to match the remote wmb to read the commit count before the
         * buffer data and the write offset.
         */
        cmm_smp_rmb();
-//ust// #else
-//ust//        if (raw_smp_processor_id() != buf->cpu) {
-//ust//                smp_mb();       /* Total order with IPI handler smp_mb() */
-//ust//                smp_call_function_single(buf->cpu, remote_mb, NULL, 1);
-//ust//                smp_mb();       /* Total order with IPI handler smp_mb() */
-//ust//        }
-//ust// #endif
 
        write_offset = uatomic_read(&buf->offset);
        /*
@@ -479,12 +448,6 @@ int ust_buffers_get_subbuf(struct ust_buffer *buf, long *consumed)
           == 0) {
                return -EAGAIN;
        }
-
-       /* FIXME: is this ok to disable the reading feature? */
-//ust//        retval = update_read_sb_index(buf, consumed_idx);
-//ust//        if (retval)
-//ust//                return retval;
-
        *consumed = consumed_old;
 
        return 0;
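
For illustration only (not part of the patch), the ordering that the remaining cmm_smp_rmb() relies on can be sketched with liburcu primitives. The structure and functions below are hypothetical stand-ins, not the tracer's own types: the writer publishes its payload before bumping a commit count, and the reader pairs the writer's write barrier with a read barrier before looking at the payload.

#include <urcu/arch.h>		/* cmm_smp_wmb(), cmm_smp_rmb() */
#include <urcu/uatomic.h>	/* uatomic_add(), uatomic_read() */

struct demo_buf {
	long data;		/* payload */
	long commit_count;	/* ATOMIC: published after the payload */
};

/* Writer side: payload first, then the commit count that covers it. */
static void demo_publish(struct demo_buf *buf, long value)
{
	buf->data = value;
	cmm_smp_wmb();		/* order payload before commit count */
	uatomic_add(&buf->commit_count, 1);
}

/* Reader side: commit count first, then the payload it covers. */
static long demo_read(struct demo_buf *buf, long *commit_count)
{
	*commit_count = uatomic_read(&buf->commit_count);
	cmm_smp_rmb();		/* matches the writer's cmm_smp_wmb() */
	return buf->data;
}
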
@@ -499,14 +462,12 @@ int ust_buffers_put_subbuf(struct ust_buffer *buf, unsigned long uconsumed_old)
        consumed_old = consumed_old | uconsumed_old;
        consumed_new = SUBBUF_ALIGN(consumed_old, buf->chan);
 
-//ust//        spin_lock(&ltt_buf->full_lock);
        if (uatomic_cmpxchg(&buf->consumed, consumed_old,
                                consumed_new)
            != consumed_old) {
                /* We have been pushed by the writer : the last
                 * buffer read _is_ corrupted! It can also
                 * happen if this is a buffer we never got. */
-//ust//                spin_unlock(&ltt_buf->full_lock);
                return -EIO;
        } else {
                /* tell the client that buffer is now unfull */
@@ -515,7 +476,6 @@ int ust_buffers_put_subbuf(struct ust_buffer *buf, unsigned long uconsumed_old)
                index = SUBBUF_INDEX(consumed_old, buf->chan);
                data = BUFFER_OFFSET(consumed_old, buf->chan);
                ltt_buf_unfull(buf, index, data);
-//ust//                spin_unlock(&ltt_buf->full_lock);
        }
        return 0;
 }
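
As a hedged usage sketch (not code from this tree), a consumer built on the two functions above would take a sub-buffer, read it, then hand it back; only the signatures visible in this diff are assumed, plus <errno.h> and the tracer headers:

#include <errno.h>

/* demo_drain_one() is hypothetical; ust_buffers_get_subbuf()/put_subbuf() are the calls above. */
static int demo_drain_one(struct ust_buffer *buf)
{
	long consumed;
	int ret;

	ret = ust_buffers_get_subbuf(buf, &consumed);
	if (ret == -EAGAIN)
		return 0;		/* nothing ready yet, retry later */
	if (ret)
		return ret;

	/* ... copy the sub-buffer found at offset "consumed" out of the buffer ... */

	ret = ust_buffers_put_subbuf(buf, (unsigned long) consumed);
	if (ret == -EIO) {
		/* Pushed by the writer: the sub-buffer just read is corrupted. */
		return ret;
	}
	return ret;
}
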
@@ -656,24 +616,10 @@ static void remove_channel(struct ust_channel *chan)
 
 static void ltt_relay_async_wakeup_chan(struct ust_channel *ltt_channel)
 {
-//ust//        unsigned int i;
-//ust//        struct rchan *rchan = ltt_channel->trans_channel_data;
-//ust//
-//ust//        for_each_possible_cpu(i) {
-//ust//                struct ltt_channel_buf_struct *ltt_buf =
-//ust//                        percpu_ptr(ltt_channel->buf, i);
-//ust//
-//ust//                if (uatomic_read(&ltt_buf->wakeup_readers) == 1) {
-//ust//                        uatomic_set(&ltt_buf->wakeup_readers, 0);
-//ust//                        wake_up_interruptible(&rchan->buf[i]->read_wait);
-//ust//                }
-//ust//        }
 }
 
 static void ltt_relay_finish_buffer(struct ust_channel *channel, unsigned int cpu)
 {
-//     int result;
-
        if (channel->buf[cpu]) {
                struct ust_buffer *buf = channel->buf[cpu];
                ltt_force_switch(buf, FORCE_FLUSH);
@@ -688,7 +634,7 @@ static void finish_channel(struct ust_channel *channel)
 {
        unsigned int i;
 
-       for(i=0; i<channel->n_cpus; i++) {
+       for (i=0; i<channel->n_cpus; i++) {
                ltt_relay_finish_buffer(channel, i);
        }
 }
@@ -914,14 +860,12 @@ void ltt_force_switch_lockless_slow(struct ust_buffer *buf,
         */
        if (mode == FORCE_ACTIVE) {
                ltt_reserve_push_reader(chan, buf, offsets.end - 1);
-//ust//                ltt_clear_noref_flag(chan, buf, SUBBUF_INDEX(offsets.end - 1, chan));
        }
 
        /*
         * Switch old subbuffer if needed.
         */
        if (offsets.end_switch_old) {
-//ust//                ltt_clear_noref_flag(rchan, buf, SUBBUF_INDEX(offsets.old - 1, rchan));
                ltt_reserve_switch_old_subbuf(chan, buf, &offsets, &tsc);
        }
 
@@ -1099,16 +1043,10 @@ int ltt_reserve_slot_lockless_slow(struct ust_channel *chan,
         */
        ltt_reserve_push_reader(chan, buf, offsets.end - 1);
 
-       /*
-        * Clear noref flag for this subbuffer.
-        */
-//ust//        ltt_clear_noref_flag(chan, buf, SUBBUF_INDEX(offsets.end - 1, chan));
-
        /*
         * Switch old subbuffer if needed.
         */
        if (unlikely(offsets.end_switch_old)) {
-//ust//                ltt_clear_noref_flag(chan, buf, SUBBUF_INDEX(offsets.old - 1, chan));
                ltt_reserve_switch_old_subbuf(chan, buf, &offsets, tsc);
                DBG("Switching %s_%d", chan->channel_name, cpu);
        }
diff --git a/libust/buffers.h b/libust/buffers.h
index afeaef35897e7d7b0c71e3fe28950452bf5364cc..850b9355017299ac3bc38c265f2ac86ce68aafe3 100644
@@ -33,8 +33,6 @@
 #include "tracerconst.h"
 #include "tracercore.h"
 
-/***** FIXME: SHOULD BE REMOVED ***** */
-
 /*
  * BUFFER_TRUNC zeroes the subbuffer offset and the subbuffer number parts of
  * the offset, which leaves only the buffer number.
 
 /**************************************/
 
+/*
+ * TODO: using "long" type for struct ust_buffer (control structure
+ * shared between traced apps and the consumer) is a very bad idea when
+ * we get to systems with mixed 32/64-bit processes.
+ *
+ * But on 64-bit systems, we want the full power of 64-bit counters,
+ * which wrap less often. Therefore, it's not as easy as "use 32-bit
+ * types everywhere".
+ *
+ * One way to deal with this is to:
+ * 1) Design 64-bit consumer so it can detect 32-bit and 64-bit apps.
+ * 2) The 32-bit consumer only supports 32-bit apps.
+ */
+
 struct commit_counters {
        long cc;                        /* ATOMIC */
        long cc_sb;                     /* ATOMIC - Incremented _once_ at sb switch */
@@ -105,9 +117,9 @@ struct ust_buffer {
 } ____cacheline_aligned;
 
 /*
- * A switch is done during tracing or as a final flush after tracing (so it
- * won't write in the new sub-buffer).
- * FIXME: make this message clearer
+ * A switch is either done during tracing (FORCE_ACTIVE) or as a final
+ * flush after tracing (FORCE_FLUSH). FORCE_FLUSH ensures we won't
+ * write into the new sub-buffer.
  */
 enum force_switch_mode { FORCE_ACTIVE, FORCE_FLUSH };
 
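
To make the 32/64-bit TODO above concrete, here is a purely hypothetical direction (not the current layout): counters shared across processes of different word sizes would need fixed-width types, at the cost of 32-bit applications losing cheap atomic updates on a 64-bit counter.

#include <stdint.h>

/*
 * Hypothetical fixed-width variant of the commit counters. The code in this
 * tree keeps "long" on purpose; this only illustrates the trade-off the TODO
 * describes.
 */
struct demo_commit_counters_fixed {
	uint64_t cc;	/* 64-bit: wraps rarely, but 32-bit apps cannot
			 * update it with a single atomic instruction on
			 * every architecture */
	uint64_t cc_sb;
};
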
@@ -415,12 +427,6 @@ static __inline__ int ltt_relay_try_reserve(
 
        *tsc = trace_clock_read64();
 
-//ust// #ifdef CONFIG_LTT_VMCORE
-//ust//        prefetch(&buf->commit_count[SUBBUF_INDEX(*o_begin, rchan)]);
-//ust//        prefetch(&buf->commit_seq[SUBBUF_INDEX(*o_begin, rchan)]);
-//ust// #else
-//ust//        prefetchw(&buf->commit_count[SUBBUF_INDEX(*o_begin, rchan)]);
-//ust// #endif
        if (last_tsc_overflow(buf, *tsc))
                *rflags = LTT_RFLAG_ID_SIZE_TSC;
 
@@ -465,7 +471,6 @@ static __inline__ int ltt_reserve_slot(struct ust_channel *chan,
        /*
         * Perform retryable operations.
         */
-       /* FIXME: make this really per cpu? */
        if (unlikely(CMM_LOAD_SHARED(ltt_nesting) > 4)) {
                DBG("Dropping event because nesting is too deep.");
                uatomic_inc(&buf->events_lost);
@@ -494,11 +499,6 @@ static __inline__ int ltt_reserve_slot(struct ust_channel *chan,
         */
        ltt_reserve_push_reader(chan, buf, o_end - 1);
 
-       /*
-        * Clear noref flag for this subbuffer.
-        */
-//ust//        ltt_clear_noref_flag(chan, buf, SUBBUF_INDEX(o_end - 1, chan));
-
        *buf_offset = o_begin + before_hdr_pad;
        return 0;
 slow_path:
@@ -525,7 +525,6 @@ static __inline__ void ltt_force_switch(struct ust_buffer *buf,
  * commit count reaches back the reserve offset (module subbuffer size). It is
  * useful for crash dump.
  */
-//ust// #ifdef CONFIG_LTT_VMCORE
 static __inline__ void ltt_write_commit_counter(struct ust_channel *chan,
                struct ust_buffer *buf, long idx, long buf_offset,
                long commit_count, size_t data_size)
@@ -551,12 +550,6 @@ static __inline__ void ltt_write_commit_counter(struct ust_channel *chan,
 
        DBG("commit_seq for channel %s_%d, subbuf %ld is now %ld", buf->chan->channel_name, buf->cpu, idx, commit_count);
 }
-//ust// #else
-//ust// static __inline__ void ltt_write_commit_counter(struct ust_buffer *buf,
-//ust//                long idx, long buf_offset, long commit_count, size_t data_size)
-//ust// {
-//ust// }
-//ust// #endif
 
 /*
  * Atomic unordered slot commit. Increments the commit count in the
diff --git a/libust/channels.c b/libust/channels.c
index 85e5944e325db8010e556b5b94f3fa5cdfc32a52..ab6afd7706def12005b69b5e02bb8d8317bd94a4 100644
@@ -134,7 +134,6 @@ end:
        pthread_mutex_unlock(&ltt_channel_mutex);
        return ret;
 }
-//ust// EXPORT_SYMBOL_GPL(ltt_channels_register);
 
 /**
  * ltt_channels_unregister - Unregister a trace channel.
@@ -158,7 +157,6 @@ end:
        pthread_mutex_unlock(&ltt_channel_mutex);
        return ret;
 }
-//ust// EXPORT_SYMBOL_GPL(ltt_channels_unregister);
 
 /**
  * ltt_channels_set_default - Set channel default behavior.
@@ -185,7 +183,6 @@ end:
        pthread_mutex_unlock(&ltt_channel_mutex);
        return ret;
 }
-//ust// EXPORT_SYMBOL_GPL(ltt_channels_set_default);
 
 /**
  * ltt_channels_get_name_from_index - get channel name from channel index
@@ -203,7 +200,6 @@ const char *ltt_channels_get_name_from_index(unsigned int index)
                        return iter->name;
        return NULL;
 }
-//ust// EXPORT_SYMBOL_GPL(ltt_channels_get_name_from_index);
 
 static struct ltt_channel_setting *
 ltt_channels_get_setting_from_name(const char *name)
@@ -235,7 +231,6 @@ int ltt_channels_get_index_from_name(const char *name)
        else
                return -1;
 }
-//ust// EXPORT_SYMBOL_GPL(ltt_channels_get_index_from_name);
 
 /**
  * ltt_channels_trace_alloc - Allocate channel structures for a trace
@@ -284,7 +279,6 @@ end:
        pthread_mutex_unlock(&ltt_channel_mutex);
        return channel;
 }
-//ust// EXPORT_SYMBOL_GPL(ltt_channels_trace_alloc);
 
 /**
  * ltt_channels_trace_free - Free one trace's channels
@@ -302,7 +296,6 @@ void ltt_channels_trace_free(struct ust_channel *channels)
        pthread_mutex_unlock(&ltt_channel_mutex);
        unlock_ust_marker();
 }
-//ust// EXPORT_SYMBOL_GPL(ltt_channels_trace_free);
 
 /**
  * _ltt_channels_get_event_id - get next event ID for a marker
@@ -358,7 +351,3 @@ int ltt_channels_get_event_id(const char *channel, const char *name)
        pthread_mutex_unlock(&ltt_channel_mutex);
        return ret;
 }
-
-//ust// MODULE_LICENSE("GPL");
-//ust// MODULE_AUTHOR("Mathieu Desnoyers");
-//ust// MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Channel Management");
diff --git a/libust/marker-control.c b/libust/marker-control.c
index 4b3096c51753a9133b50dc62045d8a7eaa450109..0d3cb2ceb3ab14ee480995400bd67f5532df5bae 100644
@@ -31,6 +31,8 @@
 #define DEFAULT_CHANNEL "cpu"
 #define DEFAULT_PROBE "default"
 
+static int initialized;
+
 CDS_LIST_HEAD(probes_list);
 
 /*
@@ -53,10 +55,6 @@ static CDS_LIST_HEAD(ust_markers_loaded_list);
  */
 static CDS_LIST_HEAD(probes_registered_list);
 
-//ust// static struct proc_dir_entry *pentry;
-
-//ust// static struct file_operations ltt_fops;
-
 static struct ltt_available_probe *get_probe_from_name(const char *pname)
 {
        struct ltt_available_probe *iter;
@@ -235,158 +233,6 @@ end:
        return ret;
 }
 
-/*
- * function handling proc entry write.
- *
- * connect <channel name> <ust_marker name> [<probe name>]]
- * disconnect <channel name> <ust_marker name> [<probe name>]
- */
-//ust// static ssize_t ltt_write(struct file *file, const char __user *buffer,
-//ust//                           size_t count, loff_t *offset)
-//ust// {
-//ust//        char *kbuf;
-//ust//        char *iter, *ust_marker_action, *arg[4];
-//ust//        ssize_t ret;
-//ust//        int i;
-//ust// 
-//ust//        if (!count)
-//ust//                return -EINVAL;
-//ust// 
-//ust//        kbuf = vmalloc(count + 1);
-//ust//        kbuf[count] = '\0';             /* Transform into a string */
-//ust//        ret = copy_from_user(kbuf, buffer, count);
-//ust//        if (ret) {
-//ust//                ret = -EINVAL;
-//ust//                goto end;
-//ust//        }
-//ust//        get_ust_marker_string(kbuf, &ust_marker_action, &iter);
-//ust//        if (!ust_marker_action || ust_marker_action == iter) {
-//ust//                ret = -EINVAL;
-//ust//                goto end;
-//ust//        }
-//ust//        for (i = 0; i < 4; i++) {
-//ust//                arg[i] = NULL;
-//ust//                if (iter < kbuf + count) {
-//ust//                        iter++;                 /* skip the added '\0' */
-//ust//                        get_ust_marker_string(iter, &arg[i], &iter);
-//ust//                        if (arg[i] == iter)
-//ust//                                arg[i] = NULL;
-//ust//                }
-//ust//        }
-//ust// 
-//ust//        if (!arg[0] || !arg[1]) {
-//ust//                ret = -EINVAL;
-//ust//                goto end;
-//ust//        }
-//ust// 
-//ust//        if (!strcmp(ust_marker_action, "connect")) {
-//ust//                ret = ltt_ust_marker_connect(arg[0], arg[1], arg[2]);
-//ust//                if (ret)
-//ust//                        goto end;
-//ust//        } else if (!strcmp(ust_marker_action, "disconnect")) {
-//ust//                ret = ltt_ust_marker_disconnect(arg[0], arg[1], arg[2]);
-//ust//                if (ret)
-//ust//                        goto end;
-//ust//        }
-//ust//        ret = count;
-//ust// end:
-//ust//        vfree(kbuf);
-//ust//        return ret;
-//ust// }
-//ust// 
-//ust// static void *s_next(struct seq_file *m, void *p, loff_t *pos)
-//ust// {
-//ust//        struct ust_marker_iter *iter = m->private;
-//ust// 
-//ust//        ust_marker_iter_next(iter);
-//ust//        if (!iter->ust_marker) {
-//ust//                /*
-//ust//                 * Setting the iter module to -1UL will make sure
-//ust//                 * that no module can possibly hold the current ust_marker.
-//ust//                 */
-//ust//                iter->module = (void *)-1UL;
-//ust//                return NULL;
-//ust//        }
-//ust//        return iter->ust_marker;
-//ust// }
-//ust// 
-//ust// static void *s_start(struct seq_file *m, loff_t *pos)
-//ust// {
-//ust//        struct ust_marker_iter *iter = m->private;
-//ust// 
-//ust//        if (!*pos)
-//ust//                ust_marker_iter_reset(iter);
-//ust//        ust_marker_iter_start(iter);
-//ust//        if (!iter->ust_marker) {
-//ust//                /*
-//ust//                 * Setting the iter module to -1UL will make sure
-//ust//                 * that no module can possibly hold the current ust_marker.
-//ust//                 */
-//ust//                iter->module = (void *)-1UL;
-//ust//                return NULL;
-//ust//        }
-//ust//        return iter->ust_marker;
-//ust// }
-//ust// 
-//ust// static void s_stop(struct seq_file *m, void *p)
-//ust// {
-//ust//        ust_marker_iter_stop(m->private);
-//ust// }
-//ust// 
-//ust// static int s_show(struct seq_file *m, void *p)
-//ust// {
-//ust//        struct ust_marker_iter *iter = m->private;
-//ust// 
-//ust//        seq_printf(m, "channel: %s ust_marker: %s format: \"%s\" state: %d "
-//ust//                "event_id: %hu call: 0x%p probe %s : 0x%p\n",
-//ust//                iter->ust_marker->channel,
-//ust//                iter->ust_marker->name, iter->ust_marker->format,
-//ust//                _imv_read(iter->ust_marker->state),
-//ust//                iter->ust_marker->event_id,
-//ust//                iter->ust_marker->call,
-//ust//                iter->ust_marker->ptype ? "multi" : "single",
-//ust//                iter->ust_marker->ptype ?
-//ust//                (void*)iter->ust_marker->multi : (void*)iter->ust_marker->single.func);
-//ust//        return 0;
-//ust// }
-//ust// 
-//ust// static const struct seq_operations ltt_seq_op = {
-//ust//        .start = s_start,
-//ust//        .next = s_next,
-//ust//        .stop = s_stop,
-//ust//        .show = s_show,
-//ust// };
-//ust// 
-//ust// static int ltt_open(struct inode *inode, struct file *file)
-//ust// {
-//ust//        /*
-//ust//         * Iterator kept in m->private.
-//ust//         * Restart iteration on all modules between reads because we do not lock
-//ust//         * the module mutex between those.
-//ust//         */
-//ust//        int ret;
-//ust//        struct ust_marker_iter *iter;
-//ust// 
-//ust//        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
-//ust//        if (!iter)
-//ust//                return -ENOMEM;
-//ust// 
-//ust//        ret = seq_open(file, &ltt_seq_op);
-//ust//        if (ret == 0)
-//ust//                ((struct seq_file *)file->private_data)->private = iter;
-//ust//        else
-//ust//                kfree(iter);
-//ust//        return ret;
-//ust// }
-//ust// 
-//ust// static struct file_operations ltt_fops = {
-//ust//        .write = ltt_write,
-//ust//        .open = ltt_open,
-//ust//        .read = seq_read,
-//ust//        .llseek = seq_lseek,
-//ust//        .release = seq_release_private,
-//ust// };
-
 static void disconnect_all_ust_markers(void)
 {
        struct ltt_active_ust_marker *pdata, *tmp;
@@ -399,8 +245,6 @@ static void disconnect_all_ust_markers(void)
        }
 }
 
-static char initialized = 0;
-
 void __attribute__((constructor)) init_ust_marker_control(void)
 {
        if (!initialized) {
diff --git a/libust/serialize.c b/libust/serialize.c
index f2c5df03b09d2513f538dfc21f633451341b41a5..81f8e4c610107e6080c1ea6eee8e3702bcd74cd6 100644
@@ -653,7 +653,7 @@ notrace void ltt_vtrace(const struct ust_marker *mdata, void *probe_data,
        if (unlikely(ltt_traces.num_active_traces == 0))
                return;
 
-       rcu_read_lock(); //ust// rcu_read_lock_sched_notrace();
+       rcu_read_lock();
        cpu = ust_get_cpu();
 
        /* Force volatile access. */
@@ -713,11 +713,12 @@ notrace void ltt_vtrace(const struct ust_marker *mdata, void *probe_data,
                if (!channel->active)
                        continue;
 
-               /* If a new cpu was plugged since the trace was started, we did
+               /*
+                * If a new cpu was plugged since the trace was started, we did
                 * not add it to the trace, and therefore we write the event to
                 * cpu 0.
                 */
-               if(cpu >= channel->n_cpus) {
+               if (cpu >= channel->n_cpus) {
                        cpu = 0;
                }
 
@@ -730,7 +731,6 @@ notrace void ltt_vtrace(const struct ust_marker *mdata, void *probe_data,
 
                va_copy(args_copy, *args);
                /* FIXME : could probably encapsulate transport better. */
-//ust//                buf = ((struct rchan *)channel->trans_channel_data)->buf[cpu];
                buf = channel->buf[cpu];
                /* Out-of-order write : header and data */
                buf_offset = ltt_write_event_header(channel, buf, buf_offset,
@@ -749,7 +749,7 @@ notrace void ltt_vtrace(const struct ust_marker *mdata, void *probe_data,
        tracer_stack_pos = stack_pos_ctx;
        CMM_STORE_SHARED(ltt_nesting, CMM_LOAD_SHARED(ltt_nesting) - 1);
 
-       rcu_read_unlock(); //ust// rcu_read_unlock_sched_notrace();
+       rcu_read_unlock();
 }
 
 notrace void ltt_trace(const struct ust_marker *mdata, void *probe_data,
@@ -771,7 +771,7 @@ static notrace void skip_space(const char **ps)
 
 static notrace void copy_token(char **out, const char **in)
 {
-       while(**in != ' ' && **in != '\0') {
+       while (**in != ' ' && **in != '\0') {
                **out = **in;
                (*out)++;
                (*in)++;
@@ -808,23 +808,23 @@ int serialize_to_text(char *outbuf, int bufsize, const char *fmt, va_list ap)
        int result;
        enum { none, cfmt, tracefmt, argname } prev_token = none;
 
-       while(*orig_fmt_p != '\0') {
-               if(*orig_fmt_p == '%') {
+       while (*orig_fmt_p != '\0') {
+               if (*orig_fmt_p == '%') {
                        prev_token = cfmt;
                        copy_token(&new_fmt_p, &orig_fmt_p);
                }
-               else if(*orig_fmt_p == '#') {
+               else if (*orig_fmt_p == '#') {
                        prev_token = tracefmt;
                        do {
                                orig_fmt_p++;
-                       } while(*orig_fmt_p != ' ' && *orig_fmt_p != '\0');
+                       } while (*orig_fmt_p != ' ' && *orig_fmt_p != '\0');
                }
-               else if(*orig_fmt_p == ' ') {
-                       if(prev_token == argname) {
+               else if (*orig_fmt_p == ' ') {
+                       if (prev_token == argname) {
                                *new_fmt_p = '=';
                                new_fmt_p++;
                        }
-                       else if(prev_token == cfmt) {
+                       else if (prev_token == cfmt) {
                                *new_fmt_p = ' ';
                                new_fmt_p++;
                        }
@@ -839,7 +839,7 @@ int serialize_to_text(char *outbuf, int bufsize, const char *fmt, va_list ap)
 
        *new_fmt_p = '\0';
 
-       if(outbuf == NULL) {
+       if (outbuf == NULL) {
                /* use this false_buffer for compatibility with pre-C99 */
                outbuf = &false_buf;
                bufsize = 1;
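
For illustration only (not in this tree), serialize_to_text() as declared above can be driven through a small variadic wrapper. Given a marker-style format such as "dfd %d count %d", the rewriting loop above is expected to turn it into roughly "dfd=%d count=%d" before the arguments are expanded; the wrapper and the example format are assumptions, not tracer API:

#include <stdarg.h>

extern int serialize_to_text(char *outbuf, int bufsize, const char *fmt, va_list ap);

/* Hypothetical helper: format a marker-style string into outbuf. */
static int demo_serialize(char *outbuf, int bufsize, const char *fmt, ...)
{
	va_list ap;
	int len;

	va_start(ap, fmt);
	len = serialize_to_text(outbuf, bufsize, fmt, ap);
	va_end(ap);
	return len;
}

/* demo_serialize(buf, sizeof(buf), "dfd %d count %d", 3, 10)
 * would then be expected to yield something like "dfd=3 count=10". */
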
diff --git a/libust/tracectl.c b/libust/tracectl.c
index 771e4e1f6c86540cd1a478b2b0abde8c98d90b96..6e467aa4fe81861a7401c6f41e3f04eb52df6a68 100644
@@ -1709,7 +1709,6 @@ static void ust_after_fork_common(ust_fork_info_t *fork_info)
 
        pthread_mutex_unlock(&listen_sock_mutex);
        pthread_mutex_unlock(&listener_thread_data_mutex);
-
         /* Restore signals */
         result = sigprocmask(SIG_SETMASK, &fork_info->orig_sigs, NULL);
         if (result == -1) {
@@ -1733,7 +1732,7 @@ void ust_after_fork_child(ust_fork_info_t *fork_info)
        /* Sanitize the child */
        ust_fork();
 
-       /* Then release mutexes and reenable signals */
+       /* Release mutexes and reenable signals */
        ust_after_fork_common(fork_info);
 }
 
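
The fork handling above follows a classic block-signals / take-locks / fork / release sequence. A minimal, self-contained sketch of that pattern (the lock and the helpers are stand-ins, not the tracer's listener state):

#include <pthread.h>
#include <signal.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

/* Before fork(): quiesce signal handlers, then take the locks the child will inherit. */
static void demo_before_fork(sigset_t *orig_sigs)
{
	sigset_t all_sigs;

	sigfillset(&all_sigs);
	sigprocmask(SIG_BLOCK, &all_sigs, orig_sigs);
	pthread_mutex_lock(&demo_lock);
}

/* After fork(), in parent and child: drop the locks, then restore the signal mask. */
static void demo_after_fork(sigset_t *orig_sigs)
{
	pthread_mutex_unlock(&demo_lock);
	sigprocmask(SIG_SETMASK, orig_sigs, NULL);
}
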
diff --git a/libust/tracercore.c b/libust/tracercore.c
index 1e418b6a97d6be33f7d6379c82b72e69119f420e..8d3dad605f0d9d2598a2dc1eae01ddd5e2058e3f 100644
@@ -39,8 +39,6 @@ void ltt_unlock_traces(void)
        pthread_mutex_unlock(&ltt_traces_mutex);
 }
 
-//ust// DEFINE_PER_CPU(unsigned int, ltt_nesting);
-//ust// EXPORT_PER_CPU_SYMBOL(ltt_nesting);
 __thread int ltt_nesting;
 
 int ltt_run_filter_default(void *trace, uint16_t eID)
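
Finally, a hedged note on the per-thread ltt_nesting counter defined above: ltt_vtrace() decrements it on exit (see serialize.c earlier in this diff) and the fast path in buffers.h drops events once it exceeds 4. A hypothetical probe entry point would bracket its work like this; demo_trace_event() is a stand-in, not a tracer function:

#include <urcu/system.h>	/* CMM_LOAD_SHARED(), CMM_STORE_SHARED() */

extern __thread int ltt_nesting;	/* defined in tracercore.c above */

static void demo_trace_event(void)
{
	CMM_STORE_SHARED(ltt_nesting, CMM_LOAD_SHARED(ltt_nesting) + 1);

	if (CMM_LOAD_SHARED(ltt_nesting) > 4) {
		/* A probe fired from within a probe too many times: drop the event. */
		goto out;
	}

	/* ... reserve a slot, write the event header and payload, commit ... */

out:
	CMM_STORE_SHARED(ltt_nesting, CMM_LOAD_SHARED(ltt_nesting) - 1);
}
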