tracectl cleanup v3
diff --git a/libust/buffers.c b/libust/buffers.c
index 13660a8910402ad45e163c524aa845d37025a6df..374ec613240fde5028fbac694b9a4d49a22c20c8 100644
--- a/libust/buffers.c
+++ b/libust/buffers.c
 #include <sys/ipc.h>
 #include <sys/shm.h>
 #include <fcntl.h>
-#include <ust/kernelcompat.h>
-#include <kcompat/kref.h>
+#include <stdlib.h>
+
+#include <ust/clock.h>
+
 #include "buffers.h"
 #include "channels.h"
 #include "tracer.h"
 #include "tracercore.h"
 #include "usterr.h"
 
+struct ltt_reserve_switch_offsets {
+       long begin, end, old;
+       long begin_switch, end_switch_current, end_switch_old;
+       size_t before_hdr_pad, size;
+};
+
+
 static DEFINE_MUTEX(ust_buffers_channels_mutex);
 static LIST_HEAD(ust_buffers_channels);
 
@@ -41,21 +50,91 @@ static int get_n_cpus(void)
        int result;
        static int n_cpus = 0;
 
-       if(n_cpus) {
-               return n_cpus;
+       if(!n_cpus) {
+               /* On Linux, when some processors are offline
+                * _SC_NPROCESSORS_CONF counts the offline
+                * processors, whereas _SC_NPROCESSORS_ONLN
+                * does not. If we used _SC_NPROCESSORS_ONLN,
+                * getcpu() could return a value greater than
+                * this sysconf, in which case the arrays
+                * indexed by processor would overflow.
+                */
+               result = sysconf(_SC_NPROCESSORS_CONF);
+               if(result == -1) {
+                       return -1;
+               }
+
+               n_cpus = result;
        }
 
-       result = sysconf(_SC_NPROCESSORS_ONLN);
-       if(result == -1) {
-               return -1;
+       return n_cpus;
+}
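
For reference, a standalone sketch (not part of the patch) that prints the two sysconf values the comment above contrasts; per-CPU arrays indexed by the current CPU number have to be sized from the configured count, not the online one:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
        long conf = sysconf(_SC_NPROCESSORS_CONF);      /* includes offline CPUs */
        long onln = sysconf(_SC_NPROCESSORS_ONLN);      /* online CPUs only */

        printf("configured: %ld, online: %ld\n", conf, onln);
        return 0;
}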
+
+/**
+ * _ust_buffers_strncpy_fixup - Fix an incomplete string in a ltt_relay buffer.
+ * @buf : buffer
+ * @offset : offset within the buffer
+ * @len : length to write
+ * @copied: string actually copied
+ * @terminated: does string end with \0
+ *
+ * Fills string with "X" if incomplete.
+ */
+void _ust_buffers_strncpy_fixup(struct ust_buffer *buf, size_t offset,
+                               size_t len, size_t copied, int terminated)
+{
+       size_t buf_offset, cpy;
+
+       if (copied == len) {
+               /*
+                * Deal with non-terminated string.
+                */
+               assert(!terminated);
+               offset += copied - 1;
+               buf_offset = BUFFER_OFFSET(offset, buf->chan);
+               /*
+                * Underlying layer should never ask for writes across
+                * subbuffers.
+                */
+               assert(buf_offset
+                      < buf->chan->subbuf_size*buf->chan->subbuf_cnt);
+               ust_buffers_do_memset(buf->buf_data + buf_offset, '\0', 1);
+               return;
        }
 
-       n_cpus = result;
+       /*
+        * Deal with incomplete string.
+        * Overwrite string's \0 with X too.
+        */
+       cpy = copied - 1;
+       assert(terminated);
+       len -= cpy;
+       offset += cpy;
+       buf_offset = BUFFER_OFFSET(offset, buf->chan);
+
+       /*
+        * Underlying layer should never ask for writes across subbuffers.
+        */
+       assert(buf_offset
+              < buf->chan->subbuf_size*buf->chan->subbuf_cnt);
+
+       ust_buffers_do_memset(buf->buf_data + buf_offset,
+                             'X', len);
 
-       return result;
+       /*
+        * Overwrite last 'X' with '\0'.
+        */
+       offset += len - 1;
+       buf_offset = BUFFER_OFFSET(offset, buf->chan);
+       /*
+        * Underlying layer should never ask for writes across subbuffers.
+        */
+       assert(buf_offset
+              < buf->chan->subbuf_size*buf->chan->subbuf_cnt);
+       ust_buffers_do_memset(buf->buf_data + buf_offset, '\0', 1);
 }
 
-static int ust_buffers_init_buffer(struct ltt_trace_struct *trace,
+static int ust_buffers_init_buffer(struct ust_trace *trace,
                struct ust_channel *ltt_chan,
                struct ust_buffer *buf,
                unsigned int n_subbufs);
@@ -186,22 +265,31 @@ int ust_buffers_channel_open(struct ust_channel *chan, size_t subbuf_size, size_
        if(subbuf_size == 0 || subbuf_cnt == 0)
                return -1;
 
+       /* Check that the subbuffer size is larger than a page. */
+       WARN_ON_ONCE(subbuf_size < PAGE_SIZE);
+
+       /*
+        * Make sure the number of subbuffers and subbuffer size are power of 2.
+        */
+       WARN_ON_ONCE(hweight32(subbuf_size) != 1);
+       WARN_ON(hweight32(subbuf_cnt) != 1);
+
        chan->version = UST_CHANNEL_VERSION;
        chan->subbuf_cnt = subbuf_cnt;
        chan->subbuf_size = subbuf_size;
        chan->subbuf_size_order = get_count_order(subbuf_size);
-       chan->alloc_size = FIX_SIZE(subbuf_size * subbuf_cnt);
+       chan->alloc_size = subbuf_size * subbuf_cnt;
 
        kref_init(&chan->kref);
 
-       mutex_lock(&ust_buffers_channels_mutex);
+       pthread_mutex_lock(&ust_buffers_channels_mutex);
        for(i=0; i<chan->n_cpus; i++) {
                result = ust_buffers_open_buf(chan, i);
                if (result == -1)
                        goto error;
        }
        list_add(&chan->list, &ust_buffers_channels);
-       mutex_unlock(&ust_buffers_channels_mutex);
+       pthread_mutex_unlock(&ust_buffers_channels_mutex);
 
        return 0;
 
@@ -210,10 +298,11 @@ int ust_buffers_channel_open(struct ust_channel *chan, size_t subbuf_size, size_
        for(; i>=0; i--) {
                ust_buffers_close_buf(chan->buf[i]);
 error:
+               do {} while(0);
        }
 
        kref_put(&chan->kref, ust_buffers_destroy_channel);
-       mutex_unlock(&ust_buffers_channels_mutex);
+       pthread_mutex_unlock(&ust_buffers_channels_mutex);
        return -1;
 }
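
The new sanity checks rely on hweight32() being a population count; a small standalone equivalent (a sketch using the GCC builtin) shows what they verify:

#include <stdio.h>

static int is_power_of_two(unsigned int x)
{
        return x != 0 && __builtin_popcount(x) == 1;    /* hweight32(x) == 1 */
}

int main(void)
{
        printf("%d %d\n", is_power_of_two(4096), is_power_of_two(3000));        /* prints "1 0" */
        return 0;
}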
 
@@ -223,7 +312,7 @@ void ust_buffers_channel_close(struct ust_channel *chan)
        if(!chan)
                return;
 
-       mutex_lock(&ust_buffers_channels_mutex);
+       pthread_mutex_lock(&ust_buffers_channels_mutex);
        for(i=0; i<chan->n_cpus; i++) {
        /* FIXME: if we make it here, then all buffers were necessarily allocated. Moreover, we don't
         * initialize to NULL so we cannot use this check. Should we? */
@@ -233,89 +322,13 @@ void ust_buffers_channel_close(struct ust_channel *chan)
 
        list_del(&chan->list);
        kref_put(&chan->kref, ust_buffers_destroy_channel);
-       mutex_unlock(&ust_buffers_channels_mutex);
-}
-
-/* _ust_buffers_write()
- *
- * @buf: destination buffer
- * @offset: offset in destination
- * @src: source buffer
- * @len: length of source
- * @cpy: already copied
- */
-
-void _ust_buffers_write(struct ust_buffer *buf, size_t offset,
-       const void *src, size_t len, ssize_t cpy)
-{
-       do {
-               len -= cpy;
-               src += cpy;
-               offset += cpy;
-
-               WARN_ON(offset >= buf->buf_size);
-
-               cpy = min_t(size_t, len, buf->buf_size - offset);
-               ust_buffers_do_copy(buf->buf_data + offset, src, cpy);
-       } while (unlikely(len != cpy));
-}
-
-void *ltt_buffers_offset_address(struct ust_buffer *buf, size_t offset)
-{
-       return ((char *)buf->buf_data)+offset;
+       pthread_mutex_unlock(&ust_buffers_channels_mutex);
 }
 
 /*
  * -------
  */
 
-/*
- * Last TSC comparison functions. Check if the current TSC overflows
- * LTT_TSC_BITS bits from the last TSC read. Reads and writes last_tsc
- * atomically.
- */
-
-/* FIXME: does this test work properly? */
-#if (BITS_PER_LONG == 32)
-static inline void save_last_tsc(struct ust_buffer *ltt_buf,
-                                       u64 tsc)
-{
-       ltt_buf->last_tsc = (unsigned long)(tsc >> LTT_TSC_BITS);
-}
-
-static inline int last_tsc_overflow(struct ust_buffer *ltt_buf,
-                                       u64 tsc)
-{
-       unsigned long tsc_shifted = (unsigned long)(tsc >> LTT_TSC_BITS);
-
-       if (unlikely((tsc_shifted - ltt_buf->last_tsc)))
-               return 1;
-       else
-               return 0;
-}
-#else
-static inline void save_last_tsc(struct ust_buffer *ltt_buf,
-                                       u64 tsc)
-{
-       ltt_buf->last_tsc = (unsigned long)tsc;
-}
-
-static inline int last_tsc_overflow(struct ust_buffer *ltt_buf,
-                                       u64 tsc)
-{
-       if (unlikely((tsc - ltt_buf->last_tsc) >> LTT_TSC_BITS))
-               return 1;
-       else
-               return 0;
-}
-#endif
-
-/*
- * A switch is done during tracing or as a final flush after tracing (so it
- * won't write in the new sub-buffer).
- */
-enum force_switch_mode { FORCE_ACTIVE, FORCE_FLUSH };
-
 static void ust_buffers_destroy_buffer(struct ust_channel *ltt_chan, int cpu);
 
 static void ltt_force_switch(struct ust_buffer *buf,
@@ -324,18 +337,19 @@ static void ltt_force_switch(struct ust_buffer *buf,
 /*
  * Trace callbacks
  */
-static void ltt_buffer_begin_callback(struct ust_buffer *buf,
+static void ltt_buffer_begin(struct ust_buffer *buf,
                        u64 tsc, unsigned int subbuf_idx)
 {
        struct ust_channel *channel = buf->chan;
        struct ltt_subbuffer_header *header =
                (struct ltt_subbuffer_header *)
-                       ltt_buffers_offset_address(buf,
+                       ust_buffers_offset_address(buf,
                                subbuf_idx * buf->chan->subbuf_size);
 
        header->cycle_count_begin = tsc;
-       header->lost_size = 0xFFFFFFFF; /* for debugging */
-       header->buf_size = buf->chan->subbuf_size;
+       header->data_size = 0xFFFFFFFF; /* for recognizing crashed buffers */
+       header->sb_size = 0xFFFFFFFF; /* for recognizing crashed buffers */
+       /* FIXME: add memory barrier? */
        ltt_write_trace_header(channel->trace, header);
 }
 
@@ -343,51 +357,23 @@ static void ltt_buffer_begin_callback(struct ust_buffer *buf,
  * offset is assumed to never be 0 here : never deliver a completely empty
  * subbuffer. The lost size is between 0 and subbuf_size-1.
  */
-static notrace void ltt_buffer_end_callback(struct ust_buffer *buf,
+static notrace void ltt_buffer_end(struct ust_buffer *buf,
                u64 tsc, unsigned int offset, unsigned int subbuf_idx)
 {
        struct ltt_subbuffer_header *header =
                (struct ltt_subbuffer_header *)
-                       ltt_buffers_offset_address(buf,
+                       ust_buffers_offset_address(buf,
                                subbuf_idx * buf->chan->subbuf_size);
+       u32 data_size = SUBBUF_OFFSET(offset - 1, buf->chan) + 1;
 
-       header->lost_size = SUBBUF_OFFSET((buf->chan->subbuf_size - offset),
-                               buf->chan);
+       header->data_size = data_size;
+       header->sb_size = PAGE_ALIGN(data_size);
        header->cycle_count_end = tsc;
-       header->events_lost = local_read(&buf->events_lost);
-       header->subbuf_corrupt = local_read(&buf->corrupted_subbuffers);
-
-}
-
-void (*wake_consumer)(void *, int) = NULL;
-
-void relay_set_wake_consumer(void (*wake)(void *, int))
-{
-       wake_consumer = wake;
-}
-
-void relay_wake_consumer(void *arg, int finished)
-{
-       if(wake_consumer)
-               wake_consumer(arg, finished);
-}
-
-static notrace void ltt_deliver(struct ust_buffer *buf, unsigned int subbuf_idx,
-               long commit_count)
-{
-       int result;
-
-//ust// #ifdef CONFIG_LTT_VMCORE
-       local_set(&buf->commit_seq[subbuf_idx], commit_count);
-//ust// #endif
-
-       /* wakeup consumer */
-       result = write(buf->data_ready_fd_write, "1", 1);
-       if(result == -1) {
-               PERROR("write (in ltt_relay_buffer_flush)");
-               ERR("this should never happen!");
+       header->events_lost = uatomic_read(&buf->events_lost);
+       header->subbuf_corrupt = uatomic_read(&buf->corrupted_subbuffers);
+       if(unlikely(header->events_lost > 0)) {
+               DBG("Some events (%d) were lost in %s_%d", header->events_lost, buf->chan->channel_name, buf->cpu);
        }
-//ust//        atomic_set(&ltt_buf->wakeup_readers, 1);
 }
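
A quick numeric check of the two header fields recorded above (a sketch assuming 4096-byte pages; the real PAGE_ALIGN comes from the tracer headers):

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
        unsigned long data_size = 5000; /* last byte written falls 5000 bytes into the sub-buffer */

        printf("data_size=%lu sb_size=%lu\n", data_size, PAGE_ALIGN(data_size));        /* 5000 8192 */
        return 0;
}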
 
 /*
@@ -397,28 +383,80 @@ static notrace void ltt_buf_unfull(struct ust_buffer *buf,
                unsigned int subbuf_idx,
                long offset)
 {
-//ust//        struct ltt_channel_struct *ltt_channel =
-//ust//                (struct ltt_channel_struct *)buf->chan->private_data;
-//ust//        struct ltt_channel_buf_struct *ltt_buf = ltt_channel->buf;
-//ust//
-//ust//        ltt_relay_wake_writers(ltt_buf);
 }
 
-int ust_buffers_do_get_subbuf(struct ust_buffer *buf, long *pconsumed_old)
+/*
+ * Promote compiler barrier to a smp_mb().
+ * For the specific LTTng case, this IPI call should be removed if the
+ * architecture does not reorder writes.  This should eventually be provided by
+ * a separate architecture-specific infrastructure.
+ */
+//ust// static void remote_mb(void *info)
+//ust// {
+//ust//        smp_mb();
+//ust// }
+
+int ust_buffers_get_subbuf(struct ust_buffer *buf, long *consumed)
 {
        struct ust_channel *channel = buf->chan;
        long consumed_old, consumed_idx, commit_count, write_offset;
-       consumed_old = atomic_long_read(&buf->consumed);
+//ust//        int retval;
+
+       consumed_old = uatomic_read(&buf->consumed);
        consumed_idx = SUBBUF_INDEX(consumed_old, buf->chan);
-       commit_count = local_read(&buf->commit_count[consumed_idx]);
+       commit_count = uatomic_read(&buf->commit_count[consumed_idx].cc_sb);
        /*
         * Make sure we read the commit count before reading the buffer
         * data and the write offset. Correct consumed offset ordering
         * wrt commit count is insured by the use of cmpxchg to update
         * the consumed offset.
+        * smp_call_function_single can fail if the remote CPU is offline;
+        * this is OK because then there is no wmb to execute there.
+        * If our thread is executing on the same CPU as the one the buffer
+        * belongs to, we don't have to synchronize it at all. If we are
+        * migrated, the scheduler will take care of the memory barriers.
+        * Normally, smp_call_function_single() should ensure program order when
+        * executing the remote function, which implies that it surrounds the
+        * function execution with :
+        * smp_mb()
+        * send IPI
+        * csd_lock_wait
+        *                recv IPI
+        *                smp_mb()
+        *                exec. function
+        *                smp_mb()
+        *                csd unlock
+        * smp_mb()
+        *
+        * However, smp_call_function_single() does not seem to clearly execute
+        * such barriers. It depends on spinlock semantic to provide the barrier
+        * before executing the IPI and, when busy-looping, csd_lock_wait only
+        * executes smp_mb() when it has to wait for the other CPU.
+        *
+        * I don't trust this code. Therefore, let's add the required smp_mb()
+        * sequence ourselves, even if duplicated. It has no performance impact
+        * anyway.
+        *
+        * smp_mb() is needed because smp_rmb() and smp_wmb() only order read vs
+        * read and write vs write. They do not ensure core synchronization. We
+        * really have to ensure total order between the 3 barriers running on
+        * the 2 CPUs.
+        */
+//ust// #ifdef LTT_NO_IPI_BARRIER
+       /*
+        * Local rmb to match the remote wmb to read the commit count before the
+        * buffer data and the write offset.
         */
        smp_rmb();
-       write_offset = local_read(&buf->offset);
+//ust// #else
+//ust//        if (raw_smp_processor_id() != buf->cpu) {
+//ust//                smp_mb();       /* Total order with IPI handler smp_mb() */
+//ust//                smp_call_function_single(buf->cpu, remote_mb, NULL, 1);
+//ust//                smp_mb();       /* Total order with IPI handler smp_mb() */
+//ust//        }
+//ust// #endif
+
+       write_offset = uatomic_read(&buf->offset);
        /*
         * Check that the subbuffer we are trying to consume has been
         * already fully committed.
@@ -440,21 +478,27 @@ int ust_buffers_do_get_subbuf(struct ust_buffer *buf, long *pconsumed_old)
                return -EAGAIN;
        }
 
-       *pconsumed_old = consumed_old;
+       /* FIXME: is this ok to disable the reading feature? */
+//ust//        retval = update_read_sb_index(buf, consumed_idx);
+//ust//        if (retval)
+//ust//                return retval;
+
+       *consumed = consumed_old;
+
        return 0;
 }
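
Usage sketch for this function and ust_buffers_put_subbuf() below (assumptions: a valid struct ust_buffer *buf, and busy-waiting where the real consumer waits on data_ready_fd_read): reserve a sub-buffer, read it, then release it.

static void consume_one_subbuf(struct ust_buffer *buf)
{
        long consumed;

        while (ust_buffers_get_subbuf(buf, &consumed) == -EAGAIN)
                ;       /* sub-buffer not fully committed yet, try again */

        /* ... copy the sub-buffer contents out of buf->buf_data here ... */

        ust_buffers_put_subbuf(buf, consumed);
}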
 
-int ust_buffers_do_put_subbuf(struct ust_buffer *buf, u32 uconsumed_old)
+int ust_buffers_put_subbuf(struct ust_buffer *buf, unsigned long uconsumed_old)
 {
        long consumed_new, consumed_old;
 
-       consumed_old = atomic_long_read(&buf->consumed);
+       consumed_old = uatomic_read(&buf->consumed);
        consumed_old = consumed_old & (~0xFFFFFFFFL);
        consumed_old = consumed_old | uconsumed_old;
        consumed_new = SUBBUF_ALIGN(consumed_old, buf->chan);
 
 //ust//        spin_lock(&ltt_buf->full_lock);
-       if (atomic_long_cmpxchg(&buf->consumed, consumed_old,
+       if (uatomic_cmpxchg(&buf->consumed, consumed_old,
                                consumed_new)
            != consumed_old) {
                /* We have been pushed by the writer : the last
@@ -479,31 +523,33 @@ static void ltt_relay_print_subbuffer_errors(
                long cons_off, int cpu)
 {
        struct ust_buffer *ltt_buf = channel->buf[cpu];
-       long cons_idx, commit_count, write_offset;
+       long cons_idx, commit_count, commit_count_sb, write_offset;
 
        cons_idx = SUBBUF_INDEX(cons_off, channel);
-       commit_count = local_read(&ltt_buf->commit_count[cons_idx]);
+       commit_count = uatomic_read(&ltt_buf->commit_count[cons_idx].cc);
+       commit_count_sb = uatomic_read(&ltt_buf->commit_count[cons_idx].cc_sb);
+
        /*
         * No need to order commit_count and write_offset reads because we
         * execute after trace is stopped when there are no readers left.
         */
-       write_offset = local_read(&ltt_buf->offset);
+       write_offset = uatomic_read(&ltt_buf->offset);
        WARN( "LTT : unread channel %s offset is %ld "
-               "and cons_off : %ld\n",
-               channel->channel_name, write_offset, cons_off);
+               "and cons_off : %ld (cpu %d)\n",
+               channel->channel_name, write_offset, cons_off, cpu);
        /* Check each sub-buffer for non filled commit count */
        if (((commit_count - channel->subbuf_size) & channel->commit_count_mask)
            - (BUFFER_TRUNC(cons_off, channel) >> channel->n_subbufs_order) != 0) {
                ERR("LTT : %s : subbuffer %lu has non filled "
-                       "commit count %lu.\n",
-                       channel->channel_name, cons_idx, commit_count);
+                       "commit count [cc, cc_sb] [%lu,%lu].\n",
+                       channel->channel_name, cons_idx, commit_count, commit_count_sb);
        }
        ERR("LTT : %s : commit count : %lu, subbuf size %zd\n",
                        channel->channel_name, commit_count,
                        channel->subbuf_size);
 }
 
-static void ltt_relay_print_errors(struct ltt_trace_struct *trace,
+static void ltt_relay_print_errors(struct ust_trace *trace,
                struct ust_channel *channel, int cpu)
 {
        struct ust_buffer *ltt_buf = channel->buf[cpu];
@@ -516,8 +562,11 @@ static void ltt_relay_print_errors(struct ltt_trace_struct *trace,
        if (!channel)
                return;
 
-       for (cons_off = atomic_long_read(&ltt_buf->consumed);
-                       (SUBBUF_TRUNC(local_read(&ltt_buf->offset),
+//ust//        for (cons_off = 0; cons_off < rchan->alloc_size;
+//ust//             cons_off = SUBBUF_ALIGN(cons_off, rchan))
+//ust//                ust_buffers_print_written(ltt_chan, cons_off, cpu);
+       for (cons_off = uatomic_read(&ltt_buf->consumed);
+                       (SUBBUF_TRUNC(uatomic_read(&ltt_buf->offset),
                                      channel)
                         - cons_off) > 0;
                        cons_off = SUBBUF_ALIGN(cons_off, channel))
@@ -526,17 +575,17 @@ static void ltt_relay_print_errors(struct ltt_trace_struct *trace,
 
 static void ltt_relay_print_buffer_errors(struct ust_channel *channel, int cpu)
 {
-       struct ltt_trace_struct *trace = channel->trace;
+       struct ust_trace *trace = channel->trace;
        struct ust_buffer *ltt_buf = channel->buf[cpu];
 
-       if (local_read(&ltt_buf->events_lost))
-               ERR("channel %s: %ld events lost",
+       if (uatomic_read(&ltt_buf->events_lost))
+               ERR("channel %s: %ld events lost (cpu %d)",
                        channel->channel_name,
-                       local_read(&ltt_buf->events_lost));
-       if (local_read(&ltt_buf->corrupted_subbuffers))
-               ERR("channel %s : %ld corrupted subbuffers",
+                       uatomic_read(&ltt_buf->events_lost), cpu);
+       if (uatomic_read(&ltt_buf->corrupted_subbuffers))
+               ERR("channel %s : %ld corrupted subbuffers (cpu %d)",
                        channel->channel_name,
-                       local_read(&ltt_buf->corrupted_subbuffers));
+                       uatomic_read(&ltt_buf->corrupted_subbuffers), cpu);
 
        ltt_relay_print_errors(trace, channel, cpu);
 }
@@ -551,14 +600,14 @@ static void ltt_relay_release_channel(struct kref *kref)
 /*
  * Create ltt buffer.
  */
-//ust// static int ltt_relay_create_buffer(struct ltt_trace_struct *trace,
+//ust// static int ltt_relay_create_buffer(struct ust_trace *trace,
 //ust//                struct ltt_channel_struct *ltt_chan, struct rchan_buf *buf,
 //ust//                unsigned int cpu, unsigned int n_subbufs)
 //ust// {
 //ust//        struct ltt_channel_buf_struct *ltt_buf =
 //ust//                percpu_ptr(ltt_chan->buf, cpu);
 //ust//        unsigned int j;
-//ust// 
+//ust//
 //ust//        ltt_buf->commit_count =
 //ust//                kzalloc_node(sizeof(ltt_buf->commit_count) * n_subbufs,
 //ust//                        GFP_KERNEL, cpu_to_node(cpu));
@@ -567,27 +616,27 @@ static void ltt_relay_release_channel(struct kref *kref)
 //ust//        kref_get(&trace->kref);
 //ust//        kref_get(&trace->ltt_transport_kref);
 //ust//        kref_get(&ltt_chan->kref);
-//ust//        local_set(&ltt_buf->offset, ltt_subbuffer_header_size());
-//ust//        atomic_long_set(&ltt_buf->consumed, 0);
-//ust//        atomic_long_set(&ltt_buf->active_readers, 0);
+//ust//        uatomic_set(&ltt_buf->offset, ltt_subbuffer_header_size());
+//ust//        uatomic_set(&ltt_buf->consumed, 0);
+//ust//        uatomic_set(&ltt_buf->active_readers, 0);
 //ust//        for (j = 0; j < n_subbufs; j++)
-//ust//                local_set(&ltt_buf->commit_count[j], 0);
+//ust//                uatomic_set(&ltt_buf->commit_count[j], 0);
 //ust//        init_waitqueue_head(&ltt_buf->write_wait);
-//ust//        atomic_set(&ltt_buf->wakeup_readers, 0);
+//ust//        uatomic_set(&ltt_buf->wakeup_readers, 0);
 //ust//        spin_lock_init(&ltt_buf->full_lock);
-//ust// 
+//ust//
 //ust//        ltt_buffer_begin_callback(buf, trace->start_tsc, 0);
 //ust//        /* atomic_add made on local variable on data that belongs to
 //ust//         * various CPUs : ok because tracing not started (for this cpu). */
-//ust//        local_add(ltt_subbuffer_header_size(), &ltt_buf->commit_count[0]);
-//ust// 
-//ust//        local_set(&ltt_buf->events_lost, 0);
-//ust//        local_set(&ltt_buf->corrupted_subbuffers, 0);
-//ust// 
+//ust//        uatomic_add(&ltt_buf->commit_count[0], ltt_subbuffer_header_size());
+//ust//
+//ust//        uatomic_set(&ltt_buf->events_lost, 0);
+//ust//        uatomic_set(&ltt_buf->corrupted_subbuffers, 0);
+//ust//
 //ust//        return 0;
 //ust// }
 
-static int ust_buffers_init_buffer(struct ltt_trace_struct *trace,
+static int ust_buffers_init_buffer(struct ust_trace *trace,
                struct ust_channel *ltt_chan, struct ust_buffer *buf,
                unsigned int n_subbufs)
 {
@@ -596,27 +645,29 @@ static int ust_buffers_init_buffer(struct ltt_trace_struct *trace,
        int result;
 
        buf->commit_count =
-               zmalloc(sizeof(buf->commit_count) * n_subbufs);
+               zmalloc(sizeof(*buf->commit_count) * n_subbufs);
        if (!buf->commit_count)
                return -ENOMEM;
        kref_get(&trace->kref);
        kref_get(&trace->ltt_transport_kref);
        kref_get(&ltt_chan->kref);
-       local_set(&buf->offset, ltt_subbuffer_header_size());
-       atomic_long_set(&buf->consumed, 0);
-       atomic_long_set(&buf->active_readers, 0);
-       for (j = 0; j < n_subbufs; j++)
-               local_set(&buf->commit_count[j], 0);
+       uatomic_set(&buf->offset, ltt_subbuffer_header_size());
+       uatomic_set(&buf->consumed, 0);
+       uatomic_set(&buf->active_readers, 0);
+       for (j = 0; j < n_subbufs; j++) {
+               uatomic_set(&buf->commit_count[j].cc, 0);
+               uatomic_set(&buf->commit_count[j].cc_sb, 0);
+       }
 //ust//        init_waitqueue_head(&buf->write_wait);
-//ust//        atomic_set(&buf->wakeup_readers, 0);
+//ust//        uatomic_set(&buf->wakeup_readers, 0);
 //ust//        spin_lock_init(&buf->full_lock);
 
-       ltt_buffer_begin_callback(buf, trace->start_tsc, 0);
+       ltt_buffer_begin(buf, trace->start_tsc, 0);
 
-       local_add(ltt_subbuffer_header_size(), &buf->commit_count[0]);
+       uatomic_add(&buf->commit_count[0].cc, ltt_subbuffer_header_size());
 
-       local_set(&buf->events_lost, 0);
-       local_set(&buf->corrupted_subbuffers, 0);
+       uatomic_set(&buf->events_lost, 0);
+       uatomic_set(&buf->corrupted_subbuffers, 0);
 
        result = pipe(fds);
        if(result == -1) {
@@ -626,16 +677,11 @@ static int ust_buffers_init_buffer(struct ltt_trace_struct *trace,
        buf->data_ready_fd_read = fds[0];
        buf->data_ready_fd_write = fds[1];
 
-       /* FIXME: do we actually need this? */
-       result = fcntl(fds[0], F_SETFL, O_NONBLOCK);
-       if(result == -1) {
-               PERROR("fcntl");
-       }
-
 //ust//        buf->commit_seq = malloc(sizeof(buf->commit_seq) * n_subbufs);
 //ust//        if(!ltt_buf->commit_seq) {
 //ust//                return -1;
 //ust//        }
+       memset(buf->commit_seq, 0, sizeof(buf->commit_seq[0]) * n_subbufs);
 
        /* FIXME: decrementally destroy on error */
 
@@ -645,14 +691,14 @@ static int ust_buffers_init_buffer(struct ltt_trace_struct *trace,
 /* FIXME: use this function */
 static void ust_buffers_destroy_buffer(struct ust_channel *ltt_chan, int cpu)
 {
-       struct ltt_trace_struct *trace = ltt_chan->trace;
+       struct ust_trace *trace = ltt_chan->trace;
        struct ust_buffer *ltt_buf = ltt_chan->buf[cpu];
 
        kref_put(&ltt_chan->trace->ltt_transport_kref,
                ltt_release_transport);
        ltt_relay_print_buffer_errors(ltt_chan, cpu);
 //ust//        free(ltt_buf->commit_seq);
-       kfree(ltt_buf->commit_count);
+       free(ltt_buf->commit_count);
        ltt_buf->commit_count = NULL;
        kref_put(&ltt_chan->kref, ltt_relay_release_channel);
        kref_put(&trace->kref, ltt_release_trace);
@@ -717,7 +763,7 @@ static int ust_buffers_alloc_channel_buf_structs(struct ust_channel *chan)
 /*
  * Create channel.
  */
-static int ust_buffers_create_channel(const char *trace_name, struct ltt_trace_struct *trace,
+static int ust_buffers_create_channel(const char *trace_name, struct ust_trace *trace,
        const char *channel_name, struct ust_channel *ltt_chan,
        unsigned int subbuf_size, unsigned int n_subbufs, int overwrite)
 {
@@ -726,18 +772,16 @@ static int ust_buffers_create_channel(const char *trace_name, struct ltt_trace_s
        kref_init(&ltt_chan->kref);
 
        ltt_chan->trace = trace;
-       ltt_chan->buffer_begin = ltt_buffer_begin_callback;
-       ltt_chan->buffer_end = ltt_buffer_end_callback;
        ltt_chan->overwrite = overwrite;
        ltt_chan->n_subbufs_order = get_count_order(n_subbufs);
        ltt_chan->commit_count_mask = (~0UL >> ltt_chan->n_subbufs_order);
        ltt_chan->n_cpus = get_n_cpus();
 //ust//        ltt_chan->buf = percpu_alloc_mask(sizeof(struct ltt_channel_buf_struct), GFP_KERNEL, cpu_possible_map);
-       ltt_chan->buf = (void *) malloc(ltt_chan->n_cpus * sizeof(void *));
+       ltt_chan->buf = (void *) zmalloc(ltt_chan->n_cpus * sizeof(void *));
        if(ltt_chan->buf == NULL) {
                goto error;
        }
-       ltt_chan->buf_struct_shmids = (int *) malloc(ltt_chan->n_cpus * sizeof(int));
+       ltt_chan->buf_struct_shmids = (int *) zmalloc(ltt_chan->n_cpus * sizeof(int));
        if(ltt_chan->buf_struct_shmids == NULL)
                goto free_buf;
 
@@ -767,26 +811,6 @@ error:
        return -1;
 }
 
-/*
- * LTTng channel flush function.
- *
- * Must be called when no tracing is active in the channel, because of
- * accesses across CPUs.
- */
-static notrace void ltt_relay_buffer_flush(struct ust_buffer *buf)
-{
-       int result;
-
-//ust//        buf->finalized = 1;
-       ltt_force_switch(buf, FORCE_FLUSH);
-
-       result = write(buf->data_ready_fd_write, "1", 1);
-       if(result == -1) {
-               PERROR("write (in ltt_relay_buffer_flush)");
-               ERR("this should never happen!");
-       }
-}
-
 static void ltt_relay_async_wakeup_chan(struct ust_channel *ltt_channel)
 {
 //ust//        unsigned int i;
@@ -796,8 +820,8 @@ static void ltt_relay_async_wakeup_chan(struct ust_channel *ltt_channel)
 //ust//                struct ltt_channel_buf_struct *ltt_buf =
 //ust//                        percpu_ptr(ltt_channel->buf, i);
 //ust//
-//ust//                if (atomic_read(&ltt_buf->wakeup_readers) == 1) {
-//ust//                        atomic_set(&ltt_buf->wakeup_readers, 0);
+//ust//                if (uatomic_read(&ltt_buf->wakeup_readers) == 1) {
+//ust//                        uatomic_set(&ltt_buf->wakeup_readers, 0);
 //ust//                        wake_up_interruptible(&rchan->buf[i]->read_wait);
 //ust//                }
 //ust//        }
@@ -809,7 +833,7 @@ static void ltt_relay_finish_buffer(struct ust_channel *channel, unsigned int cp
 
        if (channel->buf[cpu]) {
                struct ust_buffer *buf = channel->buf[cpu];
-               ltt_relay_buffer_flush(buf);
+               ltt_force_switch(buf, FORCE_FLUSH);
 //ust//                ltt_relay_wake_writers(ltt_buf);
                /* closing the pipe tells the consumer the buffer is finished */
                
@@ -838,86 +862,310 @@ static void ltt_relay_remove_channel(struct ust_channel *channel)
        kref_put(&channel->kref, ltt_relay_release_channel);
 }
 
-struct ltt_reserve_switch_offsets {
-       long begin, end, old;
-       long begin_switch, end_switch_current, end_switch_old;
-       long commit_count, reserve_commit_diff;
-       size_t before_hdr_pad, size;
-};
+/*
+ * ltt_reserve_switch_old_subbuf: switch old subbuffer
+ *
+ * Concurrency safe because we are the last and only thread to alter this
+ * sub-buffer. As long as it is not delivered and read, no other thread can
+ * alter the offset, alter the reserve_count or call the
+ * client_buffer_end_callback on this sub-buffer.
+ *
+ * The only remaining threads could be the ones with pending commits. They will
+ * have to do the deliver themselves.  Not concurrency safe in overwrite mode.
+ * We detect corrupted subbuffers with commit and reserve counts. We keep a
+ * corrupted sub-buffers count and push the readers across these sub-buffers.
+ *
+ * Not concurrency safe if a writer is stalled in a subbuffer and another writer
+ * switches in, finding out it's corrupted.  The result will be that the old
+ * (uncommitted) subbuffer will be declared corrupted, and that the new subbuffer
+ * will be declared corrupted too because of the commit count adjustment.
+ *
+ * Note : offset_old should never be 0 here.
+ */
+static void ltt_reserve_switch_old_subbuf(
+               struct ust_channel *chan, struct ust_buffer *buf,
+               struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
+{
+       long oldidx = SUBBUF_INDEX(offsets->old - 1, chan);
+       long commit_count, padding_size;
+
+       padding_size = chan->subbuf_size
+                       - (SUBBUF_OFFSET(offsets->old - 1, chan) + 1);
+       ltt_buffer_end(buf, *tsc, offsets->old, oldidx);
+
+       /*
+        * Must write slot data before incrementing commit count.
+        * This compiler barrier is upgraded into a smp_wmb() by the IPI
+        * sent by get_subbuf() when it does its smp_rmb().
+        */
+       smp_wmb();
+       uatomic_add(&buf->commit_count[oldidx].cc, padding_size);
+       commit_count = uatomic_read(&buf->commit_count[oldidx].cc);
+       ltt_check_deliver(chan, buf, offsets->old - 1, commit_count, oldidx);
+       ltt_write_commit_counter(chan, buf, oldidx,
+               offsets->old, commit_count, padding_size);
+}
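
A worked example of the padding accounting above (a sketch assuming 4096-byte sub-buffers): the commit count of the old sub-buffer is bumped by the padding, so data plus padding always totals exactly one sub-buffer.

#include <stdio.h>

int main(void)
{
        long subbuf_size = 4096;
        long data_size = 100;   /* SUBBUF_OFFSET(offsets->old - 1, chan) + 1 */
        long padding_size = subbuf_size - data_size;

        printf("data=%ld padding=%ld total=%ld\n",
               data_size, padding_size, data_size + padding_size);      /* 100 3996 4096 */
        return 0;
}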
+
+/*
+ * ltt_reserve_switch_new_subbuf: Populate new subbuffer.
+ *
+ * This code can be executed unordered : writers may already have written to the
+ * sub-buffer before this code gets executed, caution.  The commit makes sure
+ * that this code is executed before the deliver of this sub-buffer.
+ */
+static void ltt_reserve_switch_new_subbuf(
+               struct ust_channel *chan, struct ust_buffer *buf,
+               struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
+{
+       long beginidx = SUBBUF_INDEX(offsets->begin, chan);
+       long commit_count;
+
+       ltt_buffer_begin(buf, *tsc, beginidx);
+
+       /*
+        * Must write slot data before incrementing commit count.
+        * This compiler barrier is upgraded into a smp_wmb() by the IPI
+        * sent by get_subbuf() when it does its smp_rmb().
+        */
+       smp_wmb();
+       uatomic_add(&buf->commit_count[beginidx].cc, ltt_subbuffer_header_size());
+       commit_count = uatomic_read(&buf->commit_count[beginidx].cc);
+       /* Check if the written buffer has to be delivered */
+       ltt_check_deliver(chan, buf, offsets->begin, commit_count, beginidx);
+       ltt_write_commit_counter(chan, buf, beginidx,
+               offsets->begin, commit_count, ltt_subbuffer_header_size());
+}
+
+/*
+ * ltt_reserve_end_switch_current: finish switching current subbuffer
+ *
+ * Concurrency safe because we are the last and only thread to alter this
+ * sub-buffer. As long as it is not delivered and read, no other thread can
+ * alter the offset, alter the reserve_count or call the
+ * client_buffer_end_callback on this sub-buffer.
+ *
+ * The only remaining threads could be the ones with pending commits. They will
+ * have to do the deliver themselves.  Not concurrency safe in overwrite mode.
+ * We detect corrupted subbuffers with commit and reserve counts. We keep a
+ * corrupted sub-buffers count and push the readers across these sub-buffers.
+ *
+ * Not concurrency safe if a writer is stalled in a subbuffer and another writer
+ * switches in, finding out it's corrupted.  The result will be that the old
+ * (uncommitted) subbuffer will be declared corrupted, and that the new subbuffer
+ * will be declared corrupted too because of the commit count adjustment.
+ */
+static void ltt_reserve_end_switch_current(
+               struct ust_channel *chan,
+               struct ust_buffer *buf,
+               struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
+{
+       long endidx = SUBBUF_INDEX(offsets->end - 1, chan);
+       long commit_count, padding_size;
+
+       padding_size = chan->subbuf_size
+                       - (SUBBUF_OFFSET(offsets->end - 1, chan) + 1);
+
+       ltt_buffer_end(buf, *tsc, offsets->end, endidx);
+
+       /*
+        * Must write slot data before incrementing commit count.
+        * This compiler barrier is upgraded into a smp_wmb() by the IPI
+        * sent by get_subbuf() when it does its smp_rmb().
+        */
+       smp_wmb();
+       uatomic_add(&buf->commit_count[endidx].cc, padding_size);
+       commit_count = uatomic_read(&buf->commit_count[endidx].cc);
+       ltt_check_deliver(chan, buf,
+               offsets->end - 1, commit_count, endidx);
+       ltt_write_commit_counter(chan, buf, endidx,
+               offsets->end, commit_count, padding_size);
+}
 
 /*
  * Returns :
  * 0 if ok
  * !0 if execution must be aborted.
  */
-static inline int ltt_relay_try_reserve(
-               struct ust_channel *channel, struct ust_buffer *buf,
-               struct ltt_reserve_switch_offsets *offsets, size_t data_size,
-               u64 *tsc, unsigned int *rflags, int largest_align)
+static int ltt_relay_try_switch_slow(
+               enum force_switch_mode mode,
+               struct ust_channel *chan,
+               struct ust_buffer *buf,
+               struct ltt_reserve_switch_offsets *offsets,
+               u64 *tsc)
 {
-       offsets->begin = local_read(&buf->offset);
+       long subbuf_index;
+       long reserve_commit_diff;
+
+       offsets->begin = uatomic_read(&buf->offset);
        offsets->old = offsets->begin;
        offsets->begin_switch = 0;
-       offsets->end_switch_current = 0;
        offsets->end_switch_old = 0;
 
        *tsc = trace_clock_read64();
-       if (last_tsc_overflow(buf, *tsc))
-               *rflags = LTT_RFLAG_ID_SIZE_TSC;
 
-       if (SUBBUF_OFFSET(offsets->begin, buf->chan) == 0) {
-               offsets->begin_switch = 1;              /* For offsets->begin */
+       if (SUBBUF_OFFSET(offsets->begin, buf->chan) != 0) {
+               offsets->begin = SUBBUF_ALIGN(offsets->begin, buf->chan);
+               offsets->end_switch_old = 1;
        } else {
-               offsets->size = ust_get_header_size(channel,
-                                       offsets->begin, data_size,
-                                       &offsets->before_hdr_pad, *rflags);
-               offsets->size += ltt_align(offsets->begin + offsets->size,
-                                          largest_align)
-                                + data_size;
-               if ((SUBBUF_OFFSET(offsets->begin, buf->chan) + offsets->size)
-                               > buf->chan->subbuf_size) {
-                       offsets->end_switch_old = 1;    /* For offsets->old */
-                       offsets->begin_switch = 1;      /* For offsets->begin */
+               /* we do not have to switch : buffer is empty */
+               return -1;
+       }
+       if (mode == FORCE_ACTIVE)
+               offsets->begin += ltt_subbuffer_header_size();
+       /*
+        * Always begin_switch in FORCE_ACTIVE mode.
+        * Test new buffer integrity
+        */
+       subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan);
+       reserve_commit_diff =
+               (BUFFER_TRUNC(offsets->begin, buf->chan)
+                >> chan->n_subbufs_order)
+               - (uatomic_read(&buf->commit_count[subbuf_index].cc_sb)
+                       & chan->commit_count_mask);
+       if (reserve_commit_diff == 0) {
+               /* Next buffer not corrupted. */
+               if (mode == FORCE_ACTIVE
+                   && !chan->overwrite
+                   && offsets->begin - uatomic_read(&buf->consumed)
+                      >= chan->alloc_size) {
+                       /*
+                        * We do not overwrite non consumed buffers and we are
+                        * full : ignore switch while tracing is active.
+                        */
+                       return -1;
                }
+       } else {
+               /*
+                * Next subbuffer corrupted. Force pushing reader even in normal
+                * mode
+                */
        }
-       if (offsets->begin_switch) {
-               long subbuf_index;
-
-               if (offsets->end_switch_old)
-                       offsets->begin = SUBBUF_ALIGN(offsets->begin,
-                                                     buf->chan);
-               offsets->begin = offsets->begin + ltt_subbuffer_header_size();
-               /* Test new buffer integrity */
-               subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan);
-               offsets->reserve_commit_diff =
-                       (BUFFER_TRUNC(offsets->begin, buf->chan)
-                        >> channel->n_subbufs_order)
-                       - (local_read(&buf->commit_count[subbuf_index])
-                               & channel->commit_count_mask);
-               if (offsets->reserve_commit_diff == 0) {
-                       long consumed;
+       offsets->end = offsets->begin;
+       return 0;
+}
 
-                       consumed = atomic_long_read(&buf->consumed);
+/*
+ * Force a sub-buffer switch for a per-cpu buffer. This operation is
+ * completely reentrant : can be called while tracing is active with
+ * absolutely no lock held.
+ */
+void ltt_force_switch_lockless_slow(struct ust_buffer *buf,
+               enum force_switch_mode mode)
+{
+       struct ust_channel *chan = buf->chan;
+       struct ltt_reserve_switch_offsets offsets;
+       u64 tsc;
+
+       offsets.size = 0;
+
+       DBG("Switching (forced) %s_%d", chan->channel_name, buf->cpu);
+       /*
+        * Perform retryable operations.
+        */
+       do {
+               if (ltt_relay_try_switch_slow(mode, chan, buf,
+                               &offsets, &tsc))
+                       return;
+       } while (uatomic_cmpxchg(&buf->offset, offsets.old,
+                       offsets.end) != offsets.old);
+
+       /*
+        * Atomically update last_tsc. This update races against concurrent
+        * atomic updates, but the race will always cause supplementary full TSC
+        * events, never the opposite (missing a full TSC event when it would be
+        * needed).
+        */
+       save_last_tsc(buf, tsc);
+
+       /*
+        * Push the reader if necessary
+        */
+       if (mode == FORCE_ACTIVE) {
+               ltt_reserve_push_reader(chan, buf, offsets.end - 1);
+//ust//                ltt_clear_noref_flag(chan, buf, SUBBUF_INDEX(offsets.end - 1, chan));
+       }
+
+       /*
+        * Switch old subbuffer if needed.
+        */
+       if (offsets.end_switch_old) {
+//ust//                ltt_clear_noref_flag(rchan, buf, SUBBUF_INDEX(offsets.old - 1, rchan));
+               ltt_reserve_switch_old_subbuf(chan, buf, &offsets, &tsc);
+       }
 
+       /*
+        * Populate new subbuffer.
+        */
+       if (mode == FORCE_ACTIVE)
+               ltt_reserve_switch_new_subbuf(chan, buf, &offsets, &tsc);
+}
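
The retry loop above follows the usual lockless reservation shape; a generic sketch (compute_end() is a hypothetical stand-in for ltt_relay_try_switch_slow()):

long old_off, new_off;

do {
        old_off = uatomic_read(&buf->offset);   /* snapshot the current write offset */
        new_off = compute_end(old_off);         /* hypothetical helper deriving the new end offset */
} while (uatomic_cmpxchg(&buf->offset, old_off, new_off) != old_off);
/* On success, exactly one writer owns the [old_off, new_off) range. */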
+
+/*
+ * Returns :
+ * 0 if ok
+ * !0 if execution must be aborted.
+ */
+static int ltt_relay_try_reserve_slow(struct ust_channel *chan, struct ust_buffer *buf,
+               struct ltt_reserve_switch_offsets *offsets, size_t data_size,
+               u64 *tsc, unsigned int *rflags, int largest_align)
+{
+       long reserve_commit_diff;
+
+       offsets->begin = uatomic_read(&buf->offset);
+       offsets->old = offsets->begin;
+       offsets->begin_switch = 0;
+       offsets->end_switch_current = 0;
+       offsets->end_switch_old = 0;
+
+       *tsc = trace_clock_read64();
+       if (last_tsc_overflow(buf, *tsc))
+               *rflags = LTT_RFLAG_ID_SIZE_TSC;
+
+       if (unlikely(SUBBUF_OFFSET(offsets->begin, buf->chan) == 0)) {
+               offsets->begin_switch = 1;              /* For offsets->begin */
+       } else {
+               offsets->size = ust_get_header_size(chan,
+                                       offsets->begin, data_size,
+                                       &offsets->before_hdr_pad, *rflags);
+               offsets->size += ltt_align(offsets->begin + offsets->size,
+                                          largest_align)
+                                + data_size;
+               if (unlikely((SUBBUF_OFFSET(offsets->begin, buf->chan) +
+                            offsets->size) > buf->chan->subbuf_size)) {
+                       offsets->end_switch_old = 1;    /* For offsets->old */
+                       offsets->begin_switch = 1;      /* For offsets->begin */
+               }
+       }
+       if (unlikely(offsets->begin_switch)) {
+               long subbuf_index;
+
+               /*
+                * We are typically not filling the previous buffer completely.
+                */
+               if (likely(offsets->end_switch_old))
+                       offsets->begin = SUBBUF_ALIGN(offsets->begin,
+                                                     buf->chan);
+               offsets->begin = offsets->begin + ltt_subbuffer_header_size();
+               /* Test new buffer integrity */
+               subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan);
+               reserve_commit_diff =
+                 (BUFFER_TRUNC(offsets->begin, buf->chan)
+                  >> chan->n_subbufs_order)
+                 - (uatomic_read(&buf->commit_count[subbuf_index].cc_sb)
+                               & chan->commit_count_mask);
+               if (likely(reserve_commit_diff == 0)) {
                        /* Next buffer not corrupted. */
-                       if (!channel->overwrite &&
+                       if (unlikely(!chan->overwrite &&
                                (SUBBUF_TRUNC(offsets->begin, buf->chan)
-                                - SUBBUF_TRUNC(consumed, buf->chan))
-                               >= channel->alloc_size) {
-
-                               long consumed_idx = SUBBUF_INDEX(consumed, buf->chan);
-                               long commit_count = local_read(&buf->commit_count[consumed_idx]);
-                               if(((commit_count - buf->chan->subbuf_size) & channel->commit_count_mask) - (BUFFER_TRUNC(consumed, buf->chan) >> channel->n_subbufs_order) != 0) {
-                                       WARN("Event dropped. Caused by non-committed event.");
-                               }
-                               else {
-                                       WARN("Event dropped. Caused by non-consumed buffer.");
-                               }
+                                - SUBBUF_TRUNC(uatomic_read(
+                                                       &buf->consumed),
+                                               buf->chan))
+                               >= chan->alloc_size)) {
                                /*
                                 * We do not overwrite non consumed buffers
                                 * and we are full : event is lost.
                                 */
-                               local_inc(&buf->events_lost);
+                               uatomic_inc(&buf->events_lost);
                                return -1;
                        } else {
                                /*
@@ -928,24 +1176,26 @@ static inline int ltt_relay_try_reserve(
                        }
                } else {
                        /*
-                        * Next subbuffer corrupted. Force pushing reader even
-                        * in normal mode. It's safe to write in this new
-                        * subbuffer.
+                        * Next subbuffer corrupted. Drop event in normal and
+                        * overwrite mode. Caused by either a writer OOPS or
+                        * too many nested writes over a reserve/commit pair.
                         */
+                       uatomic_inc(&buf->events_lost);
+                       return -1;
                }
-               offsets->size = ust_get_header_size(channel,
+               offsets->size = ust_get_header_size(chan,
                                        offsets->begin, data_size,
                                        &offsets->before_hdr_pad, *rflags);
                offsets->size += ltt_align(offsets->begin + offsets->size,
                                           largest_align)
                                 + data_size;
-               if ((SUBBUF_OFFSET(offsets->begin, buf->chan) + offsets->size)
-                               > buf->chan->subbuf_size) {
+               if (unlikely((SUBBUF_OFFSET(offsets->begin, buf->chan)
+                            + offsets->size) > buf->chan->subbuf_size)) {
                        /*
                         * Event too big for subbuffers, report error, don't
                         * complete the sub-buffer switch.
                         */
-                       local_inc(&buf->events_lost);
+                       uatomic_inc(&buf->events_lost);
                        return -1;
                } else {
                        /*
@@ -961,7 +1211,7 @@ static inline int ltt_relay_try_reserve(
        }
        offsets->end = offsets->begin + offsets->size;
 
-       if ((SUBBUF_OFFSET(offsets->end, buf->chan)) == 0) {
+       if (unlikely((SUBBUF_OFFSET(offsets->end, buf->chan)) == 0)) {
                /*
                 * The offset_end will fall at the very beginning of the next
                 * subbuffer.
@@ -971,252 +1221,8 @@ static inline int ltt_relay_try_reserve(
        return 0;
 }
 
-/*
- * Returns :
- * 0 if ok
- * !0 if execution must be aborted.
- */
-static inline int ltt_relay_try_switch(
-               enum force_switch_mode mode,
-               struct ust_channel *channel,
-               struct ust_buffer *buf,
-               struct ltt_reserve_switch_offsets *offsets,
-               u64 *tsc)
-{
-       long subbuf_index;
-
-       offsets->begin = local_read(&buf->offset);
-       offsets->old = offsets->begin;
-       offsets->begin_switch = 0;
-       offsets->end_switch_old = 0;
-
-       *tsc = trace_clock_read64();
-
-       if (SUBBUF_OFFSET(offsets->begin, buf->chan) != 0) {
-               offsets->begin = SUBBUF_ALIGN(offsets->begin, buf->chan);
-               offsets->end_switch_old = 1;
-       } else {
-               /* we do not have to switch : buffer is empty */
-               return -1;
-       }
-       if (mode == FORCE_ACTIVE)
-               offsets->begin += ltt_subbuffer_header_size();
-       /*
-        * Always begin_switch in FORCE_ACTIVE mode.
-        * Test new buffer integrity
-        */
-       subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan);
-       offsets->reserve_commit_diff =
-               (BUFFER_TRUNC(offsets->begin, buf->chan)
-                >> channel->n_subbufs_order)
-               - (local_read(&buf->commit_count[subbuf_index])
-                       & channel->commit_count_mask);
-       if (offsets->reserve_commit_diff == 0) {
-               /* Next buffer not corrupted. */
-               if (mode == FORCE_ACTIVE
-                   && !channel->overwrite
-                   && offsets->begin - atomic_long_read(&buf->consumed)
-                      >= channel->alloc_size) {
-                       /*
-                        * We do not overwrite non consumed buffers and we are
-                        * full : ignore switch while tracing is active.
-                        */
-                       return -1;
-               }
-       } else {
-               /*
-                * Next subbuffer corrupted. Force pushing reader even in normal
-                * mode
-                */
-       }
-       offsets->end = offsets->begin;
-       return 0;
-}
-
-static inline void ltt_reserve_push_reader(
-               struct ust_channel *channel,
-               struct ust_buffer *buf,
-               struct ltt_reserve_switch_offsets *offsets)
-{
-       long consumed_old, consumed_new;
-
-       do {
-               consumed_old = atomic_long_read(&buf->consumed);
-               /*
-                * If buffer is in overwrite mode, push the reader consumed
-                * count if the write position has reached it and we are not
-                * at the first iteration (don't push the reader farther than
-                * the writer). This operation can be done concurrently by many
-                * writers in the same buffer, the writer being at the farthest
-                * write position sub-buffer index in the buffer being the one
-                * which will win this loop.
-                * If the buffer is not in overwrite mode, pushing the reader
-                * only happens if a sub-buffer is corrupted.
-                */
-               if ((SUBBUF_TRUNC(offsets->end-1, buf->chan)
-                  - SUBBUF_TRUNC(consumed_old, buf->chan))
-                  >= channel->alloc_size)
-                       consumed_new = SUBBUF_ALIGN(consumed_old, buf->chan);
-               else {
-                       consumed_new = consumed_old;
-                       break;
-               }
-       } while (atomic_long_cmpxchg(&buf->consumed, consumed_old,
-                       consumed_new) != consumed_old);
-
-       if (consumed_old != consumed_new) {
-               /*
-                * Reader pushed : we are the winner of the push, we can
-                * therefore reequilibrate reserve and commit. Atomic increment
-                * of the commit count permits other writers to play around
-                * with this variable before us. We keep track of
-                * corrupted_subbuffers even in overwrite mode :
-                * we never want to write over a non completely committed
-                * sub-buffer : possible causes : the buffer size is too low
-                * compared to the unordered data input, or there is a writer
-                * that died between the reserve and the commit.
-                */
-               if (offsets->reserve_commit_diff) {
-                       /*
-                        * We have to alter the sub-buffer commit count.
-                        * We do not deliver the previous subbuffer, given it
-                        * was either corrupted or not consumed (overwrite
-                        * mode).
-                        */
-                       local_add(offsets->reserve_commit_diff,
-                                 &buf->commit_count[
-                                       SUBBUF_INDEX(offsets->begin,
-                                                    buf->chan)]);
-                       if (!channel->overwrite
-                           || offsets->reserve_commit_diff
-                              != channel->subbuf_size) {
-                               /*
-                                * The reserve commit diff was not subbuf_size :
-                                * it means the subbuffer was partly written to
-                                * and is therefore corrupted. If it is multiple
-                                * of subbuffer size and we are in flight
-                                * recorder mode, we are skipping over a whole
-                                * subbuffer.
-                                */
-                               local_inc(&buf->corrupted_subbuffers);
-                       }
-               }
-       }
-}
-
-
-/*
- * ltt_reserve_switch_old_subbuf: switch old subbuffer
- *
- * Concurrency safe because we are the last and only thread to alter this
- * sub-buffer. As long as it is not delivered and read, no other thread can
- * alter the offset, alter the reserve_count or call the
- * client_buffer_end_callback on this sub-buffer.
- *
- * The only remaining threads could be the ones with pending commits. They will
- * have to do the deliver themselves.  Not concurrency safe in overwrite mode.
- * We detect corrupted subbuffers with commit and reserve counts. We keep a
- * corrupted sub-buffers count and push the readers across these sub-buffers.
- *
- * Not concurrency safe if a writer is stalled in a subbuffer and another writer
- * switches in, finding out it's corrupted.  The result will be than the old
- * (uncommited) subbuffer will be declared corrupted, and that the new subbuffer
- * will be declared corrupted too because of the commit count adjustment.
- *
- * Note : offset_old should never be 0 here.
- */
-static inline void ltt_reserve_switch_old_subbuf(
-               struct ust_channel *channel,
-               struct ust_buffer *buf,
-               struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
-{
-       long oldidx = SUBBUF_INDEX(offsets->old - 1, channel);
-
-       channel->buffer_end(buf, *tsc, offsets->old, oldidx);
-       /* Must write buffer end before incrementing commit count */
-       smp_wmb();
-       offsets->commit_count =
-               local_add_return(channel->subbuf_size
-                                - (SUBBUF_OFFSET(offsets->old - 1, channel)
-                                + 1),
-                                &buf->commit_count[oldidx]);
-       if ((BUFFER_TRUNC(offsets->old - 1, channel)
-                       >> channel->n_subbufs_order)
-                       - ((offsets->commit_count - channel->subbuf_size)
-                               & channel->commit_count_mask) == 0)
-               ltt_deliver(buf, oldidx, offsets->commit_count);
-}
-
-/*
- * ltt_reserve_switch_new_subbuf: Populate new subbuffer.
- *
- * This code can be executed unordered: writers may already have written to the
- * sub-buffer before this code gets executed, caution.  The commit makes sure
- * that this code is executed before the deliver of this sub-buffer.
- */
-static /*inline*/ void ltt_reserve_switch_new_subbuf(
-               struct ust_channel *channel,
-               struct ust_buffer *buf,
-               struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
-{
-       long beginidx = SUBBUF_INDEX(offsets->begin, channel);
-
-       channel->buffer_begin(buf, *tsc, beginidx);
-       /* Must write buffer begin before incrementing commit count */
-       smp_wmb();
-       offsets->commit_count = local_add_return(ltt_subbuffer_header_size(),
-                       &buf->commit_count[beginidx]);
-       /* Check if the written buffer has to be delivered */
-       if ((BUFFER_TRUNC(offsets->begin, channel)
-                       >> channel->n_subbufs_order)
-                       - ((offsets->commit_count - channel->subbuf_size)
-                               & channel->commit_count_mask) == 0)
-               ltt_deliver(buf, beginidx, offsets->commit_count);
-}
-
-
-/*
- * ltt_reserve_end_switch_current: finish switching current subbuffer
- *
- * Concurrency safe because we are the last and only thread to alter this
- * sub-buffer. As long as it is not delivered and read, no other thread can
- * alter the offset, alter the reserve_count or call the
- * client_buffer_end_callback on this sub-buffer.
- *
- * The only remaining threads could be the ones with pending commits. They will
- * have to do the deliver themselves.  Not concurrency safe in overwrite mode.
- * We detect corrupted subbuffers with commit and reserve counts. We keep a
- * corrupted sub-buffers count and push the readers across these sub-buffers.
- *
- * Not concurrency safe if a writer is stalled in a subbuffer and another writer
- * switches in, finding out it's corrupted.  The result will be that the old
- * (uncommitted) subbuffer will be declared corrupted, and that the new subbuffer
- * will be declared corrupted too because of the commit count adjustment.
- */
-static inline void ltt_reserve_end_switch_current(
-               struct ust_channel *channel,
-               struct ust_buffer *buf,
-               struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
-{
-       long endidx = SUBBUF_INDEX(offsets->end - 1, channel);
-
-       channel->buffer_end(buf, *tsc, offsets->end, endidx);
-       /* Must write buffer end before incrementing commit count */
-       smp_wmb();
-       offsets->commit_count =
-               local_add_return(channel->subbuf_size
-                                - (SUBBUF_OFFSET(offsets->end - 1, channel)
-                                + 1),
-                                &buf->commit_count[endidx]);
-       if ((BUFFER_TRUNC(offsets->end - 1, channel)
-                       >> channel->n_subbufs_order)
-                       - ((offsets->commit_count - channel->subbuf_size)
-                               & channel->commit_count_mask) == 0)
-               ltt_deliver(buf, endidx, offsets->commit_count);
-}
-
 /**
- * ltt_relay_reserve_slot - Atomic slot reservation in a LTTng buffer.
+ * ltt_reserve_slot_lockless_slow - Atomic slot reservation in a buffer.
  * @trace: the trace structure to log to.
  * @ltt_channel: channel structure
  * @transport_data: data structure specific to ltt relay
@@ -1229,30 +1235,24 @@ static inline void ltt_reserve_end_switch_current(
  * Return : -ENOSPC if not enough space, else returns 0.
  * It will take care of sub-buffer switching.
  */
-static notrace int ltt_relay_reserve_slot(struct ltt_trace_struct *trace,
-               struct ust_channel *channel, void **transport_data,
-               size_t data_size, size_t *slot_size, long *buf_offset, u64 *tsc,
-               unsigned int *rflags, int largest_align, int cpu)
+int ltt_reserve_slot_lockless_slow(struct ust_channel *chan,
+               struct ust_trace *trace, size_t data_size,
+               int largest_align, int cpu,
+               struct ust_buffer **ret_buf,
+               size_t *slot_size, long *buf_offset,
+               u64 *tsc, unsigned int *rflags)
 {
-       struct ust_buffer *buf = *transport_data = channel->buf[cpu];
+       struct ust_buffer *buf = *ret_buf = chan->buf[cpu];
        struct ltt_reserve_switch_offsets offsets;
 
-       offsets.reserve_commit_diff = 0;
        offsets.size = 0;
 
-       /*
-        * Perform retryable operations.
-        */
-       if (ltt_nesting > 4) {
-               local_inc(&buf->events_lost);
-               return -EPERM;
-       }
        do {
-               if (ltt_relay_try_reserve(channel, buf, &offsets, data_size, tsc, rflags,
-                               largest_align))
+               if (unlikely(ltt_relay_try_reserve_slow(chan, buf, &offsets,
+                               data_size, tsc, rflags, largest_align)))
                        return -ENOSPC;
-       } while (local_cmpxchg(&buf->offset, offsets.old,
-                       offsets.end) != offsets.old);
+       } while (unlikely(uatomic_cmpxchg(&buf->offset, offsets.old,
+                       offsets.end) != offsets.old));
 
        /*
         * Atomically update last_tsc. This update races against concurrent
@@ -1265,83 +1265,36 @@ static notrace int ltt_relay_reserve_slot(struct ltt_trace_struct *trace,
        /*
         * Push the reader if necessary
         */
-       ltt_reserve_push_reader(channel, buf, &offsets);
+       ltt_reserve_push_reader(chan, buf, offsets.end - 1);
+
+       /*
+        * Clear noref flag for this subbuffer.
+        */
+//ust//        ltt_clear_noref_flag(chan, buf, SUBBUF_INDEX(offsets.end - 1, chan));
 
        /*
         * Switch old subbuffer if needed.
         */
-       if (offsets.end_switch_old)
-               ltt_reserve_switch_old_subbuf(channel, buf, &offsets, tsc);
+       if (unlikely(offsets.end_switch_old)) {
+//ust//                ltt_clear_noref_flag(chan, buf, SUBBUF_INDEX(offsets.old - 1, chan));
+               ltt_reserve_switch_old_subbuf(chan, buf, &offsets, tsc);
+               DBG("Switching %s_%d", chan->channel_name, cpu);
+       }
 
        /*
         * Populate new subbuffer.
         */
-       if (offsets.begin_switch)
-               ltt_reserve_switch_new_subbuf(channel, buf, &offsets, tsc);
+       if (unlikely(offsets.begin_switch))
+               ltt_reserve_switch_new_subbuf(chan, buf, &offsets, tsc);
 
-       if (offsets.end_switch_current)
-               ltt_reserve_end_switch_current(channel, buf, &offsets, tsc);
+       if (unlikely(offsets.end_switch_current))
+               ltt_reserve_end_switch_current(chan, buf, &offsets, tsc);
 
        *slot_size = offsets.size;
        *buf_offset = offsets.begin + offsets.before_hdr_pad;
        return 0;
 }
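
The retry loop above is the heart of the lockless reservation: recompute the offsets, then publish them with a single uatomic_cmpxchg on buf->offset. A much-simplified standalone sketch of the same pattern, using C11 atomics and ignoring the sub-buffer switching, headers and alignment handled by the real code:

/*
 * Simplified sketch of reserve-then-cmpxchg; 'struct ring' is
 * illustrative and not a UST type.
 */
#include <stdatomic.h>
#include <stddef.h>

struct ring {
	atomic_long write_offset;	/* next free byte in the buffer */
	long buf_size;
};

/* Returns the reserved offset, or -1 if the record does not fit. */
long reserve_slot(struct ring *r, size_t size)
{
	long old = atomic_load(&r->write_offset);
	long new;

	do {
		new = old + (long)size;
		if (new > r->buf_size)
			return -1;	/* record cannot fit: no retry helps */
	} while (!atomic_compare_exchange_weak(&r->write_offset, &old, new));

	return old;			/* bytes [old, old + size) now belong to us */
}
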
 
-/*
- * Force a sub-buffer switch for a per-cpu buffer. This operation is
- * completely reentrant: it can be called while tracing is active with
- * absolutely no lock held.
- *
- * Note, however, that as a local_cmpxchg is used for some atomic
- * operations, this function must be called from the CPU which owns the buffer
- * for an ACTIVE flush.
- */
-static notrace void ltt_force_switch(struct ust_buffer *buf,
-               enum force_switch_mode mode)
-{
-       struct ust_channel *channel = buf->chan;
-       struct ltt_reserve_switch_offsets offsets;
-       u64 tsc;
-
-       offsets.reserve_commit_diff = 0;
-       offsets.size = 0;
-
-       /*
-        * Perform retryable operations.
-        */
-       do {
-               if (ltt_relay_try_switch(mode, channel, buf, &offsets, &tsc))
-                       return;
-       } while (local_cmpxchg(&buf->offset, offsets.old,
-                       offsets.end) != offsets.old);
-
-       /*
-        * Atomically update last_tsc. This update races against concurrent
-        * atomic updates, but the race will always cause supplementary full TSC
-        * events, never the opposite (missing a full TSC event when it would be
-        * needed).
-        */
-       save_last_tsc(buf, tsc);
-
-       /*
-        * Push the reader if necessary
-        */
-       if (mode == FORCE_ACTIVE)
-               ltt_reserve_push_reader(channel, buf, &offsets);
-
-       /*
-        * Switch old subbuffer if needed.
-        */
-       if (offsets.end_switch_old)
-               ltt_reserve_switch_old_subbuf(channel, buf, &offsets, &tsc);
-
-       /*
-        * Populate new subbuffer.
-        */
-       if (mode == FORCE_ACTIVE)
-               ltt_reserve_switch_new_subbuf(channel, buf, &offsets, &tsc);
-}
-
 static struct ltt_transport ust_relay_transport = {
        .name = "ustrelay",
        .ops = {
@@ -1349,83 +1302,9 @@ static struct ltt_transport ust_relay_transport = {
                .finish_channel = ltt_relay_finish_channel,
                .remove_channel = ltt_relay_remove_channel,
                .wakeup_channel = ltt_relay_async_wakeup_chan,
-//             .commit_slot = ltt_relay_commit_slot,
-               .reserve_slot = ltt_relay_reserve_slot,
        },
 };
 
-/*
- * for flight recording. Must be called after relay_commit.
- * This function decrements the subbuffer's lost_size each time the commit count
- * reaches back the reserve offset (modulo subbuffer size). It is useful for
- * crash dump.
- */
-static /* inline */ void ltt_write_commit_counter(struct ust_buffer *buf,
-               struct ust_buffer *ltt_buf,
-               long idx, long buf_offset, long commit_count, size_t data_size)
-{
-       long offset;
-       long commit_seq_old;
-
-       offset = buf_offset + data_size;
-
-       /*
-        * SUBBUF_OFFSET includes commit_count_mask. We can simply
-        * compare the offsets within the subbuffer without caring about
-        * buffer full/empty mismatch because offset is never zero here
-        * (subbuffer header and event headers have non-zero length).
-        */
-       if (unlikely(SUBBUF_OFFSET(offset - commit_count, buf->chan)))
-               return;
-
-       commit_seq_old = local_read(&ltt_buf->commit_seq[idx]);
-       while (commit_seq_old < commit_count)
-               commit_seq_old = local_cmpxchg(&ltt_buf->commit_seq[idx],
-                                        commit_seq_old, commit_count);
-}
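
The cmpxchg loop above only ever moves commit_seq forward, so racing committers cannot push it behind a later commit. A hedged sketch of that forward-only update idiom in C11 atomics, with illustrative names:

/*
 * Sketch of a monotonic (forward-only) counter update; not UST code.
 */
#include <stdatomic.h>

void update_commit_seq(atomic_long *commit_seq, long commit_count)
{
	long seen = atomic_load(commit_seq);

	/* Retry until commit_seq >= commit_count; never move it backwards. */
	while (seen < commit_count &&
	       !atomic_compare_exchange_weak(commit_seq, &seen, commit_count))
		;	/* a failed CAS refreshes 'seen' with the current value */
}
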
-
-/*
- * Atomic unordered slot commit. Increments the commit count in the
- * specified sub-buffer, and delivers it if necessary.
- *
- * Parameters:
- *
- * @ltt_channel : channel structure
- * @transport_data: transport-specific data
- * @buf_offset : offset following the event header.
- * @data_size : size of the event data.
- * @slot_size : size of the reserved slot.
- */
-/* FIXME: make this function static inline in the .h! */
-/*static*/ /* inline */ notrace void ltt_commit_slot(
-               struct ust_channel *channel,
-               void **transport_data, long buf_offset,
-               size_t data_size, size_t slot_size)
-{
-       struct ust_buffer *buf = *transport_data;
-       long offset_end = buf_offset;
-       long endidx = SUBBUF_INDEX(offset_end - 1, channel);
-       long commit_count;
-
-       /* Must write slot data before incrementing commit count */
-       smp_wmb();
-       commit_count = local_add_return(slot_size,
-               &buf->commit_count[endidx]);
-       /* Check if all commits have been done */
-       if ((BUFFER_TRUNC(offset_end - 1, channel)
-                       >> channel->n_subbufs_order)
-                       - ((commit_count - channel->subbuf_size)
-                          & channel->commit_count_mask) == 0)
-               ltt_deliver(buf, endidx, commit_count);
-       /*
-        * Update lost_size for each commit. It's needed only for extracting
-        * ltt buffers from vmcore, after crash.
-        */
-       ltt_write_commit_counter(buf, buf, endidx,
-                                buf_offset, commit_count, data_size);
-}
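
The delivery test above fires once the cumulative commit count accounts for every byte reserved in this sub-buffer for the current pass over the buffer. A simplified sketch of the commit-then-maybe-deliver step, with illustrative types; the real check also folds in the number of buffer wrap-arounds via BUFFER_TRUNC and commit_count_mask:

/*
 * Simplified commit step: publish slot data, bump the per-subbuffer
 * commit count, report whether the sub-buffer is now complete.
 */
#include <stdatomic.h>
#include <stdbool.h>

struct subbuf {
	atomic_long commit_count;	/* cumulative bytes committed */
};

/* Returns true when this commit completes the sub-buffer. */
bool commit_slot(struct subbuf *sb, long slot_size, long subbuf_size)
{
	long count;

	/* Slot data must be visible before the commit count moves. */
	atomic_thread_fence(memory_order_release);

	count = atomic_fetch_add(&sb->commit_count, slot_size) + slot_size;
	return (count % subbuf_size) == 0;
}
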
-
-
 static char initialized = 0;
 
 void __attribute__((constructor)) init_ustrelay_transport(void)
@@ -1436,7 +1315,74 @@ void __attribute__((constructor)) init_ustrelay_transport(void)
        }
 }
 
-static void __attribute__((destructor)) ltt_relay_exit(void)
+static void __attribute__((destructor)) ust_buffers_exit(void)
 {
        ltt_transport_unregister(&ust_relay_transport);
 }
+
+size_t ltt_write_event_header_slow(struct ust_channel *channel,
+               struct ust_buffer *buf, long buf_offset,
+               u16 eID, u32 event_size,
+               u64 tsc, unsigned int rflags)
+{
+       struct ltt_event_header header;
+       u16 small_size;
+
+       switch (rflags) {
+       case LTT_RFLAG_ID_SIZE_TSC:
+               header.id_time = 29 << LTT_TSC_BITS;
+               break;
+       case LTT_RFLAG_ID_SIZE:
+               header.id_time = 30 << LTT_TSC_BITS;
+               break;
+       case LTT_RFLAG_ID:
+               header.id_time = 31 << LTT_TSC_BITS;
+               break;
+       }
+
+       header.id_time |= (u32)tsc & LTT_TSC_MASK;
+       ust_buffers_write(buf, buf_offset, &header, sizeof(header));
+       buf_offset += sizeof(header);
+
+       switch (rflags) {
+       case LTT_RFLAG_ID_SIZE_TSC:
+               small_size = (u16)min_t(u32, event_size, LTT_MAX_SMALL_SIZE);
+               ust_buffers_write(buf, buf_offset,
+                       &eID, sizeof(u16));
+               buf_offset += sizeof(u16);
+               ust_buffers_write(buf, buf_offset,
+                       &small_size, sizeof(u16));
+               buf_offset += sizeof(u16);
+               if (small_size == LTT_MAX_SMALL_SIZE) {
+                       ust_buffers_write(buf, buf_offset,
+                               &event_size, sizeof(u32));
+                       buf_offset += sizeof(u32);
+               }
+               buf_offset += ltt_align(buf_offset, sizeof(u64));
+               ust_buffers_write(buf, buf_offset,
+                       &tsc, sizeof(u64));
+               buf_offset += sizeof(u64);
+               break;
+       case LTT_RFLAG_ID_SIZE:
+               small_size = (u16)min_t(u32, event_size, LTT_MAX_SMALL_SIZE);
+               ust_buffers_write(buf, buf_offset,
+                       &eID, sizeof(u16));
+               buf_offset += sizeof(u16);
+               ust_buffers_write(buf, buf_offset,
+                       &small_size, sizeof(u16));
+               buf_offset += sizeof(u16);
+               if (small_size == LTT_MAX_SMALL_SIZE) {
+                       ust_buffers_write(buf, buf_offset,
+                               &event_size, sizeof(u32));
+                       buf_offset += sizeof(u32);
+               }
+               break;
+       case LTT_RFLAG_ID:
+               ust_buffers_write(buf, buf_offset,
+                       &eID, sizeof(u16));
+               buf_offset += sizeof(u16);
+               break;
+       }
+
+       return buf_offset;
+}
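
The id_time word written above packs the event ID, or one of the reserved markers 29, 30 and 31 announcing an extended header, into the top bits, with a truncated timestamp in the low LTT_TSC_BITS bits. A small sketch of that packing, assuming a 27-bit TSC field purely for illustration:

/*
 * Sketch of the id_time bit layout; the 27-bit TSC width is an
 * assumption for illustration, not taken from the patch.
 */
#include <stdint.h>

#define TSC_BITS	27u
#define TSC_MASK	((1u << TSC_BITS) - 1u)

uint32_t pack_id_time(uint32_t id, uint64_t tsc)
{
	return (id << TSC_BITS) | ((uint32_t)tsc & TSC_MASK);
}

uint32_t unpack_id(uint32_t id_time)
{
	return id_time >> TSC_BITS;
}
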