tracectl cleanup v3
diff --git a/libust/buffers.h b/libust/buffers.h
index d3267e102ca5e56a8b6da731ce778790d002f3f7..30445000beb0e380d7af3c7cc657ad9ba51d3b21 100644
--- a/libust/buffers.h
+++ b/libust/buffers.h
@@ -24,7 +24,9 @@
 #define _UST_BUFFERS_H
 
 #include <assert.h>
-#include <ust/kernelcompat.h>
+
+#include <ust/core.h>
+
 #include "usterr.h"
 #include "channels.h"
 #include "tracerconst.h"
@@ -94,7 +96,7 @@ struct ust_buffer {
        unsigned int cpu;
 
        /* commit count per subbuffer; must be at end of struct */
-       long commit_seq[0] ____cacheline_aligned; /* ATOMIC */
+       long commit_seq[0]; /* ATOMIC */
 } ____cacheline_aligned;
 
 /*
@@ -104,10 +106,12 @@ struct ust_buffer {
  */
 enum force_switch_mode { FORCE_ACTIVE, FORCE_FLUSH };
 
-extern int ltt_reserve_slot_lockless_slow(struct ust_trace *trace,
-               struct ust_channel *ltt_channel, void **transport_data,
-               size_t data_size, size_t *slot_size, long *buf_offset, u64 *tsc,
-               unsigned int *rflags, int largest_align, int cpu);
+extern int ltt_reserve_slot_lockless_slow(struct ust_channel *chan,
+               struct ust_trace *trace, size_t data_size,
+               int largest_align, int cpu,
+               struct ust_buffer **ret_buf,
+               size_t *slot_size, long *buf_offset,
+               u64 *tsc, unsigned int *rflags);
 
 extern void ltt_force_switch_lockless_slow(struct ust_buffer *buf,
                enum force_switch_mode mode);
@@ -351,19 +355,21 @@ static __inline__ int ltt_relay_try_reserve(
        return 0;
 }
 
-static __inline__ int ltt_reserve_slot(struct ust_trace *trace,
-               struct ust_channel *chan, void **transport_data,
-               size_t data_size, size_t *slot_size, long *buf_offset, u64 *tsc,
-               unsigned int *rflags, int largest_align, int cpu)
+static __inline__ int ltt_reserve_slot(struct ust_channel *chan,
+                                      struct ust_trace *trace, size_t data_size,
+                                      int largest_align, int cpu,
+                                      struct ust_buffer **ret_buf,
+                                      size_t *slot_size, long *buf_offset, u64 *tsc,
+                                      unsigned int *rflags)
 {
-       struct ust_buffer *buf = chan->buf[cpu];
+       struct ust_buffer *buf = *ret_buf = chan->buf[cpu];
        long o_begin, o_end, o_old;
        size_t before_hdr_pad;
 
        /*
         * Perform retryable operations.
         */
-       /* FIXME: make this rellay per cpu? */
+       /* FIXME: make this really per cpu? */
        if (unlikely(LOAD_SHARED(ltt_nesting) > 4)) {
                DBG("Dropping event because nesting is too deep.");
                uatomic_inc(&buf->events_lost);
@@ -400,9 +406,10 @@ static __inline__ int ltt_reserve_slot(struct ust_trace *trace,
        *buf_offset = o_begin + before_hdr_pad;
        return 0;
 slow_path:
-       return ltt_reserve_slot_lockless_slow(trace, chan,
-               transport_data, data_size, slot_size, buf_offset, tsc,
-               rflags, largest_align, cpu);
+       return ltt_reserve_slot_lockless_slow(chan, trace, data_size,
+                                             largest_align, cpu, ret_buf,
+                                             slot_size, buf_offset, tsc,
+                                             rflags);
 }
 
 /*
@@ -476,16 +483,8 @@ static __inline__ void ltt_commit_slot(
        long endidx = SUBBUF_INDEX(offset_end - 1, chan);
        long commit_count;
 
-#ifdef LTT_NO_IPI_BARRIER
        smp_wmb();
-#else
-       /*
-        * Must write slot data before incrementing commit count.
-        * This compiler barrier is upgraded into a smp_mb() by the IPI
-        * sent by get_subbuf().
-        */
-       barrier();
-#endif
+
        uatomic_add(&buf->commit_count[endidx].cc, slot_size);
        /*
         * commit count read can race with concurrent OOO commit count updates.
@@ -514,22 +513,92 @@ static __inline__ void ltt_commit_slot(
        ltt_write_commit_counter(chan, buf, endidx, buf_offset, commit_count, data_size);
 }
 
-void _ust_buffers_write(struct ust_buffer *buf, size_t offset,
-        const void *src, size_t len, ssize_t cpy);
+void _ust_buffers_strncpy_fixup(struct ust_buffer *buf, size_t offset,
+                               size_t len, size_t copied, int terminated);
 
 static __inline__ int ust_buffers_write(struct ust_buffer *buf, size_t offset,
         const void *src, size_t len)
 {
-       size_t cpy;
        size_t buf_offset = BUFFER_OFFSET(offset, buf->chan);
 
        assert(buf_offset < buf->chan->subbuf_size*buf->chan->subbuf_cnt);
+       assert(buf_offset + len
+              <= buf->chan->subbuf_size*buf->chan->subbuf_cnt);
+
+       ust_buffers_do_copy(buf->buf_data + buf_offset, src, len);
+
+       return len;
+}
+
+/*
+ * ust_buffers_do_memset - write character into dest.
+ * @dest: destination
+ * @src: source character
+ * @len: length to write
+ */
+static __inline__
+void ust_buffers_do_memset(void *dest, char src, size_t len)
+{
+       /*
+        * What we really want here is an __inline__ memset, but we
+        * don't have constants, so gcc generally uses a function call.
+        */
+       for (; len > 0; len--)
+               *(u8 *)dest++ = src;
+}
+
+/*
+ * ust_buffers_do_strncpy - copy a string up to a certain number of bytes
+ * @dest: destination
+ * @src: source
+ * @len: max. length to copy
+ * @terminated: output string ends with \0 (output)
+ *
+ * returns the number of bytes copied. Does not finalize with \0 if len is
+ * reached.
+ */
+static __inline__
+size_t ust_buffers_do_strncpy(void *dest, const void *src, size_t len,
+                             int *terminated)
+{
+       size_t orig_len = len;
+
+       *terminated = 0;
+       /*
+        * What we really want here is an __inline__ strncpy, but we
+        * don't have constants, so gcc generally uses a function call.
+        */
+       for (; len > 0; len--) {
+               *(u8 *)dest = LOAD_SHARED(*(const u8 *)src);
+               /* Check with dest, because src may be modified concurrently */
+               if (*(const u8 *)dest == '\0') {
+                       len--;
+                       *terminated = 1;
+                       break;
+               }
+               dest++;
+               src++;
+       }
+       return orig_len - len;
+}
 
-       cpy = min_t(size_t, len, buf->buf_size - buf_offset);
-       ust_buffers_do_copy(buf->buf_data + buf_offset, src, cpy);
+static __inline__
+int ust_buffers_strncpy(struct ust_buffer *buf, size_t offset, const void *src,
+                       size_t len)
+{
+       size_t buf_offset = BUFFER_OFFSET(offset, buf->chan);
+       ssize_t copied;
+       int terminated;
 
-       if (unlikely(len != cpy))
-               _ust_buffers_write(buf, buf_offset, src, len, cpy);
+       assert(buf_offset < buf->chan->subbuf_size*buf->chan->subbuf_cnt);
+       assert(buf_offset + len
+              <= buf->chan->subbuf_size*buf->chan->subbuf_cnt);
+
+       copied = ust_buffers_do_strncpy(buf->buf_data + buf_offset,
+                                       src, len, &terminated);
+       if (unlikely(copied < len || !terminated))
+               _ust_buffers_strncpy_fixup(buf, offset, len, copied,
+                                          terminated);
        return len;
 }
 
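Note on the string helpers: the partial-copy fallback _ust_buffers_write() is gone; ust_buffers_write() now asserts that the write fits entirely within the buffer, and strings get dedicated helpers. ust_buffers_strncpy() copies at most len bytes from a source that may be modified concurrently, always reports len back so the slot layout stays fixed, and defers to the out-of-line _ust_buffers_strncpy_fixup() whenever the copy came up short or unterminated. A hedged usage sketch, continuing from the reservation sketch above; the fixed field width and the source string are illustrative assumptions, not part of this patch:

	/* A string field occupying a fixed str_len bytes inside the reserved slot. */
	size_t str_len = 32;
	const char *name = "example";

	/* Advances by str_len even when 'name' is shorter or not yet terminated. */
	buf_offset += ust_buffers_strncpy(buf, buf_offset, name, str_len);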