Convert buffering system to per-cpu
diff --git a/libust/serialize.c b/libust/serialize.c
index ee8b8284d1eb8039b4891a1872b08e652d0890e6..9cb6bcaed6cd52794a01ebbc80635c197fb4b6d1 100644
--- a/libust/serialize.c
+++ b/libust/serialize.c
  * va_list * to ltt_vtrace.
  */
 
+#define _GNU_SOURCE
+#include <unistd.h>
+#include <sys/syscall.h>
 #include <stdarg.h>
-//ust// #include <linux/ctype.h>
-//ust// #include <linux/string.h>
-//ust// #include <linux/module.h>
-//ust// #include <linux/ltt-tracer.h>
 #include <string.h>
 #include <stdint.h>
 #include <stdio.h>
@@ -38,8 +37,8 @@
 #include <urcu-bp.h>
 #include <urcu/rculist.h>
 
-#include "relay.h"
-#include <ust/tracer.h>
+#include "buffers.h"
+#include "tracer.h"
 //#include "list.h"
 #include "usterr.h"
 
@@ -50,6 +49,11 @@ enum ltt_type {
        LTT_TYPE_NONE,
 };
 
+static int ust_get_cpu(void)
+{
+       return sched_getcpu();
+}
+
 #define LTT_ATTRIBUTE_NETWORK_BYTE_ORDER (1<<1)
 
 /*
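
The new ust_get_cpu() helper wraps sched_getcpu(), a glibc extension declared in <sched.h> only under _GNU_SOURCE (hence the added define); the extra <unistd.h> and <sys/syscall.h> includes would also permit a raw-syscall path on libcs that lack the wrapper. A minimal sketch of such a fallback, assuming only that SYS_getcpu exists on the target kernel (the helper name is hypothetical, not part of the patch):

    #define _GNU_SOURCE
    #include <sched.h>          /* sched_getcpu() */
    #include <unistd.h>         /* syscall() */
    #include <sys/syscall.h>    /* SYS_getcpu */

    /* Hypothetical fallback for libcs without sched_getcpu(). */
    static int ust_get_cpu_fallback(void)
    {
            unsigned int cpu = 0;

            /* getcpu(cpu, node, tcache); node and tcache are not needed. */
            if (syscall(SYS_getcpu, &cpu, NULL, NULL) < 0)
                    return 0;   /* on error, report cpu 0 */
            return (int)cpu;
    }
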
@@ -337,7 +341,7 @@ parse_end:
        return fmt;
 }
 
-static inline size_t serialize_trace_data(struct rchan_buf *buf,
+static inline size_t serialize_trace_data(struct ust_buffer *buf,
                size_t buf_offset,
                char trace_size, enum ltt_type trace_type,
                char c_size, enum ltt_type c_type,
@@ -402,7 +406,7 @@ static inline size_t serialize_trace_data(struct rchan_buf *buf,
                        tmp.v_string.s = "<NULL>";
                tmp.v_string.len = strlen(tmp.v_string.s)+1;
                if (buf)
-                       ltt_relay_write(buf, buf_offset, tmp.v_string.s,
+                       ust_buffers_write(buf, buf_offset, tmp.v_string.s,
                                tmp.v_string.len);
                buf_offset += tmp.v_string.len;
                goto copydone;
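
Every ltt_relay_write() call is replaced by ust_buffers_write(), which writes into the channel's per-cpu ust_buffer instead of the old relay channel. Its actual declaration lives in the new buffers.h; the usage in this file only assumes an interface along these lines (a sketch of the assumed shape, not the real prototype):

    /* Assumed shape of the call used throughout this file: copy `len`
     * bytes from `src` into buffer `buf` starting at `offset`. */
    void ust_buffers_write(struct ust_buffer *buf, size_t offset,
                           const void *src, size_t len);

Note that for the string case above the length passed is strlen()+1, so the terminating NUL byte is copied into the trace buffer as well.
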
@@ -430,31 +434,31 @@ static inline size_t serialize_trace_data(struct rchan_buf *buf,
                        switch (trace_size) {
                        case 1:
                                if (c_size == 8)
-                                       ltt_relay_write(buf, buf_offset,
+                                       ust_buffers_write(buf, buf_offset,
                                        (uint8_t[]){ (uint8_t)tmp.v_uint64 },
                                        sizeof(uint8_t));
                                else
-                                       ltt_relay_write(buf, buf_offset,
+                                       ust_buffers_write(buf, buf_offset,
                                        (uint8_t[]){ (uint8_t)tmp.v_ulong },
                                        sizeof(uint8_t));
                                break;
                        case 2:
                                if (c_size == 8)
-                                       ltt_relay_write(buf, buf_offset,
+                                       ust_buffers_write(buf, buf_offset,
                                        (uint16_t[]){ (uint16_t)tmp.v_uint64 },
                                        sizeof(uint16_t));
                                else
-                                       ltt_relay_write(buf, buf_offset,
+                                       ust_buffers_write(buf, buf_offset,
                                        (uint16_t[]){ (uint16_t)tmp.v_ulong },
                                        sizeof(uint16_t));
                                break;
                        case 4:
                                if (c_size == 8)
-                                       ltt_relay_write(buf, buf_offset,
+                                       ust_buffers_write(buf, buf_offset,
                                        (uint32_t[]){ (uint32_t)tmp.v_uint64 },
                                        sizeof(uint32_t));
                                else
-                                       ltt_relay_write(buf, buf_offset,
+                                       ust_buffers_write(buf, buf_offset,
                                        (uint32_t[]){ (uint32_t)tmp.v_ulong },
                                        sizeof(uint32_t));
                                break;
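
The integer cases pass C99 compound literals such as (uint32_t[]){ (uint32_t)tmp.v_uint64 }: an unnamed one-element array created on the spot, whose address decays to the pointer ust_buffers_write() expects. An equivalent long-hand form, for illustration only (the temporary name is hypothetical):

    /* Same effect as the compound-literal call above, spelled out. */
    uint32_t tmp_word = (uint32_t)tmp.v_uint64;

    ust_buffers_write(buf, buf_offset, &tmp_word, sizeof(tmp_word));
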
@@ -463,7 +467,7 @@ static inline size_t serialize_trace_data(struct rchan_buf *buf,
                                 * c_size cannot be other than 8 here because
                                 * trace_size > 4.
                                 */
-                               ltt_relay_write(buf, buf_offset,
+                               ust_buffers_write(buf, buf_offset,
                                (uint64_t[]){ (uint64_t)tmp.v_uint64 },
                                sizeof(uint64_t));
                                break;
@@ -480,12 +484,12 @@ static inline size_t serialize_trace_data(struct rchan_buf *buf,
                if (buf) {
                        switch (trace_type) {
                        case LTT_TYPE_SIGNED_INT:
-                               ltt_relay_write(buf, buf_offset,
+                               ust_buffers_write(buf, buf_offset,
                                        (int64_t[]){ (int64_t)tmp.v_ulong },
                                        sizeof(int64_t));
                                break;
                        case LTT_TYPE_UNSIGNED_INT:
-                               ltt_relay_write(buf, buf_offset,
+                               ust_buffers_write(buf, buf_offset,
                                        (uint64_t[]){ (uint64_t)tmp.v_ulong },
                                        sizeof(uint64_t));
                                break;
@@ -587,7 +591,7 @@ int serialize_to_text(char *outbuf, int bufsize, const char *fmt, va_list ap)
        return result;
 }
 
-notrace size_t ltt_serialize_data(struct rchan_buf *buf, size_t buf_offset,
+notrace size_t ltt_serialize_data(struct ust_buffer *buf, size_t buf_offset,
                        struct ltt_serialize_closure *closure,
                        void *serialize_private, int *largest_align,
                        const char *fmt, va_list *args)
@@ -657,7 +661,7 @@ static notrace size_t ltt_get_data_size(struct ltt_serialize_closure *closure,
 }
 
 static notrace
-void ltt_write_event_data(struct rchan_buf *buf, size_t buf_offset,
+void ltt_write_event_data(struct ust_buffer *buf, size_t buf_offset,
                                struct ltt_serialize_closure *closure,
                                void *serialize_private, int largest_align,
                                const char *fmt, va_list *args)
@@ -678,9 +682,9 @@ notrace void ltt_vtrace(const struct marker *mdata, void *probe_data,
        uint16_t eID;
        size_t data_size, slot_size;
        unsigned int chan_index;
-       struct ltt_channel_struct *channel;
+       struct ust_channel *channel;
        struct ltt_trace_struct *trace, *dest_trace = NULL;
-       struct rchan_buf *buf;
+       struct ust_buffer *buf;
        void *transport_data;
        u64 tsc;
        long buf_offset;
@@ -688,7 +692,7 @@ notrace void ltt_vtrace(const struct marker *mdata, void *probe_data,
        struct ltt_serialize_closure closure;
        struct ltt_probe_private_data *private_data = call_data;
        void *serialize_private = NULL;
-//ust//        int cpu;
+       int cpu;
        unsigned int rflags;
 
        /*
@@ -700,7 +704,9 @@ notrace void ltt_vtrace(const struct marker *mdata, void *probe_data,
 
        rcu_read_lock(); //ust// rcu_read_lock_sched_notrace();
 //ust//        cpu = smp_processor_id();
+       cpu = ust_get_cpu();
 //ust//        __get_cpu_var(ltt_nesting)++;
+       /* FIXME: should nesting be per-cpu? */
        ltt_nesting++;
 
        pdata = (struct ltt_active_marker *)probe_data;
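
In the kernel version the current CPU came from smp_processor_id() and ltt_nesting was a per-cpu variable bumped through __get_cpu_var(); here the CPU index comes from ust_get_cpu() while, as the FIXME notes, the nesting counter remains a single global shared by all threads. A userspace equivalent of "per-cpu" nesting would more naturally be per-thread, since a thread can migrate between CPUs at any point; a sketch of that alternative (not what this patch does), assuming GCC-style thread-local storage is acceptable:

    /* Per-thread recursion guard; sketch only, the patch keeps a
     * plain global `ltt_nesting`. */
    static __thread int ltt_nesting;
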
@@ -754,21 +760,29 @@ notrace void ltt_vtrace(const struct marker *mdata, void *probe_data,
                if (!channel->active)
                        continue;
 
+               /* If a new cpu was plugged since the trace was started, we did
+                * not add it to the trace, and therefore we write the event to
+                * cpu 0.
+                */
+               if(cpu >= channel->n_cpus) {
+                       cpu = 0;
+               }
+
                /* reserve space : header and data */
                ret = ltt_reserve_slot(trace, channel, &transport_data,
                                        data_size, &slot_size, &buf_offset,
                                        &tsc, &rflags,
-                                       largest_align);
+                                       largest_align, cpu);
                if (unlikely(ret < 0))
                        continue; /* buffer full */
 
                va_copy(args_copy, *args);
                /* FIXME : could probably encapsulate transport better. */
 //ust//                buf = ((struct rchan *)channel->trans_channel_data)->buf[cpu];
-               buf = ((struct rchan *)channel->trans_channel_data)->buf;
+               buf = channel->buf[cpu];
                /* Out-of-order write : header and data */
                buf_offset = ltt_write_event_header(trace,
-                                       channel, buf, buf_offset,
+                                       buf, buf_offset,
                                        eID, data_size, tsc, rflags);
                ltt_write_event_data(buf, buf_offset, &closure,
                                        serialize_private,
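
With the relay indirection gone, each ust_channel carries an array of per-cpu buffers (channel->buf[], sized channel->n_cpus), and the event is reserved and written in the buffer of the CPU the tracing thread currently runs on; a CPU hot-plugged after trace start has no buffer of its own, so such events fall back to cpu 0 instead of being dropped. The selection logic above, condensed into a standalone sketch (the helper name is hypothetical):

    /* Hypothetical helper summarizing the per-cpu buffer lookup above. */
    static struct ust_buffer *ust_channel_get_buf(struct ust_channel *chan,
                                                  int cpu)
    {
            /* CPUs added after the trace started have no buffer: use 0. */
            if (cpu >= chan->n_cpus)
                    cpu = 0;
            return chan->buf[cpu];
    }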