X-Git-Url: http://git.lttng.org/?a=blobdiff_plain;f=libust%2Fserialize.c;h=8aa3f4b73fac66330c139e6572dd1bfb407b728d;hb=8d6300d3b3cb0219e1109e931a2219dbd812b24d;hp=4c23e8dd02451d46d6b8de34c418d40e872bc9f3;hpb=015d08b66af5ebd10665aa90f5426930e56c540d;p=ust.git

diff --git a/libust/serialize.c b/libust/serialize.c
index 4c23e8d..8aa3f4b 100644
--- a/libust/serialize.c
+++ b/libust/serialize.c
@@ -32,17 +32,23 @@
 #include 
 #include 
-#include 
 
 #define _LGPL_SOURCE
 #include 
 #include 
+#include 
+#include 
 
 #include "buffers.h"
 #include "tracer.h"
-//#include "list.h"
 #include "usterr.h"
 #include "ust_snprintf.h"
 
+/*
+ * Because UST core defines a non-const PAGE_SIZE, define PAGE_SIZE_STATIC here.
+ * It is just an approximation for the tracer stack.
+ */
+#define PAGE_SIZE_STATIC	4096
+
 enum ltt_type {
 	LTT_TYPE_SIGNED_INT,
 	LTT_TYPE_UNSIGNED_INT,
@@ -50,10 +56,15 @@ enum ltt_type {
 	LTT_TYPE_NONE,
 };
 
-static int ust_get_cpu(void)
-{
-	return sched_getcpu();
-}
+/*
+ * Special stack for the tracer. Keeps serialization offsets for each field.
+ * Per-thread. Deals with reentrancy from signals by simply ensuring that
+ * interrupting signals put the stack back to its original position.
+ */
+#define TRACER_STACK_LEN	(PAGE_SIZE_STATIC / sizeof(unsigned long))
+static unsigned long __thread tracer_stack[TRACER_STACK_LEN];
+
+static unsigned int __thread tracer_stack_pos;
 
 #define LTT_ATTRIBUTE_NETWORK_BYTE_ORDER (1<<1)
 
@@ -354,7 +365,9 @@ static inline size_t serialize_trace_data(struct ust_buffer *buf,
 		size_t buf_offset,
 		char trace_size, enum ltt_type trace_type,
 		char c_size, enum ltt_type c_type,
-		int *largest_align, va_list *args)
+		unsigned int *stack_pos_ctx,
+		int *largest_align,
+		va_list *args)
 {
 	union {
 		unsigned long v_ulong;
@@ -410,10 +423,20 @@ static inline size_t serialize_trace_data(struct ust_buffer *buf,
 		tmp.v_string.s = va_arg(*args, const char *);
 		if ((unsigned long)tmp.v_string.s < PAGE_SIZE)
 			tmp.v_string.s = "<NULL>";
-		tmp.v_string.len = strlen(tmp.v_string.s)+1;
+		if (!buf) {
+			/*
+			 * Reserve tracer stack entry.
+			 */
+			tracer_stack_pos++;
+			assert(tracer_stack_pos <= TRACER_STACK_LEN);
+			cmm_barrier();
+			tracer_stack[*stack_pos_ctx] =
+				strlen(tmp.v_string.s) + 1;
+		}
+		tmp.v_string.len = tracer_stack[(*stack_pos_ctx)++];
 		if (buf)
-			ust_buffers_write(buf, buf_offset, tmp.v_string.s,
-				tmp.v_string.len);
+			ust_buffers_strncpy(buf, buf_offset, tmp.v_string.s,
+					    tmp.v_string.len);
 		buf_offset += tmp.v_string.len;
 		goto copydone;
 	default:
@@ -513,7 +536,9 @@ copydone:
 
 notrace size_t ltt_serialize_data(struct ust_buffer *buf, size_t buf_offset,
 			struct ltt_serialize_closure *closure,
-			void *serialize_private, int *largest_align,
+			void *serialize_private,
+			unsigned int stack_pos_ctx,
+			int *largest_align,
 			const char *fmt, va_list *args)
 {
 	char trace_size = 0, c_size = 0;	/*
@@ -553,7 +578,9 @@ notrace size_t ltt_serialize_data(struct ust_buffer *buf, size_t buf_offset,
 			buf_offset = serialize_trace_data(buf, buf_offset,
 						trace_size, trace_type,
 						c_size, c_type,
-						largest_align, args);
+						&stack_pos_ctx,
+						largest_align,
+						args);
 			trace_size = 0;
 			c_size = 0;
 			trace_type = LTT_TYPE_NONE;
@@ -571,25 +598,29 @@ notrace size_t ltt_serialize_data(struct ust_buffer *buf, size_t buf_offset,
  * Assume that the padding for alignment starts at a sizeof(void *) address.
  */
 static notrace size_t ltt_get_data_size(struct ltt_serialize_closure *closure,
-				void *serialize_private, int *largest_align,
+				void *serialize_private,
+				unsigned int stack_pos_ctx, int *largest_align,
 				const char *fmt, va_list *args)
 {
 	ltt_serialize_cb cb = closure->callbacks[0];
 	closure->cb_idx = 0;
 	return (size_t)cb(NULL, 0, closure, serialize_private,
-				largest_align, fmt, args);
+			  stack_pos_ctx, largest_align, fmt, args);
 }
 
 static notrace
 void ltt_write_event_data(struct ust_buffer *buf,
 		size_t buf_offset,
 		struct ltt_serialize_closure *closure,
-		void *serialize_private, int largest_align,
+		void *serialize_private,
+		unsigned int stack_pos_ctx,
+		int largest_align,
 		const char *fmt, va_list *args)
 {
 	ltt_serialize_cb cb = closure->callbacks[0];
 	closure->cb_idx = 0;
 	buf_offset += ltt_align(buf_offset, largest_align);
-	cb(buf, buf_offset, closure, serialize_private, NULL, fmt, args);
+	cb(buf, buf_offset, closure, serialize_private, stack_pos_ctx, NULL,
+	   fmt, args);
 }
 
@@ -605,7 +636,6 @@ notrace void ltt_vtrace(const struct marker *mdata, void *probe_data,
 	struct ust_channel *channel;
 	struct ust_trace *trace, *dest_trace = NULL;
 	struct ust_buffer *buf;
-	void *transport_data;
 	u64 tsc;
 	long buf_offset;
 	va_list args_copy;
@@ -614,6 +644,7 @@ notrace void ltt_vtrace(const struct marker *mdata, void *probe_data,
 	void *serialize_private = NULL;
 	int cpu;
 	unsigned int rflags;
+	unsigned int stack_pos_ctx;
 
 	/*
 	 * This test is useful for quickly exiting static tracing when no trace
@@ -626,8 +657,9 @@ notrace void ltt_vtrace(const struct marker *mdata, void *probe_data,
 	cpu = ust_get_cpu();
 
 	/* Force volatile access. */
-	STORE_SHARED(ltt_nesting, LOAD_SHARED(ltt_nesting) + 1);
-	barrier();
+	CMM_STORE_SHARED(ltt_nesting, CMM_LOAD_SHARED(ltt_nesting) + 1);
+	stack_pos_ctx = tracer_stack_pos;
+	cmm_barrier();
 
 	pdata = (struct ltt_active_marker *)probe_data;
 	eID = mdata->event_id;
@@ -647,12 +679,13 @@ notrace void ltt_vtrace(const struct marker *mdata, void *probe_data,
 	 */
 	largest_align = 1;	/* must be non-zero for ltt_align */
 	data_size = ltt_get_data_size(&closure, serialize_private,
-				      &largest_align, fmt, &args_copy);
+				      stack_pos_ctx, &largest_align,
+				      fmt, &args_copy);
 	largest_align = min_t(int, largest_align, sizeof(void *));
 	va_end(args_copy);
 
 	/* Iterate on each trace */
-	list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
+	cds_list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
 		/*
 		 * Expect the filter to filter out events. If we get here,
 		 * we went through tracepoint activation as a first step.
@@ -689,10 +722,9 @@ notrace void ltt_vtrace(const struct marker *mdata, void *probe_data,
 		}
 
 		/* reserve space : header and data */
-		ret = ltt_reserve_slot(trace, channel, &transport_data,
-				       data_size, &slot_size, &buf_offset,
-				       &tsc, &rflags,
-				       largest_align, cpu);
+		ret = ltt_reserve_slot(channel, trace, data_size, largest_align,
+				       cpu, &buf, &slot_size, &buf_offset,
+				       &tsc, &rflags);
 		if (unlikely(ret < 0))
 			continue; /* buffer full */
 
@@ -701,20 +733,21 @@ notrace void ltt_vtrace(const struct marker *mdata, void *probe_data,
 //ust//	buf = ((struct rchan *)channel->trans_channel_data)->buf[cpu];
 		buf = channel->buf[cpu];
 		/* Out-of-order write : header and data */
-		buf_offset = ltt_write_event_header(trace,
-				channel, buf, buf_offset,
+		buf_offset = ltt_write_event_header(channel, buf, buf_offset,
 				eID, data_size, tsc, rflags);
 		ltt_write_event_data(buf, buf_offset, &closure,
-					serialize_private,
-					largest_align, fmt, &args_copy);
+				     serialize_private,
+				     stack_pos_ctx, largest_align,
+				     fmt, &args_copy);
 		va_end(args_copy);
 		/* Out-of-order commit */
 		ltt_commit_slot(channel, buf, buf_offset, data_size, slot_size);
 		DBG("just commited event (%s/%s) at offset %ld and size %zd",
 			mdata->channel, mdata->name, buf_offset, slot_size);
 	}
-	barrier();
-	STORE_SHARED(ltt_nesting, LOAD_SHARED(ltt_nesting) - 1);
+	cmm_barrier();
+	tracer_stack_pos = stack_pos_ctx;
+	CMM_STORE_SHARED(ltt_nesting, CMM_LOAD_SHARED(ltt_nesting) - 1);
 
 	rcu_read_unlock(); //ust//	rcu_read_unlock_sched_notrace();
 }
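The per-thread tracer stack added by this patch follows a save/restore discipline: the sizing pass (buf == NULL) pushes each string length, the write pass pops them in the same order, and ltt_vtrace restores the saved stack position after the event so that a signal handler interrupting mid-event starts from the same position it found. The standalone sketch below illustrates only that discipline; it is not libust code, and names such as demo_stack, demo_size_pass and demo_write_pass are hypothetical.

/*
 * Illustrative sketch (not part of the patch): a per-thread scratch stack
 * whose position is saved before serializing an event and restored after,
 * mirroring how serialize.c reuses string lengths between its two passes.
 */
#include <assert.h>
#include <stdio.h>
#include <string.h>

#define DEMO_STACK_LEN	512

static __thread unsigned long demo_stack[DEMO_STACK_LEN];
static __thread unsigned int demo_stack_pos;

/* Sizing pass: reserve a slot and record the length of a string field. */
static size_t demo_size_pass(const char *s, unsigned int *pos)
{
	demo_stack_pos++;
	assert(demo_stack_pos <= DEMO_STACK_LEN);
	demo_stack[*pos] = strlen(s) + 1;
	return demo_stack[(*pos)++];
}

/* Write pass: consume the length recorded by the sizing pass. */
static size_t demo_write_pass(unsigned int *pos)
{
	return demo_stack[(*pos)++];
}

int main(void)
{
	unsigned int saved = demo_stack_pos;	/* like stack_pos_ctx in ltt_vtrace */
	unsigned int pos;

	pos = saved;
	size_t len = demo_size_pass("hello", &pos);	/* first pass: compute sizes */

	pos = saved;
	size_t again = demo_write_pass(&pos);		/* second pass: reuse them */

	printf("len=%zu again=%zu\n", len, again);

	demo_stack_pos = saved;	/* restore so a nested/interrupting event starts clean */
	return 0;
}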