X-Git-Url: http://git.lttng.org/?p=ust.git;a=blobdiff_plain;f=libust%2Fserialize.c;h=bd947ab1f90d9cd1358bae1538d0f80de2429e62;hp=7f9ce5ad44f2a4f80cd77c68ca593fe0096120c8;hb=5f9aacae75a4181a54bf16f31ce8e18229dddc60;hpb=b73a4c471dc987ea8548632dffb3c7050de77dd0

diff --git a/libust/serialize.c b/libust/serialize.c
index 7f9ce5a..bd947ab 100644
--- a/libust/serialize.c
+++ b/libust/serialize.c
@@ -32,15 +32,22 @@
 #include 
 #include 
-#include 
 #define _LGPL_SOURCE
 #include 
 #include 
+#include 
+#include 
 #include "buffers.h"
 #include "tracer.h"
-//#include "list.h"
 #include "usterr.h"
+#include "ust_snprintf.h"
+
+/*
+ * Because UST core defines a non-const PAGE_SIZE, define PAGE_SIZE_STATIC here.
+ * It is just an approximation for the tracer stack.
+ */
+#define PAGE_SIZE_STATIC 4096
 
 enum ltt_type {
 	LTT_TYPE_SIGNED_INT,
@@ -49,10 +56,15 @@ enum ltt_type {
 	LTT_TYPE_NONE,
 };
 
-static int ust_get_cpu(void)
-{
-	return sched_getcpu();
-}
+/*
+ * Special stack for the tracer. Keeps serialization offsets for each field.
+ * Per-thread. Deals with reentrancy from signals by simply ensuring that
+ * interrupting signals put the stack back to its original position.
+ */
+#define TRACER_STACK_LEN	(PAGE_SIZE_STATIC / sizeof(unsigned long))
+static unsigned long __thread tracer_stack[TRACER_STACK_LEN];
+
+static unsigned int __thread tracer_stack_pos;
 
 #define LTT_ATTRIBUTE_NETWORK_BYTE_ORDER (1<<1)
 
@@ -353,7 +365,9 @@ static inline size_t serialize_trace_data(struct ust_buffer *buf,
 			size_t buf_offset,
 			char trace_size, enum ltt_type trace_type,
 			char c_size, enum ltt_type c_type,
-			int *largest_align, va_list *args)
+			unsigned int *stack_pos_ctx,
+			int *largest_align,
+			va_list *args)
 {
 	union {
 		unsigned long v_ulong;
@@ -409,10 +423,20 @@ static inline size_t serialize_trace_data(struct ust_buffer *buf,
 		tmp.v_string.s = va_arg(*args, const char *);
 		if ((unsigned long)tmp.v_string.s < PAGE_SIZE)
 			tmp.v_string.s = "";
-		tmp.v_string.len = strlen(tmp.v_string.s)+1;
+		if (!buf) {
+			/*
+			 * Reserve tracer stack entry.
+			 */
+			tracer_stack_pos++;
+			assert(tracer_stack_pos <= TRACER_STACK_LEN);
+			barrier();
+			tracer_stack[*stack_pos_ctx] =
+				strlen(tmp.v_string.s) + 1;
+		}
+		tmp.v_string.len = tracer_stack[(*stack_pos_ctx)++];
 		if (buf)
-			ust_buffers_write(buf, buf_offset, tmp.v_string.s,
-				tmp.v_string.len);
+			ust_buffers_strncpy(buf, buf_offset, tmp.v_string.s,
+				tmp.v_string.len);
 		buf_offset += tmp.v_string.len;
 		goto copydone;
 	default:
@@ -512,7 +536,9 @@ copydone:
 notrace size_t ltt_serialize_data(struct ust_buffer *buf, size_t buf_offset,
 			struct ltt_serialize_closure *closure,
-			void *serialize_private, int *largest_align,
+			void *serialize_private,
+			unsigned int stack_pos_ctx,
+			int *largest_align,
 			const char *fmt, va_list *args)
 {
 	char trace_size = 0, c_size = 0;	/*
@@ -537,7 +563,7 @@ notrace size_t ltt_serialize_data(struct ust_buffer *buf, size_t buf_offset,
 			++fmt;			/* skip first '%' */
 			if (*fmt == '%')	/* Escaped %% */
 				break;
-			fmt = parse_c_type(fmt, &c_size, &c_type);
+			fmt = parse_c_type(fmt, &c_size, &c_type, NULL);
 			/*
 			 * Output c types if no trace types have been
 			 * specified.
@@ -552,7 +578,9 @@ notrace size_t ltt_serialize_data(struct ust_buffer *buf, size_t buf_offset,
 			buf_offset = serialize_trace_data(buf,
 						buf_offset, trace_size,
 						trace_type, c_size, c_type,
-						largest_align, args);
+						&stack_pos_ctx,
+						largest_align,
+						args);
 			trace_size = 0;
 			c_size = 0;
 			trace_type = LTT_TYPE_NONE;
@@ -570,25 +598,29 @@ notrace size_t ltt_serialize_data(struct ust_buffer *buf, size_t buf_offset,
  * Assume that the padding for alignment starts at a sizeof(void *) address.
  */
 static notrace size_t ltt_get_data_size(struct ltt_serialize_closure *closure,
-				void *serialize_private, int *largest_align,
+				void *serialize_private,
+				unsigned int stack_pos_ctx, int *largest_align,
 				const char *fmt, va_list *args)
 {
 	ltt_serialize_cb cb = closure->callbacks[0];
 	closure->cb_idx = 0;
 	return (size_t)cb(NULL, 0, closure, serialize_private,
-				largest_align, fmt, args);
+				stack_pos_ctx, largest_align, fmt, args);
 }
 
 static notrace void ltt_write_event_data(struct ust_buffer *buf, size_t buf_offset,
 				struct ltt_serialize_closure *closure,
-				void *serialize_private, int largest_align,
+				void *serialize_private,
+				unsigned int stack_pos_ctx,
+				int largest_align,
 				const char *fmt, va_list *args)
 {
 	ltt_serialize_cb cb = closure->callbacks[0];
 	closure->cb_idx = 0;
 	buf_offset += ltt_align(buf_offset, largest_align);
-	cb(buf, buf_offset, closure, serialize_private, NULL, fmt, args);
+	cb(buf, buf_offset, closure, serialize_private, stack_pos_ctx, NULL,
+		fmt, args);
 }
@@ -604,7 +636,6 @@ notrace void ltt_vtrace(const struct marker *mdata, void *probe_data,
 	struct ust_channel *channel;
 	struct ust_trace *trace, *dest_trace = NULL;
 	struct ust_buffer *buf;
-	void *transport_data;
 	u64 tsc;
 	long buf_offset;
 	va_list args_copy;
@@ -613,6 +644,7 @@ notrace void ltt_vtrace(const struct marker *mdata, void *probe_data,
 	void *serialize_private = NULL;
 	int cpu;
 	unsigned int rflags;
+	unsigned int stack_pos_ctx;
 
 	/*
 	 * This test is useful for quickly exiting static tracing when no trace
@@ -622,11 +654,12 @@ notrace void ltt_vtrace(const struct marker *mdata, void *probe_data,
 		return;
 
 	rcu_read_lock(); //ust// rcu_read_lock_sched_notrace();
-//ust//	cpu = smp_processor_id();
 	cpu = ust_get_cpu();
-//ust//	__get_cpu_var(ltt_nesting)++;
-	/* FIXME: should nesting be per-cpu? */
-	ltt_nesting++;
+
+	/* Force volatile access. */
+	STORE_SHARED(ltt_nesting, LOAD_SHARED(ltt_nesting) + 1);
+	stack_pos_ctx = tracer_stack_pos;
+	barrier();
 
 	pdata = (struct ltt_active_marker *)probe_data;
 	eID = mdata->event_id;
@@ -646,7 +679,8 @@ notrace void ltt_vtrace(const struct marker *mdata, void *probe_data,
 		 */
 		largest_align = 1;	/* must be non-zero for ltt_align */
 		data_size = ltt_get_data_size(&closure, serialize_private,
-					&largest_align, fmt, &args_copy);
+					stack_pos_ctx, &largest_align,
+					fmt, &args_copy);
 		largest_align = min_t(int, largest_align, sizeof(void *));
 		va_end(args_copy);
@@ -688,10 +722,9 @@ notrace void ltt_vtrace(const struct marker *mdata, void *probe_data,
 		}
 
 		/* reserve space : header and data */
-		ret = ltt_reserve_slot(trace, channel, &transport_data,
-					data_size, &slot_size, &buf_offset,
-					&tsc, &rflags,
-					largest_align, cpu);
+		ret = ltt_reserve_slot(channel, trace, data_size, largest_align,
+					cpu, &buf, &slot_size, &buf_offset,
+					&tsc, &rflags);
 		if (unlikely(ret < 0))
 			continue; /* buffer full */
@@ -700,19 +733,22 @@ notrace void ltt_vtrace(const struct marker *mdata, void *probe_data,
 //ust//		buf = ((struct rchan *)channel->trans_channel_data)->buf[cpu];
 		buf = channel->buf[cpu];
 		/* Out-of-order write : header and data */
-		buf_offset = ltt_write_event_header(trace,
-					channel, buf, buf_offset,
+		buf_offset = ltt_write_event_header(channel, buf, buf_offset,
 					eID, data_size, tsc, rflags);
 		ltt_write_event_data(buf, buf_offset, &closure,
-					serialize_private,
-					largest_align, fmt, &args_copy);
+					serialize_private,
+					stack_pos_ctx, largest_align,
+					fmt, &args_copy);
 		va_end(args_copy);
 		/* Out-of-order commit */
 		ltt_commit_slot(channel, buf, buf_offset, data_size, slot_size);
-		DBG("just commited event at offset %ld and size %zd", buf_offset, slot_size);
+		DBG("just committed event (%s/%s) at offset %ld and size %zd", mdata->channel, mdata->name, buf_offset, slot_size);
 	}
-//ust//	__get_cpu_var(ltt_nesting)--;
-	ltt_nesting--;
+
+	barrier();
+	tracer_stack_pos = stack_pos_ctx;
+	STORE_SHARED(ltt_nesting, LOAD_SHARED(ltt_nesting) - 1);
+
 	rcu_read_unlock(); //ust// rcu_read_unlock_sched_notrace();
 }
@@ -808,7 +844,7 @@ int serialize_to_text(char *outbuf, int bufsize, const char *fmt, va_list ap)
 		outbuf = &false_buf;
 		bufsize = 1;
 	}
-	result = vsnprintf(outbuf, bufsize, new_fmt, ap);
+	result = ust_safe_vsnprintf(outbuf, bufsize, new_fmt, ap);
 	return result;
 }
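
Notes on the mechanisms this patch introduces follow.

The per-thread tracer stack relies on each event being serialized in two passes over the same format string: a sizing pass (buf == NULL) that pushes every string length onto tracer_stack, and a write pass that reads the same entries back. ltt_vtrace snapshots tracer_stack_pos into stack_pos_ctx before the sizing pass and restores it on exit, so a tracepoint fired from a signal handler that interrupts an event in flight grows the stack above the snapshot and shrinks it back before returning control, leaving the interrupted event's entries untouched. Below is a minimal self-contained sketch of that discipline; record_event, field_sizes and the simplified flow are illustrative stand-ins, not the patch's code.

#include <assert.h>
#include <stdio.h>
#include <string.h>

#define STACK_LEN	(4096 / sizeof(unsigned long))

/* Compiler barrier, same role as barrier() in the patch. */
#define barrier()	__asm__ __volatile__("" : : : "memory")

static __thread unsigned long field_sizes[STACK_LEN];
static __thread unsigned int stack_pos;

static void record_event(const char *s)
{
	unsigned int saved_pos = stack_pos;	/* snapshot, as ltt_vtrace does */

	/* Sizing pass: reserve a slot, then publish the measured length. */
	stack_pos++;
	assert(stack_pos <= STACK_LEN);
	barrier();
	field_sizes[saved_pos] = strlen(s) + 1;

	/* ... write pass would copy field_sizes[saved_pos] bytes here ... */

	/* Restore: a nested (signal) event sees the stack as it found it. */
	barrier();
	stack_pos = saved_pos;
}

int main(void)
{
	record_event("example");
	printf("stack_pos after event: %u\n", stack_pos);	/* prints 0 */
	return 0;
}

The barrier() calls here, as in the patch, are compiler barriers only: the sole concurrent reader is a signal handler on the same thread (the stack is __thread), so no CPU memory ordering is required, just protection against compiler reordering of the position update and the slot write.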
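
The switch from ust_buffers_write() to ust_buffers_strncpy(), fed with the length cached on the tracer stack, closes a time-of-check-to-time-of-use hole: the traced string may change between the sizing pass and the write pass, and recomputing strlen() at write time could overflow the slot reserved from the first measurement. A copy bounded by the cached length cannot overrun, whatever happens to the string in between. A sketch of the invariant, with write_string_field as a hypothetical stand-in for the buffer helper:

#include <stdio.h>
#include <string.h>

/*
 * Hypothetical stand-in for ust_buffers_strncpy(): cached_len is the
 * strlen(s) + 1 measured during the sizing pass; slot is the reserved
 * region of exactly that size.
 */
static void write_string_field(char *slot, const char *s, size_t cached_len)
{
	/*
	 * If s grew since it was measured, strncpy() stops after
	 * cached_len bytes and copies no terminator, so force one:
	 * the write can never overrun the reservation.
	 */
	strncpy(slot, s, cached_len);
	slot[cached_len - 1] = '\0';
}

int main(void)
{
	char slot[6];	/* sizing pass measured "hello": strlen + 1 == 6 */

	/* Pretend the string grew to "hello world" before the write pass. */
	write_string_field(slot, "hello world", sizeof(slot));
	printf("%s\n", slot);	/* prints "hello": truncated, no overrun */
	return 0;
}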
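
Finally, serialize_to_text() now formats through ust_safe_vsnprintf() from the newly added ust_snprintf.h. The patch itself does not state the motivation, but a plausible one is async-signal safety: glibc's vsnprintf may take locale locks or allocate memory, which is unsafe in the signal-handler context the rest of the patch hardens against, hence a self-contained formatter. The surrounding false_buf logic is untouched and depends on the C99 contract that (v)snprintf returns the length the full output would have had, so a caller can measure a format by writing into a one-byte dummy. Assuming ust_safe_vsnprintf honors the same contract, the pattern looks like this with the libc function:

#include <stdarg.h>
#include <stdio.h>

/* Format nothing, measure everything: bufsize 1 writes only the NUL. */
static int measure(const char *fmt, ...)
{
	char false_buf;
	va_list ap;
	int len;

	va_start(ap, fmt);
	len = vsnprintf(&false_buf, 1, fmt, ap);
	va_end(ap);
	return len;	/* full formatted length, NUL excluded */
}

int main(void)
{
	printf("%d\n", measure("ev=%d name=%s", 42, "probe"));	/* prints 16 */
	return 0;
}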