X-Git-Url: http://git.lttng.org/?a=blobdiff_plain;f=lttng-context-callstack.c;h=2fa5f52dbf19929ff2686a1e594dc5d22cbd724a;hb=a2b0231a9ae996cc69e48d75c338ad7d081a0f85;hp=bdc1761ee705669ad8d7c95c8edeede0b434cb34;hpb=12c1f012312009f2af95757306aa51af792f1bf6;p=lttng-modules.git

diff --git a/lttng-context-callstack.c b/lttng-context-callstack.c
index bdc1761e..2fa5f52d 100644
--- a/lttng-context-callstack.c
+++ b/lttng-context-callstack.c
@@ -32,7 +32,7 @@
  *
  *   size = cpus * nest * depth * sizeof(unsigned long)
  *
- * Which is about 800 bytes per CPU on 64-bit host and a depth of 25.
+ * Which is 4096 bytes per CPU on 64-bit host and a depth of 128.
  * The allocation is done at the initialization to avoid memory
  * allocation overhead while tracing, using a shallow stack.
  *
@@ -62,7 +62,7 @@
 #include "wrapper/vmalloc.h"
 #include "lttng-tracer.h"
 
-#define MAX_ENTRIES 25
+#define MAX_ENTRIES 128
 
 enum lttng_cs_ctx_modes {
 	CALLSTACK_KERNEL = 0,
@@ -120,14 +120,26 @@ int init_type(enum lttng_cs_ctx_modes mode)
 	return 0;
 }
 
+/* Keep track of nesting inside userspace callstack context code */
+DEFINE_PER_CPU(int, callstack_user_nesting);
+
 static
 struct stack_trace *stack_trace_context(struct lttng_ctx_field *field,
 					struct lib_ring_buffer_ctx *ctx)
 {
-	int nesting;
+	int buffer_nesting, cs_user_nesting;
 	struct lttng_cs *cs;
 	struct field_data *fdata = field->priv;
 
+	/*
+	 * Do not gather the userspace callstack context when the event was
+	 * triggered by the userspace callstack context saving mechanism.
+	 */
+	cs_user_nesting = per_cpu(callstack_user_nesting, ctx->cpu);
+
+	if (fdata->mode == CALLSTACK_USER && cs_user_nesting >= 1)
+		return NULL;
+
 	/*
 	 * get_cpu() is not required, preemption is already
 	 * disabled while event is written.
@@ -136,11 +148,11 @@ struct stack_trace *stack_trace_context(struct lttng_ctx_field *field,
 	 * Check it again as a safety net.
 	 */
 	cs = per_cpu_ptr(fdata->cs_percpu, ctx->cpu);
-	nesting = per_cpu(lib_ring_buffer_nesting, ctx->cpu) - 1;
-	if (nesting >= RING_BUFFER_MAX_NESTING) {
+	buffer_nesting = per_cpu(lib_ring_buffer_nesting, ctx->cpu) - 1;
+	if (buffer_nesting >= RING_BUFFER_MAX_NESTING)
 		return NULL;
-	}
-	return &cs->dispatch[nesting].stack_trace;
+
+	return &cs->dispatch[buffer_nesting].stack_trace;
 }
 
 /*
@@ -168,8 +180,15 @@ size_t lttng_callstack_get_size(size_t offset, struct lttng_ctx_field *field,
 	/* reset stack trace, no need to clear memory */
 	trace->nr_entries = 0;
 
+	if (fdata->mode == CALLSTACK_USER)
+		++per_cpu(callstack_user_nesting, ctx->cpu);
+
 	/* do the real work and reserve space */
 	cs_types[fdata->mode].save_func(trace);
+
+	if (fdata->mode == CALLSTACK_USER)
+		per_cpu(callstack_user_nesting, ctx->cpu)--;
+
 	/*
 	 * Remove final ULONG_MAX delimiter. If we cannot find it, add
 	 * our own marker to show that the stack is incomplete. This is
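
Note on the change above. With MAX_ENTRIES raised from 25 to 128, the per-CPU figure in the header comment follows from the formula the comment quotes: the 4096-byte value implies 4 nesting levels * 128 entries * 8 bytes per unsigned long, just as the old 800-byte value implied 4 * 25 * 8. The functional part of the patch is the per-CPU callstack_user_nesting counter: per the added comment, saving a userspace callstack can itself trigger an event through the userspace callstack saving mechanism (presumably, for instance, a traced fault while walking user stack pages), and the counter lets stack_trace_context() return NULL for the userspace callstack context while such a save is in progress. Below is a minimal sketch of that recursion guard, modeled in plain userspace C; it is not part of the patch and not kernel code: the per-CPU variable is replaced by a thread-local counter, and record_event() and save_user_callstack() are hypothetical stand-ins for the LTTng event path and the mode's save_func.

/*
 * Illustrative userspace model of the recursion guard (hypothetical names).
 * A real per-CPU counter is approximated with a thread-local variable,
 * which is reasonable here since the kernel code runs with preemption
 * disabled while an event is written.
 */
#include <stdio.h>

static _Thread_local int callstack_user_nesting;

static void record_event(const char *name);

static void save_user_callstack(void)
{
	/* Walking the user stack may itself trigger a traced event. */
	record_event("nested_event");
}

static void record_event(const char *name)
{
	printf("event: %s\n", name);

	/*
	 * Mirror of stack_trace_context(): skip the userspace callstack
	 * context when we are already inside its saving mechanism.
	 */
	if (callstack_user_nesting >= 1) {
		printf("  userspace callstack skipped (nested)\n");
		return;
	}

	/*
	 * Mirror of lttng_callstack_get_size(): bracket the save with an
	 * increment/decrement of the nesting counter.
	 */
	callstack_user_nesting++;
	save_user_callstack();
	callstack_user_nesting--;
	printf("  userspace callstack captured\n");
}

int main(void)
{
	record_event("top_level_event");
	return 0;
}

Bracketing save_func() with the increment and decrement, as lttng_callstack_get_size() does in the last hunk, bounds the recursion to one level: the nested event is still recorded, only its userspace callstack context is skipped.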