[lttng-modules.git] / lttng-context-callstack.c
index 4dd984be40fee4e3ef6ba9bbbc2d153244f50ee1..432fadb652f349566ae1eee7e646ed29f83b507b 100644
@@ -32,7 +32,7 @@
  *
  *   size = cpus * nest * depth * sizeof(unsigned long)
  *
- * Which is about 800 bytes per CPU on 64-bit host and a depth of 25.
+ * Which is 4096 bytes per CPU on a 64-bit host with a depth of 128.
  * The allocation is done at the initialization to avoid memory
  * allocation overhead while tracing, using a shallow stack.
  *
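For reference, the footprint quoted above follows directly from the formula: four ring-buffer nesting levels times 128 entries times 8-byte longs gives 4096 bytes per CPU. A minimal userspace sketch of that arithmetic (the value 4 stands in for RING_BUFFER_MAX_NESTING and is an assumption here, not something read from this file):

	#include <stdio.h>
	#include <stddef.h>

	int main(void)
	{
		const size_t nest = 4;		/* assumed RING_BUFFER_MAX_NESTING */
		const size_t depth = 128;	/* MAX_ENTRIES after this change */

		/* size per CPU = nest * depth * sizeof(unsigned long) = 4 * 128 * 8 = 4096 on 64-bit */
		printf("%zu bytes per CPU\n", nest * depth * sizeof(unsigned long));
		return 0;
	}
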
@@ -62,7 +62,7 @@
 #include "wrapper/vmalloc.h"
 #include "lttng-tracer.h"
 
-#define MAX_ENTRIES 25
+#define MAX_ENTRIES 128
 
 enum lttng_cs_ctx_modes {
        CALLSTACK_KERNEL = 0,
@@ -120,14 +120,26 @@ int init_type(enum lttng_cs_ctx_modes mode)
        return 0;
 }
 
+/* Keep track of nesting inside userspace callstack context code */
+DEFINE_PER_CPU(int, callstack_user_nesting);
+
 static
 struct stack_trace *stack_trace_context(struct lttng_ctx_field *field,
                                        struct lib_ring_buffer_ctx *ctx)
 {
-       int nesting;
+       int buffer_nesting, cs_user_nesting;
        struct lttng_cs *cs;
        struct field_data *fdata = field->priv;
 
+       /*
+        * Do not gather the userspace callstack context when the event was
+        * triggered by the userspace callstack context saving mechanism.
+        */
+       cs_user_nesting = per_cpu(callstack_user_nesting, ctx->cpu);
+
+       if (fdata->mode == CALLSTACK_USER && cs_user_nesting >= 1)
+               return NULL;
+
        /*
         * get_cpu() is not required, preemption is already
         * disabled while event is written.
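The per-CPU counter added above is a small re-entrancy guard: lttng_callstack_get_size() (further down in this diff) raises it around the userspace stack walk, so any event emitted while that walk is in progress, for instance a traced fault taken while reading the user stack, finds a non-zero count here and gets no userspace callstack context. A condensed sketch of the pattern in plain C, outside the kernel and ring-buffer APIs (all names below are illustrative, not part of the module):

	#include <stdio.h>

	static int user_cs_nesting;	/* stands in for the per-CPU callstack_user_nesting counter */

	static int save_user_callstack(void)
	{
		/* Nested invocation: we were triggered by the walk itself, so skip it,
		 * mirroring stack_trace_context() returning NULL above. */
		if (user_cs_nesting >= 1)
			return 0;

		user_cs_nesting++;
		/* ... walk the userspace stack here; any event it raises sees nesting >= 1 ... */
		user_cs_nesting--;
		return 1;
	}

	int main(void)
	{
		printf("saved: %d\n", save_user_callstack());	/* prints "saved: 1" */
		return 0;
	}
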
@@ -136,11 +148,11 @@ struct stack_trace *stack_trace_context(struct lttng_ctx_field *field,
         * Check it again as a safety net.
         */
        cs = per_cpu_ptr(fdata->cs_percpu, ctx->cpu);
-       nesting = per_cpu(lib_ring_buffer_nesting, ctx->cpu) - 1;
-       if (nesting >= RING_BUFFER_MAX_NESTING) {
+       buffer_nesting = per_cpu(lib_ring_buffer_nesting, ctx->cpu) - 1;
+       if (buffer_nesting >= RING_BUFFER_MAX_NESTING)
                return NULL;
-       }
-       return &cs->dispatch[nesting].stack_trace;
+
+       return &cs->dispatch[buffer_nesting].stack_trace;
 }
 
 /*
@@ -152,24 +164,31 @@ size_t lttng_callstack_get_size(size_t offset, struct lttng_ctx_field *field,
                                struct lib_ring_buffer_ctx *ctx,
                                struct lttng_channel *chan)
 {
-       size_t size = 0;
        struct stack_trace *trace;
        struct field_data *fdata = field->priv;
+       size_t orig_offset = offset;
 
        /* do not write data if no space is available */
        trace = stack_trace_context(field, ctx);
        if (unlikely(!trace)) {
-               size += lib_ring_buffer_align(offset, lttng_alignof(unsigned int));
-               size += sizeof(unsigned int);
-               size += lib_ring_buffer_align(offset, lttng_alignof(unsigned long));
-               return size;
+               offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned int));
+               offset += sizeof(unsigned int);
+               offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned long));
+               return offset - orig_offset;
        }
 
        /* reset stack trace, no need to clear memory */
        trace->nr_entries = 0;
 
+       if (fdata->mode == CALLSTACK_USER)
+               ++per_cpu(callstack_user_nesting, ctx->cpu);
+
        /* do the real work and reserve space */
        cs_types[fdata->mode].save_func(trace);
+
+       if (fdata->mode == CALLSTACK_USER)
+               per_cpu(callstack_user_nesting, ctx->cpu)--;
+
        /*
         * Remove final ULONG_MAX delimiter. If we cannot find it, add
         * our own marker to show that the stack is incomplete. This is
@@ -179,14 +198,14 @@ size_t lttng_callstack_get_size(size_t offset, struct lttng_ctx_field *field,
                        && trace->entries[trace->nr_entries - 1] == ULONG_MAX) {
                trace->nr_entries--;
        }
-       size += lib_ring_buffer_align(offset, lttng_alignof(unsigned int));
-       size += sizeof(unsigned int);
-       size += lib_ring_buffer_align(offset, lttng_alignof(unsigned long));
-       size += sizeof(unsigned long) * trace->nr_entries;
+       offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned int));
+       offset += sizeof(unsigned int);
+       offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned long));
+       offset += sizeof(unsigned long) * trace->nr_entries;
        /* Add our own ULONG_MAX delimiter to show incomplete stack. */
        if (trace->nr_entries == trace->max_entries)
-               size += sizeof(unsigned long);
-       return size;
+               offset += sizeof(unsigned long);
+       return offset - orig_offset;
 }
 
 static
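The lttng_callstack_get_size() change above replaces the size accumulator with a running offset. In the old code both lib_ring_buffer_align() calls were given the same, never-advanced offset, so the padding in front of the unsigned long entries was computed at the wrong position and could be over- or under-counted; advancing offset and returning offset - orig_offset keeps every alignment relative to the actual write position. A standalone sketch of the same layout computation (align_pad() is an illustrative stand-in for lib_ring_buffer_align(), which returns the number of padding bytes):

	#include <stdio.h>
	#include <stddef.h>

	/* Padding needed to bring "offset" up to "align" (a power of two). */
	static size_t align_pad(size_t offset, size_t align)
	{
		return (align - (offset & (align - 1))) & (align - 1);
	}

	/* Size of one callstack field written at "offset": an unsigned int count, then the entries. */
	static size_t callstack_field_size(size_t offset, unsigned int nr_entries, int add_marker)
	{
		size_t orig_offset = offset;

		offset += align_pad(offset, __alignof__(unsigned int));
		offset += sizeof(unsigned int);			/* number of entries */
		offset += align_pad(offset, __alignof__(unsigned long));
		offset += sizeof(unsigned long) * nr_entries;	/* the entries themselves */
		if (add_marker)
			offset += sizeof(unsigned long);	/* ULONG_MAX marker for a truncated stack */
		return offset - orig_offset;
	}

	int main(void)
	{
		/* 8-aligned start, 3 entries, no marker: 4 (count) + 4 (padding) + 24 = 32 bytes. */
		printf("%zu\n", callstack_field_size(0, 3, 0));
		return 0;
	}
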
@@ -348,8 +367,10 @@ int lttng_add_callstack_to_ctx(struct lttng_ctx **ctx, int type)
        switch (type) {
        case LTTNG_KERNEL_CONTEXT_CALLSTACK_KERNEL:
                return __lttng_add_callstack_generic(ctx, CALLSTACK_KERNEL);
+#ifdef CONFIG_X86
        case LTTNG_KERNEL_CONTEXT_CALLSTACK_USER:
                return __lttng_add_callstack_generic(ctx, CALLSTACK_USER);
+#endif
        default:
                return -EINVAL;
        }