Introduce callstack legacy implementation header

/* SPDX-License-Identifier: (GPL-2.0 or LGPL-2.1)
 *
 * lttng-context-callstack-legacy-impl.h
 *
 * LTTng callstack event context, legacy implementation. Targets
 * kernels and architectures not yet using the stacktrace common
 * infrastructure introduced in the upstream Linux kernel by commit
 * 214d8ca6ee "stacktrace: Provide common infrastructure" (merged in
 * Linux 5.2, then gradually adopted by the architectures).
 *
 * Copyright (C) 2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (C) 2014 Francis Giraldeau <francis.giraldeau@gmail.com>
 */
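
/*
 * For reference, the legacy kernel API this file programs against, as
 * declared by <linux/stacktrace.h> before Linux 5.2 (reproduced here as
 * a sketch for context only; the real declarations come from the kernel
 * headers, and save_stack_trace_user() is only available on
 * architectures with CONFIG_USER_STACKTRACE_SUPPORT):
 *
 *   struct stack_trace {
 *           unsigned int nr_entries, max_entries;
 *           unsigned long *entries;
 *           int skip;       // input: number of entries to skip
 *   };
 *
 *   void save_stack_trace(struct stack_trace *trace);
 *   void save_stack_trace_user(struct stack_trace *trace);
 */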

#define MAX_ENTRIES 128

enum lttng_cs_ctx_modes {
	CALLSTACK_KERNEL = 0,
	CALLSTACK_USER = 1,
	NR_CALLSTACK_MODES,
};

/* Per-nesting-level callstack capture buffer. */
struct lttng_cs_dispatch {
	struct stack_trace stack_trace;
	unsigned long entries[MAX_ENTRIES];
};

/* One capture buffer per ring-buffer nesting level, per CPU. */
struct lttng_cs {
	struct lttng_cs_dispatch dispatch[RING_BUFFER_MAX_NESTING];
};

/* Private data attached to the callstack context field. */
struct field_data {
	struct lttng_cs __percpu *cs_percpu;
	enum lttng_cs_ctx_modes mode;
};
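
/*
 * Illustration only: a minimal sketch (hypothetical helper name, not
 * part of this header) of the initialization the per-CPU storage needs
 * before any capture is attempted: each stack_trace must point into its
 * own dispatch entries array.
 */
static
void lttng_cs_init_dispatch_sketch(struct lttng_cs __percpu *cs_set)
{
	int cpu, i;

	for_each_possible_cpu(cpu) {
		struct lttng_cs *cs = per_cpu_ptr(cs_set, cpu);

		for (i = 0; i < RING_BUFFER_MAX_NESTING; i++) {
			struct lttng_cs_dispatch *dispatch = &cs->dispatch[i];

			dispatch->stack_trace.entries = dispatch->entries;
			dispatch->stack_trace.max_entries = MAX_ENTRIES;
		}
	}
}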

struct lttng_cs_type {
	const char *name;
	const char *save_func_name;
	void (*save_func)(struct stack_trace *trace);
};

static struct lttng_cs_type cs_types[] = {
	{
		.name = "callstack_kernel",
		.save_func_name = "save_stack_trace",
		.save_func = NULL,	/* resolved by init_type() */
	},
	{
		.name = "callstack_user",
		.save_func_name = "save_stack_trace_user",
		.save_func = NULL,	/* resolved by init_type() */
	},
};

/*
 * The save functions are resolved at run time through kallsyms rather
 * than linked against directly, since they are not necessarily exported
 * to modules.
 */
static
int init_type(enum lttng_cs_ctx_modes mode)
{
	unsigned long func;

	if (cs_types[mode].save_func)
		return 0;
	func = kallsyms_lookup_funcptr(cs_types[mode].save_func_name);
	if (!func) {
		printk(KERN_WARNING "LTTng: symbol lookup failed: %s\n",
				cs_types[mode].save_func_name);
		return -EINVAL;
	}
	cs_types[mode].save_func = (void *) func;
	return 0;
}

/* Keep track of nesting inside userspace callstack context code */
DEFINE_PER_CPU(int, callstack_user_nesting);
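
/*
 * Illustrative scenario (an assumption about the guarded recursion, not
 * stated in the original): walking userspace stack pages can raise a
 * page fault, the fault path can itself fire an instrumented event, and
 * that event's context would then try to save the userspace callstack
 * again. The per-CPU counter breaks this loop.
 */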

static
struct stack_trace *stack_trace_context(struct lttng_ctx_field *field,
		struct lib_ring_buffer_ctx *ctx)
{
	int buffer_nesting, cs_user_nesting;
	struct lttng_cs *cs;
	struct field_data *fdata = field->priv;

	/*
	 * Do not gather the userspace callstack context when the event was
	 * triggered by the userspace callstack context saving mechanism.
	 */
	cs_user_nesting = per_cpu(callstack_user_nesting, ctx->cpu);

	if (fdata->mode == CALLSTACK_USER && cs_user_nesting >= 1)
		return NULL;

	/*
	 * get_cpu() is not required: preemption is already disabled while
	 * the event is written.
	 *
	 * Max nesting is checked in lib_ring_buffer_get_cpu(). Check it
	 * again here as a safety net.
	 */
	cs = per_cpu_ptr(fdata->cs_percpu, ctx->cpu);
	buffer_nesting = per_cpu(lib_ring_buffer_nesting, ctx->cpu) - 1;
	if (buffer_nesting >= RING_BUFFER_MAX_NESTING)
		return NULL;

	return &cs->dispatch[buffer_nesting].stack_trace;
}

/*
 * The callstack is captured at size-reservation time so that the
 * correct amount of space can be reserved. The saved callstack is then
 * read back in the record step.
 */
static
size_t lttng_callstack_get_size(size_t offset, struct lttng_ctx_field *field,
		struct lib_ring_buffer_ctx *ctx,
		struct lttng_channel *chan)
{
	struct stack_trace *trace;
	struct field_data *fdata = field->priv;
	size_t orig_offset = offset;

	/* If no storage slot is available, reserve space for an empty sequence. */
	trace = stack_trace_context(field, ctx);
	if (unlikely(!trace)) {
		offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned int));
		offset += sizeof(unsigned int);
		offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned long));
		return offset - orig_offset;
	}

	/* Reset the stack trace; there is no need to clear the memory. */
	trace->nr_entries = 0;

	if (fdata->mode == CALLSTACK_USER)
		++per_cpu(callstack_user_nesting, ctx->cpu);

	/* Do the real work and reserve space. */
	cs_types[fdata->mode].save_func(trace);

	if (fdata->mode == CALLSTACK_USER)
		per_cpu(callstack_user_nesting, ctx->cpu)--;

	/*
	 * Remove the final ULONG_MAX delimiter. If we cannot find it, add
	 * our own marker to show that the stack is incomplete. This is
	 * more compact for a trace.
	 */
	if (trace->nr_entries > 0
			&& trace->entries[trace->nr_entries - 1] == ULONG_MAX) {
		trace->nr_entries--;
	}
	offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned int));
	offset += sizeof(unsigned int);
	offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned long));
	offset += sizeof(unsigned long) * trace->nr_entries;
	/* Add our own ULONG_MAX delimiter to show incomplete stack. */
	if (trace->nr_entries == trace->max_entries)
		offset += sizeof(unsigned long);
	return offset - orig_offset;
}
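
/*
 * Resulting on-trace layout (a sketch, assuming a 64-bit kernel, the
 * kernel mode, and LTTng's usual sequence naming convention; field
 * names here are illustrative): a length followed by a sequence of
 * return addresses, roughly equivalent to
 *
 *   struct {
 *           uint32_t _callstack_kernel_length;
 *           uint64_t callstack_kernel[_callstack_kernel_length];
 *   };
 *
 * with a trailing ULONG_MAX entry appended when the stack was truncated
 * at MAX_ENTRIES.
 */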

static
void lttng_callstack_record(struct lttng_ctx_field *field,
		struct lib_ring_buffer_ctx *ctx,
		struct lttng_channel *chan)
{
	struct stack_trace *trace = stack_trace_context(field, ctx);
	unsigned int nr_seq_entries;

	if (unlikely(!trace)) {
		nr_seq_entries = 0;
		lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned int));
		chan->ops->event_write(ctx, &nr_seq_entries, sizeof(unsigned int));
		lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned long));
		return;
	}
	lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned int));
	nr_seq_entries = trace->nr_entries;
	if (trace->nr_entries == trace->max_entries)
		nr_seq_entries++;
	chan->ops->event_write(ctx, &nr_seq_entries, sizeof(unsigned int));
	lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned long));
	chan->ops->event_write(ctx, trace->entries,
			sizeof(unsigned long) * trace->nr_entries);
	/* Add our own ULONG_MAX delimiter to show incomplete stack. */
	if (trace->nr_entries == trace->max_entries) {
		unsigned long delim = ULONG_MAX;

		chan->ops->event_write(ctx, &delim, sizeof(unsigned long));
	}
}
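
/*
 * Wiring sketch (illustration only; the helper name is hypothetical,
 * and the lttng_ctx_field members used, get_size_arg, record and priv,
 * are assumptions based on how this file's callbacks take ctx and chan
 * arguments): how a caller such as the callstack context registration
 * code could plug the two callbacks above into a context field.
 */
static
int lttng_cs_add_field_sketch(struct lttng_ctx_field *field,
		enum lttng_cs_ctx_modes mode)
{
	struct field_data *fdata;
	int ret;

	ret = init_type(mode);	/* resolve save_stack_trace[_user]() */
	if (ret)
		return ret;
	fdata = kzalloc(sizeof(*fdata), GFP_KERNEL);
	if (!fdata)
		return -ENOMEM;
	fdata->cs_percpu = alloc_percpu(struct lttng_cs);
	if (!fdata->cs_percpu) {
		kfree(fdata);
		return -ENOMEM;
	}
	lttng_cs_init_dispatch_sketch(fdata->cs_percpu);
	fdata->mode = mode;

	field->event_field.name = cs_types[mode].name;
	/* ... describe the sequence type, set the destroy callback ... */
	field->get_size_arg = lttng_callstack_get_size;
	field->record = lttng_callstack_record;
	field->priv = fdata;
	return 0;
}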