/*
 * lttng-context-callstack.c
 *
 * LTTng callstack event context.
 *
 * Copyright (C) 2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (C) 2014 Francis Giraldeau <francis.giraldeau@gmail.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * The callstack context can be added to any kernel event. It records
 * either the kernel or the userspace callstack, up to a maximum depth.
 * The context is a CTF sequence, so it uses only the space required for
 * the actual number of callstack entries.
 *
 * Callstack buffers are allocated per CPU, one per interrupt nesting
 * level, up to a nesting depth of 4 (the same nesting limit as the ring
 * buffer). The context therefore uses a fixed amount of memory,
 * proportional to the number of CPUs:
 *
 *   size = cpus * nest * depth * sizeof(unsigned long)
 *
 * which amounts to about 800 bytes per CPU on a 64-bit host with a depth
 * of 25 (4 * 25 * 8 = 800). The buffers are allocated when the context is
 * created, so that no memory allocation is needed while tracing and the
 * tracing-time stack usage stays shallow.
 *
 * The kernel callstack is recovered using save_stack_trace(), and the
 * userspace callstack uses save_stack_trace_user(). Both rely on frame
 * pointers. Frame pointers are usually available for the kernel, but the
 * compiler option -fomit-frame-pointer, frequently used by popular Linux
 * distributions, may make the userspace callstack unreliable; this is a
 * known limitation of the approach. When frame pointers are not
 * available, no error is reported, but the callstack is empty. The
 * feature is still provided because it works well for runtime
 * environments that keep frame pointers. In the future, unwind support
 * and/or last branch records may provide a solution to this problem.
 *
 * Symbol name resolution is left to the trace reader.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/utsname.h>
#include <linux/stacktrace.h>
#include <linux/spinlock.h>
#include "lttng-events.h"
#include "wrapper/ringbuffer/backend.h"
#include "wrapper/ringbuffer/frontend.h"
#include "wrapper/vmalloc.h"
#include "wrapper/kallsyms.h"	/* kallsyms_lookup_funcptr() */
#include "lttng-tracer.h"

#define MAX_ENTRIES 25 /* BUG: saving more than 30 entries causes trace corruption */

struct lttng_cs {
        struct stack_trace items[RING_BUFFER_MAX_NESTING];
};

struct field_data {
        int mode;
        struct lttng_cs __percpu *cs_percpu;
};

struct lttng_cs_type {
        const char *name;
        const char *save_func_name;
        void (*save_func)(struct stack_trace *trace);
};

enum lttng_cs_ctx_modes {
        CALLSTACK_KERNEL = 0,
        CALLSTACK_USER = 1,
};

static struct lttng_cs_type cs_types[] = {
        {
                .name = "callstack_kernel",
                .save_func_name = "save_stack_trace",
                .save_func = NULL,
        },
        {
                .name = "callstack_user",
                .save_func_name = "save_stack_trace_user",
                .save_func = NULL,
        },
};

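/*
 * The save functions are resolved at run time through kallsyms rather
 * than called directly, most likely because they are not all exported
 * to modules (save_stack_trace_user() in particular).
 */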
static
int init_type(int mode)
{
        unsigned long func;

        if (cs_types[mode].save_func)
                return 0;
        func = kallsyms_lookup_funcptr(cs_types[mode].save_func_name);
        if (!func) {
                printk(KERN_WARNING "LTTng: symbol lookup failed: %s\n",
                                cs_types[mode].save_func_name);
                return -EINVAL;
        }
        cs_types[mode].save_func = (void *) func;
        return 0;
}

static
struct stack_trace *stack_trace_context(struct lttng_ctx_field *field,
                                        struct lib_ring_buffer_ctx *ctx)
{
        int nesting;
        struct lttng_cs *cs;
        struct field_data *fdata = field->private;

        /*
         * get_cpu() is not required, preemption is already
         * disabled while the event is written.
         *
         * max nesting is checked in lib_ring_buffer_get_cpu().
         * Check it again as a safety net.
         */
        cs = per_cpu_ptr(fdata->cs_percpu, ctx->cpu);
        nesting = per_cpu(lib_ring_buffer_nesting, ctx->cpu) - 1;
        if (nesting >= RING_BUFFER_MAX_NESTING) {
                return NULL;
        }
        return &cs->items[nesting];
}

/*
 * The callstack is captured here, at space reservation time, so that the
 * correct size can be reserved. The resulting callstack is saved and then
 * written out in the record step.
 */
static
size_t lttng_callstack_get_size(size_t offset, struct lttng_ctx_field *field,
                                struct lib_ring_buffer_ctx *ctx,
                                struct lttng_channel *chan)
{
        size_t size = 0;
        struct stack_trace *trace;
        struct field_data *fdata = field->private;

        /* do not write data if no space is available */
        trace = stack_trace_context(field, ctx);
        if (!trace)
                return 0;

        /* reset stack trace, no need to clear memory */
        trace->nr_entries = 0;

        /* do the real work and reserve space */
        cs_types[fdata->mode].save_func(trace);
        size += lib_ring_buffer_align(offset, lttng_alignof(unsigned int));
        size += sizeof(unsigned int);
        size += lib_ring_buffer_align(offset, lttng_alignof(unsigned long));
        size += sizeof(unsigned long) * trace->nr_entries;
        return size;
}

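/*
 * Write the callstack as laid out in __lttng_add_callstack_generic(): an
 * unsigned int entry count followed by nr_entries unsigned long addresses.
 */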
static
void lttng_callstack_record(struct lttng_ctx_field *field,
                        struct lib_ring_buffer_ctx *ctx,
                        struct lttng_channel *chan)
{
        struct stack_trace *trace = stack_trace_context(field, ctx);

        if (!trace)
                return;
        lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned int));
        chan->ops->event_write(ctx, &trace->nr_entries, sizeof(unsigned int));
        lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned long));
        chan->ops->event_write(ctx, trace->entries,
                        sizeof(unsigned long) * trace->nr_entries);
}

static
void field_data_free(struct field_data *fdata)
{
        int cpu, i;
        struct lttng_cs *cs;

        if (!fdata)
                return;
        if (fdata->cs_percpu) {
                for_each_possible_cpu(cpu) {
                        cs = per_cpu_ptr(fdata->cs_percpu, cpu);
                        for (i = 0; i < RING_BUFFER_MAX_NESTING; i++) {
                                kfree(cs->items[i].entries);
                        }
                }
                free_percpu(fdata->cs_percpu);
        }
        kfree(fdata);
}

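/*
 * Allocate the per-CPU callstack state: one struct lttng_cs per possible
 * CPU, each holding RING_BUFFER_MAX_NESTING pre-sized entry arrays of
 * "entries" unsigned long slots.
 */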
static
struct field_data *field_data_create(unsigned int entries, int type)
{
        int cpu, i;
        struct stack_trace *item;
        struct lttng_cs *cs;
        struct lttng_cs __percpu *cs_set;
        struct field_data *fdata;

        fdata = kzalloc(sizeof(*fdata), GFP_KERNEL);
        if (!fdata)
                return NULL;
        cs_set = alloc_percpu(struct lttng_cs);
        if (!cs_set)
                goto error_alloc;

        fdata->cs_percpu = cs_set;
        for_each_possible_cpu(cpu) {
                cs = per_cpu_ptr(cs_set, cpu);
                for (i = 0; i < RING_BUFFER_MAX_NESTING; i++) {
                        item = &cs->items[i];
                        item->entries = kzalloc(sizeof(unsigned long) * entries,
                                        GFP_KERNEL);
                        if (!item->entries) {
                                goto error_alloc;
                        }
                        item->max_entries = entries;
                }
        }
        fdata->mode = type;
        return fdata;

error_alloc:
        field_data_free(fdata);
        return NULL;
}

static
void lttng_callstack_destroy(struct lttng_ctx_field *field)
{
        struct field_data *fdata = field->private;

        field_data_free(fdata);
}

static
int __lttng_add_callstack_generic(struct lttng_ctx **ctx, int mode)
{
        const char *ctx_name = cs_types[mode].name;
        struct lttng_ctx_field *field;
        struct field_data *fdata;
        int ret;

        ret = init_type(mode);
        if (ret)
                return ret;
        field = lttng_append_context(ctx);
        if (!field)
                return -ENOMEM;
        if (lttng_find_context(*ctx, ctx_name)) {
                printk(KERN_WARNING "LTTng: %s context is already present\n",
                                ctx_name);
                ret = -EEXIST;
                goto error_find;
        }
        fdata = field_data_create(MAX_ENTRIES, mode);
        if (!fdata) {
                ret = -ENOMEM;
                goto error_create;
        }

        field->event_field.name = ctx_name;
        field->event_field.type.atype = atype_sequence;
        field->event_field.type.u.sequence.elem_type.atype = atype_integer;
        field->event_field.type.u.sequence.elem_type.u.basic.integer.size = sizeof(unsigned long) * CHAR_BIT;
        field->event_field.type.u.sequence.elem_type.u.basic.integer.alignment = lttng_alignof(unsigned long) * CHAR_BIT;
        field->event_field.type.u.sequence.elem_type.u.basic.integer.signedness = lttng_is_signed_type(unsigned long);
        field->event_field.type.u.sequence.elem_type.u.basic.integer.reverse_byte_order = 0;
        field->event_field.type.u.sequence.elem_type.u.basic.integer.base = 16;
        field->event_field.type.u.sequence.elem_type.u.basic.integer.encoding = lttng_encode_none;

        field->event_field.type.u.sequence.length_type.atype = atype_integer;
        field->event_field.type.u.sequence.length_type.u.basic.integer.size = sizeof(unsigned int) * CHAR_BIT;
        field->event_field.type.u.sequence.length_type.u.basic.integer.alignment = lttng_alignof(unsigned int) * CHAR_BIT;
        field->event_field.type.u.sequence.length_type.u.basic.integer.signedness = lttng_is_signed_type(unsigned int);
        field->event_field.type.u.sequence.length_type.u.basic.integer.reverse_byte_order = 0;
        field->event_field.type.u.sequence.length_type.u.basic.integer.base = 10;
        field->event_field.type.u.sequence.length_type.u.basic.integer.encoding = lttng_encode_none;

        field->get_size_arg = lttng_callstack_get_size;
        field->record = lttng_callstack_record;
        field->private = fdata;
        field->destroy = lttng_callstack_destroy;
        wrapper_vmalloc_sync_all();
        printk(KERN_DEBUG "LTTng: added context %s\n", ctx_name);
        return 0;

error_create:
        field_data_free(fdata);
error_find:
        lttng_remove_context_field(ctx, field);
        return ret;
}

/**
 * lttng_add_callstack_to_ctx - add callstack event context
 *
 * @ctx: the lttng_ctx pointer to initialize
 * @type: the context type
 *
 * Supported callstack types:
 *	LTTNG_KERNEL_CONTEXT_CALLSTACK_KERNEL
 *		Records the callstack of the kernel
 *	LTTNG_KERNEL_CONTEXT_CALLSTACK_USER
 *		Records the callstack of the userspace program (from the kernel)
 *
 * Return 0 for success, or an error code.
 */
int lttng_add_callstack_to_ctx(struct lttng_ctx **ctx, int type)
{
        switch (type) {
        case LTTNG_KERNEL_CONTEXT_CALLSTACK_KERNEL:
                return __lttng_add_callstack_generic(ctx, CALLSTACK_KERNEL);
        case LTTNG_KERNEL_CONTEXT_CALLSTACK_USER:
                return __lttng_add_callstack_generic(ctx, CALLSTACK_USER);
        default:
                return -EINVAL;
        }
}
EXPORT_SYMBOL_GPL(lttng_add_callstack_to_ctx);
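
/*
 * Usage sketch (illustrative only; assumes a caller such as the LTTng ABI
 * layer holding a channel pointer named "chan" with a "ctx" member):
 *
 *	ret = lttng_add_callstack_to_ctx(&chan->ctx,
 *			LTTNG_KERNEL_CONTEXT_CALLSTACK_KERNEL);
 *	if (ret)
 *		return ret;
 */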

MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Francis Giraldeau");
MODULE_DESCRIPTION("Linux Trace Toolkit Callstack Support");