/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
 *
 * lttng-context-callstack.c
 *
 * LTTng callstack event context.
 *
 * Copyright (C) 2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (C) 2014 Francis Giraldeau <francis.giraldeau@gmail.com>
 *
 * The callstack context can be added to any kernel event. It records
 * either the kernel or the userspace callstack, up to a max depth. The
 * context is a CTF sequence, such that it uses only the space required
 * for the number of callstack entries.
 *
 * It allocates callstack buffers per-CPU, for up to 4 interrupt nesting
 * levels. This nesting limit is the same as defined in the ring buffer.
 * It therefore uses a fixed amount of memory, proportional to the number
 * of CPUs:
 *
 *   size = cpus * nest * depth * sizeof(unsigned long)
 *
 * With a depth of 128, this is 4096 bytes per CPU on a 64-bit host.
 * The allocation is done at initialization to avoid memory allocation
 * overhead while tracing, using a shallow stack.
 *
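 * As an illustrative example (not part of the original comment), with
 * nest = 4, depth = 128 and sizeof(unsigned long) = 8, each CPU needs
 * 4 * 128 * 8 = 4096 bytes, so a hypothetical 8-CPU machine would
 * reserve 8 * 4096 = 32768 bytes in total.
 *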
 * The kernel callstack is recovered using save_stack_trace(), and the
 * userspace callstack uses save_stack_trace_user(). They rely on frame
 * pointers. These are usually available for the kernel, but the
 * compiler option -fomit-frame-pointer, frequently used in popular Linux
 * distributions, may cause the userspace callstack to be unreliable;
 * this is a known limitation of this approach. If frame pointers are not
 * available, no error is produced, but the callstack will be empty. We
 * still provide the feature, because it works well for runtime
 * environments that have frame pointers. In the future, unwind support
 * and/or last branch record may provide a solution to this problem.
 *
 * The symbol name resolution is left to the trace reader.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/utsname.h>
#include <linux/stacktrace.h>
#include <linux/spinlock.h>
#include <ringbuffer/backend.h>
#include <ringbuffer/frontend.h>
#include <lttng/events.h>
#include <lttng/events-internal.h>
#include <lttng/tracer.h>
#include <lttng/endian.h>
#include "wrapper/vmalloc.h"

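/*
 * Two stack-walking implementations are provided: the stackwalk variant
 * when the kernel exposes CONFIG_ARCH_STACKWALK, and a legacy variant
 * based on the older stack trace API otherwise.
 */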
#ifdef CONFIG_ARCH_STACKWALK
#include "lttng-context-callstack-stackwalk-impl.h"
#else
#include "lttng-context-callstack-legacy-impl.h"
#endif

#define NR_FIELDS	2

static
void field_data_free(struct field_data *fdata)
{
	if (!fdata)
		return;
	free_percpu(fdata->cs_percpu);
	kfree(fdata);
}

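/*
 * Allocate the private field data and its per-CPU callstack buffers for
 * the requested mode (kernel or user callstack).
 */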
static
struct field_data *field_data_create(enum lttng_cs_ctx_modes mode)
{
	struct lttng_cs __percpu *cs_set;
	struct field_data *fdata;

	fdata = kzalloc(sizeof(*fdata), GFP_KERNEL);
	if (!fdata)
		return NULL;
	cs_set = alloc_percpu(struct lttng_cs);
	if (!cs_set)
		goto error_alloc;
	lttng_cs_set_init(cs_set);
	fdata->cs_percpu = cs_set;
	fdata->mode = mode;
	return fdata;

error_alloc:
	field_data_free(fdata);
	return NULL;
}

static
void lttng_callstack_sequence_destroy(void *priv)
{
	struct field_data *fdata = priv;

	field_data_free(fdata);
}

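/*
 * Each callstack context contributes two fields (NR_FIELDS): an unsigned
 * integer holding the number of recorded entries, followed by a sequence
 * of addresses whose length is given by that integer.
 */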
static const struct lttng_kernel_event_field *event_fields_kernel[NR_FIELDS] = {
	lttng_kernel_static_event_field("_callstack_kernel_length",
		lttng_kernel_static_type_integer_from_type(unsigned int, __BYTE_ORDER, 10),
		false, false),
	lttng_kernel_static_event_field("callstack_kernel",
		lttng_kernel_static_type_sequence(NULL,
			lttng_kernel_static_type_integer_from_type(unsigned long, __BYTE_ORDER, 16),
			0, none),
		false, false),
};

static const struct lttng_kernel_event_field *event_fields_user[NR_FIELDS] = {
	lttng_kernel_static_event_field("_callstack_user_length",
		lttng_kernel_static_type_integer_from_type(unsigned int, __BYTE_ORDER, 10),
		false, false),
	lttng_kernel_static_event_field("callstack_user",
		lttng_kernel_static_type_sequence(NULL,
			lttng_kernel_static_type_integer_from_type(unsigned long, __BYTE_ORDER, 16),
			0, none),
		false, false),
};

static
const struct lttng_kernel_event_field **lttng_cs_event_fields(enum lttng_cs_ctx_modes mode)
{
	switch (mode) {
	case CALLSTACK_KERNEL:
		return event_fields_kernel;
	case CALLSTACK_USER:
		return event_fields_user;
	default:
		return NULL;
	}
}

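/*
 * Append the two callstack context fields (length, then sequence) to the
 * context, sharing the same private field data. The sequence field owns
 * the private data and frees it on destroy.
 */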
static
int __lttng_add_callstack_generic(struct lttng_kernel_ctx **ctx,
		enum lttng_cs_ctx_modes mode)
{
	const struct lttng_kernel_event_field **event_fields;
	struct lttng_kernel_ctx_field ctx_field;
	struct field_data *fdata;
	int ret, i;

	ret = init_type(mode);
	if (ret)
		return ret;
	event_fields = lttng_cs_event_fields(mode);
	if (!event_fields) {
		return -EINVAL;
	}
	for (i = 0; i < NR_FIELDS; i++) {
		if (lttng_kernel_find_context(*ctx, event_fields[i]->name))
			return -EEXIST;
	}
	fdata = field_data_create(mode);
	if (!fdata) {
		ret = -ENOMEM;
		goto error_create;
	}
	memset(&ctx_field, 0, sizeof(ctx_field));
	ctx_field.event_field = event_fields[0];
	ctx_field.get_size = lttng_callstack_length_get_size;
	ctx_field.record = lttng_callstack_length_record;
	ctx_field.priv = fdata;
	ret = lttng_kernel_context_append(ctx, &ctx_field);
	if (ret) {
		ret = -ENOMEM;
		goto error_append0;
	}

	memset(&ctx_field, 0, sizeof(ctx_field));
	ctx_field.event_field = event_fields[1];
	ctx_field.get_size = lttng_callstack_sequence_get_size;
	ctx_field.record = lttng_callstack_sequence_record;
	ctx_field.destroy = lttng_callstack_sequence_destroy;
	ctx_field.priv = fdata;
	ret = lttng_kernel_context_append(ctx, &ctx_field);
	if (ret) {
		ret = -ENOMEM;
		goto error_append1;
	}
	return 0;

error_append1:
	lttng_kernel_context_remove_last(ctx);
error_append0:
	field_data_free(fdata);
error_create:
	return ret;
}

/**
 * lttng_add_callstack_to_ctx - add callstack event context
 *
 * @ctx: the lttng_ctx pointer to initialize
 * @type: the context type
 *
 * Supported callstack types:
 *	LTTNG_KERNEL_ABI_CONTEXT_CALLSTACK_KERNEL
 *	    Records the callstack of the kernel
 *	LTTNG_KERNEL_ABI_CONTEXT_CALLSTACK_USER
 *	    Records the callstack of the userspace program (from the kernel)
 *
 * Returns 0 on success, or a negative error code.
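 *
 * A minimal usage sketch (illustrative assumption, not code from this
 * file; the NULL-initialized context pointer and error handling are
 * hypothetical):
 *
 *	struct lttng_kernel_ctx *ctx = NULL;
 *	int ret;
 *
 *	ret = lttng_add_callstack_to_ctx(&ctx,
 *			LTTNG_KERNEL_ABI_CONTEXT_CALLSTACK_KERNEL);
 *	if (ret)
 *		return ret;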
 */
int lttng_add_callstack_to_ctx(struct lttng_kernel_ctx **ctx, int type)
{
	switch (type) {
	case LTTNG_KERNEL_ABI_CONTEXT_CALLSTACK_KERNEL:
		return __lttng_add_callstack_generic(ctx, CALLSTACK_KERNEL);
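	/* The userspace callstack context is only supported on x86. */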
#ifdef CONFIG_X86
	case LTTNG_KERNEL_ABI_CONTEXT_CALLSTACK_USER:
		return __lttng_add_callstack_generic(ctx, CALLSTACK_USER);
#endif
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(lttng_add_callstack_to_ctx);