Introduce callstack stackwalk implementation header
[lttng-modules.git] / lttng-context-callstack-stackwalk-impl.h
/* SPDX-License-Identifier: (GPL-2.0 or LGPL-2.1)
 *
 * lttng-context-callstack-stackwalk-impl.h
 *
 * LTTng callstack event context, stackwalk implementation. Targets
 * kernels and architectures using the stacktrace common infrastructure
 * introduced in the upstream Linux kernel by commit 214d8ca6ee
 * "stacktrace: Provide common infrastructure" (merged in Linux 5.2,
 * then gradually introduced within architectures).
 *
 * Copyright (C) 2014-2019 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (C) 2014 Francis Giraldeau <francis.giraldeau@gmail.com>
 */

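/* Maximum number of addresses recorded per call stack. */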
#define MAX_ENTRIES 128

enum lttng_cs_ctx_modes {
        CALLSTACK_KERNEL = 0,
        CALLSTACK_USER = 1,
        NR_CALLSTACK_MODES,
};

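/* One captured call stack: up to MAX_ENTRIES addresses and their count. */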
struct lttng_stack_trace {
        unsigned long entries[MAX_ENTRIES];
        unsigned int nr_entries;
};

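/* Per-CPU callstack storage: one stack trace slot per ring buffer nesting level. */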
struct lttng_cs {
        struct lttng_stack_trace stack_trace[RING_BUFFER_MAX_NESTING];
};

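/*
 * Private data of the callstack context field: the per-CPU storage and the
 * capture mode (kernel or user space).
 */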
struct field_data {
        struct lttng_cs __percpu *cs_percpu;
        enum lttng_cs_ctx_modes mode;
};

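/*
 * Stack capture is delegated to the kernel stacktrace API. The function
 * pointers below match the prototypes introduced by the common
 * infrastructure:
 *
 *   unsigned int stack_trace_save(unsigned long *store, unsigned int size,
 *                                 unsigned int skipnr);
 *   unsigned int stack_trace_save_user(unsigned long *store,
 *                                      unsigned int size);
 *
 * They are resolved by symbol name at initialization time (see
 * init_type_callstack_kernel() and init_type_callstack_user() below).
 */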
static
unsigned int (*save_func_kernel)(unsigned long *store, unsigned int size,
                unsigned int skipnr);
static
unsigned int (*save_func_user)(unsigned long *store, unsigned int size);

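/* Context field name associated with each callstack mode. */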
static
const char *lttng_cs_ctx_mode_name(enum lttng_cs_ctx_modes mode)
{
        switch (mode) {
        case CALLSTACK_KERNEL:
                return "callstack_kernel";
        case CALLSTACK_USER:
                return "callstack_user";
        default:
                return NULL;
        }
}

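/*
 * Resolve stack_trace_save() through kallsyms at initialization time.
 * Fails with -EINVAL when the symbol cannot be found on the running
 * kernel.
 */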
static
int init_type_callstack_kernel(void)
{
        unsigned long func;
        const char *func_name = "stack_trace_save";

        if (save_func_kernel)
                return 0;
        func = kallsyms_lookup_funcptr(func_name);
        if (!func) {
                printk(KERN_WARNING "LTTng: symbol lookup failed: %s\n",
                                func_name);
                return -EINVAL;
        }
        save_func_kernel = (void *) func;
        return 0;
}

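/*
 * Resolve stack_trace_save_user() through kallsyms at initialization
 * time. Fails with -EINVAL when the symbol cannot be found on the
 * running kernel.
 */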
static
int init_type_callstack_user(void)
{
        unsigned long func;
        const char *func_name = "stack_trace_save_user";

        if (save_func_user)
                return 0;
        func = kallsyms_lookup_funcptr(func_name);
        if (!func) {
                printk(KERN_WARNING "LTTng: symbol lookup failed: %s\n",
                                func_name);
                return -EINVAL;
        }
        save_func_user = (void *) func;
        return 0;
}

static
int init_type(enum lttng_cs_ctx_modes mode)
{
        switch (mode) {
        case CALLSTACK_KERNEL:
                return init_type_callstack_kernel();
        case CALLSTACK_USER:
                return init_type_callstack_user();
        default:
                return -EINVAL;
        }
}

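/*
 * Nothing to initialize per-CPU for the stackwalk implementation: the
 * entries array is filled directly by the save functions when an event
 * is recorded.
 */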
static
void lttng_cs_set_init(struct lttng_cs __percpu *cs_set)
{
}

/* Keep track of nesting inside userspace callstack context code */
DEFINE_PER_CPU(int, callstack_user_nesting);

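/*
 * Return the per-CPU stack trace slot matching the current ring buffer
 * nesting level, or NULL when the callstack must not be gathered (user
 * callstack recursion, or nesting beyond RING_BUFFER_MAX_NESTING).
 */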
static
struct lttng_stack_trace *stack_trace_context(struct lttng_ctx_field *field,
                struct lib_ring_buffer_ctx *ctx)
{
        int buffer_nesting, cs_user_nesting;
        struct lttng_cs *cs;
        struct field_data *fdata = field->priv;

        /*
         * Do not gather the userspace callstack context when the event was
         * triggered by the userspace callstack context saving mechanism.
         */
        cs_user_nesting = per_cpu(callstack_user_nesting, ctx->cpu);

        if (fdata->mode == CALLSTACK_USER && cs_user_nesting >= 1)
                return NULL;

        /*
         * get_cpu() is not required, preemption is already
         * disabled while the event is written.
         *
         * Max nesting is checked in lib_ring_buffer_get_cpu().
         * Check it again as a safety net.
         */
        cs = per_cpu_ptr(fdata->cs_percpu, ctx->cpu);
        buffer_nesting = per_cpu(lib_ring_buffer_nesting, ctx->cpu) - 1;
        if (buffer_nesting >= RING_BUFFER_MAX_NESTING)
                return NULL;

        return &cs->stack_trace[buffer_nesting];
}

/*
 * In order to reserve the correct size, the callstack is computed. The
 * resulting callstack is saved to be accessed in the record step.
 */
static
size_t lttng_callstack_get_size(size_t offset, struct lttng_ctx_field *field,
                struct lib_ring_buffer_ctx *ctx,
                struct lttng_channel *chan)
{
        struct lttng_stack_trace *trace;
        struct field_data *fdata = field->priv;
        size_t orig_offset = offset;

        /* do not write data if no space is available */
        trace = stack_trace_context(field, ctx);
        if (unlikely(!trace)) {
                offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned int));
                offset += sizeof(unsigned int);
                offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned long));
                return offset - orig_offset;
        }

        /* reset stack trace, no need to clear memory */
        trace->nr_entries = 0;

        switch (fdata->mode) {
        case CALLSTACK_KERNEL:
                /* do the real work and reserve space */
                trace->nr_entries = save_func_kernel(trace->entries,
                                MAX_ENTRIES, 0);
                break;
        case CALLSTACK_USER:
                ++per_cpu(callstack_user_nesting, ctx->cpu);
                /* do the real work and reserve space */
                trace->nr_entries = save_func_user(trace->entries,
                                MAX_ENTRIES);
                per_cpu(callstack_user_nesting, ctx->cpu)--;
                break;
        default:
                WARN_ON_ONCE(1);
        }

        /*
         * Reserve space for the sequence length, the addresses, and, if the
         * array is filled, for our own marker showing that the stack is
         * incomplete.
         */
        offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned int));
        offset += sizeof(unsigned int);
        offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned long));
        offset += sizeof(unsigned long) * trace->nr_entries;
        /* Add our own ULONG_MAX delimiter to show incomplete stack. */
        if (trace->nr_entries == MAX_ENTRIES)
                offset += sizeof(unsigned long);
        return offset - orig_offset;
}

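/*
 * Record step: write the callstack gathered during the size computation
 * as a length-prefixed sequence of addresses. A trailing ULONG_MAX entry
 * is appended when the trace was truncated at MAX_ENTRIES.
 */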
static
void lttng_callstack_record(struct lttng_ctx_field *field,
                struct lib_ring_buffer_ctx *ctx,
                struct lttng_channel *chan)
{
        struct lttng_stack_trace *trace = stack_trace_context(field, ctx);
        unsigned int nr_seq_entries;

        if (unlikely(!trace)) {
                nr_seq_entries = 0;
                lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned int));
                chan->ops->event_write(ctx, &nr_seq_entries, sizeof(unsigned int));
                lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned long));
                return;
        }
        lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned int));
        nr_seq_entries = trace->nr_entries;
        if (trace->nr_entries == MAX_ENTRIES)
                nr_seq_entries++;
        chan->ops->event_write(ctx, &nr_seq_entries, sizeof(unsigned int));
        lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned long));
        chan->ops->event_write(ctx, trace->entries,
                        sizeof(unsigned long) * trace->nr_entries);
        /* Add our own ULONG_MAX delimiter to show incomplete stack. */
        if (trace->nr_entries == MAX_ENTRIES) {
                unsigned long delim = ULONG_MAX;

                chan->ops->event_write(ctx, &delim, sizeof(unsigned long));
        }
}