/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
 *
 * lttng-context-callstack-stackwalk-impl.h
 *
 * LTTng callstack event context, stackwalk implementation. Targets
 * kernels and architectures using the stacktrace common infrastructure
 * introduced in the upstream Linux kernel by commit 214d8ca6ee
 * "stacktrace: Provide common infrastructure" (merged in Linux 5.2,
 * then gradually introduced within architectures).
 *
 * Copyright (C) 2014-2019 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (C) 2014 Francis Giraldeau <francis.giraldeau@gmail.com>
 */

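/*
 * For reference, the stackwalk API this file builds on (declared in
 * <linux/stacktrace.h> on kernels providing the common infrastructure)
 * has the following prototypes, which the function pointers below mirror:
 *
 *   unsigned int stack_trace_save(unsigned long *store, unsigned int size,
 *				   unsigned int skipnr);
 *   unsigned int stack_trace_save_user(unsigned long *store,
 *				   unsigned int size);
 */
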
#define MAX_ENTRIES 128

enum lttng_cs_ctx_modes {
	CALLSTACK_KERNEL = 0,
	CALLSTACK_USER = 1,
	NR_CALLSTACK_MODES,
};

struct lttng_stack_trace {
	unsigned long entries[MAX_ENTRIES];
	unsigned int nr_entries;
};

struct lttng_cs {
	struct lttng_stack_trace stack_trace[RING_BUFFER_MAX_NESTING];
};

struct field_data {
	struct lttng_cs __percpu *cs_percpu;
	enum lttng_cs_ctx_modes mode;
};

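/*
 * Pointers to the kernel's stack_trace_save() and stack_trace_save_user()
 * helpers. They are resolved at runtime through kallsyms by the
 * init_type_*() functions below rather than called directly.
 */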
static
unsigned int (*save_func_kernel)(unsigned long *store, unsigned int size,
		unsigned int skipnr);
static
unsigned int (*save_func_user)(unsigned long *store, unsigned int size);

static
const char *lttng_cs_ctx_mode_name(enum lttng_cs_ctx_modes mode)
{
	switch (mode) {
	case CALLSTACK_KERNEL:
		return "callstack_kernel";
	case CALLSTACK_USER:
		return "callstack_user";
	default:
		return NULL;
	}
}

static
const char *lttng_cs_ctx_mode_length_name(enum lttng_cs_ctx_modes mode)
{
	switch (mode) {
	case CALLSTACK_KERNEL:
		return "_callstack_kernel_length";
	case CALLSTACK_USER:
		return "_callstack_user_length";
	default:
		return NULL;
	}
}

static
int init_type_callstack_kernel(void)
{
	unsigned long func;
	const char *func_name = "stack_trace_save";

	if (save_func_kernel)
		return 0;
	func = kallsyms_lookup_funcptr(func_name);
	if (!func) {
		printk(KERN_WARNING "LTTng: symbol lookup failed: %s\n",
				func_name);
		return -EINVAL;
	}
	save_func_kernel = (void *) func;
	return 0;
}

static
int init_type_callstack_user(void)
{
	unsigned long func;
	const char *func_name = "stack_trace_save_user";

	if (save_func_user)
		return 0;
	func = kallsyms_lookup_funcptr(func_name);
	if (!func) {
		printk(KERN_WARNING "LTTng: symbol lookup failed: %s\n",
				func_name);
		return -EINVAL;
	}
	save_func_user = (void *) func;
	return 0;
}

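/*
 * Resolve the save function for the requested callstack mode. Returns
 * -EINVAL if the corresponding symbol is not available on the running
 * kernel.
 */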
static
int init_type(enum lttng_cs_ctx_modes mode)
{
	switch (mode) {
	case CALLSTACK_KERNEL:
		return init_type_callstack_kernel();
	case CALLSTACK_USER:
		return init_type_callstack_user();
	default:
		return -EINVAL;
	}
}

static
void lttng_cs_set_init(struct lttng_cs __percpu *cs_set)
{
	/* Nothing to initialize for the stackwalk implementation. */
}

/* Keep track of nesting inside userspace callstack context code */
DEFINE_PER_CPU(int, callstack_user_nesting);

static
struct lttng_stack_trace *stack_trace_context(struct lttng_ctx_field *field,
				struct lib_ring_buffer_ctx *ctx)
{
	int buffer_nesting, cs_user_nesting;
	struct lttng_cs *cs;
	struct field_data *fdata = field->priv;

	/*
	 * Do not gather the userspace callstack context when the event was
	 * triggered by the userspace callstack context saving mechanism.
	 */
	cs_user_nesting = per_cpu(callstack_user_nesting, ctx->cpu);

	if (fdata->mode == CALLSTACK_USER && cs_user_nesting >= 1)
		return NULL;

	/*
	 * get_cpu() is not required, preemption is already
	 * disabled while the event is written.
	 *
	 * max nesting is checked in lib_ring_buffer_get_cpu().
	 * Check it again as a safety net.
	 */
	cs = per_cpu_ptr(fdata->cs_percpu, ctx->cpu);
	buffer_nesting = per_cpu(lib_ring_buffer_nesting, ctx->cpu) - 1;
	if (buffer_nesting >= RING_BUFFER_MAX_NESTING)
		return NULL;

	return &cs->stack_trace[buffer_nesting];
}

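/*
 * On the trace side, the callstack context is emitted as two fields: an
 * unsigned int element count, followed by a sequence of unsigned long
 * return addresses. When the trace fills all MAX_ENTRIES slots, an extra
 * ULONG_MAX delimiter is accounted for and written to flag the stack as
 * incomplete.
 */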
static
size_t lttng_callstack_length_get_size(size_t offset, struct lttng_ctx_field *field,
			struct lib_ring_buffer_ctx *ctx,
			struct lttng_channel *chan)
{
	size_t orig_offset = offset;

	offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned int));
	offset += sizeof(unsigned int);
	return offset - orig_offset;
}

/*
 * In order to reserve the correct size, the callstack is computed. The
 * resulting callstack is saved to be accessed in the record step.
 */
static
size_t lttng_callstack_sequence_get_size(size_t offset, struct lttng_ctx_field *field,
			struct lib_ring_buffer_ctx *ctx,
			struct lttng_channel *chan)
{
	struct lttng_stack_trace *trace;
	struct field_data *fdata = field->priv;
	size_t orig_offset = offset;

	/* Do not compute a callstack when it cannot be gathered in this context. */
	trace = stack_trace_context(field, ctx);
	if (unlikely(!trace)) {
		offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned long));
		return offset - orig_offset;
	}

	/* reset stack trace, no need to clear memory */
	trace->nr_entries = 0;

	switch (fdata->mode) {
	case CALLSTACK_KERNEL:
		/* do the real work and reserve space */
		trace->nr_entries = save_func_kernel(trace->entries,
						MAX_ENTRIES, 0);
		break;
	case CALLSTACK_USER:
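		/*
		 * Bump the per-cpu nesting count so that an event triggered
		 * while walking the user stack does not gather a user
		 * callstack of its own (see stack_trace_context()).
		 */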
		++per_cpu(callstack_user_nesting, ctx->cpu);
		/* do the real work and reserve space */
		trace->nr_entries = save_func_user(trace->entries,
						MAX_ENTRIES);
		per_cpu(callstack_user_nesting, ctx->cpu)--;
		break;
	default:
		WARN_ON_ONCE(1);
	}

	/*
	 * If the array is filled, add our own marker to show that the
	 * stack is incomplete.
	 */
	offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned long));
	offset += sizeof(unsigned long) * trace->nr_entries;
	/* Add our own ULONG_MAX delimiter to show incomplete stack. */
	if (trace->nr_entries == MAX_ENTRIES)
		offset += sizeof(unsigned long);
	return offset - orig_offset;
}

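/*
 * The record callbacks below replay what was captured in the get_size
 * step: the length field first, then the sequence of addresses.
 */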
static
void lttng_callstack_length_record(struct lttng_ctx_field *field,
			struct lib_ring_buffer_ctx *ctx,
			struct lttng_channel *chan)
{
	struct lttng_stack_trace *trace = stack_trace_context(field, ctx);
	unsigned int nr_seq_entries;

	lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned int));
	if (unlikely(!trace)) {
		nr_seq_entries = 0;
	} else {
		nr_seq_entries = trace->nr_entries;
		if (trace->nr_entries == MAX_ENTRIES)
			nr_seq_entries++;
	}
	chan->ops->event_write(ctx, &nr_seq_entries, sizeof(unsigned int));
}

static
void lttng_callstack_sequence_record(struct lttng_ctx_field *field,
			struct lib_ring_buffer_ctx *ctx,
			struct lttng_channel *chan)
{
	struct lttng_stack_trace *trace = stack_trace_context(field, ctx);

	lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned long));
	if (unlikely(!trace)) {
		return;
	}
	chan->ops->event_write(ctx, trace->entries,
			sizeof(unsigned long) * trace->nr_entries);
	/* Add our own ULONG_MAX delimiter to show incomplete stack. */
	if (trace->nr_entries == MAX_ENTRIES) {
		unsigned long delim = ULONG_MAX;

		chan->ops->event_write(ctx, &delim, sizeof(unsigned long));
	}
}