lttng-modules.git: src/lttng-context-callstack-stackwalk-impl.h (as of commit "Move alignment into event write callback")
/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
 *
 * lttng-context-callstack-stackwalk-impl.h
 *
 * LTTng callstack event context, stackwalk implementation. Targets
 * kernels and architectures using the stacktrace common infrastructure
 * introduced in the upstream Linux kernel by commit 214d8ca6ee
 * "stacktrace: Provide common infrastructure" (merged in Linux 5.2,
 * then gradually introduced within architectures).
 *
 * Copyright (C) 2014-2019 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (C) 2014 Francis Giraldeau <francis.giraldeau@gmail.com>
 */

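/*
 * Maximum number of stack addresses recorded per capture. When the array
 * fills up, an extra ULONG_MAX delimiter is emitted at record time to flag
 * the stack as incomplete.
 */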
#define MAX_ENTRIES 128

enum lttng_cs_ctx_modes {
	CALLSTACK_KERNEL = 0,
	CALLSTACK_USER = 1,
	NR_CALLSTACK_MODES,
};

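/* A single saved callstack: fixed-size array of return addresses. */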
struct lttng_stack_trace {
	unsigned long entries[MAX_ENTRIES];
	unsigned int nr_entries;
};

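/*
 * Per-cpu storage: one stack trace slot per ring buffer nesting level, so
 * nested event records do not overwrite each other's callstack.
 */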
struct lttng_cs {
	struct lttng_stack_trace stack_trace[RING_BUFFER_MAX_NESTING];
};

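/* Private data of a callstack context field: per-cpu storage and mode. */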
struct field_data {
	struct lttng_cs __percpu *cs_percpu;
	enum lttng_cs_ctx_modes mode;
};

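/*
 * Pointers to the kernel's stack_trace_save() and stack_trace_save_user()
 * helpers, resolved through kallsyms (see the init_type_*() functions below).
 */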
static
unsigned int (*save_func_kernel)(unsigned long *store, unsigned int size,
				unsigned int skipnr);
static
unsigned int (*save_func_user)(unsigned long *store, unsigned int size);

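/*
 * Lazily resolve the stack-saving helpers by symbol name. Return -EINVAL if
 * the symbol cannot be found (e.g. when the kernel configuration or
 * architecture does not provide stack_trace_save_user()).
 */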
static
int init_type_callstack_kernel(void)
{
	unsigned long func;
	const char *func_name = "stack_trace_save";

	if (save_func_kernel)
		return 0;
	func = kallsyms_lookup_funcptr(func_name);
	if (!func) {
		printk(KERN_WARNING "LTTng: symbol lookup failed: %s\n",
				func_name);
		return -EINVAL;
	}
	save_func_kernel = (void *) func;
	return 0;
}

static
int init_type_callstack_user(void)
{
	unsigned long func;
	const char *func_name = "stack_trace_save_user";

	if (save_func_user)
		return 0;
	func = kallsyms_lookup_funcptr(func_name);
	if (!func) {
		printk(KERN_WARNING "LTTng: symbol lookup failed: %s\n",
				func_name);
		return -EINVAL;
	}
	save_func_user = (void *) func;
	return 0;
}

static
int init_type(enum lttng_cs_ctx_modes mode)
{
	switch (mode) {
	case CALLSTACK_KERNEL:
		return init_type_callstack_kernel();
	case CALLSTACK_USER:
		return init_type_callstack_user();
	default:
		return -EINVAL;
	}
}

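/*
 * Nothing to pre-initialize in the per-cpu storage for the stackwalk
 * implementation; this hook is kept so both callstack implementations
 * expose the same interface.
 */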
static
void lttng_cs_set_init(struct lttng_cs __percpu *cs_set)
{
}

/* Keep track of nesting inside userspace callstack context code */
DEFINE_PER_CPU(int, callstack_user_nesting);

/*
 * Note: these callbacks expect to be invoked with preemption disabled across
 * get_size and record due to their use of a per-cpu stack.
 */
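/*
 * Return the per-cpu stack trace slot matching the current ring buffer
 * nesting level, or NULL when the callstack must not be recorded (nested
 * userspace callstack capture, or nesting beyond RING_BUFFER_MAX_NESTING).
 */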
static
struct lttng_stack_trace *stack_trace_context(struct field_data *fdata, int cpu)
{
	int buffer_nesting, cs_user_nesting;
	struct lttng_cs *cs;

	/*
	 * Do not gather the userspace callstack context when the event was
	 * triggered by the userspace callstack context saving mechanism.
	 */
	cs_user_nesting = per_cpu(callstack_user_nesting, cpu);

	if (fdata->mode == CALLSTACK_USER && cs_user_nesting >= 1)
		return NULL;

	/*
	 * get_cpu() is not required: preemption is already disabled while
	 * the event is written.
	 *
	 * Max nesting is checked in lib_ring_buffer_get_cpu(). Check it
	 * again here as a safety net.
	 */
	cs = per_cpu_ptr(fdata->cs_percpu, cpu);
	buffer_nesting = per_cpu(lib_ring_buffer_nesting, cpu) - 1;
	if (buffer_nesting >= RING_BUFFER_MAX_NESTING)
		return NULL;

	return &cs->stack_trace[buffer_nesting];
}

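/*
 * Reserve space for the "length" field of the callstack sequence: a single
 * unsigned int element count, including its alignment padding.
 */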
static
size_t lttng_callstack_length_get_size(void *priv, struct lttng_kernel_probe_ctx *probe_ctx, size_t offset)
{
	size_t orig_offset = offset;

	offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned int));
	offset += sizeof(unsigned int);
	return offset - orig_offset;
}

/*
 * In order to reserve the correct size, the callstack is computed. The
 * resulting callstack is saved to be accessed in the record step.
 */
static
size_t lttng_callstack_sequence_get_size(void *priv, struct lttng_kernel_probe_ctx *probe_ctx, size_t offset)
{
	struct lttng_stack_trace *trace;
	struct field_data *fdata = (struct field_data *) priv;
	size_t orig_offset = offset;
	int cpu = smp_processor_id();

	/* Do not gather a callstack if no per-cpu slot is available. */
	trace = stack_trace_context(fdata, cpu);
	if (unlikely(!trace)) {
		offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned long));
		return offset - orig_offset;
	}

	/* Reset the stack trace; no need to clear the memory. */
	trace->nr_entries = 0;

	switch (fdata->mode) {
	case CALLSTACK_KERNEL:
		/* do the real work and reserve space */
		trace->nr_entries = save_func_kernel(trace->entries,
						MAX_ENTRIES, 0);
		break;
	case CALLSTACK_USER:
		++per_cpu(callstack_user_nesting, cpu);
		/* do the real work and reserve space */
		trace->nr_entries = save_func_user(trace->entries,
						MAX_ENTRIES);
		per_cpu(callstack_user_nesting, cpu)--;
		break;
	default:
		WARN_ON_ONCE(1);
	}

	/*
	 * If the array is filled, add our own marker to show that the
	 * stack is incomplete.
	 */
	offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned long));
	offset += sizeof(unsigned long) * trace->nr_entries;
	/* Add our own ULONG_MAX delimiter to show incomplete stack. */
	if (trace->nr_entries == MAX_ENTRIES)
		offset += sizeof(unsigned long);
	return offset - orig_offset;
}

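/*
 * Record the element count computed at reserve time: 0 when no callstack was
 * gathered, and one extra element for the ULONG_MAX delimiter when the stack
 * trace was truncated at MAX_ENTRIES.
 */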
static
void lttng_callstack_length_record(void *priv, struct lttng_kernel_probe_ctx *probe_ctx,
		struct lttng_kernel_ring_buffer_ctx *ctx,
		struct lttng_kernel_channel_buffer *chan)
{
	int cpu = ctx->priv.reserve_cpu;
	struct field_data *fdata = (struct field_data *) priv;
	struct lttng_stack_trace *trace = stack_trace_context(fdata, cpu);
	unsigned int nr_seq_entries;

	if (unlikely(!trace)) {
		nr_seq_entries = 0;
	} else {
		nr_seq_entries = trace->nr_entries;
		if (trace->nr_entries == MAX_ENTRIES)
			nr_seq_entries++;
	}
	chan->ops->event_write(ctx, &nr_seq_entries, sizeof(unsigned int), lttng_alignof(unsigned int));
}

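/*
 * Write the stack entries saved at reserve time into the event payload,
 * followed by a ULONG_MAX delimiter when the trace filled the array and is
 * therefore incomplete.
 */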
static
void lttng_callstack_sequence_record(void *priv, struct lttng_kernel_probe_ctx *probe_ctx,
		struct lttng_kernel_ring_buffer_ctx *ctx,
		struct lttng_kernel_channel_buffer *chan)
{
	int cpu = ctx->priv.reserve_cpu;
	struct field_data *fdata = (struct field_data *) priv;
	struct lttng_stack_trace *trace = stack_trace_context(fdata, cpu);
	unsigned int nr_seq_entries;

	if (unlikely(!trace)) {
		/* We need to align even if there are 0 elements. */
		lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned long));
		return;
	}
	nr_seq_entries = trace->nr_entries;
	if (trace->nr_entries == MAX_ENTRIES)
		nr_seq_entries++;
	chan->ops->event_write(ctx, trace->entries,
			sizeof(unsigned long) * trace->nr_entries, lttng_alignof(unsigned long));
	/* Add our own ULONG_MAX delimiter to show incomplete stack. */
	if (trace->nr_entries == MAX_ENTRIES) {
		unsigned long delim = ULONG_MAX;

		chan->ops->event_write(ctx, &delim, sizeof(unsigned long), 1);
	}
}