/* SPDX-License-Identifier: (GPL-2.0 or LGPL-2.1)
 *
 * lttng-context-callstack-legacy-impl.h
 *
 * LTTng callstack event context, legacy implementation. Targets
 * kernels and architectures not yet using the stacktrace common
 * infrastructure introduced in the upstream Linux kernel by commit
 * 214d8ca6ee "stacktrace: Provide common infrastructure" (merged in
 * Linux 5.2, then gradually introduced within architectures).
 *
 * Copyright (C) 2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (C) 2014 Francis Giraldeau <francis.giraldeau@gmail.com>
 */
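/*
 * Maximum number of stack entries captured per call stack, per nesting
 * level. Deeper stacks are truncated; a saturated capture is flagged with
 * a trailing ULONG_MAX delimiter by the size/record code below.
 */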
#define MAX_ENTRIES 128
enum lttng_cs_ctx_modes {
	CALLSTACK_KERNEL = 0,
	CALLSTACK_USER = 1,
	NR_CALLSTACK_MODES,
};
struct lttng_cs_dispatch {
	struct stack_trace stack_trace;
	unsigned long entries[MAX_ENTRIES];
};
struct lttng_cs {
	struct lttng_cs_dispatch dispatch[RING_BUFFER_MAX_NESTING];
};
/* Per-field private data, attached to the context field's priv pointer. */
struct field_data {
	struct lttng_cs __percpu *cs_percpu;
	enum lttng_cs_ctx_modes mode;
};
struct lttng_cs_type {
	const char *name;
	const char *save_func_name;
	void (*save_func)(struct stack_trace *trace);
};
static struct lttng_cs_type cs_types[] = {
	{
		.name		= "callstack_kernel",
		.save_func_name	= "save_stack_trace",
		.save_func	= NULL,
	},
	{
		.name		= "callstack_user",
		.save_func_name	= "save_stack_trace_user",
		.save_func	= NULL,
	},
};
static
const char *lttng_cs_ctx_mode_name(enum lttng_cs_ctx_modes mode)
{
	return cs_types[mode].name;
}
static
int init_type(enum lttng_cs_ctx_modes mode)
{
	unsigned long func;

	if (cs_types[mode].save_func)
		return 0;
	func = kallsyms_lookup_funcptr(cs_types[mode].save_func_name);
	if (!func) {
		printk(KERN_WARNING "LTTng: symbol lookup failed: %s\n",
				cs_types[mode].save_func_name);
		return -EINVAL;
	}
	cs_types[mode].save_func = (void *) func;
	return 0;
}
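/*
 * Note: init_type() must have resolved save_func for a given mode before
 * any event using that callstack context is traced, since
 * lttng_callstack_get_size() below calls save_func unconditionally.
 */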
static
void lttng_cs_set_init(struct lttng_cs __percpu *cs_set)
{
	int cpu, i;

	for_each_possible_cpu(cpu) {
		struct lttng_cs *cs;

		cs = per_cpu_ptr(cs_set, cpu);
		for (i = 0; i < RING_BUFFER_MAX_NESTING; i++) {
			struct lttng_cs_dispatch *dispatch;

			dispatch = &cs->dispatch[i];
			dispatch->stack_trace.entries = dispatch->entries;
			dispatch->stack_trace.max_entries = MAX_ENTRIES;
		}
	}
}
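/*
 * Each stack_trace is wired to its pre-allocated per-cpu entries array up
 * front, so no memory allocation is needed in tracing context.
 */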
/* Keep track of nesting inside userspace callstack context code */
DEFINE_PER_CPU(int, callstack_user_nesting);
static
struct stack_trace *stack_trace_context(struct lttng_ctx_field *field,
		struct lib_ring_buffer_ctx *ctx)
{
	int buffer_nesting, cs_user_nesting;
	struct lttng_cs *cs;
	struct field_data *fdata = field->priv;

	/*
	 * Do not gather the userspace callstack context when the event was
	 * triggered by the userspace callstack context saving mechanism.
	 */
	cs_user_nesting = per_cpu(callstack_user_nesting, ctx->cpu);

	if (fdata->mode == CALLSTACK_USER && cs_user_nesting >= 1)
		return NULL;

	/*
	 * get_cpu() is not required, preemption is already
	 * disabled while event is written.
	 *
	 * max nesting is checked in lib_ring_buffer_get_cpu().
	 * Check it again as a safety net.
	 */
	cs = per_cpu_ptr(fdata->cs_percpu, ctx->cpu);
	buffer_nesting = per_cpu(lib_ring_buffer_nesting, ctx->cpu) - 1;
	if (buffer_nesting >= RING_BUFFER_MAX_NESTING)
		return NULL;

	return &cs->dispatch[buffer_nesting].stack_trace;
}
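/*
 * On-buffer layout of the callstack context field: an aligned unsigned
 * int entry count, followed by an aligned array of that many unsigned
 * long addresses (a CTF sequence). The size and record callbacks below
 * must agree exactly on this layout.
 */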
/*
 * In order to reserve the correct size, the callstack is computed. The
 * resulting callstack is saved to be accessed in the record step.
 */
static
size_t lttng_callstack_get_size(size_t offset, struct lttng_ctx_field *field,
		struct lib_ring_buffer_ctx *ctx,
		struct lttng_channel *chan)
{
	struct stack_trace *trace;
	struct field_data *fdata = field->priv;
	size_t orig_offset = offset;

	/* Do not write data if no space is available. */
	trace = stack_trace_context(field, ctx);
	if (unlikely(!trace)) {
		offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned int));
		offset += sizeof(unsigned int);
		offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned long));
		return offset - orig_offset;
	}

	/* Reset the stack trace; no need to clear the memory. */
	trace->nr_entries = 0;

	if (fdata->mode == CALLSTACK_USER)
		++per_cpu(callstack_user_nesting, ctx->cpu);

	/* Do the real work and reserve space. */
	cs_types[fdata->mode].save_func(trace);

	if (fdata->mode == CALLSTACK_USER)
		per_cpu(callstack_user_nesting, ctx->cpu)--;

	/*
	 * Remove the final ULONG_MAX delimiter. If we cannot find it, add
	 * our own marker to show that the stack is incomplete. This is
	 * more compact for a trace.
	 */
	if (trace->nr_entries > 0
			&& trace->entries[trace->nr_entries - 1] == ULONG_MAX) {
		trace->nr_entries--;
	}
	offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned int));
	offset += sizeof(unsigned int);
	offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned long));
	offset += sizeof(unsigned long) * trace->nr_entries;
	/* Add our own ULONG_MAX delimiter to show incomplete stack. */
	if (trace->nr_entries == trace->max_entries)
		offset += sizeof(unsigned long);
	return offset - orig_offset;
}
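/*
 * The record step below writes exactly the bytes reserved by
 * lttng_callstack_get_size() above: same alignments, same entry count,
 * and the same optional ULONG_MAX delimiter for a saturated stack.
 */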
static
void lttng_callstack_record(struct lttng_ctx_field *field,
		struct lib_ring_buffer_ctx *ctx,
		struct lttng_channel *chan)
{
	struct stack_trace *trace = stack_trace_context(field, ctx);
	unsigned int nr_seq_entries;

	if (unlikely(!trace)) {
		/* Write an empty sequence (entry count of 0). */
		nr_seq_entries = 0;
		lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned int));
		chan->ops->event_write(ctx, &nr_seq_entries, sizeof(unsigned int));
		lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned long));
		return;
	}
	lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned int));
	nr_seq_entries = trace->nr_entries;
	/* Account for the extra delimiter entry when the stack is saturated. */
	if (trace->nr_entries == trace->max_entries)
		nr_seq_entries++;
	chan->ops->event_write(ctx, &nr_seq_entries, sizeof(unsigned int));
	lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned long));
	chan->ops->event_write(ctx, trace->entries,
			sizeof(unsigned long) * trace->nr_entries);
	/* Add our own ULONG_MAX delimiter to show incomplete stack. */
	if (trace->nr_entries == trace->max_entries) {
		unsigned long delim = ULONG_MAX;

		chan->ops->event_write(ctx, &delim, sizeof(unsigned long));
	}
}