0b466363235b2a91e307645dbd1b2cef107e9419
[lttng-modules.git] / src / lttng-context-perf-counters.c
1 /* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
2 *
3 * lttng-context-perf-counters.c
4 *
5 * LTTng performance monitoring counters (perf-counters) integration module.
6 *
7 * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 */
9
10 #include <linux/module.h>
11 #include <linux/slab.h>
12 #include <linux/perf_event.h>
13 #include <linux/list.h>
14 #include <linux/string.h>
15 #include <linux/cpu.h>
16 #include <lttng/events.h>
17 #include <lttng/events-internal.h>
18 #include <ringbuffer/frontend_types.h>
19 #include <wrapper/vmalloc.h>
20 #include <wrapper/perf.h>
21 #include <lttng/tracer.h>
22
23 static
24 size_t perf_counter_get_size(void *priv, struct lttng_kernel_probe_ctx *probe_ctx, size_t offset)
25 {
26 size_t size = 0;
27
28 size += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
29 size += sizeof(uint64_t);
30 return size;
31 }
32
33 static
34 void perf_counter_record(void *priv, struct lttng_kernel_probe_ctx *probe_ctx,
35 struct lttng_kernel_ring_buffer_ctx *ctx,
36 struct lttng_kernel_channel_buffer *chan)
37 {
38 struct lttng_perf_counter_field *perf_field = (struct lttng_perf_counter_field *) priv;
39 struct perf_event *event;
40 uint64_t value;
41
42 event = perf_field->e[ctx->priv.reserve_cpu];
43 if (likely(event)) {
44 if (unlikely(event->state == PERF_EVENT_STATE_ERROR)) {
45 value = 0;
46 } else {
47 event->pmu->read(event);
48 value = local64_read(&event->count);
49 }
50 } else {
51 /*
52 * Perf chooses not to be clever and not to support enabling a
53 * perf counter before the cpu is brought up. Therefore, we need
54 * to support having events coming (e.g. scheduler events)
55 * before the counter is setup. Write an arbitrary 0 in this
56 * case.
57 */
58 value = 0;
59 }
60 lib_ring_buffer_align_ctx(ctx, lttng_alignof(value));
61 chan->ops->event_write(ctx, &value, sizeof(value));
62 }
63
#if defined(CONFIG_PERF_EVENTS) && (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,1,0))
/*
 * Intentionally empty overflow handler: counters are sampled
 * synchronously from perf_counter_record(), so overflow notifications
 * are ignored. A handler must still be supplied to
 * perf_event_create_kernel_counter().
 */
static
void overflow_callback(struct perf_event *event,
		struct perf_sample_data *data,
		struct pt_regs *regs)
{
}
#else
/*
 * Same empty handler for kernels < 3.1, whose overflow callback takes
 * an additional `nmi` argument.
 */
static
void overflow_callback(struct perf_event *event, int nmi,
		struct perf_sample_data *data,
		struct pt_regs *regs)
{
}
#endif
79
/*
 * Destroy callback for a perf counter context field: tear down the
 * per-CPU perf events, then free every allocation owned by the field.
 *
 * On kernels >= 4.10, removing the cpuhp instances lets the cpuhp state
 * machine run the teardown callbacks, which release the per-CPU
 * counters. On older kernels, the counters of all online CPUs are
 * released directly and the CPU hotplug notifier is unregistered.
 */
static
void lttng_destroy_perf_counter_ctx_field(void *priv)
{
	struct lttng_perf_counter_field *perf_field = priv;
	struct perf_event **events = perf_field->e;

#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
	{
		int ret;

		ret = cpuhp_state_remove_instance(lttng_hp_online,
			&perf_field->cpuhp_online.node);
		WARN_ON(ret);
		ret = cpuhp_state_remove_instance(lttng_hp_prepare,
			&perf_field->cpuhp_prepare.node);
		WARN_ON(ret);
	}
#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
	{
		int cpu;

		/* Hold the hotplug lock so the online CPU set is stable. */
		get_online_cpus();
		for_each_online_cpu(cpu)
			perf_event_release_kernel(events[cpu]);
		put_online_cpus();
#ifdef CONFIG_HOTPLUG_CPU
		unregister_cpu_notifier(&perf_field->nb);
#endif
	}
#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
	/* name_alloc is owned by perf_field->name; event_field->name aliases it. */
	kfree(perf_field->name);
	kfree(perf_field->attr);
	kfree(perf_field->event_field);
	lttng_kvfree(events);
	kfree(perf_field);
}
116
117 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
118
/*
 * CPU hotplug "online" instance callback: create the pinned perf
 * counter for the CPU coming up, using the attr configured by
 * lttng_add_perf_counter_to_ctx(), and publish it in the per-CPU
 * event array.
 *
 * Returns 0 on success, -EINVAL if the counter cannot be created or
 * comes up in error state.
 */
int lttng_cpuhp_perf_counter_online(unsigned int cpu,
		struct lttng_cpuhp_node *node)
{
	struct lttng_perf_counter_field *perf_field =
		container_of(node, struct lttng_perf_counter_field,
				cpuhp_online);
	struct perf_event **events = perf_field->e;
	struct perf_event_attr *attr = perf_field->attr;
	struct perf_event *pevent;

	pevent = wrapper_perf_event_create_kernel_counter(attr,
			cpu, NULL, overflow_callback);
	if (!pevent || IS_ERR(pevent))
		return -EINVAL;
	if (pevent->state == PERF_EVENT_STATE_ERROR) {
		perf_event_release_kernel(pevent);
		return -EINVAL;
	}
	barrier();	/* Create perf counter before setting event */
	events[cpu] = pevent;
	return 0;
}
141
/*
 * CPU hotplug "dead" instance callback: unpublish the per-CPU counter
 * (so perf_counter_record() records 0 for this CPU from now on), then
 * release it. The barrier orders the NULL store before teardown.
 */
int lttng_cpuhp_perf_counter_dead(unsigned int cpu,
		struct lttng_cpuhp_node *node)
{
	struct lttng_perf_counter_field *perf_field =
		container_of(node, struct lttng_perf_counter_field,
				cpuhp_prepare);
	struct perf_event **events = perf_field->e;
	struct perf_event *pevent;

	pevent = events[cpu];
	events[cpu] = NULL;
	barrier();	/* NULLify event before perf counter teardown */
	perf_event_release_kernel(pevent);
	return 0;
}
157
158 #else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
159
160 #ifdef CONFIG_HOTPLUG_CPU
161
/**
 * lttng_perf_counter_cpu_hp_callback - CPU hotplug callback
 * @nb: notifier block
 * @action: hotplug action to take
 * @hcpu: CPU number
 *
 * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
 *
 * We can setup perf counters when the cpu is online (up prepare seems to be too
 * soon).
 */
static
int lttng_perf_counter_cpu_hp_callback(struct notifier_block *nb,
		unsigned long action,
		void *hcpu)
{
	unsigned int cpu = (unsigned long) hcpu;
	struct lttng_perf_counter_field *perf_field =
		container_of(nb, struct lttng_perf_counter_field, nb);
	struct perf_event **events = perf_field->e;
	struct perf_event_attr *attr = perf_field->attr;
	struct perf_event *pevent;

	/* Ignore notifications until initial per-CPU setup has completed. */
	if (!perf_field->hp_enable)
		return NOTIFY_OK;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		/* CPU coming up: create its counter, then publish it. */
		pevent = wrapper_perf_event_create_kernel_counter(attr,
				cpu, NULL, overflow_callback);
		if (!pevent || IS_ERR(pevent))
			return NOTIFY_BAD;
		if (pevent->state == PERF_EVENT_STATE_ERROR) {
			perf_event_release_kernel(pevent);
			return NOTIFY_BAD;
		}
		barrier();	/* Create perf counter before setting event */
		events[cpu] = pevent;
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		/* CPU going away: unpublish first, then release the counter. */
		pevent = events[cpu];
		events[cpu] = NULL;
		barrier();	/* NULLify event before perf counter teardown */
		perf_event_release_kernel(pevent);
		break;
	}
	return NOTIFY_OK;
}
214
215 #endif
216
217 #endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
218
/*
 * Event field type shared by all perf counter context fields:
 * a uint64_t integer in native byte order, displayed in base 10.
 */
static const struct lttng_kernel_type_common *field_type =
	lttng_kernel_static_type_integer_from_type(uint64_t, __BYTE_ORDER, 10);
221
222 int lttng_add_perf_counter_to_ctx(uint32_t type,
223 uint64_t config,
224 const char *name,
225 struct lttng_kernel_ctx **ctx)
226 {
227 struct lttng_kernel_ctx_field ctx_field = { 0 };
228 struct lttng_kernel_event_field *event_field;
229 struct lttng_perf_counter_field *perf_field;
230 struct perf_event **events;
231 struct perf_event_attr *attr;
232 int ret;
233 char *name_alloc;
234
235 if (lttng_kernel_find_context(*ctx, name))
236 return -EEXIST;
237 name_alloc = kstrdup(name, GFP_KERNEL);
238 if (!name_alloc) {
239 ret = -ENOMEM;
240 goto name_alloc_error;
241 }
242 event_field = kzalloc(sizeof(*event_field), GFP_KERNEL);
243 if (!event_field) {
244 ret = -ENOMEM;
245 goto event_field_alloc_error;
246 }
247 event_field->name = name_alloc;
248 event_field->type = field_type;
249
250 events = lttng_kvzalloc(num_possible_cpus() * sizeof(*events), GFP_KERNEL);
251 if (!events) {
252 ret = -ENOMEM;
253 goto event_alloc_error;
254 }
255
256 attr = kzalloc(sizeof(struct perf_event_attr), GFP_KERNEL);
257 if (!attr) {
258 ret = -ENOMEM;
259 goto error_attr;
260 }
261
262 attr->type = type;
263 attr->config = config;
264 attr->size = sizeof(struct perf_event_attr);
265 attr->pinned = 1;
266 attr->disabled = 0;
267
268 perf_field = kzalloc(sizeof(struct lttng_perf_counter_field), GFP_KERNEL);
269 if (!perf_field) {
270 ret = -ENOMEM;
271 goto error_alloc_perf_field;
272 }
273 perf_field->e = events;
274 perf_field->attr = attr;
275 perf_field->name = name_alloc;
276 perf_field->event_field = event_field;
277
278 ctx_field.event_field = event_field;
279 ctx_field.get_size = perf_counter_get_size;
280 ctx_field.record = perf_counter_record;
281 ctx_field.destroy = lttng_destroy_perf_counter_ctx_field;
282 ctx_field.priv = perf_field;
283
284 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
285
286 perf_field->cpuhp_prepare.component = LTTNG_CONTEXT_PERF_COUNTERS;
287 ret = cpuhp_state_add_instance(lttng_hp_prepare,
288 &perf_field->cpuhp_prepare.node);
289 if (ret)
290 goto cpuhp_prepare_error;
291
292 perf_field->cpuhp_online.component = LTTNG_CONTEXT_PERF_COUNTERS;
293 ret = cpuhp_state_add_instance(lttng_hp_online,
294 &perf_field->cpuhp_online.node);
295 if (ret)
296 goto cpuhp_online_error;
297
298 #else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
299 {
300 int cpu;
301
302 #ifdef CONFIG_HOTPLUG_CPU
303 perf_field->nb.notifier_call =
304 lttng_perf_counter_cpu_hp_callback;
305 perf_field->nb.priority = 0;
306 register_cpu_notifier(&perf_field->nb);
307 #endif
308 get_online_cpus();
309 for_each_online_cpu(cpu) {
310 events[cpu] = wrapper_perf_event_create_kernel_counter(attr,
311 cpu, NULL, overflow_callback);
312 if (!events[cpu] || IS_ERR(events[cpu])) {
313 ret = -EINVAL;
314 goto counter_error;
315 }
316 if (events[cpu]->state == PERF_EVENT_STATE_ERROR) {
317 ret = -EBUSY;
318 goto counter_busy;
319 }
320 }
321 put_online_cpus();
322 perf_field->hp_enable = 1;
323 }
324 #endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
325
326 ret = lttng_kernel_context_append(ctx, &ctx_field);
327 if (ret) {
328 ret = -ENOMEM;
329 goto append_context_error;
330 }
331 return 0;
332
333 /* Error handling. */
334 append_context_error:
335 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
336 cpuhp_online_error:
337 {
338 int remove_ret;
339
340 remove_ret = cpuhp_state_remove_instance(lttng_hp_prepare,
341 &perf_field->cpuhp_prepare.node);
342 WARN_ON(remove_ret);
343 }
344 cpuhp_prepare_error:
345 #else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
346 counter_busy:
347 counter_error:
348 {
349 int cpu;
350
351 for_each_online_cpu(cpu) {
352 if (events[cpu] && !IS_ERR(events[cpu]))
353 perf_event_release_kernel(events[cpu]);
354 }
355 put_online_cpus();
356 #ifdef CONFIG_HOTPLUG_CPU
357 unregister_cpu_notifier(&perf_field->nb);
358 #endif
359 }
360 #endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
361 kfree(perf_field);
362 error_alloc_perf_field:
363 kfree(attr);
364 error_attr:
365 lttng_kvfree(events);
366 event_alloc_error:
367 kfree(event_field);
368 event_field_alloc_error:
369 kfree(name_alloc);
370 name_alloc_error:
371 return ret;
372 }
This page took 0.035768 seconds and 3 git commands to generate.