Rename struct lib_ring_buffer_ctx to struct lttng_kernel_ring_buffer_ctx
src/lttng-context-perf-counters.c (lttng-modules.git)
/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
 *
 * lttng-context-perf-counters.c
 *
 * LTTng performance monitoring counters (perf-counters) integration module.
 *
 * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/cpu.h>
#include <lttng/events.h>
#include <lttng/events-internal.h>
#include <ringbuffer/frontend_types.h>
#include <wrapper/vmalloc.h>
#include <wrapper/perf.h>
#include <lttng/tracer.h>

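/*
 * Compute the space needed to record one perf counter value: align the
 * payload on the natural alignment of uint64_t, then reserve 8 bytes.
 */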
static
size_t perf_counter_get_size(void *priv, struct lttng_kernel_probe_ctx *probe_ctx, size_t offset)
{
	size_t size = 0;

	size += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
	size += sizeof(uint64_t);
	return size;
}

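/*
 * Read the perf counter associated with the CPU on which space was
 * reserved and write its current value into the event payload. A counter
 * in error state, or one not yet created for this CPU, records 0.
 */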
static
void perf_counter_record(void *priv, struct lttng_kernel_probe_ctx *probe_ctx,
		struct lttng_kernel_ring_buffer_ctx *ctx,
		struct lttng_channel *chan)
{
	struct lttng_perf_counter_field *perf_field = (struct lttng_perf_counter_field *) priv;
	struct perf_event *event;
	uint64_t value;

	event = perf_field->e[ctx->priv.reserve_cpu];
	if (likely(event)) {
		if (unlikely(event->state == PERF_EVENT_STATE_ERROR)) {
			value = 0;
		} else {
			event->pmu->read(event);
			value = local64_read(&event->count);
		}
	} else {
		/*
		 * Perf chooses not to be clever and not to support enabling a
		 * perf counter before the CPU is brought up. Therefore, we need
		 * to support having events coming (e.g. scheduler events)
		 * before the counter is set up. Write an arbitrary 0 in this
		 * case.
		 */
		value = 0;
	}
	lib_ring_buffer_align_ctx(ctx, lttng_alignof(value));
	chan->ops->event_write(ctx, &value, sizeof(value));
}

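/*
 * Dummy overflow handler: counters are read synchronously at record time,
 * so overflow notifications are ignored. The callback signature dropped
 * its "nmi" argument in kernel 3.1, hence the two variants below.
 */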
#if defined(CONFIG_PERF_EVENTS) && (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,1,0))
static
void overflow_callback(struct perf_event *event,
		struct perf_sample_data *data,
		struct pt_regs *regs)
{
}
#else
static
void overflow_callback(struct perf_event *event, int nmi,
		struct perf_sample_data *data,
		struct pt_regs *regs)
{
}
#endif

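/*
 * Tear down a perf counter context field: remove the CPU hotplug instances
 * (kernels >= 4.10) or release each online CPU's event and unregister the
 * notifier (older kernels), then free the field's memory.
 */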
static
void lttng_destroy_perf_counter_ctx_field(void *priv)
{
	struct lttng_perf_counter_field *perf_field = priv;
	struct perf_event **events = perf_field->e;

#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
	{
		int ret;

		ret = cpuhp_state_remove_instance(lttng_hp_online,
				&perf_field->cpuhp_online.node);
		WARN_ON(ret);
		ret = cpuhp_state_remove_instance(lttng_hp_prepare,
				&perf_field->cpuhp_prepare.node);
		WARN_ON(ret);
	}
#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
	{
		int cpu;

		get_online_cpus();
		for_each_online_cpu(cpu)
			perf_event_release_kernel(events[cpu]);
		put_online_cpus();
#ifdef CONFIG_HOTPLUG_CPU
		unregister_cpu_notifier(&perf_field->nb);
#endif
	}
#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
	kfree(perf_field->name);
	kfree(perf_field->attr);
	kfree(perf_field->event_field);
	lttng_kvfree(events);
	kfree(perf_field);
}

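/*
 * CPU hotplug instance callbacks (kernels >= 4.10): create the per-CPU
 * perf event when a CPU comes online and release it when the CPU is taken
 * down. barrier() orders the events[cpu] update against perf event
 * creation/teardown so perf_counter_record() never uses a stale pointer.
 */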
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))

int lttng_cpuhp_perf_counter_online(unsigned int cpu,
		struct lttng_cpuhp_node *node)
{
	struct lttng_perf_counter_field *perf_field =
		container_of(node, struct lttng_perf_counter_field,
				cpuhp_online);
	struct perf_event **events = perf_field->e;
	struct perf_event_attr *attr = perf_field->attr;
	struct perf_event *pevent;

	pevent = wrapper_perf_event_create_kernel_counter(attr,
			cpu, NULL, overflow_callback);
	if (!pevent || IS_ERR(pevent))
		return -EINVAL;
	if (pevent->state == PERF_EVENT_STATE_ERROR) {
		perf_event_release_kernel(pevent);
		return -EINVAL;
	}
	barrier(); /* Create perf counter before setting event */
	events[cpu] = pevent;
	return 0;
}

int lttng_cpuhp_perf_counter_dead(unsigned int cpu,
		struct lttng_cpuhp_node *node)
{
	struct lttng_perf_counter_field *perf_field =
		container_of(node, struct lttng_perf_counter_field,
				cpuhp_prepare);
	struct perf_event **events = perf_field->e;
	struct perf_event *pevent;

	pevent = events[cpu];
	events[cpu] = NULL;
	barrier(); /* NULLify event before perf counter teardown */
	perf_event_release_kernel(pevent);
	return 0;
}

#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */

#ifdef CONFIG_HOTPLUG_CPU

/**
 * lttng_perf_counter_cpu_hp_callback - CPU hotplug callback
 * @nb: notifier block
 * @action: hotplug action to take
 * @hcpu: CPU number
 *
 * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
 *
 * We can set up perf counters when the CPU is online (up prepare seems to
 * be too soon).
 */
static
int lttng_perf_counter_cpu_hp_callback(struct notifier_block *nb,
		unsigned long action,
		void *hcpu)
{
	unsigned int cpu = (unsigned long) hcpu;
	struct lttng_perf_counter_field *perf_field =
		container_of(nb, struct lttng_perf_counter_field, nb);
	struct perf_event **events = perf_field->e;
	struct perf_event_attr *attr = perf_field->attr;
	struct perf_event *pevent;

	if (!perf_field->hp_enable)
		return NOTIFY_OK;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		pevent = wrapper_perf_event_create_kernel_counter(attr,
				cpu, NULL, overflow_callback);
		if (!pevent || IS_ERR(pevent))
			return NOTIFY_BAD;
		if (pevent->state == PERF_EVENT_STATE_ERROR) {
			perf_event_release_kernel(pevent);
			return NOTIFY_BAD;
		}
		barrier(); /* Create perf counter before setting event */
		events[cpu] = pevent;
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		pevent = events[cpu];
		events[cpu] = NULL;
		barrier(); /* NULLify event before perf counter teardown */
		perf_event_release_kernel(pevent);
		break;
	}
	return NOTIFY_OK;
}

#endif

#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */

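/*
 * Every perf counter context field is exposed to the trace with the same
 * event field type: an unsigned 64-bit integer, base 10, native byte order.
 */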
static const struct lttng_kernel_type_common *field_type =
	lttng_kernel_static_type_integer_from_type(uint64_t, __BYTE_ORDER, 10);

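/*
 * Add a perf counter context field to *ctx: duplicate the field name,
 * allocate the event field, the per-CPU event array and the
 * perf_event_attr, register CPU hotplug handling so counters follow CPUs
 * as they come and go, create the counters, and finally append the field
 * to the context. Every step is unwound on error.
 */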
int lttng_add_perf_counter_to_ctx(uint32_t type,
		uint64_t config,
		const char *name,
		struct lttng_kernel_ctx **ctx)
{
	struct lttng_kernel_ctx_field ctx_field = { 0 };
	struct lttng_kernel_event_field *event_field;
	struct lttng_perf_counter_field *perf_field;
	struct perf_event **events;
	struct perf_event_attr *attr;
	int ret;
	char *name_alloc;

	if (lttng_kernel_find_context(*ctx, name))
		return -EEXIST;
	name_alloc = kstrdup(name, GFP_KERNEL);
	if (!name_alloc) {
		ret = -ENOMEM;
		goto name_alloc_error;
	}
	event_field = kzalloc(sizeof(*event_field), GFP_KERNEL);
	if (!event_field) {
		ret = -ENOMEM;
		goto event_field_alloc_error;
	}
	event_field->name = name_alloc;
	event_field->type = field_type;

	events = lttng_kvzalloc(num_possible_cpus() * sizeof(*events), GFP_KERNEL);
	if (!events) {
		ret = -ENOMEM;
		goto event_alloc_error;
	}

	attr = kzalloc(sizeof(struct perf_event_attr), GFP_KERNEL);
	if (!attr) {
		ret = -ENOMEM;
		goto error_attr;
	}

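	/*
	 * Note: pinned = 1 asks perf to keep the counter scheduled on the PMU
	 * at all times; if that cannot be honoured (e.g. the PMU is
	 * over-committed), the event moves to PERF_EVENT_STATE_ERROR, which
	 * is handled at record time.
	 */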
	attr->type = type;
	attr->config = config;
	attr->size = sizeof(struct perf_event_attr);
	attr->pinned = 1;
	attr->disabled = 0;

	perf_field = kzalloc(sizeof(struct lttng_perf_counter_field), GFP_KERNEL);
	if (!perf_field) {
		ret = -ENOMEM;
		goto error_alloc_perf_field;
	}
	perf_field->e = events;
	perf_field->attr = attr;
	perf_field->name = name_alloc;
	perf_field->event_field = event_field;

	ctx_field.event_field = event_field;
	ctx_field.get_size = perf_counter_get_size;
	ctx_field.record = perf_counter_record;
	ctx_field.destroy = lttng_destroy_perf_counter_ctx_field;
	ctx_field.priv = perf_field;

#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))

	perf_field->cpuhp_prepare.component = LTTNG_CONTEXT_PERF_COUNTERS;
	ret = cpuhp_state_add_instance(lttng_hp_prepare,
			&perf_field->cpuhp_prepare.node);
	if (ret)
		goto cpuhp_prepare_error;

	perf_field->cpuhp_online.component = LTTNG_CONTEXT_PERF_COUNTERS;
	ret = cpuhp_state_add_instance(lttng_hp_online,
			&perf_field->cpuhp_online.node);
	if (ret)
		goto cpuhp_online_error;

#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
	{
		int cpu;

#ifdef CONFIG_HOTPLUG_CPU
		perf_field->nb.notifier_call =
			lttng_perf_counter_cpu_hp_callback;
		perf_field->nb.priority = 0;
		register_cpu_notifier(&perf_field->nb);
#endif
		get_online_cpus();
		for_each_online_cpu(cpu) {
			events[cpu] = wrapper_perf_event_create_kernel_counter(attr,
					cpu, NULL, overflow_callback);
			if (!events[cpu] || IS_ERR(events[cpu])) {
				ret = -EINVAL;
				goto counter_error;
			}
			if (events[cpu]->state == PERF_EVENT_STATE_ERROR) {
				ret = -EBUSY;
				goto counter_busy;
			}
		}
		put_online_cpus();
		perf_field->hp_enable = 1;
	}
#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */

	ret = lttng_kernel_context_append(ctx, &ctx_field);
	if (ret) {
		ret = -ENOMEM;
		goto append_context_error;
	}
	return 0;

	/* Error handling. */
append_context_error:
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
cpuhp_online_error:
	{
		int remove_ret;

		remove_ret = cpuhp_state_remove_instance(lttng_hp_prepare,
				&perf_field->cpuhp_prepare.node);
		WARN_ON(remove_ret);
	}
cpuhp_prepare_error:
#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
counter_busy:
counter_error:
	{
		int cpu;

		for_each_online_cpu(cpu) {
			if (events[cpu] && !IS_ERR(events[cpu]))
				perf_event_release_kernel(events[cpu]);
		}
		put_online_cpus();
#ifdef CONFIG_HOTPLUG_CPU
		unregister_cpu_notifier(&perf_field->nb);
#endif
	}
#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
	kfree(perf_field);
error_alloc_perf_field:
	kfree(attr);
error_attr:
	lttng_kvfree(events);
event_alloc_error:
	kfree(event_field);
event_field_alloc_error:
	kfree(name_alloc);
name_alloc_error:
	return ret;
}
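
/*
 * Illustrative usage only (the real callers live elsewhere in
 * lttng-modules): adding the hardware cycle counter under a hypothetical
 * field name could look roughly like
 *
 *	ret = lttng_add_perf_counter_to_ctx(PERF_TYPE_HARDWARE,
 *			PERF_COUNT_HW_CPU_CYCLES,
 *			"perf_cpu_cycles", &ctx);
 *
 * PERF_TYPE_HARDWARE and PERF_COUNT_HW_CPU_CYCLES are standard perf UAPI
 * constants; "perf_cpu_cycles" is a caller-chosen name.
 */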