/* SPDX-License-Identifier: (GPL-2.0 or LGPL-2.1)
 *
 * lttng-context-perf-counters.c
 *
 * LTTng performance monitoring counters (perf-counters) integration module.
 *
 * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */
9
10#include <linux/module.h>
11#include <linux/slab.h>
12#include <linux/perf_event.h>
13#include <linux/list.h>
c24a0d71 14#include <linux/string.h>
5ca7b8a3 15#include <linux/cpu.h>
241ae9a8
MD
16#include <lttng-events.h>
17#include <wrapper/ringbuffer/frontend_types.h>
4a6ea683 18#include <wrapper/cpu.h>
241ae9a8
MD
19#include <wrapper/vmalloc.h>
20#include <wrapper/perf.h>
21#include <lttng-tracer.h>
833ad6a0 22
f1676205
MD
23static
24size_t perf_counter_get_size(size_t offset)
25{
26 size_t size = 0;
27
a90917c3 28 size += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
f1676205
MD
29 size += sizeof(uint64_t);
30 return size;
31}
32
833ad6a0
MD
33static
34void perf_counter_record(struct lttng_ctx_field *field,
35 struct lib_ring_buffer_ctx *ctx,
a90917c3 36 struct lttng_channel *chan)
833ad6a0
MD
37{
38 struct perf_event *event;
39 uint64_t value;
40
2001023e 41 event = field->u.perf_counter->e[ctx->cpu];
0478c519 42 if (likely(event)) {
7b745a96
MD
43 if (unlikely(event->state == PERF_EVENT_STATE_ERROR)) {
44 value = 0;
45 } else {
46 event->pmu->read(event);
47 value = local64_read(&event->count);
48 }
f91fd73b
MD
49 } else {
50 /*
51 * Perf chooses not to be clever and not to support enabling a
52 * perf counter before the cpu is brought up. Therefore, we need
53 * to support having events coming (e.g. scheduler events)
54 * before the counter is setup. Write an arbitrary 0 in this
55 * case.
56 */
57 value = 0;
58 }
a90917c3 59 lib_ring_buffer_align_ctx(ctx, lttng_alignof(value));
833ad6a0
MD
60 chan->ops->event_write(ctx, &value, sizeof(value));
61}
62
2d042821 63#if defined(CONFIG_PERF_EVENTS) && (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,1,0))
90f5546c
MD
64static
65void overflow_callback(struct perf_event *event,
66 struct perf_sample_data *data,
67 struct pt_regs *regs)
68{
69}
70#else
833ad6a0
MD
71static
72void overflow_callback(struct perf_event *event, int nmi,
73 struct perf_sample_data *data,
74 struct pt_regs *regs)
75{
76}
90f5546c 77#endif
833ad6a0 78
2dccf128
MD
79static
80void lttng_destroy_perf_counter_field(struct lttng_ctx_field *field)
81{
2001023e 82 struct perf_event **events = field->u.perf_counter->e;
2dccf128 83
2d042821 84#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
1e367326
MD
85 {
86 int ret;
87
88 ret = cpuhp_state_remove_instance(lttng_hp_online,
89 &field->u.perf_counter->cpuhp_online.node);
90 WARN_ON(ret);
91 ret = cpuhp_state_remove_instance(lttng_hp_prepare,
92 &field->u.perf_counter->cpuhp_prepare.node);
93 WARN_ON(ret);
94 }
2d042821 95#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
1e367326
MD
96 {
97 int cpu;
98
4a6ea683 99 lttng_cpus_read_lock();
1e367326
MD
100 for_each_online_cpu(cpu)
101 perf_event_release_kernel(events[cpu]);
4a6ea683 102 lttng_cpus_read_unlock();
8289661d 103#ifdef CONFIG_HOTPLUG_CPU
1e367326 104 unregister_cpu_notifier(&field->u.perf_counter->nb);
8289661d 105#endif
1e367326 106 }
2d042821 107#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
c24a0d71 108 kfree(field->event_field.name);
2001023e 109 kfree(field->u.perf_counter->attr);
48f5e0b5 110 lttng_kvfree(events);
2001023e 111 kfree(field->u.perf_counter);
2dccf128
MD
112}
113
2d042821 114#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
1e367326
MD
115
116int lttng_cpuhp_perf_counter_online(unsigned int cpu,
117 struct lttng_cpuhp_node *node)
118{
119 struct lttng_perf_counter_field *perf_field =
120 container_of(node, struct lttng_perf_counter_field,
121 cpuhp_online);
122 struct perf_event **events = perf_field->e;
123 struct perf_event_attr *attr = perf_field->attr;
124 struct perf_event *pevent;
125
126 pevent = wrapper_perf_event_create_kernel_counter(attr,
127 cpu, NULL, overflow_callback);
128 if (!pevent || IS_ERR(pevent))
129 return -EINVAL;
130 if (pevent->state == PERF_EVENT_STATE_ERROR) {
131 perf_event_release_kernel(pevent);
132 return -EINVAL;
133 }
134 barrier(); /* Create perf counter before setting event */
135 events[cpu] = pevent;
136 return 0;
137}
138
139int lttng_cpuhp_perf_counter_dead(unsigned int cpu,
140 struct lttng_cpuhp_node *node)
141{
142 struct lttng_perf_counter_field *perf_field =
143 container_of(node, struct lttng_perf_counter_field,
144 cpuhp_prepare);
145 struct perf_event **events = perf_field->e;
146 struct perf_event *pevent;
147
148 pevent = events[cpu];
149 events[cpu] = NULL;
150 barrier(); /* NULLify event before perf counter teardown */
151 perf_event_release_kernel(pevent);
152 return 0;
153}
154
2d042821 155#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
1e367326 156
8289661d
MD
157#ifdef CONFIG_HOTPLUG_CPU
158
159/**
160 * lttng_perf_counter_hp_callback - CPU hotplug callback
161 * @nb: notifier block
162 * @action: hotplug action to take
163 * @hcpu: CPU number
164 *
165 * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
166 *
167 * We can setup perf counters when the cpu is online (up prepare seems to be too
168 * soon).
169 */
170static
e8f071d5 171int lttng_perf_counter_cpu_hp_callback(struct notifier_block *nb,
8289661d
MD
172 unsigned long action,
173 void *hcpu)
174{
175 unsigned int cpu = (unsigned long) hcpu;
2001023e
MD
176 struct lttng_perf_counter_field *perf_field =
177 container_of(nb, struct lttng_perf_counter_field, nb);
178 struct perf_event **events = perf_field->e;
179 struct perf_event_attr *attr = perf_field->attr;
f91fd73b 180 struct perf_event *pevent;
8289661d 181
2001023e 182 if (!perf_field->hp_enable)
8289661d
MD
183 return NOTIFY_OK;
184
185 switch (action) {
186 case CPU_ONLINE:
187 case CPU_ONLINE_FROZEN:
90f5546c 188 pevent = wrapper_perf_event_create_kernel_counter(attr,
8289661d 189 cpu, NULL, overflow_callback);
0478c519 190 if (!pevent || IS_ERR(pevent))
8289661d 191 return NOTIFY_BAD;
7b745a96
MD
192 if (pevent->state == PERF_EVENT_STATE_ERROR) {
193 perf_event_release_kernel(pevent);
194 return NOTIFY_BAD;
195 }
f91fd73b
MD
196 barrier(); /* Create perf counter before setting event */
197 events[cpu] = pevent;
8289661d
MD
198 break;
199 case CPU_UP_CANCELED:
200 case CPU_UP_CANCELED_FROZEN:
201 case CPU_DEAD:
202 case CPU_DEAD_FROZEN:
f91fd73b
MD
203 pevent = events[cpu];
204 events[cpu] = NULL;
205 barrier(); /* NULLify event before perf counter teardown */
206 perf_event_release_kernel(pevent);
8289661d
MD
207 break;
208 }
209 return NOTIFY_OK;
210}
211
212#endif
213
2d042821 214#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
1e367326 215
833ad6a0
MD
216int lttng_add_perf_counter_to_ctx(uint32_t type,
217 uint64_t config,
c24a0d71 218 const char *name,
2dccf128 219 struct lttng_ctx **ctx)
833ad6a0
MD
220{
221 struct lttng_ctx_field *field;
2001023e 222 struct lttng_perf_counter_field *perf_field;
833ad6a0
MD
223 struct perf_event **events;
224 struct perf_event_attr *attr;
225 int ret;
c24a0d71 226 char *name_alloc;
833ad6a0 227
48f5e0b5 228 events = lttng_kvzalloc(num_possible_cpus() * sizeof(*events), GFP_KERNEL);
833ad6a0
MD
229 if (!events)
230 return -ENOMEM;
231
2001023e 232 attr = kzalloc(sizeof(struct perf_event_attr), GFP_KERNEL);
833ad6a0
MD
233 if (!attr) {
234 ret = -ENOMEM;
235 goto error_attr;
236 }
237
238 attr->type = type;
239 attr->config = config;
240 attr->size = sizeof(struct perf_event_attr);
241 attr->pinned = 1;
242 attr->disabled = 0;
243
2001023e
MD
244 perf_field = kzalloc(sizeof(struct lttng_perf_counter_field), GFP_KERNEL);
245 if (!perf_field) {
246 ret = -ENOMEM;
247 goto error_alloc_perf_field;
248 }
249 perf_field->e = events;
250 perf_field->attr = attr;
251
c24a0d71 252 name_alloc = kstrdup(name, GFP_KERNEL);
bef96e48
MD
253 if (!name_alloc) {
254 ret = -ENOMEM;
c24a0d71 255 goto name_alloc_error;
bef96e48 256 }
c24a0d71 257
2dccf128
MD
258 field = lttng_append_context(ctx);
259 if (!field) {
260 ret = -ENOMEM;
8289661d 261 goto append_context_error;
833ad6a0 262 }
2001023e 263 if (lttng_find_context(*ctx, name_alloc)) {
44252f0f
MD
264 ret = -EEXIST;
265 goto find_error;
266 }
8289661d 267
2d042821 268#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
1e367326
MD
269
270 perf_field->cpuhp_prepare.component = LTTNG_CONTEXT_PERF_COUNTERS;
271 ret = cpuhp_state_add_instance(lttng_hp_prepare,
272 &perf_field->cpuhp_prepare.node);
273 if (ret)
274 goto cpuhp_prepare_error;
275
276 perf_field->cpuhp_online.component = LTTNG_CONTEXT_PERF_COUNTERS;
277 ret = cpuhp_state_add_instance(lttng_hp_online,
278 &perf_field->cpuhp_online.node);
279 if (ret)
280 goto cpuhp_online_error;
281
2d042821 282#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
1e367326
MD
283 {
284 int cpu;
285
8289661d 286#ifdef CONFIG_HOTPLUG_CPU
1e367326
MD
287 perf_field->nb.notifier_call =
288 lttng_perf_counter_cpu_hp_callback;
289 perf_field->nb.priority = 0;
290 register_cpu_notifier(&perf_field->nb);
8289661d 291#endif
4a6ea683 292 lttng_cpus_read_lock();
1e367326
MD
293 for_each_online_cpu(cpu) {
294 events[cpu] = wrapper_perf_event_create_kernel_counter(attr,
295 cpu, NULL, overflow_callback);
296 if (!events[cpu] || IS_ERR(events[cpu])) {
297 ret = -EINVAL;
298 goto counter_error;
299 }
300 if (events[cpu]->state == PERF_EVENT_STATE_ERROR) {
301 ret = -EBUSY;
302 goto counter_busy;
303 }
7b745a96 304 }
4a6ea683 305 lttng_cpus_read_unlock();
1e367326 306 perf_field->hp_enable = 1;
8289661d 307 }
2d042821 308#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
8289661d 309
2dccf128 310 field->destroy = lttng_destroy_perf_counter_field;
833ad6a0 311
c24a0d71 312 field->event_field.name = name_alloc;
8070f5c0 313 field->event_field.type.atype = atype_integer;
9d7d747f 314 field->event_field.type.u.basic.integer.size = sizeof(uint64_t) * CHAR_BIT;
a90917c3 315 field->event_field.type.u.basic.integer.alignment = lttng_alignof(uint64_t) * CHAR_BIT;
06254b0f 316 field->event_field.type.u.basic.integer.signedness = lttng_is_signed_type(uint64_t);
8070f5c0
MD
317 field->event_field.type.u.basic.integer.reverse_byte_order = 0;
318 field->event_field.type.u.basic.integer.base = 10;
319 field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
f1676205
MD
320 field->get_size = perf_counter_get_size;
321 field->record = perf_counter_record;
2001023e 322 field->u.perf_counter = perf_field;
a9dd15da 323 lttng_context_update(*ctx);
833ad6a0 324
ea53823c 325 wrapper_vmalloc_sync_mappings();
833ad6a0
MD
326 return 0;
327
2d042821 328#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
1e367326
MD
329cpuhp_online_error:
330 {
331 int remove_ret;
332
333 remove_ret = cpuhp_state_remove_instance(lttng_hp_prepare,
334 &perf_field->cpuhp_prepare.node);
335 WARN_ON(remove_ret);
336 }
337cpuhp_prepare_error:
2d042821 338#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
7b745a96 339counter_busy:
8289661d 340counter_error:
ce4a2f0c
MD
341 {
342 int cpu;
343
344 for_each_online_cpu(cpu) {
345 if (events[cpu] && !IS_ERR(events[cpu]))
346 perf_event_release_kernel(events[cpu]);
347 }
4a6ea683 348 lttng_cpus_read_unlock();
8289661d 349#ifdef CONFIG_HOTPLUG_CPU
ce4a2f0c 350 unregister_cpu_notifier(&perf_field->nb);
8289661d 351#endif
ce4a2f0c 352 }
2d042821 353#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
44252f0f 354find_error:
8289661d
MD
355 lttng_remove_context_field(ctx, field);
356append_context_error:
357 kfree(name_alloc);
358name_alloc_error:
2001023e
MD
359 kfree(perf_field);
360error_alloc_perf_field:
833ad6a0
MD
361 kfree(attr);
362error_attr:
48f5e0b5 363 lttng_kvfree(events);
833ad6a0
MD
364 return ret;
365}