Fix: vmalloc wrapper on kernel < 2.6.38
/*
 * lttng-context-perf-counters.c
 *
 * LTTng performance monitoring counters (perf-counters) integration module.
 *
 * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/cpu.h>
#include <lttng-events.h>
#include <wrapper/ringbuffer/frontend_types.h>
#include <wrapper/vmalloc.h>
#include <wrapper/perf.h>
#include <lttng-tracer.h>

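/*
 * Compute the space the counter payload occupies at this offset in the
 * event: padding to align a uint64_t naturally, plus the 8-byte value
 * itself.
 */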
static
size_t perf_counter_get_size(size_t offset)
{
	size_t size = 0;

	size += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
	size += sizeof(uint64_t);
	return size;
}

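/*
 * Record callback: read the current value of this CPU's counter and write
 * it, naturally aligned, into the space accounted for by
 * perf_counter_get_size().
 */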
static
void perf_counter_record(struct lttng_ctx_field *field,
		struct lib_ring_buffer_ctx *ctx,
		struct lttng_channel *chan)
{
	struct perf_event *event;
	uint64_t value;

	event = field->u.perf_counter->e[ctx->cpu];
	if (likely(event)) {
		if (unlikely(event->state == PERF_EVENT_STATE_ERROR)) {
			value = 0;
		} else {
			event->pmu->read(event);
			value = local64_read(&event->count);
		}
	} else {
		/*
		 * Perf chooses not to be clever and not to support enabling a
		 * perf counter before the cpu is brought up. Therefore, we need
		 * to support having events coming (e.g. scheduler events)
		 * before the counter is set up. Write an arbitrary 0 in this
		 * case.
		 */
		value = 0;
	}
	lib_ring_buffer_align_ctx(ctx, lttng_alignof(value));
	chan->ops->event_write(ctx, &value, sizeof(value));
}

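/*
 * The counters are created with an explicit overflow handler, but their
 * values are only sampled synchronously from perf_counter_record(), so the
 * handler does nothing. Kernels >= 3.1 dropped the "nmi" argument from the
 * overflow handler signature, hence the two prototypes below.
 */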
#if defined(CONFIG_PERF_EVENTS) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,99))
static
void overflow_callback(struct perf_event *event,
		struct perf_sample_data *data,
		struct pt_regs *regs)
{
}
#else
static
void overflow_callback(struct perf_event *event, int nmi,
		struct perf_sample_data *data,
		struct pt_regs *regs)
{
}
#endif

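/*
 * Tear down a perf counter context field: release every per-CPU event
 * through the hotplug mechanism in use (cpuhp instance removal on >= 4.10,
 * an explicit walk of online CPUs plus notifier unregistration otherwise),
 * then free the name, attribute, event array and field bookkeeping.
 */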
static
void lttng_destroy_perf_counter_field(struct lttng_ctx_field *field)
{
	struct perf_event **events = field->u.perf_counter->e;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
	{
		int ret;

		ret = cpuhp_state_remove_instance(lttng_hp_online,
			&field->u.perf_counter->cpuhp_online.node);
		WARN_ON(ret);
		ret = cpuhp_state_remove_instance(lttng_hp_prepare,
			&field->u.perf_counter->cpuhp_prepare.node);
		WARN_ON(ret);
	}
#else	/* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
	{
		int cpu;

		get_online_cpus();
		for_each_online_cpu(cpu)
			perf_event_release_kernel(events[cpu]);
		put_online_cpus();
#ifdef CONFIG_HOTPLUG_CPU
		unregister_cpu_notifier(&field->u.perf_counter->nb);
#endif
	}
#endif	/* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
	kfree(field->event_field.name);
	kfree(field->u.perf_counter->attr);
	lttng_kvfree(events);
	kfree(field->u.perf_counter);
}

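/*
 * On kernels >= 4.10, CPU hotplug uses the cpuhp state machine: the "online"
 * instance callback creates the per-CPU counter once the CPU is up, and the
 * "dead" callback releases it after the CPU has gone down. Both are invoked
 * through the lttng_hp_online/lttng_hp_prepare states registered elsewhere
 * in the module.
 */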
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))

int lttng_cpuhp_perf_counter_online(unsigned int cpu,
		struct lttng_cpuhp_node *node)
{
	struct lttng_perf_counter_field *perf_field =
		container_of(node, struct lttng_perf_counter_field,
				cpuhp_online);
	struct perf_event **events = perf_field->e;
	struct perf_event_attr *attr = perf_field->attr;
	struct perf_event *pevent;

	pevent = wrapper_perf_event_create_kernel_counter(attr,
			cpu, NULL, overflow_callback);
	if (!pevent || IS_ERR(pevent))
		return -EINVAL;
	if (pevent->state == PERF_EVENT_STATE_ERROR) {
		perf_event_release_kernel(pevent);
		return -EINVAL;
	}
	barrier();	/* Create perf counter before setting event */
	events[cpu] = pevent;
	return 0;
}

int lttng_cpuhp_perf_counter_dead(unsigned int cpu,
		struct lttng_cpuhp_node *node)
{
	struct lttng_perf_counter_field *perf_field =
		container_of(node, struct lttng_perf_counter_field,
				cpuhp_prepare);
	struct perf_event **events = perf_field->e;
	struct perf_event *pevent;

	pevent = events[cpu];
	events[cpu] = NULL;
	barrier();	/* NULLify event before perf counter teardown */
	perf_event_release_kernel(pevent);
	return 0;
}

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */

#ifdef CONFIG_HOTPLUG_CPU

/**
 *	lttng_perf_counter_cpu_hp_callback - CPU hotplug callback
 *	@nb: notifier block
 *	@action: hotplug action to take
 *	@hcpu: CPU number
 *
 *	Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
 *
 * We can set up perf counters when the cpu is online (up prepare seems to be
 * too soon).
 */
static
int lttng_perf_counter_cpu_hp_callback(struct notifier_block *nb,
		unsigned long action,
		void *hcpu)
{
	unsigned int cpu = (unsigned long) hcpu;
	struct lttng_perf_counter_field *perf_field =
		container_of(nb, struct lttng_perf_counter_field, nb);
	struct perf_event **events = perf_field->e;
	struct perf_event_attr *attr = perf_field->attr;
	struct perf_event *pevent;

	if (!perf_field->hp_enable)
		return NOTIFY_OK;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		pevent = wrapper_perf_event_create_kernel_counter(attr,
				cpu, NULL, overflow_callback);
		if (!pevent || IS_ERR(pevent))
			return NOTIFY_BAD;
		if (pevent->state == PERF_EVENT_STATE_ERROR) {
			perf_event_release_kernel(pevent);
			return NOTIFY_BAD;
		}
		barrier();	/* Create perf counter before setting event */
		events[cpu] = pevent;
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		pevent = events[cpu];
		events[cpu] = NULL;
		barrier();	/* NULLify event before perf counter teardown */
		perf_event_release_kernel(pevent);
		break;
	}
	return NOTIFY_OK;
}

#endif

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */

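/*
 * Add a perf counter context field, named after the requested counter, to a
 * context chain: allocate one perf event slot per possible CPU, wire up CPU
 * hotplug handling so counters follow CPUs going on/offline, and register
 * the field as an unsigned 64-bit integer read by perf_counter_record().
 */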
int lttng_add_perf_counter_to_ctx(uint32_t type,
		uint64_t config,
		const char *name,
		struct lttng_ctx **ctx)
{
	struct lttng_ctx_field *field;
	struct lttng_perf_counter_field *perf_field;
	struct perf_event **events;
	struct perf_event_attr *attr;
	int ret;
	char *name_alloc;

	events = lttng_kvzalloc(num_possible_cpus() * sizeof(*events), GFP_KERNEL);
	if (!events)
		return -ENOMEM;

	attr = kzalloc(sizeof(struct perf_event_attr), GFP_KERNEL);
	if (!attr) {
		ret = -ENOMEM;
		goto error_attr;
	}

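	/*
	 * Request a pinned, always-enabled counter: "pinned" keeps the event
	 * on the PMU whenever its CPU is online, and leaving "disabled" at 0
	 * makes it count as soon as it is created.
	 */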
	attr->type = type;
	attr->config = config;
	attr->size = sizeof(struct perf_event_attr);
	attr->pinned = 1;
	attr->disabled = 0;

	perf_field = kzalloc(sizeof(struct lttng_perf_counter_field), GFP_KERNEL);
	if (!perf_field) {
		ret = -ENOMEM;
		goto error_alloc_perf_field;
	}
	perf_field->e = events;
	perf_field->attr = attr;

	name_alloc = kstrdup(name, GFP_KERNEL);
	if (!name_alloc) {
		ret = -ENOMEM;
		goto name_alloc_error;
	}

	field = lttng_append_context(ctx);
	if (!field) {
		ret = -ENOMEM;
		goto append_context_error;
	}
	if (lttng_find_context(*ctx, name_alloc)) {
		ret = -EEXIST;
		goto find_error;
	}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))

	perf_field->cpuhp_prepare.component = LTTNG_CONTEXT_PERF_COUNTERS;
	ret = cpuhp_state_add_instance(lttng_hp_prepare,
		&perf_field->cpuhp_prepare.node);
	if (ret)
		goto cpuhp_prepare_error;

	perf_field->cpuhp_online.component = LTTNG_CONTEXT_PERF_COUNTERS;
	ret = cpuhp_state_add_instance(lttng_hp_online,
		&perf_field->cpuhp_online.node);
	if (ret)
		goto cpuhp_online_error;

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
	{
		int cpu;

#ifdef CONFIG_HOTPLUG_CPU
		perf_field->nb.notifier_call =
			lttng_perf_counter_cpu_hp_callback;
		perf_field->nb.priority = 0;
		register_cpu_notifier(&perf_field->nb);
#endif
		get_online_cpus();
		for_each_online_cpu(cpu) {
			events[cpu] = wrapper_perf_event_create_kernel_counter(attr,
					cpu, NULL, overflow_callback);
			if (!events[cpu] || IS_ERR(events[cpu])) {
				ret = -EINVAL;
				goto counter_error;
			}
			if (events[cpu]->state == PERF_EVENT_STATE_ERROR) {
				ret = -EBUSY;
				goto counter_busy;
			}
		}
		put_online_cpus();
		perf_field->hp_enable = 1;
	}
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */

	field->destroy = lttng_destroy_perf_counter_field;

	field->event_field.name = name_alloc;
	field->event_field.type.atype = atype_integer;
	field->event_field.type.u.basic.integer.size = sizeof(uint64_t) * CHAR_BIT;
	field->event_field.type.u.basic.integer.alignment = lttng_alignof(uint64_t) * CHAR_BIT;
	field->event_field.type.u.basic.integer.signedness = lttng_is_signed_type(uint64_t);
	field->event_field.type.u.basic.integer.reverse_byte_order = 0;
	field->event_field.type.u.basic.integer.base = 10;
	field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
	field->get_size = perf_counter_get_size;
	field->record = perf_counter_record;
	field->u.perf_counter = perf_field;
	lttng_context_update(*ctx);

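	/*
	 * Sync the module's vmalloc mappings into the kernel page tables
	 * before tracing touches them, so probe code never takes a page
	 * fault on the newly allocated data. The wrapper hides kernels where
	 * vmalloc_sync_all() is not directly usable from modules (cf. the
	 * "vmalloc wrapper on kernel < 2.6.38" fix this page refers to).
	 */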
	wrapper_vmalloc_sync_all();
	return 0;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
cpuhp_online_error:
	{
		int remove_ret;

		remove_ret = cpuhp_state_remove_instance(lttng_hp_prepare,
				&perf_field->cpuhp_prepare.node);
		WARN_ON(remove_ret);
	}
cpuhp_prepare_error:
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
counter_busy:
counter_error:
	{
		int cpu;

		for_each_online_cpu(cpu) {
			if (events[cpu] && !IS_ERR(events[cpu]))
				perf_event_release_kernel(events[cpu]);
		}
		put_online_cpus();
#ifdef CONFIG_HOTPLUG_CPU
		unregister_cpu_notifier(&perf_field->nb);
#endif
	}
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
find_error:
	lttng_remove_context_field(ctx, field);
append_context_error:
	kfree(name_alloc);
name_alloc_error:
	kfree(perf_field);
error_alloc_perf_field:
	kfree(attr);
error_attr:
	lttng_kvfree(events);
	return ret;
}
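
/*
 * Illustrative usage (a sketch, not code from this file): a caller that owns
 * a context chain, e.g. a hypothetical channel "chan" with a
 * "struct lttng_ctx *ctx" member, could expose the hardware cycle counter as
 * a context field. PERF_TYPE_HARDWARE and PERF_COUNT_HW_CPU_CYCLES come from
 * <linux/perf_event.h>:
 *
 *	int ret;
 *
 *	ret = lttng_add_perf_counter_to_ctx(PERF_TYPE_HARDWARE,
 *			PERF_COUNT_HW_CPU_CYCLES,
 *			"perf_cpu_cycles", &chan->ctx);
 *	if (ret)
 *		printk(KERN_WARNING "LTTng: cannot add perf counter context\n");
 */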