Handle perf counter's inability to setup counters before cpu is brought online
[lttng-modules.git] / lttng-context-perf-counters.c
CommitLineData
833ad6a0
MD
1/*
2 * (C) Copyright 2009-2011 -
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 *
5 * LTTng performance monitoring counters (perf-counters) integration module.
6 *
7 * Dual LGPL v2.1/GPL v2 license.
8 */
9
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/err.h>
#include "ltt-events.h"
#include "wrapper/ringbuffer/frontend_types.h"
#include "wrapper/vmalloc.h"
#include "ltt-tracer.h"
833ad6a0 19
f1676205
MD
20static
21size_t perf_counter_get_size(size_t offset)
22{
23 size_t size = 0;
24
25 size += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
26 size += sizeof(uint64_t);
27 return size;
28}
29
833ad6a0
MD
/*
 * Record the current value of this field's perf counter for the CPU the
 * event fires on into the ring buffer, as an aligned uint64_t.
 *
 * The per-CPU event pointer is read once; it may be NULL because the CPU
 * hotplug callback publishes/clears entries of field->u.perf_counter.e
 * (see the barrier() pairing in lttng_perf_counter_cpu_hp_callback).
 */
static
void perf_counter_record(struct lttng_ctx_field *field,
			 struct lib_ring_buffer_ctx *ctx,
			 struct ltt_channel *chan)
{
	struct perf_event *event;
	uint64_t value;

	event = field->u.perf_counter.e[ctx->cpu];
	if (likely(event)) {
		/* Force the PMU to update event->count, then sample it. */
		event->pmu->read(event);
		value = local64_read(&event->count);
	} else {
		/*
		 * Perf chooses not to be clever and not to support enabling a
		 * perf counter before the cpu is brought up. Therefore, we need
		 * to support having events coming (e.g. scheduler events)
		 * before the counter is setup. Write an arbitrary 0 in this
		 * case.
		 */
		value = 0;
	}
	lib_ring_buffer_align_ctx(ctx, ltt_alignof(value));
	chan->ops->event_write(ctx, &value, sizeof(value));
}
55
/*
 * Dummy overflow handler, passed to perf_event_create_kernel_counter()
 * by the callers below.  Counter values are read on demand from
 * perf_counter_record(), so overflow notifications are ignored.
 */
static
void overflow_callback(struct perf_event *event, int nmi,
		       struct perf_sample_data *data,
		       struct pt_regs *regs)
{
}
62
2dccf128
MD
63static
64void lttng_destroy_perf_counter_field(struct lttng_ctx_field *field)
65{
66 struct perf_event **events = field->u.perf_counter.e;
67 int cpu;
68
8289661d 69 get_online_cpus();
2dccf128
MD
70 for_each_online_cpu(cpu)
71 perf_event_release_kernel(events[cpu]);
8289661d
MD
72 put_online_cpus();
73#ifdef CONFIG_HOTPLUG_CPU
74 unregister_cpu_notifier(&field->u.perf_counter.nb);
75#endif
c24a0d71 76 kfree(field->event_field.name);
2dccf128
MD
77 kfree(field->u.perf_counter.attr);
78 kfree(events);
79}
80
8289661d
MD
#ifdef CONFIG_HOTPLUG_CPU

/**
 * lttng_perf_counter_cpu_hp_callback - CPU hotplug callback
 * @nb: notifier block
 * @action: hotplug action to take
 * @hcpu: CPU number
 *
 * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
 *
 * We can setup perf counters when the cpu is online (up prepare seems to be too
 * soon).
 */
static
int __cpuinit lttng_perf_counter_cpu_hp_callback(struct notifier_block *nb,
						 unsigned long action,
						 void *hcpu)
{
	unsigned int cpu = (unsigned long) hcpu;
	struct lttng_ctx_field *field =
		container_of(nb, struct lttng_ctx_field, u.perf_counter.nb);
	struct perf_event **events = field->u.perf_counter.e;
	struct perf_event_attr *attr = field->u.perf_counter.attr;
	struct perf_event *pevent;

	/* Ignore notifications until the field is fully initialized. */
	if (!field->u.perf_counter.hp_enable)
		return NOTIFY_OK;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		pevent = perf_event_create_kernel_counter(attr,
				cpu, NULL, overflow_callback);
		/*
		 * perf_event_create_kernel_counter() reports failure with an
		 * ERR_PTR, never NULL: a plain NULL test would let an error
		 * pointer be published and later dereferenced in
		 * perf_counter_record().
		 */
		if (!pevent || IS_ERR(pevent))
			return NOTIFY_BAD;
		barrier();	/* Create perf counter before setting event */
		events[cpu] = pevent;
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		pevent = events[cpu];
		events[cpu] = NULL;
		barrier();	/* NULLify event before perf counter teardown */
		perf_event_release_kernel(pevent);
		break;
	}
	return NOTIFY_OK;
}

#endif
133
833ad6a0
MD
134int lttng_add_perf_counter_to_ctx(uint32_t type,
135 uint64_t config,
c24a0d71 136 const char *name,
2dccf128 137 struct lttng_ctx **ctx)
833ad6a0
MD
138{
139 struct lttng_ctx_field *field;
140 struct perf_event **events;
141 struct perf_event_attr *attr;
142 int ret;
143 int cpu;
c24a0d71 144 char *name_alloc;
833ad6a0
MD
145
146 events = kzalloc(num_possible_cpus() * sizeof(*events), GFP_KERNEL);
147 if (!events)
148 return -ENOMEM;
149
150 attr = kzalloc(sizeof(*field->u.perf_counter.attr), GFP_KERNEL);
151 if (!attr) {
152 ret = -ENOMEM;
153 goto error_attr;
154 }
155
156 attr->type = type;
157 attr->config = config;
158 attr->size = sizeof(struct perf_event_attr);
159 attr->pinned = 1;
160 attr->disabled = 0;
161
c24a0d71 162 name_alloc = kstrdup(name, GFP_KERNEL);
bef96e48
MD
163 if (!name_alloc) {
164 ret = -ENOMEM;
c24a0d71 165 goto name_alloc_error;
bef96e48 166 }
c24a0d71 167
2dccf128
MD
168 field = lttng_append_context(ctx);
169 if (!field) {
170 ret = -ENOMEM;
8289661d 171 goto append_context_error;
833ad6a0 172 }
8289661d
MD
173
174#ifdef CONFIG_HOTPLUG_CPU
175 field->u.perf_counter.nb.notifier_call =
176 lttng_perf_counter_cpu_hp_callback;
177 field->u.perf_counter.nb.priority = 0;
178 register_cpu_notifier(&field->u.perf_counter.nb);
179#endif
180
181 get_online_cpus();
182 for_each_online_cpu(cpu) {
183 events[cpu] = perf_event_create_kernel_counter(attr,
184 cpu, NULL, overflow_callback);
185 if (!events[cpu]) {
186 ret = -EINVAL;
187 goto counter_error;
188 }
189 }
190 put_online_cpus();
191
2dccf128 192 field->destroy = lttng_destroy_perf_counter_field;
833ad6a0 193
c24a0d71 194 field->event_field.name = name_alloc;
8070f5c0
MD
195 field->event_field.type.atype = atype_integer;
196 field->event_field.type.u.basic.integer.size = sizeof(unsigned long) * CHAR_BIT;
197 field->event_field.type.u.basic.integer.alignment = ltt_alignof(unsigned long) * CHAR_BIT;
198 field->event_field.type.u.basic.integer.signedness = is_signed_type(unsigned long);
199 field->event_field.type.u.basic.integer.reverse_byte_order = 0;
200 field->event_field.type.u.basic.integer.base = 10;
201 field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
f1676205
MD
202 field->get_size = perf_counter_get_size;
203 field->record = perf_counter_record;
833ad6a0
MD
204 field->u.perf_counter.e = events;
205 field->u.perf_counter.attr = attr;
8289661d 206 field->u.perf_counter.hp_enable = 1;
833ad6a0
MD
207
208 wrapper_vmalloc_sync_all();
209 return 0;
210
8289661d 211counter_error:
833ad6a0
MD
212 for_each_online_cpu(cpu) {
213 if (events[cpu])
214 perf_event_release_kernel(events[cpu]);
215 }
8289661d
MD
216 put_online_cpus();
217#ifdef CONFIG_HOTPLUG_CPU
218 unregister_cpu_notifier(&field->u.perf_counter.nb);
219#endif
220 lttng_remove_context_field(ctx, field);
221append_context_error:
222 kfree(name_alloc);
223name_alloc_error:
833ad6a0
MD
224 kfree(attr);
225error_attr:
226 kfree(events);
227 return ret;
228}
229
833ad6a0
MD
/* Module metadata: dual-licensed per the header (LGPL v2.1 / GPL v2). */
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("Linux Trace Toolkit Perf Support");
This page took 0.032522 seconds and 4 git commands to generate.