/*
 * (C) Copyright 2009-2011 -
 *	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * LTTng performance monitoring counters (perf-counters) integration module.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */
10 #include <linux/module.h>
11 #include <linux/slab.h>
12 #include <linux/perf_event.h>
13 #include <linux/list.h>
14 #include <linux/string.h>
15 #include "ltt-events.h"
16 #include "wrapper/ringbuffer/frontend_types.h"
17 #include "wrapper/vmalloc.h"
18 #include "ltt-tracer.h"
21 size_t perf_counter_get_size(size_t offset
)
25 size
+= lib_ring_buffer_align(offset
, ltt_alignof(uint64_t));
26 size
+= sizeof(uint64_t);
31 void perf_counter_record(struct lttng_ctx_field
*field
,
32 struct lib_ring_buffer_ctx
*ctx
,
33 struct ltt_channel
*chan
)
35 struct perf_event
*event
;
38 event
= field
->u
.perf_counter
->e
[ctx
->cpu
];
40 event
->pmu
->read(event
);
41 value
= local64_read(&event
->count
);
44 * Perf chooses not to be clever and not to support enabling a
45 * perf counter before the cpu is brought up. Therefore, we need
46 * to support having events coming (e.g. scheduler events)
47 * before the counter is setup. Write an arbitrary 0 in this
52 lib_ring_buffer_align_ctx(ctx
, ltt_alignof(value
));
53 chan
->ops
->event_write(ctx
, &value
, sizeof(value
));
/*
 * Overflow handler passed to perf_event_create_kernel_counter().
 * Intentionally a no-op: counters are read synchronously in
 * perf_counter_record(), overflow notifications are not used.
 */
static
void overflow_callback(struct perf_event *event, int nmi,
		       struct perf_sample_data *data,
		       struct pt_regs *regs)
{
}
64 void lttng_destroy_perf_counter_field(struct lttng_ctx_field
*field
)
66 struct perf_event
**events
= field
->u
.perf_counter
->e
;
70 for_each_online_cpu(cpu
)
71 perf_event_release_kernel(events
[cpu
]);
73 #ifdef CONFIG_HOTPLUG_CPU
74 unregister_cpu_notifier(&field
->u
.perf_counter
->nb
);
76 kfree(field
->event_field
.name
);
77 kfree(field
->u
.perf_counter
->attr
);
79 kfree(field
->u
.perf_counter
);
#ifdef CONFIG_HOTPLUG_CPU

/**
 *	lttng_perf_counter_cpu_hp_callback - CPU hotplug callback
 *	@nb: notifier block
 *	@action: hotplug action to take
 *	@hcpu: CPU number
 *
 *	Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
 *
 * We can setup perf counters when the cpu is online (up prepare seems to be too
 * soon).
 */
static
int __cpuinit lttng_perf_counter_cpu_hp_callback(struct notifier_block *nb,
						 unsigned long action,
						 void *hcpu)
{
	unsigned int cpu = (unsigned long) hcpu;
	struct lttng_perf_counter_field *perf_field =
		container_of(nb, struct lttng_perf_counter_field, nb);
	struct perf_event **events = perf_field->e;
	struct perf_event_attr *attr = perf_field->attr;
	struct perf_event *pevent;

	if (!perf_field->hp_enable)
		return NOTIFY_OK;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		pevent = perf_event_create_kernel_counter(attr,
				cpu, NULL, overflow_callback);
		if (!pevent || IS_ERR(pevent))
			return NOTIFY_BAD;
		barrier();	/* Create perf counter before setting event */
		events[cpu] = pevent;
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		pevent = events[cpu];
		events[cpu] = NULL;
		barrier();	/* NULLify event before perf counter teardown */
		perf_event_release_kernel(pevent);
		break;
	}
	return NOTIFY_OK;
}

#endif
135 int lttng_add_perf_counter_to_ctx(uint32_t type
,
138 struct lttng_ctx
**ctx
)
140 struct lttng_ctx_field
*field
;
141 struct lttng_perf_counter_field
*perf_field
;
142 struct perf_event
**events
;
143 struct perf_event_attr
*attr
;
148 events
= kzalloc(num_possible_cpus() * sizeof(*events
), GFP_KERNEL
);
152 attr
= kzalloc(sizeof(struct perf_event_attr
), GFP_KERNEL
);
159 attr
->config
= config
;
160 attr
->size
= sizeof(struct perf_event_attr
);
164 perf_field
= kzalloc(sizeof(struct lttng_perf_counter_field
), GFP_KERNEL
);
167 goto error_alloc_perf_field
;
169 perf_field
->e
= events
;
170 perf_field
->attr
= attr
;
172 name_alloc
= kstrdup(name
, GFP_KERNEL
);
175 goto name_alloc_error
;
178 field
= lttng_append_context(ctx
);
181 goto append_context_error
;
183 if (lttng_find_context(*ctx
, name_alloc
)) {
188 #ifdef CONFIG_HOTPLUG_CPU
189 perf_field
->nb
.notifier_call
=
190 lttng_perf_counter_cpu_hp_callback
;
191 perf_field
->nb
.priority
= 0;
192 register_cpu_notifier(&perf_field
->nb
);
196 for_each_online_cpu(cpu
) {
197 events
[cpu
] = perf_event_create_kernel_counter(attr
,
198 cpu
, NULL
, overflow_callback
);
199 if (!events
[cpu
] || IS_ERR(events
[cpu
])) {
206 field
->destroy
= lttng_destroy_perf_counter_field
;
208 field
->event_field
.name
= name_alloc
;
209 field
->event_field
.type
.atype
= atype_integer
;
210 field
->event_field
.type
.u
.basic
.integer
.size
= sizeof(unsigned long) * CHAR_BIT
;
211 field
->event_field
.type
.u
.basic
.integer
.alignment
= ltt_alignof(unsigned long) * CHAR_BIT
;
212 field
->event_field
.type
.u
.basic
.integer
.signedness
= is_signed_type(unsigned long);
213 field
->event_field
.type
.u
.basic
.integer
.reverse_byte_order
= 0;
214 field
->event_field
.type
.u
.basic
.integer
.base
= 10;
215 field
->event_field
.type
.u
.basic
.integer
.encoding
= lttng_encode_none
;
216 field
->get_size
= perf_counter_get_size
;
217 field
->record
= perf_counter_record
;
218 field
->u
.perf_counter
= perf_field
;
219 perf_field
->hp_enable
= 1;
221 wrapper_vmalloc_sync_all();
225 for_each_online_cpu(cpu
) {
226 if (events
[cpu
] && !IS_ERR(events
[cpu
]))
227 perf_event_release_kernel(events
[cpu
]);
230 #ifdef CONFIG_HOTPLUG_CPU
231 unregister_cpu_notifier(&perf_field
->nb
);
234 lttng_remove_context_field(ctx
, field
);
235 append_context_error
:
239 error_alloc_perf_field
:
/* Module metadata. */
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("Linux Trace Toolkit Perf Support");