projects
/
lttng-modules.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
statedump: fix include circular dep
[lttng-modules.git]
/
lttng-context-perf-counters.c
diff --git a/lttng-context-perf-counters.c b/lttng-context-perf-counters.c
index 005c651039e045fb6cb34142b5f8eb355497b515..444c2f92e85822d72e361cb882f3dee5c4849938 100644
(file)
--- a/lttng-context-perf-counters.c
+++ b/lttng-context-perf-counters.c
@@ -12,17 +12,18 @@
#include <linux/perf_event.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/perf_event.h>
#include <linux/list.h>
#include <linux/string.h>
-#include "ltt-events.h"
+#include "lttng-events.h"
#include "wrapper/ringbuffer/frontend_types.h"
#include "wrapper/vmalloc.h"
#include "wrapper/ringbuffer/frontend_types.h"
#include "wrapper/vmalloc.h"
-#include "ltt-tracer.h"
+#include "wrapper/perf.h"
+#include "lttng-tracer.h"
static
size_t perf_counter_get_size(size_t offset)
{
size_t size = 0;
static
size_t perf_counter_get_size(size_t offset)
{
size_t size = 0;
- size += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
+ size += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
size += sizeof(uint64_t);
return size;
}
size += sizeof(uint64_t);
return size;
}
@@ -30,15 +31,19 @@ size_t perf_counter_get_size(size_t offset)
static
void perf_counter_record(struct lttng_ctx_field *field,
struct lib_ring_buffer_ctx *ctx,
static
void perf_counter_record(struct lttng_ctx_field *field,
struct lib_ring_buffer_ctx *ctx,
- struct ltt_channel *chan)
+ struct lttng_channel *chan)
{
struct perf_event *event;
uint64_t value;
event = field->u.perf_counter->e[ctx->cpu];
if (likely(event)) {
{
struct perf_event *event;
uint64_t value;
event = field->u.perf_counter->e[ctx->cpu];
if (likely(event)) {
- event->pmu->read(event);
- value = local64_read(&event->count);
+ if (unlikely(event->state == PERF_EVENT_STATE_ERROR)) {
+ value = 0;
+ } else {
+ event->pmu->read(event);
+ value = local64_read(&event->count);
+ }
} else {
/*
* Perf chooses not to be clever and not to support enabling a
} else {
/*
* Perf chooses not to be clever and not to support enabling a
@@ -49,16 +54,25 @@ void perf_counter_record(struct lttng_ctx_field *field,
*/
value = 0;
}
*/
value = 0;
}
- lib_ring_buffer_align_ctx(ctx, ltt_alignof(value));
+ lib_ring_buffer_align_ctx(ctx, lttng_alignof(value));
chan->ops->event_write(ctx, &value, sizeof(value));
}
chan->ops->event_write(ctx, &value, sizeof(value));
}
+#if defined(CONFIG_PERF_EVENTS) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,99))
+static
+void overflow_callback(struct perf_event *event,
+ struct perf_sample_data *data,
+ struct pt_regs *regs)
+{
+}
+#else
static
void overflow_callback(struct perf_event *event, int nmi,
struct perf_sample_data *data,
struct pt_regs *regs)
{
}
static
void overflow_callback(struct perf_event *event, int nmi,
struct perf_sample_data *data,
struct pt_regs *regs)
{
}
+#endif
static
void lttng_destroy_perf_counter_field(struct lttng_ctx_field *field)
static
void lttng_destroy_perf_counter_field(struct lttng_ctx_field *field)
@@ -110,10 +124,14 @@ int __cpuinit lttng_perf_counter_cpu_hp_callback(struct notifier_block *nb,
switch (action) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
switch (action) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
- pevent = perf_event_create_kernel_counter(attr,
+ pevent = wrapper_perf_event_create_kernel_counter(attr,
cpu, NULL, overflow_callback);
if (!pevent || IS_ERR(pevent))
return NOTIFY_BAD;
cpu, NULL, overflow_callback);
if (!pevent || IS_ERR(pevent))
return NOTIFY_BAD;
+ if (pevent->state == PERF_EVENT_STATE_ERROR) {
+ perf_event_release_kernel(pevent);
+ return NOTIFY_BAD;
+ }
barrier(); /* Create perf counter before setting event */
events[cpu] = pevent;
break;
barrier(); /* Create perf counter before setting event */
events[cpu] = pevent;
break;
@@ -194,12 +212,16 @@ int lttng_add_perf_counter_to_ctx(uint32_t type,
get_online_cpus();
for_each_online_cpu(cpu) {
get_online_cpus();
for_each_online_cpu(cpu) {
- events[cpu] = perf_event_create_kernel_counter(attr,
+ events[cpu] = wrapper_perf_event_create_kernel_counter(attr,
cpu, NULL, overflow_callback);
if (!events[cpu] || IS_ERR(events[cpu])) {
ret = -EINVAL;
goto counter_error;
}
cpu, NULL, overflow_callback);
if (!events[cpu] || IS_ERR(events[cpu])) {
ret = -EINVAL;
goto counter_error;
}
+ if (events[cpu]->state == PERF_EVENT_STATE_ERROR) {
+ ret = -EBUSY;
+ goto counter_busy;
+ }
}
put_online_cpus();
}
put_online_cpus();
@@ -207,9 +229,9 @@ int lttng_add_perf_counter_to_ctx(uint32_t type,
field->event_field.name = name_alloc;
field->event_field.type.atype = atype_integer;
field->event_field.name = name_alloc;
field->event_field.type.atype = atype_integer;
- field->event_field.type.u.basic.integer.size = sizeof(unsigned long) * CHAR_BIT;
- field->event_field.type.u.basic.integer.alignment = ltt_alignof(unsigned long) * CHAR_BIT;
- field->event_field.type.u.basic.integer.signedness = is_signed_type(unsigned long);
+ field->event_field.type.u.basic.integer.size = sizeof(uint64_t) * CHAR_BIT;
+ field->event_field.type.u.basic.integer.alignment = lttng_alignof(uint64_t) * CHAR_BIT;
+ field->event_field.type.u.basic.integer.signedness = is_signed_type(uint64_t);
field->event_field.type.u.basic.integer.reverse_byte_order = 0;
field->event_field.type.u.basic.integer.base = 10;
field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
field->event_field.type.u.basic.integer.reverse_byte_order = 0;
field->event_field.type.u.basic.integer.base = 10;
field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
@@ -221,6 +243,7 @@ int lttng_add_perf_counter_to_ctx(uint32_t type,
wrapper_vmalloc_sync_all();
return 0;
wrapper_vmalloc_sync_all();
return 0;
+counter_busy:
counter_error:
for_each_online_cpu(cpu) {
if (events[cpu] && !IS_ERR(events[cpu]))
counter_error:
for_each_online_cpu(cpu) {
if (events[cpu] && !IS_ERR(events[cpu]))
This page took
0.033248 seconds
and
4
git commands to generate.