X-Git-Url: http://git.lttng.org/?p=lttng-modules.git;a=blobdiff_plain;f=lttng-context-perf-counters.c;h=260e5d0d9a5873cef635b93bbc93548d70104a46;hp=3ae2266f948bd8a747064b6f29615d325343df5a;hb=ea15538d675ad5685c1e4ce1c393f25f682eb06b;hpb=9d7d747f10bab16b3d85915a5b87e10acd2374f0

diff --git a/lttng-context-perf-counters.c b/lttng-context-perf-counters.c
index 3ae2266f..260e5d0d 100644
--- a/lttng-context-perf-counters.c
+++ b/lttng-context-perf-counters.c
@@ -1,10 +1,23 @@
 /*
- * (C) Copyright 2009-2011 -
- * 		Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * lttng-context-perf-counters.c
  *
  * LTTng performance monitoring counters (perf-counters) integration module.
  *
- * Dual LGPL v2.1/GPL v2 license.
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
 #include <linux/module.h>
@@ -12,18 +25,19 @@
 #include <linux/perf_event.h>
 #include <linux/list.h>
 #include <linux/string.h>
-#include "ltt-events.h"
-#include "wrapper/ringbuffer/frontend_types.h"
-#include "wrapper/vmalloc.h"
-#include "wrapper/perf.h"
-#include "ltt-tracer.h"
+#include <linux/cpu.h>
+#include <lttng-events.h>
+#include <wrapper/ringbuffer/frontend_types.h>
+#include <wrapper/vmalloc.h>
+#include <wrapper/perf.h>
+#include <lttng-tracer.h>
 
 static
 size_t perf_counter_get_size(size_t offset)
 {
 	size_t size = 0;
 
-	size += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
+	size += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
 	size += sizeof(uint64_t);
 	return size;
 }
@@ -31,7 +45,7 @@ size_t perf_counter_get_size(size_t offset)
 static
 void perf_counter_record(struct lttng_ctx_field *field,
 			 struct lib_ring_buffer_ctx *ctx,
-			 struct ltt_channel *chan)
+			 struct lttng_channel *chan)
 {
 	struct perf_event *event;
 	uint64_t value;
@@ -54,7 +68,7 @@ void perf_counter_record(struct lttng_ctx_field *field,
 	 */
 		value = 0;
 	}
-	lib_ring_buffer_align_ctx(ctx, ltt_alignof(value));
+	lib_ring_buffer_align_ctx(ctx, lttng_alignof(value));
 	chan->ops->event_write(ctx, &value, sizeof(value));
 }
 
@@ -78,21 +92,80 @@ static
 void lttng_destroy_perf_counter_field(struct lttng_ctx_field *field)
 {
 	struct perf_event **events = field->u.perf_counter->e;
-	int cpu;
 
-	get_online_cpus();
-	for_each_online_cpu(cpu)
-		perf_event_release_kernel(events[cpu]);
-	put_online_cpus();
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+	{
+		int ret;
+
+		ret = cpuhp_state_remove_instance(lttng_hp_online,
+			&field->u.perf_counter->cpuhp_online.node);
+		WARN_ON(ret);
+		ret = cpuhp_state_remove_instance(lttng_hp_prepare,
+			&field->u.perf_counter->cpuhp_prepare.node);
+		WARN_ON(ret);
+	}
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+	{
+		int cpu;
+
+		get_online_cpus();
+		for_each_online_cpu(cpu)
+			perf_event_release_kernel(events[cpu]);
+		put_online_cpus();
 #ifdef CONFIG_HOTPLUG_CPU
-	unregister_cpu_notifier(&field->u.perf_counter->nb);
+		unregister_cpu_notifier(&field->u.perf_counter->nb);
 #endif
+	}
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
 	kfree(field->event_field.name);
 	kfree(field->u.perf_counter->attr);
-	kfree(events);
+	lttng_kvfree(events);
 	kfree(field->u.perf_counter);
 }
 
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+
+int lttng_cpuhp_perf_counter_online(unsigned int cpu,
+		struct lttng_cpuhp_node *node)
+{
+	struct lttng_perf_counter_field *perf_field =
+		container_of(node, struct lttng_perf_counter_field,
+			cpuhp_online);
+	struct perf_event **events = perf_field->e;
+	struct perf_event_attr *attr = perf_field->attr;
+	struct perf_event *pevent;
+
+	pevent = wrapper_perf_event_create_kernel_counter(attr,
+			cpu, NULL, overflow_callback);
+	if (!pevent || IS_ERR(pevent))
+		return -EINVAL;
+	if (pevent->state == PERF_EVENT_STATE_ERROR) {
+		perf_event_release_kernel(pevent);
+		return -EINVAL;
+	}
+	barrier();	/* Create perf counter before setting event */
+	events[cpu] = pevent;
+	return 0;
+}
+
+int lttng_cpuhp_perf_counter_dead(unsigned int cpu,
+		struct lttng_cpuhp_node *node)
+{
+	struct lttng_perf_counter_field *perf_field =
+		container_of(node, struct lttng_perf_counter_field,
+			cpuhp_prepare);
+	struct perf_event **events = perf_field->e;
+	struct perf_event *pevent;
+
+	pevent = events[cpu];
+	events[cpu] = NULL;
+	barrier();	/* NULLify event before perf counter teardown */
+	perf_event_release_kernel(pevent);
+	return 0;
+}
+
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+
 #ifdef CONFIG_HOTPLUG_CPU
 
 /**
@@ -107,7 +180,7 @@ void lttng_destroy_perf_counter_field(struct lttng_ctx_field *field)
  * soon).
  */
 static
-int __cpuinit lttng_perf_counter_cpu_hp_callback(struct notifier_block *nb,
+int lttng_perf_counter_cpu_hp_callback(struct notifier_block *nb,
 						 unsigned long action,
 						 void *hcpu)
 {
@@ -150,6 +223,8 @@ int __cpuinit lttng_perf_counter_cpu_hp_callback(struct notifier_block *nb,
 
 #endif
 
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+
 int lttng_add_perf_counter_to_ctx(uint32_t type,
 				  uint64_t config,
 				  const char *name,
@@ -160,10 +235,9 @@ int lttng_add_perf_counter_to_ctx(uint32_t type,
 	struct perf_event **events;
 	struct perf_event_attr *attr;
 	int ret;
-	int cpu;
 	char *name_alloc;
 
-	events = kzalloc(num_possible_cpus() * sizeof(*events), GFP_KERNEL);
+	events = lttng_kvzalloc(num_possible_cpus() * sizeof(*events), GFP_KERNEL);
 	if (!events)
 		return -ENOMEM;
 
@@ -203,56 +277,92 @@ int lttng_add_perf_counter_to_ctx(uint32_t type,
 		goto find_error;
 	}
 
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+
+	perf_field->cpuhp_prepare.component = LTTNG_CONTEXT_PERF_COUNTERS;
+	ret = cpuhp_state_add_instance(lttng_hp_prepare,
+		&perf_field->cpuhp_prepare.node);
+	if (ret)
+		goto cpuhp_prepare_error;
+
+	perf_field->cpuhp_online.component = LTTNG_CONTEXT_PERF_COUNTERS;
+	ret = cpuhp_state_add_instance(lttng_hp_online,
+		&perf_field->cpuhp_online.node);
+	if (ret)
+		goto cpuhp_online_error;
+
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+	{
+		int cpu;
+
 #ifdef CONFIG_HOTPLUG_CPU
-	perf_field->nb.notifier_call =
-		lttng_perf_counter_cpu_hp_callback;
-	perf_field->nb.priority = 0;
-	register_cpu_notifier(&perf_field->nb);
+		perf_field->nb.notifier_call =
+			lttng_perf_counter_cpu_hp_callback;
+		perf_field->nb.priority = 0;
+		register_cpu_notifier(&perf_field->nb);
 #endif
-
-	get_online_cpus();
-	for_each_online_cpu(cpu) {
-		events[cpu] = wrapper_perf_event_create_kernel_counter(attr,
-				cpu, NULL, overflow_callback);
-		if (!events[cpu] || IS_ERR(events[cpu])) {
-			ret = -EINVAL;
-			goto counter_error;
-		}
-		if (events[cpu]->state == PERF_EVENT_STATE_ERROR) {
-			ret = -EBUSY;
-			goto counter_busy;
+		get_online_cpus();
+		for_each_online_cpu(cpu) {
+			events[cpu] = wrapper_perf_event_create_kernel_counter(attr,
+					cpu, NULL, overflow_callback);
+			if (!events[cpu] || IS_ERR(events[cpu])) {
+				ret = -EINVAL;
+				goto counter_error;
+			}
+			if (events[cpu]->state == PERF_EVENT_STATE_ERROR) {
+				ret = -EBUSY;
+				goto counter_busy;
+			}
 		}
+		put_online_cpus();
+		perf_field->hp_enable = 1;
 	}
-	put_online_cpus();
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
 
 	field->destroy = lttng_destroy_perf_counter_field;
 	field->event_field.name = name_alloc;
 	field->event_field.type.atype = atype_integer;
 	field->event_field.type.u.basic.integer.size = sizeof(uint64_t) * CHAR_BIT;
-	field->event_field.type.u.basic.integer.alignment = ltt_alignof(uint64_t) * CHAR_BIT;
-	field->event_field.type.u.basic.integer.signedness = is_signed_type(uint64_t);
+	field->event_field.type.u.basic.integer.alignment = lttng_alignof(uint64_t) * CHAR_BIT;
+	field->event_field.type.u.basic.integer.signedness = lttng_is_signed_type(uint64_t);
 	field->event_field.type.u.basic.integer.reverse_byte_order = 0;
 	field->event_field.type.u.basic.integer.base = 10;
 	field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
 	field->get_size = perf_counter_get_size;
 	field->record = perf_counter_record;
 	field->u.perf_counter = perf_field;
-	perf_field->hp_enable = 1;
+	lttng_context_update(*ctx);
 	wrapper_vmalloc_sync_all();
 	return 0;
 
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+cpuhp_online_error:
+	{
+		int remove_ret;
+
+		remove_ret = cpuhp_state_remove_instance(lttng_hp_prepare,
+			&perf_field->cpuhp_prepare.node);
+		WARN_ON(remove_ret);
+	}
+cpuhp_prepare_error:
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
 counter_busy:
 counter_error:
-	for_each_online_cpu(cpu) {
-		if (events[cpu] && !IS_ERR(events[cpu]))
-			perf_event_release_kernel(events[cpu]);
-	}
-	put_online_cpus();
+	{
+		int cpu;
+
+		for_each_online_cpu(cpu) {
+			if (events[cpu] && !IS_ERR(events[cpu]))
+				perf_event_release_kernel(events[cpu]);
+		}
+		put_online_cpus();
 #ifdef CONFIG_HOTPLUG_CPU
-	unregister_cpu_notifier(&perf_field->nb);
+		unregister_cpu_notifier(&perf_field->nb);
 #endif
+	}
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
 find_error:
 	lttng_remove_context_field(ctx, field);
 append_context_error:
@@ -262,10 +372,6 @@ name_alloc_error:
 error_alloc_perf_field:
 	kfree(attr);
 error_attr:
-	kfree(events);
+	lttng_kvfree(events);
 	return ret;
 }
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
-MODULE_DESCRIPTION("Linux Trace Toolkit Perf Support");