X-Git-Url: http://git.lttng.org/?a=blobdiff_plain;f=liblttng-ust%2Flttng-context-perf-counters.c;h=3a00a7fabdb8f9de00e34428de0315c37e710b48;hb=dc325c1d762bbbdc1866e590f4649078f8600664;hp=a7e1b63f4ab02f5666fbbe3c5c4a23b8dcda5a19;hpb=4f58f54fdc949a00a05743ecf6d95144192c2ce2;p=lttng-ust.git

diff --git a/liblttng-ust/lttng-context-perf-counters.c b/liblttng-ust/lttng-context-perf-counters.c
index a7e1b63f..3a00a7fa 100644
--- a/liblttng-ust/lttng-context-perf-counters.c
+++ b/liblttng-ust/lttng-context-perf-counters.c
@@ -1,45 +1,40 @@
 /*
- * lttng-context-perf-counters.c
- *
- * LTTng UST performance monitoring counters (perf-counters) integration.
+ * SPDX-License-Identifier: LGPL-2.1-only
  *
  * Copyright (C) 2009-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
  *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; only
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ * LTTng UST performance monitoring counters (perf-counters) integration.
  */
 
+#define _LGPL_SOURCE
+#include 
 #include 
 #include 
 #include 
 #include 
 #include 
 #include 
+#include 
+#include 
 #include 
 #include 
-#include 
+#include 
 #include 
 #include 
-#include 
+#include 
 #include 
 #include 
 #include 
-#include 
+#include 
 #include 
 #include 
 #include 
+#include 
+#include "perf_event.h"
+
+#include "context-internal.h"
 #include "lttng-tracer-core.h"
+#include "ust-events-internal.h"
 
 /*
  * We use a global perf counter key and iterate on per-thread RCU lists
@@ -70,17 +65,125 @@ struct lttng_perf_counter_field {
 
 static pthread_key_t perf_counter_key;
 
+/*
+ * lttng_perf_lock - Protect lttng-ust perf counter data structures
+ *
+ * Nests within the ust_lock, and therefore within the libc dl lock.
+ * Therefore, we need to fix up the TLS before nesting into this lock.
+ * Nests inside the RCU bp read-side lock. Protects against concurrent
+ * fork.
+ */
+static pthread_mutex_t ust_perf_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+/*
+ * Cancel state when grabbing the ust_perf_mutex. Saved when locking,
+ * restored on unlock. Protected by ust_perf_mutex.
+ */
+static int ust_perf_saved_cancelstate;
+
+/*
+ * Track whether we are tracing from a signal handler nested on an
+ * application thread.
+ */
+static DEFINE_URCU_TLS(int, ust_perf_mutex_nest);
+
+/*
+ * Force a read (implies a TLS fixup for dlopen) of the TLS variables.
+ */
+void lttng_ust_fixup_perf_counter_tls(void)
+{
+	asm volatile ("" : : "m" (URCU_TLS(ust_perf_mutex_nest)));
+}
+
+void lttng_perf_lock(void)
+{
+	sigset_t sig_all_blocked, orig_mask;
+	int ret, oldstate;
+
+	ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
+	if (ret) {
+		ERR("pthread_setcancelstate: %s", strerror(ret));
+	}
+	sigfillset(&sig_all_blocked);
+	ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
+	if (ret) {
+		ERR("pthread_sigmask: %s", strerror(ret));
+	}
+	if (!URCU_TLS(ust_perf_mutex_nest)++) {
+		/*
+		 * Ensure the compiler doesn't move the store after the close()
+		 * call in case close() would be marked as leaf.
+		 */
+		cmm_barrier();
+		pthread_mutex_lock(&ust_perf_mutex);
+		ust_perf_saved_cancelstate = oldstate;
+	}
+	ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
+	if (ret) {
+		ERR("pthread_sigmask: %s", strerror(ret));
+	}
+}
+
+void lttng_perf_unlock(void)
+{
+	sigset_t sig_all_blocked, orig_mask;
+	int ret, newstate, oldstate;
+	bool restore_cancel = false;
+
+	sigfillset(&sig_all_blocked);
+	ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
+	if (ret) {
+		ERR("pthread_sigmask: %s", strerror(ret));
+	}
+	/*
+	 * Ensure the compiler doesn't move the store before the close()
+	 * call, in case close() would be marked as leaf.
+	 */
+	cmm_barrier();
+	if (!--URCU_TLS(ust_perf_mutex_nest)) {
+		newstate = ust_perf_saved_cancelstate;
+		restore_cancel = true;
+		pthread_mutex_unlock(&ust_perf_mutex);
+	}
+	ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
+	if (ret) {
+		ERR("pthread_sigmask: %s", strerror(ret));
+	}
+	if (restore_cancel) {
+		ret = pthread_setcancelstate(newstate, &oldstate);
+		if (ret) {
+			ERR("pthread_setcancelstate: %s", strerror(ret));
+		}
+	}
+}
+
 static
-size_t perf_counter_get_size(struct lttng_ctx_field *field, size_t offset)
+size_t perf_counter_get_size(struct lttng_ust_ctx_field *field, size_t offset)
 {
 	size_t size = 0;
 
-	size += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
+	size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint64_t));
 	size += sizeof(uint64_t);
 	return size;
 }
 
-#if defined(__x86_64__) || defined(__i386__)
+static
+uint64_t read_perf_counter_syscall(
+		struct lttng_perf_counter_thread_field *thread_field)
+{
+	uint64_t count;
+
+	if (caa_unlikely(thread_field->fd < 0))
+		return 0;
+
+	if (caa_unlikely(read(thread_field->fd, &count, sizeof(count))
+			< sizeof(count)))
+		return 0;
+
+	return count;
+}
+
+#if defined(LTTNG_UST_ARCH_X86)
 
 static
 uint64_t rdpmc(unsigned int counter)
@@ -92,13 +195,17 @@ uint64_t rdpmc(unsigned int counter)
 	return low | ((uint64_t) high) << 32;
 }
 
-static bool arch_perf_use_read(void)
+static
+bool has_rdpmc(struct perf_event_mmap_page *pc)
 {
-	return false;
+	if (caa_unlikely(!pc->cap_bit0_is_deprecated))
+		return false;
+	/* Since Linux kernel 3.12. */
+	return pc->cap_user_rdpmc;
 }
 
 static
-uint64_t read_perf_counter(
+uint64_t arch_read_perf_counter(
 		struct lttng_perf_counter_thread_field *thread_field)
 {
 	uint32_t seq, idx;
@@ -113,7 +220,7 @@ uint64_t read_perf_counter(
 		cmm_barrier();
 
 		idx = pc->index;
-		if (idx) {
+		if (caa_likely(has_rdpmc(pc) && idx)) {
 			int64_t pmcval;
 
 			pmcval = rdpmc(idx - 1);
@@ -122,7 +229,8 @@ uint64_t read_perf_counter(
 			pmcval >>= 64 - pc->pmc_width;
 			count = pc->offset + pmcval;
 		} else {
-			count = 0;
+			/* Fall back on the system call if rdpmc cannot be used. */
+			return read_perf_counter_syscall(thread_field);
 		}
 		cmm_barrier();
 	} while (CMM_LOAD_SHARED(pc->lock) != seq);
@@ -130,34 +238,33 @@ uint64_t read_perf_counter(
 	return count;
 }
 
-#elif defined (__ARM_ARCH_7A__)
-
-static bool arch_perf_use_read(void)
-{
-	return true;
-}
-
 static
-uint64_t read_perf_counter(
-		struct lttng_perf_counter_thread_field *thread_field)
+int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field)
 {
-	uint64_t count;
+	struct perf_event_mmap_page *pc = thread_field->pc;
 
-	if (caa_unlikely(thread_field->fd < 0))
+	if (!pc)
 		return 0;
+	return !has_rdpmc(pc);
+}
 
-	if (caa_unlikely(read(thread_field->fd, &count, sizeof(count))
-			< sizeof(count)))
-		return 0;
+#else
 
-	return count;
+/* Generic (slow) implementation using a read system call. */
+static
+uint64_t arch_read_perf_counter(
+		struct lttng_perf_counter_thread_field *thread_field)
+{
+	return read_perf_counter_syscall(thread_field);
 }
 
-#else /* defined(__x86_64__) || defined(__i386__) || defined(__ARM_ARCH_7A__) */
-
-#error "Perf event counters are only supported on x86 and ARMv7 so far."
+static
+int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field)
+{
+	return 1;
+}
 
-#endif /* #else defined(__x86_64__) || defined(__i386__) || defined(__ARM_ARCH_7A__) */
+#endif
 
 static
 int sys_perf_event_open(struct perf_event_attr *attr,
@@ -194,9 +301,7 @@ void close_perf_fd(int fd)
 	}
 }
 
-static
-struct perf_event_mmap_page *setup_perf(
-		struct lttng_perf_counter_thread_field *thread_field)
+static void setup_perf(struct lttng_perf_counter_thread_field *thread_field)
 {
 	void *perf_addr;
 
@@ -204,13 +309,12 @@ struct perf_event_mmap_page *setup_perf(
 			PROT_READ, MAP_SHARED, thread_field->fd, 0);
 	if (perf_addr == MAP_FAILED)
 		perf_addr = NULL;
+	thread_field->pc = perf_addr;
 
-	if (!arch_perf_use_read()) {
+	if (!arch_perf_keep_fd(thread_field)) {
 		close_perf_fd(thread_field->fd);
 		thread_field->fd = -1;
 	}
-
-	return perf_addr;
 }
 
 static
@@ -285,17 +389,17 @@ struct lttng_perf_counter_thread_field *
 	thread_field->field = perf_field;
 	thread_field->fd = open_perf_fd(&perf_field->attr);
 	if (thread_field->fd >= 0)
-		thread_field->pc = setup_perf(thread_field);
+		setup_perf(thread_field);
 	/*
 	 * Note: thread_field->pc can be NULL if setup_perf() fails.
 	 * Also, thread_field->fd can be -1 if open_perf_fd() fails.
 	 */
-	ust_lock_nocheck();
+	lttng_perf_lock();
 	cds_list_add_rcu(&thread_field->rcu_field_node,
 			&perf_thread->rcu_field_list);
 	cds_list_add(&thread_field->thread_field_node,
 			&perf_field->thread_field_list);
-	ust_unlock();
+	lttng_perf_unlock();
 skip:
 	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
 	if (ret)
@@ -323,39 +427,36 @@ struct lttng_perf_counter_thread_field *
 }
 
 static
-uint64_t wrapper_perf_counter_read(struct lttng_ctx_field *field)
+uint64_t wrapper_perf_counter_read(struct lttng_ust_ctx_field *field)
 {
 	struct lttng_perf_counter_field *perf_field;
 	struct lttng_perf_counter_thread_field *perf_thread_field;
 
-	perf_field = field->u.perf_counter;
+	perf_field = (struct lttng_perf_counter_field *) field->priv;
 	perf_thread_field = get_thread_field(perf_field);
-	return read_perf_counter(perf_thread_field);
+	return arch_read_perf_counter(perf_thread_field);
 }
 
 static
-void perf_counter_record(struct lttng_ctx_field *field,
+void perf_counter_record(struct lttng_ust_ctx_field *field,
 		struct lttng_ust_lib_ring_buffer_ctx *ctx,
-		struct lttng_channel *chan)
+		struct lttng_ust_channel_buffer *chan)
 {
 	uint64_t value;
 
 	value = wrapper_perf_counter_read(field);
-	lib_ring_buffer_align_ctx(ctx, lttng_alignof(value));
+	lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(value));
 	chan->ops->event_write(ctx, &value, sizeof(value));
 }
 
 static
-void perf_counter_get_value(struct lttng_ctx_field *field,
-		struct lttng_ctx_value *value)
+void perf_counter_get_value(struct lttng_ust_ctx_field *field,
+		struct lttng_ust_ctx_value *value)
 {
-	uint64_t v;
-
-	v = wrapper_perf_counter_read(field);
-	value->u.s64 = v;
+	value->u.s64 = wrapper_perf_counter_read(field);
 }
 
-/* Called with UST lock held */
+/* Called with perf lock held */
 static
 void lttng_destroy_perf_thread_field(
 		struct lttng_perf_counter_thread_field *thread_field)
@@ -373,35 +474,39 @@ void lttng_destroy_perf_thread_key(void *_key)
 	struct lttng_perf_counter_thread *perf_thread = _key;
 	struct lttng_perf_counter_thread_field *pos, *p;
 
-	ust_lock_nocheck();
+	lttng_perf_lock();
 	cds_list_for_each_entry_safe(pos, p, &perf_thread->rcu_field_list,
 			rcu_field_node)
 		lttng_destroy_perf_thread_field(pos);
-	ust_unlock();
+	lttng_perf_unlock();
 	free(perf_thread);
 }
 
 /* Called with UST lock held */
 static
-void lttng_destroy_perf_counter_field(struct lttng_ctx_field *field)
+void lttng_destroy_perf_counter_field(struct lttng_ust_ctx_field *field)
 {
 	struct lttng_perf_counter_field *perf_field;
 	struct lttng_perf_counter_thread_field *pos, *p;
 
-	free((char *) field->event_field.name);
-	perf_field = field->u.perf_counter;
+	free((char *) field->event_field->name);
+	perf_field = (struct lttng_perf_counter_field *) field->priv;
 	/*
 	 * This put is performed when no thread can concurrently
 	 * perform a "get", thanks to the urcu-bp grace
-	 * period.
+	 * period. Holding the lttng perf lock protects against
+	 * concurrent modification of the per-thread thread field
+	 * list.
 	 */
+	lttng_perf_lock();
 	cds_list_for_each_entry_safe(pos, p, &perf_field->thread_field_list,
 			thread_field_node)
 		lttng_destroy_perf_thread_field(pos);
+	lttng_perf_unlock();
 	free(perf_field);
 }
 
-#ifdef __ARM_ARCH_7A__
+#ifdef LTTNG_UST_ARCH_ARMV7
 
 static
 int perf_get_exclude_kernel(void)
@@ -409,7 +514,7 @@ int perf_get_exclude_kernel(void)
 	return 0;
 }
 
-#else /* __ARM_ARCH_7A__ */
+#else /* LTTNG_UST_ARCH_ARMV7 */
 
 static
 int perf_get_exclude_kernel(void)
@@ -417,15 +522,16 @@ int perf_get_exclude_kernel(void)
 	return 1;
 }
 
-#endif /* __ARM_ARCH_7A__ */
+#endif /* LTTNG_UST_ARCH_ARMV7 */
 
 /* Called with UST lock held */
 int lttng_add_perf_counter_to_ctx(uint32_t type,
 		uint64_t config,
 		const char *name,
-		struct lttng_ctx **ctx)
+		struct lttng_ust_ctx **ctx)
 {
-	struct lttng_ctx_field *field;
+	struct lttng_ust_ctx_field *field;
+	struct lttng_ust_type_common *ust_type;
 	struct lttng_perf_counter_field *perf_field;
 	char *name_alloc;
 	int ret;
@@ -440,6 +546,14 @@ int lttng_add_perf_counter_to_ctx(uint32_t type,
 		ret = -ENOMEM;
 		goto perf_field_alloc_error;
 	}
+	ust_type = lttng_ust_create_type_integer(sizeof(uint64_t) * CHAR_BIT,
+			lttng_ust_rb_alignof(uint64_t) * CHAR_BIT,
+			lttng_ust_is_signed_type(uint64_t),
+			BYTE_ORDER, 10);
+	if (!ust_type) {
+		ret = -ENOMEM;
+		goto type_alloc_error;
+	}
 	field = lttng_append_context(ctx);
 	if (!field) {
 		ret = -ENOMEM;
@@ -452,17 +566,8 @@ int lttng_add_perf_counter_to_ctx(uint32_t type,
 
 	field->destroy = lttng_destroy_perf_counter_field;
 
-	field->event_field.name = name_alloc;
-	field->event_field.type.atype = atype_integer;
-	field->event_field.type.u.basic.integer.size =
-		sizeof(uint64_t) * CHAR_BIT;
-	field->event_field.type.u.basic.integer.alignment =
-		lttng_alignof(uint64_t) * CHAR_BIT;
-	field->event_field.type.u.basic.integer.signedness =
-		lttng_is_signed_type(uint64_t);
-	field->event_field.type.u.basic.integer.reverse_byte_order = 0;
-	field->event_field.type.u.basic.integer.base = 10;
-	field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
+	field->event_field->name = name_alloc;
+	field->event_field->type = ust_type;
 	field->get_size = perf_counter_get_size;
 	field->record = perf_counter_record;
 	field->get_value = perf_counter_get_value;
@@ -471,7 +576,7 @@ int lttng_add_perf_counter_to_ctx(uint32_t type,
 	perf_field->attr.config = config;
 	perf_field->attr.exclude_kernel = perf_get_exclude_kernel();
 	CDS_INIT_LIST_HEAD(&perf_field->thread_field_list);
-	field->u.perf_counter = perf_field;
+	field->priv = perf_field;
 
 	/* Ensure that this perf counter can be used in this process. */
 	ret = open_perf_fd(&perf_field->attr);
@@ -494,6 +599,8 @@ setup_error:
 find_error:
 	lttng_remove_context_field(ctx, field);
 append_context_error:
+	lttng_ust_destroy_type(ust_type);
+type_alloc_error:
 	free(perf_field);
 perf_field_alloc_error:
 	free(name_alloc);
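
Note on the read(2) fallback this patch generalizes (read_perf_counter_syscall): it relies only on the public perf_event_open(2) interface, so it can be exercised outside lttng-ust. The standalone program below is a minimal sketch of that path, not part of the patch; the counter choice (PERF_COUNT_HW_CPU_CYCLES) and the local perf_event_open() wrapper are illustrative assumptions of this note, though the raw-syscall invocation mirrors the sys_perf_event_open() helper visible in the diff above.

#include <inttypes.h>
#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

/* No glibc wrapper exists for perf_event_open(2); invoke the raw syscall. */
static int perf_event_open(struct perf_event_attr *attr,
		pid_t pid, int cpu, int group_fd, unsigned long flags)
{
	return syscall(SYS_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_CPU_CYCLES;	/* Arbitrary example counter. */
	attr.exclude_kernel = 1;	/* Count user space only, as on non-ARMv7. */

	/* pid = 0, cpu = -1: monitor the calling thread on any CPU. */
	fd = perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	/* Sample the counter with read(2), as the fallback path does. */
	if (read(fd, &count, sizeof(count)) < (ssize_t) sizeof(count)) {
		perror("read");
		close(fd);
		return 1;
	}
	printf("cpu-cycles: %" PRIu64 "\n", count);
	close(fd);
	return 0;
}

As the diff shows, lttng-ust still prefers the mmap'd rdpmc path when the kernel advertises cap_user_rdpmc (Linux 3.12+), since it avoids a system call per sampled context field; the read(2) path is the portable fallback that this change extends from ARMv7 to every architecture.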