X-Git-Url: http://git.lttng.org/?a=blobdiff_plain;f=liblttng-ust%2Flttng-context-perf-counters.c;h=12d4ab3b2a1a2760f4da5bc29fbd83424aea896e;hb=fb31eb73d8a4a6d9784ed5c335b7fa3b9684108c;hp=a7e1b63f4ab02f5666fbbe3c5c4a23b8dcda5a19;hpb=4f58f54fdc949a00a05743ecf6d95144192c2ce2;p=lttng-ust.git

diff --git a/liblttng-ust/lttng-context-perf-counters.c b/liblttng-ust/lttng-context-perf-counters.c
index a7e1b63f..12d4ab3b 100644
--- a/liblttng-ust/lttng-context-perf-counters.c
+++ b/liblttng-ust/lttng-context-perf-counters.c
@@ -20,15 +20,17 @@
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
+#define _LGPL_SOURCE
 #include
 #include
 #include
 #include
 #include
 #include
+#include
+#include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -39,6 +41,8 @@
 #include
 #include
 #include
+#include
+#include "perf_event.h"
 #include "lttng-tracer-core.h"
 
 /*
@@ -70,6 +74,98 @@ struct lttng_perf_counter_field {
 
 static pthread_key_t perf_counter_key;
 
+/*
+ * lttng_perf_lock - Protect lttng-ust perf counter data structures
+ *
+ * Nests within the ust_lock, and therefore within the libc dl lock.
+ * Therefore, we need to fix up the TLS before nesting into this lock.
+ * Nests inside the RCU bp read-side lock. Protects against concurrent
+ * fork.
+ */
+static pthread_mutex_t ust_perf_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+/*
+ * Cancel state when grabbing the ust_perf_mutex. Saved when locking,
+ * restored on unlock. Protected by ust_perf_mutex.
+ */
+static int ust_perf_saved_cancelstate;
+
+/*
+ * Track whether we are tracing from a signal handler nested on an
+ * application thread.
+ */
+static DEFINE_URCU_TLS(int, ust_perf_mutex_nest);
+
+/*
+ * Force a read (implying a TLS fixup for dlopen) of the TLS variables.
+ */
+void lttng_ust_fixup_perf_counter_tls(void)
+{
+	asm volatile ("" : : "m" (URCU_TLS(ust_perf_mutex_nest)));
+}
+
+void lttng_perf_lock(void)
+{
+	sigset_t sig_all_blocked, orig_mask;
+	int ret, oldstate;
+
+	ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
+	if (ret) {
+		ERR("pthread_setcancelstate: %s", strerror(ret));
+	}
+	sigfillset(&sig_all_blocked);
+	ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
+	if (ret) {
+		ERR("pthread_sigmask: %s", strerror(ret));
+	}
+	if (!URCU_TLS(ust_perf_mutex_nest)++) {
+		/*
+		 * Ensure the compiler doesn't move the store after the
+		 * close() call, in case close() would be marked as leaf.
+		 */
+		cmm_barrier();
+		pthread_mutex_lock(&ust_perf_mutex);
+		ust_perf_saved_cancelstate = oldstate;
+	}
+	ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
+	if (ret) {
+		ERR("pthread_sigmask: %s", strerror(ret));
+	}
+}
+
+void lttng_perf_unlock(void)
+{
+	sigset_t sig_all_blocked, orig_mask;
+	int ret, newstate, oldstate;
+	bool restore_cancel = false;
+
+	sigfillset(&sig_all_blocked);
+	ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
+	if (ret) {
+		ERR("pthread_sigmask: %s", strerror(ret));
+	}
+	/*
+	 * Ensure the compiler doesn't move the store before the close()
+	 * call, in case close() would be marked as leaf.
+	 */
+	cmm_barrier();
+	if (!--URCU_TLS(ust_perf_mutex_nest)) {
+		newstate = ust_perf_saved_cancelstate;
+		restore_cancel = true;
+		pthread_mutex_unlock(&ust_perf_mutex);
+	}
+	ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
+	if (ret) {
+		ERR("pthread_sigmask: %s", strerror(ret));
+	}
+	if (restore_cancel) {
+		ret = pthread_setcancelstate(newstate, &oldstate);
+		if (ret) {
+			ERR("pthread_setcancelstate: %s", strerror(ret));
+		}
+	}
+}
+
 static
 size_t perf_counter_get_size(struct lttng_ctx_field *field, size_t offset)
 {
@@ -80,6 +176,22 @@ size_t perf_counter_get_size(struct lttng_ctx_field *field, size_t offset)
 	return size;
 }
 
+static
+uint64_t read_perf_counter_syscall(
+		struct lttng_perf_counter_thread_field *thread_field)
+{
+	uint64_t count;
+
+	if (caa_unlikely(thread_field->fd < 0))
+		return 0;
+
+	if (caa_unlikely(read(thread_field->fd, &count, sizeof(count))
+			< sizeof(count)))
+		return 0;
+
+	return count;
+}
+
 #if defined(__x86_64__) || defined(__i386__)
 
 static
@@ -92,13 +204,17 @@ uint64_t rdpmc(unsigned int counter)
 	return low | ((uint64_t) high) << 32;
 }
 
-static bool arch_perf_use_read(void)
+static
+bool has_rdpmc(struct perf_event_mmap_page *pc)
 {
-	return false;
+	if (caa_unlikely(!pc->cap_bit0_is_deprecated))
+		return false;
+	/* Since Linux kernel 3.12. */
+	return pc->cap_user_rdpmc;
 }
 
 static
-uint64_t read_perf_counter(
+uint64_t arch_read_perf_counter(
 		struct lttng_perf_counter_thread_field *thread_field)
 {
 	uint32_t seq, idx;
@@ -113,7 +229,7 @@ uint64_t read_perf_counter(
 		cmm_barrier();
 
 		idx = pc->index;
-		if (idx) {
+		if (caa_likely(has_rdpmc(pc) && idx)) {
 			int64_t pmcval;
 
 			pmcval = rdpmc(idx - 1);
@@ -122,7 +238,8 @@ uint64_t read_perf_counter(
 			pmcval >>= 64 - pc->pmc_width;
 			count = pc->offset + pmcval;
 		} else {
-			count = 0;
+			/* Fall back on the system call if rdpmc cannot be used. */
+			return read_perf_counter_syscall(thread_field);
 		}
 		cmm_barrier();
 	} while (CMM_LOAD_SHARED(pc->lock) != seq);
@@ -130,34 +247,33 @@ uint64_t read_perf_counter(
 	return count;
 }
 
-#elif defined (__ARM_ARCH_7A__)
-
-static bool arch_perf_use_read(void)
-{
-	return true;
-}
-
 static
-uint64_t read_perf_counter(
-		struct lttng_perf_counter_thread_field *thread_field)
+int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field)
 {
-	uint64_t count;
+	struct perf_event_mmap_page *pc = thread_field->pc;
 
-	if (caa_unlikely(thread_field->fd < 0))
+	if (!pc)
 		return 0;
+	return !has_rdpmc(pc);
+}
 
-	if (caa_unlikely(read(thread_field->fd, &count, sizeof(count))
-			< sizeof(count)))
-		return 0;
+#else
 
-	return count;
+/* Generic (slow) implementation using a read system call. */
+static
+uint64_t arch_read_perf_counter(
+		struct lttng_perf_counter_thread_field *thread_field)
+{
+	return read_perf_counter_syscall(thread_field);
 }
 
-#else /* defined(__x86_64__) || defined(__i386__) || defined(__ARM_ARCH_7A__) */
-
-#error "Perf event counters are only supported on x86 and ARMv7 so far."
+static
+int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field)
+{
+	return 1;
+}
 
-#endif /* #else defined(__x86_64__) || defined(__i386__) || defined(__ARM_ARCH_7A__) */
+#endif
 
 static
 int sys_perf_event_open(struct perf_event_attr *attr,
@@ -194,9 +310,7 @@ void close_perf_fd(int fd)
 	}
 }
 
-static
-struct perf_event_mmap_page *setup_perf(
-		struct lttng_perf_counter_thread_field *thread_field)
+static void setup_perf(struct lttng_perf_counter_thread_field *thread_field)
 {
 	void *perf_addr;
 
@@ -204,13 +318,12 @@ struct perf_event_mmap_page *setup_perf(
 			PROT_READ, MAP_SHARED, thread_field->fd, 0);
 	if (perf_addr == MAP_FAILED)
 		perf_addr = NULL;
+	thread_field->pc = perf_addr;
 
-	if (!arch_perf_use_read()) {
+	if (!arch_perf_keep_fd(thread_field)) {
 		close_perf_fd(thread_field->fd);
 		thread_field->fd = -1;
 	}
-
-	return perf_addr;
 }
 
 static
@@ -285,17 +398,17 @@ struct lttng_perf_counter_thread_field *
 	thread_field->field = perf_field;
 	thread_field->fd = open_perf_fd(&perf_field->attr);
 	if (thread_field->fd >= 0)
-		thread_field->pc = setup_perf(thread_field);
+		setup_perf(thread_field);
 	/*
 	 * Note: thread_field->pc can be NULL if setup_perf() fails.
 	 * Also, thread_field->fd can be -1 if open_perf_fd() fails.
 	 */
-	ust_lock_nocheck();
+	lttng_perf_lock();
 	cds_list_add_rcu(&thread_field->rcu_field_node,
 			&perf_thread->rcu_field_list);
 	cds_list_add(&thread_field->thread_field_node,
 			&perf_field->thread_field_list);
-	ust_unlock();
+	lttng_perf_unlock();
 skip:
 	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
 	if (ret)
@@ -330,7 +443,7 @@ uint64_t wrapper_perf_counter_read(struct lttng_ctx_field *field)
 
 	perf_field = field->u.perf_counter;
 	perf_thread_field = get_thread_field(perf_field);
-	return read_perf_counter(perf_thread_field);
+	return arch_read_perf_counter(perf_thread_field);
 }
 
 static
@@ -349,13 +462,10 @@ static
 void perf_counter_get_value(struct lttng_ctx_field *field,
 		struct lttng_ctx_value *value)
 {
-	uint64_t v;
-
-	v = wrapper_perf_counter_read(field);
-	value->u.s64 = v;
+	value->u.s64 = wrapper_perf_counter_read(field);
 }
 
-/* Called with UST lock held */
+/* Called with perf lock held */
 static
 void lttng_destroy_perf_thread_field(
 		struct lttng_perf_counter_thread_field *thread_field)
@@ -373,11 +483,11 @@ void lttng_destroy_perf_thread_key(void *_key)
 	struct lttng_perf_counter_thread *perf_thread = _key;
 	struct lttng_perf_counter_thread_field *pos, *p;
 
-	ust_lock_nocheck();
+	lttng_perf_lock();
 	cds_list_for_each_entry_safe(pos, p, &perf_thread->rcu_field_list,
 			rcu_field_node)
 		lttng_destroy_perf_thread_field(pos);
-	ust_unlock();
+	lttng_perf_unlock();
 	free(perf_thread);
 }
 
@@ -393,11 +503,15 @@ void lttng_destroy_perf_counter_field(struct lttng_ctx_field *field)
 	/*
 	 * This put is performed when no threads can perform a "get"
 	 * concurrently, thanks to the urcu-bp grace
-	 * period.
+	 * period. Holding the lttng perf lock protects against
+	 * concurrent modification of the per-thread thread field
+	 * list.
 	 */
+	lttng_perf_lock();
 	cds_list_for_each_entry_safe(pos, p, &perf_field->thread_field_list,
 			thread_field_node)
 		lttng_destroy_perf_thread_field(pos);
+	lttng_perf_unlock();
 	free(perf_field);
 }
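A note on the locking scheme added above: lttng_perf_lock() blocks all signals and keeps a per-thread nesting count, so a signal handler firing on a thread that already holds the lock only increments the count instead of re-acquiring the mutex and deadlocking against itself. The following is a minimal standalone sketch of that pattern; the names nestable_lock/nestable_unlock are illustrative (not lttng-ust API), and the cancel-state saving, ERR() reporting and cmm_barrier() compiler barriers of the patch are elided.

/*
 * Hypothetical sketch of the nestable, signal-blocking lock used by
 * lttng_perf_lock()/lttng_perf_unlock() above.  Error handling elided.
 */
#include <pthread.h>
#include <signal.h>

static pthread_mutex_t state_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int state_nest;		/* per-thread nesting count */

static void nestable_lock(void)
{
	sigset_t all_blocked, orig_mask;

	/* Block signals so a handler cannot race the nesting count. */
	sigfillset(&all_blocked);
	(void) pthread_sigmask(SIG_SETMASK, &all_blocked, &orig_mask);
	if (!state_nest++)
		pthread_mutex_lock(&state_mutex);	/* outermost caller */
	(void) pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
}

static void nestable_unlock(void)
{
	sigset_t all_blocked, orig_mask;

	sigfillset(&all_blocked);
	(void) pthread_sigmask(SIG_SETMASK, &all_blocked, &orig_mask);
	if (!--state_nest)
		pthread_mutex_unlock(&state_mutex);	/* outermost caller */
	(void) pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
}

The patch's cmm_barrier() calls additionally keep the compiler from moving the nesting-count store across the mutex operation; the sketch relies on the pthread calls being opaque to the compiler.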
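The fallback read path the patch introduces (read_perf_counter_syscall(), used on x86 when rdpmc is unavailable and unconditionally by the generic arch_read_perf_counter()) amounts to one read(2) on the perf event file descriptor per sampled value. Below is a self-contained sketch of that path; it uses <linux/perf_event.h> directly rather than the local "perf_event.h" copy the patch adds, and the choice of PERF_COUNT_HW_CPU_CYCLES is illustrative.

/* Hypothetical standalone demo of the read(2) fallback; not part of lttng-ust. */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Same raw syscall wrapper technique as sys_perf_event_open() in the patch. */
static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
		int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	/* pid = 0, cpu = -1: count for the calling thread on any CPU. */
	fd = perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	/* The slow path kept by the patch: one read(2) per sample. */
	if (read(fd, &count, sizeof(count)) < (ssize_t) sizeof(count)) {
		perror("read");
		close(fd);
		return 1;
	}
	printf("cycles: %" PRIu64 "\n", count);
	close(fd);
	return 0;
}

On x86 the patch only takes this path when the mmap'ed page reports rdpmc as unusable (cap_bit0_is_deprecated or cap_user_rdpmc cleared); arch_perf_keep_fd() keeps the file descriptor open in exactly that case, otherwise setup_perf() closes it and every read goes through the fast rdpmc/seqlock loop.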