From: Mathieu Desnoyers
Date: Mon, 7 Oct 2019 19:45:46 +0000 (-0400)
Subject: Fix: lttng perf counter deadlock
X-Git-Tag: v2.12.0-rc1~17
X-Git-Url: http://git.lttng.org/?p=lttng-ust.git;a=commitdiff_plain;h=20142124a2e34c287a277bbd7ef5b28697735d10

Fix: lttng perf counter deadlock

Using ust_lock() to lazily set up the perf counters introduces a
scenario where this lock is nested within the urcu-bp read-side lock.
However, the LTTNG_UST_WAIT_QUIESCENT ust command requires that the
urcu-bp synchronize_rcu() be performed with ust_lock() held. This
inter-dependency introduces a deadlock:

    Thread A                          Thread B

    rcu_read_lock()
                                      ust_lock()
                                      synchronize_rcu()
                                        (blocked by A's read-side lock)
    ust_lock() <-- deadlock
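The inversion is easy to reproduce outside of lttng-ust. A hypothetical
standalone reproducer (not part of this patch) that models ust_lock() as
a plain mutex and assumes liburcu-bp (link with -lurcu-bp); with the
interleaving shown above, A blocks on the mutex held by B while B blocks
in synchronize_rcu() waiting for A's read-side critical section:

    #include <pthread.h>
    #include <urcu-bp.h>

    static pthread_mutex_t ust_lock_model = PTHREAD_MUTEX_INITIALIZER;

    static void *thread_a(void *arg)
    {
            rcu_read_lock();                      /* urcu-bp read-side C.S. */
            pthread_mutex_lock(&ust_lock_model);  /* blocks: B holds it */
            pthread_mutex_unlock(&ust_lock_model);
            rcu_read_unlock();
            return NULL;
    }

    static void *thread_b(void *arg)
    {
            pthread_mutex_lock(&ust_lock_model);
            synchronize_rcu();                    /* blocks: waits for A's
                                                     read-side C.S. to end */
            pthread_mutex_unlock(&ust_lock_model);
            return NULL;
    }

    int main(void)
    {
            pthread_t a, b;

            pthread_create(&b, NULL, thread_b, NULL);
            pthread_create(&a, NULL, thread_a, NULL);
            pthread_join(a, NULL);   /* with the timing above, never returns */
            pthread_join(b, NULL);
            return 0;
    }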
Introduce a new lttng_perf_lock to protect the lttng perf context data
structures from concurrent modifications and from fork. This lock can
be nested within the ust_lock, but never the opposite, which removes
the circular locking dependency involving urcu-bp.

Fixes: #1202
Signed-off-by: Mathieu Desnoyers
---

diff --git a/liblttng-ust/lttng-context-perf-counters.c b/liblttng-ust/lttng-context-perf-counters.c
index 4816f89b..a6ff55b6 100644
--- a/liblttng-ust/lttng-context-perf-counters.c
+++ b/liblttng-ust/lttng-context-perf-counters.c
@@ -39,6 +39,7 @@
 #include <...>
 #include <...>
 #include <...>
+#include <urcu/tls-compat.h>
 #include "perf_event.h"
 #include "lttng-tracer-core.h"
 
@@ -71,6 +72,98 @@ struct lttng_perf_counter_field {
 
 static pthread_key_t perf_counter_key;
 
+/*
+ * lttng_perf_lock - Protect lttng-ust perf counter data structures
+ *
+ * Nests within the ust_lock, and therefore within the libc dl lock.
+ * Therefore, we need to fix up the TLS before nesting into this lock.
+ * Nests inside the RCU-bp read-side lock. Protects against concurrent
+ * fork.
+ */
+static pthread_mutex_t ust_perf_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+/*
+ * Cancel state when grabbing the ust_perf_mutex. Saved when locking,
+ * restored on unlock. Protected by ust_perf_mutex.
+ */
+static int ust_perf_saved_cancelstate;
+
+/*
+ * Track whether we are tracing from a signal handler nested on an
+ * application thread.
+ */
+static DEFINE_URCU_TLS(int, ust_perf_mutex_nest);
+
+/*
+ * Force a read (implies TLS fixup for dlopen) of TLS variables.
+ */
+void lttng_ust_fixup_perf_counter_tls(void)
+{
+        asm volatile ("" : : "m" (URCU_TLS(ust_perf_mutex_nest)));
+}
+
+void lttng_perf_lock(void)
+{
+        sigset_t sig_all_blocked, orig_mask;
+        int ret, oldstate;
+
+        ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
+        if (ret) {
+                ERR("pthread_setcancelstate: %s", strerror(ret));
+        }
+        sigfillset(&sig_all_blocked);
+        ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
+        if (ret) {
+                ERR("pthread_sigmask: %s", strerror(ret));
+        }
+        if (!URCU_TLS(ust_perf_mutex_nest)++) {
+                /*
+                 * Ensure the compiler doesn't move the store after the
+                 * close() call, in case close() would be marked as leaf.
+                 */
+                cmm_barrier();
+                pthread_mutex_lock(&ust_perf_mutex);
+                ust_perf_saved_cancelstate = oldstate;
+        }
+        ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
+        if (ret) {
+                ERR("pthread_sigmask: %s", strerror(ret));
+        }
+}
+
+void lttng_perf_unlock(void)
+{
+        sigset_t sig_all_blocked, orig_mask;
+        int ret, newstate, oldstate;
+        bool restore_cancel = false;
+
+        sigfillset(&sig_all_blocked);
+        ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
+        if (ret) {
+                ERR("pthread_sigmask: %s", strerror(ret));
+        }
+        /*
+         * Ensure the compiler doesn't move the store before the close()
+         * call, in case close() would be marked as leaf.
+         */
+        cmm_barrier();
+        if (!--URCU_TLS(ust_perf_mutex_nest)) {
+                newstate = ust_perf_saved_cancelstate;
+                restore_cancel = true;
+                pthread_mutex_unlock(&ust_perf_mutex);
+        }
+        ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
+        if (ret) {
+                ERR("pthread_sigmask: %s", strerror(ret));
+        }
+        if (restore_cancel) {
+                ret = pthread_setcancelstate(newstate, &oldstate);
+                if (ret) {
+                        ERR("pthread_setcancelstate: %s", strerror(ret));
+                }
+        }
+}
+
 static size_t perf_counter_get_size(struct lttng_ctx_field *field,
                 size_t offset)
 {
@@ -308,12 +401,12 @@ struct lttng_perf_counter_thread_field *
          * Note: thread_field->pc can be NULL if setup_perf() fails.
          * Also, thread_field->fd can be -1 if open_perf_fd() fails.
          */
-        ust_lock_nocheck();
+        lttng_perf_lock();
         cds_list_add_rcu(&thread_field->rcu_field_node,
                         &perf_thread->rcu_field_list);
         cds_list_add(&thread_field->thread_field_node,
                         &perf_field->thread_field_list);
-        ust_unlock();
+        lttng_perf_unlock();
 skip:
         ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
         if (ret)
@@ -370,7 +463,7 @@ void perf_counter_get_value(struct lttng_ctx_field *field,
         value->u.s64 = wrapper_perf_counter_read(field);
 }
 
-/* Called with UST lock held */
+/* Called with perf lock held */
 static
 void lttng_destroy_perf_thread_field(
                 struct lttng_perf_counter_thread_field *thread_field)
@@ -388,11 +481,11 @@ void lttng_destroy_perf_thread_key(void *_key)
         struct lttng_perf_counter_thread *perf_thread = _key;
         struct lttng_perf_counter_thread_field *pos, *p;
 
-        ust_lock_nocheck();
+        lttng_perf_lock();
         cds_list_for_each_entry_safe(pos, p, &perf_thread->rcu_field_list,
                         rcu_field_node)
                 lttng_destroy_perf_thread_field(pos);
-        ust_unlock();
+        lttng_perf_unlock();
         free(perf_thread);
 }
 
@@ -408,11 +501,15 @@ void lttng_destroy_perf_counter_field(struct lttng_ctx_field *field)
         /*
          * This put is performed when no threads can concurrently
          * perform a "get" concurrently, thanks to urcu-bp grace
-         * period.
+         * period. Holding the lttng perf lock protects against
+         * concurrent modification of the per-thread thread field
+         * list.
          */
+        lttng_perf_lock();
         cds_list_for_each_entry_safe(pos, p, &perf_field->thread_field_list,
                         thread_field_node)
                 lttng_destroy_perf_thread_field(pos);
+        lttng_perf_unlock();
         free(perf_field);
 }
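Note that lttng_perf_lock()/lttng_perf_unlock() above keep the same
signal-safety pattern as ust_lock(): all signals are blocked around the
nest-count update, and the per-thread nesting count lets a signal
handler tracing on a thread that already holds the lock skip the mutex
instead of self-deadlocking. Stripped of the cancel-state handling, the
pattern is roughly the following (a sketch only, with hypothetical names
and plain __thread instead of URCU_TLS):

    #include <pthread.h>
    #include <signal.h>

    static pthread_mutex_t data_mutex = PTHREAD_MUTEX_INITIALIZER;
    static __thread int data_mutex_nest;    /* per-thread nesting count */

    static void data_lock(void)
    {
            sigset_t all, orig;

            sigfillset(&all);
            /* Keep handlers out while the nest count changes. */
            pthread_sigmask(SIG_SETMASK, &all, &orig);
            if (!data_mutex_nest++)
                    pthread_mutex_lock(&data_mutex);   /* outermost entry only */
            pthread_sigmask(SIG_SETMASK, &orig, NULL);
    }

    static void data_unlock(void)
    {
            sigset_t all, orig;

            sigfillset(&all);
            pthread_sigmask(SIG_SETMASK, &all, &orig);
            if (!--data_mutex_nest)
                    pthread_mutex_unlock(&data_mutex); /* outermost exit only */
            pthread_sigmask(SIG_SETMASK, &orig, NULL);
    }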
diff --git a/liblttng-ust/lttng-tracer-core.h b/liblttng-ust/lttng-tracer-core.h
index ba232f32..f750f4b0 100644
--- a/liblttng-ust/lttng-tracer-core.h
+++ b/liblttng-ust/lttng-tracer-core.h
@@ -64,4 +64,23 @@ void lttng_ust_dummy_get_value(struct lttng_ctx_field *field,
 int lttng_context_is_app(const char *name);
 void lttng_ust_fixup_tls(void);
 
+#ifdef LTTNG_UST_HAVE_PERF_EVENT
+void lttng_ust_fixup_perf_counter_tls(void);
+void lttng_perf_lock(void);
+void lttng_perf_unlock(void);
+#else /* #ifdef LTTNG_UST_HAVE_PERF_EVENT */
+static inline
+void lttng_ust_fixup_perf_counter_tls(void)
+{
+}
+static inline
+void lttng_perf_lock(void)
+{
+}
+static inline
+void lttng_perf_unlock(void)
+{
+}
+#endif /* #else #ifdef LTTNG_UST_HAVE_PERF_EVENT */
+
 #endif /* _LTTNG_TRACER_CORE_H */
diff --git a/liblttng-ust/lttng-ust-comm.c b/liblttng-ust/lttng-ust-comm.c
index 47ba36e5..b067b3da 100644
--- a/liblttng-ust/lttng-ust-comm.c
+++ b/liblttng-ust/lttng-ust-comm.c
@@ -423,6 +423,7 @@ void lttng_ust_fixup_tls(void)
         lttng_fixup_nest_count_tls();
         lttng_fixup_procname_tls();
         lttng_fixup_ust_mutex_nest_tls();
+        lttng_ust_fixup_perf_counter_tls();
         lttng_ust_fixup_fd_tracker_tls();
 }
 
@@ -2068,6 +2069,7 @@ void ust_before_fork(sigset_t *save_sigset)
         ust_lock_nocheck();
         urcu_bp_before_fork();
         lttng_ust_lock_fd_tracker();
+        lttng_perf_lock();
 }
 
 static void ust_after_fork_common(sigset_t *restore_sigset)
@@ -2075,6 +2077,7 @@ static void ust_after_fork_common(sigset_t *restore_sigset)
 {
         int ret;
 
         DBG("process %d", getpid());
+        lttng_perf_unlock();
         lttng_ust_unlock_fd_tracker();
         ust_unlock();
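The ust_before_fork()/ust_after_fork_common() hunks apply the usual
fork discipline to the new lock, taking lttng_perf_lock() last so the
"nests within ust_lock" hierarchy is respected: every lock the child
may need is held across fork() and released in both processes, so the
child never inherits a mutex owned by a thread that does not exist on
its side. A minimal standalone sketch of that discipline, using plain
pthread_atfork() and hypothetical names rather than lttng-ust's hooks:

    #include <pthread.h>
    #include <sys/wait.h>
    #include <unistd.h>

    static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Taken before fork(): the child snapshots a consistent, owned lock. */
    static void before_fork(void) { pthread_mutex_lock(&state_lock); }
    /* Released in both processes once their address spaces have diverged. */
    static void after_fork(void)  { pthread_mutex_unlock(&state_lock); }

    int main(void)
    {
            pid_t pid;

            pthread_atfork(before_fork, after_fork, after_fork);
            pid = fork();
            if (pid == 0) {
                    /* Safe: after_fork() released the lock in the child. */
                    pthread_mutex_lock(&state_lock);
                    pthread_mutex_unlock(&state_lock);
                    _exit(0);
            }
            waitpid(pid, NULL, 0);
            return 0;
    }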