#include <time.h>
#include <assert.h>
#include <signal.h>
-#include <dlfcn.h>
#include <urcu/uatomic.h>
#include <urcu/futex.h>
#include <urcu/compiler.h>
* The ust_lock/ust_unlock lock is used as a communication thread mutex.
* Held when handling a command, also held by fork() to deal with
* removal of threads, and by exit path.
+ *
+ * The UST lock is the centralized mutex across UST tracing control and
+ * probe registration.
+ *
+ * ust_exit_mutex must never nest in ust_mutex.
*/
+static pthread_mutex_t ust_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+/*
+ * ust_exit_mutex protects thread_active variable wrt thread exit. It
+ * cannot be done by ust_mutex because pthread_cancel(), which takes an
+ * internal libc lock, cannot nest within ust_mutex.
+ *
+ * It never nests within ust_mutex.
+ */
+static pthread_mutex_t ust_exit_mutex = PTHREAD_MUTEX_INITIALIZER;
/* Should the ust comm thread quit ? */
static int lttng_ust_comm_should_quit;
+/*
+ * Take the UST lock, checking whether the communication thread should
+ * quit.
+ *
+ * Return 0 on success, -1 if should quit.
+ * The lock is taken in both cases, so the caller must always release
+ * it with ust_unlock(), even when -1 is returned.
+ */
+int ust_lock(void)
+{
+ pthread_mutex_lock(&ust_mutex);
+ if (lttng_ust_comm_should_quit) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+/*
+ * Take the UST lock without checking the lttng_ust_comm_should_quit
+ * flag.
+ *
+ * ust_lock_nocheck() can be used in constructors/destructors, because
+ * they are already nested within the dynamic loader lock, and therefore
+ * have exclusive access against execution of liblttng-ust destructor.
+ */
+void ust_lock_nocheck(void)
+{
+ pthread_mutex_lock(&ust_mutex);
+}
+
+/* Release the UST lock taken by ust_lock() or ust_lock_nocheck(). */
+void ust_unlock(void)
+{
+ pthread_mutex_unlock(&ust_mutex);
+}
+
/*
* Wait for either of these before continuing to the main
* program:
char wait_shm_path[PATH_MAX];
char *wait_shm_mmap;
- struct lttng_session *session_enabled;
+ /* Keep track of lazy state dump not performed yet. */
+ int statedump_pending;
};
/* Socket from app (connect) to session daemon (listen) for communication */
.wait_shm_path = "/" LTTNG_UST_WAIT_FILENAME,
- .session_enabled = NULL,
+ .statedump_pending = 0,
};
/* TODO: allow global_apps_sock_path override */
.socket = -1,
.notify_socket = -1,
- .session_enabled = NULL,
+ .statedump_pending = 0,
};
static int wait_poll_fallback;
return 0;
}
+/*
+ * Only execute pending statedump after the constructor semaphore has
+ * been posted by each listener thread. This means statedump will only
+ * be performed after the "registration done" command is received from
+ * each session daemon the application is connected to.
+ *
+ * This ensures we don't run into deadlock issues with the dynamic
+ * loader mutex, which is held while the constructor is called and
+ * waiting on the constructor semaphore. All operations requiring this
+ * dynamic loader lock need to be postponed using this mechanism.
+ *
+ * Must be called without the UST lock held: the UST lock is taken
+ * internally within lttng_handle_pending_statedump() (see the caller
+ * comment in handle_message()).
+ */
+static
+void handle_pending_statedump(struct sock_info *sock_info)
+{
+ int ctor_passed = sock_info->constructor_sem_posted;
+
+ if (ctor_passed && sock_info->statedump_pending) {
+ /* Clear the flag before performing the deferred statedump. */
+ sock_info->statedump_pending = 0;
+ lttng_handle_pending_statedump(sock_info);
+ }
+}
+
static
int handle_message(struct sock_info *sock_info,
int sock, struct ustcomm_ust_msg *lum)
union ust_args args;
ssize_t len;
- ust_lock();
-
memset(&lur, 0, sizeof(lur));
- if (lttng_ust_comm_should_quit) {
+ if (ust_lock()) {
ret = -LTTNG_UST_ERR_EXITING;
goto end;
}
error:
ust_unlock();
+
+ /*
+ * Perform delayed statedump operations outside of the UST
+ * lock. We need to take the dynamic loader lock before we take
+ * the UST lock internally within handle_pending_statedump().
+ */
+ handle_pending_statedump(sock_info);
+
return ret;
}
sock_info->notify_socket = -1;
}
if (sock_info->wait_shm_mmap) {
- ret = munmap(sock_info->wait_shm_mmap, sysconf(_SC_PAGE_SIZE));
- if (ret) {
- ERR("Error unmapping wait shm");
+ long page_size;
+
+ page_size = sysconf(_SC_PAGE_SIZE);
+ if (page_size > 0) {
+ ret = munmap(sock_info->wait_shm_mmap, page_size);
+ if (ret) {
+ ERR("Error unmapping wait shm");
+ }
}
sock_info->wait_shm_mmap = NULL;
}
static
char *get_map_shm(struct sock_info *sock_info)
{
- size_t mmap_size = sysconf(_SC_PAGE_SIZE);
+ long page_size;
int wait_shm_fd, ret;
char *wait_shm_mmap;
- wait_shm_fd = get_wait_shm(sock_info, mmap_size);
+ page_size = sysconf(_SC_PAGE_SIZE);
+ if (page_size < 0) {
+ goto error;
+ }
+
+ wait_shm_fd = get_wait_shm(sock_info, page_size);
if (wait_shm_fd < 0) {
goto error;
}
- wait_shm_mmap = mmap(NULL, mmap_size, PROT_READ,
+ wait_shm_mmap = mmap(NULL, page_size, PROT_READ,
MAP_SHARED, wait_shm_fd, 0);
/* close shm fd immediately after taking the mmap reference */
ret = close(wait_shm_fd);
{
int ret;
- ust_lock();
- if (lttng_ust_comm_should_quit) {
+ if (ust_lock()) {
goto quit;
}
if (wait_poll_fallback) {
DBG("Info: sessiond not accepting connections to %s apps socket", sock_info->name);
prev_connect_failed = 1;
- ust_lock();
-
- if (lttng_ust_comm_should_quit) {
+ if (ust_lock()) {
goto quit;
}
}
sock_info->socket = ret;
- ust_lock();
-
- if (lttng_ust_comm_should_quit) {
+ if (ust_lock()) {
goto quit;
}
DBG("Info: sessiond not accepting connections to %s apps socket", sock_info->name);
prev_connect_failed = 1;
- ust_lock();
-
- if (lttng_ust_comm_should_quit) {
+ if (ust_lock()) {
goto quit;
}
WARN("Unsupported timeout value %ld", timeout);
}
- ust_lock();
-
- if (lttng_ust_comm_should_quit) {
+ if (ust_lock()) {
goto quit;
}
switch (len) {
case 0: /* orderly shutdown */
DBG("%s lttng-sessiond has performed an orderly shutdown", sock_info->name);
- ust_lock();
- if (lttng_ust_comm_should_quit) {
+ if (ust_lock()) {
goto quit;
}
/*
ret = handle_message(sock_info, sock, &lum);
if (ret) {
ERR("Error handling message for %s socket", sock_info->name);
- } else {
- struct lttng_session *session;
-
- session = sock_info->session_enabled;
- if (session) {
- sock_info->session_enabled = NULL;
- lttng_ust_baddr_statedump(session);
- }
}
continue;
default:
}
end:
- ust_lock();
- if (lttng_ust_comm_should_quit) {
+ if (ust_lock()) {
goto quit;
}
/* Cleanup socket handles before trying to reconnect */
goto restart; /* try to reconnect */
quit:
- sock_info->thread_active = 0;
ust_unlock();
+
+ pthread_mutex_lock(&ust_exit_mutex);
+ sock_info->thread_active = 0;
+ pthread_mutex_unlock(&ust_exit_mutex);
return NULL;
}
+/*
+ * Weak symbol to call when the ust malloc wrapper is not loaded.
+ * If a strong definition is provided (by the malloc wrapper library),
+ * it overrides this empty stub; otherwise the constructor's call
+ * resolves here and is a no-op.
+ */
+__attribute__((weak))
+void lttng_ust_malloc_wrapper_init(void)
+{
+}
+
/*
* sessiond monitoring thread: monitor presence of global and per-user
* sessiond by polling the application common named pipe.
*/
init_usterr();
init_tracepoint();
+ lttng_ust_baddr_statedump_init();
lttng_ring_buffer_metadata_client_init();
lttng_ring_buffer_client_overwrite_init();
lttng_ring_buffer_client_overwrite_rt_init();
lttng_ring_buffer_client_discard_init();
lttng_ring_buffer_client_discard_rt_init();
lttng_context_init();
+ /*
+ * Invoke ust malloc wrapper init before starting other threads.
+ */
+ lttng_ust_malloc_wrapper_init();
timeout_mode = get_constructor_timeout(&constructor_timeout);
ERR("pthread_attr_setdetachstate: %s", strerror(ret));
}
- ust_lock();
+ pthread_mutex_lock(&ust_exit_mutex);
ret = pthread_create(&global_apps.ust_listener, &thread_attr,
ust_listener_thread, &global_apps);
if (ret) {
ERR("pthread_create global: %s", strerror(ret));
}
global_apps.thread_active = 1;
- ust_unlock();
+ pthread_mutex_unlock(&ust_exit_mutex);
if (local_apps.allowed) {
- ust_lock();
+ pthread_mutex_lock(&ust_exit_mutex);
ret = pthread_create(&local_apps.ust_listener, &thread_attr,
ust_listener_thread, &local_apps);
if (ret) {
ERR("pthread_create local: %s", strerror(ret));
}
local_apps.thread_active = 1;
- ust_unlock();
+ pthread_mutex_unlock(&ust_exit_mutex);
} else {
handle_register_done(&local_apps);
}
lttng_ring_buffer_client_overwrite_rt_exit();
lttng_ring_buffer_client_overwrite_exit();
lttng_ring_buffer_metadata_client_exit();
+ lttng_ust_baddr_statedump_destroy();
exit_tracepoint();
if (!exiting) {
/* Reinitialize values for fork */
* mutexes to ensure it is not in a mutex critical section when
* pthread_cancel is later called.
*/
- ust_lock();
+ ust_lock_nocheck();
lttng_ust_comm_should_quit = 1;
+ ust_unlock();
+ pthread_mutex_lock(&ust_exit_mutex);
/* cancel threads */
if (global_apps.thread_active) {
ret = pthread_cancel(global_apps.ust_listener);
local_apps.thread_active = 0;
}
}
- ust_unlock();
+ pthread_mutex_unlock(&ust_exit_mutex);
/*
* Do NOT join threads: use of sys_futex makes it impossible to
if (ret == -1) {
PERROR("sigprocmask");
}
- ust_lock();
+ ust_lock_nocheck();
rcu_bp_before_fork();
}
lttng_ust_init();
}
-void lttng_ust_sockinfo_session_enabled(void *owner,
- struct lttng_session *session_enabled)
+/*
+ * Flag the per-socket lazy statedump as pending. The statedump itself
+ * is performed later by handle_pending_statedump(), after the
+ * "registration done" command has been received.
+ */
+void lttng_ust_sockinfo_session_enabled(void *owner)
{
struct sock_info *sock_info = owner;
- sock_info->session_enabled = session_enabled;
+ sock_info->statedump_pending = 1;
}