port: fix pthread_setname_np integration
diff --git a/liblttng-ust/lttng-ust-comm.c b/liblttng-ust/lttng-ust-comm.c
index a299ba84c6ba583b1d2afff4d8ff0576a0991e36..b265bcd3c61e254c9e8dc889d56fef389f2c18e1 100644
@@ -20,7 +20,8 @@
  */
 
 #define _LGPL_SOURCE
-#define _GNU_SOURCE
+#include <stddef.h>
+#include <stdint.h>
 #include <sys/types.h>
 #include <sys/socket.h>
 #include <sys/mman.h>
@@ -86,6 +87,8 @@ static int initialized;
  *
  * ust_lock nests within the dynamic loader lock (within glibc) because
  * it is taken within the library constructor.
+ *
+ * The ust fd tracker lock nests within the ust_mutex.
  */
 static pthread_mutex_t ust_mutex = PTHREAD_MUTEX_INITIALIZER;
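The nesting comment above fixes the lock order: the dynamic loader lock (glibc) is outermost, ust_mutex nests inside it, and the ust fd tracker lock nests inside ust_mutex. A minimal sketch of an acquisition that respects this order, assuming ust_lock() returns nonzero when the library is shutting down (as at its other call sites in this file) and using the existing lttng_ust_lock_fd_tracker()/lttng_ust_unlock_fd_tracker() helpers; the surrounding function is illustrative only:

    /* Illustrative only: take the locks in the documented nesting order. */
    static void example_fd_tracker_update(void)
    {
            if (ust_lock())                  /* ust_mutex, nested under the dynamic loader lock */
                    goto quit;
            lttng_ust_lock_fd_tracker();     /* fd tracker lock nests within ust_mutex */
            /* ... update fd tracker state here ... */
            lttng_ust_unlock_fd_tracker();
    quit:
            ust_unlock();
    }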
 
@@ -228,7 +231,11 @@ static sem_t constructor_wait;
 /*
  * Doing this for both the global and local sessiond.
  */
-static int sem_count = { 2 };
+enum {
+       sem_count_initial_value = 4,
+};
+
+static int sem_count = sem_count_initial_value;
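The initial value of 4 reflects two decrements per session daemon: with one global and one local sessiond, each listener thread accounts for a "registration done" decrement and an "initial statedump done" decrement, and the constructor semaphore is only posted once the count reaches zero. A worked example of the countdown (the ordering between the two daemons is arbitrary):

    /*
     * sem_count = 4
     *   global sessiond: registration done       -> 3
     *   global sessiond: initial statedump done  -> 2
     *   local sessiond:  registration done       -> 1
     *   local sessiond:  initial statedump done  -> 0  => sem_post(&constructor_wait)
     */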
 
 /*
  * Counting nesting within lttng-ust. Used to ensure that calling fork()
@@ -243,7 +250,7 @@ struct sock_info {
        const char *name;
        pthread_t ust_listener; /* listener thread */
        int root_handle;
-       int constructor_sem_posted;
+       int registration_done;
        int allowed;
        int global;
        int thread_active;
@@ -256,6 +263,9 @@ struct sock_info {
        char *wait_shm_mmap;
        /* Keep track of lazy state dump not performed yet. */
        int statedump_pending;
+       int initial_statedump_done;
+       /* Keep procname for statedump */
+       char procname[LTTNG_UST_ABI_PROCNAME_LEN];
 };
 
 /* Socket from app (connect) to session daemon (listen) for communication */
@@ -264,6 +274,7 @@ struct sock_info global_apps = {
        .global = 1,
 
        .root_handle = -1,
+       .registration_done = 0,
        .allowed = 0,
        .thread_active = 0,
 
@@ -274,6 +285,8 @@ struct sock_info global_apps = {
        .wait_shm_path = "/" LTTNG_UST_WAIT_FILENAME,
 
        .statedump_pending = 0,
+       .initial_statedump_done = 0,
+       .procname[0] = '\0'
 };
 
 /* TODO: allow global_apps_sock_path override */
@@ -282,6 +295,7 @@ struct sock_info local_apps = {
        .name = "local",
        .global = 0,
        .root_handle = -1,
+       .registration_done = 0,
        .allowed = 0,   /* Check setuid bit first */
        .thread_active = 0,
 
@@ -289,6 +303,8 @@ struct sock_info local_apps = {
        .notify_socket = -1,
 
        .statedump_pending = 0,
+       .initial_statedump_done = 0,
+       .procname[0] = '\0'
 };
 
 static int wait_poll_fallback;
@@ -412,7 +428,13 @@ void lttng_ust_fixup_tls(void)
        lttng_fixup_nest_count_tls();
        lttng_fixup_procname_tls();
        lttng_fixup_ust_mutex_nest_tls();
+       lttng_ust_fixup_perf_counter_tls();
        lttng_ust_fixup_fd_tracker_tls();
+       lttng_fixup_cgroup_ns_tls();
+       lttng_fixup_ipc_ns_tls();
+       lttng_fixup_net_ns_tls();
+       lttng_fixup_time_ns_tls();
+       lttng_fixup_uts_ns_tls();
 }
 
 int lttng_get_notify_socket(void *owner)
@@ -422,6 +444,15 @@ int lttng_get_notify_socket(void *owner)
        return info->notify_socket;
 }
 
+
+LTTNG_HIDDEN
+char *lttng_ust_sockinfo_get_procname(void *owner)
+{
+       struct sock_info *info = owner;
+
+       return info->procname;
+}
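This accessor exposes the procname captured at socket setup time to code that only holds the opaque owner pointer. A hedged usage sketch; the function name below is hypothetical, and the real consumer (presumably the statedump code) lives outside this diff:

    /* Hypothetical caller: log the procname recorded for this sessiond connection. */
    static void log_owner_procname(void *owner)
    {
            char *procname = lttng_ust_sockinfo_get_procname(owner);

            if (procname[0] != '\0')
                    DBG("connection owner %p has procname \"%s\"", owner, procname);
    }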
+
 static
 void print_cmd(int cmd, int handle)
 {
@@ -451,6 +482,7 @@ int setup_global_apps(void)
        }
 
        global_apps.allowed = 1;
+       lttng_pthread_getname_np(global_apps.procname, LTTNG_UST_ABI_PROCNAME_LEN);
 error:
        return ret;
 }
@@ -495,6 +527,8 @@ int setup_local_apps(void)
                ret = -EIO;
                goto end;
        }
+
+       lttng_pthread_getname_np(local_apps.procname, LTTNG_UST_ABI_PROCNAME_LEN);
 end:
        return ret;
 }
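Both setup paths record the thread name through lttng_pthread_getname_np(), the compat wrapper referenced by the commit title; its implementation is not part of this diff. A sketch of what a glibc-based fallback could look like, assuming pthread_getname_np() is available (it requires _GNU_SOURCE) and that the destination length is at least 1; the function name is hypothetical:

    #include <pthread.h>
    #include <string.h>

    /* Hypothetical fallback: copy the current thread's name into buf (len >= 1). */
    static int example_getname_fallback(char *buf, size_t len)
    {
            char name[17];  /* Linux thread names fit in 16 bytes plus the NUL. */
            int ret;

            ret = pthread_getname_np(pthread_self(), name, sizeof(name));
            if (ret)
                    return ret;
            strncpy(buf, name, len);
            buf[len - 1] = '\0';
            return 0;
    }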
@@ -621,45 +655,85 @@ int send_reply(int sock, struct ustcomm_ust_reply *lur)
 }
 
 static
-int handle_register_done(struct sock_info *sock_info)
+void decrement_sem_count(unsigned int count)
 {
        int ret;
 
-       if (sock_info->constructor_sem_posted)
-               return 0;
-       sock_info->constructor_sem_posted = 1;
+       assert(uatomic_read(&sem_count) >= count);
+
        if (uatomic_read(&sem_count) <= 0) {
-               return 0;
+               return;
        }
-       ret = uatomic_add_return(&sem_count, -1);
+
+       ret = uatomic_add_return(&sem_count, -count);
        if (ret == 0) {
                ret = sem_post(&constructor_wait);
                assert(!ret);
        }
+}
+
+static
+int handle_register_done(struct sock_info *sock_info)
+{
+       if (sock_info->registration_done)
+               return 0;
+       sock_info->registration_done = 1;
+
+       decrement_sem_count(1);
+       if (!sock_info->statedump_pending) {
+               sock_info->initial_statedump_done = 1;
+               decrement_sem_count(1);
+       }
+
+       return 0;
+}
+
+static
+int handle_register_failed(struct sock_info *sock_info)
+{
+       if (sock_info->registration_done)
+               return 0;
+       sock_info->registration_done = 1;
+       sock_info->initial_statedump_done = 1;
+
+       decrement_sem_count(2);
+
        return 0;
 }
 
 /*
  * Only execute pending statedump after the constructor semaphore has
- * been posted by each listener thread. This means statedump will only
- * be performed after the "registration done" command is received from
- * each session daemon the application is connected to.
+ * been posted by the current listener thread. This means statedump will
+ * only be performed after the "registration done" command is received
+ * from this thread's session daemon.
  *
  * This ensures we don't run into deadlock issues with the dynamic
  * loader mutex, which is held while the constructor is called and
  * waiting on the constructor semaphore. All operations requiring this
  * dynamic loader lock need to be postponed using this mechanism.
+ *
+ * In a scenario with two session daemons connected to the application,
+ * it is possible that the first listener thread which receives the
+ * registration done command issues its statedump while the dynamic
+ * loader lock is still held by the application constructor waiting on
+ * the semaphore. It will however be allowed to proceed when the
+ * second session daemon sends the registration done command to the
+ * second listener thread. This situation therefore does not produce
+ * a deadlock.
  */
 static
 void handle_pending_statedump(struct sock_info *sock_info)
 {
-       int ctor_passed = sock_info->constructor_sem_posted;
-
-       if (ctor_passed && sock_info->statedump_pending) {
+       if (sock_info->registration_done && sock_info->statedump_pending) {
                sock_info->statedump_pending = 0;
                pthread_mutex_lock(&ust_fork_mutex);
                lttng_handle_pending_statedump(sock_info);
                pthread_mutex_unlock(&ust_fork_mutex);
+
+               if (!sock_info->initial_statedump_done) {
+                       sock_info->initial_statedump_done = 1;
+                       decrement_sem_count(1);
+               }
        }
 }
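The old single constructor_sem_posted flag is split into registration_done and initial_statedump_done, so each listener thread now accounts for two decrements of sem_count. The possible per-listener sequences, following the code above:

    /*
     * No statedump requested by the sessiond at registration time:
     *   handle_register_done():     registration_done = 1, decrement_sem_count(1);
     *                               statedump_pending == 0, so initial_statedump_done = 1
     *                               and decrement_sem_count(1) again.
     *
     * Statedump requested before "registration done" completes:
     *   handle_register_done():     registration_done = 1, decrement_sem_count(1).
     *   handle_pending_statedump(): runs the statedump, then sets
     *                               initial_statedump_done = 1 and decrement_sem_count(1).
     *
     * Registration failed or no sessiond found:
     *   handle_register_failed():   both flags set, decrement_sem_count(2).
     */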
 
@@ -1069,7 +1143,8 @@ void cleanup_sock_info(struct sock_info *sock_info, int exiting)
                }
                sock_info->root_handle = -1;
        }
-       sock_info->constructor_sem_posted = 0;
+       sock_info->registration_done = 0;
+       sock_info->initial_statedump_done = 0;
 
        /*
         * wait_shm_mmap, socket and notify socket are used by listener
@@ -1477,7 +1552,7 @@ restart:
                 * If we cannot find the sessiond daemon, don't delay
                 * constructor execution.
                 */
-               ret = handle_register_done(sock_info);
+               ret = handle_register_failed(sock_info);
                assert(!ret);
                ust_unlock();
                goto restart;
@@ -1531,7 +1606,7 @@ restart:
                 * If we cannot register to the sessiond daemon, don't
                 * delay constructor execution.
                 */
-               ret = handle_register_done(sock_info);
+               ret = handle_register_failed(sock_info);
                assert(!ret);
                ust_unlock();
                goto restart;
@@ -1560,7 +1635,7 @@ restart:
                 * If we cannot find the sessiond daemon, don't delay
                 * constructor execution.
                 */
-               ret = handle_register_done(sock_info);
+               ret = handle_register_failed(sock_info);
                assert(!ret);
                ust_unlock();
                goto restart;
@@ -1624,7 +1699,7 @@ restart:
                 * If we cannot register to the sessiond daemon, don't
                 * delay constructor execution.
                 */
-               ret = handle_register_done(sock_info);
+               ret = handle_register_failed(sock_info);
                assert(!ret);
                ust_unlock();
                goto restart;
@@ -1654,7 +1729,7 @@ restart:
                         * If we cannot register to the sessiond daemon, don't
                         * delay constructor execution.
                         */
-                       ret = handle_register_done(sock_info);
+                       ret = handle_register_failed(sock_info);
                        assert(!ret);
                        ust_unlock();
                        goto end;
@@ -1923,7 +1998,7 @@ void lttng_ust_cleanup(int exiting)
        exit_tracepoint();
        if (!exiting) {
                /* Reinitialize values for fork */
-               sem_count = 2;
+               sem_count = sem_count_initial_value;
                lttng_ust_comm_should_quit = 0;
                initialized = 0;
        }
@@ -1981,6 +2056,35 @@ void __attribute__((destructor)) lttng_ust_exit(void)
        lttng_ust_cleanup(1);
 }
 
+static
+void ust_context_ns_reset(void)
+{
+       lttng_context_pid_ns_reset();
+       lttng_context_cgroup_ns_reset();
+       lttng_context_ipc_ns_reset();
+       lttng_context_mnt_ns_reset();
+       lttng_context_net_ns_reset();
+       lttng_context_user_ns_reset();
+       lttng_context_time_ns_reset();
+       lttng_context_uts_ns_reset();
+}
+
+static
+void ust_context_vuids_reset(void)
+{
+       lttng_context_vuid_reset();
+       lttng_context_veuid_reset();
+       lttng_context_vsuid_reset();
+}
+
+static
+void ust_context_vgids_reset(void)
+{
+       lttng_context_vgid_reset();
+       lttng_context_vegid_reset();
+       lttng_context_vsgid_reset();
+}
+
 /*
  * We exclude the worker threads across fork and clone (except
  * CLONE_VM), because these system calls only keep the forking thread
@@ -2015,6 +2119,8 @@ void ust_before_fork(sigset_t *save_sigset)
 
        ust_lock_nocheck();
        urcu_bp_before_fork();
+       lttng_ust_lock_fd_tracker();
+       lttng_perf_lock();
 }
 
 static void ust_after_fork_common(sigset_t *restore_sigset)
@@ -2022,6 +2128,8 @@ static void ust_after_fork_common(sigset_t *restore_sigset)
        int ret;
 
        DBG("process %d", getpid());
+       lttng_perf_unlock();
+       lttng_ust_unlock_fd_tracker();
        ust_unlock();
 
        pthread_mutex_unlock(&ust_fork_mutex);
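Taking the fd tracker and perf locks in ust_before_fork() and releasing them in ust_after_fork_common() guarantees the child cannot inherit either mutex in a locked state. The hooks themselves are invoked by a fork wrapper outside this file (typically the preloaded helper library); the wrapper below is only an illustration of the expected call ordering, with a hypothetical name:

    #include <signal.h>
    #include <unistd.h>

    /* Hypothetical fork wrapper showing the hook ordering. */
    static pid_t traced_fork(void)
    {
            sigset_t sigset;
            pid_t pid;

            ust_before_fork(&sigset);                /* block signals, take ust, fd tracker, perf locks */
            pid = fork();
            if (pid == 0)
                    ust_after_fork_child(&sigset);   /* child: reset cached contexts, release locks */
            else
                    ust_after_fork_parent(&sigset);  /* parent: release locks, restore signal mask */
            return pid;
    }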
@@ -2059,6 +2167,9 @@ void ust_after_fork_child(sigset_t *restore_sigset)
        lttng_context_vpid_reset();
        lttng_context_vtid_reset();
        lttng_context_procname_reset();
+       ust_context_ns_reset();
+       ust_context_vuids_reset();
+       ust_context_vgids_reset();
        DBG("process %d", getpid());
        /* Release urcu mutexes */
        urcu_bp_after_fork_child();
@@ -2068,6 +2179,60 @@ void ust_after_fork_child(sigset_t *restore_sigset)
        lttng_ust_init();
 }
 
+void ust_after_setns(void)
+{
+       ust_context_ns_reset();
+       ust_context_vuids_reset();
+       ust_context_vgids_reset();
+}
+
+void ust_after_unshare(void)
+{
+       ust_context_ns_reset();
+       ust_context_vuids_reset();
+       ust_context_vgids_reset();
+}
+
+void ust_after_setuid(void)
+{
+       ust_context_vuids_reset();
+}
+
+void ust_after_seteuid(void)
+{
+       ust_context_vuids_reset();
+}
+
+void ust_after_setreuid(void)
+{
+       ust_context_vuids_reset();
+}
+
+void ust_after_setresuid(void)
+{
+       ust_context_vuids_reset();
+}
+
+void ust_after_setgid(void)
+{
+       ust_context_vgids_reset();
+}
+
+void ust_after_setegid(void)
+{
+       ust_context_vgids_reset();
+}
+
+void ust_after_setregid(void)
+{
+       ust_context_vgids_reset();
+}
+
+void ust_after_setresgid(void)
+{
+       ust_context_vgids_reset();
+}
+
 void lttng_ust_sockinfo_session_enabled(void *owner)
 {
        struct sock_info *sock_info = owner;
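The new ust_after_setns(), ust_after_unshare() and ust_after_set*id() hooks only invalidate the cached namespace and vuid/vgid contexts; like the fork hooks, they are meant to be called right after the corresponding system call succeeds, from wrappers outside this file. A hedged sketch, with a hypothetical wrapper name:

    #include <sys/types.h>
    #include <unistd.h>

    /* Hypothetical wrapper: invalidate cached credentials after a successful setuid(). */
    static int traced_setuid(uid_t uid)
    {
            int ret;

            ret = setuid(uid);
            if (ret == 0)
                    ust_after_setuid();  /* reset cached vuid/veuid/vsuid context values */
            return ret;
    }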