Cygwin: Pass file paths instead of file descriptors over UNIX sockets
[lttng-ust.git] / liblttng-ust / lttng-ust-comm.c
index b968b498acc52874b3472544691bbaef05364d48..3bddf76744216c4d684e940290f0089e67c13c43 100644
@@ -22,7 +22,6 @@
 #define _LGPL_SOURCE
 #include <sys/types.h>
 #include <sys/socket.h>
-#include <sys/prctl.h>
 #include <sys/mman.h>
 #include <sys/stat.h>
 #include <sys/types.h>
@@ -46,6 +45,8 @@
 #include <usterr-signal-safe.h>
 #include "tracepoint-internal.h"
 #include "ltt-tracer-core.h"
+#include "compat.h"
+#include "../libringbuffer/tlsfixup.h"
 
 /*
  * Has lttng ust comm constructor been called ?
@@ -77,6 +78,12 @@ static sem_t constructor_wait;
  */
 static int sem_count = { 2 };
 
+/*
+ * Counting nesting within lttng-ust. Used to ensure that calling fork()
+ * from liblttng-ust does not execute the pre/post fork handlers.
+ */
+static int __thread lttng_ust_nest_count;
+
 /*
  * Info about socket and associated listener thread.
  */
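
The new lttng_ust_nest_count thread-local counter is what later lets the library call fork() itself (in get_wait_shm()) without running its own pre/post fork handlers, which only make sense for application-level forks. A minimal sketch of the pattern, with illustrative names only:

/*
 * Sketch of the nesting-guard pattern introduced above (names are
 * illustrative, not lttng-ust API). The per-thread counter is non-zero
 * only while the library itself is inside fork(), so pre/post fork
 * handlers can detect that case and turn themselves into no-ops.
 */
#include <sys/types.h>
#include <unistd.h>

static __thread int demo_nest_count;

static pid_t demo_internal_fork(void)
{
        pid_t pid;

        demo_nest_count++;
        pid = fork();
        /* The decrement runs in both the parent and the child copy. */
        demo_nest_count--;
        return pid;
}

static void demo_before_fork_handler(void)
{
        if (demo_nest_count)
                return;         /* fork() issued by the library itself: skip */
        /* ... quiesce tracing state for an application-level fork ... */
}
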
@@ -129,6 +136,15 @@ extern void ltt_ring_buffer_client_overwrite_exit(void);
 extern void ltt_ring_buffer_client_discard_exit(void);
 extern void ltt_ring_buffer_metadata_client_exit(void);
 
+/*
+ * Force a read (which implies a TLS fixup for dlopen) of TLS variables.
+ */
+static
+void lttng_fixup_nest_count_tls(void)
+{
+       asm volatile ("" : : "m" (lttng_ust_nest_count));
+}
+
 static
 int setup_local_apps(void)
 {
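
The empty asm statement above is an idiom rather than an API: it forces one real access to the thread-local variable from the constructor, so the C library allocates this module's TLS block up front instead of lazily on first use from a context that may already hold a lttng-ust lock. A standalone sketch of the same idiom (hypothetical names, GCC-style asm assumed):

/*
 * Standalone sketch (not lttng-ust code) of the TLS "fixup" idiom used by
 * lttng_fixup_nest_count_tls(): the empty asm with an "m" input forces a
 * real access to the thread-local variable, so the TLS block is set up
 * eagerly rather than on first use.
 */
static __thread int demo_tls_var;

static void demo_fixup_tls(void)
{
        asm volatile ("" : : "m" (demo_tls_var));
}

static void __attribute__((constructor))
demo_lib_init(void)
{
        /* Touch the TLS variable before any library lock can be taken. */
        demo_fixup_tls();
}
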
@@ -140,14 +156,16 @@ int setup_local_apps(void)
         * Disallow per-user tracing for setuid binaries.
         */
        if (uid != geteuid()) {
-               local_apps.allowed = 0;
+               assert(local_apps.allowed == 0);
                return 0;
-       } else {
-               local_apps.allowed = 1;
        }
        home_dir = (const char *) getenv("HOME");
-       if (!home_dir)
+       if (!home_dir) {
+               WARN("HOME environment variable not set. Disabling LTTng-UST per-user tracing.");
+               assert(local_apps.allowed == 0);
                return -ENOENT;
+       }
+       local_apps.allowed = 1;
        snprintf(local_apps.sock_path, PATH_MAX,
                 DEFAULT_HOME_APPS_UNIX_SOCK, home_dir);
        snprintf(local_apps.wait_shm_path, PATH_MAX,
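
setup_local_apps() now leaves local_apps.allowed at 0 for setuid binaries and for processes without HOME, and only flips it to 1 once the per-user paths can actually be built. A simplified sketch of that policy, with an assumed path format:

/*
 * Illustrative sketch (not the lttng-ust implementation) of the policy
 * applied above: no per-user tracing for setuid binaries, and no per-user
 * socket path without HOME. The path format string is an assumption.
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static int demo_build_user_sock_path(char *buf, size_t len)
{
        const char *home;

        if (getuid() != geteuid())
                return -1;      /* setuid binary: keep per-user tracing disabled */
        home = getenv("HOME");
        if (!home)
                return -1;      /* no HOME: cannot locate the per-user socket */
        snprintf(buf, len, "%s/.lttng/demo-apps-sock", home);
        return 0;
}
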
@@ -159,7 +177,6 @@ static
 int register_app_to_sessiond(int socket)
 {
        ssize_t ret;
-       int prctl_ret;
        struct {
                uint32_t major;
                uint32_t minor;
@@ -178,11 +195,7 @@ int register_app_to_sessiond(int socket)
        reg_msg.uid = getuid();
        reg_msg.gid = getgid();
        reg_msg.bits_per_long = CAA_BITS_PER_LONG;
-       prctl_ret = prctl(PR_GET_NAME, (unsigned long) reg_msg.name, 0, 0, 0);
-       if (prctl_ret) {
-               ERR("Error executing prctl");
-               return -errno;
-       }
+       lttng_ust_getprocname(reg_msg.name);
 
        ret = ustcomm_send_unix_sock(socket, &reg_msg, sizeof(reg_msg));
        if (ret >= 0 && ret != sizeof(reg_msg))
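
prctl(PR_GET_NAME) is Linux-only, which is why the registration message now obtains the process name through a compat helper. A hypothetical sketch of what such a wrapper could look like (the compat.h added by this change may well be implemented differently):

/*
 * Hypothetical sketch of a lttng_ust_getprocname()-style wrapper. On Linux
 * it preserves the old prctl(PR_GET_NAME) behaviour; elsewhere it falls
 * back to the program invocation name.
 */
#include <string.h>

#define DEMO_PROCNAME_LEN 16    /* 16 bytes including the NUL, as PR_GET_NAME uses */

#ifdef __linux__
#include <sys/prctl.h>

static void demo_getprocname(char *name)
{
        if (prctl(PR_GET_NAME, (unsigned long) name, 0, 0, 0))
                name[0] = '\0';
}
#else
extern char *__progname;        /* common libc extension; assumed available here */

static void demo_getprocname(char *name)
{
        strncpy(name, __progname, DEMO_PROCNAME_LEN - 1);
        name[DEMO_PROCNAME_LEN - 1] = '\0';
}
#endif
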
@@ -239,6 +252,8 @@ int handle_message(struct sock_info *sock_info,
        const struct lttng_ust_objd_ops *ops;
        struct ustcomm_ust_reply lur;
        int shm_fd, wait_fd;
+       char *shm_path, *wait_pipe_path;
+       union ust_args args;
 
        ust_lock();
 
@@ -271,7 +286,8 @@ int handle_message(struct sock_info *sock_info,
        default:
                if (ops->cmd)
                        ret = ops->cmd(lum->handle, lum->cmd,
-                                       (unsigned long) &lum->u);
+                                       (unsigned long) &lum->u,
+                                       &args);
                else
                        ret = -ENOSYS;
                break;
@@ -287,28 +303,34 @@ end:
                //lur.ret_code = USTCOMM_SESSION_FAIL;
                lur.ret_code = ret;
        }
-       switch (lum->cmd) {
-       case LTTNG_UST_STREAM:
-               /*
-                * Special-case reply to send stream info.
-                * Use lum.u output.
-                */
-               lur.u.stream.memory_map_size = lum->u.stream.memory_map_size;
-               shm_fd = lum->u.stream.shm_fd;
-               wait_fd = lum->u.stream.wait_fd;
-               break;
-       case LTTNG_UST_METADATA:
-       case LTTNG_UST_CHANNEL:
-               lur.u.channel.memory_map_size = lum->u.channel.memory_map_size;
-               shm_fd = lum->u.channel.shm_fd;
-               wait_fd = lum->u.channel.wait_fd;
-               break;
-       case LTTNG_UST_TRACER_VERSION:
-               lur.u.version = lum->u.version;
-               break;
-       case LTTNG_UST_TRACEPOINT_LIST_GET:
-               memcpy(&lur.u.tracepoint, &lum->u.tracepoint, sizeof(lur.u.tracepoint));
-               break;
+       if (ret >= 0) {
+               switch (lum->cmd) {
+               case LTTNG_UST_STREAM:
+                       /*
+                        * Special-case reply to send stream info.
+                        * Use lum.u output.
+                        */
+                       lur.u.stream.memory_map_size = *args.stream.memory_map_size;
+                       shm_fd         = *args.stream.shm_fd;
+                       shm_path       = args.stream.shm_path;
+                       wait_fd        = *args.stream.wait_fd;
+                       wait_pipe_path = args.stream.wait_pipe_path;
+                       break;
+               case LTTNG_UST_METADATA:
+               case LTTNG_UST_CHANNEL:
+                       lur.u.channel.memory_map_size = *args.channel.memory_map_size;
+                       shm_fd         = *args.channel.shm_fd;
+                       shm_path       = args.channel.shm_path;
+                       wait_fd        = *args.channel.wait_fd;
+                       wait_pipe_path = args.channel.wait_pipe_path;
+                       break;
+               case LTTNG_UST_TRACER_VERSION:
+                       lur.u.version = lum->u.version;
+                       break;
+               case LTTNG_UST_TRACEPOINT_LIST_GET:
+                       memcpy(&lur.u.tracepoint, &lum->u.tracepoint, sizeof(lur.u.tracepoint));
+                       break;
+               }
        }
        ret = send_reply(sock, &lur);
        if (ret < 0) {
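
The command handlers now return their results through a union ust_args out-parameter rather than by mutating the received message, which is what makes the shm and wait-pipe paths available to the reply code above. A simplified sketch of that output-argument shape (the real declaration lives in lttng-ust's internal headers and is more complete):

/*
 * Simplified sketch of the out-parameter passed to ops->cmd(). The handler
 * fills in pointers to the descriptors and the backing file paths, so the
 * caller can build its reply from them. Field types are assumptions.
 */
#include <stdint.h>

struct demo_stream_args {
        int *shm_fd;                    /* still owned by the stream object */
        char *shm_path;                 /* path backing the shm, sent instead of the fd */
        int *wait_fd;
        char *wait_pipe_path;           /* path of the wait pipe, sent instead of the fd */
        uint64_t *memory_map_size;
};

union demo_ust_args {
        struct demo_stream_args stream;
        struct demo_stream_args channel;        /* same shape in this sketch */
};
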
@@ -320,34 +342,84 @@ end:
             || lum->cmd == LTTNG_UST_CHANNEL
             || lum->cmd == LTTNG_UST_METADATA)
                        && lur.ret_code == USTCOMM_OK) {
-               /* we also need to send the file descriptors. */
-               ret = ustcomm_send_fds_unix_sock(sock,
-                       &shm_fd, &shm_fd,
-                       1, sizeof(int));
+               int sendret = 0;
+
+               /* send the shm path */
+               ret = ustcomm_send_string(sock, shm_path, strlen(shm_path));
                if (ret < 0) {
-                       perror("send shm_fd");
-                       goto error;
+                       perror("send shm_path");
+                       sendret = ret;
                }
-               ret = ustcomm_send_fds_unix_sock(sock,
-                       &wait_fd, &wait_fd,
-                       1, sizeof(int));
+               /*
+                * The sessiond expects 2 file paths, even upon
+                * error.
+                */
+               ret = ustcomm_send_string(sock, wait_pipe_path, strlen(wait_pipe_path));
                if (ret < 0) {
-                       perror("send wait_fd");
+                       perror("send wait_pipe_path");
+                       goto error;
+               }
+               if (sendret) {
+                       ret = sendret;
                        goto error;
                }
        }
+       /*
+        * We still have the memory map reference, and the shm and wait
+        * pipe paths have been sent to the sessiond, which can open them
+        * itself. We can therefore close our fds. Note that we keep the
+        * write side of the wait_fd open, but close the read side.
+        */
+       if (lur.ret_code == USTCOMM_OK) {
+               switch (lum->cmd) {
+               case LTTNG_UST_STREAM:
+                       if (shm_fd >= 0) {
+                               ret = close(shm_fd);
+                               if (ret) {
+                                       PERROR("Error closing stream shm_fd");
+                               }
+                               *args.stream.shm_fd = -1;
+                       }
+                       if (wait_fd >= 0) {
+                               ret = close(wait_fd);
+                               if (ret) {
+                                       PERROR("Error closing stream wait_fd");
+                               }
+                               *args.stream.wait_fd = -1;
+                       }
+                       break;
+               case LTTNG_UST_METADATA:
+               case LTTNG_UST_CHANNEL:
+                       if (shm_fd >= 0) {
+                               ret = close(shm_fd);
+                               if (ret) {
+                                       PERROR("Error closing channel shm_fd");
+                               }
+                               *args.channel.shm_fd = -1;
+                       }
+                       if (wait_fd >= 0) {
+                               ret = close(wait_fd);
+                               if (ret) {
+                                       PERROR("Error closing channel wait_fd");
+                               }
+                               *args.channel.wait_fd = -1;
+                       }
+                       break;
+               }
+       }
+
 error:
        ust_unlock();
        return ret;
 }
 
 static
-void cleanup_sock_info(struct sock_info *sock_info)
+void cleanup_sock_info(struct sock_info *sock_info, int exiting)
 {
        int ret;
 
        if (sock_info->socket != -1) {
-               ret = close(sock_info->socket);
+               ret = ustcomm_close_unix_sock(sock_info->socket);
                if (ret) {
                        ERR("Error closing apps socket");
                }
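
This is the heart of the Cygwin port: instead of passing shm_fd and wait_fd over the socket with SCM_RIGHTS, the reply is followed by two path strings, and the local descriptors are closed once the reply is out; the sessiond can then open() the shared-memory file and wait pipe by path. A sketch of a path-string send in the spirit of the ustcomm_send_string() calls above (the real ustcomm wire format may differ from this length-prefixed form):

/*
 * Illustrative sketch of sending a path string over a connected UNIX
 * stream socket: a 32-bit length header, then the path bytes.
 */
#include <stdint.h>
#include <sys/socket.h>
#include <sys/types.h>

static ssize_t demo_send_string(int sock, const char *str, size_t len)
{
        uint32_t hdr = (uint32_t) len;
        ssize_t ret;

        ret = send(sock, &hdr, sizeof(hdr), 0);
        if (ret != (ssize_t) sizeof(hdr))
                return -1;
        ret = send(sock, str, len, 0);
        if (ret != (ssize_t) len)
                return -1;
        return (ssize_t) len;
}
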
@@ -361,7 +433,13 @@ void cleanup_sock_info(struct sock_info *sock_info)
                sock_info->root_handle = -1;
        }
        sock_info->constructor_sem_posted = 0;
-       if (sock_info->wait_shm_mmap) {
+       /*
+        * wait_shm_mmap is used by listener threads outside of the
+        * ust lock, so we cannot tear it down ourselves, because we
+        * cannot join on these threads. Leave this task to the OS at
+        * process exit.
+        */
+       if (!exiting && sock_info->wait_shm_mmap) {
                ret = munmap(sock_info->wait_shm_mmap, sysconf(_SC_PAGE_SIZE));
                if (ret) {
                        ERR("Error unmapping wait shm");
@@ -404,7 +482,9 @@ int get_wait_shm(struct sock_info *sock_info, size_t mmap_size)
         * If the open failed because the file did not exist, try
         * creating it ourself.
         */
+       lttng_ust_nest_count++;
        pid = fork();
+       lttng_ust_nest_count--;
        if (pid > 0) {
                int status;
 
@@ -453,9 +533,9 @@ int get_wait_shm(struct sock_info *sock_info, size_t mmap_size)
                        ret = ftruncate(wait_shm_fd, mmap_size);
                        if (ret) {
                                PERROR("ftruncate");
-                               exit(EXIT_FAILURE);
+                               _exit(EXIT_FAILURE);
                        }
-                       exit(EXIT_SUCCESS);
+                       _exit(EXIT_SUCCESS);
                }
                /*
                 * For local shm, we need to have rw access to accept
@@ -467,13 +547,13 @@ int get_wait_shm(struct sock_info *sock_info, size_t mmap_size)
                 */
                if (!sock_info->global && errno != EACCES) {
                        ERR("Error opening shm %s", sock_info->wait_shm_path);
-                       exit(EXIT_FAILURE);
+                       _exit(EXIT_FAILURE);
                }
                /*
                 * The shm exists, but we cannot open it RW. Report
                 * success.
                 */
-               exit(EXIT_SUCCESS);
+               _exit(EXIT_SUCCESS);
        } else {
                return -1;
        }
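
The child created in get_wait_shm() only ftruncate()s the shm file and terminates, so it now uses _exit() instead of exit(): _exit() skips atexit()/on_exit() handlers and does not flush stdio buffers inherited from the parent, both of which belong to the parent process. A standalone illustration of the difference:

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        pid_t pid;

        printf("buffered in the parent");       /* no newline: stays in the stdio buffer */
        pid = fork();
        if (pid < 0)
                return 1;
        if (pid == 0) {
                /*
                 * exit(3) here would flush the inherited stdio buffer a
                 * second time and run any atexit() handlers in the child;
                 * _exit(2) terminates without doing either.
                 */
                _exit(EXIT_SUCCESS);
        }
        waitpid(pid, NULL, 0);
        printf("\n");
        return 0;
}
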
@@ -621,7 +701,7 @@ restart:
        }
 
        if (sock_info->socket != -1) {
-               ret = close(sock_info->socket);
+               ret = ustcomm_close_unix_sock(sock_info->socket);
                if (ret) {
                        ERR("Error closing %s apps socket", sock_info->name);
                }
@@ -772,12 +852,24 @@ int get_timeout(struct timespec *constructor_timeout)
 void __attribute__((constructor)) lttng_ust_init(void)
 {
        struct timespec constructor_timeout;
+       sigset_t sig_all_blocked, orig_parent_mask;
+       pthread_attr_t thread_attr;
        int timeout_mode;
        int ret;
 
        if (uatomic_xchg(&initialized, 1) == 1)
                return;
 
+       /*
+        * Fixup interdependency between the TLS fixup mutex (which happens
+        * to be the dynamic linker mutex) and ust_lock: perform the TLS
+        * fixups now, before any code running under the ust lock can trigger them.
+        */
+       lttng_fixup_event_tls();
+       lttng_fixup_ringbuffer_tls();
+       lttng_fixup_vtid_tls();
+       lttng_fixup_nest_count_tls();
+
        /*
         * We want precise control over the order in which we construct
         * our sub-libraries vs starting to receive commands from
@@ -797,17 +889,53 @@ void __attribute__((constructor)) lttng_ust_init(void)
 
        ret = setup_local_apps();
        if (ret) {
-               ERR("Error setting up to local apps");
+               DBG("local apps setup returned %d", ret);
+       }
+
+       /* A new thread created by pthread_create inherits the signal mask
+        * from the parent. To avoid any signal being received by the
+        * listener thread, we block all signals temporarily in the parent,
+        * while we create the listener thread.
+        */
+       sigfillset(&sig_all_blocked);
+       ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_parent_mask);
+       if (ret) {
+               ERR("pthread_sigmask: %s", strerror(ret));
+       }
+
+       ret = pthread_attr_init(&thread_attr);
+       if (ret) {
+               ERR("pthread_attr_init: %s", strerror(ret));
+       }
+       ret = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_DETACHED);
+       if (ret) {
+               ERR("pthread_attr_setdetachstate: %s", strerror(ret));
        }
-       ret = pthread_create(&local_apps.ust_listener, NULL,
-                       ust_listener_thread, &local_apps);
 
+       ret = pthread_create(&global_apps.ust_listener, &thread_attr,
+                       ust_listener_thread, &global_apps);
+       if (ret) {
+               ERR("pthread_create global: %s", strerror(ret));
+       }
        if (local_apps.allowed) {
-               ret = pthread_create(&global_apps.ust_listener, NULL,
-                               ust_listener_thread, &global_apps);
+               ret = pthread_create(&local_apps.ust_listener, &thread_attr,
+                               ust_listener_thread, &local_apps);
+               if (ret) {
+                       ERR("pthread_create local: %s", strerror(ret));
+               }
        } else {
                handle_register_done(&local_apps);
        }
+       ret = pthread_attr_destroy(&thread_attr);
+       if (ret) {
+               ERR("pthread_attr_destroy: %s", strerror(ret));
+       }
+
+       /* Restore original signal mask in parent */
+       ret = pthread_sigmask(SIG_SETMASK, &orig_parent_mask, NULL);
+       if (ret) {
+               ERR("pthread_sigmask: %s", strerror(ret));
+       }
 
        switch (timeout_mode) {
        case 1: /* timeout wait */
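
The constructor now creates both listener threads detached and with every signal blocked: a thread created by pthread_create() inherits the creator's signal mask, so blocking everything around the create and restoring afterwards keeps application signals away from the listeners without touching the application's own mask. A self-contained sketch of that pattern (illustrative worker function):

#include <pthread.h>
#include <signal.h>

static void *demo_worker(void *arg)
{
        (void) arg;
        /* ... service requests until cancelled ... */
        return NULL;
}

static int demo_spawn_detached_worker(pthread_t *tid)
{
        sigset_t all_blocked, saved;
        pthread_attr_t attr;
        int ret, err;

        /* Block every signal so the new thread inherits an all-blocked mask. */
        sigfillset(&all_blocked);
        ret = pthread_sigmask(SIG_SETMASK, &all_blocked, &saved);
        if (ret)
                return ret;
        ret = pthread_attr_init(&attr);
        if (ret)
                goto restore;
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        if (!ret)
                ret = pthread_create(tid, &attr, demo_worker, NULL);
        err = pthread_attr_destroy(&attr);
        if (!ret)
                ret = err;
restore:
        /* Restore the caller's original mask; the worker keeps the full block. */
        err = pthread_sigmask(SIG_SETMASK, &saved, NULL);
        if (!ret)
                ret = err;
        return ret;
}
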
@@ -835,10 +963,17 @@ void __attribute__((constructor)) lttng_ust_init(void)
 static
 void lttng_ust_cleanup(int exiting)
 {
-       cleanup_sock_info(&global_apps);
+       cleanup_sock_info(&global_apps, exiting);
        if (local_apps.allowed) {
-               cleanup_sock_info(&local_apps);
+               cleanup_sock_info(&local_apps, exiting);
        }
+       /*
+        * The teardown performed in this function affects data
+        * structures accessed under the UST lock by the listener
+        * threads. This lock, along with the lttng_ust_comm_should_quit
+        * flag, ensures that none of these threads are accessing this
+        * data at this point.
+        */
        lttng_ust_abi_exit();
        lttng_ust_events_exit();
        ltt_ring_buffer_client_discard_exit();
@@ -872,16 +1007,27 @@ void __attribute__((destructor)) lttng_ust_exit(void)
        lttng_ust_comm_should_quit = 1;
        ust_unlock();
 
+       /* cancel threads */
        ret = pthread_cancel(global_apps.ust_listener);
        if (ret) {
-               ERR("Error cancelling global ust listener thread");
+               ERR("Error cancelling global ust listener thread: %s",
+                       strerror(ret));
        }
        if (local_apps.allowed) {
                ret = pthread_cancel(local_apps.ust_listener);
                if (ret) {
-                       ERR("Error cancelling local ust listener thread");
+                       ERR("Error cancelling local ust listener thread: %s",
+                               strerror(ret));
                }
        }
+       /*
+        * Do NOT join threads: use of sys_futex makes it impossible to
+        * join the threads without using async-cancel, but async-cancel
+        * is delivered by a signal, which could hit the target thread
+        * anywhere in its code path, including while the ust_lock() is
+        * held, causing a deadlock for the other thread. Let the OS
+        * cleanup the threads if there are stalled in a syscall.
+        * clean up these threads if they are stalled in a syscall.
        lttng_ust_cleanup(1);
 }
 
@@ -903,6 +1049,8 @@ void ust_before_fork(sigset_t *save_sigset)
        sigset_t all_sigs;
        int ret;
 
+       if (lttng_ust_nest_count)
+               return;
        /* Disable signals */
        sigfillset(&all_sigs);
        ret = sigprocmask(SIG_BLOCK, &all_sigs, save_sigset);
@@ -928,6 +1076,8 @@ static void ust_after_fork_common(sigset_t *restore_sigset)
 
 void ust_after_fork_parent(sigset_t *restore_sigset)
 {
+       if (lttng_ust_nest_count)
+               return;
        DBG("process %d", getpid());
        rcu_bp_after_fork_parent();
        /* Release mutexes and reenable signals */
@@ -945,6 +1095,8 @@ void ust_after_fork_parent(sigset_t *restore_sigset)
  */
 void ust_after_fork_child(sigset_t *restore_sigset)
 {
+       if (lttng_ust_nest_count)
+               return;
        DBG("process %d", getpid());
        /* Release urcu mutexes */
        rcu_bp_after_fork_child();
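
These hooks are not invoked for the library's own fork() (that case is filtered out by lttng_ust_nest_count); they are meant to be driven around application forks, typically by the fork() wrapper shipped in liblttng-ust-fork. A hypothetical sketch of the intended call sequence (the actual wrapper may differ):

#include <signal.h>
#include <sys/types.h>
#include <unistd.h>

/* Declared by liblttng-ust (see the definitions above). */
extern void ust_before_fork(sigset_t *save_sigset);
extern void ust_after_fork_parent(sigset_t *restore_sigset);
extern void ust_after_fork_child(sigset_t *restore_sigset);

static pid_t demo_traced_fork(void)
{
        sigset_t saved;
        pid_t pid;

        ust_before_fork(&saved);        /* blocks signals before forking */
        pid = fork();
        if (pid == 0)
                ust_after_fork_child(&saved);
        else
                ust_after_fork_parent(&saved);
        return pid;
}
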