X-Git-Url: http://git.lttng.org/?a=blobdiff_plain;f=libust%2Flttng-ust-comm.c;h=3327e10b827e4a5255bd2f1792bdc95bd74bc6fb;hb=d4419b81b243bc3a6bdd4a09b3ca2216d044a1c7;hp=a9f5797df671cc8af4ff5e2f01f4408ab0143360;hpb=11ff9c7d186237bf04d14ca05b2071bc0f12cae3;p=lttng-ust.git diff --git a/libust/lttng-ust-comm.c b/libust/lttng-ust-comm.c index a9f5797d..3327e10b 100644 --- a/libust/lttng-ust-comm.c +++ b/libust/lttng-ust-comm.c @@ -19,23 +19,44 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ +#define _LGPL_SOURCE #include #include +#include +#include +#include +#include +#include +#include #include #include -#include -#include -#include #include #include #include #include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include "ltt-tracer-core.h" + +/* + * Has lttng ust comm constructor been called ? + */ +static int initialized; /* - * communication thread mutex. Held when handling a command, also held - * by fork() to deal with removal of threads, and by exit path. + * The ust_lock/ust_unlock lock is used as a communication thread mutex. + * Held when handling a command, also held by fork() to deal with + * removal of threads, and by exit path. */ -static pthread_mutex_t lttng_ust_comm_mutex = PTHREAD_MUTEX_INITIALIZER; /* Should the ust comm thread quit ? */ static int lttng_ust_comm_should_quit; @@ -51,44 +72,86 @@ static int lttng_ust_comm_should_quit; * daemon problems). */ static sem_t constructor_wait; +/* + * Doing this for both the global and local sessiond. + */ +static int sem_count = { 2 }; /* * Info about socket and associated listener thread. */ struct sock_info { const char *name; - char sock_path[PATH_MAX]; - int socket; pthread_t ust_listener; /* listener thread */ int root_handle; + int constructor_sem_posted; + int allowed; + int global; + + char sock_path[PATH_MAX]; + int socket; + + char wait_shm_path[PATH_MAX]; + char *wait_shm_mmap; }; /* Socket from app (connect) to session daemon (listen) for communication */ struct sock_info global_apps = { .name = "global", + .global = 1, + + .root_handle = -1, + .allowed = 1, + .sock_path = DEFAULT_GLOBAL_APPS_UNIX_SOCK, .socket = -1, - .root_handle = -1, + + .wait_shm_path = DEFAULT_GLOBAL_APPS_WAIT_SHM_PATH, }; /* TODO: allow global_apps_sock_path override */ struct sock_info local_apps = { .name = "local", - .socket = -1, + .global = 0, .root_handle = -1, + .allowed = 0, /* Check setuid bit first */ + + .socket = -1, }; +static int wait_poll_fallback; + +extern void ltt_ring_buffer_client_overwrite_init(void); +extern void ltt_ring_buffer_client_discard_init(void); +extern void ltt_ring_buffer_metadata_client_init(void); +extern void ltt_ring_buffer_client_overwrite_exit(void); +extern void ltt_ring_buffer_client_discard_exit(void); +extern void ltt_ring_buffer_metadata_client_exit(void); + static -int setup_local_apps_socket(void) +int setup_local_apps(void) { const char *home_dir; + uid_t uid; + uid = getuid(); + /* + * Disallow per-user tracing for setuid binaries. 
+ */ + if (uid != geteuid()) { + local_apps.allowed = 0; + return 0; + } else { + local_apps.allowed = 1; + } home_dir = (const char *) getenv("HOME"); if (!home_dir) return -ENOENT; snprintf(local_apps.sock_path, PATH_MAX, DEFAULT_HOME_APPS_UNIX_SOCK, home_dir); + snprintf(local_apps.wait_shm_path, PATH_MAX, + DEFAULT_HOME_APPS_WAIT_SHM_PATH, uid); return 0; } @@ -96,30 +159,41 @@ static int register_app_to_sessiond(int socket) { ssize_t ret; + int prctl_ret; struct { uint32_t major; uint32_t minor; pid_t pid; + pid_t ppid; uid_t uid; + gid_t gid; + char name[16]; /* process name */ } reg_msg; reg_msg.major = LTTNG_UST_COMM_VERSION_MAJOR; reg_msg.minor = LTTNG_UST_COMM_VERSION_MINOR; reg_msg.pid = getpid(); + reg_msg.ppid = getppid(); reg_msg.uid = getuid(); + reg_msg.gid = getgid(); + prctl_ret = prctl(PR_GET_NAME, (unsigned long) reg_msg.name, 0, 0, 0); + if (prctl_ret) { + ERR("Error executing prctl"); + return -errno; + } - ret = lttcomm_send_unix_sock(socket, ®_msg, sizeof(reg_msg)); + ret = ustcomm_send_unix_sock(socket, ®_msg, sizeof(reg_msg)); if (ret >= 0 && ret != sizeof(reg_msg)) return -EIO; return ret; } static -int send_reply(int sock, struct lttcomm_ust_reply *lur) +int send_reply(int sock, struct ustcomm_ust_reply *lur) { ssize_t len; - len = lttcomm_send_unix_sock(sock, lur, sizeof(*lur)); + len = ustcomm_send_unix_sock(sock, lur, sizeof(*lur)); switch (len) { case sizeof(*lur): DBG("message successfully sent"); @@ -137,24 +211,34 @@ int send_reply(int sock, struct lttcomm_ust_reply *lur) } static -int handle_register_done(void) +int handle_register_done(struct sock_info *sock_info) { int ret; - ret = sem_post(&constructor_wait); - assert(!ret); + if (sock_info->constructor_sem_posted) + return 0; + sock_info->constructor_sem_posted = 1; + if (uatomic_read(&sem_count) <= 0) { + return 0; + } + ret = uatomic_add_return(&sem_count, -1); + if (ret == 0) { + ret = sem_post(&constructor_wait); + assert(!ret); + } return 0; } static int handle_message(struct sock_info *sock_info, - int sock, struct lttcomm_ust_msg *lum) + int sock, struct ustcomm_ust_msg *lum) { int ret = 0; - const struct objd_ops *ops; - struct lttcomm_ust_reply lur; + const struct lttng_ust_objd_ops *ops; + struct ustcomm_ust_reply lur; + int shm_fd, wait_fd; - pthread_mutex_lock(<tng_ust_comm_mutex); + ust_lock(); memset(&lur, 0, sizeof(lur)); @@ -172,7 +256,7 @@ int handle_message(struct sock_info *sock_info, switch (lum->cmd) { case LTTNG_UST_REGISTER_DONE: if (lum->handle == LTTNG_UST_ROOT_HANDLE) - ret = handle_register_done(); + ret = handle_register_done(sock_info); else ret = -EINVAL; break; @@ -180,7 +264,7 @@ int handle_message(struct sock_info *sock_info, if (lum->handle == LTTNG_UST_ROOT_HANDLE) ret = -EPERM; else - ret = objd_unref(lum->handle); + ret = lttng_ust_objd_unref(lum->handle); break; default: if (ops->cmd) @@ -196,13 +280,59 @@ end: lur.cmd = lum->cmd; lur.ret_val = ret; if (ret >= 0) { - lur.ret_code = LTTCOMM_OK; + lur.ret_code = USTCOMM_OK; } else { - lur.ret_code = LTTCOMM_SESSION_FAIL; + //lur.ret_code = USTCOMM_SESSION_FAIL; + lur.ret_code = ret; + } + switch (lum->cmd) { + case LTTNG_UST_STREAM: + /* + * Special-case reply to send stream info. + * Use lum.u output. 
+ */ + lur.u.stream.memory_map_size = lum->u.stream.memory_map_size; + shm_fd = lum->u.stream.shm_fd; + wait_fd = lum->u.stream.wait_fd; + break; + case LTTNG_UST_METADATA: + case LTTNG_UST_CHANNEL: + lur.u.channel.memory_map_size = lum->u.channel.memory_map_size; + shm_fd = lum->u.channel.shm_fd; + wait_fd = lum->u.channel.wait_fd; + break; + case LTTNG_UST_VERSION: + lur.u.version = lum->u.version; + break; } ret = send_reply(sock, &lur); + if (ret < 0) { + perror("error sending reply"); + goto error; + } - pthread_mutex_unlock(<tng_ust_comm_mutex); + if ((lum->cmd == LTTNG_UST_STREAM + || lum->cmd == LTTNG_UST_CHANNEL + || lum->cmd == LTTNG_UST_METADATA) + && lur.ret_code == USTCOMM_OK) { + /* we also need to send the file descriptors. */ + ret = ustcomm_send_fds_unix_sock(sock, + &shm_fd, &shm_fd, + 1, sizeof(int)); + if (ret < 0) { + perror("send shm_fd"); + goto error; + } + ret = ustcomm_send_fds_unix_sock(sock, + &wait_fd, &wait_fd, + 1, sizeof(int)); + if (ret < 0) { + perror("send wait_fd"); + goto error; + } + } +error: + ust_unlock(); return ret; } @@ -214,17 +344,237 @@ void cleanup_sock_info(struct sock_info *sock_info) if (sock_info->socket != -1) { ret = close(sock_info->socket); if (ret) { - ERR("Error closing local apps socket"); + ERR("Error closing apps socket"); } sock_info->socket = -1; } if (sock_info->root_handle != -1) { - ret = objd_unref(sock_info->root_handle); + ret = lttng_ust_objd_unref(sock_info->root_handle); if (ret) { ERR("Error unref root handle"); } sock_info->root_handle = -1; } + sock_info->constructor_sem_posted = 0; + if (sock_info->wait_shm_mmap) { + ret = munmap(sock_info->wait_shm_mmap, sysconf(_SC_PAGE_SIZE)); + if (ret) { + ERR("Error unmapping wait shm"); + } + sock_info->wait_shm_mmap = NULL; + } +} + +/* + * Using fork to set umask in the child process (not multi-thread safe). + * We deal with the shm_open vs ftruncate race (happening when the + * sessiond owns the shm and does not let everybody modify it, to ensure + * safety against shm_unlink) by simply letting the mmap fail and + * retrying after a few seconds. + * For global shm, everybody has rw access to it until the sessiond + * starts. + */ +static +int get_wait_shm(struct sock_info *sock_info, size_t mmap_size) +{ + int wait_shm_fd, ret; + pid_t pid; + + /* + * Try to open read-only. + */ + wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0); + if (wait_shm_fd >= 0) { + goto end; + } else if (wait_shm_fd < 0 && errno != ENOENT) { + /* + * Real-only open did not work, and it's not because the + * entry was not present. It's a failure that prohibits + * using shm. + */ + ERR("Error opening shm %s", sock_info->wait_shm_path); + goto end; + } + /* + * If the open failed because the file did not exist, try + * creating it ourself. + */ + pid = fork(); + if (pid > 0) { + int status; + + /* + * Parent: wait for child to return, in which case the + * shared memory map will have been created. + */ + pid = wait(&status); + if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) { + wait_shm_fd = -1; + goto end; + } + /* + * Try to open read-only again after creation. + */ + wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0); + if (wait_shm_fd < 0) { + /* + * Real-only open did not work. It's a failure + * that prohibits using shm. 
+ */ + ERR("Error opening shm %s", sock_info->wait_shm_path); + goto end; + } + goto end; + } else if (pid == 0) { + int create_mode; + + /* Child */ + create_mode = S_IRUSR | S_IWUSR | S_IRGRP; + if (sock_info->global) + create_mode |= S_IROTH | S_IWGRP | S_IWOTH; + /* + * We're alone in a child process, so we can modify the + * process-wide umask. + */ + umask(~create_mode); + /* + * Try creating shm (or get rw access). + * We don't do an exclusive open, because we allow other + * processes to create+ftruncate it concurrently. + */ + wait_shm_fd = shm_open(sock_info->wait_shm_path, + O_RDWR | O_CREAT, create_mode); + if (wait_shm_fd >= 0) { + ret = ftruncate(wait_shm_fd, mmap_size); + if (ret) { + PERROR("ftruncate"); + exit(EXIT_FAILURE); + } + exit(EXIT_SUCCESS); + } + /* + * For local shm, we need to have rw access to accept + * opening it: this means the local sessiond will be + * able to wake us up. For global shm, we open it even + * if rw access is not granted, because the root.root + * sessiond will be able to override all rights and wake + * us up. + */ + if (!sock_info->global && errno != EACCES) { + ERR("Error opening shm %s", sock_info->wait_shm_path); + exit(EXIT_FAILURE); + } + /* + * The shm exists, but we cannot open it RW. Report + * success. + */ + exit(EXIT_SUCCESS); + } else { + return -1; + } +end: + if (wait_shm_fd >= 0 && !sock_info->global) { + struct stat statbuf; + + /* + * Ensure that our user is the owner of the shm file for + * local shm. If we do not own the file, it means our + * sessiond will not have access to wake us up (there is + * probably a rogue process trying to fake our + * sessiond). Fallback to polling method in this case. + */ + ret = fstat(wait_shm_fd, &statbuf); + if (ret) { + PERROR("fstat"); + goto error_close; + } + if (statbuf.st_uid != getuid()) + goto error_close; + } + return wait_shm_fd; + +error_close: + ret = close(wait_shm_fd); + if (ret) { + PERROR("Error closing fd"); + } + return -1; +} + +static +char *get_map_shm(struct sock_info *sock_info) +{ + size_t mmap_size = sysconf(_SC_PAGE_SIZE); + int wait_shm_fd, ret; + char *wait_shm_mmap; + + wait_shm_fd = get_wait_shm(sock_info, mmap_size); + if (wait_shm_fd < 0) { + goto error; + } + wait_shm_mmap = mmap(NULL, mmap_size, PROT_READ, + MAP_SHARED, wait_shm_fd, 0); + /* close shm fd immediately after taking the mmap reference */ + ret = close(wait_shm_fd); + if (ret) { + PERROR("Error closing fd"); + } + if (wait_shm_mmap == MAP_FAILED) { + DBG("mmap error (can be caused by race with sessiond). Fallback to poll mode."); + goto error; + } + return wait_shm_mmap; + +error: + return NULL; +} + +static +void wait_for_sessiond(struct sock_info *sock_info) +{ + int ret; + + ust_lock(); + if (lttng_ust_comm_should_quit) { + goto quit; + } + if (wait_poll_fallback) { + goto error; + } + if (!sock_info->wait_shm_mmap) { + sock_info->wait_shm_mmap = get_map_shm(sock_info); + if (!sock_info->wait_shm_mmap) + goto error; + } + ust_unlock(); + + DBG("Waiting for %s apps sessiond", sock_info->name); + /* Wait for futex wakeup */ + if (uatomic_read((int32_t *) sock_info->wait_shm_mmap) == 0) { + ret = futex_async((int32_t *) sock_info->wait_shm_mmap, + FUTEX_WAIT, 0, NULL, NULL, 0); + if (ret < 0) { + if (errno == EFAULT) { + wait_poll_fallback = 1; + WARN( +"Linux kernels 2.6.33 to 3.0 (with the exception of stable versions) " +"do not support FUTEX_WAKE on read-only memory mappings correctly. 
" +"Please upgrade your kernel " +"(fix is commit 9ea71503a8ed9184d2d0b8ccc4d269d05f7940ae in Linux kernel " +"mainline). LTTng-UST will use polling mode fallback."); + } + PERROR("futex"); + } + } + return; + +quit: + ust_unlock(); + return; + +error: + ust_unlock(); + return; } /* @@ -238,14 +588,29 @@ static void *ust_listener_thread(void *arg) { struct sock_info *sock_info = arg; - int sock, ret; + int sock, ret, prev_connect_failed = 0, has_waited = 0; /* Restart trying to connect to the session daemon */ restart: - pthread_mutex_lock(<tng_ust_comm_mutex); + if (prev_connect_failed) { + /* Wait for sessiond availability with pipe */ + wait_for_sessiond(sock_info); + if (has_waited) { + has_waited = 0; + /* + * Sleep for 5 seconds before retrying after a + * sequence of failure / wait / failure. This + * deals with a killed or broken session daemon. + */ + sleep(5); + } + has_waited = 1; + prev_connect_failed = 0; + } + ust_lock(); if (lttng_ust_comm_should_quit) { - pthread_mutex_unlock(<tng_ust_comm_mutex); + ust_unlock(); goto quit; } @@ -257,20 +622,18 @@ restart: sock_info->socket = -1; } - /* Check for sessiond availability with pipe TODO */ - /* Register */ - ret = lttcomm_connect_unix_sock(sock_info->sock_path); + ret = ustcomm_connect_unix_sock(sock_info->sock_path); if (ret < 0) { ERR("Error connecting to %s apps socket", sock_info->name); + prev_connect_failed = 1; /* * If we cannot find the sessiond daemon, don't delay * constructor execution. */ - ret = handle_register_done(); + ret = handle_register_done(sock_info); assert(!ret); - pthread_mutex_unlock(<tng_ust_comm_mutex); - sleep(5); + ust_unlock(); goto restart; } @@ -282,9 +645,9 @@ restart: */ if (sock_info->root_handle == -1) { ret = lttng_abi_create_root_handle(); - if (ret) { + if (ret < 0) { ERR("Error creating root handle"); - pthread_mutex_unlock(<tng_ust_comm_mutex); + ust_unlock(); goto quit; } sock_info->root_handle = ret; @@ -293,23 +656,23 @@ restart: ret = register_app_to_sessiond(sock); if (ret < 0) { ERR("Error registering to %s apps socket", sock_info->name); + prev_connect_failed = 1; /* * If we cannot register to the sessiond daemon, don't * delay constructor execution. */ - ret = handle_register_done(); + ret = handle_register_done(sock_info); assert(!ret); - pthread_mutex_unlock(<tng_ust_comm_mutex); - sleep(5); + ust_unlock(); goto restart; } - pthread_mutex_unlock(<tng_ust_comm_mutex); + ust_unlock(); for (;;) { ssize_t len; - struct lttcomm_ust_msg lum; + struct ustcomm_ust_msg lum; - len = lttcomm_recv_unix_sock(sock, &lum, sizeof(lum)); + len = ustcomm_recv_unix_sock(sock, &lum, sizeof(lum)); switch (len) { case 0: /* orderly shutdown */ DBG("%s ltt-sessiond has performed an orderly shutdown\n", sock_info->name); @@ -339,30 +702,44 @@ quit: return NULL; } +/* + * Return values: -1: don't wait. 0: wait forever. 1: timeout wait. 
+ */ static int get_timeout(struct timespec *constructor_timeout) { - struct timespec constructor_delay = - { - .tv_sec = LTTNG_UST_DEFAULT_CONSTRUCTOR_TIMEOUT_S, - .tv_nsec = LTTNG_UST_DEFAULT_CONSTRUCTOR_TIMEOUT_NS, - }; - struct timespec realtime; + long constructor_delay_ms = LTTNG_UST_DEFAULT_CONSTRUCTOR_TIMEOUT_MS; + char *str_delay; int ret; - ret = clock_gettime(CLOCK_REALTIME, &realtime); - if (ret) - return ret; + str_delay = getenv("UST_REGISTER_TIMEOUT"); + if (str_delay) { + constructor_delay_ms = strtol(str_delay, NULL, 10); + } + + switch (constructor_delay_ms) { + case -1:/* fall-through */ + case 0: + return constructor_delay_ms; + default: + break; + } - constructor_timeout->tv_sec = - realtime.tv_sec + constructor_delay.tv_sec; - constructor_timeout->tv_nsec = - constructor_delay.tv_nsec + realtime.tv_nsec; + /* + * If we are unable to find the current time, don't wait. + */ + ret = clock_gettime(CLOCK_REALTIME, constructor_timeout); + if (ret) { + return -1; + } + constructor_timeout->tv_sec += constructor_delay_ms / 1000UL; + constructor_timeout->tv_nsec += + (constructor_delay_ms % 1000UL) * 1000000UL; if (constructor_timeout->tv_nsec >= 1000000000UL) { constructor_timeout->tv_sec++; constructor_timeout->tv_nsec -= 1000000000UL; } - return 0; + return 1; } /* @@ -371,47 +748,91 @@ int get_timeout(struct timespec *constructor_timeout) */ /* TODO */ -void __attribute__((constructor)) lttng_ust_comm_init(void) +void __attribute__((constructor)) lttng_ust_init(void) { struct timespec constructor_timeout; + int timeout_mode; int ret; + if (uatomic_xchg(&initialized, 1) == 1) + return; + + /* + * We want precise control over the order in which we construct + * our sub-libraries vs starting to receive commands from + * sessiond (otherwise leading to errors when trying to create + * sessiond before the init functions are completed). + */ init_usterr(); + init_tracepoint(); + ltt_ring_buffer_metadata_client_init(); + ltt_ring_buffer_client_overwrite_init(); + ltt_ring_buffer_client_discard_init(); - ret = get_timeout(&constructor_timeout); - assert(!ret); + timeout_mode = get_timeout(&constructor_timeout); - ret = sem_init(&constructor_wait, 0, 2); + ret = sem_init(&constructor_wait, 0, 0); assert(!ret); - ret = setup_local_apps_socket(); + ret = setup_local_apps(); if (ret) { - ERR("Error setting up to local apps socket"); + ERR("Error setting up to local apps"); } - - /* - * Wait for the pthread cond to let us continue to main program - * execution. Hold mutex across thread creation, so we start - * waiting for the condition before the threads can signal its - * completion. 
- */ - pthread_mutex_lock(<tng_ust_comm_mutex); - ret = pthread_create(&global_apps.ust_listener, NULL, - ust_listener_thread, &global_apps); ret = pthread_create(&local_apps.ust_listener, NULL, ust_listener_thread, &local_apps); - ret = sem_timedwait(&constructor_wait, &constructor_timeout); - if (ret < 0 && errno == ETIMEDOUT) { - ERR("Timed out waiting for ltt-sessiond"); + if (local_apps.allowed) { + ret = pthread_create(&global_apps.ust_listener, NULL, + ust_listener_thread, &global_apps); } else { + handle_register_done(&local_apps); + } + + switch (timeout_mode) { + case 1: /* timeout wait */ + do { + ret = sem_timedwait(&constructor_wait, + &constructor_timeout); + } while (ret < 0 && errno == EINTR); + if (ret < 0 && errno == ETIMEDOUT) { + ERR("Timed out waiting for ltt-sessiond"); + } else { + assert(!ret); + } + break; + case -1:/* wait forever */ + do { + ret = sem_wait(&constructor_wait); + } while (ret < 0 && errno == EINTR); assert(!ret); + break; + case 0: /* no timeout */ + break; } - pthread_mutex_unlock(<tng_ust_comm_mutex); +} +static +void lttng_ust_cleanup(int exiting) +{ + cleanup_sock_info(&global_apps); + if (local_apps.allowed) { + cleanup_sock_info(&local_apps); + } + lttng_ust_abi_exit(); + lttng_ust_events_exit(); + ltt_ring_buffer_client_discard_exit(); + ltt_ring_buffer_client_overwrite_exit(); + ltt_ring_buffer_metadata_client_exit(); + exit_tracepoint(); + if (!exiting) { + /* Reinitialize values for fork */ + sem_count = 2; + lttng_ust_comm_should_quit = 0; + initialized = 0; + } } -void __attribute__((destructor)) lttng_ust_comm_exit(void) +void __attribute__((destructor)) lttng_ust_exit(void) { int ret; @@ -426,26 +847,89 @@ void __attribute__((destructor)) lttng_ust_comm_exit(void) * mutexes to ensure it is not in a mutex critical section when * pthread_cancel is later called. */ - pthread_mutex_lock(<tng_ust_comm_mutex); + ust_lock(); lttng_ust_comm_should_quit = 1; - pthread_mutex_unlock(<tng_ust_comm_mutex); + ust_unlock(); -#if 0 ret = pthread_cancel(global_apps.ust_listener); if (ret) { ERR("Error cancelling global ust listener thread"); } -#endif //0 + if (local_apps.allowed) { + ret = pthread_cancel(local_apps.ust_listener); + if (ret) { + ERR("Error cancelling local ust listener thread"); + } + } + lttng_ust_cleanup(1); +} - cleanup_sock_info(&global_apps); +/* + * We exclude the worker threads across fork and clone (except + * CLONE_VM), because these system calls only keep the forking thread + * running in the child. Therefore, we don't want to call fork or clone + * in the middle of an tracepoint or ust tracing state modification. + * Holding this mutex protects these structures across fork and clone. + */ +void ust_before_fork(ust_fork_info_t *fork_info) +{ + /* + * Disable signals. This is to avoid that the child intervenes + * before it is properly setup for tracing. It is safer to + * disable all signals, because then we know we are not breaking + * anything by restoring the original mask. 
+	 */
+	sigset_t all_sigs;
+	int ret;
 
-	ret = pthread_cancel(local_apps.ust_listener);
-	if (ret) {
-		ERR("Error cancelling local ust listener thread");
+	/* Disable signals */
+	sigfillset(&all_sigs);
+	ret = sigprocmask(SIG_BLOCK, &all_sigs, &fork_info->orig_sigs);
+	if (ret == -1) {
+		PERROR("sigprocmask");
+	}
+	ust_lock();
+	rcu_bp_before_fork();
+}
+
+static void ust_after_fork_common(ust_fork_info_t *fork_info)
+{
+	int ret;
+
+	DBG("process %d", getpid());
+	ust_unlock();
+	/* Restore signals */
+	ret = sigprocmask(SIG_SETMASK, &fork_info->orig_sigs, NULL);
+	if (ret == -1) {
+		PERROR("sigprocmask");
 	}
+}
 
-	cleanup_sock_info(&local_apps);
+void ust_after_fork_parent(ust_fork_info_t *fork_info)
+{
+	DBG("process %d", getpid());
+	rcu_bp_after_fork_parent();
+	/* Release mutexes and reenable signals */
+	ust_after_fork_common(fork_info);
+}
 
-	lttng_ust_abi_exit();
-	ltt_events_exit();
+/*
+ * After fork, in the child, we need to clean up all the leftover state,
+ * except the worker thread which already magically disappeared thanks
+ * to the weird Linux fork semantics. After tidying up, we call
+ * lttng_ust_init() again to start over as a new PID.
+ *
+ * This is meant for forks() that have tracing in the child between the
+ * fork and the following exec call (if there is any).
+ */
+void ust_after_fork_child(ust_fork_info_t *fork_info)
+{
+	DBG("process %d", getpid());
+	/* Release urcu mutexes */
+	rcu_bp_after_fork_child();
+	lttng_ust_cleanup(0);
+	lttng_context_vtid_reset();
+	/* Release mutexes and reenable signals */
+	ust_after_fork_common(fork_info);
+	lttng_ust_init();
 }
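
Note on the wait-shm handshake introduced by this patch: wait_for_sessiond() implements only the sleeping half of the protocol. The application maps the shm page read-only (e.g. DEFAULT_GLOBAL_APPS_WAIT_SHM_PATH) and calls FUTEX_WAIT as long as the 32-bit word at the start of the page reads 0; the session daemon, which maps the same page read-write, is expected to store a non-zero value there and then issue FUTEX_WAKE. The sketch below illustrates that wake-up half using the plain futex(2) idiom. It is not code from lttng-tools; the function name, the shm path argument and the minimal error handling are assumptions made purely for illustration.

#include <fcntl.h>
#include <limits.h>
#include <stdint.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <linux/futex.h>
#include <unistd.h>

/*
 * Hypothetical waker, e.g. run by a session daemon once its command
 * socket is ready. Link with -lrt on older glibc for shm_open().
 */
static int wake_waiting_apps(const char *wait_shm_path)
{
	size_t page_size = sysconf(_SC_PAGE_SIZE);
	int32_t *futex_word;
	long ret;
	int fd;

	fd = shm_open(wait_shm_path, O_RDWR, 0);
	if (fd < 0)
		return -1;
	futex_word = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
			MAP_SHARED, fd, 0);
	close(fd);
	if (futex_word == MAP_FAILED)
		return -1;
	/*
	 * Publish "sessiond ready" before waking: a racing waiter either
	 * sees the non-zero value and skips FUTEX_WAIT, or enters the
	 * wait and is woken below.
	 */
	__atomic_store_n(futex_word, 1, __ATOMIC_SEQ_CST);
	/* Wake every process sleeping on this futex word. */
	ret = syscall(SYS_futex, futex_word, FUTEX_WAKE, INT_MAX,
			NULL, NULL, 0);
	munmap(futex_word, page_size);
	return ret < 0 ? -1 : 0;
}

On the application side this pairs with the futex_async(..., FUTEX_WAIT, 0, ...) call in wait_for_sessiond(): a zero word means "no session daemon yet", so the listener thread blocks until the store-and-wake happens, or falls back to polling on the kernels mentioned in the WARN message above.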