+ if (ops->cmd)
+ ret = ops->cmd(lum->handle, lum->cmd,
+ (unsigned long) &lum->u);
+ else
+ ret = -ENOSYS;
+ break;
+ }
+
+end:
+ lur.handle = lum->handle;
+ lur.cmd = lum->cmd;
+ lur.ret_val = ret;
+ if (ret >= 0) {
+ lur.ret_code = LTTCOMM_OK;
+ } else {
+ //lur.ret_code = LTTCOMM_SESSION_FAIL;
+ lur.ret_code = ret;
+ }
+ switch (lum->cmd) {
+ case LTTNG_UST_STREAM:
+ /*
+ * Special-case reply to send stream info.
+ * Use lum.u output.
+ */
+ lur.u.stream.memory_map_size = lum->u.stream.memory_map_size;
+ shm_fd = lum->u.stream.shm_fd;
+ wait_fd = lum->u.stream.wait_fd;
+ break;
+ case LTTNG_UST_CHANNEL:
+ lur.u.channel.memory_map_size = lum->u.channel.memory_map_size;
+ shm_fd = lum->u.channel.shm_fd;
+ wait_fd = lum->u.channel.wait_fd;
+ break;
+ }
+ ret = send_reply(sock, &lur);
+ if (ret < 0) {
+ perror("error sending reply");
+ goto error;
+ }
+
+ if ((lum->cmd == LTTNG_UST_STREAM || lum->cmd == LTTNG_UST_CHANNEL)
+ && lur.ret_code == LTTCOMM_OK) {
+ /* we also need to send the file descriptors. */
+ ret = lttcomm_send_fds_unix_sock(sock,
+ &shm_fd, &shm_fd,
+ 1, sizeof(int));
+ if (ret < 0) {
+ perror("send shm_fd");
+ goto error;
+ }
+ ret = lttcomm_send_fds_unix_sock(sock,
+ &wait_fd, &wait_fd,
+ 1, sizeof(int));
+ if (ret < 0) {
+ perror("send wait_fd");
+ goto error;
+ }
+ }
+error:
+ ust_unlock();
+ return ret;
+}
+
+/*
+ * Tear down the resources held by a sock_info: close its sessiond
+ * socket, drop the reference on its root object-descriptor handle,
+ * clear the constructor-semaphore-posted flag, and unmap its wait shm.
+ * Each field is checked against its "unset" sentinel first, so this is
+ * safe to call on a partially-initialized sock_info and is idempotent.
+ * Errors are reported via ERR() but do not abort the cleanup.
+ */
+static
+void cleanup_sock_info(struct sock_info *sock_info)
+{
+	int ret;
+
+	if (sock_info->socket != -1) {
+		ret = close(sock_info->socket);
+		if (ret) {
+			ERR("Error closing apps socket");
+		}
+		/* Mark closed so a repeated cleanup does not close it again. */
+		sock_info->socket = -1;
+	}
+	if (sock_info->root_handle != -1) {
+		ret = objd_unref(sock_info->root_handle);
+		if (ret) {
+			ERR("Error unref root handle");
+		}
+		sock_info->root_handle = -1;
+	}
+	/* Allow the constructor semaphore to be posted again on re-init. */
+	sock_info->constructor_sem_posted = 0;
+	if (sock_info->wait_shm_mmap) {
+		/*
+		 * Unmap one page; assumes the wait shm was mapped with
+		 * length sysconf(_SC_PAGE_SIZE) — confirm against the
+		 * mmap site in get_map_shm/wait-shm setup.
+		 */
+		ret = munmap(sock_info->wait_shm_mmap, sysconf(_SC_PAGE_SIZE));
+		if (ret) {
+			ERR("Error unmapping wait shm");
+		}
+		sock_info->wait_shm_mmap = NULL;
+	}
+}
+
+/*
+ * Using fork to set umask in the child process (not multi-thread safe).
+ * We deal with the shm_open vs ftruncate race (happening when the
+ * sessiond owns the shm and does not let everybody modify it, to ensure
+ * safety against shm_unlink) by simply letting the mmap fail and
+ * retrying after a few seconds.
+ * For global shm, everybody has rw access to it until the sessiond
+ * starts.
+ */
+static
+int get_wait_shm(struct sock_info *sock_info, size_t mmap_size)
+{
+ int wait_shm_fd, ret;
+ pid_t pid;
+
+ /*
+ * Try to open read-only.
+ */
+ wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
+ if (wait_shm_fd >= 0) {
+ goto end;
+ } else if (wait_shm_fd < 0 && errno != ENOENT) {
+ /*
+		 * Read-only open did not work, and it's not because the
+ * entry was not present. It's a failure that prohibits
+ * using shm.
+ */
+ ERR("Error opening shm %s", sock_info->wait_shm_path);
+ goto end;
+ }
+ /*
+ * If the open failed because the file did not exist, try
+	 * creating it ourselves.
+ */
+ pid = fork();
+ if (pid > 0) {
+ int status;
+
+ /*
+ * Parent: wait for child to return, in which case the
+ * shared memory map will have been created.
+ */
+ pid = wait(&status);
+ if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
+ wait_shm_fd = -1;
+ goto end;
+ }
+ /*
+ * Try to open read-only again after creation.
+ */
+ wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
+ if (wait_shm_fd < 0) {
+ /*
+			 * Read-only open did not work. It's a failure
+ * that prohibits using shm.
+ */
+ ERR("Error opening shm %s", sock_info->wait_shm_path);
+ goto end;
+ }
+ goto end;
+ } else if (pid == 0) {
+ int create_mode;
+
+ /* Child */
+ create_mode = S_IRUSR | S_IWUSR | S_IRGRP;
+ if (sock_info->global)
+ create_mode |= S_IROTH | S_IWGRP | S_IWOTH;
+ /*
+ * We're alone in a child process, so we can modify the
+ * process-wide umask.
+ */
+ umask(~create_mode);
+ /*
+ * Try creating shm (or get rw access).
+ * We don't do an exclusive open, because we allow other
+ * processes to create+ftruncate it concurrently.
+ */
+ wait_shm_fd = shm_open(sock_info->wait_shm_path,
+ O_RDWR | O_CREAT, create_mode);
+ if (wait_shm_fd >= 0) {
+ ret = ftruncate(wait_shm_fd, mmap_size);
+ if (ret) {
+ PERROR("ftruncate");
+ exit(EXIT_FAILURE);
+ }
+ exit(EXIT_SUCCESS);
+ }
+ /*
+ * For local shm, we need to have rw access to accept
+ * opening it: this means the local sessiond will be
+ * able to wake us up. For global shm, we open it even
+ * if rw access is not granted, because the root.root
+ * sessiond will be able to override all rights and wake
+ * us up.
+ */
+ if (!sock_info->global && errno != EACCES) {
+ ERR("Error opening shm %s", sock_info->wait_shm_path);
+ exit(EXIT_FAILURE);
+ }
+ /*
+ * The shm exists, but we cannot open it RW. Report
+ * success.
+ */
+ exit(EXIT_SUCCESS);
+ } else {