+/*
+ * Thread managing health check socket.
+ */
+static void *thread_manage_health(void *data)
+{
+ int sock = -1, new_sock = -1, ret, i, pollfd, err = -1;
+ uint32_t revents, nb_fd;
+ struct lttng_poll_event events;
+ struct lttcomm_health_msg msg;
+ struct lttcomm_health_data reply;
+
+ DBG("[thread] Manage health check started");
+
+ rcu_register_thread();
+
+ /* Create unix socket */
+ sock = lttcomm_create_unix_sock(health_unix_sock_path);
+ if (sock < 0) {
+ ERR("Unable to create health check Unix socket");
+ ret = -1;
+ goto error;
+ }
+
+ ret = lttcomm_listen_unix_sock(sock);
+ if (ret < 0) {
+ goto error;
+ }
+
+ /*
+ * Pass 2 as size here for the thread quit pipe and client_sock. Nothing
+ * more will be added to this poll set.
+ */
+ ret = create_thread_poll_set(&events, 2);
+ if (ret < 0) {
+ goto error;
+ }
+
+ /* Add the application registration socket */
+ ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLPRI);
+ if (ret < 0) {
+ goto error;
+ }
+
+ while (1) {
+ DBG("Health check ready");
+
+ nb_fd = LTTNG_POLL_GETNB(&events);
+
+ /* Inifinite blocking call, waiting for transmission */
+restart:
+ ret = lttng_poll_wait(&events, -1);
+ if (ret < 0) {
+ /*
+ * Restart interrupted system call.
+ */
+ if (errno == EINTR) {
+ goto restart;
+ }
+ goto error;
+ }
+
+ for (i = 0; i < nb_fd; i++) {
+ /* Fetch once the poll data */
+ revents = LTTNG_POLL_GETEV(&events, i);
+ pollfd = LTTNG_POLL_GETFD(&events, i);
+
+ /* Thread quit pipe has been closed. Killing thread. */
+ ret = check_thread_quit_pipe(pollfd, revents);
+ if (ret) {
+ err = 0;
+ goto exit;
+ }
+
+ /* Event on the registration socket */
+ if (pollfd == sock) {
+ if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
+ ERR("Health socket poll error");
+ goto error;
+ }
+ }
+ }
+
+ new_sock = lttcomm_accept_unix_sock(sock);
+ if (new_sock < 0) {
+ goto error;
+ }
+
+ DBG("Receiving data from client for health...");
+ ret = lttcomm_recv_unix_sock(new_sock, (void *)&msg, sizeof(msg));
+ if (ret <= 0) {
+ DBG("Nothing recv() from client... continuing");
+ ret = close(new_sock);
+ if (ret) {
+ PERROR("close");
+ }
+ new_sock = -1;
+ continue;
+ }
+
+ rcu_thread_online();
+
+ switch (msg.component) {
+ case LTTNG_HEALTH_CMD:
+ reply.ret_code = health_check_state(&health_thread_cmd);
+ break;
+ case LTTNG_HEALTH_APP_MANAGE:
+ reply.ret_code = health_check_state(&health_thread_app_manage);
+ break;
+ case LTTNG_HEALTH_APP_REG:
+ reply.ret_code = health_check_state(&health_thread_app_reg);
+ break;
+ case LTTNG_HEALTH_KERNEL:
+ reply.ret_code = health_check_state(&health_thread_kernel);
+ break;
+ case LTTNG_HEALTH_CONSUMER:
+ reply.ret_code = check_consumer_health();
+ break;
+ case LTTNG_HEALTH_ALL:
+ reply.ret_code =
+ health_check_state(&health_thread_app_manage) &&
+ health_check_state(&health_thread_app_reg) &&
+ health_check_state(&health_thread_cmd) &&
+ health_check_state(&health_thread_kernel) &&
+ check_consumer_health();
+ break;
+ default:
+ reply.ret_code = LTTCOMM_UND;
+ break;
+ }
+
+ /*
+ * Flip ret value since 0 is a success and 1 indicates a bad health for
+ * the client where in the sessiond it is the opposite. Again, this is
+ * just to make things easier for us poor developer which enjoy a lot
+ * lazyness.
+ */
+ if (reply.ret_code == 0 || reply.ret_code == 1) {
+ reply.ret_code = !reply.ret_code;
+ }
+
+ DBG2("Health check return value %d", reply.ret_code);
+
+ ret = send_unix_sock(new_sock, (void *) &reply, sizeof(reply));
+ if (ret < 0) {
+ ERR("Failed to send health data back to client");
+ }
+
+ /* End of transmission */
+ ret = close(new_sock);
+ if (ret) {
+ PERROR("close");
+ }
+ new_sock = -1;
+ }
+
+exit:
+error:
+ if (err) {
+ ERR("Health error occurred in %s", __func__);
+ }
+ DBG("Health check thread dying");
+ unlink(health_unix_sock_path);
+ if (sock >= 0) {
+ ret = close(sock);
+ if (ret) {
+ PERROR("close");
+ }
+ }
+ if (new_sock >= 0) {
+ ret = close(new_sock);
+ if (ret) {
+ PERROR("close");
+ }
+ }
+
+ lttng_poll_clean(&events);
+
+ rcu_unregister_thread();
+ return NULL;
+}
+