[lttng-tools.git] / src / bin / lttng-sessiond / health.cpp
/*
 * Copyright (C) 2012 David Goulet <dgoulet@efficios.com>
 * Copyright (C) 2018 Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * SPDX-License-Identifier: GPL-2.0-only
 *
 */

#include "health-sessiond.hpp"
#include "lttng-sessiond.hpp"
#include "thread.hpp"
#include "utils.hpp"

#include <common/error.hpp>
#include <common/macros.hpp>
#include <common/pipe.hpp>
#include <common/utils.hpp>

#include <inttypes.h>
#include <sys/stat.h>

namespace {
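/*
 * State shared between the thread launcher and the health management
 * thread: a quit pipe used to request shutdown and a semaphore used to
 * signal that the thread has completed its initialization.
 */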
struct thread_notifiers {
	struct lttng_pipe *quit_pipe;
	sem_t ready;
};
} /* namespace */

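/*
 * Readiness handshake: the health thread posts the semaphore once its
 * sockets are registered with the poll set, while the launcher blocks in
 * wait_until_thread_is_ready() before reporting success.
 */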
static void mark_thread_as_ready(struct thread_notifiers *notifiers)
{
	DBG("Marking health management thread as ready");
	sem_post(&notifiers->ready);
}

static void wait_until_thread_is_ready(struct thread_notifiers *notifiers)
{
	DBG("Waiting for health management thread to be ready");
	sem_wait(&notifiers->ready);
	DBG("Health management thread is ready");
}

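/*
 * Release the resources owned by 'notifiers'. Used both as the thread's
 * cleanup function and on the error path of the launcher.
 */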
static void cleanup_health_management_thread(void *data)
{
	struct thread_notifiers *notifiers = (thread_notifiers *) data;

	lttng_pipe_destroy(notifiers->quit_pipe);
	sem_destroy(&notifiers->ready);
	free(notifiers);
}

/*
 * Thread managing health check socket.
 */
static void *thread_manage_health(void *data)
{
	const bool is_root = (getuid() == 0);
	int sock = -1, new_sock = -1, ret, i, err = -1;
	uint32_t nb_fd;
	struct lttng_poll_event events;
	struct health_comm_msg msg;
	struct health_comm_reply reply;
	/* Thread-specific quit pipe. */
	struct thread_notifiers *notifiers = (thread_notifiers *) data;
	const auto thread_quit_pipe_fd = lttng_pipe_get_readfd(notifiers->quit_pipe);

	DBG("[thread] Manage health check started");

	rcu_register_thread();

	/*
	 * Created with a size of two for:
	 *   - health client socket
	 *   - thread quit pipe
	 */
	ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
	if (ret < 0) {
		goto error;
	}

	/* Create unix socket */
	sock = lttcomm_create_unix_sock(the_config.health_unix_sock_path.value);
	if (sock < 0) {
		ERR("Unable to create health check Unix socket");
		goto error;
	}

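	/*
	 * When running as root, limit access to the health socket to root and
	 * the tracing group (user/group read-write only).
	 */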
	if (is_root) {
		/* lttng health client socket path permissions */
		gid_t gid;

		ret = utils_get_group_id(the_config.tracing_group_name.value, true, &gid);
		if (ret) {
			/* Default to root group. */
			gid = 0;
		}

		ret = chown(the_config.health_unix_sock_path.value, 0, gid);
		if (ret < 0) {
			ERR("Unable to set group on %s", the_config.health_unix_sock_path.value);
			PERROR("chown");
			goto error;
		}

		ret = chmod(the_config.health_unix_sock_path.value,
			    S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
		if (ret < 0) {
			ERR("Unable to set permissions on %s",
			    the_config.health_unix_sock_path.value);
			PERROR("chmod");
			goto error;
		}
	}

	/*
	 * Set the CLOEXEC flag. Return code is useless because either way, the
	 * show must go on.
	 */
	(void) utils_set_fd_cloexec(sock);

	ret = lttcomm_listen_unix_sock(sock);
	if (ret < 0) {
		goto error;
	}

	ret = lttng_poll_add(&events, thread_quit_pipe_fd, LPOLLIN);
	if (ret < 0) {
		goto error;
	}

	/* Add the health client socket. */
	ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLPRI);
	if (ret < 0) {
		goto error;
	}

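	/* All fds are registered with the poll set; signal readiness and serve clients. */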
	mark_thread_as_ready(notifiers);
	while (true) {
		DBG("Health check ready");

		/* Infinite blocking call, waiting for transmission */
	restart:
		ret = lttng_poll_wait(&events, -1);
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			const auto revents = LTTNG_POLL_GETEV(&events, i);
			const auto pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Activity on thread quit pipe, exiting. */
			if (pollfd == thread_quit_pipe_fd) {
				DBG("Activity on thread quit pipe");
				err = 0;
				goto exit;
			}

			/* Event on the health client socket. */
			if (revents & LPOLLIN) {
				continue;
			} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				ERR("Health socket poll error");
				goto error;
			} else {
				ERR("Unexpected poll events %u for sock %d", revents, pollfd);
				goto error;
			}
		}

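		/*
		 * No quit-pipe activity: the readable fd is the listening
		 * socket, so accept the pending health client connection.
		 */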
		new_sock = lttcomm_accept_unix_sock(sock);
		if (new_sock < 0) {
			goto error;
		}

		/*
		 * Set the CLOEXEC flag. Return code is useless because either way, the
		 * show must go on.
		 */
		(void) utils_set_fd_cloexec(new_sock);

		DBG("Receiving data from client for health...");
		ret = lttcomm_recv_unix_sock(new_sock, (void *) &msg, sizeof(msg));
		if (ret <= 0) {
			DBG("Nothing recv() from client... continuing");
			ret = close(new_sock);
			if (ret) {
				PERROR("close");
			}
			continue;
		}

		rcu_thread_online();

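		/*
		 * Build the reply as a bitmask: bit 'i' is set when the health
		 * check of sessiond component 'i' reports a problem.
		 */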
		memset(&reply, 0, sizeof(reply));
		for (i = 0; i < NR_HEALTH_SESSIOND_TYPES; i++) {
			/*
			 * health_check_state returns 0 if health is bad.
			 */
			if (!health_check_state(the_health_sessiond, i)) {
				reply.ret_code |= 1ULL << i;
			}
		}

		DBG2("Health check return value %" PRIx64, reply.ret_code);

		ret = lttcomm_send_unix_sock(new_sock, (void *) &reply, sizeof(reply));
		if (ret < 0) {
			ERR("Failed to send health data back to client");
		}

		/* End of transmission */
		ret = close(new_sock);
		if (ret) {
			PERROR("close");
		}
	}

exit:
error:
	if (err) {
		ERR("Health error occurred in %s", __func__);
	}
	DBG("Health check thread dying");
	unlink(the_config.health_unix_sock_path.value);
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
	}

	lttng_poll_clean(&events);
	rcu_unregister_thread();
	return nullptr;
}

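/*
 * Request the health thread to stop by writing to its quit pipe, which wakes
 * up the lttng_poll_wait() call in thread_manage_health().
 */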
static bool shutdown_health_management_thread(void *data)
{
	struct thread_notifiers *notifiers = (thread_notifiers *) data;
	const int write_fd = lttng_pipe_get_writefd(notifiers->quit_pipe);

	return notify_thread_pipe(write_fd) == 1;
}

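/*
 * Launch the health management thread and wait until it has signaled that it
 * is ready before returning to the caller.
 */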
bool launch_health_management_thread()
{
	struct thread_notifiers *notifiers;
	struct lttng_thread *thread;

	notifiers = zmalloc<thread_notifiers>();
	if (!notifiers) {
		goto error_alloc;
	}

	sem_init(&notifiers->ready, 0, 0);
	notifiers->quit_pipe = lttng_pipe_open(FD_CLOEXEC);
	if (!notifiers->quit_pipe) {
		goto error;
	}
	thread = lttng_thread_create("Health management",
				     thread_manage_health,
				     shutdown_health_management_thread,
				     cleanup_health_management_thread,
				     notifiers);
	if (!thread) {
		goto error;
	}

	wait_until_thread_is_ready(notifiers);
	lttng_thread_put(thread);
	return true;
error:
	cleanup_health_management_thread(notifiers);
error_alloc:
	return false;
}