/* src/bin/lttng-sessiond/health.cpp */

/*
 * Copyright (C) 2012 David Goulet <dgoulet@efficios.com>
 * Copyright (C) 2018 Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * SPDX-License-Identifier: GPL-2.0-only
 *
 */

#include "lttng-sessiond.hpp"
#include "health-sessiond.hpp"
#include <common/macros.hpp>
#include <common/error.hpp>
#include <common/utils.hpp>
#include <common/pipe.hpp>
#include <errno.h>	/* errno, EINTR */
#include <inttypes.h>
#include <semaphore.h>	/* sem_init(), sem_post(), sem_wait(), sem_destroy() */
#include <stdlib.h>	/* free() */
#include <string.h>	/* memset() */
#include <sys/stat.h>
#include <unistd.h>	/* getuid(), chown(), close(), unlink() */
#include "utils.hpp"
#include "thread.hpp"

namespace {
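/*
 * Shared between the health management thread and its launcher: the thread
 * posts `ready` once its listening socket is polled, so that
 * launch_health_management_thread() can block until the service is usable,
 * and shutdown_health_management_thread() later writes to `quit_pipe` to ask
 * the thread to exit.
 */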
struct thread_notifiers {
	struct lttng_pipe *quit_pipe;
	sem_t ready;
};
} /* namespace */

static
void mark_thread_as_ready(struct thread_notifiers *notifiers)
{
	DBG("Marking health management thread as ready");
	sem_post(&notifiers->ready);
}

static
void wait_until_thread_is_ready(struct thread_notifiers *notifiers)
{
	DBG("Waiting for health management thread to be ready");
	sem_wait(&notifiers->ready);
	DBG("Health management thread is ready");
}

static void cleanup_health_management_thread(void *data)
{
	struct thread_notifiers *notifiers = (thread_notifiers *) data;

	lttng_pipe_destroy(notifiers->quit_pipe);
	sem_destroy(&notifiers->ready);
	free(notifiers);
}

/*
 * Thread managing health check socket.
 */
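
/*
 * Wire protocol, for reference: a client connects to the socket at
 * the_config.health_unix_sock_path.value, sends one struct health_comm_msg
 * (its content is not inspected here; only a complete message is required)
 * and reads back one struct health_comm_reply. Hypothetical client-side
 * sketch, assuming the usual lttcomm helpers (lttcomm_connect_unix_sock() is
 * not used in this file and is an assumption here):
 *
 *	struct health_comm_msg msg = {};
 *	struct health_comm_reply reply;
 *	const int fd = lttcomm_connect_unix_sock(health_sock_path);
 *
 *	lttcomm_send_unix_sock(fd, &msg, sizeof(msg));
 *	lttcomm_recv_unix_sock(fd, &reply, sizeof(reply));
 *
 * Bit i of reply.ret_code set means health subsystem i reported bad health.
 */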
static void *thread_manage_health(void *data)
{
	const bool is_root = (getuid() == 0);
	int sock = -1, new_sock = -1, ret, i, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;
	struct health_comm_msg msg;
	struct health_comm_reply reply;
	/* Thread-specific quit pipe. */
	struct thread_notifiers *notifiers = (thread_notifiers *) data;
	const int quit_pipe_read_fd = lttng_pipe_get_readfd(
			notifiers->quit_pipe);

	DBG("[thread] Manage health check started");

	rcu_register_thread();

	/*
	 * Created with a size of two for:
	 * - client socket
	 * - thread quit pipe
	 */
	ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
	if (ret < 0) {
		goto error;
	}

	/* Create unix socket */
	sock = lttcomm_create_unix_sock(the_config.health_unix_sock_path.value);
	if (sock < 0) {
		ERR("Unable to create health check Unix socket");
		goto error;
	}

	if (is_root) {
		/* lttng health client socket path permissions */
		gid_t gid;

		ret = utils_get_group_id(the_config.tracing_group_name.value, true, &gid);
		if (ret) {
			/* Default to root group. */
			gid = 0;
		}

		ret = chown(the_config.health_unix_sock_path.value, 0, gid);
		if (ret < 0) {
			ERR("Unable to set group on %s", the_config.health_unix_sock_path.value);
			PERROR("chown");
			goto error;
		}

		ret = chmod(the_config.health_unix_sock_path.value,
				S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
		if (ret < 0) {
			ERR("Unable to set permissions on %s", the_config.health_unix_sock_path.value);
			PERROR("chmod");
			goto error;
		}
	}
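
	/*
	 * When running as root, the socket created above ends up owned by
	 * root:<tracing group> (root:root if the group lookup fails) with
	 * mode 0660, so only root and members of the tracing group can query
	 * health. A non-root session daemon leaves the socket's default
	 * ownership and permissions untouched.
	 */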

	/*
	 * Set the CLOEXEC flag. Return code is useless because either way, the
	 * show must go on.
	 */
	(void) utils_set_fd_cloexec(sock);

	ret = lttcomm_listen_unix_sock(sock);
	if (ret < 0) {
		goto error;
	}

	ret = lttng_poll_add(&events, quit_pipe_read_fd, LPOLLIN | LPOLLERR);
	if (ret < 0) {
		goto error;
	}

	/* Add the health check socket to the poll set. */
	ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLPRI);
	if (ret < 0) {
		goto error;
	}

	mark_thread_as_ready(notifiers);
	while (1) {
		DBG("Health check ready");

		/* Infinite blocking call, waiting for transmission */
	restart:
		ret = lttng_poll_wait(&events, -1);
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Event on the health check listening socket */
			if (pollfd == sock) {
				if (revents & LPOLLIN) {
					continue;
				} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Health socket poll error");
					goto error;
				} else {
					ERR("Unexpected poll events %u for sock %d", revents, pollfd);
					goto error;
				}
			} else {
				/* Event on the thread's quit pipe. */
				err = 0;
				goto exit;
			}
		}
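
		/*
		 * Falling out of the dispatch loop without hitting a goto
		 * means the only pending events were LPOLLIN on the listening
		 * socket: a health client is waiting to be accepted.
		 */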
		new_sock = lttcomm_accept_unix_sock(sock);
		if (new_sock < 0) {
			goto error;
		}

		/*
		 * Set the CLOEXEC flag. Return code is useless because either way, the
		 * show must go on.
		 */
		(void) utils_set_fd_cloexec(new_sock);

		DBG("Receiving data from client for health...");
		ret = lttcomm_recv_unix_sock(new_sock, (void *)&msg, sizeof(msg));
		if (ret <= 0) {
			DBG("Nothing recv() from client... continuing");
			ret = close(new_sock);
			if (ret) {
				PERROR("close");
			}
			continue;
		}

		rcu_thread_online();

		memset(&reply, 0, sizeof(reply));
		for (i = 0; i < NR_HEALTH_SESSIOND_TYPES; i++) {
			/*
			 * health_check_state returns 0 if health is
			 * bad.
			 */
			if (!health_check_state(the_health_sessiond, i)) {
				reply.ret_code |= 1ULL << i;
			}
		}
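
		/*
		 * reply.ret_code is a bitmask: bit i set means health
		 * subsystem i reported bad health, so a value of 0 means all
		 * monitored threads are healthy.
		 */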
		DBG2("Health check return value %" PRIx64, reply.ret_code);

		ret = lttcomm_send_unix_sock(new_sock, (void *) &reply,
				sizeof(reply));
		if (ret < 0) {
			ERR("Failed to send health data back to client");
		}

		/* End of transmission */
		ret = close(new_sock);
		if (ret) {
			PERROR("close");
		}
	}

exit:
error:
	if (err) {
		ERR("Health error occurred in %s", __func__);
	}
	DBG("Health check thread dying");
	unlink(the_config.health_unix_sock_path.value);
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
	}

	lttng_poll_clean(&events);
	rcu_unregister_thread();
	return NULL;
}

static bool shutdown_health_management_thread(void *data)
{
	struct thread_notifiers *notifiers = (thread_notifiers *) data;
	const int write_fd = lttng_pipe_get_writefd(notifiers->quit_pipe);

	return notify_thread_pipe(write_fd) == 1;
}
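
/*
 * Shutdown is requested by writing a single byte to the quit pipe (hence the
 * `== 1` check above); the health thread sees it as an event on
 * quit_pipe_read_fd and leaves its poll loop through the `exit` label.
 */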

bool launch_health_management_thread(void)
{
	struct thread_notifiers *notifiers;
	struct lttng_thread *thread;

	notifiers = zmalloc<thread_notifiers>();
	if (!notifiers) {
		goto error_alloc;
	}

	sem_init(&notifiers->ready, 0, 0);
	notifiers->quit_pipe = lttng_pipe_open(FD_CLOEXEC);
	if (!notifiers->quit_pipe) {
		goto error;
	}
	thread = lttng_thread_create("Health management",
			thread_manage_health,
			shutdown_health_management_thread,
			cleanup_health_management_thread,
			notifiers);
	if (!thread) {
		goto error;
	}

	wait_until_thread_is_ready(notifiers);
	lttng_thread_put(thread);
	return true;
error:
	cleanup_health_management_thread(notifiers);
error_alloc:
	return false;
}
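
/*
 * Illustrative call site only (names and error handling are hypothetical,
 * not taken from the session daemon's main path):
 *
 *	if (!launch_health_management_thread()) {
 *		retval = -1;
 *		goto exit_health;
 *	}
 *
 * On success the thread runs until its shutdown callback
 * (shutdown_health_management_thread()) is invoked through the lttng_thread
 * shutdown mechanism, after which cleanup_health_management_thread()
 * releases the notifier resources.
 */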