src/bin/lttng-sessiond/manage-kernel.cpp
/*
 * Copyright (C) 2011 EfficiOS Inc.
 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (C) 2013 Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * SPDX-License-Identifier: GPL-2.0-only
 *
 */

#include "health-sessiond.hpp"
#include "kernel-consumer.hpp"
#include "kernel.hpp"
#include "manage-kernel.hpp"
#include "testpoint.hpp"
#include "thread.hpp"
#include "utils.hpp"

#include <common/pipe.hpp>
#include <common/urcu.hpp>
#include <common/utils.hpp>

namespace {
struct thread_notifiers {
	struct lttng_pipe *quit_pipe;
	int kernel_poll_pipe_read_fd;
};
} /* namespace */
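
/*
 * Note on the two members above: quit_pipe is private to this thread and is
 * written to by shutdown_kernel_management_thread() below to wake the poll
 * loop on teardown, while kernel_poll_pipe_read_fd is the read end of the
 * session daemon's kernel poll pipe, expected to be written to by other
 * threads whenever the set of kernel channels changes.
 */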

/*
 * Update the kernel poll set with all the channel fds available over all
 * tracing sessions. Add the wakeup pipe at the end of the set.
 */
static int update_kernel_poll(struct lttng_poll_event *events)
{
	int ret;
	struct ltt_kernel_channel *channel;
	struct ltt_session *session;
	const struct ltt_session_list *session_list = session_get_list();

	DBG("Updating kernel poll set");

	session_lock_list();
	cds_list_for_each_entry (session, &session_list->head, list) {
		if (!session_get(session)) {
			continue;
		}
		session_lock(session);
		if (session->kernel_session == nullptr) {
			session_unlock(session);
			session_put(session);
			continue;
		}

		cds_list_for_each_entry (
			channel, &session->kernel_session->channel_list.head, list) {
			/* Add channel fd to the kernel poll set */
			ret = lttng_poll_add(events, channel->fd, LPOLLIN | LPOLLRDNORM);
			if (ret < 0) {
				session_unlock(session);
				session_put(session);
				goto error;
			}
			DBG("Channel fd %d added to kernel set", channel->fd);
		}
		session_unlock(session);
		session_put(session);
	}
	session_unlock_list();

	return 0;

error:
	session_unlock_list();
	return -1;
}
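
/*
 * Note the reference and locking discipline used above: each session is
 * session_get()'d before it is locked and session_put() only after it is
 * unlocked, so a session cannot disappear while its channel list is being
 * walked, and the session list lock is held for the whole traversal.
 */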

/*
 * Find the channel fd matching 'fd' over all tracing sessions. When found,
 * check for new channel streams and send those stream fds to the kernel
 * consumer.
 *
 * Useful for the CPU hotplug feature.
 */
static int update_kernel_stream(int fd)
{
	int ret = 0;
	struct ltt_session *session;
	struct ltt_kernel_session *ksess;
	struct ltt_kernel_channel *channel;
	const struct ltt_session_list *session_list = session_get_list();

	DBG("Updating kernel streams for channel fd %d", fd);

	session_lock_list();
	cds_list_for_each_entry (session, &session_list->head, list) {
		if (!session_get(session)) {
			continue;
		}

		session_lock(session);
		if (session->kernel_session == nullptr) {
			session_unlock(session);
			session_put(session);
			continue;
		}

		ksess = session->kernel_session;

		cds_list_for_each_entry (channel, &ksess->channel_list.head, list) {
			struct lttng_ht_iter iter;
			struct consumer_socket *socket;

			if (channel->fd != fd) {
				continue;
			}
			DBG("Channel found, updating kernel streams");
			ret = kernel_open_channel_stream(channel);
			if (ret < 0) {
				goto error;
			}
			/* Update the stream global counter */
			ksess->stream_count_global += ret;

			/*
			 * Have we already sent fds to the consumer? If yes, it
			 * means that tracing has started, so it is safe to send
			 * our updated stream fds.
			 */
			if (ksess->consumer_fds_sent != 1 || ksess->consumer == nullptr) {
				ret = -1;
				goto error;
			}

			{
				lttng::urcu::read_lock_guard read_lock;

				cds_lfht_for_each_entry (
					ksess->consumer->socks->ht, &iter.iter, socket, node.node) {
					pthread_mutex_lock(socket->lock);
					ret = kernel_consumer_send_channel_streams(
						socket,
						channel,
						ksess,
						session->output_traces ? 1 : 0);
					pthread_mutex_unlock(socket->lock);
					if (ret < 0) {
						goto error;
					}
				}
			}
		}

		session_unlock(session);
		session_put(session);
	}
	session_unlock_list();
	return ret;

error:
	session_unlock(session);
	session_put(session);
	session_unlock_list();
	return ret;
}
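
/*
 * Note: as used above, kernel_open_channel_stream() is assumed to return
 * the number of streams opened on success (hence the addition to
 * ksess->stream_count_global) and a negative value on error.
 */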

/*
 * This thread manages events coming from the kernel.
 *
 * Features supported in this thread:
 *    -) CPU hotplug
 */
static void *thread_kernel_management(void *data)
{
	int ret, i, update_poll_flag = 1, err = -1;
	uint32_t nb_fd;
	char tmp;
	struct lttng_poll_event events;
	struct thread_notifiers *notifiers = (thread_notifiers *) data;
	const auto thread_quit_pipe_fd = lttng_pipe_get_readfd(notifiers->quit_pipe);

	DBG("[thread] Thread manage kernel started");

	health_register(the_health_sessiond, HEALTH_SESSIOND_TYPE_KERNEL);

	/*
	 * The first step of the while loop cleans this structure, which can
	 * free non-NULL pointers, so initialize it before entering the loop.
	 */
	lttng_poll_init(&events);

	if (testpoint(sessiond_thread_manage_kernel)) {
		goto error_testpoint;
	}

	health_code_update();

	if (testpoint(sessiond_thread_manage_kernel_before_loop)) {
		goto error_testpoint;
	}

	while (true) {
		health_code_update();

		if (update_poll_flag == 1) {
			/* Clean events object. We are about to populate it again. */
			lttng_poll_clean(&events);

			ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
			if (ret < 0) {
				goto error_poll_create;
			}

			ret = lttng_poll_add(&events, notifiers->kernel_poll_pipe_read_fd, LPOLLIN);
			if (ret < 0) {
				goto error;
			}

			ret = lttng_poll_add(&events, thread_quit_pipe_fd, LPOLLIN);
			if (ret < 0) {
				goto error;
			}

			/* This will add the available kernel channels, if any. */
			ret = update_kernel_poll(&events);
			if (ret < 0) {
				goto error;
			}
			update_poll_flag = 0;
		}
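
		/*
		 * Note: the poll set is rebuilt from scratch on every update
		 * rather than patched entry by entry; this handles channel
		 * additions and removals alike without tracking individual
		 * fds, at the cost of re-adding the two pipes each time.
		 */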

		DBG("Thread kernel polling");

		/* Poll with an infinite timeout. */
	restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		DBG("Thread kernel return from poll on %d fds", LTTNG_POLL_GETNB(&events));
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		} else if (ret == 0) {
			/* Should not happen since timeout is infinite */
			ERR("Return value of poll is 0 with an infinite timeout.\n"
			    "This should not have happened! Continuing...");
			continue;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch the poll data once. */
			const auto revents = LTTNG_POLL_GETEV(&events, i);
			const auto pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			/* Activity on thread quit pipe, exiting. */
			if (pollfd == thread_quit_pipe_fd) {
				DBG("Activity on thread quit pipe");
				err = 0;
				goto exit;
			}

			/* Check for data on kernel pipe. */
			if (revents & LPOLLIN) {
				if (pollfd == notifiers->kernel_poll_pipe_read_fd) {
					(void) lttng_read(
						notifiers->kernel_poll_pipe_read_fd, &tmp, 1);
					/*
					 * The return value is not checked here: any
					 * activity on this pipe means an update is
					 * required anyway.
					 */
					update_poll_flag = 1;
					continue;
				} else {
					/*
					 * New CPU detected by the kernel. Add the
					 * kernel stream to the kernel session and
					 * update the kernel consumer.
					 */
					ret = update_kernel_stream(pollfd);
					if (ret < 0) {
						continue;
					}
					break;
				}
			} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				update_poll_flag = 1;
				continue;
			} else {
				ERR("Unexpected poll events %u for sock %d", revents, pollfd);
				goto error;
			}
		}
	}
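
	/*
	 * Note on the labels below: "exit" and "error" fall through to
	 * lttng_poll_clean(), while error_poll_create and error_testpoint
	 * skip it, since on those paths the poll set has either just been
	 * cleaned, failed to be created, or was initialized but never
	 * populated.
	 */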
exit:
error:
	lttng_poll_clean(&events);
error_poll_create:
error_testpoint:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
		WARN("Kernel thread died unexpectedly. "
		     "Kernel tracing can continue but CPU hotplug is disabled.");
	}
	health_unregister(the_health_sessiond);
	DBG("Kernel thread dying");
	return nullptr;
}

static bool shutdown_kernel_management_thread(void *data)
{
	struct thread_notifiers *notifiers = (thread_notifiers *) data;
	const int write_fd = lttng_pipe_get_writefd(notifiers->quit_pipe);

	return notify_thread_pipe(write_fd) == 1;
}
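
/*
 * Note: notify_thread_pipe() is expected to write a single byte to the quit
 * pipe to wake the poll loop above; comparing its return value against 1
 * (one byte written) treats anything else as a failed wake-up.
 */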

static void cleanup_kernel_management_thread(void *data)
{
	struct thread_notifiers *notifiers = (thread_notifiers *) data;

	lttng_pipe_destroy(notifiers->quit_pipe);
	free(notifiers);
}

bool launch_kernel_management_thread(int kernel_poll_pipe_read_fd)
{
	struct lttng_pipe *quit_pipe;
	struct thread_notifiers *notifiers = nullptr;
	struct lttng_thread *thread;

	notifiers = zmalloc<thread_notifiers>();
	if (!notifiers) {
		goto error_alloc;
	}
	quit_pipe = lttng_pipe_open(FD_CLOEXEC);
	if (!quit_pipe) {
		goto error;
	}
	notifiers->quit_pipe = quit_pipe;
	notifiers->kernel_poll_pipe_read_fd = kernel_poll_pipe_read_fd;

	thread = lttng_thread_create("Kernel management",
				     thread_kernel_management,
				     shutdown_kernel_management_thread,
				     cleanup_kernel_management_thread,
				     notifiers);
	if (!thread) {
		goto error;
	}
	lttng_thread_put(thread);
	return true;
error:
	cleanup_kernel_management_thread(notifiers);
error_alloc:
	return false;
}
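
/*
 * Hypothetical usage sketch (not part of this file): the session daemon
 * would typically create the kernel poll pipe and hand its read end to
 * this launcher. Assuming the utils_create_pipe_cloexec() helper from
 * common/utils:
 *
 *	int kernel_poll_pipe[2] = { -1, -1 };
 *
 *	if (utils_create_pipe_cloexec(kernel_poll_pipe) < 0) {
 *		// Failed to create the pipe; abort startup.
 *	}
 *	if (!launch_kernel_management_thread(kernel_poll_pipe[0])) {
 *		// Thread failed to launch; tear down and exit.
 *	}
 */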