/* src/bin/lttng-sessiond/manage-kernel.cpp (lttng-tools) */
1 /*
2 * Copyright (C) 2011 EfficiOS Inc.
3 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 * Copyright (C) 2013 Jérémie Galarneau <jeremie.galarneau@efficios.com>
5 *
6 * SPDX-License-Identifier: GPL-2.0-only
7 *
8 */
9
10 #include "health-sessiond.hpp"
11 #include "kernel-consumer.hpp"
12 #include "kernel.hpp"
13 #include "manage-kernel.hpp"
14 #include "testpoint.hpp"
15 #include "thread.hpp"
16 #include "utils.hpp"
17
18 #include <common/pipe.hpp>
19 #include <common/urcu.hpp>
20 #include <common/utils.hpp>
21
22 #include <fcntl.h>
23
namespace {
/*
 * State shared between the kernel management thread and its shutdown and
 * cleanup callbacks; allocated by launch_kernel_management_thread().
 */
struct thread_notifiers {
	/* Written to by shutdown_kernel_management_thread() to make the thread exit. */
	struct lttng_pipe *quit_pipe;
	/* Read end of the pipe signalling that the kernel poll set must be rebuilt. */
	int kernel_poll_pipe_read_fd;
};
} /* namespace */
30
31 /*
32 * Update the kernel poll set of all channel fd available over all tracing
33 * session. Add the wakeup pipe at the end of the set.
34 */
35 static int update_kernel_poll(struct lttng_poll_event *events)
36 {
37 int ret;
38 struct ltt_kernel_channel *channel;
39 struct ltt_session *session;
40 const struct ltt_session_list *session_list = session_get_list();
41
42 DBG("Updating kernel poll set");
43
44 session_lock_list();
45 cds_list_for_each_entry (session, &session_list->head, list) {
46 if (!session_get(session)) {
47 continue;
48 }
49 session_lock(session);
50 if (session->kernel_session == nullptr) {
51 session_unlock(session);
52 session_put(session);
53 continue;
54 }
55
56 cds_list_for_each_entry (
57 channel, &session->kernel_session->channel_list.head, list) {
58 /* Add channel fd to the kernel poll set */
59 ret = lttng_poll_add(events, channel->fd, LPOLLIN | LPOLLRDNORM);
60 if (ret < 0) {
61 session_unlock(session);
62 session_put(session);
63 goto error;
64 }
65 DBG("Channel fd %d added to kernel set", channel->fd);
66 }
67 session_unlock(session);
68 session_put(session);
69 }
70 session_unlock_list();
71
72 return 0;
73
74 error:
75 session_unlock_list();
76 return -1;
77 }
78
79 /*
80 * Find the channel fd from 'fd' over all tracing session. When found, check
81 * for new channel stream and send those stream fds to the kernel consumer.
82 *
83 * Useful for CPU hotplug feature.
84 */
85 static int update_kernel_stream(int fd)
86 {
87 int ret = 0;
88 struct ltt_session *session;
89 struct ltt_kernel_session *ksess;
90 struct ltt_kernel_channel *channel;
91 const struct ltt_session_list *session_list = session_get_list();
92
93 DBG("Updating kernel streams for channel fd %d", fd);
94
95 session_lock_list();
96 cds_list_for_each_entry (session, &session_list->head, list) {
97 if (!session_get(session)) {
98 continue;
99 }
100
101 session_lock(session);
102 if (session->kernel_session == nullptr) {
103 session_unlock(session);
104 session_put(session);
105 continue;
106 }
107
108 ksess = session->kernel_session;
109
110 cds_list_for_each_entry (channel, &ksess->channel_list.head, list) {
111 struct lttng_ht_iter iter;
112 struct consumer_socket *socket;
113
114 if (channel->fd != fd) {
115 continue;
116 }
117 DBG("Channel found, updating kernel streams");
118 ret = kernel_open_channel_stream(channel);
119 if (ret < 0) {
120 goto error;
121 }
122 /* Update the stream global counter */
123 ksess->stream_count_global += ret;
124
125 /*
126 * Have we already sent fds to the consumer? If yes, it
127 * means that tracing is started so it is safe to send
128 * our updated stream fds.
129 */
130 if (ksess->consumer_fds_sent != 1 || ksess->consumer == nullptr) {
131 ret = -1;
132 goto error;
133 }
134
135 {
136 lttng::urcu::read_lock_guard read_lock;
137
138 cds_lfht_for_each_entry (
139 ksess->consumer->socks->ht, &iter.iter, socket, node.node) {
140 pthread_mutex_lock(socket->lock);
141 ret = kernel_consumer_send_channel_streams(
142 socket,
143 channel,
144 ksess,
145 session->output_traces ? 1 : 0);
146 pthread_mutex_unlock(socket->lock);
147 if (ret < 0) {
148 goto error;
149 }
150 }
151 }
152 }
153
154 session_unlock(session);
155 session_put(session);
156 }
157 session_unlock_list();
158 return ret;
159
160 error:
161 session_unlock(session);
162 session_put(session);
163 session_unlock_list();
164 return ret;
165 }
166
167 /*
168 * This thread manage event coming from the kernel.
169 *
170 * Features supported in this thread:
171 * -) CPU Hotplug
172 */
173 static void *thread_kernel_management(void *data)
174 {
175 int ret, i, update_poll_flag = 1, err = -1;
176 uint32_t nb_fd;
177 char tmp;
178 struct lttng_poll_event events;
179 struct thread_notifiers *notifiers = (thread_notifiers *) data;
180 const auto thread_quit_pipe_fd = lttng_pipe_get_readfd(notifiers->quit_pipe);
181
182 DBG("[thread] Thread manage kernel started");
183
184 health_register(the_health_sessiond, HEALTH_SESSIOND_TYPE_KERNEL);
185
186 /*
187 * This first step of the while is to clean this structure which could free
188 * non NULL pointers so initialize it before the loop.
189 */
190 lttng_poll_init(&events);
191
192 if (testpoint(sessiond_thread_manage_kernel)) {
193 goto error_testpoint;
194 }
195
196 health_code_update();
197
198 if (testpoint(sessiond_thread_manage_kernel_before_loop)) {
199 goto error_testpoint;
200 }
201
202 while (true) {
203 health_code_update();
204
205 if (update_poll_flag == 1) {
206 /* Clean events object. We are about to populate it again. */
207 lttng_poll_clean(&events);
208
209 ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
210 if (ret < 0) {
211 goto error_poll_create;
212 }
213
214 ret = lttng_poll_add(&events, notifiers->kernel_poll_pipe_read_fd, LPOLLIN);
215 if (ret < 0) {
216 goto error;
217 }
218
219 ret = lttng_poll_add(&events, thread_quit_pipe_fd, LPOLLIN);
220 if (ret < 0) {
221 goto error;
222 }
223
224 /* This will add the available kernel channel if any. */
225 ret = update_kernel_poll(&events);
226 if (ret < 0) {
227 goto error;
228 }
229 update_poll_flag = 0;
230 }
231
232 DBG("Thread kernel polling");
233
234 /* Poll infinite value of time */
235 restart:
236 health_poll_entry();
237 ret = lttng_poll_wait(&events, -1);
238 DBG("Thread kernel return from poll on %d fds", LTTNG_POLL_GETNB(&events));
239 health_poll_exit();
240 if (ret < 0) {
241 /*
242 * Restart interrupted system call.
243 */
244 if (errno == EINTR) {
245 goto restart;
246 }
247 goto error;
248 } else if (ret == 0) {
249 /* Should not happen since timeout is infinite */
250 ERR("Return value of poll is 0 with an infinite timeout.\n"
251 "This should not have happened! Continuing...");
252 continue;
253 }
254
255 nb_fd = ret;
256
257 for (i = 0; i < nb_fd; i++) {
258 /* Fetch once the poll data */
259 const auto revents = LTTNG_POLL_GETEV(&events, i);
260 const auto pollfd = LTTNG_POLL_GETFD(&events, i);
261
262 health_code_update();
263
264 /* Activity on thread quit pipe, exiting. */
265 if (pollfd == thread_quit_pipe_fd) {
266 DBG("Activity on thread quit pipe");
267 err = 0;
268 goto exit;
269 }
270
271 /* Check for data on kernel pipe */
272 if (revents & LPOLLIN) {
273 if (pollfd == notifiers->kernel_poll_pipe_read_fd) {
274 (void) lttng_read(
275 notifiers->kernel_poll_pipe_read_fd, &tmp, 1);
276 /*
277 * Ret value is useless here, if this pipe gets any actions
278 * an update is required anyway.
279 */
280 update_poll_flag = 1;
281 continue;
282 } else {
283 /*
284 * New CPU detected by the kernel. Adding kernel stream to
285 * kernel session and updating the kernel consumer
286 */
287 ret = update_kernel_stream(pollfd);
288 if (ret < 0) {
289 continue;
290 }
291 break;
292 }
293 } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
294 update_poll_flag = 1;
295 continue;
296 } else {
297 ERR("Unexpected poll events %u for sock %d", revents, pollfd);
298 goto error;
299 }
300 }
301 }
302
303 exit:
304 error:
305 lttng_poll_clean(&events);
306 error_poll_create:
307 error_testpoint:
308 if (err) {
309 health_error();
310 ERR("Health error occurred in %s", __func__);
311 WARN("Kernel thread died unexpectedly. "
312 "Kernel tracing can continue but CPU hotplug is disabled.");
313 }
314 health_unregister(the_health_sessiond);
315 DBG("Kernel thread dying");
316 return nullptr;
317 }
318
319 static bool shutdown_kernel_management_thread(void *data)
320 {
321 struct thread_notifiers *notifiers = (thread_notifiers *) data;
322 const int write_fd = lttng_pipe_get_writefd(notifiers->quit_pipe);
323
324 return notify_thread_pipe(write_fd) == 1;
325 }
326
327 static void cleanup_kernel_management_thread(void *data)
328 {
329 struct thread_notifiers *notifiers = (thread_notifiers *) data;
330
331 lttng_pipe_destroy(notifiers->quit_pipe);
332 free(notifiers);
333 }
334
335 bool launch_kernel_management_thread(int kernel_poll_pipe_read_fd)
336 {
337 struct lttng_pipe *quit_pipe;
338 struct thread_notifiers *notifiers = nullptr;
339 struct lttng_thread *thread;
340
341 notifiers = zmalloc<thread_notifiers>();
342 if (!notifiers) {
343 goto error_alloc;
344 }
345 quit_pipe = lttng_pipe_open(FD_CLOEXEC);
346 if (!quit_pipe) {
347 goto error;
348 }
349 notifiers->quit_pipe = quit_pipe;
350 notifiers->kernel_poll_pipe_read_fd = kernel_poll_pipe_read_fd;
351
352 thread = lttng_thread_create("Kernel management",
353 thread_kernel_management,
354 shutdown_kernel_management_thread,
355 cleanup_kernel_management_thread,
356 notifiers);
357 if (!thread) {
358 goto error;
359 }
360 lttng_thread_put(thread);
361 return true;
362 error:
363 cleanup_kernel_management_thread(notifiers);
364 error_alloc:
365 return false;
366 }
/* end of manage-kernel.cpp */