/*
 * lttng-tools: src/bin/lttng-sessiond/manage-kernel.c
 * (snapshot from commit "Fix: wait for the completion of implicit
 * session rotations")
 */
1/*
2 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 * 2013 - Jérémie Galarneau <jeremie.galarneau@efficios.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 */
19
20#include <common/pipe.h>
21#include <common/utils.h>
22
23#include "manage-kernel.h"
24#include "testpoint.h"
25#include "health-sessiond.h"
26#include "utils.h"
27#include "thread.h"
28#include "kernel.h"
29#include "kernel-consumer.h"
30
/*
 * State handed to the kernel management thread and to its shutdown and
 * cleanup callbacks (see launch_kernel_management_thread()).
 */
struct thread_notifiers {
	/* Internal pipe used to signal the thread that it must quit. */
	struct lttng_pipe *quit_pipe;
	/*
	 * Read end of the pipe used by the rest of the session daemon to
	 * wake this thread when the set of kernel channels changed.
	 */
	int kernel_poll_pipe_read_fd;
};
35
36/*
37 * Update the kernel poll set of all channel fd available over all tracing
38 * session. Add the wakeup pipe at the end of the set.
39 */
40static int update_kernel_poll(struct lttng_poll_event *events)
41{
42 int ret;
43 struct ltt_kernel_channel *channel;
44 struct ltt_session *session;
45 const struct ltt_session_list *session_list = session_get_list();
46
47 DBG("Updating kernel poll set");
48
49 session_lock_list();
50 cds_list_for_each_entry(session, &session_list->head, list) {
51 if (!session_get(session)) {
52 continue;
53 }
54 session_lock(session);
55 if (session->kernel_session == NULL) {
56 session_unlock(session);
57 session_put(session);
58 continue;
59 }
60
61 cds_list_for_each_entry(channel,
62 &session->kernel_session->channel_list.head, list) {
63 /* Add channel fd to the kernel poll set */
64 ret = lttng_poll_add(events, channel->fd, LPOLLIN | LPOLLRDNORM);
65 if (ret < 0) {
66 session_unlock(session);
67 session_put(session);
68 goto error;
69 }
70 DBG("Channel fd %d added to kernel set", channel->fd);
71 }
72 session_unlock(session);
c59b0313 73 session_put(session);
45f6525f
JG
74 }
75 session_unlock_list();
76
77 return 0;
78
79error:
80 session_unlock_list();
81 return -1;
82}
83
84/*
85 * Find the channel fd from 'fd' over all tracing session. When found, check
86 * for new channel stream and send those stream fds to the kernel consumer.
87 *
88 * Useful for CPU hotplug feature.
89 */
90static int update_kernel_stream(int fd)
91{
92 int ret = 0;
93 struct ltt_session *session;
94 struct ltt_kernel_session *ksess;
95 struct ltt_kernel_channel *channel;
96 const struct ltt_session_list *session_list = session_get_list();
97
98 DBG("Updating kernel streams for channel fd %d", fd);
99
100 session_lock_list();
101 cds_list_for_each_entry(session, &session_list->head, list) {
102 if (!session_get(session)) {
103 continue;
104 }
105 session_lock(session);
106 if (session->kernel_session == NULL) {
107 session_unlock(session);
108 session_put(session);
109 continue;
110 }
111 ksess = session->kernel_session;
112
113 cds_list_for_each_entry(channel,
114 &ksess->channel_list.head, list) {
115 struct lttng_ht_iter iter;
116 struct consumer_socket *socket;
117
118 if (channel->fd != fd) {
119 continue;
120 }
121 DBG("Channel found, updating kernel streams");
122 ret = kernel_open_channel_stream(channel);
123 if (ret < 0) {
124 goto error;
125 }
126 /* Update the stream global counter */
127 ksess->stream_count_global += ret;
128
129 /*
130 * Have we already sent fds to the consumer? If yes, it
131 * means that tracing is started so it is safe to send
132 * our updated stream fds.
133 */
134 if (ksess->consumer_fds_sent != 1
135 || ksess->consumer == NULL) {
136 ret = -1;
137 goto error;
138 }
139
140 rcu_read_lock();
141 cds_lfht_for_each_entry(ksess->consumer->socks->ht,
142 &iter.iter, socket, node.node) {
143 pthread_mutex_lock(socket->lock);
144 ret = kernel_consumer_send_channel_streams(socket,
145 channel, ksess,
146 session->output_traces ? 1 : 0);
147 pthread_mutex_unlock(socket->lock);
148 if (ret < 0) {
149 rcu_read_unlock();
150 goto error;
151 }
152 }
153 rcu_read_unlock();
154 }
155 session_unlock(session);
156 session_put(session);
157 }
158 session_unlock_list();
159 return ret;
160
161error:
162 session_unlock(session);
163 session_put(session);
164 session_unlock_list();
165 return ret;
166}
167
168/*
169 * This thread manage event coming from the kernel.
170 *
171 * Features supported in this thread:
172 * -) CPU Hotplug
173 */
174static void *thread_kernel_management(void *data)
175{
176 int ret, i, pollfd, update_poll_flag = 1, err = -1;
177 uint32_t revents, nb_fd;
178 char tmp;
179 struct lttng_poll_event events;
180 struct thread_notifiers *notifiers = data;
181 const int quit_pipe_read_fd = lttng_pipe_get_readfd(notifiers->quit_pipe);
182
183 DBG("[thread] Thread manage kernel started");
184
185 health_register(health_sessiond, HEALTH_SESSIOND_TYPE_KERNEL);
186
187 /*
188 * This first step of the while is to clean this structure which could free
189 * non NULL pointers so initialize it before the loop.
190 */
191 lttng_poll_init(&events);
192
193 if (testpoint(sessiond_thread_manage_kernel)) {
194 goto error_testpoint;
195 }
196
197 health_code_update();
198
199 if (testpoint(sessiond_thread_manage_kernel_before_loop)) {
200 goto error_testpoint;
201 }
202
203 while (1) {
204 health_code_update();
205
206 if (update_poll_flag == 1) {
207 /* Clean events object. We are about to populate it again. */
208 lttng_poll_clean(&events);
209
210 ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
211 if (ret < 0) {
212 goto error_poll_create;
213 }
214
215 ret = lttng_poll_add(&events,
216 notifiers->kernel_poll_pipe_read_fd,
217 LPOLLIN);
218 if (ret < 0) {
219 goto error;
220 }
221
222 ret = lttng_poll_add(&events,
223 quit_pipe_read_fd,
224 LPOLLIN);
225 if (ret < 0) {
226 goto error;
227 }
228
229 /* This will add the available kernel channel if any. */
230 ret = update_kernel_poll(&events);
231 if (ret < 0) {
232 goto error;
233 }
234 update_poll_flag = 0;
235 }
236
237 DBG("Thread kernel polling");
238
239 /* Poll infinite value of time */
240 restart:
241 health_poll_entry();
242 ret = lttng_poll_wait(&events, -1);
243 DBG("Thread kernel return from poll on %d fds",
244 LTTNG_POLL_GETNB(&events));
245 health_poll_exit();
246 if (ret < 0) {
247 /*
248 * Restart interrupted system call.
249 */
250 if (errno == EINTR) {
251 goto restart;
252 }
253 goto error;
254 } else if (ret == 0) {
255 /* Should not happen since timeout is infinite */
256 ERR("Return value of poll is 0 with an infinite timeout.\n"
257 "This should not have happened! Continuing...");
258 continue;
259 }
260
261 nb_fd = ret;
262
263 for (i = 0; i < nb_fd; i++) {
264 /* Fetch once the poll data */
265 revents = LTTNG_POLL_GETEV(&events, i);
266 pollfd = LTTNG_POLL_GETFD(&events, i);
267
268 health_code_update();
269
45f6525f
JG
270 if (pollfd == quit_pipe_read_fd) {
271 err = 0;
272 goto exit;
273 }
274
275 /* Check for data on kernel pipe */
276 if (revents & LPOLLIN) {
277 if (pollfd == notifiers->kernel_poll_pipe_read_fd) {
278 (void) lttng_read(notifiers->kernel_poll_pipe_read_fd,
279 &tmp, 1);
280 /*
281 * Ret value is useless here, if this pipe gets any actions an
282 * update is required anyway.
283 */
284 update_poll_flag = 1;
285 continue;
286 } else {
287 /*
288 * New CPU detected by the kernel. Adding kernel stream to
289 * kernel session and updating the kernel consumer
290 */
291 ret = update_kernel_stream(pollfd);
292 if (ret < 0) {
293 continue;
294 }
295 break;
296 }
297 } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
298 update_poll_flag = 1;
299 continue;
300 } else {
301 ERR("Unexpected poll events %u for sock %d", revents, pollfd);
302 goto error;
303 }
304 }
305 }
306
307exit:
308error:
309 lttng_poll_clean(&events);
310error_poll_create:
311error_testpoint:
312 if (err) {
313 health_error();
314 ERR("Health error occurred in %s", __func__);
315 WARN("Kernel thread died unexpectedly. "
316 "Kernel tracing can continue but CPU hotplug is disabled.");
317 }
318 health_unregister(health_sessiond);
319 DBG("Kernel thread dying");
320 return NULL;
321}
322
323static bool shutdown_kernel_management_thread(void *data)
324{
325 struct thread_notifiers *notifiers = data;
326 const int write_fd = lttng_pipe_get_writefd(notifiers->quit_pipe);
327
328 return notify_thread_pipe(write_fd) == 1;
329}
330
331static void cleanup_kernel_management_thread(void *data)
332{
333 struct thread_notifiers *notifiers = data;
334
335 lttng_pipe_destroy(notifiers->quit_pipe);
336 free(notifiers);
337}
338
339bool launch_kernel_management_thread(int kernel_poll_pipe_read_fd)
340{
341 struct lttng_pipe *quit_pipe;
342 struct thread_notifiers *notifiers = NULL;
343 struct lttng_thread *thread;
344
45f6525f
JG
345 notifiers = zmalloc(sizeof(*notifiers));
346 if (!notifiers) {
a7f04300
JG
347 goto error_alloc;
348 }
349 quit_pipe = lttng_pipe_open(FD_CLOEXEC);
350 if (!quit_pipe) {
45f6525f
JG
351 goto error;
352 }
353 notifiers->quit_pipe = quit_pipe;
354 notifiers->kernel_poll_pipe_read_fd = kernel_poll_pipe_read_fd;
355
356 thread = lttng_thread_create("Kernel management",
357 thread_kernel_management,
358 shutdown_kernel_management_thread,
359 cleanup_kernel_management_thread,
360 notifiers);
361 if (!thread) {
362 goto error;
363 }
364 lttng_thread_put(thread);
365 return true;
366error:
367 cleanup_kernel_management_thread(notifiers);
a7f04300 368error_alloc:
45f6525f
JG
369 return false;
370}