Commit | Line | Data |
---|---|---|
1ce86c9a JD |
1 | /* |
2 | * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca> | |
3 | * Mathieu Desnoyers <mathieu.desnoyers@efficios.com> | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or | |
6 | * modify it under the terms of the GNU General Public License | |
82a3637f DG |
7 | * as published by the Free Software Foundation; only version 2 |
8 | * of the License. | |
1ce86c9a JD |
9 | * |
10 | * This program is distributed in the hope that it will be useful, | |
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
13 | * GNU General Public License for more details. | |
14 | * | |
15 | * You should have received a copy of the GNU General Public License | |
16 | * along with this program; if not, write to the Free Software | |
17 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | |
18 | */ | |
19 | ||
20 | #define _GNU_SOURCE | |
21 | #include <fcntl.h> | |
22 | #include <poll.h> | |
23 | #include <pthread.h> | |
24 | #include <stdlib.h> | |
25 | #include <string.h> | |
26 | #include <sys/mman.h> | |
27 | #include <sys/socket.h> | |
28 | #include <sys/types.h> | |
29 | #include <unistd.h> | |
30 | #include <urcu/list.h> | |
159c7ff4 | 31 | #include <assert.h> |
1ce86c9a | 32 | |
50ecdf72 MD |
33 | #include "kernelctl.h" |
34 | #include "lttkconsumerd.h" | |
1ce86c9a JD |
35 | #include "lttngerr.h" |
36 | ||
242cd187 MD |
37 | static |
38 | struct kconsumerd_global_data { | |
39 | /* | |
40 | * kconsumerd_data.lock protects kconsumerd_data.fd_list, | |
41 | * kconsumerd_data.fds_count, and kconsumerd_data.need_update. It | |
42 | * ensures the count matches the number of items in the fd_list |
43 | * and that list updates *always* trigger an fd_array update: a |
44 | * list update must be atomic with the update of the |
45 | * kconsumerd_data.need_update flag, and the flag read, fd array |
46 | * rebuild and flag clear must likewise be atomic. |
47 | */ | |
48 | pthread_mutex_t lock; | |
49 | /* | |
50 | * Number of elements in the list below. Protected by |
51 | * kconsumerd_data.lock. | |
52 | */ | |
53 | unsigned int fds_count; | |
54 | /* | |
55 | * List of FDs. Protected by kconsumerd_data.lock. | |
56 | */ | |
57 | struct kconsumerd_fd_list fd_list; | |
58 | /* | |
59 | * Flag specifying if the local array of FDs needs update in the | |
60 | * poll function. Protected by kconsumerd_data.lock. | |
61 | */ | |
62 | unsigned int need_update; | |
63 | } kconsumerd_data = { | |
64 | .fd_list.head = CDS_LIST_HEAD_INIT(kconsumerd_data.fd_list.head), | |
cb040cc1 JD |
65 | .fds_count = 0, |
66 | .need_update = 1, | |
1ce86c9a JD |
67 | }; |
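
The comment above describes a small but strict protocol. As an aside, here is a condensed sketch (not part of the original file, and relying on the declarations of kconsumerd_data and struct kconsumerd_fd above) of how the writer and reader sides pair up so that no list update is ever missed by the poll thread:

```c
/* Illustrative only: assumes this file's declarations are in scope. */
static void writer_add_example(struct kconsumerd_fd *kfd)
{
	pthread_mutex_lock(&kconsumerd_data.lock);
	cds_list_add(&kfd->list, &kconsumerd_data.fd_list.head);
	kconsumerd_data.fds_count++;
	kconsumerd_data.need_update = 1;	/* atomic with the list update */
	pthread_mutex_unlock(&kconsumerd_data.lock);
}

static void reader_refresh_example(void)
{
	pthread_mutex_lock(&kconsumerd_data.lock);
	if (kconsumerd_data.need_update) {
		/* rebuild the local fd/pollfd arrays from fd_list here */
		kconsumerd_data.need_update = 0;	/* atomic with the rebuild */
	}
	pthread_mutex_unlock(&kconsumerd_data.lock);
}
```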
68 | ||
1ce86c9a JD |
69 | /* timeout parameter, to control the polling thread grace period */ |
70 | static int kconsumerd_poll_timeout = -1; | |
71 | ||
3dcd2721 MD |
72 | /* |
73 | * Flag to inform the polling thread to quit when all fds have hung up. |
74 | * Updated by kconsumerd_thread_receive_fds when it notices that all |
75 | * fds have hung up. Also updated by the signal handler |
76 | * (kconsumerd_should_exit()). Read by the polling threads. | |
77 | */ | |
78 | static volatile int kconsumerd_quit = 0; | |
1ce86c9a JD |
79 | |
80 | /* | |
81 | * kconsumerd_set_error_socket | |
82 | * | |
83 | * Set the error socket | |
84 | */ | |
cb040cc1 | 85 | void kconsumerd_set_error_socket(struct kconsumerd_local_data *ctx, int sock) |
1ce86c9a | 86 | { |
cb040cc1 | 87 | ctx->kconsumerd_error_socket = sock; |
1ce86c9a JD |
88 | } |
89 | ||
90 | /* | |
91 | * kconsumerd_set_command_socket_path | |
92 | * | |
93 | * Set the command socket path | |
94 | */ | |
cb040cc1 JD |
95 | void kconsumerd_set_command_socket_path(struct kconsumerd_local_data *ctx, |
96 | char *sock) | |
1ce86c9a | 97 | { |
cb040cc1 | 98 | ctx->kconsumerd_command_sock_path = sock; |
1ce86c9a JD |
99 | } |
100 | ||
38079a1b DG |
101 | /* |
102 | * kconsumerd_find_session_fd | |
103 | * | |
104 | * Find a session fd in the global list. | |
fec07047 | 105 | * The kconsumerd_data.lock must be held during this call |
38079a1b DG |
106 | * |
107 | * Returns 1 if found, else 0 |
108 | */ | |
109 | static int kconsumerd_find_session_fd(int fd) | |
110 | { | |
111 | struct kconsumerd_fd *iter; | |
112 | ||
242cd187 | 113 | cds_list_for_each_entry(iter, &kconsumerd_data.fd_list.head, list) { |
38079a1b DG |
114 | if (iter->sessiond_fd == fd) { |
115 | DBG("Duplicate session fd %d", fd); | |
38079a1b DG |
116 | return 1; |
117 | } | |
118 | } | |
38079a1b DG |
119 | |
120 | return 0; | |
121 | } | |
122 | ||
1ce86c9a JD |
123 | /* |
124 | * kconsumerd_del_fd | |
125 | * | |
126 | * Remove an fd from the global list, protected by a mutex |
127 | */ | |
128 | static void kconsumerd_del_fd(struct kconsumerd_fd *lcf) | |
129 | { | |
242cd187 | 130 | pthread_mutex_lock(&kconsumerd_data.lock); |
1ce86c9a | 131 | cds_list_del(&lcf->list); |
242cd187 MD |
132 | if (kconsumerd_data.fds_count > 0) { |
133 | kconsumerd_data.fds_count--; | |
1ce86c9a | 134 | if (lcf != NULL) { |
cb040cc1 JD |
135 | if (lcf->out_fd != 0) { |
136 | close(lcf->out_fd); | |
137 | } | |
1ce86c9a JD |
138 | close(lcf->consumerd_fd); |
139 | free(lcf); | |
140 | lcf = NULL; | |
141 | } | |
142 | } | |
242cd187 MD |
143 | kconsumerd_data.need_update = 1; |
144 | pthread_mutex_unlock(&kconsumerd_data.lock); | |
1ce86c9a JD |
145 | } |
146 | ||
147 | /* | |
148 | * kconsumerd_add_fd | |
149 | * | |
150 | * Add an fd to the global list, protected by a mutex |
151 | */ | |
152 | static int kconsumerd_add_fd(struct lttcomm_kconsumerd_msg *buf, int consumerd_fd) | |
153 | { | |
38079a1b | 154 | struct kconsumerd_fd *tmp_fd; |
cb040cc1 | 155 | int ret = 0; |
38079a1b | 156 | |
242cd187 | 157 | pthread_mutex_lock(&kconsumerd_data.lock); |
38079a1b DG |
158 | /* Check if it already exists */ |
159 | ret = kconsumerd_find_session_fd(buf->fd); | |
160 | if (ret == 1) { | |
161 | goto end; | |
162 | } | |
1ce86c9a JD |
163 | |
164 | tmp_fd = malloc(sizeof(struct kconsumerd_fd)); |
165 | if (tmp_fd == NULL) { ret = -1; goto end; } tmp_fd->sessiond_fd = buf->fd; |
166 | tmp_fd->consumerd_fd = consumerd_fd; | |
167 | tmp_fd->state = buf->state; | |
168 | tmp_fd->max_sb_size = buf->max_sb_size; | |
cb040cc1 JD |
169 | tmp_fd->out_fd = 0; |
170 | tmp_fd->out_fd_offset = 0; | |
1ce86c9a | 171 | strncpy(tmp_fd->path_name, buf->path_name, PATH_MAX); |
99497cd0 | 172 | tmp_fd->path_name[PATH_MAX - 1] = '\0'; |
1ce86c9a JD |
173 | |
174 | /* Opening the tracefile in write mode */ | |
cb040cc1 JD |
175 | if (tmp_fd->path_name != NULL) { |
176 | ret = open(tmp_fd->path_name, | |
177 | O_WRONLY|O_CREAT|O_TRUNC, S_IRWXU|S_IRWXG|S_IRWXO); | |
178 | if (ret < 0) { | |
179 | ERR("Error opening %s", tmp_fd->path_name); |
180 | perror("open"); | |
181 | goto end; | |
182 | } | |
183 | tmp_fd->out_fd = ret; | |
184 | DBG("Adding %s (%d, %d, %d)", tmp_fd->path_name, | |
185 | tmp_fd->sessiond_fd, tmp_fd->consumerd_fd, tmp_fd->out_fd); | |
1ce86c9a | 186 | } |
1ce86c9a | 187 | |
242cd187 MD |
188 | cds_list_add(&tmp_fd->list, &kconsumerd_data.fd_list.head); |
189 | kconsumerd_data.fds_count++; | |
190 | kconsumerd_data.need_update = 1; | |
1ce86c9a | 191 | end: |
242cd187 | 192 | pthread_mutex_unlock(&kconsumerd_data.lock); |
1ce86c9a JD |
193 | return ret; |
194 | } | |
195 | ||
196 | /* | |
197 | * kconsumerd_change_fd_state | |
198 | * | |
199 | * Update an fd's state according to what we just received |
200 | */ | |
201 | static void kconsumerd_change_fd_state(int sessiond_fd, | |
202 | enum kconsumerd_fd_state state) | |
203 | { | |
204 | struct kconsumerd_fd *iter; | |
0237248c | 205 | |
242cd187 MD |
206 | pthread_mutex_lock(&kconsumerd_data.lock); |
207 | cds_list_for_each_entry(iter, &kconsumerd_data.fd_list.head, list) { | |
1ce86c9a JD |
208 | if (iter->sessiond_fd == sessiond_fd) { |
209 | iter->state = state; | |
210 | break; | |
211 | } | |
212 | } | |
242cd187 MD |
213 | kconsumerd_data.need_update = 1; |
214 | pthread_mutex_unlock(&kconsumerd_data.lock); | |
1ce86c9a JD |
215 | } |
216 | ||
217 | /* | |
218 | * kconsumerd_update_poll_array | |
219 | * | |
220 | * Allocate the pollfd structure and the local view of the out fds |
221 | * to avoid a linked-list lookup and concurrency issues when a |
222 | * write is needed. |
223 | * Returns the number of fds in the structures. |
242cd187 | 224 | * Called with kconsumerd_data.lock held. |
1ce86c9a | 225 | */ |
cb040cc1 JD |
226 | static int kconsumerd_update_poll_array(struct kconsumerd_local_data *ctx, |
227 | struct pollfd **pollfd, struct kconsumerd_fd **local_kconsumerd_fd) | |
1ce86c9a JD |
228 | { |
229 | struct kconsumerd_fd *iter; | |
230 | int i = 0; | |
231 | ||
232 | DBG("Updating poll fd array"); | |
242cd187 | 233 | cds_list_for_each_entry(iter, &kconsumerd_data.fd_list.head, list) { |
1ce86c9a JD |
234 | if (iter->state == ACTIVE_FD) { |
235 | DBG("Active FD %d", iter->consumerd_fd); | |
236 | (*pollfd)[i].fd = iter->consumerd_fd; | |
237 | (*pollfd)[i].events = POLLIN | POLLPRI; | |
238 | local_kconsumerd_fd[i] = iter; | |
239 | i++; | |
240 | } | |
241 | } | |
242 | ||
243 | /* | |
244 | * insert the kconsumerd_poll_pipe at the end of the array and don't | |
245 | * increment i so nb_fd is the number of real FD | |
246 | */ | |
cb040cc1 | 247 | (*pollfd)[i].fd = ctx->kconsumerd_poll_pipe[0]; |
1ce86c9a | 248 | (*pollfd)[i].events = POLLIN; |
1ce86c9a JD |
249 | return i; |
250 | } | |
251 | ||
252 | ||
253 | /* | |
254 | * kconsumerd_on_read_subbuffer_mmap | |
255 | * | |
256 | * mmap the ring buffer, read it and write the data to the tracefile. | |
257 | * Returns the number of bytes written | |
258 | */ | |
cb040cc1 | 259 | int kconsumerd_on_read_subbuffer_mmap(struct kconsumerd_local_data *ctx, |
1ce86c9a JD |
260 | struct kconsumerd_fd *kconsumerd_fd, unsigned long len) |
261 | { | |
262 | unsigned long mmap_len, mmap_offset, padded_len, padding_len; | |
263 | char *mmap_base; | |
264 | char *padding = NULL; | |
265 | long ret = 0; | |
266 | off_t orig_offset = kconsumerd_fd->out_fd_offset; | |
267 | int fd = kconsumerd_fd->consumerd_fd; | |
268 | int outfd = kconsumerd_fd->out_fd; | |
269 | ||
270 | /* get the padded subbuffer size to know the padding required */ | |
271 | ret = kernctl_get_padded_subbuf_size(fd, &padded_len); | |
272 | if (ret != 0) { | |
273 | ret = errno; | |
274 | perror("kernctl_get_padded_subbuf_size"); | |
275 | goto end; | |
276 | } | |
277 | padding_len = padded_len - len; | |
278 | padding = calloc(padding_len, sizeof(char)); |
279 | if (padding == NULL && padding_len != 0) { ret = -1; goto end; } |
280 | ||
281 | /* get the len of the mmap region */ | |
282 | ret = kernctl_get_mmap_len(fd, &mmap_len); | |
283 | if (ret != 0) { | |
284 | ret = errno; | |
285 | perror("kernctl_get_mmap_len"); | |
286 | goto end; | |
287 | } | |
288 | ||
289 | /* get the offset inside the fd to mmap */ | |
290 | ret = kernctl_get_mmap_read_offset(fd, &mmap_offset); | |
291 | if (ret != 0) { | |
292 | ret = errno; | |
293 | perror("kernctl_get_mmap_read_offset"); | |
294 | goto end; | |
295 | } | |
296 | ||
297 | mmap_base = mmap(NULL, mmap_len, PROT_READ, MAP_PRIVATE, fd, mmap_offset); | |
298 | if (mmap_base == MAP_FAILED) { | |
299 | perror("Error mmapping"); |
300 | ret = -1; | |
301 | goto end; | |
302 | } | |
303 | ||
304 | while (len > 0) { | |
305 | ret = write(outfd, mmap_base, len); | |
306 | if (ret < 0) { |
307 | ret = errno; |
308 | perror("Error in file write"); |
309 | goto end; |
310 | } |
311 | len -= ret;	/* handle partial writes */ |
312 | mmap_base += ret; |
313 | /* This won't block, but will start writeout asynchronously */ | |
314 | sync_file_range(outfd, kconsumerd_fd->out_fd_offset, ret, | |
315 | SYNC_FILE_RANGE_WRITE); | |
316 | kconsumerd_fd->out_fd_offset += ret; | |
317 | } | |
318 | ||
319 | /* once all the data is written, write the padding to disk */ | |
320 | ret = write(outfd, padding, padding_len); | |
321 | if (ret < 0) { | |
322 | ret = errno; | |
323 | perror("Error writing padding to file"); | |
324 | goto end; | |
325 | } | |
326 | ||
327 | /* | |
328 | * This does a blocking write-and-wait on any page that belongs to the | |
329 | * subbuffer prior to the one we just wrote. | |
330 | * Don't care about error values, as these are just hints and ways to | |
331 | * limit the amount of page cache used. | |
332 | */ | |
333 | if (orig_offset >= kconsumerd_fd->max_sb_size) { | |
334 | sync_file_range(outfd, orig_offset - kconsumerd_fd->max_sb_size, | |
335 | kconsumerd_fd->max_sb_size, | |
336 | SYNC_FILE_RANGE_WAIT_BEFORE | |
337 | | SYNC_FILE_RANGE_WRITE | |
338 | | SYNC_FILE_RANGE_WAIT_AFTER); | |
339 | ||
340 | /* | |
341 | * Give hints to the kernel about how we access the file: | |
342 | * POSIX_FADV_DONTNEED: we won't re-access the data in the near future after |
343 | * we write it. | |
344 | * | |
345 | * We need to call fadvise again after the file grows because the | |
346 | * kernel does not seem to apply fadvise to non-existing parts of the | |
347 | * file. | |
348 | * | |
349 | * Call fadvise _after_ having waited for the page writeback to |
350 | * complete: the semantics of fadvise on dirty pages are not well |
351 | * defined, so calling it earlier could be expected to lower |
352 | * streaming throughput. |
353 | */ | |
354 | posix_fadvise(outfd, orig_offset - kconsumerd_fd->max_sb_size, | |
355 | kconsumerd_fd->max_sb_size, POSIX_FADV_DONTNEED); | |
356 | } | |
357 | goto end; | |
358 | ||
359 | end: | |
360 | if (padding != NULL) { | |
361 | free(padding); | |
362 | } | |
363 | return ret; | |
364 | } | |
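
The sync_file_range()/posix_fadvise() pairing above is worth isolating. Below is a minimal sketch of the pattern, assuming only a file descriptor, a write offset and the sub-buffer size; "outfd", "offset" and "sb_size" are hypothetical names, not identifiers from this file:

```c
#define _GNU_SOURCE
#include <fcntl.h>

/*
 * Flush the previous sub-buffer to disk, wait for the writeback to
 * finish, then drop the now-clean pages from the page cache.
 */
static void flush_and_drop(int outfd, off_t offset, unsigned long sb_size)
{
	if (offset < (off_t) sb_size)
		return;	/* no complete sub-buffer behind us yet */

	/* blocking write-and-wait on the previous sub-buffer's pages */
	sync_file_range(outfd, offset - sb_size, sb_size,
			SYNC_FILE_RANGE_WAIT_BEFORE
			| SYNC_FILE_RANGE_WRITE
			| SYNC_FILE_RANGE_WAIT_AFTER);
	/* pages are clean now, so DONTNEED can actually discard them */
	posix_fadvise(outfd, offset - sb_size, sb_size, POSIX_FADV_DONTNEED);
}
```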
365 | ||
366 | /* | |
367 | * kconsumerd_on_read_subbuffer_splice |
368 | * | |
369 | * Splice the data from the ring buffer to the tracefile. | |
370 | * Returns the number of bytes spliced | |
371 | */ | |
cb040cc1 | 372 | int kconsumerd_on_read_subbuffer_splice(struct kconsumerd_local_data *ctx, |
1ce86c9a JD |
373 | struct kconsumerd_fd *kconsumerd_fd, unsigned long len) |
374 | { | |
375 | long ret = 0; | |
376 | loff_t offset = 0; | |
377 | off_t orig_offset = kconsumerd_fd->out_fd_offset; | |
378 | int fd = kconsumerd_fd->consumerd_fd; | |
379 | int outfd = kconsumerd_fd->out_fd; | |
380 | ||
381 | while (len > 0) { | |
382 | DBG("splice chan to pipe offset %lu (fd : %d)", | |
383 | (unsigned long)offset, fd); | |
cb040cc1 | 384 | ret = splice(fd, &offset, ctx->kconsumerd_thread_pipe[1], NULL, len, |
1ce86c9a JD |
385 | SPLICE_F_MOVE | SPLICE_F_MORE); |
386 | DBG("splice chan to pipe ret %ld", ret); | |
387 | if (ret < 0) { | |
388 | ret = errno; | |
389 | perror("Error in relay splice"); | |
390 | goto splice_error; | |
391 | } | |
392 | ||
cb040cc1 | 393 | ret = splice(ctx->kconsumerd_thread_pipe[0], NULL, outfd, NULL, ret, |
1ce86c9a JD |
394 | SPLICE_F_MOVE | SPLICE_F_MORE); |
395 | DBG("splice pipe to file %ld", ret); | |
396 | if (ret < 0) { | |
397 | ret = errno; | |
398 | perror("Error in file splice"); | |
399 | goto splice_error; | |
400 | } | |
401 | /* handle partial splices so the loop terminates */ |
402 | len -= ret; |
403 | |
404 | /* This won't block, but will start writeout asynchronously */ | |
405 | sync_file_range(outfd, kconsumerd_fd->out_fd_offset, ret, | |
406 | SYNC_FILE_RANGE_WRITE); | |
407 | kconsumerd_fd->out_fd_offset += ret; | |
408 | } | |
409 | ||
410 | /* | |
411 | * This does a blocking write-and-wait on any page that belongs to the | |
412 | * subbuffer prior to the one we just wrote. | |
413 | * Don't care about error values, as these are just hints and ways to | |
414 | * limit the amount of page cache used. | |
415 | */ | |
416 | if (orig_offset >= kconsumerd_fd->max_sb_size) { | |
417 | sync_file_range(outfd, orig_offset - kconsumerd_fd->max_sb_size, | |
418 | kconsumerd_fd->max_sb_size, | |
419 | SYNC_FILE_RANGE_WAIT_BEFORE | |
420 | | SYNC_FILE_RANGE_WRITE | |
421 | | SYNC_FILE_RANGE_WAIT_AFTER); | |
422 | /* | |
423 | * Give hints to the kernel about how we access the file: | |
424 | * POSIX_FADV_DONTNEED: we won't re-access the data in the near future after |
425 | * we write it. | |
426 | * | |
427 | * We need to call fadvise again after the file grows because the | |
428 | * kernel does not seem to apply fadvise to non-existing parts of the | |
429 | * file. | |
430 | * | |
431 | * Call fadvise _after_ having waited for the page writeback to |
432 | * complete: the semantics of fadvise on dirty pages are not well |
433 | * defined, so calling it earlier could be expected to lower |
434 | * streaming throughput. |
435 | */ | |
436 | posix_fadvise(outfd, orig_offset - kconsumerd_fd->max_sb_size, | |
437 | kconsumerd_fd->max_sb_size, POSIX_FADV_DONTNEED); | |
438 | } | |
439 | goto end; | |
440 | ||
441 | splice_error: | |
442 | /* send the appropriate error description to sessiond */ | |
443 | switch(ret) { | |
444 | case EBADF: | |
cb040cc1 | 445 | kconsumerd_send_error(ctx, KCONSUMERD_SPLICE_EBADF); |
1ce86c9a JD |
446 | break; |
447 | case EINVAL: | |
cb040cc1 | 448 | kconsumerd_send_error(ctx, KCONSUMERD_SPLICE_EINVAL); |
1ce86c9a JD |
449 | break; |
450 | case ENOMEM: | |
cb040cc1 | 451 | kconsumerd_send_error(ctx, KCONSUMERD_SPLICE_ENOMEM); |
1ce86c9a JD |
452 | break; |
453 | case ESPIPE: | |
cb040cc1 | 454 | kconsumerd_send_error(ctx, KCONSUMERD_SPLICE_ESPIPE); |
1ce86c9a | 455 | break; |
1ce86c9a JD |
456 | } |
457 | ||
458 | end: | |
459 | return ret; | |
460 | } | |
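
Because splice(2) requires a pipe on one side of every call, the function above moves each sub-buffer in two hops: ring-buffer fd to pipe, then pipe to tracefile, without the data ever reaching user space. A self-contained sketch of that data path (a hypothetical helper, with error handling reduced to the minimum):

```c
#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/types.h>

/*
 * Two-hop zero-copy path: src_fd -> pipe -> dst_fd.  Returns the number
 * of bytes that arrived in dst_fd, or a negative value on error.
 */
static ssize_t splice_through_pipe(int src_fd, int pipefd[2], int dst_fd,
		size_t len)
{
	ssize_t moved;

	/* hop 1: ring buffer (or any fd) into the pipe */
	moved = splice(src_fd, NULL, pipefd[1], NULL, len,
			SPLICE_F_MOVE | SPLICE_F_MORE);
	if (moved <= 0)
		return moved;
	/* hop 2: exactly the bytes just queued, pipe to output file */
	return splice(pipefd[0], NULL, dst_fd, NULL, moved,
			SPLICE_F_MOVE | SPLICE_F_MORE);
}
```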
461 | ||
4de84ad9 JD |
462 | /* |
463 | * kconsumerd_poll_socket | |
464 | * | |
465 | * Poll on the should_quit pipe and the command socket. |
466 | * Returns -1 on error (the caller should exit), 0 if data is |
467 | * available on the command socket. |
468 | */ | |
469 | int kconsumerd_poll_socket(struct pollfd *kconsumerd_sockpoll) | |
470 | { | |
471 | int num_rdy; | |
472 | ||
473 | num_rdy = poll(kconsumerd_sockpoll, 2, -1); | |
474 | if (num_rdy == -1) { | |
475 | perror("Poll error"); | |
476 | goto exit; | |
477 | } | |
478 | if (kconsumerd_sockpoll[0].revents == POLLIN) { | |
479 | DBG("kconsumerd_should_quit wake up"); | |
480 | goto exit; | |
481 | } | |
482 | return 0; | |
483 | ||
484 | exit: | |
485 | return -1; | |
486 | } | |
487 | ||
1ce86c9a JD |
488 | /* |
489 | * kconsumerd_consumerd_recv_fd | |
490 | * | |
491 | * Receives an array of file descriptors and the associated | |
492 | * structures describing each fd (path name). | |
493 | * Returns the size of the received data. |
494 | */ | |
cb040cc1 JD |
495 | static int kconsumerd_consumerd_recv_fd(struct kconsumerd_local_data *ctx, |
496 | int sfd, struct pollfd *kconsumerd_sockpoll, int size, | |
1ce86c9a JD |
497 | enum kconsumerd_command cmd_type) |
498 | { | |
1ce86c9a JD |
499 | struct iovec iov[1]; |
500 | int ret = 0, i, tmp2; | |
501 | struct cmsghdr *cmsg; | |
502 | int nb_fd; | |
503 | char recv_fd[CMSG_SPACE(sizeof(int))]; | |
504 | struct lttcomm_kconsumerd_msg lkm; | |
505 | ||
506 | /* the number of fds we are about to receive */ | |
507 | nb_fd = size / sizeof(struct lttcomm_kconsumerd_msg); | |
508 | ||
159c7ff4 | 509 | /* |
8b97b5dd | 510 | * nb_fd is the number of fds we receive. One fd per recvmsg. |
159c7ff4 | 511 | */ |
1ce86c9a | 512 | for (i = 0; i < nb_fd; i++) { |
159c7ff4 | 513 | struct msghdr msg = { 0 }; |
1ce86c9a JD |
514 | |
515 | /* Prepare to receive the structures */ | |
516 | iov[0].iov_base = &lkm; | |
517 | iov[0].iov_len = sizeof(lkm); | |
518 | msg.msg_iov = iov; | |
519 | msg.msg_iovlen = 1; | |
520 | ||
521 | msg.msg_control = recv_fd; | |
522 | msg.msg_controllen = sizeof(recv_fd); | |
523 | ||
524 | DBG("Waiting to receive fd"); | |
4de84ad9 JD |
525 | if (kconsumerd_poll_socket(kconsumerd_sockpoll) < 0) { |
526 | goto end; | |
527 | } | |
528 | ||
1ce86c9a JD |
529 | if ((ret = recvmsg(sfd, &msg, 0)) < 0) { |
530 | perror("recvmsg"); | |
531 | continue; | |
532 | } | |
533 | ||
534 | if (ret != (size / nb_fd)) { | |
535 | ERR("Received only %d, expected %d", ret, size / nb_fd); |
cb040cc1 | 536 | kconsumerd_send_error(ctx, KCONSUMERD_ERROR_RECV_FD); |
1ce86c9a JD |
537 | goto end; |
538 | } | |
539 | ||
540 | cmsg = CMSG_FIRSTHDR(&msg); | |
541 | if (!cmsg) { | |
542 | ERR("Invalid control message header"); | |
543 | ret = -1; | |
cb040cc1 | 544 | kconsumerd_send_error(ctx, KCONSUMERD_ERROR_RECV_FD); |
1ce86c9a JD |
545 | goto end; |
546 | } | |
159c7ff4 | 547 | |
1ce86c9a JD |
548 | /* if we received fds */ |
549 | if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) { | |
550 | switch (cmd_type) { | |
551 | case ADD_STREAM: | |
159c7ff4 MD |
552 | DBG("kconsumerd_add_fd %s (%d)", lkm.path_name, ((int *) CMSG_DATA(cmsg))[0]); |
553 | ret = kconsumerd_add_fd(&lkm, ((int *) CMSG_DATA(cmsg))[0]); | |
1ce86c9a | 554 | if (ret < 0) { |
cb040cc1 | 555 | kconsumerd_send_error(ctx, KCONSUMERD_OUTFD_ERROR); |
1ce86c9a JD |
556 | goto end; |
557 | } | |
558 | break; | |
559 | case UPDATE_STREAM: | |
560 | kconsumerd_change_fd_state(lkm.fd, lkm.state); | |
561 | break; | |
562 | default: | |
563 | break; | |
564 | } | |
1ce86c9a | 565 | /* signal the poll thread */ |
cb040cc1 | 566 | tmp2 = write(ctx->kconsumerd_poll_pipe[1], "4", 1); |
f40799e8 DG |
567 | if (tmp2 < 0) { |
568 | perror("write kconsumerd poll"); | |
569 | } | |
1ce86c9a JD |
570 | } else { |
571 | ERR("Didn't receive any fd"); |
cb040cc1 | 572 | kconsumerd_send_error(ctx, KCONSUMERD_ERROR_RECV_FD); |
1ce86c9a JD |
573 | ret = -1; |
574 | goto end; | |
575 | } | |
576 | } | |
577 | ||
578 | end: | |
1ce86c9a JD |
579 | return ret; |
580 | } | |
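
The receive side above only makes sense next to its sender. Here is a sketch of what the matching sendmsg() call might look like in ltt-sessiond; this helper is hypothetical (only the SCM_RIGHTS mechanics are the point), and struct lttcomm_kconsumerd_msg comes from the project headers:

```c
#include <string.h>
#include <sys/socket.h>

/* One fd per sendmsg(), carried in an SCM_RIGHTS ancillary message
 * next to its descriptive payload. */
static ssize_t send_one_fd(int sock, struct lttcomm_kconsumerd_msg *lkm, int fd)
{
	struct msghdr msg = { 0 };
	struct iovec iov = { .iov_base = lkm, .iov_len = sizeof(*lkm) };
	char cmsg_buf[CMSG_SPACE(sizeof(int))];
	struct cmsghdr *cmsg;

	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cmsg_buf;
	msg.msg_controllen = sizeof(cmsg_buf);

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));	/* the fd travels here */

	return sendmsg(sock, &msg, 0);
}
```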
581 | ||
582 | /* | |
583 | * kconsumerd_thread_poll_fds | |
584 | * | |
585 | * This thread polls the fds in the fd_list to consume the data |
586 | * and write it to the tracefile if necessary. |
587 | */ | |
588 | void *kconsumerd_thread_poll_fds(void *data) | |
589 | { | |
590 | int num_rdy, num_hup, high_prio, ret, i; | |
591 | struct pollfd *pollfd = NULL; | |
592 | /* local view of the fds */ | |
593 | struct kconsumerd_fd **local_kconsumerd_fd = NULL; | |
242cd187 | 594 | /* local view of kconsumerd_data.fds_count */ |
1ce86c9a JD |
595 | int nb_fd = 0; |
596 | char tmp; | |
597 | int tmp2; | |
cb040cc1 | 598 | struct kconsumerd_local_data *ctx = data; |
1ce86c9a | 599 | |
1ce86c9a JD |
600 | |
601 | /* local_kconsumerd_fd is allocated in the loop below (need_update starts at 1) */ |
602 | ||
603 | while (1) { | |
604 | high_prio = 0; | |
605 | num_hup = 0; | |
606 | ||
607 | /* | |
608 | * the fd_list has been updated; we need to update our |
609 | * local array as well | |
610 | */ | |
242cd187 MD |
611 | pthread_mutex_lock(&kconsumerd_data.lock); |
612 | if (kconsumerd_data.need_update) { | |
1ce86c9a JD |
613 | if (pollfd != NULL) { |
614 | free(pollfd); | |
615 | pollfd = NULL; | |
616 | } | |
617 | if (local_kconsumerd_fd != NULL) { | |
618 | free(local_kconsumerd_fd); | |
619 | local_kconsumerd_fd = NULL; | |
620 | } | |
0237248c | 621 | |
1ce86c9a | 622 | /* allocate for all fds + 1 for the kconsumerd_poll_pipe */ |
242cd187 | 623 | pollfd = malloc((kconsumerd_data.fds_count + 1) * sizeof(struct pollfd)); |
1ce86c9a JD |
624 | if (pollfd == NULL) { |
625 | perror("pollfd malloc"); | |
242cd187 | 626 | pthread_mutex_unlock(&kconsumerd_data.lock); |
1ce86c9a JD |
627 | goto end; |
628 | } | |
0237248c | 629 | |
1ce86c9a | 630 | /* allocate for all fds + 1 for the kconsumerd_poll_pipe */ |
242cd187 | 631 | local_kconsumerd_fd = malloc((kconsumerd_data.fds_count + 1) * |
1ce86c9a JD |
632 | sizeof(struct kconsumerd_fd)); |
633 | if (local_kconsumerd_fd == NULL) { | |
634 | perror("local_kconsumerd_fd malloc"); | |
242cd187 | 635 | pthread_mutex_unlock(&kconsumerd_data.lock); |
1ce86c9a JD |
636 | goto end; |
637 | } | |
cb040cc1 | 638 | ret = kconsumerd_update_poll_array(ctx, &pollfd, local_kconsumerd_fd); |
1ce86c9a JD |
639 | if (ret < 0) { |
640 | ERR("Error in allocating pollfd or local_outfds"); | |
cb040cc1 | 641 | kconsumerd_send_error(ctx, KCONSUMERD_POLL_ERROR); |
242cd187 | 642 | pthread_mutex_unlock(&kconsumerd_data.lock); |
1ce86c9a JD |
643 | goto end; |
644 | } | |
645 | nb_fd = ret; | |
242cd187 | 646 | kconsumerd_data.need_update = 0; |
1ce86c9a | 647 | } |
242cd187 | 648 | pthread_mutex_unlock(&kconsumerd_data.lock); |
1ce86c9a JD |
649 | |
650 | /* poll on the array of fds */ | |
651 | DBG("polling on %d fd", nb_fd + 1); | |
652 | num_rdy = poll(pollfd, nb_fd + 1, kconsumerd_poll_timeout); | |
653 | DBG("poll num_rdy : %d", num_rdy); | |
654 | if (num_rdy == -1) { | |
655 | perror("Poll error"); | |
cb040cc1 | 656 | kconsumerd_send_error(ctx, KCONSUMERD_POLL_ERROR); |
1ce86c9a JD |
657 | goto end; |
658 | } else if (num_rdy == 0) { | |
659 | DBG("Polling thread timed out"); | |
660 | goto end; | |
661 | } | |
662 | ||
663 | /* no FDs left and kconsumerd_quit is set: clean up and exit the thread */ |
664 | if (nb_fd == 0 && kconsumerd_quit == 1) { | |
665 | goto end; | |
666 | } | |
667 | ||
668 | /* | |
242cd187 MD |
669 | * If the kconsumerd_poll_pipe triggered the poll, go |
670 | * directly to the beginning of the loop to update the | |
671 | * array. We want to prioritize array update over | |
672 | * low-priority reads. | |
1ce86c9a | 673 | */ |
242cd187 | 674 | if (pollfd[nb_fd].revents == POLLIN) { |
1ce86c9a | 675 | DBG("kconsumerd_poll_pipe wake up"); |
cb040cc1 | 676 | tmp2 = read(ctx->kconsumerd_poll_pipe[0], &tmp, 1); |
f40799e8 DG |
677 | if (tmp2 < 0) { |
678 | perror("read kconsumerd poll"); | |
679 | } | |
1ce86c9a JD |
680 | continue; |
681 | } | |
682 | ||
683 | /* Take care of high priority channels first. */ | |
684 | for (i = 0; i < nb_fd; i++) { | |
685 | switch(pollfd[i].revents) { | |
686 | case POLLERR: | |
687 | ERR("Error returned in polling fd %d.", pollfd[i].fd); | |
688 | kconsumerd_del_fd(local_kconsumerd_fd[i]); | |
1ce86c9a JD |
689 | num_hup++; |
690 | break; | |
691 | case POLLHUP: | |
692 | DBG("Polling fd %d reports it has hung up.", pollfd[i].fd); |
693 | kconsumerd_del_fd(local_kconsumerd_fd[i]); | |
1ce86c9a JD |
694 | num_hup++; |
695 | break; | |
696 | case POLLNVAL: | |
697 | ERR("Polling fd %d reports the fd is not open.", pollfd[i].fd); |
698 | kconsumerd_del_fd(local_kconsumerd_fd[i]); | |
1ce86c9a JD |
699 | num_hup++; |
700 | break; | |
701 | case POLLPRI: | |
702 | DBG("Urgent read on fd %d", pollfd[i].fd); | |
703 | high_prio = 1; | |
cb040cc1 | 704 | ret = ctx->on_buffer_ready(local_kconsumerd_fd[i]); |
1ce86c9a JD |
705 | /* it's ok to have an unavailable sub-buffer */ |
706 | if (ret == EAGAIN) { | |
707 | ret = 0; | |
708 | } | |
709 | break; | |
710 | } | |
711 | } | |
712 | ||
713 | /* If every buffer FD has hung up, we end the read loop here */ | |
714 | if (nb_fd > 0 && num_hup == nb_fd) { | |
715 | DBG("every buffer FD has hung up"); |
716 | if (kconsumerd_quit == 1) { | |
717 | goto end; | |
718 | } | |
719 | continue; | |
720 | } | |
721 | ||
722 | /* Take care of low priority channels. */ | |
723 | if (high_prio == 0) { | |
724 | for (i = 0; i < nb_fd; i++) { | |
725 | if (pollfd[i].revents == POLLIN) { | |
726 | DBG("Normal read on fd %d", pollfd[i].fd); | |
cb040cc1 | 727 | ret = ctx->on_buffer_ready(local_kconsumerd_fd[i]); |
1ce86c9a JD |
728 | /* it's ok to have an unavailable subbuffer */ |
729 | if (ret == EAGAIN) { | |
730 | ret = 0; | |
731 | } | |
732 | } | |
733 | } | |
734 | } | |
735 | } | |
736 | end: | |
737 | DBG("polling thread exiting"); | |
738 | if (pollfd != NULL) { | |
739 | free(pollfd); | |
740 | pollfd = NULL; | |
741 | } | |
742 | if (local_kconsumerd_fd != NULL) { | |
743 | free(local_kconsumerd_fd); | |
744 | local_kconsumerd_fd = NULL; | |
745 | } | |
1ce86c9a JD |
746 | return NULL; |
747 | } | |
748 | ||
749 | /* | |
cb040cc1 | 750 | * kconsumerd_create |
1ce86c9a | 751 | * |
4de84ad9 | 752 | * initialise the necessary environment: |
cb040cc1 | 753 | * - create a new context |
4de84ad9 JD |
754 | * - create the poll_pipe |
755 | * - create the should_quit pipe (for signal handler) | |
cb040cc1 JD |
756 | * - create the thread pipe (for splice) |
757 | * Takes a function pointer as argument; this function is called when data |
758 | * is available on a buffer. The callback is responsible for calling |
759 | * kernctl_get_next_subbuf, reading the data with mmap or splice depending |
760 | * on the buffer configuration, and then calling kernctl_put_next_subbuf. |
761 | * Returns a pointer to the new context or NULL on error. | |
1ce86c9a | 762 | */ |
cb040cc1 JD |
763 | struct kconsumerd_local_data *kconsumerd_create( |
764 | int (*buffer_ready)(struct kconsumerd_fd *kconsumerd_fd)) | |
1ce86c9a | 765 | { |
4de84ad9 | 766 | int ret; |
cb040cc1 | 767 | struct kconsumerd_local_data *ctx; |
4de84ad9 | 768 | |
cb040cc1 JD |
769 | ctx = malloc(sizeof(struct kconsumerd_local_data)); |
770 | if (ctx == NULL) { | |
771 | perror("allocating context"); | |
772 | goto end; | |
773 | } | |
774 | ||
775 | ctx->on_buffer_ready = buffer_ready; | |
4de84ad9 | 776 | |
cb040cc1 | 777 | ret = pipe(ctx->kconsumerd_poll_pipe); |
4de84ad9 JD |
778 | if (ret < 0) { |
779 | perror("Error creating poll pipe"); | |
cb040cc1 | 780 | free(ctx); ctx = NULL; |
4de84ad9 JD |
781 | goto end; |
782 | } | |
783 | ||
cb040cc1 | 784 | ret = pipe(ctx->kconsumerd_should_quit); |
4de84ad9 JD |
785 | if (ret < 0) { |
786 | perror("Error creating recv pipe"); | |
cb040cc1 JD |
787 | free(ctx); ctx = NULL; |
788 | goto end; | |
789 | } | |
790 | ||
791 | ret = pipe(ctx->kconsumerd_thread_pipe); | |
792 | if (ret < 0) { | |
793 | perror("Error creating thread pipe"); | |
794 | free(ctx); ctx = NULL; |
4de84ad9 JD |
795 | goto end; |
796 | } | |
797 | ||
798 | end: | |
cb040cc1 JD |
799 | return ctx; |
800 | } | |
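
A hedged usage sketch of this API follows. Everything here other than the library functions themselves (my_buffer_ready, run_consumerd, the socket path) is hypothetical, and a real caller would also install the signal handling shown further down:

```c
#include <pthread.h>

/* hypothetical callback: get next subbuf, consume it, put it back */
static int my_buffer_ready(struct kconsumerd_fd *kfd)
{
	/* kernctl_get_next_subbuf(), mmap/splice read, kernctl_put_next_subbuf() */
	return 0;
}

static int run_consumerd(void)
{
	pthread_t recv_thread, poll_thread;
	static char sock_path[] = "/tmp/kconsumerd";	/* hypothetical path */
	struct kconsumerd_local_data *ctx;

	ctx = kconsumerd_create(my_buffer_ready);
	if (ctx == NULL)
		return -1;
	kconsumerd_set_command_socket_path(ctx, sock_path);

	pthread_create(&recv_thread, NULL, kconsumerd_thread_receive_fds, ctx);
	pthread_create(&poll_thread, NULL, kconsumerd_thread_poll_fds, ctx);

	pthread_join(recv_thread, NULL);
	pthread_join(poll_thread, NULL);

	kconsumerd_cleanup();	/* all threads joined: safe to walk the list */
	kconsumerd_destroy(ctx);
	return 0;
}
```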
801 | ||
802 | /* | |
803 | * kconsumerd_destroy | |
804 | * | |
805 | * Close all fds associated with the instance and free the context | |
806 | */ | |
807 | void kconsumerd_destroy(struct kconsumerd_local_data *ctx) | |
808 | { | |
809 | close(ctx->kconsumerd_error_socket); | |
810 | close(ctx->kconsumerd_thread_pipe[0]); | |
811 | close(ctx->kconsumerd_thread_pipe[1]); | |
812 | close(ctx->kconsumerd_poll_pipe[0]); | |
813 | close(ctx->kconsumerd_poll_pipe[1]); | |
814 | close(ctx->kconsumerd_should_quit[0]); | |
815 | close(ctx->kconsumerd_should_quit[1]); | |
816 | unlink(ctx->kconsumerd_command_sock_path); | |
817 | free(ctx); | |
818 | ctx = NULL; | |
1ce86c9a JD |
819 | } |
820 | ||
821 | /* | |
822 | * kconsumerd_thread_receive_fds | |
823 | * | |
824 | * This thread listens on the consumerd socket and | |
825 | * receives the file descriptors from ltt-sessiond | |
826 | */ | |
827 | void *kconsumerd_thread_receive_fds(void *data) | |
828 | { | |
829 | int sock, client_socket, ret; | |
830 | struct lttcomm_kconsumerd_header tmp; | |
4de84ad9 JD |
831 | /* |
832 | * structure to poll for incoming data on the communication socket, |
833 | * so that reads on the socket never have to block |
834 | */ | |
835 | struct pollfd kconsumerd_sockpoll[2]; | |
cb040cc1 | 836 | struct kconsumerd_local_data *ctx = data; |
4de84ad9 | 837 | |
1ce86c9a | 838 | |
cb040cc1 JD |
839 | DBG("Creating command socket %s", ctx->kconsumerd_command_sock_path); |
840 | unlink(ctx->kconsumerd_command_sock_path); | |
841 | client_socket = lttcomm_create_unix_sock(ctx->kconsumerd_command_sock_path); | |
1ce86c9a JD |
842 | if (client_socket < 0) { |
843 | ERR("Cannot create command socket"); | |
844 | goto end; | |
845 | } | |
846 | ||
847 | ret = lttcomm_listen_unix_sock(client_socket); | |
848 | if (ret < 0) { | |
849 | goto end; | |
850 | } | |
851 | ||
852 | DBG("Sending ready command to ltt-sessiond"); | |
cb040cc1 | 853 | ret = kconsumerd_send_error(ctx, KCONSUMERD_COMMAND_SOCK_READY); |
1ce86c9a JD |
854 | if (ret < 0) { |
855 | ERR("Error sending ready command to ltt-sessiond"); | |
856 | goto end; | |
857 | } | |
858 | ||
4de84ad9 JD |
859 | ret = fcntl(client_socket, F_SETFL, O_NONBLOCK); |
860 | if (ret < 0) { | |
861 | perror("fcntl O_NONBLOCK"); | |
862 | goto end; | |
863 | } | |
864 | ||
865 | /* prepare the FDs to poll: the client socket and the should_quit pipe */ |
cb040cc1 | 866 | kconsumerd_sockpoll[0].fd = ctx->kconsumerd_should_quit[0]; |
4de84ad9 JD |
867 | kconsumerd_sockpoll[0].events = POLLIN | POLLPRI; |
868 | kconsumerd_sockpoll[1].fd = client_socket; | |
869 | kconsumerd_sockpoll[1].events = POLLIN | POLLPRI; | |
870 | ||
871 | if (kconsumerd_poll_socket(kconsumerd_sockpoll) < 0) { | |
872 | goto end; | |
873 | } | |
874 | DBG("Connection on client_socket"); | |
875 | ||
1ce86c9a JD |
876 | /* Blocking call, waiting for transmission */ |
877 | sock = lttcomm_accept_unix_sock(client_socket); | |
878 | if (sock <= 0) { | |
879 | WARN("On accept"); | |
880 | goto end; | |
881 | } | |
4de84ad9 JD |
882 | ret = fcntl(sock, F_SETFL, O_NONBLOCK); |
883 | if (ret < 0) { | |
884 | perror("fcntl O_NONBLOCK"); | |
885 | goto end; | |
886 | } | |
887 | ||
888 | /* update the polling structure to poll on the established socket */ | |
889 | kconsumerd_sockpoll[1].fd = sock; | |
890 | kconsumerd_sockpoll[1].events = POLLIN | POLLPRI; | |
891 | ||
1ce86c9a | 892 | while (1) { |
4de84ad9 JD |
893 | if (kconsumerd_poll_socket(kconsumerd_sockpoll) < 0) { |
894 | goto end; | |
895 | } | |
896 | DBG("Incoming fds on sock"); | |
897 | ||
1ce86c9a JD |
898 | /* We first get the number of fds we are about to receive */ |
899 | ret = lttcomm_recv_unix_sock(sock, &tmp, | |
900 | sizeof(struct lttcomm_kconsumerd_header)); | |
901 | if (ret <= 0) { | |
902 | ERR("Communication interrupted on command socket"); | |
903 | goto end; | |
904 | } | |
905 | if (tmp.cmd_type == STOP) { | |
906 | DBG("Received STOP command"); | |
907 | goto end; | |
908 | } | |
3dcd2721 MD |
909 | if (kconsumerd_quit) { |
910 | DBG("kconsumerd_thread_receive_fds received quit from signal"); | |
911 | goto end; | |
912 | } | |
4de84ad9 | 913 | |
1ce86c9a | 914 | /* we received a command to add or update fds */ |
cb040cc1 | 915 | ret = kconsumerd_consumerd_recv_fd(ctx, sock, kconsumerd_sockpoll, |
4de84ad9 | 916 | tmp.payload_size, tmp.cmd_type); |
1ce86c9a JD |
917 | if (ret <= 0) { |
918 | ERR("Error receiving the FDs, exiting"); |
919 | goto end; | |
920 | } | |
4de84ad9 | 921 | DBG("received fds on sock"); |
1ce86c9a JD |
922 | } |
923 | ||
924 | end: | |
925 | DBG("kconsumerd_thread_receive_fds exiting"); | |
926 | ||
927 | /* | |
928 | * when all fds have hung up, the polling thread | |
929 | * can exit cleanly | |
930 | */ | |
931 | kconsumerd_quit = 1; | |
932 | ||
933 | /* | |
934 | * 2s of grace period: if no polling events occur during |
935 | * this period, the polling thread will exit even if there | |
936 | * are still open FDs (should not happen, but safety mechanism). | |
937 | */ | |
938 | kconsumerd_poll_timeout = KCONSUMERD_POLL_GRACE_PERIOD; | |
939 | ||
940 | /* wake up the polling thread */ | |
cb040cc1 | 941 | ret = write(ctx->kconsumerd_poll_pipe[1], "4", 1); |
1ce86c9a JD |
942 | if (ret < 0) { |
943 | perror("poll pipe write"); | |
944 | } | |
945 | return NULL; | |
946 | } | |
947 | ||
948 | /* | |
949 | * kconsumerd_cleanup | |
950 | * | |
cb040cc1 JD |
951 | * Close all the tracefiles and stream fds; should be called once all |
952 | * instances are destroyed. | |
1ce86c9a | 953 | */ |
3dcd2721 | 954 | void kconsumerd_cleanup(void) |
1ce86c9a | 955 | { |
fbf10601 | 956 | struct kconsumerd_fd *iter, *tmp; |
1ce86c9a | 957 | |
3dcd2721 MD |
958 | /* |
959 | * Close all outfds. Called when there are no more threads |
960 | * running (after joining on the threads), so there is no need |
961 | * to protect list iteration with the mutex. |
962 | */ | |
fbf10601 | 963 | cds_list_for_each_entry_safe(iter, tmp, &kconsumerd_data.fd_list.head, list) { |
1ce86c9a JD |
964 | kconsumerd_del_fd(iter); |
965 | } | |
966 | } | |
967 | ||
3dcd2721 | 968 | /* |
4de84ad9 JD |
969 | * kconsumerd_should_exit |
970 | * | |
3dcd2721 MD |
971 | * Called from signal handler. |
972 | */ | |
cb040cc1 | 973 | void kconsumerd_should_exit(struct kconsumerd_local_data *ctx) |
3dcd2721 | 974 | { |
4de84ad9 | 975 | int ret; |
3dcd2721 | 976 | kconsumerd_quit = 1; |
cb040cc1 | 977 | ret = write(ctx->kconsumerd_should_quit[1], "4", 1); |
f40799e8 DG |
978 | if (ret < 0) { |
979 | perror("write kconsumerd quit"); | |
980 | } | |
3dcd2721 MD |
981 | } |
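
Since this function is meant to run from a signal handler, a caller might wire it up as in the sketch below; the file-scope context pointer and the handler names are hypothetical:

```c
#include <signal.h>
#include <string.h>

static struct kconsumerd_local_data *sig_ctx;	/* set once at startup */

static void term_handler(int signo)
{
	/* async-signal-safe: a flag store plus a write(2) to the quit pipe */
	kconsumerd_should_exit(sig_ctx);
}

static void install_term_handler(struct kconsumerd_local_data *ctx)
{
	struct sigaction sa;

	sig_ctx = ctx;
	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = term_handler;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGTERM, &sa, NULL);
	sigaction(SIGINT, &sa, NULL);
}
```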
982 | ||
1ce86c9a JD |
983 | /* |
984 | * kconsumerd_send_error | |
985 | * | |
986 | * send return code to ltt-sessiond | |
987 | */ | |
cb040cc1 | 988 | int kconsumerd_send_error(struct kconsumerd_local_data *ctx, enum lttcomm_return_code cmd) |
1ce86c9a | 989 | { |
cb040cc1 JD |
990 | if (ctx->kconsumerd_error_socket > 0) { |
991 | return lttcomm_send_unix_sock(ctx->kconsumerd_error_socket, &cmd, | |
1ce86c9a JD |
992 | sizeof(enum lttcomm_return_code)); |
993 | } | |
994 | ||
995 | return 0; | |
996 | } |