Remove the timeout on poll
[lttng-tools.git] / kconsumerd / kconsumerd.c
CommitLineData
d4a1283e
JD
1/*
2 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 */
19
20#define _GNU_SOURCE
21#include <fcntl.h>
22#include <getopt.h>
23#include <grp.h>
24#include <limits.h>
25#include <pthread.h>
26#include <signal.h>
27#include <stdio.h>
28#include <stdlib.h>
29#include <string.h>
30#include <sys/ipc.h>
31#include <sys/shm.h>
32#include <sys/socket.h>
33#include <sys/stat.h>
34#include <sys/types.h>
35#include <urcu/list.h>
36#include <poll.h>
37#include <unistd.h>
38
39#include "lttngerr.h"
40#include "libkernelctl.h"
41#include "liblttsessiondcomm.h"
42#include "kconsumerd.h"
43
44/* Init the list of FDs */
45static struct ltt_kconsumerd_fd_list kconsumerd_fd_list = {
46 .head = CDS_LIST_HEAD_INIT(kconsumerd_fd_list.head),
47};
48
49/* Number of element for the list below. */
50static unsigned int fds_count;
51
52/* If the local array of FDs needs update in the poll function */
53static unsigned int update_fd_array = 1;
54
55/* lock the fd array and structures */
56static pthread_mutex_t kconsumerd_lock_fds;
57
58/* the two threads (receive fd and poll) */
59static pthread_t threads[2];
60
61/* communication with splice */
62static int thread_pipe[2];
63
252fd492
JD
64/* pipe to wake the poll thread when necessary */
65static int poll_pipe[2];
66
d4a1283e
JD
67/* socket to communicate errors with sessiond */
68static int error_socket = -1;
69
70/* Argument variables */
71int opt_quiet;
72int opt_verbose;
73static int opt_daemon;
74static const char *progname;
75static char command_sock_path[PATH_MAX]; /* Global command socket path */
76static char error_sock_path[PATH_MAX]; /* Global error path */
77
bcd8d9db
JD
78/*
79 * del_fd
80 *
81 * Remove a fd from the global list protected by a mutex
82 */
83static void del_fd(struct ltt_kconsumerd_fd *lcf)
84{
6aea26bc 85 DBG("Removing %d", lcf->consumerd_fd);
bcd8d9db
JD
86 pthread_mutex_lock(&kconsumerd_lock_fds);
87 cds_list_del(&lcf->list);
88 if (fds_count > 0) {
89 fds_count--;
90 DBG("Removed ltt_kconsumerd_fd");
91 if (lcf != NULL) {
92 close(lcf->out_fd);
93 close(lcf->consumerd_fd);
94 free(lcf);
95 lcf = NULL;
96 }
97 }
98 pthread_mutex_unlock(&kconsumerd_lock_fds);
99}
100
d4a1283e
JD
101/*
102 * cleanup
103 *
104 * Cleanup the daemon's socket on exit
105 */
106static void cleanup()
107{
bcd8d9db
JD
108 struct ltt_kconsumerd_fd *iter;
109
bcd8d9db 110 /* remove the socket file */
d4a1283e 111 unlink(command_sock_path);
bcd8d9db
JD
112
113 /* unblock the threads */
114 WARN("Terminating the threads before exiting");
115 pthread_cancel(threads[0]);
116 pthread_cancel(threads[1]);
117
118 /* close all outfd */
119 cds_list_for_each_entry(iter, &kconsumerd_fd_list.head, list) {
120 del_fd(iter);
121 }
d4a1283e
JD
122}
123
6aea26bc
JD
124/*
125 * send_error
d4a1283e
JD
126 *
127 * send return code to ltt-sessiond
128 */
129static int send_error(enum lttcomm_return_code cmd)
130{
131 if (error_socket > 0) {
132 return lttcomm_send_unix_sock(error_socket, &cmd,
133 sizeof(enum lttcomm_sessiond_command));
134 } else {
135 return 0;
136 }
137}
138
d4a1283e
JD
139/*
140 * add_fd
141 *
142 * Add a fd to the global list protected by a mutex
143 */
144static int add_fd(struct lttcomm_kconsumerd_msg *buf, int consumerd_fd)
145{
146 struct ltt_kconsumerd_fd *tmp_fd;
147 int ret;
148
149 tmp_fd = malloc(sizeof(struct ltt_kconsumerd_fd));
150 tmp_fd->sessiond_fd = buf->fd;
151 tmp_fd->consumerd_fd = consumerd_fd;
152 tmp_fd->state = buf->state;
153 tmp_fd->max_sb_size = buf->max_sb_size;
154 strncpy(tmp_fd->path_name, buf->path_name, PATH_MAX);
155
156 /* Opening the tracefile in write mode */
157 DBG("Opening %s for writing", tmp_fd->path_name);
158 ret = open(tmp_fd->path_name,
159 O_WRONLY|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO);
160 if (ret < 0) {
161 ERR("Opening %s", tmp_fd->path_name);
162 perror("open");
163 goto end;
164 }
165 tmp_fd->out_fd = ret;
166 tmp_fd->out_fd_offset = 0;
167
168 DBG("Adding %s (%d, %d, %d)", tmp_fd->path_name,
169 tmp_fd->sessiond_fd, tmp_fd->consumerd_fd, tmp_fd->out_fd);
170
171 pthread_mutex_lock(&kconsumerd_lock_fds);
172 cds_list_add(&tmp_fd->list, &kconsumerd_fd_list.head);
173 fds_count++;
174 pthread_mutex_unlock(&kconsumerd_lock_fds);
175
176end:
177 return ret;
178}
179
d4a1283e
JD
180
/*
 * sighandler
 *
 * Signal handler for the daemon: tear everything down via cleanup()
 * when a handled signal (SIGINT, SIGTERM, SIGPIPE) arrives.
 */
static void sighandler(int sig)
{
	cleanup();
}
192
193/*
194 * set_signal_handler
195 *
196 * Setup signal handler for :
197 * SIGINT, SIGTERM, SIGPIPE
198 */
199static int set_signal_handler(void)
200{
201 int ret = 0;
202 struct sigaction sa;
203 sigset_t sigset;
204
205 if ((ret = sigemptyset(&sigset)) < 0) {
206 perror("sigemptyset");
207 return ret;
208 }
209
210 sa.sa_handler = sighandler;
211 sa.sa_mask = sigset;
212 sa.sa_flags = 0;
213 if ((ret = sigaction(SIGTERM, &sa, NULL)) < 0) {
214 perror("sigaction");
215 return ret;
216 }
217
218 if ((ret = sigaction(SIGINT, &sa, NULL)) < 0) {
219 perror("sigaction");
220 return ret;
221 }
222
223 if ((ret = sigaction(SIGPIPE, &sa, NULL)) < 0) {
224 perror("sigaction");
225 return ret;
226 }
227
228 return ret;
229}
230
/*
 * on_read_subbuffer
 *
 * Splice len bytes of data from the channel fd to the trace file,
 * relaying through thread_pipe (splice() cannot move data directly
 * between two regular-file/char-device fds).
 *
 * Returns the result of the last splice() call on success.
 * NOTE(review): on splice failure this returns the *positive* errno
 * value (see splice_error below), so a caller testing "ret < 0" will
 * not detect the error; read_subbuffer()'s EAGAIN handling depends on
 * this positive-errno convention -- confirm before changing either side.
 */
static int on_read_subbuffer(struct ltt_kconsumerd_fd *kconsumerd_fd,
		unsigned long len)
{
	long ret = 0;
	loff_t offset = 0;
	/* file offset before this sub-buffer, used for the writeback hints */
	off_t orig_offset = kconsumerd_fd->out_fd_offset;
	int fd = kconsumerd_fd->consumerd_fd;
	int outfd = kconsumerd_fd->out_fd;

	while (len > 0) {
		DBG("splice chan to pipe offset %lu (fd : %d)",
				(unsigned long)offset, fd);
		/* First hop: channel -> pipe. */
		ret = splice(fd, &offset, thread_pipe[1], NULL, len,
				SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("splice chan to pipe ret %ld", ret);
		if (ret < 0) {
			ret = errno;
			perror("Error in relay splice");
			goto splice_error;
		}

		/* Second hop: pipe -> trace file, moving what the first hop produced. */
		ret = splice(thread_pipe[0], NULL, outfd, NULL, ret,
				SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("splice pipe to file %ld", ret);
		if (ret < 0) {
			ret = errno;
			perror("Error in file splice");
			goto splice_error;
		}
		/*
		 * NOTE(review): len is only cleared when the whole request was
		 * written; a short splice loops again with the *original* len,
		 * and ret (long) is compared against len (unsigned long) --
		 * confirm both are intended.
		 */
		if (ret >= len) {
			len = 0;
		}
		/* This won't block, but will start writeout asynchronously */
		sync_file_range(outfd, kconsumerd_fd->out_fd_offset, ret,
				SYNC_FILE_RANGE_WRITE);
		kconsumerd_fd->out_fd_offset += ret;
	}

	/*
	 * This does a blocking write-and-wait on any page that belongs to the
	 * subbuffer prior to the one we just wrote.
	 * Don't care about error values, as these are just hints and ways to
	 * limit the amount of page cache used.
	 */
	if (orig_offset >= kconsumerd_fd->max_sb_size) {
		sync_file_range(outfd, orig_offset - kconsumerd_fd->max_sb_size,
				kconsumerd_fd->max_sb_size,
				SYNC_FILE_RANGE_WAIT_BEFORE
				| SYNC_FILE_RANGE_WRITE
				| SYNC_FILE_RANGE_WAIT_AFTER);
		/*
		 * Give hints to the kernel about how we access the file:
		 * POSIX_FADV_DONTNEED : we won't re-access data in a near
		 * future after we write it.
		 * We need to call fadvise again after the file grows because
		 * the kernel does not seem to apply fadvise to non-existing
		 * parts of the file.
		 * Call fadvise _after_ having waited for the page writeback to
		 * complete because the dirty page writeback semantic is not
		 * well defined. So it can be expected to lead to lower
		 * throughput in streaming.
		 */
		posix_fadvise(outfd, orig_offset - kconsumerd_fd->max_sb_size,
				kconsumerd_fd->max_sb_size, POSIX_FADV_DONTNEED);
	}
	goto end;

splice_error:
	/* send the appropriate error description to sessiond */
	switch(ret) {
	case EBADF:
		send_error(KCONSUMERD_SPLICE_EBADF);
		break;
	case EINVAL:
		send_error(KCONSUMERD_SPLICE_EINVAL);
		break;
	case ENOMEM:
		send_error(KCONSUMERD_SPLICE_ENOMEM);
		break;
	case ESPIPE:
		send_error(KCONSUMERD_SPLICE_ESPIPE);
		break;
	}

end:
	return ret;
}
324
/*
 * read_subbuffer
 *
 * Consume one ready sub-buffer on a channel fd and write it to the
 * trace file: reserve the next sub-buffer, splice it out, then release
 * it even when the splice failed.
 *
 * Return convention (positive errno): returns the result of the splice
 * on success, and the *positive* errno value when a kernctl_* call
 * fails; the poll thread compares the result against EAGAIN directly.
 */
static int read_subbuffer(struct ltt_kconsumerd_fd *kconsumerd_fd)
{
	unsigned long len;
	int err;
	long ret = 0;
	int infd = kconsumerd_fd->consumerd_fd;

	DBG("In read_subbuffer (infd : %d)", infd);
	/* Get the next subbuffer */
	err = kernctl_get_next_subbuf(infd);
	if (err != 0) {
		/* Expected under concurrency: no sub-buffer ready (EAGAIN). */
		ret = errno;
		perror("Reserving sub buffer failed (everything is normal, "
				"it is due to concurrency)");
		goto end;
	}

	/* read the whole subbuffer */
	err = kernctl_get_padded_subbuf_size(infd, &len);
	if (err != 0) {
		ret = errno;
		perror("Getting sub-buffer len failed.");
		goto end;
	}

	/* splice the subbuffer to the tracefile */
	ret = on_read_subbuffer(kconsumerd_fd, len);
	if (ret < 0) {
		/*
		 * display the error but continue processing to try
		 * to release the subbuffer
		 * NOTE(review): on_read_subbuffer() reports splice errors as
		 * *positive* errno values, so this branch looks unreachable --
		 * confirm.
		 */
		ERR("Error splicing to tracefile");
	}

	/* Always release the sub-buffer so the writer can reuse it. */
	err = kernctl_put_next_subbuf(infd);
	if (err != 0) {
		ret = errno;
		if (errno == EFAULT) {
			perror("Error in unreserving sub buffer\n");
		} else if (errno == EIO) {
			/* Should never happen with newer LTTng versions */
			perror("Reader has been pushed by the writer, last sub-buffer corrupted.");
		}
		goto end;
	}

end:
	return ret;
}
380
381/*
382 * change_fd_state
383 *
384 * Update a fd according to what we just received
385 */
386static void change_fd_state(int sessiond_fd,
387 enum lttcomm_kconsumerd_fd_state state)
388{
389 struct ltt_kconsumerd_fd *iter;
390 cds_list_for_each_entry(iter, &kconsumerd_fd_list.head, list) {
391 if (iter->sessiond_fd == sessiond_fd) {
392 iter->state = state;
393 break;
394 }
395 }
396}
397
398/*
399 * consumerd_recv_fd
400 *
401 * Receives an array of file descriptors and the associated
402 * structures describing each fd (path name).
403 * Returns the size of received data
404 */
405static int consumerd_recv_fd(int sfd, int size,
406 enum lttcomm_consumerd_command cmd_type)
407{
408 struct msghdr msg;
409 struct iovec iov[1];
252fd492 410 int ret, i, tmp2;
d4a1283e
JD
411 struct cmsghdr *cmsg;
412 int nb_fd;
413 char tmp[CMSG_SPACE(size)];
414 struct lttcomm_kconsumerd_msg *buf;
415 /* the number of fds we are about to receive */
416 nb_fd = size/sizeof(struct lttcomm_kconsumerd_msg);
417
418 buf = malloc(size);
419
420 memset(&msg, 0, sizeof(msg));
421
422 /* Prepare to receive the structures */
423 iov[0].iov_base = buf;
424 iov[0].iov_len = size;
425 msg.msg_iov = iov;
426 msg.msg_iovlen = 1;
427
428 msg.msg_control = tmp;
429 msg.msg_controllen = sizeof(tmp);
430
431 DBG("Waiting to receive fds");
432 if ((ret = recvmsg(sfd, &msg, 0)) < 0) {
433 perror("recvmsg");
434 }
435 if (ret != size) {
436 ERR("Received only %d, expected %d", ret, size);
437 send_error(KCONSUMERD_ERROR_RECV_FD);
438 goto end;
439 }
440
441 cmsg = CMSG_FIRSTHDR(&msg);
442 if (!cmsg) {
443 ERR("Invalid control message header");
444 ret = -1;
445 send_error(KCONSUMERD_ERROR_RECV_FD);
446 goto end;
447 }
448
449 /* if we received fds */
450 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
451 DBG("Receive : expecting %d fds", nb_fd);
452 for (i = 0; i < nb_fd; i++) {
453 switch (cmd_type) {
454 case LTTCOMM_ADD_STREAM:
455 DBG("add_fd %s (%d)", buf[i].path_name, ((int *)CMSG_DATA(cmsg))[i]);
456 ret = add_fd(&buf[i], ((int *)CMSG_DATA(cmsg))[i]);
457 if (ret < 0) {
458 send_error(KCONSUMERD_OUTFD_ERROR);
459 goto end;
460 }
461 break;
462 case LTTCOMM_UPDATE_STREAM:
463 change_fd_state(buf[i].fd, buf[i].state);
464 break;
465 default:
466 break;
467 }
468 }
469 /* flag to tell the polling thread to update its fd array */
470 update_fd_array = 1;
252fd492
JD
471 /* signal the poll thread */
472 tmp2 = write(poll_pipe[1], "4", 1);
d4a1283e
JD
473 } else {
474 ERR("Didn't received any fd");
475 send_error(KCONSUMERD_ERROR_RECV_FD);
476 ret = -1;
477 goto end;
478 }
479
480end:
481 if (buf != NULL) {
482 free(buf);
483 buf = NULL;
484 }
485 return ret;
486}
487
488/*
489 * thread_receive_fds
490 *
491 * This thread listens on the consumerd socket and
492 * receives the file descriptors from ltt-sessiond
493 */
494static void *thread_receive_fds(void *data)
495{
496 int sock, client_socket, ret;
497 struct lttcomm_kconsumerd_header tmp;
498
499 DBG("Creating command socket %s", command_sock_path);
500 unlink(command_sock_path);
501 client_socket = lttcomm_create_unix_sock(command_sock_path);
502 if (client_socket < 0) {
503 ERR("Cannot create command socket");
504 goto error;
505 }
506
507 ret = lttcomm_listen_unix_sock(client_socket);
508 if (ret < 0) {
509 goto error;
510 }
511
512 DBG("Sending ready command to ltt-sessiond");
513 ret = send_error(KCONSUMERD_COMMAND_SOCK_READY);
514 if (ret < 0) {
515 ERR("Error sending ready command to ltt-sessiond");
516 goto error;
517 }
518
7e8c38c6
JD
519 /* Blocking call, waiting for transmission */
520 sock = lttcomm_accept_unix_sock(client_socket);
521 if (sock <= 0) {
522 WARN("On accept, retrying");
523 goto error;
524 }
d4a1283e 525 while (1) {
d4a1283e
JD
526 /* We first get the number of fd we are about to receive */
527 ret = lttcomm_recv_unix_sock(sock, &tmp,
528 sizeof(struct lttcomm_kconsumerd_header));
6aea26bc 529 if (ret <= 0) {
bcd8d9db
JD
530 ERR("Receiving the lttcomm_kconsumerd_header, exiting");
531 goto error;
d4a1283e
JD
532 }
533 ret = consumerd_recv_fd(sock, tmp.payload_size, tmp.cmd_type);
6aea26bc 534 if (ret <= 0) {
bcd8d9db
JD
535 ERR("Receiving the FD, exiting");
536 goto error;
d4a1283e
JD
537 }
538 }
539
540error:
541 return NULL;
542}
543
/*
 * update_poll_array
 *
 * Allocate the pollfd structure and the local view of the out fds
 * to avoid doing a lookup in the linked list and concurrency issues
 * when writing is needed.
 * Returns the number of real fds placed in the structures (the
 * poll_pipe entry appended at index i is not counted), or -ENOMEM.
 */
static int update_poll_array(struct pollfd **pollfd,
		struct ltt_kconsumerd_fd **local_kconsumerd_fd)
{
	struct ltt_kconsumerd_fd *iter;
	int i = 0;

	/* Drop the previous arrays before rebuilding them. */
	if (*pollfd != NULL) {
		free(*pollfd);
		*pollfd = NULL;
	}

	if (*local_kconsumerd_fd != NULL) {
		free(*local_kconsumerd_fd);
		*local_kconsumerd_fd = NULL;
	}

	/* allocate for all fds + 1 for the poll_pipe */
	*pollfd = malloc((fds_count + 1) * sizeof(struct pollfd));
	if (*pollfd == NULL) {
		perror("pollfd malloc");
		goto error_mem;
	}

	/* allocate for all fds + 1 for the poll_pipe */
	*local_kconsumerd_fd = malloc((fds_count + 1) * sizeof(struct ltt_kconsumerd_fd));
	if (*local_kconsumerd_fd == NULL) {
		perror("local_kconsumerd_fd malloc");
		goto error_mem;
	}

	DBG("Updating poll fd array");
	pthread_mutex_lock(&kconsumerd_lock_fds);

	cds_list_for_each_entry(iter, &kconsumerd_fd_list.head, list) {
		DBG("Inside for each");
		if (iter->state == ACTIVE_FD) {
			DBG("Active FD %d", iter->consumerd_fd);
			(*pollfd)[i].fd = iter->consumerd_fd;
			(*pollfd)[i].events = POLLIN | POLLPRI;
			/*
			 * NOTE(review): this stores iter into the i-th slot of
			 * the *argument* pointer (local_kconsumerd_fd[i]), not
			 * into the buffer allocated just above
			 * ((*local_kconsumerd_fd)[i]). The caller passes a
			 * single-element allocation, so for i > 0 this writes
			 * past it -- confirm against thread_poll_fds' usage.
			 */
			local_kconsumerd_fd[i] = iter;
			i++;
		} else if (iter->state == DELETE_FD) {
			/*
			 * NOTE(review): del_fd() unlinks the node this loop is
			 * standing on and re-acquires kconsumerd_lock_fds,
			 * which we already hold here -- potential deadlock and
			 * iteration-after-free hazard; confirm.
			 */
			del_fd(iter);
		}
	}
	/*
	 * insert the poll_pipe at the end of the array and don't increment i
	 * so nb_fd is the number of real FD
	 */
	(*pollfd)[i].fd = poll_pipe[0];
	(*pollfd)[i].events = POLLIN;

	/* Rebuild done: clear the flag while still holding the lock. */
	update_fd_array = 0;
	pthread_mutex_unlock(&kconsumerd_lock_fds);
	return i;

error_mem:
	return -ENOMEM;
}
611
/*
 * thread_poll_fds
 *
 * This thread polls the fds in the ltt_fd_list to consume the data
 * and write it to tracefile if necessary. It blocks in poll() with no
 * timeout and is woken either by buffer activity or by a write on
 * poll_pipe when the fd list changed.
 */
static void *thread_poll_fds(void *data)
{
	int num_rdy, num_hup, high_prio, ret, i;
	struct pollfd *pollfd = NULL;
	/* local view of the fds */
	struct ltt_kconsumerd_fd **local_kconsumerd_fd = NULL;
	/* local view of fds_count */
	int nb_fd = 0;
	/* scratch byte used to drain the poll_pipe wakeup */
	char tmp;
	int tmp2;

	/* Pipe used internally by on_read_subbuffer() to relay splice data. */
	ret = pipe(thread_pipe);
	if (ret < 0) {
		perror("Error creating pipe");
		goto end;
	}

	/*
	 * NOTE(review): allocates sizeof(struct ltt_kconsumerd_fd) bytes,
	 * but the buffer is indexed below as an array of *pointers*
	 * (local_kconsumerd_fd[i]) and is never resized when more fds
	 * arrive; looks undersized/mistyped -- confirm together with
	 * update_poll_array(), which writes the slots.
	 */
	local_kconsumerd_fd = malloc(sizeof(struct ltt_kconsumerd_fd));

	while (1) {
		high_prio = 0;
		num_hup = 0;

		/*
		 * the ltt_fd_list has been updated, we need to update our
		 * local array as well
		 */
		if (update_fd_array) {
			ret = update_poll_array(&pollfd, local_kconsumerd_fd);
			if (ret < 0) {
				ERR("Error in allocating pollfd or local_outfds");
				send_error(KCONSUMERD_POLL_ERROR);
				goto end;
			}
			nb_fd = ret;
		}

		/* poll on the array of fds; -1 timeout blocks indefinitely */
		DBG("polling on %d fd", nb_fd + 1);
		num_rdy = poll(pollfd, nb_fd + 1, -1);
		DBG("poll num_rdy : %d", num_rdy);
		if (num_rdy == -1) {
			perror("Poll error");
			send_error(KCONSUMERD_POLL_ERROR);
			goto end;
		}

		/*
		 * if only the poll_pipe triggered poll to return just return to the
		 * beginning of the loop to update the array
		 */
		if (num_rdy == 1 && pollfd[nb_fd].revents == POLLIN) {
			DBG("poll_pipe wake up");
			tmp2 = read(poll_pipe[0], &tmp, 1);
			continue;
		}

		/* Take care of high priority channels first. */
		for (i = 0; i < nb_fd; i++) {
			switch(pollfd[i].revents) {
			case POLLERR:
				ERR("Error returned in polling fd %d.", pollfd[i].fd);
				num_hup++;
				send_error(KCONSUMERD_POLL_ERROR);
				break;
			case POLLHUP:
				ERR("Polling fd %d tells it has hung up.", pollfd[i].fd);
				num_hup++;
				break;
			case POLLNVAL:
				ERR("Polling fd %d tells fd is not open.", pollfd[i].fd);
				send_error(KCONSUMERD_POLL_NVAL);
				num_hup++;
				break;
			case POLLPRI:
				DBG("Urgent read on fd %d", pollfd[i].fd);
				high_prio = 1;
				ret = read_subbuffer(local_kconsumerd_fd[i]);
				/* it's ok to have an unavailable sub-buffer (FIXME : is it ?) */
				if (ret == EAGAIN) {
					ret = 0;
				}
				break;
			}
		}

		/* If every buffer FD has hung up, we end the read loop here */
		if (nb_fd > 0 && num_hup == nb_fd) {
			DBG("every buffer FD has hung up\n");
			send_error(KCONSUMERD_POLL_HUP);
			goto end;
		}

		/* Take care of low priority channels. */
		if (!high_prio) {
			for (i = 0; i < nb_fd; i++) {
				switch(pollfd[i].revents) {
				case POLLIN:
					DBG("Normal read on fd %d", pollfd[i].fd);
					ret = read_subbuffer(local_kconsumerd_fd[i]);
					/* it's ok to have an unavailable subbuffer (FIXME : is it ?) */
					if (ret == EAGAIN) {
						ret = 0;
					}
					break;
				}
			}
		}
	}
end:
	if (pollfd != NULL) {
		free(pollfd);
		pollfd = NULL;
	}
	if (local_kconsumerd_fd != NULL) {
		free(local_kconsumerd_fd);
		local_kconsumerd_fd = NULL;
	}
	/* Tear the whole daemon down when the poll loop ends. */
	cleanup();
	return NULL;
}
739
740/*
741 * usage function on stderr
742 */
743static void usage(void)
744{
745 fprintf(stderr, "Usage: %s OPTIONS\n\nOptions:\n", progname);
746 fprintf(stderr, " -h, --help "
747 "Display this usage.\n");
748 fprintf(stderr, " -c, --kconsumerd-cmd-sock PATH "
749 "Specify path for the command socket\n");
750 fprintf(stderr, " -e, --kconsumerd-err-sock PATH "
751 "Specify path for the error socket\n");
752 fprintf(stderr, " -d, --daemonize "
753 "Start as a daemon.\n");
754 fprintf(stderr, " -q, --quiet "
755 "No output at all.\n");
756 fprintf(stderr, " -v, --verbose "
757 "Verbose mode. Activate DBG() macro.\n");
758 fprintf(stderr, " -V, --version "
759 "Show version number.\n");
760}
761
762/*
763 * daemon argument parsing
764 */
765static void parse_args(int argc, char **argv)
766{
767 int c;
768
769 static struct option long_options[] = {
770 { "kconsumerd-cmd-sock", 1, 0, 'c' },
771 { "kconsumerd-err-sock", 1, 0, 'e' },
772 { "daemonize", 0, 0, 'd' },
773 { "help", 0, 0, 'h' },
774 { "quiet", 0, 0, 'q' },
775 { "verbose", 0, 0, 'v' },
776 { "version", 0, 0, 'V' },
777 { NULL, 0, 0, 0 }
778 };
779
780 while (1) {
781 int option_index = 0;
782 c = getopt_long(argc, argv, "dhqvV" "c:e:", long_options, &option_index);
783 if (c == -1) {
784 break;
785 }
786
787 switch (c) {
788 case 0:
789 fprintf(stderr, "option %s", long_options[option_index].name);
790 if (optarg) {
791 fprintf(stderr, " with arg %s\n", optarg);
792 }
793 break;
794 case 'c':
795 snprintf(command_sock_path, PATH_MAX, "%s", optarg);
796 break;
797 case 'e':
798 snprintf(error_sock_path, PATH_MAX, "%s", optarg);
799 break;
800 case 'd':
801 opt_daemon = 1;
802 break;
803 case 'h':
804 usage();
805 exit(EXIT_FAILURE);
806 case 'q':
807 opt_quiet = 1;
808 break;
809 case 'v':
810 opt_verbose = 1;
811 break;
812 case 'V':
813 fprintf(stdout, "%s\n", VERSION);
814 exit(EXIT_SUCCESS);
815 default:
816 usage();
817 exit(EXIT_FAILURE);
818 }
819 }
820}
821
822
823/*
824 * main
825 */
826int main(int argc, char **argv)
827{
828 int i;
829 int ret = 0;
830 void *status;
831
832 /* Parse arguments */
833 progname = argv[0];
834 parse_args(argc, argv);
835
836 /* Daemonize */
837 if (opt_daemon) {
838 ret = daemon(0, 0);
839 if (ret < 0) {
840 perror("daemon");
841 goto error;
842 }
843 }
844
845 if (strlen(command_sock_path) == 0) {
846 snprintf(command_sock_path, PATH_MAX,
847 KCONSUMERD_CMD_SOCK_PATH);
848 }
849 if (strlen(error_sock_path) == 0) {
850 snprintf(error_sock_path, PATH_MAX,
851 KCONSUMERD_ERR_SOCK_PATH);
852 }
853
854 if (set_signal_handler() < 0) {
855 goto error;
856 }
857
252fd492
JD
858 /* create the pipe to wake to polling thread when needed */
859 ret = pipe(poll_pipe);
860 if (ret < 0) {
861 perror("Error creating poll pipe");
862 goto end;
863 }
864
d4a1283e
JD
865 /* Connect to the socket created by ltt-sessiond to report errors */
866 DBG("Connecting to error socket %s", error_sock_path);
867 error_socket = lttcomm_connect_unix_sock(error_sock_path);
868 /* not a fatal error, but all communication with ltt-sessiond will fail */
869 if (error_socket < 0) {
870 WARN("Cannot connect to error socket, is ltt-sessiond started ?");
871 }
872
873 /* Create the thread to manage the receive of fd */
874 ret = pthread_create(&threads[0], NULL, thread_receive_fds, (void *) NULL);
875 if (ret != 0) {
876 perror("pthread_create");
877 goto error;
878 }
879
880 /* Create thread to manage the polling/writing of traces */
881 ret = pthread_create(&threads[1], NULL, thread_poll_fds, (void *) NULL);
882 if (ret != 0) {
883 perror("pthread_create");
884 goto error;
885 }
886
887 for (i = 0; i < 2; i++) {
888 ret = pthread_join(threads[i], &status);
889 if (ret != 0) {
890 perror("pthread_join");
891 goto error;
892 }
893 }
894 ret = EXIT_SUCCESS;
895 send_error(KCONSUMERD_EXIT_SUCCESS);
896 goto end;
897
898error:
899 ret = EXIT_FAILURE;
900 send_error(KCONSUMERD_EXIT_FAILURE);
901
902end:
903 cleanup();
904
905 return ret;
906}
This page took 0.090393 seconds and 4 git commands to generate.