Ignore first SIGINT
[lttng-tools.git] / kconsumerd / kconsumerd.c
CommitLineData
d4a1283e
JD
1/*
2 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 */
19
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <grp.h>
#include <limits.h>
#include <poll.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <urcu/list.h>

#include "lttngerr.h"
#include "libkernelctl.h"
#include "liblttsessiondcomm.h"
#include "kconsumerd.h"
43
44/* Init the list of FDs */
45static struct ltt_kconsumerd_fd_list kconsumerd_fd_list = {
46 .head = CDS_LIST_HEAD_INIT(kconsumerd_fd_list.head),
47};
48
49/* Number of element for the list below. */
50static unsigned int fds_count;
51
52/* If the local array of FDs needs update in the poll function */
53static unsigned int update_fd_array = 1;
54
55/* lock the fd array and structures */
56static pthread_mutex_t kconsumerd_lock_fds;
57
58/* the two threads (receive fd and poll) */
59static pthread_t threads[2];
60
61/* communication with splice */
62static int thread_pipe[2];
63
252fd492
JD
64/* pipe to wake the poll thread when necessary */
65static int poll_pipe[2];
66
d4a1283e
JD
67/* socket to communicate errors with sessiond */
68static int error_socket = -1;
69
13e44745
JD
70/* to count the number of time the user pressed ctrl+c */
71static int sigintcount = 0;
72
d4a1283e
JD
73/* Argument variables */
74int opt_quiet;
75int opt_verbose;
76static int opt_daemon;
77static const char *progname;
78static char command_sock_path[PATH_MAX]; /* Global command socket path */
79static char error_sock_path[PATH_MAX]; /* Global error path */
80
bcd8d9db
JD
81/*
82 * del_fd
83 *
84 * Remove a fd from the global list protected by a mutex
85 */
86static void del_fd(struct ltt_kconsumerd_fd *lcf)
87{
6aea26bc 88 DBG("Removing %d", lcf->consumerd_fd);
bcd8d9db
JD
89 pthread_mutex_lock(&kconsumerd_lock_fds);
90 cds_list_del(&lcf->list);
91 if (fds_count > 0) {
92 fds_count--;
93 DBG("Removed ltt_kconsumerd_fd");
94 if (lcf != NULL) {
95 close(lcf->out_fd);
96 close(lcf->consumerd_fd);
97 free(lcf);
98 lcf = NULL;
99 }
100 }
101 pthread_mutex_unlock(&kconsumerd_lock_fds);
102}
103
d4a1283e
JD
104/*
105 * cleanup
106 *
107 * Cleanup the daemon's socket on exit
108 */
109static void cleanup()
110{
bcd8d9db
JD
111 struct ltt_kconsumerd_fd *iter;
112
bcd8d9db 113 /* remove the socket file */
d4a1283e 114 unlink(command_sock_path);
bcd8d9db
JD
115
116 /* unblock the threads */
117 WARN("Terminating the threads before exiting");
118 pthread_cancel(threads[0]);
119 pthread_cancel(threads[1]);
120
121 /* close all outfd */
122 cds_list_for_each_entry(iter, &kconsumerd_fd_list.head, list) {
123 del_fd(iter);
124 }
d4a1283e
JD
125}
126
6aea26bc
JD
127/*
128 * send_error
d4a1283e
JD
129 *
130 * send return code to ltt-sessiond
131 */
132static int send_error(enum lttcomm_return_code cmd)
133{
134 if (error_socket > 0) {
135 return lttcomm_send_unix_sock(error_socket, &cmd,
136 sizeof(enum lttcomm_sessiond_command));
137 } else {
138 return 0;
139 }
140}
141
d4a1283e
JD
142/*
143 * add_fd
144 *
145 * Add a fd to the global list protected by a mutex
146 */
147static int add_fd(struct lttcomm_kconsumerd_msg *buf, int consumerd_fd)
148{
149 struct ltt_kconsumerd_fd *tmp_fd;
150 int ret;
151
152 tmp_fd = malloc(sizeof(struct ltt_kconsumerd_fd));
153 tmp_fd->sessiond_fd = buf->fd;
154 tmp_fd->consumerd_fd = consumerd_fd;
155 tmp_fd->state = buf->state;
156 tmp_fd->max_sb_size = buf->max_sb_size;
157 strncpy(tmp_fd->path_name, buf->path_name, PATH_MAX);
158
159 /* Opening the tracefile in write mode */
160 DBG("Opening %s for writing", tmp_fd->path_name);
161 ret = open(tmp_fd->path_name,
46258765 162 O_WRONLY|O_CREAT|O_TRUNC, S_IRWXU|S_IRWXG|S_IRWXO);
d4a1283e
JD
163 if (ret < 0) {
164 ERR("Opening %s", tmp_fd->path_name);
165 perror("open");
166 goto end;
167 }
168 tmp_fd->out_fd = ret;
169 tmp_fd->out_fd_offset = 0;
170
171 DBG("Adding %s (%d, %d, %d)", tmp_fd->path_name,
172 tmp_fd->sessiond_fd, tmp_fd->consumerd_fd, tmp_fd->out_fd);
173
174 pthread_mutex_lock(&kconsumerd_lock_fds);
175 cds_list_add(&tmp_fd->list, &kconsumerd_fd_list.head);
176 fds_count++;
177 pthread_mutex_unlock(&kconsumerd_lock_fds);
178
179end:
180 return ret;
181}
182
d4a1283e
JD
183
184/*
185 * sighandler
186 *
187 * Signal handler for the daemon
188 */
189static void sighandler(int sig)
190{
13e44745
JD
191 if (sig == SIGINT && sigintcount++ == 0) {
192 DBG("ignoring first SIGINT");
193 return;
194 }
195
d4a1283e
JD
196 cleanup();
197
198 return;
199}
200
201/*
202 * set_signal_handler
203 *
204 * Setup signal handler for :
205 * SIGINT, SIGTERM, SIGPIPE
206 */
207static int set_signal_handler(void)
208{
209 int ret = 0;
210 struct sigaction sa;
211 sigset_t sigset;
212
213 if ((ret = sigemptyset(&sigset)) < 0) {
214 perror("sigemptyset");
215 return ret;
216 }
217
218 sa.sa_handler = sighandler;
219 sa.sa_mask = sigset;
220 sa.sa_flags = 0;
221 if ((ret = sigaction(SIGTERM, &sa, NULL)) < 0) {
222 perror("sigaction");
223 return ret;
224 }
225
226 if ((ret = sigaction(SIGINT, &sa, NULL)) < 0) {
227 perror("sigaction");
228 return ret;
229 }
230
231 if ((ret = sigaction(SIGPIPE, &sa, NULL)) < 0) {
232 perror("sigaction");
233 return ret;
234 }
235
236 return ret;
237}
238
239/*
240 * on_read_subbuffer
241 *
242 * Splice the data from the ring buffer to the tracefile.
243 * Returns the number of bytes spliced
244 */
245static int on_read_subbuffer(struct ltt_kconsumerd_fd *kconsumerd_fd,
246 unsigned long len)
247{
248 long ret = 0;
249 loff_t offset = 0;
250 off_t orig_offset = kconsumerd_fd->out_fd_offset;
251 int fd = kconsumerd_fd->consumerd_fd;
252 int outfd = kconsumerd_fd->out_fd;
253
254 while (len > 0) {
255 DBG("splice chan to pipe offset %lu (fd : %d)",
256 (unsigned long)offset, fd);
257 ret = splice(fd, &offset, thread_pipe[1], NULL, len,
258 SPLICE_F_MOVE | SPLICE_F_MORE);
259 DBG("splice chan to pipe ret %ld", ret);
260 if (ret < 0) {
0632499a 261 ret = errno;
d4a1283e 262 perror("Error in relay splice");
0632499a 263 goto splice_error;
d4a1283e
JD
264 }
265
266 ret = splice(thread_pipe[0], NULL, outfd, NULL, ret,
267 SPLICE_F_MOVE | SPLICE_F_MORE);
268 DBG("splice pipe to file %ld", ret);
269 if (ret < 0) {
0632499a 270 ret = errno;
d4a1283e 271 perror("Error in file splice");
0632499a 272 goto splice_error;
d4a1283e
JD
273 }
274 if (ret >= len) {
275 len = 0;
276 }
277 /* This won't block, but will start writeout asynchronously */
278 sync_file_range(outfd, kconsumerd_fd->out_fd_offset, ret,
279 SYNC_FILE_RANGE_WRITE);
280 kconsumerd_fd->out_fd_offset += ret;
281 }
0632499a 282
d4a1283e
JD
283 /*
284 * This does a blocking write-and-wait on any page that belongs to the
285 * subbuffer prior to the one we just wrote.
286 * Don't care about error values, as these are just hints and ways to
287 * limit the amount of page cache used.
288 */
289 if (orig_offset >= kconsumerd_fd->max_sb_size) {
290 sync_file_range(outfd, orig_offset - kconsumerd_fd->max_sb_size,
291 kconsumerd_fd->max_sb_size,
292 SYNC_FILE_RANGE_WAIT_BEFORE
293 | SYNC_FILE_RANGE_WRITE
294 | SYNC_FILE_RANGE_WAIT_AFTER);
295 /*
296 * Give hints to the kernel about how we access the file:
297 * POSIX_FADV_DONTNEED : we won't re-access data in a near
298 * future after we write it.
299 * We need to call fadvise again after the file grows because
300 * the kernel does not seem to apply fadvise to non-existing
301 * parts of the file.
302 * Call fadvise _after_ having waited for the page writeback to
303 * complete because the dirty page writeback semantic is not
304 * well defined. So it can be expected to lead to lower
305 * throughput in streaming.
306 */
307 posix_fadvise(outfd, orig_offset - kconsumerd_fd->max_sb_size,
308 kconsumerd_fd->max_sb_size, POSIX_FADV_DONTNEED);
309 }
0632499a
JD
310 goto end;
311
312splice_error:
313 /* send the appropriate error description to sessiond */
314 switch(ret) {
315 case EBADF:
316 send_error(KCONSUMERD_SPLICE_EBADF);
317 break;
318 case EINVAL:
319 send_error(KCONSUMERD_SPLICE_EINVAL);
320 break;
321 case ENOMEM:
322 send_error(KCONSUMERD_SPLICE_ENOMEM);
323 break;
324 case ESPIPE:
325 send_error(KCONSUMERD_SPLICE_ESPIPE);
326 break;
327 }
328
329end:
d4a1283e
JD
330 return ret;
331}
332
333/*
334 * read_subbuffer
335 *
336 * Consume data on a file descriptor and write it on a trace file
337 */
338static int read_subbuffer(struct ltt_kconsumerd_fd *kconsumerd_fd)
339{
340 unsigned long len;
341 int err;
342 long ret = 0;
343 int infd = kconsumerd_fd->consumerd_fd;
344
6aea26bc 345 DBG("In read_subbuffer (infd : %d)", infd);
d4a1283e
JD
346 /* Get the next subbuffer */
347 err = kernctl_get_next_subbuf(infd);
348 if (err != 0) {
349 ret = errno;
350 perror("Reserving sub buffer failed (everything is normal, "
351 "it is due to concurrency)");
352 goto end;
353 }
354
355 /* read the whole subbuffer */
356 err = kernctl_get_padded_subbuf_size(infd, &len);
357 if (err != 0) {
358 ret = errno;
359 perror("Getting sub-buffer len failed.");
360 goto end;
361 }
362
363 /* splice the subbuffer to the tracefile */
364 ret = on_read_subbuffer(kconsumerd_fd, len);
365 if (ret < 0) {
366 /*
367 * display the error but continue processing to try
368 * to release the subbuffer
369 */
370 ERR("Error splicing to tracefile");
371 }
372
373 err = kernctl_put_next_subbuf(infd);
374 if (err != 0) {
375 ret = errno;
376 if (errno == EFAULT) {
377 perror("Error in unreserving sub buffer\n");
378 } else if (errno == EIO) {
379 /* Should never happen with newer LTTng versions */
380 perror("Reader has been pushed by the writer, last sub-buffer corrupted.");
381 }
382 goto end;
383 }
384
385end:
386 return ret;
387}
388
389/*
390 * change_fd_state
391 *
392 * Update a fd according to what we just received
393 */
394static void change_fd_state(int sessiond_fd,
395 enum lttcomm_kconsumerd_fd_state state)
396{
397 struct ltt_kconsumerd_fd *iter;
398 cds_list_for_each_entry(iter, &kconsumerd_fd_list.head, list) {
399 if (iter->sessiond_fd == sessiond_fd) {
400 iter->state = state;
401 break;
402 }
403 }
404}
405
406/*
407 * consumerd_recv_fd
408 *
409 * Receives an array of file descriptors and the associated
410 * structures describing each fd (path name).
411 * Returns the size of received data
412 */
413static int consumerd_recv_fd(int sfd, int size,
414 enum lttcomm_consumerd_command cmd_type)
415{
416 struct msghdr msg;
417 struct iovec iov[1];
252fd492 418 int ret, i, tmp2;
d4a1283e
JD
419 struct cmsghdr *cmsg;
420 int nb_fd;
421 char tmp[CMSG_SPACE(size)];
422 struct lttcomm_kconsumerd_msg *buf;
423 /* the number of fds we are about to receive */
424 nb_fd = size/sizeof(struct lttcomm_kconsumerd_msg);
425
426 buf = malloc(size);
427
428 memset(&msg, 0, sizeof(msg));
429
430 /* Prepare to receive the structures */
431 iov[0].iov_base = buf;
432 iov[0].iov_len = size;
433 msg.msg_iov = iov;
434 msg.msg_iovlen = 1;
435
436 msg.msg_control = tmp;
437 msg.msg_controllen = sizeof(tmp);
438
439 DBG("Waiting to receive fds");
440 if ((ret = recvmsg(sfd, &msg, 0)) < 0) {
441 perror("recvmsg");
442 }
443 if (ret != size) {
444 ERR("Received only %d, expected %d", ret, size);
445 send_error(KCONSUMERD_ERROR_RECV_FD);
446 goto end;
447 }
448
449 cmsg = CMSG_FIRSTHDR(&msg);
450 if (!cmsg) {
451 ERR("Invalid control message header");
452 ret = -1;
453 send_error(KCONSUMERD_ERROR_RECV_FD);
454 goto end;
455 }
456
457 /* if we received fds */
458 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
459 DBG("Receive : expecting %d fds", nb_fd);
460 for (i = 0; i < nb_fd; i++) {
461 switch (cmd_type) {
462 case LTTCOMM_ADD_STREAM:
463 DBG("add_fd %s (%d)", buf[i].path_name, ((int *)CMSG_DATA(cmsg))[i]);
464 ret = add_fd(&buf[i], ((int *)CMSG_DATA(cmsg))[i]);
465 if (ret < 0) {
466 send_error(KCONSUMERD_OUTFD_ERROR);
467 goto end;
468 }
469 break;
470 case LTTCOMM_UPDATE_STREAM:
471 change_fd_state(buf[i].fd, buf[i].state);
472 break;
473 default:
474 break;
475 }
476 }
477 /* flag to tell the polling thread to update its fd array */
478 update_fd_array = 1;
252fd492
JD
479 /* signal the poll thread */
480 tmp2 = write(poll_pipe[1], "4", 1);
d4a1283e
JD
481 } else {
482 ERR("Didn't received any fd");
483 send_error(KCONSUMERD_ERROR_RECV_FD);
484 ret = -1;
485 goto end;
486 }
487
488end:
489 if (buf != NULL) {
490 free(buf);
491 buf = NULL;
492 }
493 return ret;
494}
495
496/*
497 * thread_receive_fds
498 *
499 * This thread listens on the consumerd socket and
500 * receives the file descriptors from ltt-sessiond
501 */
502static void *thread_receive_fds(void *data)
503{
504 int sock, client_socket, ret;
505 struct lttcomm_kconsumerd_header tmp;
506
507 DBG("Creating command socket %s", command_sock_path);
508 unlink(command_sock_path);
509 client_socket = lttcomm_create_unix_sock(command_sock_path);
510 if (client_socket < 0) {
511 ERR("Cannot create command socket");
512 goto error;
513 }
514
515 ret = lttcomm_listen_unix_sock(client_socket);
516 if (ret < 0) {
517 goto error;
518 }
519
520 DBG("Sending ready command to ltt-sessiond");
521 ret = send_error(KCONSUMERD_COMMAND_SOCK_READY);
522 if (ret < 0) {
523 ERR("Error sending ready command to ltt-sessiond");
524 goto error;
525 }
526
7e8c38c6
JD
527 /* Blocking call, waiting for transmission */
528 sock = lttcomm_accept_unix_sock(client_socket);
529 if (sock <= 0) {
4abc0780 530 WARN("On accept");
7e8c38c6
JD
531 goto error;
532 }
d4a1283e 533 while (1) {
d4a1283e
JD
534 /* We first get the number of fd we are about to receive */
535 ret = lttcomm_recv_unix_sock(sock, &tmp,
536 sizeof(struct lttcomm_kconsumerd_header));
6aea26bc 537 if (ret <= 0) {
bcd8d9db
JD
538 ERR("Receiving the lttcomm_kconsumerd_header, exiting");
539 goto error;
d4a1283e
JD
540 }
541 ret = consumerd_recv_fd(sock, tmp.payload_size, tmp.cmd_type);
6aea26bc 542 if (ret <= 0) {
bcd8d9db
JD
543 ERR("Receiving the FD, exiting");
544 goto error;
d4a1283e
JD
545 }
546 }
547
548error:
549 return NULL;
550}
551
552/*
553 * update_poll_array
554 *
555 * Allocate the pollfd structure and the local view of the out fds
556 * to avoid doing a lookup in the linked list and concurrency issues
557 * when writing is needed.
558 * Returns the number of fds in the structures
559 */
560static int update_poll_array(struct pollfd **pollfd,
561 struct ltt_kconsumerd_fd **local_kconsumerd_fd)
562{
563 struct ltt_kconsumerd_fd *iter;
564 int i = 0;
565
d4a1283e
JD
566
567 DBG("Updating poll fd array");
568 pthread_mutex_lock(&kconsumerd_lock_fds);
569
570 cds_list_for_each_entry(iter, &kconsumerd_fd_list.head, list) {
571 DBG("Inside for each");
572 if (iter->state == ACTIVE_FD) {
573 DBG("Active FD %d", iter->consumerd_fd);
1b686c3f
JD
574 (*pollfd)[i].fd = iter->consumerd_fd;
575 (*pollfd)[i].events = POLLIN | POLLPRI;
d4a1283e
JD
576 local_kconsumerd_fd[i] = iter;
577 i++;
578 } else if (iter->state == DELETE_FD) {
579 del_fd(iter);
580 }
581 }
252fd492
JD
582 /*
583 * insert the poll_pipe at the end of the array and don't increment i
584 * so nb_fd is the number of real FD
585 */
586 (*pollfd)[i].fd = poll_pipe[0];
587 (*pollfd)[i].events = POLLIN;
588
d4a1283e
JD
589 update_fd_array = 0;
590 pthread_mutex_unlock(&kconsumerd_lock_fds);
591 return i;
592
d4a1283e
JD
593}
594
595/*
596 * thread_poll_fds
597 *
598 * This thread polls the fds in the ltt_fd_list to consume the data
599 * and write it to tracefile if necessary.
600 */
601static void *thread_poll_fds(void *data)
602{
603 int num_rdy, num_hup, high_prio, ret, i;
604 struct pollfd *pollfd = NULL;
605 /* local view of the fds */
6aea26bc 606 struct ltt_kconsumerd_fd **local_kconsumerd_fd = NULL;
d4a1283e
JD
607 /* local view of fds_count */
608 int nb_fd = 0;
252fd492
JD
609 char tmp;
610 int tmp2;
d4a1283e
JD
611
612 ret = pipe(thread_pipe);
613 if (ret < 0) {
614 perror("Error creating pipe");
615 goto end;
616 }
617
6aea26bc
JD
618 local_kconsumerd_fd = malloc(sizeof(struct ltt_kconsumerd_fd));
619
d4a1283e
JD
620 while (1) {
621 high_prio = 0;
622 num_hup = 0;
623
624 /*
625 * the ltt_fd_list has been updated, we need to update our
626 * local array as well
627 */
628 if (update_fd_array) {
4abc0780
JD
629 if (pollfd != NULL) {
630 free(pollfd);
631 pollfd = NULL;
632 }
633 if (local_kconsumerd_fd != NULL) {
634 free(local_kconsumerd_fd);
635 local_kconsumerd_fd = NULL;
636 }
637 /* allocate for all fds + 1 for the poll_pipe */
638 pollfd = malloc((fds_count + 1) * sizeof(struct pollfd));
639 if (pollfd == NULL) {
640 perror("pollfd malloc");
641 goto end;
642 }
643 /* allocate for all fds + 1 for the poll_pipe */
644 local_kconsumerd_fd = malloc((fds_count + 1) * sizeof(struct ltt_kconsumerd_fd));
645 if (local_kconsumerd_fd == NULL) {
646 perror("local_kconsumerd_fd malloc");
647 goto end;
648 }
649
6aea26bc 650 ret = update_poll_array(&pollfd, local_kconsumerd_fd);
d4a1283e
JD
651 if (ret < 0) {
652 ERR("Error in allocating pollfd or local_outfds");
653 send_error(KCONSUMERD_POLL_ERROR);
654 goto end;
655 }
656 nb_fd = ret;
657 }
658
659 /* poll on the array of fds */
252fd492
JD
660 DBG("polling on %d fd", nb_fd + 1);
661 num_rdy = poll(pollfd, nb_fd + 1, -1);
d4a1283e
JD
662 DBG("poll num_rdy : %d", num_rdy);
663 if (num_rdy == -1) {
664 perror("Poll error");
665 send_error(KCONSUMERD_POLL_ERROR);
666 goto end;
667 }
668
252fd492
JD
669 /*
670 * if only the poll_pipe triggered poll to return just return to the
671 * beginning of the loop to update the array
672 */
673 if (num_rdy == 1 && pollfd[nb_fd].revents == POLLIN) {
674 DBG("poll_pipe wake up");
675 tmp2 = read(poll_pipe[0], &tmp, 1);
676 continue;
677 }
678
d4a1283e
JD
679 /* Take care of high priority channels first. */
680 for (i = 0; i < nb_fd; i++) {
681 switch(pollfd[i].revents) {
682 case POLLERR:
683 ERR("Error returned in polling fd %d.", pollfd[i].fd);
684 num_hup++;
685 send_error(KCONSUMERD_POLL_ERROR);
686 break;
687 case POLLHUP:
688 ERR("Polling fd %d tells it has hung up.", pollfd[i].fd);
689 num_hup++;
690 break;
691 case POLLNVAL:
692 ERR("Polling fd %d tells fd is not open.", pollfd[i].fd);
693 send_error(KCONSUMERD_POLL_NVAL);
694 num_hup++;
695 break;
696 case POLLPRI:
697 DBG("Urgent read on fd %d", pollfd[i].fd);
698 high_prio = 1;
6aea26bc 699 ret = read_subbuffer(local_kconsumerd_fd[i]);
d4a1283e
JD
700 /* it's ok to have an unavailable sub-buffer (FIXME : is it ?) */
701 if (ret == EAGAIN) {
702 ret = 0;
703 }
704 break;
705 }
706 }
707
708 /* If every buffer FD has hung up, we end the read loop here */
709 if (nb_fd > 0 && num_hup == nb_fd) {
710 DBG("every buffer FD has hung up\n");
711 send_error(KCONSUMERD_POLL_HUP);
6aea26bc 712 goto end;
d4a1283e
JD
713 }
714
715 /* Take care of low priority channels. */
716 if (!high_prio) {
717 for (i = 0; i < nb_fd; i++) {
718 switch(pollfd[i].revents) {
719 case POLLIN:
720 DBG("Normal read on fd %d", pollfd[i].fd);
6aea26bc 721 ret = read_subbuffer(local_kconsumerd_fd[i]);
d4a1283e
JD
722 /* it's ok to have an unavailable subbuffer (FIXME : is it ?) */
723 if (ret == EAGAIN) {
724 ret = 0;
725 }
726 break;
727 }
728 }
729 }
730 }
731end:
732 if (pollfd != NULL) {
733 free(pollfd);
734 pollfd = NULL;
735 }
736 if (local_kconsumerd_fd != NULL) {
737 free(local_kconsumerd_fd);
738 local_kconsumerd_fd = NULL;
739 }
bcd8d9db 740 cleanup();
d4a1283e
JD
741 return NULL;
742}
743
744/*
745 * usage function on stderr
746 */
747static void usage(void)
748{
749 fprintf(stderr, "Usage: %s OPTIONS\n\nOptions:\n", progname);
750 fprintf(stderr, " -h, --help "
751 "Display this usage.\n");
752 fprintf(stderr, " -c, --kconsumerd-cmd-sock PATH "
753 "Specify path for the command socket\n");
754 fprintf(stderr, " -e, --kconsumerd-err-sock PATH "
755 "Specify path for the error socket\n");
756 fprintf(stderr, " -d, --daemonize "
757 "Start as a daemon.\n");
758 fprintf(stderr, " -q, --quiet "
759 "No output at all.\n");
760 fprintf(stderr, " -v, --verbose "
761 "Verbose mode. Activate DBG() macro.\n");
762 fprintf(stderr, " -V, --version "
763 "Show version number.\n");
764}
765
766/*
767 * daemon argument parsing
768 */
769static void parse_args(int argc, char **argv)
770{
771 int c;
772
773 static struct option long_options[] = {
774 { "kconsumerd-cmd-sock", 1, 0, 'c' },
775 { "kconsumerd-err-sock", 1, 0, 'e' },
776 { "daemonize", 0, 0, 'd' },
777 { "help", 0, 0, 'h' },
778 { "quiet", 0, 0, 'q' },
779 { "verbose", 0, 0, 'v' },
780 { "version", 0, 0, 'V' },
781 { NULL, 0, 0, 0 }
782 };
783
784 while (1) {
785 int option_index = 0;
786 c = getopt_long(argc, argv, "dhqvV" "c:e:", long_options, &option_index);
787 if (c == -1) {
788 break;
789 }
790
791 switch (c) {
792 case 0:
793 fprintf(stderr, "option %s", long_options[option_index].name);
794 if (optarg) {
795 fprintf(stderr, " with arg %s\n", optarg);
796 }
797 break;
798 case 'c':
799 snprintf(command_sock_path, PATH_MAX, "%s", optarg);
800 break;
801 case 'e':
802 snprintf(error_sock_path, PATH_MAX, "%s", optarg);
803 break;
804 case 'd':
805 opt_daemon = 1;
806 break;
807 case 'h':
808 usage();
809 exit(EXIT_FAILURE);
810 case 'q':
811 opt_quiet = 1;
812 break;
813 case 'v':
814 opt_verbose = 1;
815 break;
816 case 'V':
817 fprintf(stdout, "%s\n", VERSION);
818 exit(EXIT_SUCCESS);
819 default:
820 usage();
821 exit(EXIT_FAILURE);
822 }
823 }
824}
825
826
827/*
828 * main
829 */
830int main(int argc, char **argv)
831{
832 int i;
833 int ret = 0;
834 void *status;
835
836 /* Parse arguments */
837 progname = argv[0];
838 parse_args(argc, argv);
839
840 /* Daemonize */
841 if (opt_daemon) {
842 ret = daemon(0, 0);
843 if (ret < 0) {
844 perror("daemon");
845 goto error;
846 }
847 }
848
849 if (strlen(command_sock_path) == 0) {
850 snprintf(command_sock_path, PATH_MAX,
851 KCONSUMERD_CMD_SOCK_PATH);
852 }
853 if (strlen(error_sock_path) == 0) {
854 snprintf(error_sock_path, PATH_MAX,
855 KCONSUMERD_ERR_SOCK_PATH);
856 }
857
858 if (set_signal_handler() < 0) {
859 goto error;
860 }
861
252fd492
JD
862 /* create the pipe to wake to polling thread when needed */
863 ret = pipe(poll_pipe);
864 if (ret < 0) {
865 perror("Error creating poll pipe");
866 goto end;
867 }
868
d4a1283e
JD
869 /* Connect to the socket created by ltt-sessiond to report errors */
870 DBG("Connecting to error socket %s", error_sock_path);
871 error_socket = lttcomm_connect_unix_sock(error_sock_path);
872 /* not a fatal error, but all communication with ltt-sessiond will fail */
873 if (error_socket < 0) {
874 WARN("Cannot connect to error socket, is ltt-sessiond started ?");
875 }
876
877 /* Create the thread to manage the receive of fd */
878 ret = pthread_create(&threads[0], NULL, thread_receive_fds, (void *) NULL);
879 if (ret != 0) {
880 perror("pthread_create");
881 goto error;
882 }
883
884 /* Create thread to manage the polling/writing of traces */
885 ret = pthread_create(&threads[1], NULL, thread_poll_fds, (void *) NULL);
886 if (ret != 0) {
887 perror("pthread_create");
888 goto error;
889 }
890
891 for (i = 0; i < 2; i++) {
892 ret = pthread_join(threads[i], &status);
893 if (ret != 0) {
894 perror("pthread_join");
895 goto error;
896 }
897 }
898 ret = EXIT_SUCCESS;
899 send_error(KCONSUMERD_EXIT_SUCCESS);
900 goto end;
901
902error:
903 ret = EXIT_FAILURE;
904 send_error(KCONSUMERD_EXIT_FAILURE);
905
906end:
907 cleanup();
908
909 return ret;
910}
This page took 0.093371 seconds and 4 git commands to generate.