lttng-tools.git: kconsumerd/kconsumerd.c (commit "Add lttng-kconsumerd.h")
/*
 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <grp.h>
#include <limits.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <urcu/list.h>
#include <poll.h>
#include <unistd.h>

#include "lttngerr.h"
#include "libkernelctl.h"
#include "liblttsessiondcomm.h"
#include "kconsumerd.h"

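/*
 * Overview: two threads cooperate in this daemon. thread_receive_fds accepts
 * a connection from ltt-sessiond on the command socket and receives the
 * stream file descriptors; thread_poll_fds polls those fds and splices ready
 * sub-buffers to the trace files on disk.
 */
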
/* Init the list of FDs */
static struct ltt_kconsumerd_fd_list kconsumerd_fd_list = {
	.head = CDS_LIST_HEAD_INIT(kconsumerd_fd_list.head),
};

/* Number of elements in the FD list above */
static unsigned int fds_count;

/* Set when the poll thread's local array of FDs needs updating */
static unsigned int update_fd_array = 1;

/* Lock protecting the FD list and the structures it contains */
static pthread_mutex_t kconsumerd_lock_fds = PTHREAD_MUTEX_INITIALIZER;

/* The two threads (receive fd and poll) */
static pthread_t threads[2];

/* Intermediate pipe used by splice (see on_read_subbuffer) */
static int thread_pipe[2];

/* Pipe used to wake the poll thread when necessary */
static int poll_pipe[2];
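
/*
 * A single byte written to poll_pipe[1] (see consumerd_recv_fd) wakes up
 * poll() in thread_poll_fds so it notices update_fd_array and rebuilds its
 * local fd array.
 */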

/* Socket used to report errors to ltt-sessiond */
static int error_socket = -1;

/* Counts how many times the user pressed Ctrl+C */
static int sigintcount = 0;

/* Argument variables */
int opt_quiet;
int opt_verbose;
static int opt_daemon;
static const char *progname;
static char command_sock_path[PATH_MAX]; /* Global command socket path */
static char error_sock_path[PATH_MAX]; /* Global error path */

/*
 * del_fd
 *
 * Remove a fd from the global list, close it and free the associated
 * structure. The caller must hold kconsumerd_lock_fds.
 */
static void del_fd(struct ltt_kconsumerd_fd *lcf)
{
	DBG("Removing %d", lcf->consumerd_fd);
	cds_list_del(&lcf->list);
	if (fds_count > 0) {
		fds_count--;
	}
	DBG("Removed ltt_kconsumerd_fd");
	close(lcf->out_fd);
	close(lcf->consumerd_fd);
	free(lcf);
}

/*
 * cleanup
 *
 * Cleanup the daemon's socket and fd list on exit
 */
static void cleanup()
{
	struct ltt_kconsumerd_fd *iter, *tmp;

	/* remove the socket file */
	unlink(command_sock_path);

	/* unblock the threads */
	WARN("Terminating the threads before exiting");
	pthread_cancel(threads[0]);
	pthread_cancel(threads[1]);

	/*
	 * Close all outfd. Use the _safe variant of the list iteration
	 * because del_fd() removes and frees the current element.
	 */
	pthread_mutex_lock(&kconsumerd_lock_fds);
	cds_list_for_each_entry_safe(iter, tmp, &kconsumerd_fd_list.head, list) {
		del_fd(iter);
	}
	pthread_mutex_unlock(&kconsumerd_lock_fds);
}

/*
 * send_error
 *
 * Send a return code to ltt-sessiond
 */
static int send_error(enum lttcomm_return_code cmd)
{
	if (error_socket > 0) {
		return lttcomm_send_unix_sock(error_socket, &cmd,
				sizeof(enum lttcomm_return_code));
	} else {
		return 0;
	}
}

/*
 * add_fd
 *
 * Add a fd to the global list protected by a mutex
 */
static int add_fd(struct lttcomm_kconsumerd_msg *buf, int consumerd_fd)
{
	struct ltt_kconsumerd_fd *tmp_fd;
	int ret;

	tmp_fd = malloc(sizeof(struct ltt_kconsumerd_fd));
	if (tmp_fd == NULL) {
		perror("malloc struct ltt_kconsumerd_fd");
		ret = -1;
		goto end;
	}
	tmp_fd->sessiond_fd = buf->fd;
	tmp_fd->consumerd_fd = consumerd_fd;
	tmp_fd->state = buf->state;
	tmp_fd->max_sb_size = buf->max_sb_size;
	strncpy(tmp_fd->path_name, buf->path_name, PATH_MAX);
	tmp_fd->path_name[PATH_MAX - 1] = '\0';

	/* Opening the tracefile in write mode */
	DBG("Opening %s for writing", tmp_fd->path_name);
	ret = open(tmp_fd->path_name,
			O_WRONLY|O_CREAT|O_TRUNC, S_IRWXU|S_IRWXG|S_IRWXO);
	if (ret < 0) {
		ERR("Opening %s", tmp_fd->path_name);
		perror("open");
		free(tmp_fd);
		goto end;
	}
	tmp_fd->out_fd = ret;
	tmp_fd->out_fd_offset = 0;

	DBG("Adding %s (%d, %d, %d)", tmp_fd->path_name,
			tmp_fd->sessiond_fd, tmp_fd->consumerd_fd, tmp_fd->out_fd);

	pthread_mutex_lock(&kconsumerd_lock_fds);
	cds_list_add(&tmp_fd->list, &kconsumerd_fd_list.head);
	fds_count++;
	pthread_mutex_unlock(&kconsumerd_lock_fds);

end:
	return ret;
}


/*
 * sighandler
 *
 * Signal handler for the daemon
 */
static void sighandler(int sig)
{
	if (sig == SIGINT && sigintcount++ == 0) {
		DBG("ignoring first SIGINT");
		return;
	}

	cleanup();

	return;
}

/*
 * set_signal_handler
 *
 * Setup signal handler for:
 * SIGINT, SIGTERM, SIGPIPE
 */
static int set_signal_handler(void)
{
	int ret = 0;
	struct sigaction sa;
	sigset_t sigset;

	if ((ret = sigemptyset(&sigset)) < 0) {
		perror("sigemptyset");
		return ret;
	}

	sa.sa_handler = sighandler;
	sa.sa_mask = sigset;
	sa.sa_flags = 0;
	if ((ret = sigaction(SIGTERM, &sa, NULL)) < 0) {
		perror("sigaction");
		return ret;
	}

	if ((ret = sigaction(SIGINT, &sa, NULL)) < 0) {
		perror("sigaction");
		return ret;
	}

	if ((ret = sigaction(SIGPIPE, &sa, NULL)) < 0) {
		perror("sigaction");
		return ret;
	}

	return ret;
}

/*
 * on_read_subbuffer
 *
 * Splice the data from the ring buffer to the tracefile.
 * Returns the number of bytes spliced.
 */
static int on_read_subbuffer(struct ltt_kconsumerd_fd *kconsumerd_fd,
		unsigned long len)
{
	long ret = 0;
	loff_t offset = 0;
	off_t orig_offset = kconsumerd_fd->out_fd_offset;
	int fd = kconsumerd_fd->consumerd_fd;
	int outfd = kconsumerd_fd->out_fd;

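	/*
	 * splice() requires that at least one side of the transfer be a pipe,
	 * so the data is moved in two hops: ring-buffer fd -> thread_pipe,
	 * then thread_pipe -> trace file, without copying through user space.
	 */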
	while (len > 0) {
		DBG("splice chan to pipe offset %lu (fd : %d)",
				(unsigned long)offset, fd);
		ret = splice(fd, &offset, thread_pipe[1], NULL, len,
				SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("splice chan to pipe ret %ld", ret);
		if (ret < 0) {
			ret = errno;
			perror("Error in relay splice");
			goto splice_error;
		}

		ret = splice(thread_pipe[0], NULL, outfd, NULL, ret,
				SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("splice pipe to file %ld", ret);
		if (ret < 0) {
			ret = errno;
			perror("Error in file splice");
			goto splice_error;
		}
		if (ret >= len) {
			len = 0;
		} else {
			/* Partial splice: account for what was actually written */
			len -= ret;
		}
		/* This won't block, but will start writeout asynchronously */
		sync_file_range(outfd, kconsumerd_fd->out_fd_offset, ret,
				SYNC_FILE_RANGE_WRITE);
		kconsumerd_fd->out_fd_offset += ret;
	}

	/*
	 * This does a blocking write-and-wait on any page that belongs to the
	 * subbuffer prior to the one we just wrote.
	 * Don't care about error values, as these are just hints and ways to
	 * limit the amount of page cache used.
	 */
	if (orig_offset >= kconsumerd_fd->max_sb_size) {
		sync_file_range(outfd, orig_offset - kconsumerd_fd->max_sb_size,
				kconsumerd_fd->max_sb_size,
				SYNC_FILE_RANGE_WAIT_BEFORE
				| SYNC_FILE_RANGE_WRITE
				| SYNC_FILE_RANGE_WAIT_AFTER);
		/*
		 * Give hints to the kernel about how we access the file:
		 * POSIX_FADV_DONTNEED : we won't re-access data in a near
		 * future after we write it.
		 * We need to call fadvise again after the file grows because
		 * the kernel does not seem to apply fadvise to non-existing
		 * parts of the file.
		 * Call fadvise _after_ having waited for the page writeback to
		 * complete because the dirty page writeback semantic is not
		 * well defined. So it can be expected to lead to lower
		 * throughput in streaming.
		 */
		posix_fadvise(outfd, orig_offset - kconsumerd_fd->max_sb_size,
				kconsumerd_fd->max_sb_size, POSIX_FADV_DONTNEED);
	}
	goto end;

splice_error:
	/* send the appropriate error description to sessiond */
	switch (ret) {
	case EBADF:
		send_error(KCONSUMERD_SPLICE_EBADF);
		break;
	case EINVAL:
		send_error(KCONSUMERD_SPLICE_EINVAL);
		break;
	case ENOMEM:
		send_error(KCONSUMERD_SPLICE_ENOMEM);
		break;
	case ESPIPE:
		send_error(KCONSUMERD_SPLICE_ESPIPE);
		break;
	}

end:
	return ret;
}

/*
 * read_subbuffer
 *
 * Consume data on a file descriptor and write it to a trace file
 */
static int read_subbuffer(struct ltt_kconsumerd_fd *kconsumerd_fd)
{
	unsigned long len;
	int err;
	long ret = 0;
	int infd = kconsumerd_fd->consumerd_fd;

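	/*
	 * Sub-buffers are consumed with a get/put protocol on the kernel
	 * tracer fd: kernctl_get_next_subbuf() reserves the next full
	 * sub-buffer for reading and kernctl_put_next_subbuf() releases it
	 * back to the tracer once it has been spliced out.
	 */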
	DBG("In read_subbuffer (infd : %d)", infd);
	/* Get the next subbuffer */
	err = kernctl_get_next_subbuf(infd);
	if (err != 0) {
		ret = errno;
		perror("Reserving sub buffer failed (everything is normal, "
				"it is due to concurrency)");
		goto end;
	}

	/* read the whole subbuffer */
	err = kernctl_get_padded_subbuf_size(infd, &len);
	if (err != 0) {
		ret = errno;
		perror("Getting sub-buffer len failed");
		goto end;
	}

	/* splice the subbuffer to the tracefile */
	ret = on_read_subbuffer(kconsumerd_fd, len);
	if (ret < 0) {
		/*
		 * Display the error but continue processing to try
		 * to release the subbuffer
		 */
		ERR("Error splicing to tracefile");
	}

	err = kernctl_put_next_subbuf(infd);
	if (err != 0) {
		ret = errno;
		if (errno == EFAULT) {
			perror("Error in unreserving sub buffer");
		} else if (errno == EIO) {
			/* Should never happen with newer LTTng versions */
			perror("Reader has been pushed by the writer, last sub-buffer corrupted");
		}
		goto end;
	}

end:
	return ret;
}

/*
 * change_fd_state
 *
 * Update a fd according to what we just received
 */
static void change_fd_state(int sessiond_fd,
		enum kconsumerd_fd_state state)
{
	struct ltt_kconsumerd_fd *iter;

	cds_list_for_each_entry(iter, &kconsumerd_fd_list.head, list) {
		if (iter->sessiond_fd == sessiond_fd) {
			iter->state = state;
			break;
		}
	}
}

/*
 * consumerd_recv_fd
 *
 * Receives an array of file descriptors and the associated
 * structures describing each fd (path name).
 * Returns the size of received data
 */
static int consumerd_recv_fd(int sfd, int size,
		enum kconsumerd_command cmd_type)
{
	struct msghdr msg;
	struct iovec iov[1];
	int ret, i, tmp2;
	struct cmsghdr *cmsg;
	int nb_fd;
	char tmp[CMSG_SPACE(size)];
	struct lttcomm_kconsumerd_msg *buf;

	/* the number of fds we are about to receive */
	nb_fd = size / sizeof(struct lttcomm_kconsumerd_msg);

	buf = malloc(size);
	if (buf == NULL) {
		perror("malloc lttcomm_kconsumerd_msg buffer");
		return -1;
	}

	memset(&msg, 0, sizeof(msg));

	/* Prepare to receive the structures */
	iov[0].iov_base = buf;
	iov[0].iov_len = size;
	msg.msg_iov = iov;
	msg.msg_iovlen = 1;

	msg.msg_control = tmp;
	msg.msg_controllen = sizeof(tmp);

	DBG("Waiting to receive fds");
	if ((ret = recvmsg(sfd, &msg, 0)) < 0) {
		perror("recvmsg");
	}
	if (ret != size) {
		ERR("Received only %d, expected %d", ret, size);
		send_error(KCONSUMERD_ERROR_RECV_FD);
		goto end;
	}

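	/*
	 * The stream file descriptors themselves travel as ancillary data
	 * (SCM_RIGHTS) on the UNIX socket; the iovec above only carries the
	 * lttcomm_kconsumerd_msg structures describing each of them.
	 */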
	cmsg = CMSG_FIRSTHDR(&msg);
	if (!cmsg) {
		ERR("Invalid control message header");
		ret = -1;
		send_error(KCONSUMERD_ERROR_RECV_FD);
		goto end;
	}

	/* if we received fds */
	if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
		DBG("Receive: expecting %d fds", nb_fd);
		for (i = 0; i < nb_fd; i++) {
			switch (cmd_type) {
			case ADD_STREAM:
				DBG("add_fd %s (%d)", buf[i].path_name, ((int *)CMSG_DATA(cmsg))[i]);
				ret = add_fd(&buf[i], ((int *)CMSG_DATA(cmsg))[i]);
				if (ret < 0) {
					send_error(KCONSUMERD_OUTFD_ERROR);
					goto end;
				}
				break;
			case UPDATE_STREAM:
				change_fd_state(buf[i].fd, buf[i].state);
				break;
			default:
				break;
			}
		}
		/* flag to tell the polling thread to update its fd array */
		update_fd_array = 1;
		/* signal the poll thread */
		tmp2 = write(poll_pipe[1], "4", 1);
	} else {
		ERR("Didn't receive any fd");
		send_error(KCONSUMERD_ERROR_RECV_FD);
		ret = -1;
		goto end;
	}

end:
	if (buf != NULL) {
		free(buf);
		buf = NULL;
	}
	return ret;
}

/*
 * thread_receive_fds
 *
 * This thread listens on the consumerd socket and
 * receives the file descriptors from ltt-sessiond
 */
static void *thread_receive_fds(void *data)
{
	int sock, client_socket, ret;
	struct lttcomm_kconsumerd_header tmp;

	DBG("Creating command socket %s", command_sock_path);
	unlink(command_sock_path);
	client_socket = lttcomm_create_unix_sock(command_sock_path);
	if (client_socket < 0) {
		ERR("Cannot create command socket");
		goto error;
	}

	ret = lttcomm_listen_unix_sock(client_socket);
	if (ret < 0) {
		goto error;
	}

	DBG("Sending ready command to ltt-sessiond");
	ret = send_error(KCONSUMERD_COMMAND_SOCK_READY);
	if (ret < 0) {
		ERR("Error sending ready command to ltt-sessiond");
		goto error;
	}

	/* Blocking call, waiting for transmission */
	sock = lttcomm_accept_unix_sock(client_socket);
	if (sock <= 0) {
		WARN("On accept");
		goto error;
	}
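
	/*
	 * Command protocol with ltt-sessiond: each message starts with a
	 * lttcomm_kconsumerd_header giving the payload size and command type,
	 * followed by the payload itself (plus the fds as ancillary data),
	 * which consumerd_recv_fd() reads.
	 */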
	while (1) {
		/* We first get the number of fd we are about to receive */
		ret = lttcomm_recv_unix_sock(sock, &tmp,
				sizeof(struct lttcomm_kconsumerd_header));
		if (ret <= 0) {
			ERR("Receiving the lttcomm_kconsumerd_header, exiting");
			goto error;
		}
		ret = consumerd_recv_fd(sock, tmp.payload_size, tmp.cmd_type);
		if (ret <= 0) {
			ERR("Receiving the FD, exiting");
			goto error;
		}
	}

error:
	return NULL;
}

/*
 * update_poll_array
 *
 * Allocate the pollfd structure and the local view of the out fds
 * to avoid doing a lookup in the linked list and concurrency issues
 * when writing is needed.
 * Returns the number of fds in the structures.
 */
static int update_poll_array(struct pollfd **pollfd,
		struct ltt_kconsumerd_fd **local_kconsumerd_fd)
{
	struct ltt_kconsumerd_fd *iter, *tmp_iter;
	int i = 0;

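	/*
	 * Take a snapshot of the shared fd list under kconsumerd_lock_fds so
	 * that the poll/splice loop can work on plain arrays without holding
	 * the lock.
	 */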
	DBG("Updating poll fd array");
	pthread_mutex_lock(&kconsumerd_lock_fds);

	/* Use the _safe variant because del_fd() frees the current element */
	cds_list_for_each_entry_safe(iter, tmp_iter, &kconsumerd_fd_list.head, list) {
		DBG("Inside for each");
		if (iter->state == ACTIVE_FD) {
			DBG("Active FD %d", iter->consumerd_fd);
			(*pollfd)[i].fd = iter->consumerd_fd;
			(*pollfd)[i].events = POLLIN | POLLPRI;
			local_kconsumerd_fd[i] = iter;
			i++;
		} else if (iter->state == DELETE_FD) {
			del_fd(iter);
		}
	}

	/*
	 * insert the poll_pipe at the end of the array and don't increment i
	 * so nb_fd is the number of real FDs
	 */
	(*pollfd)[i].fd = poll_pipe[0];
	(*pollfd)[i].events = POLLIN;

	update_fd_array = 0;
	pthread_mutex_unlock(&kconsumerd_lock_fds);
	return i;
}

/*
 * thread_poll_fds
 *
 * This thread polls the fds in the ltt_fd_list to consume the data
 * and write it to the trace files if necessary.
 */
static void *thread_poll_fds(void *data)
{
	int num_rdy, num_hup, high_prio, ret, i;
	struct pollfd *pollfd = NULL;
	/* local view of the fds */
	struct ltt_kconsumerd_fd **local_kconsumerd_fd = NULL;
	/* local view of fds_count */
	int nb_fd = 0;
	char tmp;
	int tmp2;

	ret = pipe(thread_pipe);
	if (ret < 0) {
		perror("Error creating pipe");
		goto end;
	}

	local_kconsumerd_fd = malloc(sizeof(struct ltt_kconsumerd_fd));

	while (1) {
		high_prio = 0;
		num_hup = 0;

		/*
		 * the ltt_fd_list has been updated, we need to update our
		 * local array as well
		 */
		if (update_fd_array == 1) {
			if (pollfd != NULL) {
				free(pollfd);
				pollfd = NULL;
			}
			if (local_kconsumerd_fd != NULL) {
				free(local_kconsumerd_fd);
				local_kconsumerd_fd = NULL;
			}
			/* allocate for all fds + 1 for the poll_pipe */
			pollfd = malloc((fds_count + 1) * sizeof(struct pollfd));
			if (pollfd == NULL) {
				perror("pollfd malloc");
				goto end;
			}
			/* allocate for all fds + 1 for the poll_pipe */
			local_kconsumerd_fd = malloc((fds_count + 1) * sizeof(struct ltt_kconsumerd_fd));
			if (local_kconsumerd_fd == NULL) {
				perror("local_kconsumerd_fd malloc");
				goto end;
			}

			ret = update_poll_array(&pollfd, local_kconsumerd_fd);
			if (ret < 0) {
				ERR("Error in allocating pollfd or local_outfds");
				send_error(KCONSUMERD_POLL_ERROR);
				goto end;
			}
			nb_fd = ret;
		}

		/* poll on the array of fds */
		DBG("polling on %d fd", nb_fd + 1);
		num_rdy = poll(pollfd, nb_fd + 1, -1);
		DBG("poll num_rdy : %d", num_rdy);
		if (num_rdy == -1) {
			perror("Poll error");
			send_error(KCONSUMERD_POLL_ERROR);
			goto end;
		}

		/*
		 * if only the poll_pipe triggered poll to return just return to the
		 * beginning of the loop to update the array
		 */
		if (num_rdy == 1 && pollfd[nb_fd].revents == POLLIN) {
			DBG("poll_pipe wake up");
			tmp2 = read(poll_pipe[0], &tmp, 1);
			continue;
		}

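		/*
		 * pollfd[] and local_kconsumerd_fd[] are parallel arrays built
		 * by update_poll_array(): index i in one matches index i in
		 * the other. Urgent data (POLLPRI) is drained before any
		 * normal POLLIN read is attempted.
		 */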
		/* Take care of high priority channels first. */
		for (i = 0; i < nb_fd; i++) {
			switch (pollfd[i].revents) {
			case POLLERR:
				ERR("Error returned in polling fd %d.", pollfd[i].fd);
				num_hup++;
				send_error(KCONSUMERD_POLL_ERROR);
				break;
			case POLLHUP:
				ERR("Polling fd %d tells it has hung up.", pollfd[i].fd);
				num_hup++;
				break;
			case POLLNVAL:
				ERR("Polling fd %d tells fd is not open.", pollfd[i].fd);
				send_error(KCONSUMERD_POLL_NVAL);
				num_hup++;
				break;
			case POLLPRI:
				DBG("Urgent read on fd %d", pollfd[i].fd);
				high_prio = 1;
				ret = read_subbuffer(local_kconsumerd_fd[i]);
				/* it's ok to have an unavailable sub-buffer (FIXME: is it?) */
				if (ret == EAGAIN) {
					ret = 0;
				}
				break;
			}
		}

		/* If every buffer FD has hung up, we end the read loop here */
		if (nb_fd > 0 && num_hup == nb_fd) {
			DBG("every buffer FD has hung up\n");
			send_error(KCONSUMERD_POLL_HUP);
			goto end;
		}

		/* Take care of low priority channels. */
		if (high_prio == 0) {
			for (i = 0; i < nb_fd; i++) {
				if (pollfd[i].revents == POLLIN) {
					DBG("Normal read on fd %d", pollfd[i].fd);
					ret = read_subbuffer(local_kconsumerd_fd[i]);
					/* it's ok to have an unavailable sub-buffer (FIXME: is it?) */
					if (ret == EAGAIN) {
						ret = 0;
					}
				}
			}
		}
	}

end:
	if (pollfd != NULL) {
		free(pollfd);
		pollfd = NULL;
	}
	if (local_kconsumerd_fd != NULL) {
		free(local_kconsumerd_fd);
		local_kconsumerd_fd = NULL;
	}
	cleanup();
	return NULL;
}

/*
 * usage function on stderr
 */
static void usage(void)
{
	fprintf(stderr, "Usage: %s OPTIONS\n\nOptions:\n", progname);
	fprintf(stderr, "  -h, --help "
			"Display this usage.\n");
	fprintf(stderr, "  -c, --kconsumerd-cmd-sock PATH "
			"Specify path for the command socket\n");
	fprintf(stderr, "  -e, --kconsumerd-err-sock PATH "
			"Specify path for the error socket\n");
	fprintf(stderr, "  -d, --daemonize "
			"Start as a daemon.\n");
	fprintf(stderr, "  -q, --quiet "
			"No output at all.\n");
	fprintf(stderr, "  -v, --verbose "
			"Verbose mode. Activate DBG() macro.\n");
	fprintf(stderr, "  -V, --version "
			"Show version number.\n");
}

/*
 * daemon argument parsing
 */
static void parse_args(int argc, char **argv)
{
	int c;

	static struct option long_options[] = {
		{ "kconsumerd-cmd-sock", 1, 0, 'c' },
		{ "kconsumerd-err-sock", 1, 0, 'e' },
		{ "daemonize", 0, 0, 'd' },
		{ "help", 0, 0, 'h' },
		{ "quiet", 0, 0, 'q' },
		{ "verbose", 0, 0, 'v' },
		{ "version", 0, 0, 'V' },
		{ NULL, 0, 0, 0 }
	};

	while (1) {
		int option_index = 0;
		c = getopt_long(argc, argv, "dhqvV" "c:e:", long_options, &option_index);
		if (c == -1) {
			break;
		}

		switch (c) {
		case 0:
			fprintf(stderr, "option %s", long_options[option_index].name);
			if (optarg) {
				fprintf(stderr, " with arg %s\n", optarg);
			}
			break;
		case 'c':
			snprintf(command_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'e':
			snprintf(error_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'd':
			opt_daemon = 1;
			break;
		case 'h':
			usage();
			exit(EXIT_FAILURE);
		case 'q':
			opt_quiet = 1;
			break;
		case 'v':
			opt_verbose = 1;
			break;
		case 'V':
			fprintf(stdout, "%s\n", VERSION);
			exit(EXIT_SUCCESS);
		default:
			usage();
			exit(EXIT_FAILURE);
		}
	}
}

/*
 * main
 */
int main(int argc, char **argv)
{
	int i;
	int ret = 0;
	void *status;

	/* Parse arguments */
	progname = argv[0];
	parse_args(argc, argv);

	/* Daemonize */
	if (opt_daemon) {
		ret = daemon(0, 0);
		if (ret < 0) {
			perror("daemon");
			goto error;
		}
	}

	if (strlen(command_sock_path) == 0) {
		snprintf(command_sock_path, PATH_MAX,
				KCONSUMERD_CMD_SOCK_PATH);
	}
	if (strlen(error_sock_path) == 0) {
		snprintf(error_sock_path, PATH_MAX,
				KCONSUMERD_ERR_SOCK_PATH);
	}

	if (set_signal_handler() < 0) {
		goto error;
	}

	/* Create the pipe used to wake the polling thread when needed */
	ret = pipe(poll_pipe);
	if (ret < 0) {
		perror("Error creating poll pipe");
		goto end;
	}

	/* Connect to the socket created by ltt-sessiond to report errors */
	DBG("Connecting to error socket %s", error_sock_path);
	error_socket = lttcomm_connect_unix_sock(error_sock_path);
	/* not a fatal error, but all communication with ltt-sessiond will fail */
	if (error_socket < 0) {
		WARN("Cannot connect to error socket, is ltt-sessiond started?");
	}

	/* Create the thread that receives the file descriptors */
	ret = pthread_create(&threads[0], NULL, thread_receive_fds, (void *) NULL);
	if (ret != 0) {
		perror("pthread_create");
		goto error;
	}

	/* Create the thread that polls the fds and writes the traces */
	ret = pthread_create(&threads[1], NULL, thread_poll_fds, (void *) NULL);
	if (ret != 0) {
		perror("pthread_create");
		goto error;
	}

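	/*
	 * Wait for both threads to finish. On fatal errors, thread_poll_fds
	 * calls cleanup(), which cancels both worker threads.
	 */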
	for (i = 0; i < 2; i++) {
		ret = pthread_join(threads[i], &status);
		if (ret != 0) {
			perror("pthread_join");
			goto error;
		}
	}
	ret = EXIT_SUCCESS;
	send_error(KCONSUMERD_EXIT_SUCCESS);
	goto end;

error:
	ret = EXIT_FAILURE;
	send_error(KCONSUMERD_EXIT_FAILURE);

end:
	cleanup();

	return ret;
}