/* lttng-tools.git: kconsumerd/kconsumerd.c */
/*
 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <grp.h>
#include <limits.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <urcu/list.h>
#include <poll.h>
#include <unistd.h>

#include "lttngerr.h"
#include "libkernelctl.h"
#include "liblttsessiondcomm.h"
#include "kconsumerd.h"

/* List of FDs to consume, protected by kconsumerd_lock_fds */
static struct ltt_kconsumerd_fd_list kconsumerd_fd_list = {
	.head = CDS_LIST_HEAD_INIT(kconsumerd_fd_list.head),
};

/* Number of elements in the list above */
static unsigned int fds_count;

/* Flag telling the poll thread that its local FD array needs to be updated */
static unsigned int update_fd_array = 1;

/* Protects the FD list and fds_count */
static pthread_mutex_t kconsumerd_lock_fds = PTHREAD_MUTEX_INITIALIZER;

/* The two threads (receive fd and poll) */
static pthread_t threads[2];

/* Pipe used as the intermediate step for splice() */
static int thread_pipe[2];

/* Socket used to report errors to ltt-sessiond */
static int error_socket = -1;

/* Argument variables */
int opt_quiet;
int opt_verbose;
static int opt_daemon;
static const char *progname;
static char command_sock_path[PATH_MAX]; /* Global command socket path */
static char error_sock_path[PATH_MAX]; /* Global error path */

/*
 * cleanup
 *
 * Cleanup the daemon's socket on exit
 */
static void cleanup()
{
	unlink(command_sock_path);
}

/*
 * send_error
 *
 * Send a return code to ltt-sessiond over the error socket
 */
static int send_error(enum lttcomm_return_code cmd)
{
	if (error_socket >= 0) {
		return lttcomm_send_unix_sock(error_socket, &cmd,
				sizeof(cmd));
	} else {
		return 0;
	}
}

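/*
 * Note on error reporting: the KCONSUMERD_* codes sent through send_error()
 * come from the lttcomm layer (liblttsessiondcomm.h) and are only
 * interpreted by ltt-sessiond. If the error socket could not be opened at
 * startup, send_error() silently drops the report and the daemon keeps
 * running.
 */
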
/*
 * cleanup_kconsumerd_fd
 *
 * Close the FDs and free a ltt_kconsumerd_fd struct
 */
static void cleanup_kconsumerd_fd(struct ltt_kconsumerd_fd *lcf)
{
	if (lcf != NULL) {
		close(lcf->out_fd);
		close(lcf->consumerd_fd);
		free(lcf);
		lcf = NULL;
	}
}

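/*
 * Each stream is tracked by two file descriptor numbers: sessiond_fd is the
 * value the fd had in ltt-sessiond (used as a key when sessiond asks for a
 * state change), while consumerd_fd is the local fd actually received over
 * the unix socket and used to read the ring buffer.
 */
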
/*
 * add_fd
 *
 * Add a fd to the global list protected by a mutex
 */
static int add_fd(struct lttcomm_kconsumerd_msg *buf, int consumerd_fd)
{
	struct ltt_kconsumerd_fd *tmp_fd;
	int ret;

	tmp_fd = malloc(sizeof(struct ltt_kconsumerd_fd));
	if (tmp_fd == NULL) {
		perror("malloc ltt_kconsumerd_fd");
		ret = -1;
		goto end;
	}
	tmp_fd->sessiond_fd = buf->fd;
	tmp_fd->consumerd_fd = consumerd_fd;
	tmp_fd->state = buf->state;
	tmp_fd->max_sb_size = buf->max_sb_size;
	strncpy(tmp_fd->path_name, buf->path_name, PATH_MAX);
	tmp_fd->path_name[PATH_MAX - 1] = '\0';

	/* Open the tracefile in write mode */
	DBG("Opening %s for writing", tmp_fd->path_name);
	ret = open(tmp_fd->path_name,
			O_WRONLY|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO);
	if (ret < 0) {
		ERR("Opening %s", tmp_fd->path_name);
		perror("open");
		free(tmp_fd);
		goto end;
	}
	tmp_fd->out_fd = ret;
	tmp_fd->out_fd_offset = 0;

	DBG("Adding %s (%d, %d, %d)", tmp_fd->path_name,
			tmp_fd->sessiond_fd, tmp_fd->consumerd_fd, tmp_fd->out_fd);

	pthread_mutex_lock(&kconsumerd_lock_fds);
	cds_list_add(&tmp_fd->list, &kconsumerd_fd_list.head);
	fds_count++;
	pthread_mutex_unlock(&kconsumerd_lock_fds);

end:
	return ret;
}

/*
 * del_fd
 *
 * Remove a fd from the global list. The caller must hold
 * kconsumerd_lock_fds.
 */
static void del_fd(struct ltt_kconsumerd_fd *lcf)
{
	cds_list_del(&lcf->list);
	if (fds_count > 0) {
		fds_count--;
		DBG("Removed ltt_kconsumerd_fd");
		cleanup_kconsumerd_fd(lcf);
	}
}

/*
 * close_outfds
 *
 * Close all fds in the fd_list. Takes kconsumerd_lock_fds, so the caller
 * must not already hold it.
 */
static void close_outfds()
{
	struct ltt_kconsumerd_fd *iter, *tmp;

	pthread_mutex_lock(&kconsumerd_lock_fds);
	cds_list_for_each_entry_safe(iter, tmp, &kconsumerd_fd_list.head, list) {
		del_fd(iter);
	}
	pthread_mutex_unlock(&kconsumerd_lock_fds);
}

/*
 * sighandler
 *
 * Signal handler for the daemon
 */
static void sighandler(int sig)
{
	/* unblock the threads */
	pthread_cancel(threads[0]);
	pthread_cancel(threads[1]);

	close_outfds();
	cleanup();

	return;
}

/*
 * set_signal_handler
 *
 * Setup signal handler for :
 *   SIGINT, SIGTERM, SIGPIPE
 */
static int set_signal_handler(void)
{
	int ret = 0;
	struct sigaction sa;
	sigset_t sigset;

	if ((ret = sigemptyset(&sigset)) < 0) {
		perror("sigemptyset");
		return ret;
	}

	sa.sa_handler = sighandler;
	sa.sa_mask = sigset;
	sa.sa_flags = 0;
	if ((ret = sigaction(SIGTERM, &sa, NULL)) < 0) {
		perror("sigaction");
		return ret;
	}

	if ((ret = sigaction(SIGINT, &sa, NULL)) < 0) {
		perror("sigaction");
		return ret;
	}

	if ((ret = sigaction(SIGPIPE, &sa, NULL)) < 0) {
		perror("sigaction");
		return ret;
	}

	return ret;
}

/*
 * on_read_subbuffer
 *
 * Splice the data from the ring buffer to the tracefile.
 * Returns the result of the last splice() call, or a negative value on
 * error.
 */
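/*
 * Note: splice() requires one end of each transfer to be a pipe, so the
 * data makes two hops: ring buffer fd -> thread_pipe -> tracefile fd.
 * Both hops stay inside the kernel; no copy to user space is involved.
 */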
static int on_read_subbuffer(struct ltt_kconsumerd_fd *kconsumerd_fd,
		unsigned long len)
{
	long ret = 0;
	loff_t offset = 0;
	off_t orig_offset = kconsumerd_fd->out_fd_offset;
	int fd = kconsumerd_fd->consumerd_fd;
	int outfd = kconsumerd_fd->out_fd;

	while (len > 0) {
		DBG("splice chan to pipe offset %lu (fd : %d)",
				(unsigned long)offset, fd);
		ret = splice(fd, &offset, thread_pipe[1], NULL, len,
				SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("splice chan to pipe ret %ld", ret);
		if (ret < 0) {
			perror("Error in relay splice");
			goto write_end;
		}

		ret = splice(thread_pipe[0], NULL, outfd, NULL, ret,
				SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("splice pipe to file %ld", ret);
		if (ret < 0) {
			perror("Error in file splice");
			goto write_end;
		}
		if (ret >= len) {
			len = 0;
		} else {
			len -= ret;
		}
		/* This won't block, but will start writeout asynchronously */
		sync_file_range(outfd, kconsumerd_fd->out_fd_offset, ret,
				SYNC_FILE_RANGE_WRITE);
		kconsumerd_fd->out_fd_offset += ret;
	}

write_end:
	/*
	 * This does a blocking write-and-wait on any page that belongs to the
	 * subbuffer prior to the one we just wrote.
	 * Don't care about error values, as these are just hints and ways to
	 * limit the amount of page cache used.
	 */
	if (orig_offset >= kconsumerd_fd->max_sb_size) {
		sync_file_range(outfd, orig_offset - kconsumerd_fd->max_sb_size,
				kconsumerd_fd->max_sb_size,
				SYNC_FILE_RANGE_WAIT_BEFORE
				| SYNC_FILE_RANGE_WRITE
				| SYNC_FILE_RANGE_WAIT_AFTER);
		/*
		 * Give hints to the kernel about how we access the file:
		 * POSIX_FADV_DONTNEED : we won't re-access data in a near
		 * future after we write it.
		 * We need to call fadvise again after the file grows because
		 * the kernel does not seem to apply fadvise to non-existing
		 * parts of the file.
		 * Call fadvise _after_ having waited for the page writeback to
		 * complete because the dirty page writeback semantic is not
		 * well defined. So it can be expected to lead to lower
		 * throughput in streaming.
		 */
		posix_fadvise(outfd, orig_offset - kconsumerd_fd->max_sb_size,
				kconsumerd_fd->max_sb_size, POSIX_FADV_DONTNEED);
	}
	return ret;
}

/*
 * read_subbuffer
 *
 * Consume data on a file descriptor and write it on a trace file
 */
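/*
 * The kernctl_* calls below follow the ring buffer get/put protocol:
 * get_next_subbuf reserves the next full sub-buffer for reading (it fails
 * with EAGAIN when no complete sub-buffer is available yet, which is
 * expected), get_padded_subbuf_size returns how many bytes to splice, and
 * put_next_subbuf releases the sub-buffer back to the tracer.
 */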
static int read_subbuffer(struct ltt_kconsumerd_fd *kconsumerd_fd)
{
	unsigned long len;
	int err;
	long ret = 0;
	int infd = kconsumerd_fd->consumerd_fd;

	DBG("In read_subbuffer");
	/* Get the next subbuffer */
	err = kernctl_get_next_subbuf(infd);
	if (err != 0) {
		ret = errno;
		perror("Reserving sub buffer failed (everything is normal, "
				"it is due to concurrency)");
		goto end;
	}

	/* read the whole subbuffer */
	err = kernctl_get_padded_subbuf_size(infd, &len);
	if (err != 0) {
		ret = errno;
		perror("Getting sub-buffer len failed");
		goto end;
	}

	/* splice the subbuffer to the tracefile */
	ret = on_read_subbuffer(kconsumerd_fd, len);
	if (ret < 0) {
		/*
		 * Display the error but continue processing to try to release
		 * the subbuffer
		 */
		ERR("Error splicing to tracefile");
	}

	err = kernctl_put_next_subbuf(infd);
	if (err != 0) {
		ret = errno;
		if (errno == EFAULT) {
			perror("Error in unreserving sub buffer");
		} else if (errno == EIO) {
			/* Should never happen with newer LTTng versions */
			perror("Reader has been pushed by the writer, last sub-buffer corrupted");
		}
		goto end;
	}

end:
	return ret;
}

/*
 * change_fd_state
 *
 * Update the state of a fd according to what we just received
 */
static void change_fd_state(int sessiond_fd,
		enum lttcomm_kconsumerd_fd_state state)
{
	struct ltt_kconsumerd_fd *iter;

	pthread_mutex_lock(&kconsumerd_lock_fds);
	cds_list_for_each_entry(iter, &kconsumerd_fd_list.head, list) {
		if (iter->sessiond_fd == sessiond_fd) {
			iter->state = state;
			break;
		}
	}
	pthread_mutex_unlock(&kconsumerd_lock_fds);
}

/*
 * consumerd_recv_fd
 *
 * Receives an array of file descriptors and the associated
 * structures describing each fd (path name).
 * Returns the size of received data
 */
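/*
 * The fds themselves arrive as SCM_RIGHTS ancillary data attached to the
 * message that carries the lttcomm_kconsumerd_msg array. For reference, a
 * minimal sketch of what the sending side is expected to do (hypothetical
 * helper, not part of this file):
 *
 *	char ctl[CMSG_SPACE(nb_fd * sizeof(int))];
 *	struct iovec iov = { .iov_base = bufs, .iov_len = size };
 *	struct msghdr msg;
 *	struct cmsghdr *c;
 *
 *	memset(&msg, 0, sizeof(msg));
 *	msg.msg_iov = &iov;
 *	msg.msg_iovlen = 1;
 *	msg.msg_control = ctl;
 *	msg.msg_controllen = sizeof(ctl);
 *	c = CMSG_FIRSTHDR(&msg);
 *	c->cmsg_level = SOL_SOCKET;
 *	c->cmsg_type = SCM_RIGHTS;
 *	c->cmsg_len = CMSG_LEN(nb_fd * sizeof(int));
 *	memcpy(CMSG_DATA(c), fds, nb_fd * sizeof(int));
 *	sendmsg(sock, &msg, 0);
 */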
static int consumerd_recv_fd(int sfd, int size,
		enum lttcomm_consumerd_command cmd_type)
{
	struct msghdr msg;
	struct iovec iov[1];
	int ret, i;
	struct cmsghdr *cmsg;
	int nb_fd;
	char tmp[CMSG_SPACE(size)];
	struct lttcomm_kconsumerd_msg *buf;

	/* the number of fds we are about to receive */
	nb_fd = size / sizeof(struct lttcomm_kconsumerd_msg);

	buf = malloc(size);
	if (buf == NULL) {
		perror("malloc lttcomm_kconsumerd_msg");
		return -ENOMEM;
	}

	memset(&msg, 0, sizeof(msg));

	/* Prepare to receive the structures */
	iov[0].iov_base = buf;
	iov[0].iov_len = size;
	msg.msg_iov = iov;
	msg.msg_iovlen = 1;

	msg.msg_control = tmp;
	msg.msg_controllen = sizeof(tmp);

	DBG("Waiting to receive fds");
	if ((ret = recvmsg(sfd, &msg, 0)) < 0) {
		perror("recvmsg");
	}
	if (ret != size) {
		ERR("Received only %d, expected %d", ret, size);
		send_error(KCONSUMERD_ERROR_RECV_FD);
		goto end;
	}

	cmsg = CMSG_FIRSTHDR(&msg);
	if (!cmsg) {
		ERR("Invalid control message header");
		ret = -1;
		send_error(KCONSUMERD_ERROR_RECV_FD);
		goto end;
	}

	/* if we received fds */
	if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
		DBG("Receive : expecting %d fds", nb_fd);
		for (i = 0; i < nb_fd; i++) {
			switch (cmd_type) {
			case LTTCOMM_ADD_STREAM:
				DBG("add_fd %s (%d)", buf[i].path_name, ((int *) CMSG_DATA(cmsg))[i]);
				ret = add_fd(&buf[i], ((int *) CMSG_DATA(cmsg))[i]);
				if (ret < 0) {
					send_error(KCONSUMERD_OUTFD_ERROR);
					goto end;
				}
				break;
			case LTTCOMM_UPDATE_STREAM:
				change_fd_state(buf[i].fd, buf[i].state);
				break;
			default:
				break;
			}
		}
		/* flag to tell the polling thread to update its fd array */
		update_fd_array = 1;
		send_error(KCONSUMERD_SUCCESS_RECV_FD);
	} else {
		ERR("Didn't receive any fd");
		send_error(KCONSUMERD_ERROR_RECV_FD);
		ret = -1;
		goto end;
	}

end:
	if (buf != NULL) {
		free(buf);
		buf = NULL;
	}
	return ret;
}

/*
 * thread_receive_fds
 *
 * This thread listens on the consumerd socket and
 * receives the file descriptors from ltt-sessiond
 */
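/*
 * The accept loop below never returns on its own: the thread is terminated
 * by pthread_cancel() from sighandler(). The error label is only reached
 * when the command socket cannot be set up at all.
 */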
static void *thread_receive_fds(void *data)
{
	int sock, client_socket, ret;
	struct lttcomm_kconsumerd_header tmp;

	DBG("Creating command socket %s", command_sock_path);
	unlink(command_sock_path);
	client_socket = lttcomm_create_unix_sock(command_sock_path);
	if (client_socket < 0) {
		ERR("Cannot create command socket");
		goto error;
	}

	ret = lttcomm_listen_unix_sock(client_socket);
	if (ret < 0) {
		goto error;
	}

	DBG("Sending ready command to ltt-sessiond");
	ret = send_error(KCONSUMERD_COMMAND_SOCK_READY);
	if (ret < 0) {
		ERR("Error sending ready command to ltt-sessiond");
		goto error;
	}

	while (1) {
		/* Blocking call, waiting for transmission */
		sock = lttcomm_accept_unix_sock(client_socket);
		if (sock <= 0) {
			continue;
		}

		/* We first get the number of fd we are about to receive */
		ret = lttcomm_recv_unix_sock(sock, &tmp,
				sizeof(struct lttcomm_kconsumerd_header));
		if (ret < 0) {
			ERR("Receiving the lttcomm_kconsumerd_header");
			continue;
		}
		ret = consumerd_recv_fd(sock, tmp.payload_size, tmp.cmd_type);
		if (ret < 0) {
			continue;
		}
	}

error:
	return NULL;
}

/*
 * update_poll_array
 *
 * Allocate the pollfd structure and the local view of the out fds
 * to avoid doing a lookup in the linked list and concurrency issues
 * when writing is needed.
 * Returns the number of fds in the structures
 */
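/*
 * Stream life cycle, as seen from this function: the receive thread adds
 * entries in ACTIVE_FD state (add_fd) or changes their state on a
 * LTTCOMM_UPDATE_STREAM command (change_fd_state), then raises
 * update_fd_array. On the next iteration of the poll loop, ACTIVE_FD
 * entries are put in the poll set and DELETE_FD entries are torn down
 * through del_fd().
 */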
static int update_poll_array(struct pollfd **pollfd,
		struct ltt_kconsumerd_fd ***local_kconsumerd_fd)
{
	struct ltt_kconsumerd_fd *iter, *tmp;
	int i = 0;

	if (*pollfd != NULL) {
		free(*pollfd);
		*pollfd = NULL;
	}

	if (*local_kconsumerd_fd != NULL) {
		free(*local_kconsumerd_fd);
		*local_kconsumerd_fd = NULL;
	}

	DBG("Updating poll fd array");
	pthread_mutex_lock(&kconsumerd_lock_fds);

	*pollfd = malloc(fds_count * sizeof(struct pollfd));
	if (*pollfd == NULL) {
		perror("pollfd malloc");
		goto error_mem;
	}

	*local_kconsumerd_fd = malloc(fds_count * sizeof(struct ltt_kconsumerd_fd *));
	if (*local_kconsumerd_fd == NULL) {
		perror("local_kconsumerd_fd malloc");
		goto error_mem;
	}

	cds_list_for_each_entry_safe(iter, tmp, &kconsumerd_fd_list.head, list) {
		DBG("Inside for each");
		if (iter->state == ACTIVE_FD) {
			DBG("Active FD %d", iter->consumerd_fd);
			(*pollfd)[i].fd = iter->consumerd_fd;
			(*pollfd)[i].events = POLLIN | POLLPRI;
			(*local_kconsumerd_fd)[i] = iter;
			i++;
		} else if (iter->state == DELETE_FD) {
			del_fd(iter);
		}
	}
	update_fd_array = 0;
	pthread_mutex_unlock(&kconsumerd_lock_fds);
	return i;

error_mem:
	pthread_mutex_unlock(&kconsumerd_lock_fds);
	return -ENOMEM;
}

/*
 * thread_poll_fds
 *
 * This thread polls the fds in the ltt_fd_list to consume the data
 * and write it to tracefile if necessary.
 */
static void *thread_poll_fds(void *data)
{
	int num_rdy, num_hup, high_prio, ret, i;
	struct pollfd *pollfd = NULL;
	/* local view of the fds */
	struct ltt_kconsumerd_fd **local_kconsumerd_fd = NULL;
	/* local view of fds_count */
	int nb_fd = 0;

	ret = pipe(thread_pipe);
	if (ret < 0) {
		perror("Error creating pipe");
		goto end;
	}

	while (1) {
		high_prio = 0;
		num_hup = 0;

		/*
		 * the ltt_fd_list has been updated, we need to update our
		 * local array as well
		 */
		if (update_fd_array) {
			ret = update_poll_array(&pollfd, &local_kconsumerd_fd);
			if (ret < 0) {
				ERR("Error in allocating pollfd or local_outfds");
				send_error(KCONSUMERD_POLL_ERROR);
				goto end;
			}
			nb_fd = ret;
		}

		/* poll on the array of fds */
		DBG("polling on %d fd", nb_fd);
		num_rdy = poll(pollfd, nb_fd, POLL_TIMEOUT);
		DBG("poll num_rdy : %d", num_rdy);
		if (num_rdy == -1) {
			perror("Poll error");
			send_error(KCONSUMERD_POLL_ERROR);
			goto end;
		}

		/* Take care of high priority channels first. */
		for (i = 0; i < nb_fd; i++) {
			switch (pollfd[i].revents) {
			case POLLERR:
				ERR("Error returned in polling fd %d.", pollfd[i].fd);
				num_hup++;
				send_error(KCONSUMERD_POLL_ERROR);
				break;
			case POLLHUP:
				ERR("Polling fd %d tells it has hung up.", pollfd[i].fd);
				num_hup++;
				break;
			case POLLNVAL:
				ERR("Polling fd %d tells fd is not open.", pollfd[i].fd);
				send_error(KCONSUMERD_POLL_NVAL);
				num_hup++;
				break;
			case POLLPRI:
				DBG("Urgent read on fd %d", pollfd[i].fd);
				high_prio = 1;
				ret = read_subbuffer(local_kconsumerd_fd[i]);
				/* it's ok to have an unavailable sub-buffer */
				if (ret == EAGAIN) {
					ret = 0;
				}
				break;
			}
		}

		/* If every buffer FD has hung up, we end the read loop here */
		if (nb_fd > 0 && num_hup == nb_fd) {
			DBG("every buffer FD has hung up\n");
			send_error(KCONSUMERD_POLL_HUP);
			continue;
		}

		/* Take care of low priority channels. */
		if (!high_prio) {
			for (i = 0; i < nb_fd; i++) {
				switch (pollfd[i].revents) {
				case POLLIN:
					DBG("Normal read on fd %d", pollfd[i].fd);
					ret = read_subbuffer(local_kconsumerd_fd[i]);
					/* it's ok to have an unavailable sub-buffer */
					if (ret == EAGAIN) {
						ret = 0;
					}
					break;
				}
			}
		}
	}

end:
	if (pollfd != NULL) {
		free(pollfd);
		pollfd = NULL;
	}
	if (local_kconsumerd_fd != NULL) {
		free(local_kconsumerd_fd);
		local_kconsumerd_fd = NULL;
	}
	return NULL;
}

/*
 * usage function on stderr
 */
static void usage(void)
{
	fprintf(stderr, "Usage: %s OPTIONS\n\nOptions:\n", progname);
	fprintf(stderr, "  -h, --help                       "
			"Display this usage.\n");
	fprintf(stderr, "  -c, --kconsumerd-cmd-sock PATH   "
			"Specify path for the command socket\n");
	fprintf(stderr, "  -e, --kconsumerd-err-sock PATH   "
			"Specify path for the error socket\n");
	fprintf(stderr, "  -d, --daemonize                  "
			"Start as a daemon.\n");
	fprintf(stderr, "  -q, --quiet                      "
			"No output at all.\n");
	fprintf(stderr, "  -v, --verbose                    "
			"Verbose mode. Activate DBG() macro.\n");
	fprintf(stderr, "  -V, --version                    "
			"Show version number.\n");
}

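/*
 * Typical invocation (socket paths below are examples only):
 *
 *	kconsumerd -d -c /tmp/kconsumerd/command -e /tmp/kconsumerd/error
 *
 * In normal use ltt-sessiond starts kconsumerd itself and passes the
 * command and error socket paths on the command line; when no path is
 * given, the KCONSUMERD_CMD_SOCK_PATH and KCONSUMERD_ERR_SOCK_PATH
 * defaults are used (see main() below).
 */
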
/*
 * daemon argument parsing
 */
static void parse_args(int argc, char **argv)
{
	int c;

	static struct option long_options[] = {
		{ "kconsumerd-cmd-sock", 1, 0, 'c' },
		{ "kconsumerd-err-sock", 1, 0, 'e' },
		{ "daemonize", 0, 0, 'd' },
		{ "help", 0, 0, 'h' },
		{ "quiet", 0, 0, 'q' },
		{ "verbose", 0, 0, 'v' },
		{ "version", 0, 0, 'V' },
		{ NULL, 0, 0, 0 }
	};

	while (1) {
		int option_index = 0;
		c = getopt_long(argc, argv, "dhqvV" "c:e:", long_options, &option_index);
		if (c == -1) {
			break;
		}

		switch (c) {
		case 0:
			fprintf(stderr, "option %s", long_options[option_index].name);
			if (optarg) {
				fprintf(stderr, " with arg %s\n", optarg);
			}
			break;
		case 'c':
			snprintf(command_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'e':
			snprintf(error_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'd':
			opt_daemon = 1;
			break;
		case 'h':
			usage();
			exit(EXIT_FAILURE);
		case 'q':
			opt_quiet = 1;
			break;
		case 'v':
			opt_verbose = 1;
			break;
		case 'V':
			fprintf(stdout, "%s\n", VERSION);
			exit(EXIT_SUCCESS);
		default:
			usage();
			exit(EXIT_FAILURE);
		}
	}
}

/*
 * main
 */
int main(int argc, char **argv)
{
	int i;
	int ret = 0;
	void *status;

	/* Parse arguments */
	progname = argv[0];
	parse_args(argc, argv);

	/* Daemonize */
	if (opt_daemon) {
		ret = daemon(0, 0);
		if (ret < 0) {
			perror("daemon");
			goto error;
		}
	}

	if (strlen(command_sock_path) == 0) {
		snprintf(command_sock_path, PATH_MAX, "%s",
				KCONSUMERD_CMD_SOCK_PATH);
	}
	if (strlen(error_sock_path) == 0) {
		snprintf(error_sock_path, PATH_MAX, "%s",
				KCONSUMERD_ERR_SOCK_PATH);
	}

	if (set_signal_handler() < 0) {
		goto error;
	}

	/* Connect to the socket created by ltt-sessiond to report errors */
	DBG("Connecting to error socket %s", error_sock_path);
	error_socket = lttcomm_connect_unix_sock(error_sock_path);
	/* not a fatal error, but all communication with ltt-sessiond will fail */
	if (error_socket < 0) {
		WARN("Cannot connect to error socket, is ltt-sessiond started ?");
	}

	/* Create the thread to manage the reception of fds */
	ret = pthread_create(&threads[0], NULL, thread_receive_fds, (void *) NULL);
	if (ret != 0) {
		perror("pthread_create");
		goto error;
	}

	/* Create thread to manage the polling/writing of traces */
	ret = pthread_create(&threads[1], NULL, thread_poll_fds, (void *) NULL);
	if (ret != 0) {
		perror("pthread_create");
		goto error;
	}

	for (i = 0; i < 2; i++) {
		ret = pthread_join(threads[i], &status);
		if (ret != 0) {
			perror("pthread_join");
			goto error;
		}
	}
	ret = EXIT_SUCCESS;
	send_error(KCONSUMERD_EXIT_SUCCESS);
	goto end;

error:
	ret = EXIT_FAILURE;
	send_error(KCONSUMERD_EXIT_FAILURE);

end:
	cleanup();

	return ret;
}