/*
 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <poll.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>
#include <urcu/list.h>

#include "libkernelctl.h"
#include "liblttkconsumerd.h"
#include "lttngerr.h"	/* DBG()/ERR() logging helpers */

static struct kconsumerd_global_data {
	/*
	 * kconsumerd_data.lock protects kconsumerd_data.fd_list,
	 * kconsumerd_data.fds_count, and kconsumerd_data.need_update. It
	 * ensures the count matches the number of items in the fd_list.
	 * It ensures the list updates *always* trigger an fd_array
	 * update (therefore need to make list update vs
	 * kconsumerd_data.need_update flag update atomic, and also flag
	 * read, fd array and flag clear atomic). See the usage sketch
	 * after this declaration.
	 */
	pthread_mutex_t lock;
	/*
	 * Number of element for the list below. Protected by
	 * kconsumerd_data.lock.
	 */
	unsigned int fds_count;
	/*
	 * List of FDs. Protected by kconsumerd_data.lock.
	 */
	struct kconsumerd_fd_list fd_list;
	/*
	 * Flag specifying if the local array of FDs needs update in the
	 * poll function. Protected by kconsumerd_data.lock.
	 */
	unsigned int need_update;
} kconsumerd_data = {
	.fd_list.head = CDS_LIST_HEAD_INIT(kconsumerd_data.fd_list.head),
};
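
/*
 * Usage sketch for the locking protocol documented above (illustration
 * only, mirroring what kconsumerd_add_fd() and the poll thread below
 * actually do; "new_fd" is a hypothetical entry): list updates and the
 * need_update flag write must share one critical section, and so must
 * the flag read, fd array rebuild and flag clear.
 *
 *	pthread_mutex_lock(&kconsumerd_data.lock);
 *	cds_list_add(&new_fd->list, &kconsumerd_data.fd_list.head);
 *	kconsumerd_data.fds_count++;
 *	kconsumerd_data.need_update = 1;
 *	pthread_mutex_unlock(&kconsumerd_data.lock);
 */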

/* communication with splice */
static int kconsumerd_thread_pipe[2];

/* pipe to wake the poll thread when necessary */
static int kconsumerd_poll_pipe[2];

/* timeout parameter, to control the polling thread grace period */
static int kconsumerd_poll_timeout = -1;

/* socket to communicate errors with sessiond */
static int kconsumerd_error_socket;

/* socket to exchange commands with sessiond */
static char *kconsumerd_command_sock_path;

/* flag to inform the polling thread to quit when all fd hung up */
static int kconsumerd_quit = 0;

/*
 * kconsumerd_set_error_socket
 *
 * Set the error socket
 */
void kconsumerd_set_error_socket(int sock)
{
	kconsumerd_error_socket = sock;
}

/*
 * kconsumerd_set_command_socket_path
 *
 * Set the command socket path
 */
void kconsumerd_set_command_socket_path(char *sock)
{
	kconsumerd_command_sock_path = sock;
}

/*
 * kconsumerd_find_session_fd
 *
 * Find a session fd in the global list.
 * The kconsumerd_data.lock must be held by the caller; this function
 * must not take it itself, since kconsumerd_add_fd() calls it with the
 * (non-recursive) lock already held.
 *
 * Return 1 if found else 0
 */
static int kconsumerd_find_session_fd(int fd)
{
	struct kconsumerd_fd *iter;

	cds_list_for_each_entry(iter, &kconsumerd_data.fd_list.head, list) {
		if (iter->sessiond_fd == fd) {
			DBG("Duplicate session fd %d", fd);
			return 1;
		}
	}

	return 0;
}

/*
 * kconsumerd_del_fd
 *
 * Remove a fd from the global list protected by a mutex
 */
static void kconsumerd_del_fd(struct kconsumerd_fd *lcf)
{
	pthread_mutex_lock(&kconsumerd_data.lock);
	cds_list_del(&lcf->list);
	if (kconsumerd_data.fds_count > 0) {
		kconsumerd_data.fds_count--;
		if (lcf != NULL) {
			close(lcf->out_fd);
			close(lcf->consumerd_fd);
			free(lcf);
			lcf = NULL;
		}
	}
	kconsumerd_data.need_update = 1;
	pthread_mutex_unlock(&kconsumerd_data.lock);
}

/*
 * kconsumerd_add_fd
 *
 * Add a fd to the global list protected by a mutex
 */
static int kconsumerd_add_fd(struct lttcomm_kconsumerd_msg *buf, int consumerd_fd)
{
	int ret;
	struct kconsumerd_fd *tmp_fd;

	pthread_mutex_lock(&kconsumerd_data.lock);
	/* Check if already exist */
	ret = kconsumerd_find_session_fd(buf->fd);
	if (ret == 1) {
		goto end;
	}

	tmp_fd = malloc(sizeof(struct kconsumerd_fd));
	tmp_fd->sessiond_fd = buf->fd;
	tmp_fd->consumerd_fd = consumerd_fd;
	tmp_fd->state = buf->state;
	tmp_fd->max_sb_size = buf->max_sb_size;
	strncpy(tmp_fd->path_name, buf->path_name, PATH_MAX);

	/* Opening the tracefile in write mode */
	ret = open(tmp_fd->path_name,
			O_WRONLY|O_CREAT|O_TRUNC, S_IRWXU|S_IRWXG|S_IRWXO);
	if (ret < 0) {
		ERR("Opening %s", tmp_fd->path_name);
		perror("open");
		free(tmp_fd);
		goto end;
	}
	tmp_fd->out_fd = ret;
	tmp_fd->out_fd_offset = 0;

	DBG("Adding %s (%d, %d, %d)", tmp_fd->path_name,
			tmp_fd->sessiond_fd, tmp_fd->consumerd_fd, tmp_fd->out_fd);

	cds_list_add(&tmp_fd->list, &kconsumerd_data.fd_list.head);
	kconsumerd_data.fds_count++;
	kconsumerd_data.need_update = 1;
end:
	pthread_mutex_unlock(&kconsumerd_data.lock);
	return ret;
}

/*
 * kconsumerd_change_fd_state
 *
 * Update a fd according to what we just received
 */
static void kconsumerd_change_fd_state(int sessiond_fd,
		enum kconsumerd_fd_state state)
{
	struct kconsumerd_fd *iter;

	pthread_mutex_lock(&kconsumerd_data.lock);
	cds_list_for_each_entry(iter, &kconsumerd_data.fd_list.head, list) {
		if (iter->sessiond_fd == sessiond_fd) {
			iter->state = state;
			break;
		}
	}
	kconsumerd_data.need_update = 1;
	pthread_mutex_unlock(&kconsumerd_data.lock);
}

/*
 * kconsumerd_update_poll_array
 *
 * Allocate the pollfd structure and the local view of the out fds
 * to avoid doing a lookup in the linked list and concurrency issues
 * when writing is needed.
 * Returns the number of fds in the structures.
 * Called with kconsumerd_data.lock held.
 */
static int kconsumerd_update_poll_array(struct pollfd **pollfd,
		struct kconsumerd_fd **local_kconsumerd_fd)
{
	struct kconsumerd_fd *iter;
	int i = 0;

	DBG("Updating poll fd array");

	cds_list_for_each_entry(iter, &kconsumerd_data.fd_list.head, list) {
		DBG("Inside for each");
		if (iter->state == ACTIVE_FD) {
			DBG("Active FD %d", iter->consumerd_fd);
			(*pollfd)[i].fd = iter->consumerd_fd;
			(*pollfd)[i].events = POLLIN | POLLPRI;
			local_kconsumerd_fd[i] = iter;
			i++;
		}
	}

	/*
	 * insert the kconsumerd_poll_pipe at the end of the array and don't
	 * increment i so nb_fd is the number of real FD
	 */
	(*pollfd)[i].fd = kconsumerd_poll_pipe[0];
	(*pollfd)[i].events = POLLIN;
	return i;
}

/*
 * kconsumerd_on_read_subbuffer_mmap
 *
 * mmap the ring buffer, read it and write the data to the tracefile.
 * Returns the number of bytes written
 */
static int kconsumerd_on_read_subbuffer_mmap(
		struct kconsumerd_fd *kconsumerd_fd, unsigned long len)
{
	unsigned long mmap_len, mmap_offset, padded_len, padding_len;
	char *mmap_base;
	char *padding = NULL;
	long ret = 0;
	off_t orig_offset = kconsumerd_fd->out_fd_offset;
	int fd = kconsumerd_fd->consumerd_fd;
	int outfd = kconsumerd_fd->out_fd;

	/* get the padded subbuffer size to know the padding required */
	ret = kernctl_get_padded_subbuf_size(fd, &padded_len);
	if (ret != 0) {
		ret = errno;
		perror("kernctl_get_padded_subbuf_size");
		goto end;
	}
	padding_len = padded_len - len;
	padding = malloc(padding_len * sizeof(char));
	memset(padding, '\0', padding_len);

	/* get the len of the mmap region */
	ret = kernctl_get_mmap_len(fd, &mmap_len);
	if (ret != 0) {
		ret = errno;
		perror("kernctl_get_mmap_len");
		goto end;
	}

	/* get the offset inside the fd to mmap */
	ret = kernctl_get_mmap_read_offset(fd, &mmap_offset);
	if (ret != 0) {
		ret = errno;
		perror("kernctl_get_mmap_read_offset");
		goto end;
	}

	mmap_base = mmap(NULL, mmap_len, PROT_READ, MAP_PRIVATE, fd, mmap_offset);
	if (mmap_base == MAP_FAILED) {
		perror("Error mmaping");
		ret = -1;
		goto end;
	}

	while (len > 0) {
		ret = write(outfd, mmap_base, len);
		if (ret >= len) {
			len = 0;
		} else if (ret < 0) {
			ret = errno;
			perror("Error in file write");
			goto end;
		}
		/* This won't block, but will start writeout asynchronously */
		sync_file_range(outfd, kconsumerd_fd->out_fd_offset, ret,
				SYNC_FILE_RANGE_WRITE);
		kconsumerd_fd->out_fd_offset += ret;
	}

	/* once all the data is written, write the padding to disk */
	ret = write(outfd, padding, padding_len);
	if (ret < 0) {
		ret = errno;
		perror("Error writing padding to file");
		goto end;
	}

	/*
	 * This does a blocking write-and-wait on any page that belongs to the
	 * subbuffer prior to the one we just wrote.
	 * Don't care about error values, as these are just hints and ways to
	 * limit the amount of page cache used.
	 */
	if (orig_offset >= kconsumerd_fd->max_sb_size) {
		sync_file_range(outfd, orig_offset - kconsumerd_fd->max_sb_size,
				kconsumerd_fd->max_sb_size,
				SYNC_FILE_RANGE_WAIT_BEFORE
				| SYNC_FILE_RANGE_WRITE
				| SYNC_FILE_RANGE_WAIT_AFTER);

		/*
		 * Give hints to the kernel about how we access the file:
		 * POSIX_FADV_DONTNEED : we won't re-access data in a near
		 * future after we write it.
		 *
		 * We need to call fadvise again after the file grows because
		 * the kernel does not seem to apply fadvise to non-existing
		 * parts of the file.
		 *
		 * Call fadvise _after_ having waited for the page writeback to
		 * complete because the dirty page writeback semantic is not
		 * well defined. So it can be expected to lead to lower
		 * throughput in streaming.
		 */
		posix_fadvise(outfd, orig_offset - kconsumerd_fd->max_sb_size,
				kconsumerd_fd->max_sb_size, POSIX_FADV_DONTNEED);
	}

end:
	if (padding != NULL) {
		free(padding);
	}
	return ret;
}

/*
 * kconsumerd_on_read_subbuffer
 *
 * Splice the data from the ring buffer to the tracefile.
 * Returns the number of bytes spliced
 */
static int kconsumerd_on_read_subbuffer(
		struct kconsumerd_fd *kconsumerd_fd, unsigned long len)
{
	long ret = 0;
	loff_t offset = 0;
	off_t orig_offset = kconsumerd_fd->out_fd_offset;
	int fd = kconsumerd_fd->consumerd_fd;
	int outfd = kconsumerd_fd->out_fd;

	while (len > 0) {
		DBG("splice chan to pipe offset %lu (fd : %d)",
				(unsigned long)offset, fd);
		ret = splice(fd, &offset, kconsumerd_thread_pipe[1], NULL, len,
				SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("splice chan to pipe ret %ld", ret);
		if (ret < 0) {
			ret = errno;
			perror("Error in relay splice");
			goto splice_error;
		}

		ret = splice(kconsumerd_thread_pipe[0], NULL, outfd, NULL, ret,
				SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("splice pipe to file %ld", ret);
		if (ret < 0) {
			ret = errno;
			perror("Error in file splice");
			goto splice_error;
		}
		if (ret >= len) {
			len = 0;
		}

		/* This won't block, but will start writeout asynchronously */
		sync_file_range(outfd, kconsumerd_fd->out_fd_offset, ret,
				SYNC_FILE_RANGE_WRITE);
		kconsumerd_fd->out_fd_offset += ret;
	}

	/*
	 * This does a blocking write-and-wait on any page that belongs to the
	 * subbuffer prior to the one we just wrote.
	 * Don't care about error values, as these are just hints and ways to
	 * limit the amount of page cache used.
	 */
	if (orig_offset >= kconsumerd_fd->max_sb_size) {
		sync_file_range(outfd, orig_offset - kconsumerd_fd->max_sb_size,
				kconsumerd_fd->max_sb_size,
				SYNC_FILE_RANGE_WAIT_BEFORE
				| SYNC_FILE_RANGE_WRITE
				| SYNC_FILE_RANGE_WAIT_AFTER);

		/*
		 * Give hints to the kernel about how we access the file:
		 * POSIX_FADV_DONTNEED : we won't re-access data in a near
		 * future after we write it.
		 *
		 * We need to call fadvise again after the file grows because
		 * the kernel does not seem to apply fadvise to non-existing
		 * parts of the file.
		 *
		 * Call fadvise _after_ having waited for the page writeback to
		 * complete because the dirty page writeback semantic is not
		 * well defined. So it can be expected to lead to lower
		 * throughput in streaming.
		 */
		posix_fadvise(outfd, orig_offset - kconsumerd_fd->max_sb_size,
				kconsumerd_fd->max_sb_size, POSIX_FADV_DONTNEED);
	}
	goto end;

splice_error:
	/* send the appropriate error description to sessiond */
	switch (ret) {
	case EBADF:
		kconsumerd_send_error(KCONSUMERD_SPLICE_EBADF);
		break;
	case EINVAL:
		kconsumerd_send_error(KCONSUMERD_SPLICE_EINVAL);
		break;
	case ENOMEM:
		kconsumerd_send_error(KCONSUMERD_SPLICE_ENOMEM);
		break;
	case ESPIPE:
		kconsumerd_send_error(KCONSUMERD_SPLICE_ESPIPE);
		break;
	}

end:
	return ret;
}

/*
 * kconsumerd_read_subbuffer
 *
 * Consume data on a file descriptor and write it on a trace file
 */
static int kconsumerd_read_subbuffer(struct kconsumerd_fd *kconsumerd_fd)
{
	unsigned long len;
	int err;
	long ret = 0;
	int infd = kconsumerd_fd->consumerd_fd;

	DBG("In kconsumerd_read_subbuffer (infd : %d)", infd);
	/* Get the next subbuffer */
	err = kernctl_get_next_subbuf(infd);
	if (err != 0) {
		ret = errno;
		perror("Reserving sub buffer failed (everything is normal, "
				"it is due to concurrency)");
		goto end;
	}

	switch (DEFAULT_KERNEL_CHANNEL_OUTPUT) {
	case LTTNG_KERNEL_SPLICE:
		/* read the whole subbuffer */
		err = kernctl_get_padded_subbuf_size(infd, &len);
		if (err != 0) {
			ret = errno;
			perror("Getting sub-buffer len failed.");
			goto end;
		}

		/* splice the subbuffer to the tracefile */
		ret = kconsumerd_on_read_subbuffer(kconsumerd_fd, len);
		if (ret < 0) {
			/*
			 * display the error but continue processing to try
			 * to release the subbuffer
			 */
			ERR("Error splicing to tracefile");
		}
		break;
	case LTTNG_KERNEL_MMAP:
		/* read the used subbuffer size */
		err = kernctl_get_subbuf_size(infd, &len);
		if (err != 0) {
			ret = errno;
			perror("Getting sub-buffer len failed.");
			goto end;
		}

		/* write the subbuffer to the tracefile */
		ret = kconsumerd_on_read_subbuffer_mmap(kconsumerd_fd, len);
		if (ret < 0) {
			/*
			 * display the error but continue processing to try
			 * to release the subbuffer
			 */
			ERR("Error writing to tracefile");
		}
		break;
	default:
		ERR("Unknown output method");
		ret = -1;
	}

	err = kernctl_put_next_subbuf(infd);
	if (err != 0) {
		ret = errno;
		if (errno == EFAULT) {
			perror("Error in unreserving sub buffer\n");
		} else if (errno == EIO) {
			/* Should never happen with newer LTTng versions */
			perror("Reader has been pushed by the writer, last sub-buffer corrupted.");
		}
		goto end;
	}

end:
	return ret;
}

/*
 * kconsumerd_consumerd_recv_fd
 *
 * Receives an array of file descriptors and the associated
 * structures describing each fd (path name).
 * Returns the size of received data
 * (see the sender-side sketch after this function).
 */
static int kconsumerd_consumerd_recv_fd(int sfd, int size,
		enum kconsumerd_command cmd_type)
{
	struct msghdr msg;
	struct iovec iov[1];
	int ret = 0, i, tmp2;
	struct cmsghdr *cmsg;
	int nb_fd;
	char recv_fd[CMSG_SPACE(sizeof(int))];
	struct lttcomm_kconsumerd_msg lkm;

	/* the number of fds we are about to receive */
	nb_fd = size / sizeof(struct lttcomm_kconsumerd_msg);

	for (i = 0; i < nb_fd; i++) {
		memset(&msg, 0, sizeof(msg));

		/* Prepare to receive the structures */
		iov[0].iov_base = &lkm;
		iov[0].iov_len = sizeof(lkm);
		msg.msg_iov = iov;
		msg.msg_iovlen = 1;

		msg.msg_control = recv_fd;
		msg.msg_controllen = sizeof(recv_fd);

		DBG("Waiting to receive fd");
		if ((ret = recvmsg(sfd, &msg, 0)) < 0) {
			perror("recvmsg");
			continue;
		}

		if (ret != (size / nb_fd)) {
			ERR("Received only %d, expected %d", ret, size / nb_fd);
			kconsumerd_send_error(KCONSUMERD_ERROR_RECV_FD);
			goto end;
		}

		cmsg = CMSG_FIRSTHDR(&msg);
		if (!cmsg) {
			ERR("Invalid control message header");
			ret = -1;
			kconsumerd_send_error(KCONSUMERD_ERROR_RECV_FD);
			goto end;
		}

		/* if we received fds */
		if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
			switch (cmd_type) {
			case ADD_STREAM:
				DBG("kconsumerd_add_fd %s (%d)", lkm.path_name, (CMSG_DATA(cmsg)[0]));
				ret = kconsumerd_add_fd(&lkm, (CMSG_DATA(cmsg)[0]));
				if (ret < 0) {
					kconsumerd_send_error(KCONSUMERD_OUTFD_ERROR);
					goto end;
				}
				break;
			case UPDATE_STREAM:
				kconsumerd_change_fd_state(lkm.fd, lkm.state);
				break;
			default:
				break;
			}

			/* signal the poll thread */
			tmp2 = write(kconsumerd_poll_pipe[1], "4", 1);
		} else {
			ERR("Didn't receive any fd");
			kconsumerd_send_error(KCONSUMERD_ERROR_RECV_FD);
			ret = -1;
			goto end;
		}
	}

end:
	return ret;
}
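
/*
 * For reference, the matching sender side in ltt-sessiond is expected to
 * attach each fd as an SCM_RIGHTS control message alongside one
 * struct lttcomm_kconsumerd_msg. A minimal sender sketch (illustration
 * only; "lkm", "fd_to_send", "sock" and "ret" are hypothetical locals):
 *
 *	struct msghdr msg;
 *	struct iovec iov[1];
 *	char ctrl[CMSG_SPACE(sizeof(int))];
 *	struct cmsghdr *cmsg;
 *
 *	memset(&msg, 0, sizeof(msg));
 *	iov[0].iov_base = &lkm;
 *	iov[0].iov_len = sizeof(lkm);
 *	msg.msg_iov = iov;
 *	msg.msg_iovlen = 1;
 *	msg.msg_control = ctrl;
 *	msg.msg_controllen = sizeof(ctrl);
 *	cmsg = CMSG_FIRSTHDR(&msg);
 *	cmsg->cmsg_level = SOL_SOCKET;
 *	cmsg->cmsg_type = SCM_RIGHTS;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
 *	memcpy(CMSG_DATA(cmsg), &fd_to_send, sizeof(int));
 *	ret = sendmsg(sock, &msg, 0);
 */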

/*
 * kconsumerd_thread_poll_fds
 *
 * This thread polls the fds in the ltt_fd_list to consume the data
 * and write it to tracefile if necessary.
 */
void *kconsumerd_thread_poll_fds(void *data)
{
	int num_rdy, num_hup, high_prio, ret, i;
	struct pollfd *pollfd = NULL;
	/* local view of the fds */
	struct kconsumerd_fd **local_kconsumerd_fd = NULL;
	/* local view of kconsumerd_data.fds_count */
	int nb_fd = 0;
	char tmp;
	int tmp2;

	ret = pipe(kconsumerd_thread_pipe);
	if (ret < 0) {
		perror("Error creating pipe");
		goto end;
	}

	local_kconsumerd_fd = malloc(sizeof(struct kconsumerd_fd));

	while (1) {
		high_prio = 0;
		num_hup = 0;

		/*
		 * the ltt_fd_list has been updated, we need to update our
		 * local array as well
		 */
		pthread_mutex_lock(&kconsumerd_data.lock);
		if (kconsumerd_data.need_update) {
			if (pollfd != NULL) {
				free(pollfd);
				pollfd = NULL;
			}
			if (local_kconsumerd_fd != NULL) {
				free(local_kconsumerd_fd);
				local_kconsumerd_fd = NULL;
			}

			/* allocate for all fds + 1 for the kconsumerd_poll_pipe */
			pollfd = malloc((kconsumerd_data.fds_count + 1) * sizeof(struct pollfd));
			if (pollfd == NULL) {
				perror("pollfd malloc");
				pthread_mutex_unlock(&kconsumerd_data.lock);
				goto end;
			}

			/* allocate for all fds + 1 for the kconsumerd_poll_pipe */
			local_kconsumerd_fd = malloc((kconsumerd_data.fds_count + 1) *
					sizeof(struct kconsumerd_fd));
			if (local_kconsumerd_fd == NULL) {
				perror("local_kconsumerd_fd malloc");
				pthread_mutex_unlock(&kconsumerd_data.lock);
				goto end;
			}
			ret = kconsumerd_update_poll_array(&pollfd, local_kconsumerd_fd);
			if (ret < 0) {
				ERR("Error in allocating pollfd or local_outfds");
				kconsumerd_send_error(KCONSUMERD_POLL_ERROR);
				pthread_mutex_unlock(&kconsumerd_data.lock);
				goto end;
			}
			nb_fd = ret;
			kconsumerd_data.need_update = 0;
		}
		pthread_mutex_unlock(&kconsumerd_data.lock);

		/* poll on the array of fds */
		DBG("polling on %d fd", nb_fd + 1);
		num_rdy = poll(pollfd, nb_fd + 1, kconsumerd_poll_timeout);
		DBG("poll num_rdy : %d", num_rdy);
		if (num_rdy == -1) {
			perror("Poll error");
			kconsumerd_send_error(KCONSUMERD_POLL_ERROR);
			goto end;
		} else if (num_rdy == 0) {
			DBG("Polling thread timed out");
			goto end;
		}

		/* No FDs and kconsumerd_quit, kconsumerd_cleanup the thread */
		if (nb_fd == 0 && kconsumerd_quit == 1) {
			goto end;
		}

		/*
		 * If the kconsumerd_poll_pipe triggered poll go
		 * directly to the beginning of the loop to update the
		 * array. We want to prioritize array update over
		 * low-priority reads.
		 */
		if (pollfd[nb_fd].revents == POLLIN) {
			DBG("kconsumerd_poll_pipe wake up");
			tmp2 = read(kconsumerd_poll_pipe[0], &tmp, 1);
			continue;
		}

		/* Take care of high priority channels first. */
		for (i = 0; i < nb_fd; i++) {
			switch (pollfd[i].revents) {
			case POLLERR:
				ERR("Error returned in polling fd %d.", pollfd[i].fd);
				kconsumerd_del_fd(local_kconsumerd_fd[i]);
				num_hup++;
				break;
			case POLLHUP:
				DBG("Polling fd %d tells it has hung up.", pollfd[i].fd);
				kconsumerd_del_fd(local_kconsumerd_fd[i]);
				num_hup++;
				break;
			case POLLNVAL:
				ERR("Polling fd %d tells fd is not open.", pollfd[i].fd);
				kconsumerd_del_fd(local_kconsumerd_fd[i]);
				num_hup++;
				break;
			case POLLPRI:
				DBG("Urgent read on fd %d", pollfd[i].fd);
				high_prio = 1;
				ret = kconsumerd_read_subbuffer(local_kconsumerd_fd[i]);
				/* it's ok to have an unavailable sub-buffer */
				if (ret == EAGAIN) {
					ret = 0;
				}
				break;
			}
		}

		/* If every buffer FD has hung up, we end the read loop here */
		if (nb_fd > 0 && num_hup == nb_fd) {
			DBG("every buffer FD has hung up\n");
			if (kconsumerd_quit == 1) {
				goto end;
			}
			continue;
		}

		/* Take care of low priority channels. */
		if (high_prio == 0) {
			for (i = 0; i < nb_fd; i++) {
				if (pollfd[i].revents == POLLIN) {
					DBG("Normal read on fd %d", pollfd[i].fd);
					ret = kconsumerd_read_subbuffer(local_kconsumerd_fd[i]);
					/* it's ok to have an unavailable subbuffer */
					if (ret == EAGAIN) {
						ret = 0;
					}
				}
			}
		}
	}

end:
	DBG("polling thread exiting");
	if (pollfd != NULL) {
		free(pollfd);
		pollfd = NULL;
	}
	if (local_kconsumerd_fd != NULL) {
		free(local_kconsumerd_fd);
		local_kconsumerd_fd = NULL;
	}
	kconsumerd_cleanup();
	return NULL;
}

/*
 * kconsumerd_create_poll_pipe
 *
 * create the pipe to wake the polling thread when needed
 * (see the wiring sketch below)
 */
int kconsumerd_create_poll_pipe(void)
{
	return pipe(kconsumerd_poll_pipe);
}
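
/*
 * Typical wiring of this library by the kconsumerd daemon (illustration
 * only; everything except the exported kconsumerd_* functions is a
 * hypothetical name):
 *
 *	pthread_t fd_thread, poll_thread;
 *
 *	kconsumerd_set_error_socket(error_sock);
 *	kconsumerd_set_command_socket_path(command_sock_path);
 *	if (kconsumerd_create_poll_pipe() < 0) {
 *		exit(EXIT_FAILURE);
 *	}
 *	pthread_create(&fd_thread, NULL, kconsumerd_thread_receive_fds, NULL);
 *	pthread_create(&poll_thread, NULL, kconsumerd_thread_poll_fds, NULL);
 *	pthread_join(fd_thread, NULL);
 *	pthread_join(poll_thread, NULL);
 */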

/*
 * kconsumerd_thread_receive_fds
 *
 * This thread listens on the consumerd socket and
 * receives the file descriptors from ltt-sessiond
 */
void *kconsumerd_thread_receive_fds(void *data)
{
	int sock, client_socket, ret;
	struct lttcomm_kconsumerd_header tmp;

	DBG("Creating command socket %s", kconsumerd_command_sock_path);
	unlink(kconsumerd_command_sock_path);
	client_socket = lttcomm_create_unix_sock(kconsumerd_command_sock_path);
	if (client_socket < 0) {
		ERR("Cannot create command socket");
		goto end;
	}

	ret = lttcomm_listen_unix_sock(client_socket);
	if (ret < 0) {
		goto end;
	}

	DBG("Sending ready command to ltt-sessiond");
	ret = kconsumerd_send_error(KCONSUMERD_COMMAND_SOCK_READY);
	if (ret < 0) {
		ERR("Error sending ready command to ltt-sessiond");
		goto end;
	}

	/* Blocking call, waiting for transmission */
	sock = lttcomm_accept_unix_sock(client_socket);
	if (sock <= 0) {
		ERR("On accept");
		goto end;
	}

	while (1) {
		/* We first get the number of fd we are about to receive */
		ret = lttcomm_recv_unix_sock(sock, &tmp,
				sizeof(struct lttcomm_kconsumerd_header));
		if (ret <= 0) {
			ERR("Communication interrupted on command socket");
			goto end;
		}
		if (tmp.cmd_type == STOP) {
			DBG("Received STOP command");
			goto end;
		}
		/* we received a command to add or update fds */
		ret = kconsumerd_consumerd_recv_fd(sock, tmp.payload_size, tmp.cmd_type);
		if (ret <= 0) {
			ERR("Receiving the FD, exiting");
			goto end;
		}
	}

end:
	DBG("kconsumerd_thread_receive_fds exiting");

	/*
	 * when all fds have hung up, the polling thread
	 * can exit cleanly
	 */
	kconsumerd_quit = 1;

	/*
	 * 2s of grace period, if no polling events occur during
	 * this period, the polling thread will exit even if there
	 * are still open FDs (should not happen, but safety mechanism).
	 */
	kconsumerd_poll_timeout = KCONSUMERD_POLL_GRACE_PERIOD;

	/* wake up the polling thread */
	ret = write(kconsumerd_poll_pipe[1], "4", 1);
	if (ret < 0) {
		perror("poll pipe write");
	}
	return NULL;
}

/*
 * kconsumerd_cleanup
 *
 * Cleanup the daemon's socket on exit
 */
void kconsumerd_cleanup(void)
{
	struct kconsumerd_fd *iter, *tmp_iter;

	/* remove the socket file */
	unlink(kconsumerd_command_sock_path);

	/*
	 * close all outfd. kconsumerd_del_fd() unlinks and frees each
	 * entry, so the _safe iteration variant is required here.
	 */
	cds_list_for_each_entry_safe(iter, tmp_iter,
			&kconsumerd_data.fd_list.head, list) {
		kconsumerd_del_fd(iter);
	}
}

/*
 * kconsumerd_send_error
 *
 * send return code to ltt-sessiond
 */
int kconsumerd_send_error(enum lttcomm_return_code cmd)
{
	if (kconsumerd_error_socket > 0) {
		return lttcomm_send_unix_sock(kconsumerd_error_socket, &cmd,
				sizeof(enum lttcomm_sessiond_command));
	}

	return 0;
}