Fix order of streams
[lttng-tools.git] / liblttng-consumer / lttng-consumer.c
1/*
2 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; only version 2
8 * of the License.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 */
19
20#define _GNU_SOURCE
21#include <assert.h>
22#include <fcntl.h>
23#include <poll.h>
24#include <pthread.h>
25#include <stdlib.h>
26#include <string.h>
27#include <sys/mman.h>
28#include <sys/socket.h>
29#include <sys/types.h>
30#include <unistd.h>
31
32#include <lttng-kernel-ctl.h>
33#include <lttng-sessiond-comm.h>
34#include <lttng/lttng-consumer.h>
35#include <lttng/lttng-kconsumer.h>
36#include <lttng/lttng-ustconsumer.h>
37#include <lttngerr.h>
38
39struct lttng_consumer_global_data consumer_data = {
40 .stream_list.head = CDS_LIST_HEAD_INIT(consumer_data.stream_list.head),
41 .channel_list.head = CDS_LIST_HEAD_INIT(consumer_data.channel_list.head),
42 .stream_count = 0,
43 .need_update = 1,
44 .type = LTTNG_CONSUMER_UNKNOWN,
45};
46
47/* timeout parameter, to control the polling thread grace period. */
48int consumer_poll_timeout = -1;
49
50/*
 51 * Flag to inform the polling thread to quit when all FDs have hung up. Updated
 52 * by consumer_thread_receive_fds when it notices that all FDs have hung up.
53 * Also updated by the signal handler (consumer_should_exit()). Read by the
54 * polling threads.
55 */
56volatile int consumer_quit = 0;
57
58/*
59 * Find a stream. The consumer_data.lock must be locked during this
60 * call.
61 */
62static struct lttng_consumer_stream *consumer_find_stream(int key)
63{
64 struct lttng_consumer_stream *iter;
65
66 cds_list_for_each_entry(iter, &consumer_data.stream_list.head, list) {
67 if (iter->key == key) {
68 DBG("Found stream key %d", key);
69 return iter;
70 }
71 }
72 return NULL;
73}
74
75static struct lttng_consumer_channel *consumer_find_channel(int key)
76{
77 struct lttng_consumer_channel *iter;
78
79 cds_list_for_each_entry(iter, &consumer_data.channel_list.head, list) {
80 if (iter->key == key) {
81 DBG("Found channel key %d", key);
82 return iter;
83 }
84 }
85 return NULL;
86}
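/*
 * Hypothetical usage sketch (editor-added, not part of the original file):
 * the lookup helpers above walk the global list without taking
 * consumer_data.lock themselves, so a caller must hold the lock for the
 * whole lookup-and-use sequence. The helper name below is invented; it
 * mirrors what consumer_change_stream_state() does further down.
 */
#if 0	/* illustrative sketch, kept out of the build */
static void example_mark_stream_active(int stream_key)
{
	struct lttng_consumer_stream *stream;

	pthread_mutex_lock(&consumer_data.lock);
	stream = consumer_find_stream(stream_key);
	if (stream) {
		/* The pointer is only safe to use while the lock is held. */
		stream->state = LTTNG_CONSUMER_ACTIVE_STREAM;
		consumer_data.need_update = 1;
	}
	pthread_mutex_unlock(&consumer_data.lock);
}
#endif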
87
88/*
89 * Remove a stream from the global list protected by a mutex. This
90 * function is also responsible for freeing its data structures.
91 */
92void consumer_del_stream(struct lttng_consumer_stream *stream)
93{
94 int ret;
95 struct lttng_consumer_channel *free_chan = NULL;
96
97 pthread_mutex_lock(&consumer_data.lock);
98
99 switch (consumer_data.type) {
100 case LTTNG_CONSUMER_KERNEL:
101 if (stream->mmap_base != NULL) {
102 ret = munmap(stream->mmap_base, stream->mmap_len);
103 if (ret != 0) {
104 perror("munmap");
105 }
106 }
107 break;
108 case LTTNG_CONSUMER_UST:
109 lttng_ustconsumer_del_stream(stream);
110 break;
111 default:
112 ERR("Unknown consumer_data type");
113 assert(0);
114 goto end;
115 }
116
117 cds_list_del(&stream->list);
118 if (consumer_data.stream_count <= 0) {
119 goto end;
120 }
121 consumer_data.stream_count--;
122 if (!stream) {
123 goto end;
124 }
125 if (stream->out_fd >= 0) {
126 close(stream->out_fd);
127 }
 128 if (stream->wait_fd >= 0 && !stream->wait_fd_is_copy) {
 129 close(stream->wait_fd);
 130 }
 131 if (stream->shm_fd >= 0 && stream->wait_fd != stream->shm_fd
 132 && !stream->shm_fd_is_copy) {
 133 close(stream->shm_fd);
 134 }
135 if (!--stream->chan->refcount)
136 free_chan = stream->chan;
137 free(stream);
138end:
139 consumer_data.need_update = 1;
140 pthread_mutex_unlock(&consumer_data.lock);
141
142 if (free_chan)
143 consumer_del_channel(free_chan);
144}
145
146struct lttng_consumer_stream *consumer_allocate_stream(
147 int channel_key, int stream_key,
148 int shm_fd, int wait_fd,
149 enum lttng_consumer_stream_state state,
150 uint64_t mmap_len,
151 enum lttng_event_output output,
152 const char *path_name)
153{
154 struct lttng_consumer_stream *stream;
155 int ret;
156
157 stream = malloc(sizeof(*stream));
158 if (stream == NULL) {
159 perror("malloc struct lttng_consumer_stream");
160 goto end;
161 }
162 stream->chan = consumer_find_channel(channel_key);
163 if (!stream->chan) {
164 perror("Unable to find channel key");
165 goto end;
166 }
167 stream->chan->refcount++;
168 stream->key = stream_key;
169 stream->shm_fd = shm_fd;
170 stream->wait_fd = wait_fd;
171 stream->out_fd = -1;
172 stream->out_fd_offset = 0;
173 stream->state = state;
174 stream->mmap_len = mmap_len;
175 stream->mmap_base = NULL;
176 stream->output = output;
177 strncpy(stream->path_name, path_name, PATH_MAX - 1);
178 stream->path_name[PATH_MAX - 1] = '\0';
179
180 switch (consumer_data.type) {
181 case LTTNG_CONSUMER_KERNEL:
182 break;
183 case LTTNG_CONSUMER_UST:
 184 stream->cpu = stream->chan->cpucount++;
185 ret = lttng_ustconsumer_allocate_stream(stream);
186 if (ret) {
187 free(stream);
188 return NULL;
189 }
190 break;
191 default:
192 ERR("Unknown consumer_data type");
193 assert(0);
194 goto end;
195 }
196 DBG("Allocated stream %s (key %d, shm_fd %d, wait_fd %d, mmap_len %llu, out_fd %d)",
197 stream->path_name, stream->key,
198 stream->shm_fd,
199 stream->wait_fd,
200 (unsigned long long) stream->mmap_len,
201 stream->out_fd);
202end:
203 return stream;
204}
205
206/*
207 * Add a stream to the global list protected by a mutex.
208 */
209int consumer_add_stream(struct lttng_consumer_stream *stream)
210{
211 int ret = 0;
212
213 pthread_mutex_lock(&consumer_data.lock);
 214 /* Check if the stream already exists. */
215 if (consumer_find_stream(stream->key)) {
216 ret = -1;
217 goto end;
218 }
219 cds_list_add(&stream->list, &consumer_data.stream_list.head);
220 consumer_data.stream_count++;
221 consumer_data.need_update = 1;
222
223 switch (consumer_data.type) {
224 case LTTNG_CONSUMER_KERNEL:
225 break;
226 case LTTNG_CONSUMER_UST:
227 /* Streams are in CPU number order (we rely on this) */
228 stream->cpu = stream->chan->nr_streams++;
229 break;
230 default:
231 ERR("Unknown consumer_data type");
232 assert(0);
233 goto end;
234 }
235
236end:
237 pthread_mutex_unlock(&consumer_data.lock);
238 return ret;
239}
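/*
 * Hypothetical usage sketch (editor-added, not part of the original file):
 * how command-handling code might pair consumer_allocate_stream() with
 * consumer_add_stream(). The function name, the mmap output choice and the
 * path are invented for illustration; real callers receive these values
 * from the session daemon.
 */
#if 0	/* illustrative sketch, kept out of the build */
static int example_register_stream(int channel_key, int stream_key,
		int shm_fd, int wait_fd, uint64_t mmap_len)
{
	struct lttng_consumer_stream *new_stream;

	new_stream = consumer_allocate_stream(channel_key, stream_key,
			shm_fd, wait_fd, LTTNG_CONSUMER_ACTIVE_STREAM,
			mmap_len, LTTNG_EVENT_MMAP,
			"/tmp/example-trace/channel0_0");
	if (new_stream == NULL) {
		return -1;
	}
	/* Fails if a stream with the same key is already registered. */
	if (consumer_add_stream(new_stream) < 0) {
		free(new_stream);
		return -1;
	}
	return 0;
}
#endif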
240
241/*
242 * Update a stream according to what we just received.
243 */
244void consumer_change_stream_state(int stream_key,
245 enum lttng_consumer_stream_state state)
246{
247 struct lttng_consumer_stream *stream;
248
249 pthread_mutex_lock(&consumer_data.lock);
250 stream = consumer_find_stream(stream_key);
251 if (stream) {
252 stream->state = state;
253 }
254 consumer_data.need_update = 1;
255 pthread_mutex_unlock(&consumer_data.lock);
256}
257
258/*
259 * Remove a channel from the global list protected by a mutex. This
260 * function is also responsible for freeing its data structures.
261 */
262void consumer_del_channel(struct lttng_consumer_channel *channel)
263{
264 int ret;
265
266 pthread_mutex_lock(&consumer_data.lock);
267
268 switch (consumer_data.type) {
269 case LTTNG_CONSUMER_KERNEL:
270 break;
271 case LTTNG_CONSUMER_UST:
272 lttng_ustconsumer_del_channel(channel);
273 break;
274 default:
275 ERR("Unknown consumer_data type");
276 assert(0);
277 goto end;
278 }
279
280 cds_list_del(&channel->list);
281 if (channel->mmap_base != NULL) {
282 ret = munmap(channel->mmap_base, channel->mmap_len);
283 if (ret != 0) {
284 perror("munmap");
285 }
286 }
 287 if (channel->wait_fd >= 0 && !channel->wait_fd_is_copy) {
 288 close(channel->wait_fd);
 289 }
 290 if (channel->shm_fd >= 0 && channel->wait_fd != channel->shm_fd
 291 && !channel->shm_fd_is_copy) {
 292 close(channel->shm_fd);
293 }
294 free(channel);
295end:
296 pthread_mutex_unlock(&consumer_data.lock);
297}
298
299struct lttng_consumer_channel *consumer_allocate_channel(
300 int channel_key,
301 int shm_fd, int wait_fd,
302 uint64_t mmap_len,
303 uint64_t max_sb_size)
304{
305 struct lttng_consumer_channel *channel;
306 int ret;
307
308 channel = malloc(sizeof(*channel));
309 if (channel == NULL) {
310 perror("malloc struct lttng_consumer_channel");
311 goto end;
312 }
313 channel->key = channel_key;
314 channel->shm_fd = shm_fd;
315 channel->wait_fd = wait_fd;
316 channel->mmap_len = mmap_len;
317 channel->max_sb_size = max_sb_size;
318 channel->refcount = 0;
319 channel->nr_streams = 0;
320
321 switch (consumer_data.type) {
322 case LTTNG_CONSUMER_KERNEL:
323 channel->mmap_base = NULL;
324 channel->mmap_len = 0;
325 break;
326 case LTTNG_CONSUMER_UST:
327 ret = lttng_ustconsumer_allocate_channel(channel);
328 if (ret) {
329 free(channel);
330 return NULL;
331 }
332 break;
333 default:
334 ERR("Unknown consumer_data type");
335 assert(0);
336 goto end;
337 }
338 DBG("Allocated channel (key %d, shm_fd %d, wait_fd %d, mmap_len %llu, max_sb_size %llu)",
339 channel->key,
340 channel->shm_fd,
341 channel->wait_fd,
342 (unsigned long long) channel->mmap_len,
343 (unsigned long long) channel->max_sb_size);
344end:
345 return channel;
346}
347
348/*
349 * Add a channel to the global list protected by a mutex.
350 */
351int consumer_add_channel(struct lttng_consumer_channel *channel)
352{
353 int ret = 0;
354
355 pthread_mutex_lock(&consumer_data.lock);
 356 /* Check if the channel already exists. */
357 if (consumer_find_channel(channel->key)) {
358 ret = -1;
359 goto end;
360 }
361 cds_list_add(&channel->list, &consumer_data.channel_list.head);
362end:
363 pthread_mutex_unlock(&consumer_data.lock);
364 return ret;
365}
366
367/*
368 * Allocate the pollfd structure and the local view of the out fds to avoid
369 * doing a lookup in the linked list and concurrency issues when writing is
370 * needed. Called with consumer_data.lock held.
371 *
372 * Returns the number of fds in the structures.
373 */
374int consumer_update_poll_array(
375 struct lttng_consumer_local_data *ctx, struct pollfd **pollfd,
376 struct lttng_consumer_stream **local_stream)
377{
378 struct lttng_consumer_stream *iter;
379 int i = 0;
380
381 DBG("Updating poll fd array");
382 cds_list_for_each_entry(iter, &consumer_data.stream_list.head, list) {
383 if (iter->state != LTTNG_CONSUMER_ACTIVE_STREAM) {
384 continue;
385 }
386 DBG("Active FD %d", iter->wait_fd);
387 (*pollfd)[i].fd = iter->wait_fd;
388 (*pollfd)[i].events = POLLIN | POLLPRI;
389 local_stream[i] = iter;
390 i++;
391 }
392
393 /*
394 * Insert the consumer_poll_pipe at the end of the array and don't
395 * increment i so nb_fd is the number of real FD.
396 */
397 (*pollfd)[i].fd = ctx->consumer_poll_pipe[0];
398 (*pollfd)[i].events = POLLIN;
399 return i;
400}
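/*
 * Hypothetical usage sketch (editor-added, not part of the original file):
 * a condensed version of what lttng_consumer_thread_poll_fds() does with
 * the value returned above -- poll on the nb_fd stream FDs plus the extra
 * wakeup slot, which is deliberately not counted in nb_fd. The function
 * name is invented and error handling is trimmed.
 */
#if 0	/* illustrative sketch, kept out of the build */
static int example_poll_once(struct lttng_consumer_local_data *ctx)
{
	struct pollfd *pollfd;
	struct lttng_consumer_stream **local_stream;
	int nb_fd, num_rdy;

	pthread_mutex_lock(&consumer_data.lock);
	/* One extra slot for the consumer_poll_pipe read end. */
	pollfd = malloc((consumer_data.stream_count + 1) * sizeof(*pollfd));
	local_stream = malloc((consumer_data.stream_count + 1) *
			sizeof(*local_stream));
	if (pollfd == NULL || local_stream == NULL) {
		pthread_mutex_unlock(&consumer_data.lock);
		free(pollfd);
		free(local_stream);
		return -1;
	}
	nb_fd = consumer_update_poll_array(ctx, &pollfd, local_stream);
	pthread_mutex_unlock(&consumer_data.lock);

	/* nb_fd real FDs plus the wakeup slot at index nb_fd. */
	num_rdy = poll(pollfd, nb_fd + 1, consumer_poll_timeout);
	if (num_rdy > 0 && (pollfd[nb_fd].revents & POLLIN)) {
		char tmp;

		/* Drain the wakeup byte; the FD array must be rebuilt. */
		(void) read(ctx->consumer_poll_pipe[0], &tmp, 1);
	}
	free(pollfd);
	free(local_stream);
	return num_rdy;
}
#endif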
401
402/*
 403 * Poll on the should_quit pipe and the command socket. Return -1 on error or
 404 * if the thread should exit, 0 if data is available on the command socket.
405 */
406int lttng_consumer_poll_socket(struct pollfd *consumer_sockpoll)
407{
408 int num_rdy;
409
410 num_rdy = poll(consumer_sockpoll, 2, -1);
411 if (num_rdy == -1) {
412 perror("Poll error");
413 goto exit;
414 }
415 if (consumer_sockpoll[0].revents == POLLIN) {
416 DBG("consumer_should_quit wake up");
417 goto exit;
418 }
419 return 0;
420
421exit:
422 return -1;
423}
424
425/*
426 * Set the error socket.
427 */
428void lttng_consumer_set_error_sock(
429 struct lttng_consumer_local_data *ctx, int sock)
430{
431 ctx->consumer_error_socket = sock;
432}
433
434/*
435 * Set the command socket path.
436 */
437
438void lttng_consumer_set_command_sock_path(
439 struct lttng_consumer_local_data *ctx, char *sock)
440{
441 ctx->consumer_command_sock_path = sock;
442}
443
444/*
445 * Send return code to the session daemon.
 446 * If the socket is not defined, return 0; it is not a fatal error.
447 */
448int lttng_consumer_send_error(
449 struct lttng_consumer_local_data *ctx, int cmd)
450{
451 if (ctx->consumer_error_socket > 0) {
452 return lttcomm_send_unix_sock(ctx->consumer_error_socket, &cmd,
453 sizeof(enum lttcomm_sessiond_command));
454 }
455
456 return 0;
457}
458
459/*
 460 * Close all the tracefile and stream FDs. Should be called when all instances
 461 * are destroyed.
462 */
463void lttng_consumer_cleanup(void)
464{
465 struct lttng_consumer_stream *iter, *tmp;
466 struct lttng_consumer_channel *citer, *ctmp;
467
468 /*
 469 * Close all out FDs. Called when there are no more threads
 470 * running (after joining on the threads), so there is no need to protect
 471 * the list iteration with a mutex.
472 */
473 cds_list_for_each_entry_safe(iter, tmp,
474 &consumer_data.stream_list.head, list) {
475 consumer_del_stream(iter);
476 }
477 cds_list_for_each_entry_safe(citer, ctmp,
478 &consumer_data.channel_list.head, list) {
479 consumer_del_channel(citer);
480 }
481}
482
483/*
484 * Called from signal handler.
485 */
486void lttng_consumer_should_exit(struct lttng_consumer_local_data *ctx)
487{
488 int ret;
489 consumer_quit = 1;
490 ret = write(ctx->consumer_should_quit[1], "4", 1);
491 if (ret < 0) {
492 perror("write consumer quit");
493 }
494}
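/*
 * Hypothetical usage sketch (editor-added, not part of the original file):
 * wiring lttng_consumer_should_exit() into a termination signal handler.
 * The function only sets a flag and writes one byte to the should_quit
 * pipe, so it is safe to call from signal context. The file-scope context
 * pointer and the function names are invented for the example.
 */
#if 0	/* illustrative sketch, kept out of the build */
#include <signal.h>

static struct lttng_consumer_local_data *example_ctx;	/* set at startup */

static void example_sighandler(int sig)
{
	(void) sig;
	lttng_consumer_should_exit(example_ctx);
}

static int example_install_signal_handlers(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = example_sighandler;
	sigemptyset(&sa.sa_mask);
	if (sigaction(SIGINT, &sa, NULL) < 0
			|| sigaction(SIGTERM, &sa, NULL) < 0) {
		perror("sigaction");
		return -1;
	}
	return 0;
}
#endif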
495
496void lttng_consumer_sync_trace_file(
497 struct lttng_consumer_stream *stream, off_t orig_offset)
498{
499 int outfd = stream->out_fd;
500
501 /*
502 * This does a blocking write-and-wait on any page that belongs to the
503 * subbuffer prior to the one we just wrote.
504 * Don't care about error values, as these are just hints and ways to
505 * limit the amount of page cache used.
506 */
507 if (orig_offset < stream->chan->max_sb_size) {
508 return;
509 }
510 sync_file_range(outfd, orig_offset - stream->chan->max_sb_size,
511 stream->chan->max_sb_size,
512 SYNC_FILE_RANGE_WAIT_BEFORE
513 | SYNC_FILE_RANGE_WRITE
514 | SYNC_FILE_RANGE_WAIT_AFTER);
515 /*
516 * Give hints to the kernel about how we access the file:
517 * POSIX_FADV_DONTNEED : we won't re-access data in a near future after
518 * we write it.
519 *
520 * We need to call fadvise again after the file grows because the
521 * kernel does not seem to apply fadvise to non-existing parts of the
522 * file.
523 *
524 * Call fadvise _after_ having waited for the page writeback to
525 * complete because the dirty page writeback semantic is not well
526 * defined. So it can be expected to lead to lower throughput in
527 * streaming.
528 */
529 posix_fadvise(outfd, orig_offset - stream->chan->max_sb_size,
530 stream->chan->max_sb_size, POSIX_FADV_DONTNEED);
531}
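/*
 * Hypothetical sketch (editor-added, not part of the original file) of the
 * same page-cache management pattern applied to a generic append-only
 * writer: flush the previously written chunk, wait for the writeback, then
 * drop it from the page cache. The function name and the chunk_size
 * parameter are invented; lttng_consumer_sync_trace_file() above does this
 * per sub-buffer with max_sb_size as the chunk size.
 */
#if 0	/* illustrative sketch, kept out of the build */
static void example_drop_previous_chunk(int fd, off_t offset,
		size_t chunk_size)
{
	if (offset < (off_t) chunk_size) {
		return;	/* nothing written before the current chunk */
	}
	/* Start writeback of the previous chunk and wait for completion. */
	sync_file_range(fd, offset - chunk_size, chunk_size,
			SYNC_FILE_RANGE_WAIT_BEFORE
			| SYNC_FILE_RANGE_WRITE
			| SYNC_FILE_RANGE_WAIT_AFTER);
	/* The pages are now clean; tell the kernel we will not reread them. */
	posix_fadvise(fd, offset - chunk_size, chunk_size,
			POSIX_FADV_DONTNEED);
}
#endif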
532
533/*
 534 * Initialise the necessary environment:
535 * - create a new context
536 * - create the poll_pipe
537 * - create the should_quit pipe (for signal handler)
538 * - create the thread pipe (for splice)
539 *
 540 * Takes a function pointer as argument; this function is called when data is
 541 * available on a buffer. It is responsible for calling
 542 * kernctl_get_next_subbuf, reading the data with mmap or splice depending on
 543 * the buffer configuration, and then kernctl_put_next_subbuf at the end.
544 *
545 * Returns a pointer to the new context or NULL on error.
546 */
547struct lttng_consumer_local_data *lttng_consumer_create(
548 enum lttng_consumer_type type,
549 int (*buffer_ready)(struct lttng_consumer_stream *stream),
550 int (*recv_channel)(struct lttng_consumer_channel *channel),
551 int (*recv_stream)(struct lttng_consumer_stream *stream),
552 int (*update_stream)(int stream_key, uint32_t state))
553{
554 int ret, i;
555 struct lttng_consumer_local_data *ctx;
556
557 assert(consumer_data.type == LTTNG_CONSUMER_UNKNOWN ||
558 consumer_data.type == type);
559 consumer_data.type = type;
560
561 ctx = malloc(sizeof(struct lttng_consumer_local_data));
562 if (ctx == NULL) {
563 perror("allocating context");
564 goto error;
565 }
566
567 ctx->consumer_error_socket = -1;
568 /* assign the callbacks */
569 ctx->on_buffer_ready = buffer_ready;
570 ctx->on_recv_channel = recv_channel;
571 ctx->on_recv_stream = recv_stream;
572 ctx->on_update_stream = update_stream;
573
574 ret = pipe(ctx->consumer_poll_pipe);
575 if (ret < 0) {
576 perror("Error creating poll pipe");
577 goto error_poll_pipe;
578 }
579
580 ret = pipe(ctx->consumer_should_quit);
581 if (ret < 0) {
582 perror("Error creating recv pipe");
583 goto error_quit_pipe;
584 }
585
586 ret = pipe(ctx->consumer_thread_pipe);
587 if (ret < 0) {
588 perror("Error creating thread pipe");
589 goto error_thread_pipe;
590 }
591
592 return ctx;
593
594
595error_thread_pipe:
596 for (i = 0; i < 2; i++) {
597 int err;
598
599 err = close(ctx->consumer_should_quit[i]);
600 assert(!err);
601 }
602error_quit_pipe:
603 for (i = 0; i < 2; i++) {
604 int err;
605
606 err = close(ctx->consumer_poll_pipe[i]);
607 assert(!err);
608 }
609error_poll_pipe:
610 free(ctx);
611error:
612 return NULL;
613}
614
615/*
616 * Close all fds associated with the instance and free the context.
617 */
618void lttng_consumer_destroy(struct lttng_consumer_local_data *ctx)
619{
620 close(ctx->consumer_error_socket);
621 close(ctx->consumer_thread_pipe[0]);
622 close(ctx->consumer_thread_pipe[1]);
623 close(ctx->consumer_poll_pipe[0]);
624 close(ctx->consumer_poll_pipe[1]);
625 close(ctx->consumer_should_quit[0]);
626 close(ctx->consumer_should_quit[1]);
627 unlink(ctx->consumer_command_sock_path);
628 free(ctx);
629}
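/*
 * Hypothetical lifecycle sketch (editor-added, not part of the original
 * file): how a consumer daemon might tie together the context functions in
 * this file. The callbacks, the socket path and the error-socket FD are
 * placeholders; error handling is trimmed, and the thread entry points are
 * assumed to be visible through the included headers (they are defined
 * later in this file).
 */
#if 0	/* illustrative sketch, kept out of the build */
static int example_run_consumer(int error_sock,
		int (*read_subbuffer)(struct lttng_consumer_stream *stream),
		int (*recv_channel)(struct lttng_consumer_channel *channel),
		int (*recv_stream)(struct lttng_consumer_stream *stream),
		int (*update_stream)(int stream_key, uint32_t state))
{
	static char sock_path[] = "/tmp/example-consumerd.sock";
	struct lttng_consumer_local_data *ctx;
	pthread_t sessiond_thread, poll_thread;

	ctx = lttng_consumer_create(LTTNG_CONSUMER_KERNEL, read_subbuffer,
			recv_channel, recv_stream, update_stream);
	if (ctx == NULL) {
		return -1;
	}
	lttng_consumer_set_error_sock(ctx, error_sock);
	lttng_consumer_set_command_sock_path(ctx, sock_path);

	/* One thread for session daemon commands, one for the buffers. */
	pthread_create(&sessiond_thread, NULL,
			lttng_consumer_thread_receive_fds, ctx);
	pthread_create(&poll_thread, NULL,
			lttng_consumer_thread_poll_fds, ctx);

	pthread_join(sessiond_thread, NULL);
	pthread_join(poll_thread, NULL);

	lttng_consumer_cleanup();
	lttng_consumer_destroy(ctx);
	return 0;
}
#endif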
630
631/*
632 * Mmap the ring buffer, read it and write the data to the tracefile.
633 *
634 * Returns the number of bytes written
635 */
636int lttng_consumer_on_read_subbuffer_mmap(
637 struct lttng_consumer_local_data *ctx,
638 struct lttng_consumer_stream *stream, unsigned long len)
639{
640 switch (consumer_data.type) {
641 case LTTNG_CONSUMER_KERNEL:
642 return lttng_kconsumer_on_read_subbuffer_mmap(ctx, stream, len);
643 case LTTNG_CONSUMER_UST:
644 return lttng_ustconsumer_on_read_subbuffer_mmap(ctx, stream, len);
645 default:
646 ERR("Unknown consumer_data type");
647 assert(0);
648 }
649}
650
651/*
652 * Splice the data from the ring buffer to the tracefile.
653 *
654 * Returns the number of bytes spliced.
655 */
656int lttng_consumer_on_read_subbuffer_splice(
657 struct lttng_consumer_local_data *ctx,
658 struct lttng_consumer_stream *stream, unsigned long len)
659{
660 switch (consumer_data.type) {
661 case LTTNG_CONSUMER_KERNEL:
662 return lttng_kconsumer_on_read_subbuffer_splice(ctx, stream, len);
663 case LTTNG_CONSUMER_UST:
664 return -ENOSYS;
665 default:
666 ERR("Unknown consumer_data type");
667 assert(0);
668 return -ENOSYS;
669 }
670
671}
672
673/*
674 * Take a snapshot for a specific fd
675 *
676 * Returns 0 on success, < 0 on error
677 */
678int lttng_consumer_take_snapshot(struct lttng_consumer_local_data *ctx,
679 struct lttng_consumer_stream *stream)
680{
681 switch (consumer_data.type) {
682 case LTTNG_CONSUMER_KERNEL:
683 return lttng_kconsumer_take_snapshot(ctx, stream);
684 case LTTNG_CONSUMER_UST:
685 return lttng_ustconsumer_take_snapshot(ctx, stream);
686 default:
687 ERR("Unknown consumer_data type");
688 assert(0);
689 return -ENOSYS;
690 }
691
692}
693
694/*
695 * Get the produced position
696 *
697 * Returns 0 on success, < 0 on error
698 */
699int lttng_consumer_get_produced_snapshot(
700 struct lttng_consumer_local_data *ctx,
701 struct lttng_consumer_stream *stream,
702 unsigned long *pos)
703{
704 switch (consumer_data.type) {
705 case LTTNG_CONSUMER_KERNEL:
706 return lttng_kconsumer_get_produced_snapshot(ctx, stream, pos);
707 case LTTNG_CONSUMER_UST:
708 return lttng_ustconsumer_get_produced_snapshot(ctx, stream, pos);
709 default:
710 ERR("Unknown consumer_data type");
711 assert(0);
712 return -ENOSYS;
713 }
714}
715
716int lttng_consumer_recv_cmd(struct lttng_consumer_local_data *ctx,
717 int sock, struct pollfd *consumer_sockpoll)
718{
719 switch (consumer_data.type) {
720 case LTTNG_CONSUMER_KERNEL:
721 return lttng_kconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
722 case LTTNG_CONSUMER_UST:
723 return lttng_ustconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
724 default:
725 ERR("Unknown consumer_data type");
726 assert(0);
727 return -ENOSYS;
728 }
729}
730
731/*
 732 * This thread polls the stream FDs to consume the data and write it to the
 733 * tracefile if necessary.
734 */
735void *lttng_consumer_thread_poll_fds(void *data)
736{
737 int num_rdy, num_hup, high_prio, ret, i;
738 struct pollfd *pollfd = NULL;
739 /* local view of the streams */
740 struct lttng_consumer_stream **local_stream = NULL;
 741 /* local view of consumer_data.stream_count */
742 int nb_fd = 0;
743 char tmp;
744 int tmp2;
745 struct lttng_consumer_local_data *ctx = data;
746
747 local_stream = malloc(sizeof(struct lttng_consumer_stream));
748
749 while (1) {
750 high_prio = 0;
751 num_hup = 0;
752
753 /*
754 * the ltt_fd_list has been updated, we need to update our
755 * local array as well
756 */
757 pthread_mutex_lock(&consumer_data.lock);
758 if (consumer_data.need_update) {
759 if (pollfd != NULL) {
760 free(pollfd);
761 pollfd = NULL;
762 }
763 if (local_stream != NULL) {
764 free(local_stream);
765 local_stream = NULL;
766 }
767
768 /* allocate for all fds + 1 for the consumer_poll_pipe */
769 pollfd = malloc((consumer_data.stream_count + 1) * sizeof(struct pollfd));
770 if (pollfd == NULL) {
771 perror("pollfd malloc");
772 pthread_mutex_unlock(&consumer_data.lock);
773 goto end;
774 }
775
776 /* allocate for all fds + 1 for the consumer_poll_pipe */
777 local_stream = malloc((consumer_data.stream_count + 1) *
778 sizeof(struct lttng_consumer_stream));
779 if (local_stream == NULL) {
780 perror("local_stream malloc");
781 pthread_mutex_unlock(&consumer_data.lock);
782 goto end;
783 }
784 ret = consumer_update_poll_array(ctx, &pollfd, local_stream);
785 if (ret < 0) {
786 ERR("Error in allocating pollfd or local_outfds");
787 lttng_consumer_send_error(ctx, CONSUMERD_POLL_ERROR);
788 pthread_mutex_unlock(&consumer_data.lock);
789 goto end;
790 }
791 nb_fd = ret;
792 consumer_data.need_update = 0;
793 }
794 pthread_mutex_unlock(&consumer_data.lock);
795
796 /* poll on the array of fds */
797 DBG("polling on %d fd", nb_fd + 1);
798 num_rdy = poll(pollfd, nb_fd + 1, consumer_poll_timeout);
799 DBG("poll num_rdy : %d", num_rdy);
800 if (num_rdy == -1) {
801 perror("Poll error");
802 lttng_consumer_send_error(ctx, CONSUMERD_POLL_ERROR);
803 goto end;
804 } else if (num_rdy == 0) {
805 DBG("Polling thread timed out");
806 goto end;
807 }
808
 809 /* No FDs left and consumer_quit is set: end the thread. */
810 if (nb_fd == 0 && consumer_quit == 1) {
811 goto end;
812 }
813
814 /*
815 * If the consumer_poll_pipe triggered poll go
816 * directly to the beginning of the loop to update the
817 * array. We want to prioritize array update over
818 * low-priority reads.
819 */
820 if (pollfd[nb_fd].revents == POLLIN) {
821 DBG("consumer_poll_pipe wake up");
822 tmp2 = read(ctx->consumer_poll_pipe[0], &tmp, 1);
823 if (tmp2 < 0) {
824 perror("read kconsumer poll");
825 }
826 continue;
827 }
828
829 /* Take care of high priority channels first. */
830 for (i = 0; i < nb_fd; i++) {
831 switch(pollfd[i].revents) {
832 case POLLERR:
833 ERR("Error returned in polling fd %d.", pollfd[i].fd);
834 consumer_del_stream(local_stream[i]);
835 num_hup++;
836 break;
837 case POLLHUP:
838 DBG("Polling fd %d tells it has hung up.", pollfd[i].fd);
839 consumer_del_stream(local_stream[i]);
840 num_hup++;
841 break;
842 case POLLNVAL:
843 ERR("Polling fd %d tells fd is not open.", pollfd[i].fd);
844 consumer_del_stream(local_stream[i]);
845 num_hup++;
846 break;
847 case POLLPRI:
848 DBG("Urgent read on fd %d", pollfd[i].fd);
849 high_prio = 1;
850 ret = ctx->on_buffer_ready(local_stream[i]);
851 /* it's ok to have an unavailable sub-buffer */
852 if (ret == EAGAIN) {
853 ret = 0;
854 }
855 break;
856 }
857 }
858
859 /* If every buffer FD has hung up, we end the read loop here */
860 if (nb_fd > 0 && num_hup == nb_fd) {
861 DBG("every buffer FD has hung up\n");
862 if (consumer_quit == 1) {
863 goto end;
864 }
865 continue;
866 }
867
868 /* Take care of low priority channels. */
869 if (high_prio == 0) {
870 for (i = 0; i < nb_fd; i++) {
871 if (pollfd[i].revents == POLLIN) {
872 DBG("Normal read on fd %d", pollfd[i].fd);
873 ret = ctx->on_buffer_ready(local_stream[i]);
874 /* it's ok to have an unavailable subbuffer */
875 if (ret == EAGAIN) {
876 ret = 0;
877 }
878 }
879 }
880 }
881 }
882end:
883 DBG("polling thread exiting");
884 if (pollfd != NULL) {
885 free(pollfd);
886 pollfd = NULL;
887 }
888 if (local_stream != NULL) {
889 free(local_stream);
890 local_stream = NULL;
891 }
892 return NULL;
893}
894
895/*
896 * This thread listens on the consumerd socket and receives the file
897 * descriptors from the session daemon.
898 */
899void *lttng_consumer_thread_receive_fds(void *data)
900{
901 int sock, client_socket, ret;
902 /*
 903 * Structure used to poll for incoming data on the communication socket, so
 904 * that we never block on the client socket.
905 */
906 struct pollfd consumer_sockpoll[2];
907 struct lttng_consumer_local_data *ctx = data;
908
909 DBG("Creating command socket %s", ctx->consumer_command_sock_path);
910 unlink(ctx->consumer_command_sock_path);
911 client_socket = lttcomm_create_unix_sock(ctx->consumer_command_sock_path);
912 if (client_socket < 0) {
913 ERR("Cannot create command socket");
914 goto end;
915 }
916
917 ret = lttcomm_listen_unix_sock(client_socket);
918 if (ret < 0) {
919 goto end;
920 }
921
 922 DBG("Sending ready command to lttng-sessiond");
 923 ret = lttng_consumer_send_error(ctx, CONSUMERD_COMMAND_SOCK_READY);
 924 /* return < 0 on error, but == 0 is not fatal */
 925 if (ret < 0) {
 926 ERR("Error sending ready command to lttng-sessiond");
927 goto end;
928 }
929
930 ret = fcntl(client_socket, F_SETFL, O_NONBLOCK);
931 if (ret < 0) {
932 perror("fcntl O_NONBLOCK");
933 goto end;
934 }
935
 936 /* Prepare the FDs to poll: the client socket and the should_quit pipe. */
937 consumer_sockpoll[0].fd = ctx->consumer_should_quit[0];
938 consumer_sockpoll[0].events = POLLIN | POLLPRI;
939 consumer_sockpoll[1].fd = client_socket;
940 consumer_sockpoll[1].events = POLLIN | POLLPRI;
941
942 if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
943 goto end;
944 }
945 DBG("Connection on client_socket");
946
947 /* Blocking call, waiting for transmission */
948 sock = lttcomm_accept_unix_sock(client_socket);
949 if (sock <= 0) {
950 WARN("On accept");
951 goto end;
952 }
953 ret = fcntl(sock, F_SETFL, O_NONBLOCK);
954 if (ret < 0) {
955 perror("fcntl O_NONBLOCK");
956 goto end;
957 }
958
959 /* update the polling structure to poll on the established socket */
960 consumer_sockpoll[1].fd = sock;
961 consumer_sockpoll[1].events = POLLIN | POLLPRI;
962
963 while (1) {
964 if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
965 goto end;
966 }
967 DBG("Incoming command on sock");
968 ret = lttng_consumer_recv_cmd(ctx, sock, consumer_sockpoll);
969 if (ret == -ENOENT) {
970 DBG("Received STOP command");
971 goto end;
972 }
973 if (ret < 0) {
974 ERR("Communication interrupted on command socket");
975 goto end;
976 }
977 if (consumer_quit) {
978 DBG("consumer_thread_receive_fds received quit from signal");
979 goto end;
980 }
981 DBG("received fds on sock");
982 }
983end:
984 DBG("consumer_thread_receive_fds exiting");
985
986 /*
987 * when all fds have hung up, the polling thread
988 * can exit cleanly
989 */
990 consumer_quit = 1;
991
992 /*
 993 * 2s of grace period: if no polling events occur during
994 * this period, the polling thread will exit even if there
995 * are still open FDs (should not happen, but safety mechanism).
996 */
997 consumer_poll_timeout = LTTNG_CONSUMER_POLL_TIMEOUT;
998
999 /* wake up the polling thread */
1000 ret = write(ctx->consumer_poll_pipe[1], "4", 1);
1001 if (ret < 0) {
1002 perror("poll pipe write");
1003 }
1004 return NULL;
1005}