Fix: UST consumer needs to iterate on streams, just change their key
[lttng-tools.git] / src / common / consumer.c
1/*
2 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; only version 2
8 * of the License.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 */
19
20#define _GNU_SOURCE
21#include <assert.h>
22#include <fcntl.h>
23#include <poll.h>
24#include <pthread.h>
25#include <stdlib.h>
26#include <string.h>
27#include <sys/mman.h>
28#include <sys/socket.h>
29#include <sys/types.h>
30#include <unistd.h>
31
32#include <common/common.h>
33#include <common/kernel-ctl/kernel-ctl.h>
34#include <common/sessiond-comm/sessiond-comm.h>
35#include <common/kernel-consumer/kernel-consumer.h>
36#include <common/ust-consumer/ust-consumer.h>
37
38#include "consumer.h"
39
40struct lttng_consumer_global_data consumer_data = {
41 .stream_count = 0,
42 .need_update = 1,
43 .type = LTTNG_CONSUMER_UNKNOWN,
44};
45
46/* timeout parameter, to control the polling thread grace period. */
47int consumer_poll_timeout = -1;
48
49/*
50 * Flag to inform the polling thread to quit when all fds have hung up. Updated
51 * by consumer_thread_receive_fds when it notices that all fds have hung up.
52 * Also updated by the signal handler (consumer_should_exit()). Read by the
53 * polling threads.
54 */
55volatile int consumer_quit = 0;
56
57/*
58 * Find a stream. The consumer_data.lock must be locked during this
59 * call.
60 */
61static struct lttng_consumer_stream *consumer_find_stream(int key)
62{
63 struct lttng_ht_iter iter;
64 struct lttng_ht_node_ulong *node;
65 struct lttng_consumer_stream *stream = NULL;
66
67 /* Negative keys are lookup failures */
68 if (key < 0)
69 return NULL;
70
71 rcu_read_lock();
72
73 lttng_ht_lookup(consumer_data.stream_ht, (void *)((unsigned long) key),
74 &iter);
75 node = lttng_ht_iter_get_node_ulong(&iter);
76 if (node != NULL) {
77 stream = caa_container_of(node, struct lttng_consumer_stream, node);
78 }
79
80 rcu_read_unlock();
81
82 return stream;
83}
84
85static void consumer_steal_stream_key(int key)
86{
87 struct lttng_consumer_stream *stream;
88
89 rcu_read_lock();
90 stream = consumer_find_stream(key);
91 if (stream) {
92 stream->key = -1;
93 /*
94 * We don't want the lookup to match, but we still need
95 * to iterate on this stream when iterating over the hash table. Just
96 * change the node key.
97 */
98 stream->node.key = -1;
99 }
100 rcu_read_unlock();
101}
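
The key stealing above is what the commit title describes: both stream->key and the hash node key become -1, so a lookup by the old key misses while iteration still visits the node. A minimal sketch of that behaviour follows; example_count_streams() is a hypothetical helper, not part of this commit, and assumes the caller holds consumer_data.lock as consumer_find_stream() requires.

static unsigned long example_count_streams(void)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;
	unsigned long count = 0;

	rcu_read_lock();
	cds_lfht_for_each_entry(consumer_data.stream_ht->ht, &iter.iter, stream,
			node.node) {
		/* Streams whose key was stolen (-1) are still visited here... */
		count++;
	}
	rcu_read_unlock();
	/* ...even though consumer_find_stream(-1) returns NULL by design. */
	return count;
}
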
102
103static struct lttng_consumer_channel *consumer_find_channel(int key)
104{
105 struct lttng_ht_iter iter;
106 struct lttng_ht_node_ulong *node;
107 struct lttng_consumer_channel *channel = NULL;
108
109 /* Negative keys are lookup failures */
110 if (key < 0)
111 return NULL;
112
113 rcu_read_lock();
114
115 lttng_ht_lookup(consumer_data.channel_ht, (void *)((unsigned long) key),
116 &iter);
117 node = lttng_ht_iter_get_node_ulong(&iter);
118 if (node != NULL) {
119 channel = caa_container_of(node, struct lttng_consumer_channel, node);
120 }
121
122 rcu_read_unlock();
123
124 return channel;
125}
126
127static void consumer_steal_channel_key(int key)
128{
129 struct lttng_consumer_channel *channel;
130
131 rcu_read_lock();
132 channel = consumer_find_channel(key);
133 if (channel) {
134 channel->key = -1;
135 /*
136 * We don't want the lookup to match, but we still need
137 * to iterate on this channel when iterating over the hash table. Just
138 * change the node key.
139 */
140 channel->node.key = -1;
141 }
142 rcu_read_unlock();
143}
144
145static
146void consumer_free_stream(struct rcu_head *head)
147{
148 struct lttng_ht_node_ulong *node =
149 caa_container_of(head, struct lttng_ht_node_ulong, head);
150 struct lttng_consumer_stream *stream =
151 caa_container_of(node, struct lttng_consumer_stream, node);
152
153 free(stream);
154}
155
156/*
157 * Remove a stream from the global list protected by a mutex. This
158 * function is also responsible for freeing its data structures.
159 */
160void consumer_del_stream(struct lttng_consumer_stream *stream)
161{
162 int ret;
163 struct lttng_ht_iter iter;
164 struct lttng_consumer_channel *free_chan = NULL;
165
166 pthread_mutex_lock(&consumer_data.lock);
167
168 switch (consumer_data.type) {
169 case LTTNG_CONSUMER_KERNEL:
170 if (stream->mmap_base != NULL) {
171 ret = munmap(stream->mmap_base, stream->mmap_len);
172 if (ret != 0) {
173 perror("munmap");
174 }
175 }
176 break;
177 case LTTNG_CONSUMER32_UST:
178 case LTTNG_CONSUMER64_UST:
179 lttng_ustconsumer_del_stream(stream);
180 break;
181 default:
182 ERR("Unknown consumer_data type");
183 assert(0);
184 goto end;
185 }
186
187 rcu_read_lock();
188 iter.iter.node = &stream->node.node;
189 ret = lttng_ht_del(consumer_data.stream_ht, &iter);
190 assert(!ret);
191
192 rcu_read_unlock();
193
194 if (consumer_data.stream_count <= 0) {
195 goto end;
196 }
197 consumer_data.stream_count--;
198 if (!stream) {
199 goto end;
200 }
201 if (stream->out_fd >= 0) {
202 ret = close(stream->out_fd);
203 if (ret) {
204 PERROR("close");
205 }
206 }
207 if (stream->wait_fd >= 0 && !stream->wait_fd_is_copy) {
208 ret = close(stream->wait_fd);
209 if (ret) {
210 PERROR("close");
211 }
212 }
213 if (stream->shm_fd >= 0 && stream->wait_fd != stream->shm_fd) {
214 ret = close(stream->shm_fd);
215 if (ret) {
216 PERROR("close");
217 }
218 }
219 if (!--stream->chan->refcount)
220 free_chan = stream->chan;
221
222 call_rcu(&stream->node.head, consumer_free_stream);
223end:
224 consumer_data.need_update = 1;
225 pthread_mutex_unlock(&consumer_data.lock);
226
227 if (free_chan)
228 consumer_del_channel(free_chan);
229}
230
231struct lttng_consumer_stream *consumer_allocate_stream(
232 int channel_key, int stream_key,
233 int shm_fd, int wait_fd,
234 enum lttng_consumer_stream_state state,
235 uint64_t mmap_len,
236 enum lttng_event_output output,
237 const char *path_name,
238 uid_t uid,
239 gid_t gid)
240{
241 struct lttng_consumer_stream *stream;
242 int ret;
243
244 stream = zmalloc(sizeof(*stream));
245 if (stream == NULL) {
246 perror("malloc struct lttng_consumer_stream");
247 goto end;
248 }
249 stream->chan = consumer_find_channel(channel_key);
250 if (!stream->chan) {
251 perror("Unable to find channel key");
252 goto end;
253 }
254 stream->chan->refcount++;
255 stream->key = stream_key;
256 stream->shm_fd = shm_fd;
257 stream->wait_fd = wait_fd;
258 stream->out_fd = -1;
259 stream->out_fd_offset = 0;
260 stream->state = state;
261 stream->mmap_len = mmap_len;
262 stream->mmap_base = NULL;
263 stream->output = output;
264 stream->uid = uid;
265 stream->gid = gid;
266 strncpy(stream->path_name, path_name, PATH_MAX - 1);
267 stream->path_name[PATH_MAX - 1] = '\0';
268 lttng_ht_node_init_ulong(&stream->node, stream->key);
269
270 switch (consumer_data.type) {
271 case LTTNG_CONSUMER_KERNEL:
272 break;
273 case LTTNG_CONSUMER32_UST:
274 case LTTNG_CONSUMER64_UST:
275 stream->cpu = stream->chan->cpucount++;
276 ret = lttng_ustconsumer_allocate_stream(stream);
277 if (ret) {
278 free(stream);
279 return NULL;
280 }
281 break;
282 default:
283 ERR("Unknown consumer_data type");
284 assert(0);
285 goto end;
286 }
287 DBG("Allocated stream %s (key %d, shm_fd %d, wait_fd %d, mmap_len %llu, out_fd %d)",
288 stream->path_name, stream->key,
289 stream->shm_fd,
290 stream->wait_fd,
291 (unsigned long long) stream->mmap_len,
292 stream->out_fd);
293end:
294 return stream;
295}
296
297/*
298 * Add a stream to the global list protected by a mutex.
299 */
300int consumer_add_stream(struct lttng_consumer_stream *stream)
301{
302 int ret = 0;
303
304 pthread_mutex_lock(&consumer_data.lock);
305 /* Steal stream identifier, for UST */
306 consumer_steal_stream_key(stream->key);
307 rcu_read_lock();
308 lttng_ht_add_unique_ulong(consumer_data.stream_ht, &stream->node);
309 rcu_read_unlock();
310 consumer_data.stream_count++;
311 consumer_data.need_update = 1;
312
313 switch (consumer_data.type) {
314 case LTTNG_CONSUMER_KERNEL:
315 break;
316 case LTTNG_CONSUMER32_UST:
317 case LTTNG_CONSUMER64_UST:
318 /* Streams are in CPU number order (we rely on this) */
319 stream->cpu = stream->chan->nr_streams++;
320 break;
321 default:
322 ERR("Unknown consumer_data type");
323 assert(0);
324 goto end;
325 }
326
327end:
328 pthread_mutex_unlock(&consumer_data.lock);
329
330 return ret;
331}
332
333/*
334 * Update a stream according to what we just received.
335 */
336void consumer_change_stream_state(int stream_key,
337 enum lttng_consumer_stream_state state)
338{
339 struct lttng_consumer_stream *stream;
340
341 pthread_mutex_lock(&consumer_data.lock);
342 stream = consumer_find_stream(stream_key);
343 if (stream) {
344 stream->state = state;
345 }
346 consumer_data.need_update = 1;
347 pthread_mutex_unlock(&consumer_data.lock);
348}
349
350static
351void consumer_free_channel(struct rcu_head *head)
352{
353 struct lttng_ht_node_ulong *node =
354 caa_container_of(head, struct lttng_ht_node_ulong, head);
355 struct lttng_consumer_channel *channel =
356 caa_container_of(node, struct lttng_consumer_channel, node);
357
358 free(channel);
359}
360
361/*
362 * Remove a channel from the global list protected by a mutex. This
363 * function is also responsible for freeing its data structures.
364 */
365void consumer_del_channel(struct lttng_consumer_channel *channel)
366{
367 int ret;
368 struct lttng_ht_iter iter;
369
370 pthread_mutex_lock(&consumer_data.lock);
371
372 switch (consumer_data.type) {
373 case LTTNG_CONSUMER_KERNEL:
374 break;
375 case LTTNG_CONSUMER32_UST:
376 case LTTNG_CONSUMER64_UST:
377 lttng_ustconsumer_del_channel(channel);
378 break;
379 default:
380 ERR("Unknown consumer_data type");
381 assert(0);
382 goto end;
383 }
384
385 rcu_read_lock();
386 iter.iter.node = &channel->node.node;
387 ret = lttng_ht_del(consumer_data.channel_ht, &iter);
388 assert(!ret);
389 rcu_read_unlock();
390
391 if (channel->mmap_base != NULL) {
392 ret = munmap(channel->mmap_base, channel->mmap_len);
393 if (ret != 0) {
394 perror("munmap");
395 }
396 }
397 if (channel->wait_fd >= 0 && !channel->wait_fd_is_copy) {
398 ret = close(channel->wait_fd);
399 if (ret) {
400 PERROR("close");
401 }
402 }
403 if (channel->shm_fd >= 0 && channel->wait_fd != channel->shm_fd) {
404 ret = close(channel->shm_fd);
405 if (ret) {
406 PERROR("close");
407 }
408 }
409
410 call_rcu(&channel->node.head, consumer_free_channel);
411end:
412 pthread_mutex_unlock(&consumer_data.lock);
413}
414
415struct lttng_consumer_channel *consumer_allocate_channel(
416 int channel_key,
417 int shm_fd, int wait_fd,
418 uint64_t mmap_len,
419 uint64_t max_sb_size)
420{
421 struct lttng_consumer_channel *channel;
422 int ret;
423
424 channel = zmalloc(sizeof(*channel));
425 if (channel == NULL) {
426 perror("malloc struct lttng_consumer_channel");
427 goto end;
428 }
429 channel->key = channel_key;
430 channel->shm_fd = shm_fd;
431 channel->wait_fd = wait_fd;
432 channel->mmap_len = mmap_len;
433 channel->max_sb_size = max_sb_size;
434 channel->refcount = 0;
435 channel->nr_streams = 0;
436 lttng_ht_node_init_ulong(&channel->node, channel->key);
437
438 switch (consumer_data.type) {
439 case LTTNG_CONSUMER_KERNEL:
440 channel->mmap_base = NULL;
441 channel->mmap_len = 0;
442 break;
443 case LTTNG_CONSUMER32_UST:
444 case LTTNG_CONSUMER64_UST:
445 ret = lttng_ustconsumer_allocate_channel(channel);
446 if (ret) {
447 free(channel);
448 return NULL;
449 }
450 break;
451 default:
452 ERR("Unknown consumer_data type");
453 assert(0);
454 goto end;
455 }
456 DBG("Allocated channel (key %d, shm_fd %d, wait_fd %d, mmap_len %llu, max_sb_size %llu)",
457 channel->key,
458 channel->shm_fd,
459 channel->wait_fd,
460 (unsigned long long) channel->mmap_len,
461 (unsigned long long) channel->max_sb_size);
462end:
463 return channel;
464}
465
466/*
467 * Add a channel to the global list protected by a mutex.
468 */
469int consumer_add_channel(struct lttng_consumer_channel *channel)
470{
471 pthread_mutex_lock(&consumer_data.lock);
472 /* Steal channel identifier, for UST */
473 consumer_steal_channel_key(channel->key);
474 rcu_read_lock();
475 lttng_ht_add_unique_ulong(consumer_data.channel_ht, &channel->node);
476 rcu_read_unlock();
477 pthread_mutex_unlock(&consumer_data.lock);
478
479 return 0;
480}
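
Since consumer_allocate_stream() resolves its channel through consumer_find_channel(), a channel has to be registered before any of its streams. A hedged sketch of that ordering follows; example_register_channel_and_stream() is hypothetical, the fd arguments are reused for brevity, and error handling is abbreviated.

static int example_register_channel_and_stream(int channel_key, int stream_key,
		int shm_fd, int wait_fd, uint64_t mmap_len, uint64_t max_sb_size,
		const char *path, uid_t uid, gid_t gid)
{
	struct lttng_consumer_channel *channel;
	struct lttng_consumer_stream *stream;

	channel = consumer_allocate_channel(channel_key, shm_fd, wait_fd,
			mmap_len, max_sb_size);
	if (channel == NULL) {
		return -1;
	}
	consumer_add_channel(channel);

	/* The channel key is looked up again inside consumer_allocate_stream(). */
	stream = consumer_allocate_stream(channel_key, stream_key, shm_fd, wait_fd,
			LTTNG_CONSUMER_ACTIVE_STREAM, mmap_len, LTTNG_EVENT_MMAP,
			path, uid, gid);
	if (stream == NULL) {
		return -1;
	}
	return consumer_add_stream(stream);
}
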
481
482/*
483 * Allocate the pollfd structure and the local view of the out fds, to avoid
484 * doing a lookup in the stream hash table and to avoid concurrency issues
485 * when writing is needed. Called with consumer_data.lock held.
486 *
487 * Returns the number of fds in the structures.
488 */
489int consumer_update_poll_array(
490 struct lttng_consumer_local_data *ctx, struct pollfd **pollfd,
491 struct lttng_consumer_stream **local_stream)
492{
493 int i = 0;
494 struct lttng_ht_iter iter;
495 struct lttng_consumer_stream *stream;
496
497 DBG("Updating poll fd array");
498 rcu_read_lock();
499 cds_lfht_for_each_entry(consumer_data.stream_ht->ht, &iter.iter, stream,
500 node.node) {
501 if (stream->state != LTTNG_CONSUMER_ACTIVE_STREAM) {
502 continue;
503 }
504 DBG("Active FD %d", stream->wait_fd);
505 (*pollfd)[i].fd = stream->wait_fd;
506 (*pollfd)[i].events = POLLIN | POLLPRI;
507 local_stream[i] = stream;
508 i++;
509 }
510 rcu_read_unlock();
511
512 /*
513 * Insert the consumer_poll_pipe at the end of the array and don't
514 * increment i so nb_fd is the number of real FDs.
515 */
516 (*pollfd)[i].fd = ctx->consumer_poll_pipe[0];
517 (*pollfd)[i].events = POLLIN;
518 return i;
519}
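
The calling convention sketched below mirrors the poll thread further down: both arrays are sized for stream_count + 1 entries, the returned count excludes the consumer_poll_pipe slot, and poll() therefore runs on nb_fd + 1 descriptors. example_build_and_poll() is a hypothetical helper with error handling abbreviated.

static int example_build_and_poll(struct lttng_consumer_local_data *ctx)
{
	struct pollfd *pollfd;
	struct lttng_consumer_stream **local_stream;
	int nb_fd, num_rdy;

	pthread_mutex_lock(&consumer_data.lock);
	pollfd = zmalloc((consumer_data.stream_count + 1) * sizeof(struct pollfd));
	local_stream = zmalloc((consumer_data.stream_count + 1) *
			sizeof(struct lttng_consumer_stream *));
	nb_fd = consumer_update_poll_array(ctx, &pollfd, local_stream);
	pthread_mutex_unlock(&consumer_data.lock);

	/* nb_fd real streams, plus the consumer_poll_pipe entry at the end. */
	num_rdy = poll(pollfd, nb_fd + 1, consumer_poll_timeout);

	free(local_stream);
	free(pollfd);
	return num_rdy;
}
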
520
521/*
522 * Poll on the should_quit pipe and the command socket. Returns -1 on error or
523 * if we should exit, 0 if data is available on the command socket.
524 */
525int lttng_consumer_poll_socket(struct pollfd *consumer_sockpoll)
526{
527 int num_rdy;
528
529restart:
530 num_rdy = poll(consumer_sockpoll, 2, -1);
531 if (num_rdy == -1) {
532 /*
533 * Restart interrupted system call.
534 */
535 if (errno == EINTR) {
536 goto restart;
537 }
538 perror("Poll error");
539 goto exit;
540 }
541 if (consumer_sockpoll[0].revents == POLLIN) {
542 DBG("consumer_should_quit wake up");
543 goto exit;
544 }
545 return 0;
546
547exit:
548 return -1;
549}
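
This helper assumes a fixed two-entry layout, set up later in lttng_consumer_thread_receive_fds(): index 0 is the should_quit pipe, index 1 the command socket. A small sketch of that setup, with a hypothetical example_wait_for_command() helper:

static int example_wait_for_command(struct lttng_consumer_local_data *ctx,
		int command_sock)
{
	struct pollfd consumer_sockpoll[2];

	consumer_sockpoll[0].fd = ctx->consumer_should_quit[0];
	consumer_sockpoll[0].events = POLLIN | POLLPRI;
	consumer_sockpoll[1].fd = command_sock;
	consumer_sockpoll[1].events = POLLIN | POLLPRI;

	/* < 0: error or quit requested; 0: data available on the command socket. */
	return lttng_consumer_poll_socket(consumer_sockpoll);
}
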
550
551/*
552 * Set the error socket.
553 */
554void lttng_consumer_set_error_sock(
555 struct lttng_consumer_local_data *ctx, int sock)
556{
557 ctx->consumer_error_socket = sock;
558}
559
560/*
561 * Set the command socket path.
562 */
563
564void lttng_consumer_set_command_sock_path(
565 struct lttng_consumer_local_data *ctx, char *sock)
566{
567 ctx->consumer_command_sock_path = sock;
568}
569
570/*
571 * Send return code to the session daemon.
572 * If the socket is not defined, we return 0; it is not a fatal error.
573 */
574int lttng_consumer_send_error(
575 struct lttng_consumer_local_data *ctx, int cmd)
576{
577 if (ctx->consumer_error_socket > 0) {
578 return lttcomm_send_unix_sock(ctx->consumer_error_socket, &cmd,
579 sizeof(enum lttcomm_sessiond_command));
580 }
581
582 return 0;
583}
584
585/*
586 * Close all the tracefiles and stream fds; should be called when all instances
587 * are destroyed.
588 */
589void lttng_consumer_cleanup(void)
590{
591 struct lttng_ht_iter iter;
592 struct lttng_ht_node_ulong *node;
593
594 rcu_read_lock();
595
596 /*
597 * Close all outfd. Called when there are no more threads running (after
598 * joining on the threads), so there is no need to protect iteration with a mutex.
599 */
600 cds_lfht_for_each_entry(consumer_data.stream_ht->ht, &iter.iter, node,
601 node) {
602 struct lttng_consumer_stream *stream =
603 caa_container_of(node, struct lttng_consumer_stream, node);
604 consumer_del_stream(stream);
605 }
606
607 cds_lfht_for_each_entry(consumer_data.channel_ht->ht, &iter.iter, node,
608 node) {
609 struct lttng_consumer_channel *channel =
610 caa_container_of(node, struct lttng_consumer_channel, node);
611 consumer_del_channel(channel);
612 }
613
614 rcu_read_unlock();
615}
616
617/*
618 * Called from signal handler.
619 */
620void lttng_consumer_should_exit(struct lttng_consumer_local_data *ctx)
621{
622 int ret;
623 consumer_quit = 1;
624 ret = write(ctx->consumer_should_quit[1], "4", 1);
625 if (ret < 0) {
626 perror("write consumer quit");
627 }
628}
629
630void lttng_consumer_sync_trace_file(
631 struct lttng_consumer_stream *stream, off_t orig_offset)
632{
633 int outfd = stream->out_fd;
634
635 /*
636 * This does a blocking write-and-wait on any page that belongs to the
637 * subbuffer prior to the one we just wrote.
638 * Don't care about error values, as these are just hints and ways to
639 * limit the amount of page cache used.
640 */
641 if (orig_offset < stream->chan->max_sb_size) {
642 return;
643 }
644 sync_file_range(outfd, orig_offset - stream->chan->max_sb_size,
645 stream->chan->max_sb_size,
646 SYNC_FILE_RANGE_WAIT_BEFORE
647 | SYNC_FILE_RANGE_WRITE
648 | SYNC_FILE_RANGE_WAIT_AFTER);
649 /*
650 * Give hints to the kernel about how we access the file:
651 * POSIX_FADV_DONTNEED : we won't re-access data in a near future after
652 * we write it.
653 *
654 * We need to call fadvise again after the file grows because the
655 * kernel does not seem to apply fadvise to non-existing parts of the
656 * file.
657 *
658 * Call fadvise _after_ having waited for the page writeback to
659 * complete because the dirty page writeback semantic is not well
660 * defined. So it can be expected to lead to lower throughput in
661 * streaming.
662 */
663 posix_fadvise(outfd, orig_offset - stream->chan->max_sb_size,
664 stream->chan->max_sb_size, POSIX_FADV_DONTNEED);
665}
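
The same flush-then-drop page-cache strategy can be shown on an arbitrary file descriptor. The sketch below is a standalone illustration, not part of this commit; it assumes Linux with _GNU_SOURCE for sync_file_range(), and, as above, ignores the return values since these calls are only hints.

static void example_flush_window(int fd, off_t window_start, off_t window_len)
{
	/* Flush the window and wait for writeback to complete. */
	sync_file_range(fd, window_start, window_len,
			SYNC_FILE_RANGE_WAIT_BEFORE
			| SYNC_FILE_RANGE_WRITE
			| SYNC_FILE_RANGE_WAIT_AFTER);
	/* Then tell the kernel we will not read these pages back soon. */
	posix_fadvise(fd, window_start, window_len, POSIX_FADV_DONTNEED);
}
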
666
667/*
668 * Initialise the necessary environment:
669 * - create a new context
670 * - create the poll_pipe
671 * - create the should_quit pipe (for signal handler)
672 * - create the thread pipe (for splice)
673 *
674 * Takes a function pointer as argument; this function is called when data is
675 * available on a buffer. It is responsible for doing the
676 * kernctl_get_next_subbuf, reading the data with mmap or splice depending on
677 * the buffer configuration, and then calling kernctl_put_next_subbuf at the end.
678 *
679 * Returns a pointer to the new context or NULL on error.
680 */
681struct lttng_consumer_local_data *lttng_consumer_create(
682 enum lttng_consumer_type type,
683 ssize_t (*buffer_ready)(struct lttng_consumer_stream *stream,
684 struct lttng_consumer_local_data *ctx),
685 int (*recv_channel)(struct lttng_consumer_channel *channel),
686 int (*recv_stream)(struct lttng_consumer_stream *stream),
687 int (*update_stream)(int stream_key, uint32_t state))
688{
689 int ret, i;
690 struct lttng_consumer_local_data *ctx;
691
692 assert(consumer_data.type == LTTNG_CONSUMER_UNKNOWN ||
693 consumer_data.type == type);
694 consumer_data.type = type;
695
696 ctx = zmalloc(sizeof(struct lttng_consumer_local_data));
697 if (ctx == NULL) {
698 perror("allocating context");
699 goto error;
700 }
701
702 ctx->consumer_error_socket = -1;
703 /* assign the callbacks */
704 ctx->on_buffer_ready = buffer_ready;
705 ctx->on_recv_channel = recv_channel;
706 ctx->on_recv_stream = recv_stream;
707 ctx->on_update_stream = update_stream;
708
709 ret = pipe(ctx->consumer_poll_pipe);
710 if (ret < 0) {
711 perror("Error creating poll pipe");
712 goto error_poll_pipe;
713 }
714
715 ret = pipe(ctx->consumer_should_quit);
716 if (ret < 0) {
717 perror("Error creating recv pipe");
718 goto error_quit_pipe;
719 }
720
721 ret = pipe(ctx->consumer_thread_pipe);
722 if (ret < 0) {
723 perror("Error creating thread pipe");
724 goto error_thread_pipe;
725 }
726
727 return ctx;
728
729
730error_thread_pipe:
731 for (i = 0; i < 2; i++) {
732 int err;
733
734 err = close(ctx->consumer_should_quit[i]);
735 if (err) {
736 PERROR("close");
737 }
738 }
739error_quit_pipe:
740 for (i = 0; i < 2; i++) {
741 int err;
742
743 err = close(ctx->consumer_poll_pipe[i]);
744 if (err) {
745 PERROR("close");
746 }
747 }
748error_poll_pipe:
749 free(ctx);
750error:
751 return NULL;
752}
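
A hedged sketch of how a consumer daemon might wire up a context: stub callbacks matching the signatures documented above, then lttng_consumer_create() with the kernel consumer type. All example_* names are hypothetical; the stubs merely forward to the dispatchers defined later in this file (assumed to be declared in consumer.h).

static ssize_t example_on_buffer_ready(struct lttng_consumer_stream *stream,
		struct lttng_consumer_local_data *ctx)
{
	return lttng_consumer_read_subbuffer(stream, ctx);
}

static int example_on_recv_channel(struct lttng_consumer_channel *channel)
{
	return 0;
}

static int example_on_recv_stream(struct lttng_consumer_stream *stream)
{
	return lttng_consumer_on_recv_stream(stream);
}

static int example_on_update_stream(int stream_key, uint32_t state)
{
	consumer_change_stream_state(stream_key,
			(enum lttng_consumer_stream_state) state);
	return 0;
}

static struct lttng_consumer_local_data *example_create_context(void)
{
	return lttng_consumer_create(LTTNG_CONSUMER_KERNEL,
			example_on_buffer_ready, example_on_recv_channel,
			example_on_recv_stream, example_on_update_stream);
}
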
753
754/*
755 * Close all fds associated with the instance and free the context.
756 */
757void lttng_consumer_destroy(struct lttng_consumer_local_data *ctx)
758{
759 int ret;
760
761 ret = close(ctx->consumer_error_socket);
762 if (ret) {
763 PERROR("close");
764 }
765 ret = close(ctx->consumer_thread_pipe[0]);
766 if (ret) {
767 PERROR("close");
768 }
769 ret = close(ctx->consumer_thread_pipe[1]);
770 if (ret) {
771 PERROR("close");
772 }
773 ret = close(ctx->consumer_poll_pipe[0]);
774 if (ret) {
775 PERROR("close");
776 }
777 ret = close(ctx->consumer_poll_pipe[1]);
778 if (ret) {
779 PERROR("close");
780 }
781 ret = close(ctx->consumer_should_quit[0]);
782 if (ret) {
783 PERROR("close");
784 }
785 ret = close(ctx->consumer_should_quit[1]);
786 if (ret) {
787 PERROR("close");
788 }
789 unlink(ctx->consumer_command_sock_path);
790 free(ctx);
791}
792
793/*
794 * Mmap the ring buffer, read it and write the data to the tracefile.
795 *
796 * Returns the number of bytes written
797 */
798ssize_t lttng_consumer_on_read_subbuffer_mmap(
799 struct lttng_consumer_local_data *ctx,
800 struct lttng_consumer_stream *stream, unsigned long len)
801{
802 switch (consumer_data.type) {
803 case LTTNG_CONSUMER_KERNEL:
804 return lttng_kconsumer_on_read_subbuffer_mmap(ctx, stream, len);
805 case LTTNG_CONSUMER32_UST:
806 case LTTNG_CONSUMER64_UST:
807 return lttng_ustconsumer_on_read_subbuffer_mmap(ctx, stream, len);
808 default:
809 ERR("Unknown consumer_data type");
810 assert(0);
811 }
812}
813
814/*
815 * Splice the data from the ring buffer to the tracefile.
816 *
817 * Returns the number of bytes spliced.
818 */
819ssize_t lttng_consumer_on_read_subbuffer_splice(
820 struct lttng_consumer_local_data *ctx,
821 struct lttng_consumer_stream *stream, unsigned long len)
822{
823 switch (consumer_data.type) {
824 case LTTNG_CONSUMER_KERNEL:
825 return lttng_kconsumer_on_read_subbuffer_splice(ctx, stream, len);
826 case LTTNG_CONSUMER32_UST:
827 case LTTNG_CONSUMER64_UST:
828 return -ENOSYS;
829 default:
830 ERR("Unknown consumer_data type");
831 assert(0);
832 return -ENOSYS;
833 }
834
835}
836
837/*
838 * Take a snapshot for a specific fd
839 *
840 * Returns 0 on success, < 0 on error
841 */
842int lttng_consumer_take_snapshot(struct lttng_consumer_local_data *ctx,
843 struct lttng_consumer_stream *stream)
844{
845 switch (consumer_data.type) {
846 case LTTNG_CONSUMER_KERNEL:
847 return lttng_kconsumer_take_snapshot(ctx, stream);
848 case LTTNG_CONSUMER32_UST:
849 case LTTNG_CONSUMER64_UST:
850 return lttng_ustconsumer_take_snapshot(ctx, stream);
851 default:
852 ERR("Unknown consumer_data type");
853 assert(0);
854 return -ENOSYS;
855 }
856
857}
858
859/*
860 * Get the produced position
861 *
862 * Returns 0 on success, < 0 on error
863 */
864int lttng_consumer_get_produced_snapshot(
865 struct lttng_consumer_local_data *ctx,
866 struct lttng_consumer_stream *stream,
867 unsigned long *pos)
868{
869 switch (consumer_data.type) {
870 case LTTNG_CONSUMER_KERNEL:
871 return lttng_kconsumer_get_produced_snapshot(ctx, stream, pos);
872 case LTTNG_CONSUMER32_UST:
873 case LTTNG_CONSUMER64_UST:
874 return lttng_ustconsumer_get_produced_snapshot(ctx, stream, pos);
875 default:
876 ERR("Unknown consumer_data type");
877 assert(0);
878 return -ENOSYS;
879 }
880}
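
A short sketch of how the two snapshot dispatchers combine, as a monitoring path might use them; example_snapshot_produced() is a hypothetical helper.

static int example_snapshot_produced(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream, unsigned long *pos)
{
	int ret;

	ret = lttng_consumer_take_snapshot(ctx, stream);
	if (ret < 0) {
		return ret;
	}
	return lttng_consumer_get_produced_snapshot(ctx, stream, pos);
}
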
881
882int lttng_consumer_recv_cmd(struct lttng_consumer_local_data *ctx,
883 int sock, struct pollfd *consumer_sockpoll)
884{
885 switch (consumer_data.type) {
886 case LTTNG_CONSUMER_KERNEL:
887 return lttng_kconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
888 case LTTNG_CONSUMER32_UST:
889 case LTTNG_CONSUMER64_UST:
890 return lttng_ustconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
891 default:
892 ERR("Unknown consumer_data type");
893 assert(0);
894 return -ENOSYS;
895 }
896}
897
898/*
899 * This thread polls the fds in the set to consume the data and write
900 * it to the tracefile if necessary.
901 */
902void *lttng_consumer_thread_poll_fds(void *data)
903{
904 int num_rdy, num_hup, high_prio, ret, i;
905 struct pollfd *pollfd = NULL;
906 /* local view of the streams */
907 struct lttng_consumer_stream **local_stream = NULL;
909 /* local view of consumer_data.stream_count */
909 int nb_fd = 0;
910 char tmp;
911 int tmp2;
912 struct lttng_consumer_local_data *ctx = data;
913
914 rcu_register_thread();
915
916 local_stream = zmalloc(sizeof(struct lttng_consumer_stream));
917
918 while (1) {
919 high_prio = 0;
920 num_hup = 0;
921
922 /*
923 * the fds set has been updated, we need to update our
924 * local array as well
925 */
926 pthread_mutex_lock(&consumer_data.lock);
927 if (consumer_data.need_update) {
928 if (pollfd != NULL) {
929 free(pollfd);
930 pollfd = NULL;
931 }
932 if (local_stream != NULL) {
933 free(local_stream);
934 local_stream = NULL;
935 }
936
937 /* allocate for all fds + 1 for the consumer_poll_pipe */
938 pollfd = zmalloc((consumer_data.stream_count + 1) * sizeof(struct pollfd));
939 if (pollfd == NULL) {
940 perror("pollfd malloc");
941 pthread_mutex_unlock(&consumer_data.lock);
942 goto end;
943 }
944
945 /* allocate for all fds + 1 for the consumer_poll_pipe */
946 local_stream = zmalloc((consumer_data.stream_count + 1) *
947 sizeof(struct lttng_consumer_stream));
948 if (local_stream == NULL) {
949 perror("local_stream malloc");
950 pthread_mutex_unlock(&consumer_data.lock);
951 goto end;
952 }
953 ret = consumer_update_poll_array(ctx, &pollfd, local_stream);
954 if (ret < 0) {
955 ERR("Error in allocating pollfd or local_outfds");
956 lttng_consumer_send_error(ctx, CONSUMERD_POLL_ERROR);
957 pthread_mutex_unlock(&consumer_data.lock);
958 goto end;
959 }
960 nb_fd = ret;
961 consumer_data.need_update = 0;
962 }
963 pthread_mutex_unlock(&consumer_data.lock);
964
965 /* No FDs left and consumer_quit is set: clean up and exit the thread */
966 if (nb_fd == 0 && consumer_quit == 1) {
967 goto end;
968 }
969 /* poll on the array of fds */
970 restart:
971 DBG("polling on %d fd", nb_fd + 1);
972 num_rdy = poll(pollfd, nb_fd + 1, consumer_poll_timeout);
973 DBG("poll num_rdy : %d", num_rdy);
974 if (num_rdy == -1) {
975 /*
976 * Restart interrupted system call.
977 */
978 if (errno == EINTR) {
979 goto restart;
980 }
981 perror("Poll error");
982 lttng_consumer_send_error(ctx, CONSUMERD_POLL_ERROR);
983 goto end;
984 } else if (num_rdy == 0) {
985 DBG("Polling thread timed out");
986 goto end;
987 }
988
989 /*
990 * If the consumer_poll_pipe triggered poll go
991 * directly to the beginning of the loop to update the
992 * array. We want to prioritize array update over
993 * low-priority reads.
994 */
995 if (pollfd[nb_fd].revents & POLLIN) {
996 DBG("consumer_poll_pipe wake up");
997 tmp2 = read(ctx->consumer_poll_pipe[0], &tmp, 1);
998 if (tmp2 < 0) {
999 perror("read consumer poll");
1000 }
1001 continue;
1002 }
1003
1004 /* Take care of high priority channels first. */
1005 for (i = 0; i < nb_fd; i++) {
1006 if (pollfd[i].revents & POLLPRI) {
1007 ssize_t len;
1008
1009 DBG("Urgent read on fd %d", pollfd[i].fd);
1010 high_prio = 1;
1011 len = ctx->on_buffer_ready(local_stream[i], ctx);
1012 /* it's ok to have an unavailable sub-buffer */
1013 if (len < 0 && len != -EAGAIN) {
1014 goto end;
1015 } else if (len > 0) {
1016 local_stream[i]->data_read = 1;
1017 }
1018 }
1019 }
1020
1021 /*
1022 * If we read high prio channel in this loop, try again
1023 * for more high prio data.
1024 */
1025 if (high_prio) {
1026 continue;
1027 }
1028
1029 /* Take care of low priority channels. */
1030 for (i = 0; i < nb_fd; i++) {
1031 if ((pollfd[i].revents & POLLIN) ||
1032 local_stream[i]->hangup_flush_done) {
1033 ssize_t len;
1034
1035 DBG("Normal read on fd %d", pollfd[i].fd);
1036 len = ctx->on_buffer_ready(local_stream[i], ctx);
1037 /* it's ok to have an unavailable sub-buffer */
1038 if (len < 0 && len != -EAGAIN) {
1039 goto end;
1040 } else if (len > 0) {
1041 local_stream[i]->data_read = 1;
1042 }
1043 }
1044 }
1045
1046 /* Handle hangup and errors */
1047 for (i = 0; i < nb_fd; i++) {
1048 if (!local_stream[i]->hangup_flush_done
1049 && (pollfd[i].revents & (POLLHUP | POLLERR | POLLNVAL))
1050 && (consumer_data.type == LTTNG_CONSUMER32_UST
1051 || consumer_data.type == LTTNG_CONSUMER64_UST)) {
1052 DBG("fd %d is hup|err|nval. Attempting flush and read.",
1053 pollfd[i].fd);
1054 lttng_ustconsumer_on_stream_hangup(local_stream[i]);
1055 /* Attempt read again, for the data we just flushed. */
1056 local_stream[i]->data_read = 1;
1057 }
1058 /*
1059 * If the poll flag is HUP/ERR/NVAL and we have
1060 * read no data in this pass, we can remove the
1061 * stream from its hash table.
1062 */
1063 if ((pollfd[i].revents & POLLHUP)) {
1064 DBG("Polling fd %d tells it has hung up.", pollfd[i].fd);
1065 if (!local_stream[i]->data_read) {
1066 consumer_del_stream(local_stream[i]);
1067 num_hup++;
1068 }
1069 } else if (pollfd[i].revents & POLLERR) {
1070 ERR("Error returned in polling fd %d.", pollfd[i].fd);
1071 if (!local_stream[i]->data_read) {
1072 consumer_del_stream(local_stream[i]);
1073 num_hup++;
1074 }
1075 } else if (pollfd[i].revents & POLLNVAL) {
1076 ERR("Polling fd %d tells fd is not open.", pollfd[i].fd);
1077 if (!local_stream[i]->data_read) {
1078 consumer_del_stream(local_stream[i]);
1079 num_hup++;
1080 }
1081 }
1082 local_stream[i]->data_read = 0;
1083 }
1084 }
1085end:
1086 DBG("polling thread exiting");
1087 if (pollfd != NULL) {
1088 free(pollfd);
1089 pollfd = NULL;
1090 }
1091 if (local_stream != NULL) {
1092 free(local_stream);
1093 local_stream = NULL;
1094 }
1095 rcu_unregister_thread();
1096 return NULL;
1097}
1098
1099/*
1100 * This thread listens on the consumerd socket and receives the file
1101 * descriptors from the session daemon.
1102 */
1103void *lttng_consumer_thread_receive_fds(void *data)
1104{
1105 int sock, client_socket, ret;
1106 /*
1107 * Structure to poll for incoming data on the communication socket; polling
1108 * avoids blocking on the socket.
1109 */
1110 struct pollfd consumer_sockpoll[2];
1111 struct lttng_consumer_local_data *ctx = data;
1112
1113 rcu_register_thread();
1114
1115 DBG("Creating command socket %s", ctx->consumer_command_sock_path);
1116 unlink(ctx->consumer_command_sock_path);
1117 client_socket = lttcomm_create_unix_sock(ctx->consumer_command_sock_path);
1118 if (client_socket < 0) {
1119 ERR("Cannot create command socket");
1120 goto end;
1121 }
1122
1123 ret = lttcomm_listen_unix_sock(client_socket);
1124 if (ret < 0) {
1125 goto end;
1126 }
1127
1128 DBG("Sending ready command to lttng-sessiond");
1129 ret = lttng_consumer_send_error(ctx, CONSUMERD_COMMAND_SOCK_READY);
1130 /* return < 0 on error, but == 0 is not fatal */
1131 if (ret < 0) {
1132 ERR("Error sending ready command to lttng-sessiond");
1133 goto end;
1134 }
1135
1136 ret = fcntl(client_socket, F_SETFL, O_NONBLOCK);
1137 if (ret < 0) {
1138 perror("fcntl O_NONBLOCK");
1139 goto end;
1140 }
1141
1142 /* prepare the FDs to poll: the client socket and the should_quit pipe */
1143 consumer_sockpoll[0].fd = ctx->consumer_should_quit[0];
1144 consumer_sockpoll[0].events = POLLIN | POLLPRI;
1145 consumer_sockpoll[1].fd = client_socket;
1146 consumer_sockpoll[1].events = POLLIN | POLLPRI;
1147
1148 if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
1149 goto end;
1150 }
1151 DBG("Connection on client_socket");
1152
1153 /* Blocking call, waiting for transmission */
1154 sock = lttcomm_accept_unix_sock(client_socket);
1155 if (sock <= 0) {
1156 WARN("On accept");
1157 goto end;
1158 }
1159 ret = fcntl(sock, F_SETFL, O_NONBLOCK);
1160 if (ret < 0) {
1161 perror("fcntl O_NONBLOCK");
1162 goto end;
1163 }
1164
1165 /* update the polling structure to poll on the established socket */
1166 consumer_sockpoll[1].fd = sock;
1167 consumer_sockpoll[1].events = POLLIN | POLLPRI;
1168
1169 while (1) {
1170 if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
1171 goto end;
1172 }
1173 DBG("Incoming command on sock");
1174 ret = lttng_consumer_recv_cmd(ctx, sock, consumer_sockpoll);
1175 if (ret == -ENOENT) {
1176 DBG("Received STOP command");
1177 goto end;
1178 }
1179 if (ret < 0) {
1180 ERR("Communication interrupted on command socket");
1181 goto end;
1182 }
1183 if (consumer_quit) {
1184 DBG("consumer_thread_receive_fds received quit from signal");
1185 goto end;
1186 }
1187 DBG("received fds on sock");
1188 }
1189end:
1190 DBG("consumer_thread_receive_fds exiting");
1191
1192 /*
1193 * when all fds have hung up, the polling thread
1194 * can exit cleanly
1195 */
1196 consumer_quit = 1;
1197
1198 /*
1199 * 2s of grace period, if no polling events occur during
1200 * this period, the polling thread will exit even if there
1201 * are still open FDs (should not happen, but safety mechanism).
1202 */
1203 consumer_poll_timeout = LTTNG_CONSUMER_POLL_TIMEOUT;
1204
1205 /* wake up the polling thread */
1206 ret = write(ctx->consumer_poll_pipe[1], "4", 1);
1207 if (ret < 0) {
1208 perror("poll pipe write");
1209 }
1210 rcu_unregister_thread();
1211 return NULL;
1212}
1213
1214ssize_t lttng_consumer_read_subbuffer(struct lttng_consumer_stream *stream,
1215 struct lttng_consumer_local_data *ctx)
1216{
1217 switch (consumer_data.type) {
1218 case LTTNG_CONSUMER_KERNEL:
1219 return lttng_kconsumer_read_subbuffer(stream, ctx);
1220 case LTTNG_CONSUMER32_UST:
1221 case LTTNG_CONSUMER64_UST:
1222 return lttng_ustconsumer_read_subbuffer(stream, ctx);
1223 default:
1224 ERR("Unknown consumer_data type");
1225 assert(0);
1226 return -ENOSYS;
1227 }
1228}
1229
1230int lttng_consumer_on_recv_stream(struct lttng_consumer_stream *stream)
1231{
1232 switch (consumer_data.type) {
1233 case LTTNG_CONSUMER_KERNEL:
1234 return lttng_kconsumer_on_recv_stream(stream);
1235 case LTTNG_CONSUMER32_UST:
1236 case LTTNG_CONSUMER64_UST:
1237 return lttng_ustconsumer_on_recv_stream(stream);
1238 default:
1239 ERR("Unknown consumer_data type");
1240 assert(0);
1241 return -ENOSYS;
1242 }
1243}
1244
1245/*
1246 * Allocate and set consumer data hash tables.
1247 */
1248void lttng_consumer_init(void)
1249{
1250 consumer_data.stream_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
1251 consumer_data.channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
1252}
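
A hedged sketch of the startup order this API implies: the hash tables must exist before any channel or stream is registered, then a context is created and the two worker threads are spawned. example_start_consumer() is hypothetical (it reuses example_create_context() from the earlier sketch), and the socket path is an arbitrary example value.

static int example_start_consumer(struct lttng_consumer_local_data **ctx_out)
{
	static char sock_path[] = "/tmp/example-consumerd.sock";
	pthread_t poll_thread, recv_thread;
	struct lttng_consumer_local_data *ctx;

	/* Hash tables must exist before any channel or stream is added. */
	lttng_consumer_init();

	/* example_create_context() is the hypothetical helper sketched earlier. */
	ctx = example_create_context();
	if (ctx == NULL) {
		return -1;
	}
	lttng_consumer_set_command_sock_path(ctx, sock_path);

	if (pthread_create(&recv_thread, NULL,
			lttng_consumer_thread_receive_fds, ctx) != 0) {
		return -1;
	}
	if (pthread_create(&poll_thread, NULL,
			lttng_consumer_thread_poll_fds, ctx) != 0) {
		return -1;
	}
	*ctx_out = ctx;
	return 0;
}
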
1253