Merge branch 'master' of git://git.lttng.org/lttng-tools
[lttng-tools.git] / src / common / consumer.c
1 /*
2 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; only version 2
8 * of the License.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 */
19
20 #define _GNU_SOURCE
21 #include <assert.h>
22 #include <poll.h>
23 #include <pthread.h>
24 #include <stdlib.h>
25 #include <string.h>
26 #include <sys/mman.h>
27 #include <sys/socket.h>
28 #include <sys/types.h>
29 #include <unistd.h>
30
31 #include <common/common.h>
32 #include <common/kernel-ctl/kernel-ctl.h>
33 #include <common/sessiond-comm/sessiond-comm.h>
34 #include <common/kernel-consumer/kernel-consumer.h>
35 #include <common/ust-consumer/ust-consumer.h>
36
37 #include "consumer.h"
38
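/*
 * Global consumer data, shared between the polling thread and the command
 * thread. Access is protected by consumer_data.lock; need_update tells the
 * polling thread to rebuild its local pollfd array.
 */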
39 struct lttng_consumer_global_data consumer_data = {
40 .stream_count = 0,
41 .need_update = 1,
42 .type = LTTNG_CONSUMER_UNKNOWN,
43 };
44
45 /* timeout parameter, to control the polling thread grace period. */
46 int consumer_poll_timeout = -1;
47
48 /*
 49  * Flag to inform the polling thread to quit when all fds have hung up. Updated by
 50  * lttng_consumer_thread_receive_fds() when it notices that all fds have hung up.
 51  * Also updated by the signal handler (lttng_consumer_should_exit()). Read by the
 52  * polling threads.
53 */
54 volatile int consumer_quit = 0;
55
56 /*
57 * Find a stream. The consumer_data.lock must be locked during this
58 * call.
59 */
60 static struct lttng_consumer_stream *consumer_find_stream(int key)
61 {
62 struct lttng_ht_iter iter;
63 struct lttng_ht_node_ulong *node;
64 struct lttng_consumer_stream *stream = NULL;
65
66 /* Negative keys are lookup failures */
67 if (key < 0)
68 return NULL;
69
70 rcu_read_lock();
71
72 lttng_ht_lookup(consumer_data.stream_ht, (void *)((unsigned long) key),
73 &iter);
74 node = lttng_ht_iter_get_node_ulong(&iter);
75 if (node != NULL) {
76 stream = caa_container_of(node, struct lttng_consumer_stream, node);
77 }
78
79 rcu_read_unlock();
80
81 return stream;
82 }
83
84 static void consumer_steal_stream_key(int key)
85 {
86 struct lttng_consumer_stream *stream;
87
88 stream = consumer_find_stream(key);
89 if (stream)
90 stream->key = -1;
91 }
92
93 static struct lttng_consumer_channel *consumer_find_channel(int key)
94 {
95 struct lttng_ht_iter iter;
96 struct lttng_ht_node_ulong *node;
97 struct lttng_consumer_channel *channel = NULL;
98
99 /* Negative keys are lookup failures */
100 if (key < 0)
101 return NULL;
102
103 rcu_read_lock();
104
105 lttng_ht_lookup(consumer_data.channel_ht, (void *)((unsigned long) key),
106 &iter);
107 node = lttng_ht_iter_get_node_ulong(&iter);
108 if (node != NULL) {
109 channel = caa_container_of(node, struct lttng_consumer_channel, node);
110 }
111
112 rcu_read_unlock();
113
114 return channel;
115 }
116
117 static void consumer_steal_channel_key(int key)
118 {
119 struct lttng_consumer_channel *channel;
120
121 channel = consumer_find_channel(key);
122 if (channel)
123 channel->key = -1;
124 }
125
126 /*
127 * Remove a stream from the global list protected by a mutex. This
128 * function is also responsible for freeing its data structures.
129 */
130 void consumer_del_stream(struct lttng_consumer_stream *stream)
131 {
132 int ret;
133 struct lttng_ht_iter iter;
134 struct lttng_consumer_channel *free_chan = NULL;
135
136 pthread_mutex_lock(&consumer_data.lock);
137
138 switch (consumer_data.type) {
139 case LTTNG_CONSUMER_KERNEL:
140 if (stream->mmap_base != NULL) {
141 ret = munmap(stream->mmap_base, stream->mmap_len);
142 if (ret != 0) {
143 perror("munmap");
144 }
145 }
146 break;
147 case LTTNG_CONSUMER32_UST:
148 case LTTNG_CONSUMER64_UST:
149 lttng_ustconsumer_del_stream(stream);
150 break;
151 default:
152 ERR("Unknown consumer_data type");
153 assert(0);
154 goto end;
155 }
156
157 rcu_read_lock();
158
159 /* Get stream node from hash table */
160 lttng_ht_lookup(consumer_data.stream_ht,
161 (void *)((unsigned long) stream->key), &iter);
162 /* Remove stream node from hash table */
163 ret = lttng_ht_del(consumer_data.stream_ht, &iter);
164 assert(!ret);
165
166 rcu_read_unlock();
167
168 if (consumer_data.stream_count <= 0) {
169 goto end;
170 }
171 consumer_data.stream_count--;
172 if (!stream) {
173 goto end;
174 }
175 if (stream->out_fd >= 0) {
176 ret = close(stream->out_fd);
177 if (ret) {
178 PERROR("close");
179 }
180 }
181 if (stream->wait_fd >= 0 && !stream->wait_fd_is_copy) {
182 ret = close(stream->wait_fd);
183 if (ret) {
184 PERROR("close");
185 }
186 }
187 if (stream->shm_fd >= 0 && stream->wait_fd != stream->shm_fd) {
188 ret = close(stream->shm_fd);
189 if (ret) {
190 PERROR("close");
191 }
192 }
193 if (!--stream->chan->refcount)
194 free_chan = stream->chan;
195 free(stream);
196 end:
197 consumer_data.need_update = 1;
198 pthread_mutex_unlock(&consumer_data.lock);
199
200 if (free_chan)
201 consumer_del_channel(free_chan);
202 }
203
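/*
 * RCU callback: runs after a grace period, once no RCU reader can still hold
 * a reference to the stream node, and performs the actual stream deletion.
 */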
204 static void consumer_del_stream_rcu(struct rcu_head *head)
205 {
206 struct lttng_ht_node_ulong *node =
207 caa_container_of(head, struct lttng_ht_node_ulong, head);
208 struct lttng_consumer_stream *stream =
209 caa_container_of(node, struct lttng_consumer_stream, node);
210
211 consumer_del_stream(stream);
212 }
213
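/*
 * Allocate and initialise a stream object, looking up its parent channel by
 * key and taking a reference on it.
 */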
214 struct lttng_consumer_stream *consumer_allocate_stream(
215 int channel_key, int stream_key,
216 int shm_fd, int wait_fd,
217 enum lttng_consumer_stream_state state,
218 uint64_t mmap_len,
219 enum lttng_event_output output,
220 const char *path_name,
221 uid_t uid,
222 gid_t gid)
223 {
224 struct lttng_consumer_stream *stream;
225 int ret;
226
227 stream = zmalloc(sizeof(*stream));
228 if (stream == NULL) {
229 perror("malloc struct lttng_consumer_stream");
230 goto end;
231 }
232 stream->chan = consumer_find_channel(channel_key);
233 if (!stream->chan) {
234 perror("Unable to find channel key");
235 goto end;
236 }
237 stream->chan->refcount++;
238 stream->key = stream_key;
239 stream->shm_fd = shm_fd;
240 stream->wait_fd = wait_fd;
241 stream->out_fd = -1;
242 stream->out_fd_offset = 0;
243 stream->state = state;
244 stream->mmap_len = mmap_len;
245 stream->mmap_base = NULL;
246 stream->output = output;
247 stream->uid = uid;
248 stream->gid = gid;
249 strncpy(stream->path_name, path_name, PATH_MAX - 1);
250 stream->path_name[PATH_MAX - 1] = '\0';
251 lttng_ht_node_init_ulong(&stream->node, stream->key);
252
253 switch (consumer_data.type) {
254 case LTTNG_CONSUMER_KERNEL:
255 break;
256 case LTTNG_CONSUMER32_UST:
257 case LTTNG_CONSUMER64_UST:
258 stream->cpu = stream->chan->cpucount++;
259 ret = lttng_ustconsumer_allocate_stream(stream);
260 if (ret) {
261 free(stream);
262 return NULL;
263 }
264 break;
265 default:
266 ERR("Unknown consumer_data type");
267 assert(0);
268 goto end;
269 }
270 DBG("Allocated stream %s (key %d, shm_fd %d, wait_fd %d, mmap_len %llu, out_fd %d)",
271 stream->path_name, stream->key,
272 stream->shm_fd,
273 stream->wait_fd,
274 (unsigned long long) stream->mmap_len,
275 stream->out_fd);
276 end:
277 return stream;
278 }
279
280 /*
281 * Add a stream to the global list protected by a mutex.
282 */
283 int consumer_add_stream(struct lttng_consumer_stream *stream)
284 {
285 int ret = 0;
286
287 pthread_mutex_lock(&consumer_data.lock);
288 /* Steal stream identifier, for UST */
289 consumer_steal_stream_key(stream->key);
290 rcu_read_lock();
291 lttng_ht_add_unique_ulong(consumer_data.stream_ht, &stream->node);
292 rcu_read_unlock();
293 consumer_data.stream_count++;
294 consumer_data.need_update = 1;
295
296 switch (consumer_data.type) {
297 case LTTNG_CONSUMER_KERNEL:
298 break;
299 case LTTNG_CONSUMER32_UST:
300 case LTTNG_CONSUMER64_UST:
301 /* Streams are in CPU number order (we rely on this) */
302 stream->cpu = stream->chan->nr_streams++;
303 break;
304 default:
305 ERR("Unknown consumer_data type");
306 assert(0);
307 goto end;
308 }
309
310 end:
311 pthread_mutex_unlock(&consumer_data.lock);
312 return ret;
313 }
314
315 /*
316 * Update a stream according to what we just received.
317 */
318 void consumer_change_stream_state(int stream_key,
319 enum lttng_consumer_stream_state state)
320 {
321 struct lttng_consumer_stream *stream;
322
323 pthread_mutex_lock(&consumer_data.lock);
324 stream = consumer_find_stream(stream_key);
325 if (stream) {
326 stream->state = state;
327 }
328 consumer_data.need_update = 1;
329 pthread_mutex_unlock(&consumer_data.lock);
330 }
331
332 /*
333 * Remove a channel from the global list protected by a mutex. This
334 * function is also responsible for freeing its data structures.
335 */
336 void consumer_del_channel(struct lttng_consumer_channel *channel)
337 {
338 int ret;
339 struct lttng_ht_iter iter;
340
341 pthread_mutex_lock(&consumer_data.lock);
342
343 switch (consumer_data.type) {
344 case LTTNG_CONSUMER_KERNEL:
345 break;
346 case LTTNG_CONSUMER32_UST:
347 case LTTNG_CONSUMER64_UST:
348 lttng_ustconsumer_del_channel(channel);
349 break;
350 default:
351 ERR("Unknown consumer_data type");
352 assert(0);
353 goto end;
354 }
355
356 rcu_read_lock();
357
358 lttng_ht_lookup(consumer_data.channel_ht,
359 (void *)((unsigned long) channel->key), &iter);
360 ret = lttng_ht_del(consumer_data.channel_ht, &iter);
361 assert(!ret);
362
363 rcu_read_unlock();
364
365 if (channel->mmap_base != NULL) {
366 ret = munmap(channel->mmap_base, channel->mmap_len);
367 if (ret != 0) {
368 perror("munmap");
369 }
370 }
371 if (channel->wait_fd >= 0 && !channel->wait_fd_is_copy) {
372 ret = close(channel->wait_fd);
373 if (ret) {
374 PERROR("close");
375 }
376 }
377 if (channel->shm_fd >= 0 && channel->wait_fd != channel->shm_fd) {
378 ret = close(channel->shm_fd);
379 if (ret) {
380 PERROR("close");
381 }
382 }
383 free(channel);
384 end:
385 pthread_mutex_unlock(&consumer_data.lock);
386 }
387
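/*
 * RCU callback performing the actual channel deletion once a grace period
 * has elapsed.
 */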
388 static void consumer_del_channel_rcu(struct rcu_head *head)
389 {
390 struct lttng_ht_node_ulong *node =
391 caa_container_of(head, struct lttng_ht_node_ulong, head);
 392         struct lttng_consumer_channel *channel =
393 caa_container_of(node, struct lttng_consumer_channel, node);
394
395 consumer_del_channel(channel);
396 }
397
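/*
 * Allocate and initialise a channel object from its key, shm/wait fds, mmap
 * length and maximum sub-buffer size.
 */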
398 struct lttng_consumer_channel *consumer_allocate_channel(
399 int channel_key,
400 int shm_fd, int wait_fd,
401 uint64_t mmap_len,
402 uint64_t max_sb_size)
403 {
404 struct lttng_consumer_channel *channel;
405 int ret;
406
407 channel = zmalloc(sizeof(*channel));
408 if (channel == NULL) {
409 perror("malloc struct lttng_consumer_channel");
410 goto end;
411 }
412 channel->key = channel_key;
413 channel->shm_fd = shm_fd;
414 channel->wait_fd = wait_fd;
415 channel->mmap_len = mmap_len;
416 channel->max_sb_size = max_sb_size;
417 channel->refcount = 0;
418 channel->nr_streams = 0;
419 lttng_ht_node_init_ulong(&channel->node, channel->key);
420
421 switch (consumer_data.type) {
422 case LTTNG_CONSUMER_KERNEL:
423 channel->mmap_base = NULL;
424 channel->mmap_len = 0;
425 break;
426 case LTTNG_CONSUMER32_UST:
427 case LTTNG_CONSUMER64_UST:
428 ret = lttng_ustconsumer_allocate_channel(channel);
429 if (ret) {
430 free(channel);
431 return NULL;
432 }
433 break;
434 default:
435 ERR("Unknown consumer_data type");
436 assert(0);
437 goto end;
438 }
439 DBG("Allocated channel (key %d, shm_fd %d, wait_fd %d, mmap_len %llu, max_sb_size %llu)",
440 channel->key,
441 channel->shm_fd,
442 channel->wait_fd,
443 (unsigned long long) channel->mmap_len,
444 (unsigned long long) channel->max_sb_size);
445 end:
446 return channel;
447 }
448
449 /*
450 * Add a channel to the global list protected by a mutex.
451 */
452 int consumer_add_channel(struct lttng_consumer_channel *channel)
453 {
454 pthread_mutex_lock(&consumer_data.lock);
455 /* Steal channel identifier, for UST */
456 consumer_steal_channel_key(channel->key);
457 rcu_read_lock();
458 lttng_ht_add_unique_ulong(consumer_data.channel_ht, &channel->node);
459 rcu_read_unlock();
460 pthread_mutex_unlock(&consumer_data.lock);
461 return 0;
462 }
463
464 /*
 465  * Allocate the pollfd structure and the local view of the out fds to avoid
 466  * doing a lookup in the linked list and to prevent concurrency issues when
 467  * writing is needed. Called with consumer_data.lock held.
468 *
469 * Returns the number of fds in the structures.
470 */
471 int consumer_update_poll_array(
472 struct lttng_consumer_local_data *ctx, struct pollfd **pollfd,
473 struct lttng_consumer_stream **local_stream)
474 {
475 int i = 0;
476 struct lttng_ht_iter iter;
477 struct lttng_consumer_stream *stream;
478
479 DBG("Updating poll fd array");
480 cds_lfht_for_each_entry(consumer_data.stream_ht->ht, &iter.iter, stream,
481 node.node) {
482 if (stream->state != LTTNG_CONSUMER_ACTIVE_STREAM) {
483 continue;
484 }
485 DBG("Active FD %d", stream->wait_fd);
486 (*pollfd)[i].fd = stream->wait_fd;
487 (*pollfd)[i].events = POLLIN | POLLPRI;
488 local_stream[i] = stream;
489 i++;
490 }
491
492 /*
493 * Insert the consumer_poll_pipe at the end of the array and don't
494 * increment i so nb_fd is the number of real FD.
495 */
496 (*pollfd)[i].fd = ctx->consumer_poll_pipe[0];
497 (*pollfd)[i].events = POLLIN;
498 return i;
499 }
500
501 /*
 502  * Poll on the should_quit pipe and the command socket. Returns -1 on error
 503  * (the caller should exit), 0 if data is available on the command socket.
504 */
505 int lttng_consumer_poll_socket(struct pollfd *consumer_sockpoll)
506 {
507 int num_rdy;
508
509 restart:
510 num_rdy = poll(consumer_sockpoll, 2, -1);
511 if (num_rdy == -1) {
512 /*
513 * Restart interrupted system call.
514 */
515 if (errno == EINTR) {
516 goto restart;
517 }
518 perror("Poll error");
519 goto exit;
520 }
521 if (consumer_sockpoll[0].revents == POLLIN) {
522 DBG("consumer_should_quit wake up");
523 goto exit;
524 }
525 return 0;
526
527 exit:
528 return -1;
529 }
530
531 /*
532 * Set the error socket.
533 */
534 void lttng_consumer_set_error_sock(
535 struct lttng_consumer_local_data *ctx, int sock)
536 {
537 ctx->consumer_error_socket = sock;
538 }
539
540 /*
541 * Set the command socket path.
542 */
543
544 void lttng_consumer_set_command_sock_path(
545 struct lttng_consumer_local_data *ctx, char *sock)
546 {
547 ctx->consumer_command_sock_path = sock;
548 }
549
550 /*
551 * Send return code to the session daemon.
 552  * If the socket is not defined, we return 0; it is not a fatal error.
553 */
554 int lttng_consumer_send_error(
555 struct lttng_consumer_local_data *ctx, int cmd)
556 {
557 if (ctx->consumer_error_socket > 0) {
558 return lttcomm_send_unix_sock(ctx->consumer_error_socket, &cmd,
559 sizeof(enum lttcomm_sessiond_command));
560 }
561
562 return 0;
563 }
564
565 /*
566 * Close all the tracefiles and stream fds, should be called when all instances
567 * are destroyed.
568 */
569 void lttng_consumer_cleanup(void)
570 {
571 int ret;
572 struct lttng_ht_iter iter;
573 struct lttng_ht_node_ulong *node;
574
575 rcu_read_lock();
576
577 /*
 578          * Close all outfd. Called when there are no more threads running (after
 579          * joining on the threads); no need to protect list iteration with a mutex.
580 */
581 cds_lfht_for_each_entry(consumer_data.stream_ht->ht, &iter.iter, node,
582 node) {
583 ret = lttng_ht_del(consumer_data.stream_ht, &iter);
584 assert(!ret);
585 call_rcu(&node->head, consumer_del_stream_rcu);
586 }
587
588 cds_lfht_for_each_entry(consumer_data.channel_ht->ht, &iter.iter, node,
589 node) {
590 ret = lttng_ht_del(consumer_data.channel_ht, &iter);
591 assert(!ret);
592 call_rcu(&node->head, consumer_del_channel_rcu);
593 }
594
595 rcu_read_unlock();
596 }
597
598 /*
599 * Called from signal handler.
600 */
601 void lttng_consumer_should_exit(struct lttng_consumer_local_data *ctx)
602 {
603 int ret;
604 consumer_quit = 1;
605 ret = write(ctx->consumer_should_quit[1], "4", 1);
606 if (ret < 0) {
607 perror("write consumer quit");
608 }
609 }
610
611 void lttng_consumer_sync_trace_file(
612 struct lttng_consumer_stream *stream, off_t orig_offset)
613 {
614 int outfd = stream->out_fd;
615
616 /*
617 * This does a blocking write-and-wait on any page that belongs to the
618 * subbuffer prior to the one we just wrote.
619 * Don't care about error values, as these are just hints and ways to
620 * limit the amount of page cache used.
621 */
622 if (orig_offset < stream->chan->max_sb_size) {
623 return;
624 }
625 lttng_sync_file_range(outfd, orig_offset - stream->chan->max_sb_size,
626 stream->chan->max_sb_size,
627 SYNC_FILE_RANGE_WAIT_BEFORE
628 | SYNC_FILE_RANGE_WRITE
629 | SYNC_FILE_RANGE_WAIT_AFTER);
630 /*
631 * Give hints to the kernel about how we access the file:
 632          * POSIX_FADV_DONTNEED: we won't re-access the data in the near future after
 633          * we write it.
634 *
635 * We need to call fadvise again after the file grows because the
636 * kernel does not seem to apply fadvise to non-existing parts of the
637 * file.
638 *
639 * Call fadvise _after_ having waited for the page writeback to
640 * complete because the dirty page writeback semantic is not well
641 * defined. So it can be expected to lead to lower throughput in
642 * streaming.
643 */
644 posix_fadvise(outfd, orig_offset - stream->chan->max_sb_size,
645 stream->chan->max_sb_size, POSIX_FADV_DONTNEED);
646 }
647
648 /*
 649  * Initialise the necessary environment:
650 * - create a new context
651 * - create the poll_pipe
652 * - create the should_quit pipe (for signal handler)
653 * - create the thread pipe (for splice)
654 *
 655  * Takes a function pointer as argument; this function is called when data is
 656  * available on a buffer. The function is responsible for calling
 657  * kernctl_get_next_subbuf, reading the data with mmap or splice depending on
 658  * the buffer configuration, and then calling kernctl_put_next_subbuf at the end.
659 *
660 * Returns a pointer to the new context or NULL on error.
661 */
662 struct lttng_consumer_local_data *lttng_consumer_create(
663 enum lttng_consumer_type type,
664 ssize_t (*buffer_ready)(struct lttng_consumer_stream *stream,
665 struct lttng_consumer_local_data *ctx),
666 int (*recv_channel)(struct lttng_consumer_channel *channel),
667 int (*recv_stream)(struct lttng_consumer_stream *stream),
668 int (*update_stream)(int stream_key, uint32_t state))
669 {
670 int ret, i;
671 struct lttng_consumer_local_data *ctx;
672
673 assert(consumer_data.type == LTTNG_CONSUMER_UNKNOWN ||
674 consumer_data.type == type);
675 consumer_data.type = type;
676
677 ctx = zmalloc(sizeof(struct lttng_consumer_local_data));
678 if (ctx == NULL) {
679 perror("allocating context");
680 goto error;
681 }
682
683 ctx->consumer_error_socket = -1;
684 /* assign the callbacks */
685 ctx->on_buffer_ready = buffer_ready;
686 ctx->on_recv_channel = recv_channel;
687 ctx->on_recv_stream = recv_stream;
688 ctx->on_update_stream = update_stream;
689
690 ret = pipe(ctx->consumer_poll_pipe);
691 if (ret < 0) {
692 perror("Error creating poll pipe");
693 goto error_poll_pipe;
694 }
695
696 ret = pipe(ctx->consumer_should_quit);
697 if (ret < 0) {
698 perror("Error creating recv pipe");
699 goto error_quit_pipe;
700 }
701
702 ret = pipe(ctx->consumer_thread_pipe);
703 if (ret < 0) {
704 perror("Error creating thread pipe");
705 goto error_thread_pipe;
706 }
707
708 return ctx;
709
710
711 error_thread_pipe:
712 for (i = 0; i < 2; i++) {
713 int err;
714
715 err = close(ctx->consumer_should_quit[i]);
716 if (err) {
717 PERROR("close");
718 }
719 }
720 error_quit_pipe:
721 for (i = 0; i < 2; i++) {
722 int err;
723
724 err = close(ctx->consumer_poll_pipe[i]);
725 if (err) {
726 PERROR("close");
727 }
728 }
729 error_poll_pipe:
730 free(ctx);
731 error:
732 return NULL;
733 }
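/*
 * A minimal sketch of how a consumer daemon might create and configure a
 * context with this API (hypothetical callback name and socket variables;
 * error handling elided):
 *
 *	static int on_recv_channel(struct lttng_consumer_channel *channel)
 *	{
 *		return 0;
 *	}
 *
 *	ctx = lttng_consumer_create(LTTNG_CONSUMER_KERNEL,
 *			lttng_consumer_read_subbuffer,
 *			on_recv_channel, NULL, NULL);
 *	lttng_consumer_set_error_sock(ctx, error_sock);
 *	lttng_consumer_set_command_sock_path(ctx, command_sock_path);
 */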
734
735 /*
736 * Close all fds associated with the instance and free the context.
737 */
738 void lttng_consumer_destroy(struct lttng_consumer_local_data *ctx)
739 {
740 int ret;
741
742 ret = close(ctx->consumer_error_socket);
743 if (ret) {
744 PERROR("close");
745 }
746 ret = close(ctx->consumer_thread_pipe[0]);
747 if (ret) {
748 PERROR("close");
749 }
750 ret = close(ctx->consumer_thread_pipe[1]);
751 if (ret) {
752 PERROR("close");
753 }
754 ret = close(ctx->consumer_poll_pipe[0]);
755 if (ret) {
756 PERROR("close");
757 }
758 ret = close(ctx->consumer_poll_pipe[1]);
759 if (ret) {
760 PERROR("close");
761 }
762 ret = close(ctx->consumer_should_quit[0]);
763 if (ret) {
764 PERROR("close");
765 }
766 ret = close(ctx->consumer_should_quit[1]);
767 if (ret) {
768 PERROR("close");
769 }
770 unlink(ctx->consumer_command_sock_path);
771 free(ctx);
772 }
773
774 /*
775 * Mmap the ring buffer, read it and write the data to the tracefile.
776 *
777 * Returns the number of bytes written
778 */
779 ssize_t lttng_consumer_on_read_subbuffer_mmap(
780 struct lttng_consumer_local_data *ctx,
781 struct lttng_consumer_stream *stream, unsigned long len)
782 {
783 switch (consumer_data.type) {
784 case LTTNG_CONSUMER_KERNEL:
785 return lttng_kconsumer_on_read_subbuffer_mmap(ctx, stream, len);
786 case LTTNG_CONSUMER32_UST:
787 case LTTNG_CONSUMER64_UST:
788 return lttng_ustconsumer_on_read_subbuffer_mmap(ctx, stream, len);
789 default:
790 ERR("Unknown consumer_data type");
791 assert(0);
792 }
793
794 return 0;
795 }
796
797 /*
798 * Splice the data from the ring buffer to the tracefile.
799 *
800 * Returns the number of bytes spliced.
801 */
802 ssize_t lttng_consumer_on_read_subbuffer_splice(
803 struct lttng_consumer_local_data *ctx,
804 struct lttng_consumer_stream *stream, unsigned long len)
805 {
806 switch (consumer_data.type) {
807 case LTTNG_CONSUMER_KERNEL:
808 return lttng_kconsumer_on_read_subbuffer_splice(ctx, stream, len);
809 case LTTNG_CONSUMER32_UST:
810 case LTTNG_CONSUMER64_UST:
811 return -ENOSYS;
812 default:
813 ERR("Unknown consumer_data type");
814 assert(0);
815 return -ENOSYS;
816 }
817
818 }
819
820 /*
821 * Take a snapshot for a specific fd
822 *
823 * Returns 0 on success, < 0 on error
824 */
825 int lttng_consumer_take_snapshot(struct lttng_consumer_local_data *ctx,
826 struct lttng_consumer_stream *stream)
827 {
828 switch (consumer_data.type) {
829 case LTTNG_CONSUMER_KERNEL:
830 return lttng_kconsumer_take_snapshot(ctx, stream);
831 case LTTNG_CONSUMER32_UST:
832 case LTTNG_CONSUMER64_UST:
833 return lttng_ustconsumer_take_snapshot(ctx, stream);
834 default:
835 ERR("Unknown consumer_data type");
836 assert(0);
837 return -ENOSYS;
838 }
839
840 }
841
842 /*
843 * Get the produced position
844 *
845 * Returns 0 on success, < 0 on error
846 */
847 int lttng_consumer_get_produced_snapshot(
848 struct lttng_consumer_local_data *ctx,
849 struct lttng_consumer_stream *stream,
850 unsigned long *pos)
851 {
852 switch (consumer_data.type) {
853 case LTTNG_CONSUMER_KERNEL:
854 return lttng_kconsumer_get_produced_snapshot(ctx, stream, pos);
855 case LTTNG_CONSUMER32_UST:
856 case LTTNG_CONSUMER64_UST:
857 return lttng_ustconsumer_get_produced_snapshot(ctx, stream, pos);
858 default:
859 ERR("Unknown consumer_data type");
860 assert(0);
861 return -ENOSYS;
862 }
863 }
864
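/*
 * Receive a command from the session daemon on the given socket and dispatch
 * it to the kernel or UST specific handler.
 */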
865 int lttng_consumer_recv_cmd(struct lttng_consumer_local_data *ctx,
866 int sock, struct pollfd *consumer_sockpoll)
867 {
868 switch (consumer_data.type) {
869 case LTTNG_CONSUMER_KERNEL:
870 return lttng_kconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
871 case LTTNG_CONSUMER32_UST:
872 case LTTNG_CONSUMER64_UST:
873 return lttng_ustconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
874 default:
875 ERR("Unknown consumer_data type");
876 assert(0);
877 return -ENOSYS;
878 }
879 }
880
881 /*
882 * This thread polls the fds in the set to consume the data and write
883 * it to tracefile if necessary.
884 */
885 void *lttng_consumer_thread_poll_fds(void *data)
886 {
887 int num_rdy, num_hup, high_prio, ret, i;
888 struct pollfd *pollfd = NULL;
889 /* local view of the streams */
890 struct lttng_consumer_stream **local_stream = NULL;
891 /* local view of consumer_data.fds_count */
892 int nb_fd = 0;
893 char tmp;
894 int tmp2;
895 struct lttng_consumer_local_data *ctx = data;
896
897 rcu_register_thread();
898
899 local_stream = zmalloc(sizeof(struct lttng_consumer_stream));
900
901 while (1) {
902 high_prio = 0;
903 num_hup = 0;
904
905 /*
906 * the fds set has been updated, we need to update our
907 * local array as well
908 */
909 pthread_mutex_lock(&consumer_data.lock);
910 if (consumer_data.need_update) {
911 if (pollfd != NULL) {
912 free(pollfd);
913 pollfd = NULL;
914 }
915 if (local_stream != NULL) {
916 free(local_stream);
917 local_stream = NULL;
918 }
919
920 /* allocate for all fds + 1 for the consumer_poll_pipe */
921 pollfd = zmalloc((consumer_data.stream_count + 1) * sizeof(struct pollfd));
922 if (pollfd == NULL) {
923 perror("pollfd malloc");
924 pthread_mutex_unlock(&consumer_data.lock);
925 goto end;
926 }
927
928 /* allocate for all fds + 1 for the consumer_poll_pipe */
929 local_stream = zmalloc((consumer_data.stream_count + 1) *
930 sizeof(struct lttng_consumer_stream));
931 if (local_stream == NULL) {
932 perror("local_stream malloc");
933 pthread_mutex_unlock(&consumer_data.lock);
934 goto end;
935 }
936 ret = consumer_update_poll_array(ctx, &pollfd, local_stream);
937 if (ret < 0) {
938 ERR("Error in allocating pollfd or local_outfds");
939 lttng_consumer_send_error(ctx, CONSUMERD_POLL_ERROR);
940 pthread_mutex_unlock(&consumer_data.lock);
941 goto end;
942 }
943 nb_fd = ret;
944 consumer_data.need_update = 0;
945 }
946 pthread_mutex_unlock(&consumer_data.lock);
947
 948                 /* No FDs left and consumer_quit was set: clean up and exit the thread. */
949 if (nb_fd == 0 && consumer_quit == 1) {
950 goto end;
951 }
952 /* poll on the array of fds */
953 restart:
954 DBG("polling on %d fd", nb_fd + 1);
955 num_rdy = poll(pollfd, nb_fd + 1, consumer_poll_timeout);
956 DBG("poll num_rdy : %d", num_rdy);
957 if (num_rdy == -1) {
958 /*
959 * Restart interrupted system call.
960 */
961 if (errno == EINTR) {
962 goto restart;
963 }
964 perror("Poll error");
965 lttng_consumer_send_error(ctx, CONSUMERD_POLL_ERROR);
966 goto end;
967 } else if (num_rdy == 0) {
968 DBG("Polling thread timed out");
969 goto end;
970 }
971
972 /*
973 * If the consumer_poll_pipe triggered poll go
974 * directly to the beginning of the loop to update the
975 * array. We want to prioritize array update over
976 * low-priority reads.
977 */
978 if (pollfd[nb_fd].revents & POLLIN) {
979 DBG("consumer_poll_pipe wake up");
980 tmp2 = read(ctx->consumer_poll_pipe[0], &tmp, 1);
981 if (tmp2 < 0) {
982 perror("read consumer poll");
983 }
984 continue;
985 }
986
987 /* Take care of high priority channels first. */
988 for (i = 0; i < nb_fd; i++) {
989 if (pollfd[i].revents & POLLPRI) {
990 ssize_t len;
991
992 DBG("Urgent read on fd %d", pollfd[i].fd);
993 high_prio = 1;
994 len = ctx->on_buffer_ready(local_stream[i], ctx);
995 /* it's ok to have an unavailable sub-buffer */
996 if (len < 0 && len != -EAGAIN) {
997 goto end;
998 } else if (len > 0) {
999 local_stream[i]->data_read = 1;
1000 }
1001 }
1002 }
1003
1004 /*
1005 * If we read high prio channel in this loop, try again
1006 * for more high prio data.
1007 */
1008 if (high_prio) {
1009 continue;
1010 }
1011
1012 /* Take care of low priority channels. */
1013 for (i = 0; i < nb_fd; i++) {
1014 if ((pollfd[i].revents & POLLIN) ||
1015 local_stream[i]->hangup_flush_done) {
1016 ssize_t len;
1017
1018 assert(!(pollfd[i].revents & POLLERR));
1019 assert(!(pollfd[i].revents & POLLNVAL));
1020 DBG("Normal read on fd %d", pollfd[i].fd);
1021 len = ctx->on_buffer_ready(local_stream[i], ctx);
1022 /* it's ok to have an unavailable sub-buffer */
1023 if (len < 0 && len != -EAGAIN) {
1024 goto end;
1025 } else if (len > 0) {
1026 local_stream[i]->data_read = 1;
1027 }
1028 }
1029 }
1030
1031 /* Handle hangup and errors */
1032 for (i = 0; i < nb_fd; i++) {
1033 if (!local_stream[i]->hangup_flush_done
1034 && (pollfd[i].revents & (POLLHUP | POLLERR | POLLNVAL))
1035 && (consumer_data.type == LTTNG_CONSUMER32_UST
1036 || consumer_data.type == LTTNG_CONSUMER64_UST)) {
1037 DBG("fd %d is hup|err|nval. Attempting flush and read.",
1038 pollfd[i].fd);
1039 lttng_ustconsumer_on_stream_hangup(local_stream[i]);
1040 /* Attempt read again, for the data we just flushed. */
1041 local_stream[i]->data_read = 1;
1042 }
1043 /*
1044 * If the poll flag is HUP/ERR/NVAL and we have
1045 * read no data in this pass, we can remove the
1046 * stream from its hash table.
1047 */
1048 if ((pollfd[i].revents & POLLHUP)) {
1049 DBG("Polling fd %d tells it has hung up.", pollfd[i].fd);
1050 if (!local_stream[i]->data_read) {
1051 rcu_read_lock();
1052 consumer_del_stream_rcu(&local_stream[i]->node.head);
1053 rcu_read_unlock();
1054 num_hup++;
1055 }
1056 } else if (pollfd[i].revents & POLLERR) {
1057 ERR("Error returned in polling fd %d.", pollfd[i].fd);
1058 if (!local_stream[i]->data_read) {
1059 rcu_read_lock();
1060 consumer_del_stream_rcu(&local_stream[i]->node.head);
1061 rcu_read_unlock();
1062 num_hup++;
1063 }
1064 } else if (pollfd[i].revents & POLLNVAL) {
1065 ERR("Polling fd %d tells fd is not open.", pollfd[i].fd);
1066 if (!local_stream[i]->data_read) {
1067 rcu_read_lock();
1068 consumer_del_stream_rcu(&local_stream[i]->node.head);
1069 rcu_read_unlock();
1070 num_hup++;
1071 }
1072 }
1073 local_stream[i]->data_read = 0;
1074 }
1075 }
1076 end:
1077 DBG("polling thread exiting");
1078 if (pollfd != NULL) {
1079 free(pollfd);
1080 pollfd = NULL;
1081 }
1082 if (local_stream != NULL) {
1083 free(local_stream);
1084 local_stream = NULL;
1085 }
1086 rcu_unregister_thread();
1087 return NULL;
1088 }
1089
1090 /*
1091 * This thread listens on the consumerd socket and receives the file
1092 * descriptors from the session daemon.
1093 */
1094 void *lttng_consumer_thread_receive_fds(void *data)
1095 {
1096 int sock, client_socket, ret;
1097 /*
 1098          * Structure used to poll for incoming data on the communication socket;
 1099          * this avoids blocking on the socket.
1100 */
1101 struct pollfd consumer_sockpoll[2];
1102 struct lttng_consumer_local_data *ctx = data;
1103
1104 rcu_register_thread();
1105
1106 DBG("Creating command socket %s", ctx->consumer_command_sock_path);
1107 unlink(ctx->consumer_command_sock_path);
1108 client_socket = lttcomm_create_unix_sock(ctx->consumer_command_sock_path);
1109 if (client_socket < 0) {
1110 ERR("Cannot create command socket");
1111 goto end;
1112 }
1113
1114 ret = lttcomm_listen_unix_sock(client_socket);
1115 if (ret < 0) {
1116 goto end;
1117 }
1118
1119 DBG("Sending ready command to lttng-sessiond");
1120 ret = lttng_consumer_send_error(ctx, CONSUMERD_COMMAND_SOCK_READY);
1121 /* return < 0 on error, but == 0 is not fatal */
1122 if (ret < 0) {
1123 ERR("Error sending ready command to lttng-sessiond");
1124 goto end;
1125 }
1126
1127 ret = fcntl(client_socket, F_SETFL, O_NONBLOCK);
1128 if (ret < 0) {
1129 perror("fcntl O_NONBLOCK");
1130 goto end;
1131 }
1132
 1133         /* Prepare the FDs to poll: the client socket and the should_quit pipe. */
1134 consumer_sockpoll[0].fd = ctx->consumer_should_quit[0];
1135 consumer_sockpoll[0].events = POLLIN | POLLPRI;
1136 consumer_sockpoll[1].fd = client_socket;
1137 consumer_sockpoll[1].events = POLLIN | POLLPRI;
1138
1139 if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
1140 goto end;
1141 }
1142 DBG("Connection on client_socket");
1143
1144 /* Blocking call, waiting for transmission */
1145 sock = lttcomm_accept_unix_sock(client_socket);
1146 if (sock <= 0) {
1147 WARN("On accept");
1148 goto end;
1149 }
1150 ret = fcntl(sock, F_SETFL, O_NONBLOCK);
1151 if (ret < 0) {
1152 perror("fcntl O_NONBLOCK");
1153 goto end;
1154 }
1155
1156 /* update the polling structure to poll on the established socket */
1157 consumer_sockpoll[1].fd = sock;
1158 consumer_sockpoll[1].events = POLLIN | POLLPRI;
1159
1160 while (1) {
1161 if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
1162 goto end;
1163 }
1164 DBG("Incoming command on sock");
1165 ret = lttng_consumer_recv_cmd(ctx, sock, consumer_sockpoll);
1166 if (ret == -ENOENT) {
1167 DBG("Received STOP command");
1168 goto end;
1169 }
1170 if (ret < 0) {
1171 ERR("Communication interrupted on command socket");
1172 goto end;
1173 }
1174 if (consumer_quit) {
1175 DBG("consumer_thread_receive_fds received quit from signal");
1176 goto end;
1177 }
1178 DBG("received fds on sock");
1179 }
1180 end:
1181 DBG("consumer_thread_receive_fds exiting");
1182
1183 /*
1184 * when all fds have hung up, the polling thread
1185 * can exit cleanly
1186 */
1187 consumer_quit = 1;
1188
1189 /*
1190 * 2s of grace period, if no polling events occur during
1191 * this period, the polling thread will exit even if there
1192 * are still open FDs (should not happen, but safety mechanism).
1193 */
1194 consumer_poll_timeout = LTTNG_CONSUMER_POLL_TIMEOUT;
1195
1196 /* wake up the polling thread */
1197 ret = write(ctx->consumer_poll_pipe[1], "4", 1);
1198 if (ret < 0) {
1199 perror("poll pipe write");
1200 }
1201 rcu_unregister_thread();
1202 return NULL;
1203 }
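/*
 * The two thread functions above are meant to be started by the consumer
 * daemon once a context exists. A minimal sketch, assuming a previously
 * created ctx (error handling elided):
 *
 *	pthread_t recv_thread, poll_thread;
 *
 *	pthread_create(&recv_thread, NULL,
 *			lttng_consumer_thread_receive_fds, (void *) ctx);
 *	pthread_create(&poll_thread, NULL,
 *			lttng_consumer_thread_poll_fds, (void *) ctx);
 *	pthread_join(recv_thread, NULL);
 *	pthread_join(poll_thread, NULL);
 *	lttng_consumer_destroy(ctx);
 *	lttng_consumer_cleanup();
 */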
1204
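/*
 * Read a ready sub-buffer from the stream, dispatching to the kernel or UST
 * implementation depending on the consumer type.
 */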
1205 ssize_t lttng_consumer_read_subbuffer(struct lttng_consumer_stream *stream,
1206 struct lttng_consumer_local_data *ctx)
1207 {
1208 switch (consumer_data.type) {
1209 case LTTNG_CONSUMER_KERNEL:
1210 return lttng_kconsumer_read_subbuffer(stream, ctx);
1211 case LTTNG_CONSUMER32_UST:
1212 case LTTNG_CONSUMER64_UST:
1213 return lttng_ustconsumer_read_subbuffer(stream, ctx);
1214 default:
1215 ERR("Unknown consumer_data type");
1216 assert(0);
1217 return -ENOSYS;
1218 }
1219 }
1220
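/*
 * Per-domain hook invoked when a new stream has been received from the
 * session daemon.
 */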
1221 int lttng_consumer_on_recv_stream(struct lttng_consumer_stream *stream)
1222 {
1223 switch (consumer_data.type) {
1224 case LTTNG_CONSUMER_KERNEL:
1225 return lttng_kconsumer_on_recv_stream(stream);
1226 case LTTNG_CONSUMER32_UST:
1227 case LTTNG_CONSUMER64_UST:
1228 return lttng_ustconsumer_on_recv_stream(stream);
1229 default:
1230 ERR("Unknown consumer_data type");
1231 assert(0);
1232 return -ENOSYS;
1233 }
1234 }
1235
1236 /*
1237 * Allocate and set consumer data hash tables.
1238 */
1239 void lttng_consumer_init(void)
1240 {
1241 consumer_data.stream_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
1242 consumer_data.channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
1243 }
1244