Fix: make ust consumer posix compliant for poll flags
[lttng-tools.git] / src / common / consumer.c
1 /*
2 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; only version 2
8 * of the License.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 */
19
20 #define _GNU_SOURCE
21 #include <assert.h>
22 #include <fcntl.h>
23 #include <poll.h>
24 #include <pthread.h>
25 #include <stdlib.h>
26 #include <string.h>
27 #include <sys/mman.h>
28 #include <sys/socket.h>
29 #include <sys/types.h>
30 #include <unistd.h>
31
32 #include <common/common.h>
33 #include <common/kernel-ctl/kernel-ctl.h>
34 #include <common/sessiond-comm/sessiond-comm.h>
35 #include <common/kernel-consumer/kernel-consumer.h>
36 #include <common/ust-consumer/ust-consumer.h>
37
38 #include "consumer.h"
39
/*
 * Global consumer state: stream/channel hash tables, stream counter and
 * consumer type. Protected by consumer_data.lock (initialized elsewhere).
 */
struct lttng_consumer_global_data consumer_data = {
	.stream_count = 0,
	.need_update = 1,
	.type = LTTNG_CONSUMER_UNKNOWN,
};

/* timeout parameter, to control the polling thread grace period. */
int consumer_poll_timeout = -1;

/*
 * Flag to inform the polling thread to quit when all fd hung up. Updated by
 * the consumer_thread_receive_fds when it notices that all fds has hung up.
 * Also updated by the signal handler (consumer_should_exit()). Read by the
 * polling threads.
 *
 * NOTE(review): volatile is not a thread-synchronization primitive; this
 * relies on the flag being a simple one-way 0 -> 1 transition.
 */
volatile int consumer_quit = 0;
56
57 /*
58 * Find a stream. The consumer_data.lock must be locked during this
59 * call.
60 */
61 static struct lttng_consumer_stream *consumer_find_stream(int key)
62 {
63 struct lttng_ht_iter iter;
64 struct lttng_ht_node_ulong *node;
65 struct lttng_consumer_stream *stream = NULL;
66
67 /* Negative keys are lookup failures */
68 if (key < 0)
69 return NULL;
70
71 rcu_read_lock();
72
73 lttng_ht_lookup(consumer_data.stream_ht, (void *)((unsigned long) key),
74 &iter);
75 node = lttng_ht_iter_get_node_ulong(&iter);
76 if (node != NULL) {
77 stream = caa_container_of(node, struct lttng_consumer_stream, node);
78 }
79
80 rcu_read_unlock();
81
82 return stream;
83 }
84
85 static void consumer_steal_stream_key(int key)
86 {
87 struct lttng_consumer_stream *stream;
88
89 stream = consumer_find_stream(key);
90 if (stream)
91 stream->key = -1;
92 }
93
94 static struct lttng_consumer_channel *consumer_find_channel(int key)
95 {
96 struct lttng_ht_iter iter;
97 struct lttng_ht_node_ulong *node;
98 struct lttng_consumer_channel *channel = NULL;
99
100 /* Negative keys are lookup failures */
101 if (key < 0)
102 return NULL;
103
104 rcu_read_lock();
105
106 lttng_ht_lookup(consumer_data.channel_ht, (void *)((unsigned long) key),
107 &iter);
108 node = lttng_ht_iter_get_node_ulong(&iter);
109 if (node != NULL) {
110 channel = caa_container_of(node, struct lttng_consumer_channel, node);
111 }
112
113 rcu_read_unlock();
114
115 return channel;
116 }
117
118 static void consumer_steal_channel_key(int key)
119 {
120 struct lttng_consumer_channel *channel;
121
122 channel = consumer_find_channel(key);
123 if (channel)
124 channel->key = -1;
125 }
126
/*
 * Remove a stream from the global list protected by a mutex. This
 * function is also responsible for freeing its data structures
 * (mmap, file descriptors, the stream itself, and the owning channel
 * once its refcount drops to zero).
 */
void consumer_del_stream(struct lttng_consumer_stream *stream)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *free_chan = NULL;

	pthread_mutex_lock(&consumer_data.lock);

	/* Release per-consumer-type resources before unlinking the node. */
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		if (stream->mmap_base != NULL) {
			ret = munmap(stream->mmap_base, stream->mmap_len);
			if (ret != 0) {
				perror("munmap");
			}
		}
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustconsumer_del_stream(stream);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		goto end;
	}

	rcu_read_lock();

	/* Get stream node from hash table */
	lttng_ht_lookup(consumer_data.stream_ht,
			(void *)((unsigned long) stream->key), &iter);
	/* Remove stream node from hash table */
	ret = lttng_ht_del(consumer_data.stream_ht, &iter);
	assert(!ret);

	rcu_read_unlock();

	if (consumer_data.stream_count <= 0) {
		goto end;
	}
	consumer_data.stream_count--;
	/*
	 * NOTE(review): stream was already dereferenced above, so this NULL
	 * check can never fire here; it is effectively dead code.
	 */
	if (!stream) {
		goto end;
	}
	if (stream->out_fd >= 0) {
		close(stream->out_fd);
	}
	/* Only close wait_fd when this stream owns it. */
	if (stream->wait_fd >= 0 && !stream->wait_fd_is_copy) {
		close(stream->wait_fd);
	}
	/* Avoid double-closing when wait_fd and shm_fd are the same fd. */
	if (stream->shm_fd >= 0 && stream->wait_fd != stream->shm_fd) {
		close(stream->shm_fd);
	}
	/* Drop the channel reference; destroy the channel on last stream. */
	if (!--stream->chan->refcount)
		free_chan = stream->chan;
	free(stream);
end:
	consumer_data.need_update = 1;
	pthread_mutex_unlock(&consumer_data.lock);

	/* Channel deletion takes consumer_data.lock itself: do it unlocked. */
	if (free_chan)
		consumer_del_channel(free_chan);
}
195
196 static void consumer_del_stream_rcu(struct rcu_head *head)
197 {
198 struct lttng_ht_node_ulong *node =
199 caa_container_of(head, struct lttng_ht_node_ulong, head);
200 struct lttng_consumer_stream *stream =
201 caa_container_of(node, struct lttng_consumer_stream, node);
202
203 consumer_del_stream(stream);
204 }
205
206 struct lttng_consumer_stream *consumer_allocate_stream(
207 int channel_key, int stream_key,
208 int shm_fd, int wait_fd,
209 enum lttng_consumer_stream_state state,
210 uint64_t mmap_len,
211 enum lttng_event_output output,
212 const char *path_name,
213 uid_t uid,
214 gid_t gid)
215 {
216 struct lttng_consumer_stream *stream;
217 int ret;
218
219 stream = zmalloc(sizeof(*stream));
220 if (stream == NULL) {
221 perror("malloc struct lttng_consumer_stream");
222 goto end;
223 }
224 stream->chan = consumer_find_channel(channel_key);
225 if (!stream->chan) {
226 perror("Unable to find channel key");
227 goto end;
228 }
229 stream->chan->refcount++;
230 stream->key = stream_key;
231 stream->shm_fd = shm_fd;
232 stream->wait_fd = wait_fd;
233 stream->out_fd = -1;
234 stream->out_fd_offset = 0;
235 stream->state = state;
236 stream->mmap_len = mmap_len;
237 stream->mmap_base = NULL;
238 stream->output = output;
239 stream->uid = uid;
240 stream->gid = gid;
241 strncpy(stream->path_name, path_name, PATH_MAX - 1);
242 stream->path_name[PATH_MAX - 1] = '\0';
243 lttng_ht_node_init_ulong(&stream->node, stream->key);
244
245 switch (consumer_data.type) {
246 case LTTNG_CONSUMER_KERNEL:
247 break;
248 case LTTNG_CONSUMER32_UST:
249 case LTTNG_CONSUMER64_UST:
250 stream->cpu = stream->chan->cpucount++;
251 ret = lttng_ustconsumer_allocate_stream(stream);
252 if (ret) {
253 free(stream);
254 return NULL;
255 }
256 break;
257 default:
258 ERR("Unknown consumer_data type");
259 assert(0);
260 goto end;
261 }
262 DBG("Allocated stream %s (key %d, shm_fd %d, wait_fd %d, mmap_len %llu, out_fd %d)",
263 stream->path_name, stream->key,
264 stream->shm_fd,
265 stream->wait_fd,
266 (unsigned long long) stream->mmap_len,
267 stream->out_fd);
268 end:
269 return stream;
270 }
271
272 /*
273 * Add a stream to the global list protected by a mutex.
274 */
275 int consumer_add_stream(struct lttng_consumer_stream *stream)
276 {
277 int ret = 0;
278
279 pthread_mutex_lock(&consumer_data.lock);
280 /* Steal stream identifier, for UST */
281 consumer_steal_stream_key(stream->key);
282 rcu_read_lock();
283 lttng_ht_add_unique_ulong(consumer_data.stream_ht, &stream->node);
284 rcu_read_unlock();
285 consumer_data.stream_count++;
286 consumer_data.need_update = 1;
287
288 switch (consumer_data.type) {
289 case LTTNG_CONSUMER_KERNEL:
290 break;
291 case LTTNG_CONSUMER32_UST:
292 case LTTNG_CONSUMER64_UST:
293 /* Streams are in CPU number order (we rely on this) */
294 stream->cpu = stream->chan->nr_streams++;
295 break;
296 default:
297 ERR("Unknown consumer_data type");
298 assert(0);
299 goto end;
300 }
301
302 end:
303 pthread_mutex_unlock(&consumer_data.lock);
304 return ret;
305 }
306
307 /*
308 * Update a stream according to what we just received.
309 */
310 void consumer_change_stream_state(int stream_key,
311 enum lttng_consumer_stream_state state)
312 {
313 struct lttng_consumer_stream *stream;
314
315 pthread_mutex_lock(&consumer_data.lock);
316 stream = consumer_find_stream(stream_key);
317 if (stream) {
318 stream->state = state;
319 }
320 consumer_data.need_update = 1;
321 pthread_mutex_unlock(&consumer_data.lock);
322 }
323
/*
 * Remove a channel from the global list protected by a mutex. This
 * function is also responsible for freeing its data structures
 * (mmap, file descriptors and the channel object itself).
 */
void consumer_del_channel(struct lttng_consumer_channel *channel)
{
	int ret;
	struct lttng_ht_iter iter;

	pthread_mutex_lock(&consumer_data.lock);

	/* Release per-consumer-type resources before unlinking the node. */
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustconsumer_del_channel(channel);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		goto end;
	}

	rcu_read_lock();

	lttng_ht_lookup(consumer_data.channel_ht,
			(void *)((unsigned long) channel->key), &iter);
	ret = lttng_ht_del(consumer_data.channel_ht, &iter);
	assert(!ret);

	rcu_read_unlock();

	if (channel->mmap_base != NULL) {
		ret = munmap(channel->mmap_base, channel->mmap_len);
		if (ret != 0) {
			perror("munmap");
		}
	}
	/* Only close wait_fd when this channel owns it. */
	if (channel->wait_fd >= 0 && !channel->wait_fd_is_copy) {
		close(channel->wait_fd);
	}
	/* Avoid double-closing when wait_fd and shm_fd are the same fd. */
	if (channel->shm_fd >= 0 && channel->wait_fd != channel->shm_fd) {
		close(channel->shm_fd);
	}
	free(channel);
end:
	pthread_mutex_unlock(&consumer_data.lock);
}
373
374 static void consumer_del_channel_rcu(struct rcu_head *head)
375 {
376 struct lttng_ht_node_ulong *node =
377 caa_container_of(head, struct lttng_ht_node_ulong, head);
378 struct lttng_consumer_channel *channel=
379 caa_container_of(node, struct lttng_consumer_channel, node);
380
381 consumer_del_channel(channel);
382 }
383
384 struct lttng_consumer_channel *consumer_allocate_channel(
385 int channel_key,
386 int shm_fd, int wait_fd,
387 uint64_t mmap_len,
388 uint64_t max_sb_size)
389 {
390 struct lttng_consumer_channel *channel;
391 int ret;
392
393 channel = zmalloc(sizeof(*channel));
394 if (channel == NULL) {
395 perror("malloc struct lttng_consumer_channel");
396 goto end;
397 }
398 channel->key = channel_key;
399 channel->shm_fd = shm_fd;
400 channel->wait_fd = wait_fd;
401 channel->mmap_len = mmap_len;
402 channel->max_sb_size = max_sb_size;
403 channel->refcount = 0;
404 channel->nr_streams = 0;
405 lttng_ht_node_init_ulong(&channel->node, channel->key);
406
407 switch (consumer_data.type) {
408 case LTTNG_CONSUMER_KERNEL:
409 channel->mmap_base = NULL;
410 channel->mmap_len = 0;
411 break;
412 case LTTNG_CONSUMER32_UST:
413 case LTTNG_CONSUMER64_UST:
414 ret = lttng_ustconsumer_allocate_channel(channel);
415 if (ret) {
416 free(channel);
417 return NULL;
418 }
419 break;
420 default:
421 ERR("Unknown consumer_data type");
422 assert(0);
423 goto end;
424 }
425 DBG("Allocated channel (key %d, shm_fd %d, wait_fd %d, mmap_len %llu, max_sb_size %llu)",
426 channel->key,
427 channel->shm_fd,
428 channel->wait_fd,
429 (unsigned long long) channel->mmap_len,
430 (unsigned long long) channel->max_sb_size);
431 end:
432 return channel;
433 }
434
435 /*
436 * Add a channel to the global list protected by a mutex.
437 */
438 int consumer_add_channel(struct lttng_consumer_channel *channel)
439 {
440 pthread_mutex_lock(&consumer_data.lock);
441 /* Steal channel identifier, for UST */
442 consumer_steal_channel_key(channel->key);
443 rcu_read_lock();
444 lttng_ht_add_unique_ulong(consumer_data.channel_ht, &channel->node);
445 rcu_read_unlock();
446 pthread_mutex_unlock(&consumer_data.lock);
447 return 0;
448 }
449
450 /*
451 * Allocate the pollfd structure and the local view of the out fds to avoid
452 * doing a lookup in the linked list and concurrency issues when writing is
453 * needed. Called with consumer_data.lock held.
454 *
455 * Returns the number of fds in the structures.
456 */
457 int consumer_update_poll_array(
458 struct lttng_consumer_local_data *ctx, struct pollfd **pollfd,
459 struct lttng_consumer_stream **local_stream)
460 {
461 int i = 0;
462 struct lttng_ht_iter iter;
463 struct lttng_consumer_stream *stream;
464
465 DBG("Updating poll fd array");
466 cds_lfht_for_each_entry(consumer_data.stream_ht->ht, &iter.iter, stream,
467 node.node) {
468 if (stream->state != LTTNG_CONSUMER_ACTIVE_STREAM) {
469 continue;
470 }
471 DBG("Active FD %d", stream->wait_fd);
472 (*pollfd)[i].fd = stream->wait_fd;
473 (*pollfd)[i].events = POLLIN | POLLPRI;
474 local_stream[i] = stream;
475 i++;
476 }
477
478 /*
479 * Insert the consumer_poll_pipe at the end of the array and don't
480 * increment i so nb_fd is the number of real FD.
481 */
482 (*pollfd)[i].fd = ctx->consumer_poll_pipe[0];
483 (*pollfd)[i].events = POLLIN;
484 return i;
485 }
486
/*
 * Poll on the should_quit pipe and the command socket.
 *
 * Returns -1 on error or when the thread should exit, 0 when data is
 * available on the command socket.
 */
int lttng_consumer_poll_socket(struct pollfd *consumer_sockpoll)
{
	int num_rdy;

restart:
	num_rdy = poll(consumer_sockpoll, 2, -1);
	if (num_rdy == -1) {
		/*
		 * Restart interrupted system call.
		 */
		if (errno == EINTR) {
			goto restart;
		}
		perror("Poll error");
		goto exit;
	}
	/*
	 * POSIX permits poll() to set additional bits in revents alongside
	 * POLLIN, so the flag must be tested with a bitwise AND, not with
	 * strict equality as before.
	 */
	if (consumer_sockpoll[0].revents & POLLIN) {
		DBG("consumer_should_quit wake up");
		goto exit;
	}
	return 0;

exit:
	return -1;
}
516
517 /*
518 * Set the error socket.
519 */
520 void lttng_consumer_set_error_sock(
521 struct lttng_consumer_local_data *ctx, int sock)
522 {
523 ctx->consumer_error_socket = sock;
524 }
525
526 /*
527 * Set the command socket path.
528 */
529
530 void lttng_consumer_set_command_sock_path(
531 struct lttng_consumer_local_data *ctx, char *sock)
532 {
533 ctx->consumer_command_sock_path = sock;
534 }
535
536 /*
537 * Send return code to the session daemon.
538 * If the socket is not defined, we return 0, it is not a fatal error
539 */
540 int lttng_consumer_send_error(
541 struct lttng_consumer_local_data *ctx, int cmd)
542 {
543 if (ctx->consumer_error_socket > 0) {
544 return lttcomm_send_unix_sock(ctx->consumer_error_socket, &cmd,
545 sizeof(enum lttcomm_sessiond_command));
546 }
547
548 return 0;
549 }
550
/*
 * Close all the tracefiles and stream fds, should be called when all instances
 * are destroyed. Unlinks every stream and channel from the hash tables and
 * defers their destruction through call_rcu().
 */
void lttng_consumer_cleanup(void)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_ulong *node;

	rcu_read_lock();

	/*
	 * close all outfd. Called when there are no more threads running (after
	 * joining on the threads), no need to protect list iteration with mutex.
	 */
	cds_lfht_for_each_entry(consumer_data.stream_ht->ht, &iter.iter, node,
			node) {
		ret = lttng_ht_del(consumer_data.stream_ht, &iter);
		assert(!ret);
		/* Actual teardown happens in consumer_del_stream_rcu(). */
		call_rcu(&node->head, consumer_del_stream_rcu);
	}

	cds_lfht_for_each_entry(consumer_data.channel_ht->ht, &iter.iter, node,
			node) {
		ret = lttng_ht_del(consumer_data.channel_ht, &iter);
		assert(!ret);
		/* Actual teardown happens in consumer_del_channel_rcu(). */
		call_rcu(&node->head, consumer_del_channel_rcu);
	}

	rcu_read_unlock();
}
583
584 /*
585 * Called from signal handler.
586 */
587 void lttng_consumer_should_exit(struct lttng_consumer_local_data *ctx)
588 {
589 int ret;
590 consumer_quit = 1;
591 ret = write(ctx->consumer_should_quit[1], "4", 1);
592 if (ret < 0) {
593 perror("write consumer quit");
594 }
595 }
596
/*
 * Flush previously-written sub-buffer pages of the trace file to disk and
 * advise the kernel that the flushed range will not be re-read soon.
 * NOTE(review): sync_file_range() is Linux-specific.
 */
void lttng_consumer_sync_trace_file(
		struct lttng_consumer_stream *stream, off_t orig_offset)
{
	int outfd = stream->out_fd;

	/*
	 * This does a blocking write-and-wait on any page that belongs to the
	 * subbuffer prior to the one we just wrote.
	 * Don't care about error values, as these are just hints and ways to
	 * limit the amount of page cache used.
	 */
	if (orig_offset < stream->chan->max_sb_size) {
		/* Nothing before the first sub-buffer to flush. */
		return;
	}
	sync_file_range(outfd, orig_offset - stream->chan->max_sb_size,
			stream->chan->max_sb_size,
			SYNC_FILE_RANGE_WAIT_BEFORE
			| SYNC_FILE_RANGE_WRITE
			| SYNC_FILE_RANGE_WAIT_AFTER);
	/*
	 * Give hints to the kernel about how we access the file:
	 * POSIX_FADV_DONTNEED : we won't re-access data in a near future after
	 * we write it.
	 *
	 * We need to call fadvise again after the file grows because the
	 * kernel does not seem to apply fadvise to non-existing parts of the
	 * file.
	 *
	 * Call fadvise _after_ having waited for the page writeback to
	 * complete because the dirty page writeback semantic is not well
	 * defined. So it can be expected to lead to lower throughput in
	 * streaming.
	 */
	posix_fadvise(outfd, orig_offset - stream->chan->max_sb_size,
			stream->chan->max_sb_size, POSIX_FADV_DONTNEED);
}
633
634 /*
635 * Initialise the necessary environnement :
636 * - create a new context
637 * - create the poll_pipe
638 * - create the should_quit pipe (for signal handler)
639 * - create the thread pipe (for splice)
640 *
641 * Takes a function pointer as argument, this function is called when data is
642 * available on a buffer. This function is responsible to do the
643 * kernctl_get_next_subbuf, read the data with mmap or splice depending on the
644 * buffer configuration and then kernctl_put_next_subbuf at the end.
645 *
646 * Returns a pointer to the new context or NULL on error.
647 */
648 struct lttng_consumer_local_data *lttng_consumer_create(
649 enum lttng_consumer_type type,
650 ssize_t (*buffer_ready)(struct lttng_consumer_stream *stream,
651 struct lttng_consumer_local_data *ctx),
652 int (*recv_channel)(struct lttng_consumer_channel *channel),
653 int (*recv_stream)(struct lttng_consumer_stream *stream),
654 int (*update_stream)(int stream_key, uint32_t state))
655 {
656 int ret, i;
657 struct lttng_consumer_local_data *ctx;
658
659 assert(consumer_data.type == LTTNG_CONSUMER_UNKNOWN ||
660 consumer_data.type == type);
661 consumer_data.type = type;
662
663 ctx = zmalloc(sizeof(struct lttng_consumer_local_data));
664 if (ctx == NULL) {
665 perror("allocating context");
666 goto error;
667 }
668
669 ctx->consumer_error_socket = -1;
670 /* assign the callbacks */
671 ctx->on_buffer_ready = buffer_ready;
672 ctx->on_recv_channel = recv_channel;
673 ctx->on_recv_stream = recv_stream;
674 ctx->on_update_stream = update_stream;
675
676 ret = pipe(ctx->consumer_poll_pipe);
677 if (ret < 0) {
678 perror("Error creating poll pipe");
679 goto error_poll_pipe;
680 }
681
682 ret = pipe(ctx->consumer_should_quit);
683 if (ret < 0) {
684 perror("Error creating recv pipe");
685 goto error_quit_pipe;
686 }
687
688 ret = pipe(ctx->consumer_thread_pipe);
689 if (ret < 0) {
690 perror("Error creating thread pipe");
691 goto error_thread_pipe;
692 }
693
694 return ctx;
695
696
697 error_thread_pipe:
698 for (i = 0; i < 2; i++) {
699 int err;
700
701 err = close(ctx->consumer_should_quit[i]);
702 assert(!err);
703 }
704 error_quit_pipe:
705 for (i = 0; i < 2; i++) {
706 int err;
707
708 err = close(ctx->consumer_poll_pipe[i]);
709 assert(!err);
710 }
711 error_poll_pipe:
712 free(ctx);
713 error:
714 return NULL;
715 }
716
717 /*
718 * Close all fds associated with the instance and free the context.
719 */
720 void lttng_consumer_destroy(struct lttng_consumer_local_data *ctx)
721 {
722 close(ctx->consumer_error_socket);
723 close(ctx->consumer_thread_pipe[0]);
724 close(ctx->consumer_thread_pipe[1]);
725 close(ctx->consumer_poll_pipe[0]);
726 close(ctx->consumer_poll_pipe[1]);
727 close(ctx->consumer_should_quit[0]);
728 close(ctx->consumer_should_quit[1]);
729 unlink(ctx->consumer_command_sock_path);
730 free(ctx);
731 }
732
733 /*
734 * Mmap the ring buffer, read it and write the data to the tracefile.
735 *
736 * Returns the number of bytes written
737 */
738 ssize_t lttng_consumer_on_read_subbuffer_mmap(
739 struct lttng_consumer_local_data *ctx,
740 struct lttng_consumer_stream *stream, unsigned long len)
741 {
742 switch (consumer_data.type) {
743 case LTTNG_CONSUMER_KERNEL:
744 return lttng_kconsumer_on_read_subbuffer_mmap(ctx, stream, len);
745 case LTTNG_CONSUMER32_UST:
746 case LTTNG_CONSUMER64_UST:
747 return lttng_ustconsumer_on_read_subbuffer_mmap(ctx, stream, len);
748 default:
749 ERR("Unknown consumer_data type");
750 assert(0);
751 }
752 }
753
754 /*
755 * Splice the data from the ring buffer to the tracefile.
756 *
757 * Returns the number of bytes spliced.
758 */
759 ssize_t lttng_consumer_on_read_subbuffer_splice(
760 struct lttng_consumer_local_data *ctx,
761 struct lttng_consumer_stream *stream, unsigned long len)
762 {
763 switch (consumer_data.type) {
764 case LTTNG_CONSUMER_KERNEL:
765 return lttng_kconsumer_on_read_subbuffer_splice(ctx, stream, len);
766 case LTTNG_CONSUMER32_UST:
767 case LTTNG_CONSUMER64_UST:
768 return -ENOSYS;
769 default:
770 ERR("Unknown consumer_data type");
771 assert(0);
772 return -ENOSYS;
773 }
774
775 }
776
777 /*
778 * Take a snapshot for a specific fd
779 *
780 * Returns 0 on success, < 0 on error
781 */
782 int lttng_consumer_take_snapshot(struct lttng_consumer_local_data *ctx,
783 struct lttng_consumer_stream *stream)
784 {
785 switch (consumer_data.type) {
786 case LTTNG_CONSUMER_KERNEL:
787 return lttng_kconsumer_take_snapshot(ctx, stream);
788 case LTTNG_CONSUMER32_UST:
789 case LTTNG_CONSUMER64_UST:
790 return lttng_ustconsumer_take_snapshot(ctx, stream);
791 default:
792 ERR("Unknown consumer_data type");
793 assert(0);
794 return -ENOSYS;
795 }
796
797 }
798
799 /*
800 * Get the produced position
801 *
802 * Returns 0 on success, < 0 on error
803 */
804 int lttng_consumer_get_produced_snapshot(
805 struct lttng_consumer_local_data *ctx,
806 struct lttng_consumer_stream *stream,
807 unsigned long *pos)
808 {
809 switch (consumer_data.type) {
810 case LTTNG_CONSUMER_KERNEL:
811 return lttng_kconsumer_get_produced_snapshot(ctx, stream, pos);
812 case LTTNG_CONSUMER32_UST:
813 case LTTNG_CONSUMER64_UST:
814 return lttng_ustconsumer_get_produced_snapshot(ctx, stream, pos);
815 default:
816 ERR("Unknown consumer_data type");
817 assert(0);
818 return -ENOSYS;
819 }
820 }
821
822 int lttng_consumer_recv_cmd(struct lttng_consumer_local_data *ctx,
823 int sock, struct pollfd *consumer_sockpoll)
824 {
825 switch (consumer_data.type) {
826 case LTTNG_CONSUMER_KERNEL:
827 return lttng_kconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
828 case LTTNG_CONSUMER32_UST:
829 case LTTNG_CONSUMER64_UST:
830 return lttng_ustconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
831 default:
832 ERR("Unknown consumer_data type");
833 assert(0);
834 return -ENOSYS;
835 }
836 }
837
/*
 * This thread polls the fds in the set to consume the data and write
 * it to tracefile if necessary. Loops until consumer_quit is set with no
 * streams left, or until a poll timeout/error occurs.
 */
void *lttng_consumer_thread_poll_fds(void *data)
{
	int num_rdy, num_hup, high_prio, ret, i;
	struct pollfd *pollfd = NULL;
	/* local view of the streams */
	struct lttng_consumer_stream **local_stream = NULL;
	/* local view of consumer_data.fds_count */
	int nb_fd = 0;
	char tmp;
	int tmp2;
	struct lttng_consumer_local_data *ctx = data;

	rcu_register_thread();

	/*
	 * NOTE(review): this initial allocation is sized for one full stream
	 * struct, yet local_stream is an array of pointers; it is only ever
	 * freed and replaced below, never read, so it is wasted work.
	 */
	local_stream = zmalloc(sizeof(struct lttng_consumer_stream));

	while (1) {
		high_prio = 0;
		num_hup = 0;

		/*
		 * the fds set has been updated, we need to update our
		 * local array as well
		 */
		pthread_mutex_lock(&consumer_data.lock);
		if (consumer_data.need_update) {
			if (pollfd != NULL) {
				free(pollfd);
				pollfd = NULL;
			}
			if (local_stream != NULL) {
				free(local_stream);
				local_stream = NULL;
			}

			/* allocate for all fds + 1 for the consumer_poll_pipe */
			pollfd = zmalloc((consumer_data.stream_count + 1) * sizeof(struct pollfd));
			if (pollfd == NULL) {
				perror("pollfd malloc");
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}

			/* allocate for all fds + 1 for the consumer_poll_pipe */
			/*
			 * NOTE(review): element type should arguably be
			 * sizeof(struct lttng_consumer_stream *) since this is
			 * an array of pointers; using the full struct size
			 * over-allocates but is otherwise harmless.
			 */
			local_stream = zmalloc((consumer_data.stream_count + 1) *
					sizeof(struct lttng_consumer_stream));
			if (local_stream == NULL) {
				perror("local_stream malloc");
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}
			ret = consumer_update_poll_array(ctx, &pollfd, local_stream);
			if (ret < 0) {
				ERR("Error in allocating pollfd or local_outfds");
				lttng_consumer_send_error(ctx, CONSUMERD_POLL_ERROR);
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}
			nb_fd = ret;
			consumer_data.need_update = 0;
		}
		pthread_mutex_unlock(&consumer_data.lock);

		/* No FDs and consumer_quit, consumer_cleanup the thread */
		if (nb_fd == 0 && consumer_quit == 1) {
			goto end;
		}
		/* poll on the array of fds */
	restart:
		DBG("polling on %d fd", nb_fd + 1);
		num_rdy = poll(pollfd, nb_fd + 1, consumer_poll_timeout);
		DBG("poll num_rdy : %d", num_rdy);
		if (num_rdy == -1) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			perror("Poll error");
			lttng_consumer_send_error(ctx, CONSUMERD_POLL_ERROR);
			goto end;
		} else if (num_rdy == 0) {
			/* Grace period expired (see consumer_poll_timeout). */
			DBG("Polling thread timed out");
			goto end;
		}

		/*
		 * If the consumer_poll_pipe triggered poll go
		 * directly to the beginning of the loop to update the
		 * array. We want to prioritize array update over
		 * low-priority reads.
		 */
		if (pollfd[nb_fd].revents & POLLIN) {
			DBG("consumer_poll_pipe wake up");
			/* Drain the wakeup byte written by the other thread. */
			tmp2 = read(ctx->consumer_poll_pipe[0], &tmp, 1);
			if (tmp2 < 0) {
				perror("read consumer poll");
			}
			continue;
		}

		/* Take care of high priority channels first. */
		for (i = 0; i < nb_fd; i++) {
			if (pollfd[i].revents & POLLPRI) {
				ssize_t len;

				DBG("Urgent read on fd %d", pollfd[i].fd);
				high_prio = 1;
				len = ctx->on_buffer_ready(local_stream[i], ctx);
				/* it's ok to have an unavailable sub-buffer */
				if (len < 0 && len != -EAGAIN) {
					goto end;
				} else if (len > 0) {
					local_stream[i]->data_read = 1;
				}
			}
		}

		/*
		 * If we read high prio channel in this loop, try again
		 * for more high prio data.
		 */
		if (high_prio) {
			continue;
		}

		/* Take care of low priority channels. */
		for (i = 0; i < nb_fd; i++) {
			if ((pollfd[i].revents & POLLIN) ||
					local_stream[i]->hangup_flush_done) {
				ssize_t len;

				/* Error/invalid fds are handled in the hangup loop below. */
				assert(!(pollfd[i].revents & POLLERR));
				assert(!(pollfd[i].revents & POLLNVAL));
				DBG("Normal read on fd %d", pollfd[i].fd);
				len = ctx->on_buffer_ready(local_stream[i], ctx);
				/* it's ok to have an unavailable sub-buffer */
				if (len < 0 && len != -EAGAIN) {
					goto end;
				} else if (len > 0) {
					local_stream[i]->data_read = 1;
				}
			}
		}

		/* Handle hangup and errors */
		for (i = 0; i < nb_fd; i++) {
			if (!local_stream[i]->hangup_flush_done
					&& (pollfd[i].revents & (POLLHUP | POLLERR | POLLNVAL))
					&& (consumer_data.type == LTTNG_CONSUMER32_UST
						|| consumer_data.type == LTTNG_CONSUMER64_UST)) {
				DBG("fd %d is hup|err|nval. Attempting flush and read.",
						pollfd[i].fd);
				lttng_ustconsumer_on_stream_hangup(local_stream[i]);
				/* Attempt read again, for the data we just flushed. */
				local_stream[i]->data_read = 1;
			}
			/*
			 * If the poll flag is HUP/ERR/NVAL and we have
			 * read no data in this pass, we can remove the
			 * stream from its hash table.
			 */
			if ((pollfd[i].revents & POLLHUP)) {
				DBG("Polling fd %d tells it has hung up.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					rcu_read_lock();
					consumer_del_stream_rcu(&local_stream[i]->node.head);
					rcu_read_unlock();
					num_hup++;
				}
			} else if (pollfd[i].revents & POLLERR) {
				ERR("Error returned in polling fd %d.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					rcu_read_lock();
					consumer_del_stream_rcu(&local_stream[i]->node.head);
					rcu_read_unlock();
					num_hup++;
				}
			} else if (pollfd[i].revents & POLLNVAL) {
				ERR("Polling fd %d tells fd is not open.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					rcu_read_lock();
					consumer_del_stream_rcu(&local_stream[i]->node.head);
					rcu_read_unlock();
					num_hup++;
				}
			}
			/* Reset per-pass read marker for the next iteration. */
			local_stream[i]->data_read = 0;
		}
	}
end:
	DBG("polling thread exiting");
	if (pollfd != NULL) {
		free(pollfd);
		pollfd = NULL;
	}
	if (local_stream != NULL) {
		free(local_stream);
		local_stream = NULL;
	}
	rcu_unregister_thread();
	return NULL;
}
1046
/*
 * This thread listens on the consumerd socket and receives the file
 * descriptors from the session daemon. On exit it arms the polling
 * thread's grace-period timeout and wakes it up.
 */
void *lttng_consumer_thread_receive_fds(void *data)
{
	int sock, client_socket, ret;
	/*
	 * structure to poll for incoming data on communication socket avoids
	 * making blocking sockets.
	 */
	struct pollfd consumer_sockpoll[2];
	struct lttng_consumer_local_data *ctx = data;

	rcu_register_thread();

	DBG("Creating command socket %s", ctx->consumer_command_sock_path);
	/* Remove any stale socket left over from a previous run. */
	unlink(ctx->consumer_command_sock_path);
	client_socket = lttcomm_create_unix_sock(ctx->consumer_command_sock_path);
	if (client_socket < 0) {
		ERR("Cannot create command socket");
		goto end;
	}

	ret = lttcomm_listen_unix_sock(client_socket);
	if (ret < 0) {
		goto end;
	}

	DBG("Sending ready command to lttng-sessiond");
	ret = lttng_consumer_send_error(ctx, CONSUMERD_COMMAND_SOCK_READY);
	/* return < 0 on error, but == 0 is not fatal */
	if (ret < 0) {
		ERR("Error sending ready command to lttng-sessiond");
		goto end;
	}

	/* Non-blocking so the quit pipe can interrupt the accept loop. */
	ret = fcntl(client_socket, F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		perror("fcntl O_NONBLOCK");
		goto end;
	}

	/* prepare the FDs to poll : to client socket and the should_quit pipe */
	consumer_sockpoll[0].fd = ctx->consumer_should_quit[0];
	consumer_sockpoll[0].events = POLLIN | POLLPRI;
	consumer_sockpoll[1].fd = client_socket;
	consumer_sockpoll[1].events = POLLIN | POLLPRI;

	if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
		goto end;
	}
	DBG("Connection on client_socket");

	/* Blocking call, waiting for transmission */
	sock = lttcomm_accept_unix_sock(client_socket);
	if (sock <= 0) {
		WARN("On accept");
		goto end;
	}
	ret = fcntl(sock, F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		perror("fcntl O_NONBLOCK");
		goto end;
	}

	/* update the polling structure to poll on the established socket */
	consumer_sockpoll[1].fd = sock;
	consumer_sockpoll[1].events = POLLIN | POLLPRI;

	/* Command-processing loop: one command per iteration. */
	while (1) {
		if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
			goto end;
		}
		DBG("Incoming command on sock");
		ret = lttng_consumer_recv_cmd(ctx, sock, consumer_sockpoll);
		if (ret == -ENOENT) {
			DBG("Received STOP command");
			goto end;
		}
		if (ret < 0) {
			ERR("Communication interrupted on command socket");
			goto end;
		}
		if (consumer_quit) {
			DBG("consumer_thread_receive_fds received quit from signal");
			goto end;
		}
		DBG("received fds on sock");
	}
end:
	DBG("consumer_thread_receive_fds exiting");

	/*
	 * when all fds have hung up, the polling thread
	 * can exit cleanly
	 */
	consumer_quit = 1;

	/*
	 * 2s of grace period, if no polling events occur during
	 * this period, the polling thread will exit even if there
	 * are still open FDs (should not happen, but safety mechanism).
	 */
	consumer_poll_timeout = LTTNG_CONSUMER_POLL_TIMEOUT;

	/* wake up the polling thread */
	ret = write(ctx->consumer_poll_pipe[1], "4", 1);
	if (ret < 0) {
		perror("poll pipe write");
	}
	rcu_unregister_thread();
	return NULL;
}
1161
1162 ssize_t lttng_consumer_read_subbuffer(struct lttng_consumer_stream *stream,
1163 struct lttng_consumer_local_data *ctx)
1164 {
1165 switch (consumer_data.type) {
1166 case LTTNG_CONSUMER_KERNEL:
1167 return lttng_kconsumer_read_subbuffer(stream, ctx);
1168 case LTTNG_CONSUMER32_UST:
1169 case LTTNG_CONSUMER64_UST:
1170 return lttng_ustconsumer_read_subbuffer(stream, ctx);
1171 default:
1172 ERR("Unknown consumer_data type");
1173 assert(0);
1174 return -ENOSYS;
1175 }
1176 }
1177
1178 int lttng_consumer_on_recv_stream(struct lttng_consumer_stream *stream)
1179 {
1180 switch (consumer_data.type) {
1181 case LTTNG_CONSUMER_KERNEL:
1182 return lttng_kconsumer_on_recv_stream(stream);
1183 case LTTNG_CONSUMER32_UST:
1184 case LTTNG_CONSUMER64_UST:
1185 return lttng_ustconsumer_on_recv_stream(stream);
1186 default:
1187 ERR("Unknown consumer_data type");
1188 assert(0);
1189 return -ENOSYS;
1190 }
1191 }
1192
1193 /*
1194 * Allocate and set consumer data hash tables.
1195 */
1196 void lttng_consumer_init(void)
1197 {
1198 consumer_data.stream_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
1199 consumer_data.channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
1200 }
1201
This page took 0.07153 seconds and 4 git commands to generate.