Merge branch 'master' into compat-freebsd
[lttng-tools.git] / src / common / consumer.c
1 /*
2 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; only version 2
8 * of the License.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 */
19
20 #define _GNU_SOURCE
21 #include <assert.h>
22 #include <poll.h>
23 #include <pthread.h>
24 #include <stdlib.h>
25 #include <string.h>
26 #include <sys/mman.h>
27 #include <sys/socket.h>
28 #include <sys/types.h>
29 #include <unistd.h>
30
31 #include <common/common.h>
32 #include <common/kernel-ctl/kernel-ctl.h>
33 #include <common/sessiond-comm/sessiond-comm.h>
34 #include <common/kernel-consumer/kernel-consumer.h>
35 #include <common/ust-consumer/ust-consumer.h>
36
37 #include "consumer.h"
38
/*
 * Global consumer state shared by every consumer thread.  stream_count and
 * need_update are protected by consumer_data.lock; type is fixed at context
 * creation (lttng_consumer_create) and never changes afterwards.
 */
struct lttng_consumer_global_data consumer_data = {
	.stream_count = 0,
	.need_update = 1,
	.type = LTTNG_CONSUMER_UNKNOWN,
};

/* timeout parameter, to control the polling thread grace period. */
int consumer_poll_timeout = -1;

/*
 * Flag to inform the polling thread to quit when all fd hung up. Updated by
 * the consumer_thread_receive_fds when it notices that all fds has hung up.
 * Also updated by the signal handler (consumer_should_exit()). Read by the
 * polling threads.
 *
 * NOTE(review): volatile is not a real cross-thread synchronization
 * primitive; this works here only as an advisory flag — confirm if stronger
 * ordering is ever needed.
 */
volatile int consumer_quit = 0;
55
56 /*
57 * Find a stream. The consumer_data.lock must be locked during this
58 * call.
59 */
60 static struct lttng_consumer_stream *consumer_find_stream(int key)
61 {
62 struct lttng_ht_iter iter;
63 struct lttng_ht_node_ulong *node;
64 struct lttng_consumer_stream *stream = NULL;
65
66 /* Negative keys are lookup failures */
67 if (key < 0)
68 return NULL;
69
70 rcu_read_lock();
71
72 lttng_ht_lookup(consumer_data.stream_ht, (void *)((unsigned long) key),
73 &iter);
74 node = lttng_ht_iter_get_node_ulong(&iter);
75 if (node != NULL) {
76 stream = caa_container_of(node, struct lttng_consumer_stream, node);
77 }
78
79 rcu_read_unlock();
80
81 return stream;
82 }
83
84 static void consumer_steal_stream_key(int key)
85 {
86 struct lttng_consumer_stream *stream;
87
88 stream = consumer_find_stream(key);
89 if (stream)
90 stream->key = -1;
91 }
92
93 static struct lttng_consumer_channel *consumer_find_channel(int key)
94 {
95 struct lttng_ht_iter iter;
96 struct lttng_ht_node_ulong *node;
97 struct lttng_consumer_channel *channel = NULL;
98
99 /* Negative keys are lookup failures */
100 if (key < 0)
101 return NULL;
102
103 rcu_read_lock();
104
105 lttng_ht_lookup(consumer_data.channel_ht, (void *)((unsigned long) key),
106 &iter);
107 node = lttng_ht_iter_get_node_ulong(&iter);
108 if (node != NULL) {
109 channel = caa_container_of(node, struct lttng_consumer_channel, node);
110 }
111
112 rcu_read_unlock();
113
114 return channel;
115 }
116
117 static void consumer_steal_channel_key(int key)
118 {
119 struct lttng_consumer_channel *channel;
120
121 channel = consumer_find_channel(key);
122 if (channel)
123 channel->key = -1;
124 }
125
/*
 * Remove a stream from the global list protected by a mutex. This
 * function is also responsible for freeing its data structures.
 *
 * Takes consumer_data.lock for the whole teardown; may cascade into
 * consumer_del_channel() (outside the lock) when the last channel
 * reference is dropped.
 */
void consumer_del_stream(struct lttng_consumer_stream *stream)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *free_chan = NULL;

	pthread_mutex_lock(&consumer_data.lock);

	/* Release the type-specific buffer mapping first. */
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		if (stream->mmap_base != NULL) {
			ret = munmap(stream->mmap_base, stream->mmap_len);
			if (ret != 0) {
				perror("munmap");
			}
		}
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustconsumer_del_stream(stream);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		goto end;
	}

	rcu_read_lock();

	/* Get stream node from hash table */
	lttng_ht_lookup(consumer_data.stream_ht,
			(void *)((unsigned long) stream->key), &iter);
	/* Remove stream node from hash table */
	ret = lttng_ht_del(consumer_data.stream_ht, &iter);
	assert(!ret);

	rcu_read_unlock();

	if (consumer_data.stream_count <= 0) {
		goto end;
	}
	consumer_data.stream_count--;
	if (!stream) {
		goto end;
	}
	/* Close the tracefile output fd owned by this stream. */
	if (stream->out_fd >= 0) {
		close(stream->out_fd);
	}
	/* wait_fd may be a copy owned elsewhere; only close it if we own it. */
	if (stream->wait_fd >= 0 && !stream->wait_fd_is_copy) {
		close(stream->wait_fd);
	}
	/* shm_fd and wait_fd can alias; avoid a double close. */
	if (stream->shm_fd >= 0 && stream->wait_fd != stream->shm_fd) {
		close(stream->shm_fd);
	}
	/* Drop our channel reference; remember it for deletion on last ref. */
	if (!--stream->chan->refcount)
		free_chan = stream->chan;
	free(stream);
end:
	/* Force the polling thread to rebuild its fd array. */
	consumer_data.need_update = 1;
	pthread_mutex_unlock(&consumer_data.lock);

	/* Done outside the lock: consumer_del_channel takes it itself. */
	if (free_chan)
		consumer_del_channel(free_chan);
}
194
195 static void consumer_del_stream_rcu(struct rcu_head *head)
196 {
197 struct lttng_ht_node_ulong *node =
198 caa_container_of(head, struct lttng_ht_node_ulong, head);
199 struct lttng_consumer_stream *stream =
200 caa_container_of(node, struct lttng_consumer_stream, node);
201
202 consumer_del_stream(stream);
203 }
204
205 struct lttng_consumer_stream *consumer_allocate_stream(
206 int channel_key, int stream_key,
207 int shm_fd, int wait_fd,
208 enum lttng_consumer_stream_state state,
209 uint64_t mmap_len,
210 enum lttng_event_output output,
211 const char *path_name,
212 uid_t uid,
213 gid_t gid)
214 {
215 struct lttng_consumer_stream *stream;
216 int ret;
217
218 stream = zmalloc(sizeof(*stream));
219 if (stream == NULL) {
220 perror("malloc struct lttng_consumer_stream");
221 goto end;
222 }
223 stream->chan = consumer_find_channel(channel_key);
224 if (!stream->chan) {
225 perror("Unable to find channel key");
226 goto end;
227 }
228 stream->chan->refcount++;
229 stream->key = stream_key;
230 stream->shm_fd = shm_fd;
231 stream->wait_fd = wait_fd;
232 stream->out_fd = -1;
233 stream->out_fd_offset = 0;
234 stream->state = state;
235 stream->mmap_len = mmap_len;
236 stream->mmap_base = NULL;
237 stream->output = output;
238 stream->uid = uid;
239 stream->gid = gid;
240 strncpy(stream->path_name, path_name, PATH_MAX - 1);
241 stream->path_name[PATH_MAX - 1] = '\0';
242 lttng_ht_node_init_ulong(&stream->node, stream->key);
243
244 switch (consumer_data.type) {
245 case LTTNG_CONSUMER_KERNEL:
246 break;
247 case LTTNG_CONSUMER32_UST:
248 case LTTNG_CONSUMER64_UST:
249 stream->cpu = stream->chan->cpucount++;
250 ret = lttng_ustconsumer_allocate_stream(stream);
251 if (ret) {
252 free(stream);
253 return NULL;
254 }
255 break;
256 default:
257 ERR("Unknown consumer_data type");
258 assert(0);
259 goto end;
260 }
261 DBG("Allocated stream %s (key %d, shm_fd %d, wait_fd %d, mmap_len %llu, out_fd %d)",
262 stream->path_name, stream->key,
263 stream->shm_fd,
264 stream->wait_fd,
265 (unsigned long long) stream->mmap_len,
266 stream->out_fd);
267 end:
268 return stream;
269 }
270
/*
 * Add a stream to the global list protected by a mutex.
 *
 * Steals the stream key from any previous holder (UST reuses keys),
 * registers the stream in the hash table and flags the poll array as
 * stale so the polling thread rebuilds it.  Always returns 0.
 */
int consumer_add_stream(struct lttng_consumer_stream *stream)
{
	int ret = 0;

	pthread_mutex_lock(&consumer_data.lock);
	/* Steal stream identifier, for UST */
	consumer_steal_stream_key(stream->key);
	rcu_read_lock();
	lttng_ht_add_unique_ulong(consumer_data.stream_ht, &stream->node);
	rcu_read_unlock();
	consumer_data.stream_count++;
	consumer_data.need_update = 1;

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		/* Streams are in CPU number order (we rely on this) */
		stream->cpu = stream->chan->nr_streams++;
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		goto end;
	}

end:
	pthread_mutex_unlock(&consumer_data.lock);
	return ret;
}
305
306 /*
307 * Update a stream according to what we just received.
308 */
309 void consumer_change_stream_state(int stream_key,
310 enum lttng_consumer_stream_state state)
311 {
312 struct lttng_consumer_stream *stream;
313
314 pthread_mutex_lock(&consumer_data.lock);
315 stream = consumer_find_stream(stream_key);
316 if (stream) {
317 stream->state = state;
318 }
319 consumer_data.need_update = 1;
320 pthread_mutex_unlock(&consumer_data.lock);
321 }
322
/*
 * Remove a channel from the global list protected by a mutex. This
 * function is also responsible for freeing its data structures.
 */
void consumer_del_channel(struct lttng_consumer_channel *channel)
{
	int ret;
	struct lttng_ht_iter iter;

	pthread_mutex_lock(&consumer_data.lock);

	/* Release the type-specific channel resources first. */
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustconsumer_del_channel(channel);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		goto end;
	}

	rcu_read_lock();

	/* Unlink the channel from the hash table. */
	lttng_ht_lookup(consumer_data.channel_ht,
			(void *)((unsigned long) channel->key), &iter);
	ret = lttng_ht_del(consumer_data.channel_ht, &iter);
	assert(!ret);

	rcu_read_unlock();

	if (channel->mmap_base != NULL) {
		ret = munmap(channel->mmap_base, channel->mmap_len);
		if (ret != 0) {
			perror("munmap");
		}
	}
	/* wait_fd may be a copy owned elsewhere; only close it if we own it. */
	if (channel->wait_fd >= 0 && !channel->wait_fd_is_copy) {
		close(channel->wait_fd);
	}
	/* shm_fd and wait_fd can alias; avoid a double close. */
	if (channel->shm_fd >= 0 && channel->wait_fd != channel->shm_fd) {
		close(channel->shm_fd);
	}
	free(channel);
end:
	pthread_mutex_unlock(&consumer_data.lock);
}
372
373 static void consumer_del_channel_rcu(struct rcu_head *head)
374 {
375 struct lttng_ht_node_ulong *node =
376 caa_container_of(head, struct lttng_ht_node_ulong, head);
377 struct lttng_consumer_channel *channel=
378 caa_container_of(node, struct lttng_consumer_channel, node);
379
380 consumer_del_channel(channel);
381 }
382
/*
 * Allocate and initialize a channel object for the given key and shared
 * memory parameters.
 *
 * Returns NULL on allocation failure or type-specific allocation error.
 * On success the channel's hash node is initialized so it can be added
 * with consumer_add_channel().
 */
struct lttng_consumer_channel *consumer_allocate_channel(
		int channel_key,
		int shm_fd, int wait_fd,
		uint64_t mmap_len,
		uint64_t max_sb_size)
{
	struct lttng_consumer_channel *channel;
	int ret;

	channel = zmalloc(sizeof(*channel));
	if (channel == NULL) {
		perror("malloc struct lttng_consumer_channel");
		goto end;
	}
	channel->key = channel_key;
	channel->shm_fd = shm_fd;
	channel->wait_fd = wait_fd;
	channel->mmap_len = mmap_len;
	channel->max_sb_size = max_sb_size;
	channel->refcount = 0;
	channel->nr_streams = 0;
	lttng_ht_node_init_ulong(&channel->node, channel->key);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		/*
		 * NOTE(review): mmap_len is deliberately reset to 0 here —
		 * presumably the kernel consumer maps buffers per-stream rather
		 * than per-channel; confirm before relying on channel->mmap_len
		 * for the kernel case.
		 */
		channel->mmap_base = NULL;
		channel->mmap_len = 0;
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		ret = lttng_ustconsumer_allocate_channel(channel);
		if (ret) {
			free(channel);
			return NULL;
		}
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		goto end;
	}
	DBG("Allocated channel (key %d, shm_fd %d, wait_fd %d, mmap_len %llu, max_sb_size %llu)",
			channel->key,
			channel->shm_fd,
			channel->wait_fd,
			(unsigned long long) channel->mmap_len,
			(unsigned long long) channel->max_sb_size);
end:
	return channel;
}
433
434 /*
435 * Add a channel to the global list protected by a mutex.
436 */
437 int consumer_add_channel(struct lttng_consumer_channel *channel)
438 {
439 pthread_mutex_lock(&consumer_data.lock);
440 /* Steal channel identifier, for UST */
441 consumer_steal_channel_key(channel->key);
442 rcu_read_lock();
443 lttng_ht_add_unique_ulong(consumer_data.channel_ht, &channel->node);
444 rcu_read_unlock();
445 pthread_mutex_unlock(&consumer_data.lock);
446 return 0;
447 }
448
/*
 * Allocate the pollfd structure and the local view of the out fds to avoid
 * doing a lookup in the linked list and concurrency issues when writing is
 * needed. Called with consumer_data.lock held.
 *
 * The caller must have sized *pollfd and local_stream for
 * stream_count + 1 entries; the extra slot holds the consumer_poll_pipe.
 *
 * Returns the number of fds in the structures.
 */
int consumer_update_poll_array(
		struct lttng_consumer_local_data *ctx, struct pollfd **pollfd,
		struct lttng_consumer_stream **local_stream)
{
	int i = 0;
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Updating poll fd array");
	/* Only active streams get polled; others are skipped. */
	cds_lfht_for_each_entry(consumer_data.stream_ht->ht, &iter.iter, stream,
			node.node) {
		if (stream->state != LTTNG_CONSUMER_ACTIVE_STREAM) {
			continue;
		}
		DBG("Active FD %d", stream->wait_fd);
		(*pollfd)[i].fd = stream->wait_fd;
		(*pollfd)[i].events = POLLIN | POLLPRI;
		local_stream[i] = stream;
		i++;
	}

	/*
	 * Insert the consumer_poll_pipe at the end of the array and don't
	 * increment i so nb_fd is the number of real FD.
	 */
	(*pollfd)[i].fd = ctx->consumer_poll_pipe[0];
	(*pollfd)[i].events = POLLIN;
	return i;
}
485
/*
 * Poll on the should_quit pipe and the command socket.
 *
 * Returns -1 when the caller should exit (poll error, or the quit pipe
 * became readable), 0 when data is available on the command socket.
 */
int lttng_consumer_poll_socket(struct pollfd *consumer_sockpoll)
{
	int num_rdy;

restart:
	num_rdy = poll(consumer_sockpoll, 2, -1);
	if (num_rdy == -1) {
		/*
		 * Restart interrupted system call.
		 */
		if (errno == EINTR) {
			goto restart;
		}
		perror("Poll error");
		goto exit;
	}
	/*
	 * Fix: test the POLLIN bit rather than comparing revents for
	 * equality.  This entry is registered with POLLIN | POLLPRI, so
	 * revents may carry several bits at once and an equality test
	 * could miss the quit notification.
	 */
	if (consumer_sockpoll[0].revents & POLLIN) {
		DBG("consumer_should_quit wake up");
		goto exit;
	}
	return 0;

exit:
	return -1;
}
515
/*
 * Set the error socket.
 *
 * Records the fd used by lttng_consumer_send_error() to report errors to
 * the session daemon.  No ownership is taken: the caller still closes it.
 */
void lttng_consumer_set_error_sock(
		struct lttng_consumer_local_data *ctx, int sock)
{
	ctx->consumer_error_socket = sock;
}
524
/*
 * Set the command socket path.
 *
 * Stores the pointer only (no copy): the string must outlive the context.
 */

void lttng_consumer_set_command_sock_path(
		struct lttng_consumer_local_data *ctx, char *sock)
{
	ctx->consumer_command_sock_path = sock;
}
534
535 /*
536 * Send return code to the session daemon.
537 * If the socket is not defined, we return 0, it is not a fatal error
538 */
539 int lttng_consumer_send_error(
540 struct lttng_consumer_local_data *ctx, int cmd)
541 {
542 if (ctx->consumer_error_socket > 0) {
543 return lttcomm_send_unix_sock(ctx->consumer_error_socket, &cmd,
544 sizeof(enum lttcomm_sessiond_command));
545 }
546
547 return 0;
548 }
549
/*
 * Close all the tracefiles and stream fds, should be called when all instances
 * are destroyed.
 *
 * Streams and channels are unlinked from their hash tables here; the
 * actual teardown (fd closing, freeing) runs later from the RCU callbacks
 * consumer_del_stream_rcu / consumer_del_channel_rcu.
 */
void lttng_consumer_cleanup(void)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_ulong *node;

	rcu_read_lock();

	/*
	 * close all outfd. Called when there are no more threads running (after
	 * joining on the threads), no need to protect list iteration with mutex.
	 */
	cds_lfht_for_each_entry(consumer_data.stream_ht->ht, &iter.iter, node,
			node) {
		ret = lttng_ht_del(consumer_data.stream_ht, &iter);
		assert(!ret);
		call_rcu(&node->head, consumer_del_stream_rcu);
	}

	/* Same deferred scheme for the channels. */
	cds_lfht_for_each_entry(consumer_data.channel_ht->ht, &iter.iter, node,
			node) {
		ret = lttng_ht_del(consumer_data.channel_ht, &iter);
		assert(!ret);
		call_rcu(&node->head, consumer_del_channel_rcu);
	}

	rcu_read_unlock();
}
582
583 /*
584 * Called from signal handler.
585 */
586 void lttng_consumer_should_exit(struct lttng_consumer_local_data *ctx)
587 {
588 int ret;
589 consumer_quit = 1;
590 ret = write(ctx->consumer_should_quit[1], "4", 1);
591 if (ret < 0) {
592 perror("write consumer quit");
593 }
594 }
595
/*
 * Flush and then drop from the page cache the subbuffer preceding the one
 * that was just written at orig_offset, to bound the consumer's page cache
 * footprint.  Errors from both hints are deliberately ignored.
 */
void lttng_consumer_sync_trace_file(
		struct lttng_consumer_stream *stream, off_t orig_offset)
{
	int outfd = stream->out_fd;

	/*
	 * This does a blocking write-and-wait on any page that belongs to the
	 * subbuffer prior to the one we just wrote.
	 * Don't care about error values, as these are just hints and ways to
	 * limit the amount of page cache used.
	 */
	if (orig_offset < stream->chan->max_sb_size) {
		/* Nothing before the first subbuffer to flush. */
		return;
	}
	lttng_sync_file_range(outfd, orig_offset - stream->chan->max_sb_size,
			stream->chan->max_sb_size,
			SYNC_FILE_RANGE_WAIT_BEFORE
			| SYNC_FILE_RANGE_WRITE
			| SYNC_FILE_RANGE_WAIT_AFTER);
	/*
	 * Give hints to the kernel about how we access the file:
	 * POSIX_FADV_DONTNEED : we won't re-access data in a near future after
	 * we write it.
	 *
	 * We need to call fadvise again after the file grows because the
	 * kernel does not seem to apply fadvise to non-existing parts of the
	 * file.
	 *
	 * Call fadvise _after_ having waited for the page writeback to
	 * complete because the dirty page writeback semantic is not well
	 * defined. So it can be expected to lead to lower throughput in
	 * streaming.
	 */
	posix_fadvise(outfd, orig_offset - stream->chan->max_sb_size,
			stream->chan->max_sb_size, POSIX_FADV_DONTNEED);
}
632
/*
 * Initialise the necessary environnement :
 * - create a new context
 * - create the poll_pipe
 * - create the should_quit pipe (for signal handler)
 * - create the thread pipe (for splice)
 *
 * Takes a function pointer as argument, this function is called when data is
 * available on a buffer. This function is responsible to do the
 * kernctl_get_next_subbuf, read the data with mmap or splice depending on the
 * buffer configuration and then kernctl_put_next_subbuf at the end.
 *
 * On failure, every resource acquired so far is unwound (see the error
 * labels at the bottom, in reverse acquisition order).
 *
 * Returns a pointer to the new context or NULL on error.
 */
struct lttng_consumer_local_data *lttng_consumer_create(
		enum lttng_consumer_type type,
		ssize_t (*buffer_ready)(struct lttng_consumer_stream *stream,
			struct lttng_consumer_local_data *ctx),
		int (*recv_channel)(struct lttng_consumer_channel *channel),
		int (*recv_stream)(struct lttng_consumer_stream *stream),
		int (*update_stream)(int stream_key, uint32_t state))
{
	int ret, i;
	struct lttng_consumer_local_data *ctx;

	/* The global type may only be set once; it must match on re-creation. */
	assert(consumer_data.type == LTTNG_CONSUMER_UNKNOWN ||
			consumer_data.type == type);
	consumer_data.type = type;

	ctx = zmalloc(sizeof(struct lttng_consumer_local_data));
	if (ctx == NULL) {
		perror("allocating context");
		goto error;
	}

	/* -1 means "no error socket set yet" (see lttng_consumer_send_error). */
	ctx->consumer_error_socket = -1;
	/* assign the callbacks */
	ctx->on_buffer_ready = buffer_ready;
	ctx->on_recv_channel = recv_channel;
	ctx->on_recv_stream = recv_stream;
	ctx->on_update_stream = update_stream;

	ret = pipe(ctx->consumer_poll_pipe);
	if (ret < 0) {
		perror("Error creating poll pipe");
		goto error_poll_pipe;
	}

	ret = pipe(ctx->consumer_should_quit);
	if (ret < 0) {
		perror("Error creating recv pipe");
		goto error_quit_pipe;
	}

	ret = pipe(ctx->consumer_thread_pipe);
	if (ret < 0) {
		perror("Error creating thread pipe");
		goto error_thread_pipe;
	}

	return ctx;


error_thread_pipe:
	for (i = 0; i < 2; i++) {
		int err;

		err = close(ctx->consumer_should_quit[i]);
		assert(!err);
	}
error_quit_pipe:
	for (i = 0; i < 2; i++) {
		int err;

		err = close(ctx->consumer_poll_pipe[i]);
		assert(!err);
	}
error_poll_pipe:
	free(ctx);
error:
	return NULL;
}
715
716 /*
717 * Close all fds associated with the instance and free the context.
718 */
719 void lttng_consumer_destroy(struct lttng_consumer_local_data *ctx)
720 {
721 close(ctx->consumer_error_socket);
722 close(ctx->consumer_thread_pipe[0]);
723 close(ctx->consumer_thread_pipe[1]);
724 close(ctx->consumer_poll_pipe[0]);
725 close(ctx->consumer_poll_pipe[1]);
726 close(ctx->consumer_should_quit[0]);
727 close(ctx->consumer_should_quit[1]);
728 unlink(ctx->consumer_command_sock_path);
729 free(ctx);
730 }
731
732 /*
733 * Mmap the ring buffer, read it and write the data to the tracefile.
734 *
735 * Returns the number of bytes written
736 */
737 ssize_t lttng_consumer_on_read_subbuffer_mmap(
738 struct lttng_consumer_local_data *ctx,
739 struct lttng_consumer_stream *stream, unsigned long len)
740 {
741 switch (consumer_data.type) {
742 case LTTNG_CONSUMER_KERNEL:
743 return lttng_kconsumer_on_read_subbuffer_mmap(ctx, stream, len);
744 case LTTNG_CONSUMER32_UST:
745 case LTTNG_CONSUMER64_UST:
746 return lttng_ustconsumer_on_read_subbuffer_mmap(ctx, stream, len);
747 default:
748 ERR("Unknown consumer_data type");
749 assert(0);
750 }
751
752 return 0;
753 }
754
755 /*
756 * Splice the data from the ring buffer to the tracefile.
757 *
758 * Returns the number of bytes spliced.
759 */
760 ssize_t lttng_consumer_on_read_subbuffer_splice(
761 struct lttng_consumer_local_data *ctx,
762 struct lttng_consumer_stream *stream, unsigned long len)
763 {
764 switch (consumer_data.type) {
765 case LTTNG_CONSUMER_KERNEL:
766 return lttng_kconsumer_on_read_subbuffer_splice(ctx, stream, len);
767 case LTTNG_CONSUMER32_UST:
768 case LTTNG_CONSUMER64_UST:
769 return -ENOSYS;
770 default:
771 ERR("Unknown consumer_data type");
772 assert(0);
773 return -ENOSYS;
774 }
775
776 }
777
778 /*
779 * Take a snapshot for a specific fd
780 *
781 * Returns 0 on success, < 0 on error
782 */
783 int lttng_consumer_take_snapshot(struct lttng_consumer_local_data *ctx,
784 struct lttng_consumer_stream *stream)
785 {
786 switch (consumer_data.type) {
787 case LTTNG_CONSUMER_KERNEL:
788 return lttng_kconsumer_take_snapshot(ctx, stream);
789 case LTTNG_CONSUMER32_UST:
790 case LTTNG_CONSUMER64_UST:
791 return lttng_ustconsumer_take_snapshot(ctx, stream);
792 default:
793 ERR("Unknown consumer_data type");
794 assert(0);
795 return -ENOSYS;
796 }
797
798 }
799
800 /*
801 * Get the produced position
802 *
803 * Returns 0 on success, < 0 on error
804 */
805 int lttng_consumer_get_produced_snapshot(
806 struct lttng_consumer_local_data *ctx,
807 struct lttng_consumer_stream *stream,
808 unsigned long *pos)
809 {
810 switch (consumer_data.type) {
811 case LTTNG_CONSUMER_KERNEL:
812 return lttng_kconsumer_get_produced_snapshot(ctx, stream, pos);
813 case LTTNG_CONSUMER32_UST:
814 case LTTNG_CONSUMER64_UST:
815 return lttng_ustconsumer_get_produced_snapshot(ctx, stream, pos);
816 default:
817 ERR("Unknown consumer_data type");
818 assert(0);
819 return -ENOSYS;
820 }
821 }
822
823 int lttng_consumer_recv_cmd(struct lttng_consumer_local_data *ctx,
824 int sock, struct pollfd *consumer_sockpoll)
825 {
826 switch (consumer_data.type) {
827 case LTTNG_CONSUMER_KERNEL:
828 return lttng_kconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
829 case LTTNG_CONSUMER32_UST:
830 case LTTNG_CONSUMER64_UST:
831 return lttng_ustconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
832 default:
833 ERR("Unknown consumer_data type");
834 assert(0);
835 return -ENOSYS;
836 }
837 }
838
839 /*
840 * This thread polls the fds in the set to consume the data and write
841 * it to tracefile if necessary.
842 */
843 void *lttng_consumer_thread_poll_fds(void *data)
844 {
845 int num_rdy, num_hup, high_prio, ret, i;
846 struct pollfd *pollfd = NULL;
847 /* local view of the streams */
848 struct lttng_consumer_stream **local_stream = NULL;
849 /* local view of consumer_data.fds_count */
850 int nb_fd = 0;
851 char tmp;
852 int tmp2;
853 struct lttng_consumer_local_data *ctx = data;
854
855 rcu_register_thread();
856
857 local_stream = zmalloc(sizeof(struct lttng_consumer_stream));
858
859 while (1) {
860 high_prio = 0;
861 num_hup = 0;
862
863 /*
864 * the fds set has been updated, we need to update our
865 * local array as well
866 */
867 pthread_mutex_lock(&consumer_data.lock);
868 if (consumer_data.need_update) {
869 if (pollfd != NULL) {
870 free(pollfd);
871 pollfd = NULL;
872 }
873 if (local_stream != NULL) {
874 free(local_stream);
875 local_stream = NULL;
876 }
877
878 /* allocate for all fds + 1 for the consumer_poll_pipe */
879 pollfd = zmalloc((consumer_data.stream_count + 1) * sizeof(struct pollfd));
880 if (pollfd == NULL) {
881 perror("pollfd malloc");
882 pthread_mutex_unlock(&consumer_data.lock);
883 goto end;
884 }
885
886 /* allocate for all fds + 1 for the consumer_poll_pipe */
887 local_stream = zmalloc((consumer_data.stream_count + 1) *
888 sizeof(struct lttng_consumer_stream));
889 if (local_stream == NULL) {
890 perror("local_stream malloc");
891 pthread_mutex_unlock(&consumer_data.lock);
892 goto end;
893 }
894 ret = consumer_update_poll_array(ctx, &pollfd, local_stream);
895 if (ret < 0) {
896 ERR("Error in allocating pollfd or local_outfds");
897 lttng_consumer_send_error(ctx, CONSUMERD_POLL_ERROR);
898 pthread_mutex_unlock(&consumer_data.lock);
899 goto end;
900 }
901 nb_fd = ret;
902 consumer_data.need_update = 0;
903 }
904 pthread_mutex_unlock(&consumer_data.lock);
905
906 /* No FDs and consumer_quit, consumer_cleanup the thread */
907 if (nb_fd == 0 && consumer_quit == 1) {
908 goto end;
909 }
910 /* poll on the array of fds */
911 restart:
912 DBG("polling on %d fd", nb_fd + 1);
913 num_rdy = poll(pollfd, nb_fd + 1, consumer_poll_timeout);
914 DBG("poll num_rdy : %d", num_rdy);
915 if (num_rdy == -1) {
916 /*
917 * Restart interrupted system call.
918 */
919 if (errno == EINTR) {
920 goto restart;
921 }
922 perror("Poll error");
923 lttng_consumer_send_error(ctx, CONSUMERD_POLL_ERROR);
924 goto end;
925 } else if (num_rdy == 0) {
926 DBG("Polling thread timed out");
927 goto end;
928 }
929
930 /*
931 * If the consumer_poll_pipe triggered poll go
932 * directly to the beginning of the loop to update the
933 * array. We want to prioritize array update over
934 * low-priority reads.
935 */
936 if (pollfd[nb_fd].revents & POLLIN) {
937 DBG("consumer_poll_pipe wake up");
938 tmp2 = read(ctx->consumer_poll_pipe[0], &tmp, 1);
939 if (tmp2 < 0) {
940 perror("read consumer poll");
941 }
942 continue;
943 }
944
945 /* Take care of high priority channels first. */
946 for (i = 0; i < nb_fd; i++) {
947 if (pollfd[i].revents & POLLPRI) {
948 ssize_t len;
949
950 DBG("Urgent read on fd %d", pollfd[i].fd);
951 high_prio = 1;
952 len = ctx->on_buffer_ready(local_stream[i], ctx);
953 /* it's ok to have an unavailable sub-buffer */
954 if (len < 0 && len != -EAGAIN) {
955 goto end;
956 } else if (len > 0) {
957 local_stream[i]->data_read = 1;
958 }
959 }
960 }
961
962 /*
963 * If we read high prio channel in this loop, try again
964 * for more high prio data.
965 */
966 if (high_prio) {
967 continue;
968 }
969
970 /* Take care of low priority channels. */
971 for (i = 0; i < nb_fd; i++) {
972 if ((pollfd[i].revents & POLLIN) ||
973 local_stream[i]->hangup_flush_done) {
974 ssize_t len;
975
976 assert(!(pollfd[i].revents & POLLERR));
977 assert(!(pollfd[i].revents & POLLNVAL));
978 DBG("Normal read on fd %d", pollfd[i].fd);
979 len = ctx->on_buffer_ready(local_stream[i], ctx);
980 /* it's ok to have an unavailable sub-buffer */
981 if (len < 0 && len != -EAGAIN) {
982 goto end;
983 } else if (len > 0) {
984 local_stream[i]->data_read = 1;
985 }
986 }
987 }
988
989 /* Handle hangup and errors */
990 for (i = 0; i < nb_fd; i++) {
991 if (!local_stream[i]->hangup_flush_done
992 && (pollfd[i].revents & (POLLHUP | POLLERR | POLLNVAL))
993 && (consumer_data.type == LTTNG_CONSUMER32_UST
994 || consumer_data.type == LTTNG_CONSUMER64_UST)) {
995 DBG("fd %d is hup|err|nval. Attempting flush and read.",
996 pollfd[i].fd);
997 lttng_ustconsumer_on_stream_hangup(local_stream[i]);
998 /* Attempt read again, for the data we just flushed. */
999 local_stream[i]->data_read = 1;
1000 }
1001 /*
1002 * If the poll flag is HUP/ERR/NVAL and we have
1003 * read no data in this pass, we can remove the
1004 * stream from its hash table.
1005 */
1006 if ((pollfd[i].revents & POLLHUP)) {
1007 DBG("Polling fd %d tells it has hung up.", pollfd[i].fd);
1008 if (!local_stream[i]->data_read) {
1009 rcu_read_lock();
1010 consumer_del_stream_rcu(&local_stream[i]->node.head);
1011 rcu_read_unlock();
1012 num_hup++;
1013 }
1014 } else if (pollfd[i].revents & POLLERR) {
1015 ERR("Error returned in polling fd %d.", pollfd[i].fd);
1016 if (!local_stream[i]->data_read) {
1017 rcu_read_lock();
1018 consumer_del_stream_rcu(&local_stream[i]->node.head);
1019 rcu_read_unlock();
1020 num_hup++;
1021 }
1022 } else if (pollfd[i].revents & POLLNVAL) {
1023 ERR("Polling fd %d tells fd is not open.", pollfd[i].fd);
1024 if (!local_stream[i]->data_read) {
1025 rcu_read_lock();
1026 consumer_del_stream_rcu(&local_stream[i]->node.head);
1027 rcu_read_unlock();
1028 num_hup++;
1029 }
1030 }
1031 local_stream[i]->data_read = 0;
1032 }
1033 }
1034 end:
1035 DBG("polling thread exiting");
1036 if (pollfd != NULL) {
1037 free(pollfd);
1038 pollfd = NULL;
1039 }
1040 if (local_stream != NULL) {
1041 free(local_stream);
1042 local_stream = NULL;
1043 }
1044 rcu_unregister_thread();
1045 return NULL;
1046 }
1047
/*
 * This thread listens on the consumerd socket and receives the file
 * descriptors from the session daemon.
 *
 * On exit (any path) it raises consumer_quit, arms the grace-period
 * timeout and wakes the polling thread through the poll pipe.
 */
void *lttng_consumer_thread_receive_fds(void *data)
{
	int sock, client_socket, ret;
	/*
	 * structure to poll for incoming data on communication socket avoids
	 * making blocking sockets.
	 */
	struct pollfd consumer_sockpoll[2];
	struct lttng_consumer_local_data *ctx = data;

	rcu_register_thread();

	DBG("Creating command socket %s", ctx->consumer_command_sock_path);
	/* Remove any stale socket file left by a previous run. */
	unlink(ctx->consumer_command_sock_path);
	client_socket = lttcomm_create_unix_sock(ctx->consumer_command_sock_path);
	if (client_socket < 0) {
		ERR("Cannot create command socket");
		goto end;
	}

	ret = lttcomm_listen_unix_sock(client_socket);
	if (ret < 0) {
		goto end;
	}

	DBG("Sending ready command to lttng-sessiond");
	ret = lttng_consumer_send_error(ctx, CONSUMERD_COMMAND_SOCK_READY);
	/* return < 0 on error, but == 0 is not fatal */
	if (ret < 0) {
		ERR("Error sending ready command to lttng-sessiond");
		goto end;
	}

	ret = fcntl(client_socket, F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		perror("fcntl O_NONBLOCK");
		goto end;
	}

	/* prepare the FDs to poll : to client socket and the should_quit pipe */
	consumer_sockpoll[0].fd = ctx->consumer_should_quit[0];
	consumer_sockpoll[0].events = POLLIN | POLLPRI;
	consumer_sockpoll[1].fd = client_socket;
	consumer_sockpoll[1].events = POLLIN | POLLPRI;

	if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
		goto end;
	}
	DBG("Connection on client_socket");

	/* Blocking call, waiting for transmission */
	sock = lttcomm_accept_unix_sock(client_socket);
	if (sock <= 0) {
		WARN("On accept");
		goto end;
	}
	ret = fcntl(sock, F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		perror("fcntl O_NONBLOCK");
		goto end;
	}

	/* update the polling structure to poll on the established socket */
	consumer_sockpoll[1].fd = sock;
	consumer_sockpoll[1].events = POLLIN | POLLPRI;

	/* Command processing loop: one command per iteration. */
	while (1) {
		if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
			goto end;
		}
		DBG("Incoming command on sock");
		ret = lttng_consumer_recv_cmd(ctx, sock, consumer_sockpoll);
		if (ret == -ENOENT) {
			DBG("Received STOP command");
			goto end;
		}
		if (ret < 0) {
			ERR("Communication interrupted on command socket");
			goto end;
		}
		if (consumer_quit) {
			DBG("consumer_thread_receive_fds received quit from signal");
			goto end;
		}
		DBG("received fds on sock");
	}
end:
	DBG("consumer_thread_receive_fds exiting");

	/*
	 * when all fds have hung up, the polling thread
	 * can exit cleanly
	 */
	consumer_quit = 1;

	/*
	 * 2s of grace period, if no polling events occur during
	 * this period, the polling thread will exit even if there
	 * are still open FDs (should not happen, but safety mechanism).
	 */
	consumer_poll_timeout = LTTNG_CONSUMER_POLL_TIMEOUT;

	/* wake up the polling thread */
	ret = write(ctx->consumer_poll_pipe[1], "4", 1);
	if (ret < 0) {
		perror("poll pipe write");
	}
	rcu_unregister_thread();
	return NULL;
}
1162
1163 ssize_t lttng_consumer_read_subbuffer(struct lttng_consumer_stream *stream,
1164 struct lttng_consumer_local_data *ctx)
1165 {
1166 switch (consumer_data.type) {
1167 case LTTNG_CONSUMER_KERNEL:
1168 return lttng_kconsumer_read_subbuffer(stream, ctx);
1169 case LTTNG_CONSUMER32_UST:
1170 case LTTNG_CONSUMER64_UST:
1171 return lttng_ustconsumer_read_subbuffer(stream, ctx);
1172 default:
1173 ERR("Unknown consumer_data type");
1174 assert(0);
1175 return -ENOSYS;
1176 }
1177 }
1178
1179 int lttng_consumer_on_recv_stream(struct lttng_consumer_stream *stream)
1180 {
1181 switch (consumer_data.type) {
1182 case LTTNG_CONSUMER_KERNEL:
1183 return lttng_kconsumer_on_recv_stream(stream);
1184 case LTTNG_CONSUMER32_UST:
1185 case LTTNG_CONSUMER64_UST:
1186 return lttng_ustconsumer_on_recv_stream(stream);
1187 default:
1188 ERR("Unknown consumer_data type");
1189 assert(0);
1190 return -ENOSYS;
1191 }
1192 }
1193
1194 /*
1195 * Allocate and set consumer data hash tables.
1196 */
1197 void lttng_consumer_init(void)
1198 {
1199 consumer_data.stream_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
1200 consumer_data.channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
1201 }
1202
This page took 0.058145 seconds and 5 git commands to generate.