Fix double PID registration race
[lttng-tools.git] / src / common / consumer.c
... / ...
CommitLineData
1/*
2 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2 only,
7 * as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
17 */
18
19#define _GNU_SOURCE
20#include <assert.h>
21#include <poll.h>
22#include <pthread.h>
23#include <stdlib.h>
24#include <string.h>
25#include <sys/mman.h>
26#include <sys/socket.h>
27#include <sys/types.h>
28#include <unistd.h>
29
30#include <common/common.h>
31#include <common/kernel-ctl/kernel-ctl.h>
32#include <common/sessiond-comm/sessiond-comm.h>
33#include <common/kernel-consumer/kernel-consumer.h>
34#include <common/ust-consumer/ust-consumer.h>
35
36#include "consumer.h"
37
/*
 * Global consumer state shared by every consumer thread. Mutations of the
 * stream/channel hash tables and counters are serialized through
 * consumer_data.lock; need_update asks the polling thread to rebuild its
 * local FD array on its next iteration.
 */
struct lttng_consumer_global_data consumer_data = {
	.stream_count = 0,
	.need_update = 1,
	.type = LTTNG_CONSUMER_UNKNOWN,
};

/* timeout parameter, to control the polling thread grace period. */
int consumer_poll_timeout = -1;

/*
 * Flag to inform the polling thread to quit when all fd hung up. Updated by
 * the consumer_thread_receive_fds when it notices that all fds has hung up.
 * Also updated by the signal handler (consumer_should_exit()). Read by the
 * polling threads.
 */
volatile int consumer_quit = 0;
54
55/*
56 * Find a stream. The consumer_data.lock must be locked during this
57 * call.
58 */
59static struct lttng_consumer_stream *consumer_find_stream(int key)
60{
61 struct lttng_ht_iter iter;
62 struct lttng_ht_node_ulong *node;
63 struct lttng_consumer_stream *stream = NULL;
64
65 /* Negative keys are lookup failures */
66 if (key < 0)
67 return NULL;
68
69 rcu_read_lock();
70
71 lttng_ht_lookup(consumer_data.stream_ht, (void *)((unsigned long) key),
72 &iter);
73 node = lttng_ht_iter_get_node_ulong(&iter);
74 if (node != NULL) {
75 stream = caa_container_of(node, struct lttng_consumer_stream, node);
76 }
77
78 rcu_read_unlock();
79
80 return stream;
81}
82
83static void consumer_steal_stream_key(int key)
84{
85 struct lttng_consumer_stream *stream;
86
87 rcu_read_lock();
88 stream = consumer_find_stream(key);
89 if (stream) {
90 stream->key = -1;
91 /*
92 * We don't want the lookup to match, but we still need
93 * to iterate on this stream when iterating over the hash table. Just
94 * change the node key.
95 */
96 stream->node.key = -1;
97 }
98 rcu_read_unlock();
99}
100
101static struct lttng_consumer_channel *consumer_find_channel(int key)
102{
103 struct lttng_ht_iter iter;
104 struct lttng_ht_node_ulong *node;
105 struct lttng_consumer_channel *channel = NULL;
106
107 /* Negative keys are lookup failures */
108 if (key < 0)
109 return NULL;
110
111 rcu_read_lock();
112
113 lttng_ht_lookup(consumer_data.channel_ht, (void *)((unsigned long) key),
114 &iter);
115 node = lttng_ht_iter_get_node_ulong(&iter);
116 if (node != NULL) {
117 channel = caa_container_of(node, struct lttng_consumer_channel, node);
118 }
119
120 rcu_read_unlock();
121
122 return channel;
123}
124
125static void consumer_steal_channel_key(int key)
126{
127 struct lttng_consumer_channel *channel;
128
129 rcu_read_lock();
130 channel = consumer_find_channel(key);
131 if (channel) {
132 channel->key = -1;
133 /*
134 * We don't want the lookup to match, but we still need
135 * to iterate on this channel when iterating over the hash table. Just
136 * change the node key.
137 */
138 channel->node.key = -1;
139 }
140 rcu_read_unlock();
141}
142
143static
144void consumer_free_stream(struct rcu_head *head)
145{
146 struct lttng_ht_node_ulong *node =
147 caa_container_of(head, struct lttng_ht_node_ulong, head);
148 struct lttng_consumer_stream *stream =
149 caa_container_of(node, struct lttng_consumer_stream, node);
150
151 free(stream);
152}
153
154/*
155 * Remove a stream from the global list protected by a mutex. This
156 * function is also responsible for freeing its data structures.
157 */
158void consumer_del_stream(struct lttng_consumer_stream *stream)
159{
160 int ret;
161 struct lttng_ht_iter iter;
162 struct lttng_consumer_channel *free_chan = NULL;
163
164 pthread_mutex_lock(&consumer_data.lock);
165
166 switch (consumer_data.type) {
167 case LTTNG_CONSUMER_KERNEL:
168 if (stream->mmap_base != NULL) {
169 ret = munmap(stream->mmap_base, stream->mmap_len);
170 if (ret != 0) {
171 perror("munmap");
172 }
173 }
174 break;
175 case LTTNG_CONSUMER32_UST:
176 case LTTNG_CONSUMER64_UST:
177 lttng_ustconsumer_del_stream(stream);
178 break;
179 default:
180 ERR("Unknown consumer_data type");
181 assert(0);
182 goto end;
183 }
184
185 rcu_read_lock();
186 iter.iter.node = &stream->node.node;
187 ret = lttng_ht_del(consumer_data.stream_ht, &iter);
188 assert(!ret);
189
190 rcu_read_unlock();
191
192 if (consumer_data.stream_count <= 0) {
193 goto end;
194 }
195 consumer_data.stream_count--;
196 if (!stream) {
197 goto end;
198 }
199 if (stream->out_fd >= 0) {
200 ret = close(stream->out_fd);
201 if (ret) {
202 PERROR("close");
203 }
204 }
205 if (stream->wait_fd >= 0 && !stream->wait_fd_is_copy) {
206 ret = close(stream->wait_fd);
207 if (ret) {
208 PERROR("close");
209 }
210 }
211 if (stream->shm_fd >= 0 && stream->wait_fd != stream->shm_fd) {
212 ret = close(stream->shm_fd);
213 if (ret) {
214 PERROR("close");
215 }
216 }
217 if (!--stream->chan->refcount)
218 free_chan = stream->chan;
219
220 call_rcu(&stream->node.head, consumer_free_stream);
221end:
222 consumer_data.need_update = 1;
223 pthread_mutex_unlock(&consumer_data.lock);
224
225 if (free_chan)
226 consumer_del_channel(free_chan);
227}
228
229struct lttng_consumer_stream *consumer_allocate_stream(
230 int channel_key, int stream_key,
231 int shm_fd, int wait_fd,
232 enum lttng_consumer_stream_state state,
233 uint64_t mmap_len,
234 enum lttng_event_output output,
235 const char *path_name,
236 uid_t uid,
237 gid_t gid)
238{
239 struct lttng_consumer_stream *stream;
240 int ret;
241
242 stream = zmalloc(sizeof(*stream));
243 if (stream == NULL) {
244 perror("malloc struct lttng_consumer_stream");
245 goto end;
246 }
247 stream->chan = consumer_find_channel(channel_key);
248 if (!stream->chan) {
249 perror("Unable to find channel key");
250 goto end;
251 }
252 stream->chan->refcount++;
253 stream->key = stream_key;
254 stream->shm_fd = shm_fd;
255 stream->wait_fd = wait_fd;
256 stream->out_fd = -1;
257 stream->out_fd_offset = 0;
258 stream->state = state;
259 stream->mmap_len = mmap_len;
260 stream->mmap_base = NULL;
261 stream->output = output;
262 stream->uid = uid;
263 stream->gid = gid;
264 strncpy(stream->path_name, path_name, PATH_MAX - 1);
265 stream->path_name[PATH_MAX - 1] = '\0';
266 lttng_ht_node_init_ulong(&stream->node, stream->key);
267
268 switch (consumer_data.type) {
269 case LTTNG_CONSUMER_KERNEL:
270 break;
271 case LTTNG_CONSUMER32_UST:
272 case LTTNG_CONSUMER64_UST:
273 stream->cpu = stream->chan->cpucount++;
274 ret = lttng_ustconsumer_allocate_stream(stream);
275 if (ret) {
276 free(stream);
277 return NULL;
278 }
279 break;
280 default:
281 ERR("Unknown consumer_data type");
282 assert(0);
283 goto end;
284 }
285 DBG("Allocated stream %s (key %d, shm_fd %d, wait_fd %d, mmap_len %llu, out_fd %d)",
286 stream->path_name, stream->key,
287 stream->shm_fd,
288 stream->wait_fd,
289 (unsigned long long) stream->mmap_len,
290 stream->out_fd);
291end:
292 return stream;
293}
294
/*
 * Add a stream to the global list protected by a mutex.
 *
 * Any pre-existing stream registered under the same key has its key
 * stolen (invalidated) first so the unique insertion below cannot
 * collide. Always returns 0.
 */
int consumer_add_stream(struct lttng_consumer_stream *stream)
{
	int ret = 0;

	pthread_mutex_lock(&consumer_data.lock);
	/* Steal stream identifier, for UST */
	consumer_steal_stream_key(stream->key);
	rcu_read_lock();
	lttng_ht_add_unique_ulong(consumer_data.stream_ht, &stream->node);
	rcu_read_unlock();
	consumer_data.stream_count++;
	/* Tell the polling thread to rebuild its FD array. */
	consumer_data.need_update = 1;

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		/* Streams are in CPU number order (we rely on this) */
		stream->cpu = stream->chan->nr_streams++;
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		goto end;
	}

end:
	pthread_mutex_unlock(&consumer_data.lock);

	return ret;
}
330
331/*
332 * Update a stream according to what we just received.
333 */
334void consumer_change_stream_state(int stream_key,
335 enum lttng_consumer_stream_state state)
336{
337 struct lttng_consumer_stream *stream;
338
339 pthread_mutex_lock(&consumer_data.lock);
340 stream = consumer_find_stream(stream_key);
341 if (stream) {
342 stream->state = state;
343 }
344 consumer_data.need_update = 1;
345 pthread_mutex_unlock(&consumer_data.lock);
346}
347
348static
349void consumer_free_channel(struct rcu_head *head)
350{
351 struct lttng_ht_node_ulong *node =
352 caa_container_of(head, struct lttng_ht_node_ulong, head);
353 struct lttng_consumer_channel *channel =
354 caa_container_of(node, struct lttng_consumer_channel, node);
355
356 free(channel);
357}
358
/*
 * Remove a channel from the global list protected by a mutex. This
 * function is also responsible for freeing its data structures.
 *
 * Tracer-specific teardown runs before the hash table removal; the
 * memory itself is reclaimed through call_rcu once readers are done.
 */
void consumer_del_channel(struct lttng_consumer_channel *channel)
{
	int ret;
	struct lttng_ht_iter iter;

	pthread_mutex_lock(&consumer_data.lock);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustconsumer_del_channel(channel);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		goto end;
	}

	/* Unlink the channel node from the hash table; must succeed. */
	rcu_read_lock();
	iter.iter.node = &channel->node.node;
	ret = lttng_ht_del(consumer_data.channel_ht, &iter);
	assert(!ret);
	rcu_read_unlock();

	if (channel->mmap_base != NULL) {
		ret = munmap(channel->mmap_base, channel->mmap_len);
		if (ret != 0) {
			perror("munmap");
		}
	}
	/* Only close fds this channel actually owns (not copies/aliases). */
	if (channel->wait_fd >= 0 && !channel->wait_fd_is_copy) {
		ret = close(channel->wait_fd);
		if (ret) {
			PERROR("close");
		}
	}
	if (channel->shm_fd >= 0 && channel->wait_fd != channel->shm_fd) {
		ret = close(channel->shm_fd);
		if (ret) {
			PERROR("close");
		}
	}

	/* Defer freeing until all RCU readers are done with the node. */
	call_rcu(&channel->node.head, consumer_free_channel);
end:
	pthread_mutex_unlock(&consumer_data.lock);
}
412
413struct lttng_consumer_channel *consumer_allocate_channel(
414 int channel_key,
415 int shm_fd, int wait_fd,
416 uint64_t mmap_len,
417 uint64_t max_sb_size)
418{
419 struct lttng_consumer_channel *channel;
420 int ret;
421
422 channel = zmalloc(sizeof(*channel));
423 if (channel == NULL) {
424 perror("malloc struct lttng_consumer_channel");
425 goto end;
426 }
427 channel->key = channel_key;
428 channel->shm_fd = shm_fd;
429 channel->wait_fd = wait_fd;
430 channel->mmap_len = mmap_len;
431 channel->max_sb_size = max_sb_size;
432 channel->refcount = 0;
433 channel->nr_streams = 0;
434 lttng_ht_node_init_ulong(&channel->node, channel->key);
435
436 switch (consumer_data.type) {
437 case LTTNG_CONSUMER_KERNEL:
438 channel->mmap_base = NULL;
439 channel->mmap_len = 0;
440 break;
441 case LTTNG_CONSUMER32_UST:
442 case LTTNG_CONSUMER64_UST:
443 ret = lttng_ustconsumer_allocate_channel(channel);
444 if (ret) {
445 free(channel);
446 return NULL;
447 }
448 break;
449 default:
450 ERR("Unknown consumer_data type");
451 assert(0);
452 goto end;
453 }
454 DBG("Allocated channel (key %d, shm_fd %d, wait_fd %d, mmap_len %llu, max_sb_size %llu)",
455 channel->key,
456 channel->shm_fd,
457 channel->wait_fd,
458 (unsigned long long) channel->mmap_len,
459 (unsigned long long) channel->max_sb_size);
460end:
461 return channel;
462}
463
/*
 * Add a channel to the global list protected by a mutex.
 *
 * Any pre-existing channel registered under the same key has its key
 * stolen (invalidated) first so the unique insertion below cannot
 * collide. Always returns 0.
 */
int consumer_add_channel(struct lttng_consumer_channel *channel)
{
	pthread_mutex_lock(&consumer_data.lock);
	/* Steal channel identifier, for UST */
	consumer_steal_channel_key(channel->key);
	rcu_read_lock();
	lttng_ht_add_unique_ulong(consumer_data.channel_ht, &channel->node);
	rcu_read_unlock();
	pthread_mutex_unlock(&consumer_data.lock);

	return 0;
}
479
/*
 * Allocate the pollfd structure and the local view of the out fds to avoid
 * doing a lookup in the linked list and concurrency issues when writing is
 * needed. Called with consumer_data.lock held.
 *
 * Both *pollfd and local_stream must already be sized for
 * stream_count + 1 entries (the extra slot holds the wake-up pipe).
 *
 * Returns the number of fds in the structures.
 */
int consumer_update_poll_array(
		struct lttng_consumer_local_data *ctx, struct pollfd **pollfd,
		struct lttng_consumer_stream **local_stream)
{
	int i = 0;
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Updating poll fd array");
	rcu_read_lock();
	cds_lfht_for_each_entry(consumer_data.stream_ht->ht, &iter.iter, stream,
			node.node) {
		/* Only active streams participate in polling. */
		if (stream->state != LTTNG_CONSUMER_ACTIVE_STREAM) {
			continue;
		}
		DBG("Active FD %d", stream->wait_fd);
		(*pollfd)[i].fd = stream->wait_fd;
		(*pollfd)[i].events = POLLIN | POLLPRI;
		local_stream[i] = stream;
		i++;
	}
	rcu_read_unlock();

	/*
	 * Insert the consumer_poll_pipe at the end of the array and don't
	 * increment i so nb_fd is the number of real FD.
	 */
	(*pollfd)[i].fd = ctx->consumer_poll_pipe[0];
	(*pollfd)[i].events = POLLIN;
	return i;
}
518
/*
 * Poll on the should_quit pipe and the command socket return -1 on error and
 * should exit, 0 if data is available on the command socket
 */
int lttng_consumer_poll_socket(struct pollfd *consumer_sockpoll)
{
	int num_rdy;

restart:
	num_rdy = poll(consumer_sockpoll, 2, -1);
	if (num_rdy == -1) {
		/*
		 * Restart interrupted system call.
		 */
		if (errno == EINTR) {
			goto restart;
		}
		perror("Poll error");
		goto exit;
	}
	/*
	 * Test the POLLIN bit rather than comparing revents for equality:
	 * the kernel may report POLLHUP or POLLERR together with POLLIN,
	 * and an exact comparison would then miss the quit notification.
	 */
	if (consumer_sockpoll[0].revents & POLLIN) {
		DBG("consumer_should_quit wake up");
		goto exit;
	}
	return 0;

exit:
	return -1;
}
548
/*
 * Set the error socket.
 * Stores the fd used by lttng_consumer_send_error() to report errors to
 * the session daemon; no ownership is taken here.
 */
void lttng_consumer_set_error_sock(
		struct lttng_consumer_local_data *ctx, int sock)
{
	ctx->consumer_error_socket = sock;
}
557
/*
 * Set the command socket path.
 * Only the pointer is stored; the string must outlive ctx (it is later
 * passed to unlink() and to lttcomm_create_unix_sock()).
 */

void lttng_consumer_set_command_sock_path(
		struct lttng_consumer_local_data *ctx, char *sock)
{
	ctx->consumer_command_sock_path = sock;
}
567
568/*
569 * Send return code to the session daemon.
570 * If the socket is not defined, we return 0, it is not a fatal error
571 */
572int lttng_consumer_send_error(
573 struct lttng_consumer_local_data *ctx, int cmd)
574{
575 if (ctx->consumer_error_socket > 0) {
576 return lttcomm_send_unix_sock(ctx->consumer_error_socket, &cmd,
577 sizeof(enum lttcomm_sessiond_command));
578 }
579
580 return 0;
581}
582
/*
 * Close all the tracefiles and stream fds, should be called when all instances
 * are destroyed.
 *
 * Deletion while iterating is safe here: cds_lfht iteration tolerates
 * concurrent removal, and consumer_del_stream()/consumer_del_channel()
 * take consumer_data.lock themselves.
 */
void lttng_consumer_cleanup(void)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_ulong *node;

	rcu_read_lock();

	/*
	 * close all outfd. Called when there are no more threads running (after
	 * joining on the threads), no need to protect list iteration with mutex.
	 */
	cds_lfht_for_each_entry(consumer_data.stream_ht->ht, &iter.iter, node,
			node) {
		struct lttng_consumer_stream *stream =
			caa_container_of(node, struct lttng_consumer_stream, node);
		consumer_del_stream(stream);
	}

	cds_lfht_for_each_entry(consumer_data.channel_ht->ht, &iter.iter, node,
			node) {
		struct lttng_consumer_channel *channel =
			caa_container_of(node, struct lttng_consumer_channel, node);
		consumer_del_channel(channel);
	}

	rcu_read_unlock();
}
614
/*
 * Called from signal handler.
 * Sets the quit flag and wakes the receive-fds thread through its pipe;
 * write(2) is async-signal-safe, so this is legal in handler context.
 */
void lttng_consumer_should_exit(struct lttng_consumer_local_data *ctx)
{
	int ret;
	consumer_quit = 1;
	/* The written byte's value is irrelevant; only the wake-up matters. */
	ret = write(ctx->consumer_should_quit[1], "4", 1);
	if (ret < 0) {
		perror("write consumer quit");
	}
}
627
/*
 * Flush and drop page-cache hints for the sub-buffer preceding the one
 * just written at orig_offset, limiting page cache growth for the trace
 * file. No-op until at least one full sub-buffer has been written.
 */
void lttng_consumer_sync_trace_file(
		struct lttng_consumer_stream *stream, off_t orig_offset)
{
	int outfd = stream->out_fd;

	/*
	 * This does a blocking write-and-wait on any page that belongs to the
	 * subbuffer prior to the one we just wrote.
	 * Don't care about error values, as these are just hints and ways to
	 * limit the amount of page cache used.
	 */
	if (orig_offset < stream->chan->max_sb_size) {
		return;
	}
	lttng_sync_file_range(outfd, orig_offset - stream->chan->max_sb_size,
			stream->chan->max_sb_size,
			SYNC_FILE_RANGE_WAIT_BEFORE
			| SYNC_FILE_RANGE_WRITE
			| SYNC_FILE_RANGE_WAIT_AFTER);
	/*
	 * Give hints to the kernel about how we access the file:
	 * POSIX_FADV_DONTNEED : we won't re-access data in a near future after
	 * we write it.
	 *
	 * We need to call fadvise again after the file grows because the
	 * kernel does not seem to apply fadvise to non-existing parts of the
	 * file.
	 *
	 * Call fadvise _after_ having waited for the page writeback to
	 * complete because the dirty page writeback semantic is not well
	 * defined. So it can be expected to lead to lower throughput in
	 * streaming.
	 */
	posix_fadvise(outfd, orig_offset - stream->chan->max_sb_size,
			stream->chan->max_sb_size, POSIX_FADV_DONTNEED);
}
664
/*
 * Initialise the necessary environnement :
 * - create a new context
 * - create the poll_pipe
 * - create the should_quit pipe (for signal handler)
 * - create the thread pipe (for splice)
 *
 * Takes a function pointer as argument, this function is called when data is
 * available on a buffer. This function is responsible to do the
 * kernctl_get_next_subbuf, read the data with mmap or splice depending on the
 * buffer configuration and then kernctl_put_next_subbuf at the end.
 *
 * Returns a pointer to the new context or NULL on error.
 */
struct lttng_consumer_local_data *lttng_consumer_create(
		enum lttng_consumer_type type,
		ssize_t (*buffer_ready)(struct lttng_consumer_stream *stream,
			struct lttng_consumer_local_data *ctx),
		int (*recv_channel)(struct lttng_consumer_channel *channel),
		int (*recv_stream)(struct lttng_consumer_stream *stream),
		int (*update_stream)(int stream_key, uint32_t state))
{
	int ret, i;
	struct lttng_consumer_local_data *ctx;

	/* The global consumer type may only be set once per process. */
	assert(consumer_data.type == LTTNG_CONSUMER_UNKNOWN ||
			consumer_data.type == type);
	consumer_data.type = type;

	ctx = zmalloc(sizeof(struct lttng_consumer_local_data));
	if (ctx == NULL) {
		perror("allocating context");
		goto error;
	}

	ctx->consumer_error_socket = -1;
	/* assign the callbacks */
	ctx->on_buffer_ready = buffer_ready;
	ctx->on_recv_channel = recv_channel;
	ctx->on_recv_stream = recv_stream;
	ctx->on_update_stream = update_stream;

	ret = pipe(ctx->consumer_poll_pipe);
	if (ret < 0) {
		perror("Error creating poll pipe");
		goto error_poll_pipe;
	}

	ret = pipe(ctx->consumer_should_quit);
	if (ret < 0) {
		perror("Error creating recv pipe");
		goto error_quit_pipe;
	}

	ret = pipe(ctx->consumer_thread_pipe);
	if (ret < 0) {
		perror("Error creating thread pipe");
		goto error_thread_pipe;
	}

	return ctx;


	/*
	 * Error unwinding: each label closes the resources acquired before
	 * the failing step, in reverse acquisition order.
	 */
error_thread_pipe:
	for (i = 0; i < 2; i++) {
		int err;

		err = close(ctx->consumer_should_quit[i]);
		if (err) {
			PERROR("close");
		}
	}
error_quit_pipe:
	for (i = 0; i < 2; i++) {
		int err;

		err = close(ctx->consumer_poll_pipe[i]);
		if (err) {
			PERROR("close");
		}
	}
error_poll_pipe:
	free(ctx);
error:
	return NULL;
}
751
752/*
753 * Close all fds associated with the instance and free the context.
754 */
755void lttng_consumer_destroy(struct lttng_consumer_local_data *ctx)
756{
757 int ret;
758
759 ret = close(ctx->consumer_error_socket);
760 if (ret) {
761 PERROR("close");
762 }
763 ret = close(ctx->consumer_thread_pipe[0]);
764 if (ret) {
765 PERROR("close");
766 }
767 ret = close(ctx->consumer_thread_pipe[1]);
768 if (ret) {
769 PERROR("close");
770 }
771 ret = close(ctx->consumer_poll_pipe[0]);
772 if (ret) {
773 PERROR("close");
774 }
775 ret = close(ctx->consumer_poll_pipe[1]);
776 if (ret) {
777 PERROR("close");
778 }
779 ret = close(ctx->consumer_should_quit[0]);
780 if (ret) {
781 PERROR("close");
782 }
783 ret = close(ctx->consumer_should_quit[1]);
784 if (ret) {
785 PERROR("close");
786 }
787 unlink(ctx->consumer_command_sock_path);
788 free(ctx);
789}
790
791/*
792 * Mmap the ring buffer, read it and write the data to the tracefile.
793 *
794 * Returns the number of bytes written
795 */
796ssize_t lttng_consumer_on_read_subbuffer_mmap(
797 struct lttng_consumer_local_data *ctx,
798 struct lttng_consumer_stream *stream, unsigned long len)
799{
800 switch (consumer_data.type) {
801 case LTTNG_CONSUMER_KERNEL:
802 return lttng_kconsumer_on_read_subbuffer_mmap(ctx, stream, len);
803 case LTTNG_CONSUMER32_UST:
804 case LTTNG_CONSUMER64_UST:
805 return lttng_ustconsumer_on_read_subbuffer_mmap(ctx, stream, len);
806 default:
807 ERR("Unknown consumer_data type");
808 assert(0);
809 }
810
811 return 0;
812}
813
814/*
815 * Splice the data from the ring buffer to the tracefile.
816 *
817 * Returns the number of bytes spliced.
818 */
819ssize_t lttng_consumer_on_read_subbuffer_splice(
820 struct lttng_consumer_local_data *ctx,
821 struct lttng_consumer_stream *stream, unsigned long len)
822{
823 switch (consumer_data.type) {
824 case LTTNG_CONSUMER_KERNEL:
825 return lttng_kconsumer_on_read_subbuffer_splice(ctx, stream, len);
826 case LTTNG_CONSUMER32_UST:
827 case LTTNG_CONSUMER64_UST:
828 return -ENOSYS;
829 default:
830 ERR("Unknown consumer_data type");
831 assert(0);
832 return -ENOSYS;
833 }
834
835}
836
837/*
838 * Take a snapshot for a specific fd
839 *
840 * Returns 0 on success, < 0 on error
841 */
842int lttng_consumer_take_snapshot(struct lttng_consumer_local_data *ctx,
843 struct lttng_consumer_stream *stream)
844{
845 switch (consumer_data.type) {
846 case LTTNG_CONSUMER_KERNEL:
847 return lttng_kconsumer_take_snapshot(ctx, stream);
848 case LTTNG_CONSUMER32_UST:
849 case LTTNG_CONSUMER64_UST:
850 return lttng_ustconsumer_take_snapshot(ctx, stream);
851 default:
852 ERR("Unknown consumer_data type");
853 assert(0);
854 return -ENOSYS;
855 }
856
857}
858
859/*
860 * Get the produced position
861 *
862 * Returns 0 on success, < 0 on error
863 */
864int lttng_consumer_get_produced_snapshot(
865 struct lttng_consumer_local_data *ctx,
866 struct lttng_consumer_stream *stream,
867 unsigned long *pos)
868{
869 switch (consumer_data.type) {
870 case LTTNG_CONSUMER_KERNEL:
871 return lttng_kconsumer_get_produced_snapshot(ctx, stream, pos);
872 case LTTNG_CONSUMER32_UST:
873 case LTTNG_CONSUMER64_UST:
874 return lttng_ustconsumer_get_produced_snapshot(ctx, stream, pos);
875 default:
876 ERR("Unknown consumer_data type");
877 assert(0);
878 return -ENOSYS;
879 }
880}
881
882int lttng_consumer_recv_cmd(struct lttng_consumer_local_data *ctx,
883 int sock, struct pollfd *consumer_sockpoll)
884{
885 switch (consumer_data.type) {
886 case LTTNG_CONSUMER_KERNEL:
887 return lttng_kconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
888 case LTTNG_CONSUMER32_UST:
889 case LTTNG_CONSUMER64_UST:
890 return lttng_ustconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
891 default:
892 ERR("Unknown consumer_data type");
893 assert(0);
894 return -ENOSYS;
895 }
896}
897
898/*
899 * This thread polls the fds in the set to consume the data and write
900 * it to tracefile if necessary.
901 */
902void *lttng_consumer_thread_poll_fds(void *data)
903{
904 int num_rdy, num_hup, high_prio, ret, i;
905 struct pollfd *pollfd = NULL;
906 /* local view of the streams */
907 struct lttng_consumer_stream **local_stream = NULL;
908 /* local view of consumer_data.fds_count */
909 int nb_fd = 0;
910 char tmp;
911 int tmp2;
912 struct lttng_consumer_local_data *ctx = data;
913
914 rcu_register_thread();
915
916 local_stream = zmalloc(sizeof(struct lttng_consumer_stream));
917
918 while (1) {
919 high_prio = 0;
920 num_hup = 0;
921
922 /*
923 * the fds set has been updated, we need to update our
924 * local array as well
925 */
926 pthread_mutex_lock(&consumer_data.lock);
927 if (consumer_data.need_update) {
928 if (pollfd != NULL) {
929 free(pollfd);
930 pollfd = NULL;
931 }
932 if (local_stream != NULL) {
933 free(local_stream);
934 local_stream = NULL;
935 }
936
937 /* allocate for all fds + 1 for the consumer_poll_pipe */
938 pollfd = zmalloc((consumer_data.stream_count + 1) * sizeof(struct pollfd));
939 if (pollfd == NULL) {
940 perror("pollfd malloc");
941 pthread_mutex_unlock(&consumer_data.lock);
942 goto end;
943 }
944
945 /* allocate for all fds + 1 for the consumer_poll_pipe */
946 local_stream = zmalloc((consumer_data.stream_count + 1) *
947 sizeof(struct lttng_consumer_stream));
948 if (local_stream == NULL) {
949 perror("local_stream malloc");
950 pthread_mutex_unlock(&consumer_data.lock);
951 goto end;
952 }
953 ret = consumer_update_poll_array(ctx, &pollfd, local_stream);
954 if (ret < 0) {
955 ERR("Error in allocating pollfd or local_outfds");
956 lttng_consumer_send_error(ctx, CONSUMERD_POLL_ERROR);
957 pthread_mutex_unlock(&consumer_data.lock);
958 goto end;
959 }
960 nb_fd = ret;
961 consumer_data.need_update = 0;
962 }
963 pthread_mutex_unlock(&consumer_data.lock);
964
965 /* No FDs and consumer_quit, consumer_cleanup the thread */
966 if (nb_fd == 0 && consumer_quit == 1) {
967 goto end;
968 }
969 /* poll on the array of fds */
970 restart:
971 DBG("polling on %d fd", nb_fd + 1);
972 num_rdy = poll(pollfd, nb_fd + 1, consumer_poll_timeout);
973 DBG("poll num_rdy : %d", num_rdy);
974 if (num_rdy == -1) {
975 /*
976 * Restart interrupted system call.
977 */
978 if (errno == EINTR) {
979 goto restart;
980 }
981 perror("Poll error");
982 lttng_consumer_send_error(ctx, CONSUMERD_POLL_ERROR);
983 goto end;
984 } else if (num_rdy == 0) {
985 DBG("Polling thread timed out");
986 goto end;
987 }
988
989 /*
990 * If the consumer_poll_pipe triggered poll go
991 * directly to the beginning of the loop to update the
992 * array. We want to prioritize array update over
993 * low-priority reads.
994 */
995 if (pollfd[nb_fd].revents & POLLIN) {
996 DBG("consumer_poll_pipe wake up");
997 tmp2 = read(ctx->consumer_poll_pipe[0], &tmp, 1);
998 if (tmp2 < 0) {
999 perror("read consumer poll");
1000 }
1001 continue;
1002 }
1003
1004 /* Take care of high priority channels first. */
1005 for (i = 0; i < nb_fd; i++) {
1006 if (pollfd[i].revents & POLLPRI) {
1007 ssize_t len;
1008
1009 DBG("Urgent read on fd %d", pollfd[i].fd);
1010 high_prio = 1;
1011 len = ctx->on_buffer_ready(local_stream[i], ctx);
1012 /* it's ok to have an unavailable sub-buffer */
1013 if (len < 0 && len != -EAGAIN) {
1014 goto end;
1015 } else if (len > 0) {
1016 local_stream[i]->data_read = 1;
1017 }
1018 }
1019 }
1020
1021 /*
1022 * If we read high prio channel in this loop, try again
1023 * for more high prio data.
1024 */
1025 if (high_prio) {
1026 continue;
1027 }
1028
1029 /* Take care of low priority channels. */
1030 for (i = 0; i < nb_fd; i++) {
1031 if ((pollfd[i].revents & POLLIN) ||
1032 local_stream[i]->hangup_flush_done) {
1033 ssize_t len;
1034
1035 DBG("Normal read on fd %d", pollfd[i].fd);
1036 len = ctx->on_buffer_ready(local_stream[i], ctx);
1037 /* it's ok to have an unavailable sub-buffer */
1038 if (len < 0 && len != -EAGAIN) {
1039 goto end;
1040 } else if (len > 0) {
1041 local_stream[i]->data_read = 1;
1042 }
1043 }
1044 }
1045
1046 /* Handle hangup and errors */
1047 for (i = 0; i < nb_fd; i++) {
1048 if (!local_stream[i]->hangup_flush_done
1049 && (pollfd[i].revents & (POLLHUP | POLLERR | POLLNVAL))
1050 && (consumer_data.type == LTTNG_CONSUMER32_UST
1051 || consumer_data.type == LTTNG_CONSUMER64_UST)) {
1052 DBG("fd %d is hup|err|nval. Attempting flush and read.",
1053 pollfd[i].fd);
1054 lttng_ustconsumer_on_stream_hangup(local_stream[i]);
1055 /* Attempt read again, for the data we just flushed. */
1056 local_stream[i]->data_read = 1;
1057 }
1058 /*
1059 * If the poll flag is HUP/ERR/NVAL and we have
1060 * read no data in this pass, we can remove the
1061 * stream from its hash table.
1062 */
1063 if ((pollfd[i].revents & POLLHUP)) {
1064 DBG("Polling fd %d tells it has hung up.", pollfd[i].fd);
1065 if (!local_stream[i]->data_read) {
1066 consumer_del_stream(local_stream[i]);
1067 num_hup++;
1068 }
1069 } else if (pollfd[i].revents & POLLERR) {
1070 ERR("Error returned in polling fd %d.", pollfd[i].fd);
1071 if (!local_stream[i]->data_read) {
1072 consumer_del_stream(local_stream[i]);
1073 num_hup++;
1074 }
1075 } else if (pollfd[i].revents & POLLNVAL) {
1076 ERR("Polling fd %d tells fd is not open.", pollfd[i].fd);
1077 if (!local_stream[i]->data_read) {
1078 consumer_del_stream(local_stream[i]);
1079 num_hup++;
1080 }
1081 }
1082 local_stream[i]->data_read = 0;
1083 }
1084 }
1085end:
1086 DBG("polling thread exiting");
1087 if (pollfd != NULL) {
1088 free(pollfd);
1089 pollfd = NULL;
1090 }
1091 if (local_stream != NULL) {
1092 free(local_stream);
1093 local_stream = NULL;
1094 }
1095 rcu_unregister_thread();
1096 return NULL;
1097}
1098
/*
 * This thread listens on the consumerd socket and receives the file
 * descriptors from the session daemon.
 *
 * On exit (for any reason) it sets consumer_quit, arms the polling
 * thread's grace-period timeout and wakes it through the poll pipe.
 */
void *lttng_consumer_thread_receive_fds(void *data)
{
	int sock, client_socket, ret;
	/*
	 * structure to poll for incoming data on communication socket avoids
	 * making blocking sockets.
	 */
	struct pollfd consumer_sockpoll[2];
	struct lttng_consumer_local_data *ctx = data;

	rcu_register_thread();

	DBG("Creating command socket %s", ctx->consumer_command_sock_path);
	/* Remove any stale socket left over from a previous run. */
	unlink(ctx->consumer_command_sock_path);
	client_socket = lttcomm_create_unix_sock(ctx->consumer_command_sock_path);
	if (client_socket < 0) {
		ERR("Cannot create command socket");
		goto end;
	}

	ret = lttcomm_listen_unix_sock(client_socket);
	if (ret < 0) {
		goto end;
	}

	DBG("Sending ready command to lttng-sessiond");
	ret = lttng_consumer_send_error(ctx, CONSUMERD_COMMAND_SOCK_READY);
	/* return < 0 on error, but == 0 is not fatal */
	if (ret < 0) {
		ERR("Error sending ready command to lttng-sessiond");
		goto end;
	}

	ret = fcntl(client_socket, F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		perror("fcntl O_NONBLOCK");
		goto end;
	}

	/* prepare the FDs to poll : to client socket and the should_quit pipe */
	consumer_sockpoll[0].fd = ctx->consumer_should_quit[0];
	consumer_sockpoll[0].events = POLLIN | POLLPRI;
	consumer_sockpoll[1].fd = client_socket;
	consumer_sockpoll[1].events = POLLIN | POLLPRI;

	if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
		goto end;
	}
	DBG("Connection on client_socket");

	/* Blocking call, waiting for transmission */
	sock = lttcomm_accept_unix_sock(client_socket);
	if (sock <= 0) {
		WARN("On accept");
		goto end;
	}
	ret = fcntl(sock, F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		perror("fcntl O_NONBLOCK");
		goto end;
	}

	/* update the polling structure to poll on the established socket */
	consumer_sockpoll[1].fd = sock;
	consumer_sockpoll[1].events = POLLIN | POLLPRI;

	/* Command loop: one lttng_consumer_recv_cmd per wake-up. */
	while (1) {
		if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
			goto end;
		}
		DBG("Incoming command on sock");
		ret = lttng_consumer_recv_cmd(ctx, sock, consumer_sockpoll);
		if (ret == -ENOENT) {
			DBG("Received STOP command");
			goto end;
		}
		if (ret < 0) {
			ERR("Communication interrupted on command socket");
			goto end;
		}
		if (consumer_quit) {
			DBG("consumer_thread_receive_fds received quit from signal");
			goto end;
		}
		DBG("received fds on sock");
	}
end:
	DBG("consumer_thread_receive_fds exiting");

	/*
	 * when all fds have hung up, the polling thread
	 * can exit cleanly
	 */
	consumer_quit = 1;

	/*
	 * 2s of grace period, if no polling events occur during
	 * this period, the polling thread will exit even if there
	 * are still open FDs (should not happen, but safety mechanism).
	 */
	consumer_poll_timeout = LTTNG_CONSUMER_POLL_TIMEOUT;

	/* wake up the polling thread */
	ret = write(ctx->consumer_poll_pipe[1], "4", 1);
	if (ret < 0) {
		perror("poll pipe write");
	}
	rcu_unregister_thread();
	return NULL;
}
1213
1214ssize_t lttng_consumer_read_subbuffer(struct lttng_consumer_stream *stream,
1215 struct lttng_consumer_local_data *ctx)
1216{
1217 switch (consumer_data.type) {
1218 case LTTNG_CONSUMER_KERNEL:
1219 return lttng_kconsumer_read_subbuffer(stream, ctx);
1220 case LTTNG_CONSUMER32_UST:
1221 case LTTNG_CONSUMER64_UST:
1222 return lttng_ustconsumer_read_subbuffer(stream, ctx);
1223 default:
1224 ERR("Unknown consumer_data type");
1225 assert(0);
1226 return -ENOSYS;
1227 }
1228}
1229
1230int lttng_consumer_on_recv_stream(struct lttng_consumer_stream *stream)
1231{
1232 switch (consumer_data.type) {
1233 case LTTNG_CONSUMER_KERNEL:
1234 return lttng_kconsumer_on_recv_stream(stream);
1235 case LTTNG_CONSUMER32_UST:
1236 case LTTNG_CONSUMER64_UST:
1237 return lttng_ustconsumer_on_recv_stream(stream);
1238 default:
1239 ERR("Unknown consumer_data type");
1240 assert(0);
1241 return -ENOSYS;
1242 }
1243}
1244
/*
 * Allocate and set consumer data hash tables.
 *
 * NOTE(review): the lttng_ht_new() return values are not checked here;
 * a failed allocation would surface later as a NULL hash table — confirm
 * whether callers rely on this always succeeding.
 */
void lttng_consumer_init(void)
{
	consumer_data.stream_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	consumer_data.channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
}
1253
This page took 0.027113 seconds and 4 git commands to generate.