d7b319452a87088eab5d6cd648fd7515e5b387f5
[lttng-tools.git] / src / common / consumer.c
1 /*
2 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; only version 2
8 * of the License.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 */
19
20 #define _GNU_SOURCE
21 #include <assert.h>
22 #include <fcntl.h>
23 #include <poll.h>
24 #include <pthread.h>
25 #include <stdlib.h>
26 #include <string.h>
27 #include <sys/mman.h>
28 #include <sys/socket.h>
29 #include <sys/types.h>
30 #include <unistd.h>
31
32 #include <common/common.h>
33 #include <common/kernel-ctl/kernel-ctl.h>
34 #include <common/sessiond-comm/sessiond-comm.h>
35 #include <common/kernel-consumer/kernel-consumer.h>
36 #include <common/ust-consumer/ust-consumer.h>
37
38 #include "consumer.h"
39
/* Global consumer state shared by every consumer thread. */
struct lttng_consumer_global_data consumer_data = {
	.stream_count = 0,
	/* Start at 1 so the poll thread builds its fd array on first pass. */
	.need_update = 1,
	.type = LTTNG_CONSUMER_UNKNOWN,
};

/* timeout parameter, to control the polling thread grace period. */
int consumer_poll_timeout = -1;

/*
 * Flag to inform the polling thread to quit when all fd hung up. Updated by
 * the consumer_thread_receive_fds when it notices that all fds has hung up.
 * Also updated by the signal handler (consumer_should_exit()). Read by the
 * polling threads.
 */
volatile int consumer_quit = 0;
56
57 /*
58 * Find a stream. The consumer_data.lock must be locked during this
59 * call.
60 */
61 static struct lttng_consumer_stream *consumer_find_stream(int key)
62 {
63 struct lttng_ht_iter iter;
64 struct lttng_ht_node_ulong *node;
65 struct lttng_consumer_stream *stream = NULL;
66
67 /* Negative keys are lookup failures */
68 if (key < 0)
69 return NULL;
70
71 rcu_read_lock();
72
73 lttng_ht_lookup(consumer_data.stream_ht, (void *)((unsigned long) key),
74 &iter);
75 node = lttng_ht_iter_get_node_ulong(&iter);
76 if (node != NULL) {
77 stream = caa_container_of(node, struct lttng_consumer_stream, node);
78 }
79
80 rcu_read_unlock();
81
82 return stream;
83 }
84
85 static void consumer_steal_stream_key(int key)
86 {
87 struct lttng_consumer_stream *stream;
88
89 stream = consumer_find_stream(key);
90 if (stream)
91 stream->key = -1;
92 }
93
94 static struct lttng_consumer_channel *consumer_find_channel(int key)
95 {
96 struct lttng_ht_iter iter;
97 struct lttng_ht_node_ulong *node;
98 struct lttng_consumer_channel *channel = NULL;
99
100 /* Negative keys are lookup failures */
101 if (key < 0)
102 return NULL;
103
104 rcu_read_lock();
105
106 lttng_ht_lookup(consumer_data.channel_ht, (void *)((unsigned long) key),
107 &iter);
108 node = lttng_ht_iter_get_node_ulong(&iter);
109 if (node != NULL) {
110 channel = caa_container_of(node, struct lttng_consumer_channel, node);
111 }
112
113 rcu_read_unlock();
114
115 return channel;
116 }
117
118 static void consumer_steal_channel_key(int key)
119 {
120 struct lttng_consumer_channel *channel;
121
122 channel = consumer_find_channel(key);
123 if (channel)
124 channel->key = -1;
125 }
126
127 static
128 void consumer_free_stream(struct rcu_head *head)
129 {
130 struct lttng_ht_node_ulong *node =
131 caa_container_of(head, struct lttng_ht_node_ulong, head);
132 struct lttng_consumer_stream *stream =
133 caa_container_of(node, struct lttng_consumer_stream, node);
134
135 free(stream);
136 }
137
138 /*
139 * Remove a stream from the global list protected by a mutex. This
140 * function is also responsible for freeing its data structures.
141 */
142 void consumer_del_stream(struct lttng_consumer_stream *stream)
143 {
144 int ret;
145 struct lttng_ht_iter iter;
146 struct lttng_consumer_channel *free_chan = NULL;
147
148 pthread_mutex_lock(&consumer_data.lock);
149
150 switch (consumer_data.type) {
151 case LTTNG_CONSUMER_KERNEL:
152 if (stream->mmap_base != NULL) {
153 ret = munmap(stream->mmap_base, stream->mmap_len);
154 if (ret != 0) {
155 perror("munmap");
156 }
157 }
158 break;
159 case LTTNG_CONSUMER32_UST:
160 case LTTNG_CONSUMER64_UST:
161 lttng_ustconsumer_del_stream(stream);
162 break;
163 default:
164 ERR("Unknown consumer_data type");
165 assert(0);
166 goto end;
167 }
168
169 rcu_read_lock();
170
171 /* Get stream node from hash table */
172 lttng_ht_lookup(consumer_data.stream_ht,
173 (void *)((unsigned long) stream->key), &iter);
174 /*
175 * Remove stream node from hash table. It can fail if it's been
176 * replaced due to key reuse.
177 */
178 (void) lttng_ht_del(consumer_data.stream_ht, &iter);
179
180 rcu_read_unlock();
181
182 if (consumer_data.stream_count <= 0) {
183 goto end;
184 }
185 consumer_data.stream_count--;
186 if (!stream) {
187 goto end;
188 }
189 if (stream->out_fd >= 0) {
190 ret = close(stream->out_fd);
191 if (ret) {
192 PERROR("close");
193 }
194 }
195 if (stream->wait_fd >= 0 && !stream->wait_fd_is_copy) {
196 ret = close(stream->wait_fd);
197 if (ret) {
198 PERROR("close");
199 }
200 }
201 if (stream->shm_fd >= 0 && stream->wait_fd != stream->shm_fd) {
202 ret = close(stream->shm_fd);
203 if (ret) {
204 PERROR("close");
205 }
206 }
207 if (!--stream->chan->refcount)
208 free_chan = stream->chan;
209
210 call_rcu(&stream->node.head, consumer_free_stream);
211 end:
212 consumer_data.need_update = 1;
213 pthread_mutex_unlock(&consumer_data.lock);
214
215 if (free_chan)
216 consumer_del_channel(free_chan);
217 }
218
219 struct lttng_consumer_stream *consumer_allocate_stream(
220 int channel_key, int stream_key,
221 int shm_fd, int wait_fd,
222 enum lttng_consumer_stream_state state,
223 uint64_t mmap_len,
224 enum lttng_event_output output,
225 const char *path_name,
226 uid_t uid,
227 gid_t gid)
228 {
229 struct lttng_consumer_stream *stream;
230 int ret;
231
232 stream = zmalloc(sizeof(*stream));
233 if (stream == NULL) {
234 perror("malloc struct lttng_consumer_stream");
235 goto end;
236 }
237 stream->chan = consumer_find_channel(channel_key);
238 if (!stream->chan) {
239 perror("Unable to find channel key");
240 goto end;
241 }
242 stream->chan->refcount++;
243 stream->key = stream_key;
244 stream->shm_fd = shm_fd;
245 stream->wait_fd = wait_fd;
246 stream->out_fd = -1;
247 stream->out_fd_offset = 0;
248 stream->state = state;
249 stream->mmap_len = mmap_len;
250 stream->mmap_base = NULL;
251 stream->output = output;
252 stream->uid = uid;
253 stream->gid = gid;
254 strncpy(stream->path_name, path_name, PATH_MAX - 1);
255 stream->path_name[PATH_MAX - 1] = '\0';
256 lttng_ht_node_init_ulong(&stream->node, stream->key);
257
258 switch (consumer_data.type) {
259 case LTTNG_CONSUMER_KERNEL:
260 break;
261 case LTTNG_CONSUMER32_UST:
262 case LTTNG_CONSUMER64_UST:
263 stream->cpu = stream->chan->cpucount++;
264 ret = lttng_ustconsumer_allocate_stream(stream);
265 if (ret) {
266 free(stream);
267 return NULL;
268 }
269 break;
270 default:
271 ERR("Unknown consumer_data type");
272 assert(0);
273 goto end;
274 }
275 DBG("Allocated stream %s (key %d, shm_fd %d, wait_fd %d, mmap_len %llu, out_fd %d)",
276 stream->path_name, stream->key,
277 stream->shm_fd,
278 stream->wait_fd,
279 (unsigned long long) stream->mmap_len,
280 stream->out_fd);
281 end:
282 return stream;
283 }
284
285 /*
286 * Add a stream to the global list protected by a mutex.
287 */
288 int consumer_add_stream(struct lttng_consumer_stream *stream)
289 {
290 int ret = 0;
291
292 pthread_mutex_lock(&consumer_data.lock);
293 /* Steal stream identifier, for UST */
294 consumer_steal_stream_key(stream->key);
295 rcu_read_lock();
296 /*
297 * We simply remove the old channel from the hash table. It's
298 * ok, since we know for sure the sessiond wants to replace it
299 * with the new version, because the key has been reused.
300 */
301 (void) lttng_ht_add_replace_ulong(consumer_data.stream_ht, &stream->node);
302 rcu_read_unlock();
303 consumer_data.stream_count++;
304 consumer_data.need_update = 1;
305
306 switch (consumer_data.type) {
307 case LTTNG_CONSUMER_KERNEL:
308 break;
309 case LTTNG_CONSUMER32_UST:
310 case LTTNG_CONSUMER64_UST:
311 /* Streams are in CPU number order (we rely on this) */
312 stream->cpu = stream->chan->nr_streams++;
313 break;
314 default:
315 ERR("Unknown consumer_data type");
316 assert(0);
317 goto end;
318 }
319
320 end:
321 pthread_mutex_unlock(&consumer_data.lock);
322
323 return ret;
324 }
325
326 /*
327 * Update a stream according to what we just received.
328 */
329 void consumer_change_stream_state(int stream_key,
330 enum lttng_consumer_stream_state state)
331 {
332 struct lttng_consumer_stream *stream;
333
334 pthread_mutex_lock(&consumer_data.lock);
335 stream = consumer_find_stream(stream_key);
336 if (stream) {
337 stream->state = state;
338 }
339 consumer_data.need_update = 1;
340 pthread_mutex_unlock(&consumer_data.lock);
341 }
342
343 static
344 void consumer_free_channel(struct rcu_head *head)
345 {
346 struct lttng_ht_node_ulong *node =
347 caa_container_of(head, struct lttng_ht_node_ulong, head);
348 struct lttng_consumer_channel *channel =
349 caa_container_of(node, struct lttng_consumer_channel, node);
350
351 free(channel);
352 }
353
/*
 * Remove a channel from the global list protected by a mutex. This
 * function is also responsible for freeing its data structures:
 * consumer-type-specific teardown, hash table removal, unmap/close of
 * owned resources, then RCU-deferred free of the channel itself.
 */
void consumer_del_channel(struct lttng_consumer_channel *channel)
{
	int ret;
	struct lttng_ht_iter iter;

	pthread_mutex_lock(&consumer_data.lock);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		/* Nothing type-specific to release for kernel channels. */
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustconsumer_del_channel(channel);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		goto end;
	}

	rcu_read_lock();

	lttng_ht_lookup(consumer_data.channel_ht,
			(void *)((unsigned long) channel->key), &iter);

	/*
	 * Remove channel node from hash table. It can fail if it's been
	 * replaced due to key reuse.
	 */
	(void) lttng_ht_del(consumer_data.channel_ht, &iter);

	rcu_read_unlock();

	/* Release the mapped area and every fd the channel owns. */
	if (channel->mmap_base != NULL) {
		ret = munmap(channel->mmap_base, channel->mmap_len);
		if (ret != 0) {
			perror("munmap");
		}
	}
	if (channel->wait_fd >= 0 && !channel->wait_fd_is_copy) {
		ret = close(channel->wait_fd);
		if (ret) {
			PERROR("close");
		}
	}
	/* shm_fd may alias wait_fd; avoid double close. */
	if (channel->shm_fd >= 0 && channel->wait_fd != channel->shm_fd) {
		ret = close(channel->shm_fd);
		if (ret) {
			PERROR("close");
		}
	}

	/* Defer the actual free until the RCU grace period elapses. */
	call_rcu(&channel->node.head, consumer_free_channel);
end:
	pthread_mutex_unlock(&consumer_data.lock);
}
414
415 struct lttng_consumer_channel *consumer_allocate_channel(
416 int channel_key,
417 int shm_fd, int wait_fd,
418 uint64_t mmap_len,
419 uint64_t max_sb_size)
420 {
421 struct lttng_consumer_channel *channel;
422 int ret;
423
424 channel = zmalloc(sizeof(*channel));
425 if (channel == NULL) {
426 perror("malloc struct lttng_consumer_channel");
427 goto end;
428 }
429 channel->key = channel_key;
430 channel->shm_fd = shm_fd;
431 channel->wait_fd = wait_fd;
432 channel->mmap_len = mmap_len;
433 channel->max_sb_size = max_sb_size;
434 channel->refcount = 0;
435 channel->nr_streams = 0;
436 lttng_ht_node_init_ulong(&channel->node, channel->key);
437
438 switch (consumer_data.type) {
439 case LTTNG_CONSUMER_KERNEL:
440 channel->mmap_base = NULL;
441 channel->mmap_len = 0;
442 break;
443 case LTTNG_CONSUMER32_UST:
444 case LTTNG_CONSUMER64_UST:
445 ret = lttng_ustconsumer_allocate_channel(channel);
446 if (ret) {
447 free(channel);
448 return NULL;
449 }
450 break;
451 default:
452 ERR("Unknown consumer_data type");
453 assert(0);
454 goto end;
455 }
456 DBG("Allocated channel (key %d, shm_fd %d, wait_fd %d, mmap_len %llu, max_sb_size %llu)",
457 channel->key,
458 channel->shm_fd,
459 channel->wait_fd,
460 (unsigned long long) channel->mmap_len,
461 (unsigned long long) channel->max_sb_size);
462 end:
463 return channel;
464 }
465
466 /*
467 * Add a channel to the global list protected by a mutex.
468 */
469 int consumer_add_channel(struct lttng_consumer_channel *channel)
470 {
471 pthread_mutex_lock(&consumer_data.lock);
472 /* Steal channel identifier, for UST */
473 consumer_steal_channel_key(channel->key);
474 rcu_read_lock();
475 /*
476 * We simply remove the old channel from the hash table. It's
477 * ok, since we know for sure the sessiond wants to replace it
478 * with the new version, because the key has been reused.
479 */
480 (void) lttng_ht_add_replace_ulong(consumer_data.channel_ht, &channel->node);
481 rcu_read_unlock();
482 pthread_mutex_unlock(&consumer_data.lock);
483
484 return 0;
485 }
486
487 /*
488 * Allocate the pollfd structure and the local view of the out fds to avoid
489 * doing a lookup in the linked list and concurrency issues when writing is
490 * needed. Called with consumer_data.lock held.
491 *
492 * Returns the number of fds in the structures.
493 */
494 int consumer_update_poll_array(
495 struct lttng_consumer_local_data *ctx, struct pollfd **pollfd,
496 struct lttng_consumer_stream **local_stream)
497 {
498 int i = 0;
499 struct lttng_ht_iter iter;
500 struct lttng_consumer_stream *stream;
501
502 DBG("Updating poll fd array");
503 cds_lfht_for_each_entry(consumer_data.stream_ht->ht, &iter.iter, stream,
504 node.node) {
505 if (stream->state != LTTNG_CONSUMER_ACTIVE_STREAM) {
506 continue;
507 }
508 DBG("Active FD %d", stream->wait_fd);
509 (*pollfd)[i].fd = stream->wait_fd;
510 (*pollfd)[i].events = POLLIN | POLLPRI;
511 local_stream[i] = stream;
512 i++;
513 }
514
515 /*
516 * Insert the consumer_poll_pipe at the end of the array and don't
517 * increment i so nb_fd is the number of real FD.
518 */
519 (*pollfd)[i].fd = ctx->consumer_poll_pipe[0];
520 (*pollfd)[i].events = POLLIN;
521 return i;
522 }
523
/*
 * Poll on the should_quit pipe and the command socket.
 *
 * Returns -1 on error or if the thread should exit, 0 if data is
 * available on the command socket.
 */
int lttng_consumer_poll_socket(struct pollfd *consumer_sockpoll)
{
	int num_rdy;

restart:
	num_rdy = poll(consumer_sockpoll, 2, -1);
	if (num_rdy == -1) {
		/*
		 * Restart interrupted system call.
		 */
		if (errno == EINTR) {
			goto restart;
		}
		perror("Poll error");
		goto exit;
	}
	/*
	 * Fix: test the POLLIN bit instead of comparing revents for
	 * equality. poll() may set additional bits (e.g. POLLHUP on the
	 * pipe) alongside POLLIN, which the "==" test would silently miss,
	 * delaying the quit request.
	 */
	if (consumer_sockpoll[0].revents & POLLIN) {
		DBG("consumer_should_quit wake up");
		goto exit;
	}
	return 0;

exit:
	return -1;
}
553
/*
 * Set the error socket used by lttng_consumer_send_error() to report
 * consumer status back to the session daemon.
 */
void lttng_consumer_set_error_sock(
		struct lttng_consumer_local_data *ctx, int sock)
{
	ctx->consumer_error_socket = sock;
}
562
/*
 * Set the command socket path.
 *
 * NOTE(review): only the pointer is stored, not a copy — the caller
 * must keep "sock" alive for the lifetime of the context; confirm.
 */

void lttng_consumer_set_command_sock_path(
		struct lttng_consumer_local_data *ctx, char *sock)
{
	ctx->consumer_command_sock_path = sock;
}
572
573 /*
574 * Send return code to the session daemon.
575 * If the socket is not defined, we return 0, it is not a fatal error
576 */
577 int lttng_consumer_send_error(
578 struct lttng_consumer_local_data *ctx, int cmd)
579 {
580 if (ctx->consumer_error_socket > 0) {
581 return lttcomm_send_unix_sock(ctx->consumer_error_socket, &cmd,
582 sizeof(enum lttcomm_sessiond_command));
583 }
584
585 return 0;
586 }
587
/*
 * Close all the tracefiles and stream fds, should be called when all instances
 * are destroyed. Tears down every remaining stream, then every channel.
 */
void lttng_consumer_cleanup(void)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_ulong *node;

	rcu_read_lock();

	/*
	 * close all outfd. Called when there are no more threads running (after
	 * joining on the threads), no need to protect list iteration with mutex.
	 *
	 * NOTE(review): entries are deleted while iterating the lock-free
	 * hash table; presumably safe for this RCU hash table
	 * implementation — confirm against the urcu/rculfhash contract.
	 */
	cds_lfht_for_each_entry(consumer_data.stream_ht->ht, &iter.iter, node,
			node) {
		struct lttng_consumer_stream *stream =
			caa_container_of(node, struct lttng_consumer_stream, node);
		consumer_del_stream(stream);
	}

	cds_lfht_for_each_entry(consumer_data.channel_ht->ht, &iter.iter, node,
			node) {
		struct lttng_consumer_channel *channel =
			caa_container_of(node, struct lttng_consumer_channel, node);
		consumer_del_channel(channel);
	}

	rcu_read_unlock();
}
619
/*
 * Called from signal handler: set the global quit flag and wake the
 * receive-fds thread through its should_quit pipe.
 *
 * NOTE(review): perror() is not async-signal-safe (write() is) —
 * confirm this is acceptable here.
 */
void lttng_consumer_should_exit(struct lttng_consumer_local_data *ctx)
{
	int ret;
	consumer_quit = 1;
	/* The byte written is only a wakeup token; its value is unused. */
	ret = write(ctx->consumer_should_quit[1], "4", 1);
	if (ret < 0) {
		perror("write consumer quit");
	}
}
632
/*
 * Hint the kernel to write back and drop from the page cache the pages
 * of the sub-buffer preceding the one just written, bounding tracefile
 * page-cache usage. No-op until at least one full sub-buffer exists.
 */
void lttng_consumer_sync_trace_file(
		struct lttng_consumer_stream *stream, off_t orig_offset)
{
	int outfd = stream->out_fd;

	/*
	 * This does a blocking write-and-wait on any page that belongs to the
	 * subbuffer prior to the one we just wrote.
	 * Don't care about error values, as these are just hints and ways to
	 * limit the amount of page cache used.
	 */
	if (orig_offset < stream->chan->max_sb_size) {
		return;
	}
	sync_file_range(outfd, orig_offset - stream->chan->max_sb_size,
			stream->chan->max_sb_size,
			SYNC_FILE_RANGE_WAIT_BEFORE
			| SYNC_FILE_RANGE_WRITE
			| SYNC_FILE_RANGE_WAIT_AFTER);
	/*
	 * Give hints to the kernel about how we access the file:
	 * POSIX_FADV_DONTNEED : we won't re-access data in a near future after
	 * we write it.
	 *
	 * We need to call fadvise again after the file grows because the
	 * kernel does not seem to apply fadvise to non-existing parts of the
	 * file.
	 *
	 * Call fadvise _after_ having waited for the page writeback to
	 * complete because the dirty page writeback semantic is not well
	 * defined. So it can be expected to lead to lower throughput in
	 * streaming.
	 */
	posix_fadvise(outfd, orig_offset - stream->chan->max_sb_size,
			stream->chan->max_sb_size, POSIX_FADV_DONTNEED);
}
669
/*
 * Initialise the necessary environnement :
 * - create a new context
 * - create the poll_pipe
 * - create the should_quit pipe (for signal handler)
 * - create the thread pipe (for splice)
 *
 * Takes a function pointer as argument, this function is called when data is
 * available on a buffer. This function is responsible to do the
 * kernctl_get_next_subbuf, read the data with mmap or splice depending on the
 * buffer configuration and then kernctl_put_next_subbuf at the end.
 *
 * Returns a pointer to the new context or NULL on error.
 */
struct lttng_consumer_local_data *lttng_consumer_create(
		enum lttng_consumer_type type,
		ssize_t (*buffer_ready)(struct lttng_consumer_stream *stream,
			struct lttng_consumer_local_data *ctx),
		int (*recv_channel)(struct lttng_consumer_channel *channel),
		int (*recv_stream)(struct lttng_consumer_stream *stream),
		int (*update_stream)(int stream_key, uint32_t state))
{
	int ret, i;
	struct lttng_consumer_local_data *ctx;

	/* The consumer type is set once; it may not change afterwards. */
	assert(consumer_data.type == LTTNG_CONSUMER_UNKNOWN ||
			consumer_data.type == type);
	consumer_data.type = type;

	ctx = zmalloc(sizeof(struct lttng_consumer_local_data));
	if (ctx == NULL) {
		perror("allocating context");
		goto error;
	}

	/* No error socket until lttng_consumer_set_error_sock() is called. */
	ctx->consumer_error_socket = -1;
	/* assign the callbacks */
	ctx->on_buffer_ready = buffer_ready;
	ctx->on_recv_channel = recv_channel;
	ctx->on_recv_stream = recv_stream;
	ctx->on_update_stream = update_stream;

	ret = pipe(ctx->consumer_poll_pipe);
	if (ret < 0) {
		perror("Error creating poll pipe");
		goto error_poll_pipe;
	}

	ret = pipe(ctx->consumer_should_quit);
	if (ret < 0) {
		perror("Error creating recv pipe");
		goto error_quit_pipe;
	}

	ret = pipe(ctx->consumer_thread_pipe);
	if (ret < 0) {
		perror("Error creating thread pipe");
		goto error_thread_pipe;
	}

	return ctx;


	/* Unwind in reverse order of acquisition. */
error_thread_pipe:
	for (i = 0; i < 2; i++) {
		int err;

		err = close(ctx->consumer_should_quit[i]);
		if (err) {
			PERROR("close");
		}
	}
error_quit_pipe:
	for (i = 0; i < 2; i++) {
		int err;

		err = close(ctx->consumer_poll_pipe[i]);
		if (err) {
			PERROR("close");
		}
	}
error_poll_pipe:
	free(ctx);
error:
	return NULL;
}
756
757 /*
758 * Close all fds associated with the instance and free the context.
759 */
760 void lttng_consumer_destroy(struct lttng_consumer_local_data *ctx)
761 {
762 int ret;
763
764 ret = close(ctx->consumer_error_socket);
765 if (ret) {
766 PERROR("close");
767 }
768 ret = close(ctx->consumer_thread_pipe[0]);
769 if (ret) {
770 PERROR("close");
771 }
772 ret = close(ctx->consumer_thread_pipe[1]);
773 if (ret) {
774 PERROR("close");
775 }
776 ret = close(ctx->consumer_poll_pipe[0]);
777 if (ret) {
778 PERROR("close");
779 }
780 ret = close(ctx->consumer_poll_pipe[1]);
781 if (ret) {
782 PERROR("close");
783 }
784 ret = close(ctx->consumer_should_quit[0]);
785 if (ret) {
786 PERROR("close");
787 }
788 ret = close(ctx->consumer_should_quit[1]);
789 if (ret) {
790 PERROR("close");
791 }
792 unlink(ctx->consumer_command_sock_path);
793 free(ctx);
794 }
795
796 /*
797 * Mmap the ring buffer, read it and write the data to the tracefile.
798 *
799 * Returns the number of bytes written
800 */
801 ssize_t lttng_consumer_on_read_subbuffer_mmap(
802 struct lttng_consumer_local_data *ctx,
803 struct lttng_consumer_stream *stream, unsigned long len)
804 {
805 switch (consumer_data.type) {
806 case LTTNG_CONSUMER_KERNEL:
807 return lttng_kconsumer_on_read_subbuffer_mmap(ctx, stream, len);
808 case LTTNG_CONSUMER32_UST:
809 case LTTNG_CONSUMER64_UST:
810 return lttng_ustconsumer_on_read_subbuffer_mmap(ctx, stream, len);
811 default:
812 ERR("Unknown consumer_data type");
813 assert(0);
814 }
815 }
816
817 /*
818 * Splice the data from the ring buffer to the tracefile.
819 *
820 * Returns the number of bytes spliced.
821 */
822 ssize_t lttng_consumer_on_read_subbuffer_splice(
823 struct lttng_consumer_local_data *ctx,
824 struct lttng_consumer_stream *stream, unsigned long len)
825 {
826 switch (consumer_data.type) {
827 case LTTNG_CONSUMER_KERNEL:
828 return lttng_kconsumer_on_read_subbuffer_splice(ctx, stream, len);
829 case LTTNG_CONSUMER32_UST:
830 case LTTNG_CONSUMER64_UST:
831 return -ENOSYS;
832 default:
833 ERR("Unknown consumer_data type");
834 assert(0);
835 return -ENOSYS;
836 }
837
838 }
839
840 /*
841 * Take a snapshot for a specific fd
842 *
843 * Returns 0 on success, < 0 on error
844 */
845 int lttng_consumer_take_snapshot(struct lttng_consumer_local_data *ctx,
846 struct lttng_consumer_stream *stream)
847 {
848 switch (consumer_data.type) {
849 case LTTNG_CONSUMER_KERNEL:
850 return lttng_kconsumer_take_snapshot(ctx, stream);
851 case LTTNG_CONSUMER32_UST:
852 case LTTNG_CONSUMER64_UST:
853 return lttng_ustconsumer_take_snapshot(ctx, stream);
854 default:
855 ERR("Unknown consumer_data type");
856 assert(0);
857 return -ENOSYS;
858 }
859
860 }
861
862 /*
863 * Get the produced position
864 *
865 * Returns 0 on success, < 0 on error
866 */
867 int lttng_consumer_get_produced_snapshot(
868 struct lttng_consumer_local_data *ctx,
869 struct lttng_consumer_stream *stream,
870 unsigned long *pos)
871 {
872 switch (consumer_data.type) {
873 case LTTNG_CONSUMER_KERNEL:
874 return lttng_kconsumer_get_produced_snapshot(ctx, stream, pos);
875 case LTTNG_CONSUMER32_UST:
876 case LTTNG_CONSUMER64_UST:
877 return lttng_ustconsumer_get_produced_snapshot(ctx, stream, pos);
878 default:
879 ERR("Unknown consumer_data type");
880 assert(0);
881 return -ENOSYS;
882 }
883 }
884
885 int lttng_consumer_recv_cmd(struct lttng_consumer_local_data *ctx,
886 int sock, struct pollfd *consumer_sockpoll)
887 {
888 switch (consumer_data.type) {
889 case LTTNG_CONSUMER_KERNEL:
890 return lttng_kconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
891 case LTTNG_CONSUMER32_UST:
892 case LTTNG_CONSUMER64_UST:
893 return lttng_ustconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
894 default:
895 ERR("Unknown consumer_data type");
896 assert(0);
897 return -ENOSYS;
898 }
899 }
900
/*
 * This thread polls the fds in the set to consume the data and write
 * it to tracefile if necessary. It rebuilds its local pollfd/stream
 * arrays whenever consumer_data.need_update is set, services urgent
 * (POLLPRI) data first, then normal (POLLIN) data, and finally reaps
 * hung-up streams.
 */
void *lttng_consumer_thread_poll_fds(void *data)
{
	int num_rdy, num_hup, high_prio, ret, i;
	struct pollfd *pollfd = NULL;
	/* local view of the streams */
	struct lttng_consumer_stream **local_stream = NULL;
	/* local view of consumer_data.fds_count */
	int nb_fd = 0;
	char tmp;
	int tmp2;
	struct lttng_consumer_local_data *ctx = data;

	rcu_register_thread();

	/*
	 * NOTE(review): this initial allocation is sized to one full
	 * stream struct although local_stream holds pointers; it is
	 * freed and replaced on the first need_update pass below, so
	 * this looks oversized rather than undersized — confirm intent.
	 */
	local_stream = zmalloc(sizeof(struct lttng_consumer_stream));

	while (1) {
		high_prio = 0;
		num_hup = 0;

		/*
		 * the fds set has been updated, we need to update our
		 * local array as well
		 */
		pthread_mutex_lock(&consumer_data.lock);
		if (consumer_data.need_update) {
			if (pollfd != NULL) {
				free(pollfd);
				pollfd = NULL;
			}
			if (local_stream != NULL) {
				free(local_stream);
				local_stream = NULL;
			}

			/* allocate for all fds + 1 for the consumer_poll_pipe */
			pollfd = zmalloc((consumer_data.stream_count + 1) * sizeof(struct pollfd));
			if (pollfd == NULL) {
				perror("pollfd malloc");
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}

			/* allocate for all fds + 1 for the consumer_poll_pipe */
			local_stream = zmalloc((consumer_data.stream_count + 1) *
					sizeof(struct lttng_consumer_stream));
			if (local_stream == NULL) {
				perror("local_stream malloc");
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}
			ret = consumer_update_poll_array(ctx, &pollfd, local_stream);
			if (ret < 0) {
				ERR("Error in allocating pollfd or local_outfds");
				lttng_consumer_send_error(ctx, CONSUMERD_POLL_ERROR);
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}
			nb_fd = ret;
			consumer_data.need_update = 0;
		}
		pthread_mutex_unlock(&consumer_data.lock);

		/* No FDs and consumer_quit, consumer_cleanup the thread */
		if (nb_fd == 0 && consumer_quit == 1) {
			goto end;
		}
		/* poll on the array of fds */
	restart:
		DBG("polling on %d fd", nb_fd + 1);
		/* +1: the consumer_poll_pipe entry appended past the streams. */
		num_rdy = poll(pollfd, nb_fd + 1, consumer_poll_timeout);
		DBG("poll num_rdy : %d", num_rdy);
		if (num_rdy == -1) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			perror("Poll error");
			lttng_consumer_send_error(ctx, CONSUMERD_POLL_ERROR);
			goto end;
		} else if (num_rdy == 0) {
			DBG("Polling thread timed out");
			goto end;
		}

		/*
		 * If the consumer_poll_pipe triggered poll go
		 * directly to the beginning of the loop to update the
		 * array. We want to prioritize array update over
		 * low-priority reads.
		 */
		if (pollfd[nb_fd].revents & POLLIN) {
			DBG("consumer_poll_pipe wake up");
			/* Drain the wakeup token; its value is unused. */
			tmp2 = read(ctx->consumer_poll_pipe[0], &tmp, 1);
			if (tmp2 < 0) {
				perror("read consumer poll");
			}
			continue;
		}

		/* Take care of high priority channels first. */
		for (i = 0; i < nb_fd; i++) {
			if (pollfd[i].revents & POLLPRI) {
				ssize_t len;

				DBG("Urgent read on fd %d", pollfd[i].fd);
				high_prio = 1;
				len = ctx->on_buffer_ready(local_stream[i], ctx);
				/* it's ok to have an unavailable sub-buffer */
				if (len < 0 && len != -EAGAIN) {
					goto end;
				} else if (len > 0) {
					local_stream[i]->data_read = 1;
				}
			}
		}

		/*
		 * If we read high prio channel in this loop, try again
		 * for more high prio data.
		 */
		if (high_prio) {
			continue;
		}

		/* Take care of low priority channels. */
		for (i = 0; i < nb_fd; i++) {
			if ((pollfd[i].revents & POLLIN) ||
					local_stream[i]->hangup_flush_done) {
				ssize_t len;

				assert(!(pollfd[i].revents & POLLERR));
				assert(!(pollfd[i].revents & POLLNVAL));
				DBG("Normal read on fd %d", pollfd[i].fd);
				len = ctx->on_buffer_ready(local_stream[i], ctx);
				/* it's ok to have an unavailable sub-buffer */
				if (len < 0 && len != -EAGAIN) {
					goto end;
				} else if (len > 0) {
					local_stream[i]->data_read = 1;
				}
			}
		}

		/* Handle hangup and errors */
		for (i = 0; i < nb_fd; i++) {
			if (!local_stream[i]->hangup_flush_done
					&& (pollfd[i].revents & (POLLHUP | POLLERR | POLLNVAL))
					&& (consumer_data.type == LTTNG_CONSUMER32_UST
						|| consumer_data.type == LTTNG_CONSUMER64_UST)) {
				DBG("fd %d is hup|err|nval. Attempting flush and read.",
						pollfd[i].fd);
				lttng_ustconsumer_on_stream_hangup(local_stream[i]);
				/* Attempt read again, for the data we just flushed. */
				local_stream[i]->data_read = 1;
			}
			/*
			 * If the poll flag is HUP/ERR/NVAL and we have
			 * read no data in this pass, we can remove the
			 * stream from its hash table.
			 */
			if ((pollfd[i].revents & POLLHUP)) {
				DBG("Polling fd %d tells it has hung up.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i]);
					num_hup++;
				}
			} else if (pollfd[i].revents & POLLERR) {
				ERR("Error returned in polling fd %d.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i]);
					num_hup++;
				}
			} else if (pollfd[i].revents & POLLNVAL) {
				ERR("Polling fd %d tells fd is not open.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i]);
					num_hup++;
				}
			}
			/* Reset for the next poll iteration. */
			local_stream[i]->data_read = 0;
		}
	}
end:
	DBG("polling thread exiting");
	if (pollfd != NULL) {
		free(pollfd);
		pollfd = NULL;
	}
	if (local_stream != NULL) {
		free(local_stream);
		local_stream = NULL;
	}
	rcu_unregister_thread();
	return NULL;
}
1103
/*
 * This thread listens on the consumerd socket and receives the file
 * descriptors from the session daemon. On exit (error, STOP command or
 * quit flag) it arms the poll-thread grace timeout and wakes it up.
 */
void *lttng_consumer_thread_receive_fds(void *data)
{
	int sock, client_socket, ret;
	/*
	 * structure to poll for incoming data on communication socket avoids
	 * making blocking sockets.
	 */
	struct pollfd consumer_sockpoll[2];
	struct lttng_consumer_local_data *ctx = data;

	rcu_register_thread();

	DBG("Creating command socket %s", ctx->consumer_command_sock_path);
	/* Remove any stale socket file before creating a fresh one. */
	unlink(ctx->consumer_command_sock_path);
	client_socket = lttcomm_create_unix_sock(ctx->consumer_command_sock_path);
	if (client_socket < 0) {
		ERR("Cannot create command socket");
		goto end;
	}

	ret = lttcomm_listen_unix_sock(client_socket);
	if (ret < 0) {
		goto end;
	}

	DBG("Sending ready command to lttng-sessiond");
	ret = lttng_consumer_send_error(ctx, CONSUMERD_COMMAND_SOCK_READY);
	/* return < 0 on error, but == 0 is not fatal */
	if (ret < 0) {
		ERR("Error sending ready command to lttng-sessiond");
		goto end;
	}

	ret = fcntl(client_socket, F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		perror("fcntl O_NONBLOCK");
		goto end;
	}

	/* prepare the FDs to poll : to client socket and the should_quit pipe */
	consumer_sockpoll[0].fd = ctx->consumer_should_quit[0];
	consumer_sockpoll[0].events = POLLIN | POLLPRI;
	consumer_sockpoll[1].fd = client_socket;
	consumer_sockpoll[1].events = POLLIN | POLLPRI;

	if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
		goto end;
	}
	DBG("Connection on client_socket");

	/* Blocking call, waiting for transmission */
	sock = lttcomm_accept_unix_sock(client_socket);
	if (sock <= 0) {
		WARN("On accept");
		goto end;
	}
	ret = fcntl(sock, F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		perror("fcntl O_NONBLOCK");
		goto end;
	}

	/* update the polling structure to poll on the established socket */
	consumer_sockpoll[1].fd = sock;
	consumer_sockpoll[1].events = POLLIN | POLLPRI;

	/* Command loop: one lttng_consumer_recv_cmd() per wakeup. */
	while (1) {
		if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
			goto end;
		}
		DBG("Incoming command on sock");
		ret = lttng_consumer_recv_cmd(ctx, sock, consumer_sockpoll);
		if (ret == -ENOENT) {
			DBG("Received STOP command");
			goto end;
		}
		if (ret < 0) {
			ERR("Communication interrupted on command socket");
			goto end;
		}
		if (consumer_quit) {
			DBG("consumer_thread_receive_fds received quit from signal");
			goto end;
		}
		DBG("received fds on sock");
	}
end:
	DBG("consumer_thread_receive_fds exiting");

	/*
	 * when all fds have hung up, the polling thread
	 * can exit cleanly
	 */
	consumer_quit = 1;

	/*
	 * 2s of grace period, if no polling events occur during
	 * this period, the polling thread will exit even if there
	 * are still open FDs (should not happen, but safety mechanism).
	 */
	consumer_poll_timeout = LTTNG_CONSUMER_POLL_TIMEOUT;

	/* wake up the polling thread */
	ret = write(ctx->consumer_poll_pipe[1], "4", 1);
	if (ret < 0) {
		perror("poll pipe write");
	}
	rcu_unregister_thread();
	return NULL;
}
1218
1219 ssize_t lttng_consumer_read_subbuffer(struct lttng_consumer_stream *stream,
1220 struct lttng_consumer_local_data *ctx)
1221 {
1222 switch (consumer_data.type) {
1223 case LTTNG_CONSUMER_KERNEL:
1224 return lttng_kconsumer_read_subbuffer(stream, ctx);
1225 case LTTNG_CONSUMER32_UST:
1226 case LTTNG_CONSUMER64_UST:
1227 return lttng_ustconsumer_read_subbuffer(stream, ctx);
1228 default:
1229 ERR("Unknown consumer_data type");
1230 assert(0);
1231 return -ENOSYS;
1232 }
1233 }
1234
1235 int lttng_consumer_on_recv_stream(struct lttng_consumer_stream *stream)
1236 {
1237 switch (consumer_data.type) {
1238 case LTTNG_CONSUMER_KERNEL:
1239 return lttng_kconsumer_on_recv_stream(stream);
1240 case LTTNG_CONSUMER32_UST:
1241 case LTTNG_CONSUMER64_UST:
1242 return lttng_ustconsumer_on_recv_stream(stream);
1243 default:
1244 ERR("Unknown consumer_data type");
1245 assert(0);
1246 return -ENOSYS;
1247 }
1248 }
1249
/*
 * Allocate and set consumer data hash tables (streams and channels),
 * both keyed by unsigned long. Must run before any lookup/add above.
 */
void lttng_consumer_init(void)
{
	consumer_data.stream_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	consumer_data.channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
}
1258
This page took 0.089878 seconds and 4 git commands to generate.