Fix: Possible buffer overflows in strncat() usage
[lttng-tools.git] / src / common / consumer.c
1 /*
2 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 * 2012 - David Goulet <dgoulet@efficios.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 */
19
20 #define _GNU_SOURCE
21 #include <assert.h>
22 #include <poll.h>
23 #include <pthread.h>
24 #include <stdlib.h>
25 #include <string.h>
26 #include <sys/mman.h>
27 #include <sys/socket.h>
28 #include <sys/types.h>
29 #include <unistd.h>
30 #include <inttypes.h>
31
32 #include <common/common.h>
33 #include <common/kernel-ctl/kernel-ctl.h>
34 #include <common/sessiond-comm/relayd.h>
35 #include <common/sessiond-comm/sessiond-comm.h>
36 #include <common/kernel-consumer/kernel-consumer.h>
37 #include <common/relayd/relayd.h>
38 #include <common/ust-consumer/ust-consumer.h>
39
40 #include "consumer.h"
41
/*
 * Global consumer state shared by all consumer threads, protected by
 * consumer_data.lock. need_update starts at 1 so the poll array is built on
 * the first pass; the consumer type is resolved by lttng_consumer_create().
 */
struct lttng_consumer_global_data consumer_data = {
	.stream_count = 0,
	.need_update = 1,
	.type = LTTNG_CONSUMER_UNKNOWN,
};
47
48 /* timeout parameter, to control the polling thread grace period. */
49 int consumer_poll_timeout = -1;
50
51 /*
52 * Flag to inform the polling thread to quit when all fd hung up. Updated by
53 * the consumer_thread_receive_fds when it notices that all fds has hung up.
54 * Also updated by the signal handler (consumer_should_exit()). Read by the
55 * polling threads.
56 */
57 volatile int consumer_quit = 0;
58
59 /*
60 * Find a stream. The consumer_data.lock must be locked during this
61 * call.
62 */
63 static struct lttng_consumer_stream *consumer_find_stream(int key)
64 {
65 struct lttng_ht_iter iter;
66 struct lttng_ht_node_ulong *node;
67 struct lttng_consumer_stream *stream = NULL;
68
69 /* Negative keys are lookup failures */
70 if (key < 0)
71 return NULL;
72
73 rcu_read_lock();
74
75 lttng_ht_lookup(consumer_data.stream_ht, (void *)((unsigned long) key),
76 &iter);
77 node = lttng_ht_iter_get_node_ulong(&iter);
78 if (node != NULL) {
79 stream = caa_container_of(node, struct lttng_consumer_stream, node);
80 }
81
82 rcu_read_unlock();
83
84 return stream;
85 }
86
87 static void consumer_steal_stream_key(int key)
88 {
89 struct lttng_consumer_stream *stream;
90
91 rcu_read_lock();
92 stream = consumer_find_stream(key);
93 if (stream) {
94 stream->key = -1;
95 /*
96 * We don't want the lookup to match, but we still need
97 * to iterate on this stream when iterating over the hash table. Just
98 * change the node key.
99 */
100 stream->node.key = -1;
101 }
102 rcu_read_unlock();
103 }
104
105 static struct lttng_consumer_channel *consumer_find_channel(int key)
106 {
107 struct lttng_ht_iter iter;
108 struct lttng_ht_node_ulong *node;
109 struct lttng_consumer_channel *channel = NULL;
110
111 /* Negative keys are lookup failures */
112 if (key < 0)
113 return NULL;
114
115 rcu_read_lock();
116
117 lttng_ht_lookup(consumer_data.channel_ht, (void *)((unsigned long) key),
118 &iter);
119 node = lttng_ht_iter_get_node_ulong(&iter);
120 if (node != NULL) {
121 channel = caa_container_of(node, struct lttng_consumer_channel, node);
122 }
123
124 rcu_read_unlock();
125
126 return channel;
127 }
128
129 static void consumer_steal_channel_key(int key)
130 {
131 struct lttng_consumer_channel *channel;
132
133 rcu_read_lock();
134 channel = consumer_find_channel(key);
135 if (channel) {
136 channel->key = -1;
137 /*
138 * We don't want the lookup to match, but we still need
139 * to iterate on this channel when iterating over the hash table. Just
140 * change the node key.
141 */
142 channel->node.key = -1;
143 }
144 rcu_read_unlock();
145 }
146
147 static
148 void consumer_free_stream(struct rcu_head *head)
149 {
150 struct lttng_ht_node_ulong *node =
151 caa_container_of(head, struct lttng_ht_node_ulong, head);
152 struct lttng_consumer_stream *stream =
153 caa_container_of(node, struct lttng_consumer_stream, node);
154
155 free(stream);
156 }
157
158 /*
159 * RCU protected relayd socket pair free.
160 */
161 static void consumer_rcu_free_relayd(struct rcu_head *head)
162 {
163 struct lttng_ht_node_ulong *node =
164 caa_container_of(head, struct lttng_ht_node_ulong, head);
165 struct consumer_relayd_sock_pair *relayd =
166 caa_container_of(node, struct consumer_relayd_sock_pair, node);
167
168 free(relayd);
169 }
170
171 /*
172 * Destroy and free relayd socket pair object.
173 *
174 * This function MUST be called with the consumer_data lock acquired.
175 */
176 void consumer_destroy_relayd(struct consumer_relayd_sock_pair *relayd)
177 {
178 int ret;
179 struct lttng_ht_iter iter;
180
181 if (relayd == NULL) {
182 return;
183 }
184
185 DBG("Consumer destroy and close relayd socket pair");
186
187 iter.iter.node = &relayd->node.node;
188 ret = lttng_ht_del(consumer_data.relayd_ht, &iter);
189 if (ret != 0) {
190 /* We assume the relayd was already destroyed */
191 return;
192 }
193
194 /* Close all sockets */
195 pthread_mutex_lock(&relayd->ctrl_sock_mutex);
196 (void) relayd_close(&relayd->control_sock);
197 pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
198 (void) relayd_close(&relayd->data_sock);
199
200 /* RCU free() call */
201 call_rcu(&relayd->node.head, consumer_rcu_free_relayd);
202 }
203
/*
 * Remove a stream from the global list protected by a mutex. This
 * function is also responsible for freeing its data structures.
 */
void consumer_del_stream(struct lttng_consumer_stream *stream)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *free_chan = NULL;
	struct consumer_relayd_sock_pair *relayd;

	assert(stream);

	pthread_mutex_lock(&consumer_data.lock);

	/* Release tracer-specific buffer resources first. */
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		if (stream->mmap_base != NULL) {
			ret = munmap(stream->mmap_base, stream->mmap_len);
			if (ret != 0) {
				perror("munmap");
			}
		}
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustconsumer_del_stream(stream);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		goto end;
	}

	/* Unlink the stream from the global hash table. */
	rcu_read_lock();
	iter.iter.node = &stream->node.node;
	ret = lttng_ht_del(consumer_data.stream_ht, &iter);
	assert(!ret);

	rcu_read_unlock();

	if (consumer_data.stream_count <= 0) {
		goto end;
	}
	consumer_data.stream_count--;
	if (!stream) {
		goto end;
	}
	/* Close the fds this stream owns: trace output, wait fd and shm fd. */
	if (stream->out_fd >= 0) {
		ret = close(stream->out_fd);
		if (ret) {
			PERROR("close");
		}
	}
	if (stream->wait_fd >= 0 && !stream->wait_fd_is_copy) {
		ret = close(stream->wait_fd);
		if (ret) {
			PERROR("close");
		}
	}
	/* wait_fd and shm_fd may alias; guard against a double close. */
	if (stream->shm_fd >= 0 && stream->wait_fd != stream->shm_fd) {
		ret = close(stream->shm_fd);
		if (ret) {
			PERROR("close");
		}
	}

	/* Check and cleanup relayd */
	rcu_read_lock();
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd != NULL) {
		/* Drop the reference taken by consumer_add_stream(). */
		uatomic_dec(&relayd->refcount);
		assert(uatomic_read(&relayd->refcount) >= 0);

		/* Closing streams requires to lock the control socket. */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_send_close_stream(&relayd->control_sock,
				stream->relayd_stream_id,
				stream->next_net_seq_num - 1);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			DBG("Unable to close stream on the relayd. Continuing");
			/*
			 * Continue here. There is nothing we can do for the relayd.
			 * Chances are that the relayd has closed the socket so we just
			 * continue cleaning up.
			 */
		}

		/* Both conditions are met, we destroy the relayd. */
		if (uatomic_read(&relayd->refcount) == 0 &&
				uatomic_read(&relayd->destroy_flag)) {
			consumer_destroy_relayd(relayd);
		}
	}
	rcu_read_unlock();

	/* Drop the channel reference; reap the channel if this was its last stream. */
	if (!--stream->chan->refcount) {
		free_chan = stream->chan;
	}


	call_rcu(&stream->node.head, consumer_free_stream);
end:
	consumer_data.need_update = 1;
	pthread_mutex_unlock(&consumer_data.lock);

	/* consumer_del_channel() re-takes consumer_data.lock: call it unlocked. */
	if (free_chan)
		consumer_del_channel(free_chan);
}
314
315 struct lttng_consumer_stream *consumer_allocate_stream(
316 int channel_key, int stream_key,
317 int shm_fd, int wait_fd,
318 enum lttng_consumer_stream_state state,
319 uint64_t mmap_len,
320 enum lttng_event_output output,
321 const char *path_name,
322 uid_t uid,
323 gid_t gid,
324 int net_index,
325 int metadata_flag)
326 {
327 struct lttng_consumer_stream *stream;
328 int ret;
329
330 stream = zmalloc(sizeof(*stream));
331 if (stream == NULL) {
332 perror("malloc struct lttng_consumer_stream");
333 goto end;
334 }
335 stream->chan = consumer_find_channel(channel_key);
336 if (!stream->chan) {
337 perror("Unable to find channel key");
338 goto end;
339 }
340 stream->chan->refcount++;
341 stream->key = stream_key;
342 stream->shm_fd = shm_fd;
343 stream->wait_fd = wait_fd;
344 stream->out_fd = -1;
345 stream->out_fd_offset = 0;
346 stream->state = state;
347 stream->mmap_len = mmap_len;
348 stream->mmap_base = NULL;
349 stream->output = output;
350 stream->uid = uid;
351 stream->gid = gid;
352 stream->net_seq_idx = net_index;
353 stream->metadata_flag = metadata_flag;
354 strncpy(stream->path_name, path_name, sizeof(stream->path_name));
355 stream->path_name[sizeof(stream->path_name) - 1] = '\0';
356 lttng_ht_node_init_ulong(&stream->node, stream->key);
357 lttng_ht_node_init_ulong(&stream->waitfd_node, stream->wait_fd);
358
359 switch (consumer_data.type) {
360 case LTTNG_CONSUMER_KERNEL:
361 break;
362 case LTTNG_CONSUMER32_UST:
363 case LTTNG_CONSUMER64_UST:
364 stream->cpu = stream->chan->cpucount++;
365 ret = lttng_ustconsumer_allocate_stream(stream);
366 if (ret) {
367 free(stream);
368 return NULL;
369 }
370 break;
371 default:
372 ERR("Unknown consumer_data type");
373 assert(0);
374 goto end;
375 }
376 DBG("Allocated stream %s (key %d, shm_fd %d, wait_fd %d, mmap_len %llu, out_fd %d, net_seq_idx %d)",
377 stream->path_name, stream->key,
378 stream->shm_fd,
379 stream->wait_fd,
380 (unsigned long long) stream->mmap_len,
381 stream->out_fd,
382 stream->net_seq_idx);
383 end:
384 return stream;
385 }
386
/*
 * Add a stream to the global list protected by a mutex.
 *
 * Returns 0, including when an identically-keyed stream already exists
 * (the insertion is then silently skipped).
 */
int consumer_add_stream(struct lttng_consumer_stream *stream)
{
	int ret = 0;
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;
	struct consumer_relayd_sock_pair *relayd;

	pthread_mutex_lock(&consumer_data.lock);
	/* Steal stream identifier, for UST */
	consumer_steal_stream_key(stream->key);

	rcu_read_lock();
	lttng_ht_lookup(consumer_data.stream_ht,
			(void *)((unsigned long) stream->key), &iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node != NULL) {
		rcu_read_unlock();
		/* Stream already exist. Ignore the insertion */
		goto end;
	}

	lttng_ht_add_unique_ulong(consumer_data.stream_ht, &stream->node);

	/*
	 * Network stream: take a reference on its relayd pair, released by
	 * consumer_del_stream().
	 */
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd != NULL) {
		uatomic_inc(&relayd->refcount);
	}
	rcu_read_unlock();

	/* Update consumer data */
	consumer_data.stream_count++;
	consumer_data.need_update = 1;

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		/* Streams are in CPU number order (we rely on this) */
		stream->cpu = stream->chan->nr_streams++;
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		goto end;
	}

end:
	pthread_mutex_unlock(&consumer_data.lock);

	return ret;
}
443
444 /*
445 * Add relayd socket to global consumer data hashtable. RCU read side lock MUST
446 * be acquired before calling this.
447 */
448
449 int consumer_add_relayd(struct consumer_relayd_sock_pair *relayd)
450 {
451 int ret = 0;
452 struct lttng_ht_node_ulong *node;
453 struct lttng_ht_iter iter;
454
455 if (relayd == NULL) {
456 ret = -1;
457 goto end;
458 }
459
460 lttng_ht_lookup(consumer_data.relayd_ht,
461 (void *)((unsigned long) relayd->net_seq_idx), &iter);
462 node = lttng_ht_iter_get_node_ulong(&iter);
463 if (node != NULL) {
464 /* Relayd already exist. Ignore the insertion */
465 goto end;
466 }
467 lttng_ht_add_unique_ulong(consumer_data.relayd_ht, &relayd->node);
468
469 end:
470 return ret;
471 }
472
473 /*
474 * Allocate and return a consumer relayd socket.
475 */
476 struct consumer_relayd_sock_pair *consumer_allocate_relayd_sock_pair(
477 int net_seq_idx)
478 {
479 struct consumer_relayd_sock_pair *obj = NULL;
480
481 /* Negative net sequence index is a failure */
482 if (net_seq_idx < 0) {
483 goto error;
484 }
485
486 obj = zmalloc(sizeof(struct consumer_relayd_sock_pair));
487 if (obj == NULL) {
488 PERROR("zmalloc relayd sock");
489 goto error;
490 }
491
492 obj->net_seq_idx = net_seq_idx;
493 obj->refcount = 0;
494 obj->destroy_flag = 0;
495 lttng_ht_node_init_ulong(&obj->node, obj->net_seq_idx);
496 pthread_mutex_init(&obj->ctrl_sock_mutex, NULL);
497
498 error:
499 return obj;
500 }
501
502 /*
503 * Find a relayd socket pair in the global consumer data.
504 *
505 * Return the object if found else NULL.
506 * RCU read-side lock must be held across this call and while using the
507 * returned object.
508 */
509 struct consumer_relayd_sock_pair *consumer_find_relayd(int key)
510 {
511 struct lttng_ht_iter iter;
512 struct lttng_ht_node_ulong *node;
513 struct consumer_relayd_sock_pair *relayd = NULL;
514
515 /* Negative keys are lookup failures */
516 if (key < 0) {
517 goto error;
518 }
519
520 lttng_ht_lookup(consumer_data.relayd_ht, (void *)((unsigned long) key),
521 &iter);
522 node = lttng_ht_iter_get_node_ulong(&iter);
523 if (node != NULL) {
524 relayd = caa_container_of(node, struct consumer_relayd_sock_pair, node);
525 }
526
527 error:
528 return relayd;
529 }
530
/*
 * Handle stream for relayd transmission if the stream applies for network
 * streaming where the net sequence index is set.
 *
 * Metadata is announced on the control socket (which the caller must have
 * locked) and goes out on the control socket fd. Trace data gets a data
 * header (stream id, payload size, next net sequence number, all big
 * endian) sent on the data socket and goes out on the data socket fd.
 *
 * Return destination file descriptor or negative value on error.
 */
static int write_relayd_stream_header(struct lttng_consumer_stream *stream,
		size_t data_size, struct consumer_relayd_sock_pair *relayd)
{
	int outfd = -1, ret;
	struct lttcomm_relayd_data_hdr data_hdr;

	/* Safety net */
	assert(stream);
	assert(relayd);

	/* Reset data header */
	memset(&data_hdr, 0, sizeof(data_hdr));

	if (stream->metadata_flag) {
		/* Caller MUST acquire the relayd control socket lock */
		ret = relayd_send_metadata(&relayd->control_sock, data_size);
		if (ret < 0) {
			goto error;
		}

		/* Metadata are always sent on the control socket. */
		outfd = relayd->control_sock.fd;
	} else {
		/* Set header with stream information */
		data_hdr.stream_id = htobe64(stream->relayd_stream_id);
		data_hdr.data_size = htobe32(data_size);
		/* The sequence number is consumed here, even if the later send fails. */
		data_hdr.net_seq_num = htobe64(stream->next_net_seq_num++);
		/* Other fields are zeroed previously */

		ret = relayd_send_data_hdr(&relayd->data_sock, &data_hdr,
				sizeof(data_hdr));
		if (ret < 0) {
			goto error;
		}

		/* Set to go on data socket */
		outfd = relayd->data_sock.fd;
	}

error:
	return outfd;
}
579
580 /*
581 * Update a stream according to what we just received.
582 */
583 void consumer_change_stream_state(int stream_key,
584 enum lttng_consumer_stream_state state)
585 {
586 struct lttng_consumer_stream *stream;
587
588 pthread_mutex_lock(&consumer_data.lock);
589 stream = consumer_find_stream(stream_key);
590 if (stream) {
591 stream->state = state;
592 }
593 consumer_data.need_update = 1;
594 pthread_mutex_unlock(&consumer_data.lock);
595 }
596
597 static
598 void consumer_free_channel(struct rcu_head *head)
599 {
600 struct lttng_ht_node_ulong *node =
601 caa_container_of(head, struct lttng_ht_node_ulong, head);
602 struct lttng_consumer_channel *channel =
603 caa_container_of(node, struct lttng_consumer_channel, node);
604
605 free(channel);
606 }
607
/*
 * Remove a channel from the global list protected by a mutex. This
 * function is also responsible for freeing its data structures.
 */
void consumer_del_channel(struct lttng_consumer_channel *channel)
{
	int ret;
	struct lttng_ht_iter iter;

	pthread_mutex_lock(&consumer_data.lock);

	/* Release tracer-specific channel resources first. */
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustconsumer_del_channel(channel);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		goto end;
	}

	/* Unlink the channel from the global hash table. */
	rcu_read_lock();
	iter.iter.node = &channel->node.node;
	ret = lttng_ht_del(consumer_data.channel_ht, &iter);
	assert(!ret);
	rcu_read_unlock();

	if (channel->mmap_base != NULL) {
		ret = munmap(channel->mmap_base, channel->mmap_len);
		if (ret != 0) {
			perror("munmap");
		}
	}
	if (channel->wait_fd >= 0 && !channel->wait_fd_is_copy) {
		ret = close(channel->wait_fd);
		if (ret) {
			PERROR("close");
		}
	}
	/* wait_fd and shm_fd may alias; guard against a double close. */
	if (channel->shm_fd >= 0 && channel->wait_fd != channel->shm_fd) {
		ret = close(channel->shm_fd);
		if (ret) {
			PERROR("close");
		}
	}

	/* Defer the actual free until all RCU readers are done. */
	call_rcu(&channel->node.head, consumer_free_channel);
end:
	pthread_mutex_unlock(&consumer_data.lock);
}
661
662 struct lttng_consumer_channel *consumer_allocate_channel(
663 int channel_key,
664 int shm_fd, int wait_fd,
665 uint64_t mmap_len,
666 uint64_t max_sb_size)
667 {
668 struct lttng_consumer_channel *channel;
669 int ret;
670
671 channel = zmalloc(sizeof(*channel));
672 if (channel == NULL) {
673 perror("malloc struct lttng_consumer_channel");
674 goto end;
675 }
676 channel->key = channel_key;
677 channel->shm_fd = shm_fd;
678 channel->wait_fd = wait_fd;
679 channel->mmap_len = mmap_len;
680 channel->max_sb_size = max_sb_size;
681 channel->refcount = 0;
682 channel->nr_streams = 0;
683 lttng_ht_node_init_ulong(&channel->node, channel->key);
684
685 switch (consumer_data.type) {
686 case LTTNG_CONSUMER_KERNEL:
687 channel->mmap_base = NULL;
688 channel->mmap_len = 0;
689 break;
690 case LTTNG_CONSUMER32_UST:
691 case LTTNG_CONSUMER64_UST:
692 ret = lttng_ustconsumer_allocate_channel(channel);
693 if (ret) {
694 free(channel);
695 return NULL;
696 }
697 break;
698 default:
699 ERR("Unknown consumer_data type");
700 assert(0);
701 goto end;
702 }
703 DBG("Allocated channel (key %d, shm_fd %d, wait_fd %d, mmap_len %llu, max_sb_size %llu)",
704 channel->key, channel->shm_fd, channel->wait_fd,
705 (unsigned long long) channel->mmap_len,
706 (unsigned long long) channel->max_sb_size);
707 end:
708 return channel;
709 }
710
711 /*
712 * Add a channel to the global list protected by a mutex.
713 */
714 int consumer_add_channel(struct lttng_consumer_channel *channel)
715 {
716 struct lttng_ht_node_ulong *node;
717 struct lttng_ht_iter iter;
718
719 pthread_mutex_lock(&consumer_data.lock);
720 /* Steal channel identifier, for UST */
721 consumer_steal_channel_key(channel->key);
722 rcu_read_lock();
723
724 lttng_ht_lookup(consumer_data.channel_ht,
725 (void *)((unsigned long) channel->key), &iter);
726 node = lttng_ht_iter_get_node_ulong(&iter);
727 if (node != NULL) {
728 /* Channel already exist. Ignore the insertion */
729 goto end;
730 }
731
732 lttng_ht_add_unique_ulong(consumer_data.channel_ht, &channel->node);
733
734 end:
735 rcu_read_unlock();
736 pthread_mutex_unlock(&consumer_data.lock);
737
738 return 0;
739 }
740
/*
 * Allocate the pollfd structure and the local view of the out fds to avoid
 * doing a lookup in the linked list and concurrency issues when writing is
 * needed. Called with consumer_data.lock held.
 *
 * Both *pollfd and local_stream are caller-allocated arrays; *pollfd must
 * have room for one extra slot holding the consumer poll pipe.
 *
 * Returns the number of fds in the structures.
 */
int consumer_update_poll_array(
		struct lttng_consumer_local_data *ctx, struct pollfd **pollfd,
		struct lttng_consumer_stream **local_stream,
		struct lttng_ht *metadata_ht)
{
	int i = 0;
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Updating poll fd array");
	rcu_read_lock();
	cds_lfht_for_each_entry(consumer_data.stream_ht->ht, &iter.iter, stream,
			node.node) {
		/* Only active streams are polled. */
		if (stream->state != LTTNG_CONSUMER_ACTIVE_STREAM) {
			continue;
		}
		DBG("Active FD %d", stream->wait_fd);
		(*pollfd)[i].fd = stream->wait_fd;
		(*pollfd)[i].events = POLLIN | POLLPRI;
		/* Metadata streams are additionally indexed by their wait fd. */
		if (stream->metadata_flag && metadata_ht) {
			lttng_ht_add_unique_ulong(metadata_ht, &stream->waitfd_node);
			DBG("Active FD added to metadata hash table");
		}
		local_stream[i] = stream;
		i++;
	}
	rcu_read_unlock();

	/*
	 * Insert the consumer_poll_pipe at the end of the array and don't
	 * increment i so nb_fd is the number of real FD.
	 */
	(*pollfd)[i].fd = ctx->consumer_poll_pipe[0];
	(*pollfd)[i].events = POLLIN | POLLPRI;
	return i;
}
784
/*
 * Poll on the should_quit pipe and the command socket return -1 on error and
 * should exit, 0 if data is available on the command socket
 */
int lttng_consumer_poll_socket(struct pollfd *consumer_sockpoll)
{
	int num_rdy;

	for (;;) {
		num_rdy = poll(consumer_sockpoll, 2, -1);
		if (num_rdy >= 0) {
			break;
		}
		/* Restart interrupted system call. */
		if (errno == EINTR) {
			continue;
		}
		perror("Poll error");
		return -1;
	}

	/* Activity on the quit pipe has priority over the command socket. */
	if (consumer_sockpoll[0].revents & (POLLIN | POLLPRI)) {
		DBG("consumer_should_quit wake up");
		return -1;
	}

	return 0;
}
814
/*
 * Set the error socket.
 *
 * This is the socket lttng_consumer_send_error() uses to report error codes
 * back to the session daemon.
 */
void lttng_consumer_set_error_sock(
		struct lttng_consumer_local_data *ctx, int sock)
{
	ctx->consumer_error_socket = sock;
}
823
/*
 * Set the command socket path.
 *
 * Only the pointer is stored (no copy is made): the string must outlive the
 * context. It is later passed to unlink() by lttng_consumer_destroy().
 */
void lttng_consumer_set_command_sock_path(
		struct lttng_consumer_local_data *ctx, char *sock)
{
	ctx->consumer_command_sock_path = sock;
}
832
833 /*
834 * Send return code to the session daemon.
835 * If the socket is not defined, we return 0, it is not a fatal error
836 */
837 int lttng_consumer_send_error(
838 struct lttng_consumer_local_data *ctx, int cmd)
839 {
840 if (ctx->consumer_error_socket > 0) {
841 return lttcomm_send_unix_sock(ctx->consumer_error_socket, &cmd,
842 sizeof(enum lttcomm_sessiond_command));
843 }
844
845 return 0;
846 }
847
/*
 * Close all the tracefiles and stream fds, should be called when all instances
 * are destroyed.
 */
void lttng_consumer_cleanup(void)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_ulong *node;

	rcu_read_lock();

	/*
	 * close all outfd. Called when there are no more threads running (after
	 * joining on the threads), no need to protect list iteration with mutex.
	 */
	cds_lfht_for_each_entry(consumer_data.stream_ht->ht, &iter.iter, node,
			node) {
		struct lttng_consumer_stream *stream =
			caa_container_of(node, struct lttng_consumer_stream, node);
		/* Also removes the stream's node from the table being iterated. */
		consumer_del_stream(stream);
	}

	cds_lfht_for_each_entry(consumer_data.channel_ht->ht, &iter.iter, node,
			node) {
		struct lttng_consumer_channel *channel =
			caa_container_of(node, struct lttng_consumer_channel, node);
		/* Also removes the channel's node from the table being iterated. */
		consumer_del_channel(channel);
	}

	rcu_read_unlock();

	/* Every entry was removed above; tear down the now-empty tables. */
	lttng_ht_destroy(consumer_data.stream_ht);
	lttng_ht_destroy(consumer_data.channel_ht);
}
882
883 /*
884 * Called from signal handler.
885 */
886 void lttng_consumer_should_exit(struct lttng_consumer_local_data *ctx)
887 {
888 int ret;
889 consumer_quit = 1;
890 do {
891 ret = write(ctx->consumer_should_quit[1], "4", 1);
892 } while (ret < 0 && errno == EINTR);
893 if (ret < 0) {
894 perror("write consumer quit");
895 }
896 }
897
/*
 * Hint the kernel about the just-completed sub-buffer: write out and wait
 * on the previous sub-buffer's pages, then drop them from the page cache.
 * Purely advisory; return values of the hints are deliberately ignored.
 */
void lttng_consumer_sync_trace_file(struct lttng_consumer_stream *stream,
		off_t orig_offset)
{
	int outfd = stream->out_fd;

	/*
	 * This does a blocking write-and-wait on any page that belongs to the
	 * subbuffer prior to the one we just wrote.
	 * Don't care about error values, as these are just hints and ways to
	 * limit the amount of page cache used.
	 */
	if (orig_offset < stream->chan->max_sb_size) {
		/* No previous sub-buffer exists yet; nothing to sync. */
		return;
	}
	lttng_sync_file_range(outfd, orig_offset - stream->chan->max_sb_size,
			stream->chan->max_sb_size,
			SYNC_FILE_RANGE_WAIT_BEFORE
			| SYNC_FILE_RANGE_WRITE
			| SYNC_FILE_RANGE_WAIT_AFTER);
	/*
	 * Give hints to the kernel about how we access the file:
	 * POSIX_FADV_DONTNEED : we won't re-access data in a near future after
	 * we write it.
	 *
	 * We need to call fadvise again after the file grows because the
	 * kernel does not seem to apply fadvise to non-existing parts of the
	 * file.
	 *
	 * Call fadvise _after_ having waited for the page writeback to
	 * complete because the dirty page writeback semantic is not well
	 * defined. So it can be expected to lead to lower throughput in
	 * streaming.
	 */
	posix_fadvise(outfd, orig_offset - stream->chan->max_sb_size,
			stream->chan->max_sb_size, POSIX_FADV_DONTNEED);
}
934
/*
 * Initialise the necessary environnement :
 * - create a new context
 * - create the poll_pipe
 * - create the should_quit pipe (for signal handler)
 * - create the thread pipe (for splice)
 *
 * Takes a function pointer as argument, this function is called when data is
 * available on a buffer. This function is responsible to do the
 * kernctl_get_next_subbuf, read the data with mmap or splice depending on the
 * buffer configuration and then kernctl_put_next_subbuf at the end.
 *
 * Returns a pointer to the new context or NULL on error.
 */
struct lttng_consumer_local_data *lttng_consumer_create(
		enum lttng_consumer_type type,
		ssize_t (*buffer_ready)(struct lttng_consumer_stream *stream,
			struct lttng_consumer_local_data *ctx),
		int (*recv_channel)(struct lttng_consumer_channel *channel),
		int (*recv_stream)(struct lttng_consumer_stream *stream),
		int (*update_stream)(int stream_key, uint32_t state))
{
	int ret, i;
	struct lttng_consumer_local_data *ctx;

	/* The global consumer type is set once; later contexts must agree. */
	assert(consumer_data.type == LTTNG_CONSUMER_UNKNOWN ||
			consumer_data.type == type);
	consumer_data.type = type;

	ctx = zmalloc(sizeof(struct lttng_consumer_local_data));
	if (ctx == NULL) {
		perror("allocating context");
		goto error;
	}

	ctx->consumer_error_socket = -1;
	/* assign the callbacks */
	ctx->on_buffer_ready = buffer_ready;
	ctx->on_recv_channel = recv_channel;
	ctx->on_recv_stream = recv_stream;
	ctx->on_update_stream = update_stream;

	ret = pipe(ctx->consumer_poll_pipe);
	if (ret < 0) {
		perror("Error creating poll pipe");
		goto error_poll_pipe;
	}

	/* set read end of the pipe to non-blocking */
	ret = fcntl(ctx->consumer_poll_pipe[0], F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		perror("fcntl O_NONBLOCK");
		goto error_poll_fcntl;
	}

	/* set write end of the pipe to non-blocking */
	ret = fcntl(ctx->consumer_poll_pipe[1], F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		perror("fcntl O_NONBLOCK");
		goto error_poll_fcntl;
	}

	ret = pipe(ctx->consumer_should_quit);
	if (ret < 0) {
		perror("Error creating recv pipe");
		goto error_quit_pipe;
	}

	ret = pipe(ctx->consumer_thread_pipe);
	if (ret < 0) {
		perror("Error creating thread pipe");
		goto error_thread_pipe;
	}

	return ctx;


	/*
	 * Unwind in reverse acquisition order; each label intentionally falls
	 * through to release the resources acquired before it.
	 */
error_thread_pipe:
	for (i = 0; i < 2; i++) {
		int err;

		err = close(ctx->consumer_should_quit[i]);
		if (err) {
			PERROR("close");
		}
	}
error_poll_fcntl:
error_quit_pipe:
	for (i = 0; i < 2; i++) {
		int err;

		err = close(ctx->consumer_poll_pipe[i]);
		if (err) {
			PERROR("close");
		}
	}
error_poll_pipe:
	free(ctx);
error:
	return NULL;
}
1036
1037 /*
1038 * Close all fds associated with the instance and free the context.
1039 */
1040 void lttng_consumer_destroy(struct lttng_consumer_local_data *ctx)
1041 {
1042 int ret;
1043
1044 ret = close(ctx->consumer_error_socket);
1045 if (ret) {
1046 PERROR("close");
1047 }
1048 ret = close(ctx->consumer_thread_pipe[0]);
1049 if (ret) {
1050 PERROR("close");
1051 }
1052 ret = close(ctx->consumer_thread_pipe[1]);
1053 if (ret) {
1054 PERROR("close");
1055 }
1056 ret = close(ctx->consumer_poll_pipe[0]);
1057 if (ret) {
1058 PERROR("close");
1059 }
1060 ret = close(ctx->consumer_poll_pipe[1]);
1061 if (ret) {
1062 PERROR("close");
1063 }
1064 ret = close(ctx->consumer_should_quit[0]);
1065 if (ret) {
1066 PERROR("close");
1067 }
1068 ret = close(ctx->consumer_should_quit[1]);
1069 if (ret) {
1070 PERROR("close");
1071 }
1072 unlink(ctx->consumer_command_sock_path);
1073 free(ctx);
1074 }
1075
/*
 * Write the metadata stream id on the specified file descriptor.
 *
 * Returns the write() result: positive byte count on success, negative on
 * error.
 */
static int write_relayd_metadata_id(int fd,
		struct lttng_consumer_stream *stream,
		struct consumer_relayd_sock_pair *relayd)
{
	int ret;
	uint64_t metadata_id;

	/* Big endian, like the other relayd protocol fields in this file. */
	metadata_id = htobe64(stream->relayd_stream_id);
	/*
	 * Retry on EINTR only. NOTE(review): a short write (ret > 0 but less
	 * than 8 bytes) is not detected here — presumably acceptable for this
	 * fd; verify against the callers.
	 */
	do {
		ret = write(fd, (void *) &metadata_id,
				sizeof(stream->relayd_stream_id));
	} while (ret < 0 && errno == EINTR);
	if (ret < 0) {
		PERROR("write metadata stream id");
		goto end;
	}
	DBG("Metadata stream id %" PRIu64 " written before data",
			stream->relayd_stream_id);

end:
	return ret;
}
1101
/*
 * Mmap the ring buffer, read it and write the data to the tracefile. This is a
 * core function for writing trace buffers to either the local filesystem or
 * the network.
 *
 * Careful review MUST be put if any changes occur!
 *
 * Returns the number of bytes written
 */
ssize_t lttng_consumer_on_read_subbuffer_mmap(
		struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream, unsigned long len)
{
	unsigned long mmap_offset;
	ssize_t ret = 0, written = 0;
	off_t orig_offset = stream->out_fd_offset;
	/* Default is on the disk */
	int outfd = stream->out_fd;
	struct consumer_relayd_sock_pair *relayd = NULL;

	/* RCU lock for the relayd pointer */
	rcu_read_lock();

	/* Flag that the current stream if set for network streaming. */
	if (stream->net_seq_idx != -1) {
		relayd = consumer_find_relayd(stream->net_seq_idx);
		if (relayd == NULL) {
			/* Network stream without its relayd: nothing written. */
			goto end;
		}
	}

	/* get the offset inside the fd to mmap */
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		ret = kernctl_get_mmap_read_offset(stream->wait_fd, &mmap_offset);
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		ret = lttng_ustctl_get_mmap_read_offset(stream->chan->handle,
				stream->buf, &mmap_offset);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}
	if (ret != 0) {
		/* Tracer ctl calls return negative errno values. */
		errno = -ret;
		PERROR("tracer ctl get_mmap_read_offset");
		written = ret;
		goto end;
	}

	/* Handle stream on the relayd if the output is on the network */
	if (relayd) {
		unsigned long netlen = len;

		/*
		 * Lock the control socket for the complete duration of the function
		 * since from this point on we will use the socket.
		 */
		if (stream->metadata_flag) {
			/* Metadata requires the control socket. */
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);
			/* Announced size includes the stream id prefix below. */
			netlen += sizeof(stream->relayd_stream_id);
		}

		ret = write_relayd_stream_header(stream, netlen, relayd);
		if (ret >= 0) {
			/* Use the returned socket. */
			outfd = ret;

			/* Write metadata stream id before payload */
			if (stream->metadata_flag) {
				ret = write_relayd_metadata_id(outfd, stream, relayd);
				if (ret < 0) {
					written = ret;
					goto end;
				}
			}
		}
		/* Else, use the default set before which is the filesystem. */
	}

	/* Drain the sub-buffer, handling partial writes. */
	while (len > 0) {
		do {
			ret = write(outfd, stream->mmap_base + mmap_offset, len);
		} while (ret < 0 && errno == EINTR);
		if (ret < 0) {
			PERROR("Error in file write");
			/* Report the error only if nothing was written yet. */
			if (written == 0) {
				written = ret;
			}
			goto end;
		} else if (ret > len) {
			PERROR("Error in file write (ret %zd > len %lu)", ret, len);
			written += ret;
			goto end;
		} else {
			len -= ret;
			mmap_offset += ret;
		}
		DBG("Consumer mmap write() ret %zd (len %lu)", ret, len);

		/* This call is useless on a socket so better save a syscall. */
		if (!relayd) {
			/* This won't block, but will start writeout asynchronously */
			lttng_sync_file_range(outfd, stream->out_fd_offset, ret,
					SYNC_FILE_RANGE_WRITE);
			stream->out_fd_offset += ret;
		}
		written += ret;
	}
	lttng_consumer_sync_trace_file(stream, orig_offset);

end:
	/* Unlock only if ctrl socket used */
	if (relayd && stream->metadata_flag) {
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
	}

	rcu_read_unlock();
	return written;
}
1225
/*
 * Splice the data from the ring buffer to the tracefile.
 *
 * Kernel tracing only: user space tracing has no splice support and gets
 * -ENOSYS. Data travels in two hops: ring buffer -> consumer_thread_pipe ->
 * output fd (local file or relayd socket).
 *
 * Returns the number of bytes spliced, or a negative value on error.
 */
ssize_t lttng_consumer_on_read_subbuffer_splice(
		struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream, unsigned long len)
{
	ssize_t ret = 0, written = 0, ret_splice = 0;
	loff_t offset = 0;
	off_t orig_offset = stream->out_fd_offset;
	int fd = stream->wait_fd;
	/* Default is on the disk */
	int outfd = stream->out_fd;
	struct consumer_relayd_sock_pair *relayd = NULL;

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		/* Not supported for user space tracing */
		return -ENOSYS;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}

	/* RCU lock for the relayd pointer */
	rcu_read_lock();

	/* Flag that the current stream if set for network streaming. */
	if (stream->net_seq_idx != -1) {
		relayd = consumer_find_relayd(stream->net_seq_idx);
		if (relayd == NULL) {
			/* Relayd object is gone; nowhere to send the data. */
			goto end;
		}
	}

	/* Write metadata stream id before payload */
	if (stream->metadata_flag && relayd) {
		/*
		 * Lock the control socket for the complete duration of the function
		 * since from this point on we will use the socket. The matching
		 * unlock is performed at the "end" label below.
		 */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);

		/*
		 * The id is written into the splice pipe so it travels in front of
		 * the metadata payload on the wire.
		 */
		ret = write_relayd_metadata_id(ctx->consumer_thread_pipe[1],
				stream, relayd);
		if (ret < 0) {
			written = ret;
			goto end;
		}
	}

	while (len > 0) {
		DBG("splice chan to pipe offset %lu of len %lu (fd : %d)",
				(unsigned long)offset, len, fd);
		/* First hop: ring buffer -> intermediate pipe. */
		ret_splice = splice(fd, &offset, ctx->consumer_thread_pipe[1], NULL, len,
				SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("splice chan to pipe, ret %zd", ret_splice);
		if (ret_splice < 0) {
			PERROR("Error in relay splice");
			/* Report the error only if nothing was written so far. */
			if (written == 0) {
				written = ret_splice;
			}
			ret = errno;
			goto splice_error;
		}

		/* Handle stream on the relayd if the output is on the network */
		if (relayd) {
			if (stream->metadata_flag) {
				/* Update counter to fit the spliced data */
				ret_splice += sizeof(stream->relayd_stream_id);
				len += sizeof(stream->relayd_stream_id);
				/*
				 * We do this so the return value can match the len passed as
				 * argument to this function.
				 */
				written -= sizeof(stream->relayd_stream_id);
			}

			/* Protocol header announcing ret_splice bytes of payload. */
			ret = write_relayd_stream_header(stream, ret_splice, relayd);
			if (ret >= 0) {
				/* Use the returned socket. */
				outfd = ret;
			} else {
				/*
				 * NOTE(review): on this path `written` is returned as-is,
				 * without recording the failure — confirm callers detect
				 * the disconnect by another means.
				 */
				ERR("Remote relayd disconnected. Stopping");
				goto end;
			}
		}

		/* Splice data out: second hop, pipe -> file or socket. */
		ret_splice = splice(ctx->consumer_thread_pipe[0], NULL, outfd, NULL,
				ret_splice, SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("Kernel consumer splice pipe to file, ret %zd", ret_splice);
		if (ret_splice < 0) {
			PERROR("Error in file splice");
			/* Report the error only if nothing was written so far. */
			if (written == 0) {
				written = ret_splice;
			}
			ret = errno;
			goto splice_error;
		} else if (ret_splice > len) {
			/* Should never happen; treat as an invalid-argument error. */
			errno = EINVAL;
			PERROR("Wrote more data than requested %zd (len: %lu)",
					ret_splice, len);
			written += ret_splice;
			ret = errno;
			goto splice_error;
		}
		len -= ret_splice;

		/* This call is useless on a socket so better save a syscall. */
		if (!relayd) {
			/* This won't block, but will start writeout asynchronously */
			lttng_sync_file_range(outfd, stream->out_fd_offset, ret_splice,
					SYNC_FILE_RANGE_WRITE);
			stream->out_fd_offset += ret_splice;
		}
		written += ret_splice;
	}
	lttng_consumer_sync_trace_file(stream, orig_offset);

	ret = ret_splice;

	goto end;

splice_error:
	/* send the appropriate error description to sessiond */
	switch (ret) {
	case EBADF:
		lttng_consumer_send_error(ctx, CONSUMERD_SPLICE_EBADF);
		break;
	case EINVAL:
		lttng_consumer_send_error(ctx, CONSUMERD_SPLICE_EINVAL);
		break;
	case ENOMEM:
		lttng_consumer_send_error(ctx, CONSUMERD_SPLICE_ENOMEM);
		break;
	case ESPIPE:
		lttng_consumer_send_error(ctx, CONSUMERD_SPLICE_ESPIPE);
		break;
	}

end:
	/* Unlock only if the ctrl socket was used (metadata over the network). */
	if (relayd && stream->metadata_flag) {
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
	}

	rcu_read_unlock();
	return written;
}
1381
1382 /*
1383 * Take a snapshot for a specific fd
1384 *
1385 * Returns 0 on success, < 0 on error
1386 */
1387 int lttng_consumer_take_snapshot(struct lttng_consumer_local_data *ctx,
1388 struct lttng_consumer_stream *stream)
1389 {
1390 switch (consumer_data.type) {
1391 case LTTNG_CONSUMER_KERNEL:
1392 return lttng_kconsumer_take_snapshot(ctx, stream);
1393 case LTTNG_CONSUMER32_UST:
1394 case LTTNG_CONSUMER64_UST:
1395 return lttng_ustconsumer_take_snapshot(ctx, stream);
1396 default:
1397 ERR("Unknown consumer_data type");
1398 assert(0);
1399 return -ENOSYS;
1400 }
1401
1402 }
1403
1404 /*
1405 * Get the produced position
1406 *
1407 * Returns 0 on success, < 0 on error
1408 */
1409 int lttng_consumer_get_produced_snapshot(
1410 struct lttng_consumer_local_data *ctx,
1411 struct lttng_consumer_stream *stream,
1412 unsigned long *pos)
1413 {
1414 switch (consumer_data.type) {
1415 case LTTNG_CONSUMER_KERNEL:
1416 return lttng_kconsumer_get_produced_snapshot(ctx, stream, pos);
1417 case LTTNG_CONSUMER32_UST:
1418 case LTTNG_CONSUMER64_UST:
1419 return lttng_ustconsumer_get_produced_snapshot(ctx, stream, pos);
1420 default:
1421 ERR("Unknown consumer_data type");
1422 assert(0);
1423 return -ENOSYS;
1424 }
1425 }
1426
1427 int lttng_consumer_recv_cmd(struct lttng_consumer_local_data *ctx,
1428 int sock, struct pollfd *consumer_sockpoll)
1429 {
1430 switch (consumer_data.type) {
1431 case LTTNG_CONSUMER_KERNEL:
1432 return lttng_kconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
1433 case LTTNG_CONSUMER32_UST:
1434 case LTTNG_CONSUMER64_UST:
1435 return lttng_ustconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
1436 default:
1437 ERR("Unknown consumer_data type");
1438 assert(0);
1439 return -ENOSYS;
1440 }
1441 }
1442
1443 /*
1444 * This thread polls the fds in the set to consume the data and write
1445 * it to tracefile if necessary.
1446 */
1447 void *lttng_consumer_thread_poll_fds(void *data)
1448 {
1449 int num_rdy, num_hup, high_prio, ret, i;
1450 struct pollfd *pollfd = NULL;
1451 /* local view of the streams */
1452 struct lttng_consumer_stream **local_stream = NULL;
1453 /* local view of consumer_data.fds_count */
1454 int nb_fd = 0;
1455 struct lttng_consumer_local_data *ctx = data;
1456 struct lttng_ht *metadata_ht;
1457 struct lttng_ht_iter iter;
1458 struct lttng_ht_node_ulong *node;
1459 struct lttng_consumer_stream *metadata_stream;
1460 ssize_t len;
1461
1462 metadata_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
1463
1464 rcu_register_thread();
1465
1466 local_stream = zmalloc(sizeof(struct lttng_consumer_stream));
1467
1468 while (1) {
1469 high_prio = 0;
1470 num_hup = 0;
1471
1472 /*
1473 * the fds set has been updated, we need to update our
1474 * local array as well
1475 */
1476 pthread_mutex_lock(&consumer_data.lock);
1477 if (consumer_data.need_update) {
1478 if (pollfd != NULL) {
1479 free(pollfd);
1480 pollfd = NULL;
1481 }
1482 if (local_stream != NULL) {
1483 free(local_stream);
1484 local_stream = NULL;
1485 }
1486
1487 /* allocate for all fds + 1 for the consumer_poll_pipe */
1488 pollfd = zmalloc((consumer_data.stream_count + 1) * sizeof(struct pollfd));
1489 if (pollfd == NULL) {
1490 perror("pollfd malloc");
1491 pthread_mutex_unlock(&consumer_data.lock);
1492 goto end;
1493 }
1494
1495 /* allocate for all fds + 1 for the consumer_poll_pipe */
1496 local_stream = zmalloc((consumer_data.stream_count + 1) *
1497 sizeof(struct lttng_consumer_stream));
1498 if (local_stream == NULL) {
1499 perror("local_stream malloc");
1500 pthread_mutex_unlock(&consumer_data.lock);
1501 goto end;
1502 }
1503 ret = consumer_update_poll_array(ctx, &pollfd, local_stream,
1504 metadata_ht);
1505 if (ret < 0) {
1506 ERR("Error in allocating pollfd or local_outfds");
1507 lttng_consumer_send_error(ctx, CONSUMERD_POLL_ERROR);
1508 pthread_mutex_unlock(&consumer_data.lock);
1509 goto end;
1510 }
1511 nb_fd = ret;
1512 consumer_data.need_update = 0;
1513 }
1514 pthread_mutex_unlock(&consumer_data.lock);
1515
1516 /* No FDs and consumer_quit, consumer_cleanup the thread */
1517 if (nb_fd == 0 && consumer_quit == 1) {
1518 goto end;
1519 }
1520 /* poll on the array of fds */
1521 restart:
1522 DBG("polling on %d fd", nb_fd + 1);
1523 num_rdy = poll(pollfd, nb_fd + 1, consumer_poll_timeout);
1524 DBG("poll num_rdy : %d", num_rdy);
1525 if (num_rdy == -1) {
1526 /*
1527 * Restart interrupted system call.
1528 */
1529 if (errno == EINTR) {
1530 goto restart;
1531 }
1532 perror("Poll error");
1533 lttng_consumer_send_error(ctx, CONSUMERD_POLL_ERROR);
1534 goto end;
1535 } else if (num_rdy == 0) {
1536 DBG("Polling thread timed out");
1537 goto end;
1538 }
1539
1540 /*
1541 * If the consumer_poll_pipe triggered poll go directly to the
1542 * beginning of the loop to update the array. We want to prioritize
1543 * array update over low-priority reads.
1544 */
1545 if (pollfd[nb_fd].revents & (POLLIN | POLLPRI)) {
1546 size_t pipe_readlen;
1547 char tmp;
1548
1549 DBG("consumer_poll_pipe wake up");
1550 /* Consume 1 byte of pipe data */
1551 do {
1552 pipe_readlen = read(ctx->consumer_poll_pipe[0], &tmp, 1);
1553 } while (pipe_readlen == -1 && errno == EINTR);
1554 continue;
1555 }
1556
1557 /* Take care of high priority channels first. */
1558 for (i = 0; i < nb_fd; i++) {
1559 /* Lookup for metadata which is the highest priority */
1560 lttng_ht_lookup(metadata_ht,
1561 (void *)((unsigned long) pollfd[i].fd), &iter);
1562 node = lttng_ht_iter_get_node_ulong(&iter);
1563 if (node != NULL &&
1564 (pollfd[i].revents & (POLLIN | POLLPRI))) {
1565 DBG("Urgent metadata read on fd %d", pollfd[i].fd);
1566 metadata_stream = caa_container_of(node,
1567 struct lttng_consumer_stream, waitfd_node);
1568 high_prio = 1;
1569 len = ctx->on_buffer_ready(metadata_stream, ctx);
1570 /* it's ok to have an unavailable sub-buffer */
1571 if (len < 0 && len != -EAGAIN) {
1572 goto end;
1573 } else if (len > 0) {
1574 metadata_stream->data_read = 1;
1575 }
1576 } else if (pollfd[i].revents & POLLPRI) {
1577 DBG("Urgent read on fd %d", pollfd[i].fd);
1578 high_prio = 1;
1579 len = ctx->on_buffer_ready(local_stream[i], ctx);
1580 /* it's ok to have an unavailable sub-buffer */
1581 if (len < 0 && len != -EAGAIN) {
1582 goto end;
1583 } else if (len > 0) {
1584 local_stream[i]->data_read = 1;
1585 }
1586 }
1587 }
1588
1589 /*
1590 * If we read high prio channel in this loop, try again
1591 * for more high prio data.
1592 */
1593 if (high_prio) {
1594 continue;
1595 }
1596
1597 /* Take care of low priority channels. */
1598 for (i = 0; i < nb_fd; i++) {
1599 if ((pollfd[i].revents & POLLIN) ||
1600 local_stream[i]->hangup_flush_done) {
1601 DBG("Normal read on fd %d", pollfd[i].fd);
1602 len = ctx->on_buffer_ready(local_stream[i], ctx);
1603 /* it's ok to have an unavailable sub-buffer */
1604 if (len < 0 && len != -EAGAIN) {
1605 goto end;
1606 } else if (len > 0) {
1607 local_stream[i]->data_read = 1;
1608 }
1609 }
1610 }
1611
1612 /* Handle hangup and errors */
1613 for (i = 0; i < nb_fd; i++) {
1614 if (!local_stream[i]->hangup_flush_done
1615 && (pollfd[i].revents & (POLLHUP | POLLERR | POLLNVAL))
1616 && (consumer_data.type == LTTNG_CONSUMER32_UST
1617 || consumer_data.type == LTTNG_CONSUMER64_UST)) {
1618 DBG("fd %d is hup|err|nval. Attempting flush and read.",
1619 pollfd[i].fd);
1620 lttng_ustconsumer_on_stream_hangup(local_stream[i]);
1621 /* Attempt read again, for the data we just flushed. */
1622 local_stream[i]->data_read = 1;
1623 }
1624 /*
1625 * If the poll flag is HUP/ERR/NVAL and we have
1626 * read no data in this pass, we can remove the
1627 * stream from its hash table.
1628 */
1629 if ((pollfd[i].revents & POLLHUP)) {
1630 DBG("Polling fd %d tells it has hung up.", pollfd[i].fd);
1631 if (!local_stream[i]->data_read) {
1632 if (local_stream[i]->metadata_flag) {
1633 iter.iter.node = &local_stream[i]->waitfd_node.node;
1634 ret = lttng_ht_del(metadata_ht, &iter);
1635 assert(!ret);
1636 }
1637 consumer_del_stream(local_stream[i]);
1638 num_hup++;
1639 }
1640 } else if (pollfd[i].revents & POLLERR) {
1641 ERR("Error returned in polling fd %d.", pollfd[i].fd);
1642 if (!local_stream[i]->data_read) {
1643 if (local_stream[i]->metadata_flag) {
1644 iter.iter.node = &local_stream[i]->waitfd_node.node;
1645 ret = lttng_ht_del(metadata_ht, &iter);
1646 assert(!ret);
1647 }
1648 consumer_del_stream(local_stream[i]);
1649 num_hup++;
1650 }
1651 } else if (pollfd[i].revents & POLLNVAL) {
1652 ERR("Polling fd %d tells fd is not open.", pollfd[i].fd);
1653 if (!local_stream[i]->data_read) {
1654 if (local_stream[i]->metadata_flag) {
1655 iter.iter.node = &local_stream[i]->waitfd_node.node;
1656 ret = lttng_ht_del(metadata_ht, &iter);
1657 assert(!ret);
1658 }
1659 consumer_del_stream(local_stream[i]);
1660 num_hup++;
1661 }
1662 }
1663 local_stream[i]->data_read = 0;
1664 }
1665 }
1666 end:
1667 DBG("polling thread exiting");
1668 if (pollfd != NULL) {
1669 free(pollfd);
1670 pollfd = NULL;
1671 }
1672 if (local_stream != NULL) {
1673 free(local_stream);
1674 local_stream = NULL;
1675 }
1676 rcu_unregister_thread();
1677 return NULL;
1678 }
1679
/*
 * This thread listens on the consumerd socket and receives the file
 * descriptors from the session daemon.
 *
 * On exit (STOP command, error or signal-driven quit), it sets consumer_quit
 * and wakes the polling thread through consumer_poll_pipe so the whole
 * consumer can shut down cleanly.
 */
void *lttng_consumer_thread_receive_fds(void *data)
{
	int sock, client_socket, ret;
	/*
	 * structure to poll for incoming data on communication socket avoids
	 * making blocking sockets.
	 */
	struct pollfd consumer_sockpoll[2];
	struct lttng_consumer_local_data *ctx = data;

	rcu_register_thread();

	DBG("Creating command socket %s", ctx->consumer_command_sock_path);
	/* Remove any stale socket file left over from a previous run. */
	unlink(ctx->consumer_command_sock_path);
	client_socket = lttcomm_create_unix_sock(ctx->consumer_command_sock_path);
	if (client_socket < 0) {
		ERR("Cannot create command socket");
		goto end;
	}

	ret = lttcomm_listen_unix_sock(client_socket);
	if (ret < 0) {
		goto end;
	}

	DBG("Sending ready command to lttng-sessiond");
	ret = lttng_consumer_send_error(ctx, CONSUMERD_COMMAND_SOCK_READY);
	/* return < 0 on error, but == 0 is not fatal */
	if (ret < 0) {
		ERR("Error sending ready command to lttng-sessiond");
		goto end;
	}

	/* Non-blocking so the poll()-based loop below never stalls on accept. */
	ret = fcntl(client_socket, F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		perror("fcntl O_NONBLOCK");
		goto end;
	}

	/* prepare the FDs to poll : to client socket and the should_quit pipe */
	consumer_sockpoll[0].fd = ctx->consumer_should_quit[0];
	consumer_sockpoll[0].events = POLLIN | POLLPRI;
	consumer_sockpoll[1].fd = client_socket;
	consumer_sockpoll[1].events = POLLIN | POLLPRI;

	if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
		goto end;
	}
	DBG("Connection on client_socket");

	/* Blocking call, waiting for transmission */
	sock = lttcomm_accept_unix_sock(client_socket);
	if (sock <= 0) {
		WARN("On accept");
		goto end;
	}
	ret = fcntl(sock, F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		perror("fcntl O_NONBLOCK");
		goto end;
	}

	/* update the polling structure to poll on the established socket */
	consumer_sockpoll[1].fd = sock;
	consumer_sockpoll[1].events = POLLIN | POLLPRI;

	/* Command loop: runs until STOP, error, or a quit request. */
	while (1) {
		if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
			goto end;
		}
		DBG("Incoming command on sock");
		ret = lttng_consumer_recv_cmd(ctx, sock, consumer_sockpoll);
		if (ret == -ENOENT) {
			/* -ENOENT is the in-band encoding of the STOP command. */
			DBG("Received STOP command");
			goto end;
		}
		if (ret < 0) {
			ERR("Communication interrupted on command socket");
			goto end;
		}
		if (consumer_quit) {
			DBG("consumer_thread_receive_fds received quit from signal");
			goto end;
		}
		DBG("received fds on sock");
	}
end:
	DBG("consumer_thread_receive_fds exiting");

	/*
	 * when all fds have hung up, the polling thread
	 * can exit cleanly
	 */
	consumer_quit = 1;

	/*
	 * 2s of grace period, if no polling events occur during
	 * this period, the polling thread will exit even if there
	 * are still open FDs (should not happen, but safety mechanism).
	 */
	consumer_poll_timeout = LTTNG_CONSUMER_POLL_TIMEOUT;

	/*
	 * Wake-up the other end by writing a null byte in the pipe
	 * (non-blocking). Important note: Because writing into the
	 * pipe is non-blocking (and therefore we allow dropping wakeup
	 * data, as long as there is wakeup data present in the pipe
	 * buffer to wake up the other end), the other end should
	 * perform the following sequence for waiting:
	 * 1) empty the pipe (reads).
	 * 2) perform update operation.
	 * 3) wait on the pipe (poll).
	 */
	do {
		ret = write(ctx->consumer_poll_pipe[1], "", 1);
	} while (ret < 0 && errno == EINTR);	/* Retry interrupted writes. */
	rcu_unregister_thread();
	return NULL;
}
1803
1804 ssize_t lttng_consumer_read_subbuffer(struct lttng_consumer_stream *stream,
1805 struct lttng_consumer_local_data *ctx)
1806 {
1807 switch (consumer_data.type) {
1808 case LTTNG_CONSUMER_KERNEL:
1809 return lttng_kconsumer_read_subbuffer(stream, ctx);
1810 case LTTNG_CONSUMER32_UST:
1811 case LTTNG_CONSUMER64_UST:
1812 return lttng_ustconsumer_read_subbuffer(stream, ctx);
1813 default:
1814 ERR("Unknown consumer_data type");
1815 assert(0);
1816 return -ENOSYS;
1817 }
1818 }
1819
1820 int lttng_consumer_on_recv_stream(struct lttng_consumer_stream *stream)
1821 {
1822 switch (consumer_data.type) {
1823 case LTTNG_CONSUMER_KERNEL:
1824 return lttng_kconsumer_on_recv_stream(stream);
1825 case LTTNG_CONSUMER32_UST:
1826 case LTTNG_CONSUMER64_UST:
1827 return lttng_ustconsumer_on_recv_stream(stream);
1828 default:
1829 ERR("Unknown consumer_data type");
1830 assert(0);
1831 return -ENOSYS;
1832 }
1833 }
1834
1835 /*
1836 * Allocate and set consumer data hash tables.
1837 */
1838 void lttng_consumer_init(void)
1839 {
1840 consumer_data.stream_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
1841 consumer_data.channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
1842 consumer_data.relayd_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
1843 }