[lttng-tools.git] / src / common / consumer.c
1 /*
2 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 * 2012 - David Goulet <dgoulet@efficios.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 */
19
20 #define _GNU_SOURCE
21 #include <assert.h>
22 #include <poll.h>
23 #include <pthread.h>
24 #include <stdlib.h>
25 #include <string.h>
26 #include <sys/mman.h>
27 #include <sys/socket.h>
28 #include <sys/types.h>
29 #include <unistd.h>
30 #include <inttypes.h>
31
32 #include <common/common.h>
33 #include <common/kernel-ctl/kernel-ctl.h>
34 #include <common/sessiond-comm/relayd.h>
35 #include <common/sessiond-comm/sessiond-comm.h>
36 #include <common/kernel-consumer/kernel-consumer.h>
37 #include <common/relayd/relayd.h>
38 #include <common/ust-consumer/ust-consumer.h>
39
40 #include "consumer.h"
41
42 struct lttng_consumer_global_data consumer_data = {
43 .stream_count = 0,
44 .need_update = 1,
45 .type = LTTNG_CONSUMER_UNKNOWN,
46 };
47
48 /* timeout parameter, to control the polling thread grace period. */
49 int consumer_poll_timeout = -1;
50
51 /*
52	 * Flag to inform the polling thread to quit when all fds have hung up. Updated
53	 * by consumer_thread_receive_fds when it notices that all fds have hung up.
54 * Also updated by the signal handler (consumer_should_exit()). Read by the
55 * polling threads.
56 */
57 volatile int consumer_quit = 0;
58
59 /*
60 * Find a stream. The consumer_data.lock must be locked during this
61 * call.
62 */
63 static struct lttng_consumer_stream *consumer_find_stream(int key)
64 {
65 struct lttng_ht_iter iter;
66 struct lttng_ht_node_ulong *node;
67 struct lttng_consumer_stream *stream = NULL;
68
69 /* Negative keys are lookup failures */
70 if (key < 0)
71 return NULL;
72
73 rcu_read_lock();
74
75 lttng_ht_lookup(consumer_data.stream_ht, (void *)((unsigned long) key),
76 &iter);
77 node = lttng_ht_iter_get_node_ulong(&iter);
78 if (node != NULL) {
79 stream = caa_container_of(node, struct lttng_consumer_stream, node);
80 }
81
82 rcu_read_unlock();
83
84 return stream;
85 }
86
87 static void consumer_steal_stream_key(int key)
88 {
89 struct lttng_consumer_stream *stream;
90
91 rcu_read_lock();
92 stream = consumer_find_stream(key);
93 if (stream) {
94 stream->key = -1;
95 /*
96 * We don't want the lookup to match, but we still need
97 * to iterate on this stream when iterating over the hash table. Just
98 * change the node key.
99 */
100 stream->node.key = -1;
101 }
102 rcu_read_unlock();
103 }
104
105 static struct lttng_consumer_channel *consumer_find_channel(int key)
106 {
107 struct lttng_ht_iter iter;
108 struct lttng_ht_node_ulong *node;
109 struct lttng_consumer_channel *channel = NULL;
110
111 /* Negative keys are lookup failures */
112 if (key < 0)
113 return NULL;
114
115 rcu_read_lock();
116
117 lttng_ht_lookup(consumer_data.channel_ht, (void *)((unsigned long) key),
118 &iter);
119 node = lttng_ht_iter_get_node_ulong(&iter);
120 if (node != NULL) {
121 channel = caa_container_of(node, struct lttng_consumer_channel, node);
122 }
123
124 rcu_read_unlock();
125
126 return channel;
127 }
128
129 static void consumer_steal_channel_key(int key)
130 {
131 struct lttng_consumer_channel *channel;
132
133 rcu_read_lock();
134 channel = consumer_find_channel(key);
135 if (channel) {
136 channel->key = -1;
137 /*
138 * We don't want the lookup to match, but we still need
139 * to iterate on this channel when iterating over the hash table. Just
140 * change the node key.
141 */
142 channel->node.key = -1;
143 }
144 rcu_read_unlock();
145 }
146
147 static
148 void consumer_free_stream(struct rcu_head *head)
149 {
150 struct lttng_ht_node_ulong *node =
151 caa_container_of(head, struct lttng_ht_node_ulong, head);
152 struct lttng_consumer_stream *stream =
153 caa_container_of(node, struct lttng_consumer_stream, node);
154
155 free(stream);
156 }
157
158 /*
159 * RCU protected relayd socket pair free.
160 */
161 static void consumer_rcu_free_relayd(struct rcu_head *head)
162 {
163 struct lttng_ht_node_ulong *node =
164 caa_container_of(head, struct lttng_ht_node_ulong, head);
165 struct consumer_relayd_sock_pair *relayd =
166 caa_container_of(node, struct consumer_relayd_sock_pair, node);
167
168 free(relayd);
169 }
170
171 /*
172 * Destroy and free relayd socket pair object.
173 *
174 * This function MUST be called with the consumer_data lock acquired.
175 */
176 void consumer_destroy_relayd(struct consumer_relayd_sock_pair *relayd)
177 {
178 int ret;
179 struct lttng_ht_iter iter;
180
181 if (relayd == NULL) {
182 return;
183 }
184
185 DBG("Consumer destroy and close relayd socket pair");
186
187 iter.iter.node = &relayd->node.node;
188 ret = lttng_ht_del(consumer_data.relayd_ht, &iter);
189 if (ret != 0) {
190 /* We assume the relayd was already destroyed */
191 return;
192 }
193
194 /* Close all sockets */
195 pthread_mutex_lock(&relayd->ctrl_sock_mutex);
196 (void) relayd_close(&relayd->control_sock);
197 pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
198 (void) relayd_close(&relayd->data_sock);
199
200 /* RCU free() call */
201 call_rcu(&relayd->node.head, consumer_rcu_free_relayd);
202 }
203
204 /*
205 * Flag a relayd socket pair for destruction. Destroy it if the refcount
206 * reaches zero.
207 *
208	 * RCU read side lock MUST be acquired before calling this function.
209 */
210 void consumer_flag_relayd_for_destroy(struct consumer_relayd_sock_pair *relayd)
211 {
212 assert(relayd);
213
214 /* Set destroy flag for this object */
215 uatomic_set(&relayd->destroy_flag, 1);
216
217 /* Destroy the relayd if refcount is 0 */
218 if (uatomic_read(&relayd->refcount) == 0) {
219 consumer_destroy_relayd(relayd);
220 }
221 }
222
223 /*
224 * Remove a stream from the global list protected by a mutex. This
225 * function is also responsible for freeing its data structures.
226 */
227 void consumer_del_stream(struct lttng_consumer_stream *stream)
228 {
229 int ret;
230 struct lttng_ht_iter iter;
231 struct lttng_consumer_channel *free_chan = NULL;
232 struct consumer_relayd_sock_pair *relayd;
233
234 assert(stream);
235
236 pthread_mutex_lock(&consumer_data.lock);
237
238 switch (consumer_data.type) {
239 case LTTNG_CONSUMER_KERNEL:
240 if (stream->mmap_base != NULL) {
241 ret = munmap(stream->mmap_base, stream->mmap_len);
242 if (ret != 0) {
243 perror("munmap");
244 }
245 }
246 break;
247 case LTTNG_CONSUMER32_UST:
248 case LTTNG_CONSUMER64_UST:
249 lttng_ustconsumer_del_stream(stream);
250 break;
251 default:
252 ERR("Unknown consumer_data type");
253 assert(0);
254 goto end;
255 }
256
257 rcu_read_lock();
258 iter.iter.node = &stream->node.node;
259 ret = lttng_ht_del(consumer_data.stream_ht, &iter);
260 assert(!ret);
261
262 rcu_read_unlock();
263
264 if (consumer_data.stream_count <= 0) {
265 goto end;
266 }
267 consumer_data.stream_count--;
268 if (!stream) {
269 goto end;
270 }
271 if (stream->out_fd >= 0) {
272 ret = close(stream->out_fd);
273 if (ret) {
274 PERROR("close");
275 }
276 }
277 if (stream->wait_fd >= 0 && !stream->wait_fd_is_copy) {
278 ret = close(stream->wait_fd);
279 if (ret) {
280 PERROR("close");
281 }
282 }
283 if (stream->shm_fd >= 0 && stream->wait_fd != stream->shm_fd) {
284 ret = close(stream->shm_fd);
285 if (ret) {
286 PERROR("close");
287 }
288 }
289
290 /* Check and cleanup relayd */
291 rcu_read_lock();
292 relayd = consumer_find_relayd(stream->net_seq_idx);
293 if (relayd != NULL) {
294 uatomic_dec(&relayd->refcount);
295 assert(uatomic_read(&relayd->refcount) >= 0);
296
297 /* Closing streams requires to lock the control socket. */
298 pthread_mutex_lock(&relayd->ctrl_sock_mutex);
299 ret = relayd_send_close_stream(&relayd->control_sock,
300 stream->relayd_stream_id,
301 stream->next_net_seq_num - 1);
302 pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
303 if (ret < 0) {
304 DBG("Unable to close stream on the relayd. Continuing");
305 /*
306 * Continue here. There is nothing we can do for the relayd.
307 * Chances are that the relayd has closed the socket so we just
308 * continue cleaning up.
309 */
310 }
311
312 /* Both conditions are met, we destroy the relayd. */
313 if (uatomic_read(&relayd->refcount) == 0 &&
314 uatomic_read(&relayd->destroy_flag)) {
315 consumer_destroy_relayd(relayd);
316 }
317 }
318 rcu_read_unlock();
319
320 if (!--stream->chan->refcount) {
321 free_chan = stream->chan;
322 }
323
324
325 call_rcu(&stream->node.head, consumer_free_stream);
326 end:
327 consumer_data.need_update = 1;
328 pthread_mutex_unlock(&consumer_data.lock);
329
330 if (free_chan)
331 consumer_del_channel(free_chan);
332 }
333
334 struct lttng_consumer_stream *consumer_allocate_stream(
335 int channel_key, int stream_key,
336 int shm_fd, int wait_fd,
337 enum lttng_consumer_stream_state state,
338 uint64_t mmap_len,
339 enum lttng_event_output output,
340 const char *path_name,
341 uid_t uid,
342 gid_t gid,
343 int net_index,
344 int metadata_flag)
345 {
346 struct lttng_consumer_stream *stream;
347 int ret;
348
349 stream = zmalloc(sizeof(*stream));
350 if (stream == NULL) {
351 perror("malloc struct lttng_consumer_stream");
352 goto end;
353 }
354 stream->chan = consumer_find_channel(channel_key);
355 if (!stream->chan) {
356 perror("Unable to find channel key");
357 goto end;
358 }
359 stream->chan->refcount++;
360 stream->key = stream_key;
361 stream->shm_fd = shm_fd;
362 stream->wait_fd = wait_fd;
363 stream->out_fd = -1;
364 stream->out_fd_offset = 0;
365 stream->state = state;
366 stream->mmap_len = mmap_len;
367 stream->mmap_base = NULL;
368 stream->output = output;
369 stream->uid = uid;
370 stream->gid = gid;
371 stream->net_seq_idx = net_index;
372 stream->metadata_flag = metadata_flag;
373 strncpy(stream->path_name, path_name, sizeof(stream->path_name));
374 stream->path_name[sizeof(stream->path_name) - 1] = '\0';
375 lttng_ht_node_init_ulong(&stream->node, stream->key);
376 lttng_ht_node_init_ulong(&stream->waitfd_node, stream->wait_fd);
377
378 switch (consumer_data.type) {
379 case LTTNG_CONSUMER_KERNEL:
380 break;
381 case LTTNG_CONSUMER32_UST:
382 case LTTNG_CONSUMER64_UST:
383 stream->cpu = stream->chan->cpucount++;
384 ret = lttng_ustconsumer_allocate_stream(stream);
385 if (ret) {
386 free(stream);
387 return NULL;
388 }
389 break;
390 default:
391 ERR("Unknown consumer_data type");
392 assert(0);
393 goto end;
394 }
395 DBG("Allocated stream %s (key %d, shm_fd %d, wait_fd %d, mmap_len %llu, out_fd %d, net_seq_idx %d)",
396 stream->path_name, stream->key,
397 stream->shm_fd,
398 stream->wait_fd,
399 (unsigned long long) stream->mmap_len,
400 stream->out_fd,
401 stream->net_seq_idx);
402 end:
403 return stream;
404 }
405
406 /*
407 * Add a stream to the global list protected by a mutex.
408 */
409 int consumer_add_stream(struct lttng_consumer_stream *stream)
410 {
411 int ret = 0;
412 struct lttng_ht_node_ulong *node;
413 struct lttng_ht_iter iter;
414 struct consumer_relayd_sock_pair *relayd;
415
416 pthread_mutex_lock(&consumer_data.lock);
417 /* Steal stream identifier, for UST */
418 consumer_steal_stream_key(stream->key);
419
420 rcu_read_lock();
421 lttng_ht_lookup(consumer_data.stream_ht,
422 (void *)((unsigned long) stream->key), &iter);
423 node = lttng_ht_iter_get_node_ulong(&iter);
424 if (node != NULL) {
425 rcu_read_unlock();
426		/* Stream already exists. Ignore the insertion. */
427 goto end;
428 }
429
430 lttng_ht_add_unique_ulong(consumer_data.stream_ht, &stream->node);
431
432	/* Find the relayd and, if found, take a reference on it. */
433 relayd = consumer_find_relayd(stream->net_seq_idx);
434 if (relayd != NULL) {
435 uatomic_inc(&relayd->refcount);
436 }
437 rcu_read_unlock();
438
439 /* Update consumer data */
440 consumer_data.stream_count++;
441 consumer_data.need_update = 1;
442
443 switch (consumer_data.type) {
444 case LTTNG_CONSUMER_KERNEL:
445 break;
446 case LTTNG_CONSUMER32_UST:
447 case LTTNG_CONSUMER64_UST:
448 /* Streams are in CPU number order (we rely on this) */
449 stream->cpu = stream->chan->nr_streams++;
450 break;
451 default:
452 ERR("Unknown consumer_data type");
453 assert(0);
454 goto end;
455 }
456
457 end:
458 pthread_mutex_unlock(&consumer_data.lock);
459
460 return ret;
461 }
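
/*
 * Illustrative sketch (not part of the original file): how a command
 * handler typically pairs consumer_allocate_stream() with
 * consumer_add_stream(). Error handling is elided and the channel/stream
 * keys, fds and credentials are hypothetical placeholders taken from a
 * received command.
 *
 *	struct lttng_consumer_stream *new_stream;
 *
 *	new_stream = consumer_allocate_stream(channel_key, stream_key,
 *			shm_fd, wait_fd, LTTNG_CONSUMER_ACTIVE_STREAM, mmap_len,
 *			LTTNG_EVENT_MMAP, path_name, uid, gid, net_index, 0);
 *	if (new_stream != NULL) {
 *		consumer_add_stream(new_stream);
 *	}
 */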
462
463 /*
464 * Add relayd socket to global consumer data hashtable. RCU read side lock MUST
465 * be acquired before calling this.
466 */
467
468 int consumer_add_relayd(struct consumer_relayd_sock_pair *relayd)
469 {
470 int ret = 0;
471 struct lttng_ht_node_ulong *node;
472 struct lttng_ht_iter iter;
473
474 if (relayd == NULL) {
475 ret = -1;
476 goto end;
477 }
478
479 lttng_ht_lookup(consumer_data.relayd_ht,
480 (void *)((unsigned long) relayd->net_seq_idx), &iter);
481 node = lttng_ht_iter_get_node_ulong(&iter);
482 if (node != NULL) {
483		/* Relayd already exists. Ignore the insertion. */
484 goto end;
485 }
486 lttng_ht_add_unique_ulong(consumer_data.relayd_ht, &relayd->node);
487
488 end:
489 return ret;
490 }
491
492 /*
493 * Allocate and return a consumer relayd socket.
494 */
495 struct consumer_relayd_sock_pair *consumer_allocate_relayd_sock_pair(
496 int net_seq_idx)
497 {
498 struct consumer_relayd_sock_pair *obj = NULL;
499
500 /* Negative net sequence index is a failure */
501 if (net_seq_idx < 0) {
502 goto error;
503 }
504
505 obj = zmalloc(sizeof(struct consumer_relayd_sock_pair));
506 if (obj == NULL) {
507 PERROR("zmalloc relayd sock");
508 goto error;
509 }
510
511 obj->net_seq_idx = net_seq_idx;
512 obj->refcount = 0;
513 obj->destroy_flag = 0;
514 lttng_ht_node_init_ulong(&obj->node, obj->net_seq_idx);
515 pthread_mutex_init(&obj->ctrl_sock_mutex, NULL);
516
517 error:
518 return obj;
519 }
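
/*
 * Illustrative sketch (assumption, not original code): allocating a relayd
 * socket pair and inserting it into the hash table. As documented above,
 * consumer_add_relayd() must be called with the RCU read side lock held;
 * "net_idx" is a hypothetical network sequence index.
 *
 *	struct consumer_relayd_sock_pair *relayd;
 *
 *	relayd = consumer_allocate_relayd_sock_pair(net_idx);
 *	if (relayd != NULL) {
 *		rcu_read_lock();
 *		(void) consumer_add_relayd(relayd);
 *		rcu_read_unlock();
 *	}
 */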
520
521 /*
522 * Find a relayd socket pair in the global consumer data.
523 *
524 * Return the object if found else NULL.
525 * RCU read-side lock must be held across this call and while using the
526 * returned object.
527 */
528 struct consumer_relayd_sock_pair *consumer_find_relayd(int key)
529 {
530 struct lttng_ht_iter iter;
531 struct lttng_ht_node_ulong *node;
532 struct consumer_relayd_sock_pair *relayd = NULL;
533
534 /* Negative keys are lookup failures */
535 if (key < 0) {
536 goto error;
537 }
538
539 lttng_ht_lookup(consumer_data.relayd_ht, (void *)((unsigned long) key),
540 &iter);
541 node = lttng_ht_iter_get_node_ulong(&iter);
542 if (node != NULL) {
543 relayd = caa_container_of(node, struct consumer_relayd_sock_pair, node);
544 }
545
546 error:
547 return relayd;
548 }
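
/*
 * Illustrative sketch (assumption): the lookup pattern used elsewhere in
 * this file. The RCU read side lock must cover both the lookup and every
 * use of the returned pointer; taking a reference keeps the pair alive
 * while it is being used for stream I/O.
 *
 *	rcu_read_lock();
 *	relayd = consumer_find_relayd(stream->net_seq_idx);
 *	if (relayd != NULL) {
 *		uatomic_inc(&relayd->refcount);
 *		(use relayd->control_sock / relayd->data_sock here)
 *		uatomic_dec(&relayd->refcount);
 *	}
 *	rcu_read_unlock();
 */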
549
550 /*
551	 * Write the relayd data or metadata header for a stream that is set up for
552	 * network streaming, i.e. whose net sequence index is set.
553	 *
554	 * Return the destination file descriptor, or a negative value on error.
555 */
556 static int write_relayd_stream_header(struct lttng_consumer_stream *stream,
557 size_t data_size, struct consumer_relayd_sock_pair *relayd)
558 {
559 int outfd = -1, ret;
560 struct lttcomm_relayd_data_hdr data_hdr;
561
562 /* Safety net */
563 assert(stream);
564 assert(relayd);
565
566 /* Reset data header */
567 memset(&data_hdr, 0, sizeof(data_hdr));
568
569 if (stream->metadata_flag) {
570 /* Caller MUST acquire the relayd control socket lock */
571 ret = relayd_send_metadata(&relayd->control_sock, data_size);
572 if (ret < 0) {
573 goto error;
574 }
575
576 /* Metadata are always sent on the control socket. */
577 outfd = relayd->control_sock.fd;
578 } else {
579 /* Set header with stream information */
580 data_hdr.stream_id = htobe64(stream->relayd_stream_id);
581 data_hdr.data_size = htobe32(data_size);
582 data_hdr.net_seq_num = htobe64(stream->next_net_seq_num++);
583 /* Other fields are zeroed previously */
584
585 ret = relayd_send_data_hdr(&relayd->data_sock, &data_hdr,
586 sizeof(data_hdr));
587 if (ret < 0) {
588 goto error;
589 }
590
591 /* Set to go on data socket */
592 outfd = relayd->data_sock.fd;
593 }
594
595 error:
596 return outfd;
597 }
598
599 /*
600 * Update a stream according to what we just received.
601 */
602 void consumer_change_stream_state(int stream_key,
603 enum lttng_consumer_stream_state state)
604 {
605 struct lttng_consumer_stream *stream;
606
607 pthread_mutex_lock(&consumer_data.lock);
608 stream = consumer_find_stream(stream_key);
609 if (stream) {
610 stream->state = state;
611 }
612 consumer_data.need_update = 1;
613 pthread_mutex_unlock(&consumer_data.lock);
614 }
615
616 static
617 void consumer_free_channel(struct rcu_head *head)
618 {
619 struct lttng_ht_node_ulong *node =
620 caa_container_of(head, struct lttng_ht_node_ulong, head);
621 struct lttng_consumer_channel *channel =
622 caa_container_of(node, struct lttng_consumer_channel, node);
623
624 free(channel);
625 }
626
627 /*
628 * Remove a channel from the global list protected by a mutex. This
629 * function is also responsible for freeing its data structures.
630 */
631 void consumer_del_channel(struct lttng_consumer_channel *channel)
632 {
633 int ret;
634 struct lttng_ht_iter iter;
635
636 pthread_mutex_lock(&consumer_data.lock);
637
638 switch (consumer_data.type) {
639 case LTTNG_CONSUMER_KERNEL:
640 break;
641 case LTTNG_CONSUMER32_UST:
642 case LTTNG_CONSUMER64_UST:
643 lttng_ustconsumer_del_channel(channel);
644 break;
645 default:
646 ERR("Unknown consumer_data type");
647 assert(0);
648 goto end;
649 }
650
651 rcu_read_lock();
652 iter.iter.node = &channel->node.node;
653 ret = lttng_ht_del(consumer_data.channel_ht, &iter);
654 assert(!ret);
655 rcu_read_unlock();
656
657 if (channel->mmap_base != NULL) {
658 ret = munmap(channel->mmap_base, channel->mmap_len);
659 if (ret != 0) {
660 perror("munmap");
661 }
662 }
663 if (channel->wait_fd >= 0 && !channel->wait_fd_is_copy) {
664 ret = close(channel->wait_fd);
665 if (ret) {
666 PERROR("close");
667 }
668 }
669 if (channel->shm_fd >= 0 && channel->wait_fd != channel->shm_fd) {
670 ret = close(channel->shm_fd);
671 if (ret) {
672 PERROR("close");
673 }
674 }
675
676 call_rcu(&channel->node.head, consumer_free_channel);
677 end:
678 pthread_mutex_unlock(&consumer_data.lock);
679 }
680
681 struct lttng_consumer_channel *consumer_allocate_channel(
682 int channel_key,
683 int shm_fd, int wait_fd,
684 uint64_t mmap_len,
685 uint64_t max_sb_size)
686 {
687 struct lttng_consumer_channel *channel;
688 int ret;
689
690 channel = zmalloc(sizeof(*channel));
691 if (channel == NULL) {
692 perror("malloc struct lttng_consumer_channel");
693 goto end;
694 }
695 channel->key = channel_key;
696 channel->shm_fd = shm_fd;
697 channel->wait_fd = wait_fd;
698 channel->mmap_len = mmap_len;
699 channel->max_sb_size = max_sb_size;
700 channel->refcount = 0;
701 channel->nr_streams = 0;
702 lttng_ht_node_init_ulong(&channel->node, channel->key);
703
704 switch (consumer_data.type) {
705 case LTTNG_CONSUMER_KERNEL:
706 channel->mmap_base = NULL;
707 channel->mmap_len = 0;
708 break;
709 case LTTNG_CONSUMER32_UST:
710 case LTTNG_CONSUMER64_UST:
711 ret = lttng_ustconsumer_allocate_channel(channel);
712 if (ret) {
713 free(channel);
714 return NULL;
715 }
716 break;
717 default:
718 ERR("Unknown consumer_data type");
719 assert(0);
720 goto end;
721 }
722 DBG("Allocated channel (key %d, shm_fd %d, wait_fd %d, mmap_len %llu, max_sb_size %llu)",
723 channel->key, channel->shm_fd, channel->wait_fd,
724 (unsigned long long) channel->mmap_len,
725 (unsigned long long) channel->max_sb_size);
726 end:
727 return channel;
728 }
729
730 /*
731 * Add a channel to the global list protected by a mutex.
732 */
733 int consumer_add_channel(struct lttng_consumer_channel *channel)
734 {
735 struct lttng_ht_node_ulong *node;
736 struct lttng_ht_iter iter;
737
738 pthread_mutex_lock(&consumer_data.lock);
739 /* Steal channel identifier, for UST */
740 consumer_steal_channel_key(channel->key);
741 rcu_read_lock();
742
743 lttng_ht_lookup(consumer_data.channel_ht,
744 (void *)((unsigned long) channel->key), &iter);
745 node = lttng_ht_iter_get_node_ulong(&iter);
746 if (node != NULL) {
747		/* Channel already exists. Ignore the insertion. */
748 goto end;
749 }
750
751 lttng_ht_add_unique_ulong(consumer_data.channel_ht, &channel->node);
752
753 end:
754 rcu_read_unlock();
755 pthread_mutex_unlock(&consumer_data.lock);
756
757 return 0;
758 }
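
/*
 * Illustrative sketch (not part of the original file): registering a
 * channel received from the session daemon. The key, fds and sizes are
 * hypothetical values decoded from the command; error handling is elided.
 *
 *	struct lttng_consumer_channel *new_channel;
 *
 *	new_channel = consumer_allocate_channel(channel_key, shm_fd, wait_fd,
 *			mmap_len, max_sb_size);
 *	if (new_channel != NULL) {
 *		consumer_add_channel(new_channel);
 *	}
 */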
759
760 /*
761 * Allocate the pollfd structure and the local view of the out fds to avoid
762 * doing a lookup in the linked list and concurrency issues when writing is
763 * needed. Called with consumer_data.lock held.
764 *
765 * Returns the number of fds in the structures.
766 */
767 int consumer_update_poll_array(
768 struct lttng_consumer_local_data *ctx, struct pollfd **pollfd,
769 struct lttng_consumer_stream **local_stream,
770 struct lttng_ht *metadata_ht)
771 {
772 int i = 0;
773 struct lttng_ht_iter iter;
774 struct lttng_consumer_stream *stream;
775
776 DBG("Updating poll fd array");
777 rcu_read_lock();
778 cds_lfht_for_each_entry(consumer_data.stream_ht->ht, &iter.iter, stream,
779 node.node) {
780 if (stream->state != LTTNG_CONSUMER_ACTIVE_STREAM) {
781 continue;
782 }
783 DBG("Active FD %d", stream->wait_fd);
784 (*pollfd)[i].fd = stream->wait_fd;
785 (*pollfd)[i].events = POLLIN | POLLPRI;
786 if (stream->metadata_flag && metadata_ht) {
787 lttng_ht_add_unique_ulong(metadata_ht, &stream->waitfd_node);
788 DBG("Active FD added to metadata hash table");
789 }
790 local_stream[i] = stream;
791 i++;
792 }
793 rcu_read_unlock();
794
795 /*
796 * Insert the consumer_poll_pipe at the end of the array and don't
797 * increment i so nb_fd is the number of real FD.
798 */
799 (*pollfd)[i].fd = ctx->consumer_poll_pipe[0];
800 (*pollfd)[i].events = POLLIN | POLLPRI;
801 return i;
802 }
803
804 /*
805	 * Poll on the should_quit pipe and on the command socket. Return -1 on error
806	 * or if the thread should exit, 0 if data is available on the command socket.
807 */
808 int lttng_consumer_poll_socket(struct pollfd *consumer_sockpoll)
809 {
810 int num_rdy;
811
812 restart:
813 num_rdy = poll(consumer_sockpoll, 2, -1);
814 if (num_rdy == -1) {
815 /*
816 * Restart interrupted system call.
817 */
818 if (errno == EINTR) {
819 goto restart;
820 }
821 perror("Poll error");
822 goto exit;
823 }
824 if (consumer_sockpoll[0].revents & (POLLIN | POLLPRI)) {
825 DBG("consumer_should_quit wake up");
826 goto exit;
827 }
828 return 0;
829
830 exit:
831 return -1;
832 }
833
834 /*
835 * Set the error socket.
836 */
837 void lttng_consumer_set_error_sock(
838 struct lttng_consumer_local_data *ctx, int sock)
839 {
840 ctx->consumer_error_socket = sock;
841 }
842
843 /*
844 * Set the command socket path.
845 */
846 void lttng_consumer_set_command_sock_path(
847 struct lttng_consumer_local_data *ctx, char *sock)
848 {
849 ctx->consumer_command_sock_path = sock;
850 }
851
852 /*
853 * Send return code to the session daemon.
854	 * If the socket is not defined, we return 0 since it is not a fatal error.
855 */
856 int lttng_consumer_send_error(
857 struct lttng_consumer_local_data *ctx, int cmd)
858 {
859 if (ctx->consumer_error_socket > 0) {
860 return lttcomm_send_unix_sock(ctx->consumer_error_socket, &cmd,
861 sizeof(enum lttcomm_sessiond_command));
862 }
863
864 return 0;
865 }
866
867 /*
868	 * Close all the tracefiles and stream fds; should be called when all instances
869 * are destroyed.
870 */
871 void lttng_consumer_cleanup(void)
872 {
873 struct lttng_ht_iter iter;
874 struct lttng_ht_node_ulong *node;
875
876 rcu_read_lock();
877
878 /*
879	 * Close all outfds. Called when there are no more threads running (after
880	 * joining on the threads); no need to protect the list iteration with a mutex.
881 */
882 cds_lfht_for_each_entry(consumer_data.stream_ht->ht, &iter.iter, node,
883 node) {
884 struct lttng_consumer_stream *stream =
885 caa_container_of(node, struct lttng_consumer_stream, node);
886 consumer_del_stream(stream);
887 }
888
889 cds_lfht_for_each_entry(consumer_data.channel_ht->ht, &iter.iter, node,
890 node) {
891 struct lttng_consumer_channel *channel =
892 caa_container_of(node, struct lttng_consumer_channel, node);
893 consumer_del_channel(channel);
894 }
895
896 rcu_read_unlock();
897
898 lttng_ht_destroy(consumer_data.stream_ht);
899 lttng_ht_destroy(consumer_data.channel_ht);
900 }
901
902 /*
903 * Called from signal handler.
904 */
905 void lttng_consumer_should_exit(struct lttng_consumer_local_data *ctx)
906 {
907 int ret;
908 consumer_quit = 1;
909 do {
910 ret = write(ctx->consumer_should_quit[1], "4", 1);
911 } while (ret < 0 && errno == EINTR);
912 if (ret < 0) {
913 perror("write consumer quit");
914 }
915 }
916
917 void lttng_consumer_sync_trace_file(struct lttng_consumer_stream *stream,
918 off_t orig_offset)
919 {
920 int outfd = stream->out_fd;
921
922 /*
923 * This does a blocking write-and-wait on any page that belongs to the
924 * subbuffer prior to the one we just wrote.
925 * Don't care about error values, as these are just hints and ways to
926 * limit the amount of page cache used.
927 */
928 if (orig_offset < stream->chan->max_sb_size) {
929 return;
930 }
931 lttng_sync_file_range(outfd, orig_offset - stream->chan->max_sb_size,
932 stream->chan->max_sb_size,
933 SYNC_FILE_RANGE_WAIT_BEFORE
934 | SYNC_FILE_RANGE_WRITE
935 | SYNC_FILE_RANGE_WAIT_AFTER);
936 /*
937 * Give hints to the kernel about how we access the file:
938 * POSIX_FADV_DONTNEED : we won't re-access data in a near future after
939 * we write it.
940 *
941 * We need to call fadvise again after the file grows because the
942 * kernel does not seem to apply fadvise to non-existing parts of the
943 * file.
944 *
945 * Call fadvise _after_ having waited for the page writeback to
946 * complete because the dirty page writeback semantic is not well
947 * defined. So it can be expected to lead to lower throughput in
948 * streaming.
949 */
950 posix_fadvise(outfd, orig_offset - stream->chan->max_sb_size,
951 stream->chan->max_sb_size, POSIX_FADV_DONTNEED);
952 }
953
954 /*
955	 * Initialise the necessary environment:
956 * - create a new context
957 * - create the poll_pipe
958 * - create the should_quit pipe (for signal handler)
959 * - create the thread pipe (for splice)
960 *
961	 * Takes a function pointer as argument; this function is called when data is
962	 * available on a buffer. The callback is responsible for calling
963	 * kernctl_get_next_subbuf, reading the data with mmap or splice depending on
964	 * the buffer configuration, and then calling kernctl_put_next_subbuf at the end.
965 *
966 * Returns a pointer to the new context or NULL on error.
967 */
968 struct lttng_consumer_local_data *lttng_consumer_create(
969 enum lttng_consumer_type type,
970 ssize_t (*buffer_ready)(struct lttng_consumer_stream *stream,
971 struct lttng_consumer_local_data *ctx),
972 int (*recv_channel)(struct lttng_consumer_channel *channel),
973 int (*recv_stream)(struct lttng_consumer_stream *stream),
974 int (*update_stream)(int stream_key, uint32_t state))
975 {
976 int ret, i;
977 struct lttng_consumer_local_data *ctx;
978
979 assert(consumer_data.type == LTTNG_CONSUMER_UNKNOWN ||
980 consumer_data.type == type);
981 consumer_data.type = type;
982
983 ctx = zmalloc(sizeof(struct lttng_consumer_local_data));
984 if (ctx == NULL) {
985 perror("allocating context");
986 goto error;
987 }
988
989 ctx->consumer_error_socket = -1;
990 /* assign the callbacks */
991 ctx->on_buffer_ready = buffer_ready;
992 ctx->on_recv_channel = recv_channel;
993 ctx->on_recv_stream = recv_stream;
994 ctx->on_update_stream = update_stream;
995
996 ret = pipe(ctx->consumer_poll_pipe);
997 if (ret < 0) {
998 perror("Error creating poll pipe");
999 goto error_poll_pipe;
1000 }
1001
1002 /* set read end of the pipe to non-blocking */
1003 ret = fcntl(ctx->consumer_poll_pipe[0], F_SETFL, O_NONBLOCK);
1004 if (ret < 0) {
1005 perror("fcntl O_NONBLOCK");
1006 goto error_poll_fcntl;
1007 }
1008
1009 /* set write end of the pipe to non-blocking */
1010 ret = fcntl(ctx->consumer_poll_pipe[1], F_SETFL, O_NONBLOCK);
1011 if (ret < 0) {
1012 perror("fcntl O_NONBLOCK");
1013 goto error_poll_fcntl;
1014 }
1015
1016 ret = pipe(ctx->consumer_should_quit);
1017 if (ret < 0) {
1018 perror("Error creating recv pipe");
1019 goto error_quit_pipe;
1020 }
1021
1022 ret = pipe(ctx->consumer_thread_pipe);
1023 if (ret < 0) {
1024 perror("Error creating thread pipe");
1025 goto error_thread_pipe;
1026 }
1027
1028 return ctx;
1029
1030
1031 error_thread_pipe:
1032 for (i = 0; i < 2; i++) {
1033 int err;
1034
1035 err = close(ctx->consumer_should_quit[i]);
1036 if (err) {
1037 PERROR("close");
1038 }
1039 }
1040 error_poll_fcntl:
1041 error_quit_pipe:
1042 for (i = 0; i < 2; i++) {
1043 int err;
1044
1045 err = close(ctx->consumer_poll_pipe[i]);
1046 if (err) {
1047 PERROR("close");
1048 }
1049 }
1050 error_poll_pipe:
1051 free(ctx);
1052 error:
1053 return NULL;
1054 }
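
/*
 * Illustrative sketch (assumption, simplified from how lttng-consumerd is
 * expected to use this API): creating a consumer context and wiring up the
 * callbacks defined later in this file. The NULL callbacks and the
 * error/command socket values are placeholders; error handling is elided.
 *
 *	struct lttng_consumer_local_data *ctx;
 *
 *	lttng_consumer_init();
 *	ctx = lttng_consumer_create(LTTNG_CONSUMER_KERNEL,
 *			lttng_consumer_read_subbuffer, NULL,
 *			lttng_consumer_on_recv_stream, NULL);
 *	if (ctx == NULL) {
 *		return -1;
 *	}
 *	lttng_consumer_set_error_sock(ctx, error_sock_fd);
 *	lttng_consumer_set_command_sock_path(ctx, command_sock_path);
 *	...
 *	lttng_consumer_destroy(ctx);
 */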
1055
1056 /*
1057 * Close all fds associated with the instance and free the context.
1058 */
1059 void lttng_consumer_destroy(struct lttng_consumer_local_data *ctx)
1060 {
1061 int ret;
1062
1063 ret = close(ctx->consumer_error_socket);
1064 if (ret) {
1065 PERROR("close");
1066 }
1067 ret = close(ctx->consumer_thread_pipe[0]);
1068 if (ret) {
1069 PERROR("close");
1070 }
1071 ret = close(ctx->consumer_thread_pipe[1]);
1072 if (ret) {
1073 PERROR("close");
1074 }
1075 ret = close(ctx->consumer_poll_pipe[0]);
1076 if (ret) {
1077 PERROR("close");
1078 }
1079 ret = close(ctx->consumer_poll_pipe[1]);
1080 if (ret) {
1081 PERROR("close");
1082 }
1083 ret = close(ctx->consumer_should_quit[0]);
1084 if (ret) {
1085 PERROR("close");
1086 }
1087 ret = close(ctx->consumer_should_quit[1]);
1088 if (ret) {
1089 PERROR("close");
1090 }
1091 unlink(ctx->consumer_command_sock_path);
1092 free(ctx);
1093 }
1094
1095 /*
1096 * Write the metadata stream id on the specified file descriptor.
1097 */
1098 static int write_relayd_metadata_id(int fd,
1099 struct lttng_consumer_stream *stream,
1100 struct consumer_relayd_sock_pair *relayd)
1101 {
1102 int ret;
1103 uint64_t metadata_id;
1104
1105 metadata_id = htobe64(stream->relayd_stream_id);
1106 do {
1107 ret = write(fd, (void *) &metadata_id,
1108 sizeof(stream->relayd_stream_id));
1109 } while (ret < 0 && errno == EINTR);
1110 if (ret < 0) {
1111 PERROR("write metadata stream id");
1112 goto end;
1113 }
1114 DBG("Metadata stream id %" PRIu64 " written before data",
1115 stream->relayd_stream_id);
1116
1117 end:
1118 return ret;
1119 }
1120
1121 /*
1122 * Mmap the ring buffer, read it and write the data to the tracefile. This is a
1123 * core function for writing trace buffers to either the local filesystem or
1124 * the network.
1125 *
1126	 * Any change to this code path MUST be carefully reviewed!
1127 *
1128 * Returns the number of bytes written
1129 */
1130 ssize_t lttng_consumer_on_read_subbuffer_mmap(
1131 struct lttng_consumer_local_data *ctx,
1132 struct lttng_consumer_stream *stream, unsigned long len)
1133 {
1134 unsigned long mmap_offset;
1135 ssize_t ret = 0, written = 0;
1136 off_t orig_offset = stream->out_fd_offset;
1137 /* Default is on the disk */
1138 int outfd = stream->out_fd;
1139 struct consumer_relayd_sock_pair *relayd = NULL;
1140
1141 /* RCU lock for the relayd pointer */
1142 rcu_read_lock();
1143
1144	/* Check whether the current stream is set up for network streaming. */
1145 if (stream->net_seq_idx != -1) {
1146 relayd = consumer_find_relayd(stream->net_seq_idx);
1147 if (relayd == NULL) {
1148 goto end;
1149 }
1150 }
1151
1152 /* get the offset inside the fd to mmap */
1153 switch (consumer_data.type) {
1154 case LTTNG_CONSUMER_KERNEL:
1155 ret = kernctl_get_mmap_read_offset(stream->wait_fd, &mmap_offset);
1156 break;
1157 case LTTNG_CONSUMER32_UST:
1158 case LTTNG_CONSUMER64_UST:
1159 ret = lttng_ustctl_get_mmap_read_offset(stream->chan->handle,
1160 stream->buf, &mmap_offset);
1161 break;
1162 default:
1163 ERR("Unknown consumer_data type");
1164 assert(0);
1165 }
1166 if (ret != 0) {
1167 errno = -ret;
1168 PERROR("tracer ctl get_mmap_read_offset");
1169 written = ret;
1170 goto end;
1171 }
1172
1173 /* Handle stream on the relayd if the output is on the network */
1174 if (relayd) {
1175 unsigned long netlen = len;
1176
1177 /*
1178 * Lock the control socket for the complete duration of the function
1179 * since from this point on we will use the socket.
1180 */
1181 if (stream->metadata_flag) {
1182 /* Metadata requires the control socket. */
1183 pthread_mutex_lock(&relayd->ctrl_sock_mutex);
1184 netlen += sizeof(stream->relayd_stream_id);
1185 }
1186
1187 ret = write_relayd_stream_header(stream, netlen, relayd);
1188 if (ret >= 0) {
1189 /* Use the returned socket. */
1190 outfd = ret;
1191
1192 /* Write metadata stream id before payload */
1193 if (stream->metadata_flag) {
1194 ret = write_relayd_metadata_id(outfd, stream, relayd);
1195 if (ret < 0) {
1196 written = ret;
1197 goto end;
1198 }
1199 }
1200 }
1201		/* Otherwise, use the default destination set earlier: the filesystem. */
1202 }
1203
1204 while (len > 0) {
1205 do {
1206 ret = write(outfd, stream->mmap_base + mmap_offset, len);
1207 } while (ret < 0 && errno == EINTR);
1208 if (ret < 0) {
1209 PERROR("Error in file write");
1210 if (written == 0) {
1211 written = ret;
1212 }
1213 goto end;
1214 } else if (ret > len) {
1215 PERROR("Error in file write (ret %zd > len %lu)", ret, len);
1216 written += ret;
1217 goto end;
1218 } else {
1219 len -= ret;
1220 mmap_offset += ret;
1221 }
1222 DBG("Consumer mmap write() ret %zd (len %lu)", ret, len);
1223
1224 /* This call is useless on a socket so better save a syscall. */
1225 if (!relayd) {
1226 /* This won't block, but will start writeout asynchronously */
1227 lttng_sync_file_range(outfd, stream->out_fd_offset, ret,
1228 SYNC_FILE_RANGE_WRITE);
1229 stream->out_fd_offset += ret;
1230 }
1231 written += ret;
1232 }
1233 lttng_consumer_sync_trace_file(stream, orig_offset);
1234
1235 end:
1236 /* Unlock only if ctrl socket used */
1237 if (relayd && stream->metadata_flag) {
1238 pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
1239 }
1240
1241 rcu_read_unlock();
1242 return written;
1243 }
1244
1245 /*
1246 * Splice the data from the ring buffer to the tracefile.
1247 *
1248 * Returns the number of bytes spliced.
1249 */
1250 ssize_t lttng_consumer_on_read_subbuffer_splice(
1251 struct lttng_consumer_local_data *ctx,
1252 struct lttng_consumer_stream *stream, unsigned long len)
1253 {
1254 ssize_t ret = 0, written = 0, ret_splice = 0;
1255 loff_t offset = 0;
1256 off_t orig_offset = stream->out_fd_offset;
1257 int fd = stream->wait_fd;
1258 /* Default is on the disk */
1259 int outfd = stream->out_fd;
1260 struct consumer_relayd_sock_pair *relayd = NULL;
1261
1262 switch (consumer_data.type) {
1263 case LTTNG_CONSUMER_KERNEL:
1264 break;
1265 case LTTNG_CONSUMER32_UST:
1266 case LTTNG_CONSUMER64_UST:
1267 /* Not supported for user space tracing */
1268 return -ENOSYS;
1269 default:
1270 ERR("Unknown consumer_data type");
1271 assert(0);
1272 }
1273
1274 /* RCU lock for the relayd pointer */
1275 rcu_read_lock();
1276
1277	/* Check whether the current stream is set up for network streaming. */
1278 if (stream->net_seq_idx != -1) {
1279 relayd = consumer_find_relayd(stream->net_seq_idx);
1280 if (relayd == NULL) {
1281 goto end;
1282 }
1283 }
1284
1285 /* Write metadata stream id before payload */
1286 if (stream->metadata_flag && relayd) {
1287 /*
1288 * Lock the control socket for the complete duration of the function
1289 * since from this point on we will use the socket.
1290 */
1291 pthread_mutex_lock(&relayd->ctrl_sock_mutex);
1292
1293 ret = write_relayd_metadata_id(ctx->consumer_thread_pipe[1],
1294 stream, relayd);
1295 if (ret < 0) {
1296 written = ret;
1297 goto end;
1298 }
1299 }
1300
1301 while (len > 0) {
1302 DBG("splice chan to pipe offset %lu of len %lu (fd : %d)",
1303 (unsigned long)offset, len, fd);
1304 ret_splice = splice(fd, &offset, ctx->consumer_thread_pipe[1], NULL, len,
1305 SPLICE_F_MOVE | SPLICE_F_MORE);
1306 DBG("splice chan to pipe, ret %zd", ret_splice);
1307 if (ret_splice < 0) {
1308 PERROR("Error in relay splice");
1309 if (written == 0) {
1310 written = ret_splice;
1311 }
1312 ret = errno;
1313 goto splice_error;
1314 }
1315
1316 /* Handle stream on the relayd if the output is on the network */
1317 if (relayd) {
1318 if (stream->metadata_flag) {
1319 /* Update counter to fit the spliced data */
1320 ret_splice += sizeof(stream->relayd_stream_id);
1321 len += sizeof(stream->relayd_stream_id);
1322 /*
1323 * We do this so the return value can match the len passed as
1324 * argument to this function.
1325 */
1326 written -= sizeof(stream->relayd_stream_id);
1327 }
1328
1329 ret = write_relayd_stream_header(stream, ret_splice, relayd);
1330 if (ret >= 0) {
1331 /* Use the returned socket. */
1332 outfd = ret;
1333 } else {
1334 ERR("Remote relayd disconnected. Stopping");
1335 goto end;
1336 }
1337 }
1338
1339 /* Splice data out */
1340 ret_splice = splice(ctx->consumer_thread_pipe[0], NULL, outfd, NULL,
1341 ret_splice, SPLICE_F_MOVE | SPLICE_F_MORE);
1342 DBG("Kernel consumer splice pipe to file, ret %zd", ret_splice);
1343 if (ret_splice < 0) {
1344 PERROR("Error in file splice");
1345 if (written == 0) {
1346 written = ret_splice;
1347 }
1348 ret = errno;
1349 goto splice_error;
1350 } else if (ret_splice > len) {
1351 errno = EINVAL;
1352 PERROR("Wrote more data than requested %zd (len: %lu)",
1353 ret_splice, len);
1354 written += ret_splice;
1355 ret = errno;
1356 goto splice_error;
1357 }
1358 len -= ret_splice;
1359
1360 /* This call is useless on a socket so better save a syscall. */
1361 if (!relayd) {
1362 /* This won't block, but will start writeout asynchronously */
1363 lttng_sync_file_range(outfd, stream->out_fd_offset, ret_splice,
1364 SYNC_FILE_RANGE_WRITE);
1365 stream->out_fd_offset += ret_splice;
1366 }
1367 written += ret_splice;
1368 }
1369 lttng_consumer_sync_trace_file(stream, orig_offset);
1370
1371 ret = ret_splice;
1372
1373 goto end;
1374
1375 splice_error:
1376 /* send the appropriate error description to sessiond */
1377 switch (ret) {
1378 case EBADF:
1379 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_EBADF);
1380 break;
1381 case EINVAL:
1382 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_EINVAL);
1383 break;
1384 case ENOMEM:
1385 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_ENOMEM);
1386 break;
1387 case ESPIPE:
1388 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_ESPIPE);
1389 break;
1390 }
1391
1392 end:
1393 if (relayd && stream->metadata_flag) {
1394 pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
1395 }
1396
1397 rcu_read_unlock();
1398 return written;
1399 }
1400
1401 /*
1402 * Take a snapshot for a specific fd
1403 *
1404 * Returns 0 on success, < 0 on error
1405 */
1406 int lttng_consumer_take_snapshot(struct lttng_consumer_local_data *ctx,
1407 struct lttng_consumer_stream *stream)
1408 {
1409 switch (consumer_data.type) {
1410 case LTTNG_CONSUMER_KERNEL:
1411 return lttng_kconsumer_take_snapshot(ctx, stream);
1412 case LTTNG_CONSUMER32_UST:
1413 case LTTNG_CONSUMER64_UST:
1414 return lttng_ustconsumer_take_snapshot(ctx, stream);
1415 default:
1416 ERR("Unknown consumer_data type");
1417 assert(0);
1418 return -ENOSYS;
1419 }
1420
1421 }
1422
1423 /*
1424 * Get the produced position
1425 *
1426 * Returns 0 on success, < 0 on error
1427 */
1428 int lttng_consumer_get_produced_snapshot(
1429 struct lttng_consumer_local_data *ctx,
1430 struct lttng_consumer_stream *stream,
1431 unsigned long *pos)
1432 {
1433 switch (consumer_data.type) {
1434 case LTTNG_CONSUMER_KERNEL:
1435 return lttng_kconsumer_get_produced_snapshot(ctx, stream, pos);
1436 case LTTNG_CONSUMER32_UST:
1437 case LTTNG_CONSUMER64_UST:
1438 return lttng_ustconsumer_get_produced_snapshot(ctx, stream, pos);
1439 default:
1440 ERR("Unknown consumer_data type");
1441 assert(0);
1442 return -ENOSYS;
1443 }
1444 }
1445
1446 int lttng_consumer_recv_cmd(struct lttng_consumer_local_data *ctx,
1447 int sock, struct pollfd *consumer_sockpoll)
1448 {
1449 switch (consumer_data.type) {
1450 case LTTNG_CONSUMER_KERNEL:
1451 return lttng_kconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
1452 case LTTNG_CONSUMER32_UST:
1453 case LTTNG_CONSUMER64_UST:
1454 return lttng_ustconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
1455 default:
1456 ERR("Unknown consumer_data type");
1457 assert(0);
1458 return -ENOSYS;
1459 }
1460 }
1461
1462 /*
1463 * This thread polls the fds in the set to consume the data and write
1464 * it to tracefile if necessary.
1465 */
1466 void *lttng_consumer_thread_poll_fds(void *data)
1467 {
1468 int num_rdy, num_hup, high_prio, ret, i;
1469 struct pollfd *pollfd = NULL;
1470 /* local view of the streams */
1471 struct lttng_consumer_stream **local_stream = NULL;
1472 /* local view of consumer_data.fds_count */
1473 int nb_fd = 0;
1474 struct lttng_consumer_local_data *ctx = data;
1475 struct lttng_ht *metadata_ht;
1476 struct lttng_ht_iter iter;
1477 struct lttng_ht_node_ulong *node;
1478 struct lttng_consumer_stream *metadata_stream;
1479 ssize_t len;
1480
1481 metadata_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
1482
1483 rcu_register_thread();
1484
1485 local_stream = zmalloc(sizeof(struct lttng_consumer_stream));
1486
1487 while (1) {
1488 high_prio = 0;
1489 num_hup = 0;
1490
1491 /*
1492		 * The fds set has been updated; we need to update our
1493		 * local array as well.
1494 */
1495 pthread_mutex_lock(&consumer_data.lock);
1496 if (consumer_data.need_update) {
1497 if (pollfd != NULL) {
1498 free(pollfd);
1499 pollfd = NULL;
1500 }
1501 if (local_stream != NULL) {
1502 free(local_stream);
1503 local_stream = NULL;
1504 }
1505
1506 /* allocate for all fds + 1 for the consumer_poll_pipe */
1507 pollfd = zmalloc((consumer_data.stream_count + 1) * sizeof(struct pollfd));
1508 if (pollfd == NULL) {
1509 perror("pollfd malloc");
1510 pthread_mutex_unlock(&consumer_data.lock);
1511 goto end;
1512 }
1513
1514 /* allocate for all fds + 1 for the consumer_poll_pipe */
1515 local_stream = zmalloc((consumer_data.stream_count + 1) *
1516 sizeof(struct lttng_consumer_stream));
1517 if (local_stream == NULL) {
1518 perror("local_stream malloc");
1519 pthread_mutex_unlock(&consumer_data.lock);
1520 goto end;
1521 }
1522 ret = consumer_update_poll_array(ctx, &pollfd, local_stream,
1523 metadata_ht);
1524 if (ret < 0) {
1525 ERR("Error in allocating pollfd or local_outfds");
1526 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
1527 pthread_mutex_unlock(&consumer_data.lock);
1528 goto end;
1529 }
1530 nb_fd = ret;
1531 consumer_data.need_update = 0;
1532 }
1533 pthread_mutex_unlock(&consumer_data.lock);
1534
1535		/* No FDs left and consumer_quit is set: clean up and exit the thread. */
1536 if (nb_fd == 0 && consumer_quit == 1) {
1537 goto end;
1538 }
1539 /* poll on the array of fds */
1540 restart:
1541 DBG("polling on %d fd", nb_fd + 1);
1542 num_rdy = poll(pollfd, nb_fd + 1, consumer_poll_timeout);
1543 DBG("poll num_rdy : %d", num_rdy);
1544 if (num_rdy == -1) {
1545 /*
1546 * Restart interrupted system call.
1547 */
1548 if (errno == EINTR) {
1549 goto restart;
1550 }
1551 perror("Poll error");
1552 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
1553 goto end;
1554 } else if (num_rdy == 0) {
1555 DBG("Polling thread timed out");
1556 goto end;
1557 }
1558
1559 /*
1560 * If the consumer_poll_pipe triggered poll go directly to the
1561 * beginning of the loop to update the array. We want to prioritize
1562 * array update over low-priority reads.
1563 */
1564 if (pollfd[nb_fd].revents & (POLLIN | POLLPRI)) {
1565 size_t pipe_readlen;
1566 char tmp;
1567
1568 DBG("consumer_poll_pipe wake up");
1569 /* Consume 1 byte of pipe data */
1570 do {
1571 pipe_readlen = read(ctx->consumer_poll_pipe[0], &tmp, 1);
1572 } while (pipe_readlen == -1 && errno == EINTR);
1573 continue;
1574 }
1575
1576 /* Take care of high priority channels first. */
1577 for (i = 0; i < nb_fd; i++) {
1578			/* Look up metadata, which has the highest priority. */
1579 lttng_ht_lookup(metadata_ht,
1580 (void *)((unsigned long) pollfd[i].fd), &iter);
1581 node = lttng_ht_iter_get_node_ulong(&iter);
1582 if (node != NULL &&
1583 (pollfd[i].revents & (POLLIN | POLLPRI))) {
1584 DBG("Urgent metadata read on fd %d", pollfd[i].fd);
1585 metadata_stream = caa_container_of(node,
1586 struct lttng_consumer_stream, waitfd_node);
1587 high_prio = 1;
1588 len = ctx->on_buffer_ready(metadata_stream, ctx);
1589 /* it's ok to have an unavailable sub-buffer */
1590 if (len < 0 && len != -EAGAIN) {
1591 goto end;
1592 } else if (len > 0) {
1593 metadata_stream->data_read = 1;
1594 }
1595 } else if (pollfd[i].revents & POLLPRI) {
1596 DBG("Urgent read on fd %d", pollfd[i].fd);
1597 high_prio = 1;
1598 len = ctx->on_buffer_ready(local_stream[i], ctx);
1599 /* it's ok to have an unavailable sub-buffer */
1600 if (len < 0 && len != -EAGAIN) {
1601 goto end;
1602 } else if (len > 0) {
1603 local_stream[i]->data_read = 1;
1604 }
1605 }
1606 }
1607
1608 /*
1609 * If we read high prio channel in this loop, try again
1610 * for more high prio data.
1611 */
1612 if (high_prio) {
1613 continue;
1614 }
1615
1616 /* Take care of low priority channels. */
1617 for (i = 0; i < nb_fd; i++) {
1618 if ((pollfd[i].revents & POLLIN) ||
1619 local_stream[i]->hangup_flush_done) {
1620 DBG("Normal read on fd %d", pollfd[i].fd);
1621 len = ctx->on_buffer_ready(local_stream[i], ctx);
1622 /* it's ok to have an unavailable sub-buffer */
1623 if (len < 0 && len != -EAGAIN) {
1624 goto end;
1625 } else if (len > 0) {
1626 local_stream[i]->data_read = 1;
1627 }
1628 }
1629 }
1630
1631 /* Handle hangup and errors */
1632 for (i = 0; i < nb_fd; i++) {
1633 if (!local_stream[i]->hangup_flush_done
1634 && (pollfd[i].revents & (POLLHUP | POLLERR | POLLNVAL))
1635 && (consumer_data.type == LTTNG_CONSUMER32_UST
1636 || consumer_data.type == LTTNG_CONSUMER64_UST)) {
1637 DBG("fd %d is hup|err|nval. Attempting flush and read.",
1638 pollfd[i].fd);
1639 lttng_ustconsumer_on_stream_hangup(local_stream[i]);
1640 /* Attempt read again, for the data we just flushed. */
1641 local_stream[i]->data_read = 1;
1642 }
1643 /*
1644 * If the poll flag is HUP/ERR/NVAL and we have
1645 * read no data in this pass, we can remove the
1646 * stream from its hash table.
1647 */
1648 if ((pollfd[i].revents & POLLHUP)) {
1649 DBG("Polling fd %d tells it has hung up.", pollfd[i].fd);
1650 if (!local_stream[i]->data_read) {
1651 if (local_stream[i]->metadata_flag) {
1652 iter.iter.node = &local_stream[i]->waitfd_node.node;
1653 ret = lttng_ht_del(metadata_ht, &iter);
1654 assert(!ret);
1655 }
1656 consumer_del_stream(local_stream[i]);
1657 num_hup++;
1658 }
1659 } else if (pollfd[i].revents & POLLERR) {
1660 ERR("Error returned in polling fd %d.", pollfd[i].fd);
1661 if (!local_stream[i]->data_read) {
1662 if (local_stream[i]->metadata_flag) {
1663 iter.iter.node = &local_stream[i]->waitfd_node.node;
1664 ret = lttng_ht_del(metadata_ht, &iter);
1665 assert(!ret);
1666 }
1667 consumer_del_stream(local_stream[i]);
1668 num_hup++;
1669 }
1670 } else if (pollfd[i].revents & POLLNVAL) {
1671 ERR("Polling fd %d tells fd is not open.", pollfd[i].fd);
1672 if (!local_stream[i]->data_read) {
1673 if (local_stream[i]->metadata_flag) {
1674 iter.iter.node = &local_stream[i]->waitfd_node.node;
1675 ret = lttng_ht_del(metadata_ht, &iter);
1676 assert(!ret);
1677 }
1678 consumer_del_stream(local_stream[i]);
1679 num_hup++;
1680 }
1681 }
1682 local_stream[i]->data_read = 0;
1683 }
1684 }
1685 end:
1686 DBG("polling thread exiting");
1687 if (pollfd != NULL) {
1688 free(pollfd);
1689 pollfd = NULL;
1690 }
1691 if (local_stream != NULL) {
1692 free(local_stream);
1693 local_stream = NULL;
1694 }
1695 rcu_unregister_thread();
1696 return NULL;
1697 }
1698
1699 /*
1700 * This thread listens on the consumerd socket and receives the file
1701 * descriptors from the session daemon.
1702 */
1703 void *lttng_consumer_thread_receive_fds(void *data)
1704 {
1705 int sock, client_socket, ret;
1706 /*
1707	 * Structure used to poll for incoming data on the communication socket;
1708	 * this avoids blocking on the socket.
1709 */
1710 struct pollfd consumer_sockpoll[2];
1711 struct lttng_consumer_local_data *ctx = data;
1712
1713 rcu_register_thread();
1714
1715 DBG("Creating command socket %s", ctx->consumer_command_sock_path);
1716 unlink(ctx->consumer_command_sock_path);
1717 client_socket = lttcomm_create_unix_sock(ctx->consumer_command_sock_path);
1718 if (client_socket < 0) {
1719 ERR("Cannot create command socket");
1720 goto end;
1721 }
1722
1723 ret = lttcomm_listen_unix_sock(client_socket);
1724 if (ret < 0) {
1725 goto end;
1726 }
1727
1728 DBG("Sending ready command to lttng-sessiond");
1729 ret = lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_COMMAND_SOCK_READY);
1730 /* return < 0 on error, but == 0 is not fatal */
1731 if (ret < 0) {
1732 ERR("Error sending ready command to lttng-sessiond");
1733 goto end;
1734 }
1735
1736 ret = fcntl(client_socket, F_SETFL, O_NONBLOCK);
1737 if (ret < 0) {
1738 perror("fcntl O_NONBLOCK");
1739 goto end;
1740 }
1741
1742	/* Prepare the FDs to poll: the client socket and the should_quit pipe. */
1743 consumer_sockpoll[0].fd = ctx->consumer_should_quit[0];
1744 consumer_sockpoll[0].events = POLLIN | POLLPRI;
1745 consumer_sockpoll[1].fd = client_socket;
1746 consumer_sockpoll[1].events = POLLIN | POLLPRI;
1747
1748 if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
1749 goto end;
1750 }
1751 DBG("Connection on client_socket");
1752
1753 /* Blocking call, waiting for transmission */
1754 sock = lttcomm_accept_unix_sock(client_socket);
1755 if (sock <= 0) {
1756 WARN("On accept");
1757 goto end;
1758 }
1759 ret = fcntl(sock, F_SETFL, O_NONBLOCK);
1760 if (ret < 0) {
1761 perror("fcntl O_NONBLOCK");
1762 goto end;
1763 }
1764
1765 /* update the polling structure to poll on the established socket */
1766 consumer_sockpoll[1].fd = sock;
1767 consumer_sockpoll[1].events = POLLIN | POLLPRI;
1768
1769 while (1) {
1770 if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
1771 goto end;
1772 }
1773 DBG("Incoming command on sock");
1774 ret = lttng_consumer_recv_cmd(ctx, sock, consumer_sockpoll);
1775 if (ret == -ENOENT) {
1776 DBG("Received STOP command");
1777 goto end;
1778 }
1779 if (ret < 0) {
1780 ERR("Communication interrupted on command socket");
1781 goto end;
1782 }
1783 if (consumer_quit) {
1784 DBG("consumer_thread_receive_fds received quit from signal");
1785 goto end;
1786 }
1787 DBG("received fds on sock");
1788 }
1789 end:
1790 DBG("consumer_thread_receive_fds exiting");
1791
1792 /*
1793 * when all fds have hung up, the polling thread
1794 * can exit cleanly
1795 */
1796 consumer_quit = 1;
1797
1798 /*
1799 * 2s of grace period, if no polling events occur during
1800 * this period, the polling thread will exit even if there
1801 * are still open FDs (should not happen, but safety mechanism).
1802 */
1803 consumer_poll_timeout = LTTNG_CONSUMER_POLL_TIMEOUT;
1804
1805 /*
1806 * Wake-up the other end by writing a null byte in the pipe
1807 * (non-blocking). Important note: Because writing into the
1808 * pipe is non-blocking (and therefore we allow dropping wakeup
1809 * data, as long as there is wakeup data present in the pipe
1810 * buffer to wake up the other end), the other end should
1811 * perform the following sequence for waiting:
1812 * 1) empty the pipe (reads).
1813 * 2) perform update operation.
1814 * 3) wait on the pipe (poll).
1815 */
1816 do {
1817 ret = write(ctx->consumer_poll_pipe[1], "", 1);
1818 } while (ret < 0 && errno == EINTR);
1819 rcu_unregister_thread();
1820 return NULL;
1821 }
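
/*
 * Illustrative sketch (assumption, modelled on how the consumer daemon is
 * expected to start the two thread functions above). "ctx" is the context
 * returned by lttng_consumer_create(); error handling is elided.
 *
 *	pthread_t poll_thread, sessiond_thread;
 *
 *	(void) pthread_create(&sessiond_thread, NULL,
 *			lttng_consumer_thread_receive_fds, (void *) ctx);
 *	(void) pthread_create(&poll_thread, NULL,
 *			lttng_consumer_thread_poll_fds, (void *) ctx);
 *	...
 *	(void) pthread_join(sessiond_thread, NULL);
 *	(void) pthread_join(poll_thread, NULL);
 *	lttng_consumer_destroy(ctx);
 *	lttng_consumer_cleanup();
 */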
1822
1823 ssize_t lttng_consumer_read_subbuffer(struct lttng_consumer_stream *stream,
1824 struct lttng_consumer_local_data *ctx)
1825 {
1826 switch (consumer_data.type) {
1827 case LTTNG_CONSUMER_KERNEL:
1828 return lttng_kconsumer_read_subbuffer(stream, ctx);
1829 case LTTNG_CONSUMER32_UST:
1830 case LTTNG_CONSUMER64_UST:
1831 return lttng_ustconsumer_read_subbuffer(stream, ctx);
1832 default:
1833 ERR("Unknown consumer_data type");
1834 assert(0);
1835 return -ENOSYS;
1836 }
1837 }
1838
1839 int lttng_consumer_on_recv_stream(struct lttng_consumer_stream *stream)
1840 {
1841 switch (consumer_data.type) {
1842 case LTTNG_CONSUMER_KERNEL:
1843 return lttng_kconsumer_on_recv_stream(stream);
1844 case LTTNG_CONSUMER32_UST:
1845 case LTTNG_CONSUMER64_UST:
1846 return lttng_ustconsumer_on_recv_stream(stream);
1847 default:
1848 ERR("Unknown consumer_data type");
1849 assert(0);
1850 return -ENOSYS;
1851 }
1852 }
1853
1854 /*
1855 * Allocate and set consumer data hash tables.
1856 */
1857 void lttng_consumer_init(void)
1858 {
1859 consumer_data.stream_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
1860 consumer_data.channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
1861 consumer_data.relayd_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
1862 }
1863
1864 /*
1865	 * Process the ADD_RELAYD command received by a consumer.
1866	 *
1867	 * This will create a relayd socket pair and add it to the relayd hash table.
1868	 * The caller MUST acquire an RCU read side lock before calling it.
1869 */
1870 int consumer_add_relayd_socket(int net_seq_idx, int sock_type,
1871 struct lttng_consumer_local_data *ctx, int sock,
1872 struct pollfd *consumer_sockpoll, struct lttcomm_sock *relayd_sock)
1873 {
1874 int fd, ret = -1;
1875 struct consumer_relayd_sock_pair *relayd;
1876
1877 DBG("Consumer adding relayd socket (idx: %d)", net_seq_idx);
1878
1879 /* Get relayd reference if exists. */
1880 relayd = consumer_find_relayd(net_seq_idx);
1881 if (relayd == NULL) {
1882 /* Not found. Allocate one. */
1883 relayd = consumer_allocate_relayd_sock_pair(net_seq_idx);
1884 if (relayd == NULL) {
1885 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
1886 goto error;
1887 }
1888 }
1889
1890 /* Poll on consumer socket. */
1891 if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
1892 ret = -EINTR;
1893 goto error;
1894 }
1895
1896 /* Get relayd socket from session daemon */
1897 ret = lttcomm_recv_fds_unix_sock(sock, &fd, 1);
1898 if (ret != sizeof(fd)) {
1899 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_FD);
1900 ret = -1;
1901 goto error;
1902 }
1903
1904 /* Copy socket information and received FD */
1905 switch (sock_type) {
1906 case LTTNG_STREAM_CONTROL:
1907 /* Copy received lttcomm socket */
1908 lttcomm_copy_sock(&relayd->control_sock, relayd_sock);
1909 ret = lttcomm_create_sock(&relayd->control_sock);
1910 if (ret < 0) {
1911 goto error;
1912 }
1913
1914 /* Close the created socket fd which is useless */
1915 close(relayd->control_sock.fd);
1916
1917 /* Assign new file descriptor */
1918 relayd->control_sock.fd = fd;
1919 break;
1920 case LTTNG_STREAM_DATA:
1921 /* Copy received lttcomm socket */
1922 lttcomm_copy_sock(&relayd->data_sock, relayd_sock);
1923 ret = lttcomm_create_sock(&relayd->data_sock);
1924 if (ret < 0) {
1925 goto error;
1926 }
1927
1928 /* Close the created socket fd which is useless */
1929 close(relayd->data_sock.fd);
1930
1931 /* Assign new file descriptor */
1932 relayd->data_sock.fd = fd;
1933 break;
1934 default:
1935 ERR("Unknown relayd socket type (%d)", sock_type);
1936 goto error;
1937 }
1938
1939 DBG("Consumer %s socket created successfully with net idx %d (fd: %d)",
1940 sock_type == LTTNG_STREAM_CONTROL ? "control" : "data",
1941 relayd->net_seq_idx, fd);
1942
1943 /*
1944 * Add relayd socket pair to consumer data hashtable. If object already
1945 * exists or on error, the function gracefully returns.
1946 */
1947 consumer_add_relayd(relayd);
1948
1949 /* All good! */
1950 ret = 0;
1951
1952 error:
1953 return ret;
1954 }