7622a22537738b2164f57d8fb15caf0b46963606
[lttng-tools.git] / src / common / kernel-consumer / kernel-consumer.c
1 /*
2 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2 only,
7 * as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
17 */
18
19 #define _GNU_SOURCE
20 #define _LGPL_SOURCE
21 #include <assert.h>
22 #include <poll.h>
23 #include <pthread.h>
24 #include <stdlib.h>
25 #include <string.h>
26 #include <sys/mman.h>
27 #include <sys/socket.h>
28 #include <sys/types.h>
29 #include <inttypes.h>
30 #include <unistd.h>
31 #include <sys/stat.h>
32
33 #include <bin/lttng-consumerd/health-consumerd.h>
34 #include <common/common.h>
35 #include <common/kernel-ctl/kernel-ctl.h>
36 #include <common/sessiond-comm/sessiond-comm.h>
37 #include <common/sessiond-comm/relayd.h>
38 #include <common/compat/fcntl.h>
39 #include <common/compat/endian.h>
40 #include <common/pipe.h>
41 #include <common/relayd/relayd.h>
42 #include <common/utils.h>
43 #include <common/consumer-stream.h>
44 #include <common/index/index.h>
45 #include <common/consumer-timer.h>
46
47 #include "kernel-consumer.h"
48
49 extern struct lttng_consumer_global_data consumer_data;
50 extern int consumer_poll_timeout;
51 extern volatile int consumer_quit;
52
53 /*
54 * Take a snapshot for a specific fd
55 *
56 * Returns 0 on success, < 0 on error
57 */
58 int lttng_kconsumer_take_snapshot(struct lttng_consumer_stream *stream)
59 {
60 int ret = 0;
61 int infd = stream->wait_fd;
62
63 ret = kernctl_snapshot(infd);
64 if (ret != 0) {
65 PERROR("Getting sub-buffer snapshot.");
66 ret = -errno;
67 }
68
69 return ret;
70 }
71
72 /*
73 * Get the produced position
74 *
75 * Returns 0 on success, < 0 on error
76 */
77 int lttng_kconsumer_get_produced_snapshot(struct lttng_consumer_stream *stream,
78 unsigned long *pos)
79 {
80 int ret;
81 int infd = stream->wait_fd;
82
83 ret = kernctl_snapshot_get_produced(infd, pos);
84 if (ret != 0) {
85 PERROR("kernctl_snapshot_get_produced");
86 ret = -errno;
87 }
88
89 return ret;
90 }
91
92 /*
93 * Get the consumerd position
94 *
95 * Returns 0 on success, < 0 on error
96 */
97 int lttng_kconsumer_get_consumed_snapshot(struct lttng_consumer_stream *stream,
98 unsigned long *pos)
99 {
100 int ret;
101 int infd = stream->wait_fd;
102
103 ret = kernctl_snapshot_get_consumed(infd, pos);
104 if (ret != 0) {
105 PERROR("kernctl_snapshot_get_consumed");
106 ret = -errno;
107 }
108
109 return ret;
110 }
111
/*
 * Take a snapshot of all the streams of a channel.
 *
 * Only mmap output channels are supported (splice is rejected). For each
 * stream, the data between the consumed and produced positions is either
 * sent to the relayd (relayd_id != -1ULL) or written to a local tracefile
 * created under `path`.
 *
 * Returns 0 on success, < 0 on error.
 */
int lttng_kconsumer_snapshot_channel(uint64_t key, char *path,
		uint64_t relayd_id, uint64_t nb_packets_per_stream,
		struct lttng_consumer_local_data *ctx)
{
	int ret;
	unsigned long consumed_pos, produced_pos;
	struct lttng_consumer_channel *channel;
	struct lttng_consumer_stream *stream;

	DBG("Kernel consumer snapshot channel %" PRIu64, key);

	/* RCU read-side protects the channel/stream lookups below. */
	rcu_read_lock();

	channel = consumer_find_channel(key);
	if (!channel) {
		ERR("No channel found for key %" PRIu64, key);
		ret = -1;
		goto end;
	}

	/* Splice is not supported yet for channel snapshot. */
	if (channel->output != CONSUMER_CHANNEL_MMAP) {
		ERR("Unsupported output %d", channel->output);
		ret = -1;
		goto end;
	}

	cds_list_for_each_entry(stream, &channel->streams.head, send_node) {

		health_code_update();

		/*
		 * Lock stream because we are about to change its state.
		 */
		pthread_mutex_lock(&stream->lock);

		/*
		 * Assign the received relayd ID so we can use it for streaming. The streams
		 * are not visible to anyone so this is OK to change it.
		 */
		stream->net_seq_idx = relayd_id;
		channel->relayd_id = relayd_id;
		if (relayd_id != (uint64_t) -1ULL) {
			/* Streaming snapshot: register the stream with the relayd. */
			ret = consumer_send_relayd_stream(stream, path);
			if (ret < 0) {
				ERR("sending stream to relayd");
				goto end_unlock;
			}
		} else {
			/* Local snapshot: create the per-stream output file. */
			ret = utils_create_stream_file(path, stream->name,
					stream->chan->tracefile_size,
					stream->tracefile_count_current,
					stream->uid, stream->gid, NULL);
			if (ret < 0) {
				ERR("utils_create_stream_file");
				goto end_unlock;
			}

			stream->out_fd = ret;
			stream->tracefile_size_current = 0;

			DBG("Kernel consumer snapshot stream %s/%s (%" PRIu64 ")",
					path, stream->name, stream->key);
		}
		/*
		 * NOTE(review): this notification is emitted once per stream of the
		 * loop (and without the (uint64_t) cast used by the other relayd_id
		 * comparisons). Presumably the relayd tolerates repeated
		 * "streams sent" messages — verify against the relayd protocol.
		 */
		if (relayd_id != -1ULL) {
			ret = consumer_send_relayd_streams_sent(relayd_id);
			if (ret < 0) {
				ERR("sending streams sent to relayd");
				goto end_unlock;
			}
		}

		/* Flush pending data so the snapshot captures everything produced. */
		ret = kernctl_buffer_flush(stream->wait_fd);
		if (ret < 0) {
			ERR("Failed to flush kernel stream");
			ret = -errno;
			goto end_unlock;
		}

		ret = lttng_kconsumer_take_snapshot(stream);
		if (ret < 0) {
			ERR("Taking kernel snapshot");
			goto end_unlock;
		}

		ret = lttng_kconsumer_get_produced_snapshot(stream, &produced_pos);
		if (ret < 0) {
			ERR("Produced kernel snapshot position");
			goto end_unlock;
		}

		ret = lttng_kconsumer_get_consumed_snapshot(stream, &consumed_pos);
		if (ret < 0) {
			ERR("Consumerd kernel snapshot position");
			goto end_unlock;
		}

		/* Lazily cache the max sub-buffer size on first use. */
		if (stream->max_sb_size == 0) {
			ret = kernctl_get_max_subbuf_size(stream->wait_fd,
					&stream->max_sb_size);
			if (ret < 0) {
				ERR("Getting kernel max_sb_size");
				ret = -errno;
				goto end_unlock;
			}
		}

		/*
		 * Clamp the start position so at most nb_packets_per_stream
		 * packets are captured (0 means no limit).
		 */
		consumed_pos = consumer_get_consume_start_pos(consumed_pos,
				produced_pos, nb_packets_per_stream,
				stream->max_sb_size);

		/* Walk the captured range one sub-buffer at a time. */
		while (consumed_pos < produced_pos) {
			ssize_t read_len;
			unsigned long len, padded_len;

			health_code_update();

			DBG("Kernel consumer taking snapshot at pos %lu", consumed_pos);

			ret = kernctl_get_subbuf(stream->wait_fd, &consumed_pos);
			if (ret < 0) {
				if (errno != EAGAIN) {
					PERROR("kernctl_get_subbuf snapshot");
					ret = -errno;
					goto end_unlock;
				}
				/* EAGAIN: sub-buffer not available, skip it. */
				DBG("Kernel consumer get subbuf failed. Skipping it.");
				consumed_pos += stream->max_sb_size;
				continue;
			}

			ret = kernctl_get_subbuf_size(stream->wait_fd, &len);
			if (ret < 0) {
				ERR("Snapshot kernctl_get_subbuf_size");
				ret = -errno;
				goto error_put_subbuf;
			}

			ret = kernctl_get_padded_subbuf_size(stream->wait_fd, &padded_len);
			if (ret < 0) {
				ERR("Snapshot kernctl_get_padded_subbuf_size");
				ret = -errno;
				goto error_put_subbuf;
			}

			read_len = lttng_consumer_on_read_subbuffer_mmap(ctx, stream, len,
					padded_len - len, NULL);
			/*
			 * We write the padded len in local tracefiles but the data len
			 * when using a relay. Display the error but continue processing
			 * to try to release the subbuffer.
			 */
			if (relayd_id != (uint64_t) -1ULL) {
				if (read_len != len) {
					ERR("Error sending to the relay (ret: %zd != len: %lu)",
							read_len, len);
				}
			} else {
				if (read_len != padded_len) {
					ERR("Error writing to tracefile (ret: %zd != len: %lu)",
							read_len, padded_len);
				}
			}

			/* Release the sub-buffer back to the tracer. */
			ret = kernctl_put_subbuf(stream->wait_fd);
			if (ret < 0) {
				ERR("Snapshot kernctl_put_subbuf");
				ret = -errno;
				goto end_unlock;
			}
			consumed_pos += stream->max_sb_size;
		}

		/* Tear down the per-stream output set up above. */
		if (relayd_id == (uint64_t) -1ULL) {
			if (stream->out_fd >= 0) {
				ret = close(stream->out_fd);
				if (ret < 0) {
					PERROR("Kernel consumer snapshot close out_fd");
					goto end_unlock;
				}
				stream->out_fd = -1;
			}
		} else {
			close_relayd_stream(stream);
			stream->net_seq_idx = (uint64_t) -1ULL;
		}
		pthread_mutex_unlock(&stream->lock);
	}

	/* All good! */
	ret = 0;
	goto end;

error_put_subbuf:
	/* Best-effort release of the acquired sub-buffer before bailing out. */
	ret = kernctl_put_subbuf(stream->wait_fd);
	if (ret < 0) {
		ret = -errno;
		ERR("Snapshot kernctl_put_subbuf error path");
	}
end_unlock:
	pthread_mutex_unlock(&stream->lock);
end:
	rcu_read_unlock();
	return ret;
}
322
/*
 * Read the whole metadata available for a snapshot.
 *
 * The metadata stream of the channel identified by `key` is drained with
 * lttng_kconsumer_read_subbuffer() until it is empty, sending the output to
 * the relayd (relayd_id != -1ULL) or to a local file under `path`. On
 * success the metadata stream is removed from the channel and destroyed.
 *
 * Returns 0 on success, < 0 on error.
 */
int lttng_kconsumer_snapshot_metadata(uint64_t key, char *path,
		uint64_t relayd_id, struct lttng_consumer_local_data *ctx)
{
	int ret, use_relayd = 0;
	ssize_t ret_read;
	struct lttng_consumer_channel *metadata_channel;
	struct lttng_consumer_stream *metadata_stream;

	assert(ctx);

	DBG("Kernel consumer snapshot metadata with key %" PRIu64 " at path %s",
			key, path);

	/* RCU read-side protects the channel lookup below. */
	rcu_read_lock();

	metadata_channel = consumer_find_channel(key);
	if (!metadata_channel) {
		ERR("Kernel snapshot metadata not found for key %" PRIu64, key);
		ret = -1;
		goto error;
	}

	metadata_stream = metadata_channel->metadata_stream;
	assert(metadata_stream);

	/* Flag once that we have a valid relayd for the stream. */
	if (relayd_id != (uint64_t) -1ULL) {
		use_relayd = 1;
	}

	if (use_relayd) {
		ret = consumer_send_relayd_stream(metadata_stream, path);
		if (ret < 0) {
			goto error;
		}
	} else {
		ret = utils_create_stream_file(path, metadata_stream->name,
				metadata_stream->chan->tracefile_size,
				metadata_stream->tracefile_count_current,
				metadata_stream->uid, metadata_stream->gid, NULL);
		if (ret < 0) {
			goto error;
		}
		metadata_stream->out_fd = ret;
	}

	/* Drain the metadata stream until it reports no more data. */
	do {
		health_code_update();

		ret_read = lttng_kconsumer_read_subbuffer(metadata_stream, ctx);
		if (ret_read < 0) {
			if (ret_read != -EAGAIN) {
				ERR("Kernel snapshot reading metadata subbuffer (ret: %zd)",
						ret_read);
				/*
				 * NOTE(review): this error path does not close out_fd nor
				 * destroy the metadata stream — presumably cleaned up by the
				 * caller/teardown; verify there is no fd/stream leak here.
				 */
				goto error;
			}
			/* ret_read is negative at this point so we will exit the loop. */
			continue;
		}
	} while (ret_read >= 0);

	if (use_relayd) {
		close_relayd_stream(metadata_stream);
		metadata_stream->net_seq_idx = (uint64_t) -1ULL;
	} else {
		if (metadata_stream->out_fd >= 0) {
			ret = close(metadata_stream->out_fd);
			if (ret < 0) {
				PERROR("Kernel consumer snapshot metadata close out_fd");
				/*
				 * Don't go on error here since the snapshot was successful at this
				 * point but somehow the close failed.
				 */
			}
			metadata_stream->out_fd = -1;
		}
	}

	ret = 0;

	/* The snapshot metadata stream is one-shot: drop and destroy it. */
	cds_list_del(&metadata_stream->send_node);
	consumer_stream_destroy(metadata_stream, NULL);
	metadata_channel->metadata_stream = NULL;
error:
	rcu_read_unlock();
	return ret;
}
415
416 /*
417 * Receive command from session daemon and process it.
418 *
419 * Return 1 on success else a negative value or 0.
420 */
421 int lttng_kconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
422 int sock, struct pollfd *consumer_sockpoll)
423 {
424 ssize_t ret;
425 enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
426 struct lttcomm_consumer_msg msg;
427
428 health_code_update();
429
430 ret = lttcomm_recv_unix_sock(sock, &msg, sizeof(msg));
431 if (ret != sizeof(msg)) {
432 if (ret > 0) {
433 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_CMD);
434 ret = -1;
435 }
436 return ret;
437 }
438
439 health_code_update();
440
441 /* Deprecated command */
442 assert(msg.cmd_type != LTTNG_CONSUMER_STOP);
443
444 health_code_update();
445
446 /* relayd needs RCU read-side protection */
447 rcu_read_lock();
448
449 switch (msg.cmd_type) {
450 case LTTNG_CONSUMER_ADD_RELAYD_SOCKET:
451 {
452 /* Session daemon status message are handled in the following call. */
453 ret = consumer_add_relayd_socket(msg.u.relayd_sock.net_index,
454 msg.u.relayd_sock.type, ctx, sock, consumer_sockpoll,
455 &msg.u.relayd_sock.sock, msg.u.relayd_sock.session_id,
456 msg.u.relayd_sock.relayd_session_id);
457 goto end_nosignal;
458 }
459 case LTTNG_CONSUMER_ADD_CHANNEL:
460 {
461 struct lttng_consumer_channel *new_channel;
462 int ret_recv;
463
464 health_code_update();
465
466 /* First send a status message before receiving the fds. */
467 ret = consumer_send_status_msg(sock, ret_code);
468 if (ret < 0) {
469 /* Somehow, the session daemon is not responding anymore. */
470 goto error_fatal;
471 }
472
473 health_code_update();
474
475 DBG("consumer_add_channel %" PRIu64, msg.u.channel.channel_key);
476 new_channel = consumer_allocate_channel(msg.u.channel.channel_key,
477 msg.u.channel.session_id, msg.u.channel.pathname,
478 msg.u.channel.name, msg.u.channel.uid, msg.u.channel.gid,
479 msg.u.channel.relayd_id, msg.u.channel.output,
480 msg.u.channel.tracefile_size,
481 msg.u.channel.tracefile_count, 0,
482 msg.u.channel.monitor,
483 msg.u.channel.live_timer_interval);
484 if (new_channel == NULL) {
485 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
486 goto end_nosignal;
487 }
488 new_channel->nb_init_stream_left = msg.u.channel.nb_init_streams;
489 switch (msg.u.channel.output) {
490 case LTTNG_EVENT_SPLICE:
491 new_channel->output = CONSUMER_CHANNEL_SPLICE;
492 break;
493 case LTTNG_EVENT_MMAP:
494 new_channel->output = CONSUMER_CHANNEL_MMAP;
495 break;
496 default:
497 ERR("Channel output unknown %d", msg.u.channel.output);
498 goto end_nosignal;
499 }
500
501 /* Translate and save channel type. */
502 switch (msg.u.channel.type) {
503 case CONSUMER_CHANNEL_TYPE_DATA:
504 case CONSUMER_CHANNEL_TYPE_METADATA:
505 new_channel->type = msg.u.channel.type;
506 break;
507 default:
508 assert(0);
509 goto end_nosignal;
510 };
511
512 health_code_update();
513
514 if (ctx->on_recv_channel != NULL) {
515 ret_recv = ctx->on_recv_channel(new_channel);
516 if (ret_recv == 0) {
517 ret = consumer_add_channel(new_channel, ctx);
518 } else if (ret_recv < 0) {
519 goto end_nosignal;
520 }
521 } else {
522 ret = consumer_add_channel(new_channel, ctx);
523 }
524 if (CONSUMER_CHANNEL_TYPE_DATA) {
525 consumer_timer_live_start(new_channel,
526 msg.u.channel.live_timer_interval);
527 }
528
529 health_code_update();
530
531 /* If we received an error in add_channel, we need to report it. */
532 if (ret < 0) {
533 ret = consumer_send_status_msg(sock, ret);
534 if (ret < 0) {
535 goto error_fatal;
536 }
537 goto end_nosignal;
538 }
539
540 goto end_nosignal;
541 }
542 case LTTNG_CONSUMER_ADD_STREAM:
543 {
544 int fd;
545 struct lttng_pipe *stream_pipe;
546 struct lttng_consumer_stream *new_stream;
547 struct lttng_consumer_channel *channel;
548 int alloc_ret = 0;
549
550 /*
551 * Get stream's channel reference. Needed when adding the stream to the
552 * global hash table.
553 */
554 channel = consumer_find_channel(msg.u.stream.channel_key);
555 if (!channel) {
556 /*
557 * We could not find the channel. Can happen if cpu hotplug
558 * happens while tearing down.
559 */
560 ERR("Unable to find channel key %" PRIu64, msg.u.stream.channel_key);
561 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
562 }
563
564 health_code_update();
565
566 /* First send a status message before receiving the fds. */
567 ret = consumer_send_status_msg(sock, ret_code);
568 if (ret < 0) {
569 /* Somehow, the session daemon is not responding anymore. */
570 goto error_fatal;
571 }
572
573 health_code_update();
574
575 if (ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
576 /* Channel was not found. */
577 goto end_nosignal;
578 }
579
580 /* Blocking call */
581 health_poll_entry();
582 ret = lttng_consumer_poll_socket(consumer_sockpoll);
583 health_poll_exit();
584 if (ret) {
585 goto error_fatal;
586 }
587
588 health_code_update();
589
590 /* Get stream file descriptor from socket */
591 ret = lttcomm_recv_fds_unix_sock(sock, &fd, 1);
592 if (ret != sizeof(fd)) {
593 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_FD);
594 rcu_read_unlock();
595 return ret;
596 }
597
598 health_code_update();
599
600 /*
601 * Send status code to session daemon only if the recv works. If the
602 * above recv() failed, the session daemon is notified through the
603 * error socket and the teardown is eventually done.
604 */
605 ret = consumer_send_status_msg(sock, ret_code);
606 if (ret < 0) {
607 /* Somehow, the session daemon is not responding anymore. */
608 goto end_nosignal;
609 }
610
611 health_code_update();
612
613 new_stream = consumer_allocate_stream(channel->key,
614 fd,
615 LTTNG_CONSUMER_ACTIVE_STREAM,
616 channel->name,
617 channel->uid,
618 channel->gid,
619 channel->relayd_id,
620 channel->session_id,
621 msg.u.stream.cpu,
622 &alloc_ret,
623 channel->type,
624 channel->monitor);
625 if (new_stream == NULL) {
626 switch (alloc_ret) {
627 case -ENOMEM:
628 case -EINVAL:
629 default:
630 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
631 break;
632 }
633 goto end_nosignal;
634 }
635
636 new_stream->chan = channel;
637 new_stream->wait_fd = fd;
638 switch (channel->output) {
639 case CONSUMER_CHANNEL_SPLICE:
640 new_stream->output = LTTNG_EVENT_SPLICE;
641 ret = utils_create_pipe(new_stream->splice_pipe);
642 if (ret < 0) {
643 goto end_nosignal;
644 }
645 break;
646 case CONSUMER_CHANNEL_MMAP:
647 new_stream->output = LTTNG_EVENT_MMAP;
648 break;
649 default:
650 ERR("Stream output unknown %d", channel->output);
651 goto end_nosignal;
652 }
653
654 /*
655 * We've just assigned the channel to the stream so increment the
656 * refcount right now. We don't need to increment the refcount for
657 * streams in no monitor because we handle manually the cleanup of
658 * those. It is very important to make sure there is NO prior
659 * consumer_del_stream() calls or else the refcount will be unbalanced.
660 */
661 if (channel->monitor) {
662 uatomic_inc(&new_stream->chan->refcount);
663 }
664
665 /*
666 * The buffer flush is done on the session daemon side for the kernel
667 * so no need for the stream "hangup_flush_done" variable to be
668 * tracked. This is important for a kernel stream since we don't rely
669 * on the flush state of the stream to read data. It's not the case for
670 * user space tracing.
671 */
672 new_stream->hangup_flush_done = 0;
673
674 health_code_update();
675
676 if (ctx->on_recv_stream) {
677 ret = ctx->on_recv_stream(new_stream);
678 if (ret < 0) {
679 consumer_stream_free(new_stream);
680 goto end_nosignal;
681 }
682 }
683
684 health_code_update();
685
686 if (new_stream->metadata_flag) {
687 channel->metadata_stream = new_stream;
688 }
689
690 /* Do not monitor this stream. */
691 if (!channel->monitor) {
692 DBG("Kernel consumer add stream %s in no monitor mode with "
693 "relayd id %" PRIu64, new_stream->name,
694 new_stream->net_seq_idx);
695 cds_list_add(&new_stream->send_node, &channel->streams.head);
696 break;
697 }
698
699 /* Send stream to relayd if the stream has an ID. */
700 if (new_stream->net_seq_idx != (uint64_t) -1ULL) {
701 ret = consumer_send_relayd_stream(new_stream,
702 new_stream->chan->pathname);
703 if (ret < 0) {
704 consumer_stream_free(new_stream);
705 goto end_nosignal;
706 }
707 }
708
709 /* Get the right pipe where the stream will be sent. */
710 if (new_stream->metadata_flag) {
711 ret = consumer_add_metadata_stream(new_stream);
712 if (ret) {
713 ERR("Consumer add metadata stream %" PRIu64 " failed. Continuing",
714 new_stream->key);
715 consumer_stream_free(new_stream);
716 goto end_nosignal;
717 }
718 stream_pipe = ctx->consumer_metadata_pipe;
719 } else {
720 ret = consumer_add_data_stream(new_stream);
721 if (ret) {
722 ERR("Consumer add stream %" PRIu64 " failed. Continuing",
723 new_stream->key);
724 consumer_stream_free(new_stream);
725 goto end_nosignal;
726 }
727 stream_pipe = ctx->consumer_data_pipe;
728 }
729
730 /* Vitible to other threads */
731 new_stream->globally_visible = 1;
732
733 health_code_update();
734
735 ret = lttng_pipe_write(stream_pipe, &new_stream, sizeof(new_stream));
736 if (ret < 0) {
737 ERR("Consumer write %s stream to pipe %d",
738 new_stream->metadata_flag ? "metadata" : "data",
739 lttng_pipe_get_writefd(stream_pipe));
740 if (new_stream->metadata_flag) {
741 consumer_del_stream_for_metadata(new_stream);
742 } else {
743 consumer_del_stream_for_data(new_stream);
744 }
745 goto end_nosignal;
746 }
747
748 DBG("Kernel consumer ADD_STREAM %s (fd: %d) with relayd id %" PRIu64,
749 new_stream->name, fd, new_stream->relayd_stream_id);
750 break;
751 }
752 case LTTNG_CONSUMER_STREAMS_SENT:
753 {
754 struct lttng_consumer_channel *channel;
755
756 /*
757 * Get stream's channel reference. Needed when adding the stream to the
758 * global hash table.
759 */
760 channel = consumer_find_channel(msg.u.sent_streams.channel_key);
761 if (!channel) {
762 /*
763 * We could not find the channel. Can happen if cpu hotplug
764 * happens while tearing down.
765 */
766 ERR("Unable to find channel key %" PRIu64,
767 msg.u.sent_streams.channel_key);
768 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
769 }
770
771 health_code_update();
772
773 /*
774 * Send status code to session daemon.
775 */
776 ret = consumer_send_status_msg(sock, ret_code);
777 if (ret < 0 || ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
778 /* Somehow, the session daemon is not responding anymore. */
779 goto end_nosignal;
780 }
781
782 health_code_update();
783
784 /*
785 * We should not send this message if we don't monitor the
786 * streams in this channel.
787 */
788 if (!channel->monitor) {
789 break;
790 }
791
792 health_code_update();
793 /* Send stream to relayd if the stream has an ID. */
794 if (msg.u.sent_streams.net_seq_idx != (uint64_t) -1ULL) {
795 ret = consumer_send_relayd_streams_sent(
796 msg.u.sent_streams.net_seq_idx);
797 if (ret < 0) {
798 goto end_nosignal;
799 }
800 }
801 break;
802 }
803 case LTTNG_CONSUMER_UPDATE_STREAM:
804 {
805 rcu_read_unlock();
806 return -ENOSYS;
807 }
808 case LTTNG_CONSUMER_DESTROY_RELAYD:
809 {
810 uint64_t index = msg.u.destroy_relayd.net_seq_idx;
811 struct consumer_relayd_sock_pair *relayd;
812
813 DBG("Kernel consumer destroying relayd %" PRIu64, index);
814
815 /* Get relayd reference if exists. */
816 relayd = consumer_find_relayd(index);
817 if (relayd == NULL) {
818 DBG("Unable to find relayd %" PRIu64, index);
819 ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL;
820 }
821
822 /*
823 * Each relayd socket pair has a refcount of stream attached to it
824 * which tells if the relayd is still active or not depending on the
825 * refcount value.
826 *
827 * This will set the destroy flag of the relayd object and destroy it
828 * if the refcount reaches zero when called.
829 *
830 * The destroy can happen either here or when a stream fd hangs up.
831 */
832 if (relayd) {
833 consumer_flag_relayd_for_destroy(relayd);
834 }
835
836 health_code_update();
837
838 ret = consumer_send_status_msg(sock, ret_code);
839 if (ret < 0) {
840 /* Somehow, the session daemon is not responding anymore. */
841 goto error_fatal;
842 }
843
844 goto end_nosignal;
845 }
846 case LTTNG_CONSUMER_DATA_PENDING:
847 {
848 int32_t ret;
849 uint64_t id = msg.u.data_pending.session_id;
850
851 DBG("Kernel consumer data pending command for id %" PRIu64, id);
852
853 ret = consumer_data_pending(id);
854
855 health_code_update();
856
857 /* Send back returned value to session daemon */
858 ret = lttcomm_send_unix_sock(sock, &ret, sizeof(ret));
859 if (ret < 0) {
860 PERROR("send data pending ret code");
861 goto error_fatal;
862 }
863
864 /*
865 * No need to send back a status message since the data pending
866 * returned value is the response.
867 */
868 break;
869 }
870 case LTTNG_CONSUMER_SNAPSHOT_CHANNEL:
871 {
872 if (msg.u.snapshot_channel.metadata == 1) {
873 ret = lttng_kconsumer_snapshot_metadata(msg.u.snapshot_channel.key,
874 msg.u.snapshot_channel.pathname,
875 msg.u.snapshot_channel.relayd_id, ctx);
876 if (ret < 0) {
877 ERR("Snapshot metadata failed");
878 ret_code = LTTCOMM_CONSUMERD_ERROR_METADATA;
879 }
880 } else {
881 ret = lttng_kconsumer_snapshot_channel(msg.u.snapshot_channel.key,
882 msg.u.snapshot_channel.pathname,
883 msg.u.snapshot_channel.relayd_id,
884 msg.u.snapshot_channel.nb_packets_per_stream,
885 ctx);
886 if (ret < 0) {
887 ERR("Snapshot channel failed");
888 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
889 }
890 }
891
892 health_code_update();
893
894 ret = consumer_send_status_msg(sock, ret_code);
895 if (ret < 0) {
896 /* Somehow, the session daemon is not responding anymore. */
897 goto end_nosignal;
898 }
899 break;
900 }
901 case LTTNG_CONSUMER_DESTROY_CHANNEL:
902 {
903 uint64_t key = msg.u.destroy_channel.key;
904 struct lttng_consumer_channel *channel;
905
906 channel = consumer_find_channel(key);
907 if (!channel) {
908 ERR("Kernel consumer destroy channel %" PRIu64 " not found", key);
909 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
910 }
911
912 health_code_update();
913
914 ret = consumer_send_status_msg(sock, ret_code);
915 if (ret < 0) {
916 /* Somehow, the session daemon is not responding anymore. */
917 goto end_nosignal;
918 }
919
920 health_code_update();
921
922 /* Stop right now if no channel was found. */
923 if (!channel) {
924 goto end_nosignal;
925 }
926
927 /*
928 * This command should ONLY be issued for channel with streams set in
929 * no monitor mode.
930 */
931 assert(!channel->monitor);
932
933 /*
934 * The refcount should ALWAYS be 0 in the case of a channel in no
935 * monitor mode.
936 */
937 assert(!uatomic_sub_return(&channel->refcount, 1));
938
939 consumer_del_channel(channel);
940
941 goto end_nosignal;
942 }
943 default:
944 goto end_nosignal;
945 }
946
947 end_nosignal:
948 rcu_read_unlock();
949
950 /*
951 * Return 1 to indicate success since the 0 value can be a socket
952 * shutdown during the recv() or send() call.
953 */
954 health_code_update();
955 return 1;
956
957 error_fatal:
958 rcu_read_unlock();
959 /* This will issue a consumer stop. */
960 return -1;
961 }
962
963 /*
964 * Populate index values of a kernel stream. Values are set in big endian order.
965 *
966 * Return 0 on success or else a negative value.
967 */
968 static int get_index_values(struct ctf_packet_index *index, int infd)
969 {
970 int ret;
971
972 ret = kernctl_get_timestamp_begin(infd, &index->timestamp_begin);
973 if (ret < 0) {
974 PERROR("kernctl_get_timestamp_begin");
975 goto error;
976 }
977 index->timestamp_begin = htobe64(index->timestamp_begin);
978
979 ret = kernctl_get_timestamp_end(infd, &index->timestamp_end);
980 if (ret < 0) {
981 PERROR("kernctl_get_timestamp_end");
982 goto error;
983 }
984 index->timestamp_end = htobe64(index->timestamp_end);
985
986 ret = kernctl_get_events_discarded(infd, &index->events_discarded);
987 if (ret < 0) {
988 PERROR("kernctl_get_events_discarded");
989 goto error;
990 }
991 index->events_discarded = htobe64(index->events_discarded);
992
993 ret = kernctl_get_content_size(infd, &index->content_size);
994 if (ret < 0) {
995 PERROR("kernctl_get_content_size");
996 goto error;
997 }
998 index->content_size = htobe64(index->content_size);
999
1000 ret = kernctl_get_packet_size(infd, &index->packet_size);
1001 if (ret < 0) {
1002 PERROR("kernctl_get_packet_size");
1003 goto error;
1004 }
1005 index->packet_size = htobe64(index->packet_size);
1006
1007 ret = kernctl_get_stream_id(infd, &index->stream_id);
1008 if (ret < 0) {
1009 PERROR("kernctl_get_stream_id");
1010 goto error;
1011 }
1012 index->stream_id = htobe64(index->stream_id);
1013
1014 error:
1015 return ret;
1016 }
1017 /*
1018 * Sync metadata meaning request them to the session daemon and snapshot to the
1019 * metadata thread can consumer them.
1020 *
1021 * Metadata stream lock MUST be acquired.
1022 *
1023 * Return 0 if new metadatda is available, EAGAIN if the metadata stream
1024 * is empty or a negative value on error.
1025 */
1026 int lttng_kconsumer_sync_metadata(struct lttng_consumer_stream *metadata)
1027 {
1028 int ret;
1029
1030 assert(metadata);
1031
1032 ret = kernctl_buffer_flush(metadata->wait_fd);
1033 if (ret < 0) {
1034 ERR("Failed to flush kernel stream");
1035 goto end;
1036 }
1037
1038 ret = kernctl_snapshot(metadata->wait_fd);
1039 if (ret < 0) {
1040 if (errno != EAGAIN) {
1041 ERR("Sync metadata, taking kernel snapshot failed.");
1042 goto end;
1043 }
1044 DBG("Sync metadata, no new kernel metadata");
1045 /* No new metadata, exit. */
1046 ret = ENODATA;
1047 goto end;
1048 }
1049
1050 end:
1051 return ret;
1052 }
1053
1054 /*
1055 * Consume data on a file descriptor and write it on a trace file.
1056 */
1057 ssize_t lttng_kconsumer_read_subbuffer(struct lttng_consumer_stream *stream,
1058 struct lttng_consumer_local_data *ctx)
1059 {
1060 unsigned long len, subbuf_size, padding;
1061 int err, write_index = 1;
1062 ssize_t ret = 0;
1063 int infd = stream->wait_fd;
1064 struct ctf_packet_index index;
1065
1066 DBG("In read_subbuffer (infd : %d)", infd);
1067
1068 /* Get the next subbuffer */
1069 err = kernctl_get_next_subbuf(infd);
1070 if (err != 0) {
1071 /*
1072 * This is a debug message even for single-threaded consumer,
1073 * because poll() have more relaxed criterions than get subbuf,
1074 * so get_subbuf may fail for short race windows where poll()
1075 * would issue wakeups.
1076 */
1077 DBG("Reserving sub buffer failed (everything is normal, "
1078 "it is due to concurrency)");
1079 ret = -errno;
1080 goto end;
1081 }
1082
1083 /* Get the full subbuffer size including padding */
1084 err = kernctl_get_padded_subbuf_size(infd, &len);
1085 if (err != 0) {
1086 PERROR("Getting sub-buffer len failed.");
1087 err = kernctl_put_subbuf(infd);
1088 if (err != 0) {
1089 if (errno == EFAULT) {
1090 PERROR("Error in unreserving sub buffer\n");
1091 } else if (errno == EIO) {
1092 /* Should never happen with newer LTTng versions */
1093 PERROR("Reader has been pushed by the writer, last sub-buffer corrupted.");
1094 }
1095 ret = -errno;
1096 goto end;
1097 }
1098 ret = -errno;
1099 goto end;
1100 }
1101
1102 if (!stream->metadata_flag) {
1103 ret = get_index_values(&index, infd);
1104 if (ret < 0) {
1105 err = kernctl_put_subbuf(infd);
1106 if (err != 0) {
1107 if (errno == EFAULT) {
1108 PERROR("Error in unreserving sub buffer\n");
1109 } else if (errno == EIO) {
1110 /* Should never happen with newer LTTng versions */
1111 PERROR("Reader has been pushed by the writer, last sub-buffer corrupted.");
1112 }
1113 ret = -errno;
1114 goto end;
1115 }
1116 goto end;
1117 }
1118 } else {
1119 write_index = 0;
1120 }
1121
1122 switch (stream->chan->output) {
1123 case CONSUMER_CHANNEL_SPLICE:
1124 /*
1125 * XXX: The lttng-modules splice "actor" does not handle copying
1126 * partial pages hence only using the subbuffer size without the
1127 * padding makes the splice fail.
1128 */
1129 subbuf_size = len;
1130 padding = 0;
1131
1132 /* splice the subbuffer to the tracefile */
1133 ret = lttng_consumer_on_read_subbuffer_splice(ctx, stream, subbuf_size,
1134 padding, &index);
1135 /*
1136 * XXX: Splice does not support network streaming so the return value
1137 * is simply checked against subbuf_size and not like the mmap() op.
1138 */
1139 if (ret != subbuf_size) {
1140 /*
1141 * display the error but continue processing to try
1142 * to release the subbuffer
1143 */
1144 ERR("Error splicing to tracefile (ret: %zd != len: %lu)",
1145 ret, subbuf_size);
1146 write_index = 0;
1147 }
1148 break;
1149 case CONSUMER_CHANNEL_MMAP:
1150 /* Get subbuffer size without padding */
1151 err = kernctl_get_subbuf_size(infd, &subbuf_size);
1152 if (err != 0) {
1153 PERROR("Getting sub-buffer len failed.");
1154 err = kernctl_put_subbuf(infd);
1155 if (err != 0) {
1156 if (errno == EFAULT) {
1157 PERROR("Error in unreserving sub buffer\n");
1158 } else if (errno == EIO) {
1159 /* Should never happen with newer LTTng versions */
1160 PERROR("Reader has been pushed by the writer, last sub-buffer corrupted.");
1161 }
1162 ret = -errno;
1163 goto end;
1164 }
1165 ret = -errno;
1166 goto end;
1167 }
1168
1169 /* Make sure the tracer is not gone mad on us! */
1170 assert(len >= subbuf_size);
1171
1172 padding = len - subbuf_size;
1173
1174 /* write the subbuffer to the tracefile */
1175 ret = lttng_consumer_on_read_subbuffer_mmap(ctx, stream, subbuf_size,
1176 padding, &index);
1177 /*
1178 * The mmap operation should write subbuf_size amount of data when
1179 * network streaming or the full padding (len) size when we are _not_
1180 * streaming.
1181 */
1182 if ((ret != subbuf_size && stream->net_seq_idx != (uint64_t) -1ULL) ||
1183 (ret != len && stream->net_seq_idx == (uint64_t) -1ULL)) {
1184 /*
1185 * Display the error but continue processing to try to release the
1186 * subbuffer. This is a DBG statement since this is possible to
1187 * happen without being a critical error.
1188 */
1189 DBG("Error writing to tracefile "
1190 "(ret: %zd != len: %lu != subbuf_size: %lu)",
1191 ret, len, subbuf_size);
1192 write_index = 0;
1193 }
1194 break;
1195 default:
1196 ERR("Unknown output method");
1197 ret = -EPERM;
1198 }
1199
1200 err = kernctl_put_next_subbuf(infd);
1201 if (err != 0) {
1202 if (errno == EFAULT) {
1203 PERROR("Error in unreserving sub buffer\n");
1204 } else if (errno == EIO) {
1205 /* Should never happen with newer LTTng versions */
1206 PERROR("Reader has been pushed by the writer, last sub-buffer corrupted.");
1207 }
1208 ret = -errno;
1209 goto end;
1210 }
1211
1212 /* Write index if needed. */
1213 if (!write_index) {
1214 goto end;
1215 }
1216
1217 if (stream->chan->live_timer_interval && !stream->metadata_flag) {
1218 /*
1219 * In live, block until all the metadata is sent.
1220 */
1221 err = consumer_stream_sync_metadata(ctx, stream->session_id);
1222 if (err < 0) {
1223 goto end;
1224 }
1225 }
1226
1227 err = consumer_stream_write_index(stream, &index);
1228 if (err < 0) {
1229 goto end;
1230 }
1231
1232 end:
1233 return ret;
1234 }
1235
1236 int lttng_kconsumer_on_recv_stream(struct lttng_consumer_stream *stream)
1237 {
1238 int ret;
1239
1240 assert(stream);
1241
1242 /*
1243 * Don't create anything if this is set for streaming or should not be
1244 * monitored.
1245 */
1246 if (stream->net_seq_idx == (uint64_t) -1ULL && stream->chan->monitor) {
1247 ret = utils_create_stream_file(stream->chan->pathname, stream->name,
1248 stream->chan->tracefile_size, stream->tracefile_count_current,
1249 stream->uid, stream->gid, NULL);
1250 if (ret < 0) {
1251 goto error;
1252 }
1253 stream->out_fd = ret;
1254 stream->tracefile_size_current = 0;
1255
1256 if (!stream->metadata_flag) {
1257 ret = index_create_file(stream->chan->pathname,
1258 stream->name, stream->uid, stream->gid,
1259 stream->chan->tracefile_size,
1260 stream->tracefile_count_current);
1261 if (ret < 0) {
1262 goto error;
1263 }
1264 stream->index_fd = ret;
1265 }
1266 }
1267
1268 if (stream->output == LTTNG_EVENT_MMAP) {
1269 /* get the len of the mmap region */
1270 unsigned long mmap_len;
1271
1272 ret = kernctl_get_mmap_len(stream->wait_fd, &mmap_len);
1273 if (ret != 0) {
1274 PERROR("kernctl_get_mmap_len");
1275 ret = -errno;
1276 goto error_close_fd;
1277 }
1278 stream->mmap_len = (size_t) mmap_len;
1279
1280 stream->mmap_base = mmap(NULL, stream->mmap_len, PROT_READ,
1281 MAP_PRIVATE, stream->wait_fd, 0);
1282 if (stream->mmap_base == MAP_FAILED) {
1283 PERROR("Error mmaping");
1284 ret = -1;
1285 goto error_close_fd;
1286 }
1287 }
1288
1289 /* we return 0 to let the library handle the FD internally */
1290 return 0;
1291
1292 error_close_fd:
1293 if (stream->out_fd >= 0) {
1294 int err;
1295
1296 err = close(stream->out_fd);
1297 assert(!err);
1298 stream->out_fd = -1;
1299 }
1300 error:
1301 return ret;
1302 }
1303
1304 /*
1305 * Check if data is still being extracted from the buffers for a specific
1306 * stream. Consumer data lock MUST be acquired before calling this function
1307 * and the stream lock.
1308 *
1309 * Return 1 if the traced data are still getting read else 0 meaning that the
1310 * data is available for trace viewer reading.
1311 */
1312 int lttng_kconsumer_data_pending(struct lttng_consumer_stream *stream)
1313 {
1314 int ret;
1315
1316 assert(stream);
1317
1318 if (stream->endpoint_status != CONSUMER_ENDPOINT_ACTIVE) {
1319 ret = 0;
1320 goto end;
1321 }
1322
1323 ret = kernctl_get_next_subbuf(stream->wait_fd);
1324 if (ret == 0) {
1325 /* There is still data so let's put back this subbuffer. */
1326 ret = kernctl_put_subbuf(stream->wait_fd);
1327 assert(ret == 0);
1328 ret = 1; /* Data is pending */
1329 goto end;
1330 }
1331
1332 /* Data is NOT pending and ready to be read. */
1333 ret = 0;
1334
1335 end:
1336 return ret;
1337 }
This page took 0.060314 seconds and 3 git commands to generate.