consumerd: refactor: split read_subbuf into sub-operations
lttng-tools.git: src/common/kernel-consumer/kernel-consumer.c
1 /*
2 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 * Copyright (C) 2017 - Jérémie Galarneau <jeremie.galarneau@efficios.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 */
19
20 #define _LGPL_SOURCE
21 #include <assert.h>
22 #include <poll.h>
23 #include <pthread.h>
24 #include <stdlib.h>
25 #include <string.h>
26 #include <sys/mman.h>
27 #include <sys/socket.h>
28 #include <sys/types.h>
29 #include <inttypes.h>
30 #include <unistd.h>
31 #include <sys/stat.h>
32
33 #include <bin/lttng-consumerd/health-consumerd.h>
34 #include <common/common.h>
35 #include <common/kernel-ctl/kernel-ctl.h>
36 #include <common/sessiond-comm/sessiond-comm.h>
37 #include <common/sessiond-comm/relayd.h>
38 #include <common/compat/fcntl.h>
39 #include <common/compat/endian.h>
40 #include <common/pipe.h>
41 #include <common/relayd/relayd.h>
42 #include <common/utils.h>
43 #include <common/consumer/consumer-stream.h>
44 #include <common/index/index.h>
45 #include <common/consumer/consumer-timer.h>
46 #include <common/optional.h>
47 #include <common/buffer-view.h>
48 #include <common/consumer/consumer.h>
49 #include <stdint.h>
50
51 #include "kernel-consumer.h"
52
53 extern struct lttng_consumer_global_data consumer_data;
54 extern int consumer_poll_timeout;
55
56 /*
57 * Take a snapshot for a specific fd
58 *
59 * Returns 0 on success, < 0 on error
60 */
61 int lttng_kconsumer_take_snapshot(struct lttng_consumer_stream *stream)
62 {
63 int ret = 0;
64 int infd = stream->wait_fd;
65
66 ret = kernctl_snapshot(infd);
67 /*
68 * -EAGAIN is not an error, it just means that there is no data to
69 * be read.
70 */
71 if (ret != 0 && ret != -EAGAIN) {
72 PERROR("Getting sub-buffer snapshot.");
73 }
74
75 return ret;
76 }
77
78 /*
79 * Sample consumed and produced positions for a specific fd.
80 *
81 * Returns 0 on success, < 0 on error.
82 */
83 int lttng_kconsumer_sample_snapshot_positions(
84 struct lttng_consumer_stream *stream)
85 {
86 assert(stream);
87
88 return kernctl_snapshot_sample_positions(stream->wait_fd);
89 }
90
91 /*
92 * Get the produced position
93 *
94 * Returns 0 on success, < 0 on error
95 */
96 int lttng_kconsumer_get_produced_snapshot(struct lttng_consumer_stream *stream,
97 unsigned long *pos)
98 {
99 int ret;
100 int infd = stream->wait_fd;
101
102 ret = kernctl_snapshot_get_produced(infd, pos);
103 if (ret != 0) {
104 PERROR("kernctl_snapshot_get_produced");
105 }
106
107 return ret;
108 }
109
110 /*
111  * Get the consumed position
112 *
113 * Returns 0 on success, < 0 on error
114 */
115 int lttng_kconsumer_get_consumed_snapshot(struct lttng_consumer_stream *stream,
116 unsigned long *pos)
117 {
118 int ret;
119 int infd = stream->wait_fd;
120
121 ret = kernctl_snapshot_get_consumed(infd, pos);
122 if (ret != 0) {
123 PERROR("kernctl_snapshot_get_consumed");
124 }
125
126 return ret;
127 }
128
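/*
 * Return, through 'addr', the address of the current sub-buffer within the
 * stream's mmap'ed region, computed from the mmap read offset reported by
 * the kernel tracer.
 *
 * Returns 0 on success, < 0 on error.
 */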
129 static
130 int get_current_subbuf_addr(struct lttng_consumer_stream *stream,
131 const char **addr)
132 {
133 int ret;
134 unsigned long mmap_offset;
135 const char *mmap_base = stream->mmap_base;
136
137 ret = kernctl_get_mmap_read_offset(stream->wait_fd, &mmap_offset);
138 if (ret < 0) {
139 PERROR("Failed to get mmap read offset");
140 goto error;
141 }
142
143 *addr = mmap_base + mmap_offset;
144 error:
145 return ret;
146 }
147
148 /*
149  * Take a snapshot of all the streams of a channel
150 * RCU read-side lock must be held across this function to ensure existence of
151 * channel. The channel lock must be held by the caller.
152 *
153 * Returns 0 on success, < 0 on error
154 */
155 static int lttng_kconsumer_snapshot_channel(
156 struct lttng_consumer_channel *channel,
157 uint64_t key, char *path, uint64_t relayd_id,
158 uint64_t nb_packets_per_stream,
159 struct lttng_consumer_local_data *ctx)
160 {
161 int ret;
162 struct lttng_consumer_stream *stream;
163
164 DBG("Kernel consumer snapshot channel %" PRIu64, key);
165
166 rcu_read_lock();
167
168 /* Splice is not supported yet for channel snapshot. */
169 if (channel->output != CONSUMER_CHANNEL_MMAP) {
170 ERR("Unsupported output type for channel \"%s\": mmap output is required to record a snapshot",
171 channel->name);
172 ret = -1;
173 goto end;
174 }
175
176 cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
177 unsigned long consumed_pos, produced_pos;
178
179 health_code_update();
180
181 /*
182 * Lock stream because we are about to change its state.
183 */
184 pthread_mutex_lock(&stream->lock);
185
186 assert(channel->trace_chunk);
187 if (!lttng_trace_chunk_get(channel->trace_chunk)) {
188 /*
189 * Can't happen barring an internal error as the channel
190 * holds a reference to the trace chunk.
191 */
192 ERR("Failed to acquire reference to channel's trace chunk");
193 ret = -1;
194 goto end_unlock;
195 }
196 assert(!stream->trace_chunk);
197 stream->trace_chunk = channel->trace_chunk;
198
199 /*
200 * Assign the received relayd ID so we can use it for streaming. The streams
201 * are not visible to anyone yet, so it is safe to change it.
202 */
203 stream->net_seq_idx = relayd_id;
204 channel->relayd_id = relayd_id;
205 if (relayd_id != (uint64_t) -1ULL) {
206 ret = consumer_send_relayd_stream(stream, path);
207 if (ret < 0) {
208 ERR("sending stream to relayd");
209 goto end_unlock;
210 }
211 } else {
212 ret = consumer_stream_create_output_files(stream,
213 false);
214 if (ret < 0) {
215 goto end_unlock;
216 }
217 DBG("Kernel consumer snapshot stream (%" PRIu64 ")",
218 stream->key);
219 }
220
221 ret = kernctl_buffer_flush_empty(stream->wait_fd);
222 if (ret < 0) {
223 /*
224 * Fall back to a buffer flush that does not take
225 * empty packets into account. This is not ideal
226 * for the stream intersection feature, but it is
227 * required when "flush_empty" is not implemented
228 * by lttng-modules.
229 */
230 ret = kernctl_buffer_flush(stream->wait_fd);
231 if (ret < 0) {
232 ERR("Failed to flush kernel stream");
233 goto end_unlock;
234 }
235 goto end_unlock;
236 }
237
238 ret = lttng_kconsumer_take_snapshot(stream);
239 if (ret < 0) {
240 ERR("Taking kernel snapshot");
241 goto end_unlock;
242 }
243
244 ret = lttng_kconsumer_get_produced_snapshot(stream, &produced_pos);
245 if (ret < 0) {
246 ERR("Produced kernel snapshot position");
247 goto end_unlock;
248 }
249
250 ret = lttng_kconsumer_get_consumed_snapshot(stream, &consumed_pos);
251 if (ret < 0) {
252 ERR("Consumed kernel snapshot position");
253 goto end_unlock;
254 }
255
256 consumed_pos = consumer_get_consume_start_pos(consumed_pos,
257 produced_pos, nb_packets_per_stream,
258 stream->max_sb_size);
259
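/*
 * Consume sub-buffers from the (possibly adjusted) consumed position up to
 * the sampled produced position; the signed difference handles position
 * counter wrap-around.
 */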
260 while ((long) (consumed_pos - produced_pos) < 0) {
261 ssize_t read_len;
262 unsigned long len, padded_len;
263 const char *subbuf_addr;
264 struct lttng_buffer_view subbuf_view;
265
266 health_code_update();
267 DBG("Kernel consumer taking snapshot at pos %lu", consumed_pos);
268
269 ret = kernctl_get_subbuf(stream->wait_fd, &consumed_pos);
270 if (ret < 0) {
271 if (ret != -EAGAIN) {
272 PERROR("kernctl_get_subbuf snapshot");
273 goto end_unlock;
274 }
275 DBG("Kernel consumer get subbuf failed. Skipping it.");
276 consumed_pos += stream->max_sb_size;
277 stream->chan->lost_packets++;
278 continue;
279 }
280
281 ret = kernctl_get_subbuf_size(stream->wait_fd, &len);
282 if (ret < 0) {
283 ERR("Snapshot kernctl_get_subbuf_size");
284 goto error_put_subbuf;
285 }
286
287 ret = kernctl_get_padded_subbuf_size(stream->wait_fd, &padded_len);
288 if (ret < 0) {
289 ERR("Snapshot kernctl_get_padded_subbuf_size");
290 goto error_put_subbuf;
291 }
292
293 ret = get_current_subbuf_addr(stream, &subbuf_addr);
294 if (ret) {
295 goto error_put_subbuf;
296 }
297
298 subbuf_view = lttng_buffer_view_init(
299 subbuf_addr, 0, padded_len);
300 read_len = lttng_consumer_on_read_subbuffer_mmap(ctx,
301 stream, &subbuf_view,
302 padded_len - len);
303 /*
304 * We write the padded len in local tracefiles but the data len
305 * when using a relay. Display the error but continue processing
306 * to try to release the subbuffer.
307 */
308 if (relayd_id != (uint64_t) -1ULL) {
309 if (read_len != len) {
310 ERR("Error sending to the relay (ret: %zd != len: %lu)",
311 read_len, len);
312 }
313 } else {
314 if (read_len != padded_len) {
315 ERR("Error writing to tracefile (ret: %zd != len: %lu)",
316 read_len, padded_len);
317 }
318 }
319
320 ret = kernctl_put_subbuf(stream->wait_fd);
321 if (ret < 0) {
322 ERR("Snapshot kernctl_put_subbuf");
323 goto end_unlock;
324 }
325 consumed_pos += stream->max_sb_size;
326 }
327
328 if (relayd_id == (uint64_t) -1ULL) {
329 if (stream->out_fd >= 0) {
330 ret = close(stream->out_fd);
331 if (ret < 0) {
332 PERROR("Kernel consumer snapshot close out_fd");
333 goto end_unlock;
334 }
335 stream->out_fd = -1;
336 }
337 } else {
338 close_relayd_stream(stream);
339 stream->net_seq_idx = (uint64_t) -1ULL;
340 }
341 lttng_trace_chunk_put(stream->trace_chunk);
342 stream->trace_chunk = NULL;
343 pthread_mutex_unlock(&stream->lock);
344 }
345
346 /* All good! */
347 ret = 0;
348 goto end;
349
350 error_put_subbuf:
351 ret = kernctl_put_subbuf(stream->wait_fd);
352 if (ret < 0) {
353 ERR("Snapshot kernctl_put_subbuf error path");
354 }
355 end_unlock:
356 pthread_mutex_unlock(&stream->lock);
357 end:
358 rcu_read_unlock();
359 return ret;
360 }
361
362 /*
363 * Read all of the metadata available for a snapshot.
364 * RCU read-side lock must be held across this function to ensure existence of
365 * metadata_channel. The channel lock must be held by the caller.
366 *
367 * Returns 0 on success, < 0 on error
368 */
369 static int lttng_kconsumer_snapshot_metadata(
370 struct lttng_consumer_channel *metadata_channel,
371 uint64_t key, char *path, uint64_t relayd_id,
372 struct lttng_consumer_local_data *ctx)
373 {
374 int ret, use_relayd = 0;
375 ssize_t ret_read;
376 struct lttng_consumer_stream *metadata_stream;
377
378 assert(ctx);
379
380 DBG("Kernel consumer snapshot metadata with key %" PRIu64 " at path %s",
381 key, path);
382
383 rcu_read_lock();
384
385 metadata_stream = metadata_channel->metadata_stream;
386 assert(metadata_stream);
387
388 pthread_mutex_lock(&metadata_stream->lock);
389 assert(metadata_channel->trace_chunk);
390 assert(metadata_stream->trace_chunk);
391
392 /* Flag once that we have a valid relayd for the stream. */
393 if (relayd_id != (uint64_t) -1ULL) {
394 use_relayd = 1;
395 }
396
397 if (use_relayd) {
398 ret = consumer_send_relayd_stream(metadata_stream, path);
399 if (ret < 0) {
400 goto error_snapshot;
401 }
402 } else {
403 ret = consumer_stream_create_output_files(metadata_stream,
404 false);
405 if (ret < 0) {
406 goto error_snapshot;
407 }
408 }
409
410 do {
411 health_code_update();
412
413 ret_read = lttng_consumer_read_subbuffer(metadata_stream, ctx, true);
414 if (ret_read < 0) {
415 if (ret_read != -EAGAIN) {
416 ERR("Kernel snapshot reading metadata subbuffer (ret: %zd)",
417 ret_read);
418 ret = ret_read;
419 goto error_snapshot;
420 }
421 /* ret_read is negative at this point so we will exit the loop. */
422 continue;
423 }
424 } while (ret_read >= 0);
425
426 if (use_relayd) {
427 close_relayd_stream(metadata_stream);
428 metadata_stream->net_seq_idx = (uint64_t) -1ULL;
429 } else {
430 if (metadata_stream->out_fd >= 0) {
431 ret = close(metadata_stream->out_fd);
432 if (ret < 0) {
433 PERROR("Kernel consumer snapshot metadata close out_fd");
434 /*
435 * Don't treat this as an error: the snapshot itself succeeded,
436 * only the close of the output file descriptor failed.
437 */
438 }
439 metadata_stream->out_fd = -1;
440 lttng_trace_chunk_put(metadata_stream->trace_chunk);
441 metadata_stream->trace_chunk = NULL;
442 }
443 }
444
445 ret = 0;
446 error_snapshot:
447 pthread_mutex_unlock(&metadata_stream->lock);
448 cds_list_del(&metadata_stream->send_node);
449 consumer_stream_destroy(metadata_stream, NULL);
450 metadata_channel->metadata_stream = NULL;
451 rcu_read_unlock();
452 return ret;
453 }
454
455 /*
456 * Receive a command from the session daemon and process it.
457 *
458 * Return 1 on success, otherwise 0 or a negative value.
459 */
460 int lttng_kconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
461 int sock, struct pollfd *consumer_sockpoll)
462 {
463 ssize_t ret;
464 enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
465 struct lttcomm_consumer_msg msg;
466
467 health_code_update();
468
469 ret = lttcomm_recv_unix_sock(sock, &msg, sizeof(msg));
470 if (ret != sizeof(msg)) {
471 if (ret > 0) {
472 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_CMD);
473 ret = -1;
474 }
475 return ret;
476 }
477
478 health_code_update();
479
480 /* Deprecated command */
481 assert(msg.cmd_type != LTTNG_CONSUMER_STOP);
482
483 health_code_update();
484
485 /* relayd needs RCU read-side protection */
486 rcu_read_lock();
487
488 switch (msg.cmd_type) {
489 case LTTNG_CONSUMER_ADD_RELAYD_SOCKET:
490 {
491 /* Session daemon status messages are handled in the following call. */
492 consumer_add_relayd_socket(msg.u.relayd_sock.net_index,
493 msg.u.relayd_sock.type, ctx, sock, consumer_sockpoll,
494 &msg.u.relayd_sock.sock, msg.u.relayd_sock.session_id,
495 msg.u.relayd_sock.relayd_session_id);
496 goto end_nosignal;
497 }
498 case LTTNG_CONSUMER_ADD_CHANNEL:
499 {
500 struct lttng_consumer_channel *new_channel;
501 int ret_recv;
502 const uint64_t chunk_id = msg.u.channel.chunk_id.value;
503
504 health_code_update();
505
506 /* First send a status message before receiving the fds. */
507 ret = consumer_send_status_msg(sock, ret_code);
508 if (ret < 0) {
509 /* Somehow, the session daemon is not responding anymore. */
510 goto error_fatal;
511 }
512
513 health_code_update();
514
515 DBG("consumer_add_channel %" PRIu64, msg.u.channel.channel_key);
516 new_channel = consumer_allocate_channel(msg.u.channel.channel_key,
517 msg.u.channel.session_id,
518 msg.u.channel.chunk_id.is_set ?
519 &chunk_id : NULL,
520 msg.u.channel.pathname,
521 msg.u.channel.name,
522 msg.u.channel.relayd_id, msg.u.channel.output,
523 msg.u.channel.tracefile_size,
524 msg.u.channel.tracefile_count, 0,
525 msg.u.channel.monitor,
526 msg.u.channel.live_timer_interval,
527 msg.u.channel.is_live,
528 NULL, NULL);
529 if (new_channel == NULL) {
530 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
531 goto end_nosignal;
532 }
533 new_channel->nb_init_stream_left = msg.u.channel.nb_init_streams;
534 switch (msg.u.channel.output) {
535 case LTTNG_EVENT_SPLICE:
536 new_channel->output = CONSUMER_CHANNEL_SPLICE;
537 break;
538 case LTTNG_EVENT_MMAP:
539 new_channel->output = CONSUMER_CHANNEL_MMAP;
540 break;
541 default:
542 ERR("Channel output unknown %d", msg.u.channel.output);
543 goto end_nosignal;
544 }
545
546 /* Translate and save channel type. */
547 switch (msg.u.channel.type) {
548 case CONSUMER_CHANNEL_TYPE_DATA:
549 case CONSUMER_CHANNEL_TYPE_METADATA:
550 new_channel->type = msg.u.channel.type;
551 break;
552 default:
553 assert(0);
554 goto end_nosignal;
555 };
556
557 health_code_update();
558
559 if (ctx->on_recv_channel != NULL) {
560 ret_recv = ctx->on_recv_channel(new_channel);
561 if (ret_recv == 0) {
562 ret = consumer_add_channel(new_channel, ctx);
563 } else if (ret_recv < 0) {
564 goto end_nosignal;
565 }
566 } else {
567 ret = consumer_add_channel(new_channel, ctx);
568 }
569 if (msg.u.channel.type == CONSUMER_CHANNEL_TYPE_DATA && !ret) {
570 int monitor_start_ret;
571
572 DBG("Consumer starting monitor timer");
573 consumer_timer_live_start(new_channel,
574 msg.u.channel.live_timer_interval);
575 monitor_start_ret = consumer_timer_monitor_start(
576 new_channel,
577 msg.u.channel.monitor_timer_interval);
578 if (monitor_start_ret < 0) {
579 ERR("Starting channel monitoring timer failed");
580 goto end_nosignal;
581 }
582
583 }
584
585 health_code_update();
586
587 /* If we received an error in add_channel, we need to report it. */
588 if (ret < 0) {
589 ret = consumer_send_status_msg(sock, ret);
590 if (ret < 0) {
591 goto error_fatal;
592 }
593 goto end_nosignal;
594 }
595
596 goto end_nosignal;
597 }
598 case LTTNG_CONSUMER_ADD_STREAM:
599 {
600 int fd;
601 struct lttng_pipe *stream_pipe;
602 struct lttng_consumer_stream *new_stream;
603 struct lttng_consumer_channel *channel;
604 int alloc_ret = 0;
605
606 /*
607 * Get stream's channel reference. Needed when adding the stream to the
608 * global hash table.
609 */
610 channel = consumer_find_channel(msg.u.stream.channel_key);
611 if (!channel) {
612 /*
613 * We could not find the channel. Can happen if cpu hotplug
614 * happens while tearing down.
615 */
616 ERR("Unable to find channel key %" PRIu64, msg.u.stream.channel_key);
617 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
618 }
619
620 health_code_update();
621
622 /* First send a status message before receiving the fds. */
623 ret = consumer_send_status_msg(sock, ret_code);
624 if (ret < 0) {
625 /* Somehow, the session daemon is not responding anymore. */
626 goto error_add_stream_fatal;
627 }
628
629 health_code_update();
630
631 if (ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
632 /* Channel was not found. */
633 goto error_add_stream_nosignal;
634 }
635
636 /* Blocking call */
637 health_poll_entry();
638 ret = lttng_consumer_poll_socket(consumer_sockpoll);
639 health_poll_exit();
640 if (ret) {
641 goto error_add_stream_fatal;
642 }
643
644 health_code_update();
645
646 /* Get stream file descriptor from socket */
647 ret = lttcomm_recv_fds_unix_sock(sock, &fd, 1);
648 if (ret != sizeof(fd)) {
649 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_FD);
650 goto end;
651 }
652
653 health_code_update();
654
655 /*
656 * Send status code to session daemon only if the recv works. If the
657 * above recv() failed, the session daemon is notified through the
658 * error socket and the teardown is eventually done.
659 */
660 ret = consumer_send_status_msg(sock, ret_code);
661 if (ret < 0) {
662 /* Somehow, the session daemon is not responding anymore. */
663 goto error_add_stream_nosignal;
664 }
665
666 health_code_update();
667
668 pthread_mutex_lock(&channel->lock);
669 new_stream = consumer_stream_create(
670 channel,
671 channel->key,
672 fd,
673 channel->name,
674 channel->relayd_id,
675 channel->session_id,
676 channel->trace_chunk,
677 msg.u.stream.cpu,
678 &alloc_ret,
679 channel->type,
680 channel->monitor);
681 if (new_stream == NULL) {
682 switch (alloc_ret) {
683 case -ENOMEM:
684 case -EINVAL:
685 default:
686 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
687 break;
688 }
689 pthread_mutex_unlock(&channel->lock);
690 goto error_add_stream_nosignal;
691 }
692
693 new_stream->wait_fd = fd;
694 ret = kernctl_get_max_subbuf_size(new_stream->wait_fd,
695 &new_stream->max_sb_size);
696 if (ret < 0) {
697 pthread_mutex_unlock(&channel->lock);
698 ERR("Failed to get kernel maximal subbuffer size");
699 goto error_add_stream_nosignal;
700 }
701
702 consumer_stream_update_channel_attributes(new_stream,
703 channel);
704
705 /*
706 * We've just assigned the channel to the stream so increment the
707 * refcount right now. There is no need to increment the refcount for
708 * non-monitored streams since their cleanup is handled manually.
709 * It is very important to make sure there are NO prior
710 * consumer_del_stream() calls, or else the refcount will be unbalanced.
711 */
712 if (channel->monitor) {
713 uatomic_inc(&new_stream->chan->refcount);
714 }
715
716 /*
717 * For the kernel, the buffer flush is done on the session daemon side,
718 * so there is no need to track the stream's "hangup_flush_done" state.
719 * This matters for a kernel stream since we don't rely on the stream's
720 * flush state to read data, unlike user space tracing.
722 */
723 new_stream->hangup_flush_done = 0;
724
725 health_code_update();
726
727 pthread_mutex_lock(&new_stream->lock);
728 if (ctx->on_recv_stream) {
729 ret = ctx->on_recv_stream(new_stream);
730 if (ret < 0) {
731 pthread_mutex_unlock(&new_stream->lock);
732 pthread_mutex_unlock(&channel->lock);
733 consumer_stream_free(new_stream);
734 goto error_add_stream_nosignal;
735 }
736 }
737 health_code_update();
738
739 if (new_stream->metadata_flag) {
740 channel->metadata_stream = new_stream;
741 }
742
743 /* Do not monitor this stream. */
744 if (!channel->monitor) {
745 DBG("Kernel consumer add stream %s in no monitor mode with "
746 "relayd id %" PRIu64, new_stream->name,
747 new_stream->net_seq_idx);
748 cds_list_add(&new_stream->send_node, &channel->streams.head);
749 pthread_mutex_unlock(&new_stream->lock);
750 pthread_mutex_unlock(&channel->lock);
751 goto end_add_stream;
752 }
753
754 /* Send stream to relayd if the stream has an ID. */
755 if (new_stream->net_seq_idx != (uint64_t) -1ULL) {
756 ret = consumer_send_relayd_stream(new_stream,
757 new_stream->chan->pathname);
758 if (ret < 0) {
759 pthread_mutex_unlock(&new_stream->lock);
760 pthread_mutex_unlock(&channel->lock);
761 consumer_stream_free(new_stream);
762 goto error_add_stream_nosignal;
763 }
764
765 /*
766 * If adding an extra stream to an already
767 * existing channel (e.g. cpu hotplug), we need
768 * to send the "streams_sent" command to relayd.
769 */
770 if (channel->streams_sent_to_relayd) {
771 ret = consumer_send_relayd_streams_sent(
772 new_stream->net_seq_idx);
773 if (ret < 0) {
774 pthread_mutex_unlock(&new_stream->lock);
775 pthread_mutex_unlock(&channel->lock);
776 goto error_add_stream_nosignal;
777 }
778 }
779 }
780 pthread_mutex_unlock(&new_stream->lock);
781 pthread_mutex_unlock(&channel->lock);
782
783 /* Get the right pipe where the stream will be sent. */
784 if (new_stream->metadata_flag) {
785 consumer_add_metadata_stream(new_stream);
786 stream_pipe = ctx->consumer_metadata_pipe;
787 } else {
788 consumer_add_data_stream(new_stream);
789 stream_pipe = ctx->consumer_data_pipe;
790 }
791
792 /* Visible to other threads */
793 new_stream->globally_visible = 1;
794
795 health_code_update();
796
797 ret = lttng_pipe_write(stream_pipe, &new_stream, sizeof(new_stream));
798 if (ret < 0) {
799 ERR("Consumer write %s stream to pipe %d",
800 new_stream->metadata_flag ? "metadata" : "data",
801 lttng_pipe_get_writefd(stream_pipe));
802 if (new_stream->metadata_flag) {
803 consumer_del_stream_for_metadata(new_stream);
804 } else {
805 consumer_del_stream_for_data(new_stream);
806 }
807 goto error_add_stream_nosignal;
808 }
809
810 DBG("Kernel consumer ADD_STREAM %s (fd: %d) %s with relayd id %" PRIu64,
811 new_stream->name, fd, new_stream->chan->pathname, new_stream->relayd_stream_id);
812 end_add_stream:
813 break;
814 error_add_stream_nosignal:
815 goto end_nosignal;
816 error_add_stream_fatal:
817 goto error_fatal;
818 }
819 case LTTNG_CONSUMER_STREAMS_SENT:
820 {
821 struct lttng_consumer_channel *channel;
822
823 /*
824 * Look up the channel; it is needed to check its monitor flag and to
825 * record that its streams were sent to the relayd.
826 */
827 channel = consumer_find_channel(msg.u.sent_streams.channel_key);
828 if (!channel) {
829 /*
830 * We could not find the channel. Can happen if cpu hotplug
831 * happens while tearing down.
832 */
833 ERR("Unable to find channel key %" PRIu64,
834 msg.u.sent_streams.channel_key);
835 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
836 }
837
838 health_code_update();
839
840 /*
841 * Send status code to session daemon.
842 */
843 ret = consumer_send_status_msg(sock, ret_code);
844 if (ret < 0 || ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
845 /* Somehow, the session daemon is not responding anymore. */
846 goto error_streams_sent_nosignal;
847 }
848
849 health_code_update();
850
851 /*
852 * We should not send this message if we don't monitor the
853 * streams in this channel.
854 */
855 if (!channel->monitor) {
856 goto end_error_streams_sent;
857 }
858
859 health_code_update();
860 /* Send stream to relayd if the stream has an ID. */
861 if (msg.u.sent_streams.net_seq_idx != (uint64_t) -1ULL) {
862 ret = consumer_send_relayd_streams_sent(
863 msg.u.sent_streams.net_seq_idx);
864 if (ret < 0) {
865 goto error_streams_sent_nosignal;
866 }
867 channel->streams_sent_to_relayd = true;
868 }
869 end_error_streams_sent:
870 break;
871 error_streams_sent_nosignal:
872 goto end_nosignal;
873 }
874 case LTTNG_CONSUMER_UPDATE_STREAM:
875 {
876 rcu_read_unlock();
877 return -ENOSYS;
878 }
879 case LTTNG_CONSUMER_DESTROY_RELAYD:
880 {
881 uint64_t index = msg.u.destroy_relayd.net_seq_idx;
882 struct consumer_relayd_sock_pair *relayd;
883
884 DBG("Kernel consumer destroying relayd %" PRIu64, index);
885
886 /* Get relayd reference if exists. */
887 relayd = consumer_find_relayd(index);
888 if (relayd == NULL) {
889 DBG("Unable to find relayd %" PRIu64, index);
890 ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL;
891 }
892
893 /*
894 * Each relayd socket pair has a refcount of streams attached to it,
895 * which tells whether the relayd is still in use.
897 *
898 * This will set the destroy flag of the relayd object and destroy it
899 * if the refcount reaches zero when called.
900 *
901 * The destroy can happen either here or when a stream fd hangs up.
902 */
903 if (relayd) {
904 consumer_flag_relayd_for_destroy(relayd);
905 }
906
907 health_code_update();
908
909 ret = consumer_send_status_msg(sock, ret_code);
910 if (ret < 0) {
911 /* Somehow, the session daemon is not responding anymore. */
912 goto error_fatal;
913 }
914
915 goto end_nosignal;
916 }
917 case LTTNG_CONSUMER_DATA_PENDING:
918 {
919 int32_t ret;
920 uint64_t id = msg.u.data_pending.session_id;
921
922 DBG("Kernel consumer data pending command for id %" PRIu64, id);
923
924 ret = consumer_data_pending(id);
925
926 health_code_update();
927
928 /* Send back returned value to session daemon */
929 ret = lttcomm_send_unix_sock(sock, &ret, sizeof(ret));
930 if (ret < 0) {
931 PERROR("send data pending ret code");
932 goto error_fatal;
933 }
934
935 /*
936 * No need to send back a status message since the data pending
937 * returned value is the response.
938 */
939 break;
940 }
941 case LTTNG_CONSUMER_SNAPSHOT_CHANNEL:
942 {
943 struct lttng_consumer_channel *channel;
944 uint64_t key = msg.u.snapshot_channel.key;
945
946 channel = consumer_find_channel(key);
947 if (!channel) {
948 ERR("Channel %" PRIu64 " not found", key);
949 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
950 } else {
951 pthread_mutex_lock(&channel->lock);
952 if (msg.u.snapshot_channel.metadata == 1) {
953 ret = lttng_kconsumer_snapshot_metadata(channel, key,
954 msg.u.snapshot_channel.pathname,
955 msg.u.snapshot_channel.relayd_id, ctx);
956 if (ret < 0) {
957 ERR("Snapshot metadata failed");
958 ret_code = LTTCOMM_CONSUMERD_SNAPSHOT_FAILED;
959 }
960 } else {
961 ret = lttng_kconsumer_snapshot_channel(channel, key,
962 msg.u.snapshot_channel.pathname,
963 msg.u.snapshot_channel.relayd_id,
964 msg.u.snapshot_channel.nb_packets_per_stream,
965 ctx);
966 if (ret < 0) {
967 ERR("Snapshot channel failed");
968 ret_code = LTTCOMM_CONSUMERD_SNAPSHOT_FAILED;
969 }
970 }
971 pthread_mutex_unlock(&channel->lock);
972 }
973 health_code_update();
974
975 ret = consumer_send_status_msg(sock, ret_code);
976 if (ret < 0) {
977 /* Somehow, the session daemon is not responding anymore. */
978 goto end_nosignal;
979 }
980 break;
981 }
982 case LTTNG_CONSUMER_DESTROY_CHANNEL:
983 {
984 uint64_t key = msg.u.destroy_channel.key;
985 struct lttng_consumer_channel *channel;
986
987 channel = consumer_find_channel(key);
988 if (!channel) {
989 ERR("Kernel consumer destroy channel %" PRIu64 " not found", key);
990 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
991 }
992
993 health_code_update();
994
995 ret = consumer_send_status_msg(sock, ret_code);
996 if (ret < 0) {
997 /* Somehow, the session daemon is not responding anymore. */
998 goto end_destroy_channel;
999 }
1000
1001 health_code_update();
1002
1003 /* Stop right now if no channel was found. */
1004 if (!channel) {
1005 goto end_destroy_channel;
1006 }
1007
1008 /*
1009 * This command should ONLY be issued for channels whose streams are
1010 * in no-monitor mode.
1011 */
1012 assert(!channel->monitor);
1013
1014 /*
1015 * The refcount should ALWAYS be 0 in the case of a channel in no
1016 * monitor mode.
1017 */
1018 assert(!uatomic_sub_return(&channel->refcount, 1));
1019
1020 consumer_del_channel(channel);
1021 end_destroy_channel:
1022 goto end_nosignal;
1023 }
1024 case LTTNG_CONSUMER_DISCARDED_EVENTS:
1025 {
1026 ssize_t ret;
1027 uint64_t count;
1028 struct lttng_consumer_channel *channel;
1029 uint64_t id = msg.u.discarded_events.session_id;
1030 uint64_t key = msg.u.discarded_events.channel_key;
1031
1032 DBG("Kernel consumer discarded events command for session id %"
1033 PRIu64 ", channel key %" PRIu64, id, key);
1034
1035 channel = consumer_find_channel(key);
1036 if (!channel) {
1037 ERR("Kernel consumer discarded events channel %"
1038 PRIu64 " not found", key);
1039 count = 0;
1040 } else {
1041 count = channel->discarded_events;
1042 }
1043
1044 health_code_update();
1045
1046 /* Send back returned value to session daemon */
1047 ret = lttcomm_send_unix_sock(sock, &count, sizeof(count));
1048 if (ret < 0) {
1049 PERROR("send discarded events");
1050 goto error_fatal;
1051 }
1052
1053 break;
1054 }
1055 case LTTNG_CONSUMER_LOST_PACKETS:
1056 {
1057 ssize_t ret;
1058 uint64_t count;
1059 struct lttng_consumer_channel *channel;
1060 uint64_t id = msg.u.lost_packets.session_id;
1061 uint64_t key = msg.u.lost_packets.channel_key;
1062
1063 DBG("Kernel consumer lost packets command for session id %"
1064 PRIu64 ", channel key %" PRIu64, id, key);
1065
1066 channel = consumer_find_channel(key);
1067 if (!channel) {
1068 ERR("Kernel consumer lost packets channel %"
1069 PRIu64 " not found", key);
1070 count = 0;
1071 } else {
1072 count = channel->lost_packets;
1073 }
1074
1075 health_code_update();
1076
1077 /* Send back returned value to session daemon */
1078 ret = lttcomm_send_unix_sock(sock, &count, sizeof(count));
1079 if (ret < 0) {
1080 PERROR("send lost packets");
1081 goto error_fatal;
1082 }
1083
1084 break;
1085 }
1086 case LTTNG_CONSUMER_SET_CHANNEL_MONITOR_PIPE:
1087 {
1088 int channel_monitor_pipe;
1089
1090 ret_code = LTTCOMM_CONSUMERD_SUCCESS;
1091 /* Successfully received the command's type. */
1092 ret = consumer_send_status_msg(sock, ret_code);
1093 if (ret < 0) {
1094 goto error_fatal;
1095 }
1096
1097 ret = lttcomm_recv_fds_unix_sock(sock, &channel_monitor_pipe,
1098 1);
1099 if (ret != sizeof(channel_monitor_pipe)) {
1100 ERR("Failed to receive channel monitor pipe");
1101 goto error_fatal;
1102 }
1103
1104 DBG("Received channel monitor pipe (%d)", channel_monitor_pipe);
1105 ret = consumer_timer_thread_set_channel_monitor_pipe(
1106 channel_monitor_pipe);
1107 if (!ret) {
1108 int flags;
1109
1110 ret_code = LTTCOMM_CONSUMERD_SUCCESS;
1111 /* Set the pipe as non-blocking. */
1112 ret = fcntl(channel_monitor_pipe, F_GETFL, 0);
1113 if (ret == -1) {
1114 PERROR("fcntl get flags of the channel monitoring pipe");
1115 goto error_fatal;
1116 }
1117 flags = ret;
1118
1119 ret = fcntl(channel_monitor_pipe, F_SETFL,
1120 flags | O_NONBLOCK);
1121 if (ret == -1) {
1122 PERROR("fcntl set O_NONBLOCK flag of the channel monitoring pipe");
1123 goto error_fatal;
1124 }
1125 DBG("Channel monitor pipe set as non-blocking");
1126 } else {
1127 ret_code = LTTCOMM_CONSUMERD_ALREADY_SET;
1128 }
1129 ret = consumer_send_status_msg(sock, ret_code);
1130 if (ret < 0) {
1131 goto error_fatal;
1132 }
1133 break;
1134 }
1135 case LTTNG_CONSUMER_ROTATE_CHANNEL:
1136 {
1137 struct lttng_consumer_channel *channel;
1138 uint64_t key = msg.u.rotate_channel.key;
1139
1140 DBG("Consumer rotate channel %" PRIu64, key);
1141
1142 channel = consumer_find_channel(key);
1143 if (!channel) {
1144 ERR("Channel %" PRIu64 " not found", key);
1145 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
1146 } else {
1147 /*
1148 * Sample the rotate position of all the streams in this channel.
1149 */
1150 ret = lttng_consumer_rotate_channel(channel, key,
1151 msg.u.rotate_channel.relayd_id,
1152 msg.u.rotate_channel.metadata,
1153 ctx);
1154 if (ret < 0) {
1155 ERR("Rotate channel failed");
1156 ret_code = LTTCOMM_CONSUMERD_ROTATION_FAIL;
1157 }
1158
1159 health_code_update();
1160 }
1161 ret = consumer_send_status_msg(sock, ret_code);
1162 if (ret < 0) {
1163 /* Somehow, the session daemon is not responding anymore. */
1164 goto error_rotate_channel;
1165 }
1166 if (channel) {
1167 /* Rotate the streams that are ready right now. */
1168 ret = lttng_consumer_rotate_ready_streams(
1169 channel, key, ctx);
1170 if (ret < 0) {
1171 ERR("Rotate ready streams failed");
1172 }
1173 }
1174 break;
1175 error_rotate_channel:
1176 goto end_nosignal;
1177 }
1178 case LTTNG_CONSUMER_INIT:
1179 {
1180 ret_code = lttng_consumer_init_command(ctx,
1181 msg.u.init.sessiond_uuid);
1182 health_code_update();
1183 ret = consumer_send_status_msg(sock, ret_code);
1184 if (ret < 0) {
1185 /* Somehow, the session daemon is not responding anymore. */
1186 goto end_nosignal;
1187 }
1188 break;
1189 }
1190 case LTTNG_CONSUMER_CREATE_TRACE_CHUNK:
1191 {
1192 const struct lttng_credentials credentials = {
1193 .uid = msg.u.create_trace_chunk.credentials.value.uid,
1194 .gid = msg.u.create_trace_chunk.credentials.value.gid,
1195 };
1196 const bool is_local_trace =
1197 !msg.u.create_trace_chunk.relayd_id.is_set;
1198 const uint64_t relayd_id =
1199 msg.u.create_trace_chunk.relayd_id.value;
1200 const char *chunk_override_name =
1201 *msg.u.create_trace_chunk.override_name ?
1202 msg.u.create_trace_chunk.override_name :
1203 NULL;
1204 LTTNG_OPTIONAL(struct lttng_directory_handle) chunk_directory_handle =
1205 LTTNG_OPTIONAL_INIT;
1206
1207 /*
1208 * The session daemon will only provide a chunk directory file
1209 * descriptor for local traces.
1210 */
1211 if (is_local_trace) {
1212 int chunk_dirfd;
1213
1214 /* Acknowledge the reception of the command. */
1215 ret = consumer_send_status_msg(sock,
1216 LTTCOMM_CONSUMERD_SUCCESS);
1217 if (ret < 0) {
1218 /* Somehow, the session daemon is not responding anymore. */
1219 goto end_nosignal;
1220 }
1221
1222 ret = lttcomm_recv_fds_unix_sock(sock, &chunk_dirfd, 1);
1223 if (ret != sizeof(chunk_dirfd)) {
1224 ERR("Failed to receive trace chunk directory file descriptor");
1225 goto error_fatal;
1226 }
1227
1228 DBG("Received trace chunk directory fd (%d)",
1229 chunk_dirfd);
1230 ret = lttng_directory_handle_init_from_dirfd(
1231 &chunk_directory_handle.value,
1232 chunk_dirfd);
1233 if (ret) {
1234 ERR("Failed to initialize chunk directory handle from directory file descriptor");
1235 if (close(chunk_dirfd)) {
1236 PERROR("Failed to close chunk directory file descriptor");
1237 }
1238 goto error_fatal;
1239 }
1240 chunk_directory_handle.is_set = true;
1241 }
1242
1243 ret_code = lttng_consumer_create_trace_chunk(
1244 !is_local_trace ? &relayd_id : NULL,
1245 msg.u.create_trace_chunk.session_id,
1246 msg.u.create_trace_chunk.chunk_id,
1247 (time_t) msg.u.create_trace_chunk
1248 .creation_timestamp,
1249 chunk_override_name,
1250 msg.u.create_trace_chunk.credentials.is_set ?
1251 &credentials :
1252 NULL,
1253 chunk_directory_handle.is_set ?
1254 &chunk_directory_handle.value :
1255 NULL);
1256
1257 if (chunk_directory_handle.is_set) {
1258 lttng_directory_handle_fini(
1259 &chunk_directory_handle.value);
1260 }
1261 goto end_msg_sessiond;
1262 }
1263 case LTTNG_CONSUMER_CLOSE_TRACE_CHUNK:
1264 {
1265 enum lttng_trace_chunk_command_type close_command =
1266 msg.u.close_trace_chunk.close_command.value;
1267 const uint64_t relayd_id =
1268 msg.u.close_trace_chunk.relayd_id.value;
1269 struct lttcomm_consumer_close_trace_chunk_reply reply;
1270 char path[LTTNG_PATH_MAX];
1271
1272 ret_code = lttng_consumer_close_trace_chunk(
1273 msg.u.close_trace_chunk.relayd_id.is_set ?
1274 &relayd_id :
1275 NULL,
1276 msg.u.close_trace_chunk.session_id,
1277 msg.u.close_trace_chunk.chunk_id,
1278 (time_t) msg.u.close_trace_chunk.close_timestamp,
1279 msg.u.close_trace_chunk.close_command.is_set ?
1280 &close_command :
1281 NULL, path);
1282 reply.ret_code = ret_code;
1283 reply.path_length = strlen(path) + 1;
1284 ret = lttcomm_send_unix_sock(sock, &reply, sizeof(reply));
1285 if (ret != sizeof(reply)) {
1286 goto error_fatal;
1287 }
1288 ret = lttcomm_send_unix_sock(sock, path, reply.path_length);
1289 if (ret != reply.path_length) {
1290 goto error_fatal;
1291 }
1292 goto end_nosignal;
1293 }
1294 case LTTNG_CONSUMER_TRACE_CHUNK_EXISTS:
1295 {
1296 const uint64_t relayd_id =
1297 msg.u.trace_chunk_exists.relayd_id.value;
1298
1299 ret_code = lttng_consumer_trace_chunk_exists(
1300 msg.u.trace_chunk_exists.relayd_id.is_set ?
1301 &relayd_id : NULL,
1302 msg.u.trace_chunk_exists.session_id,
1303 msg.u.trace_chunk_exists.chunk_id);
1304 goto end_msg_sessiond;
1305 }
1306 default:
1307 goto end_nosignal;
1308 }
1309
1310 end_nosignal:
1311 /*
1312 * Return 1 to indicate success since the 0 value can be a socket
1313 * shutdown during the recv() or send() call.
1314 */
1315 ret = 1;
1316 goto end;
1317 error_fatal:
1318 /* This will issue a consumer stop. */
1319 ret = -1;
1320 goto end;
1321 end_msg_sessiond:
1322 /*
1323 * The returned value here is not useful since either way we'll return 1 to
1324 * the caller because the session daemon socket management is done
1325 * elsewhere. Returning a negative code or 0 would shut down the consumer.
1326 */
1327 ret = consumer_send_status_msg(sock, ret_code);
1328 if (ret < 0) {
1329 goto error_fatal;
1330 }
1331 ret = 1;
1332 end:
1333 health_code_update();
1334 rcu_read_unlock();
1335 return ret;
1336 }
1337
1338 /*
1339 * Sync the metadata: flush the kernel metadata ring buffer and take a
1340 * snapshot of it so that the metadata thread can consume the data.
1341 *
1342 * Metadata stream lock MUST be acquired.
1343 *
1344 * Return 0 if new metadata is available, ENODATA if the metadata stream
1345 * is empty, or a negative value on error.
1346 */
1347 int lttng_kconsumer_sync_metadata(struct lttng_consumer_stream *metadata)
1348 {
1349 int ret;
1350
1351 assert(metadata);
1352
1353 ret = kernctl_buffer_flush(metadata->wait_fd);
1354 if (ret < 0) {
1355 ERR("Failed to flush kernel stream");
1356 goto end;
1357 }
1358
1359 ret = kernctl_snapshot(metadata->wait_fd);
1360 if (ret < 0) {
1361 if (ret != -EAGAIN) {
1362 ERR("Sync metadata, taking kernel snapshot failed.");
1363 goto end;
1364 }
1365 DBG("Sync metadata, no new kernel metadata");
1366 /* No new metadata, exit. */
1367 ret = ENODATA;
1368 goto end;
1369 }
1370
1371 end:
1372 return ret;
1373 }
1374
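/*
 * Extract the sub-buffer information common to data and metadata streams:
 * the sub-buffer size and its padded size, as reported by the kernel tracer.
 */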
1375 static
1376 int extract_common_subbuffer_info(struct lttng_consumer_stream *stream,
1377 struct stream_subbuffer *subbuf)
1378 {
1379 int ret;
1380
1381 ret = kernctl_get_subbuf_size(
1382 stream->wait_fd, &subbuf->info.data.subbuf_size);
1383 if (ret) {
1384 goto end;
1385 }
1386
1387 ret = kernctl_get_padded_subbuf_size(
1388 stream->wait_fd, &subbuf->info.data.padded_subbuf_size);
1389 if (ret) {
1390 goto end;
1391 }
1392
1393 end:
1394 return ret;
1395 }
1396
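/*
 * Extract the information of a metadata sub-buffer: the common fields plus
 * the metadata version.
 */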
1397 static
1398 int extract_metadata_subbuffer_info(struct lttng_consumer_stream *stream,
1399 struct stream_subbuffer *subbuf)
1400 {
1401 int ret;
1402
1403 ret = extract_common_subbuffer_info(stream, subbuf);
1404 if (ret) {
1405 goto end;
1406 }
1407
1408 ret = kernctl_get_metadata_version(
1409 stream->wait_fd, &subbuf->info.metadata.version);
1410 if (ret) {
1411 goto end;
1412 }
1413
1414 end:
1415 return ret;
1416 }
1417
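/*
 * Extract the information of a data sub-buffer: the common fields plus the
 * packet size, content size, begin/end timestamps, discarded event count,
 * stream id and, when supported by lttng-modules, the sequence number and
 * stream instance id.
 */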
1418 static
1419 int extract_data_subbuffer_info(struct lttng_consumer_stream *stream,
1420 struct stream_subbuffer *subbuf)
1421 {
1422 int ret;
1423
1424 ret = extract_common_subbuffer_info(stream, subbuf);
1425 if (ret) {
1426 goto end;
1427 }
1428
1429 ret = kernctl_get_packet_size(
1430 stream->wait_fd, &subbuf->info.data.packet_size);
1431 if (ret < 0) {
1432 PERROR("Failed to get sub-buffer packet size");
1433 goto end;
1434 }
1435
1436 ret = kernctl_get_content_size(
1437 stream->wait_fd, &subbuf->info.data.content_size);
1438 if (ret < 0) {
1439 PERROR("Failed to get sub-buffer content size");
1440 goto end;
1441 }
1442
1443 ret = kernctl_get_timestamp_begin(
1444 stream->wait_fd, &subbuf->info.data.timestamp_begin);
1445 if (ret < 0) {
1446 PERROR("Failed to get sub-buffer begin timestamp");
1447 goto end;
1448 }
1449
1450 ret = kernctl_get_timestamp_end(
1451 stream->wait_fd, &subbuf->info.data.timestamp_end);
1452 if (ret < 0) {
1453 PERROR("Failed to get sub-buffer end timestamp");
1454 goto end;
1455 }
1456
1457 ret = kernctl_get_events_discarded(
1458 stream->wait_fd, &subbuf->info.data.events_discarded);
1459 if (ret) {
1460 PERROR("Failed to get sub-buffer events discarded count");
1461 goto end;
1462 }
1463
1464 ret = kernctl_get_sequence_number(stream->wait_fd,
1465 &subbuf->info.data.sequence_number.value);
1466 if (ret) {
1467 /* May not be supported by older LTTng-modules. */
1468 if (ret != -ENOTTY) {
1469 PERROR("Failed to get sub-buffer sequence number");
1470 goto end;
1471 }
1472 } else {
1473 subbuf->info.data.sequence_number.is_set = true;
1474 }
1475
1476 ret = kernctl_get_stream_id(
1477 stream->wait_fd, &subbuf->info.data.stream_id);
1478 if (ret < 0) {
1479 PERROR("Failed to get stream id");
1480 goto end;
1481 }
1482
1483 ret = kernctl_get_instance_id(stream->wait_fd,
1484 &subbuf->info.data.stream_instance_id.value);
1485 if (ret) {
1486 /* May not be supported by older LTTng-modules. */
1487 if (ret != -ENOTTY) {
1488 PERROR("Failed to get stream instance id");
1489 goto end;
1490 }
1491 } else {
1492 subbuf->info.data.stream_instance_id.is_set = true;
1493 }
1494 end:
1495 return ret;
1496 }
1497
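/*
 * Reserve the next sub-buffer for reading and extract its information
 * through the stream's extract_subbuffer_info operation.
 */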
1498 static
1499 int get_subbuffer_common(struct lttng_consumer_stream *stream,
1500 struct stream_subbuffer *subbuffer)
1501 {
1502 int ret;
1503
1504 ret = kernctl_get_next_subbuf(stream->wait_fd);
1505 if (ret) {
1506 goto end;
1507 }
1508
1509 ret = stream->read_subbuffer_ops.extract_subbuffer_info(
1510 stream, subbuffer);
1511 end:
1512 return ret;
1513 }
1514
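/*
 * Get the next sub-buffer for a splice-backed stream: the data is exposed
 * through the stream's wait_fd, from which the consumer will splice it.
 */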
1515 static
1516 int get_next_subbuffer_splice(struct lttng_consumer_stream *stream,
1517 struct stream_subbuffer *subbuffer)
1518 {
1519 int ret;
1520
1521 ret = get_subbuffer_common(stream, subbuffer);
1522 if (ret) {
1523 goto end;
1524 }
1525
1526 subbuffer->buffer.fd = stream->wait_fd;
1527 end:
1528 return ret;
1529 }
1530
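/*
 * Get the next sub-buffer for an mmap-backed stream: the data is exposed as
 * a buffer view over the stream's mmap'ed region, spanning the padded
 * sub-buffer size.
 */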
1531 static
1532 int get_next_subbuffer_mmap(struct lttng_consumer_stream *stream,
1533 struct stream_subbuffer *subbuffer)
1534 {
1535 int ret;
1536 const char *addr;
1537
1538 ret = get_subbuffer_common(stream, subbuffer);
1539 if (ret) {
1540 goto end;
1541 }
1542
1543 ret = get_current_subbuf_addr(stream, &addr);
1544 if (ret) {
1545 goto end;
1546 }
1547
1548 subbuffer->buffer.buffer = lttng_buffer_view_init(
1549 addr, 0, subbuffer->info.data.padded_subbuf_size);
1550 end:
1551 return ret;
1552 }
1553
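/*
 * Release the current sub-buffer back to the kernel tracer once it has been
 * consumed.
 */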
1554 static
1555 int put_next_subbuffer(struct lttng_consumer_stream *stream,
1556 struct stream_subbuffer *subbuffer)
1557 {
1558 const int ret = kernctl_put_next_subbuf(stream->wait_fd);
1559
1560 if (ret) {
1561 if (ret == -EFAULT) {
1562 PERROR("Error in unreserving sub buffer");
1563 } else if (ret == -EIO) {
1564 /* Should never happen with newer LTTng versions */
1565 PERROR("Reader has been pushed by the writer, last sub-buffer corrupted");
1566 }
1567 }
1568
1569 return ret;
1570 }
1571
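/*
 * Wire the read_subbuf sub-operations of a kernel stream according to its
 * output type (mmap or splice) and stream type (data or metadata). Data
 * streams of live sessions additionally get a live beacon (index flush)
 * operation.
 *
 * Rough outline of how the generic consumer is expected to drive these
 * callbacks for each sub-buffer (an illustrative sketch, not the exact code
 * of lttng_consumer_read_subbuffer()):
 *
 *	if (ops->get_next_subbuffer(stream, &subbuf))
 *		return;            // nothing to read, or error
 *	// consume the sub-buffer (mmap write or splice), emit index/beacon...
 *	ops->put_next_subbuffer(stream, &subbuf);  // hand it back to the tracer
 */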
1572 static void lttng_kconsumer_set_stream_ops(
1573 struct lttng_consumer_stream *stream)
1574 {
1575 if (stream->chan->output == CONSUMER_CHANNEL_MMAP) {
1576 stream->read_subbuffer_ops.get_next_subbuffer =
1577 get_next_subbuffer_mmap;
1578 } else {
1579 stream->read_subbuffer_ops.get_next_subbuffer =
1580 get_next_subbuffer_splice;
1581 }
1582
1583 if (stream->metadata_flag) {
1584 stream->read_subbuffer_ops.extract_subbuffer_info =
1585 extract_metadata_subbuffer_info;
1586 } else {
1587 stream->read_subbuffer_ops.extract_subbuffer_info =
1588 extract_data_subbuffer_info;
1589 if (stream->chan->is_live) {
1590 stream->read_subbuffer_ops.send_live_beacon =
1591 consumer_flush_kernel_index;
1592 }
1593 }
1594
1595 stream->read_subbuffer_ops.put_next_subbuffer = put_next_subbuffer;
1596 }
1597
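/*
 * Prepare a newly received kernel stream: create its output files when
 * tracing locally to a monitored channel with a current trace chunk, mmap
 * the ring buffer when the channel uses mmap output, and install the
 * read_subbuf sub-operations.
 *
 * Returns 0 on success, < 0 on error.
 */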
1598 int lttng_kconsumer_on_recv_stream(struct lttng_consumer_stream *stream)
1599 {
1600 int ret;
1601
1602 assert(stream);
1603
1604 /*
1605 * Don't create anything if this is set for streaming or if there is
1606 * no current trace chunk on the parent channel.
1607 */
1608 if (stream->net_seq_idx == (uint64_t) -1ULL && stream->chan->monitor &&
1609 stream->chan->trace_chunk) {
1610 ret = consumer_stream_create_output_files(stream, true);
1611 if (ret) {
1612 goto error;
1613 }
1614 }
1615
1616 if (stream->output == LTTNG_EVENT_MMAP) {
1617 /* Get the length of the mmap region. */
1618 unsigned long mmap_len;
1619
1620 ret = kernctl_get_mmap_len(stream->wait_fd, &mmap_len);
1621 if (ret != 0) {
1622 PERROR("kernctl_get_mmap_len");
1623 goto error_close_fd;
1624 }
1625 stream->mmap_len = (size_t) mmap_len;
1626
1627 stream->mmap_base = mmap(NULL, stream->mmap_len, PROT_READ,
1628 MAP_PRIVATE, stream->wait_fd, 0);
1629 if (stream->mmap_base == MAP_FAILED) {
1630 PERROR("Error mmapping");
1631 ret = -1;
1632 goto error_close_fd;
1633 }
1634 }
1635
1636 lttng_kconsumer_set_stream_ops(stream);
1637
1638 /* we return 0 to let the library handle the FD internally */
1639 return 0;
1640
1641 error_close_fd:
1642 if (stream->out_fd >= 0) {
1643 int err;
1644
1645 err = close(stream->out_fd);
1646 assert(!err);
1647 stream->out_fd = -1;
1648 }
1649 error:
1650 return ret;
1651 }
1652
1653 /*
1654 * Check if data is still being extracted from the buffers for a specific
1655 * stream. The consumer data lock and the stream lock MUST be acquired
1656 * before calling this function.
1657 *
1658 * Return 1 if trace data is still being read (pending), or 0 if the data
1659 * is available for the trace viewer to read.
1660 */
1661 int lttng_kconsumer_data_pending(struct lttng_consumer_stream *stream)
1662 {
1663 int ret;
1664
1665 assert(stream);
1666
1667 if (stream->endpoint_status != CONSUMER_ENDPOINT_ACTIVE) {
1668 ret = 0;
1669 goto end;
1670 }
1671
1672 ret = kernctl_get_next_subbuf(stream->wait_fd);
1673 if (ret == 0) {
1674 /* There is still data so let's put back this subbuffer. */
1675 ret = kernctl_put_subbuf(stream->wait_fd);
1676 assert(ret == 0);
1677 ret = 1; /* Data is pending */
1678 goto end;
1679 }
1680
1681 /* Data is NOT pending and ready to be read. */
1682 ret = 0;
1683
1684 end:
1685 return ret;
1686 }