/*
 * Copyright (C) 2011 EfficiOS Inc.
 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (C) 2017 Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#include "kernel-consumer.hpp"

#include <common/buffer-view.hpp>
#include <common/common.hpp>
#include <common/compat/endian.hpp>
#include <common/consumer/consumer-stream.hpp>
#include <common/consumer/consumer-timer.hpp>
#include <common/consumer/consumer.hpp>
#include <common/consumer/metadata-bucket.hpp>
#include <common/index/index.hpp>
#include <common/kernel-ctl/kernel-ctl.hpp>
#include <common/optional.hpp>
#include <common/pipe.hpp>
#include <common/relayd/relayd.hpp>
#include <common/sessiond-comm/relayd.hpp>
#include <common/sessiond-comm/sessiond-comm.hpp>
#include <common/urcu.hpp>
#include <common/utils.hpp>

#include <bin/lttng-consumerd/health-consumerd.hpp>

#include <sys/socket.h>
#include <sys/types.h>

extern struct lttng_consumer_global_data the_consumer_data;
extern int consumer_poll_timeout;

/*
 * Take a snapshot for a specific fd
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_kconsumer_take_snapshot(struct lttng_consumer_stream *stream)
{
	int ret = 0;
	int infd = stream->wait_fd;

	ret = kernctl_snapshot(infd);
	/*
	 * -EAGAIN is not an error, it just means that there is no data to
	 * be read.
	 */
	if (ret != 0 && ret != -EAGAIN) {
		PERROR("Getting sub-buffer snapshot.");
	}

	return ret;
}
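
/*
 * A minimal usage sketch (illustrative only, assuming the caller already
 * holds the stream lock), mirroring what lttng_kconsumer_snapshot_channel()
 * does further below:
 *
 *   if (lttng_kconsumer_take_snapshot(stream) == 0) {
 *           unsigned long produced, consumed;
 *
 *           lttng_kconsumer_get_produced_snapshot(stream, &produced);
 *           lttng_kconsumer_get_consumed_snapshot(stream, &consumed);
 *   }
 */
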
/*
 * Sample consumed and produced positions for a specific fd.
 *
 * Returns 0 on success, < 0 on error.
 */
int lttng_kconsumer_sample_snapshot_positions(struct lttng_consumer_stream *stream)
{
	return kernctl_snapshot_sample_positions(stream->wait_fd);
}

/*
 * Get the produced position
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_kconsumer_get_produced_snapshot(struct lttng_consumer_stream *stream, unsigned long *pos)
{
	int ret;
	int infd = stream->wait_fd;

	ret = kernctl_snapshot_get_produced(infd, pos);
	if (ret != 0) {
		PERROR("kernctl_snapshot_get_produced");
	}

	return ret;
}

/*
 * Get the consumed position
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_kconsumer_get_consumed_snapshot(struct lttng_consumer_stream *stream, unsigned long *pos)
{
	int ret;
	int infd = stream->wait_fd;

	ret = kernctl_snapshot_get_consumed(infd, pos);
	if (ret != 0) {
		PERROR("kernctl_snapshot_get_consumed");
	}

	return ret;
}

static int get_current_subbuf_addr(struct lttng_consumer_stream *stream, const char **addr)
{
	int ret;
	unsigned long mmap_offset;
	const char *mmap_base = (const char *) stream->mmap_base;

	ret = kernctl_get_mmap_read_offset(stream->wait_fd, &mmap_offset);
	if (ret < 0) {
		PERROR("Failed to get mmap read offset");
		goto error;
	}

	*addr = mmap_base + mmap_offset;

error:
	return ret;
}
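
/*
 * Note: the address computed above points into the stream's mmap()ed ring
 * buffer (stream->mmap_base) at the offset of the sub-buffer currently
 * reserved for reading; callers in this file only use it between the
 * matching kernctl_get_*subbuf() and kernctl_put_*subbuf() calls.
 */
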
/*
 * Take a snapshot of all the streams of a channel.
 * RCU read-side lock must be held across this function to ensure existence of
 * channel.
 *
 * Returns 0 on success, < 0 on error
 */
static int lttng_kconsumer_snapshot_channel(struct lttng_consumer_channel *channel,
					    uint64_t key,
					    char *path,
					    uint64_t relayd_id,
					    uint64_t nb_packets_per_stream)
{
	int ret;
	struct lttng_consumer_stream *stream;

	DBG("Kernel consumer snapshot channel %" PRIu64, key);

	/* Prevent channel modifications while we perform the snapshot. */
	pthread_mutex_lock(&channel->lock);

	lttng::urcu::read_lock_guard read_lock;

	/* Splice is not supported yet for channel snapshot. */
	if (channel->output != CONSUMER_CHANNEL_MMAP) {
		ERR("Unsupported output type for channel \"%s\": mmap output is required to record a snapshot",
		    channel->name);
		ret = -1;
		goto end;
	}

	cds_list_for_each_entry (stream, &channel->streams.head, send_node) {
		unsigned long consumed_pos, produced_pos;

		health_code_update();

		/*
		 * Lock stream because we are about to change its state.
		 */
		pthread_mutex_lock(&stream->lock);

		LTTNG_ASSERT(channel->trace_chunk);
		if (!lttng_trace_chunk_get(channel->trace_chunk)) {
			/*
			 * Can't happen barring an internal error as the channel
			 * holds a reference to the trace chunk.
			 */
			ERR("Failed to acquire reference to channel's trace chunk");
			ret = -1;
			goto error_unlock;
		}
		LTTNG_ASSERT(!stream->trace_chunk);
		stream->trace_chunk = channel->trace_chunk;

		/*
		 * Assign the received relayd ID so we can use it for streaming. The streams
		 * are not visible to anyone so this is OK to change it.
		 */
		stream->net_seq_idx = relayd_id;
		channel->relayd_id = relayd_id;
		if (relayd_id != (uint64_t) -1ULL) {
			ret = consumer_send_relayd_stream(stream, path);
			if (ret < 0) {
				ERR("sending stream to relayd");
				goto error_close_stream_output;
			}
		} else {
			ret = consumer_stream_create_output_files(stream, false);
			if (ret < 0) {
				goto error_close_stream_output;
			}
			DBG("Kernel consumer snapshot stream (%" PRIu64 ")", stream->key);
		}

		ret = kernctl_buffer_flush_empty(stream->wait_fd);
		if (ret < 0) {
			/*
			 * Doing a buffer flush which does not take into
			 * account empty packets. This is not perfect
			 * for stream intersection, but required as a
			 * fall-back when "flush_empty" is not
			 * implemented by lttng-modules.
			 */
			ret = kernctl_buffer_flush(stream->wait_fd);
			if (ret < 0) {
				ERR("Failed to flush kernel stream");
				goto error_close_stream_output;
			}
		}

		ret = lttng_kconsumer_take_snapshot(stream);
		if (ret < 0) {
			ERR("Taking kernel snapshot");
			goto error_close_stream_output;
		}

		ret = lttng_kconsumer_get_produced_snapshot(stream, &produced_pos);
		if (ret < 0) {
			ERR("Produced kernel snapshot position");
			goto error_close_stream_output;
		}

		ret = lttng_kconsumer_get_consumed_snapshot(stream, &consumed_pos);
		if (ret < 0) {
			ERR("Consumerd kernel snapshot position");
			goto error_close_stream_output;
		}

		consumed_pos = consumer_get_consume_start_pos(
			consumed_pos, produced_pos, nb_packets_per_stream, stream->max_sb_size);

		while ((long) (consumed_pos - produced_pos) < 0) {
			ssize_t read_len;
			unsigned long len, padded_len;
			const char *subbuf_addr;
			struct lttng_buffer_view subbuf_view;

			health_code_update();
			DBG("Kernel consumer taking snapshot at pos %lu", consumed_pos);

			ret = kernctl_get_subbuf(stream->wait_fd, &consumed_pos);
			if (ret < 0) {
				if (ret != -EAGAIN) {
					PERROR("kernctl_get_subbuf snapshot");
					goto error_close_stream_output;
				}
				DBG("Kernel consumer get subbuf failed. Skipping it.");
				consumed_pos += stream->max_sb_size;
				stream->chan->lost_packets++;
				continue;
			}

			ret = kernctl_get_subbuf_size(stream->wait_fd, &len);
			if (ret < 0) {
				ERR("Snapshot kernctl_get_subbuf_size");
				goto error_put_subbuf;
			}

			ret = kernctl_get_padded_subbuf_size(stream->wait_fd, &padded_len);
			if (ret < 0) {
				ERR("Snapshot kernctl_get_padded_subbuf_size");
				goto error_put_subbuf;
			}

			ret = get_current_subbuf_addr(stream, &subbuf_addr);
			if (ret) {
				goto error_put_subbuf;
			}

			subbuf_view = lttng_buffer_view_init(subbuf_addr, 0, padded_len);
			read_len = lttng_consumer_on_read_subbuffer_mmap(
				stream, &subbuf_view, padded_len - len);
			/*
			 * We write the padded len in local tracefiles but the data len
			 * when using a relay. Display the error but continue processing
			 * to try to release the subbuffer.
			 */
			if (relayd_id != (uint64_t) -1ULL) {
				if (read_len != len) {
					ERR("Error sending to the relay (ret: %zd != len: %lu)",
					    read_len,
					    len);
				}
			} else {
				if (read_len != padded_len) {
					ERR("Error writing to tracefile (ret: %zd != len: %lu)",
					    read_len,
					    padded_len);
				}
			}

			ret = kernctl_put_subbuf(stream->wait_fd);
			if (ret < 0) {
				ERR("Snapshot kernctl_put_subbuf");
				goto error_close_stream_output;
			}
			consumed_pos += stream->max_sb_size;
		}

		consumer_stream_close_output(stream);
		pthread_mutex_unlock(&stream->lock);
	}

	/* All good! */
	ret = 0;
	goto end;

error_put_subbuf:
	ret = kernctl_put_subbuf(stream->wait_fd);
	if (ret < 0) {
		ERR("Snapshot kernctl_put_subbuf error path");
	}
error_close_stream_output:
	consumer_stream_close_output(stream);
error_unlock:
	pthread_mutex_unlock(&stream->lock);
end:
	pthread_mutex_unlock(&channel->lock);
	return ret;
}
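
/*
 * Note on the snapshot loop above: the window to copy spans from the
 * adjusted consumed position (capped by nb_packets_per_stream) up to the
 * produced position sampled at snapshot time, advancing by one sub-buffer
 * (stream->max_sb_size) per iteration.
 */
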
/*
 * Read the whole metadata available for a snapshot.
 * RCU read-side lock must be held across this function to ensure existence of
 * metadata_channel.
 *
 * Returns 0 on success, < 0 on error
 */
static int lttng_kconsumer_snapshot_metadata(struct lttng_consumer_channel *metadata_channel,
					     uint64_t key,
					     char *path,
					     uint64_t relayd_id,
					     struct lttng_consumer_local_data *ctx)
{
	int ret, use_relayd = 0;
	ssize_t ret_read;
	struct lttng_consumer_stream *metadata_stream;

	LTTNG_ASSERT(ctx);

	DBG("Kernel consumer snapshot metadata with key %" PRIu64 " at path %s", key, path);

	lttng::urcu::read_lock_guard read_lock;

	metadata_stream = metadata_channel->metadata_stream;
	LTTNG_ASSERT(metadata_stream);

	metadata_stream->read_subbuffer_ops.lock(metadata_stream);
	LTTNG_ASSERT(metadata_channel->trace_chunk);
	LTTNG_ASSERT(metadata_stream->trace_chunk);

	/* Flag once that we have a valid relayd for the stream. */
	if (relayd_id != (uint64_t) -1ULL) {
		use_relayd = 1;
	}

	if (use_relayd) {
		ret = consumer_send_relayd_stream(metadata_stream, path);
		if (ret < 0) {
			goto error_snapshot;
		}
	} else {
		ret = consumer_stream_create_output_files(metadata_stream, false);
		if (ret < 0) {
			goto error_snapshot;
		}
	}

	do {
		health_code_update();

		ret_read = lttng_consumer_read_subbuffer(metadata_stream, ctx, true);
		if (ret_read < 0) {
			ERR("Kernel snapshot reading metadata subbuffer (ret: %zd)", ret_read);
			ret = ret_read;
			goto error_snapshot;
		}
	} while (ret_read > 0);

	if (use_relayd) {
		close_relayd_stream(metadata_stream);
		metadata_stream->net_seq_idx = (uint64_t) -1ULL;
	} else {
		if (metadata_stream->out_fd >= 0) {
			ret = close(metadata_stream->out_fd);
			if (ret < 0) {
				PERROR("Kernel consumer snapshot metadata close out_fd");
				/*
				 * Don't go on error here since the snapshot was successful at this
				 * point but somehow the close failed.
				 */
			}
			metadata_stream->out_fd = -1;
			lttng_trace_chunk_put(metadata_stream->trace_chunk);
			metadata_stream->trace_chunk = nullptr;
		}
	}

	ret = 0;
error_snapshot:
	metadata_stream->read_subbuffer_ops.unlock(metadata_stream);
	consumer_stream_destroy(metadata_stream, nullptr);
	metadata_channel->metadata_stream = nullptr;

	return ret;
}
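
/*
 * The metadata snapshot above simply drains the metadata stream through
 * lttng_consumer_read_subbuffer() until it reports no more data, then closes
 * the output and destroys the (snapshot-only) metadata stream.
 */
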
/*
 * Receive command from session daemon and process it.
 *
 * Return 1 on success else a negative value or 0.
 */
int lttng_kconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
			     int sock,
			     struct pollfd *consumer_sockpoll)
{
	int ret_func;
	enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
	struct lttcomm_consumer_msg msg;

	health_code_update();

	{
		ssize_t ret_recv;

		ret_recv = lttcomm_recv_unix_sock(sock, &msg, sizeof(msg));
		if (ret_recv != sizeof(msg)) {
			if (ret_recv > 0) {
				lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_CMD);
				ret_recv = -1;
			}
			return ret_recv;
		}
	}

	health_code_update();

	/* Deprecated command */
	LTTNG_ASSERT(msg.cmd_type != LTTNG_CONSUMER_STOP);

	health_code_update();

	/* relayd needs RCU read-side protection */
	lttng::urcu::read_lock_guard read_lock;

	switch (msg.cmd_type) {
	case LTTNG_CONSUMER_ADD_RELAYD_SOCKET:
	{
		uint32_t major = msg.u.relayd_sock.major;
		uint32_t minor = msg.u.relayd_sock.minor;
		enum lttcomm_sock_proto protocol =
			(enum lttcomm_sock_proto) msg.u.relayd_sock.relayd_socket_protocol;

		/* Session daemon status messages are handled in the following call. */
		consumer_add_relayd_socket(msg.u.relayd_sock.net_index,
					   msg.u.relayd_sock.type,
					   ctx,
					   sock,
					   consumer_sockpoll,
					   msg.u.relayd_sock.session_id,
					   msg.u.relayd_sock.relayd_session_id,
					   major,
					   minor,
					   protocol);
		goto end_nosignal;
	}
	case LTTNG_CONSUMER_ADD_CHANNEL:
	{
		struct lttng_consumer_channel *new_channel;
		int ret_send_status, ret_add_channel = 0;
		const uint64_t chunk_id = msg.u.channel.chunk_id.value;

		health_code_update();

		/* First send a status message before receiving the fds. */
		ret_send_status = consumer_send_status_msg(sock, ret_code);
		if (ret_send_status < 0) {
			/* Somehow, the session daemon is not responding anymore. */
			goto error_fatal;
		}

		health_code_update();

		DBG("consumer_add_channel %" PRIu64, msg.u.channel.channel_key);
		new_channel = consumer_allocate_channel(msg.u.channel.channel_key,
							msg.u.channel.session_id,
							msg.u.channel.chunk_id.is_set ? &chunk_id :
											nullptr,
							msg.u.channel.pathname,
							msg.u.channel.name,
							msg.u.channel.relayd_id,
							msg.u.channel.output,
							msg.u.channel.tracefile_size,
							msg.u.channel.tracefile_count,
							0,
							msg.u.channel.monitor,
							msg.u.channel.live_timer_interval,
							msg.u.channel.is_live,
							nullptr,
							nullptr);
		if (new_channel == nullptr) {
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
			goto end_nosignal;
		}
		new_channel->nb_init_stream_left = msg.u.channel.nb_init_streams;
		switch (msg.u.channel.output) {
		case LTTNG_EVENT_SPLICE:
			new_channel->output = CONSUMER_CHANNEL_SPLICE;
			break;
		case LTTNG_EVENT_MMAP:
			new_channel->output = CONSUMER_CHANNEL_MMAP;
			break;
		default:
			ERR("Channel output unknown %d", msg.u.channel.output);
			goto end_nosignal;
		}

		/* Translate and save channel type. */
		switch (msg.u.channel.type) {
		case CONSUMER_CHANNEL_TYPE_DATA:
		case CONSUMER_CHANNEL_TYPE_METADATA:
			new_channel->type = (consumer_channel_type) msg.u.channel.type;
			break;
		default:
			goto end_nosignal;
		}

		health_code_update();

		if (ctx->on_recv_channel != nullptr) {
			int ret_recv_channel = ctx->on_recv_channel(new_channel);
			if (ret_recv_channel == 0) {
				ret_add_channel = consumer_add_channel(new_channel, ctx);
			} else if (ret_recv_channel < 0) {
				goto end_nosignal;
			}
		} else {
			ret_add_channel = consumer_add_channel(new_channel, ctx);
		}
		if (msg.u.channel.type == CONSUMER_CHANNEL_TYPE_DATA && !ret_add_channel) {
			int monitor_start_ret;

			DBG("Consumer starting monitor timer");
			consumer_timer_live_start(new_channel, msg.u.channel.live_timer_interval);
			monitor_start_ret = consumer_timer_monitor_start(
				new_channel, msg.u.channel.monitor_timer_interval);
			if (monitor_start_ret < 0) {
				ERR("Starting channel monitoring timer failed");
			}
		}

		health_code_update();

		/* If we received an error in add_channel, we need to report it. */
		if (ret_add_channel < 0) {
			ret_send_status = consumer_send_status_msg(sock, ret_add_channel);
			if (ret_send_status < 0) {
				goto error_fatal;
			}
			goto end_nosignal;
		}

		goto end_nosignal;
	}
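	/*
	 * Protocol note for ADD_CHANNEL: a first status message acknowledges the
	 * command before the channel is allocated; a second status message is only
	 * sent when consumer_add_channel() itself fails, so that the session daemon
	 * can report the error.
	 */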
	case LTTNG_CONSUMER_ADD_STREAM:
	{
		int fd;
		struct lttng_pipe *stream_pipe;
		struct lttng_consumer_stream *new_stream;
		struct lttng_consumer_channel *channel;
		int alloc_ret = 0;
		int ret_send_status, ret_poll, ret_get_max_subbuf_size;
		ssize_t ret_pipe_write, ret_recv;

		/*
		 * Get stream's channel reference. Needed when adding the stream to the
		 * channel.
		 */
		channel = consumer_find_channel(msg.u.stream.channel_key);
		if (!channel) {
			/*
			 * We could not find the channel. Can happen if cpu hotplug
			 * happens while tearing down.
			 */
			ERR("Unable to find channel key %" PRIu64, msg.u.stream.channel_key);
			ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
		}

		health_code_update();

		/* First send a status message before receiving the fds. */
		ret_send_status = consumer_send_status_msg(sock, ret_code);
		if (ret_send_status < 0) {
			/* Somehow, the session daemon is not responding anymore. */
			goto error_add_stream_fatal;
		}

		health_code_update();

		if (ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
			/* Channel was not found. */
			goto error_add_stream_nosignal;
		}

		/* Blocking call */
		health_poll_entry();
		ret_poll = lttng_consumer_poll_socket(consumer_sockpoll);
		health_poll_exit();
		if (ret_poll) {
			goto error_add_stream_fatal;
		}

		health_code_update();

		/* Get stream file descriptor from socket */
		ret_recv = lttcomm_recv_fds_unix_sock(sock, &fd, 1);
		if (ret_recv != sizeof(fd)) {
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_FD);
			ret_func = ret_recv;
			goto end;
		}

		health_code_update();

		/*
		 * Send status code to session daemon only if the recv works. If the
		 * above recv() failed, the session daemon is notified through the
		 * error socket and the teardown is eventually done.
		 */
		ret_send_status = consumer_send_status_msg(sock, ret_code);
		if (ret_send_status < 0) {
			/* Somehow, the session daemon is not responding anymore. */
			goto error_add_stream_nosignal;
		}

		health_code_update();

		pthread_mutex_lock(&channel->lock);
		new_stream = consumer_stream_create(channel,
						    channel->key,
						    fd,
						    channel->name,
						    channel->relayd_id,
						    channel->session_id,
						    channel->trace_chunk,
						    msg.u.stream.cpu,
						    &alloc_ret,
						    channel->type,
						    channel->monitor);
		if (new_stream == nullptr) {
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
			pthread_mutex_unlock(&channel->lock);
			goto error_add_stream_nosignal;
		}

		new_stream->wait_fd = fd;
		ret_get_max_subbuf_size =
			kernctl_get_max_subbuf_size(new_stream->wait_fd, &new_stream->max_sb_size);
		if (ret_get_max_subbuf_size < 0) {
			pthread_mutex_unlock(&channel->lock);
			ERR("Failed to get kernel maximal subbuffer size");
			goto error_add_stream_nosignal;
		}

		consumer_stream_update_channel_attributes(new_stream, channel);

		/*
		 * We've just assigned the channel to the stream so increment the
		 * refcount right now. We don't need to increment the refcount for
		 * streams in no monitor because we handle manually the cleanup of
		 * those. It is very important to make sure there is NO prior
		 * consumer_del_stream() calls or else the refcount will be unbalanced.
		 */
		if (channel->monitor) {
			uatomic_inc(&new_stream->chan->refcount);
		}

		/*
		 * The buffer flush is done on the session daemon side for the kernel
		 * so no need for the stream "hangup_flush_done" variable to be
		 * tracked. This is important for a kernel stream since we don't rely
		 * on the flush state of the stream to read data. It's not the case for
		 * user space tracing.
		 */
		new_stream->hangup_flush_done = 0;

		health_code_update();

		pthread_mutex_lock(&new_stream->lock);
		if (ctx->on_recv_stream) {
			int ret_recv_stream = ctx->on_recv_stream(new_stream);
			if (ret_recv_stream < 0) {
				pthread_mutex_unlock(&new_stream->lock);
				pthread_mutex_unlock(&channel->lock);
				consumer_stream_free(new_stream);
				goto error_add_stream_nosignal;
			}
		}
		health_code_update();

		if (new_stream->metadata_flag) {
			channel->metadata_stream = new_stream;
		}

		/* Do not monitor this stream. */
		if (!channel->monitor) {
			DBG("Kernel consumer add stream %s in no monitor mode with "
			    "relayd id %" PRIu64,
			    new_stream->name,
			    new_stream->net_seq_idx);
			cds_list_add(&new_stream->send_node, &channel->streams.head);
			pthread_mutex_unlock(&new_stream->lock);
			pthread_mutex_unlock(&channel->lock);
			break;
		}

		/* Send stream to relayd if the stream has an ID. */
		if (new_stream->net_seq_idx != (uint64_t) -1ULL) {
			int ret_send_relayd_stream;

			ret_send_relayd_stream =
				consumer_send_relayd_stream(new_stream, new_stream->chan->pathname);
			if (ret_send_relayd_stream < 0) {
				pthread_mutex_unlock(&new_stream->lock);
				pthread_mutex_unlock(&channel->lock);
				consumer_stream_free(new_stream);
				goto error_add_stream_nosignal;
			}

			/*
			 * If adding an extra stream to an already
			 * existing channel (e.g. cpu hotplug), we need
			 * to send the "streams_sent" command to relayd.
			 */
			if (channel->streams_sent_to_relayd) {
				int ret_send_relayd_streams_sent;

				ret_send_relayd_streams_sent =
					consumer_send_relayd_streams_sent(new_stream->net_seq_idx);
				if (ret_send_relayd_streams_sent < 0) {
					pthread_mutex_unlock(&new_stream->lock);
					pthread_mutex_unlock(&channel->lock);
					goto error_add_stream_nosignal;
				}
			}
		}
		pthread_mutex_unlock(&new_stream->lock);
		pthread_mutex_unlock(&channel->lock);

		/* Get the right pipe where the stream will be sent. */
		if (new_stream->metadata_flag) {
			consumer_add_metadata_stream(new_stream);
			stream_pipe = ctx->consumer_metadata_pipe;
		} else {
			consumer_add_data_stream(new_stream);
			stream_pipe = ctx->consumer_data_pipe;
		}

		/* Visible to other threads */
		new_stream->globally_visible = 1;

		health_code_update();

		ret_pipe_write =
			lttng_pipe_write(stream_pipe, &new_stream, sizeof(new_stream)); /* NOLINT sizeof used on a pointer. */
		if (ret_pipe_write < 0) {
			ERR("Consumer write %s stream to pipe %d",
			    new_stream->metadata_flag ? "metadata" : "data",
			    lttng_pipe_get_writefd(stream_pipe));
			if (new_stream->metadata_flag) {
				consumer_del_stream_for_metadata(new_stream);
			} else {
				consumer_del_stream_for_data(new_stream);
			}
			goto error_add_stream_nosignal;
		}

		DBG("Kernel consumer ADD_STREAM %s (fd: %d) %s with relayd id %" PRIu64,
		    new_stream->name,
		    fd,
		    new_stream->chan->pathname,
		    new_stream->relayd_stream_id);
		break;

	error_add_stream_nosignal:
		goto end_nosignal;
	error_add_stream_fatal:
		goto error_fatal;
	}
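	/*
	 * Locking note for ADD_STREAM: the channel lock is taken before creating
	 * the stream, the stream lock is nested inside it, and both are released
	 * before the new stream is published to the data or metadata pipe.
	 */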
	case LTTNG_CONSUMER_STREAMS_SENT:
	{
		struct lttng_consumer_channel *channel;
		int ret_send_status;

		/*
		 * Get stream's channel reference. Needed when adding the stream to the
		 * channel.
		 */
		channel = consumer_find_channel(msg.u.sent_streams.channel_key);
		if (!channel) {
			/*
			 * We could not find the channel. Can happen if cpu hotplug
			 * happens while tearing down.
			 */
			ERR("Unable to find channel key %" PRIu64, msg.u.sent_streams.channel_key);
			ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
		}

		health_code_update();

		/*
		 * Send status code to session daemon.
		 */
		ret_send_status = consumer_send_status_msg(sock, ret_code);
		if (ret_send_status < 0 || ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
			/* Somehow, the session daemon is not responding anymore. */
			goto error_streams_sent_nosignal;
		}

		health_code_update();

		/*
		 * We should not send this message if we don't monitor the
		 * streams in this channel.
		 */
		if (!channel->monitor) {
			goto end_error_streams_sent;
		}

		health_code_update();
		/* Send stream to relayd if the stream has an ID. */
		if (msg.u.sent_streams.net_seq_idx != (uint64_t) -1ULL) {
			int ret_send_relay_streams;

			ret_send_relay_streams =
				consumer_send_relayd_streams_sent(msg.u.sent_streams.net_seq_idx);
			if (ret_send_relay_streams < 0) {
				goto error_streams_sent_nosignal;
			}
			channel->streams_sent_to_relayd = true;
		}
	end_error_streams_sent:
		break;
	error_streams_sent_nosignal:
		goto end_nosignal;
	}
	case LTTNG_CONSUMER_UPDATE_STREAM:
	{
		return -ENOSYS;
	}
	case LTTNG_CONSUMER_DESTROY_RELAYD:
	{
		uint64_t index = msg.u.destroy_relayd.net_seq_idx;
		struct consumer_relayd_sock_pair *relayd;
		int ret_send_status;

		DBG("Kernel consumer destroying relayd %" PRIu64, index);

		/* Get relayd reference if exists. */
		relayd = consumer_find_relayd(index);
		if (relayd == nullptr) {
			DBG("Unable to find relayd %" PRIu64, index);
			ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL;
		} else {
			/*
			 * Each relayd socket pair has a refcount of stream attached to it
			 * which tells if the relayd is still active or not depending on the
			 * refcount value.
			 *
			 * This will set the destroy flag of the relayd object and destroy it
			 * if the refcount reaches zero when called.
			 *
			 * The destroy can happen either here or when a stream fd hangs up.
			 */
			consumer_flag_relayd_for_destroy(relayd);
		}

		health_code_update();

		ret_send_status = consumer_send_status_msg(sock, ret_code);
		if (ret_send_status < 0) {
			/* Somehow, the session daemon is not responding anymore. */
			goto error_fatal;
		}

		break;
	}
	case LTTNG_CONSUMER_DATA_PENDING:
	{
		int32_t ret_data_pending;
		uint64_t id = msg.u.data_pending.session_id;
		ssize_t ret_send;

		DBG("Kernel consumer data pending command for id %" PRIu64, id);

		ret_data_pending = consumer_data_pending(id);

		health_code_update();

		/* Send back returned value to session daemon */
		ret_send =
			lttcomm_send_unix_sock(sock, &ret_data_pending, sizeof(ret_data_pending));
		if (ret_send < 0) {
			PERROR("send data pending ret code");
			goto error_fatal;
		}

		/*
		 * No need to send back a status message since the data pending
		 * returned value is the response.
		 */
		break;
	}
	case LTTNG_CONSUMER_SNAPSHOT_CHANNEL:
	{
		struct lttng_consumer_channel *channel;
		uint64_t key = msg.u.snapshot_channel.key;
		int ret_send_status;

		channel = consumer_find_channel(key);
		if (!channel) {
			ERR("Channel %" PRIu64 " not found", key);
			ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
		} else {
			if (msg.u.snapshot_channel.metadata == 1) {
				int ret_snapshot;

				ret_snapshot = lttng_kconsumer_snapshot_metadata(
					channel,
					key,
					msg.u.snapshot_channel.pathname,
					msg.u.snapshot_channel.relayd_id,
					ctx);
				if (ret_snapshot < 0) {
					ERR("Snapshot metadata failed");
					ret_code = LTTCOMM_CONSUMERD_SNAPSHOT_FAILED;
				}
			} else {
				int ret_snapshot;

				ret_snapshot = lttng_kconsumer_snapshot_channel(
					channel,
					key,
					msg.u.snapshot_channel.pathname,
					msg.u.snapshot_channel.relayd_id,
					msg.u.snapshot_channel.nb_packets_per_stream);
				if (ret_snapshot < 0) {
					ERR("Snapshot channel failed");
					ret_code = LTTCOMM_CONSUMERD_SNAPSHOT_FAILED;
				}
			}
		}

		health_code_update();

		ret_send_status = consumer_send_status_msg(sock, ret_code);
		if (ret_send_status < 0) {
			/* Somehow, the session daemon is not responding anymore. */
			goto end_nosignal;
		}
		break;
	}
	case LTTNG_CONSUMER_DESTROY_CHANNEL:
	{
		uint64_t key = msg.u.destroy_channel.key;
		struct lttng_consumer_channel *channel;
		int ret_send_status;

		channel = consumer_find_channel(key);
		if (!channel) {
			ERR("Kernel consumer destroy channel %" PRIu64 " not found", key);
			ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
		}

		health_code_update();

		ret_send_status = consumer_send_status_msg(sock, ret_code);
		if (ret_send_status < 0) {
			/* Somehow, the session daemon is not responding anymore. */
			goto end_destroy_channel;
		}

		health_code_update();

		/* Stop right now if no channel was found. */
		if (!channel) {
			goto end_destroy_channel;
		}

		/*
		 * This command should ONLY be issued for channel with streams set in
		 * no monitor mode.
		 */
		LTTNG_ASSERT(!channel->monitor);

		/*
		 * The refcount should ALWAYS be 0 in the case of a channel in no
		 * monitor mode.
		 */
		LTTNG_ASSERT(!uatomic_sub_return(&channel->refcount, 1));

		consumer_del_channel(channel);
	end_destroy_channel:
		goto end_nosignal;
	}
	case LTTNG_CONSUMER_DISCARDED_EVENTS:
	{
		ssize_t ret;
		uint64_t count;
		struct lttng_consumer_channel *channel;
		uint64_t id = msg.u.discarded_events.session_id;
		uint64_t key = msg.u.discarded_events.channel_key;

		DBG("Kernel consumer discarded events command for session id %" PRIu64
		    ", channel key %" PRIu64,
		    id,
		    key);

		channel = consumer_find_channel(key);
		if (!channel) {
			ERR("Kernel consumer discarded events channel %" PRIu64 " not found", key);
			count = 0;
		} else {
			count = channel->discarded_events;
		}

		health_code_update();

		/* Send back returned value to session daemon */
		ret = lttcomm_send_unix_sock(sock, &count, sizeof(count));
		if (ret < 0) {
			PERROR("send discarded events");
			goto error_fatal;
		}

		break;
	}
	case LTTNG_CONSUMER_LOST_PACKETS:
	{
		ssize_t ret;
		uint64_t count;
		struct lttng_consumer_channel *channel;
		uint64_t id = msg.u.lost_packets.session_id;
		uint64_t key = msg.u.lost_packets.channel_key;

		DBG("Kernel consumer lost packets command for session id %" PRIu64
		    ", channel key %" PRIu64,
		    id,
		    key);

		channel = consumer_find_channel(key);
		if (!channel) {
			ERR("Kernel consumer lost packets channel %" PRIu64 " not found", key);
			count = 0;
		} else {
			count = channel->lost_packets;
		}

		health_code_update();

		/* Send back returned value to session daemon */
		ret = lttcomm_send_unix_sock(sock, &count, sizeof(count));
		if (ret < 0) {
			PERROR("send lost packets");
			goto error_fatal;
		}

		break;
	}
	case LTTNG_CONSUMER_SET_CHANNEL_MONITOR_PIPE:
	{
		int channel_monitor_pipe;
		int ret_send_status, ret_set_channel_monitor_pipe;
		ssize_t ret_recv;

		ret_code = LTTCOMM_CONSUMERD_SUCCESS;
		/* Successfully received the command's type. */
		ret_send_status = consumer_send_status_msg(sock, ret_code);
		if (ret_send_status < 0) {
			goto error_fatal;
		}

		ret_recv = lttcomm_recv_fds_unix_sock(sock, &channel_monitor_pipe, 1);
		if (ret_recv != sizeof(channel_monitor_pipe)) {
			ERR("Failed to receive channel monitor pipe");
			goto error_fatal;
		}

		DBG("Received channel monitor pipe (%d)", channel_monitor_pipe);
		ret_set_channel_monitor_pipe =
			consumer_timer_thread_set_channel_monitor_pipe(channel_monitor_pipe);
		if (!ret_set_channel_monitor_pipe) {
			int flags;
			int ret_fcntl;

			ret_code = LTTCOMM_CONSUMERD_SUCCESS;
			/* Set the pipe as non-blocking. */
			ret_fcntl = fcntl(channel_monitor_pipe, F_GETFL, 0);
			if (ret_fcntl == -1) {
				PERROR("fcntl get flags of the channel monitoring pipe");
				goto error_fatal;
			}
			flags = ret_fcntl;

			ret_fcntl = fcntl(channel_monitor_pipe, F_SETFL, flags | O_NONBLOCK);
			if (ret_fcntl == -1) {
				PERROR("fcntl set O_NONBLOCK flag of the channel monitoring pipe");
				goto error_fatal;
			}
			DBG("Channel monitor pipe set as non-blocking");
		} else {
			ret_code = LTTCOMM_CONSUMERD_ALREADY_SET;
		}

		ret_send_status = consumer_send_status_msg(sock, ret_code);
		if (ret_send_status < 0) {
			goto error_fatal;
		}
		break;
	}
	case LTTNG_CONSUMER_ROTATE_CHANNEL:
	{
		struct lttng_consumer_channel *channel;
		uint64_t key = msg.u.rotate_channel.key;
		int ret_send_status;

		DBG("Consumer rotate channel %" PRIu64, key);

		channel = consumer_find_channel(key);
		if (!channel) {
			ERR("Channel %" PRIu64 " not found", key);
			ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
		} else {
			/*
			 * Sample the rotate position of all the streams in this channel.
			 */
			int ret_rotate_channel;

			ret_rotate_channel = lttng_consumer_rotate_channel(
				channel, key, msg.u.rotate_channel.relayd_id);
			if (ret_rotate_channel < 0) {
				ERR("Rotate channel failed");
				ret_code = LTTCOMM_CONSUMERD_ROTATION_FAIL;
			}

			health_code_update();
		}

		ret_send_status = consumer_send_status_msg(sock, ret_code);
		if (ret_send_status < 0) {
			/* Somehow, the session daemon is not responding anymore. */
			goto error_rotate_channel;
		}

		if (channel) {
			/* Rotate the streams that are ready right now. */
			int ret_rotate;

			ret_rotate = lttng_consumer_rotate_ready_streams(channel, key);
			if (ret_rotate < 0) {
				ERR("Rotate ready streams failed");
			}
		}
		break;
	error_rotate_channel:
		goto end_nosignal;
	}
	case LTTNG_CONSUMER_CLEAR_CHANNEL:
	{
		struct lttng_consumer_channel *channel;
		uint64_t key = msg.u.clear_channel.key;
		int ret_send_status;

		channel = consumer_find_channel(key);
		if (channel == nullptr) {
			DBG("Channel %" PRIu64 " not found", key);
			ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
		} else {
			int ret_clear_channel;

			ret_clear_channel = lttng_consumer_clear_channel(channel);
			if (ret_clear_channel) {
				ERR("Clear channel failed");
				ret_code = (lttcomm_return_code) ret_clear_channel;
			}

			health_code_update();
		}

		ret_send_status = consumer_send_status_msg(sock, ret_code);
		if (ret_send_status < 0) {
			/* Somehow, the session daemon is not responding anymore. */
			goto end_nosignal;
		}
		break;
	}
	case LTTNG_CONSUMER_INIT:
	{
		int ret_send_status;
		lttng_uuid sessiond_uuid;

		std::copy(std::begin(msg.u.init.sessiond_uuid),
			  std::end(msg.u.init.sessiond_uuid),
			  sessiond_uuid.begin());

		ret_code = lttng_consumer_init_command(ctx, sessiond_uuid);
		health_code_update();
		ret_send_status = consumer_send_status_msg(sock, ret_code);
		if (ret_send_status < 0) {
			/* Somehow, the session daemon is not responding anymore. */
			goto end_nosignal;
		}
		break;
	}
	case LTTNG_CONSUMER_CREATE_TRACE_CHUNK:
	{
		const struct lttng_credentials credentials = {
			.uid = LTTNG_OPTIONAL_INIT_VALUE(
				msg.u.create_trace_chunk.credentials.value.uid),
			.gid = LTTNG_OPTIONAL_INIT_VALUE(
				msg.u.create_trace_chunk.credentials.value.gid),
		};
		const bool is_local_trace = !msg.u.create_trace_chunk.relayd_id.is_set;
		const uint64_t relayd_id = msg.u.create_trace_chunk.relayd_id.value;
		const char *chunk_override_name = *msg.u.create_trace_chunk.override_name ?
			msg.u.create_trace_chunk.override_name :
			nullptr;
		struct lttng_directory_handle *chunk_directory_handle = nullptr;

		/*
		 * The session daemon will only provide a chunk directory file
		 * descriptor for local traces.
		 */
		if (is_local_trace) {
			int chunk_dirfd;
			int ret_send_status;
			ssize_t ret_recv;

			/* Acknowledge the reception of the command. */
			ret_send_status = consumer_send_status_msg(sock, LTTCOMM_CONSUMERD_SUCCESS);
			if (ret_send_status < 0) {
				/* Somehow, the session daemon is not responding anymore. */
				goto end_nosignal;
			}

			ret_recv = lttcomm_recv_fds_unix_sock(sock, &chunk_dirfd, 1);
			if (ret_recv != sizeof(chunk_dirfd)) {
				ERR("Failed to receive trace chunk directory file descriptor");
				goto error_fatal;
			}

			DBG("Received trace chunk directory fd (%d)", chunk_dirfd);
			chunk_directory_handle =
				lttng_directory_handle_create_from_dirfd(chunk_dirfd);
			if (!chunk_directory_handle) {
				ERR("Failed to initialize chunk directory handle from directory file descriptor");
				if (close(chunk_dirfd)) {
					PERROR("Failed to close chunk directory file descriptor");
				}
				goto error_fatal;
			}
		}

		ret_code = lttng_consumer_create_trace_chunk(
			!is_local_trace ? &relayd_id : nullptr,
			msg.u.create_trace_chunk.session_id,
			msg.u.create_trace_chunk.chunk_id,
			(time_t) msg.u.create_trace_chunk.creation_timestamp,
			chunk_override_name,
			msg.u.create_trace_chunk.credentials.is_set ? &credentials : nullptr,
			chunk_directory_handle);
		lttng_directory_handle_put(chunk_directory_handle);
		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_CLOSE_TRACE_CHUNK:
	{
		enum lttng_trace_chunk_command_type close_command =
			(lttng_trace_chunk_command_type) msg.u.close_trace_chunk.close_command.value;
		const uint64_t relayd_id = msg.u.close_trace_chunk.relayd_id.value;
		struct lttcomm_consumer_close_trace_chunk_reply reply;
		char path[LTTNG_PATH_MAX];
		ssize_t ret_send;

		ret_code = lttng_consumer_close_trace_chunk(
			msg.u.close_trace_chunk.relayd_id.is_set ? &relayd_id : nullptr,
			msg.u.close_trace_chunk.session_id,
			msg.u.close_trace_chunk.chunk_id,
			(time_t) msg.u.close_trace_chunk.close_timestamp,
			msg.u.close_trace_chunk.close_command.is_set ? &close_command : nullptr,
			path);
		reply.ret_code = ret_code;
		reply.path_length = strlen(path) + 1;
		ret_send = lttcomm_send_unix_sock(sock, &reply, sizeof(reply));
		if (ret_send != sizeof(reply)) {
			goto error_fatal;
		}
		ret_send = lttcomm_send_unix_sock(sock, path, reply.path_length);
		if (ret_send != reply.path_length) {
			goto error_fatal;
		}
		goto end_nosignal;
	}
	case LTTNG_CONSUMER_TRACE_CHUNK_EXISTS:
	{
		const uint64_t relayd_id = msg.u.trace_chunk_exists.relayd_id.value;

		ret_code = lttng_consumer_trace_chunk_exists(
			msg.u.trace_chunk_exists.relayd_id.is_set ? &relayd_id : nullptr,
			msg.u.trace_chunk_exists.session_id,
			msg.u.trace_chunk_exists.chunk_id);
		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_OPEN_CHANNEL_PACKETS:
	{
		const uint64_t key = msg.u.open_channel_packets.key;
		struct lttng_consumer_channel *channel = consumer_find_channel(key);

		if (channel) {
			pthread_mutex_lock(&channel->lock);
			ret_code = lttng_consumer_open_channel_packets(channel);
			pthread_mutex_unlock(&channel->lock);
		} else {
			WARN("Channel %" PRIu64 " not found", key);
			ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
		}

		health_code_update();
		goto end_msg_sessiond;
	}
	default:
		goto end_nosignal;
	}

end_nosignal:
	/*
	 * Return 1 to indicate success since the 0 value can be a socket
	 * shutdown during the recv() or send() call.
	 */
	ret_func = 1;
	goto end;

error_fatal:
	/* This will issue a consumer stop. */
	ret_func = -1;
	goto end;

end_msg_sessiond:
	/*
	 * The returned value here is not useful since either way we'll return 1 to
	 * the caller because the session daemon socket management is done
	 * elsewhere. Returning a negative code or 0 will shutdown the consumer.
	 */
	{
		int ret_send_status;

		ret_send_status = consumer_send_status_msg(sock, ret_code);
		if (ret_send_status < 0) {
			goto error_fatal;
		}
	}

	ret_func = 1;

end:
	health_code_update();
	return ret_func;
}

/*
 * Sync metadata, meaning request them to the session daemon and snapshot them
 * so that the metadata thread can consume them.
 *
 * Metadata stream lock MUST be acquired.
 */
enum sync_metadata_status lttng_kconsumer_sync_metadata(struct lttng_consumer_stream *metadata)
{
	int ret;
	enum sync_metadata_status status;

	LTTNG_ASSERT(metadata);

	ret = kernctl_buffer_flush(metadata->wait_fd);
	if (ret < 0) {
		ERR("Failed to flush kernel stream");
		status = SYNC_METADATA_STATUS_ERROR;
		goto end;
	}

	ret = kernctl_snapshot(metadata->wait_fd);
	if (ret < 0) {
		if (errno == EAGAIN) {
			/* No new metadata, exit. */
			DBG("Sync metadata, no new kernel metadata");
			status = SYNC_METADATA_STATUS_NO_DATA;
		} else {
			ERR("Sync metadata, taking kernel snapshot failed.");
			status = SYNC_METADATA_STATUS_ERROR;
		}
		goto end;
	}

	status = SYNC_METADATA_STATUS_NEW_DATA;
end:
	return status;
}
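
/*
 * The three sync_metadata_status values map directly to the outcomes above:
 * NEW_DATA when the flush and snapshot succeeded, NO_DATA when the tracer
 * reports EAGAIN (no new metadata), and ERROR for any other failure.
 */
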
static int extract_common_subbuffer_info(struct lttng_consumer_stream *stream,
					 struct stream_subbuffer *subbuf)
{
	int ret;

	ret = kernctl_get_subbuf_size(stream->wait_fd, &subbuf->info.data.subbuf_size);
	if (ret) {
		goto end;
	}

	ret = kernctl_get_padded_subbuf_size(stream->wait_fd,
					     &subbuf->info.data.padded_subbuf_size);
	if (ret) {
		goto end;
	}

end:
	return ret;
}

static int extract_metadata_subbuffer_info(struct lttng_consumer_stream *stream,
					   struct stream_subbuffer *subbuf)
{
	int ret;

	ret = extract_common_subbuffer_info(stream, subbuf);
	if (ret) {
		goto end;
	}

	ret = kernctl_get_metadata_version(stream->wait_fd, &subbuf->info.metadata.version);
	if (ret) {
		goto end;
	}

end:
	return ret;
}

static int extract_data_subbuffer_info(struct lttng_consumer_stream *stream,
				       struct stream_subbuffer *subbuf)
{
	int ret;

	ret = extract_common_subbuffer_info(stream, subbuf);
	if (ret) {
		goto end;
	}

	ret = kernctl_get_packet_size(stream->wait_fd, &subbuf->info.data.packet_size);
	if (ret < 0) {
		PERROR("Failed to get sub-buffer packet size");
		goto end;
	}

	ret = kernctl_get_content_size(stream->wait_fd, &subbuf->info.data.content_size);
	if (ret < 0) {
		PERROR("Failed to get sub-buffer content size");
		goto end;
	}

	ret = kernctl_get_timestamp_begin(stream->wait_fd, &subbuf->info.data.timestamp_begin);
	if (ret < 0) {
		PERROR("Failed to get sub-buffer begin timestamp");
		goto end;
	}

	ret = kernctl_get_timestamp_end(stream->wait_fd, &subbuf->info.data.timestamp_end);
	if (ret < 0) {
		PERROR("Failed to get sub-buffer end timestamp");
		goto end;
	}

	ret = kernctl_get_events_discarded(stream->wait_fd, &subbuf->info.data.events_discarded);
	if (ret < 0) {
		PERROR("Failed to get sub-buffer events discarded count");
		goto end;
	}

	ret = kernctl_get_sequence_number(stream->wait_fd,
					  &subbuf->info.data.sequence_number.value);
	if (ret) {
		/* May not be supported by older LTTng-modules. */
		if (ret != -ENOTTY) {
			PERROR("Failed to get sub-buffer sequence number");
			goto end;
		}
	} else {
		subbuf->info.data.sequence_number.is_set = true;
	}

	ret = kernctl_get_stream_id(stream->wait_fd, &subbuf->info.data.stream_id);
	if (ret < 0) {
		PERROR("Failed to get stream id");
		goto end;
	}

	ret = kernctl_get_instance_id(stream->wait_fd, &subbuf->info.data.stream_instance_id.value);
	if (ret) {
		/* May not be supported by older LTTng-modules. */
		if (ret != -ENOTTY) {
			PERROR("Failed to get stream instance id");
			goto end;
		}
	} else {
		subbuf->info.data.stream_instance_id.is_set = true;
	}

end:
	return ret;
}
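
/*
 * Note: the sequence number and stream instance id are optional; when the
 * tracer does not implement those ioctls (-ENOTTY on older lttng-modules)
 * the corresponding LTTNG_OPTIONAL "is_set" flags simply remain false.
 */
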
static enum get_next_subbuffer_status get_subbuffer_common(struct lttng_consumer_stream *stream,
							   struct stream_subbuffer *subbuffer)
{
	int ret;
	enum get_next_subbuffer_status status;

	ret = kernctl_get_next_subbuf(stream->wait_fd);
	switch (ret) {
	case 0:
		status = GET_NEXT_SUBBUFFER_STATUS_OK;
		break;
	case -ENODATA:
	case -EAGAIN:
		/*
		 * The caller only expects -ENODATA when there is no data to
		 * read, but the kernel tracer returns -EAGAIN when there is
		 * currently no data for a non-finalized stream, and -ENODATA
		 * when there is no data for a finalized stream. Those can be
		 * combined into a -ENODATA return value.
		 */
		status = GET_NEXT_SUBBUFFER_STATUS_NO_DATA;
		goto end;
	default:
		status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
		goto end;
	}

	ret = stream->read_subbuffer_ops.extract_subbuffer_info(stream, subbuffer);
	if (ret) {
		status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
	}
end:
	return status;
}

static enum get_next_subbuffer_status
get_next_subbuffer_splice(struct lttng_consumer_stream *stream, struct stream_subbuffer *subbuffer)
{
	const enum get_next_subbuffer_status status = get_subbuffer_common(stream, subbuffer);

	if (status != GET_NEXT_SUBBUFFER_STATUS_OK) {
		goto end;
	}

	subbuffer->buffer.fd = stream->wait_fd;
end:
	return status;
}

static enum get_next_subbuffer_status
get_next_subbuffer_mmap(struct lttng_consumer_stream *stream, struct stream_subbuffer *subbuffer)
{
	int ret;
	enum get_next_subbuffer_status status;
	const char *addr;

	status = get_subbuffer_common(stream, subbuffer);
	if (status != GET_NEXT_SUBBUFFER_STATUS_OK) {
		goto end;
	}

	ret = get_current_subbuf_addr(stream, &addr);
	if (ret) {
		status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
		goto end;
	}

	subbuffer->buffer.buffer =
		lttng_buffer_view_init(addr, 0, subbuffer->info.data.padded_subbuf_size);
end:
	return status;
}

static enum get_next_subbuffer_status
get_next_subbuffer_metadata_check(struct lttng_consumer_stream *stream,
				  struct stream_subbuffer *subbuffer)
{
	int ret;
	const char *addr;
	bool coherent;
	enum get_next_subbuffer_status status;

	ret = kernctl_get_next_subbuf_metadata_check(stream->wait_fd, &coherent);
	if (!ret) {
		ret = stream->read_subbuffer_ops.extract_subbuffer_info(stream, subbuffer);
		if (ret) {
			goto end;
		}

		LTTNG_OPTIONAL_SET(&subbuffer->info.metadata.coherent, coherent);

		ret = get_current_subbuf_addr(stream, &addr);
		if (ret) {
			goto end;
		}

		subbuffer->buffer.buffer =
			lttng_buffer_view_init(addr, 0, subbuffer->info.data.padded_subbuf_size);
		DBG("Got metadata packet with padded_subbuf_size = %lu, coherent = %s",
		    subbuffer->info.metadata.padded_subbuf_size,
		    coherent ? "true" : "false");
	}

end:
	switch (ret) {
	case 0:
		status = GET_NEXT_SUBBUFFER_STATUS_OK;
		break;
	case -ENODATA:
	case -EAGAIN:
		/*
		 * The caller only expects -ENODATA when there is no data to
		 * read, but the kernel tracer returns -EAGAIN when there is
		 * currently no data for a non-finalized stream, and -ENODATA
		 * when there is no data for a finalized stream. Those can be
		 * combined into a -ENODATA return value.
		 */
		status = GET_NEXT_SUBBUFFER_STATUS_NO_DATA;
		break;
	default:
		status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
		break;
	}

	return status;
}

static int put_next_subbuffer(struct lttng_consumer_stream *stream,
			      struct stream_subbuffer *subbuffer __attribute__((unused)))
{
	const int ret = kernctl_put_next_subbuf(stream->wait_fd);

	if (ret) {
		if (ret == -EFAULT) {
			PERROR("Error in unreserving sub buffer");
		} else if (ret == -EIO) {
			/* Should never happen with newer LTTng versions */
			PERROR("Reader has been pushed by the writer, last sub-buffer corrupted");
		}
	}

	return ret;
}

static bool is_get_next_check_metadata_available(int tracer_fd)
{
	const int ret = kernctl_get_next_subbuf_metadata_check(tracer_fd, nullptr);
	const bool available = ret != -ENOTTY;

	if (available) {
		/* get succeeded, make sure to put the subbuffer. */
		kernctl_put_subbuf(tracer_fd);
	}

	return available;
}

static int signal_metadata(struct lttng_consumer_stream *stream,
			   struct lttng_consumer_local_data *ctx __attribute__((unused)))
{
	ASSERT_LOCKED(stream->metadata_rdv_lock);
	return pthread_cond_broadcast(&stream->metadata_rdv) ? -errno : 0;
}

static int lttng_kconsumer_set_stream_ops(struct lttng_consumer_stream *stream)
{
	int ret = 0;

	if (stream->metadata_flag && stream->chan->is_live) {
		DBG("Attempting to enable metadata bucketization for live consumers");
		if (is_get_next_check_metadata_available(stream->wait_fd)) {
			DBG("Kernel tracer supports get_next_subbuffer_metadata_check, metadata will be accumulated until a coherent state is reached");
			stream->read_subbuffer_ops.get_next_subbuffer =
				get_next_subbuffer_metadata_check;
			ret = consumer_stream_enable_metadata_bucketization(stream);
			if (ret) {
				goto end;
			}
		} else {
			/*
			 * The kernel tracer version is too old to indicate
			 * when the metadata stream has reached a "coherent"
			 * (parseable) point.
			 *
			 * This means that a live viewer may see an incoherent
			 * sequence of metadata and fail to parse it.
			 */
			WARN("Kernel tracer does not support get_next_subbuffer_metadata_check which may cause live clients to fail to parse the metadata stream");
			metadata_bucket_destroy(stream->metadata_bucket);
			stream->metadata_bucket = nullptr;
		}

		stream->read_subbuffer_ops.on_sleep = signal_metadata;
	}

	if (!stream->read_subbuffer_ops.get_next_subbuffer) {
		if (stream->chan->output == CONSUMER_CHANNEL_MMAP) {
			stream->read_subbuffer_ops.get_next_subbuffer = get_next_subbuffer_mmap;
		} else {
			stream->read_subbuffer_ops.get_next_subbuffer = get_next_subbuffer_splice;
		}
	}

	if (stream->metadata_flag) {
		stream->read_subbuffer_ops.extract_subbuffer_info = extract_metadata_subbuffer_info;
	} else {
		stream->read_subbuffer_ops.extract_subbuffer_info = extract_data_subbuffer_info;
		if (stream->chan->is_live) {
			stream->read_subbuffer_ops.send_live_beacon = consumer_flush_kernel_index;
		}
	}

	stream->read_subbuffer_ops.put_next_subbuffer = put_next_subbuffer;

end:
	return ret;
}
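
/*
 * Summary of the wiring above: get_next_subbuffer is selected per channel
 * output (mmap vs. splice), with a metadata-coherency-checking variant for
 * live metadata streams when the tracer supports it; extract_subbuffer_info
 * differs for metadata and data streams; live data streams additionally send
 * a live beacon through consumer_flush_kernel_index; every stream releases
 * sub-buffers through put_next_subbuffer.
 */
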
int lttng_kconsumer_on_recv_stream(struct lttng_consumer_stream *stream)
{
	int ret;

	LTTNG_ASSERT(stream);

	/*
	 * Don't create anything if this is set for streaming or if there is
	 * no current trace chunk on the parent channel.
	 */
	if (stream->net_seq_idx == (uint64_t) -1ULL && stream->chan->monitor &&
	    stream->chan->trace_chunk) {
		ret = consumer_stream_create_output_files(stream, true);
		if (ret) {
			goto error;
		}
	}

	if (stream->output == LTTNG_EVENT_MMAP) {
		/* get the len of the mmap region */
		unsigned long mmap_len;

		ret = kernctl_get_mmap_len(stream->wait_fd, &mmap_len);
		if (ret != 0) {
			PERROR("kernctl_get_mmap_len");
			goto error_close_fd;
		}
		stream->mmap_len = (size_t) mmap_len;

		stream->mmap_base =
			mmap(nullptr, stream->mmap_len, PROT_READ, MAP_PRIVATE, stream->wait_fd, 0);
		if (stream->mmap_base == MAP_FAILED) {
			PERROR("Error mmaping");
			ret = -1;
			goto error_close_fd;
		}
	}

	ret = lttng_kconsumer_set_stream_ops(stream);
	if (ret) {
		goto error_close_fd;
	}

	/* we return 0 to let the library handle the FD internally */
	return 0;

error_close_fd:
	if (stream->out_fd >= 0) {
		int err;

		err = close(stream->out_fd);
		LTTNG_ASSERT(!err);
		stream->out_fd = -1;
	}
error:
	return ret;
}

/*
 * Check if data is still being extracted from the buffers for a specific
 * stream. The consumer data lock and the stream lock MUST be acquired before
 * calling this function.
 *
 * Return 1 if the traced data are still getting read else 0 meaning that the
 * data is available for trace viewer reading.
 */
int lttng_kconsumer_data_pending(struct lttng_consumer_stream *stream)
{
	int ret;

	LTTNG_ASSERT(stream);

	if (stream->endpoint_status != CONSUMER_ENDPOINT_ACTIVE) {
		ret = 0;
		goto end;
	}

	ret = kernctl_get_next_subbuf(stream->wait_fd);
	if (ret == 0) {
		/* There is still data so let's put back this subbuffer. */
		ret = kernctl_put_subbuf(stream->wait_fd);
		LTTNG_ASSERT(ret == 0);
		ret = 1; /* Data is pending */
		goto end;
	}

	/* Data is NOT pending and ready to be read. */
	ret = 0;

end:
	return ret;
}
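
/*
 * Note: the check above is non-destructive; when kernctl_get_next_subbuf()
 * succeeds, the sub-buffer is immediately released with kernctl_put_subbuf()
 * so that the pending data can still be consumed by the regular read path.
 */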