- if (stream->chan->live_timer_interval && !stream->metadata_flag) {
- /*
- * In live, block until all the metadata is sent.
- */
- pthread_mutex_lock(&stream->metadata_timer_lock);
- assert(!stream->missed_metadata_flush);
- stream->waiting_on_metadata = true;
- pthread_mutex_unlock(&stream->metadata_timer_lock);
-
- err = consumer_stream_sync_metadata(ctx, stream->session_id);
-
- pthread_mutex_lock(&stream->metadata_timer_lock);
- stream->waiting_on_metadata = false;
- if (stream->missed_metadata_flush) {
- stream->missed_metadata_flush = false;
- pthread_mutex_unlock(&stream->metadata_timer_lock);
- (void) consumer_flush_kernel_index(stream);
+ subbuffer->buffer.buffer = lttng_buffer_view_init(
+ addr, 0, subbuffer->info.data.padded_subbuf_size);
+end:
+ return ret;
+}
+
+/*
+ * Get the next metadata sub-buffer from the kernel tracer, recording
+ * whether its contents were in a "coherent" state (per the tracer's
+ * metadata-check operation) at the moment of reservation.
+ *
+ * On success, the sub-buffer contents are exposed through
+ * subbuffer->buffer.buffer and 0 is returned; on failure, the negative
+ * error code of the failing step is passed through.
+ */
+static
+int get_next_subbuffer_metadata_check(struct lttng_consumer_stream *stream,
+		struct stream_subbuffer *subbuffer)
+{
+	int ret;
+	const char *addr;
+	bool coherent;
+
+	/* Reserve the next sub-buffer and query the coherency flag. */
+	ret = kernctl_get_next_subbuf_metadata_check(stream->wait_fd,
+			&coherent);
+	if (ret) {
+		goto end;
+	}
+
+	ret = stream->read_subbuffer_ops.extract_subbuffer_info(
+			stream, subbuffer);
+	if (ret) {
+		goto end;
+	}
+
+	/* Record coherency so downstream consumers can bucketize metadata. */
+	LTTNG_OPTIONAL_SET(&subbuffer->info.metadata.coherent, coherent);
+
+	ret = get_current_subbuf_addr(stream, &addr);
+	if (ret) {
+		goto end;
+	}
+
+	/*
+	 * NOTE(review): 'info.data.padded_subbuf_size' is read here while the
+	 * DBG below reads 'info.metadata.padded_subbuf_size'; presumably both
+	 * union members share a common initial layout so the values agree —
+	 * confirm against the stream_subbuffer definition.
+	 */
+	subbuffer->buffer.buffer = lttng_buffer_view_init(
+			addr, 0, subbuffer->info.data.padded_subbuf_size);
+	DBG("Got metadata packet with padded_subbuf_size = %lu, coherent = %s",
+			subbuffer->info.metadata.padded_subbuf_size,
+			coherent ? "true" : "false");
+end:
+	return ret;
+}
+
+/*
+ * Release the current sub-buffer back to the kernel tracer once it has
+ * been consumed.
+ *
+ * Returns 0 on success, a negative kernctl error code on failure; only
+ * the -EFAULT and -EIO cases are logged, other errors are passed
+ * through silently to the caller.
+ */
+static
+int put_next_subbuffer(struct lttng_consumer_stream *stream,
+		struct stream_subbuffer *subbuffer)
+{
+	const int ret = kernctl_put_next_subbuf(stream->wait_fd);
+
+	if (ret) {
+		if (ret == -EFAULT) {
+			PERROR("Error in unreserving sub buffer");
+		} else if (ret == -EIO) {
+			/* Should never happen with newer LTTng versions */
+			PERROR("Reader has been pushed by the writer, last sub-buffer corrupted");
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * Probe whether the kernel tracer supports the
+ * get_next_subbuf_metadata_check operation by invoking it with a NULL
+ * coherency output: an -ENOTTY return means the operation is unknown
+ * to this tracer version, anything else means it is supported.
+ *
+ * Fix: a successful probe (ret == 0) actually reserves a sub-buffer on
+ * the stream; it must be released here, otherwise the first real
+ * metadata read finds the sub-buffer already reserved.
+ */
+static
+bool is_get_next_check_metadata_available(int tracer_fd)
+{
+	const int ret = kernctl_get_next_subbuf_metadata_check(tracer_fd,
+			NULL);
+	const bool available = ret != -ENOTTY;
+
+	if (ret == 0) {
+		/* The probe reserved a sub-buffer; put it back. */
+		(void) kernctl_put_subbuf(tracer_fd);
+	}
+
+	return available;
+}
+
+static
+int lttng_kconsumer_set_stream_ops(
+ struct lttng_consumer_stream *stream)
+{
+ int ret = 0;
+
+ if (stream->metadata_flag && stream->chan->is_live) {
+ DBG("Attempting to enable metadata bucketization for live consumers");
+ if (is_get_next_check_metadata_available(stream->wait_fd)) {
+ DBG("Kernel tracer supports get_next_subbuffer_metadata_check, metadata will be accumulated until a coherent state is reached");
+ stream->read_subbuffer_ops.get_next_subbuffer =
+ get_next_subbuffer_metadata_check;
+ ret = consumer_stream_enable_metadata_bucketization(
+ stream);
+ if (ret) {
+ goto end;
+ }