Cleanup: lib_ring_buffer_switch_new_end() only calls subbuffer_set_data_size()
diff --git a/lib/ringbuffer/ring_buffer_frontend.c b/lib/ringbuffer/ring_buffer_frontend.c
index eaeb571562230f92e4b30df3616ce3907c9b8be7..077be3c8c82c530e7a8c3aef1c6da5f7857b6b00 100644
--- a/lib/ringbuffer/ring_buffer_frontend.c
+++ b/lib/ringbuffer/ring_buffer_frontend.c
@@ -67,7 +67,8 @@
 struct switch_offsets {
        unsigned long begin, end, old;
        size_t pre_header_padding, size;
-       unsigned int switch_new_start:1, switch_old_start:1, switch_old_end:1;
+       unsigned int switch_new_start:1, switch_new_end:1, switch_old_start:1,
+                    switch_old_end:1;
 };
 
 #ifdef CONFIG_NO_HZ
@@ -1348,6 +1349,28 @@ void lib_ring_buffer_switch_new_start(struct lib_ring_buffer *buf,
                                             config->cb.subbuffer_header_size());
 }
 
+/*
+ * lib_ring_buffer_switch_new_end: finish a reservation that ends exactly
+ * on a sub-buffer boundary.
+ * Calls subbuffer_set_data_size() to set the data size of the filled
+ * sub-buffer. We do not need to perform check_deliver or commit here,
+ * since that task will be done by the "commit" of the event for which
+ * we are currently doing the space reservation.
+ */
+static
+void lib_ring_buffer_switch_new_end(struct lib_ring_buffer *buf,
+                                           struct channel *chan,
+                                           struct switch_offsets *offsets,
+                                           u64 tsc)
+{
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+       unsigned long endidx, data_size;
+
+       endidx = subbuf_index(offsets->end - 1, chan);
+       data_size = subbuf_offset(offsets->end - 1, chan) + 1;
+       subbuffer_set_data_size(config, &buf->backend, endidx, data_size);
+}
+
 /*
  * Returns :
  * 0 if ok
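
Why the new helper indexes with offsets->end - 1: a record that ends exactly on a
sub-buffer boundary has its last byte in the sub-buffer it just filled, while
offsets->end itself already points to the start of the next one. A minimal
user-space sketch of the arithmetic, not part of the patch; SUBBUF_SIZE,
SUBBUF_ORDER, NUM_SUBBUF, sb_index() and sb_offset() are made-up stand-ins for
the channel geometry and the kernel's subbuf_index()/subbuf_offset() helpers,
assuming power-of-two sizes:

#include <stdio.h>

/* Hypothetical geometry: 4 sub-buffers of 4096 bytes each (powers of two). */
#define SUBBUF_SIZE	4096UL
#define SUBBUF_ORDER	12	/* log2(SUBBUF_SIZE) */
#define NUM_SUBBUF	4UL

/* Offset of a write position within its sub-buffer. */
static unsigned long sb_offset(unsigned long offset)
{
	return offset & (SUBBUF_SIZE - 1);
}

/* Index of the sub-buffer containing a write position. */
static unsigned long sb_index(unsigned long offset)
{
	return (offset >> SUBBUF_ORDER) & (NUM_SUBBUF - 1);
}

int main(void)
{
	unsigned long end = 2 * SUBBUF_SIZE;	/* offsets->end on a boundary */

	/* end itself maps to the next sub-buffer: index 2, offset 0. */
	printf("end:   index %lu, offset %lu\n", sb_index(end), sb_offset(end));

	/* end - 1 maps back to the sub-buffer that was just filled. */
	printf("end-1: endidx %lu, data_size %lu\n",
	       sb_index(end - 1), sb_offset(end - 1) + 1);	/* 1, 4096 */
	return 0;
}

With these values the filled sub-buffer (index 1) is recorded as holding exactly
SUBBUF_SIZE bytes of data, which is what subbuffer_set_data_size() stores.
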
@@ -1541,6 +1564,7 @@ int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
        offsets->begin = v_read(config, &buf->offset);
        offsets->old = offsets->begin;
        offsets->switch_new_start = 0;
+       offsets->switch_new_end = 0;
        offsets->switch_old_end = 0;
        offsets->pre_header_padding = 0;
 
@@ -1647,6 +1671,14 @@ int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
                 */
        }
        offsets->end = offsets->begin + offsets->size;
+
+       if (unlikely(subbuf_offset(offsets->end, chan) == 0)) {
+               /*
+                * The offset_end will fall at the very beginning of the next
+                * subbuffer.
+                */
+               offsets->switch_new_end = 1;    /* For offsets->end */
+       }
        return 0;
 }
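
The predicate subbuf_offset(offsets->end, chan) == 0 is what arms the new flag:
with a power-of-two sub-buffer size it reduces to a mask test on the end offset.
A hedged sketch of when the flag fires; the 4096-byte size and the two sample
reservations below are invented for illustration:

#include <stdio.h>

#define SUBBUF_SIZE	4096UL	/* hypothetical power-of-two sub-buffer size */

int main(void)
{
	/* Two reservations starting near the end of sub-buffer 0. */
	struct { unsigned long begin, size; } res[] = {
		{ 4000UL, 96UL },	/* end == 4096: lands on the boundary */
		{ 4000UL, 64UL },	/* end == 4064: stays inside          */
	};

	for (int i = 0; i < 2; i++) {
		unsigned long end = res[i].begin + res[i].size;
		int switch_new_end = ((end & (SUBBUF_SIZE - 1)) == 0);

		printf("begin=%lu size=%lu end=%lu -> switch_new_end=%d\n",
		       res[i].begin, res[i].size, end, switch_new_end);
	}
	return 0;
}

Only the first reservation sets switch_new_end, so lib_ring_buffer_switch_new_end()
later records that sub-buffer 0 holds a full 4096 bytes of data; the second ends
mid-sub-buffer and needs no bookkeeping beyond the event's own commit.
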
 
@@ -1717,6 +1749,9 @@ int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx)
        if (unlikely(offsets.switch_new_start))
                lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx->tsc);
 
+       if (unlikely(offsets.switch_new_end))
+               lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx->tsc);
+
        ctx->slot_size = offsets.size;
        ctx->pre_offset = offsets.begin;
        ctx->buf_offset = offsets.begin + offsets.pre_header_padding;