Cleanup: apply `include-what-you-use` guideline for `uint*_t`
diff --git a/libringbuffer/frontend_internal.h b/libringbuffer/frontend_internal.h
index 323a8df7f98cd7e5d3aba69302c6c291799b8bbd..f6d91bde2bf63f0917b9bd27b90e73ed54429bcc 100644
--- a/libringbuffer/frontend_internal.h
+++ b/libringbuffer/frontend_internal.h
@@ -34,6 +34,7 @@
 #include <urcu/compiler.h>
 #include <urcu/tls-compat.h>
 #include <signal.h>
+#include <stdint.h>
 #include <pthread.h>
 
 #include <lttng/ringbuffer-config.h>
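
For context, the include-what-you-use guideline says a header should directly include every header whose declarations it uses, rather than relying on transitive inclusion; the commit title indicates this file uses the uint*_t fixed-width types, hence the explicit <stdint.h> above. A minimal sketch of the guideline (hypothetical header, not part of lttng-ust):

    /* iwyu_sketch.h -- hypothetical example header, for illustration only. */
    #ifndef IWYU_SKETCH_H
    #define IWYU_SKETCH_H

    #include <stdint.h>        /* included directly: uint64_t is used below */

    struct sketch_record {
            uint64_t timestamp;        /* relying on a transitive <stdint.h>
                                        * include would break if an unrelated
                                        * header stopped pulling it in */
    };

    #endif /* IWYU_SKETCH_H */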
@@ -205,6 +206,39 @@ void lib_ring_buffer_reserve_push_reader(struct lttng_ust_lib_ring_buffer *buf,
                                              consumed_new) != consumed_old));
 }
 
+/*
+ * Move the consumed position to the beginning of the sub-buffer that
+ * contains the write offset. Should only be used on ring buffers that
+ * are not actively being written into: clear_reader does not take the
+ * commit counters into account when moving the consumed position, so
+ * concurrent trace producers or consumers could observe a consumed
+ * position ahead of the write offset, which breaks the ring buffer
+ * algorithm's guarantees.
+ */
+static inline
+void lib_ring_buffer_clear_reader(struct lttng_ust_lib_ring_buffer *buf,
+                                 struct lttng_ust_shm_handle *handle)
+{
+       struct channel *chan;
+       const struct lttng_ust_lib_ring_buffer_config *config;
+       unsigned long offset, consumed_old, consumed_new;
+
+       chan = shmp(handle, buf->backend.chan);
+       if (!chan)
+               return;
+       config = &chan->backend.config;
+
+       do {
+               offset = v_read(config, &buf->offset);
+               consumed_old = uatomic_read(&buf->consumed);
+               CHAN_WARN_ON(chan, (long) (subbuf_trunc(offset, chan)
+                               - subbuf_trunc(consumed_old, chan))
+                               < 0);
+               consumed_new = subbuf_trunc(offset, chan);
+       } while (caa_unlikely(uatomic_cmpxchg(&buf->consumed, consumed_old,
+                                             consumed_new) != consumed_old));
+}
+
 static inline
 int lib_ring_buffer_pending_data(const struct lttng_ust_lib_ring_buffer_config *config,
                                 struct lttng_ust_lib_ring_buffer *buf,
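
Per the comment above, the new helper is only safe on a quiescent buffer. A minimal usage sketch under that assumption; my_stop_all_producers() is a hypothetical quiescing step, not an lttng-ust API, and only lib_ring_buffer_clear_reader comes from this patch:

    /* Hypothetical caller: discard all unread data from a buffer that is
     * no longer being written into. */
    static
    void my_discard_unread_data(struct lttng_ust_lib_ring_buffer *buf,
                    struct lttng_ust_shm_handle *handle)
    {
            /* Precondition: no tracer is writing into buf anymore. */
            my_stop_all_producers();

            /*
             * Jump the consumed position forward to the sub-buffer that
             * holds the write offset, discarding everything before it.
             */
            lib_ring_buffer_clear_reader(buf, handle);
    }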
@@ -234,14 +268,11 @@ int lib_ring_buffer_reserve_committed(const struct lttng_ust_lib_ring_buffer_con
                                      struct lttng_ust_shm_handle *handle)
 {
        unsigned long offset, idx, commit_count;
-       struct commit_counters_hot *cc_hot = shmp_index(handle, buf->commit_hot, idx);
+       struct commit_counters_hot *cc_hot;
 
        CHAN_WARN_ON(chan, config->alloc != RING_BUFFER_ALLOC_PER_CPU);
        CHAN_WARN_ON(chan, config->sync != RING_BUFFER_SYNC_PER_CPU);
 
-       if (caa_unlikely(!cc_hot))
-               return 0;
-
        /*
         * Read offset and commit count in a loop so they are both read
         * atomically wrt interrupts. We deal with interrupt concurrency by
@@ -253,6 +284,9 @@ int lib_ring_buffer_reserve_committed(const struct lttng_ust_lib_ring_buffer_con
        do {
                offset = v_read(config, &buf->offset);
                idx = subbuf_index(offset, chan);
+               cc_hot = shmp_index(handle, buf->commit_hot, idx);
+               if (caa_unlikely(!cc_hot))
+                       return 0;
                commit_count = v_read(config, &cc_hot->cc);
        } while (offset != v_read(config, &buf->offset));
 
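
The last two hunks fix the lookup of the hot commit counter: the pointer depends on idx, which is only known once offset has been sampled inside the retry loop, so the old code passed an uninitialized idx to shmp_index() and never re-resolved the pointer on retry. A generic sketch of the corrected pattern, using hypothetical names and C11 atomics instead of the liburcu primitives used above:

    #include <stdatomic.h>

    struct slot { long cc; };

    /* Sample the commit counter of the slot the write offset points at.
     * State derived from the sampled offset (the slot pointer) must be
     * recomputed on every retry, mirroring the fix above. */
    static long sample_slot_cc(atomic_long *offset, struct slot *slots,
                    unsigned long nr_slots)
    {
            long off, cc;
            struct slot *s;

            do {
                    off = atomic_load(offset);
                    /* Derived from off, so resolved inside the loop. */
                    s = &slots[(unsigned long) off % nr_slots];
                    cc = s->cc;
            } while (off != atomic_load(offset));   /* offset moved: retry */
            return cc;
    }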