Fix: pass proper args when writing commit counter
diff --git a/libringbuffer/ring_buffer_frontend.c b/libringbuffer/ring_buffer_frontend.c
index 84c6726e97a2d66ec612bc083aaa4ca36ab6225a..eb4e48629ca3d26a08b3c015b069a0e3c29f7d68 100644
--- a/libringbuffer/ring_buffer_frontend.c
+++ b/libringbuffer/ring_buffer_frontend.c
 struct switch_offsets {
        unsigned long begin, end, old;
        size_t pre_header_padding, size;
-       unsigned int switch_new_start:1, switch_old_start:1, switch_old_end:1;
+       unsigned int switch_new_start:1, switch_new_end:1, switch_old_start:1,
+                    switch_old_end:1;
 };
 
 DEFINE_URCU_TLS(unsigned int, lib_ring_buffer_nesting);
@@ -1106,7 +1107,7 @@ retry:
         */
        if (((commit_count - chan->backend.subbuf_size)
             & chan->commit_count_mask)
-           - (buf_trunc(consumed_cur, chan)
+           - (buf_trunc(consumed, chan)
               >> chan->backend.num_subbuf_order)
            != 0)
                goto nodata;
@@ -1115,7 +1116,7 @@ retry:
         * Check that we are not about to read the same subbuffer in
         * which the writer head is.
         */
-       if (subbuf_trunc(write_offset, chan) - subbuf_trunc(consumed_cur, chan)
+       if (subbuf_trunc(write_offset, chan) - subbuf_trunc(consumed, chan)
            == 0)
                goto nodata;
 
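Both hunks above make the nodata checks use the consumed count the caller snapshotted, rather than re-reading consumed_cur, which a concurrent consumer could have advanced between the two tests. A minimal standalone sketch of the fully-committed test with concrete numbers; buf_trunc() is modelled as rounding down to a whole-buffer multiple, and the commit_count_mask shape is an assumption for illustration, not text from this patch:

#include <stdio.h>

int main(void)
{
        unsigned long subbuf_size = 4096, num_subbuf_order = 2;
        unsigned long buf_size = subbuf_size << num_subbuf_order;      /* 4 sub-buffers */
        unsigned long commit_count_mask = ~0UL >> num_subbuf_order;    /* assumed mask shape */

        unsigned long consumed = 8192;          /* reading sub-buffer 2, first lap */
        unsigned long commit_count = 4096;      /* that sub-buffer committed exactly once */

        unsigned long lhs = (commit_count - subbuf_size) & commit_count_mask;
        unsigned long rhs = (consumed & ~(buf_size - 1))        /* buf_trunc(consumed) */
                            >> num_subbuf_order;

        printf("%s\n", (lhs - rhs) ? "nodata" : "sub-buffer readable");
        return 0;
}
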
@@ -1332,11 +1333,10 @@ void lib_ring_buffer_switch_old_start(struct lttng_ust_lib_ring_buffer *buf,
        commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, oldidx)->cc);
        /* Check if the written buffer has to be delivered */
        lib_ring_buffer_check_deliver(config, buf, chan, offsets->old,
-                                     commit_count, oldidx, handle);
+                                     commit_count, oldidx, handle, tsc);
        lib_ring_buffer_write_commit_counter(config, buf, chan, oldidx,
-                                            offsets->old, commit_count,
-                                            config->cb.subbuffer_header_size(),
-                                            handle);
+                       offsets->old + config->cb.subbuffer_header_size(),
+                       commit_count, handle);
 }
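
The write_commit_counter call now hands the helper the offset *after* the bytes just committed (offsets->old plus the header size) instead of the sub-buffer start and a separate size. A hedged, standalone reduction of why that matters, assuming the helper skips its commit_seq update whenever the passed offset and the commit counter disagree modulo the sub-buffer size (an assumption about lib_ring_buffer_write_commit_counter()'s internals, not quoted from this patch):

#include <stdio.h>

/* assumed guard, reduced from the OOPS-consistency commit_seq logic */
static void write_commit_counter(unsigned long buf_offset,
                unsigned long commit_count, unsigned long subbuf_size)
{
        if ((buf_offset - commit_count) & (subbuf_size - 1)) {  /* subbuf_offset() != 0 */
                printf("offset %5lu: skipped, offset/commit mismatch\n", buf_offset);
                return;
        }
        printf("offset %5lu: commit_seq advanced to %lu\n", buf_offset, commit_count);
}

int main(void)
{
        unsigned long subbuf_size = 4096, header_size = 16;
        unsigned long old = 4096;               /* start of the freshly switched sub-buffer */
        unsigned long commit_count = 16;        /* its header was just committed */

        write_commit_counter(old, commit_count, subbuf_size);                  /* old args: misfires */
        write_commit_counter(old + header_size, commit_count, subbuf_size);    /* fixed args */
        return 0;
}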
 
 /*
@@ -1371,10 +1371,9 @@ void lib_ring_buffer_switch_old_end(struct lttng_ust_lib_ring_buffer *buf,
        v_add(config, padding_size, &shmp_index(handle, buf->commit_hot, oldidx)->cc);
        commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, oldidx)->cc);
        lib_ring_buffer_check_deliver(config, buf, chan, offsets->old - 1,
-                                     commit_count, oldidx, handle);
+                                     commit_count, oldidx, handle, tsc);
        lib_ring_buffer_write_commit_counter(config, buf, chan, oldidx,
-                                            offsets->old, commit_count,
-                                            padding_size, handle);
+                       offsets->old + padding_size, commit_count, handle);
 }
 
 /*
@@ -1407,11 +1406,34 @@ void lib_ring_buffer_switch_new_start(struct lttng_ust_lib_ring_buffer *buf,
        commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, beginidx)->cc);
        /* Check if the written buffer has to be delivered */
        lib_ring_buffer_check_deliver(config, buf, chan, offsets->begin,
-                                     commit_count, beginidx, handle);
+                                     commit_count, beginidx, handle, tsc);
        lib_ring_buffer_write_commit_counter(config, buf, chan, beginidx,
-                                            offsets->begin, commit_count,
-                                            config->cb.subbuffer_header_size(),
-                                            handle);
+                       offsets->begin + config->cb.subbuffer_header_size(),
+                       commit_count, handle);
+}
+
+/*
+ * lib_ring_buffer_switch_new_end: finish switching current subbuffer
+ *
+ * Calls subbuffer_set_data_size() to set the data size of the current
+ * sub-buffer. We do not need to perform check_deliver nor commit here,
+ * since this task will be done by the "commit" of the event for which
+ * we are currently doing the space reservation.
+ */
+static
+void lib_ring_buffer_switch_new_end(struct lttng_ust_lib_ring_buffer *buf,
+                                   struct channel *chan,
+                                   struct switch_offsets *offsets,
+                                   uint64_t tsc,
+                                   struct lttng_ust_shm_handle *handle)
+{
+       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+       unsigned long endidx, data_size;
+
+       endidx = subbuf_index(offsets->end - 1, chan);
+       data_size = subbuf_offset(offsets->end - 1, chan) + 1;
+       subbuffer_set_data_size(config, &buf->backend, endidx, data_size,
+                               handle);
 }
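
Indexing with offsets->end - 1 matters because an end offset sitting exactly on a boundary already belongs to the next sub-buffer; the last byte of the one just filled is end - 1. A standalone arithmetic sketch (power-of-two sizes assumed, values illustrative):

#include <stdio.h>

int main(void)
{
        unsigned long subbuf_size = 4096;
        unsigned long end = 8192;       /* reservation ended exactly on a boundary */

        unsigned long endidx = (end - 1) / subbuf_size;                 /* subbuf_index(end - 1) */
        unsigned long data_size = ((end - 1) & (subbuf_size - 1)) + 1;  /* subbuf_offset(end - 1) + 1 */

        printf("endidx=%lu data_size=%lu\n", endidx, data_size);        /* 1, 4096 */
        return 0;
}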
 
 /*
@@ -1424,10 +1446,11 @@ int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
                                    struct lttng_ust_lib_ring_buffer *buf,
                                    struct channel *chan,
                                    struct switch_offsets *offsets,
-                                   uint64_t *tsc)
+                                   uint64_t *tsc,
+                                   struct lttng_ust_shm_handle *handle)
 {
        const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
-       unsigned long off;
+       unsigned long off, reserve_commit_diff;
 
        offsets->begin = v_read(config, &buf->offset);
        offsets->old = offsets->begin;
@@ -1452,36 +1475,69 @@ int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
         * timestamps) are visible to the reader. This is required for
         * quiescence guarantees for the fusion merge.
         */
-       if (mode == SWITCH_FLUSH || off > 0) {
-               if (caa_unlikely(off == 0)) {
-                       /*
-                        * A final flush that encounters an empty
-                        * sub-buffer cannot switch buffer if a
-                        * reader is located within this sub-buffer.
-                        * Anyway, the purpose of final flushing of a
-                        * sub-buffer at offset 0 is to handle the case
-                        * of entirely empty stream.
-                        */
-                       if (caa_unlikely(subbuf_trunc(offsets->begin, chan)
-                                        - subbuf_trunc((unsigned long)
-                                            uatomic_read(&buf->consumed), chan)
-                                       >= chan->backend.buf_size))
-                               return -1;
-                       /*
-                        * The client does not save any header information.
-                        * Don't switch empty subbuffer on finalize, because it
-                        * is invalid to deliver a completely empty subbuffer.
-                        */
-                       if (!config->cb.subbuffer_header_size())
+       if (mode != SWITCH_FLUSH && !off)
+               return -1;      /* we do not have to switch: buffer is empty */
+
+       if (caa_unlikely(off == 0)) {
+               unsigned long sb_index, commit_count;
+
+               /*
+                * We are performing a SWITCH_FLUSH. At this stage, there are no
+                * concurrent writes into the buffer.
+                *
+                * The client does not save any header information.  Don't
+                * switch empty subbuffer on finalize, because it is invalid to
+                * deliver a completely empty subbuffer.
+                */
+               if (!config->cb.subbuffer_header_size())
+                       return -1;
+
+               /* Test new buffer integrity */
+               sb_index = subbuf_index(offsets->begin, chan);
+               commit_count = v_read(config,
+                               &shmp_index(handle, buf->commit_cold,
+                                       sb_index)->cc_sb);
+               reserve_commit_diff =
+                 (buf_trunc(offsets->begin, chan)
+                  >> chan->backend.num_subbuf_order)
+                 - (commit_count & chan->commit_count_mask);
+               if (caa_likely(reserve_commit_diff == 0)) {
+                       /* Next subbuffer not being written to. */
+                       if (caa_unlikely(config->mode != RING_BUFFER_OVERWRITE &&
+                               subbuf_trunc(offsets->begin, chan)
+                                - subbuf_trunc((unsigned long)
+                                    uatomic_read(&buf->consumed), chan)
+                               >= chan->backend.buf_size)) {
+                               /*
+                                * We do not overwrite non-consumed buffers
+                                * and we are full: don't switch.
+                                */
                                return -1;
+                       } else {
+                               /*
+                                * Next subbuffer not being written to, and we
+                                * are either in overwrite mode or the buffer is
+                                * not full. It's safe to write in this new
+                                * subbuffer.
+                                */
+                       }
+               } else {
                        /*
-                        * Need to write the subbuffer start header on finalize.
+                        * Next subbuffer reserve offset does not match the
+                        * commit offset. Don't perform switch in
+                        * producer-consumer and overwrite mode.  Caused by
+                        * either a writer OOPS or too many nested writes over a
+                        * reserve/commit pair.
                         */
-                       offsets->switch_old_start = 1;
+                       return -1;
                }
-               offsets->begin = subbuf_align(offsets->begin, chan);
-       } else
-               return -1;      /* we do not have to switch : buffer is empty */
+
+               /*
+                * Need to write the subbuffer start header on finalize.
+                */
+               offsets->switch_old_start = 1;
+       }
+       offsets->begin = subbuf_align(offsets->begin, chan);
        /* Note: old points to the next subbuf at offset 0 */
        offsets->end = offsets->begin;
        return 0;
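
The reworked empty-sub-buffer path refuses a flush when the sub-buffer it would switch into has not been fully committed as many times as the reserve offset has lapped the buffer. A hedged, standalone rendering of the reserve_commit_diff arithmetic; the commit_count_mask shape and all values are assumptions for illustration:

#include <stdio.h>

int main(void)
{
        unsigned long subbuf_size = 4096, num_subbuf_order = 2;
        unsigned long buf_size = subbuf_size << num_subbuf_order;
        unsigned long commit_count_mask = ~0UL >> num_subbuf_order;    /* assumed */

        unsigned long begin = 16384;    /* reserve offset: second lap, sub-buffer 0 */
        unsigned long expected = (begin & ~(buf_size - 1))             /* buf_trunc(begin) */
                                 >> num_subbuf_order;                  /* = 1 lap * subbuf_size */

        unsigned long cc_sb_ok = 4096;  /* fully committed once: diff 0, switch allowed */
        unsigned long cc_sb_bad = 4000; /* writer died mid-commit: diff != 0, refuse */

        printf("ok:  diff=%lu\n", expected - (cc_sb_ok & commit_count_mask));
        printf("bad: diff=%lu\n", expected - (cc_sb_bad & commit_count_mask));
        return 0;
}
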
@@ -1511,7 +1567,7 @@ void lib_ring_buffer_switch_slow(struct lttng_ust_lib_ring_buffer *buf, enum swi
         */
        do {
                if (lib_ring_buffer_try_switch_slow(mode, buf, chan, &offsets,
-                                                   &tsc))
+                                                   &tsc, handle))
                        return; /* Switch not needed */
        } while (v_cmpxchg(config, &buf->offset, offsets.old, offsets.end)
                 != offsets.old);
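
The loop above is the classic reserve-offset compare-and-swap pattern: recompute the switch offsets from a fresh snapshot until the cmpxchg lands, so exactly one thread performs the switch. A hedged sketch of the same shape in portable C11 atomics; try_switch() only mimics the empty-check and boundary alignment, not the full slow path:

#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong buf_offset;

static int try_switch(unsigned long *old, unsigned long *end)
{
        *old = atomic_load(&buf_offset);
        if ((*old & 4095) == 0)
                return -1;                      /* sub-buffer empty: no switch needed */
        *end = (*old + 4096) & ~4095UL;         /* subbuf_align(): next boundary */
        return 0;
}

int main(void)
{
        unsigned long old, end;

        atomic_store(&buf_offset, 5000);
        do {
                if (try_switch(&old, &end))
                        return 0;               /* switch not needed */
        } while (!atomic_compare_exchange_strong(&buf_offset, &old, end));
        printf("switched: %lu -> %lu\n", old, end);
        return 0;
}
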
@@ -1561,11 +1617,13 @@ int lib_ring_buffer_try_reserve_slow(struct lttng_ust_lib_ring_buffer *buf,
 {
        const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
        struct lttng_ust_shm_handle *handle = ctx->handle;
-       unsigned long reserve_commit_diff;
+       unsigned long reserve_commit_diff, offset_cmp;
 
-       offsets->begin = v_read(config, &buf->offset);
+retry:
+       offsets->begin = offset_cmp = v_read(config, &buf->offset);
        offsets->old = offsets->begin;
        offsets->switch_new_start = 0;
+       offsets->switch_new_end = 0;
        offsets->switch_old_end = 0;
        offsets->pre_header_padding = 0;
 
@@ -1594,7 +1652,7 @@ int lib_ring_buffer_try_reserve_slow(struct lttng_ust_lib_ring_buffer *buf,
                }
        }
        if (caa_unlikely(offsets->switch_new_start)) {
-               unsigned long sb_index;
+               unsigned long sb_index, commit_count;
 
                /*
                 * We are typically not filling the previous buffer completely.
@@ -1605,12 +1663,32 @@ int lib_ring_buffer_try_reserve_slow(struct lttng_ust_lib_ring_buffer *buf,
                                 + config->cb.subbuffer_header_size();
                /* Test new buffer integrity */
                sb_index = subbuf_index(offsets->begin, chan);
+               /*
+                * Read buf->offset before buf->commit_cold[sb_index].cc_sb.
+                * lib_ring_buffer_check_deliver() has the matching
+                * memory barriers required around commit_cold cc_sb
+                * updates to ensure reserve and commit counter updates
+                * are not seen reordered when updated by another CPU.
+                */
+               cmm_smp_rmb();
+               commit_count = v_read(config,
+                               &shmp_index(handle, buf->commit_cold,
+                                       sb_index)->cc_sb);
+               /* Read buf->commit_cold[sb_index].cc_sb before buf->offset. */
+               cmm_smp_rmb();
+               if (caa_unlikely(offset_cmp != v_read(config, &buf->offset))) {
+                       /*
+                        * The reserve counter has been concurrently updated
+                        * while we read the commit counter. This means the
+                        * commit counter we read might not match buf->offset
+                        * due to concurrent update. We therefore need to retry.
+                        */
+                       goto retry;
+               }
                reserve_commit_diff =
                  (buf_trunc(offsets->begin, chan)
                   >> chan->backend.num_subbuf_order)
-                 - ((unsigned long) v_read(config,
-                                           &shmp_index(handle, buf->commit_cold, sb_index)->cc_sb)
-                    & chan->commit_count_mask);
+                 - (commit_count & chan->commit_count_mask);
                if (caa_likely(reserve_commit_diff == 0)) {
                        /* Next subbuffer not being written to. */
                        if (caa_unlikely(config->mode != RING_BUFFER_OVERWRITE &&
@@ -1645,7 +1723,8 @@ int lib_ring_buffer_try_reserve_slow(struct lttng_ust_lib_ring_buffer *buf,
 
                        /*
                         * Next subbuffer reserve offset does not match the
-                        * commit offset. Drop record in producer-consumer and
+                        * commit offset, and this did not involve an update to the
+                        * reserve counter. Drop record in producer-consumer and
                         * overwrite mode. Caused by either a writer OOPS or too
                         * many nested writes over a reserve/commit pair.
                         */
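
The two cmm_smp_rmb() calls plus the offset re-read above form a seqlock-style consistency check: if the reserve offset moved while cc_sb was being read, the two values may belong to different laps and must be re-sampled. A hedged sketch of that shape, with C11 acquire fences standing in for cmm_smp_rmb() (the closest portable analogue, an assumption rather than a statement about liburcu):

#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong buf_offset;
static atomic_ulong cc_sb;

static unsigned long read_cc_sb_consistent(void)
{
        unsigned long offset_cmp, commit_count;

retry:
        offset_cmp = atomic_load_explicit(&buf_offset, memory_order_relaxed);
        atomic_thread_fence(memory_order_acquire);      /* cmm_smp_rmb() stand-in */
        commit_count = atomic_load_explicit(&cc_sb, memory_order_relaxed);
        atomic_thread_fence(memory_order_acquire);      /* cmm_smp_rmb() stand-in */
        if (offset_cmp != atomic_load_explicit(&buf_offset, memory_order_relaxed))
                goto retry;     /* a concurrent reserve moved the offset: re-sample */
        return commit_count;
}

int main(void)
{
        atomic_store(&buf_offset, 8192);
        atomic_store(&cc_sb, 4096);
        printf("consistent cc_sb: %lu\n", read_cc_sb_consistent());
        return 0;
}
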
@@ -1697,6 +1776,14 @@ int lib_ring_buffer_try_reserve_slow(struct lttng_ust_lib_ring_buffer *buf,
                 */
        }
        offsets->end = offsets->begin + offsets->size;
+
+       if (caa_unlikely(subbuf_offset(offsets->end, chan) == 0)) {
+               /*
+                * The offset_end will fall at the very beginning of the next
+                * subbuffer.
+                */
+               offsets->switch_new_end = 1;    /* For offsets->begin */
+       }
        return 0;
 }
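
switch_new_end only fires when the reservation's end offset lands exactly on a sub-buffer boundary, i.e. the record just filled the sub-buffer to its last byte. A standalone sketch of the test (power-of-two sub-buffer size assumed, values illustrative):

#include <stdio.h>

int main(void)
{
        unsigned long subbuf_size = 4096;
        unsigned long begin = 4000, size = 96;  /* record fills the sub-buffer exactly */
        unsigned long end = begin + size;

        if ((end & (subbuf_size - 1)) == 0)     /* subbuf_offset(end) == 0 */
                printf("switch_new_end = 1 (end %lu on a boundary)\n", end);
        return 0;
}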
 
@@ -1770,6 +1857,9 @@ int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx)
        if (caa_unlikely(offsets.switch_new_start))
                lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx->tsc, handle);
 
+       if (caa_unlikely(offsets.switch_new_end))
+               lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx->tsc, handle);
+
        ctx->slot_size = offsets.size;
        ctx->pre_offset = offsets.begin;
        ctx->buf_offset = offsets.begin + offsets.pre_header_padding;