+/*
+ * ltt_reserve_switch_old_subbuf: switch old subbuffer
+ *
+ * Concurrency safe because we are the last and only thread to alter this
+ * sub-buffer. As long as it is not delivered and read, no other thread can
+ * alter the offset, alter the reserve_count or call the
+ * client_buffer_end_callback on this sub-buffer.
+ *
+ * The only remaining threads could be the ones with pending commits. They will
+ * have to do the deliver themselves. Not concurrency safe in overwrite mode.
+ * We detect corrupted subbuffers with commit and reserve counts. We keep a
+ * corrupted sub-buffers count and push the readers across these sub-buffers.
+ *
+ * Not concurrency safe if a writer is stalled in a subbuffer and another writer
+ * switches in, finding out it's corrupted. The result will be that the old
+ * (uncommitted) subbuffer will be declared corrupted, and that the new subbuffer
+ * will be declared corrupted too because of the commit count adjustment.
+ *
+ * Note : offset_old should never be 0 here.
+ */
+static void ltt_reserve_switch_old_subbuf(
+ struct ust_channel *chan, struct ust_buffer *buf,
+ struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
+{
+ /* Index of the sub-buffer being closed; offsets->old is the first
+  * byte after it, hence the -1. */
+ long oldidx = SUBBUF_INDEX(offsets->old - 1, chan);
+ long commit_count, padding_size;
+
+ /* The unused tail of the sub-buffer is accounted for as padding so
+  * the commit count still adds up to a full sub-buffer of bytes. */
+ padding_size = chan->subbuf_size
+ - (SUBBUF_OFFSET(offsets->old - 1, chan) + 1);
+ /* Write the end-of-subbuffer header (timestamp etc.) before commit. */
+ ltt_buffer_end(buf, *tsc, offsets->old, oldidx);
+
+ /*
+ * Must write slot data before incrementing commit count.
+ * This compiler barrier is upgraded into a smp_wmb() by the IPI
+ * sent by get_subbuf() when it does its smp_rmb().
+ */
+ barrier();
+ /* Account the padding as committed, then re-read the total: the
+  * ordering (add, then read) matters for concurrent committers. */
+ uatomic_add(&buf->commit_count[oldidx].cc, padding_size);
+ commit_count = uatomic_read(&buf->commit_count[oldidx].cc);
+ /* Deliver the sub-buffer to the reader if it is now fully committed. */
+ ltt_check_deliver(chan, buf, offsets->old - 1, commit_count, oldidx);
+ ltt_write_commit_counter(chan, buf, oldidx,
+ offsets->old, commit_count, padding_size);
+}
+
+/*
+ * ltt_reserve_switch_new_subbuf: Populate new subbuffer.
+ *
+ * This code can be executed unordered : writers may already have written to the
+ * sub-buffer before this code gets executed, caution. The commit makes sure
+ * that this code is executed before the deliver of this sub-buffer.
+ */
+static void ltt_reserve_switch_new_subbuf(
+ struct ust_channel *chan, struct ust_buffer *buf,
+ struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
+{
+ /* Index of the sub-buffer being opened at offsets->begin. */
+ long beginidx = SUBBUF_INDEX(offsets->begin, chan);
+ long commit_count;
+
+ /* Write the start-of-subbuffer header before committing it. */
+ ltt_buffer_begin(buf, *tsc, beginidx);
+
+ /*
+ * Must write slot data before incrementing commit count.
+ * This compiler barrier is upgraded into a smp_wmb() by the IPI
+ * sent by get_subbuf() when it does its smp_rmb().
+ */
+ barrier();
+ /* Commit the header bytes, then re-read the running total; the
+  * add-then-read ordering matters for concurrent committers. */
+ uatomic_add(&buf->commit_count[beginidx].cc, ltt_subbuffer_header_size());
+ commit_count = uatomic_read(&buf->commit_count[beginidx].cc);
+ /* Check if the written buffer has to be delivered */
+ ltt_check_deliver(chan, buf, offsets->begin, commit_count, beginidx);
+ ltt_write_commit_counter(chan, buf, beginidx,
+ offsets->begin, commit_count, ltt_subbuffer_header_size());
+}
+
+/*
+ * ltt_reserve_end_switch_current: finish switching current subbuffer
+ *
+ * Concurrency safe because we are the last and only thread to alter this
+ * sub-buffer. As long as it is not delivered and read, no other thread can
+ * alter the offset, alter the reserve_count or call the
+ * client_buffer_end_callback on this sub-buffer.
+ *
+ * The only remaining threads could be the ones with pending commits. They will
+ * have to do the deliver themselves. Not concurrency safe in overwrite mode.
+ * We detect corrupted subbuffers with commit and reserve counts. We keep a
+ * corrupted sub-buffers count and push the readers across these sub-buffers.
+ *
+ * Not concurrency safe if a writer is stalled in a subbuffer and another writer
+ * switches in, finding out it's corrupted. The result will be that the old
+ * (uncommitted) subbuffer will be declared corrupted, and that the new subbuffer
+ * will be declared corrupted too because of the commit count adjustment.
+ */
+static void ltt_reserve_end_switch_current(
+ struct ust_channel *chan,
+ struct ust_buffer *buf,
+ struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
+{
+ /* Index of the sub-buffer being closed; offsets->end is the first
+  * byte after it, hence the -1. */
+ long endidx = SUBBUF_INDEX(offsets->end - 1, chan);
+ long commit_count, padding_size;
+
+ /* The unused tail of the sub-buffer is accounted for as padding so
+  * the commit count still adds up to a full sub-buffer of bytes. */
+ padding_size = chan->subbuf_size
+ - (SUBBUF_OFFSET(offsets->end - 1, chan) + 1);
+
+ /* Write the end-of-subbuffer header (timestamp etc.) before commit. */
+ ltt_buffer_end(buf, *tsc, offsets->end, endidx);
+
+ /*
+ * Must write slot data before incrementing commit count.
+ * This compiler barrier is upgraded into a smp_wmb() by the IPI
+ * sent by get_subbuf() when it does its smp_rmb().
+ */
+ barrier();
+ /* Account the padding as committed, then re-read the total: the
+  * add-then-read ordering matters for concurrent committers. */
+ uatomic_add(&buf->commit_count[endidx].cc, padding_size);
+ commit_count = uatomic_read(&buf->commit_count[endidx].cc);
+ /* Deliver the sub-buffer to the reader if it is now fully committed. */
+ ltt_check_deliver(chan, buf,
+ offsets->end - 1, commit_count, endidx);
+ ltt_write_commit_counter(chan, buf, endidx,
+ offsets->end, commit_count, padding_size);
+}