-/*
- * ltt_relay_try_switch: compute the offsets needed to force a sub-buffer
- * switch. Samples the trace clock into *tsc and fills *offsets from the
- * current write position; nothing is committed here — the caller performs
- * the actual switch using the returned offsets.
- *
- * Returns :
- * 0 if ok
- * !0 if execution must be aborted.
- */
-static inline int ltt_relay_try_switch(
- enum force_switch_mode mode,
- struct ust_channel *channel,
- struct ust_buffer *buf,
- struct ltt_reserve_switch_offsets *offsets,
- u64 *tsc)
-{
- long subbuf_index;
-
- /* Snapshot the write position; "old" remembers where we started. */
- offsets->begin = local_read(&buf->offset);
- offsets->old = offsets->begin;
- offsets->begin_switch = 0;
- offsets->end_switch_old = 0;
-
- *tsc = trace_clock_read64();
-
- if (SUBBUF_OFFSET(offsets->begin, buf->chan) != 0) {
- /*
- * Current sub-buffer partially filled: align begin on the
- * next sub-buffer boundary and mark the old one for an
- * end-of-sub-buffer switch.
- */
- offsets->begin = SUBBUF_ALIGN(offsets->begin, buf->chan);
- offsets->end_switch_old = 1;
- } else {
- /* we do not have to switch : buffer is empty */
- return -1;
- }
- /* In FORCE_ACTIVE mode the new sub-buffer starts with its header. */
- if (mode == FORCE_ACTIVE)
- offsets->begin += ltt_subbuffer_header_size();
- /*
- * Always begin_switch in FORCE_ACTIVE mode.
- * Test new buffer integrity
- */
- subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan);
- /*
- * Difference between what has been reserved in the next sub-buffer
- * and what has actually been committed there; non-zero means that
- * sub-buffer was not fully committed (corrupted, or unconsumed in
- * overwrite mode).
- */
- offsets->reserve_commit_diff =
- (BUFFER_TRUNC(offsets->begin, buf->chan)
- >> channel->n_subbufs_order)
- - (local_read(&buf->commit_count[subbuf_index])
- & channel->commit_count_mask);
- if (offsets->reserve_commit_diff == 0) {
- /* Next buffer not corrupted. */
- if (mode == FORCE_ACTIVE
- && !channel->overwrite
- && offsets->begin - atomic_long_read(&buf->consumed)
- >= channel->alloc_size) {
- /*
- * We do not overwrite non consumed buffers and we are
- * full : ignore switch while tracing is active.
- */
- return -1;
- }
- } else {
- /*
- * Next subbuffer corrupted. Force pushing reader even in normal
- * mode
- */
- }
- offsets->end = offsets->begin;
- return 0;
-}
-
-/*
- * ltt_reserve_push_reader: advance the reader's consumed count when the
- * writer is about to pass it (overwrite mode) or must skip over a corrupted
- * sub-buffer. Lockless: retries the cmpxchg until the update from this
- * writer either wins or becomes unnecessary.
- */
-static inline void ltt_reserve_push_reader(
- struct ust_channel *channel,
- struct ust_buffer *buf,
- struct ltt_reserve_switch_offsets *offsets)
-{
- long consumed_old, consumed_new;
-
- do {
- consumed_old = atomic_long_read(&buf->consumed);
- /*
- * If buffer is in overwrite mode, push the reader consumed
- * count if the write position has reached it and we are not
- * at the first iteration (don't push the reader farther than
- * the writer). This operation can be done concurrently by many
- * writers in the same buffer, the writer being at the farthest
- * write position sub-buffer index in the buffer being the one
- * which will win this loop.
- * If the buffer is not in overwrite mode, pushing the reader
- * only happens if a sub-buffer is corrupted.
- */
- if ((SUBBUF_TRUNC(offsets->end-1, buf->chan)
- - SUBBUF_TRUNC(consumed_old, buf->chan))
- >= channel->alloc_size)
- consumed_new = SUBBUF_ALIGN(consumed_old, buf->chan);
- else {
- /* Reader is not in the writer's way: nothing to push. */
- consumed_new = consumed_old;
- break;
- }
- /* Retry if another writer moved buf->consumed under us. */
- } while (atomic_long_cmpxchg(&buf->consumed, consumed_old,
- consumed_new) != consumed_old);
-
- if (consumed_old != consumed_new) {
- /*
- * Reader pushed : we are the winner of the push, we can
- * therefore reequilibrate reserve and commit. Atomic increment
- * of the commit count permits other writers to play around
- * with this variable before us. We keep track of
- * corrupted_subbuffers even in overwrite mode :
- * we never want to write over a non completely committed
- * sub-buffer : possible causes : the buffer size is too low
- * compared to the unordered data input, or there is a writer
- * that died between the reserve and the commit.
- */
- if (offsets->reserve_commit_diff) {
- /*
- * We have to alter the sub-buffer commit count.
- * We do not deliver the previous subbuffer, given it
- * was either corrupted or not consumed (overwrite
- * mode).
- */
- local_add(offsets->reserve_commit_diff,
- &buf->commit_count[
- SUBBUF_INDEX(offsets->begin,
- buf->chan)]);
- if (!channel->overwrite
- || offsets->reserve_commit_diff
- != channel->subbuf_size) {
- /*
- * The reserve commit diff was not subbuf_size :
- * it means the subbuffer was partly written to
- * and is therefore corrupted. If it is multiple
- * of subbuffer size and we are in flight
- * recorder mode, we are skipping over a whole
- * subbuffer.
- */
- local_inc(&buf->corrupted_subbuffers);
- }
- }
- }
-}
-
-
-/*
- * ltt_reserve_switch_old_subbuf: switch old subbuffer
- *
- * Concurrency safe because we are the last and only thread to alter this
- * sub-buffer. As long as it is not delivered and read, no other thread can
- * alter the offset, alter the reserve_count or call the
- * client_buffer_end_callback on this sub-buffer.
- *
- * The only remaining threads could be the ones with pending commits. They will
- * have to do the deliver themselves. Not concurrency safe in overwrite mode.
- * We detect corrupted subbuffers with commit and reserve counts. We keep a
- * corrupted sub-buffers count and push the readers across these sub-buffers.
- *
- * Not concurrency safe if a writer is stalled in a subbuffer and another writer
- * switches in, finding out it's corrupted. The result will be than the old
- * (uncommited) subbuffer will be declared corrupted, and that the new subbuffer
- * will be declared corrupted too because of the commit count adjustment.
- *
- * Note : offset_old should never be 0 here.
- */
-static inline void ltt_reserve_switch_old_subbuf(
- struct ust_channel *channel,
- struct ust_buffer *buf,
- struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
-{
- long oldidx = SUBBUF_INDEX(offsets->old - 1, channel);
-
- /* Write the end-of-sub-buffer header/padding into the old subbuf. */
- channel->buffer_end(buf, *tsc, offsets->old, oldidx);
- /* Must write buffer end before incrementing commit count */
- smp_wmb();
- /*
- * Account the unused tail of the sub-buffer (padding) so the commit
- * count reaches a full subbuf_size multiple.
- */
- offsets->commit_count =
- local_add_return(channel->subbuf_size
- - (SUBBUF_OFFSET(offsets->old - 1, channel)
- + 1),
- &buf->commit_count[oldidx]);
- /* Deliver only if reserve and commit counts now agree (no pending
- * writers left in this sub-buffer). */
- if ((BUFFER_TRUNC(offsets->old - 1, channel)
- >> channel->n_subbufs_order)
- - ((offsets->commit_count - channel->subbuf_size)
- & channel->commit_count_mask) == 0)
- ltt_deliver(buf, oldidx, offsets->commit_count);
-}
-
-/*
- * ltt_reserve_switch_new_subbuf: Populate new subbuffer.
- *
- * This code can be executed unordered : writers may already have written to the
- * sub-buffer before this code gets executed, caution. The commit makes sure
- * that this code is executed before the deliver of this sub-buffer.
- */
-static /*inline*/ void ltt_reserve_switch_new_subbuf(
- struct ust_channel *channel,
- struct ust_buffer *buf,
- struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
-{
- long beginidx = SUBBUF_INDEX(offsets->begin, channel);
-
- /* Write the sub-buffer header at the start of the new subbuf. */
- channel->buffer_begin(buf, *tsc, beginidx);
- /* Must write buffer begin (header) before incrementing commit count */
- smp_wmb();
- /* Commit the header bytes just written. */
- offsets->commit_count = local_add_return(ltt_subbuffer_header_size(),
- &buf->commit_count[beginidx]);
- /* Check if the written buffer has to be delivered */
- if ((BUFFER_TRUNC(offsets->begin, channel)
- >> channel->n_subbufs_order)
- - ((offsets->commit_count - channel->subbuf_size)
- & channel->commit_count_mask) == 0)
- ltt_deliver(buf, beginidx, offsets->commit_count);
-}
-
-
-/*
- * ltt_reserve_end_switch_current: finish switching current subbuffer
- *
- * Concurrency safe because we are the last and only thread to alter this
- * sub-buffer. As long as it is not delivered and read, no other thread can
- * alter the offset, alter the reserve_count or call the
- * client_buffer_end_callback on this sub-buffer.
- *
- * The only remaining threads could be the ones with pending commits. They will
- * have to do the deliver themselves. Not concurrency safe in overwrite mode.
- * We detect corrupted subbuffers with commit and reserve counts. We keep a
- * corrupted sub-buffers count and push the readers across these sub-buffers.
- *
- * Not concurrency safe if a writer is stalled in a subbuffer and another writer
- * switches in, finding out it's corrupted. The result will be than the old
- * (uncommited) subbuffer will be declared corrupted, and that the new subbuffer
- * will be declared corrupted too because of the commit count adjustment.
- */
-static inline void ltt_reserve_end_switch_current(
- struct ust_channel *channel,
- struct ust_buffer *buf,
- struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
-{
- long endidx = SUBBUF_INDEX(offsets->end - 1, channel);
-
- /* Write the end-of-sub-buffer data into the current subbuf. */
- channel->buffer_end(buf, *tsc, offsets->end, endidx);
- /* Must write buffer end before incrementing commit count */
- smp_wmb();
- /*
- * Account the unused tail of the sub-buffer (padding) so the commit
- * count reaches a full subbuf_size multiple.
- */
- offsets->commit_count =
- local_add_return(channel->subbuf_size
- - (SUBBUF_OFFSET(offsets->end - 1, channel)
- + 1),
- &buf->commit_count[endidx]);
- /* Deliver only if reserve and commit counts now agree (no pending
- * writers left in this sub-buffer). */
- if ((BUFFER_TRUNC(offsets->end - 1, channel)
- >> channel->n_subbufs_order)
- - ((offsets->commit_count - channel->subbuf_size)
- & channel->commit_count_mask) == 0)
- ltt_deliver(buf, endidx, offsets->commit_count);
-}
-