+//ust// /*
+//ust// * Returns:
+//ust// * 0 if ok
+//ust// * !0 if execution must be aborted.
+//ust// */
+//ust// static inline int ltt_relay_try_reserve(
+//ust// struct ust_channel *channel, struct ust_buffer *buf,
+//ust// struct ltt_reserve_switch_offsets *offsets, size_t data_size,
+//ust// u64 *tsc, unsigned int *rflags, int largest_align)
+//ust// {
+//ust// offsets->begin = local_read(&buf->offset);
+//ust// offsets->old = offsets->begin;
+//ust// offsets->begin_switch = 0;
+//ust// offsets->end_switch_current = 0;
+//ust// offsets->end_switch_old = 0;
+//ust//
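+//ust// /*
+//ust// * Events normally carry only a compact, truncated timestamp.
+//ust// * last_tsc_overflow() presumably checks whether the delta since the
+//ust// * previous event still fits in that compact field; when it does not,
+//ust// * LTT_RFLAG_ID_SIZE_TSC requests an extended event header carrying
+//ust// * the full 64-bit TSC.
+//ust// */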
+//ust// *tsc = trace_clock_read64();
+//ust// if (last_tsc_overflow(buf, *tsc))
+//ust// *rflags = LTT_RFLAG_ID_SIZE_TSC;
+//ust//
+//ust// if (SUBBUF_OFFSET(offsets->begin, buf->chan) == 0) {
+//ust// offsets->begin_switch = 1; /* For offsets->begin */
+//ust// } else {
+//ust// offsets->size = ust_get_header_size(channel,
+//ust// offsets->begin, data_size,
+//ust// &offsets->before_hdr_pad, *rflags);
+//ust// offsets->size += ltt_align(offsets->begin + offsets->size,
+//ust// largest_align)
+//ust// + data_size;
+//ust// if ((SUBBUF_OFFSET(offsets->begin, buf->chan) + offsets->size)
+//ust// > buf->chan->subbuf_size) {
+//ust// offsets->end_switch_old = 1; /* For offsets->old */
+//ust// offsets->begin_switch = 1; /* For offsets->begin */
+//ust// }
+//ust// }
+//ust// if (offsets->begin_switch) {
+//ust// long subbuf_index;
+//ust//
+//ust// if (offsets->end_switch_old)
+//ust// offsets->begin = SUBBUF_ALIGN(offsets->begin,
+//ust// buf->chan);
+//ust// offsets->begin = offsets->begin + ltt_subbuffer_header_size();
+//ust// /* Test new buffer integrity */
+//ust// subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan);
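+//ust// /*
+//ust// * Presumably: the first term below is the number of bytes that should
+//ust// * already have been committed to this sub-buffer by previous passes
+//ust// * over the ring, and the second term is its recorded commit count.
+//ust// * A zero diff means the sub-buffer was fully committed on the
+//ust// * previous pass, i.e. it is sane to reuse.
+//ust// */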
+//ust// offsets->reserve_commit_diff =
+//ust// (BUFFER_TRUNC(offsets->begin, buf->chan)
+//ust// >> channel->n_subbufs_order)
+//ust// - (local_read(&buf->commit_count[subbuf_index])
+//ust// & channel->commit_count_mask);
+//ust// if (offsets->reserve_commit_diff == 0) {
+//ust// long consumed;
+//ust//
+//ust// consumed = atomic_long_read(&buf->consumed);
+//ust//
+//ust// /* Next buffer not corrupted. */
+//ust// if (!channel->overwrite &&
+//ust// (SUBBUF_TRUNC(offsets->begin, buf->chan)
+//ust// - SUBBUF_TRUNC(consumed, buf->chan))
+//ust// >= channel->alloc_size) {
+//ust//
+//ust// long consumed_idx = SUBBUF_INDEX(consumed, buf->chan);
+//ust// long commit_count = local_read(&buf->commit_count[consumed_idx]);
+//ust// if ((((commit_count - buf->chan->subbuf_size)
+//ust// & channel->commit_count_mask)
+//ust// - (BUFFER_TRUNC(consumed, buf->chan)
+//ust// >> channel->n_subbufs_order)) != 0)
+//ust// WARN("Event dropped. Caused by non-committed event.");
+//ust// else
+//ust// WARN("Event dropped. Caused by non-consumed buffer.");
+//ust// /*
+//ust// * We do not overwrite non-consumed buffers
+//ust// * and we are full: the event is lost.
+//ust// */
+//ust// local_inc(&buf->events_lost);
+//ust// return -1;
+//ust// } else {
+//ust// /*
+//ust// * Next buffer not corrupted; we are either in
+//ust// * overwrite mode or the buffer is not full.
+//ust// * It's safe to write in this new subbuffer.
+//ust// */
+//ust// }
+//ust// } else {
+//ust// /*
+//ust// * Next subbuffer corrupted. Force pushing reader even
+//ust// * in normal mode. It's safe to write in this new
+//ust// * subbuffer.
+//ust// */
+//ust// }
+//ust// offsets->size = ust_get_header_size(channel,
+//ust// offsets->begin, data_size,
+//ust// &offsets->before_hdr_pad, *rflags);
+//ust// offsets->size += ltt_align(offsets->begin + offsets->size,
+//ust// largest_align)
+//ust// + data_size;
+//ust// if ((SUBBUF_OFFSET(offsets->begin, buf->chan) + offsets->size)
+//ust// > buf->chan->subbuf_size) {
+//ust// /*
+//ust// * Event too big for subbuffers, report error, don't
+//ust// * complete the sub-buffer switch.
+//ust// */
+//ust// local_inc(&buf->events_lost);
+//ust// return -1;
+//ust// } else {
+//ust// /*
+//ust// * We just made a successful buffer switch and the event
+//ust// * fits in the new subbuffer. Let's write.
+//ust// */
+//ust// }
+//ust// } else {
+//ust// /*
+//ust// * Event fits in the current buffer and we are not on a switch
+//ust// * boundary. It's safe to write.
+//ust// */
+//ust// }
+//ust// offsets->end = offsets->begin + offsets->size;
+//ust//
+//ust// if ((SUBBUF_OFFSET(offsets->end, buf->chan)) == 0) {
+//ust// /*
+//ust// * The offset_end will fall at the very beginning of the next
+//ust// * subbuffer.
+//ust// */
+//ust// offsets->end_switch_current = 1; /* For offsets->begin */
+//ust// }
+//ust// return 0;
+//ust// }
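+//ust//
+//ust// /*
+//ust// * Illustrative sketch, not part of the original source: the slot
+//ust// * reserved above is laid out as header + alignment padding + payload.
+//ust// * Assuming largest_align is a power of two, ltt_align() plausibly
+//ust// * computes the padding needed to align the payload, along these
+//ust// * lines:
+//ust// */
+//ust// static inline size_t ltt_align_sketch(size_t offset, size_t align)
+//ust// {
+//ust// /* padding that brings offset up to the next multiple of align */
+//ust// return (align - offset) & (align - 1);
+//ust// }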
+//ust//
+//ust// /*
+//ust// * Returns:
+//ust// * 0 if ok
+//ust// * !0 if execution must be aborted.
+//ust// */
+//ust// static inline int ltt_relay_try_switch(
+//ust// enum force_switch_mode mode,
+//ust// struct ust_channel *channel,
+//ust// struct ust_buffer *buf,
+//ust// struct ltt_reserve_switch_offsets *offsets,
+//ust// u64 *tsc)
+//ust// {
+//ust// long subbuf_index;
+//ust//
+//ust// offsets->begin = local_read(&buf->offset);
+//ust// offsets->old = offsets->begin;
+//ust// offsets->begin_switch = 0;
+//ust// offsets->end_switch_old = 0;
+//ust//
+//ust// *tsc = trace_clock_read64();
+//ust//
+//ust// if (SUBBUF_OFFSET(offsets->begin, buf->chan) != 0) {
+//ust// offsets->begin = SUBBUF_ALIGN(offsets->begin, buf->chan);
+//ust// offsets->end_switch_old = 1;
+//ust// } else {
+//ust// /* We do not have to switch: the buffer is empty. */
+//ust// return -1;
+//ust// }
+//ust// if (mode == FORCE_ACTIVE)
+//ust// offsets->begin += ltt_subbuffer_header_size();
+//ust// /*
+//ust// * We always begin_switch in FORCE_ACTIVE mode.
+//ust// * Test the new buffer's integrity.
+//ust// */
+//ust// subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan);
+//ust// offsets->reserve_commit_diff =
+//ust// (BUFFER_TRUNC(offsets->begin, buf->chan)
+//ust// >> channel->n_subbufs_order)
+//ust// - (local_read(&buf->commit_count[subbuf_index])
+//ust// & channel->commit_count_mask);
+//ust// if (offsets->reserve_commit_diff == 0) {
+//ust// /* Next buffer not corrupted. */
+//ust// if (mode == FORCE_ACTIVE
+//ust// && !channel->overwrite
+//ust// && offsets->begin - atomic_long_read(&buf->consumed)
+//ust// >= channel->alloc_size) {
+//ust// /*
+//ust// * We do not overwrite non-consumed buffers and we are
+//ust// * full: ignore the switch while tracing is active.
+//ust// */
+//ust// return -1;
+//ust// }
+//ust// } else {
+//ust// /*
+//ust// * Next subbuffer corrupted. Force pushing the reader even in
+//ust// * normal mode.
+//ust// */
+//ust// }
+//ust// offsets->end = offsets->begin;
+//ust// return 0;
+//ust// }
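+//ust//
+//ust// /*
+//ust// * Illustrative sketch, not part of the original source: for
+//ust// * power-of-two sub-buffer and buffer sizes, the SUBBUF_* and
+//ust// * BUFFER_TRUNC helpers used above behave as if defined roughly as
+//ust// * follows (field names such as subbuf_size_order are assumptions):
+//ust// */
+//ust// #define SUBBUF_OFFSET_SKETCH(off, chan) ((off) & ((chan)->subbuf_size - 1))
+//ust// #define SUBBUF_TRUNC_SKETCH(off, chan) ((off) & ~((chan)->subbuf_size - 1))
+//ust// #define SUBBUF_ALIGN_SKETCH(off, chan) \
+//ust// (((off) + (chan)->subbuf_size) & ~((chan)->subbuf_size - 1))
+//ust// #define SUBBUF_INDEX_SKETCH(off, chan) \
+//ust// (((off) >> (chan)->subbuf_size_order) & ((chan)->n_subbufs - 1))
+//ust// #define BUFFER_TRUNC_SKETCH(off, chan) ((off) & ~((chan)->alloc_size - 1))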
+//ust//
+//ust// static inline void ltt_reserve_push_reader(
+//ust// struct ust_channel *channel,
+//ust// struct ust_buffer *buf,
+//ust// struct ltt_reserve_switch_offsets *offsets)
+//ust// {
+//ust// long consumed_old, consumed_new;
+//ust//
+//ust// do {
+//ust// consumed_old = atomic_long_read(&buf->consumed);
+//ust// /*
+//ust// * If the buffer is in overwrite mode, push the reader's consumed
+//ust// * count if the write position has reached it and we are not
+//ust// * at the first iteration (don't push the reader farther than
+//ust// * the writer). This operation can be done concurrently by many
+//ust// * writers in the same buffer; the writer at the farthest
+//ust// * write position (sub-buffer index) is the one that wins this
+//ust// * loop.
+//ust// * If the buffer is not in overwrite mode, pushing the reader
+//ust// * only happens if a sub-buffer is corrupted.
+//ust// */
+//ust// if ((SUBBUF_TRUNC(offsets->end - 1, buf->chan)
+//ust// - SUBBUF_TRUNC(consumed_old, buf->chan))
+//ust// >= channel->alloc_size) {
+//ust// consumed_new = SUBBUF_ALIGN(consumed_old, buf->chan);
+//ust// } else {
+//ust// consumed_new = consumed_old;
+//ust// break;
+//ust// }
+//ust// } while (atomic_long_cmpxchg(&buf->consumed, consumed_old,
+//ust// consumed_new) != consumed_old);
+//ust//
+//ust// if (consumed_old != consumed_new) {
+//ust// /*
+//ust// * Reader pushed: we are the winner of the push, and we can
+//ust// * therefore re-equilibrate reserve and commit. The atomic
+//ust// * increment of the commit count permits other writers to play
+//ust// * around with this variable before us. We keep track of
+//ust// * corrupted_subbuffers even in overwrite mode:
+//ust// * we never want to write over a sub-buffer that is not
+//ust// * completely committed. Possible causes: the buffer size is
+//ust// * too small for the unordered data input, or a writer died
+//ust// * between the reserve and the commit.
+//ust// */
+//ust// if (offsets->reserve_commit_diff) {
+//ust// /*
+//ust// * We have to alter the sub-buffer commit count.
+//ust// * We do not deliver the previous subbuffer, given it
+//ust// * was either corrupted or not consumed (overwrite
+//ust// * mode).
+//ust// */
+//ust// local_add(offsets->reserve_commit_diff,
+//ust// &buf->commit_count[
+//ust// SUBBUF_INDEX(offsets->begin,
+//ust// buf->chan)]);
+//ust// if (!channel->overwrite
+//ust// || offsets->reserve_commit_diff
+//ust// != channel->subbuf_size) {
+//ust// /*
+//ust// * The reserve commit diff was not subbuf_size:
+//ust// * it means the subbuffer was partly written to
+//ust// * and is therefore corrupted. If it is a multiple
+//ust// * of the subbuffer size and we are in flight
+//ust// * recorder mode, we are skipping over a whole
+//ust// * subbuffer.
+//ust// */
+//ust// local_inc(&buf->corrupted_subbuffers);
+//ust// }
+//ust// }
+//ust// }
+//ust// }
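+//ust//
+//ust// /*
+//ust// * Worked example (illustrative numbers only): four 1024-byte
+//ust// * sub-buffers, alloc_size = 4096, overwrite mode. A writer whose
+//ust// * slot ends at offset 5000 tests SUBBUF_TRUNC(4999) -
+//ust// * SUBBUF_TRUNC(900) = 4096 - 0 >= alloc_size against a reader whose
+//ust// * consumed count is still 900, so the reader is pushed to
+//ust// * SUBBUF_ALIGN(900) = 1024, surrendering the oldest sub-buffer to
+//ust// * the writer.
+//ust// */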
+//ust//
+//ust// /**
+//ust// * ltt_relay_reserve_slot - Atomic slot reservation in an LTTng buffer.
+//ust// * @trace: the trace structure to log to.
+//ust// * @channel: channel structure
+//ust// * @transport_data: data structure specific to ltt relay
+//ust// * @data_size: size of the variable-length data to log.
+//ust// * @slot_size: pointer to total size of the slot (out)
+//ust// * @buf_offset: pointer to reserved buffer offset (out)
+//ust// * @tsc: pointer to the tsc at the slot reservation (out)
+//ust// * @rflags: event header flags; LTT_RFLAG_ID_SIZE_TSC is added when a
+//ust// * full TSC must be recorded (in/out)
+//ust// * @largest_align: largest alignment of the event payload
+//ust// * @cpu: cpuid
+//ust// *
+//ust// * Return: 0 on success, -ENOSPC if not enough space, -EPERM if the
+//ust// * tracer nesting count is too deep.
+//ust// * It will take care of sub-buffer switching.
+//ust// */
+//ust// static notrace int ltt_relay_reserve_slot(struct ust_trace *trace,
+//ust// struct ust_channel *channel, void **transport_data,
+//ust// size_t data_size, size_t *slot_size, long *buf_offset, u64 *tsc,
+//ust// unsigned int *rflags, int largest_align, int cpu)
+//ust// {
+//ust// struct ust_buffer *buf = *transport_data = channel->buf[cpu];
+//ust// struct ltt_reserve_switch_offsets offsets;
+//ust//
+//ust// offsets.reserve_commit_diff = 0;
+//ust// offsets.size = 0;
+//ust//
+//ust// /*
+//ust// * Perform retryable operations.
+//ust// */
+//ust// if (ltt_nesting > 4) {
+//ust// local_inc(&buf->events_lost);
+//ust// return -EPERM;
+//ust// }
+//ust// do {
+//ust// if (ltt_relay_try_reserve(channel, buf, &offsets, data_size, tsc, rflags,
+//ust// largest_align))
+//ust// return -ENOSPC;
+//ust// } while (local_cmpxchg(&buf->offset, offsets.old,
+//ust// offsets.end) != offsets.old);
+//ust//
+//ust// /*
+//ust// * Atomically update last_tsc. This update races against concurrent
+//ust// * atomic updates, but the race will always cause supplementary full TSC
+//ust// * events, never the opposite (missing a full TSC event when it would be
+//ust// * needed).
+//ust// */
+//ust// save_last_tsc(buf, *tsc);
+//ust//
+//ust// /*
+//ust// * Push the reader if necessary
+//ust// */
+//ust// ltt_reserve_push_reader(channel, buf, &offsets);
+//ust//
+//ust// /*
+//ust// * Switch old subbuffer if needed.
+//ust// */
+//ust// if (offsets.end_switch_old)
+//ust// ltt_reserve_switch_old_subbuf(channel, buf, &offsets, tsc);
+//ust//
+//ust// /*
+//ust// * Populate new subbuffer.
+//ust// */
+//ust// if (offsets.begin_switch)
+//ust// ltt_reserve_switch_new_subbuf(channel, buf, &offsets, tsc);
+//ust//
+//ust// if (offsets.end_switch_current)
+//ust// ltt_reserve_end_switch_current(channel, buf, &offsets, tsc);
+//ust//
+//ust// *slot_size = offsets.size;
+//ust// *buf_offset = offsets.begin + offsets.before_hdr_pad;
+//ust// return 0;
+//ust// }
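+//ust//
+//ust// /*
+//ust// * Hypothetical caller sketch, not part of the original source: a
+//ust// * probe reserves a slot, fills it at the returned offset, then
+//ust// * commits it. The header-write and commit steps are elided because
+//ust// * their APIs live elsewhere in the tracer; only the reservation call
+//ust// * is taken from the code above.
+//ust// */
+//ust// static void probe_sketch(struct ust_trace *trace,
+//ust// struct ust_channel *chan, const void *payload, size_t size, int cpu)
+//ust// {
+//ust// void *transport_data;
+//ust// size_t slot_size;
+//ust// long buf_offset;
+//ust// u64 tsc;
+//ust// unsigned int rflags = 0;
+//ust//
+//ust// if (ltt_relay_reserve_slot(trace, chan, &transport_data, size,
+//ust// &slot_size, &buf_offset, &tsc, &rflags, sizeof(long), cpu))
+//ust// return; /* event lost (-EPERM) or no space (-ENOSPC) */
+//ust// /* ... write the event header and payload at buf_offset ... */
+//ust// /* ... then commit slot_size bytes (commit API not shown) ... */
+//ust// }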
+//ust//
+//ust// /*
+//ust// * Force a sub-buffer switch for a per-cpu buffer. This operation is
+//ust// * completely reentrant: it can be called while tracing is active with
+//ust// * absolutely no lock held.
+//ust// *
+//ust// * Note, however, that since a local_cmpxchg is used for some atomic
+//ust// * operations, this function must be called from the CPU which owns the
+//ust// * buffer for an ACTIVE flush.
+//ust// */
+//ust// static notrace void ltt_force_switch(struct ust_buffer *buf,
+//ust// enum force_switch_mode mode)
+//ust// {
+//ust// struct ust_channel *channel = buf->chan;
+//ust// struct ltt_reserve_switch_offsets offsets;
+//ust// u64 tsc;
+//ust//
+//ust// offsets.reserve_commit_diff = 0;
+//ust// offsets.size = 0;
+//ust//
+//ust// /*
+//ust// * Perform retryable operations.
+//ust// */
+//ust// do {
+//ust// if (ltt_relay_try_switch(mode, channel, buf, &offsets, &tsc))
+//ust// return;
+//ust// } while (local_cmpxchg(&buf->offset, offsets.old,
+//ust// offsets.end) != offsets.old);
+//ust//
+//ust// /*
+//ust// * Atomically update last_tsc. This update races against concurrent
+//ust// * atomic updates, but the race will always cause supplementary full TSC
+//ust// * events, never the opposite (missing a full TSC event when it would be
+//ust// * needed).
+//ust// */
+//ust// save_last_tsc(buf, tsc);
+//ust//
+//ust// /*
+//ust// * Push the reader if necessary
+//ust// */
+//ust// if (mode == FORCE_ACTIVE)
+//ust// ltt_reserve_push_reader(channel, buf, &offsets);
+//ust//
+//ust// /*
+//ust// * Switch old subbuffer if needed.
+//ust// */
+//ust// if (offsets.end_switch_old)
+//ust// ltt_reserve_switch_old_subbuf(channel, buf, &offsets, &tsc);
+//ust//
+//ust// /*
+//ust// * Populate new subbuffer.
+//ust// */
+//ust// if (mode == FORCE_ACTIVE)
+//ust// ltt_reserve_switch_new_subbuf(channel, buf, &offsets, &tsc);
+//ust// }
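+//ust//
+//ust// /*
+//ust// * Note on the mode gating above: a FORCE_ACTIVE switch behaves like
+//ust// * a normal writer (it pushes the reader and opens a new sub-buffer),
+//ust// * while a passive flush only closes the old sub-buffer; this is why
+//ust// * the push-reader and switch-new steps run only when
+//ust// * mode == FORCE_ACTIVE.
+//ust// */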