unsigned long consumed_cur, write_offset;
int finalized;
- /*
- * First, ensure we perform a "final" flush onto the stream. This will
- * ensure we create a packet of padding if we encounter an empty
- * packet. This ensures the time-stamps right before the snapshot is
- * used as end of packet timestamp.
- */
- if (!buf->quiescent)
- _lib_ring_buffer_switch_remote(buf, SWITCH_FLUSH);
-
retry:
finalized = ACCESS_ONCE(buf->finalized);
/*
/*
* lib_ring_buffer_switch_old_start: Populate old subbuffer header.
*
- * Only executed by SWITCH_FLUSH, which can be issued while tracing is active
- * or at buffer finalization (destroy).
+ * Only executed when the buffer is finalized, in SWITCH_FLUSH.
*/
static
void lib_ring_buffer_switch_old_start(struct lib_ring_buffer *buf,
unsigned long sb_index, commit_count;
/*
- * We are performing a SWITCH_FLUSH. There may be concurrent
- * writes into the buffer if e.g. invoked while performing a
- * snapshot on an active trace.
+ * We are performing a SWITCH_FLUSH. At this stage, there are no
+ * concurrent writes into the buffer.
*
- * If the client does not save any header information (sub-buffer
- * header size == 0), don't switch empty subbuffer on finalize,
- * because it is invalid to deliver a completely empty
- * subbuffer.
+ * If the client does not save any header information, don't
+ * switch an empty subbuffer on finalize, because it is invalid
+ * to deliver a completely empty subbuffer.
*/
if (!config->cb.subbuffer_header_size())
return -1;
put_online_cpus();
}
+/* Remotely switch the current sub-buffer only if it holds data (SWITCH_ACTIVE); an empty sub-buffer is left in place. */
void lib_ring_buffer_switch_remote(struct lib_ring_buffer *buf)
{
_lib_ring_buffer_switch_remote(buf, SWITCH_ACTIVE);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_switch_remote);
+/* Remotely switch the current sub-buffer even if it is empty (SWITCH_FLUSH); presumably delivered as a padding-only packet — see SWITCH_FLUSH semantics. */
+void lib_ring_buffer_switch_remote_empty(struct lib_ring_buffer *buf)
+{
+ _lib_ring_buffer_switch_remote(buf, SWITCH_FLUSH);
+}
+EXPORT_SYMBOL_GPL(lib_ring_buffer_switch_remote_empty);
+
/*
* Returns :
* 0 if ok
int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
struct channel *chan,
struct switch_offsets *offsets,
- struct lib_ring_buffer_ctx *ctx)
+ struct lib_ring_buffer_ctx *ctx,
+ void *client_ctx)
{
const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned long reserve_commit_diff, offset_cmp;
offsets->size = config->cb.record_header_size(config, chan,
offsets->begin,
&offsets->pre_header_padding,
- ctx);
+ ctx, client_ctx);
offsets->size +=
lib_ring_buffer_align(offsets->begin + offsets->size,
ctx->largest_align)
config->cb.record_header_size(config, chan,
offsets->begin,
&offsets->pre_header_padding,
- ctx);
+ ctx, client_ctx);
offsets->size +=
lib_ring_buffer_align(offsets->begin + offsets->size,
ctx->largest_align)
* -EIO for other errors, else returns 0.
* It will take care of sub-buffer switching.
*/
-int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx)
+int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx,
+ void *client_ctx)
{
struct channel *chan = ctx->chan;
const struct lib_ring_buffer_config *config = &chan->backend.config;
do {
ret = lib_ring_buffer_try_reserve_slow(buf, chan, &offsets,
- ctx);
+ ctx, client_ctx);
if (unlikely(ret))
return ret;
} while (unlikely(v_cmpxchg(config, &buf->offset, offsets.old,