* section.
*/
static inline
-int lib_ring_buffer_get_cpu(const struct lib_ring_buffer_config *config)
+int lib_ring_buffer_get_cpu(const struct lttng_kernel_ring_buffer_config *config)
{
int cpu, nesting;
* lib_ring_buffer_put_cpu - Follows ring buffer reserve/commit.
*/
static inline
-void lib_ring_buffer_put_cpu(const struct lib_ring_buffer_config *config)
+void lib_ring_buffer_put_cpu(const struct lttng_kernel_ring_buffer_config *config)
{
barrier();
(*lttng_this_cpu_ptr(&lib_ring_buffer_nesting))--;
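A hedged usage sketch (not part of the patch): how a client write path is expected to bracket a reservation with these two helpers after the rename. client_config and client_trace_payload() are hypothetical placeholders for a client-defined configuration and write function; only the calls and fields visible in this header are assumed.

/* Hypothetical client-defined configuration (placeholder for this sketch). */
static const struct lttng_kernel_ring_buffer_config client_config;

static int client_trace_payload(struct lttng_kernel_ring_buffer_ctx *ctx)
{
	int cpu;

	cpu = lib_ring_buffer_get_cpu(&client_config);
	if (unlikely(cpu < 0))
		return -EPERM;	/* nesting limit reached: drop the record */
	ctx->priv.reserve_cpu = cpu;

	/* ... lib_ring_buffer_reserve() / payload write / commit go here ... */

	lib_ring_buffer_put_cpu(&client_config);
	return 0;
}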
* returns 0 if reserve ok, or 1 if the slow path must be taken.
*/
static inline
-int lib_ring_buffer_try_reserve(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer_ctx *ctx,
+int lib_ring_buffer_try_reserve(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer_ctx *ctx,
void *client_ctx,
unsigned long *o_begin, unsigned long *o_end,
unsigned long *o_old, size_t *before_hdr_pad)
{
- struct channel *chan = ctx->chan;
- struct lib_ring_buffer *buf = ctx->buf;
+ struct lttng_kernel_ring_buffer_channel *chan = ctx->priv.chan;
+ struct lttng_kernel_ring_buffer *buf = ctx->priv.buf;
*o_begin = v_read(config, &buf->offset);
*o_old = *o_begin;
- ctx->tsc = lib_ring_buffer_clock_read(chan);
- if ((int64_t) ctx->tsc == -EIO)
+ ctx->priv.tsc = lib_ring_buffer_clock_read(chan);
+ if ((int64_t) ctx->priv.tsc == -EIO)
return 1;
/*
*/
prefetch(&buf->commit_hot[subbuf_index(*o_begin, chan)]);
- if (last_tsc_overflow(config, buf, ctx->tsc))
- ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
+ if (last_tsc_overflow(config, buf, ctx->priv.tsc))
+ ctx->priv.rflags |= RING_BUFFER_RFLAG_FULL_TSC;
if (unlikely(subbuf_offset(*o_begin, chan) == 0))
return 1;
- ctx->slot_size = record_header_size(config, chan, *o_begin,
+ ctx->priv.slot_size = record_header_size(config, chan, *o_begin,
before_hdr_pad, ctx, client_ctx);
- ctx->slot_size +=
- lib_ring_buffer_align(*o_begin + ctx->slot_size,
+ ctx->priv.slot_size +=
+ lib_ring_buffer_align(*o_begin + ctx->priv.slot_size,
ctx->largest_align) + ctx->data_size;
- if (unlikely((subbuf_offset(*o_begin, chan) + ctx->slot_size)
+ if (unlikely((subbuf_offset(*o_begin, chan) + ctx->priv.slot_size)
> chan->backend.subbuf_size))
return 1;
* Record fits in the current buffer and we are not on a switch
* boundary. It's safe to write.
*/
- *o_end = *o_begin + ctx->slot_size;
+ *o_end = *o_begin + ctx->priv.slot_size;
if (unlikely((subbuf_offset(*o_end, chan)) == 0))
/*
*/
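As a worked example of the fast-path size computation above (values are illustrative): with *o_begin == 4100, a 12-byte record header, before_hdr_pad == 0, ctx->largest_align == 8 and ctx->data_size == 32, slot_size starts at 12; 4100 + 12 == 4112 is already 8-byte aligned, so lib_ring_buffer_align() contributes 0 bytes of padding and slot_size becomes 12 + 0 + 32 == 44, giving *o_end == 4144. The reservation stays on the fast path as long as subbuf_offset(4100, chan) + 44 does not exceed chan->backend.subbuf_size and *o_end does not land exactly on a sub-buffer boundary.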
static inline
-int lib_ring_buffer_reserve(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer_ctx *ctx,
+int lib_ring_buffer_reserve(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer_ctx *ctx,
void *client_ctx)
{
- struct channel *chan = ctx->chan;
- struct lib_ring_buffer *buf;
+ struct lttng_kernel_ring_buffer_channel *chan = ctx->priv.chan;
+ struct lttng_kernel_ring_buffer *buf;
unsigned long o_begin, o_end, o_old;
size_t before_hdr_pad = 0;
return -EAGAIN;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
- buf = per_cpu_ptr(chan->backend.buf, ctx->cpu);
+ buf = per_cpu_ptr(chan->backend.buf, ctx->priv.reserve_cpu);
else
buf = chan->backend.buf;
if (unlikely(atomic_read(&buf->record_disabled)))
return -EAGAIN;
- ctx->buf = buf;
+ ctx->priv.buf = buf;
/*
* Perform retryable operations.
&o_end, &o_old, &before_hdr_pad)))
goto slow_path;
- if (unlikely(v_cmpxchg(config, &ctx->buf->offset, o_old, o_end)
+ if (unlikely(v_cmpxchg(config, &ctx->priv.buf->offset, o_old, o_end)
!= o_old))
goto slow_path;
* record headers, never the opposite (missing a full TSC record header
* when it would be needed).
*/
- save_last_tsc(config, ctx->buf, ctx->tsc);
+ save_last_tsc(config, ctx->priv.buf, ctx->priv.tsc);
/*
* Push the reader if necessary
*/
- lib_ring_buffer_reserve_push_reader(ctx->buf, chan, o_end - 1);
+ lib_ring_buffer_reserve_push_reader(ctx->priv.buf, chan, o_end - 1);
/*
* Clear noref flag for this subbuffer.
*/
- lib_ring_buffer_clear_noref(config, &ctx->buf->backend,
+ lib_ring_buffer_clear_noref(config, &ctx->priv.buf->backend,
subbuf_index(o_end - 1, chan));
- ctx->pre_offset = o_begin;
- ctx->buf_offset = o_begin + before_hdr_pad;
+ ctx->priv.pre_offset = o_begin;
+ ctx->priv.buf_offset = o_begin + before_hdr_pad;
return 0;
slow_path:
return lib_ring_buffer_reserve_slow(ctx, client_ctx);
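A hedged end-to-end sketch of the updated fast path as a client would drive it, reusing the hypothetical client_config above. The payload copy uses lib_ring_buffer_write() from the ring buffer backend, which is assumed to copy at ctx->priv.buf_offset and advance it; client-specific record-header writing and the real client_ctx are omitted.

static int client_write_record(struct lttng_kernel_ring_buffer_ctx *ctx,
			       const void *payload, size_t len)
{
	int ret;

	/* client_ctx is left NULL in this sketch; real clients pass their own. */
	ret = lib_ring_buffer_reserve(&client_config, ctx, NULL);
	if (ret)
		return ret;	/* -EAGAIN when disabled, or slow-path error */

	lib_ring_buffer_write(&client_config, ctx, payload, len);

	lib_ring_buffer_commit(&client_config, ctx);
	return 0;
}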
* disabled, for RING_BUFFER_SYNC_PER_CPU configuration.
*/
static inline
-void lib_ring_buffer_switch(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf, enum switch_mode mode)
+void lib_ring_buffer_switch(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf, enum switch_mode mode)
{
lib_ring_buffer_switch_slow(buf, mode);
}
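For illustration, a hedged sketch of forcing a sub-buffer switch, e.g. to push a partially filled sub-buffer out to the consumer. SWITCH_FLUSH and SWITCH_ACTIVE are the existing enum switch_mode values; the buffer pointer comes from wherever the client already holds it, and client_config is the placeholder introduced above.

static void client_flush_buffer(struct lttng_kernel_ring_buffer *buf)
{
	/* SWITCH_FLUSH also pushes out an incompletely filled sub-buffer. */
	lib_ring_buffer_switch(&client_config, buf, SWITCH_FLUSH);
}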
* specified sub-buffer, and delivers it if necessary.
*/
static inline
-void lib_ring_buffer_commit(const struct lib_ring_buffer_config *config,
- const struct lib_ring_buffer_ctx *ctx)
+void lib_ring_buffer_commit(const struct lttng_kernel_ring_buffer_config *config,
+ const struct lttng_kernel_ring_buffer_ctx *ctx)
{
- struct channel *chan = ctx->chan;
- struct lib_ring_buffer *buf = ctx->buf;
- unsigned long offset_end = ctx->buf_offset;
+ struct lttng_kernel_ring_buffer_channel *chan = ctx->priv.chan;
+ struct lttng_kernel_ring_buffer *buf = ctx->priv.buf;
+ unsigned long offset_end = ctx->priv.buf_offset;
unsigned long endidx = subbuf_index(offset_end - 1, chan);
unsigned long commit_count;
struct commit_counters_hot *cc_hot = &buf->commit_hot[endidx];
} else
smp_wmb();
- v_add(config, ctx->slot_size, &cc_hot->cc);
+ v_add(config, ctx->priv.slot_size, &cc_hot->cc);
/*
* commit count read can race with concurrent OOO commit count updates.
commit_count = v_read(config, &cc_hot->cc);
lib_ring_buffer_check_deliver(config, buf, chan, offset_end - 1,
- commit_count, endidx, ctx->tsc);
+ commit_count, endidx, ctx);
/*
* Update used size at each commit. It's needed only for extracting
* ring_buffer buffers from vmcore, after crash.
* Returns 0 upon success, -EPERM if the record cannot be discarded.
*/
static inline
-int lib_ring_buffer_try_discard_reserve(const struct lib_ring_buffer_config *config,
- const struct lib_ring_buffer_ctx *ctx)
+int lib_ring_buffer_try_discard_reserve(const struct lttng_kernel_ring_buffer_config *config,
+ const struct lttng_kernel_ring_buffer_ctx *ctx)
{
- struct lib_ring_buffer *buf = ctx->buf;
- unsigned long end_offset = ctx->pre_offset + ctx->slot_size;
+ struct lttng_kernel_ring_buffer *buf = ctx->priv.buf;
+ unsigned long end_offset = ctx->priv.pre_offset + ctx->priv.slot_size;
/*
* We need to ensure that if the cmpxchg succeeds and discards the
*/
save_last_tsc(config, buf, 0ULL);
- if (likely(v_cmpxchg(config, &buf->offset, end_offset, ctx->pre_offset)
+ if (likely(v_cmpxchg(config, &buf->offset, end_offset, ctx->priv.pre_offset)
!= end_offset))
return -EPERM;
else
}
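A hedged sketch of the discard path: when a record cannot be populated after a successful reserve, the client can try to hand the reserved slot back. If -EPERM is returned because buf->offset has already moved on, the interpretation assumed here is that the slot must still be committed so the commit counter catches up with the reserved space.

static void client_abort_record(struct lttng_kernel_ring_buffer_ctx *ctx)
{
	/* Either the reservation is undone, or it still has to be committed. */
	if (lib_ring_buffer_try_discard_reserve(&client_config, ctx))
		lib_ring_buffer_commit(&client_config, ctx);
}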
static inline
-void channel_record_disable(const struct lib_ring_buffer_config *config,
- struct channel *chan)
+void channel_record_disable(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer_channel *chan)
{
atomic_inc(&chan->record_disabled);
}
static inline
-void channel_record_enable(const struct lib_ring_buffer_config *config,
- struct channel *chan)
+void channel_record_enable(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer_channel *chan)
{
atomic_dec(&chan->record_disabled);
}
static inline
-void lib_ring_buffer_record_disable(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf)
+void lib_ring_buffer_record_disable(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf)
{
atomic_inc(&buf->record_disabled);
}
static inline
-void lib_ring_buffer_record_enable(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf)
+void lib_ring_buffer_record_enable(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf)
{
atomic_dec(&buf->record_disabled);
}
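Finally, a hedged sketch of the enable/disable counters: both the channel-wide and per-buffer variants are plain atomic counters, so calls nest and recording resumes only once every disable has been paired with an enable. chan and buf are whatever channel and buffer the client already holds; client_config is the placeholder from the earlier sketches.

static void client_pause_tracing(struct lttng_kernel_ring_buffer_channel *chan,
				 struct lttng_kernel_ring_buffer *buf)
{
	channel_record_disable(&client_config, chan);		/* chan->record_disabled++ */
	lib_ring_buffer_record_disable(&client_config, buf);	/* buf->record_disabled++ */

	/* ... reservations fail with -EAGAIN while either count is non-zero ... */

	lib_ring_buffer_record_enable(&client_config, buf);
	channel_record_enable(&client_config, chan);
}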