X-Git-Url: http://git.lttng.org/?a=blobdiff_plain;f=include%2Fringbuffer%2Ffrontend_api.h;h=b473a61b60407859366fd7965a2e548f23357a70;hb=0d49efc853b23612f434bfaa5bddf4319abe7883;hp=49f0eee27134f7e68b2f9e2613c254a7bcd4974a;hpb=8a57ec025ffbe56153748fd69b60118862707182;p=lttng-modules.git

diff --git a/include/ringbuffer/frontend_api.h b/include/ringbuffer/frontend_api.h
index 49f0eee2..b473a61b 100644
--- a/include/ringbuffer/frontend_api.h
+++ b/include/ringbuffer/frontend_api.h
@@ -14,7 +14,7 @@
 #define _LIB_RING_BUFFER_FRONTEND_API_H
 
 #include <ringbuffer/frontend.h>
-#include <wrapper/percpu-defs.h>
+#include <linux/percpu-defs.h>
 #include <linux/errno.h>
 #include <linux/prefetch.h>
 
@@ -33,7 +33,7 @@
  * section.
  */
 static inline
-int lib_ring_buffer_get_cpu(const struct lib_ring_buffer_config *config)
+int lib_ring_buffer_get_cpu(const struct lttng_kernel_ring_buffer_config *config)
 {
 	int cpu, nesting;
 
@@ -55,10 +55,10 @@ int lib_ring_buffer_get_cpu(const struct lib_ring_buffer_config *config)
  * lib_ring_buffer_put_cpu - Follows ring buffer reserve/commit.
  */
 static inline
-void lib_ring_buffer_put_cpu(const struct lib_ring_buffer_config *config)
+void lib_ring_buffer_put_cpu(const struct lttng_kernel_ring_buffer_config *config)
 {
 	barrier();
-	(*lttng_this_cpu_ptr(&lib_ring_buffer_nesting))--;
+	(*this_cpu_ptr(&lib_ring_buffer_nesting))--;
 	rcu_read_unlock_sched_notrace();
 }
 
@@ -69,14 +69,14 @@ void lib_ring_buffer_put_cpu(const struct lib_ring_buffer_config *config)
  * returns 0 if reserve ok, or 1 if the slow path must be taken.
  */
 static inline
-int lib_ring_buffer_try_reserve(const struct lib_ring_buffer_config *config,
+int lib_ring_buffer_try_reserve(const struct lttng_kernel_ring_buffer_config *config,
				struct lttng_kernel_ring_buffer_ctx *ctx,
				void *client_ctx,
				unsigned long *o_begin, unsigned long *o_end,
				unsigned long *o_old, size_t *before_hdr_pad)
 {
-	struct channel *chan = ctx->priv.chan;
-	struct lib_ring_buffer *buf = ctx->priv.buf;
+	struct lttng_kernel_ring_buffer_channel *chan = ctx->priv.chan;
+	struct lttng_kernel_ring_buffer *buf = ctx->priv.buf;
 
 	*o_begin = v_read(config, &buf->offset);
 	*o_old = *o_begin;
@@ -139,12 +139,12 @@ int lib_ring_buffer_try_reserve(const struct lib_ring_buffer_config *config,
  */
 static inline
-int lib_ring_buffer_reserve(const struct lib_ring_buffer_config *config,
+int lib_ring_buffer_reserve(const struct lttng_kernel_ring_buffer_config *config,
			    struct lttng_kernel_ring_buffer_ctx *ctx,
			    void *client_ctx)
 {
-	struct channel *chan = ctx->priv.chan;
-	struct lib_ring_buffer *buf;
+	struct lttng_kernel_ring_buffer_channel *chan = ctx->priv.chan;
+	struct lttng_kernel_ring_buffer *buf;
 	unsigned long o_begin, o_end, o_old;
 	size_t before_hdr_pad = 0;
 
@@ -211,8 +211,8 @@ slow_path:
  * disabled, for RING_BUFFER_SYNC_PER_CPU configuration.
  */
 static inline
-void lib_ring_buffer_switch(const struct lib_ring_buffer_config *config,
-			    struct lib_ring_buffer *buf, enum switch_mode mode)
+void lib_ring_buffer_switch(const struct lttng_kernel_ring_buffer_config *config,
+			    struct lttng_kernel_ring_buffer *buf, enum switch_mode mode)
 {
 	lib_ring_buffer_switch_slow(buf, mode);
 }
@@ -228,11 +228,11 @@ void lib_ring_buffer_switch(const struct lib_ring_buffer_config *config,
  * specified sub-buffer, and delivers it if necessary.
  */
 static inline
-void lib_ring_buffer_commit(const struct lib_ring_buffer_config *config,
+void lib_ring_buffer_commit(const struct lttng_kernel_ring_buffer_config *config,
			    const struct lttng_kernel_ring_buffer_ctx *ctx)
 {
-	struct channel *chan = ctx->priv.chan;
-	struct lib_ring_buffer *buf = ctx->priv.buf;
+	struct lttng_kernel_ring_buffer_channel *chan = ctx->priv.chan;
+	struct lttng_kernel_ring_buffer *buf = ctx->priv.buf;
 	unsigned long offset_end = ctx->priv.buf_offset;
 	unsigned long endidx = subbuf_index(offset_end - 1, chan);
 	unsigned long commit_count;
@@ -280,7 +280,7 @@ void lib_ring_buffer_commit(const struct lib_ring_buffer_config *config,
 	commit_count = v_read(config, &cc_hot->cc);
 	lib_ring_buffer_check_deliver(config, buf, chan, offset_end - 1,
-				      commit_count, endidx, ctx->priv.tsc);
+				      commit_count, endidx, ctx);
 	/*
	 * Update used size at each commit. It's needed only for extracting
	 * ring_buffer buffers from vmcore, after crash.
	 */
@@ -300,10 +300,10 @@
  * Returns 0 upon success, -EPERM if the record cannot be discarded.
  */
 static inline
-int lib_ring_buffer_try_discard_reserve(const struct lib_ring_buffer_config *config,
+int lib_ring_buffer_try_discard_reserve(const struct lttng_kernel_ring_buffer_config *config,
					const struct lttng_kernel_ring_buffer_ctx *ctx)
 {
-	struct lib_ring_buffer *buf = ctx->priv.buf;
+	struct lttng_kernel_ring_buffer *buf = ctx->priv.buf;
 	unsigned long end_offset = ctx->priv.pre_offset + ctx->priv.slot_size;
 
 	/*
@@ -328,29 +328,29 @@ int lib_ring_buffer_try_discard_reserve(const struct lib_ring_buffer_con
 }
 
 static inline
-void channel_record_disable(const struct lib_ring_buffer_config *config,
-			    struct channel *chan)
+void channel_record_disable(const struct lttng_kernel_ring_buffer_config *config,
+			    struct lttng_kernel_ring_buffer_channel *chan)
 {
 	atomic_inc(&chan->record_disabled);
 }
 
 static inline
-void channel_record_enable(const struct lib_ring_buffer_config *config,
-			   struct channel *chan)
+void channel_record_enable(const struct lttng_kernel_ring_buffer_config *config,
+			   struct lttng_kernel_ring_buffer_channel *chan)
 {
 	atomic_dec(&chan->record_disabled);
 }
 
 static inline
-void lib_ring_buffer_record_disable(const struct lib_ring_buffer_config *config,
-				    struct lib_ring_buffer *buf)
+void lib_ring_buffer_record_disable(const struct lttng_kernel_ring_buffer_config *config,
+				    struct lttng_kernel_ring_buffer *buf)
 {
 	atomic_inc(&buf->record_disabled);
 }
 
 static inline
-void lib_ring_buffer_record_enable(const struct lib_ring_buffer_config *config,
-				   struct lib_ring_buffer *buf)
+void lib_ring_buffer_record_enable(const struct lttng_kernel_ring_buffer_config *config,
+				   struct lttng_kernel_ring_buffer *buf)
 {
 	atomic_dec(&buf->record_disabled);
 }
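
For context, the functions whose signatures are renamed above form the write-side fast path of the ring buffer: a client pins the CPU, reserves a slot, writes the payload through the backend, then commits. The sketch below is a hypothetical client-side illustration built only from the post-rename signatures visible in this diff; the prior initialization of the ring buffer context and the lib_ring_buffer_write() backend helper are assumptions (they live in the client and backend headers, not in this patch).

/*
 * Hypothetical sketch of the reserve/commit fast path exposed by
 * frontend_api.h after the type rename. The context (ctx) is assumed to have
 * been initialized by the client (channel, record size, alignment, ...), and
 * lib_ring_buffer_write() is the payload-copy helper assumed to come from the
 * backend header.
 */
#include <ringbuffer/frontend_api.h>
#include <ringbuffer/backend.h>

static
int record_payload(const struct lttng_kernel_ring_buffer_config *config,
		   struct lttng_kernel_ring_buffer_ctx *ctx,
		   const void *payload, size_t len)
{
	int ret;

	/* Grab the current CPU and nesting count (preemption disabled). */
	ret = lib_ring_buffer_get_cpu(config);
	if (ret < 0)
		return ret;

	/* Reserve a slot; client_ctx is client-specific (none here). */
	ret = lib_ring_buffer_reserve(config, ctx, NULL);
	if (ret)
		goto put;

	/* Copy the payload into the reserved slot (backend API). */
	lib_ring_buffer_write(config, ctx, payload, len);

	/* Publish the record; delivers the sub-buffer when it fills up. */
	lib_ring_buffer_commit(config, ctx);
put:
	lib_ring_buffer_put_cpu(config);
	return ret;
}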