X-Git-Url: http://git.lttng.org/?a=blobdiff_plain;f=libringbuffer%2Ffrontend_api.h;h=24f94acf110b5f663443207d3711e3bbe323fa39;hb=4318ae1be57eb7983ab4857a7a8eeb4a030a8216;hp=f570cc168174de626d134998571120e8e10a81a7;hpb=4746ae29409b78e96543a3b207c91a3c510c6476;p=lttng-ust.git

diff --git a/libringbuffer/frontend_api.h b/libringbuffer/frontend_api.h
index f570cc16..24f94acf 100644
--- a/libringbuffer/frontend_api.h
+++ b/libringbuffer/frontend_api.h
@@ -18,7 +18,7 @@
  */
 
 #include "frontend.h"
-#include "ust/core.h"
+#include "lttng/core.h"
 #include <urcu-bp.h>
 #include <urcu/compiler.h>
@@ -37,7 +37,7 @@
  * section.
  */
 static inline
-int lib_ring_buffer_get_cpu(const struct lib_ring_buffer_config *config)
+int lib_ring_buffer_get_cpu(const struct lttng_ust_lib_ring_buffer_config *config)
 {
 	int cpu, nesting;
 
@@ -46,7 +46,7 @@ int lib_ring_buffer_get_cpu(const struct lib_ring_buffer_config *config)
 	nesting = ++lib_ring_buffer_nesting;	/* TLS */
 	cmm_barrier();
 
-	if (unlikely(nesting > 4)) {
+	if (caa_unlikely(nesting > 4)) {
 		WARN_ON_ONCE(1);
 		lib_ring_buffer_nesting--;	/* TLS */
 		rcu_read_unlock();
@@ -59,7 +59,7 @@ int lib_ring_buffer_get_cpu(const struct lib_ring_buffer_config *config)
  * lib_ring_buffer_put_cpu - Follows ring buffer reserve/commit.
  */
 static inline
-void lib_ring_buffer_put_cpu(const struct lib_ring_buffer_config *config)
+void lib_ring_buffer_put_cpu(const struct lttng_ust_lib_ring_buffer_config *config)
 {
 	cmm_barrier();
 	lib_ring_buffer_nesting--;	/* TLS */
@@ -73,13 +73,13 @@ void lib_ring_buffer_put_cpu(const struct lib_ring_buffer_config *config)
  * returns 0 if reserve ok, or 1 if the slow path must be taken.
  */
 static inline
-int lib_ring_buffer_try_reserve(const struct lib_ring_buffer_config *config,
-				struct lib_ring_buffer_ctx *ctx,
+int lib_ring_buffer_try_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
+				struct lttng_ust_lib_ring_buffer_ctx *ctx,
 				unsigned long *o_begin, unsigned long *o_end,
 				unsigned long *o_old, size_t *before_hdr_pad)
 {
 	struct channel *chan = ctx->chan;
-	struct lib_ring_buffer *buf = ctx->buf;
+	struct lttng_ust_lib_ring_buffer *buf = ctx->buf;
 
 	*o_begin = v_read(config, &buf->offset);
 	*o_old = *o_begin;
@@ -97,7 +97,7 @@ int lib_ring_buffer_try_reserve(const struct lib_ring_buffer_config *config,
 	if (last_tsc_overflow(config, buf, ctx->tsc))
 		ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
 
-	if (unlikely(subbuf_offset(*o_begin, chan) == 0))
+	if (caa_unlikely(subbuf_offset(*o_begin, chan) == 0))
 		return 1;
 
 	ctx->slot_size = record_header_size(config, chan, *o_begin,
@@ -105,7 +105,7 @@ int lib_ring_buffer_try_reserve(const struct lib_ring_buffer_config *config,
 	ctx->slot_size += lib_ring_buffer_align(*o_begin + ctx->slot_size,
 						ctx->largest_align) + ctx->data_size;
 
-	if (unlikely((subbuf_offset(*o_begin, chan) + ctx->slot_size)
+	if (caa_unlikely((subbuf_offset(*o_begin, chan) + ctx->slot_size)
 		     > chan->backend.subbuf_size))
 		return 1;
 
@@ -115,7 +115,7 @@ int lib_ring_buffer_try_reserve(const struct lib_ring_buffer_config *config,
 	 */
 	*o_end = *o_begin + ctx->slot_size;
 
-	if (unlikely((subbuf_offset(*o_end, chan)) == 0))
+	if (caa_unlikely((subbuf_offset(*o_end, chan)) == 0))
 		/*
 		 * The offset_end will fall at the very beginning of the next
 		 * subbuffer.
@@ -142,12 +142,12 @@ int lib_ring_buffer_try_reserve(const struct lib_ring_buffer_config *config,
  */
 static inline
-int lib_ring_buffer_reserve(const struct lib_ring_buffer_config *config,
-			    struct lib_ring_buffer_ctx *ctx)
+int lib_ring_buffer_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
+			    struct lttng_ust_lib_ring_buffer_ctx *ctx)
 {
 	struct channel *chan = ctx->chan;
-	struct shm_handle *handle = ctx->handle;
-	struct lib_ring_buffer *buf;
+	struct lttng_ust_shm_handle *handle = ctx->handle;
+	struct lttng_ust_lib_ring_buffer *buf;
 	unsigned long o_begin, o_end, o_old;
 	size_t before_hdr_pad = 0;
 
@@ -165,11 +165,11 @@ int lib_ring_buffer_reserve(const struct lib_ring_buffer_config *config,
 	/*
 	 * Perform retryable operations.
 	 */
-	if (unlikely(lib_ring_buffer_try_reserve(config, ctx, &o_begin,
+	if (caa_unlikely(lib_ring_buffer_try_reserve(config, ctx, &o_begin,
 		    &o_end, &o_old, &before_hdr_pad)))
 		goto slow_path;
 
-	if (unlikely(v_cmpxchg(config, &ctx->buf->offset, o_old, o_end)
+	if (caa_unlikely(v_cmpxchg(config, &ctx->buf->offset, o_old, o_end)
 		     != o_old))
 		goto slow_path;
 
@@ -214,9 +214,9 @@ slow_path:
  * disabled, for RING_BUFFER_SYNC_PER_CPU configuration.
  */
 static inline
-void lib_ring_buffer_switch(const struct lib_ring_buffer_config *config,
-			    struct lib_ring_buffer *buf, enum switch_mode mode,
-			    struct shm_handle *handle)
+void lib_ring_buffer_switch(const struct lttng_ust_lib_ring_buffer_config *config,
+			    struct lttng_ust_lib_ring_buffer *buf, enum switch_mode mode,
+			    struct lttng_ust_shm_handle *handle)
 {
 	lib_ring_buffer_switch_slow(buf, mode, handle);
 }
@@ -232,12 +232,12 @@ void lib_ring_buffer_switch(const struct lib_ring_buffer_config *config,
  * specified sub-buffer, and delivers it if necessary.
  */
 static inline
-void lib_ring_buffer_commit(const struct lib_ring_buffer_config *config,
-			    const struct lib_ring_buffer_ctx *ctx)
+void lib_ring_buffer_commit(const struct lttng_ust_lib_ring_buffer_config *config,
+			    const struct lttng_ust_lib_ring_buffer_ctx *ctx)
 {
 	struct channel *chan = ctx->chan;
-	struct shm_handle *handle = ctx->handle;
-	struct lib_ring_buffer *buf = ctx->buf;
+	struct lttng_ust_shm_handle *handle = ctx->handle;
+	struct lttng_ust_lib_ring_buffer *buf = ctx->buf;
 	unsigned long offset_end = ctx->buf_offset;
 	unsigned long endidx = subbuf_index(offset_end - 1, chan);
 	unsigned long commit_count;
@@ -297,10 +297,10 @@ void lib_ring_buffer_commit(const struct lib_ring_buffer_config *config,
  * Returns 0 upon success, -EPERM if the record cannot be discarded.
  */
 static inline
-int lib_ring_buffer_try_discard_reserve(const struct lib_ring_buffer_config *config,
-					const struct lib_ring_buffer_ctx *ctx)
+int lib_ring_buffer_try_discard_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
+					const struct lttng_ust_lib_ring_buffer_ctx *ctx)
 {
-	struct lib_ring_buffer *buf = ctx->buf;
+	struct lttng_ust_lib_ring_buffer *buf = ctx->buf;
 	unsigned long end_offset = ctx->pre_offset + ctx->slot_size;
 
 	/*
@@ -317,7 +317,7 @@ int lib_ring_buffer_try_discard_reserve(const struct lib_ring_buffer_config *con
 	 */
 	save_last_tsc(config, buf, 0ULL);
 
-	if (likely(v_cmpxchg(config, &buf->offset, end_offset, ctx->pre_offset)
+	if (caa_likely(v_cmpxchg(config, &buf->offset, end_offset, ctx->pre_offset)
 		   != end_offset))
 		return -EPERM;
 	else
@@ -325,29 +325,29 @@ int lib_ring_buffer_try_discard_reserve(const struct lib_ring_buffer_config *con
 }
 
 static inline
-void channel_record_disable(const struct lib_ring_buffer_config *config,
+void channel_record_disable(const struct lttng_ust_lib_ring_buffer_config *config,
 			    struct channel *chan)
 {
 	uatomic_inc(&chan->record_disabled);
 }
 
 static inline
-void channel_record_enable(const struct lib_ring_buffer_config *config,
+void channel_record_enable(const struct lttng_ust_lib_ring_buffer_config *config,
 			   struct channel *chan)
 {
 	uatomic_dec(&chan->record_disabled);
 }
 
 static inline
-void lib_ring_buffer_record_disable(const struct lib_ring_buffer_config *config,
-				    struct lib_ring_buffer *buf)
+void lib_ring_buffer_record_disable(const struct lttng_ust_lib_ring_buffer_config *config,
+				    struct lttng_ust_lib_ring_buffer *buf)
 {
 	uatomic_inc(&buf->record_disabled);
 }
 
 static inline
-void lib_ring_buffer_record_enable(const struct lib_ring_buffer_config *config,
-				   struct lib_ring_buffer *buf)
+void lib_ring_buffer_record_enable(const struct lttng_ust_lib_ring_buffer_config *config,
+				   struct lttng_ust_lib_ring_buffer *buf)
 {
 	uatomic_dec(&buf->record_disabled);
 }
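
Note: the call discipline around the renamed functions is unchanged by this patch: pin the CPU, initialize a context, reserve, write, commit, unpin. The sketch below is a minimal illustration of that probe-side write path with the new lttng_ust_-prefixed type names. It assumes an in-tree build where "frontend_api.h" and "backend.h" are on the include path and that the lib_ring_buffer_ctx_init() and lib_ring_buffer_write() helpers from the same library are available; client_config, chan, handle and the payload are hypothetical placeholders that a real ring-buffer client would supply, not part of this patch.

/*
 * Minimal sketch of a write path using the renamed types (assumption:
 * built inside the lttng-ust tree; all parameters below are hypothetical
 * placeholders supplied by a real client).
 */
#include "frontend_api.h"
#include "backend.h"

static
void write_event_sketch(const struct lttng_ust_lib_ring_buffer_config *client_config,
			struct channel *chan,
			struct lttng_ust_shm_handle *handle,
			const void *payload, size_t len)
{
	struct lttng_ust_lib_ring_buffer_ctx ctx;
	int cpu;

	/* Pin the CPU; this also enters the RCU read-side critical section. */
	cpu = lib_ring_buffer_get_cpu(client_config);
	if (cpu < 0)
		return;			/* nesting limit exceeded: drop event */

	/* Describe the record: channel, private data, size, alignment, cpu. */
	lib_ring_buffer_ctx_init(&ctx, chan, NULL, len,
				 1 /* largest_align */, cpu, handle);

	/*
	 * Fast-path reservation; lib_ring_buffer_reserve() falls back to the
	 * slow path (sub-buffer switch) internally when needed.
	 */
	if (lib_ring_buffer_reserve(client_config, &ctx))
		goto put;		/* channel disabled or no space */

	/* Copy the payload into the reserved slot, then commit it. */
	lib_ring_buffer_write(client_config, &ctx, payload, len);
	lib_ring_buffer_commit(client_config, &ctx);
put:
	lib_ring_buffer_put_cpu(client_config);
}

Bracketing the whole sequence with lib_ring_buffer_get_cpu()/lib_ring_buffer_put_cpu() is what keeps the per-thread nesting count and the RCU read-side critical section balanced, which is why the error paths above fall through to the put before returning.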