X-Git-Url: http://git.lttng.org/?a=blobdiff_plain;f=libringbuffer%2Fbackend_internal.h;h=fe8f6eb611ebb5b11e59d3611e2b20956652bf97;hb=fb31eb73d8a4a6d9784ed5c335b7fa3b9684108c;hp=9edd2e59f8b348bc5f8352e589d3499fcb0f476d;hpb=15500a1bb134d6bbd409da5568308c2a41291928;p=lttng-ust.git

diff --git a/libringbuffer/backend_internal.h b/libringbuffer/backend_internal.h
index 9edd2e59..fe8f6eb6 100644
--- a/libringbuffer/backend_internal.h
+++ b/libringbuffer/backend_internal.h
@@ -23,6 +23,8 @@
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
+#include 
+#include 
 #include 
 #include 
 
@@ -196,6 +198,53 @@ int subbuffer_id_check_index(const struct lttng_ust_lib_ring_buffer_config *conf
 	return 0;
 }
 
+static inline
+int lib_ring_buffer_backend_get_pages(const struct lttng_ust_lib_ring_buffer_config *config,
+		struct lttng_ust_lib_ring_buffer_ctx *ctx,
+		struct lttng_ust_lib_ring_buffer_backend_pages **backend_pages)
+{
+	struct lttng_ust_lib_ring_buffer_backend *bufb = &ctx->buf->backend;
+	struct channel_backend *chanb = &ctx->chan->backend;
+	struct lttng_ust_shm_handle *handle = ctx->handle;
+	size_t sbidx;
+	size_t offset = ctx->buf_offset;
+	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
+	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
+	unsigned long sb_bindex, id;
+	struct lttng_ust_lib_ring_buffer_backend_pages *_backend_pages;
+
+	offset &= chanb->buf_size - 1;
+	sbidx = offset >> chanb->subbuf_size_order;
+	wsb = shmp_index(handle, bufb->buf_wsb, sbidx);
+	if (caa_unlikely(!wsb))
+		return -1;
+	id = wsb->id;
+	sb_bindex = subbuffer_id_get_index(config, id);
+	rpages = shmp_index(handle, bufb->array, sb_bindex);
+	if (caa_unlikely(!rpages))
+		return -1;
+	CHAN_WARN_ON(ctx->chan,
+		     config->mode == RING_BUFFER_OVERWRITE
+		     && subbuffer_id_is_noref(config, id));
+	_backend_pages = shmp(handle, rpages->shmp);
+	if (caa_unlikely(!_backend_pages))
+		return -1;
+	*backend_pages = _backend_pages;
+	return 0;
+}
+
+/* Get backend pages from cache. */
+static inline
+struct lttng_ust_lib_ring_buffer_backend_pages *
+	lib_ring_buffer_get_backend_pages_from_ctx(const struct lttng_ust_lib_ring_buffer_config *config,
+		struct lttng_ust_lib_ring_buffer_ctx *ctx)
+{
+	if (caa_unlikely(ctx->ctx_len
+			< sizeof(struct lttng_ust_lib_ring_buffer_ctx)))
+		return NULL;
+	return ctx->backend_pages;
+}
+
 /*
  * The ring buffer can count events recorded and overwritten per buffer,
  * but it is disabled by default due to its performance overhead.
@@ -545,6 +594,28 @@ int update_read_sb_index(const struct lttng_ust_lib_ring_buffer_config *config,
 #define inline_memcpy(dest, src, n) memcpy(dest, src, n)
 #endif
 
+static inline __attribute__((always_inline))
+void lttng_inline_memcpy(void *dest, const void *src,
+		unsigned long len)
+{
+	switch (len) {
+	case 1:
+		*(uint8_t *) dest = *(const uint8_t *) src;
+		break;
+	case 2:
+		*(uint16_t *) dest = *(const uint16_t *) src;
+		break;
+	case 4:
+		*(uint32_t *) dest = *(const uint32_t *) src;
+		break;
+	case 8:
+		*(uint64_t *) dest = *(const uint64_t *) src;
+		break;
+	default:
+		inline_memcpy(dest, src, len);
+	}
+}
+
 /*
  * Use the architecture-specific memcpy implementation for constant-sized
  * inputs, but rely on an inline memcpy for length statically unknown.
@@ -556,7 +627,7 @@ do { \
 	if (__builtin_constant_p(len)) \
 		memcpy(dest, src, __len); \
 	else \
-		inline_memcpy(dest, src, __len); \
+		lttng_inline_memcpy(dest, src, __len); \
 } while (0)
 
 /*
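
Side note (not part of the patch above): a minimal standalone sketch of the size-dispatch idea behind the new lttng_inline_memcpy. Small power-of-two lengths become single fixed-size loads/stores that the compiler can keep inline, while any other length falls back to the regular memcpy. The sketch_inline_copy helper and the main function are illustrative names only, not lttng-ust API.

	/* Standalone illustration of fixed-size copy dispatch; compile with any C99 compiler. */
	#include <stdint.h>
	#include <string.h>
	#include <stdio.h>

	static inline void
	sketch_inline_copy(void *dest, const void *src, unsigned long len)
	{
		switch (len) {
		case 1:	*(uint8_t *) dest = *(const uint8_t *) src; break;
		case 2:	*(uint16_t *) dest = *(const uint16_t *) src; break;
		case 4:	*(uint32_t *) dest = *(const uint32_t *) src; break;
		case 8:	*(uint64_t *) dest = *(const uint64_t *) src; break;
		default: memcpy(dest, src, len);	/* arbitrary lengths take the generic path */
		}
	}

	int main(void)
	{
		uint32_t src = 0xdeadbeef, dst = 0;

		sketch_inline_copy(&dst, &src, sizeof(src));	/* hits the 4-byte case */
		printf("0x%x\n", dst);
		return 0;
	}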