X-Git-Url: http://git.lttng.org/?a=blobdiff_plain;f=libringbuffer%2Fbackend.h;h=98c194ca5bf988603646071b778ac936b1add00d;hb=c0c0989ab70574e09b2f7e8b48c2da6af664a849;hp=83e07e426b3de3458a995bcfa953e0e33ee9d1fb;hpb=aead202565d65894df2899a1c6095ffde1f1a779;p=lttng-ust.git

diff --git a/libringbuffer/backend.h b/libringbuffer/backend.h
index 83e07e42..98c194ca 100644
--- a/libringbuffer/backend.h
+++ b/libringbuffer/backend.h
@@ -1,22 +1,19 @@
-#ifndef _LINUX_RING_BUFFER_BACKEND_H
-#define _LINUX_RING_BUFFER_BACKEND_H
-
 /*
- * linux/ringbuffer/backend.h
+ * SPDX-License-Identifier: LGPL-2.1-only
  *
- * Copyright (C) 2008-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
  *
  * Ring buffer backend (API).
  *
- * Dual LGPL v2.1/GPL v2 license.
- *
  * Credits to Steven Rostedt for proposing to use an extra-subbuffer owned by
  * the reader in flight recorder mode.
  */
 
-#include <unistd.h>
+#ifndef _LTTNG_RING_BUFFER_BACKEND_H
+#define _LTTNG_RING_BUFFER_BACKEND_H
 
-#include "ust/core.h"
+#include <stddef.h>
+#include <unistd.h>
 
 /* Internal helpers */
 #include "backend_internal.h"
@@ -26,13 +23,13 @@
 
 /* Ring buffer backend access (read/write) */
 
-extern size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb,
+extern size_t lib_ring_buffer_read(struct lttng_ust_lib_ring_buffer_backend *bufb,
 			size_t offset, void *dest, size_t len,
-			struct shm_handle *handle);
+			struct lttng_ust_shm_handle *handle);
 
-extern int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb,
+extern int lib_ring_buffer_read_cstr(struct lttng_ust_lib_ring_buffer_backend *bufb,
 			size_t offset, void *dest, size_t len,
-			struct shm_handle *handle);
+			struct lttng_ust_shm_handle *handle);
 
 /*
  * Return the address where a given offset is located.
@@ -41,13 +38,13 @@ extern int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb,
  * as long as the write is never bigger than a page size.
  */
 extern void *
-lib_ring_buffer_offset_address(struct lib_ring_buffer_backend *bufb,
+lib_ring_buffer_offset_address(struct lttng_ust_lib_ring_buffer_backend *bufb,
 			size_t offset,
-			struct shm_handle *handle);
+			struct lttng_ust_shm_handle *handle);
 
 extern void *
-lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
+lib_ring_buffer_read_offset_address(struct lttng_ust_lib_ring_buffer_backend *bufb,
 			size_t offset,
-			struct shm_handle *handle);
+			struct lttng_ust_shm_handle *handle);
 
 /**
  * lib_ring_buffer_write - write data to a buffer backend
@@ -61,35 +58,122 @@ lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
  * backend-specific memcpy() operation. Calls the slow path (_ring_buffer_write)
  * if copy is crossing a page boundary.
  */
-static inline
-void lib_ring_buffer_write(const struct lib_ring_buffer_config *config,
-			   struct lib_ring_buffer_ctx *ctx,
+static inline __attribute__((always_inline))
+void lib_ring_buffer_write(const struct lttng_ust_lib_ring_buffer_config *config,
+			   struct lttng_ust_lib_ring_buffer_ctx *ctx,
 			   const void *src, size_t len)
 {
-	struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
 	struct channel_backend *chanb = &ctx->chan->backend;
-	struct shm_handle *handle = ctx->handle;
-	size_t sbidx;
+	struct lttng_ust_shm_handle *handle = ctx->handle;
 	size_t offset = ctx->buf_offset;
-	struct lib_ring_buffer_backend_pages_shmp *rpages;
-	unsigned long sb_bindex, id;
-
-	offset &= chanb->buf_size - 1;
-	sbidx = offset >> chanb->subbuf_size_order;
-	id = shmp_index(handle, bufb->buf_wsb, sbidx)->id;
-	sb_bindex = subbuffer_id_get_index(config, id);
-	rpages = shmp_index(handle, bufb->array, sb_bindex);
-	CHAN_WARN_ON(ctx->chan,
-		     config->mode == RING_BUFFER_OVERWRITE
-		     && subbuffer_id_is_noref(config, id));
+	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+	void *p;
+
+	if (caa_unlikely(!len))
+		return;
 	/*
 	 * Underlying layer should never ask for writes across
 	 * subbuffers.
 	 */
-	CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
-	lib_ring_buffer_do_copy(config,
-				shmp_index(handle, shmp(handle, rpages->shmp)->p, offset & (chanb->subbuf_size - 1)),
-				src, len);
+	CHAN_WARN_ON(chanb, (offset & (chanb->buf_size - 1)) + len > chanb->buf_size);
+	backend_pages = lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
+	if (caa_unlikely(!backend_pages)) {
+		if (lib_ring_buffer_backend_get_pages(config, ctx, &backend_pages))
+			return;
+	}
+	p = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
+	if (caa_unlikely(!p))
+		return;
+	lib_ring_buffer_do_copy(config, p, src, len);
+	ctx->buf_offset += len;
+}
+
+/*
+ * Copy up to @len string bytes from @src to @dest. Stop whenever a NULL
+ * terminating character is found in @src. Returns the number of bytes
+ * copied. Does *not* terminate @dest with NULL terminating character.
+ */
+static inline __attribute__((always_inline))
+size_t lib_ring_buffer_do_strcpy(const struct lttng_ust_lib_ring_buffer_config *config,
+		char *dest, const char *src, size_t len)
+{
+	size_t count;
+
+	for (count = 0; count < len; count++) {
+		char c;
+
+		/*
+		 * Only read source character once, in case it is
+		 * modified concurrently.
+		 */
+		c = CMM_LOAD_SHARED(src[count]);
+		if (!c)
+			break;
+		lib_ring_buffer_do_copy(config, &dest[count], &c, 1);
+	}
+	return count;
+}
+
+/**
+ * lib_ring_buffer_strcpy - write string data to a buffer backend
+ * @config : ring buffer instance configuration
+ * @ctx: ring buffer context. (input arguments only)
+ * @src : source pointer to copy from
+ * @len : length of data to copy
+ * @pad : character to use for padding
+ *
+ * This function copies @len - 1 bytes of string data from a source
+ * pointer to a buffer backend, followed by a terminating '\0'
+ * character, at the current context offset. This is more or less a
+ * buffer backend-specific strncpy() operation. If a terminating '\0'
+ * character is found in @src before @len - 1 characters are copied, pad
+ * the buffer with @pad characters (e.g. '#').
+ */
+static inline __attribute__((always_inline))
+void lib_ring_buffer_strcpy(const struct lttng_ust_lib_ring_buffer_config *config,
+			    struct lttng_ust_lib_ring_buffer_ctx *ctx,
+			    const char *src, size_t len, int pad)
+{
+	struct channel_backend *chanb = &ctx->chan->backend;
+	struct lttng_ust_shm_handle *handle = ctx->handle;
+	size_t count;
+	size_t offset = ctx->buf_offset;
+	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+	void *p;
+
+	if (caa_unlikely(!len))
+		return;
+	/*
+	 * Underlying layer should never ask for writes across
+	 * subbuffers.
+	 */
+	CHAN_WARN_ON(chanb, (offset & (chanb->buf_size - 1)) + len > chanb->buf_size);
+	backend_pages = lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
+	if (caa_unlikely(!backend_pages)) {
+		if (lib_ring_buffer_backend_get_pages(config, ctx, &backend_pages))
+			return;
+	}
+	p = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
+	if (caa_unlikely(!p))
+		return;
+
+	count = lib_ring_buffer_do_strcpy(config, p, src, len - 1);
+	offset += count;
+	/* Padding */
+	if (caa_unlikely(count < len - 1)) {
+		size_t pad_len = len - 1 - count;
+
+		p = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
+		if (caa_unlikely(!p))
+			return;
+		lib_ring_buffer_do_memset(p, pad, pad_len);
+		offset += pad_len;
+	}
+	/* Final '\0' */
+	p = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
+	if (caa_unlikely(!p))
+		return;
+	lib_ring_buffer_do_memset(p, '\0', 1);
 	ctx->buf_offset += len;
 }
 
@@ -100,28 +184,49 @@ void lib_ring_buffer_write(const struct lib_ring_buffer_config *config,
  */
 static inline
 unsigned long lib_ring_buffer_get_records_unread(
-				const struct lib_ring_buffer_config *config,
-				struct lib_ring_buffer *buf,
-				struct shm_handle *handle)
+				const struct lttng_ust_lib_ring_buffer_config *config,
+				struct lttng_ust_lib_ring_buffer *buf,
+				struct lttng_ust_shm_handle *handle)
 {
-	struct lib_ring_buffer_backend *bufb = &buf->backend;
-	struct lib_ring_buffer_backend_pages_shmp *pages;
-	unsigned long records_unread = 0, sb_bindex, id;
+	struct lttng_ust_lib_ring_buffer_backend *bufb = &buf->backend;
+	unsigned long records_unread = 0, sb_bindex;
 	unsigned int i;
+	struct channel *chan;
 
-	for (i = 0; i < shmp(handle, bufb->chan)->backend.num_subbuf; i++) {
-		id = shmp_index(handle, bufb->buf_wsb, i)->id;
-		sb_bindex = subbuffer_id_get_index(config, id);
-		pages = shmp_index(handle, bufb->array, sb_bindex);
-		records_unread += v_read(config, &shmp(handle, pages->shmp)->records_unread);
+	chan = shmp(handle, bufb->chan);
+	if (!chan)
+		return 0;
+	for (i = 0; i < chan->backend.num_subbuf; i++) {
+		struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
+		struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
+		struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+
+		wsb = shmp_index(handle, bufb->buf_wsb, i);
+		if (!wsb)
+			return 0;
+		sb_bindex = subbuffer_id_get_index(config, wsb->id);
+		rpages = shmp_index(handle, bufb->array, sb_bindex);
+		if (!rpages)
+			return 0;
+		backend_pages = shmp(handle, rpages->shmp);
+		if (!backend_pages)
+			return 0;
+		records_unread += v_read(config, &backend_pages->records_unread);
 	}
 	if (config->mode == RING_BUFFER_OVERWRITE) {
-		id = bufb->buf_rsb.id;
-		sb_bindex = subbuffer_id_get_index(config, id);
-		pages = shmp_index(handle, bufb->array, sb_bindex);
-		records_unread += v_read(config, &shmp(handle, pages->shmp)->records_unread);
+		struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
+		struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+
+		sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
+		rpages = shmp_index(handle, bufb->array, sb_bindex);
+		if (!rpages)
+			return 0;
+		backend_pages = shmp(handle, rpages->shmp);
+		if (!backend_pages)
+			return 0;
+		records_unread += v_read(config, &backend_pages->records_unread);
 	}
 	return records_unread;
 }
 
-#endif /* _LINUX_RING_BUFFER_BACKEND_H */
+#endif /* _LTTNG_RING_BUFFER_BACKEND_H */
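
Note (editor, not part of the patch): the copy/pad/terminate semantics that the
new lib_ring_buffer_strcpy() implements can be hard to see through the
shared-memory plumbing. The standalone sketch below reproduces the same
semantics against a plain char array; the sketch_* names are hypothetical, and
the real code additionally goes through CMM_LOAD_SHARED() and
lib_ring_buffer_do_copy()/lib_ring_buffer_do_memset() because @src may be
modified concurrently and the destination lives in backend pages.

	#include <stdio.h>
	#include <string.h>

	/* Sketch of lib_ring_buffer_do_strcpy(): copy at most len bytes,
	 * stop at '\0', return bytes copied, without NUL-terminating dest. */
	static size_t sketch_do_strcpy(char *dest, const char *src, size_t len)
	{
		size_t count;

		for (count = 0; count < len; count++) {
			char c = src[count];	/* one read per character */

			if (!c)
				break;
			dest[count] = c;
		}
		return count;
	}

	/* Sketch of lib_ring_buffer_strcpy(): reserve len bytes total, copy
	 * at most len - 1 string bytes, pad the rest, always end with '\0'. */
	static void sketch_strcpy(char *dest, const char *src, size_t len, int pad)
	{
		size_t count;

		if (!len)
			return;
		count = sketch_do_strcpy(dest, src, len - 1);
		if (count < len - 1)
			memset(dest + count, pad, len - 1 - count);
		dest[len - 1] = '\0';
	}

	int main(void)
	{
		char slot[8];

		sketch_strcpy(slot, "hi", sizeof(slot), '#');
		printf("%s\n", slot);	/* "hi#####": 2 bytes, 5 pads, '\0' */
		return 0;
	}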
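Note (editor, not part of the patch): both fast paths in the new code rely on
chanb->buf_size and chanb->subbuf_size being powers of two, so that
"offset & (size - 1)" is an exact, cheaper substitute for "offset % size" when
wrapping the running write offset into the buffer and into the current
subbuffer. A minimal check of that identity, with hypothetical sizes:

	#include <assert.h>
	#include <stddef.h>

	int main(void)
	{
		size_t buf_size = 4096, subbuf_size = 1024;	/* powers of two */
		size_t offset = 5000;	/* running offset, may exceed buf_size */

		/* Wrap into the buffer, then into the subbuffer: both 904. */
		assert((offset & (buf_size - 1)) == offset % buf_size);
		assert((offset & (subbuf_size - 1)) == offset % subbuf_size);
		return 0;
	}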