X-Git-Url: http://git.lttng.org/?a=blobdiff_plain;f=libringbuffer%2Fbackend.h;h=52e632d86d247b0b9bf0ed79a472c06c4d6db006;hb=a211b293bc6a610bed73a60006967663a30e4931;hp=feefc7a3229f1314cf6813b1f05178a0cc6176e8;hpb=0bf3c920174f81b8675984010785b8af9b9b1b59;p=lttng-ust.git

diff --git a/libringbuffer/backend.h b/libringbuffer/backend.h
index feefc7a3..52e632d8 100644
--- a/libringbuffer/backend.h
+++ b/libringbuffer/backend.h
@@ -26,6 +26,7 @@
  * the reader in flight recorder mode.
  */
 
+#include <stddef.h>
 #include <unistd.h>
 
 /* Internal helpers */
@@ -71,37 +72,122 @@ lib_ring_buffer_read_offset_address(struct lttng_ust_lib_ring_buffer_backend *bu
  * backend-specific memcpy() operation. Calls the slow path (_ring_buffer_write)
  * if copy is crossing a page boundary.
  */
-static inline
+static inline __attribute__((always_inline))
 void lib_ring_buffer_write(const struct lttng_ust_lib_ring_buffer_config *config,
 			   struct lttng_ust_lib_ring_buffer_ctx *ctx,
 			   const void *src, size_t len)
 {
-	struct lttng_ust_lib_ring_buffer_backend *bufb = &ctx->buf->backend;
 	struct channel_backend *chanb = &ctx->chan->backend;
 	struct lttng_ust_shm_handle *handle = ctx->handle;
-	size_t sbidx;
 	size_t offset = ctx->buf_offset;
-	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
-	unsigned long sb_bindex, id;
+	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+	void *p;
 
 	if (caa_unlikely(!len))
 		return;
-	offset &= chanb->buf_size - 1;
-	sbidx = offset >> chanb->subbuf_size_order;
-	id = shmp_index(handle, bufb->buf_wsb, sbidx)->id;
-	sb_bindex = subbuffer_id_get_index(config, id);
-	rpages = shmp_index(handle, bufb->array, sb_bindex);
-	CHAN_WARN_ON(ctx->chan,
-		     config->mode == RING_BUFFER_OVERWRITE
-		     && subbuffer_id_is_noref(config, id));
 	/*
 	 * Underlying layer should never ask for writes across
 	 * subbuffers.
 	 */
-	CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
-	lib_ring_buffer_do_copy(config,
-				shmp_index(handle, shmp(handle, rpages->shmp)->p, offset & (chanb->subbuf_size - 1)),
-				src, len);
+	CHAN_WARN_ON(chanb, (offset & (chanb->buf_size - 1)) + len > chanb->buf_size);
+	backend_pages = lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
+	if (caa_unlikely(!backend_pages)) {
+		if (lib_ring_buffer_backend_get_pages(config, ctx, &backend_pages))
+			return;
+	}
+	p = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
+	if (caa_unlikely(!p))
+		return;
+	lib_ring_buffer_do_copy(config, p, src, len);
+	ctx->buf_offset += len;
+}
+
+/*
+ * Copy up to @len string bytes from @src to @dest. Stop whenever a NULL
+ * terminating character is found in @src. Returns the number of bytes
+ * copied. Does *not* terminate @dest with NULL terminating character.
+ */
+static inline __attribute__((always_inline))
+size_t lib_ring_buffer_do_strcpy(const struct lttng_ust_lib_ring_buffer_config *config,
+		char *dest, const char *src, size_t len)
+{
+	size_t count;
+
+	for (count = 0; count < len; count++) {
+		char c;
+
+		/*
+		 * Only read source character once, in case it is
+		 * modified concurrently.
+		 */
+		c = CMM_LOAD_SHARED(src[count]);
+		if (!c)
+			break;
+		lib_ring_buffer_do_copy(config, &dest[count], &c, 1);
+	}
+	return count;
+}
+
+/**
+ * lib_ring_buffer_strcpy - write string data to a buffer backend
+ * @config : ring buffer instance configuration
+ * @ctx: ring buffer context. (input arguments only)
+ * @src : source pointer to copy from
+ * @len : length of data to copy
+ * @pad : character to use for padding
+ *
+ * This function copies @len - 1 bytes of string data from a source
+ * pointer to a buffer backend, followed by a terminating '\0'
+ * character, at the current context offset. This is more or less a
+ * buffer backend-specific strncpy() operation. If a terminating '\0'
+ * character is found in @src before @len - 1 characters are copied, pad
+ * the buffer with @pad characters (e.g. '#').
+ */
+static inline __attribute__((always_inline))
+void lib_ring_buffer_strcpy(const struct lttng_ust_lib_ring_buffer_config *config,
+			    struct lttng_ust_lib_ring_buffer_ctx *ctx,
+			    const char *src, size_t len, int pad)
+{
+	struct channel_backend *chanb = &ctx->chan->backend;
+	struct lttng_ust_shm_handle *handle = ctx->handle;
+	size_t count;
+	size_t offset = ctx->buf_offset;
+	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+	void *p;
+
+	if (caa_unlikely(!len))
+		return;
+	/*
+	 * Underlying layer should never ask for writes across
+	 * subbuffers.
+	 */
+	CHAN_WARN_ON(chanb, (offset & (chanb->buf_size - 1)) + len > chanb->buf_size);
+	backend_pages = lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
+	if (caa_unlikely(!backend_pages)) {
+		if (lib_ring_buffer_backend_get_pages(config, ctx, &backend_pages))
+			return;
+	}
+	p = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
+	if (caa_unlikely(!p))
+		return;
+
+	count = lib_ring_buffer_do_strcpy(config, p, src, len - 1);
+	offset += count;
+	/* Padding */
+	if (caa_unlikely(count < len - 1)) {
+		size_t pad_len = len - 1 - count;
+
+		p = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
+		if (caa_unlikely(!p))
+			return;
+		lib_ring_buffer_do_memset(p, pad, pad_len);
+		offset += pad_len;
+	}
+	/* Final '\0' */
+	p = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
+	if (caa_unlikely(!p))
+		return;
+	lib_ring_buffer_do_memset(p, '\0', 1);
 	ctx->buf_offset += len;
 }
 
@@ -117,21 +203,42 @@ unsigned long lib_ring_buffer_get_records_unread(
 		struct lttng_ust_shm_handle *handle)
 {
 	struct lttng_ust_lib_ring_buffer_backend *bufb = &buf->backend;
-	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages;
-	unsigned long records_unread = 0, sb_bindex, id;
+	unsigned long records_unread = 0, sb_bindex;
 	unsigned int i;
+	struct channel *chan;
 
-	for (i = 0; i < shmp(handle, bufb->chan)->backend.num_subbuf; i++) {
-		id = shmp_index(handle, bufb->buf_wsb, i)->id;
-		sb_bindex = subbuffer_id_get_index(config, id);
-		pages = shmp_index(handle, bufb->array, sb_bindex);
-		records_unread += v_read(config, &shmp(handle, pages->shmp)->records_unread);
+	chan = shmp(handle, bufb->chan);
+	if (!chan)
+		return 0;
+	for (i = 0; i < chan->backend.num_subbuf; i++) {
+		struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
+		struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
+		struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+
+		wsb = shmp_index(handle, bufb->buf_wsb, i);
+		if (!wsb)
+			return 0;
+		sb_bindex = subbuffer_id_get_index(config, wsb->id);
+		rpages = shmp_index(handle, bufb->array, sb_bindex);
+		if (!rpages)
+			return 0;
+		backend_pages = shmp(handle, rpages->shmp);
+		if (!backend_pages)
+			return 0;
+		records_unread += v_read(config, &backend_pages->records_unread);
 	}
 	if (config->mode == RING_BUFFER_OVERWRITE) {
-		id = bufb->buf_rsb.id;
-		sb_bindex = subbuffer_id_get_index(config, id);
-		pages = shmp_index(handle, bufb->array, sb_bindex);
-		records_unread += v_read(config, &shmp(handle, pages->shmp)->records_unread);
+		struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
+		struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+
+		sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
+		rpages = shmp_index(handle, bufb->array, sb_bindex);
+		if (!rpages)
+			return 0;
+		backend_pages = shmp(handle, rpages->shmp);
+		if (!backend_pages)
+			return 0;
+		records_unread += v_read(config, &backend_pages->records_unread);
 	}
 	return records_unread;
 }
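
Note on the read-once pattern in lib_ring_buffer_do_strcpy(): the source string may be modified concurrently by the traced application, so each byte is loaded exactly once through CMM_LOAD_SHARED() before it is tested and copied. Re-reading src[count] after the NUL test could copy a byte the test never saw. The following standalone sketch models the pattern in plain C; LOAD_ONCE and copy_string_once are hypothetical names standing in for the LTTng-UST helpers, not the library API.

#include <stddef.h>

/* Hypothetical stand-in for CMM_LOAD_SHARED(): force a single load of
 * a byte that another thread may be updating concurrently. */
#define LOAD_ONCE(x) (*(const volatile char *)&(x))

/*
 * Copy at most @len bytes of @src into @dest, stopping at the first
 * NUL. Each source byte is read exactly once, so the byte tested
 * against '\0' is the very byte that gets copied. Returns the number
 * of bytes copied; no terminator is written.
 */
static size_t copy_string_once(char *dest, const char *src, size_t len)
{
	size_t count;

	for (count = 0; count < len; count++) {
		char c = LOAD_ONCE(src[count]);

		if (!c)
			break;
		dest[count] = c;
	}
	return count;
}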
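
The pad-and-terminate contract of lib_ring_buffer_strcpy() can be exercised outside the ring buffer entirely: the record always occupies exactly @len bytes, holding at most @len - 1 string bytes, then @pad fill, then a final '\0'. Below is a minimal self-contained model of that record layout (model_strcpy is a hypothetical helper for illustration, single-threaded, so it skips the read-once concern above).

#include <stdio.h>
#include <string.h>

/*
 * Model of the lib_ring_buffer_strcpy() record layout: copy at most
 * len - 1 string bytes, pad the remainder with @pad, and always end
 * with '\0' so the record is exactly @len bytes.
 */
static void model_strcpy(char *dest, const char *src, size_t len, int pad)
{
	size_t count = 0;

	while (count < len - 1 && src[count] != '\0') {
		dest[count] = src[count];
		count++;
	}
	if (count < len - 1)	/* source shorter than the record */
		memset(dest + count, pad, len - 1 - count);
	dest[len - 1] = '\0';	/* final terminator */
}

int main(void)
{
	char rec[6];

	model_strcpy(rec, "ab", sizeof(rec), '#');
	printf("%s\n", rec);	/* prints "ab###": 2 data + 3 pad + '\0' */
	return 0;
}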
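
The rewritten lib_ring_buffer_get_records_unread() shows the defensive style used throughout this change: every shmp()/shmp_index() translation of a shared-memory reference is NULL-checked before dereference, and the walk fails soft by returning 0, presumably because these tables live in memory shared with the traced process and must not be trusted blindly. A rough sketch of that shape follows; struct pages, struct handle, translate() and sum_records_unread() are invented minimal stand-ins, not the real handle or table layout.

#include <stddef.h>

/* Invented minimal types standing in for the shm handle and tables. */
struct pages { unsigned long records_unread; };
struct handle { struct pages **table; size_t num_subbuf; };

/* Stand-in for shmp_index(): translate a reference, possibly NULL. */
static struct pages *translate(struct handle *h, size_t idx)
{
	return idx < h->num_subbuf ? h->table[idx] : NULL;
}

/*
 * Sum per-subbuffer counters, bailing out with 0 on the first failed
 * translation instead of dereferencing an unmapped pointer.
 */
static unsigned long sum_records_unread(struct handle *h)
{
	unsigned long sum = 0;
	size_t i;

	for (i = 0; i < h->num_subbuf; i++) {
		struct pages *p = translate(h, i);

		if (!p)
			return 0;	/* fail soft on bad shared state */
		sum += p->records_unread;
	}
	return sum;
}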