X-Git-Url: http://git.lttng.org/?a=blobdiff_plain;f=libringbuffer%2Fbackend.h;h=46022ad3c5767d7f46db917ee41d2f6e72a98c7b;hb=c7667bfebfa1d054ad8d54fd98ada3c86184e327;hp=53a97140bf33bfd70c21f6b7939c1f48251cb20f;hpb=a44c74d96a15230cec7c09c770bf7fc745f0df15;p=lttng-ust.git

diff --git a/libringbuffer/backend.h b/libringbuffer/backend.h
index 53a97140..46022ad3 100644
--- a/libringbuffer/backend.h
+++ b/libringbuffer/backend.h
@@ -71,37 +71,33 @@ lib_ring_buffer_read_offset_address(struct lttng_ust_lib_ring_buffer_backend *bu
  * backend-specific memcpy() operation. Calls the slow path (_ring_buffer_write)
  * if copy is crossing a page boundary.
  */
-static inline
+static inline __attribute__((always_inline))
 void lib_ring_buffer_write(const struct lttng_ust_lib_ring_buffer_config *config,
			   struct lttng_ust_lib_ring_buffer_ctx *ctx,
			   const void *src, size_t len)
 {
-	struct lttng_ust_lib_ring_buffer_backend *bufb = &ctx->buf->backend;
 	struct channel_backend *chanb = &ctx->chan->backend;
 	struct lttng_ust_shm_handle *handle = ctx->handle;
-	size_t sbidx;
 	size_t offset = ctx->buf_offset;
-	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
-	unsigned long sb_bindex, id;
+	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+	void *p;
 
 	if (caa_unlikely(!len))
 		return;
-	offset &= chanb->buf_size - 1;
-	sbidx = offset >> chanb->subbuf_size_order;
-	id = shmp_index(handle, bufb->buf_wsb, sbidx)->id;
-	sb_bindex = subbuffer_id_get_index(config, id);
-	rpages = shmp_index(handle, bufb->array, sb_bindex);
-	CHAN_WARN_ON(ctx->chan,
-		     config->mode == RING_BUFFER_OVERWRITE
-		     && subbuffer_id_is_noref(config, id));
 	/*
 	 * Underlying layer should never ask for writes across
 	 * subbuffers.
 	 */
-	CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
-	lib_ring_buffer_do_copy(config,
-				shmp_index(handle, shmp(handle, rpages->shmp)->p, offset & (chanb->subbuf_size - 1)),
-				src, len);
+	CHAN_WARN_ON(chanb, (offset & (chanb->buf_size - 1)) + len > chanb->buf_size);
+	backend_pages = lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
+	if (caa_unlikely(!backend_pages)) {
+		if (lib_ring_buffer_backend_get_pages(config, ctx, &backend_pages))
+			return;
+	}
+	p = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
+	if (caa_unlikely(!p))
+		return;
+	lib_ring_buffer_do_copy(config, p, src, len);
 	ctx->buf_offset += len;
 }
 
@@ -110,7 +106,7 @@ void lib_ring_buffer_write(const struct lttng_ust_lib_ring_buffer_config *config
  * terminating character is found in @src. Returns the number of bytes
  * copied. Does *not* terminate @dest with NULL terminating character.
  */
-static inline
+static inline __attribute__((always_inline))
 size_t lib_ring_buffer_do_strcpy(const struct lttng_ust_lib_ring_buffer_config *config,
 		char *dest, const char *src, size_t len)
 {
@@ -146,52 +142,51 @@ size_t lib_ring_buffer_do_strcpy(const struct lttng_ust_lib_ring_buffer_config *
  * character is found in @src before @len - 1 characters are copied, pad
  * the buffer with @pad characters (e.g. '#').
  */
-static inline
+static inline __attribute__((always_inline))
 void lib_ring_buffer_strcpy(const struct lttng_ust_lib_ring_buffer_config *config,
			    struct lttng_ust_lib_ring_buffer_ctx *ctx,
			    const char *src, size_t len, int pad)
 {
-	struct lttng_ust_lib_ring_buffer_backend *bufb = &ctx->buf->backend;
 	struct channel_backend *chanb = &ctx->chan->backend;
 	struct lttng_ust_shm_handle *handle = ctx->handle;
-	size_t sbidx, count;
+	size_t count;
 	size_t offset = ctx->buf_offset;
-	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
-	unsigned long sb_bindex, id;
+	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+	void *p;
 
 	if (caa_unlikely(!len))
 		return;
-	offset &= chanb->buf_size - 1;
-	sbidx = offset >> chanb->subbuf_size_order;
-	id = shmp_index(handle, bufb->buf_wsb, sbidx)->id;
-	sb_bindex = subbuffer_id_get_index(config, id);
-	rpages = shmp_index(handle, bufb->array, sb_bindex);
-	CHAN_WARN_ON(ctx->chan,
-		     config->mode == RING_BUFFER_OVERWRITE
-		     && subbuffer_id_is_noref(config, id));
 	/*
 	 * Underlying layer should never ask for writes across
 	 * subbuffers.
 	 */
-	CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
-	count = lib_ring_buffer_do_strcpy(config,
-			shmp_index(handle, shmp(handle, rpages->shmp)->p,
-				offset & (chanb->subbuf_size - 1)),
-			src, len - 1);
+	CHAN_WARN_ON(chanb, (offset & (chanb->buf_size - 1)) + len > chanb->buf_size);
+	backend_pages = lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
+	if (caa_unlikely(!backend_pages)) {
+		if (lib_ring_buffer_backend_get_pages(config, ctx, &backend_pages))
+			return;
+	}
+	p = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
+	if (caa_unlikely(!p))
+		return;
+
+	count = lib_ring_buffer_do_strcpy(config, p, src, len - 1);
 	offset += count;
 	/* Padding */
 	if (caa_unlikely(count < len - 1)) {
 		size_t pad_len = len - 1 - count;
 
-		lib_ring_buffer_do_memset(shmp_index(handle, shmp(handle, rpages->shmp)->p,
-					offset & (chanb->subbuf_size - 1)),
-				pad, pad_len);
+		p = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
+		if (caa_unlikely(!p))
+			return;
+		lib_ring_buffer_do_memset(p, pad, pad_len);
 		offset += pad_len;
 	}
 	/* Final '\0' */
-	lib_ring_buffer_do_memset(shmp_index(handle, shmp(handle, rpages->shmp)->p,
-				offset & (chanb->subbuf_size - 1)),
-			'\0', 1);
+	p = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
+	if (caa_unlikely(!p))
+		return;
+	lib_ring_buffer_do_memset(p, '\0', 1);
 	ctx->buf_offset += len;
 }
 
@@ -207,21 +202,42 @@ unsigned long lib_ring_buffer_get_records_unread(
 				struct lttng_ust_shm_handle *handle)
 {
 	struct lttng_ust_lib_ring_buffer_backend *bufb = &buf->backend;
-	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages;
-	unsigned long records_unread = 0, sb_bindex, id;
+	unsigned long records_unread = 0, sb_bindex;
 	unsigned int i;
-
-	for (i = 0; i < shmp(handle, bufb->chan)->backend.num_subbuf; i++) {
-		id = shmp_index(handle, bufb->buf_wsb, i)->id;
-		sb_bindex = subbuffer_id_get_index(config, id);
-		pages = shmp_index(handle, bufb->array, sb_bindex);
-		records_unread += v_read(config, &shmp(handle, pages->shmp)->records_unread);
+	struct channel *chan;
+
+	chan = shmp(handle, bufb->chan);
+	if (!chan)
+		return 0;
+	for (i = 0; i < chan->backend.num_subbuf; i++) {
+		struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
+		struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
+		struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+
+		wsb = shmp_index(handle, bufb->buf_wsb, i);
+		if (!wsb)
+			return 0;
+		sb_bindex = subbuffer_id_get_index(config, wsb->id);
+		rpages = shmp_index(handle, bufb->array, sb_bindex);
+		if (!rpages)
+			return 0;
+		backend_pages = shmp(handle, rpages->shmp);
+		if (!backend_pages)
+			return 0;
+		records_unread += v_read(config, &backend_pages->records_unread);
 	}
 	if (config->mode == RING_BUFFER_OVERWRITE) {
-		id = bufb->buf_rsb.id;
-		sb_bindex = subbuffer_id_get_index(config, id);
-		pages = shmp_index(handle, bufb->array, sb_bindex);
-		records_unread += v_read(config, &shmp(handle, pages->shmp)->records_unread);
+		struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
+		struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+
+		sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
+		rpages = shmp_index(handle, bufb->array, sb_bindex);
+		if (!rpages)
+			return 0;
+		backend_pages = shmp(handle, rpages->shmp);
+		if (!backend_pages)
+			return 0;
+		records_unread += v_read(config, &backend_pages->records_unread);
 	}
 	return records_unread;
 }
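
One pattern recurs throughout this diff: every pointer derived from the shared-memory handle via shmp()/shmp_index() is now checked for NULL before it is dereferenced, and the write fast path reuses the backend pages cached in the ring buffer context instead of re-deriving them from the sub-buffer id on each call. The standalone sketch below models just the validate-before-dereference part of that pattern outside of lttng-ust; all names in it (shm_handle, shm_ref, shm_deref, ring_write) are hypothetical simplifications, not the library's API.

/*
 * Standalone sketch, not lttng-ust code: a minimal model of the
 * validate-before-dereference pattern applied in this diff. The
 * names shm_handle, shm_ref, shm_deref and ring_write are invented
 * for illustration.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* An offset-based reference into a shared mapping, never a raw pointer. */
struct shm_ref {
	size_t offset;
	size_t len;
};

struct shm_handle {
	uint8_t *base;	/* start of the shared mapping */
	size_t size;	/* total mapped size */
};

/*
 * Resolve a reference to a pointer, range-checking it first. Returns
 * NULL when the reference falls outside the mapping, which is why each
 * caller in the diff now tests the result instead of chaining lookups
 * inside a single expression.
 */
static void *shm_deref(struct shm_handle *handle, struct shm_ref ref)
{
	if (ref.offset > handle->size || ref.len > handle->size - ref.offset)
		return NULL;
	return handle->base + ref.offset;
}

/*
 * Mirrors the control flow of the rewritten lib_ring_buffer_write():
 * resolve the destination, silently drop the record when validation
 * fails, and only then copy. The mask assumes a power-of-two buffer
 * size, as the ring buffer code does.
 */
static void ring_write(struct shm_handle *handle, struct shm_ref buf,
		size_t *write_pos, const void *src, size_t len)
{
	struct shm_ref slot;
	void *p;

	if (!len)
		return;
	slot.offset = buf.offset + (*write_pos & (buf.len - 1));
	slot.len = len;
	p = shm_deref(handle, slot);
	if (!p)
		return;		/* inconsistent mapping: drop, do not crash */
	memcpy(p, src, len);
	*write_pos += len;
}

int main(void)
{
	static uint8_t backing[64];	/* stands in for the shared mapping */
	struct shm_handle handle = { backing, sizeof(backing) };
	struct shm_ref buf = { 0, sizeof(backing) };
	struct shm_ref bogus = { 4096, 8 };	/* points outside the mapping */
	size_t write_pos = 0;

	ring_write(&handle, buf, &write_pos, "hello", 5);
	printf("wrote %zu bytes: %.5s\n", write_pos, (char *) backing);
	printf("bogus ref resolves to %p\n", shm_deref(&handle, bogus));
	return 0;
}

Returning silently on a failed lookup matches the diff's own choice of dropping the write rather than warning or aborting: these buffers are shared with a consumer process, so the apparent rationale is that an inconsistent or corrupted mapping should degrade to lost trace data instead of a fault inside the instrumented application.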