X-Git-Url: http://git.lttng.org/?a=blobdiff_plain;f=libringbuffer%2Ffrontend_api.h;h=bc1fd1220ba97c21da824ac326d81d4edf789a8a;hb=e56bb47c3af00db3bc9e2dea711bef4882e6ef4c;hp=93f6760b28b938238171dda1cb3bd395352d362e;hpb=40745074477e85361af72620a85ccf7945079d68;p=lttng-ust.git

diff --git a/libringbuffer/frontend_api.h b/libringbuffer/frontend_api.h
index 93f6760b..bc1fd122 100644
--- a/libringbuffer/frontend_api.h
+++ b/libringbuffer/frontend_api.h
@@ -84,6 +84,7 @@ void lib_ring_buffer_put_cpu(const struct lttng_ust_lib_ring_buffer_config *conf
 static inline
 int lib_ring_buffer_try_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
 				struct lttng_ust_lib_ring_buffer_ctx *ctx,
+				void *client_ctx,
 				unsigned long *o_begin, unsigned long *o_end,
 				unsigned long *o_old, size_t *before_hdr_pad)
 {
@@ -110,7 +111,7 @@ int lib_ring_buffer_try_reserve(const struct lttng_ust_lib_ring_buffer_config *c
 		return 1;
 
 	ctx->slot_size = record_header_size(config, chan, *o_begin,
-					    before_hdr_pad, ctx);
+					    before_hdr_pad, ctx, client_ctx);
 	ctx->slot_size +=
 		lib_ring_buffer_align(*o_begin + ctx->slot_size,
 				      ctx->largest_align) + ctx->data_size;
@@ -152,7 +153,8 @@ int lib_ring_buffer_try_reserve(const struct lttng_ust_lib_ring_buffer_config *c
  */
 static inline
 int lib_ring_buffer_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
-			    struct lttng_ust_lib_ring_buffer_ctx *ctx)
+			    struct lttng_ust_lib_ring_buffer_ctx *ctx,
+			    void *client_ctx)
 {
 	struct channel *chan = ctx->chan;
 	struct lttng_ust_shm_handle *handle = ctx->handle;
@@ -160,21 +162,23 @@ int lib_ring_buffer_reserve(const struct lttng_ust_lib_ring_buffer_config *confi
 	unsigned long o_begin, o_end, o_old;
 	size_t before_hdr_pad = 0;
 
-	if (uatomic_read(&chan->record_disabled))
+	if (caa_unlikely(uatomic_read(&chan->record_disabled)))
 		return -EAGAIN;
 
 	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
 		buf = shmp(handle, chan->backend.buf[ctx->cpu].shmp);
 	else
 		buf = shmp(handle, chan->backend.buf[0].shmp);
-	if (uatomic_read(&buf->record_disabled))
+	if (caa_unlikely(!buf))
+		return -EIO;
+	if (caa_unlikely(uatomic_read(&buf->record_disabled)))
 		return -EAGAIN;
 	ctx->buf = buf;
 
 	/*
 	 * Perform retryable operations.
 	 */
-	if (caa_unlikely(lib_ring_buffer_try_reserve(config, ctx, &o_begin,
+	if (caa_unlikely(lib_ring_buffer_try_reserve(config, ctx, client_ctx, &o_begin,
 						     &o_end, &o_old, &before_hdr_pad)))
 		goto slow_path;
 
@@ -205,7 +209,7 @@ int lib_ring_buffer_reserve(const struct lttng_ust_lib_ring_buffer_config *confi
 	ctx->buf_offset = o_begin + before_hdr_pad;
 	return 0;
 slow_path:
-	return lib_ring_buffer_reserve_slow(ctx);
+	return lib_ring_buffer_reserve_slow(ctx, client_ctx);
 }
 
 /**
@@ -250,11 +254,16 @@ void lib_ring_buffer_commit(const struct lttng_ust_lib_ring_buffer_config *confi
 	unsigned long offset_end = ctx->buf_offset;
 	unsigned long endidx = subbuf_index(offset_end - 1, chan);
 	unsigned long commit_count;
+	struct commit_counters_hot *cc_hot = shmp_index(handle,
+						buf->commit_hot, endidx);
+
+	if (caa_unlikely(!cc_hot))
+		return;
 
 	/*
 	 * Must count record before incrementing the commit count.
 	 */
-	subbuffer_count_record(config, &buf->backend, endidx, handle);
+	subbuffer_count_record(config, ctx, &buf->backend, endidx, handle);
 
 	/*
 	 * Order all writes to buffer before the commit count update that will
@@ -262,7 +271,7 @@ void lib_ring_buffer_commit(const struct lttng_ust_lib_ring_buffer_config *confi
 	 */
 	cmm_smp_wmb();
 
-	v_add(config, ctx->slot_size, &shmp_index(handle, buf->commit_hot, endidx)->cc);
+	v_add(config, ctx->slot_size, &cc_hot->cc);
 
 	/*
 	 * commit count read can race with concurrent OOO commit count updates.
@@ -282,7 +291,7 @@ void lib_ring_buffer_commit(const struct lttng_ust_lib_ring_buffer_config *confi
 	 * count reaches back the reserve offset for a specific sub-buffer,
 	 * which is completely independent of the order.
 	 */
-	commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, endidx)->cc);
+	commit_count = v_read(config, &cc_hot->cc);
 
 	lib_ring_buffer_check_deliver(config, buf, chan, offset_end - 1,
 				      commit_count, endidx, handle, ctx->tsc);
@@ -290,8 +299,8 @@ void lib_ring_buffer_commit(const struct lttng_ust_lib_ring_buffer_config *confi
 	 * Update used size at each commit. It's needed only for extracting
 	 * ring_buffer buffers from vmcore, after crash.
 	 */
-	lib_ring_buffer_write_commit_counter(config, buf, chan, endidx,
-					     offset_end, commit_count, handle);
+	lib_ring_buffer_write_commit_counter(config, buf, chan,
+					     offset_end, commit_count, handle, cc_hot);
 }
 
 /**
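
The hunks above thread an opaque client_ctx pointer from the public reserve entry point down to record_header_size() and lib_ring_buffer_reserve_slow(), add a NULL check on the shared-memory buffer pointer (returning -EIO), and cache the commit_counters_hot pointer once per commit. Below is a minimal, hedged sketch of how a ring-buffer client's write path might look after this change; write_event() and struct my_client_ctx are illustrative assumptions, and only lib_ring_buffer_reserve(), lib_ring_buffer_commit(), and the ctx fields come from frontend_api.h as patched above. It assumes compilation inside libringbuffer with access to the internal headers.

/*
 * Sketch only, not part of the patch: my_client_ctx and write_event
 * are hypothetical names for a client-side event writer.
 */
#include "frontend_api.h"

struct my_client_ctx {
	size_t payload_len;	/* hypothetical data the client's record_header_size() needs */
};

static
int write_event(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_ctx *ctx,
		struct my_client_ctx *client_ctx)
{
	int ret;

	/* client_ctx is now forwarded down to record_header_size(). */
	ret = lib_ring_buffer_reserve(config, ctx, client_ctx);
	if (ret)
		return ret;	/* e.g. -EAGAIN if recording is disabled, -EIO if the buffer is unavailable */
	/* ... write the event payload at ctx->buf_offset ... */
	lib_ring_buffer_commit(config, ctx);
	return 0;
}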