X-Git-Url: https://git.lttng.org/?a=blobdiff_plain;f=libringbuffer%2Fring_buffer_frontend.c;h=4f8cc37189397c499886302b56ce4107ac8640bc;hb=e185cf4b895d5d9a040d74be1dfa4d004eff04b7;hp=c177f337da01aa4f457fbea66add969a837d691f;hpb=64bf51a6c36687bb8fe5e60a8a5d46cb00e73a47;p=lttng-ust.git

diff --git a/libringbuffer/ring_buffer_frontend.c b/libringbuffer/ring_buffer_frontend.c
index c177f337..4f8cc371 100644
--- a/libringbuffer/ring_buffer_frontend.c
+++ b/libringbuffer/ring_buffer_frontend.c
@@ -1418,8 +1418,10 @@ void lib_ring_buffer_switch_new_start(struct lttng_ust_lib_ring_buffer *buf,
 /*
  * lib_ring_buffer_switch_new_end: finish switching current subbuffer
  *
- * The only remaining threads could be the ones with pending commits. They will
- * have to do the deliver themselves.
+ * Calls subbuffer_set_data_size() to set the data size of the current
+ * sub-buffer. We do not need to perform check_deliver or commit here,
+ * since this task will be done by the "commit" of the event for which
+ * we are currently doing the space reservation.
  */
 static
 void lib_ring_buffer_switch_new_end(struct lttng_ust_lib_ring_buffer *buf,
@@ -1429,26 +1431,12 @@ void lib_ring_buffer_switch_new_end(struct lttng_ust_lib_ring_buffer *buf,
 				    struct lttng_ust_shm_handle *handle)
 {
 	const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
-	unsigned long endidx = subbuf_index(offsets->end - 1, chan);
-	unsigned long commit_count, padding_size, data_size;
+	unsigned long endidx, data_size;
 
+	endidx = subbuf_index(offsets->end - 1, chan);
 	data_size = subbuf_offset(offsets->end - 1, chan) + 1;
-	padding_size = chan->backend.subbuf_size - data_size;
 	subbuffer_set_data_size(config, &buf->backend, endidx, data_size,
 				handle);
-
-	/*
-	 * Order all writes to buffer before the commit count update that will
-	 * determine that the subbuffer is full.
-	 */
-	cmm_smp_wmb();
-	v_add(config, padding_size, &shmp_index(handle, buf->commit_hot, endidx)->cc);
-	commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, endidx)->cc);
-	lib_ring_buffer_check_deliver(config, buf, chan, offsets->end - 1,
-				      commit_count, endidx, handle);
-	lib_ring_buffer_write_commit_counter(config, buf, chan, endidx,
-					     offsets->end, commit_count,
-					     padding_size, handle);
 }
 
 /*
@@ -1598,9 +1586,10 @@ int lib_ring_buffer_try_reserve_slow(struct lttng_ust_lib_ring_buffer *buf,
 {
 	const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
 	struct lttng_ust_shm_handle *handle = ctx->handle;
-	unsigned long reserve_commit_diff;
+	unsigned long reserve_commit_diff, offset_cmp;
 
-	offsets->begin = v_read(config, &buf->offset);
+retry:
+	offsets->begin = offset_cmp = v_read(config, &buf->offset);
 	offsets->old = offsets->begin;
 	offsets->switch_new_start = 0;
 	offsets->switch_new_end = 0;
@@ -1632,7 +1621,7 @@ int lib_ring_buffer_try_reserve_slow(struct lttng_ust_lib_ring_buffer *buf,
 		}
 	}
 	if (caa_unlikely(offsets->switch_new_start)) {
-		unsigned long sb_index;
+		unsigned long sb_index, commit_count;
 
 		/*
 		 * We are typically not filling the previous buffer completely.
@@ -1643,12 +1632,32 @@ int lib_ring_buffer_try_reserve_slow(struct lttng_ust_lib_ring_buffer *buf,
 			+ config->cb.subbuffer_header_size();
 		/* Test new buffer integrity */
 		sb_index = subbuf_index(offsets->begin, chan);
+		/*
+		 * Read buf->offset before buf->commit_cold[sb_index].cc_sb.
+		 * lib_ring_buffer_check_deliver() has the matching
+		 * memory barriers required around commit_cold cc_sb
+		 * updates so that reserve and commit counter updates
+		 * performed by another CPU are not seen out of order.
+		 */
+		cmm_smp_rmb();
+		commit_count = v_read(config,
+				&shmp_index(handle, buf->commit_cold,
+					sb_index)->cc_sb);
+		/* Read buf->commit_cold[sb_index].cc_sb before buf->offset. */
+		cmm_smp_rmb();
+		if (caa_unlikely(offset_cmp != v_read(config, &buf->offset))) {
+			/*
+			 * The reserve counter has been concurrently updated
+			 * while we were reading the commit counter, so the
+			 * commit counter we read might not match buf->offset.
+			 * We therefore need to retry.
+			 */
+			goto retry;
+		}
 		reserve_commit_diff =
 		  (buf_trunc(offsets->begin, chan)
 		   >> chan->backend.num_subbuf_order)
-		  - ((unsigned long) v_read(config,
-					    &shmp_index(handle, buf->commit_cold, sb_index)->cc_sb)
-		     & chan->commit_count_mask);
+		  - (commit_count & chan->commit_count_mask);
 		if (caa_likely(reserve_commit_diff == 0)) {
 			/* Next subbuffer not being written to. */
 			if (caa_unlikely(config->mode != RING_BUFFER_OVERWRITE &&
@@ -1683,7 +1692,8 @@ int lib_ring_buffer_try_reserve_slow(struct lttng_ust_lib_ring_buffer *buf,
 
 	/*
 	 * Next subbuffer reserve offset does not match the
-	 * commit offset. Drop record in producer-consumer and
+	 * commit offset, and this did not involve an update to
+	 * the reserve counter. Drop record in producer-consumer and
 	 * overwrite mode. Caused by either a writer OOPS or too
 	 * many nested writes over a reserve/commit pair.
 	 */
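
Note on the ordering scheme the patch introduces: the reserve path now samples buf->offset, issues a read barrier, reads the cold commit counter, issues a second read barrier, then re-reads buf->offset and retries if it moved. The following is a minimal, self-contained sketch of that paired-barrier snapshot pattern, written with C11 atomics rather than the lttng-ust/liburcu primitives. All names are hypothetical stand-ins: writer_update() for the producer path, g_offset for buf->offset, g_commit_count for buf->commit_cold[sb_index].cc_sb, and the fences approximate cmm_smp_wmb()/cmm_smp_rmb(); this is not LTTng code.

/*
 * sketch.c: paired-barrier snapshot with retry, in the spirit of the
 * hunks above.  Compile with: cc -std=c11 sketch.c
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong g_offset;		/* stand-in for buf->offset */
static atomic_ulong g_commit_count;	/* stand-in for commit_cold[sb_index].cc_sb */

/*
 * Writer side: publish the commit count, then move the reserve offset.
 * The release fence plays the role of the write-side ordering the patch
 * attributes to lib_ring_buffer_check_deliver() around cc_sb updates.
 */
static void writer_update(unsigned long commit, unsigned long offset)
{
	atomic_store_explicit(&g_commit_count, commit, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&g_offset, offset, memory_order_relaxed);
}

/*
 * Reader side: sample offset, fence, sample commit count, fence, then
 * re-read the offset.  If the offset moved while the commit count was
 * being read, the pair may be inconsistent, so retry (the "goto retry"
 * in the patch).  The two fences mirror the two cmm_smp_rmb() calls.
 */
static void snapshot(unsigned long *offset, unsigned long *commit)
{
	unsigned long offset_cmp;

	do {
		offset_cmp = atomic_load_explicit(&g_offset, memory_order_relaxed);
		atomic_thread_fence(memory_order_acquire);
		*commit = atomic_load_explicit(&g_commit_count, memory_order_relaxed);
		atomic_thread_fence(memory_order_acquire);
	} while (atomic_load_explicit(&g_offset, memory_order_relaxed) != offset_cmp);
	*offset = offset_cmp;
}

int main(void)
{
	unsigned long offset, commit;

	writer_update(42, 4096);
	snapshot(&offset, &commit);
	printf("offset=%lu commit=%lu\n", offset, commit);
	return 0;
}

Without the second barrier and the re-read, the commit counter could be sampled against a stale offset, and reserve_commit_diff could then be computed from a mismatched pair; that mismatch is exactly what the retry path added by this patch guards against.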