X-Git-Url: https://git.lttng.org/?a=blobdiff_plain;f=lib%2Fringbuffer%2Ffrontend_internal.h;h=2bc540370631ab53364a7f8fe318e776b386aabc;hb=7337bad8813e5d9ec7f37669afb3ec3bf07937da;hp=a86abb14e683825029e138660462bcc6f02d2e66;hpb=886d51a3d7ed5fa6b41d7f19b3e14ae6c535a44c;p=lttng-modules.git

diff --git a/lib/ringbuffer/frontend_internal.h b/lib/ringbuffer/frontend_internal.h
index a86abb14..2bc54037 100644
--- a/lib/ringbuffer/frontend_internal.h
+++ b/lib/ringbuffer/frontend_internal.h
@@ -156,6 +156,12 @@ extern
 void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf,
 				 enum switch_mode mode);
 
+extern
+void lib_ring_buffer_switch_remote(struct lib_ring_buffer *buf);
+
+extern
+void lib_ring_buffer_switch_remote_empty(struct lib_ring_buffer *buf);
+
 /* Buffer write helpers */
 
 static inline
@@ -287,17 +293,24 @@ int lib_ring_buffer_reserve_committed(const struct lib_ring_buffer_config *confi
 		- (commit_count & chan->commit_count_mask) == 0);
 }
 
+/*
+ * Receive the end-of-subbuffer TSC as a parameter. It has been read in
+ * the space reservation loop of either reserve or switch, which
+ * ensures it progresses monotonically with event records in the
+ * buffer. Therefore, it ensures that the end timestamp of a subbuffer
+ * is <= the begin timestamp of the following subbuffer.
+ */
 static inline
 void lib_ring_buffer_check_deliver(const struct lib_ring_buffer_config *config,
 				   struct lib_ring_buffer *buf,
 				   struct channel *chan,
 				   unsigned long offset,
 				   unsigned long commit_count,
-				   unsigned long idx)
+				   unsigned long idx,
+				   u64 tsc)
 {
 	unsigned long old_commit_count = commit_count
 					 - chan->backend.subbuf_size;
-	u64 tsc;
 
 	/* Check if all commits have been done */
 	if (unlikely((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
@@ -328,6 +341,12 @@ void lib_ring_buffer_check_deliver(const struct lib_ring_buffer_config *config,
 		 * The subbuffer size is at least 2 bytes (minimum size: 1 page).
 		 * This guarantees that old_commit_count + 1 != commit_count.
 		 */
+
+		/*
+		 * Order prior updates to the reserve count before the
+		 * commit_cold cc_sb update.
+		 */
+		smp_wmb();
 		if (likely(v_cmpxchg(config, &buf->commit_cold[idx].cc_sb,
 					 old_commit_count, old_commit_count + 1)
 			   == old_commit_count)) {
@@ -337,7 +356,6 @@ void lib_ring_buffer_check_deliver(const struct lib_ring_buffer_config *config,
 			 * and any other writer trying to access this subbuffer
 			 * in this state is required to drop records.
 			 */
-			tsc = config->cb.ring_buffer_clock_read(chan);
 			v_add(config,
 			      subbuffer_get_records_count(config,
 							  &buf->backend, idx),
@@ -370,6 +388,11 @@ void lib_ring_buffer_check_deliver(const struct lib_ring_buffer_config *config,
 			/* End of exclusive subbuffer access */
 			v_set(config, &buf->commit_cold[idx].cc_sb,
 			      commit_count);
+			/*
+			 * Order later updates to the reserve count after
+			 * the commit_cold cc_sb update.
+			 */
+			smp_wmb();
 			lib_ring_buffer_vmcore_check_deliver(config, buf,
 							     commit_count, idx);
 
@@ -401,23 +424,20 @@ void lib_ring_buffer_write_commit_counter(const struct lib_ring_buffer_config *c
 					  struct channel *chan,
 					  unsigned long idx,
 					  unsigned long buf_offset,
-					  unsigned long commit_count,
-					  size_t slot_size)
+					  unsigned long commit_count)
 {
-	unsigned long offset, commit_seq_old;
+	unsigned long commit_seq_old;
 
 	if (config->oops != RING_BUFFER_OOPS_CONSISTENCY)
 		return;
 
-	offset = buf_offset + slot_size;
-
 	/*
 	 * subbuf_offset includes commit_count_mask. We can simply
 	 * compare the offsets within the subbuffer without caring about
 	 * buffer full/empty mismatch because offset is never zero here
 	 * (subbuffer header and record headers have non-zero length).
 	 */
-	if (unlikely(subbuf_offset(offset - commit_count, chan)))
+	if (unlikely(subbuf_offset(buf_offset - commit_count, chan)))
 		return;
 
 	commit_seq_old = v_read(config, &buf->commit_hot[idx].seq);
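
The barrier pairing added to lib_ring_buffer_check_deliver() follows a publish/own/release pattern: the first smp_wmb() orders prior reserve-count updates before the cc_sb cmpxchg that takes exclusive subbuffer access, and the second orders the cc_sb update before any later reserve-count updates, so a consumer that observes cc_sb marking the subbuffer as delivered also observes counters at least that recent. Below is a minimal userspace sketch of that pattern using C11 atomics; it is not LTTng code, the names (try_deliver, cc_sb, records_count) are invented for illustration, and release fences are used as an approximation of smp_wmb().

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_ulong records_count;	/* stands in for the reserve/records counters */
static atomic_ulong cc_sb;		/* stands in for commit_cold[idx].cc_sb */

static bool try_deliver(unsigned long old_cc)
{
	unsigned long expected = old_cc;

	/* Counter update that must be visible before delivery is published. */
	atomic_fetch_add_explicit(&records_count, 1, memory_order_relaxed);

	/* Order prior counter updates before the cc_sb update (~ first smp_wmb()). */
	atomic_thread_fence(memory_order_release);

	/* Take exclusive subbuffer access; only one contender can win. */
	if (!atomic_compare_exchange_strong_explicit(&cc_sb, &expected,
			old_cc + 1, memory_order_relaxed, memory_order_relaxed))
		return false;

	/* ... updates performed under exclusive access would go here ... */

	/* End of exclusive access. */
	atomic_store_explicit(&cc_sb, old_cc + 2, memory_order_relaxed);

	/* Order the cc_sb update before later counter updates (~ second smp_wmb()). */
	atomic_thread_fence(memory_order_release);
	return true;
}

int main(void)
{
	printf("delivered: %d\n", try_deliver(0));
	return 0;
}

The tsc parameter change follows the same ordering discipline: by reading the clock inside the space reservation loop and passing it down, rather than calling ring_buffer_clock_read() at delivery time, the end timestamp of a subbuffer stays ordered with (<=) the begin timestamps of records reserved in subsequent subbuffers.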