By using the timestamp sampled at space reservation when the packet is
being filled as "end timestamp" for a packet, we can ensure there is no
overlap between packet timestamp ranges, so that a packet's end timestamp is
<= the following packet's begin timestamp.
Overlap between consecutive packets becomes an issue when the end
timestamp of a packet is greater than the end timestamp of a following
packet, IOW a packet completely contains the timestamp range of a
following packet. This kind of situation does not allow trace viewers
to do binary search within the packet timestamps. Such a situation
will typically never occur if packets are significantly larger than
event size, but this fix ensures it can never even theoretically happen.
The only case where packets can still theoretically overlap is if they
have equal begin and end timestamps, which is valid.
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, endidx)->cc);
lib_ring_buffer_check_deliver(config, buf, chan, offset_end - 1,
commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, endidx)->cc);
lib_ring_buffer_check_deliver(config, buf, chan, offset_end - 1,
- commit_count, endidx, handle);
+ commit_count, endidx, handle, ctx->tsc);
/*
* Update used size at each commit. It's needed only for extracting
* ring_buffer buffers from vmcore, after crash.
/*
* Update used size at each commit. It's needed only for extracting
* ring_buffer buffers from vmcore, after crash.
+/*
+ * Receive end of subbuffer TSC as parameter. It has been read in the
+ * space reservation loop of either reserve or switch, which ensures it
+ * progresses monotonically with event records in the buffer. Therefore,
+ * it ensures that the end timestamp of a subbuffer is <= the begin
+ * timestamp of the following subbuffer.
+ */
static inline
void lib_ring_buffer_check_deliver(const struct lttng_ust_lib_ring_buffer_config *config,
struct lttng_ust_lib_ring_buffer *buf,
static inline
void lib_ring_buffer_check_deliver(const struct lttng_ust_lib_ring_buffer_config *config,
struct lttng_ust_lib_ring_buffer *buf,
unsigned long offset,
unsigned long commit_count,
unsigned long idx,
unsigned long offset,
unsigned long commit_count,
unsigned long idx,
- struct lttng_ust_shm_handle *handle)
+ struct lttng_ust_shm_handle *handle,
+ uint64_t tsc)
{
unsigned long old_commit_count = commit_count
- chan->backend.subbuf_size;
{
unsigned long old_commit_count = commit_count
- chan->backend.subbuf_size;
/* Check if all commits have been done */
if (caa_unlikely((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
/* Check if all commits have been done */
if (caa_unlikely((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
* and any other writer trying to access this subbuffer
* in this state is required to drop records.
*/
* and any other writer trying to access this subbuffer
* in this state is required to drop records.
*/
- tsc = config->cb.ring_buffer_clock_read(chan);
v_add(config,
subbuffer_get_records_count(config,
&buf->backend,
v_add(config,
subbuffer_get_records_count(config,
&buf->backend,
commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, oldidx)->cc);
/* Check if the written buffer has to be delivered */
lib_ring_buffer_check_deliver(config, buf, chan, offsets->old,
commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, oldidx)->cc);
/* Check if the written buffer has to be delivered */
lib_ring_buffer_check_deliver(config, buf, chan, offsets->old,
- commit_count, oldidx, handle);
+ commit_count, oldidx, handle, tsc);
lib_ring_buffer_write_commit_counter(config, buf, chan, oldidx,
offsets->old, commit_count,
config->cb.subbuffer_header_size(),
lib_ring_buffer_write_commit_counter(config, buf, chan, oldidx,
offsets->old, commit_count,
config->cb.subbuffer_header_size(),
v_add(config, padding_size, &shmp_index(handle, buf->commit_hot, oldidx)->cc);
commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, oldidx)->cc);
lib_ring_buffer_check_deliver(config, buf, chan, offsets->old - 1,
v_add(config, padding_size, &shmp_index(handle, buf->commit_hot, oldidx)->cc);
commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, oldidx)->cc);
lib_ring_buffer_check_deliver(config, buf, chan, offsets->old - 1,
- commit_count, oldidx, handle);
+ commit_count, oldidx, handle, tsc);
lib_ring_buffer_write_commit_counter(config, buf, chan, oldidx,
offsets->old, commit_count,
padding_size, handle);
lib_ring_buffer_write_commit_counter(config, buf, chan, oldidx,
offsets->old, commit_count,
padding_size, handle);
commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, beginidx)->cc);
/* Check if the written buffer has to be delivered */
lib_ring_buffer_check_deliver(config, buf, chan, offsets->begin,
commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, beginidx)->cc);
/* Check if the written buffer has to be delivered */
lib_ring_buffer_check_deliver(config, buf, chan, offsets->begin,
- commit_count, beginidx, handle);
+ commit_count, beginidx, handle, tsc);
lib_ring_buffer_write_commit_counter(config, buf, chan, beginidx,
offsets->begin, commit_count,
config->cb.subbuffer_header_size(),
lib_ring_buffer_write_commit_counter(config, buf, chan, beginidx,
offsets->begin, commit_count,
config->cb.subbuffer_header_size(),