X-Git-Url: http://git.lttng.org/?a=blobdiff_plain;f=lib%2Fringbuffer%2Fring_buffer_frontend.c;h=017d30f5ddf673c36f2932d2fe83581a35bea50c;hb=dc5cd5702b74d72f0db0141c6d888a1d820aed9c;hp=c4b797ce8af76faf9ba5209d054b89fb42326656;hpb=e6b06d7d6346a5e3cfcdd214d171af067bca0f34;p=lttng-modules.git

diff --git a/lib/ringbuffer/ring_buffer_frontend.c b/lib/ringbuffer/ring_buffer_frontend.c
index c4b797ce..017d30f5 100644
--- a/lib/ringbuffer/ring_buffer_frontend.c
+++ b/lib/ringbuffer/ring_buffer_frontend.c
@@ -55,13 +55,14 @@
 #include <linux/module.h>
 #include <linux/percpu.h>
 
-#include "../../wrapper/ringbuffer/config.h"
-#include "../../wrapper/ringbuffer/backend.h"
-#include "../../wrapper/ringbuffer/frontend.h"
-#include "../../wrapper/ringbuffer/iterator.h"
-#include "../../wrapper/ringbuffer/nohz.h"
-#include "../../wrapper/atomic.h"
-#include "../../wrapper/percpu-defs.h"
+#include <wrapper/ringbuffer/config.h>
+#include <wrapper/ringbuffer/backend.h>
+#include <wrapper/ringbuffer/frontend.h>
+#include <wrapper/ringbuffer/iterator.h>
+#include <wrapper/ringbuffer/nohz.h>
+#include <wrapper/atomic.h>
+#include <wrapper/kref.h>
+#include <wrapper/percpu-defs.h>
 /*
  * Internal structure representing offsets to use at a sub-buffer switch.
  */
@@ -91,6 +92,9 @@ EXPORT_PER_CPU_SYMBOL(lib_ring_buffer_nesting);
 static
 void lib_ring_buffer_print_errors(struct channel *chan,
 				  struct lib_ring_buffer *buf, int cpu);
+static
+void _lib_ring_buffer_switch_remote(struct lib_ring_buffer *buf,
+		enum switch_mode mode);
 /*
  * Must be called under cpu hotplug protection.
  */
@@ -585,6 +589,63 @@ static void channel_unregister_notifiers(struct channel *chan)
 	channel_backend_unregister_notifiers(&chan->backend);
 }
 
+static void lib_ring_buffer_set_quiescent(struct lib_ring_buffer *buf)
+{
+	if (!buf->quiescent) {
+		buf->quiescent = true;
+		_lib_ring_buffer_switch_remote(buf, SWITCH_FLUSH);
+	}
+}
+
+static void lib_ring_buffer_clear_quiescent(struct lib_ring_buffer *buf)
+{
+	buf->quiescent = false;
+}
+
+void lib_ring_buffer_set_quiescent_channel(struct channel *chan)
+{
+	int cpu;
+	const struct lib_ring_buffer_config *config = &chan->backend.config;
+
+	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
+		get_online_cpus();
+		for_each_channel_cpu(cpu, chan) {
+			struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
+					cpu);
+
+			lib_ring_buffer_set_quiescent(buf);
+		}
+		put_online_cpus();
+	} else {
+		struct lib_ring_buffer *buf = chan->backend.buf;
+
+		lib_ring_buffer_set_quiescent(buf);
+	}
+}
+EXPORT_SYMBOL_GPL(lib_ring_buffer_set_quiescent_channel);
+
+void lib_ring_buffer_clear_quiescent_channel(struct channel *chan)
+{
+	int cpu;
+	const struct lib_ring_buffer_config *config = &chan->backend.config;
+
+	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
+		get_online_cpus();
+		for_each_channel_cpu(cpu, chan) {
+			struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
+					cpu);
+
+			lib_ring_buffer_clear_quiescent(buf);
+		}
+		put_online_cpus();
+	} else {
+		struct lib_ring_buffer *buf = chan->backend.buf;
+
+		lib_ring_buffer_clear_quiescent(buf);
+	}
+}
+EXPORT_SYMBOL_GPL(lib_ring_buffer_clear_quiescent_channel);
+
 static void channel_free(struct channel *chan)
 {
 	if (chan->backend.release_priv_ops) {
@@ -745,7 +806,7 @@ void *channel_destroy(struct channel *chan)
 						chan->backend.priv,
 						cpu);
 			if (buf->backend.allocated)
-				lib_ring_buffer_switch_slow(buf, SWITCH_FLUSH);
+				lib_ring_buffer_set_quiescent(buf);
 			/*
 			 * Perform flush before writing to finalized.
			 */
@@ -759,7 +820,7 @@ void *channel_destroy(struct channel *chan)
 		if (config->cb.buffer_finalize)
 			config->cb.buffer_finalize(buf, chan->backend.priv, -1);
 		if (buf->backend.allocated)
-			lib_ring_buffer_switch_slow(buf, SWITCH_FLUSH);
+			lib_ring_buffer_set_quiescent(buf);
 		/*
 		 * Perform flush before writing to finalized.
 		 */
@@ -793,7 +854,10 @@ int lib_ring_buffer_open_read(struct lib_ring_buffer *buf)
 
 	if (!atomic_long_add_unless(&buf->active_readers, 1, 1))
 		return -EBUSY;
-	kref_get(&chan->ref);
+	if (!lttng_kref_get(&chan->ref)) {
+		atomic_long_dec(&buf->active_readers);
+		return -EOVERFLOW;
+	}
 	lttng_smp_mb__after_atomic();
 	return 0;
 }
@@ -840,6 +904,15 @@ int lib_ring_buffer_snapshot(struct lib_ring_buffer *buf,
 	unsigned long consumed_cur, write_offset;
 	int finalized;
 
+	/*
+	 * First, ensure we perform a "final" flush onto the stream. This will
+	 * ensure we create a packet of padding if we encounter an empty
+	 * packet. This ensures that the timestamp taken right before the
+	 * snapshot is used as the end-of-packet timestamp.
+	 */
+	if (!buf->quiescent)
+		_lib_ring_buffer_switch_remote(buf, SWITCH_FLUSH);
+
 retry:
 	finalized = ACCESS_ONCE(buf->finalized);
 	/*
@@ -1229,7 +1302,8 @@ void lib_ring_buffer_print_errors(struct channel *chan,
 /*
  * lib_ring_buffer_switch_old_start: Populate old subbuffer header.
  *
- * Only executed when the buffer is finalized, in SWITCH_FLUSH.
+ * Only executed by SWITCH_FLUSH, which can be issued while tracing is active
+ * or at buffer finalization (destroy).
  */
 static
 void lib_ring_buffer_switch_old_start(struct lib_ring_buffer *buf,
@@ -1420,12 +1494,14 @@ int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
 		unsigned long sb_index, commit_count;
 
 		/*
-		 * We are performing a SWITCH_FLUSH. At this stage, there are no
-		 * concurrent writes into the buffer.
+		 * We are performing a SWITCH_FLUSH. There may be concurrent
+		 * writes into the buffer if, e.g., invoked while performing a
+		 * snapshot on an active trace.
 		 *
-		 * The client does not save any header information. Don't
-		 * switch empty subbuffer on finalize, because it is invalid to
-		 * deliver a completely empty subbuffer.
+		 * If the client does not save any header information (sub-buffer
+		 * header size == 0), don't switch empty subbuffer on finalize,
+		 * because it is invalid to deliver a completely empty
+		 * subbuffer.
 		 */
 		if (!config->cb.subbuffer_header_size())
 			return -1;
@@ -1546,7 +1622,8 @@ static void remote_switch(void *info)
 	lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
 }
 
-void lib_ring_buffer_switch_remote(struct lib_ring_buffer *buf)
+static void _lib_ring_buffer_switch_remote(struct lib_ring_buffer *buf,
+		enum switch_mode mode)
 {
 	struct channel *chan = buf->backend.chan;
 	const struct lib_ring_buffer_config *config = &chan->backend.config;
@@ -1556,7 +1633,7 @@ void lib_ring_buffer_switch_remote(struct lib_ring_buffer *buf)
 	 * With global synchronization we don't need to use the IPI scheme.
 	 */
 	if (config->sync == RING_BUFFER_SYNC_GLOBAL) {
-		lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
+		lib_ring_buffer_switch_slow(buf, mode);
 		return;
 	}
 
@@ -1575,10 +1652,15 @@ void lib_ring_buffer_switch_remote(struct lib_ring_buffer *buf)
 				 remote_switch, buf, 1);
 	if (ret) {
 		/* Remote CPU is offline, do it ourselves. */
-		lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
+		lib_ring_buffer_switch_slow(buf, mode);
 	}
 	put_online_cpus();
 }
+
+void lib_ring_buffer_switch_remote(struct lib_ring_buffer *buf)
+{
+	_lib_ring_buffer_switch_remote(buf, SWITCH_ACTIVE);
+}
 EXPORT_SYMBOL_GPL(lib_ring_buffer_switch_remote);
 
 /*
@@ -1739,6 +1821,25 @@ retry:
 	return 0;
 }
 
+static struct lib_ring_buffer *get_current_buf(struct channel *chan, int cpu)
+{
+	const struct lib_ring_buffer_config *config = &chan->backend.config;
+
+	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
+		return per_cpu_ptr(chan->backend.buf, cpu);
+	else
+		return chan->backend.buf;
+}
+
+void lib_ring_buffer_lost_event_too_big(struct channel *chan)
+{
+	const struct lib_ring_buffer_config *config = &chan->backend.config;
+	struct lib_ring_buffer *buf = get_current_buf(chan, smp_processor_id());
+
+	v_inc(config, &buf->records_lost_big);
+}
+EXPORT_SYMBOL_GPL(lib_ring_buffer_lost_event_too_big);
+
 /**
  * lib_ring_buffer_reserve_slow - Atomic slot reservation in a buffer.
  * @ctx: ring buffer context.
@@ -1755,12 +1856,7 @@ int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx)
 	struct switch_offsets offsets;
 	int ret;
 
-	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
-		buf = per_cpu_ptr(chan->backend.buf, ctx->cpu);
-	else
-		buf = chan->backend.buf;
-	ctx->buf = buf;
-
+	ctx->buf = buf = get_current_buf(chan, ctx->cpu);
 	offsets.size = 0;
 
 	do {
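
For readers wondering how the quiescent API introduced above is meant to be
driven: the code below is a minimal sketch, assuming a tracer stop/start path
that brackets the period during which buffers must stay flushed. The
my_session_stop() and my_session_start() helpers are hypothetical and are not
part of this patch; lib_ring_buffer_set_quiescent_channel() and
lib_ring_buffer_clear_quiescent_channel() are the symbols the patch exports.

/*
 * Usage sketch only (hypothetical caller, not part of the patch).
 */
#include <wrapper/ringbuffer/frontend.h>

static void my_session_stop(struct channel *chan)	/* hypothetical */
{
	/*
	 * Force a final SWITCH_FLUSH on every buffer of the channel and
	 * latch buf->quiescent, so subsequent snapshot or destroy paths
	 * do not re-flush an already-quiescent buffer.
	 */
	lib_ring_buffer_set_quiescent_channel(chan);
}

static void my_session_start(struct channel *chan)	/* hypothetical */
{
	/* Re-arm flushing when tracing resumes. */
	lib_ring_buffer_clear_quiescent_channel(chan);
}

Note how this pairs with the snapshot path in the patch: lib_ring_buffer_snapshot()
only issues its own remote SWITCH_FLUSH when buf->quiescent is not already set.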