#include <ringbuffer/iterator.h>
#include <ringbuffer/nohz.h>
#include <wrapper/atomic.h>
+#include <wrapper/cpu.h>
#include <wrapper/kref.h>
#include <wrapper/percpu-defs.h>
#include <wrapper/timer.h>
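The new <wrapper/cpu.h> include backs the lttng_cpus_read_lock()/lttng_cpus_read_unlock() calls introduced below: the kernel renamed get_online_cpus()/put_online_cpus() to cpus_read_lock()/cpus_read_unlock() in 4.13 and later removed the old names. A minimal sketch of what the wrapper presumably provides, assuming it follows the usual LTTng version-gated pattern:

/* Illustrative sketch; not part of this patch. */
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,13,0))
static inline void lttng_cpus_read_lock(void)
{
	cpus_read_lock();
}

static inline void lttng_cpus_read_unlock(void)
{
	cpus_read_unlock();
}
#else
static inline void lttng_cpus_read_lock(void)
{
	get_online_cpus();
}

static inline void lttng_cpus_read_unlock(void)
{
	put_online_cpus();
}
#endif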
EXPORT_PER_CPU_SYMBOL(lib_ring_buffer_nesting);
static
-void lib_ring_buffer_print_errors(struct channel *chan,
- struct lib_ring_buffer *buf, int cpu);
+void lib_ring_buffer_print_errors(struct lttng_kernel_ring_buffer_channel *chan,
+ struct lttng_kernel_ring_buffer *buf, int cpu);
static
-void _lib_ring_buffer_switch_remote(struct lib_ring_buffer *buf,
+void _lib_ring_buffer_switch_remote(struct lttng_kernel_ring_buffer *buf,
enum switch_mode mode);
static
-int lib_ring_buffer_poll_deliver(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf,
- struct channel *chan)
+int lib_ring_buffer_poll_deliver(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf,
+ struct lttng_kernel_ring_buffer_channel *chan)
{
unsigned long consumed_old, consumed_idx, commit_count, write_offset;
/*
* Must be called under cpu hotplug protection.
*/
-void lib_ring_buffer_free(struct lib_ring_buffer *buf)
+void lib_ring_buffer_free(struct lttng_kernel_ring_buffer *buf)
{
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
irq_work_sync(&buf->wakeup_pending);
* should not be using the iterator concurrently with reset. The previous
* current iterator record is reset.
*/
-void lib_ring_buffer_reset(struct lib_ring_buffer *buf)
+void lib_ring_buffer_reset(struct lttng_kernel_ring_buffer *buf)
{
- struct channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
unsigned int i;
/*
* be using the iterator concurrently with reset. The previous current iterator
* record is reset.
*/
-void channel_reset(struct channel *chan)
+void channel_reset(struct lttng_kernel_ring_buffer_channel *chan)
{
/*
* Reset iterators first. Will put the subbuffer if held for reading.
static void lib_ring_buffer_pending_wakeup_buf(struct irq_work *entry)
{
- struct lib_ring_buffer *buf = container_of(entry, struct lib_ring_buffer,
+ struct lttng_kernel_ring_buffer *buf = container_of(entry, struct lttng_kernel_ring_buffer,
wakeup_pending);
wake_up_interruptible(&buf->read_wait);
}
static void lib_ring_buffer_pending_wakeup_chan(struct irq_work *entry)
{
- struct channel *chan = container_of(entry, struct channel, wakeup_pending);
+ struct lttng_kernel_ring_buffer_channel *chan = container_of(entry, struct lttng_kernel_ring_buffer_channel, wakeup_pending);
wake_up_interruptible(&chan->read_wait);
}
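Both wakeup handlers follow the kernel irq_work pattern: a tracing site cannot safely take the wait-queue lock, so it queues deferred work and the wake-up runs later from a safe interrupt context. A hedged sketch of the wiring (init_irq_work() and irq_work_queue() are the standard kernel API; the field names come from this patch):

/* Illustrative sketch; not part of this patch. */
init_irq_work(&buf->wakeup_pending, lib_ring_buffer_pending_wakeup_buf);

/* Later, from a context where wake_up_interruptible() is unsafe: */
irq_work_queue(&buf->wakeup_pending);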
/*
* Must be called under cpu hotplug protection.
*/
-int lib_ring_buffer_create(struct lib_ring_buffer *buf,
+int lib_ring_buffer_create(struct lttng_kernel_ring_buffer *buf,
struct channel_backend *chanb, int cpu)
{
- const struct lib_ring_buffer_config *config = &chanb->config;
- struct channel *chan = container_of(chanb, struct channel, backend);
+ const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
+ struct lttng_kernel_ring_buffer_channel *chan = container_of(chanb, struct lttng_kernel_ring_buffer_channel, backend);
void *priv = chanb->priv;
size_t subbuf_header_size;
u64 tsc;
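LTTNG_TIMER_FUNC_ARG_TYPE and lttng_from_timer() below come from <wrapper/timer.h> and paper over the 4.15 timer API change, where callbacks started receiving the struct timer_list pointer instead of an unsigned long cookie. A sketch of the assumed wrapper definitions:

/* Illustrative sketch; not part of this patch. */
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,15,0))
#define LTTNG_TIMER_FUNC_ARG_TYPE	struct timer_list *
#define lttng_from_timer(var, callback_timer, timer_fieldname)	\
	from_timer(var, callback_timer, timer_fieldname)
#else
#define LTTNG_TIMER_FUNC_ARG_TYPE	unsigned long
#define lttng_from_timer(var, timer_data, timer_fieldname)	\
	((typeof(var))timer_data)
#endif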
static void switch_buffer_timer(LTTNG_TIMER_FUNC_ARG_TYPE t)
{
- struct lib_ring_buffer *buf = lttng_from_timer(buf, t, switch_timer);
- struct channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_kernel_ring_buffer *buf = lttng_from_timer(buf, t, switch_timer);
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
/*
* Only flush buffers periodically if readers are active.
/*
* Called with ring_buffer_nohz_lock held for per-cpu buffers.
*/
-static void lib_ring_buffer_start_switch_timer(struct lib_ring_buffer *buf)
+static void lib_ring_buffer_start_switch_timer(struct lttng_kernel_ring_buffer *buf)
{
- struct channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
unsigned int flags = 0;
if (!chan->switch_timer_interval || buf->switch_timer_enabled)
/*
* Called with ring_buffer_nohz_lock held for per-cpu buffers.
*/
-static void lib_ring_buffer_stop_switch_timer(struct lib_ring_buffer *buf)
+static void lib_ring_buffer_stop_switch_timer(struct lttng_kernel_ring_buffer *buf)
{
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
if (!chan->switch_timer_interval || !buf->switch_timer_enabled)
return;
*/
static void read_buffer_timer(LTTNG_TIMER_FUNC_ARG_TYPE t)
{
- struct lib_ring_buffer *buf = lttng_from_timer(buf, t, read_timer);
- struct channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_kernel_ring_buffer *buf = lttng_from_timer(buf, t, read_timer);
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
CHAN_WARN_ON(chan, !buf->backend.allocated);
/*
* Called with ring_buffer_nohz_lock held for per-cpu buffers.
*/
-static void lib_ring_buffer_start_read_timer(struct lib_ring_buffer *buf)
+static void lib_ring_buffer_start_read_timer(struct lttng_kernel_ring_buffer *buf)
{
- struct channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
unsigned int flags = 0;
if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER
/*
* Called with ring_buffer_nohz_lock held for per-cpu buffers.
*/
-static void lib_ring_buffer_stop_read_timer(struct lib_ring_buffer *buf)
+static void lib_ring_buffer_stop_read_timer(struct lttng_kernel_ring_buffer *buf)
{
- struct channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER
|| !chan->read_timer_interval
int lttng_cpuhp_rb_frontend_dead(unsigned int cpu,
struct lttng_cpuhp_node *node)
{
- struct channel *chan = container_of(node, struct channel,
+ struct lttng_kernel_ring_buffer_channel *chan = container_of(node, struct lttng_kernel_ring_buffer_channel,
cpuhp_prepare);
- struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
int lttng_cpuhp_rb_frontend_online(unsigned int cpu,
struct lttng_cpuhp_node *node)
{
- struct channel *chan = container_of(node, struct channel,
+ struct lttng_kernel_ring_buffer_channel *chan = container_of(node, struct lttng_kernel_ring_buffer_channel,
cpuhp_online);
- struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
int lttng_cpuhp_rb_frontend_offline(unsigned int cpu,
struct lttng_cpuhp_node *node)
{
- struct channel *chan = container_of(node, struct channel,
+ struct lttng_kernel_ring_buffer_channel *chan = container_of(node, struct lttng_kernel_ring_buffer_channel,
cpuhp_online);
- struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
- struct channel *chan = container_of(nb, struct channel,
+ struct lttng_kernel_ring_buffer_channel *chan = container_of(nb, struct lttng_kernel_ring_buffer_channel,
cpu_hp_notifier);
- struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
if (!chan->cpu_hp_enable)
return NOTIFY_DONE;
unsigned long val,
void *data)
{
- struct channel *chan = container_of(nb, struct channel,
+ struct lttng_kernel_ring_buffer_channel *chan = container_of(nb, struct lttng_kernel_ring_buffer_channel,
tick_nohz_notifier);
- const struct lib_ring_buffer_config *config = &chan->backend.config;
- struct lib_ring_buffer *buf;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_kernel_ring_buffer *buf;
int cpu = smp_processor_id();
if (config->alloc != RING_BUFFER_ALLOC_PER_CPU) {
/*
* Holds CPU hotplug.
*/
-static void channel_unregister_notifiers(struct channel *chan)
+static void channel_unregister_notifiers(struct lttng_kernel_ring_buffer_channel *chan)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
channel_iterator_unregister_notifiers(chan);
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
int cpu;
#ifdef CONFIG_HOTPLUG_CPU
- get_online_cpus();
+ lttng_cpus_read_lock();
chan->cpu_hp_enable = 0;
for_each_online_cpu(cpu) {
- struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
+ struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
cpu);
lib_ring_buffer_stop_switch_timer(buf);
lib_ring_buffer_stop_read_timer(buf);
}
- put_online_cpus();
+ lttng_cpus_read_unlock();
unregister_cpu_notifier(&chan->cpu_hp_notifier);
#else
for_each_possible_cpu(cpu) {
- struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
+ struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
cpu);
lib_ring_buffer_stop_switch_timer(buf);
lib_ring_buffer_stop_read_timer(buf);
}
#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
} else {
- struct lib_ring_buffer *buf = chan->backend.buf;
+ struct lttng_kernel_ring_buffer *buf = chan->backend.buf;
lib_ring_buffer_stop_switch_timer(buf);
lib_ring_buffer_stop_read_timer(buf);
channel_backend_unregister_notifiers(&chan->backend);
}
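channel_unregister_notifiers() keeps three compile-time teardown paths: the cpuhp state machine on >= 4.10 kernels (served by the lttng_cpuhp_rb_frontend_* callbacks above), the legacy CPU notifier under CONFIG_HOTPLUG_CPU, and a plain for_each_possible_cpu() walk when hotplug is compiled out. For context, a hedged sketch of the >= 4.10 registration side; the state handles and the LTTNG_RING_BUFFER_FRONTEND component tag are assumptions inferred from the lttng_cpuhp_node usage above:

/* Illustrative sketch; not part of this patch. */
chan->cpuhp_prepare.component = LTTNG_RING_BUFFER_FRONTEND;
ret = cpuhp_state_add_instance_nocalls(lttng_rb_hp_prepare,
		&chan->cpuhp_prepare.node);	/* dead/prepare callback */
if (ret)
	goto cpuhp_prepare_error;

chan->cpuhp_online.component = LTTNG_RING_BUFFER_FRONTEND;
ret = cpuhp_state_add_instance(lttng_rb_hp_online,
		&chan->cpuhp_online.node);	/* online/offline callbacks */
if (ret)
	goto cpuhp_online_error;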
-static void lib_ring_buffer_set_quiescent(struct lib_ring_buffer *buf)
+static void lib_ring_buffer_set_quiescent(struct lttng_kernel_ring_buffer *buf)
{
if (!buf->quiescent) {
buf->quiescent = true;
}
}
-static void lib_ring_buffer_clear_quiescent(struct lib_ring_buffer *buf)
+static void lib_ring_buffer_clear_quiescent(struct lttng_kernel_ring_buffer *buf)
{
buf->quiescent = false;
}
-void lib_ring_buffer_set_quiescent_channel(struct channel *chan)
+void lib_ring_buffer_set_quiescent_channel(struct lttng_kernel_ring_buffer_channel *chan)
{
int cpu;
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
- get_online_cpus();
+ lttng_cpus_read_lock();
for_each_channel_cpu(cpu, chan) {
- struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
+ struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
cpu);
lib_ring_buffer_set_quiescent(buf);
}
- put_online_cpus();
+ lttng_cpus_read_unlock();
} else {
- struct lib_ring_buffer *buf = chan->backend.buf;
+ struct lttng_kernel_ring_buffer *buf = chan->backend.buf;
lib_ring_buffer_set_quiescent(buf);
}
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_set_quiescent_channel);
-void lib_ring_buffer_clear_quiescent_channel(struct channel *chan)
+void lib_ring_buffer_clear_quiescent_channel(struct lttng_kernel_ring_buffer_channel *chan)
{
int cpu;
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
- get_online_cpus();
+ lttng_cpus_read_lock();
for_each_channel_cpu(cpu, chan) {
- struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
+ struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
cpu);
lib_ring_buffer_clear_quiescent(buf);
}
- put_online_cpus();
+ lttng_cpus_read_unlock();
} else {
- struct lib_ring_buffer *buf = chan->backend.buf;
+ struct lttng_kernel_ring_buffer *buf = chan->backend.buf;
lib_ring_buffer_clear_quiescent(buf);
}
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_clear_quiescent_channel);
-static void channel_free(struct channel *chan)
+static void channel_free(struct lttng_kernel_ring_buffer_channel *chan)
{
if (chan->backend.release_priv_ops) {
chan->backend.release_priv_ops(chan->backend.priv_ops);
* Holds cpu hotplug.
* Returns NULL on failure.
*/
-struct channel *channel_create(const struct lib_ring_buffer_config *config,
+struct lttng_kernel_ring_buffer_channel *channel_create(const struct lttng_kernel_ring_buffer_config *config,
const char *name, void *priv, void *buf_addr,
size_t subbuf_size,
size_t num_subbuf, unsigned int switch_timer_interval,
unsigned int read_timer_interval)
{
int ret;
- struct channel *chan;
+ struct lttng_kernel_ring_buffer_channel *chan;
if (lib_ring_buffer_check_config(config, switch_timer_interval,
read_timer_interval))
return NULL;
- chan = kzalloc(sizeof(struct channel), GFP_KERNEL);
+ chan = kzalloc(sizeof(struct lttng_kernel_ring_buffer_channel), GFP_KERNEL);
if (!chan)
return NULL;
chan->cpu_hp_notifier.priority = 6;
register_cpu_notifier(&chan->cpu_hp_notifier);
- get_online_cpus();
+ lttng_cpus_read_lock();
for_each_online_cpu(cpu) {
- struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
+ struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
cpu);
spin_lock(&per_cpu(ring_buffer_nohz_lock, cpu));
lib_ring_buffer_start_switch_timer(buf);
spin_unlock(&per_cpu(ring_buffer_nohz_lock, cpu));
}
chan->cpu_hp_enable = 1;
- put_online_cpus();
+ lttng_cpus_read_unlock();
#else
for_each_possible_cpu(cpu) {
- struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
+ struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
cpu);
spin_lock(&per_cpu(ring_buffer_nohz_lock, cpu));
lib_ring_buffer_start_switch_timer(buf);
#endif /* defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER) */
} else {
- struct lib_ring_buffer *buf = chan->backend.buf;
+ struct lttng_kernel_ring_buffer *buf = chan->backend.buf;
lib_ring_buffer_start_switch_timer(buf);
lib_ring_buffer_start_read_timer(buf);
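For orientation, a hypothetical caller of the renamed channel_create(); the client_config, channel name, sizes and timer periods are made up for illustration:

/* Illustrative sketch; not part of this patch. */
struct lttng_kernel_ring_buffer_channel *chan;

chan = channel_create(&client_config, "my_chan", priv, NULL,
		4096 /* subbuf_size (bytes) */,
		4 /* num_subbuf */,
		0 /* switch_timer_interval, 0 = disabled */,
		0 /* read_timer_interval, 0 = disabled */);
if (!chan)
	return -ENOMEM;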
static
void channel_release(struct kref *kref)
{
- struct channel *chan = container_of(kref, struct channel, ref);
+ struct lttng_kernel_ring_buffer_channel *chan = container_of(kref, struct lttng_kernel_ring_buffer_channel, ref);
channel_free(chan);
}
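channel_release() is the kref destructor: the channel is freed only when the last handle drops its reference. The standard kref API usage, for reference:

/* Illustrative sketch; not part of this patch. */
kref_init(&chan->ref);			/* at channel creation */
kref_get(&chan->ref);			/* each additional handle */
kref_put(&chan->ref, channel_release);	/* frees chan on the last put */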
* They should release their handle at that point. Returns the private
* data pointer.
*/
-void *channel_destroy(struct channel *chan)
+void *channel_destroy(struct lttng_kernel_ring_buffer_channel *chan)
{
int cpu;
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
void *priv;
irq_work_sync(&chan->wakeup_pending);
* unregistered.
*/
for_each_channel_cpu(cpu, chan) {
- struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
+ struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
cpu);
if (config->cb.buffer_finalize)
wake_up_interruptible(&buf->read_wait);
}
} else {
- struct lib_ring_buffer *buf = chan->backend.buf;
+ struct lttng_kernel_ring_buffer *buf = chan->backend.buf;
if (config->cb.buffer_finalize)
config->cb.buffer_finalize(buf, chan->backend.priv, -1);
}
EXPORT_SYMBOL_GPL(channel_destroy);
-struct lib_ring_buffer *channel_get_ring_buffer(
- const struct lib_ring_buffer_config *config,
- struct channel *chan, int cpu)
+struct lttng_kernel_ring_buffer *channel_get_ring_buffer(
+ const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer_channel *chan, int cpu)
{
if (config->alloc == RING_BUFFER_ALLOC_GLOBAL)
return chan->backend.buf;
}
EXPORT_SYMBOL_GPL(channel_get_ring_buffer);
-int lib_ring_buffer_open_read(struct lib_ring_buffer *buf)
+int lib_ring_buffer_open_read(struct lttng_kernel_ring_buffer *buf)
{
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
if (!atomic_long_add_unless(&buf->active_readers, 1, 1))
return -EBUSY;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_open_read);
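atomic_long_add_unless(&buf->active_readers, 1, 1) enforces a single concurrent reader: the increment is refused once the count already equals 1. A hedged consumer-side sketch built only from the functions visible in this patch (put_subbuf presumably advances the consumer position):

/* Illustrative sketch; not part of this patch. */
unsigned long consumed, produced;

if (lib_ring_buffer_open_read(buf))
	return -EBUSY;		/* another reader owns the buffer */
if (!lib_ring_buffer_snapshot(buf, &consumed, &produced)
		&& !lib_ring_buffer_get_subbuf(buf, consumed)) {
	/* ... copy the sub-buffer out through the backend ... */
	lib_ring_buffer_put_subbuf(buf);
}
lib_ring_buffer_release_read(buf);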
-void lib_ring_buffer_release_read(struct lib_ring_buffer *buf)
+void lib_ring_buffer_release_read(struct lttng_kernel_ring_buffer *buf)
{
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
CHAN_WARN_ON(chan, atomic_long_read(&buf->active_readers) != 1);
lttng_smp_mb__before_atomic();
* Busy-loop trying to get data if the tick_nohz sequence lock is held.
*/
-int lib_ring_buffer_snapshot(struct lib_ring_buffer *buf,
+int lib_ring_buffer_snapshot(struct lttng_kernel_ring_buffer *buf,
unsigned long *consumed, unsigned long *produced)
{
- struct channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
unsigned long consumed_cur, write_offset;
int finalized;
* This function is meant to provide information on the exact producer and
* consumer positions without regard for the "snapshot" feature.
*/
-int lib_ring_buffer_snapshot_sample_positions(struct lib_ring_buffer *buf,
+int lib_ring_buffer_snapshot_sample_positions(struct lttng_kernel_ring_buffer *buf,
unsigned long *consumed, unsigned long *produced)
{
- struct channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
smp_rmb();
*consumed = atomic_long_read(&buf->consumed);
* @buf: ring buffer
* @consumed_new: new consumed count value
*/
-void lib_ring_buffer_move_consumer(struct lib_ring_buffer *buf,
+void lib_ring_buffer_move_consumer(struct lttng_kernel_ring_buffer *buf,
unsigned long consumed_new)
{
- struct lib_ring_buffer_backend *bufb = &buf->backend;
- struct channel *chan = bufb->chan;
+ struct lttng_kernel_ring_buffer_backend *bufb = &buf->backend;
+ struct lttng_kernel_ring_buffer_channel *chan = bufb->chan;
unsigned long consumed;
CHAN_WARN_ON(chan, atomic_long_read(&buf->active_readers) != 1);
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
static void lib_ring_buffer_flush_read_subbuf_dcache(
- const struct lib_ring_buffer_config *config,
- struct channel *chan,
- struct lib_ring_buffer *buf)
+ const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer_channel *chan,
+ struct lttng_kernel_ring_buffer *buf)
{
- struct lib_ring_buffer_backend_pages *pages;
+ struct lttng_kernel_ring_buffer_backend_pages *pages;
unsigned long sb_bindex, id, i, nr_pages;
if (config->output != RING_BUFFER_MMAP)
pages = buf->backend.array[sb_bindex];
nr_pages = buf->backend.num_pages_per_subbuf;
for (i = 0; i < nr_pages; i++) {
- struct lib_ring_buffer_backend_page *backend_page;
+ struct lttng_kernel_ring_buffer_backend_page *backend_page;
backend_page = &pages->p[i];
flush_dcache_page(pfn_to_page(backend_page->pfn));
}
#else
static void lib_ring_buffer_flush_read_subbuf_dcache(
- const struct lib_ring_buffer_config *config,
- struct channel *chan,
- struct lib_ring_buffer *buf)
+ const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer_channel *chan,
+ struct lttng_kernel_ring_buffer *buf)
{
}
#endif
* data to read at consumed position, or 0 if the get operation succeeds.
* Busy-loop trying to get data if the tick_nohz sequence lock is held.
*/
-int lib_ring_buffer_get_subbuf(struct lib_ring_buffer *buf,
+int lib_ring_buffer_get_subbuf(struct lttng_kernel_ring_buffer *buf,
unsigned long consumed)
{
- struct channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
unsigned long consumed_cur, consumed_idx, commit_count, write_offset;
int ret;
int finalized;
* lib_ring_buffer_put_subbuf - release exclusive subbuffer access
* @buf: ring buffer
*/
-void lib_ring_buffer_put_subbuf(struct lib_ring_buffer *buf)
+void lib_ring_buffer_put_subbuf(struct lttng_kernel_ring_buffer *buf)
{
- struct lib_ring_buffer_backend *bufb = &buf->backend;
- struct channel *chan = bufb->chan;
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_kernel_ring_buffer_backend *bufb = &buf->backend;
+ struct lttng_kernel_ring_buffer_channel *chan = bufb->chan;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
unsigned long read_sb_bindex, consumed_idx, consumed;
CHAN_WARN_ON(chan, atomic_long_read(&buf->active_readers) != 1);
 * position and the writer position (inclusive).
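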
*/
static
-void lib_ring_buffer_print_subbuffer_errors(struct lib_ring_buffer *buf,
- struct channel *chan,
+void lib_ring_buffer_print_subbuffer_errors(struct lttng_kernel_ring_buffer *buf,
+ struct lttng_kernel_ring_buffer_channel *chan,
unsigned long cons_offset,
int cpu)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
unsigned long cons_idx, commit_count, commit_count_sb;
cons_idx = subbuf_index(cons_offset, chan);
}
static
-void lib_ring_buffer_print_buffer_errors(struct lib_ring_buffer *buf,
- struct channel *chan,
+void lib_ring_buffer_print_buffer_errors(struct lttng_kernel_ring_buffer *buf,
+ struct lttng_kernel_ring_buffer_channel *chan,
void *priv, int cpu)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
unsigned long write_offset, cons_offset;
/*
#ifdef LTTNG_RING_BUFFER_COUNT_EVENTS
static
-void lib_ring_buffer_print_records_count(struct channel *chan,
- struct lib_ring_buffer *buf,
+void lib_ring_buffer_print_records_count(struct lttng_kernel_ring_buffer_channel *chan,
+ struct lttng_kernel_ring_buffer *buf,
int cpu)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
if (!strcmp(chan->backend.name, "relay-metadata")) {
printk(KERN_DEBUG "LTTng: ring buffer %s: %lu records written, "
}
#else
static
-void lib_ring_buffer_print_records_count(struct channel *chan,
- struct lib_ring_buffer *buf,
+void lib_ring_buffer_print_records_count(struct lttng_kernel_ring_buffer_channel *chan,
+ struct lttng_kernel_ring_buffer *buf,
int cpu)
{
}
#endif
static
-void lib_ring_buffer_print_errors(struct channel *chan,
- struct lib_ring_buffer *buf, int cpu)
+void lib_ring_buffer_print_errors(struct lttng_kernel_ring_buffer_channel *chan,
+ struct lttng_kernel_ring_buffer *buf, int cpu)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
void *priv = chan->backend.priv;
lib_ring_buffer_print_records_count(chan, buf, cpu);
* Only executed when the buffer is finalized, in SWITCH_FLUSH.
*/
static
-void lib_ring_buffer_switch_old_start(struct lib_ring_buffer *buf,
- struct channel *chan,
+void lib_ring_buffer_switch_old_start(struct lttng_kernel_ring_buffer *buf,
+ struct lttng_kernel_ring_buffer_channel *chan,
struct switch_offsets *offsets,
u64 tsc)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
unsigned long oldidx = subbuf_index(offsets->old, chan);
unsigned long commit_count;
struct commit_counters_hot *cc_hot;
* subbuffer.
*/
static
-void lib_ring_buffer_switch_old_end(struct lib_ring_buffer *buf,
- struct channel *chan,
+void lib_ring_buffer_switch_old_end(struct lttng_kernel_ring_buffer *buf,
+ struct lttng_kernel_ring_buffer_channel *chan,
struct switch_offsets *offsets,
u64 tsc)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
unsigned long oldidx = subbuf_index(offsets->old - 1, chan);
unsigned long commit_count, padding_size, data_size;
struct commit_counters_hot *cc_hot;
 * that this code is executed before the delivery of this sub-buffer.
*/
static
-void lib_ring_buffer_switch_new_start(struct lib_ring_buffer *buf,
- struct channel *chan,
+void lib_ring_buffer_switch_new_start(struct lttng_kernel_ring_buffer *buf,
+ struct lttng_kernel_ring_buffer_channel *chan,
struct switch_offsets *offsets,
u64 tsc)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
unsigned long beginidx = subbuf_index(offsets->begin, chan);
unsigned long commit_count;
struct commit_counters_hot *cc_hot;
* we are currently doing the space reservation.
*/
static
-void lib_ring_buffer_switch_new_end(struct lib_ring_buffer *buf,
- struct channel *chan,
+void lib_ring_buffer_switch_new_end(struct lttng_kernel_ring_buffer *buf,
+ struct lttng_kernel_ring_buffer_channel *chan,
struct switch_offsets *offsets,
u64 tsc)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
unsigned long endidx, data_size;
u64 *ts_end;
*/
static
int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
- struct lib_ring_buffer *buf,
- struct channel *chan,
+ struct lttng_kernel_ring_buffer *buf,
+ struct lttng_kernel_ring_buffer_channel *chan,
struct switch_offsets *offsets,
u64 *tsc)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
unsigned long off, reserve_commit_diff;
offsets->begin = v_read(config, &buf->offset);
* operations, this function must be called from the CPU which owns the buffer
 * for an ACTIVE flush.
*/
-void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf, enum switch_mode mode)
+void lib_ring_buffer_switch_slow(struct lttng_kernel_ring_buffer *buf, enum switch_mode mode)
{
- struct channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
struct switch_offsets offsets;
unsigned long oldidx;
u64 tsc;
EXPORT_SYMBOL_GPL(lib_ring_buffer_switch_slow);
struct switch_param {
- struct lib_ring_buffer *buf;
+ struct lttng_kernel_ring_buffer *buf;
enum switch_mode mode;
};
static void remote_switch(void *info)
{
struct switch_param *param = info;
- struct lib_ring_buffer *buf = param->buf;
+ struct lttng_kernel_ring_buffer *buf = param->buf;
lib_ring_buffer_switch_slow(buf, param->mode);
}
-static void _lib_ring_buffer_switch_remote(struct lib_ring_buffer *buf,
+static void _lib_ring_buffer_switch_remote(struct lttng_kernel_ring_buffer *buf,
enum switch_mode mode)
{
- struct channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
int ret;
struct switch_param param;
}
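The remote switch presumably pushes lib_ring_buffer_switch_slow() onto the CPU owning the buffer via an IPI, falling back to a local flush when that CPU is offline. A sketch of the dispatch under that assumption (buf->backend.cpu as the owner field is also assumed):

/* Illustrative sketch; not part of this patch. */
param.buf = buf;
param.mode = mode;
ret = smp_call_function_single(buf->backend.cpu, remote_switch,
		&param, 1 /* wait for completion */);
if (ret) {
	/* Owning CPU is offline: no writer can race, switch locally. */
	lib_ring_buffer_switch_slow(buf, mode);
}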
/* Switch sub-buffer if current sub-buffer is non-empty. */
-void lib_ring_buffer_switch_remote(struct lib_ring_buffer *buf)
+void lib_ring_buffer_switch_remote(struct lttng_kernel_ring_buffer *buf)
{
_lib_ring_buffer_switch_remote(buf, SWITCH_ACTIVE);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_switch_remote);
/* Switch sub-buffer even if current sub-buffer is empty. */
-void lib_ring_buffer_switch_remote_empty(struct lib_ring_buffer *buf)
+void lib_ring_buffer_switch_remote_empty(struct lttng_kernel_ring_buffer *buf)
{
_lib_ring_buffer_switch_remote(buf, SWITCH_FLUSH);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_switch_remote_empty);
-void lib_ring_buffer_clear(struct lib_ring_buffer *buf)
+void lib_ring_buffer_clear(struct lttng_kernel_ring_buffer *buf)
{
- struct lib_ring_buffer_backend *bufb = &buf->backend;
- struct channel *chan = bufb->chan;
+ struct lttng_kernel_ring_buffer_backend *bufb = &buf->backend;
+ struct lttng_kernel_ring_buffer_channel *chan = bufb->chan;
lib_ring_buffer_switch_remote(buf);
lib_ring_buffer_clear_reader(buf, chan);
* -EIO if data cannot be written into the buffer for any other reason.
*/
static
-int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
- struct channel *chan,
+int lib_ring_buffer_try_reserve_slow(struct lttng_kernel_ring_buffer *buf,
+ struct lttng_kernel_ring_buffer_channel *chan,
struct switch_offsets *offsets,
- struct lib_ring_buffer_ctx *ctx,
+ struct lttng_kernel_ring_buffer_ctx *ctx,
void *client_ctx)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
unsigned long reserve_commit_diff, offset_cmp;
retry:
offsets->switch_old_end = 0;
offsets->pre_header_padding = 0;
- ctx->tsc = config->cb.ring_buffer_clock_read(chan);
- if ((int64_t) ctx->tsc == -EIO)
+ ctx->priv.tsc = config->cb.ring_buffer_clock_read(chan);
+ if ((int64_t) ctx->priv.tsc == -EIO)
return -EIO;
- if (last_tsc_overflow(config, buf, ctx->tsc))
- ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
+ if (last_tsc_overflow(config, buf, ctx->priv.tsc))
+ ctx->priv.rflags |= RING_BUFFER_RFLAG_FULL_TSC;
- if (unlikely(subbuf_offset(offsets->begin, ctx->chan) == 0)) {
+ if (unlikely(subbuf_offset(offsets->begin, ctx->priv.chan) == 0)) {
offsets->switch_new_start = 1; /* For offsets->begin */
} else {
offsets->size = config->cb.record_header_size(config, chan,
return 0;
}
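The full-TSC logic above works because only the low config->tsc_bits bits of the timestamp are written in a compact record header; whenever the high-order bits have moved since the previous record, RING_BUFFER_RFLAG_FULL_TSC forces a full timestamp. A sketch of the overflow check, assuming the usual lib_ring_buffer scheme:

/* Illustrative sketch; not part of this patch. */
static inline
int last_tsc_overflow_sketch(const struct lttng_kernel_ring_buffer_config *config,
		struct lttng_kernel_ring_buffer *buf, u64 tsc)
{
	unsigned long tsc_shifted = (unsigned long)(tsc >> config->tsc_bits);

	/* High-order bits changed since the last record: full TSC needed. */
	return tsc_shifted != v_read(config, &buf->last_tsc);
}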
-static struct lib_ring_buffer *get_current_buf(struct channel *chan, int cpu)
+static struct lttng_kernel_ring_buffer *get_current_buf(struct lttng_kernel_ring_buffer_channel *chan, int cpu)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
return per_cpu_ptr(chan->backend.buf, cpu);
return chan->backend.buf;
}
-void lib_ring_buffer_lost_event_too_big(struct channel *chan)
+void lib_ring_buffer_lost_event_too_big(struct lttng_kernel_ring_buffer_channel *chan)
{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
- struct lib_ring_buffer *buf = get_current_buf(chan, smp_processor_id());
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_kernel_ring_buffer *buf = get_current_buf(chan, smp_processor_id());
v_inc(config, &buf->records_lost_big);
}
* -EIO for other errors, else returns 0.
* It will take care of sub-buffer switching.
*/
-int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx,
+int lib_ring_buffer_reserve_slow(struct lttng_kernel_ring_buffer_ctx *ctx,
void *client_ctx)
{
- struct channel *chan = ctx->chan;
- const struct lib_ring_buffer_config *config = &chan->backend.config;
- struct lib_ring_buffer *buf;
+ struct lttng_kernel_ring_buffer_channel *chan = ctx->priv.chan;
+ const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_kernel_ring_buffer *buf;
struct switch_offsets offsets;
int ret;
- ctx->buf = buf = get_current_buf(chan, ctx->cpu);
+ ctx->priv.buf = buf = get_current_buf(chan, ctx->priv.reserve_cpu);
offsets.size = 0;
do {
* records, never the opposite (missing a full TSC record when it would
* be needed).
*/
- save_last_tsc(config, buf, ctx->tsc);
+ save_last_tsc(config, buf, ctx->priv.tsc);
/*
* Push the reader if necessary
if (unlikely(offsets.switch_old_end)) {
lib_ring_buffer_clear_noref(config, &buf->backend,
subbuf_index(offsets.old - 1, chan));
- lib_ring_buffer_switch_old_end(buf, chan, &offsets, ctx->tsc);
+ lib_ring_buffer_switch_old_end(buf, chan, &offsets, ctx->priv.tsc);
}
/*
* Populate new subbuffer.
*/
if (unlikely(offsets.switch_new_start))
- lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx->tsc);
+ lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx->priv.tsc);
if (unlikely(offsets.switch_new_end))
- lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx->tsc);
+ lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx->priv.tsc);
- ctx->slot_size = offsets.size;
- ctx->pre_offset = offsets.begin;
- ctx->buf_offset = offsets.begin + offsets.pre_header_padding;
+ ctx->priv.slot_size = offsets.size;
+ ctx->priv.pre_offset = offsets.begin;
+ ctx->priv.buf_offset = offsets.begin + offsets.pre_header_padding;
return 0;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_reserve_slow);
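Throughout this function the former ctx->tsc, ctx->rflags, ctx->buf and offset fields move under ctx->priv, splitting the reservation context into a client-visible part and ring-buffer-private state. A hedged sketch of the assumed layout, with the field set reconstructed purely from the accesses in this patch:

/* Illustrative sketch; not part of this patch. */
struct lttng_kernel_ring_buffer_ctx_private {
	/* Filled in by the ring buffer during reservation. */
	struct lttng_kernel_ring_buffer_channel *chan;
	struct lttng_kernel_ring_buffer *buf;
	unsigned long slot_size;	/* size of the reserved slot */
	unsigned long buf_offset;	/* payload offset, after header */
	unsigned long pre_offset;	/* offset before the record header */
	u64 tsc;			/* timestamp of the record */
	unsigned int rflags;		/* e.g. RING_BUFFER_RFLAG_FULL_TSC */
	int reserve_cpu;		/* CPU the record is reserved on */
};

struct lttng_kernel_ring_buffer_ctx {
	struct lttng_kernel_ring_buffer_ctx_private priv;
	/* Client-owned fields (event size, alignment, ...) follow. */
};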
static
-void lib_ring_buffer_vmcore_check_deliver(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf,
+void lib_ring_buffer_vmcore_check_deliver(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf,
unsigned long commit_count,
unsigned long idx)
{
*/
#ifdef LTTNG_RING_BUFFER_COUNT_EVENTS
static
-void deliver_count_events(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf,
+void deliver_count_events(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf,
unsigned long idx)
{
v_add(config, subbuffer_get_records_count(config,
}
#else /* LTTNG_RING_BUFFER_COUNT_EVENTS */
static
-void deliver_count_events(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf,
+void deliver_count_events(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf,
unsigned long idx)
{
}
#endif /* #else LTTNG_RING_BUFFER_COUNT_EVENTS */
-void lib_ring_buffer_check_deliver_slow(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf,
- struct channel *chan,
+void lib_ring_buffer_check_deliver_slow(const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf,
+ struct lttng_kernel_ring_buffer_channel *chan,
unsigned long offset,
unsigned long commit_count,
unsigned long idx,