EXPORT_PER_CPU_SYMBOL(lib_ring_buffer_nesting);
static
-void lib_ring_buffer_print_errors(struct channel *chan,
+void lib_ring_buffer_print_errors(struct lttng_kernel_ring_buffer_channel *chan,
struct lib_ring_buffer *buf, int cpu);
static
void _lib_ring_buffer_switch_remote(struct lib_ring_buffer *buf,
static
int lib_ring_buffer_poll_deliver(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer *buf,
- struct channel *chan)
+ struct lttng_kernel_ring_buffer_channel *chan)
{
unsigned long consumed_old, consumed_idx, commit_count, write_offset;
*/
void lib_ring_buffer_free(struct lib_ring_buffer *buf)
{
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
irq_work_sync(&buf->wakeup_pending);
*/
void lib_ring_buffer_reset(struct lib_ring_buffer *buf)
{
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned int i;
* be using the iterator concurrently with reset. The previous current iterator
* record is reset.
*/
-void channel_reset(struct channel *chan)
+void channel_reset(struct lttng_kernel_ring_buffer_channel *chan)
{
/*
* Reset iterators first. Will put the subbuffer if held for reading.
static void lib_ring_buffer_pending_wakeup_chan(struct irq_work *entry)
{
- struct channel *chan = container_of(entry, struct channel, wakeup_pending);
+ /*
+  * irq_work handler: resolve the channel embedding this wakeup_pending
+  * work item, then wake readers blocked on the channel read wait queue.
+  */
+ struct lttng_kernel_ring_buffer_channel *chan = container_of(entry, struct lttng_kernel_ring_buffer_channel, wakeup_pending);
wake_up_interruptible(&chan->read_wait);
}
struct channel_backend *chanb, int cpu)
{
const struct lib_ring_buffer_config *config = &chanb->config;
- struct channel *chan = container_of(chanb, struct channel, backend);
+ struct lttng_kernel_ring_buffer_channel *chan = container_of(chanb, struct lttng_kernel_ring_buffer_channel, backend);
void *priv = chanb->priv;
size_t subbuf_header_size;
u64 tsc;
static void switch_buffer_timer(LTTNG_TIMER_FUNC_ARG_TYPE t)
{
struct lib_ring_buffer *buf = lttng_from_timer(buf, t, switch_timer);
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
const struct lib_ring_buffer_config *config = &chan->backend.config;
/*
*/
static void lib_ring_buffer_start_switch_timer(struct lib_ring_buffer *buf)
{
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned int flags = 0;
*/
static void lib_ring_buffer_stop_switch_timer(struct lib_ring_buffer *buf)
{
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
if (!chan->switch_timer_interval || !buf->switch_timer_enabled)
return;
static void read_buffer_timer(LTTNG_TIMER_FUNC_ARG_TYPE t)
{
struct lib_ring_buffer *buf = lttng_from_timer(buf, t, read_timer);
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
const struct lib_ring_buffer_config *config = &chan->backend.config;
CHAN_WARN_ON(chan, !buf->backend.allocated);
*/
static void lib_ring_buffer_start_read_timer(struct lib_ring_buffer *buf)
{
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned int flags = 0;
*/
static void lib_ring_buffer_stop_read_timer(struct lib_ring_buffer *buf)
{
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
const struct lib_ring_buffer_config *config = &chan->backend.config;
if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER
int lttng_cpuhp_rb_frontend_dead(unsigned int cpu,
struct lttng_cpuhp_node *node)
{
- struct channel *chan = container_of(node, struct channel,
+ struct lttng_kernel_ring_buffer_channel *chan = container_of(node, struct lttng_kernel_ring_buffer_channel,
cpuhp_prepare);
struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
const struct lib_ring_buffer_config *config = &chan->backend.config;
int lttng_cpuhp_rb_frontend_online(unsigned int cpu,
struct lttng_cpuhp_node *node)
{
- struct channel *chan = container_of(node, struct channel,
+ struct lttng_kernel_ring_buffer_channel *chan = container_of(node, struct lttng_kernel_ring_buffer_channel,
cpuhp_online);
struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
const struct lib_ring_buffer_config *config = &chan->backend.config;
int lttng_cpuhp_rb_frontend_offline(unsigned int cpu,
struct lttng_cpuhp_node *node)
{
- struct channel *chan = container_of(node, struct channel,
+ struct lttng_kernel_ring_buffer_channel *chan = container_of(node, struct lttng_kernel_ring_buffer_channel,
cpuhp_online);
struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
const struct lib_ring_buffer_config *config = &chan->backend.config;
void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
- struct channel *chan = container_of(nb, struct channel,
+ struct lttng_kernel_ring_buffer_channel *chan = container_of(nb, struct lttng_kernel_ring_buffer_channel,
cpu_hp_notifier);
struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned long val,
void *data)
{
- struct channel *chan = container_of(nb, struct channel,
+ struct lttng_kernel_ring_buffer_channel *chan = container_of(nb, struct lttng_kernel_ring_buffer_channel,
tick_nohz_notifier);
const struct lib_ring_buffer_config *config = &chan->backend.config;
struct lib_ring_buffer *buf;
/*
* Holds CPU hotplug.
*/
-static void channel_unregister_notifiers(struct channel *chan)
+static void channel_unregister_notifiers(struct lttng_kernel_ring_buffer_channel *chan)
{
const struct lib_ring_buffer_config *config = &chan->backend.config;
buf->quiescent = false;
}
-void lib_ring_buffer_set_quiescent_channel(struct channel *chan)
+void lib_ring_buffer_set_quiescent_channel(struct lttng_kernel_ring_buffer_channel *chan)
{
int cpu;
const struct lib_ring_buffer_config *config = &chan->backend.config;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_set_quiescent_channel);
-void lib_ring_buffer_clear_quiescent_channel(struct channel *chan)
+void lib_ring_buffer_clear_quiescent_channel(struct lttng_kernel_ring_buffer_channel *chan)
{
int cpu;
const struct lib_ring_buffer_config *config = &chan->backend.config;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_clear_quiescent_channel);
-static void channel_free(struct channel *chan)
+static void channel_free(struct lttng_kernel_ring_buffer_channel *chan)
{
if (chan->backend.release_priv_ops) {
chan->backend.release_priv_ops(chan->backend.priv_ops);
* Holds cpu hotplug.
* Returns NULL on failure.
*/
-struct channel *channel_create(const struct lib_ring_buffer_config *config,
+struct lttng_kernel_ring_buffer_channel *channel_create(const struct lib_ring_buffer_config *config,
const char *name, void *priv, void *buf_addr,
size_t subbuf_size,
size_t num_subbuf, unsigned int switch_timer_interval,
unsigned int read_timer_interval)
{
int ret;
- struct channel *chan;
+ struct lttng_kernel_ring_buffer_channel *chan;
if (lib_ring_buffer_check_config(config, switch_timer_interval,
read_timer_interval))
return NULL;
- chan = kzalloc(sizeof(struct channel), GFP_KERNEL);
+ chan = kzalloc(sizeof(struct lttng_kernel_ring_buffer_channel), GFP_KERNEL);
if (!chan)
return NULL;
static
void channel_release(struct kref *kref)
{
- struct channel *chan = container_of(kref, struct channel, ref);
+ /*
+  * kref release callback: the last reference to the channel was dropped.
+  * Resolve the channel from its embedded kref and free it.
+  */
+ struct lttng_kernel_ring_buffer_channel *chan = container_of(kref, struct lttng_kernel_ring_buffer_channel, ref);
channel_free(chan);
}
* They should release their handle at that point. Returns the private
* data pointer.
*/
-void *channel_destroy(struct channel *chan)
+void *channel_destroy(struct lttng_kernel_ring_buffer_channel *chan)
{
int cpu;
const struct lib_ring_buffer_config *config = &chan->backend.config;
struct lib_ring_buffer *channel_get_ring_buffer(
const struct lib_ring_buffer_config *config,
- struct channel *chan, int cpu)
+ struct lttng_kernel_ring_buffer_channel *chan, int cpu)
{
if (config->alloc == RING_BUFFER_ALLOC_GLOBAL)
return chan->backend.buf;
int lib_ring_buffer_open_read(struct lib_ring_buffer *buf)
{
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
if (!atomic_long_add_unless(&buf->active_readers, 1, 1))
return -EBUSY;
void lib_ring_buffer_release_read(struct lib_ring_buffer *buf)
{
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
CHAN_WARN_ON(chan, atomic_long_read(&buf->active_readers) != 1);
lttng_smp_mb__before_atomic();
int lib_ring_buffer_snapshot(struct lib_ring_buffer *buf,
unsigned long *consumed, unsigned long *produced)
{
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned long consumed_cur, write_offset;
int finalized;
int lib_ring_buffer_snapshot_sample_positions(struct lib_ring_buffer *buf,
unsigned long *consumed, unsigned long *produced)
{
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
const struct lib_ring_buffer_config *config = &chan->backend.config;
smp_rmb();
unsigned long consumed_new)
{
struct lib_ring_buffer_backend *bufb = &buf->backend;
- struct channel *chan = bufb->chan;
+ struct lttng_kernel_ring_buffer_channel *chan = bufb->chan;
unsigned long consumed;
CHAN_WARN_ON(chan, atomic_long_read(&buf->active_readers) != 1);
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
static void lib_ring_buffer_flush_read_subbuf_dcache(
const struct lib_ring_buffer_config *config,
- struct channel *chan,
+ struct lttng_kernel_ring_buffer_channel *chan,
struct lib_ring_buffer *buf)
{
struct lib_ring_buffer_backend_pages *pages;
#else
static void lib_ring_buffer_flush_read_subbuf_dcache(
const struct lib_ring_buffer_config *config,
- struct channel *chan,
+ struct lttng_kernel_ring_buffer_channel *chan,
struct lib_ring_buffer *buf)
{
+ /*
+  * No-op stub for the !ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE case (see the
+  * #if/#else above): no dcache flush is required on this architecture.
+  */
}
int lib_ring_buffer_get_subbuf(struct lib_ring_buffer *buf,
unsigned long consumed)
{
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned long consumed_cur, consumed_idx, commit_count, write_offset;
int ret;
void lib_ring_buffer_put_subbuf(struct lib_ring_buffer *buf)
{
struct lib_ring_buffer_backend *bufb = &buf->backend;
- struct channel *chan = bufb->chan;
+ struct lttng_kernel_ring_buffer_channel *chan = bufb->chan;
const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned long read_sb_bindex, consumed_idx, consumed;
*/
static
void lib_ring_buffer_print_subbuffer_errors(struct lib_ring_buffer *buf,
- struct channel *chan,
+ struct lttng_kernel_ring_buffer_channel *chan,
unsigned long cons_offset,
int cpu)
{
static
void lib_ring_buffer_print_buffer_errors(struct lib_ring_buffer *buf,
- struct channel *chan,
+ struct lttng_kernel_ring_buffer_channel *chan,
void *priv, int cpu)
{
const struct lib_ring_buffer_config *config = &chan->backend.config;
#ifdef LTTNG_RING_BUFFER_COUNT_EVENTS
static
-void lib_ring_buffer_print_records_count(struct channel *chan,
+void lib_ring_buffer_print_records_count(struct lttng_kernel_ring_buffer_channel *chan,
struct lib_ring_buffer *buf,
int cpu)
{
}
#else
static
-void lib_ring_buffer_print_records_count(struct channel *chan,
+void lib_ring_buffer_print_records_count(struct lttng_kernel_ring_buffer_channel *chan,
struct lib_ring_buffer *buf,
int cpu)
{
#endif
static
-void lib_ring_buffer_print_errors(struct channel *chan,
+void lib_ring_buffer_print_errors(struct lttng_kernel_ring_buffer_channel *chan,
struct lib_ring_buffer *buf, int cpu)
{
const struct lib_ring_buffer_config *config = &chan->backend.config;
*/
static
void lib_ring_buffer_switch_old_start(struct lib_ring_buffer *buf,
- struct channel *chan,
+ struct lttng_kernel_ring_buffer_channel *chan,
struct switch_offsets *offsets,
u64 tsc)
{
*/
static
void lib_ring_buffer_switch_old_end(struct lib_ring_buffer *buf,
- struct channel *chan,
+ struct lttng_kernel_ring_buffer_channel *chan,
struct switch_offsets *offsets,
u64 tsc)
{
*/
static
void lib_ring_buffer_switch_new_start(struct lib_ring_buffer *buf,
- struct channel *chan,
+ struct lttng_kernel_ring_buffer_channel *chan,
struct switch_offsets *offsets,
u64 tsc)
{
*/
static
void lib_ring_buffer_switch_new_end(struct lib_ring_buffer *buf,
- struct channel *chan,
+ struct lttng_kernel_ring_buffer_channel *chan,
struct switch_offsets *offsets,
u64 tsc)
{
static
int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
struct lib_ring_buffer *buf,
- struct channel *chan,
+ struct lttng_kernel_ring_buffer_channel *chan,
struct switch_offsets *offsets,
u64 *tsc)
{
*/
void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf, enum switch_mode mode)
{
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
const struct lib_ring_buffer_config *config = &chan->backend.config;
struct switch_offsets offsets;
unsigned long oldidx;
static void _lib_ring_buffer_switch_remote(struct lib_ring_buffer *buf,
enum switch_mode mode)
{
- struct channel *chan = buf->backend.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
const struct lib_ring_buffer_config *config = &chan->backend.config;
int ret;
struct switch_param param;
void lib_ring_buffer_clear(struct lib_ring_buffer *buf)
{
struct lib_ring_buffer_backend *bufb = &buf->backend;
- struct channel *chan = bufb->chan;
+ struct lttng_kernel_ring_buffer_channel *chan = bufb->chan;
lib_ring_buffer_switch_remote(buf);
lib_ring_buffer_clear_reader(buf, chan);
*/
static
int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
- struct channel *chan,
+ struct lttng_kernel_ring_buffer_channel *chan,
struct switch_offsets *offsets,
struct lttng_kernel_ring_buffer_ctx *ctx,
void *client_ctx)
return 0;
}
-static struct lib_ring_buffer *get_current_buf(struct channel *chan, int cpu)
+static struct lib_ring_buffer *get_current_buf(struct lttng_kernel_ring_buffer_channel *chan, int cpu)
{
const struct lib_ring_buffer_config *config = &chan->backend.config;
return chan->backend.buf;
}
-void lib_ring_buffer_lost_event_too_big(struct channel *chan)
+void lib_ring_buffer_lost_event_too_big(struct lttng_kernel_ring_buffer_channel *chan)
{
const struct lib_ring_buffer_config *config = &chan->backend.config;
struct lib_ring_buffer *buf = get_current_buf(chan, smp_processor_id());
int lib_ring_buffer_reserve_slow(struct lttng_kernel_ring_buffer_ctx *ctx,
void *client_ctx)
{
- struct channel *chan = ctx->priv.chan;
+ struct lttng_kernel_ring_buffer_channel *chan = ctx->priv.chan;
const struct lib_ring_buffer_config *config = &chan->backend.config;
struct lib_ring_buffer *buf;
struct switch_offsets offsets;
void lib_ring_buffer_check_deliver_slow(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer *buf,
- struct channel *chan,
+ struct lttng_kernel_ring_buffer_channel *chan,
unsigned long offset,
unsigned long commit_count,
unsigned long idx,