X-Git-Url: https://git.lttng.org/?a=blobdiff_plain;f=src%2Flib%2Fringbuffer%2Fring_buffer_frontend.c;h=8d1983573e4d7cf429f37815074a543f9231d0e1;hb=860c213b645593fa19d7a3abf7ffdd1282f0a1c6;hp=cc5ac836420fb0f9a54a86de07dd36b417b2a2a4;hpb=585e5dcc4bf017c03b86dc84371f9c6170e12785;p=lttng-modules.git

diff --git a/src/lib/ringbuffer/ring_buffer_frontend.c b/src/lib/ringbuffer/ring_buffer_frontend.c
index cc5ac836..8d198357 100644
--- a/src/lib/ringbuffer/ring_buffer_frontend.c
+++ b/src/lib/ringbuffer/ring_buffer_frontend.c
@@ -79,7 +79,7 @@ DEFINE_PER_CPU(unsigned int, lib_ring_buffer_nesting);
 EXPORT_PER_CPU_SYMBOL(lib_ring_buffer_nesting);
 
 static
-void lib_ring_buffer_print_errors(struct channel *chan,
+void lib_ring_buffer_print_errors(struct lttng_kernel_ring_buffer_channel *chan,
				   struct lib_ring_buffer *buf, int cpu);
 static
 void _lib_ring_buffer_switch_remote(struct lib_ring_buffer *buf,
@@ -88,7 +88,7 @@ void _lib_ring_buffer_switch_remote(struct lib_ring_buffer *buf,
 static
 int lib_ring_buffer_poll_deliver(const struct lib_ring_buffer_config *config,
				  struct lib_ring_buffer *buf,
-				 struct channel *chan)
+				 struct lttng_kernel_ring_buffer_channel *chan)
 {
	unsigned long consumed_old, consumed_idx, commit_count, write_offset;
 
@@ -131,7 +131,9 @@ int lib_ring_buffer_poll_deliver(const struct lib_ring_buffer_config *config,
  */
 void lib_ring_buffer_free(struct lib_ring_buffer *buf)
 {
-	struct channel *chan = buf->backend.chan;
+	struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
+
+	irq_work_sync(&buf->wakeup_pending);
 
	lib_ring_buffer_print_errors(chan, buf, buf->backend.cpu);
	lttng_kvfree(buf->commit_hot);
@@ -152,7 +154,7 @@ void lib_ring_buffer_free(struct lib_ring_buffer *buf)
  */
 void lib_ring_buffer_reset(struct lib_ring_buffer *buf)
 {
-	struct channel *chan = buf->backend.chan;
+	struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = &chan->backend.config;
	unsigned int i;
 
@@ -191,7 +193,7 @@ EXPORT_SYMBOL_GPL(lib_ring_buffer_reset);
  * be using the iterator concurrently with reset. The previous current iterator
  * record is reset.
  */
-void channel_reset(struct channel *chan)
+void channel_reset(struct lttng_kernel_ring_buffer_channel *chan)
 {
	/*
	 * Reset iterators first. Will put the subbuffer if held for reading.
@@ -206,6 +208,19 @@ void channel_reset(struct channel *chan)
 }
 EXPORT_SYMBOL_GPL(channel_reset);
 
+static void lib_ring_buffer_pending_wakeup_buf(struct irq_work *entry)
+{
+	struct lib_ring_buffer *buf = container_of(entry, struct lib_ring_buffer,
+						   wakeup_pending);
+	wake_up_interruptible(&buf->read_wait);
+}
+
+static void lib_ring_buffer_pending_wakeup_chan(struct irq_work *entry)
+{
+	struct lttng_kernel_ring_buffer_channel *chan = container_of(entry, struct lttng_kernel_ring_buffer_channel, wakeup_pending);
+	wake_up_interruptible(&chan->read_wait);
+}
+
 /*
  * Must be called under cpu hotplug protection.
  */
@@ -213,7 +228,7 @@ int lib_ring_buffer_create(struct lib_ring_buffer *buf,
			    struct channel_backend *chanb, int cpu)
 {
	const struct lib_ring_buffer_config *config = &chanb->config;
-	struct channel *chan = container_of(chanb, struct channel, backend);
+	struct lttng_kernel_ring_buffer_channel *chan = container_of(chanb, struct lttng_kernel_ring_buffer_channel, backend);
	void *priv = chanb->priv;
	size_t subbuf_header_size;
	u64 tsc;
@@ -268,6 +283,7 @@ int lib_ring_buffer_create(struct lib_ring_buffer *buf,
 
	init_waitqueue_head(&buf->read_wait);
	init_waitqueue_head(&buf->write_wait);
+	init_irq_work(&buf->wakeup_pending, lib_ring_buffer_pending_wakeup_buf);
	raw_spin_lock_init(&buf->raw_tick_nohz_spinlock);
 
	/*
@@ -318,7 +334,7 @@ free_chanbuf:
 static void switch_buffer_timer(LTTNG_TIMER_FUNC_ARG_TYPE t)
 {
	struct lib_ring_buffer *buf = lttng_from_timer(buf, t, switch_timer);
-	struct channel *chan = buf->backend.chan;
+	struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = &chan->backend.config;
 
	/*
@@ -340,7 +356,7 @@ static void switch_buffer_timer(LTTNG_TIMER_FUNC_ARG_TYPE t)
  */
 static void lib_ring_buffer_start_switch_timer(struct lib_ring_buffer *buf)
 {
-	struct channel *chan = buf->backend.chan;
+	struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = &chan->backend.config;
	unsigned int flags = 0;
 
@@ -366,7 +382,7 @@ static void lib_ring_buffer_start_switch_timer(struct lib_ring_buffer *buf)
  */
 static void lib_ring_buffer_stop_switch_timer(struct lib_ring_buffer *buf)
 {
-	struct channel *chan = buf->backend.chan;
+	struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
 
	if (!chan->switch_timer_interval || !buf->switch_timer_enabled)
		return;
@@ -381,7 +397,7 @@ static void lib_ring_buffer_stop_switch_timer(struct lib_ring_buffer *buf)
 static void read_buffer_timer(LTTNG_TIMER_FUNC_ARG_TYPE t)
 {
	struct lib_ring_buffer *buf = lttng_from_timer(buf, t, read_timer);
-	struct channel *chan = buf->backend.chan;
+	struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = &chan->backend.config;
 
	CHAN_WARN_ON(chan, !buf->backend.allocated);
@@ -405,7 +421,7 @@ static void read_buffer_timer(LTTNG_TIMER_FUNC_ARG_TYPE t)
  */
 static void lib_ring_buffer_start_read_timer(struct lib_ring_buffer *buf)
 {
-	struct channel *chan = buf->backend.chan;
+	struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = &chan->backend.config;
	unsigned int flags = 0;
 
@@ -433,7 +449,7 @@ static void lib_ring_buffer_start_read_timer(struct lib_ring_buffer *buf)
  */
 static void lib_ring_buffer_stop_read_timer(struct lib_ring_buffer *buf)
 {
-	struct channel *chan = buf->backend.chan;
+	struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = &chan->backend.config;
 
	if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER
@@ -453,7 +469,7 @@ static void lib_ring_buffer_stop_read_timer(struct lib_ring_buffer *buf)
	buf->read_timer_enabled = 0;
 }
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
 
 enum cpuhp_state lttng_rb_hp_prepare;
 enum cpuhp_state lttng_rb_hp_online;
@@ -473,7 +489,7 @@ EXPORT_SYMBOL_GPL(lttng_rb_set_hp_online);
 int lttng_cpuhp_rb_frontend_dead(unsigned int cpu,
		struct lttng_cpuhp_node *node)
 {
-	struct channel *chan = container_of(node, struct channel,
+	struct lttng_kernel_ring_buffer_channel *chan = container_of(node, struct lttng_kernel_ring_buffer_channel,
					    cpuhp_prepare);
	struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
	const struct lib_ring_buffer_config *config = &chan->backend.config;
@@ -494,7 +510,7 @@ EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_frontend_dead);
 int lttng_cpuhp_rb_frontend_online(unsigned int cpu,
		struct lttng_cpuhp_node *node)
 {
-	struct channel *chan = container_of(node, struct channel,
+	struct lttng_kernel_ring_buffer_channel *chan = container_of(node, struct lttng_kernel_ring_buffer_channel,
					    cpuhp_online);
	struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
	const struct lib_ring_buffer_config *config = &chan->backend.config;
@@ -511,7 +527,7 @@ EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_frontend_online);
 int lttng_cpuhp_rb_frontend_offline(unsigned int cpu,
		struct lttng_cpuhp_node *node)
 {
-	struct channel *chan = container_of(node, struct channel,
+	struct lttng_kernel_ring_buffer_channel *chan = container_of(node, struct lttng_kernel_ring_buffer_channel,
					    cpuhp_online);
	struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
	const struct lib_ring_buffer_config *config = &chan->backend.config;
@@ -524,7 +540,7 @@ int lttng_cpuhp_rb_frontend_offline(unsigned int cpu,
 }
 EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_frontend_offline);
 
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
 
 #ifdef CONFIG_HOTPLUG_CPU
 
@@ -542,7 +558,7 @@ int lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
				     void *hcpu)
 {
	unsigned int cpu = (unsigned long)hcpu;
-	struct channel *chan = container_of(nb, struct channel,
+	struct lttng_kernel_ring_buffer_channel *chan = container_of(nb, struct lttng_kernel_ring_buffer_channel,
					    cpu_hp_notifier);
	struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
	const struct lib_ring_buffer_config *config = &chan->backend.config;
@@ -586,7 +602,7 @@ int lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
 
 #endif
 
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
 
 #if defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER)
 /*
@@ -601,7 +617,7 @@ static int notrace ring_buffer_tick_nohz_callback(struct notifier_block *nb,
						   unsigned long val,
						   void *data)
 {
-	struct channel *chan = container_of(nb, struct channel,
+	struct lttng_kernel_ring_buffer_channel *chan = container_of(nb, struct lttng_kernel_ring_buffer_channel,
					    tick_nohz_notifier);
	const struct lib_ring_buffer_config *config = &chan->backend.config;
	struct lib_ring_buffer *buf;
@@ -673,7 +689,7 @@ void notrace lib_ring_buffer_tick_nohz_restart(void)
 /*
  * Holds CPU hotplug.
  */
-static void channel_unregister_notifiers(struct channel *chan)
+static void channel_unregister_notifiers(struct lttng_kernel_ring_buffer_channel *chan)
 {
	const struct lib_ring_buffer_config *config = &chan->backend.config;
 
@@ -692,7 +708,7 @@ static void channel_unregister_notifiers(struct channel *chan)
	 * concurrency.
	 */
 #endif /* CONFIG_NO_HZ */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
		{
			int ret;
 
@@ -703,7 +719,7 @@ static void channel_unregister_notifiers(struct channel *chan)
					&chan->cpuhp_prepare.node);
			WARN_ON(ret);
		}
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
		{
			int cpu;
 
@@ -727,7 +743,7 @@ static void channel_unregister_notifiers(struct channel *chan)
			}
 #endif
		}
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
	} else {
		struct lib_ring_buffer *buf = chan->backend.buf;
 
@@ -750,7 +766,7 @@ static void lib_ring_buffer_clear_quiescent(struct lib_ring_buffer *buf)
	buf->quiescent = false;
 }
 
-void lib_ring_buffer_set_quiescent_channel(struct channel *chan)
+void lib_ring_buffer_set_quiescent_channel(struct lttng_kernel_ring_buffer_channel *chan)
 {
	int cpu;
	const struct lib_ring_buffer_config *config = &chan->backend.config;
@@ -772,7 +788,7 @@ void lib_ring_buffer_set_quiescent_channel(struct channel *chan)
 }
 EXPORT_SYMBOL_GPL(lib_ring_buffer_set_quiescent_channel);
 
-void lib_ring_buffer_clear_quiescent_channel(struct channel *chan)
+void lib_ring_buffer_clear_quiescent_channel(struct lttng_kernel_ring_buffer_channel *chan)
 {
	int cpu;
	const struct lib_ring_buffer_config *config = &chan->backend.config;
@@ -794,7 +810,7 @@ void lib_ring_buffer_clear_quiescent_channel(struct channel *chan)
 }
 EXPORT_SYMBOL_GPL(lib_ring_buffer_clear_quiescent_channel);
 
-static void channel_free(struct channel *chan)
+static void channel_free(struct lttng_kernel_ring_buffer_channel *chan)
 {
	if (chan->backend.release_priv_ops) {
		chan->backend.release_priv_ops(chan->backend.priv_ops);
@@ -822,20 +838,20 @@ static void channel_free(struct channel *chan)
  * Holds cpu hotplug.
  * Returns NULL on failure.
  */
-struct channel *channel_create(const struct lib_ring_buffer_config *config,
+struct lttng_kernel_ring_buffer_channel *channel_create(const struct lib_ring_buffer_config *config,
		    const char *name, void *priv, void *buf_addr,
		    size_t subbuf_size, size_t num_subbuf,
		    unsigned int switch_timer_interval,
		    unsigned int read_timer_interval)
 {
	int ret;
-	struct channel *chan;
+	struct lttng_kernel_ring_buffer_channel *chan;
 
	if (lib_ring_buffer_check_config(config, switch_timer_interval,
					 read_timer_interval))
		return NULL;
 
-	chan = kzalloc(sizeof(struct channel), GFP_KERNEL);
+	chan = kzalloc(sizeof(struct lttng_kernel_ring_buffer_channel), GFP_KERNEL);
	if (!chan)
		return NULL;
 
@@ -854,9 +870,10 @@ struct channel *channel_create(const struct lib_ring_buffer_config *config,
	kref_init(&chan->ref);
	init_waitqueue_head(&chan->read_wait);
	init_waitqueue_head(&chan->hp_wait);
+	init_irq_work(&chan->wakeup_pending, lib_ring_buffer_pending_wakeup_chan);
 
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
		chan->cpuhp_prepare.component = LTTNG_RING_BUFFER_FRONTEND;
		ret = cpuhp_state_add_instance_nocalls(lttng_rb_hp_prepare,
			&chan->cpuhp_prepare.node);
@@ -868,7 +885,7 @@ struct channel *channel_create(const struct lib_ring_buffer_config *config,
			&chan->cpuhp_online.node);
		if (ret)
			goto cpuhp_online_error;
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
		{
			int cpu;
			/*
@@ -904,7 +921,7 @@ struct channel *channel_create(const struct lib_ring_buffer_config *config,
			}
 #endif
		}
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
 
 #if defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER)
		/* Only benefit from NO_HZ idle with per-cpu buffers for now. */
@@ -924,13 +941,13 @@ struct channel *channel_create(const struct lib_ring_buffer_config *config,
 
	return chan;
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
 cpuhp_online_error:
	ret = cpuhp_state_remove_instance_nocalls(lttng_rb_hp_prepare,
			&chan->cpuhp_prepare.node);
	WARN_ON(ret);
 cpuhp_prepare_error:
-#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#endif /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
 error_free_backend:
	channel_backend_free(&chan->backend);
 error:
@@ -942,7 +959,7 @@ EXPORT_SYMBOL_GPL(channel_create);
 static
 void channel_release(struct kref *kref)
 {
-	struct channel *chan = container_of(kref, struct channel, ref);
+	struct lttng_kernel_ring_buffer_channel *chan = container_of(kref, struct lttng_kernel_ring_buffer_channel, ref);
 
	channel_free(chan);
 }
@@ -957,12 +974,14 @@ void channel_release(struct kref *kref)
  * They should release their handle at that point. Returns the private
  * data pointer.
  */
-void *channel_destroy(struct channel *chan)
+void *channel_destroy(struct lttng_kernel_ring_buffer_channel *chan)
 {
	int cpu;
	const struct lib_ring_buffer_config *config = &chan->backend.config;
	void *priv;
 
+	irq_work_sync(&chan->wakeup_pending);
+
	channel_unregister_notifiers(chan);
 
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
@@ -1008,7 +1027,7 @@ EXPORT_SYMBOL_GPL(channel_destroy);
 
 struct lib_ring_buffer *channel_get_ring_buffer(
					const struct lib_ring_buffer_config *config,
-					struct channel *chan, int cpu)
+					struct lttng_kernel_ring_buffer_channel *chan, int cpu)
 {
	if (config->alloc == RING_BUFFER_ALLOC_GLOBAL)
		return chan->backend.buf;
@@ -1019,7 +1038,7 @@ EXPORT_SYMBOL_GPL(channel_get_ring_buffer);
 
 int lib_ring_buffer_open_read(struct lib_ring_buffer *buf)
 {
-	struct channel *chan = buf->backend.chan;
+	struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
 
	if (!atomic_long_add_unless(&buf->active_readers, 1, 1))
		return -EBUSY;
@@ -1034,7 +1053,7 @@ EXPORT_SYMBOL_GPL(lib_ring_buffer_open_read);
 
 void lib_ring_buffer_release_read(struct lib_ring_buffer *buf)
 {
-	struct channel *chan = buf->backend.chan;
+	struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
 
	CHAN_WARN_ON(chan, atomic_long_read(&buf->active_readers) != 1);
	lttng_smp_mb__before_atomic();
@@ -1068,7 +1087,7 @@ static void remote_mb(void *info)
 int lib_ring_buffer_snapshot(struct lib_ring_buffer *buf,
			     unsigned long *consumed, unsigned long *produced)
 {
-	struct channel *chan = buf->backend.chan;
+	struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = &chan->backend.config;
	unsigned long consumed_cur, write_offset;
	int finalized;
@@ -1131,7 +1150,7 @@ EXPORT_SYMBOL_GPL(lib_ring_buffer_snapshot);
 int lib_ring_buffer_snapshot_sample_positions(struct lib_ring_buffer *buf,
			     unsigned long *consumed, unsigned long *produced)
 {
-	struct channel *chan = buf->backend.chan;
+	struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = &chan->backend.config;
 
	smp_rmb();
@@ -1159,7 +1178,7 @@ void lib_ring_buffer_move_consumer(struct lib_ring_buffer *buf,
				   unsigned long consumed_new)
 {
	struct lib_ring_buffer_backend *bufb = &buf->backend;
-	struct channel *chan = bufb->chan;
+	struct lttng_kernel_ring_buffer_channel *chan = bufb->chan;
	unsigned long consumed;
 
	CHAN_WARN_ON(chan, atomic_long_read(&buf->active_readers) != 1);
@@ -1181,7 +1200,7 @@ EXPORT_SYMBOL_GPL(lib_ring_buffer_move_consumer);
 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
 static void lib_ring_buffer_flush_read_subbuf_dcache(
		const struct lib_ring_buffer_config *config,
-		struct channel *chan,
+		struct lttng_kernel_ring_buffer_channel *chan,
		struct lib_ring_buffer *buf)
 {
	struct lib_ring_buffer_backend_pages *pages;
@@ -1213,7 +1232,7 @@ static void lib_ring_buffer_flush_read_subbuf_dcache(
 #else
 static void lib_ring_buffer_flush_read_subbuf_dcache(
		const struct lib_ring_buffer_config *config,
-		struct channel *chan,
+		struct lttng_kernel_ring_buffer_channel *chan,
		struct lib_ring_buffer *buf)
 {
 }
@@ -1231,7 +1250,7 @@ static void lib_ring_buffer_flush_read_subbuf_dcache(
 
 int lib_ring_buffer_get_subbuf(struct lib_ring_buffer *buf, unsigned long consumed)
 {
-	struct channel *chan = buf->backend.chan;
+	struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = &chan->backend.config;
	unsigned long consumed_cur, consumed_idx, commit_count, write_offset;
	int ret;
@@ -1386,7 +1405,7 @@ EXPORT_SYMBOL_GPL(lib_ring_buffer_get_subbuf);
 void lib_ring_buffer_put_subbuf(struct lib_ring_buffer *buf)
 {
	struct lib_ring_buffer_backend *bufb = &buf->backend;
-	struct channel *chan = bufb->chan;
+	struct lttng_kernel_ring_buffer_channel *chan = bufb->chan;
	const struct lib_ring_buffer_config *config = &chan->backend.config;
	unsigned long read_sb_bindex, consumed_idx, consumed;
 
@@ -1442,7 +1461,7 @@ EXPORT_SYMBOL_GPL(lib_ring_buffer_put_subbuf);
  */
 static
 void lib_ring_buffer_print_subbuffer_errors(struct lib_ring_buffer *buf,
-					    struct channel *chan,
+					    struct lttng_kernel_ring_buffer_channel *chan,
					    unsigned long cons_offset,
					    int cpu)
 {
@@ -1469,7 +1488,7 @@ void lib_ring_buffer_print_subbuffer_errors(struct lib_ring_buffer *buf,
 
 static
 void lib_ring_buffer_print_buffer_errors(struct lib_ring_buffer *buf,
-					 struct channel *chan,
+					 struct lttng_kernel_ring_buffer_channel *chan,
					 void *priv, int cpu)
 {
	const struct lib_ring_buffer_config *config = &chan->backend.config;
@@ -1500,7 +1519,7 @@ void lib_ring_buffer_print_buffer_errors(struct lib_ring_buffer *buf,
 
 #ifdef LTTNG_RING_BUFFER_COUNT_EVENTS
 static
-void lib_ring_buffer_print_records_count(struct channel *chan,
+void lib_ring_buffer_print_records_count(struct lttng_kernel_ring_buffer_channel *chan,
					 struct lib_ring_buffer *buf,
					 int cpu)
 {
@@ -1522,7 +1541,7 @@ void lib_ring_buffer_print_records_count(struct channel *chan,
 }
 #else
 static
-void lib_ring_buffer_print_records_count(struct channel *chan,
+void lib_ring_buffer_print_records_count(struct lttng_kernel_ring_buffer_channel *chan,
					 struct lib_ring_buffer *buf,
					 int cpu)
 {
@@ -1530,7 +1549,7 @@ void lib_ring_buffer_print_records_count(struct channel *chan,
 #endif
 
 static
-void lib_ring_buffer_print_errors(struct channel *chan,
+void lib_ring_buffer_print_errors(struct lttng_kernel_ring_buffer_channel *chan,
				  struct lib_ring_buffer *buf, int cpu)
 {
	const struct lib_ring_buffer_config *config = &chan->backend.config;
@@ -1560,7 +1579,7 @@ void lib_ring_buffer_print_errors(struct channel *chan,
  */
 static
 void lib_ring_buffer_switch_old_start(struct lib_ring_buffer *buf,
-				      struct channel *chan,
+				      struct lttng_kernel_ring_buffer_channel *chan,
				      struct switch_offsets *offsets,
				      u64 tsc)
 {
@@ -1605,7 +1624,7 @@ void lib_ring_buffer_switch_old_start(struct lib_ring_buffer *buf,
  */
 static
 void lib_ring_buffer_switch_old_end(struct lib_ring_buffer *buf,
-				    struct channel *chan,
+				    struct lttng_kernel_ring_buffer_channel *chan,
				    struct switch_offsets *offsets,
				    u64 tsc)
 {
@@ -1662,7 +1681,7 @@ void lib_ring_buffer_switch_old_end(struct lib_ring_buffer *buf,
  */
 static
 void lib_ring_buffer_switch_new_start(struct lib_ring_buffer *buf,
-				      struct channel *chan,
+				      struct lttng_kernel_ring_buffer_channel *chan,
				      struct switch_offsets *offsets,
				      u64 tsc)
 {
@@ -1707,7 +1726,7 @@ void lib_ring_buffer_switch_new_start(struct lib_ring_buffer *buf,
  */
 static
 void lib_ring_buffer_switch_new_end(struct lib_ring_buffer *buf,
-				    struct channel *chan,
+				    struct lttng_kernel_ring_buffer_channel *chan,
				    struct switch_offsets *offsets,
				    u64 tsc)
 {
@@ -1738,7 +1757,7 @@ void lib_ring_buffer_switch_new_end(struct lib_ring_buffer *buf,
 static
 int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
				    struct lib_ring_buffer *buf,
-				    struct channel *chan,
+				    struct lttng_kernel_ring_buffer_channel *chan,
				    struct switch_offsets *offsets,
				    u64 *tsc)
 {
@@ -1845,7 +1864,7 @@ int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
  */
 void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf, enum switch_mode mode)
 {
-	struct channel *chan = buf->backend.chan;
+	struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = &chan->backend.config;
	struct switch_offsets offsets;
	unsigned long oldidx;
@@ -1910,7 +1929,7 @@ static void remote_switch(void *info)
 static void _lib_ring_buffer_switch_remote(struct lib_ring_buffer *buf,
		enum switch_mode mode)
 {
-	struct channel *chan = buf->backend.chan;
+	struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = &chan->backend.config;
	int ret;
	struct switch_param param;
@@ -1960,7 +1979,7 @@ EXPORT_SYMBOL_GPL(lib_ring_buffer_switch_remote_empty);
 void lib_ring_buffer_clear(struct lib_ring_buffer *buf)
 {
	struct lib_ring_buffer_backend *bufb = &buf->backend;
-	struct channel *chan = bufb->chan;
+	struct lttng_kernel_ring_buffer_channel *chan = bufb->chan;
 
	lib_ring_buffer_switch_remote(buf);
	lib_ring_buffer_clear_reader(buf, chan);
@@ -1976,9 +1995,9 @@ EXPORT_SYMBOL_GPL(lib_ring_buffer_clear);
  */
 static
 int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
-				     struct channel *chan,
+				     struct lttng_kernel_ring_buffer_channel *chan,
				     struct switch_offsets *offsets,
-				     struct lib_ring_buffer_ctx *ctx,
+				     struct lttng_kernel_ring_buffer_ctx *ctx,
				     void *client_ctx)
 {
	const struct lib_ring_buffer_config *config = &chan->backend.config;
@@ -1992,14 +2011,14 @@ retry:
	offsets->switch_old_end = 0;
	offsets->pre_header_padding = 0;
 
-	ctx->tsc = config->cb.ring_buffer_clock_read(chan);
-	if ((int64_t) ctx->tsc == -EIO)
+	ctx->priv.tsc = config->cb.ring_buffer_clock_read(chan);
+	if ((int64_t) ctx->priv.tsc == -EIO)
		return -EIO;
 
-	if (last_tsc_overflow(config, buf, ctx->tsc))
-		ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
+	if (last_tsc_overflow(config, buf, ctx->priv.tsc))
+		ctx->priv.rflags |= RING_BUFFER_RFLAG_FULL_TSC;
 
-	if (unlikely(subbuf_offset(offsets->begin, ctx->chan) == 0)) {
+	if (unlikely(subbuf_offset(offsets->begin, ctx->priv.chan) == 0)) {
		offsets->switch_new_start = 1;		/* For offsets->begin */
	} else {
		offsets->size = config->cb.record_header_size(config, chan,
@@ -2126,7 +2145,7 @@ retry:
	return 0;
 }
 
-static struct lib_ring_buffer *get_current_buf(struct channel *chan, int cpu)
+static struct lib_ring_buffer *get_current_buf(struct lttng_kernel_ring_buffer_channel *chan, int cpu)
 {
	const struct lib_ring_buffer_config *config = &chan->backend.config;
 
@@ -2136,7 +2155,7 @@ static struct lib_ring_buffer *get_current_buf(struct channel *chan, int cpu)
		return chan->backend.buf;
 }
 
-void lib_ring_buffer_lost_event_too_big(struct channel *chan)
+void lib_ring_buffer_lost_event_too_big(struct lttng_kernel_ring_buffer_channel *chan)
 {
	const struct lib_ring_buffer_config *config = &chan->backend.config;
	struct lib_ring_buffer *buf = get_current_buf(chan, smp_processor_id());
@@ -2153,16 +2172,16 @@ EXPORT_SYMBOL_GPL(lib_ring_buffer_lost_event_too_big);
  * -EIO for other errors, else returns 0.
  * It will take care of sub-buffer switching.
  */
-int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx,
+int lib_ring_buffer_reserve_slow(struct lttng_kernel_ring_buffer_ctx *ctx,
		void *client_ctx)
 {
-	struct channel *chan = ctx->chan;
+	struct lttng_kernel_ring_buffer_channel *chan = ctx->priv.chan;
	const struct lib_ring_buffer_config *config = &chan->backend.config;
	struct lib_ring_buffer *buf;
	struct switch_offsets offsets;
	int ret;
 
-	ctx->buf = buf = get_current_buf(chan, ctx->cpu);
+	ctx->priv.buf = buf = get_current_buf(chan, ctx->priv.reserve_cpu);
	offsets.size = 0;
 
	do {
@@ -2180,7 +2199,7 @@ int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx,
	 * records, never the opposite (missing a full TSC record when it would
	 * be needed).
	 */
-	save_last_tsc(config, buf, ctx->tsc);
+	save_last_tsc(config, buf, ctx->priv.tsc);
 
	/*
	 * Push the reader if necessary
@@ -2199,21 +2218,21 @@ int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx,
	if (unlikely(offsets.switch_old_end)) {
		lib_ring_buffer_clear_noref(config, &buf->backend,
					    subbuf_index(offsets.old - 1, chan));
-		lib_ring_buffer_switch_old_end(buf, chan, &offsets, ctx->tsc);
+		lib_ring_buffer_switch_old_end(buf, chan, &offsets, ctx->priv.tsc);
	}
 
	/*
	 * Populate new subbuffer.
	 */
	if (unlikely(offsets.switch_new_start))
-		lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx->tsc);
+		lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx->priv.tsc);
 
	if (unlikely(offsets.switch_new_end))
-		lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx->tsc);
+		lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx->priv.tsc);
 
-	ctx->slot_size = offsets.size;
-	ctx->pre_offset = offsets.begin;
-	ctx->buf_offset = offsets.begin + offsets.pre_header_padding;
+	ctx->priv.slot_size = offsets.size;
+	ctx->priv.pre_offset = offsets.begin;
+	ctx->priv.buf_offset = offsets.begin + offsets.pre_header_padding;
	return 0;
 }
 EXPORT_SYMBOL_GPL(lib_ring_buffer_reserve_slow);
@@ -2257,7 +2276,7 @@ void deliver_count_events(const struct lib_ring_buffer_config *config,
 void lib_ring_buffer_check_deliver_slow(const struct lib_ring_buffer_config *config,
				   struct lib_ring_buffer *buf,
-			           struct channel *chan,
+			           struct lttng_kernel_ring_buffer_channel *chan,
			           unsigned long offset,
				   unsigned long commit_count,
			           unsigned long idx,
@@ -2356,13 +2375,14 @@ void lib_ring_buffer_check_deliver_slow(const struct lib_ring_buffer_config *con
						 commit_count, idx);
 
		/*
-		 * RING_BUFFER_WAKEUP_BY_WRITER wakeup is not lock-free.
+		 * RING_BUFFER_WAKEUP_BY_WRITER uses an irq_work to issue
+		 * the wakeups.
		 */
		if (config->wakeup == RING_BUFFER_WAKEUP_BY_WRITER
		    && atomic_long_read(&buf->active_readers)
		    && lib_ring_buffer_poll_deliver(config, buf, chan)) {
-			wake_up_interruptible(&buf->read_wait);
-			wake_up_interruptible(&chan->read_wait);
+			irq_work_queue(&buf->wakeup_pending);
+			irq_work_queue(&chan->wakeup_pending);
		}
 
	}
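
Note on the wakeup change above: with RING_BUFFER_WAKEUP_BY_WRITER, the delivery path used to call wake_up_interruptible() directly; the diff routes both the per-buffer and per-channel wakeups through an irq_work, and adds irq_work_sync() in lib_ring_buffer_free() and channel_destroy() so no deferred wakeup is still in flight at teardown. The following is a minimal, self-contained sketch of that pattern only; the my_buf/my_buf_* names are illustrative and not part of the LTTng code.

/*
 * Sketch of the deferred-wakeup pattern used in the diff above.
 * Assumption: names prefixed "my_" are hypothetical, not LTTng API.
 */
#include <linux/irq_work.h>
#include <linux/kernel.h>
#include <linux/wait.h>

struct my_buf {
	wait_queue_head_t read_wait;	/* readers sleep here */
	struct irq_work wakeup_pending;	/* deferred wakeup */
};

/* Runs later from IRQ-safe context, not from the writer's call site. */
static void my_buf_pending_wakeup(struct irq_work *entry)
{
	struct my_buf *buf = container_of(entry, struct my_buf, wakeup_pending);

	wake_up_interruptible(&buf->read_wait);
}

static void my_buf_init(struct my_buf *buf)
{
	init_waitqueue_head(&buf->read_wait);
	init_irq_work(&buf->wakeup_pending, my_buf_pending_wakeup);
}

/* Writer side: queue the wakeup instead of waking readers directly. */
static void my_buf_data_ready(struct my_buf *buf)
{
	irq_work_queue(&buf->wakeup_pending);
}

/* Teardown: wait for any queued wakeup before the structure is freed. */
static void my_buf_destroy(struct my_buf *buf)
{
	irq_work_sync(&buf->wakeup_pending);
}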
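
The ctx->tsc, ctx->chan, ctx->buf, ctx->slot_size and related accesses becoming ctx->priv.* reflect a split of the reservation context into a client-visible part and a private part owned by the ring buffer internals. The actual lttng_kernel_ring_buffer_ctx definition is not shown in this diff; the sketch below only illustrates the shape implied by the renamed accesses. Field names inside priv follow the diff, while the struct names and the public fields are assumptions.

/*
 * Assumption-level sketch of the ctx->foo -> ctx->priv.foo split.
 * Not the actual LTTng definition.
 */
#include <linux/types.h>

struct sketch_rb_ctx_private {
	void *chan;			/* channel resolved at reserve time */
	void *buf;			/* buffer selected for this record */
	size_t slot_size;		/* size reserved for the record */
	size_t buf_offset;		/* where the payload starts */
	size_t pre_offset;		/* offset before header padding */
	u64 tsc;			/* clock sample for this record */
	unsigned int rflags;		/* e.g. RING_BUFFER_RFLAG_FULL_TSC */
	int reserve_cpu;		/* cpu the reservation runs on */
};

struct sketch_rb_ctx {
	/* Filled in by the client before reserving (assumed fields). */
	size_t data_size;
	int largest_align;
	/* Owned by the ring buffer; clients do not touch it directly. */
	struct sketch_rb_ctx_private priv;
};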