X-Git-Url: http://git.lttng.org/?a=blobdiff_plain;f=lib%2Fringbuffer%2Fring_buffer_frontend.c;h=97c963c81a7c41b1b71d2d62a08f87a123a68a71;hb=eac992672a4466ab1404739b81d4dd49939a3cbd;hp=b22cb0fe1ced979c4581e31d486cc861e9eb5821;hpb=3dc33675410da9f13647d70e8b3e152f480f81b1;p=lttng-modules.git diff --git a/lib/ringbuffer/ring_buffer_frontend.c b/lib/ringbuffer/ring_buffer_frontend.c index b22cb0fe..97c963c8 100644 --- a/lib/ringbuffer/ring_buffer_frontend.c +++ b/lib/ringbuffer/ring_buffer_frontend.c @@ -63,6 +63,7 @@ #include #include #include +#include /* * Internal structure representing offsets to use at a sub-buffer switch. @@ -205,7 +206,8 @@ int lib_ring_buffer_create(struct lib_ring_buffer *buf, kzalloc_node(ALIGN(sizeof(*buf->commit_hot) * chan->backend.num_subbuf, 1 << INTERNODE_CACHE_SHIFT), - GFP_KERNEL, cpu_to_node(max(cpu, 0))); + GFP_KERNEL | __GFP_NOWARN, + cpu_to_node(max(cpu, 0))); if (!buf->commit_hot) { ret = -ENOMEM; goto free_chanbuf; @@ -215,7 +217,8 @@ int lib_ring_buffer_create(struct lib_ring_buffer *buf, kzalloc_node(ALIGN(sizeof(*buf->commit_cold) * chan->backend.num_subbuf, 1 << INTERNODE_CACHE_SHIFT), - GFP_KERNEL, cpu_to_node(max(cpu, 0))); + GFP_KERNEL | __GFP_NOWARN, + cpu_to_node(max(cpu, 0))); if (!buf->commit_cold) { ret = -ENOMEM; goto free_commit; @@ -281,7 +284,7 @@ static void switch_buffer_timer(unsigned long data) lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE); if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) - mod_timer_pinned(&buf->switch_timer, + lttng_mod_timer_pinned(&buf->switch_timer, jiffies + chan->switch_timer_interval); else mod_timer(&buf->switch_timer, @@ -298,7 +301,12 @@ static void lib_ring_buffer_start_switch_timer(struct lib_ring_buffer *buf) if (!chan->switch_timer_interval || buf->switch_timer_enabled) return; - init_timer(&buf->switch_timer); + + if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) + lttng_init_timer_pinned(&buf->switch_timer); + else + init_timer(&buf->switch_timer); + buf->switch_timer.function = switch_buffer_timer; buf->switch_timer.expires = jiffies + chan->switch_timer_interval; buf->switch_timer.data = (unsigned long)buf; @@ -341,7 +349,7 @@ static void read_buffer_timer(unsigned long data) } if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) - mod_timer_pinned(&buf->read_timer, + lttng_mod_timer_pinned(&buf->read_timer, jiffies + chan->read_timer_interval); else mod_timer(&buf->read_timer, @@ -361,7 +369,11 @@ static void lib_ring_buffer_start_read_timer(struct lib_ring_buffer *buf) || buf->read_timer_enabled) return; - init_timer(&buf->read_timer); + if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) + lttng_init_timer_pinned(&buf->read_timer); + else + init_timer(&buf->read_timer); + buf->read_timer.function = read_buffer_timer; buf->read_timer.expires = jiffies + chan->read_timer_interval; buf->read_timer.data = (unsigned long)buf; @@ -398,7 +410,81 @@ static void lib_ring_buffer_stop_read_timer(struct lib_ring_buffer *buf) buf->read_timer_enabled = 0; } +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) + +enum cpuhp_state lttng_rb_hp_prepare; +enum cpuhp_state lttng_rb_hp_online; + +void lttng_rb_set_hp_prepare(enum cpuhp_state val) +{ + lttng_rb_hp_prepare = val; +} +EXPORT_SYMBOL_GPL(lttng_rb_set_hp_prepare); + +void lttng_rb_set_hp_online(enum cpuhp_state val) +{ + lttng_rb_hp_online = val; +} +EXPORT_SYMBOL_GPL(lttng_rb_set_hp_online); + +int lttng_cpuhp_rb_frontend_dead(unsigned int cpu, + struct lttng_cpuhp_node *node) +{ + struct channel *chan = container_of(node, struct channel, + cpuhp_prepare); + struct 
lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu); + const struct lib_ring_buffer_config *config = &chan->backend.config; + + CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_GLOBAL); + + /* + * Performing a buffer switch on a remote CPU. Performed by + * the CPU responsible for doing the hotunplug after the target + * CPU stopped running completely. Ensures that all data + * from that remote CPU is flushed. + */ + lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE); + return 0; +} +EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_frontend_dead); + +int lttng_cpuhp_rb_frontend_online(unsigned int cpu, + struct lttng_cpuhp_node *node) +{ + struct channel *chan = container_of(node, struct channel, + cpuhp_online); + struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu); + const struct lib_ring_buffer_config *config = &chan->backend.config; + + CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_GLOBAL); + + wake_up_interruptible(&chan->hp_wait); + lib_ring_buffer_start_switch_timer(buf); + lib_ring_buffer_start_read_timer(buf); + return 0; +} +EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_frontend_online); + +int lttng_cpuhp_rb_frontend_offline(unsigned int cpu, + struct lttng_cpuhp_node *node) +{ + struct channel *chan = container_of(node, struct channel, + cpuhp_online); + struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu); + const struct lib_ring_buffer_config *config = &chan->backend.config; + + CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_GLOBAL); + + lib_ring_buffer_stop_switch_timer(buf); + lib_ring_buffer_stop_read_timer(buf); + return 0; +} +EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_frontend_offline); + +#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */ + #ifdef CONFIG_HOTPLUG_CPU + /** * lib_ring_buffer_cpu_hp_callback - CPU hotplug callback * @nb: notifier block @@ -454,8 +540,11 @@ int lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb, return NOTIFY_DONE; } } + #endif +#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */ + #if defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER) /* * For per-cpu buffers, call the reader wakeups before switching the buffer, so @@ -544,7 +633,6 @@ void notrace lib_ring_buffer_tick_nohz_restart(void) static void channel_unregister_notifiers(struct channel *chan) { const struct lib_ring_buffer_config *config = &chan->backend.config; - int cpu; channel_iterator_unregister_notifiers(chan); if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) { @@ -561,25 +649,42 @@ static void channel_unregister_notifiers(struct channel *chan) * concurrency. 
*/ #endif /* CONFIG_NO_HZ */ -#ifdef CONFIG_HOTPLUG_CPU - get_online_cpus(); - chan->cpu_hp_enable = 0; - for_each_online_cpu(cpu) { - struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, - cpu); - lib_ring_buffer_stop_switch_timer(buf); - lib_ring_buffer_stop_read_timer(buf); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) + { + int ret; + + ret = cpuhp_state_remove_instance(lttng_rb_hp_online, + &chan->cpuhp_online.node); + WARN_ON(ret); + ret = cpuhp_state_remove_instance_nocalls(lttng_rb_hp_prepare, + &chan->cpuhp_prepare.node); + WARN_ON(ret); } - put_online_cpus(); - unregister_cpu_notifier(&chan->cpu_hp_notifier); +#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */ + { + int cpu; + +#ifdef CONFIG_HOTPLUG_CPU + get_online_cpus(); + chan->cpu_hp_enable = 0; + for_each_online_cpu(cpu) { + struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, + cpu); + lib_ring_buffer_stop_switch_timer(buf); + lib_ring_buffer_stop_read_timer(buf); + } + put_online_cpus(); + unregister_cpu_notifier(&chan->cpu_hp_notifier); #else - for_each_possible_cpu(cpu) { - struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, - cpu); - lib_ring_buffer_stop_switch_timer(buf); - lib_ring_buffer_stop_read_timer(buf); - } + for_each_possible_cpu(cpu) { + struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, + cpu); + lib_ring_buffer_stop_switch_timer(buf); + lib_ring_buffer_stop_read_timer(buf); + } #endif + } +#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */ } else { struct lib_ring_buffer *buf = chan->backend.buf; @@ -680,7 +785,7 @@ struct channel *channel_create(const struct lib_ring_buffer_config *config, size_t num_subbuf, unsigned int switch_timer_interval, unsigned int read_timer_interval) { - int ret, cpu; + int ret; struct channel *chan; if (lib_ring_buffer_check_config(config, switch_timer_interval, @@ -708,6 +813,56 @@ struct channel *channel_create(const struct lib_ring_buffer_config *config, init_waitqueue_head(&chan->hp_wait); if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) + chan->cpuhp_prepare.component = LTTNG_RING_BUFFER_FRONTEND; + ret = cpuhp_state_add_instance_nocalls(lttng_rb_hp_prepare, + &chan->cpuhp_prepare.node); + if (ret) + goto cpuhp_prepare_error; + + chan->cpuhp_online.component = LTTNG_RING_BUFFER_FRONTEND; + ret = cpuhp_state_add_instance(lttng_rb_hp_online, + &chan->cpuhp_online.node); + if (ret) + goto cpuhp_online_error; +#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */ + { + int cpu; + /* + * In case of non-hotplug cpu, if the ring-buffer is allocated + * in early initcall, it will not be notified of secondary cpus. + * In that off case, we need to allocate for all possible cpus. 
+ */ +#ifdef CONFIG_HOTPLUG_CPU + chan->cpu_hp_notifier.notifier_call = + lib_ring_buffer_cpu_hp_callback; + chan->cpu_hp_notifier.priority = 6; + register_cpu_notifier(&chan->cpu_hp_notifier); + + get_online_cpus(); + for_each_online_cpu(cpu) { + struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, + cpu); + spin_lock(&per_cpu(ring_buffer_nohz_lock, cpu)); + lib_ring_buffer_start_switch_timer(buf); + lib_ring_buffer_start_read_timer(buf); + spin_unlock(&per_cpu(ring_buffer_nohz_lock, cpu)); + } + chan->cpu_hp_enable = 1; + put_online_cpus(); +#else + for_each_possible_cpu(cpu) { + struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, + cpu); + spin_lock(&per_cpu(ring_buffer_nohz_lock, cpu)); + lib_ring_buffer_start_switch_timer(buf); + lib_ring_buffer_start_read_timer(buf); + spin_unlock(&per_cpu(ring_buffer_nohz_lock, cpu)); + } +#endif + } +#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */ + #if defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER) /* Only benefit from NO_HZ idle with per-cpu buffers for now. */ chan->tick_nohz_notifier.notifier_call = @@ -717,38 +872,6 @@ struct channel *channel_create(const struct lib_ring_buffer_config *config, &chan->tick_nohz_notifier); #endif /* defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER) */ - /* - * In case of non-hotplug cpu, if the ring-buffer is allocated - * in early initcall, it will not be notified of secondary cpus. - * In that off case, we need to allocate for all possible cpus. - */ -#ifdef CONFIG_HOTPLUG_CPU - chan->cpu_hp_notifier.notifier_call = - lib_ring_buffer_cpu_hp_callback; - chan->cpu_hp_notifier.priority = 6; - register_cpu_notifier(&chan->cpu_hp_notifier); - - get_online_cpus(); - for_each_online_cpu(cpu) { - struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, - cpu); - spin_lock(&per_cpu(ring_buffer_nohz_lock, cpu)); - lib_ring_buffer_start_switch_timer(buf); - lib_ring_buffer_start_read_timer(buf); - spin_unlock(&per_cpu(ring_buffer_nohz_lock, cpu)); - } - chan->cpu_hp_enable = 1; - put_online_cpus(); -#else - for_each_possible_cpu(cpu) { - struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, - cpu); - spin_lock(&per_cpu(ring_buffer_nohz_lock, cpu)); - lib_ring_buffer_start_switch_timer(buf); - lib_ring_buffer_start_read_timer(buf); - spin_unlock(&per_cpu(ring_buffer_nohz_lock, cpu)); - } -#endif } else { struct lib_ring_buffer *buf = chan->backend.buf; @@ -758,6 +881,13 @@ struct channel *channel_create(const struct lib_ring_buffer_config *config, return chan; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) +cpuhp_online_error: + ret = cpuhp_state_remove_instance_nocalls(lttng_rb_hp_prepare, + &chan->cpuhp_prepare.node); + WARN_ON(ret); +cpuhp_prepare_error: +#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */ error_free_backend: channel_backend_free(&chan->backend); error: @@ -1293,8 +1423,7 @@ void lib_ring_buffer_print_errors(struct channel *chan, /* * lib_ring_buffer_switch_old_start: Populate old subbuffer header. * - * Only executed by SWITCH_FLUSH, which can be issued while tracing is active - * or at buffer finalization (destroy). + * Only executed when the buffer is finalized, in SWITCH_FLUSH. */ static void lib_ring_buffer_switch_old_start(struct lib_ring_buffer *buf, @@ -1485,14 +1614,12 @@ int lib_ring_buffer_try_switch_slow(enum switch_mode mode, unsigned long sb_index, commit_count; /* - * We are performing a SWITCH_FLUSH. There may be concurrent - * writes into the buffer if e.g. 
invoked while performing a
-	 * snapshot on an active trace.
+	 * We are performing a SWITCH_FLUSH. At this stage, there are no
+	 * concurrent writes into the buffer.
 	 *
-	 * If the client does not save any header information (sub-buffer
-	 * header size == 0), don't switch empty subbuffer on finalize,
-	 * because it is invalid to deliver a completely empty
-	 * subbuffer.
+	 * The client does not save any header information. Don't
+	 * switch empty subbuffer on finalize, because it is invalid to
+	 * deliver a completely empty subbuffer.
 	 */
 	if (!config->cb.subbuffer_header_size())
 		return -1;
@@ -1606,11 +1733,17 @@ void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf, enum switch_mode m
 }
 EXPORT_SYMBOL_GPL(lib_ring_buffer_switch_slow);
 
+struct switch_param {
+	struct lib_ring_buffer *buf;
+	enum switch_mode mode;
+};
+
 static void remote_switch(void *info)
 {
-	struct lib_ring_buffer *buf = info;
+	struct switch_param *param = info;
+	struct lib_ring_buffer *buf = param->buf;
 
-	lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
+	lib_ring_buffer_switch_slow(buf, param->mode);
 }
 
 static void _lib_ring_buffer_switch_remote(struct lib_ring_buffer *buf,
@@ -1619,6 +1752,7 @@ static void _lib_ring_buffer_switch_remote(struct lib_ring_buffer *buf,
 	struct channel *chan = buf->backend.chan;
 	const struct lib_ring_buffer_config *config = &chan->backend.config;
 	int ret;
+	struct switch_param param;
 
 	/*
 	 * With global synchronization we don't need to use the IPI scheme.
@@ -1639,8 +1773,10 @@ static void _lib_ring_buffer_switch_remote(struct lib_ring_buffer *buf,
 	 * switch.
 	 */
 	get_online_cpus();
+	param.buf = buf;
+	param.mode = mode;
 	ret = smp_call_function_single(buf->backend.cpu,
-				 remote_switch, buf, 1);
+				 remote_switch, &param, 1);
 	if (ret) {
 		/* Remote CPU is offline, do it ourself. */
 		lib_ring_buffer_switch_slow(buf, mode);
@@ -1648,6 +1784,7 @@ static void _lib_ring_buffer_switch_remote(struct lib_ring_buffer *buf,
 	put_online_cpus();
 }
 
+/* Switch sub-buffer if current sub-buffer is non-empty. */
 void lib_ring_buffer_switch_remote(struct lib_ring_buffer *buf)
 {
 	_lib_ring_buffer_switch_remote(buf, SWITCH_ACTIVE);