Fix: ACCESS_ONCE() removed in kernel 4.15
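ACCESS_ONCE() was removed from the mainline kernel in v4.15 after all in-tree users had been converted to READ_ONCE()/WRITE_ONCE(), so the out-of-tree module has to read buf->finalized through READ_ONCE() as the hunk below does. As a minimal compatibility sketch, assuming a hypothetical shim rather than the project's actual wrapper header, kernels that predate READ_ONCE() (introduced in v3.19) could be handled like this:

	/*
	 * Illustrative compatibility shim (an assumption, not the project's
	 * actual wrapper): fall back to the legacy ACCESS_ONCE() on kernels
	 * that do not provide READ_ONCE(), so call sites use one spelling.
	 */
	#include <linux/version.h>
	#include <linux/compiler.h>

	#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0)) && !defined(READ_ONCE)
	#define READ_ONCE(x)	ACCESS_ONCE(x)
	#endif

With such a shim in place, the read of the finalized flag in the iterator path compiles the same way on every supported kernel.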
diff --git a/lib/ringbuffer/ring_buffer_iterator.c b/lib/ringbuffer/ring_buffer_iterator.c
index 9d0197c73fbe074df3f2f60df806b76ad601ef12..61eaa5b775ece93034391c6e169ecbafd7cc78da 100644
@@ -25,7 +25,8 @@
  *     Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
  */
 
-#include "../../wrapper/ringbuffer/iterator.h"
+#include <wrapper/ringbuffer/iterator.h>
+#include <wrapper/file.h>
 #include <linux/jiffies.h>
 #include <linux/delay.h>
 #include <linux/module.h>
@@ -60,7 +61,7 @@ restart:
        switch (iter->state) {
        case ITER_GET_SUBBUF:
                ret = lib_ring_buffer_get_next_subbuf(buf);
-               if (ret && !ACCESS_ONCE(buf->finalized)
+               if (ret && !READ_ONCE(buf->finalized)
                    && config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
                        /*
                         * Use "pull" scheme for global buffers. The reader
@@ -70,7 +71,7 @@ restart:
                         * Per-CPU buffers rather use a "push" scheme because
                         * the IPI needed to flush all CPU's buffers is too
                         * costly. In the "push" scheme, the reader waits for
-                        * the writer periodic deferrable timer to flush the
+                        * the writer periodic timer to flush the
                         * buffers (keeping track of a quiescent state
                         * timestamp). Therefore, the writer "pushes" data out
                         * of the buffers rather than letting the reader "pull"
@@ -349,6 +350,25 @@ void lib_ring_buffer_iterator_init(struct channel *chan, struct lib_ring_buffer
                list_add(&buf->iter.empty_node, &chan->iter.empty_head);
 }
 
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+
+int lttng_cpuhp_rb_iter_online(unsigned int cpu,
+               struct lttng_cpuhp_node *node)
+{
+       struct channel *chan = container_of(node, struct channel,
+                                           cpuhp_iter_online);
+       struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+
+       CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
+
+       lib_ring_buffer_iterator_init(chan, buf);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_iter_online);
+
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+
 #ifdef CONFIG_HOTPLUG_CPU
 static
 int channel_iterator_cpu_hotplug(struct notifier_block *nb,
@@ -379,13 +399,15 @@ int channel_iterator_cpu_hotplug(struct notifier_block *nb,
 }
 #endif
 
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+
 int channel_iterator_init(struct channel *chan)
 {
        const struct lib_ring_buffer_config *config = &chan->backend.config;
        struct lib_ring_buffer *buf;
 
        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-               int cpu, ret;
+               int ret;
 
                INIT_LIST_HEAD(&chan->iter.empty_head);
                ret = lttng_heap_init(&chan->iter.heap,
@@ -393,29 +415,43 @@ int channel_iterator_init(struct channel *chan)
                                GFP_KERNEL, buf_is_higher);
                if (ret)
                        return ret;
-               /*
-                * In case of non-hotplug cpu, if the ring-buffer is allocated
-                * in early initcall, it will not be notified of secondary cpus.
-                * In that off case, we need to allocate for all possible cpus.
-                */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+               chan->cpuhp_iter_online.component = LTTNG_RING_BUFFER_ITER;
+               ret = cpuhp_state_add_instance(lttng_rb_hp_online,
+                       &chan->cpuhp_iter_online.node);
+               if (ret)
+                       return ret;
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+               {
+                       int cpu;
+
+                       /*
+                        * In case of non-hotplug cpu, if the ring-buffer is allocated
+                        * in early initcall, it will not be notified of secondary cpus.
+                        * In that off case, we need to allocate for all possible cpus.
+                        */
 #ifdef CONFIG_HOTPLUG_CPU
-               chan->hp_iter_notifier.notifier_call =
-                       channel_iterator_cpu_hotplug;
-               chan->hp_iter_notifier.priority = 10;
-               register_cpu_notifier(&chan->hp_iter_notifier);
-               get_online_cpus();
-               for_each_online_cpu(cpu) {
-                       buf = per_cpu_ptr(chan->backend.buf, cpu);
-                       lib_ring_buffer_iterator_init(chan, buf);
-               }
-               chan->hp_iter_enable = 1;
-               put_online_cpus();
+                       chan->hp_iter_notifier.notifier_call =
+                               channel_iterator_cpu_hotplug;
+                       chan->hp_iter_notifier.priority = 10;
+                       register_cpu_notifier(&chan->hp_iter_notifier);
+
+                       get_online_cpus();
+                       for_each_online_cpu(cpu) {
+                               buf = per_cpu_ptr(chan->backend.buf, cpu);
+                               lib_ring_buffer_iterator_init(chan, buf);
+                       }
+                       chan->hp_iter_enable = 1;
+                       put_online_cpus();
 #else
-               for_each_possible_cpu(cpu) {
-                       buf = per_cpu_ptr(chan->backend.buf, cpu);
-                       lib_ring_buffer_iterator_init(chan, buf);
-               }
+                       for_each_possible_cpu(cpu) {
+                               buf = per_cpu_ptr(chan->backend.buf, cpu);
+                               lib_ring_buffer_iterator_init(chan, buf);
+                       }
 #endif
+               }
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
        } else {
                buf = channel_get_ring_buffer(config, chan, 0);
                lib_ring_buffer_iterator_init(chan, buf);
@@ -428,8 +464,18 @@ void channel_iterator_unregister_notifiers(struct channel *chan)
        const struct lib_ring_buffer_config *config = &chan->backend.config;
 
        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+               {
+                       int ret;
+
+                       ret = cpuhp_state_remove_instance(lttng_rb_hp_online,
+                               &chan->cpuhp_iter_online.node);
+                       WARN_ON(ret);
+               }
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
                chan->hp_iter_enable = 0;
                unregister_cpu_notifier(&chan->hp_iter_notifier);
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
        }
 }
 
@@ -691,7 +737,7 @@ ssize_t lib_ring_buffer_file_read(struct file *filp,
                                  size_t count,
                                  loff_t *ppos)
 {
-       struct inode *inode = filp->f_dentry->d_inode;
+       struct inode *inode = filp->lttng_f_dentry->d_inode;
        struct lib_ring_buffer *buf = inode->i_private;
        struct channel *chan = buf->backend.chan;
 
@@ -716,7 +762,7 @@ ssize_t channel_file_read(struct file *filp,
                          size_t count,
                          loff_t *ppos)
 {
-       struct inode *inode = filp->f_dentry->d_inode;
+       struct inode *inode = filp->lttng_f_dentry->d_inode;
        struct channel *chan = inode->i_private;
        const struct lib_ring_buffer_config *config = &chan->backend.config;
 
@@ -796,7 +842,7 @@ const struct file_operations channel_payload_file_operations = {
        .open = channel_file_open,
        .release = channel_file_release,
        .read = channel_file_read,
-       .llseek = lib_ring_buffer_no_llseek,
+       .llseek = vfs_lib_ring_buffer_no_llseek,
 };
 EXPORT_SYMBOL_GPL(channel_payload_file_operations);
 
@@ -805,6 +851,6 @@ const struct file_operations lib_ring_buffer_payload_file_operations = {
        .open = lib_ring_buffer_file_open,
        .release = lib_ring_buffer_file_release,
        .read = lib_ring_buffer_file_read,
-       .llseek = lib_ring_buffer_no_llseek,
+       .llseek = vfs_lib_ring_buffer_no_llseek,
 };
 EXPORT_SYMBOL_GPL(lib_ring_buffer_payload_file_operations);
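The remaining hunks adapt to VFS interface changes: struct file lost its f_dentry alias, so the dentry has to be reached through f_path.dentry on newer kernels, and the .llseek handler is now referenced through a vfs_ prefixed wrapper symbol defined elsewhere in the tree (not shown in this diff). A hypothetical sketch of the wrapper/file.h idea follows; the version cutoff and the macro body are assumptions for illustration, only the lttng_f_dentry name comes from the diff:

	/* Hypothetical wrapper/file.h sketch (cutoff version is an assumption). */
	#include <linux/version.h>
	#include <linux/fs.h>

	#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0))
	#define lttng_f_dentry	f_path.dentry
	#else
	#define lttng_f_dentry	f_dentry
	#endif

With a wrapper of that shape, filp->lttng_f_dentry->d_inode expands to filp->f_path.dentry->d_inode on recent kernels and to the legacy filp->f_dentry->d_inode on older ones, which matches the read paths changed above.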