Drop 'linux/file.h' wrapper
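
The wrapper/file.h compatibility header only existed to paper over struct file
accessor differences on older kernels; on every kernel still supported,
filp->f_path.dentry is available directly, so the wrapper is dropped and
<linux/file.h> is included directly. A minimal sketch of what the dropped
wrapper is assumed to have looked like (the lttng_f_dentry alias shown here is
a reconstruction, not the verbatim header):

    /* wrapper/file.h (removed) -- sketch only, alias assumed */
    #ifndef _LTTNG_WRAPPER_FILE_H
    #define _LTTNG_WRAPPER_FILE_H

    #include <linux/file.h>
    #include <linux/fs.h>

    /* Assumed purpose: alias the old f_dentry shorthand onto f_path.dentry. */
    #define lttng_f_dentry	f_path.dentry

    #endif /* _LTTNG_WRAPPER_FILE_H */

With the wrapper gone, filp->lttng_f_dentry->d_inode becomes
filp->f_path.dentry->d_inode in lib_ring_buffer_file_read() and
channel_file_read() below.
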
[lttng-modules.git] / src / lib / ringbuffer / ring_buffer_iterator.c
index 64fc50861b062ba8ba988a7a51f013a407154ccb..b0af7df7e2d5c107b706707f21e63ef62c1fec1c 100644 (file)
@@ -10,7 +10,8 @@
  */
 
 #include <ringbuffer/iterator.h>
-#include <wrapper/file.h>
+#include <wrapper/cpu.h>
+#include <linux/file.h>
 #include <wrapper/uaccess.h>
 #include <linux/jiffies.h>
 #include <linux/delay.h>
  * Returns the size of the event read, -EAGAIN if buffer is empty, -ENODATA if
  * buffer is empty and finalized. The buffer must already be opened for reading.
  */
-ssize_t lib_ring_buffer_get_next_record(struct channel *chan,
-                                       struct lib_ring_buffer *buf)
+ssize_t lib_ring_buffer_get_next_record(struct lttng_kernel_ring_buffer_channel *chan,
+                                       struct lttng_kernel_ring_buffer *buf)
 {
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-       struct lib_ring_buffer_iter *iter = &buf->iter;
+       const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
+       struct lttng_kernel_ring_buffer_iter *iter = &buf->iter;
        int ret;
 
 restart:
        switch (iter->state) {
        case ITER_GET_SUBBUF:
                ret = lib_ring_buffer_get_next_subbuf(buf);
-               if (ret && !READ_ONCE(buf->finalized)
+               if (ret && !LTTNG_READ_ONCE(buf->finalized)
                    && config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
                        /*
                         * Use "pull" scheme for global buffers. The reader
@@ -105,21 +106,39 @@ restart:
 }
 EXPORT_SYMBOL_GPL(lib_ring_buffer_get_next_record);
 
+void lib_ring_buffer_put_current_record(struct lttng_kernel_ring_buffer *buf)
+{
+       struct lttng_kernel_ring_buffer_iter *iter;
+
+       if (!buf)
+               return;
+       iter = &buf->iter;
+       if (iter->state != ITER_NEXT_RECORD)
+               return;
+       iter->read_offset += iter->payload_len;
+       iter->state = ITER_TEST_RECORD;
+       if (iter->read_offset - iter->consumed >= iter->data_size) {
+               lib_ring_buffer_put_next_subbuf(buf);
+               iter->state = ITER_GET_SUBBUF;
+       }
+}
+EXPORT_SYMBOL_GPL(lib_ring_buffer_put_current_record);
+
 static int buf_is_higher(void *a, void *b)
 {
-       struct lib_ring_buffer *bufa = a;
-       struct lib_ring_buffer *bufb = b;
+       struct lttng_kernel_ring_buffer *bufa = a;
+       struct lttng_kernel_ring_buffer *bufb = b;
 
        /* Consider lowest timestamps to be at the top of the heap */
        return (bufa->iter.timestamp < bufb->iter.timestamp);
 }
 
 static
-void lib_ring_buffer_get_empty_buf_records(const struct lib_ring_buffer_config *config,
-                                          struct channel *chan)
+void lib_ring_buffer_get_empty_buf_records(const struct lttng_kernel_ring_buffer_config *config,
+                                          struct lttng_kernel_ring_buffer_channel *chan)
 {
        struct lttng_ptr_heap *heap = &chan->iter.heap;
-       struct lib_ring_buffer *buf, *tmp;
+       struct lttng_kernel_ring_buffer *buf, *tmp;
        ssize_t len;
 
        list_for_each_entry_safe(buf, tmp, &chan->iter.empty_head,
@@ -159,8 +178,8 @@ void lib_ring_buffer_get_empty_buf_records(const struct lib_ring_buffer_config *
 }
 
 static
-void lib_ring_buffer_wait_for_qs(const struct lib_ring_buffer_config *config,
-                                struct channel *chan)
+void lib_ring_buffer_wait_for_qs(const struct lttng_kernel_ring_buffer_config *config,
+                                struct lttng_kernel_ring_buffer_channel *chan)
 {
        u64 timestamp_qs;
        unsigned long wait_msecs;
@@ -220,11 +239,11 @@ void lib_ring_buffer_wait_for_qs(const struct lib_ring_buffer_config *config,
  * opened for reading.
  */
 
-ssize_t channel_get_next_record(struct channel *chan,
-                               struct lib_ring_buffer **ret_buf)
+ssize_t channel_get_next_record(struct lttng_kernel_ring_buffer_channel *chan,
+                               struct lttng_kernel_ring_buffer **ret_buf)
 {
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-       struct lib_ring_buffer *buf;
+       const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
+       struct lttng_kernel_ring_buffer *buf;
        struct lttng_ptr_heap *heap;
        ssize_t len;
 
@@ -319,7 +338,7 @@ ssize_t channel_get_next_record(struct channel *chan,
 EXPORT_SYMBOL_GPL(channel_get_next_record);
 
 static
-void lib_ring_buffer_iterator_init(struct channel *chan, struct lib_ring_buffer *buf)
+void lib_ring_buffer_iterator_init(struct lttng_kernel_ring_buffer_channel *chan, struct lttng_kernel_ring_buffer *buf)
 {
        if (buf->iter.allocated)
                return;
@@ -335,15 +354,15 @@ void lib_ring_buffer_iterator_init(struct channel *chan, struct lib_ring_buffer
                list_add(&buf->iter.empty_node, &chan->iter.empty_head);
 }
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
 
 int lttng_cpuhp_rb_iter_online(unsigned int cpu,
                struct lttng_cpuhp_node *node)
 {
-       struct channel *chan = container_of(node, struct channel,
+       struct lttng_kernel_ring_buffer_channel *chan = container_of(node, struct lttng_kernel_ring_buffer_channel,
                                            cpuhp_iter_online);
-       struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
+       struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
+       const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
 
        CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
 
@@ -352,7 +371,7 @@ int lttng_cpuhp_rb_iter_online(unsigned int cpu,
 }
 EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_iter_online);
 
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
 
 #ifdef CONFIG_HOTPLUG_CPU
 static
@@ -361,10 +380,10 @@ int channel_iterator_cpu_hotplug(struct notifier_block *nb,
                                           void *hcpu)
 {
        unsigned int cpu = (unsigned long)hcpu;
-       struct channel *chan = container_of(nb, struct channel,
+       struct lttng_kernel_ring_buffer_channel *chan = container_of(nb, struct lttng_kernel_ring_buffer_channel,
                                            hp_iter_notifier);
-       struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
+       struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
+       const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
 
        if (!chan->hp_iter_enable)
                return NOTIFY_DONE;
@@ -384,12 +403,12 @@ int channel_iterator_cpu_hotplug(struct notifier_block *nb,
 }
 #endif
 
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
 
-int channel_iterator_init(struct channel *chan)
+int channel_iterator_init(struct lttng_kernel_ring_buffer_channel *chan)
 {
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-       struct lib_ring_buffer *buf;
+       const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
+       struct lttng_kernel_ring_buffer *buf;
 
        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
                int ret;
@@ -401,13 +420,13 @@ int channel_iterator_init(struct channel *chan)
                if (ret)
                        return ret;
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
                chan->cpuhp_iter_online.component = LTTNG_RING_BUFFER_ITER;
                ret = cpuhp_state_add_instance(lttng_rb_hp_online,
                        &chan->cpuhp_iter_online.node);
                if (ret)
                        return ret;
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
                {
                        int cpu;
 
@@ -422,13 +441,13 @@ int channel_iterator_init(struct channel *chan)
                        chan->hp_iter_notifier.priority = 10;
                        register_cpu_notifier(&chan->hp_iter_notifier);
 
-                       get_online_cpus();
+                       lttng_cpus_read_lock();
                        for_each_online_cpu(cpu) {
                                buf = per_cpu_ptr(chan->backend.buf, cpu);
                                lib_ring_buffer_iterator_init(chan, buf);
                        }
                        chan->hp_iter_enable = 1;
-                       put_online_cpus();
+                       lttng_cpus_read_unlock();
 #else
                        for_each_possible_cpu(cpu) {
                                buf = per_cpu_ptr(chan->backend.buf, cpu);
@@ -436,7 +455,7 @@ int channel_iterator_init(struct channel *chan)
                        }
 #endif
                }
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
        } else {
                buf = channel_get_ring_buffer(config, chan, 0);
                lib_ring_buffer_iterator_init(chan, buf);
@@ -444,12 +463,12 @@ int channel_iterator_init(struct channel *chan)
        return 0;
 }
 
-void channel_iterator_unregister_notifiers(struct channel *chan)
+void channel_iterator_unregister_notifiers(struct lttng_kernel_ring_buffer_channel *chan)
 {
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
+       const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
 
        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
                {
                        int ret;
 
@@ -457,25 +476,25 @@ void channel_iterator_unregister_notifiers(struct channel *chan)
                                &chan->cpuhp_iter_online.node);
                        WARN_ON(ret);
                }
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
                chan->hp_iter_enable = 0;
                unregister_cpu_notifier(&chan->hp_iter_notifier);
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
        }
 }
 
-void channel_iterator_free(struct channel *chan)
+void channel_iterator_free(struct lttng_kernel_ring_buffer_channel *chan)
 {
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
+       const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
 
        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
                lttng_heap_free(&chan->iter.heap);
 }
 
-int lib_ring_buffer_iterator_open(struct lib_ring_buffer *buf)
+int lib_ring_buffer_iterator_open(struct lttng_kernel_ring_buffer *buf)
 {
-       struct channel *chan = buf->backend.chan;
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
+       struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
+       const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
        CHAN_WARN_ON(chan, config->output != RING_BUFFER_ITERATOR);
        return lib_ring_buffer_open_read(buf);
 }
@@ -486,22 +505,22 @@ EXPORT_SYMBOL_GPL(lib_ring_buffer_iterator_open);
  * iterator can leave the buffer in "GET" state, which is not consistent with
  * other types of output (mmap, splice, raw data read).
  */
-void lib_ring_buffer_iterator_release(struct lib_ring_buffer *buf)
+void lib_ring_buffer_iterator_release(struct lttng_kernel_ring_buffer *buf)
 {
        lib_ring_buffer_release_read(buf);
 }
 EXPORT_SYMBOL_GPL(lib_ring_buffer_iterator_release);
 
-int channel_iterator_open(struct channel *chan)
+int channel_iterator_open(struct lttng_kernel_ring_buffer_channel *chan)
 {
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-       struct lib_ring_buffer *buf;
+       const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
+       struct lttng_kernel_ring_buffer *buf;
        int ret = 0, cpu;
 
        CHAN_WARN_ON(chan, config->output != RING_BUFFER_ITERATOR);
 
        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-               get_online_cpus();
+               lttng_cpus_read_lock();
                /* Allow CPU hotplug to keep track of opened reader */
                chan->iter.read_open = 1;
                for_each_channel_cpu(cpu, chan) {
@@ -511,7 +530,7 @@ int channel_iterator_open(struct channel *chan)
                                goto error;
                        buf->iter.read_open = 1;
                }
-               put_online_cpus();
+               lttng_cpus_read_unlock();
        } else {
                buf = channel_get_ring_buffer(config, chan, 0);
                ret = lib_ring_buffer_iterator_open(buf);
@@ -520,19 +539,19 @@ int channel_iterator_open(struct channel *chan)
 error:
        /* Error should always happen on CPU 0, hence no close is required. */
        CHAN_WARN_ON(chan, cpu != 0);
-       put_online_cpus();
+       lttng_cpus_read_unlock();
        return ret;
 }
 EXPORT_SYMBOL_GPL(channel_iterator_open);
 
-void channel_iterator_release(struct channel *chan)
+void channel_iterator_release(struct lttng_kernel_ring_buffer_channel *chan)
 {
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-       struct lib_ring_buffer *buf;
+       const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
+       struct lttng_kernel_ring_buffer *buf;
        int cpu;
 
        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-               get_online_cpus();
+               lttng_cpus_read_lock();
                for_each_channel_cpu(cpu, chan) {
                        buf = channel_get_ring_buffer(config, chan, cpu);
                        if (buf->iter.read_open) {
@@ -541,7 +560,7 @@ void channel_iterator_release(struct channel *chan)
                        }
                }
                chan->iter.read_open = 0;
-               put_online_cpus();
+               lttng_cpus_read_unlock();
        } else {
                buf = channel_get_ring_buffer(config, chan, 0);
                lib_ring_buffer_iterator_release(buf);
@@ -549,9 +568,9 @@ void channel_iterator_release(struct channel *chan)
 }
 EXPORT_SYMBOL_GPL(channel_iterator_release);
 
-void lib_ring_buffer_iterator_reset(struct lib_ring_buffer *buf)
+void lib_ring_buffer_iterator_reset(struct lttng_kernel_ring_buffer *buf)
 {
-       struct channel *chan = buf->backend.chan;
+       struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
 
        if (buf->iter.state != ITER_GET_SUBBUF)
                lib_ring_buffer_put_next_subbuf(buf);
@@ -568,10 +587,10 @@ void lib_ring_buffer_iterator_reset(struct lib_ring_buffer *buf)
        /* Don't reset allocated and read_open */
 }
 
-void channel_iterator_reset(struct channel *chan)
+void channel_iterator_reset(struct lttng_kernel_ring_buffer_channel *chan)
 {
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
-       struct lib_ring_buffer *buf;
+       const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
+       struct lttng_kernel_ring_buffer *buf;
        int cpu;
 
        /* Empty heap, put into empty_head */
@@ -597,11 +616,11 @@ ssize_t channel_ring_buffer_file_read(struct file *filp,
                                      char __user *user_buf,
                                      size_t count,
                                      loff_t *ppos,
-                                     struct channel *chan,
-                                     struct lib_ring_buffer *buf,
+                                     struct lttng_kernel_ring_buffer_channel *chan,
+                                     struct lttng_kernel_ring_buffer *buf,
                                      int fusionmerge)
 {
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
+       const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
        size_t read_count = 0, read_offset;
        ssize_t len;
 
@@ -696,12 +715,14 @@ skip_get_next:
                        return -EFAULT;
                }
                read_count += copy_len;
-       };
-       return read_count;
+       }
+       goto put_record;
 
 nodata:
        *ppos = 0;
        chan->iter.len_left = 0;
+put_record:
+       lib_ring_buffer_put_current_record(buf);
        return read_count;
 }
 
@@ -722,9 +743,9 @@ ssize_t lib_ring_buffer_file_read(struct file *filp,
                                  size_t count,
                                  loff_t *ppos)
 {
-       struct inode *inode = filp->lttng_f_dentry->d_inode;
-       struct lib_ring_buffer *buf = inode->i_private;
-       struct channel *chan = buf->backend.chan;
+       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct lttng_kernel_ring_buffer *buf = inode->i_private;
+       struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
 
        return channel_ring_buffer_file_read(filp, user_buf, count, ppos,
                                             chan, buf, 0);
@@ -747,15 +768,15 @@ ssize_t channel_file_read(struct file *filp,
                          size_t count,
                          loff_t *ppos)
 {
-       struct inode *inode = filp->lttng_f_dentry->d_inode;
-       struct channel *chan = inode->i_private;
-       const struct lib_ring_buffer_config *config = &chan->backend.config;
+       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct lttng_kernel_ring_buffer_channel *chan = inode->i_private;
+       const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
 
        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
                return channel_ring_buffer_file_read(filp, user_buf, count,
                                                     ppos, chan, NULL, 1);
        else {
-               struct lib_ring_buffer *buf =
+               struct lttng_kernel_ring_buffer *buf =
                        channel_get_ring_buffer(config, chan, 0);
                return channel_ring_buffer_file_read(filp, user_buf, count,
                                                     ppos, chan, buf, 0);
@@ -765,7 +786,7 @@ ssize_t channel_file_read(struct file *filp,
 static
 int lib_ring_buffer_file_open(struct inode *inode, struct file *file)
 {
-       struct lib_ring_buffer *buf = inode->i_private;
+       struct lttng_kernel_ring_buffer *buf = inode->i_private;
        int ret;
 
        ret = lib_ring_buffer_iterator_open(buf);
@@ -786,7 +807,7 @@ release_iter:
 static
 int lib_ring_buffer_file_release(struct inode *inode, struct file *file)
 {
-       struct lib_ring_buffer *buf = inode->i_private;
+       struct lttng_kernel_ring_buffer *buf = inode->i_private;
 
        lib_ring_buffer_iterator_release(buf);
        return 0;
@@ -795,7 +816,7 @@ int lib_ring_buffer_file_release(struct inode *inode, struct file *file)
 static
 int channel_file_open(struct inode *inode, struct file *file)
 {
-       struct channel *chan = inode->i_private;
+       struct lttng_kernel_ring_buffer_channel *chan = inode->i_private;
        int ret;
 
        ret = channel_iterator_open(chan);
@@ -816,7 +837,7 @@ release_iter:
 static
 int channel_file_release(struct inode *inode, struct file *file)
 {
-       struct channel *chan = inode->i_private;
+       struct lttng_kernel_ring_buffer_channel *chan = inode->i_private;
 
        channel_iterator_release(chan);
        return 0;