Performance: mark lib_ring_buffer_write always inline
[lttng-modules.git] / lib / ringbuffer / backend.h
index b11545c7c698124a72df4920f6cbcdf8a9cb8af5..449d663555dc37ecf11e7165fa564a6a45f94f57 100644
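
For readers unfamiliar with the attribute named in the commit title, here is a minimal, self-contained illustration in plain userspace C (not lttng code): static inline on its own is only a hint the compiler may ignore, whereas adding __attribute__((always_inline)) forces the call to be inlined regardless of the usual inlining heuristics.

#include <stdio.h>

/* A hint: the compiler may still emit an out-of-line call. */
static inline int hinted_add(int a, int b)
{
	return a + b;
}

/* Forced: GCC/Clang inline this at every call site, even when the
 * optimizer would otherwise decline to. */
static inline __attribute__((always_inline)) int forced_add(int a, int b)
{
	return a + b;
}

int main(void)
{
	printf("%d %d\n", hinted_add(1, 2), forced_add(3, 4));
	return 0;
}
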
@@ -37,8 +37,8 @@
 #include <linux/uaccess.h>
 
 /* Internal helpers */
-#include "../../wrapper/ringbuffer/backend_internal.h"
-#include "../../wrapper/ringbuffer/frontend_internal.h"
+#include <wrapper/ringbuffer/backend_internal.h>
+#include <wrapper/ringbuffer/frontend_internal.h>
 
 /* Ring buffer backend API */
 
@@ -54,8 +54,8 @@ extern int __lib_ring_buffer_copy_to_user(struct lib_ring_buffer_backend *bufb,
 extern int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb,
                                     size_t offset, void *dest, size_t len);
 
-extern struct page **
-lib_ring_buffer_read_get_page(struct lib_ring_buffer_backend *bufb, size_t offset,
+extern unsigned long *
+lib_ring_buffer_read_get_pfn(struct lib_ring_buffer_backend *bufb, size_t offset,
                              void ***virt);
 
 /*
@@ -83,16 +83,15 @@ lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
  * backend-specific memcpy() operation. Calls the slow path (_ring_buffer_write)
  * if copy is crossing a page boundary.
  */
-static inline
+static inline __attribute__((always_inline))
 void lib_ring_buffer_write(const struct lib_ring_buffer_config *config,
                           struct lib_ring_buffer_ctx *ctx,
                           const void *src, size_t len)
 {
        struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
        struct channel_backend *chanb = &ctx->chan->backend;
-       size_t sbidx, index;
+       size_t sbidx, index, pagecpy;
        size_t offset = ctx->buf_offset;
-       ssize_t pagecpy;
        struct lib_ring_buffer_backend_pages *rpages;
        unsigned long sb_bindex, id;
 
@@ -138,9 +137,8 @@ void lib_ring_buffer_memset(const struct lib_ring_buffer_config *config,
 
        struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
        struct channel_backend *chanb = &ctx->chan->backend;
-       size_t sbidx, index;
+       size_t sbidx, index, pagecpy;
        size_t offset = ctx->buf_offset;
-       ssize_t pagecpy;
        struct lib_ring_buffer_backend_pages *rpages;
        unsigned long sb_bindex, id;
 
@@ -211,7 +209,7 @@ size_t lib_ring_buffer_do_strcpy_from_user_inatomic(const struct lib_ring_buffer
                int ret;
                char c;
 
-               ret = __get_user(c, &src[count]);
+               ret = __copy_from_user_inatomic(&c, src + count, 1);
                if (ret || !c)
                        break;
                lib_ring_buffer_do_copy(config, &dest[count], &c, 1);
@@ -306,9 +304,8 @@ void lib_ring_buffer_copy_from_user_inatomic(const struct lib_ring_buffer_config
 {
        struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
        struct channel_backend *chanb = &ctx->chan->backend;
-       size_t sbidx, index;
+       size_t sbidx, index, pagecpy;
        size_t offset = ctx->buf_offset;
-       ssize_t pagecpy;
        struct lib_ring_buffer_backend_pages *rpages;
        unsigned long sb_bindex, id;
        unsigned long ret;
@@ -337,8 +334,7 @@ void lib_ring_buffer_copy_from_user_inatomic(const struct lib_ring_buffer_config
                        rpages->p[index].virt + (offset & ~PAGE_MASK),
                        src, len);
                if (unlikely(ret > 0)) {
-                       len -= (pagecpy - ret);
-                       offset += (pagecpy - ret);
+                       /* Copy failed. */
                        goto fill_buffer;
                }
        } else {
@@ -482,4 +478,29 @@ unsigned long lib_ring_buffer_get_records_unread(
        return records_unread;
 }
 
+/*
+ * We use __copy_from_user_inatomic to copy userspace data after
+ * checking with access_ok() and disabling page faults.
+ *
+ * Return 0 if OK, nonzero on error.
+ */
+static inline
+unsigned long lib_ring_buffer_copy_from_user_check_nofault(void *dest,
+                                               const void __user *src,
+                                               unsigned long len)
+{
+       unsigned long ret;
+       mm_segment_t old_fs;
+
+       if (!access_ok(VERIFY_READ, src, len))
+               return 1;
+       old_fs = get_fs();
+       set_fs(KERNEL_DS);
+       pagefault_disable();
+       ret = __copy_from_user_inatomic(dest, src, len);
+       pagefault_enable();
+       set_fs(old_fs);
+       return ret;
+}
+
 #endif /* _LIB_RING_BUFFER_BACKEND_H */
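
As a usage illustration for the new helper, here is a hypothetical caller sketch; record_user_payload() is not part of this patch and assumes backend.h and <linux/string.h> are already included. It snapshots user memory from a context where taking a page fault is not allowed, and zero-fills the destination on failure so the record stays well formed.

/* Hypothetical caller, not part of this patch. */
static
void record_user_payload(void *dst, const void __user *usrc, unsigned long len)
{
	if (lib_ring_buffer_copy_from_user_check_nofault(dst, usrc, len))
		memset(dst, 0, len);	/* copy faulted or access_ok() failed */
}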