Cleanup: disable page faults after access_ok
author Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Mon, 10 May 2021 12:34:16 +0000 (08:34 -0400)
committer Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Tue, 11 May 2021 19:14:55 +0000 (15:14 -0400)
Page faults do not need to be disabled across the access_ok() check:
it only validates the user address range and never touches user
memory. Move pagefault_disable() after the check and simplify the
fill-buffer error paths accordingly.

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Change-Id: I2e31d308d45efdfede04e2159c37c066749871ae
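Why no fault protection is needed: access_ok(), reached here through the
lttng_access_ok() compatibility wrapper, is a pure range check against the
user address space limit; it never dereferences the pointer, so it cannot
fault. Below is a conceptual, self-contained sketch of that check. The
constant and helper names are illustrative only, not kernel API:

#include <stdbool.h>
#include <stddef.h>

/* Illustrative user-VA limit (x86-64 value); the kernel uses TASK_SIZE. */
#define USER_VA_LIMIT_SKETCH 0x00007fffffffffffUL

/*
 * Conceptual model of access_ok(): reject ranges that overflow or
 * extend past the user limit. No memory is touched, so no page fault
 * can occur and no pagefault_disable() is needed around the check.
 */
static bool conceptual_access_ok(const void *addr, size_t len)
{
        unsigned long a = (unsigned long)addr;

        return len <= USER_VA_LIMIT_SKETCH && a <= USER_VA_LIMIT_SKETCH - len;
}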

include/ringbuffer/backend.h

index 1ede713c82de37233c54c86b1b4b20c41e1a7824..be72d769520337425fe53996e16295b0b646700e 100644
@@ -286,17 +286,17 @@ void lib_ring_buffer_copy_from_user_inatomic(const struct lttng_kernel_ring_buff
        index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
        pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
 
-       pagefault_disable();
        if (unlikely(!lttng_access_ok(VERIFY_READ, src, len)))
                goto fill_buffer;
 
+       pagefault_disable();
        if (likely(pagecpy == len)) {
                ret = lib_ring_buffer_do_copy_from_user_inatomic(
                        backend_pages->p[index].virt + (offset & ~PAGE_MASK),
                        src, len);
                if (unlikely(ret > 0)) {
                        /* Copy failed. */
-                       goto fill_buffer;
+                       goto fill_buffer_enable_pf;
                }
        } else {
                _lib_ring_buffer_copy_from_user_inatomic(bufb, offset, src, len, 0);
@@ -306,8 +306,9 @@ void lib_ring_buffer_copy_from_user_inatomic(const struct lttng_kernel_ring_buff
 
        return;
 
-fill_buffer:
+fill_buffer_enable_pf:
        pagefault_enable();
+fill_buffer:
        /*
         * In the error path we call the slow path version to avoid
         * the pollution of static inline code.
@@ -353,10 +354,10 @@ void lib_ring_buffer_strcpy_from_user_inatomic(const struct lttng_kernel_ring_bu
        index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
        pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
 
-       pagefault_disable();
        if (unlikely(!lttng_access_ok(VERIFY_READ, src, len)))
                goto fill_buffer;
 
+       pagefault_disable();
        if (likely(pagecpy == len)) {
                size_t count;
 
@@ -388,7 +389,6 @@ void lib_ring_buffer_strcpy_from_user_inatomic(const struct lttng_kernel_ring_bu
        return;
 
 fill_buffer:
-       pagefault_enable();
        /*
         * In the error path we call the slow path version to avoid
         * the pollution of static inline code.
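
To see the resulting control flow outside the diff, here is a minimal,
self-contained sketch of the two-label error path. The stub functions
below stand in for the kernel and LTTng primitives used by the patch
(pagefault_disable()/pagefault_enable(), lttng_access_ok(),
lib_ring_buffer_do_copy_from_user_inatomic()); only the label structure
mirrors the change.

#include <stddef.h>

/* Local stubs standing in for the primitives used by the patch. */
static int faults_disabled;
static void pagefault_disable_stub(void) { faults_disabled++; }
static void pagefault_enable_stub(void) { faults_disabled--; }
static int access_ok_stub(const void *src, size_t len)
{
        return src != NULL && len != 0;
}
static int copy_inatomic_stub(void *dst, const void *src, size_t len)
{
        (void)dst; (void)src; (void)len;
        return 0;       /* 0 = success, > 0 = copy failed */
}

static void copy_from_user_sketch(void *dst, const void *src, size_t len)
{
        if (!access_ok_stub(src, len))
                goto fill_buffer;               /* faults were never disabled here */

        pagefault_disable_stub();
        if (copy_inatomic_stub(dst, src, len) > 0)
                goto fill_buffer_enable_pf;     /* copy failed with faults disabled */
        pagefault_enable_stub();
        return;

fill_buffer_enable_pf:
        pagefault_enable_stub();                /* balance the disable... */
fill_buffer:
        /*
         * ...then fall through to the common slow path, which fills
         * the ring buffer region (zero padding) out of line.
         */
        return;
}

Falling through from fill_buffer_enable_pf into fill_buffer keeps a single
slow-path call site while re-enabling page faults only on the paths that
actually disabled them.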