#include <linux/uaccess.h>
/* Internal helpers */
-#include "../../wrapper/ringbuffer/backend_internal.h"
-#include "../../wrapper/ringbuffer/frontend_internal.h"
+#include <wrapper/ringbuffer/backend_internal.h>
+#include <wrapper/ringbuffer/frontend_internal.h>
/* Ring buffer backend API */
extern int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb,
size_t offset, void *dest, size_t len);
-extern struct page **
-lib_ring_buffer_read_get_page(struct lib_ring_buffer_backend *bufb, size_t offset,
+extern unsigned long *
+lib_ring_buffer_read_get_pfn(struct lib_ring_buffer_backend *bufb, size_t offset,
void ***virt);
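For illustration, a minimal caller sketch, not part of this patch: the pfn-based accessor returns a pointer to the page frame number and hands back the page's kernel mapping through @virt. Splice/mmap users would typically convert *pfn with pfn_to_page(); a plain reader can go through the mapping directly. The name example_peek_byte is invented.

static inline char example_peek_byte(struct lib_ring_buffer_backend *bufb,
		size_t offset)
{
	unsigned long *pfn;
	void **virt;

	pfn = lib_ring_buffer_read_get_pfn(bufb, offset, &virt);
	(void) pfn;	/* splice users: pfn_to_page(*pfn) */
	/* *virt maps the start of the backing page. */
	return ((char *) *virt)[offset & ~PAGE_MASK];
}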
/*
* backend-specific memcpy() operation. Calls the slow path (_ring_buffer_write)
* if the copy crosses a page boundary.
*/
-static inline
+static inline __attribute__((always_inline))
void lib_ring_buffer_write(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer_ctx *ctx,
const void *src, size_t len)
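To situate the fast path being force-inlined here, a hypothetical reserve/write/commit sequence as a probe might issue it, assuming the lib_ring_buffer_reserve()/lib_ring_buffer_commit() entry points from the frontend API and a ctx already initialized by the caller; example_record_u64 is invented:

static inline void example_record_u64(const struct lib_ring_buffer_config *config,
		struct lib_ring_buffer_ctx *ctx, u64 payload)
{
	if (lib_ring_buffer_reserve(config, ctx))
		return;	/* e.g. buffer full or tracing disabled */
	lib_ring_buffer_write(config, ctx, &payload, sizeof(payload));
	lib_ring_buffer_commit(config, ctx);
}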
* terminating character is found in @src. Returns the number of bytes
* copied. Does *not* terminate @dest with a NUL character.
*/
-static inline
+static inline __attribute__((always_inline))
size_t lib_ring_buffer_do_strcpy(const struct lib_ring_buffer_config *config,
char *dest, const char *src, size_t len)
{
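Because lib_ring_buffer_do_strcpy() leaves @dest unterminated, a caller that wants a C string must append the terminator itself using the returned count. A hypothetical wrapper (example_copy_cstring is invented):

static inline void example_copy_cstring(const struct lib_ring_buffer_config *config,
		char *dest, const char *src, size_t len)
{
	size_t copied;

	/* Keep one byte free for the terminator we add ourselves. */
	copied = lib_ring_buffer_do_strcpy(config, dest, src, len - 1);
	dest[copied] = '\0';
}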
* directly without having the src pointer checked with access_ok()
* previously.
*/
-static inline
+static inline __attribute__((always_inline))
size_t lib_ring_buffer_do_strcpy_from_user_inatomic(const struct lib_ring_buffer_config *config,
char *dest, const char __user *src, size_t len)
{
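The _from_user_inatomic variant shifts the access_ok() check and page-fault handling onto the caller. A hypothetical caller honoring that contract, using the three-argument access_ok() form this code base targets (example_strcpy_from_user is invented):

static inline size_t example_strcpy_from_user(const struct lib_ring_buffer_config *config,
		char *dest, const char __user *src, size_t len)
{
	size_t copied;

	if (!access_ok(VERIFY_READ, src, len))
		return 0;
	pagefault_disable();
	copied = lib_ring_buffer_do_strcpy_from_user_inatomic(config,
			dest, src, len);
	pagefault_enable();
	return copied;
}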
* (_ring_buffer_write_from_user_inatomic) if the copy crosses a page boundary.
* Disable the page fault handler to ensure we never try to take the mmap_sem.
*/
-static inline
+static inline __attribute__((always_inline))
void lib_ring_buffer_copy_from_user_inatomic(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer_ctx *ctx,
const void __user *src, size_t len)
rpages->p[index].virt + (offset & ~PAGE_MASK),
src, len);
if (unlikely(ret > 0)) {
- len -= (pagecpy - ret);
- offset += (pagecpy - ret);
+ /* Copy failed: zero-fill the remaining bytes. */
goto fill_buffer;
}
} else {
return records_unread;
}
+/*
+ * We use __copy_from_user_inatomic to copy userspace data after
+ * checking with access_ok() and disabling page faults.
+ *
+ * Returns 0 on success, nonzero on error.
+ */
+static inline
+unsigned long lib_ring_buffer_copy_from_user_check_nofault(void *dest,
+ const void __user *src,
+ unsigned long len)
+{
+ unsigned long ret;
+ mm_segment_t old_fs;
+
+ if (!access_ok(VERIFY_READ, src, len))
+ return 1;
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ pagefault_disable();
+ ret = __copy_from_user_inatomic(dest, src, len);
+ pagefault_enable();
+ set_fs(old_fs);
+ return ret;
+}
+
#endif /* _LIB_RING_BUFFER_BACKEND_H */
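A hypothetical use of the new helper: fetch a small userspace object from a context that must not fault, zeroing the destination on failure since a failed copy may leave it partially written. example_fetch_user_struct is invented:

static inline void example_fetch_user_struct(void *dst,
		const void __user *usrc, unsigned long len)
{
	if (lib_ring_buffer_copy_from_user_check_nofault(dst, usrc, len))
		memset(dst, 0, len);	/* never record stale bytes */
}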