X-Git-Url: http://git.lttng.org/?a=blobdiff_plain;f=lib%2Fringbuffer%2Fbackend.h;h=449d663555dc37ecf11e7165fa564a6a45f94f57;hb=8617eb9a6f9dd4e63f8cb649120b3b3ae79df4f0;hp=b11545c7c698124a72df4920f6cbcdf8a9cb8af5;hpb=16f78f3ad7dea4a0b442b8c7e0de01935f28e656;p=lttng-modules.git

diff --git a/lib/ringbuffer/backend.h b/lib/ringbuffer/backend.h
index b11545c7..449d6635 100644
--- a/lib/ringbuffer/backend.h
+++ b/lib/ringbuffer/backend.h
@@ -37,8 +37,8 @@
 #include
 
 /* Internal helpers */
-#include "../../wrapper/ringbuffer/backend_internal.h"
-#include "../../wrapper/ringbuffer/frontend_internal.h"
+#include <wrapper/ringbuffer/backend_internal.h>
+#include <wrapper/ringbuffer/frontend_internal.h>
 
 /* Ring buffer backend API */
 
@@ -54,8 +54,8 @@ extern int __lib_ring_buffer_copy_to_user(struct lib_ring_buffer_backend *bufb,
 extern int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb,
 				     size_t offset, void *dest, size_t len);
-extern struct page **
-lib_ring_buffer_read_get_page(struct lib_ring_buffer_backend *bufb, size_t offset,
+extern unsigned long *
+lib_ring_buffer_read_get_pfn(struct lib_ring_buffer_backend *bufb, size_t offset,
 			      void ***virt);
 
 /*
@@ -83,16 +83,15 @@ lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
  * backend-specific memcpy() operation. Calls the slow path (_ring_buffer_write)
  * if copy is crossing a page boundary.
  */
-static inline
+static inline __attribute__((always_inline))
 void lib_ring_buffer_write(const struct lib_ring_buffer_config *config,
 			   struct lib_ring_buffer_ctx *ctx,
 			   const void *src, size_t len)
 {
 	struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
 	struct channel_backend *chanb = &ctx->chan->backend;
-	size_t sbidx, index;
+	size_t sbidx, index, pagecpy;
 	size_t offset = ctx->buf_offset;
-	ssize_t pagecpy;
 	struct lib_ring_buffer_backend_pages *rpages;
 	unsigned long sb_bindex, id;
 
@@ -138,9 +137,8 @@ void lib_ring_buffer_memset(const struct lib_ring_buffer_config *config,
 	struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
 	struct channel_backend *chanb = &ctx->chan->backend;
-	size_t sbidx, index;
+	size_t sbidx, index, pagecpy;
 	size_t offset = ctx->buf_offset;
-	ssize_t pagecpy;
 	struct lib_ring_buffer_backend_pages *rpages;
 	unsigned long sb_bindex, id;
 
@@ -211,7 +209,7 @@ size_t lib_ring_buffer_do_strcpy_from_user_inatomic(const struct lib_ring_buffer
 		int ret;
 		char c;
 
-		ret = __get_user(c, &src[count]);
+		ret = __copy_from_user_inatomic(&c, src + count, 1);
 		if (ret || !c)
 			break;
 		lib_ring_buffer_do_copy(config, &dest[count], &c, 1);
@@ -306,9 +304,8 @@ void lib_ring_buffer_copy_from_user_inatomic(const struct lib_ring_buffer_config
 {
 	struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
 	struct channel_backend *chanb = &ctx->chan->backend;
-	size_t sbidx, index;
+	size_t sbidx, index, pagecpy;
 	size_t offset = ctx->buf_offset;
-	ssize_t pagecpy;
 	struct lib_ring_buffer_backend_pages *rpages;
 	unsigned long sb_bindex, id;
 	unsigned long ret;
@@ -337,8 +334,7 @@ void lib_ring_buffer_copy_from_user_inatomic(const struct lib_ring_buffer_config
 			rpages->p[index].virt + (offset & ~PAGE_MASK),
 			src, len);
 		if (unlikely(ret > 0)) {
-			len -= (pagecpy - ret);
-			offset += (pagecpy - ret);
+			/* Copy failed. */
 			goto fill_buffer;
 		}
 	} else {
@@ -482,4 +478,29 @@ unsigned long lib_ring_buffer_get_records_unread(
 	return records_unread;
 }
 
+/*
+ * We use __copy_from_user_inatomic to copy userspace data after
+ * checking with access_ok() and disabling page faults.
+ *
+ * Return 0 if OK, nonzero on error.
+ */
+static inline
+unsigned long lib_ring_buffer_copy_from_user_check_nofault(void *dest,
+						const void __user *src,
+						unsigned long len)
+{
+	unsigned long ret;
+	mm_segment_t old_fs;
+
+	if (!access_ok(VERIFY_READ, src, len))
+		return 1;
+	old_fs = get_fs();
+	set_fs(KERNEL_DS);
+	pagefault_disable();
+	ret = __copy_from_user_inatomic(dest, src, len);
+	pagefault_enable();
+	set_fs(old_fs);
+	return ret;
+}
+
 #endif /* _LIB_RING_BUFFER_BACKEND_H */
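
Background on the @@ -211,7 +209,7 @@ hunk: the string-copy loop runs in probe context with page faults disabled, where the explicitly "inatomic" user-access primitives are the documented-safe way to touch user memory, so the one-byte __get_user() read is replaced with a one-byte __copy_from_user_inatomic(). The sketch below shows the same byte-at-a-time pattern in isolation; copy_user_cstr_nofault() is a hypothetical name, not part of this patch, and it assumes a kernel of the same era as the patch (pre-4.13 __copy_from_user_inatomic()):

#include <linux/uaccess.h>	/* __copy_from_user_inatomic() */

/*
 * Hypothetical helper: copy a NUL-terminated user string byte by byte
 * without taking page faults. The caller must run with page faults
 * disabled (pagefault_disable()) and must have validated the source
 * range with access_ok() beforehand.
 */
static size_t copy_user_cstr_nofault(char *dest, const char __user *src,
				     size_t max_len)
{
	size_t count = 0;

	while (max_len && count < max_len - 1) {
		char c;

		/* Nonzero return means the byte could not be copied. */
		if (__copy_from_user_inatomic(&c, src + count, 1) || !c)
			break;
		dest[count++] = c;
	}
	if (max_len)
		dest[count] = '\0';
	return count;
}

Reading one byte at a time is what lets the loop stop cleanly at either the terminating NUL or the first unreadable byte, mirroring the loop in lib_ring_buffer_do_strcpy_from_user_inatomic() above.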
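
Background on the new lib_ring_buffer_copy_from_user_check_nofault() helper added by the @@ -482,4 +478,29 @@ hunk: it bundles the access_ok() range check, the set_fs()/get_fs() dance, and pagefault_disable()/pagefault_enable() around a single __copy_from_user_inatomic(), returning 0 on success and nonzero if any byte could not be copied. The access_ok(VERIFY_READ, ...) and mm_segment_t APIs tie this code to kernels before 5.0. A caller might use it as in this sketch; record_user_payload() is illustrative only and not part of the patch:

#include <linux/string.h>	/* memset() */

/*
 * Hypothetical caller: snapshot a user-space buffer from probe context
 * before writing it to the ring buffer. On failure the scratch area is
 * zero-filled rather than left holding stale data.
 */
static void record_user_payload(void *scratch, const void __user *uptr,
				unsigned long len)
{
	if (lib_ring_buffer_copy_from_user_check_nofault(scratch, uptr, len))
		memset(scratch, 0, len);	/* fault or bad user pointer */
}

Because the helper disables page faults itself, it is safe to call from atomic tracing context, at the cost of failing (rather than faulting in) any page that is not currently resident.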