/* Ring buffer backend access (read/write) */
-extern size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb,
+extern size_t lib_ring_buffer_read(struct lttng_kernel_ring_buffer_backend *bufb,
size_t offset, void *dest, size_t len);
-extern int __lib_ring_buffer_copy_to_user(struct lib_ring_buffer_backend *bufb,
+extern int __lib_ring_buffer_copy_to_user(struct lttng_kernel_ring_buffer_backend *bufb,
size_t offset, void __user *dest,
size_t len);
-extern int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb,
+extern int lib_ring_buffer_read_cstr(struct lttng_kernel_ring_buffer_backend *bufb,
size_t offset, void *dest, size_t len);
extern unsigned long *
-lib_ring_buffer_read_get_pfn(struct lib_ring_buffer_backend *bufb, size_t offset,
+lib_ring_buffer_read_get_pfn(struct lttng_kernel_ring_buffer_backend *bufb, size_t offset,
void ***virt);
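/*
 * Usage sketch (illustrative, not part of the patch): copying record bytes
 * out of the backend with the renamed type. The caller and the offsets are
 * hypothetical; lib_ring_buffer_read() returns the number of bytes it was
 * able to copy, which may be less than the requested length.
 */
static void example_read_payload(struct lttng_kernel_ring_buffer_backend *bufb,
		size_t rec_offset, size_t rec_len)
{
	char payload[128];
	size_t copied;

	if (rec_len > sizeof(payload))
		rec_len = sizeof(payload);
	copied = lib_ring_buffer_read(bufb, rec_offset, payload, rec_len);
	if (copied != rec_len)
		pr_debug("short read: %zu of %zu bytes\n", copied, rec_len);
}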
/*
 * It is safe to write directly to the address returned by the function
 * below, as long as the write is never bigger than a page size.
 */
extern void *
-lib_ring_buffer_offset_address(struct lib_ring_buffer_backend *bufb,
+lib_ring_buffer_offset_address(struct lttng_kernel_ring_buffer_backend *bufb,
size_t offset);
extern void *
-lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
+lib_ring_buffer_read_offset_address(struct lttng_kernel_ring_buffer_backend *bufb,
size_t offset);
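/*
 * Usage sketch (assumption, not part of the patch): the "read_" variant
 * resolves an offset within the sub-buffer currently owned by the reader,
 * which is how a consumer peeks at a packet header it has already claimed,
 * while lib_ring_buffer_offset_address() resolves the writer-side view.
 */
static inline void *example_peek_reader_header(
		struct lttng_kernel_ring_buffer_backend *bufb)
{
	/* Offset 0 is the start of the reader-owned sub-buffer. */
	return lib_ring_buffer_read_offset_address(bufb, 0);
}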
/**
 * Calls the slow path (_ring_buffer_write) if copy is crossing a page
 * boundary.
 */
static inline __attribute__((always_inline))
-void lib_ring_buffer_write(const struct lib_ring_buffer_config *config,
+void lib_ring_buffer_write(const struct lttng_kernel_ring_buffer_config *config,
struct lttng_kernel_ring_buffer_ctx *ctx,
const void *src, size_t len)
{
- struct lib_ring_buffer_backend *bufb = &ctx->priv.buf->backend;
+ struct lttng_kernel_ring_buffer_backend *bufb = &ctx->priv.buf->backend;
struct channel_backend *chanb = &ctx->priv.chan->backend;
size_t index, pagecpy;
size_t offset = ctx->priv.buf_offset;
- struct lib_ring_buffer_backend_pages *backend_pages;
+ struct lttng_kernel_ring_buffer_backend_pages *backend_pages;
if (unlikely(!len))
return;
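/*
 * Worked example (illustration only): the inline fast path is taken only
 * when the record fits in the current backend page. The test, visible in
 * the user-copy hunks below as
 * pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK), computes the
 * distance to the next page boundary: with PAGE_SIZE = 4096 and
 * offset = 4000 it yields 96, so a 64-byte write stays inline while a
 * 200-byte write falls through to the slow path (_ring_buffer_write).
 */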
/*
 * Calls the slow path (_ring_buffer_memset) if write is crossing a page
 * boundary.
 */
static inline
-void lib_ring_buffer_memset(const struct lib_ring_buffer_config *config,
+void lib_ring_buffer_memset(const struct lttng_kernel_ring_buffer_config *config,
struct lttng_kernel_ring_buffer_ctx *ctx, int c, size_t len)
{
- struct lib_ring_buffer_backend *bufb = &ctx->priv.buf->backend;
+ struct lttng_kernel_ring_buffer_backend *bufb = &ctx->priv.buf->backend;
struct channel_backend *chanb = &ctx->priv.chan->backend;
size_t index, pagecpy;
size_t offset = ctx->priv.buf_offset;
- struct lib_ring_buffer_backend_pages *backend_pages;
+ struct lttng_kernel_ring_buffer_backend_pages *backend_pages;
if (unlikely(!len))
return;
/*
 * Returns the number of bytes copied. Does *not* terminate @dest with a
 * NULL terminating character.
 */
static inline __attribute__((always_inline))
-size_t lib_ring_buffer_do_strcpy(const struct lib_ring_buffer_config *config,
+size_t lib_ring_buffer_do_strcpy(const struct lttng_kernel_ring_buffer_config *config,
char *dest, const char *src, size_t len)
{
size_t count;
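/*
 * Sketch of the copy loop shape (assumption; the body is elided from this
 * excerpt). Each source byte is read exactly once before being tested, so
 * a string modified concurrently can never be re-read inconsistently:
 *
 *	for (count = 0; count < len; count++) {
 *		char c = READ_ONCE(src[count]);
 *
 *		if (c == '\0')
 *			break;
 *		dest[count] = c;
 *	}
 */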
/*
 * The user-space access check (lttng_access_ok) must have been performed
 * previously.
 */
static inline __attribute__((always_inline))
-size_t lib_ring_buffer_do_strcpy_from_user_inatomic(const struct lib_ring_buffer_config *config,
+size_t lib_ring_buffer_do_strcpy_from_user_inatomic(const struct lttng_kernel_ring_buffer_config *config,
char *dest, const char __user *src, size_t len)
{
size_t count;
/*
 * Calls the slow path (_ring_buffer_strcpy) if copy is crossing a page
 * boundary.
 */
static inline
-void lib_ring_buffer_strcpy(const struct lib_ring_buffer_config *config,
+void lib_ring_buffer_strcpy(const struct lttng_kernel_ring_buffer_config *config,
struct lttng_kernel_ring_buffer_ctx *ctx,
const char *src, size_t len, int pad)
{
- struct lib_ring_buffer_backend *bufb = &ctx->priv.buf->backend;
+ struct lttng_kernel_ring_buffer_backend *bufb = &ctx->priv.buf->backend;
struct channel_backend *chanb = &ctx->priv.chan->backend;
size_t index, pagecpy;
size_t offset = ctx->priv.buf_offset;
- struct lib_ring_buffer_backend_pages *backend_pages;
+ struct lttng_kernel_ring_buffer_backend_pages *backend_pages;
if (unlikely(!len))
return;
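/*
 * Worked illustration (not from the patch): with len = 6 and pad = '#',
 * copying the 2-character string "ab" produces a fixed-size field of
 * exactly len bytes, always '\0'-terminated:
 *
 *	'a' 'b' '#' '#' '#' '\0'
 *
 * so string fields keep a constant size in the trace regardless of the
 * length of the source string.
 */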
/*
 * Disable the page fault handler to ensure we never try to take the mmap_sem.
 */
static inline __attribute__((always_inline))
-void lib_ring_buffer_copy_from_user_inatomic(const struct lib_ring_buffer_config *config,
+void lib_ring_buffer_copy_from_user_inatomic(const struct lttng_kernel_ring_buffer_config *config,
struct lttng_kernel_ring_buffer_ctx *ctx,
const void __user *src, size_t len)
{
- struct lib_ring_buffer_backend *bufb = &ctx->priv.buf->backend;
+ struct lttng_kernel_ring_buffer_backend *bufb = &ctx->priv.buf->backend;
struct channel_backend *chanb = &ctx->priv.chan->backend;
size_t index, pagecpy;
size_t offset = ctx->priv.buf_offset;
- struct lib_ring_buffer_backend_pages *backend_pages;
+ struct lttng_kernel_ring_buffer_backend_pages *backend_pages;
unsigned long ret;
if (unlikely(!len))
	return;
index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
- pagefault_disable();
if (unlikely(!lttng_access_ok(VERIFY_READ, src, len)))
goto fill_buffer;
+ pagefault_disable();
if (likely(pagecpy == len)) {
ret = lib_ring_buffer_do_copy_from_user_inatomic(
backend_pages->p[index].virt + (offset & ~PAGE_MASK),
src, len);
if (unlikely(ret > 0)) {
/* Copy failed. */
- goto fill_buffer;
+ goto fill_buffer_enable_pf;
}
} else {
_lib_ring_buffer_copy_from_user_inatomic(bufb, offset, src, len, 0);
return;
-fill_buffer:
+fill_buffer_enable_pf:
pagefault_enable();
+fill_buffer:
/*
* In the error path we call the slow path version to avoid
* the pollution of static inline code.
*/
_lib_ring_buffer_memset(bufb, offset, 0, len, 0);
+ ctx->priv.buf_offset += len;
}
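/*
 * Minimal sketch of the invariant the hunk above restores (illustrative
 * names, not part of the patch): pagefault_disable()/pagefault_enable()
 * must stay balanced on every path. Since the access check now runs before
 * page faults are disabled, its goto must land on a label that does not
 * re-enable them; only failures from inside the disabled region go through
 * the enabling label. Advancing the buffer offset on failure keeps the
 * reserved slot accounted for even though the payload is replaced by zeroes.
 */
static int example_copy_pattern(void *dst, const void __user *usrc, size_t len)
{
	if (unlikely(!lttng_access_ok(VERIFY_READ, usrc, len)))
		goto fill;			/* page faults still enabled */
	pagefault_disable();
	if (__copy_from_user_inatomic(dst, usrc, len))
		goto fill_enable_pf;		/* disabled: must re-enable */
	pagefault_enable();
	return 0;

fill_enable_pf:
	pagefault_enable();
fill:
	memset(dst, 0, len);			/* deterministic zero fill */
	return -EFAULT;
}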
/**
 * Disable the page fault handler to ensure we never try to
 * take the mmap_sem.
 */
static inline
-void lib_ring_buffer_strcpy_from_user_inatomic(const struct lib_ring_buffer_config *config,
+void lib_ring_buffer_strcpy_from_user_inatomic(const struct lttng_kernel_ring_buffer_config *config,
struct lttng_kernel_ring_buffer_ctx *ctx,
const void __user *src, size_t len, int pad)
{
- struct lib_ring_buffer_backend *bufb = &ctx->priv.buf->backend;
+ struct lttng_kernel_ring_buffer_backend *bufb = &ctx->priv.buf->backend;
struct channel_backend *chanb = &ctx->priv.chan->backend;
size_t index, pagecpy;
size_t offset = ctx->priv.buf_offset;
- struct lib_ring_buffer_backend_pages *backend_pages;
+ struct lttng_kernel_ring_buffer_backend_pages *backend_pages;
if (unlikely(!len))
return;
index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
- pagefault_disable();
if (unlikely(!lttng_access_ok(VERIFY_READ, src, len)))
goto fill_buffer;
+ pagefault_disable();
if (likely(pagecpy == len)) {
size_t count;
return;
fill_buffer:
- pagefault_enable();
	/*
	 * In the error path we call the slow path version to avoid
	 * the pollution of static inline code.
	 */
_lib_ring_buffer_memset(bufb, offset, pad, len - 1, 0);
offset += len - 1;
_lib_ring_buffer_memset(bufb, offset, '\0', 1, 0);
+ ctx->priv.buf_offset += len;
}
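/*
 * Note on the hunk above (commentary, not code from the patch): the only
 * goto to fill_buffer now happens before pagefault_disable(), so the label
 * must not re-enable page faults; the success path (elided in this excerpt)
 * re-enables them itself. As in the previous function, advancing
 * ctx->priv.buf_offset by len means a failed user copy still consumes the
 * reserved space, here filled with len - 1 pad bytes and a final '\0'.
 */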
/*
 * This accessor counts the number of unread records in a buffer. It only
 * provides a consistent value if no reads or writes are performed
 * concurrently.
 */
static inline
unsigned long lib_ring_buffer_get_records_unread(
- const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf)
+ const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer *buf)
{
- struct lib_ring_buffer_backend *bufb = &buf->backend;
- struct lib_ring_buffer_backend_pages *pages;
+ struct lttng_kernel_ring_buffer_backend *bufb = &buf->backend;
+ struct lttng_kernel_ring_buffer_backend_pages *pages;
unsigned long records_unread = 0, sb_bindex, id;
unsigned int i;