Drop 'linux/atomic.h' wrapper
diff --git a/include/ringbuffer/backend.h b/include/ringbuffer/backend.h
index 13f67a4ea8c51e48f8832ff1813931a772997442..6a90161839c76903f28bb4c5a5e4eede152dfc2c 100644
--- a/include/ringbuffer/backend.h
+++ b/include/ringbuffer/backend.h
@@ -22,6 +22,7 @@
 #include <linux/fs.h>
 #include <linux/mm.h>
 #include <wrapper/uaccess.h>
+#include <lttng/probe-user.h>
 
 /* Internal helpers */
 #include <ringbuffer/backend_internal.h>
@@ -77,7 +78,7 @@ void lib_ring_buffer_write(const struct lttng_kernel_ring_buffer_config *config,
 {
	struct lttng_kernel_ring_buffer_backend *bufb = &ctx->priv.buf->backend;
	struct channel_backend *chanb = &ctx->priv.chan->backend;
-	size_t index, pagecpy;
+	size_t index, bytes_left_in_page;
	size_t offset = ctx->priv.buf_offset;
	struct lttng_kernel_ring_buffer_backend_pages *backend_pages;
@@ -87,14 +88,14 @@ void lib_ring_buffer_write(const struct lttng_kernel_ring_buffer_config *config,
		lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
-	pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
-	if (likely(pagecpy == len))
+	bytes_left_in_page = min_t(size_t, len, (-offset) & ~PAGE_MASK);
+	if (likely(bytes_left_in_page == len))
		lib_ring_buffer_do_copy(config,
					backend_pages->p[index].virt
					+ (offset & ~PAGE_MASK),
					src, len);
	else
-		_lib_ring_buffer_write(bufb, offset, src, len, 0);
+		_lib_ring_buffer_write(bufb, offset, src, len);
	ctx->priv.buf_offset += len;
 }
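
Note: the renamed bytes_left_in_page value is computed as min_t(size_t, len, (-offset) & ~PAGE_MASK); the masked negation gives the distance from offset to the next page boundary (0 when offset is already page-aligned), so the single-page fast path is taken only when the whole record fits in the current page. A minimal user-space sketch of that arithmetic, assuming 4 KiB pages and hypothetical DEMO_* macros standing in for the kernel's PAGE_SIZE/PAGE_MASK:

#include <assert.h>
#include <stddef.h>

#define DEMO_PAGE_SIZE	4096UL			/* assumed page size for the demo */
#define DEMO_PAGE_MASK	(~(DEMO_PAGE_SIZE - 1))

/* Bytes available from 'offset' up to the next page boundary
 * (0 when offset already sits on a boundary). */
static size_t demo_bytes_left_in_page(size_t offset)
{
	return (-offset) & ~DEMO_PAGE_MASK;
}

int main(void)
{
	assert(demo_bytes_left_in_page(0x1ff0) == 0x10);	/* 16 bytes to 0x2000 */
	assert(demo_bytes_left_in_page(0x0001) == 0xfff);	/* almost a full page */
	assert(demo_bytes_left_in_page(0x2000) == 0);		/* page-aligned start */
	return 0;
}
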
@@ -118,7 +119,7 @@ void lib_ring_buffer_memset(const struct lttng_kernel_ring_buffer_config *config
	struct lttng_kernel_ring_buffer_backend *bufb = &ctx->priv.buf->backend;
	struct channel_backend *chanb = &ctx->priv.chan->backend;
-	size_t index, pagecpy;
+	size_t index, bytes_left_in_page;
	size_t offset = ctx->priv.buf_offset;
	struct lttng_kernel_ring_buffer_backend_pages *backend_pages;
@@ -128,13 +129,13 @@ void lib_ring_buffer_memset(const struct lttng_kernel_ring_buffer_config *config
		lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
-	pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
-	if (likely(pagecpy == len))
+	bytes_left_in_page = min_t(size_t, len, (-offset) & ~PAGE_MASK);
+	if (likely(bytes_left_in_page == len))
		lib_ring_buffer_do_memset(backend_pages->p[index].virt
					  + (offset & ~PAGE_MASK),
					  c, len);
	else
-		_lib_ring_buffer_memset(bufb, offset, c, len, 0);
+		_lib_ring_buffer_memset(bufb, offset, c, len);
	ctx->priv.buf_offset += len;
 }
@@ -335,7 +336,7 @@ void lib_ring_buffer_copy_from_user_inatomic(const struct lttng_kernel_ring_buff
 {
	struct lttng_kernel_ring_buffer_backend *bufb = &ctx->priv.buf->backend;
	struct channel_backend *chanb = &ctx->priv.chan->backend;
-	size_t index, pagecpy;
+	size_t index, bytes_left_in_page;
	size_t offset = ctx->priv.buf_offset;
	struct lttng_kernel_ring_buffer_backend_pages *backend_pages;
	unsigned long ret;
@@ -346,13 +347,13 @@ void lib_ring_buffer_copy_from_user_inatomic(const struct lttng_kernel_ring_buff
		lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
-	pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
+	bytes_left_in_page = min_t(size_t, len, (-offset) & ~PAGE_MASK);
	if (unlikely(!lttng_access_ok(VERIFY_READ, src, len)))
		goto fill_buffer;
	pagefault_disable();
-	if (likely(pagecpy == len)) {
+	if (likely(bytes_left_in_page == len)) {
		ret = lib_ring_buffer_do_copy_from_user_inatomic(
			backend_pages->p[index].virt + (offset & ~PAGE_MASK),
			src, len);
@@ -361,7 +362,7 @@ void lib_ring_buffer_copy_from_user_inatomic(const struct lttng_kernel_ring_buff
			goto fill_buffer_enable_pf;
		}
	} else {
-		_lib_ring_buffer_copy_from_user_inatomic(bufb, offset, src, len, 0);
+		_lib_ring_buffer_copy_from_user_inatomic(bufb, offset, src, len);
	}
	pagefault_enable();
	ctx->priv.buf_offset += len;
@@ -375,7 +376,7 @@ fill_buffer:
	 * In the error path we call the slow path version to avoid
	 * the pollution of static inline code.
	 */
-	_lib_ring_buffer_memset(bufb, offset, 0, len, 0);
+	_lib_ring_buffer_memset(bufb, offset, 0, len);
	ctx->priv.buf_offset += len;
 }
@@ -455,9 +456,9 @@ fill_buffer:
	 * In the error path we call the slow path version to avoid
	 * the pollution of static inline code.
	 */
-	_lib_ring_buffer_memset(bufb, offset, pad, len - 1, 0);
+	_lib_ring_buffer_memset(bufb, offset, pad, len - 1);
	offset += len - 1;
-	_lib_ring_buffer_memset(bufb, offset, '\0', 1, 0);
+	_lib_ring_buffer_memset(bufb, offset, '\0', 1);
	ctx->priv.buf_offset += len;
 }
@@ -535,7 +536,7 @@ fill_buffer:
	 * In the error path we call the slow path version to avoid
	 * the pollution of static inline code.
	 */
-	_lib_ring_buffer_memset(bufb, offset, pad, len, 0);
+	_lib_ring_buffer_memset(bufb, offset, pad, len);
	ctx->priv.buf_offset += len;
 }
@@ -580,14 +581,7 @@ unsigned long lib_ring_buffer_copy_from_user_check_nofault(void *dest,
						const void __user *src,
						unsigned long len)
 {
-	unsigned long ret;
-
-	if (!lttng_access_ok(VERIFY_READ, src, len))
-		return 1;
-	pagefault_disable();
-	ret = __copy_from_user_inatomic(dest, src, len);
-	pagefault_enable();
-	return ret;
+	return lttng_copy_from_user_check_nofault(dest, src, len);
 }
 
 #endif /* _LIB_RING_BUFFER_BACKEND_H */
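
The final hunk replaces the open-coded nofault user copy with a call to lttng_copy_from_user_check_nofault(), presumably provided by the newly included <lttng/probe-user.h>. A minimal sketch of what that helper would look like, assuming it simply factors out the inline body removed above (the real header may differ):

/* Hypothetical reconstruction based on the removed inline body. */
static inline
unsigned long lttng_copy_from_user_check_nofault(void *dest,
		const void __user *src, unsigned long len)
{
	unsigned long ret;

	/* Refuse the copy if the user range does not pass the access check. */
	if (!lttng_access_ok(VERIFY_READ, src, len))
		return 1;
	/* Disable page faults so the copy never sleeps (safe in atomic context). */
	pagefault_disable();
	ret = __copy_from_user_inatomic(dest, src, len);
	pagefault_enable();
	return ret;	/* 0 on success, number of bytes not copied otherwise */
}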