copy_from_user and memset
author    Julien Desfossez <julien.desfossez@polymtl.ca>
          Tue, 27 Sep 2011 11:51:34 +0000 (07:51 -0400)
committer Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
          Tue, 27 Sep 2011 11:51:34 +0000 (07:51 -0400)
This patch provides the copy_from_user and memset operations for the lib
ringbuffer.

[ edit: tp_copy_string_from_user len parameter now excludes final \0 ]

Signed-off-by: Julien Desfossez <julien.desfossez@polymtl.ca>
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
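
As an illustration of the string layout the edit note refers to (this sketch is
not part of the patch): the length given to the user-space copy excludes the
terminating \0, and the terminating byte is written separately afterwards, so a
faulting copy can fall back to zero-fill without leaving the field
unterminated. A minimal userspace sketch of that layout, with hypothetical
names:

    #include <stdio.h>
    #include <string.h>

    /*
     * Illustrative only (hypothetical helper): copy "len" bytes of string
     * payload, where "len" excludes the final '\0', then write the single
     * terminating byte separately. This is the layout produced by the
     * tp_copy_string_from_user() macro added in probes/lttng-events.h below.
     */
    static void copy_string_field(char *dest, const char *src, size_t len)
    {
        memcpy(dest, src, len);   /* the kernel side copies from user space */
        memset(dest + len, 0, 1); /* explicit terminating '\0' */
    }

    int main(void)
    {
        char field[16];

        copy_string_field(field, "comm", strlen("comm"));
        printf("%s\n", field); /* prints "comm" */
        return 0;
    }
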
lib/ringbuffer/backend.h
lib/ringbuffer/backend_internal.h
lib/ringbuffer/ring_buffer_backend.c
ltt-events.h
ltt-ring-buffer-client.h
ltt-ring-buffer-metadata-client.h
probes/lttng-events.h

diff --git a/lib/ringbuffer/backend.h b/lib/ringbuffer/backend.h
index 47bc1798334188a2c31c2c89e71c12ad81e4b42e..541dc531ca76765e990635edc1c2a526e2ccdddb 100644
--- a/lib/ringbuffer/backend.h
+++ b/lib/ringbuffer/backend.h
@@ -103,6 +103,115 @@ void lib_ring_buffer_write(const struct lib_ring_buffer_config *config,
        ctx->buf_offset += len;
 }
 
+/**
+ * lib_ring_buffer_memset - write len bytes of c to a buffer backend
+ * @config: ring buffer instance configuration
+ * @ctx: ring buffer context (input arguments only)
+ * @c: the byte to write
+ * @len: number of bytes to write
+ *
+ * This function writes "len" bytes of "c" to the buffer backend, at the
+ * current context offset. This is more or less a buffer backend-specific
+ * memset() operation. Calls the slow path (_lib_ring_buffer_memset) if the
+ * write crosses a page boundary.
+ */
+static inline
+void lib_ring_buffer_memset(const struct lib_ring_buffer_config *config,
+                           struct lib_ring_buffer_ctx *ctx, int c, size_t len)
+{
+
+       struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
+       struct channel_backend *chanb = &ctx->chan->backend;
+       size_t sbidx, index;
+       size_t offset = ctx->buf_offset;
+       ssize_t pagecpy;
+       struct lib_ring_buffer_backend_pages *rpages;
+       unsigned long sb_bindex, id;
+
+       offset &= chanb->buf_size - 1;
+       sbidx = offset >> chanb->subbuf_size_order;
+       index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
+       pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
+       id = bufb->buf_wsb[sbidx].id;
+       sb_bindex = subbuffer_id_get_index(config, id);
+       rpages = bufb->array[sb_bindex];
+       CHAN_WARN_ON(ctx->chan,
+                    config->mode == RING_BUFFER_OVERWRITE
+                    && subbuffer_id_is_noref(config, id));
+       if (likely(pagecpy == len))
+               lib_ring_buffer_do_memset(rpages->p[index].virt
+                                         + (offset & ~PAGE_MASK),
+                                         c, len);
+       else
+               _lib_ring_buffer_memset(bufb, offset, c, len, 0);
+       ctx->buf_offset += len;
+}
+
+/**
+ * lib_ring_buffer_copy_from_user - write userspace data to a buffer backend
+ * @config: ring buffer instance configuration
+ * @ctx: ring buffer context (input arguments only)
+ * @src: userspace source pointer to copy from
+ * @len: length of data to copy
+ *
+ * This function copies "len" bytes of data from a userspace pointer to the
+ * buffer backend, at the current context offset. This is more or less a
+ * buffer backend-specific memcpy() operation. Calls the slow path
+ * (_lib_ring_buffer_copy_from_user) if the copy crosses a page boundary.
+ */
+static inline
+void lib_ring_buffer_copy_from_user(const struct lib_ring_buffer_config *config,
+                                   struct lib_ring_buffer_ctx *ctx,
+                                   const void __user *src, size_t len)
+{
+       struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
+       struct channel_backend *chanb = &ctx->chan->backend;
+       size_t sbidx, index;
+       size_t offset = ctx->buf_offset;
+       ssize_t pagecpy;
+       struct lib_ring_buffer_backend_pages *rpages;
+       unsigned long sb_bindex, id;
+       unsigned long ret;
+
+       offset &= chanb->buf_size - 1;
+       sbidx = offset >> chanb->subbuf_size_order;
+       index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
+       pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
+       id = bufb->buf_wsb[sbidx].id;
+       sb_bindex = subbuffer_id_get_index(config, id);
+       rpages = bufb->array[sb_bindex];
+       CHAN_WARN_ON(ctx->chan,
+                    config->mode == RING_BUFFER_OVERWRITE
+                    && subbuffer_id_is_noref(config, id));
+
+       if (unlikely(!access_ok(VERIFY_READ, src, len)))
+               goto fill_buffer;
+
+       if (likely(pagecpy == len)) {
+               ret = lib_ring_buffer_do_copy_from_user(
+                       rpages->p[index].virt + (offset & ~PAGE_MASK),
+                       src, len);
+               if (unlikely(ret > 0)) {
+                       len -= (pagecpy - ret);
+                       offset += (pagecpy - ret);
+                       goto fill_buffer;
+               }
+       } else {
+               _lib_ring_buffer_copy_from_user(bufb, offset, src, len, 0);
+       }
+       ctx->buf_offset += len;
+
+       return;
+
+fill_buffer:
+       /*
+        * In the error path we call the slow path version to avoid
+        * polluting the static inline fast path.
+        */
+       _lib_ring_buffer_memset(bufb, offset, 0, len, 0);
+}
+
 /*
  * This accessor counts the number of unread records in a buffer.
  * It only provides a consistent value if no reads nor writes are performed
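
A note on the fast/slow split used by both inline helpers above (illustration
only, not part of the patch): pagecpy = min_t(size_t, len, (-offset) &
~PAGE_MASK) is the number of bytes left before the next page boundary, capped
at len; it is 0 when offset is already page aligned, so page-aligned writes
always fall through to the slow path. A standalone userspace sketch, assuming
a hard-coded 4 KiB page size:

    #include <stdio.h>
    #include <stddef.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
        size_t offsets[] = { 0, 100, 4000, 4096 };
        size_t len = 200;
        size_t i;

        for (i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++) {
            size_t offset = offsets[i];
            /* Bytes before the next page boundary (0 if page aligned). */
            size_t room = (-offset) & ~PAGE_MASK;
            size_t pagecpy = len < room ? len : room;

            printf("offset=%4zu pagecpy=%3zu -> %s path\n", offset,
                   pagecpy, pagecpy == len ? "fast" : "slow");
        }
        return 0;
    }
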
diff --git a/lib/ringbuffer/backend_internal.h b/lib/ringbuffer/backend_internal.h
index d92fe36c82f39326ad846054e5f1bb248f030126..442f357a4c40120aae2f06d897aee7a723f1cd56 100644
--- a/lib/ringbuffer/backend_internal.h
+++ b/lib/ringbuffer/backend_internal.h
@@ -15,6 +15,7 @@
 #include "../../wrapper/ringbuffer/backend_types.h"
 #include "../../wrapper/ringbuffer/frontend_types.h"
 #include <linux/string.h>
+#include <linux/uaccess.h>
 
 /* Ring buffer backend API presented to the frontend */
 
@@ -40,6 +41,12 @@ void lib_ring_buffer_backend_exit(void);
 extern void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb,
                                   size_t offset, const void *src, size_t len,
                                   ssize_t pagecpy);
+extern void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
+                                   size_t offset, int c, size_t len,
+                                   ssize_t pagecpy);
+extern void _lib_ring_buffer_copy_from_user(struct lib_ring_buffer_backend *bufb,
+                                           size_t offset, const void *src,
+                                           size_t len, ssize_t pagecpy);
 
 /*
  * Subbuffer ID bits for overwrite mode. Need to fit within a single word to be
@@ -414,4 +421,29 @@ do {                                                               \
                inline_memcpy(dest, src, __len);                \
 } while (0)
 
+/*
+ * We use __copy_from_user to copy userspace data since we already
+ * did the access_ok for the whole range.
+ */
+static inline
+unsigned long lib_ring_buffer_do_copy_from_user(void *dest,
+                                               const void __user *src,
+                                               unsigned long len)
+{
+       return __copy_from_user(dest, src, len);
+}
+
+/*
+ * Fill "len" bytes of "dest" with the byte "c".
+ */
+static inline
+void lib_ring_buffer_do_memset(char *dest, int c,
+       unsigned long len)
+{
+       unsigned long i;
+
+       for (i = 0; i < len; i++)
+               dest[i] = c;
+}
+
 #endif /* _LINUX_RING_BUFFER_BACKEND_INTERNAL_H */
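
For context on the __copy_from_user() choice above: the pattern relied on is a
single access_ok() check over the whole user range, followed by the unchecked
__copy_from_user() for the individual pieces. A kernel-style sketch of that
pattern (hypothetical helper name, assuming a 2011-era kernel where
access_ok() still takes the VERIFY_READ argument):

    #include <linux/errno.h>
    #include <linux/kernel.h>
    #include <linux/uaccess.h>

    /*
     * Illustrative helper, not part of the patch: validate the whole user
     * range once, then copy it in chunks without re-checking each piece.
     */
    static int copy_user_range_in_chunks(void *dst, const void __user *src,
                                         size_t len, size_t chunk)
    {
        size_t copied = 0;

        if (!access_ok(VERIFY_READ, src, len))
            return -EFAULT;

        while (copied < len) {
            size_t this_len = min(chunk, len - copied);

            /* __copy_from_user() returns the number of bytes NOT copied. */
            if (__copy_from_user((char *)dst + copied,
                                 (const char __user *)src + copied, this_len))
                return -EFAULT;
            copied += this_len;
        }
        return 0;
    }
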
diff --git a/lib/ringbuffer/ring_buffer_backend.c b/lib/ringbuffer/ring_buffer_backend.c
index a9513d1c07e3072fc9e6f6dcd5418554a1af8255..d1b5b8cde5494909f748c28ab2e31583d5cfb2a1 100644
--- a/lib/ringbuffer/ring_buffer_backend.c
+++ b/lib/ringbuffer/ring_buffer_backend.c
@@ -501,6 +501,107 @@ void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb, size_t offset,
 }
 EXPORT_SYMBOL_GPL(_lib_ring_buffer_write);
 
+
+/**
+ * _lib_ring_buffer_memset - write len bytes of c to a ring_buffer buffer
+ * @bufb: buffer backend
+ * @offset: offset within the buffer
+ * @c: the byte to write
+ * @len: length to write
+ * @pagecpy: number of bytes already handled in the current page
+ */
+void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
+                            size_t offset,
+                            int c, size_t len, ssize_t pagecpy)
+{
+       struct channel_backend *chanb = &bufb->chan->backend;
+       const struct lib_ring_buffer_config *config = chanb->config;
+       size_t sbidx, index;
+       struct lib_ring_buffer_backend_pages *rpages;
+       unsigned long sb_bindex, id;
+
+       do {
+               len -= pagecpy;
+               offset += pagecpy;
+               sbidx = offset >> chanb->subbuf_size_order;
+               index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
+
+               /*
+                * Underlying layer should never ask for writes across
+                * subbuffers.
+                */
+               CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
+
+               pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
+               id = bufb->buf_wsb[sbidx].id;
+               sb_bindex = subbuffer_id_get_index(config, id);
+               rpages = bufb->array[sb_bindex];
+               CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
+                            && subbuffer_id_is_noref(config, id));
+               lib_ring_buffer_do_memset(rpages->p[index].virt
+                                         + (offset & ~PAGE_MASK),
+                                         c, pagecpy);
+       } while (unlikely(len != pagecpy));
+}
+EXPORT_SYMBOL_GPL(_lib_ring_buffer_memset);
+
+
+/**
+ * _lib_ring_buffer_copy_from_user - write user data to a ring_buffer buffer
+ * @bufb: buffer backend
+ * @offset: offset within the buffer
+ * @src: source address
+ * @len: length to write
+ * @pagecpy: number of bytes already handled in the current page
+ *
+ * This function deals with userspace pointers; it must never be called
+ * without the src pointer having been checked with access_ok() first.
+ */
+void _lib_ring_buffer_copy_from_user(struct lib_ring_buffer_backend *bufb,
+                                     size_t offset,
+                                     const void __user *src, size_t len,
+                                     ssize_t pagecpy)
+{
+       struct channel_backend *chanb = &bufb->chan->backend;
+       const struct lib_ring_buffer_config *config = chanb->config;
+       size_t sbidx, index;
+       struct lib_ring_buffer_backend_pages *rpages;
+       unsigned long sb_bindex, id;
+       unsigned long ret;
+
+       do {
+               len -= pagecpy;
+               src += pagecpy;
+               offset += pagecpy;
+               sbidx = offset >> chanb->subbuf_size_order;
+               index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
+
+               /*
+                * Underlying layer should never ask for writes across
+                * subbuffers.
+                */
+               CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
+
+               pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
+               id = bufb->buf_wsb[sbidx].id;
+               sb_bindex = subbuffer_id_get_index(config, id);
+               rpages = bufb->array[sb_bindex];
+               CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
+                               && subbuffer_id_is_noref(config, id));
+               ret = lib_ring_buffer_do_copy_from_user(rpages->p[index].virt
+                                                       + (offset & ~PAGE_MASK),
+                                                       src, pagecpy);
+               if (ret > 0) {
+                       offset += (pagecpy - ret);
+                       len -= (pagecpy - ret);
+                       _lib_ring_buffer_memset(bufb, offset, 0, len, 0);
+                       break; /* stop copy */
+               }
+       } while (unlikely(len != pagecpy));
+}
+EXPORT_SYMBOL_GPL(_lib_ring_buffer_copy_from_user);
+
 /**
  * lib_ring_buffer_read - read data from ring_buffer_buffer.
  * @bufb : buffer backend
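
The do/while structure shared by both slow paths above (consume the chunk
handled so far, then size the next chunk to the page boundary) can be
exercised in isolation. Below is a userspace sketch that fills a flat buffer
in page-sized pieces with the same loop shape (hypothetical 4 KiB page size,
illustration only; the per-subbuffer page lookup and CHAN_WARN_ON checks are
omitted):

    #include <stdio.h>
    #include <string.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    /* Mirror of the slow-path loop: each iteration handles at most one page. */
    static void chunked_memset(char *buf, size_t offset, int c, size_t len,
                               size_t pagecpy)
    {
        do {
            len -= pagecpy;
            offset += pagecpy;
            pagecpy = MIN(len, PAGE_SIZE - (offset & ~PAGE_MASK));
            memset(buf + offset, c, pagecpy);
        } while (len != pagecpy);
    }

    int main(void)
    {
        static char buf[3 * PAGE_SIZE];

        /* Fill 6000 bytes with 0xff, starting 100 bytes before a page boundary. */
        chunked_memset(buf, PAGE_SIZE - 100, 0xff, 6000, 0);
        printf("%d %d\n", buf[PAGE_SIZE - 100] & 0xff,
               buf[PAGE_SIZE - 100 + 5999] & 0xff); /* prints "255 255" */
        return 0;
    }
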
diff --git a/ltt-events.h b/ltt-events.h
index e00714d0e0526aee3e23257d15575a8a499449cb..586608b38ebda0aff48038c28ade69f322926860 100644
--- a/ltt-events.h
+++ b/ltt-events.h
@@ -210,6 +210,8 @@ struct ltt_channel_ops {
        void (*event_commit)(struct lib_ring_buffer_ctx *ctx);
        void (*event_write)(struct lib_ring_buffer_ctx *ctx, const void *src,
                            size_t len);
+       void (*event_write_from_user)(struct lib_ring_buffer_ctx *ctx,
+                                     const void *src, size_t len);
        /*
         * packet_avail_size returns the available size in the current
         * packet. Note that the size returned is only a hint, since it
diff --git a/ltt-ring-buffer-client.h b/ltt-ring-buffer-client.h
index 7ed86fbb5280a2c275ca89c24b9b4ec66257e5df..f71047f1a999e0f2971fdd827cf6df08aa19965a 100644
--- a/ltt-ring-buffer-client.h
+++ b/ltt-ring-buffer-client.h
@@ -480,6 +480,13 @@ void ltt_event_write(struct lib_ring_buffer_ctx *ctx, const void *src,
        lib_ring_buffer_write(&client_config, ctx, src, len);
 }
 
+static
+void ltt_event_write_from_user(struct lib_ring_buffer_ctx *ctx,
+                              const void __user *src, size_t len)
+{
+       lib_ring_buffer_copy_from_user(&client_config, ctx, src, len);
+}
+
 static
 wait_queue_head_t *ltt_get_writer_buf_wait_queue(struct channel *chan, int cpu)
 {
@@ -519,6 +526,7 @@ static struct ltt_transport ltt_relay_transport = {
                .event_reserve = ltt_event_reserve,
                .event_commit = ltt_event_commit,
                .event_write = ltt_event_write,
+               .event_write_from_user = ltt_event_write_from_user,
                .packet_avail_size = NULL,      /* Would be racy anyway */
                .get_writer_buf_wait_queue = ltt_get_writer_buf_wait_queue,
                .get_hp_wait_queue = ltt_get_hp_wait_queue,
diff --git a/ltt-ring-buffer-metadata-client.h b/ltt-ring-buffer-metadata-client.h
index dc0e36e1ec1515a31ca7514b75b7e5e10a798378..3cf8a342052e2e53e5a2d73cee12f281fa8be69a 100644
--- a/ltt-ring-buffer-metadata-client.h
+++ b/ltt-ring-buffer-metadata-client.h
@@ -224,6 +224,13 @@ void ltt_event_write(struct lib_ring_buffer_ctx *ctx, const void *src,
        lib_ring_buffer_write(&client_config, ctx, src, len);
 }
 
+static
+void ltt_event_write_from_user(struct lib_ring_buffer_ctx *ctx,
+                              const void __user *src, size_t len)
+{
+       lib_ring_buffer_copy_from_user(&client_config, ctx, src, len);
+}
+
 static
 size_t ltt_packet_avail_size(struct channel *chan)
                             
@@ -279,6 +286,7 @@ static struct ltt_transport ltt_relay_transport = {
                .buffer_read_close = ltt_buffer_read_close,
                .event_reserve = ltt_event_reserve,
                .event_commit = ltt_event_commit,
+               .event_write_from_user = ltt_event_write_from_user,
                .event_write = ltt_event_write,
                .packet_avail_size = ltt_packet_avail_size,
                .get_writer_buf_wait_queue = ltt_get_writer_buf_wait_queue,
diff --git a/probes/lttng-events.h b/probes/lttng-events.h
index 1d2def4c8f161888dd9503cccfe0ca621f13ec2e..084666d56514520f7db875e6026bf538eef0cfbd 100644
--- a/probes/lttng-events.h
+++ b/probes/lttng-events.h
@@ -510,6 +510,27 @@ __assign_##dest##_2:                                                       \
        __chan->ops->event_write(&__ctx, src,                           \
                sizeof(__typemap.dest) * __get_dynamic_array_len(dest));\
        goto __end_field_##dest##_2;
+#undef tp_memcpy_from_user
+#define tp_memcpy_from_user(dest, src, len)                            \
+       __assign_##dest:                                                \
+       if (0)                                                          \
+               (void) __typemap.dest;                                  \
+       lib_ring_buffer_align_ctx(&__ctx, ltt_alignof(__typemap.dest)); \
+       __chan->ops->event_write_from_user(&__ctx, src, len);           \
+       goto __end_field_##dest;
+/*
+ * tp_copy_string_from_user "len" parameter is the length of the string
+ * excluding the final \0.
+ */
+#undef tp_copy_string_from_user
+#define tp_copy_string_from_user(dest, src, len)                       \
+       __assign_##dest:                                                \
+       if (0)                                                          \
+               (void) __typemap.dest;                                  \
+       lib_ring_buffer_align_ctx(&__ctx, ltt_alignof(__typemap.dest)); \
+       __chan->ops->event_write_from_user(&__ctx, src, len);           \
+       __chan->ops->event_memset(&__ctx, 0, 1);                        \
+       goto __end_field_##dest;
 
 #undef tp_strcpy
 #define tp_strcpy(dest, src)                                           \
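
For a hypothetical string field named "filename" (illustration only, not taken
from the patch), tp_copy_string_from_user(filename, src, len) above expands to
roughly the following: align the context for the field type, copy the len user
bytes (len excluding the '\0'), then emit the single terminating zero byte
through an event_memset() channel operation (not shown in this diff):

    __assign_filename:
        if (0)
            (void) __typemap.filename;
        lib_ring_buffer_align_ctx(&__ctx, ltt_alignof(__typemap.filename));
        __chan->ops->event_write_from_user(&__ctx, src, len);
        __chan->ops->event_memset(&__ctx, 0, 1);
        goto __end_field_filename;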