-/*
- * ring_buffer_backend.c
+/* SPDX-License-Identifier: (GPL-2.0 OR LGPL-2.1)
*
- * Copyright (C) 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * ring_buffer_backend.c
*
- * Dual LGPL v2.1/GPL v2 license.
+ * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*/
#include <linux/stddef.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/slab.h>
+#include <linux/oom.h>
#include <linux/cpu.h>
#include <linux/mm.h>
+#include <linux/vmalloc.h>
-#include "../../wrapper/symbols.h" /* for wrapper_vmalloc_sync_all() */
-#include "../../wrapper/ringbuffer/config.h"
-#include "../../wrapper/ringbuffer/backend.h"
-#include "../../wrapper/ringbuffer/frontend.h"
+#include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_all() */
+#include <wrapper/ringbuffer/config.h>
+#include <wrapper/ringbuffer/backend.h>
+#include <wrapper/ringbuffer/frontend.h>
/**
* lib_ring_buffer_backend_allocate - allocate a channel buffer
unsigned long subbuf_size, mmap_offset = 0;
unsigned long num_subbuf_alloc;
struct page **pages;
- void **virt;
unsigned long i;
num_pages = size >> PAGE_SHIFT;
+
+ /*
+ * Verify that the number of pages requested for that buffer does not
+ * exceed the number of available pages on the system. si_mem_available()
+ * returns an _estimate_ of the number of available pages.
+ */
+ if (num_pages > si_mem_available())
+ goto not_enough_pages;
+
+ /*
+ * Set the current user thread as the first target of the OOM killer.
+ * If the estimate returned by si_mem_available() was off, and we do
+ * end up running out of memory because of this buffer allocation, we
+ * want to kill the offending app first.
+ */
+ set_current_oom_origin();
+
num_pages_per_subbuf = num_pages >> get_count_order(num_subbuf);
subbuf_size = chanb->subbuf_size;
num_subbuf_alloc = num_subbuf;
num_subbuf_alloc++;
}
- pages = kmalloc_node(ALIGN(sizeof(*pages) * num_pages,
+ pages = vmalloc_node(ALIGN(sizeof(*pages) * num_pages,
1 << INTERNODE_CACHE_SHIFT),
- GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
+ cpu_to_node(max(bufb->cpu, 0)));
if (unlikely(!pages))
goto pages_error;
- virt = kmalloc_node(ALIGN(sizeof(*virt) * num_pages,
- 1 << INTERNODE_CACHE_SHIFT),
- GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
- if (unlikely(!virt))
- goto virt_error;
-
- bufb->array = kmalloc_node(ALIGN(sizeof(*bufb->array)
+ bufb->array = lttng_kvmalloc_node(ALIGN(sizeof(*bufb->array)
* num_subbuf_alloc,
1 << INTERNODE_CACHE_SHIFT),
- GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
+ GFP_KERNEL | __GFP_NOWARN,
+ cpu_to_node(max(bufb->cpu, 0)));
if (unlikely(!bufb->array))
goto array_error;
for (i = 0; i < num_pages; i++) {
pages[i] = alloc_pages_node(cpu_to_node(max(bufb->cpu, 0)),
- GFP_KERNEL | __GFP_ZERO, 0);
+ GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 0);
if (unlikely(!pages[i]))
goto depopulate;
- virt[i] = page_address(pages[i]);
}
bufb->num_pages_per_subbuf = num_pages_per_subbuf;
/* Allocate backend pages array elements */
for (i = 0; i < num_subbuf_alloc; i++) {
bufb->array[i] =
- kzalloc_node(ALIGN(
+ lttng_kvzalloc_node(ALIGN(
sizeof(struct lib_ring_buffer_backend_pages) +
sizeof(struct lib_ring_buffer_backend_page)
* num_pages_per_subbuf,
1 << INTERNODE_CACHE_SHIFT),
- GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
+ GFP_KERNEL | __GFP_NOWARN,
+ cpu_to_node(max(bufb->cpu, 0)));
if (!bufb->array[i])
goto free_array;
}
/* Allocate write-side subbuffer table */
- bufb->buf_wsb = kzalloc_node(ALIGN(
+ bufb->buf_wsb = lttng_kvzalloc_node(ALIGN(
sizeof(struct lib_ring_buffer_backend_subbuffer)
* num_subbuf,
1 << INTERNODE_CACHE_SHIFT),
- GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
+ GFP_KERNEL | __GFP_NOWARN,
+ cpu_to_node(max(bufb->cpu, 0)));
if (unlikely(!bufb->buf_wsb))
goto free_array;
else
bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);
+ /* Allocate subbuffer packet counter table */
+ bufb->buf_cnt = lttng_kvzalloc_node(ALIGN(
+ sizeof(struct lib_ring_buffer_backend_counts)
+ * num_subbuf,
+ 1 << INTERNODE_CACHE_SHIFT),
+ GFP_KERNEL | __GFP_NOWARN,
+ cpu_to_node(max(bufb->cpu, 0)));
+ if (unlikely(!bufb->buf_cnt))
+ goto free_wsb;
+
/* Assign pages to page index */
for (i = 0; i < num_subbuf_alloc; i++) {
for (j = 0; j < num_pages_per_subbuf; j++) {
CHAN_WARN_ON(chanb, page_idx > num_pages);
- bufb->array[i]->p[j].virt = virt[page_idx];
- bufb->array[i]->p[j].page = pages[page_idx];
+ bufb->array[i]->p[j].virt = page_address(pages[page_idx]);
+ bufb->array[i]->p[j].pfn = page_to_pfn(pages[page_idx]);
page_idx++;
}
if (config->output == RING_BUFFER_MMAP) {
* will not fault.
*/
wrapper_vmalloc_sync_all();
- kfree(virt);
- kfree(pages);
+ clear_current_oom_origin();
+ vfree(pages);
return 0;
+free_wsb:
+ lttng_kvfree(bufb->buf_wsb);
free_array:
for (i = 0; (i < num_subbuf_alloc && bufb->array[i]); i++)
- kfree(bufb->array[i]);
+ lttng_kvfree(bufb->array[i]);
depopulate:
/* Free all allocated pages */
for (i = 0; (i < num_pages && pages[i]); i++)
__free_page(pages[i]);
- kfree(bufb->array);
+ lttng_kvfree(bufb->array);
array_error:
- kfree(virt);
-virt_error:
- kfree(pages);
+ vfree(pages);
pages_error:
+ clear_current_oom_origin();
+not_enough_pages:
return -ENOMEM;
}
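
/*
 * Editorial sketch, not part of this change: the lttng_kvmalloc_node() and
 * lttng_kvzalloc_node() wrappers used above are assumed to behave like the
 * kernel's kvmalloc_node(): try a physically contiguous kmalloc first, and
 * fall back to vmalloc when that fails. The helper below is a hypothetical
 * approximation of that behaviour, not the wrapper's actual implementation.
 */
static inline void *example_kvmalloc_node(size_t size, gfp_t flags, int node)
{
	void *ret;

	/* Try kmalloc first; avoid retries and warnings so failure is cheap. */
	ret = kmalloc_node(size, flags | __GFP_NOWARN | __GFP_NORETRY, node);
	if (ret)
		return ret;
	/* Fall back to vmalloc for large or fragmented allocations. */
	return vmalloc_node(size, node);
}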
int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
struct channel_backend *chanb, int cpu)
{
- const struct lib_ring_buffer_config *config = chanb->config;
+ const struct lib_ring_buffer_config *config = &chanb->config;
bufb->chan = container_of(chanb, struct channel, backend);
bufb->cpu = cpu;
if (chanb->extra_reader_sb)
num_subbuf_alloc++;
- kfree(bufb->buf_wsb);
+ lttng_kvfree(bufb->buf_wsb);
+ lttng_kvfree(bufb->buf_cnt);
for (i = 0; i < num_subbuf_alloc; i++) {
for (j = 0; j < bufb->num_pages_per_subbuf; j++)
- __free_page(bufb->array[i]->p[j].page);
- kfree(bufb->array[i]);
+ __free_page(pfn_to_page(bufb->array[i]->p[j].pfn));
+ lttng_kvfree(bufb->array[i]);
}
- kfree(bufb->array);
+ lttng_kvfree(bufb->array);
bufb->allocated = 0;
}
void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb)
{
struct channel_backend *chanb = &bufb->chan->backend;
- const struct lib_ring_buffer_config *config = chanb->config;
+ const struct lib_ring_buffer_config *config = &chanb->config;
unsigned long num_subbuf_alloc;
unsigned int i;
void channel_backend_reset(struct channel_backend *chanb)
{
struct channel *chan = container_of(chanb, struct channel, backend);
- const struct lib_ring_buffer_config *config = chanb->config;
+ const struct lib_ring_buffer_config *config = &chanb->config;
/*
* Don't reset buf_size, subbuf_size, subbuf_size_order,
chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
}
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+
+/*
+ * No need to implement a "dead" callback to do a buffer switch here,
+ * because it will happen when tracing is stopped, or will be done by
+ * switch timer CPU DEAD callback.
+ * We don't free buffers when CPU go away, because it would make trace
+ * data vanish, which is unwanted.
+ */
+int lttng_cpuhp_rb_backend_prepare(unsigned int cpu,
+ struct lttng_cpuhp_node *node)
+{
+ struct channel_backend *chanb = container_of(node,
+ struct channel_backend, cpuhp_prepare);
+ const struct lib_ring_buffer_config *config = &chanb->config;
+ struct lib_ring_buffer *buf;
+ int ret;
+
+ CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
+
+ buf = per_cpu_ptr(chanb->buf, cpu);
+ ret = lib_ring_buffer_create(buf, chanb, cpu);
+ if (ret) {
+ printk(KERN_ERR
+ "ring_buffer_cpu_hp_callback: cpu %d "
+ "buffer creation failed\n", cpu);
+ return ret;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_backend_prepare);
+
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+
#ifdef CONFIG_HOTPLUG_CPU
+
/**
* lib_ring_buffer_cpu_hp_callback - CPU hotplug callback
* @nb: notifier block
* Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
*/
static
-int __cpuinit lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
+int lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
unsigned long action,
void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
struct channel_backend *chanb = container_of(nb, struct channel_backend,
cpu_hp_notifier);
- const struct lib_ring_buffer_config *config = chanb->config;
+ const struct lib_ring_buffer_config *config = &chanb->config;
struct lib_ring_buffer *buf;
int ret;
}
return NOTIFY_OK;
}
+
#endif
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
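
/*
 * Editorial sketch, not part of this change: lttng_cpuhp_rb_backend_prepare()
 * above is assumed to be registered with the CPU hotplug state machine
 * elsewhere in the module through cpuhp_setup_state_multi(), roughly as
 * below. The state variable, trampoline and function names are hypothetical.
 */
static enum cpuhp_state example_rb_hp_prepare;

static int example_rb_prepare_cb(unsigned int cpu, struct hlist_node *node)
{
	struct lttng_cpuhp_node *lttng_node =
		container_of(node, struct lttng_cpuhp_node, node);

	return lttng_cpuhp_rb_backend_prepare(cpu, lttng_node);
}

static int example_register_rb_cpuhp(void)
{
	int ret;

	/* Dynamic PREPARE state: runs before a hotplugged CPU comes online. */
	ret = cpuhp_setup_state_multi(CPUHP_BP_PREPARE_DYN,
			"lib/ringbuffer:prepare",
			example_rb_prepare_cb, NULL);
	if (ret < 0)
		return ret;
	example_rb_hp_prepare = ret;	/* dynamic states return their number */
	return 0;
}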
+
/**
* channel_backend_init - initialize a channel backend
* @chanb: channel backend
if (!name)
return -EPERM;
- if (!(subbuf_size && num_subbuf))
- return -EPERM;
-
/* Check that the subbuffer size is larger than a page. */
- CHAN_WARN_ON(chanb, subbuf_size < PAGE_SIZE);
+ if (subbuf_size < PAGE_SIZE)
+ return -EINVAL;
/*
- * Make sure the number of subbuffers and subbuffer size are power of 2.
+ * Make sure the number of subbuffers and the subbuffer size are
+ * nonzero powers of 2.
*/
- CHAN_WARN_ON(chanb, hweight32(subbuf_size) != 1);
- CHAN_WARN_ON(chanb, hweight32(num_subbuf) != 1);
+ if (!subbuf_size || (subbuf_size & (subbuf_size - 1)))
+ return -EINVAL;
+ if (!num_subbuf || (num_subbuf & (num_subbuf - 1)))
+ return -EINVAL;
+ /*
+ * Overwrite mode buffers require at least 2 subbuffers per
+ * buffer.
+ */
+ if (config->mode == RING_BUFFER_OVERWRITE && num_subbuf < 2)
+ return -EINVAL;
ret = subbuffer_id_check_index(config, num_subbuf);
if (ret)
(config->mode == RING_BUFFER_OVERWRITE) ? 1 : 0;
chanb->num_subbuf = num_subbuf;
strlcpy(chanb->name, name, NAME_MAX);
- chanb->config = config;
+ memcpy(&chanb->config, config, sizeof(chanb->config));
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
if (!zalloc_cpumask_var(&chanb->cpumask, GFP_KERNEL))
if (!chanb->buf)
goto free_cpumask;
- /*
- * In case of non-hotplug cpu, if the ring-buffer is allocated
- * in early initcall, it will not be notified of secondary cpus.
- * In that off case, we need to allocate for all possible cpus.
- */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+ chanb->cpuhp_prepare.component = LTTNG_RING_BUFFER_BACKEND;
+ ret = cpuhp_state_add_instance(lttng_rb_hp_prepare,
+ &chanb->cpuhp_prepare.node);
+ if (ret)
+ goto free_bufs;
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+
+ {
+ /*
+ * Without CPU hotplug, if the ring buffer is allocated from an
+ * early initcall, it will not be notified of secondary cpus. In
+ * that case, we need to allocate for all possible cpus.
+ */
#ifdef CONFIG_HOTPLUG_CPU
- /*
- * buf->backend.allocated test takes care of concurrent CPU
- * hotplug.
- * Priority higher than frontend, so we create the ring buffer
- * before we start the timer.
- */
- chanb->cpu_hp_notifier.notifier_call =
- lib_ring_buffer_cpu_hp_callback;
- chanb->cpu_hp_notifier.priority = 5;
- register_hotcpu_notifier(&chanb->cpu_hp_notifier);
-
- get_online_cpus();
- for_each_online_cpu(i) {
- ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
- chanb, i);
- if (ret)
- goto free_bufs; /* cpu hotplug locked */
- }
- put_online_cpus();
+ /*
+ * The buf->backend.allocated test takes care of concurrent CPU
+ * hotplug.
+ * Our notifier priority is higher than the frontend's, so the
+ * ring buffer is created before the frontend starts its timer.
+ */
+ chanb->cpu_hp_notifier.notifier_call =
+ lib_ring_buffer_cpu_hp_callback;
+ chanb->cpu_hp_notifier.priority = 5;
+ register_hotcpu_notifier(&chanb->cpu_hp_notifier);
+
+ get_online_cpus();
+ for_each_online_cpu(i) {
+ ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
+ chanb, i);
+ if (ret)
+ goto free_bufs; /* cpu hotplug locked */
+ }
+ put_online_cpus();
#else
- for_each_possible_cpu(i) {
- ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
- chanb, i);
- if (ret)
- goto free_bufs; /* cpu hotplug locked */
- }
+ for_each_possible_cpu(i) {
+ ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
+ chanb, i);
+ if (ret)
+ goto free_bufs;
+ }
#endif
+ }
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
} else {
chanb->buf = kzalloc(sizeof(struct lib_ring_buffer), GFP_KERNEL);
if (!chanb->buf)
free_bufs:
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+ /*
+ * Teardown of the lttng_rb_hp_prepare instance on "add" error
+ * is handled within the cpu hotplug core; there is no teardown
+ * to do from the caller.
+ */
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#ifdef CONFIG_HOTPLUG_CPU
+ put_online_cpus();
+ unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
+#endif
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
for_each_possible_cpu(i) {
- struct lib_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);
+ struct lib_ring_buffer *buf =
+ per_cpu_ptr(chanb->buf, i);
if (!buf->backend.allocated)
continue;
lib_ring_buffer_free(buf);
}
-#ifdef CONFIG_HOTPLUG_CPU
- put_online_cpus();
-#endif
free_percpu(chanb->buf);
} else
kfree(chanb->buf);
*/
void channel_backend_unregister_notifiers(struct channel_backend *chanb)
{
- const struct lib_ring_buffer_config *config = chanb->config;
+ const struct lib_ring_buffer_config *config = &chanb->config;
- if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
+ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+ int ret;
+
+ ret = cpuhp_state_remove_instance(lttng_rb_hp_prepare,
+ &chanb->cpuhp_prepare.node);
+ WARN_ON(ret);
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+ }
}
/**
*/
void channel_backend_free(struct channel_backend *chanb)
{
- const struct lib_ring_buffer_config *config = chanb->config;
+ const struct lib_ring_buffer_config *config = &chanb->config;
unsigned int i;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
* @pagecpy : page size copied so far
*/
void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb, size_t offset,
- const void *src, size_t len, ssize_t pagecpy)
+ const void *src, size_t len, size_t pagecpy)
{
struct channel_backend *chanb = &bufb->chan->backend;
- const struct lib_ring_buffer_config *config = chanb->config;
+ const struct lib_ring_buffer_config *config = &chanb->config;
size_t sbidx, index;
struct lib_ring_buffer_backend_pages *rpages;
unsigned long sb_bindex, id;
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_write);
+
+/**
+ * lib_ring_buffer_memset - write len bytes of c to a ring_buffer buffer.
+ * @bufb : buffer backend
+ * @offset : offset within the buffer
+ * @c : the byte to write
+ * @len : length to write
+ * @pagecpy : page size copied so far
+ */
+void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
+ size_t offset,
+ int c, size_t len, size_t pagecpy)
+{
+ struct channel_backend *chanb = &bufb->chan->backend;
+ const struct lib_ring_buffer_config *config = &chanb->config;
+ size_t sbidx, index;
+ struct lib_ring_buffer_backend_pages *rpages;
+ unsigned long sb_bindex, id;
+
+ do {
+ len -= pagecpy;
+ offset += pagecpy;
+ sbidx = offset >> chanb->subbuf_size_order;
+ index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
+
+ /*
+ * Underlying layer should never ask for writes across
+ * subbuffers.
+ */
+ CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
+
+ pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
+ id = bufb->buf_wsb[sbidx].id;
+ sb_bindex = subbuffer_id_get_index(config, id);
+ rpages = bufb->array[sb_bindex];
+ CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
+ && subbuffer_id_is_noref(config, id));
+ lib_ring_buffer_do_memset(rpages->p[index].virt
+ + (offset & ~PAGE_MASK),
+ c, pagecpy);
+ } while (unlikely(len != pagecpy));
+}
+EXPORT_SYMBOL_GPL(_lib_ring_buffer_memset);
+
+/**
+ * lib_ring_buffer_strcpy - write string data to a ring_buffer buffer.
+ * @bufb : buffer backend
+ * @offset : offset within the buffer
+ * @src : source address
+ * @len : length to write
+ * @pagecpy : page size copied so far
+ * @pad : character to use for padding
+ */
+void _lib_ring_buffer_strcpy(struct lib_ring_buffer_backend *bufb,
+ size_t offset, const char *src, size_t len,
+ size_t pagecpy, int pad)
+{
+ struct channel_backend *chanb = &bufb->chan->backend;
+ const struct lib_ring_buffer_config *config = &chanb->config;
+ size_t sbidx, index;
+ struct lib_ring_buffer_backend_pages *rpages;
+ unsigned long sb_bindex, id;
+ int src_terminated = 0;
+
+ CHAN_WARN_ON(chanb, !len);
+ offset += pagecpy;
+ do {
+ len -= pagecpy;
+ if (!src_terminated)
+ src += pagecpy;
+ sbidx = offset >> chanb->subbuf_size_order;
+ index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
+
+ /*
+ * Underlying layer should never ask for writes across
+ * subbuffers.
+ */
+ CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
+
+ pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
+ id = bufb->buf_wsb[sbidx].id;
+ sb_bindex = subbuffer_id_get_index(config, id);
+ rpages = bufb->array[sb_bindex];
+ CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
+ && subbuffer_id_is_noref(config, id));
+
+ if (likely(!src_terminated)) {
+ size_t count, to_copy;
+
+ to_copy = pagecpy;
+ if (pagecpy == len)
+ to_copy--; /* Final '\0' */
+ count = lib_ring_buffer_do_strcpy(config,
+ rpages->p[index].virt
+ + (offset & ~PAGE_MASK),
+ src, to_copy);
+ offset += count;
+ /* Padding */
+ if (unlikely(count < to_copy)) {
+ size_t pad_len = to_copy - count;
+
+ /* Next pages will have padding */
+ src_terminated = 1;
+ lib_ring_buffer_do_memset(rpages->p[index].virt
+ + (offset & ~PAGE_MASK),
+ pad, pad_len);
+ offset += pad_len;
+ }
+ } else {
+ size_t pad_len;
+
+ pad_len = pagecpy;
+ if (pagecpy == len)
+ pad_len--; /* Final '\0' */
+ lib_ring_buffer_do_memset(rpages->p[index].virt
+ + (offset & ~PAGE_MASK),
+ pad, pad_len);
+ offset += pad_len;
+ }
+ } while (unlikely(len != pagecpy));
+ /* Ending '\0' */
+ lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
+ '\0', 1);
+}
+EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy);
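
/*
 * Editorial worked example, not part of this change: assuming the usual
 * split between an inline fast path and this slow path, the strcpy
 * variants store exactly len bytes per field. With len = 8, pad = '#'
 * and src = "abc", the record ends up containing the bytes
 *
 *   'a' 'b' 'c' '#' '#' '#' '#' '\0'
 *
 * i.e. the string is copied or padded up to len - 1 bytes and the field
 * is always '\0'-terminated, so readers see a fixed-size C string.
 */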
+
+/**
+ * lib_ring_buffer_copy_from_user_inatomic - write user data to a ring_buffer buffer.
+ * @bufb : buffer backend
+ * @offset : offset within the buffer
+ * @src : source address
+ * @len : length to write
+ * @pagecpy : page size copied so far
+ *
+ * This function deals with userspace pointers; it should never be called
+ * directly without the src pointer having first been checked with
+ * access_ok().
+ */
+void _lib_ring_buffer_copy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
+ size_t offset,
+ const void __user *src, size_t len,
+ size_t pagecpy)
+{
+ struct channel_backend *chanb = &bufb->chan->backend;
+ const struct lib_ring_buffer_config *config = &chanb->config;
+ size_t sbidx, index;
+ struct lib_ring_buffer_backend_pages *rpages;
+ unsigned long sb_bindex, id;
+ int ret;
+
+ do {
+ len -= pagecpy;
+ src += pagecpy;
+ offset += pagecpy;
+ sbidx = offset >> chanb->subbuf_size_order;
+ index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
+
+ /*
+ * Underlying layer should never ask for writes across
+ * subbuffers.
+ */
+ CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
+
+ pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
+ id = bufb->buf_wsb[sbidx].id;
+ sb_bindex = subbuffer_id_get_index(config, id);
+ rpages = bufb->array[sb_bindex];
+ CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
+ && subbuffer_id_is_noref(config, id));
+ ret = lib_ring_buffer_do_copy_from_user_inatomic(rpages->p[index].virt
+ + (offset & ~PAGE_MASK),
+ src, pagecpy) != 0;
+ if (ret > 0) {
+ /* Copy failed. */
+ _lib_ring_buffer_memset(bufb, offset, 0, len, 0);
+ break; /* stop copy */
+ }
+ } while (unlikely(len != pagecpy));
+}
+EXPORT_SYMBOL_GPL(_lib_ring_buffer_copy_from_user_inatomic);
+
+/**
+ * lib_ring_buffer_strcpy_from_user_inatomic - write userspace string data to a ring_buffer buffer.
+ * @bufb : buffer backend
+ * @offset : offset within the buffer
+ * @src : source address
+ * @len : length to write
+ * @pagecpy : page size copied so far
+ * @pad : character to use for padding
+ *
+ * This function deals with userspace pointers; it should never be called
+ * directly without the src pointer having first been checked with
+ * access_ok().
+ */
+void _lib_ring_buffer_strcpy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
+ size_t offset, const char __user *src, size_t len,
+ size_t pagecpy, int pad)
+{
+ struct channel_backend *chanb = &bufb->chan->backend;
+ const struct lib_ring_buffer_config *config = &chanb->config;
+ size_t sbidx, index;
+ struct lib_ring_buffer_backend_pages *rpages;
+ unsigned long sb_bindex, id;
+ int src_terminated = 0;
+
+ offset += pagecpy;
+ do {
+ len -= pagecpy;
+ if (!src_terminated)
+ src += pagecpy;
+ sbidx = offset >> chanb->subbuf_size_order;
+ index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
+
+ /*
+ * Underlying layer should never ask for writes across
+ * subbuffers.
+ */
+ CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
+
+ pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
+ id = bufb->buf_wsb[sbidx].id;
+ sb_bindex = subbuffer_id_get_index(config, id);
+ rpages = bufb->array[sb_bindex];
+ CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
+ && subbuffer_id_is_noref(config, id));
+
+ if (likely(!src_terminated)) {
+ size_t count, to_copy;
+
+ to_copy = pagecpy;
+ if (pagecpy == len)
+ to_copy--; /* Final '\0' */
+ count = lib_ring_buffer_do_strcpy_from_user_inatomic(config,
+ rpages->p[index].virt
+ + (offset & ~PAGE_MASK),
+ src, to_copy);
+ offset += count;
+ /* Padding */
+ if (unlikely(count < to_copy)) {
+ size_t pad_len = to_copy - count;
+
+ /* Next pages will have padding */
+ src_terminated = 1;
+ lib_ring_buffer_do_memset(rpages->p[index].virt
+ + (offset & ~PAGE_MASK),
+ pad, pad_len);
+ offset += pad_len;
+ }
+ } else {
+ size_t pad_len;
+
+ pad_len = pagecpy;
+ if (pagecpy == len)
+ pad_len--; /* Final '\0' */
+ lib_ring_buffer_do_memset(rpages->p[index].virt
+ + (offset & ~PAGE_MASK),
+ pad, pad_len);
+ offset += pad_len;
+ }
+ } while (unlikely(len != pagecpy));
+ /* Ending '\0' */
+ lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
+ '\0', 1);
+}
+EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy_from_user_inatomic);
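
/*
 * Editorial sketch, not part of this change: callers of the two _inatomic
 * user-copy paths above are expected to validate the user pointer first.
 * A hypothetical guard is shown below (access_ok() takes two arguments on
 * kernels >= 5.0, three on older kernels).
 */
static inline int example_check_user_range(const void __user *src, size_t len)
{
	/* Fail early so the tracing path never dereferences a bad pointer. */
	if (!access_ok(src, len))
		return -EFAULT;
	return 0;
}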
+
/**
* lib_ring_buffer_read - read data from ring_buffer_buffer.
* @bufb : buffer backend
void *dest, size_t len)
{
struct channel_backend *chanb = &bufb->chan->backend;
- const struct lib_ring_buffer_config *config = chanb->config;
- size_t index;
- ssize_t pagecpy, orig_len;
+ const struct lib_ring_buffer_config *config = &chanb->config;
+ size_t index, pagecpy, orig_len;
struct lib_ring_buffer_backend_pages *rpages;
unsigned long sb_bindex, id;
size_t offset, void __user *dest, size_t len)
{
struct channel_backend *chanb = &bufb->chan->backend;
- const struct lib_ring_buffer_config *config = chanb->config;
+ const struct lib_ring_buffer_config *config = &chanb->config;
size_t index;
- ssize_t pagecpy, orig_len;
+ ssize_t pagecpy;
struct lib_ring_buffer_backend_pages *rpages;
unsigned long sb_bindex, id;
- orig_len = len;
offset &= chanb->buf_size - 1;
index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
if (unlikely(!len))
* @dest : destination address
* @len : destination's length
*
- * return string's length
+ * Return string's length, or -EINVAL on error.
* Should be protected by get_subbuf/put_subbuf.
+ * Destination length should be at least 1 to hold '\0'.
*/
int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb, size_t offset,
void *dest, size_t len)
{
struct channel_backend *chanb = &bufb->chan->backend;
- const struct lib_ring_buffer_config *config = chanb->config;
+ const struct lib_ring_buffer_config *config = &chanb->config;
size_t index;
ssize_t pagecpy, pagelen, strpagelen, orig_offset;
char *str;
offset &= chanb->buf_size - 1;
index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
orig_offset = offset;
+ if (unlikely(!len))
+ return -EINVAL;
for (;;) {
id = bufb->buf_rsb.id;
sb_bindex = subbuffer_id_get_index(config, id);
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_cstr);
/**
- * lib_ring_buffer_read_get_page - Get a whole page to read from
+ * lib_ring_buffer_read_get_pfn - Get a page frame number to read from
* @bufb : buffer backend
* @offset : offset within the buffer
* @virt : pointer to page address (output)
*
* Should be protected by get_subbuf/put_subbuf.
- * Returns the pointer to the page struct pointer.
+ * Returns a pointer to the page frame number (an unsigned long).
*/
-struct page **lib_ring_buffer_read_get_page(struct lib_ring_buffer_backend *bufb,
+unsigned long *lib_ring_buffer_read_get_pfn(struct lib_ring_buffer_backend *bufb,
size_t offset, void ***virt)
{
size_t index;
struct lib_ring_buffer_backend_pages *rpages;
struct channel_backend *chanb = &bufb->chan->backend;
- const struct lib_ring_buffer_config *config = chanb->config;
+ const struct lib_ring_buffer_config *config = &chanb->config;
unsigned long sb_bindex, id;
offset &= chanb->buf_size - 1;
CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
&& subbuffer_id_is_noref(config, id));
*virt = &rpages->p[index].virt;
- return &rpages->p[index].page;
+ return &rpages->p[index].pfn;
}
-EXPORT_SYMBOL_GPL(lib_ring_buffer_read_get_page);
+EXPORT_SYMBOL_GPL(lib_ring_buffer_read_get_pfn);
/**
* lib_ring_buffer_read_offset_address - get address of a buffer location
*
* Return the address where a given offset is located (for read).
* Should be used to get the current subbuffer header pointer. Given we know
- * it's never on a page boundary, it's safe to write directly to this address,
- * as long as the write is never bigger than a page size.
+ * it's never on a page boundary, it's safe to read/write directly
+ * from/to this address, as long as the read/write is never bigger than a
+ * page size.
*/
void *lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
size_t offset)
size_t index;
struct lib_ring_buffer_backend_pages *rpages;
struct channel_backend *chanb = &bufb->chan->backend;
- const struct lib_ring_buffer_config *config = chanb->config;
+ const struct lib_ring_buffer_config *config = &chanb->config;
unsigned long sb_bindex, id;
offset &= chanb->buf_size - 1;
size_t sbidx, index;
struct lib_ring_buffer_backend_pages *rpages;
struct channel_backend *chanb = &bufb->chan->backend;
- const struct lib_ring_buffer_config *config = chanb->config;
+ const struct lib_ring_buffer_config *config = &chanb->config;
unsigned long sb_bindex, id;
offset &= chanb->buf_size - 1;