/*
* ring_buffer_backend.c
*
- * Copyright (C) 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
- * Dual LGPL v2.1/GPL v2 license.
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#define _GNU_SOURCE
+#define _LGPL_SOURCE
+#include <unistd.h>
#include <urcu/arch.h>
+#include <limits.h>
-#include "ust/core.h"
-
-#include "config.h"
+#include <lttng/ringbuffer-config.h>
+#include "vatomic.h"
#include "backend.h"
#include "frontend.h"
+#include "smp.h"
+#include "shm.h"
/**
* lib_ring_buffer_backend_allocate - allocate a channel buffer
 * @config: ring buffer instance configuration
 * @bufb: buffer backend
 * @size: total size of buffer
 * @num_subbuf: number of subbuffers
 * @extra_reader_sb: need extra subbuffer for reader
*/
static
-int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer_backend *bufb,
+int lib_ring_buffer_backend_allocate(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_backend *bufb,
size_t size, size_t num_subbuf,
- int extra_reader_sb)
+ int extra_reader_sb,
+ struct lttng_ust_shm_handle *handle,
+ struct shm_object *shmobj)
{
- struct channel_backend *chanb = &bufb->chan->backend;
- unsigned long j, num_pages, num_pages_per_subbuf, page_idx = 0;
+ struct channel_backend *chanb;
unsigned long subbuf_size, mmap_offset = 0;
unsigned long num_subbuf_alloc;
- struct page **pages;
- void **virt;
unsigned long i;
+ long page_size;
+
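+	/*
+	 * shmp() turns an shm-relative reference into a pointer valid in
+	 * this process's mapping, or NULL if the reference is out of
+	 * bounds. backend is the first member of struct channel, so
+	 * taking its address preserves the NULL check below.
+	 */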
+ chanb = &shmp(handle, bufb->chan)->backend;
+ if (!chanb)
+ return -EINVAL;
- num_pages = size >> get_count_order(PAGE_SIZE);
- num_pages_per_subbuf = num_pages >> get_count_order(num_subbuf);
subbuf_size = chanb->subbuf_size;
num_subbuf_alloc = num_subbuf;
- if (extra_reader_sb) {
- num_pages += num_pages_per_subbuf; /* Add pages for reader */
+ if (extra_reader_sb)
num_subbuf_alloc++;
- }
- pages = malloc_align(sizeof(*pages) * num_pages);
- if (unlikely(!pages))
- goto pages_error;
-
- virt = malloc_align(sizeof(*virt) * num_pages);
- if (unlikely(!virt))
- goto virt_error;
+ page_size = sysconf(_SC_PAGE_SIZE);
+ if (page_size <= 0) {
+ goto page_size_error;
+ }
- bufb->array = malloc_align(sizeof(*bufb->array) * num_subbuf_alloc);
- if (unlikely(!bufb->array))
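+	/*
+	 * align_shm() pads the shm object's allocation offset so that the
+	 * zalloc_shm() that follows returns suitably aligned, zeroed
+	 * space within the preallocated shared memory area.
+	 */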
+ align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages_shmp));
+ set_shmp(bufb->array, zalloc_shm(shmobj,
+ sizeof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp) * num_subbuf_alloc));
+ if (caa_unlikely(!shmp(handle, bufb->array)))
goto array_error;
- for (i = 0; i < num_pages; i++) {
- pages[i] = alloc_pages_node(cpu_to_node(max(bufb->cpu, 0)),
- GFP_KERNEL | __GFP_ZERO, 0);
- if (unlikely(!pages[i]))
- goto depopulate;
- virt[i] = page_address(pages[i]);
- }
- bufb->num_pages_per_subbuf = num_pages_per_subbuf;
+ /*
+ * This is the largest element (the buffer pages) which needs to
+ * be aligned on page size.
+ */
+ align_shm(shmobj, page_size);
+ set_shmp(bufb->memory_map, zalloc_shm(shmobj,
+ subbuf_size * num_subbuf_alloc));
+ if (caa_unlikely(!shmp(handle, bufb->memory_map)))
+ goto memory_map_error;
/* Allocate backend pages array elements */
for (i = 0; i < num_subbuf_alloc; i++) {
- bufb->array[i] =
- zmalloc_align(
- sizeof(struct lib_ring_buffer_backend_pages) +
- sizeof(struct lib_ring_buffer_backend_page)
- * num_pages_per_subbuf);
- if (!bufb->array[i])
+ align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages));
+ set_shmp(shmp_index(handle, bufb->array, i)->shmp,
+ zalloc_shm(shmobj,
+ sizeof(struct lttng_ust_lib_ring_buffer_backend_pages)));
+ if (!shmp(handle, shmp_index(handle, bufb->array, i)->shmp))
goto free_array;
}
/* Allocate write-side subbuffer table */
- bufb->buf_wsb = zmalloc_align(
- sizeof(struct lib_ring_buffer_backend_subbuffer)
- * num_subbuf);
- if (unlikely(!bufb->buf_wsb))
+ align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_subbuffer));
+ set_shmp(bufb->buf_wsb, zalloc_shm(shmobj,
+ sizeof(struct lttng_ust_lib_ring_buffer_backend_subbuffer)
+ * num_subbuf));
+ if (caa_unlikely(!shmp(handle, bufb->buf_wsb)))
goto free_array;
- for (i = 0; i < num_subbuf; i++)
- bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);
+ for (i = 0; i < num_subbuf; i++) {
+ struct lttng_ust_lib_ring_buffer_backend_subbuffer *sb;
+
+ sb = shmp_index(handle, bufb->buf_wsb, i);
+ if (!sb)
+ goto free_array;
+ sb->id = subbuffer_id(config, 0, 1, i);
+ }
/* Assign read-side subbuffer table */
	if (extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);
+ /* Allocate subbuffer packet counter table */
+ align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_counts));
+ set_shmp(bufb->buf_cnt, zalloc_shm(shmobj,
+ sizeof(struct lttng_ust_lib_ring_buffer_backend_counts)
+ * num_subbuf));
+ if (caa_unlikely(!shmp(handle, bufb->buf_cnt)))
+ goto free_wsb;
+
/* Assign pages to page index */
for (i = 0; i < num_subbuf_alloc; i++) {
- for (j = 0; j < num_pages_per_subbuf; j++) {
- CHAN_WARN_ON(chanb, page_idx > num_pages);
- bufb->array[i]->p[j].virt = virt[page_idx];
- bufb->array[i]->p[j].page = pages[page_idx];
- page_idx++;
- }
+ struct lttng_ust_lib_ring_buffer_backend_pages_shmp *sbp;
+ struct lttng_ust_lib_ring_buffer_backend_pages *pages;
+ struct shm_ref ref;
+
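+		/*
+		 * Point this subbuffer's backend pages at its slice of the
+		 * contiguous memory_map area: same shm object, offset
+		 * shifted by i * subbuf_size.
+		 */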
+ ref.index = bufb->memory_map._ref.index;
+ ref.offset = bufb->memory_map._ref.offset;
+ ref.offset += i * subbuf_size;
+
+ sbp = shmp_index(handle, bufb->array, i);
+ if (!sbp)
+ goto free_array;
+ pages = shmp(handle, sbp->shmp);
+ if (!pages)
+ goto free_array;
+ set_shmp(pages->p, ref);
if (config->output == RING_BUFFER_MMAP) {
- bufb->array[i]->mmap_offset = mmap_offset;
+ pages->mmap_offset = mmap_offset;
mmap_offset += subbuf_size;
}
}
-
- kfree(virt);
- kfree(pages);
return 0;
+free_wsb:
+ /* bufb->buf_wsb will be freed by shm teardown */
free_array:
- for (i = 0; (i < num_subbuf_alloc && bufb->array[i]); i++)
- kfree(bufb->array[i]);
-depopulate:
- /* Free all allocated pages */
- for (i = 0; (i < num_pages && pages[i]); i++)
- __free_page(pages[i]);
- kfree(bufb->array);
+ /* bufb->array[i] will be freed by shm teardown */
+memory_map_error:
+ /* bufb->array will be freed by shm teardown */
array_error:
- kfree(virt);
-virt_error:
- kfree(pages);
-pages_error:
+page_size_error:
return -ENOMEM;
}
-int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
- struct channel_backend *chanb, int cpu)
+int lib_ring_buffer_backend_create(struct lttng_ust_lib_ring_buffer_backend *bufb,
+ struct channel_backend *chanb, int cpu,
+ struct lttng_ust_shm_handle *handle,
+ struct shm_object *shmobj)
{
- const struct lib_ring_buffer_config *config = chanb->config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
- bufb->chan = caa_container_of(chanb, struct channel, backend);
+ set_shmp(bufb->chan, handle->chan._ref);
bufb->cpu = cpu;
return lib_ring_buffer_backend_allocate(config, bufb, chanb->buf_size,
chanb->num_subbuf,
- chanb->extra_reader_sb);
+ chanb->extra_reader_sb,
+ handle, shmobj);
}
-void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb)
+void lib_ring_buffer_backend_reset(struct lttng_ust_lib_ring_buffer_backend *bufb,
+ struct lttng_ust_shm_handle *handle)
{
- struct channel_backend *chanb = &bufb->chan->backend;
- unsigned long i, j, num_subbuf_alloc;
-
- num_subbuf_alloc = chanb->num_subbuf;
- if (chanb->extra_reader_sb)
- num_subbuf_alloc++;
-
- kfree(bufb->buf_wsb);
- for (i = 0; i < num_subbuf_alloc; i++) {
- for (j = 0; j < bufb->num_pages_per_subbuf; j++)
- __free_page(bufb->array[i]->p[j].page);
- kfree(bufb->array[i]);
- }
- kfree(bufb->array);
- bufb->allocated = 0;
-}
-
-void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb)
-{
- struct channel_backend *chanb = &bufb->chan->backend;
- const struct lib_ring_buffer_config *config = chanb->config;
+ struct channel_backend *chanb;
+ const struct lttng_ust_lib_ring_buffer_config *config;
unsigned long num_subbuf_alloc;
unsigned int i;
+ chanb = &shmp(handle, bufb->chan)->backend;
+ if (!chanb)
+ return;
+ config = &chanb->config;
+
num_subbuf_alloc = chanb->num_subbuf;
if (chanb->extra_reader_sb)
num_subbuf_alloc++;
- for (i = 0; i < chanb->num_subbuf; i++)
- bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);
+ for (i = 0; i < chanb->num_subbuf; i++) {
+ struct lttng_ust_lib_ring_buffer_backend_subbuffer *sb;
+
+ sb = shmp_index(handle, bufb->buf_wsb, i);
+ if (!sb)
+ return;
+ sb->id = subbuffer_id(config, 0, 1, i);
+ }
if (chanb->extra_reader_sb)
bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);
for (i = 0; i < num_subbuf_alloc; i++) {
+ struct lttng_ust_lib_ring_buffer_backend_pages_shmp *sbp;
+ struct lttng_ust_lib_ring_buffer_backend_pages *pages;
+
+ sbp = shmp_index(handle, bufb->array, i);
+ if (!sbp)
+ return;
+ pages = shmp(handle, sbp->shmp);
+ if (!pages)
+ return;
/* Don't reset mmap_offset */
- v_set(config, &bufb->array[i]->records_commit, 0);
- v_set(config, &bufb->array[i]->records_unread, 0);
- bufb->array[i]->data_size = 0;
+ v_set(config, &pages->records_commit, 0);
+ v_set(config, &pages->records_unread, 0);
+ pages->data_size = 0;
/* Don't reset backend page and virt addresses */
}
	/* Don't reset num_pages_per_subbuf, cpu, allocated */
}

/**
 * channel_backend_reset - reset a channel backend
 * @chanb: channel backend
 */
void channel_backend_reset(struct channel_backend *chanb)
{
struct channel *chan = caa_container_of(chanb, struct channel, backend);
- const struct lib_ring_buffer_config *config = chanb->config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
/*
	 * Don't reset buf_size, subbuf_size, subbuf_size_order,
	 * num_subbuf_order, buf_size_order, num_subbuf, name, config.
	 */
chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
}
-#ifdef CONFIG_HOTPLUG_CPU
-/**
- * lib_ring_buffer_cpu_hp_callback - CPU hotplug callback
- * @nb: notifier block
- * @action: hotplug action to take
- * @hcpu: CPU number
- *
- * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
- */
-static
-int __cpuinit lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
- unsigned long action,
- void *hcpu)
-{
- unsigned int cpu = (unsigned long)hcpu;
- struct channel_backend *chanb = caa_container_of(nb, struct channel_backend,
- cpu_hp_notifier);
- const struct lib_ring_buffer_config *config = chanb->config;
- struct lib_ring_buffer *buf;
- int ret;
-
- CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
-
- switch (action) {
- case CPU_UP_PREPARE:
- case CPU_UP_PREPARE_FROZEN:
- buf = per_cpu_ptr(chanb->buf, cpu);
- ret = lib_ring_buffer_create(buf, chanb, cpu);
- if (ret) {
- printk(KERN_ERR
- "ring_buffer_cpu_hp_callback: cpu %d "
- "buffer creation failed\n", cpu);
- return NOTIFY_BAD;
- }
- break;
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- /* No need to do a buffer switch here, because it will happen
- * when tracing is stopped, or will be done by switch timer CPU
- * DEAD callback. */
- break;
- }
- return NOTIFY_OK;
-}
-#endif
-
/**
* channel_backend_init - initialize a channel backend
* @chanb: channel backend
* @name: channel name
* @config: client ring buffer configuration
- * @priv: client private data
- * @parent: dentry of parent directory, %NULL for root directory
- * @subbuf_size: size of sub-buffers (> PAGE_SIZE, power of 2)
+ * @subbuf_size: size of sub-buffers (> page size, power of 2)
* @num_subbuf: number of sub-buffers (power of 2)
+ * @handle: shared memory handle
+ * @stream_fds: stream file descriptors.
*
- * Returns channel pointer if successful, %NULL otherwise.
+ * Returns 0 on success, negative error value otherwise.
*
*/
int channel_backend_init(struct channel_backend *chanb,
const char *name,
- const struct lib_ring_buffer_config *config,
- void *priv, size_t subbuf_size, size_t num_subbuf)
+ const struct lttng_ust_lib_ring_buffer_config *config,
+ size_t subbuf_size, size_t num_subbuf,
+ struct lttng_ust_shm_handle *handle,
+ const int *stream_fds)
{
struct channel *chan = caa_container_of(chanb, struct channel, backend);
unsigned int i;
int ret;
+ size_t shmsize = 0, num_subbuf_alloc;
+ long page_size;
if (!name)
return -EPERM;
- if (!(subbuf_size && num_subbuf))
- return -EPERM;
-
+ page_size = sysconf(_SC_PAGE_SIZE);
+ if (page_size <= 0) {
+ return -ENOMEM;
+ }
- /* Check that the subbuffer size is larger than a page. */
+ /* Check that the subbuffer size is at least a page. */
- if (subbuf_size < PAGE_SIZE)
+ if (subbuf_size < page_size)
return -EINVAL;
/*
- * Make sure the number of subbuffers and subbuffer size are power of 2.
+ * Make sure the number of subbuffers and subbuffer size are
+ * power of 2, and nonzero.
+ */
+ if (!subbuf_size || (subbuf_size & (subbuf_size - 1)))
+ return -EINVAL;
+ if (!num_subbuf || (num_subbuf & (num_subbuf - 1)))
+ return -EINVAL;
+ /*
+ * Overwrite mode buffers require at least 2 subbuffers per
+ * buffer.
*/
- CHAN_WARN_ON(chanb, hweight32(subbuf_size) != 1);
- CHAN_WARN_ON(chanb, hweight32(num_subbuf) != 1);
+ if (config->mode == RING_BUFFER_OVERWRITE && num_subbuf < 2)
+ return -EINVAL;
ret = subbuffer_id_check_index(config, num_subbuf);
if (ret)
return ret;
- chanb->priv = priv;
chanb->buf_size = num_subbuf * subbuf_size;
chanb->subbuf_size = subbuf_size;
chanb->buf_size_order = get_count_order(chanb->buf_size);
chanb->extra_reader_sb =
(config->mode == RING_BUFFER_OVERWRITE) ? 1 : 0;
chanb->num_subbuf = num_subbuf;
- strlcpy(chanb->name, name, NAME_MAX);
- chanb->config = config;
-
- if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
- if (!zalloc_cpumask_var(&chanb->cpumask, GFP_KERNEL))
- return -ENOMEM;
- }
+ strncpy(chanb->name, name, NAME_MAX);
+ chanb->name[NAME_MAX - 1] = '\0';
+ memcpy(&chanb->config, config, sizeof(*config));
+
+ /* Per-cpu buffer size: control (prior to backend) */
+ shmsize = offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer));
+ shmsize += sizeof(struct lttng_ust_lib_ring_buffer);
+
+ /* Per-cpu buffer size: backend */
+	/* num_subbuf + 1 is the worst case (extra reader subbuffer) */
+ num_subbuf_alloc = num_subbuf + 1;
+ shmsize += offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages_shmp));
+ shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp) * num_subbuf_alloc;
+ shmsize += offset_align(shmsize, page_size);
+ shmsize += subbuf_size * num_subbuf_alloc;
+ shmsize += offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages));
+ shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_pages) * num_subbuf_alloc;
+ shmsize += offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_subbuffer));
+ shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_subbuffer) * num_subbuf;
+ shmsize += offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_counts));
+ shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_counts) * num_subbuf;
+ /* Per-cpu buffer size: control (after backend) */
+ shmsize += offset_align(shmsize, __alignof__(struct commit_counters_hot));
+ shmsize += sizeof(struct commit_counters_hot) * num_subbuf;
+ shmsize += offset_align(shmsize, __alignof__(struct commit_counters_cold));
+ shmsize += sizeof(struct commit_counters_cold) * num_subbuf;
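+
+	/*
+	 * This layout must match, in order and alignment, the
+	 * align_shm()/zalloc_shm() sequence performed on the same shm
+	 * object by lib_ring_buffer_create() and
+	 * lib_ring_buffer_backend_allocate().
+	 */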
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
- /* Allocating the buffer per-cpu structures */
- chanb->buf = alloc_percpu(struct lib_ring_buffer);
- if (!chanb->buf)
- goto free_cpumask;
-
- /*
- * In case of non-hotplug cpu, if the ring-buffer is allocated
- * in early initcall, it will not be notified of secondary cpus.
- * In that off case, we need to allocate for all possible cpus.
- */
-#ifdef CONFIG_HOTPLUG_CPU
+ struct lttng_ust_lib_ring_buffer *buf;
/*
- * buf->backend.allocated test takes care of concurrent CPU
- * hotplug.
- * Priority higher than frontend, so we create the ring buffer
- * before we start the timer.
+ * We need to allocate for all possible cpus.
*/
- chanb->cpu_hp_notifier.notifier_call =
- lib_ring_buffer_cpu_hp_callback;
- chanb->cpu_hp_notifier.priority = 5;
- register_hotcpu_notifier(&chanb->cpu_hp_notifier);
-
- get_online_cpus();
- for_each_online_cpu(i) {
- ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
- chanb, i);
- if (ret)
- goto free_bufs; /* cpu hotplug locked */
- }
- put_online_cpus();
-#else
for_each_possible_cpu(i) {
- ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
- chanb, i);
+ struct shm_object *shmobj;
+
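+			/*
+			 * Each per-cpu buffer gets its own shm object,
+			 * backed by the stream file descriptor passed in
+			 * for that cpu.
+			 */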
+ shmobj = shm_object_table_alloc(handle->table, shmsize,
+ SHM_OBJECT_SHM, stream_fds[i], i);
+ if (!shmobj)
+ goto end;
+ align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer));
+ set_shmp(chanb->buf[i].shmp, zalloc_shm(shmobj, sizeof(struct lttng_ust_lib_ring_buffer)));
+ buf = shmp(handle, chanb->buf[i].shmp);
+ if (!buf)
+ goto end;
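+			/* Keep the buffer's own shm reference in buf->self. */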
+ set_shmp(buf->self, chanb->buf[i].shmp._ref);
+ ret = lib_ring_buffer_create(buf, chanb, i,
+ handle, shmobj);
if (ret)
-			goto free_bufs; /* cpu hotplug locked */
+			goto free_bufs;
}
-#endif
} else {
- chanb->buf = kzalloc(sizeof(struct lib_ring_buffer), GFP_KERNEL);
- if (!chanb->buf)
- goto free_cpumask;
- ret = lib_ring_buffer_create(chanb->buf, chanb, -1);
+ struct shm_object *shmobj;
+ struct lttng_ust_lib_ring_buffer *buf;
+
+ shmobj = shm_object_table_alloc(handle->table, shmsize,
+ SHM_OBJECT_SHM, stream_fds[0], -1);
+ if (!shmobj)
+ goto end;
+ align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer));
+ set_shmp(chanb->buf[0].shmp, zalloc_shm(shmobj, sizeof(struct lttng_ust_lib_ring_buffer)));
+ buf = shmp(handle, chanb->buf[0].shmp);
+ if (!buf)
+ goto end;
+ set_shmp(buf->self, chanb->buf[0].shmp._ref);
+ ret = lib_ring_buffer_create(buf, chanb, -1,
+ handle, shmobj);
if (ret)
goto free_bufs;
}
return 0;
free_bufs:
- if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
- for_each_possible_cpu(i) {
- struct lib_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);
-
- if (!buf->backend.allocated)
- continue;
- lib_ring_buffer_free(buf);
- }
-#ifdef CONFIG_HOTPLUG_CPU
- put_online_cpus();
-#endif
- free_percpu(chanb->buf);
- } else
- kfree(chanb->buf);
-free_cpumask:
- if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
- free_cpumask_var(chanb->cpumask);
+ /* We only free the buffer data upon shm teardown */
+end:
return -ENOMEM;
}
-/**
- * channel_backend_unregister_notifiers - unregister notifiers
- * @chan: the channel
- *
- * Holds CPU hotplug.
- */
-void channel_backend_unregister_notifiers(struct channel_backend *chanb)
-{
- const struct lib_ring_buffer_config *config = chanb->config;
-
- if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
- unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
-}
-
/**
* channel_backend_free - destroy the channel
* @chan: the channel
*
* Destroy all channel buffers and frees the channel.
*/
-void channel_backend_free(struct channel_backend *chanb)
+void channel_backend_free(struct channel_backend *chanb,
+ struct lttng_ust_shm_handle *handle)
{
- const struct lib_ring_buffer_config *config = chanb->config;
- unsigned int i;
-
- if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
- for_each_possible_cpu(i) {
- struct lib_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);
-
- if (!buf->backend.allocated)
- continue;
- lib_ring_buffer_free(buf);
- }
- free_cpumask_var(chanb->cpumask);
- free_percpu(chanb->buf);
- } else {
- struct lib_ring_buffer *buf = chanb->buf;
-
- CHAN_WARN_ON(chanb, !buf->backend.allocated);
- lib_ring_buffer_free(buf);
- kfree(buf);
- }
+ /* SHM teardown takes care of everything */
}
-/**
- * lib_ring_buffer_write - write data to a ring_buffer buffer.
- * @bufb : buffer backend
- * @offset : offset within the buffer
- * @src : source address
- * @len : length to write
- * @pagecpy : page size copied so far
- */
-void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb, size_t offset,
- const void *src, size_t len, ssize_t pagecpy)
-{
- struct channel_backend *chanb = &bufb->chan->backend;
- const struct lib_ring_buffer_config *config = chanb->config;
- size_t sbidx, index;
- struct lib_ring_buffer_backend_pages *rpages;
- unsigned long sb_bindex, id;
-
- do {
- len -= pagecpy;
- src += pagecpy;
- offset += pagecpy;
- sbidx = offset >> chanb->subbuf_size_order;
- index = (offset & (chanb->subbuf_size - 1)) >> get_count_order(PAGE_SIZE);
-
- /*
- * Underlying layer should never ask for writes across
- * subbuffers.
- */
- CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
-
- pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
- id = bufb->buf_wsb[sbidx].id;
- sb_bindex = subbuffer_id_get_index(config, id);
- rpages = bufb->array[sb_bindex];
- CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
- && subbuffer_id_is_noref(config, id));
- lib_ring_buffer_do_copy(config,
- rpages->p[index].virt
- + (offset & ~PAGE_MASK),
- src, pagecpy);
- } while (unlikely(len != pagecpy));
-}
-EXPORT_SYMBOL_GPL(_lib_ring_buffer_write);
-
/**
 * lib_ring_buffer_read - read data from the ring buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : length to copy to destination
 *
 * Should be protected by get_subbuf/put_subbuf.
* Returns the length copied.
*/
-size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb, size_t offset,
- void *dest, size_t len)
+size_t lib_ring_buffer_read(struct lttng_ust_lib_ring_buffer_backend *bufb, size_t offset,
+ void *dest, size_t len, struct lttng_ust_shm_handle *handle)
{
- struct channel_backend *chanb = &bufb->chan->backend;
- const struct lib_ring_buffer_config *config = chanb->config;
- size_t index;
- ssize_t pagecpy, orig_len;
- struct lib_ring_buffer_backend_pages *rpages;
+ struct channel_backend *chanb;
+ const struct lttng_ust_lib_ring_buffer_config *config;
+ ssize_t orig_len;
+ struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
+ struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
unsigned long sb_bindex, id;
+ void *src;
+ chanb = &shmp(handle, bufb->chan)->backend;
+ if (!chanb)
+ return 0;
+ config = &chanb->config;
orig_len = len;
offset &= chanb->buf_size - 1;
- index = (offset & (chanb->subbuf_size - 1)) >> get_count_order(PAGE_SIZE);
- if (unlikely(!len))
+
+ if (caa_unlikely(!len))
return 0;
- for (;;) {
- pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
- id = bufb->buf_rsb.id;
- sb_bindex = subbuffer_id_get_index(config, id);
- rpages = bufb->array[sb_bindex];
- CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
- && subbuffer_id_is_noref(config, id));
- memcpy(dest, rpages->p[index].virt + (offset & ~PAGE_MASK),
- pagecpy);
- len -= pagecpy;
- if (likely(!len))
- break;
- dest += pagecpy;
- offset += pagecpy;
- index = (offset & (chanb->subbuf_size - 1)) >> get_count_order(PAGE_SIZE);
- /*
- * Underlying layer should never ask for reads across
- * subbuffers.
- */
- CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
- }
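+	/*
+	 * Subbuffers are contiguous in the shm mapping, so the whole
+	 * request is served by a single memcpy (the kernel backend had
+	 * to copy page by page).
+	 */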
+ id = bufb->buf_rsb.id;
+ sb_bindex = subbuffer_id_get_index(config, id);
+ rpages = shmp_index(handle, bufb->array, sb_bindex);
+ if (!rpages)
+ return 0;
+ /*
+ * Underlying layer should never ask for reads across
+ * subbuffers.
+ */
+ CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
+ CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
+ && subbuffer_id_is_noref(config, id));
+ backend_pages = shmp(handle, rpages->shmp);
+ if (!backend_pages)
+ return 0;
+ src = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
+ if (caa_unlikely(!src))
+ return 0;
+ memcpy(dest, src, len);
return orig_len;
}
-EXPORT_SYMBOL_GPL(lib_ring_buffer_read);
/**
* lib_ring_buffer_read_cstr - read a C-style string from ring_buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
* @len : destination's length
*
- * return string's length
+ * Return string's length, or -EINVAL on error.
* Should be protected by get_subbuf/put_subbuf.
+ * Destination length should be at least 1 to hold '\0'.
*/
-int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb, size_t offset,
- void *dest, size_t len)
+int lib_ring_buffer_read_cstr(struct lttng_ust_lib_ring_buffer_backend *bufb, size_t offset,
+ void *dest, size_t len, struct lttng_ust_shm_handle *handle)
{
- struct channel_backend *chanb = &bufb->chan->backend;
- const struct lib_ring_buffer_config *config = chanb->config;
- size_t index;
- ssize_t pagecpy, pagelen, strpagelen, orig_offset;
+ struct channel_backend *chanb;
+ const struct lttng_ust_lib_ring_buffer_config *config;
+ ssize_t string_len, orig_offset;
char *str;
- struct lib_ring_buffer_backend_pages *rpages;
+ struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
+ struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
unsigned long sb_bindex, id;
+ chanb = &shmp(handle, bufb->chan)->backend;
+ if (!chanb)
+ return -EINVAL;
+ config = &chanb->config;
+ if (caa_unlikely(!len))
+ return -EINVAL;
offset &= chanb->buf_size - 1;
- index = (offset & (chanb->subbuf_size - 1)) >> get_count_order(PAGE_SIZE);
orig_offset = offset;
- for (;;) {
- id = bufb->buf_rsb.id;
- sb_bindex = subbuffer_id_get_index(config, id);
- rpages = bufb->array[sb_bindex];
- CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
- && subbuffer_id_is_noref(config, id));
- str = (char *)rpages->p[index].virt + (offset & ~PAGE_MASK);
- pagelen = PAGE_SIZE - (offset & ~PAGE_MASK);
- strpagelen = strnlen(str, pagelen);
- if (len) {
- pagecpy = min_t(size_t, len, strpagelen);
- if (dest) {
- memcpy(dest, str, pagecpy);
- dest += pagecpy;
- }
- len -= pagecpy;
- }
- offset += strpagelen;
- index = (offset & (chanb->subbuf_size - 1)) >> get_count_order(PAGE_SIZE);
- if (strpagelen < pagelen)
- break;
- /*
- * Underlying layer should never ask for reads across
- * subbuffers.
- */
- CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
- }
- if (dest && len)
- ((char *)dest)[0] = 0;
- return offset - orig_offset;
-}
-EXPORT_SYMBOL_GPL(lib_ring_buffer_read_cstr);
-
-/**
- * lib_ring_buffer_read_get_page - Get a whole page to read from
- * @bufb : buffer backend
- * @offset : offset within the buffer
- * @virt : pointer to page address (output)
- *
- * Should be protected by get_subbuf/put_subbuf.
- * Returns the pointer to the page struct pointer.
- */
-struct page **lib_ring_buffer_read_get_page(struct lib_ring_buffer_backend *bufb,
- size_t offset, void ***virt)
-{
- size_t index;
- struct lib_ring_buffer_backend_pages *rpages;
- struct channel_backend *chanb = &bufb->chan->backend;
- const struct lib_ring_buffer_config *config = chanb->config;
- unsigned long sb_bindex, id;
-
- offset &= chanb->buf_size - 1;
- index = (offset & (chanb->subbuf_size - 1)) >> get_count_order(PAGE_SIZE);
id = bufb->buf_rsb.id;
sb_bindex = subbuffer_id_get_index(config, id);
- rpages = bufb->array[sb_bindex];
+ rpages = shmp_index(handle, bufb->array, sb_bindex);
+ if (!rpages)
+ return -EINVAL;
+ /*
+ * Underlying layer should never ask for reads across
+ * subbuffers.
+ */
+ CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
&& subbuffer_id_is_noref(config, id));
- *virt = &rpages->p[index].virt;
- return &rpages->p[index].page;
+ backend_pages = shmp(handle, rpages->shmp);
+ if (!backend_pages)
+ return -EINVAL;
+ str = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
+ if (caa_unlikely(!str))
+ return -EINVAL;
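+	/* Copy at most len - 1 bytes, reserving room for the '\0'. */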
+	string_len = strnlen(str, len - 1);
+	if (dest) {
+		memcpy(dest, str, string_len);
+		((char *)dest)[string_len] = '\0';
+	}
+	offset += string_len;
+	return offset - orig_offset;
}
-EXPORT_SYMBOL_GPL(lib_ring_buffer_read_get_page);
/**
* lib_ring_buffer_read_offset_address - get address of a buffer location
*
* Return the address where a given offset is located (for read).
* Should be used to get the current subbuffer header pointer. Given we know
- * it's never on a page boundary, it's safe to write directly to this address,
- * as long as the write is never bigger than a page size.
+ * it's never on a page boundary, it's safe to read/write directly
+ * from/to this address, as long as the read/write is never bigger than
+ * a page size.
*/
-void *lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
- size_t offset)
+void *lib_ring_buffer_read_offset_address(struct lttng_ust_lib_ring_buffer_backend *bufb,
+ size_t offset,
+ struct lttng_ust_shm_handle *handle)
{
- size_t index;
- struct lib_ring_buffer_backend_pages *rpages;
- struct channel_backend *chanb = &bufb->chan->backend;
- const struct lib_ring_buffer_config *config = chanb->config;
+ struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
+ struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+ struct channel_backend *chanb;
+ const struct lttng_ust_lib_ring_buffer_config *config;
unsigned long sb_bindex, id;
+ chanb = &shmp(handle, bufb->chan)->backend;
+ if (!chanb)
+ return NULL;
+ config = &chanb->config;
offset &= chanb->buf_size - 1;
- index = (offset & (chanb->subbuf_size - 1)) >> get_count_order(PAGE_SIZE);
id = bufb->buf_rsb.id;
sb_bindex = subbuffer_id_get_index(config, id);
- rpages = bufb->array[sb_bindex];
+ rpages = shmp_index(handle, bufb->array, sb_bindex);
+ if (!rpages)
+ return NULL;
CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
&& subbuffer_id_is_noref(config, id));
- return rpages->p[index].virt + (offset & ~PAGE_MASK);
+ backend_pages = shmp(handle, rpages->shmp);
+ if (!backend_pages)
+ return NULL;
+ return shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
}
-EXPORT_SYMBOL_GPL(lib_ring_buffer_read_offset_address);
/**
 * lib_ring_buffer_offset_address - get address of a location within the buffer
 *
 * Return the address where a given offset is located.
 * Should be used to get the current subbuffer header pointer. Given we know
* it's always at the beginning of a page, it's safe to write directly to this
* address, as long as the write is never bigger than a page size.
*/
-void *lib_ring_buffer_offset_address(struct lib_ring_buffer_backend *bufb,
- size_t offset)
+void *lib_ring_buffer_offset_address(struct lttng_ust_lib_ring_buffer_backend *bufb,
+ size_t offset,
+ struct lttng_ust_shm_handle *handle)
{
- size_t sbidx, index;
- struct lib_ring_buffer_backend_pages *rpages;
- struct channel_backend *chanb = &bufb->chan->backend;
- const struct lib_ring_buffer_config *config = chanb->config;
+ size_t sbidx;
+ struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
+ struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+ struct channel_backend *chanb;
+ const struct lttng_ust_lib_ring_buffer_config *config;
unsigned long sb_bindex, id;
+ struct lttng_ust_lib_ring_buffer_backend_subbuffer *sb;
+ chanb = &shmp(handle, bufb->chan)->backend;
+ if (!chanb)
+ return NULL;
+ config = &chanb->config;
offset &= chanb->buf_size - 1;
sbidx = offset >> chanb->subbuf_size_order;
- index = (offset & (chanb->subbuf_size - 1)) >> get_count_order(PAGE_SIZE);
- id = bufb->buf_wsb[sbidx].id;
+ sb = shmp_index(handle, bufb->buf_wsb, sbidx);
+ if (!sb)
+ return NULL;
+ id = sb->id;
sb_bindex = subbuffer_id_get_index(config, id);
- rpages = bufb->array[sb_bindex];
+ rpages = shmp_index(handle, bufb->array, sb_bindex);
+ if (!rpages)
+ return NULL;
CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
&& subbuffer_id_is_noref(config, id));
- return rpages->p[index].virt + (offset & ~PAGE_MASK);
+ backend_pages = shmp(handle, rpages->shmp);
+ if (!backend_pages)
+ return NULL;
+ return shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
}
-EXPORT_SYMBOL_GPL(lib_ring_buffer_offset_address);