/*
* ring_buffer_backend.c
*
- * Copyright (C) 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
- * Dual LGPL v2.1/GPL v2 license.
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#define _GNU_SOURCE
#include <urcu/arch.h>
+#include <limits.h>
-#include "ust/core.h"
-
-#include <ust/ringbuffer-config.h>
+#include <lttng/ringbuffer-config.h>
+#include "vatomic.h"
#include "backend.h"
#include "frontend.h"
#include "smp.h"
align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages_shmp));
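+ /* Table of shm references to the backend pages, one per allocated sub-buffer. */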
set_shmp(bufb->array, zalloc_shm(shmobj,
sizeof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp) * num_subbuf_alloc));
- if (unlikely(!shmp(handle, bufb->array)))
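+ /* caa_unlikely is liburcu's branch-prediction hint (urcu/compiler.h). */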
+ if (caa_unlikely(!shmp(handle, bufb->array)))
goto array_error;
/*
 * This is the largest element (the buffer pages) which needs to
 * be aligned on PAGE_SIZE.
 */
align_shm(shmobj, PAGE_SIZE);
set_shmp(bufb->memory_map, zalloc_shm(shmobj,
subbuf_size * num_subbuf_alloc));
- if (unlikely(!shmp(handle, bufb->memory_map)))
+ if (caa_unlikely(!shmp(handle, bufb->memory_map)))
goto memory_map_error;
/* Allocate write-side subbuffer table */
set_shmp(bufb->buf_wsb, zalloc_shm(shmobj,
sizeof(struct lttng_ust_lib_ring_buffer_backend_subbuffer)
* num_subbuf));
- if (unlikely(!shmp(handle, bufb->buf_wsb)))
+ if (caa_unlikely(!shmp(handle, bufb->buf_wsb)))
goto free_array;
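+ /* Initially, each write-side slot references its own sub-buffer, with the noref flag set. */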
for (i = 0; i < num_subbuf; i++)
shmp_index(handle, bufb->buf_wsb, i)->id = subbuffer_id(config, 0, 1, i);
handle, shmobj);
}
-void lib_ring_buffer_backend_free(struct lttng_ust_lib_ring_buffer_backend *bufb)
-{
- /* bufb->buf_wsb will be freed by shm teardown */
- /* bufb->array[i] will be freed by shm teardown */
- /* bufb->array will be freed by shm teardown */
- bufb->allocated = 0;
-}
-
void lib_ring_buffer_backend_reset(struct lttng_ust_lib_ring_buffer_backend *bufb,
struct lttng_ust_shm_handle *handle)
{
* @chanb: channel backend
* @name: channel name
* @config: client ring buffer configuration
- * @priv: client private data
- * @parent: dentry of parent directory, %NULL for root directory
- * @subbuf_size: size of sub-buffers (> PAGE_SIZE, power of 2)
+ * @subbuf_size: size of sub-buffers (>= PAGE_SIZE, power of 2)
* @num_subbuf: number of sub-buffers (power of 2)
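+ * @handle: shared memory handle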
int channel_backend_init(struct channel_backend *chanb,
const char *name,
const struct lttng_ust_lib_ring_buffer_config *config,
- void *priv, size_t subbuf_size, size_t num_subbuf,
+ size_t subbuf_size, size_t num_subbuf,
struct lttng_ust_shm_handle *handle)
{
struct channel *chan = caa_container_of(chanb, struct channel, backend);
if (!name)
return -EPERM;
- if (!(subbuf_size && num_subbuf))
- return -EPERM;
-
- /* Check that the subbuffer size is larger than a page. */
+ /* Check that the subbuffer size is at least a page. */
if (subbuf_size < PAGE_SIZE)
return -EINVAL;
/*
- * Make sure the number of subbuffers and subbuffer size are power of 2.
+ * Make sure the number of subbuffers and the subbuffer size are
+ * each a nonzero power of 2.
*/
- CHAN_WARN_ON(chanb, hweight32(subbuf_size) != 1);
- CHAN_WARN_ON(chanb, hweight32(num_subbuf) != 1);
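+ /*
+ * A nonzero x is a power of 2 iff (x & (x - 1)) == 0: subtracting 1
+ * clears the lowest set bit and sets every bit below it, so the AND
+ * is zero exactly when a single bit is set.
+ */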
+ if (!subbuf_size || (subbuf_size & (subbuf_size - 1)))
+ return -EINVAL;
+ if (!num_subbuf || (num_subbuf & (num_subbuf - 1)))
+ return -EINVAL;
ret = subbuffer_id_check_index(config, num_subbuf);
if (ret)
return ret;
- chanb->priv = priv;
chanb->buf_size = num_subbuf * subbuf_size;
chanb->subbuf_size = subbuf_size;
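+ /* num_subbuf and subbuf_size are powers of 2, so buf_size_order is exactly log2(buf_size). */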
chanb->buf_size_order = get_count_order(chanb->buf_size);
for_each_possible_cpu(i) {
struct shm_object *shmobj;
- shmobj = shm_object_table_append(handle->table, shmsize);
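+ /* Allocate a table object explicitly backed by shared memory. */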
+ shmobj = shm_object_table_alloc(handle->table, shmsize,
+ SHM_OBJECT_SHM);
if (!shmobj)
goto end;
align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer));
struct shm_object *shmobj;
struct lttng_ust_lib_ring_buffer *buf;
- shmobj = shm_object_table_append(handle->table, shmsize);
+ shmobj = shm_object_table_alloc(handle->table, shmsize,
+ SHM_OBJECT_SHM);
if (!shmobj)
goto end;
align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer));
buf = shmp(handle, chanb->buf[0].shmp);
if (!buf)
goto end;
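+ /* Keep a back-reference to the buffer's own shm location in buf->self. */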
+ set_shmp(buf->self, chanb->buf[0].shmp._ref);
ret = lib_ring_buffer_create(buf, chanb, -1,
handle, shmobj);
if (ret)
return 0;
free_bufs:
- if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
- for_each_possible_cpu(i) {
- struct lttng_ust_lib_ring_buffer *buf = shmp(handle, chanb->buf[i].shmp);
-
- if (!buf->backend.allocated)
- continue;
- lib_ring_buffer_free(buf, handle);
- }
- }
/* We only free the buffer data upon shm teardown */
end:
return -ENOMEM;
void channel_backend_free(struct channel_backend *chanb,
struct lttng_ust_shm_handle *handle)
{
- const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
- unsigned int i;
-
- if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
- for_each_possible_cpu(i) {
- struct lttng_ust_lib_ring_buffer *buf = shmp(handle, chanb->buf[i].shmp);
-
- if (!buf->backend.allocated)
- continue;
- lib_ring_buffer_free(buf, handle);
- }
- } else {
- struct lttng_ust_lib_ring_buffer *buf = shmp(handle, chanb->buf[0].shmp);
-
- CHAN_WARN_ON(chanb, !buf->backend.allocated);
- lib_ring_buffer_free(buf, handle);
- }
- /* We only free the buffer data upon shm teardown */
+ /* SHM teardown takes care of everything */
}
/**
orig_len = len;
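+ /* buf_size is a power of 2: masking wraps offset into the buffer. */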
offset &= chanb->buf_size - 1;
- if (unlikely(!len))
+ if (caa_unlikely(!len))
return 0;
id = bufb->buf_rsb.id;
sb_bindex = subbuffer_id_get_index(config, id);