Initialize ring buffer client private data at channel creation
diff --git a/libringbuffer/ring_buffer_frontend.c b/libringbuffer/ring_buffer_frontend.c
index a344d415f1d1f459e933ebc7733cf5ebfc2d488a..6de8336861e4dadc43d77cf4f77140c54f67f64f 100644
@@ -46,7 +46,7 @@
 #include <urcu/ref.h>
 
 #include "smp.h"
-#include <ust/ringbuffer-config.h>
+#include <lttng/ringbuffer-config.h>
 #include "backend.h"
 #include "frontend.h"
 #include "shm.h"
@@ -174,7 +174,7 @@ int lib_ring_buffer_create(struct lttng_ust_lib_ring_buffer *buf,
 {
        const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
        struct channel *chan = caa_container_of(chanb, struct channel, backend);
-       void *priv = chanb->priv;
+       void *priv = channel_get_private(chan);
        unsigned int num_subbuf;
        size_t subbuf_header_size;
        u64 tsc;
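
The channel_get_private() accessor used here is not part of this diff; given that channel_create() below records where the private area starts in chan->priv_data_offset, it presumably resolves the pointer from that offset. A minimal sketch, assuming the offset is relative to the start of struct channel:

	/* Hypothetical sketch: return the client private data area, laid out
	 * in the same shm object right after the channel structure. */
	static inline
	void *channel_get_private(struct channel *chan)
	{
		return ((char *) chan) + chan->priv_data_offset;
	}
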
@@ -409,7 +409,10 @@ static void channel_free(struct channel *chan, struct lttng_ust_shm_handle *hand
  * channel_create - Create channel.
  * @config: ring buffer instance configuration
  * @name: name of the channel
- * @priv: ring buffer client private data
+ * @priv_data: ring buffer client private data area pointer (output)
+ * @priv_data_align: alignment, in bytes, of the private data area.
+ * @priv_data_size: length, in bytes, of the private data area.
+ * @priv_data_init: initialization data for private data.
  * @buf_addr: pointer to the beginning of the preallocated buffer contiguous
  *            address mapping. It is used only by RING_BUFFER_STATIC
  *            configuration. It can be set to NULL for other backends.
@@ -424,14 +426,18 @@ static void channel_free(struct channel *chan, struct lttng_ust_shm_handle *hand
  * Returns NULL on failure.
  */
 struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_lib_ring_buffer_config *config,
-                  const char *name, void *priv, void *buf_addr,
-                  size_t subbuf_size,
+                  const char *name,
+                  void **priv_data,
+                  size_t priv_data_align,
+                  size_t priv_data_size,
+                  void *priv_data_init,
+                  void *buf_addr, size_t subbuf_size,
                   size_t num_subbuf, unsigned int switch_timer_interval,
                   unsigned int read_timer_interval,
                   int *shm_fd, int *wait_fd, uint64_t *memory_map_size)
 {
        int ret, cpu;
-       size_t shmsize;
+       size_t shmsize, chansize;
        struct channel *chan;
        struct lttng_ust_shm_handle *handle;
        struct shm_object *shmobj;
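
With the new signature, the client no longer passes an already-allocated private pointer: it describes the area it needs (alignment, size, initial contents) and receives, through the priv_data output parameter, a pointer into the channel's shared memory. A usage sketch, where struct client_priv and the surrounding variables are hypothetical:

	struct client_priv init = { 0 };	/* client initialization data */
	void *priv;
	int shm_fd, wait_fd;
	uint64_t memory_map_size;
	struct lttng_ust_shm_handle *handle;

	handle = channel_create(&client_config, "my-chan",
				&priv,			/* output: pointer into shm */
				__alignof__(struct client_priv),
				sizeof(struct client_priv),
				&init,			/* copied into the area */
				NULL,			/* buf_addr */
				subbuf_size, num_subbuf,
				switch_timer_interval, read_timer_interval,
				&shm_fd, &wait_fd, &memory_map_size);
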
@@ -452,23 +458,43 @@ struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_lib_ring_buff
 
        /* Calculate the shm allocation layout */
        shmsize = sizeof(struct channel);
+       shmsize += offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_shmp));
        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
                shmsize += sizeof(struct lttng_ust_lib_ring_buffer_shmp) * num_possible_cpus();
        else
                shmsize += sizeof(struct lttng_ust_lib_ring_buffer_shmp);
+       chansize = shmsize;
+       shmsize += offset_align(shmsize, priv_data_align);
+       shmsize += priv_data_size;
 
        shmobj = shm_object_table_append(handle->table, shmsize);
        if (!shmobj)
                goto error_append;
        /* struct channel is at object 0, offset 0 (hardcoded) */
-       set_shmp(handle->chan, zalloc_shm(shmobj, sizeof(struct channel)));
+       set_shmp(handle->chan, zalloc_shm(shmobj, chansize));
        assert(handle->chan._ref.index == 0);
        assert(handle->chan._ref.offset == 0);
        chan = shmp(handle, handle->chan);
        if (!chan)
                goto error_append;
 
-       ret = channel_backend_init(&chan->backend, name, config, priv,
+       /* space for private data */
+       if (priv_data_size) {
+               DECLARE_SHMP(void, priv_data_alloc);
+
+               align_shm(shmobj, priv_data_align);
+               chan->priv_data_offset = shmobj->allocated_len;
+               set_shmp(priv_data_alloc, zalloc_shm(shmobj, priv_data_size));
+               if (!shmp(handle, priv_data_alloc))
+                       goto error_append;
+               *priv_data = channel_get_private(chan);
+               memcpy(*priv_data, priv_data_init, priv_data_size);
+       } else {
+               chan->priv_data_offset = -1;
+               *priv_data = NULL;
+       }
+
+       ret = channel_backend_init(&chan->backend, name, config,
                                   subbuf_size, num_subbuf, handle);
        if (ret)
                goto error_backend_init;
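
The layout computation depends on offset_align() returning the padding needed to bring the running size up to the requested power-of-two alignment; the private area therefore presumably starts at chansize plus that padding, which is the value later recorded in chan->priv_data_offset. offset_align() is defined in the urcu/lttng headers; its assumed behaviour is roughly:

	/* Assumed semantics: number of padding bytes to add to `offset`
	 * so it becomes a multiple of `align` (a power of two). */
	#define offset_align(offset, align) \
		(((align) - (offset)) & ((align) - 1))
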
@@ -569,19 +595,17 @@ void channel_release(struct channel *chan, struct lttng_ust_shm_handle *handle,
  * Call "destroy" callback, finalize channels, decrement the channel
  * reference count. Note that when readers have completed data
  * consumption of finalized channels, get_subbuf() will return -ENODATA.
- * They should release their handle at that point.  Returns the private
- * data pointer.
+ * They should release their handle at that point.
  */
-void *channel_destroy(struct channel *chan, struct lttng_ust_shm_handle *handle,
+void channel_destroy(struct channel *chan, struct lttng_ust_shm_handle *handle,
                int shadow)
 {
        const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
-       void *priv;
        int cpu;
 
        if (shadow) {
                channel_release(chan, handle, shadow);
-               return NULL;
+               return;
        }
 
        channel_unregister_notifiers(chan, handle);
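
Since channel_destroy() now returns void, a caller that previously consumed the returned private pointer must fetch it through channel_get_private() while the channel is still alive; the area lives inside the channel's shared memory, so it must not be touched once the handle has been released. A hedged before/after sketch:

	/* Before this change: */
	priv = channel_destroy(chan, handle, 0);

	/* After: grab the pointer first, if the client still needs it. */
	priv = channel_get_private(chan);
	channel_destroy(chan, handle, 0);
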
@@ -592,7 +616,7 @@ void *channel_destroy(struct channel *chan, struct lttng_ust_shm_handle *handle,
 
                        if (config->cb.buffer_finalize)
                                config->cb.buffer_finalize(buf,
-                                                          chan->backend.priv,
+                                                          channel_get_private(chan),
                                                           cpu, handle);
                        if (buf->backend.allocated)
                                lib_ring_buffer_switch_slow(buf, SWITCH_FLUSH,
@@ -608,7 +632,7 @@ void *channel_destroy(struct channel *chan, struct lttng_ust_shm_handle *handle,
                struct lttng_ust_lib_ring_buffer *buf = shmp(handle, chan->backend.buf[0].shmp);
 
                if (config->cb.buffer_finalize)
-                       config->cb.buffer_finalize(buf, chan->backend.priv, -1, handle);
+                       config->cb.buffer_finalize(buf, channel_get_private(chan), -1, handle);
                if (buf->backend.allocated)
                        lib_ring_buffer_switch_slow(buf, SWITCH_FLUSH,
                                                handle);
@@ -626,9 +650,8 @@ void *channel_destroy(struct channel *chan, struct lttng_ust_shm_handle *handle,
         * sessiond/consumer are keeping a reference on the shm file
         * descriptor directly. No need to refcount.
         */
-       priv = chan->backend.priv;
        channel_release(chan, handle, shadow);
-       return priv;
+       return;
 }
 
 struct lttng_ust_lib_ring_buffer *channel_get_ring_buffer(
@@ -1005,7 +1028,7 @@ void lib_ring_buffer_print_errors(struct channel *chan,
                                  struct lttng_ust_shm_handle *handle)
 {
        const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
-       void *priv = chan->backend.priv;
+       void *priv = channel_get_private(chan);
 
        ERRMSG("ring buffer %s, cpu %d: %lu records written, "
                          "%lu records overrun\n",
@@ -1212,7 +1235,7 @@ int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
         * quiescence guarantees for the fusion merge.
         */
        if (mode == SWITCH_FLUSH || off > 0) {
-               if (unlikely(off == 0)) {
+               if (caa_unlikely(off == 0)) {
                        /*
                         * The client does not save any header information.
                         * Don't switch empty subbuffer on finalize, because it
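
This hunk and the ones that follow mechanically replace the bare likely()/unlikely() annotations with the caa_-prefixed variants from userspace RCU (urcu/compiler.h), which avoids clashing with other headers that define the unprefixed names. They presumably expand to the usual GCC branch-prediction hints:

	/* Assumed definitions, as in urcu/compiler.h: */
	#define caa_likely(x)	__builtin_expect(!!(x), 1)
	#define caa_unlikely(x)	__builtin_expect(!!(x), 0)
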
@@ -1323,7 +1346,7 @@ int lib_ring_buffer_try_reserve_slow(struct lttng_ust_lib_ring_buffer *buf,
        if (last_tsc_overflow(config, buf, ctx->tsc))
                ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
 
-       if (unlikely(subbuf_offset(offsets->begin, ctx->chan) == 0)) {
+       if (caa_unlikely(subbuf_offset(offsets->begin, ctx->chan) == 0)) {
                offsets->switch_new_start = 1;          /* For offsets->begin */
        } else {
                offsets->size = config->cb.record_header_size(config, chan,
@@ -1334,19 +1357,19 @@ int lib_ring_buffer_try_reserve_slow(struct lttng_ust_lib_ring_buffer *buf,
                        lib_ring_buffer_align(offsets->begin + offsets->size,
                                              ctx->largest_align)
                        + ctx->data_size;
-               if (unlikely(subbuf_offset(offsets->begin, chan) +
+               if (caa_unlikely(subbuf_offset(offsets->begin, chan) +
                             offsets->size > chan->backend.subbuf_size)) {
                        offsets->switch_old_end = 1;    /* For offsets->old */
                        offsets->switch_new_start = 1;  /* For offsets->begin */
                }
        }
-       if (unlikely(offsets->switch_new_start)) {
+       if (caa_unlikely(offsets->switch_new_start)) {
                unsigned long sb_index;
 
                /*
                 * We are typically not filling the previous buffer completely.
                 */
-               if (likely(offsets->switch_old_end))
+               if (caa_likely(offsets->switch_old_end))
                        offsets->begin = subbuf_align(offsets->begin, chan);
                offsets->begin = offsets->begin
                                 + config->cb.subbuffer_header_size();
@@ -1358,9 +1381,9 @@ int lib_ring_buffer_try_reserve_slow(struct lttng_ust_lib_ring_buffer *buf,
                  - ((unsigned long) v_read(config,
                                            &shmp_index(handle, buf->commit_cold, sb_index)->cc_sb)
                     & chan->commit_count_mask);
-               if (likely(reserve_commit_diff == 0)) {
+               if (caa_likely(reserve_commit_diff == 0)) {
                        /* Next subbuffer not being written to. */
-                       if (unlikely(config->mode != RING_BUFFER_OVERWRITE &&
+                       if (caa_unlikely(config->mode != RING_BUFFER_OVERWRITE &&
                                subbuf_trunc(offsets->begin, chan)
                                 - subbuf_trunc((unsigned long)
                                     uatomic_read(&buf->consumed), chan)
@@ -1398,7 +1421,7 @@ int lib_ring_buffer_try_reserve_slow(struct lttng_ust_lib_ring_buffer *buf,
                        lib_ring_buffer_align(offsets->begin + offsets->size,
                                              ctx->largest_align)
                        + ctx->data_size;
-               if (unlikely(subbuf_offset(offsets->begin, chan)
+               if (caa_unlikely(subbuf_offset(offsets->begin, chan)
                             + offsets->size > chan->backend.subbuf_size)) {
                        /*
                         * Record too big for subbuffers, report error, don't
@@ -1420,7 +1443,7 @@ int lib_ring_buffer_try_reserve_slow(struct lttng_ust_lib_ring_buffer *buf,
        }
        offsets->end = offsets->begin + offsets->size;
 
-       if (unlikely(subbuf_offset(offsets->end, chan) == 0)) {
+       if (caa_unlikely(subbuf_offset(offsets->end, chan) == 0)) {
                /*
                 * The offset_end will fall at the very beginning of the next
                 * subbuffer.
@@ -1458,9 +1481,9 @@ int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx)
        do {
                ret = lib_ring_buffer_try_reserve_slow(buf, chan, &offsets,
                                                       ctx);
-               if (unlikely(ret))
+               if (caa_unlikely(ret))
                        return ret;
-       } while (unlikely(v_cmpxchg(config, &buf->offset, offsets.old,
+       } while (caa_unlikely(v_cmpxchg(config, &buf->offset, offsets.old,
                                    offsets.end)
                          != offsets.old));
 
@@ -1487,7 +1510,7 @@ int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx)
        /*
         * Switch old subbuffer if needed.
         */
-       if (unlikely(offsets.switch_old_end)) {
+       if (caa_unlikely(offsets.switch_old_end)) {
                lib_ring_buffer_clear_noref(config, &buf->backend,
                                            subbuf_index(offsets.old - 1, chan),
                                            handle);
@@ -1497,10 +1520,10 @@ int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx)
        /*
         * Populate new subbuffer.
         */
-       if (unlikely(offsets.switch_new_start))
+       if (caa_unlikely(offsets.switch_new_start))
                lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx->tsc, handle);
 
-       if (unlikely(offsets.switch_new_end))
+       if (caa_unlikely(offsets.switch_new_end))
                lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx->tsc, handle);
 
        ctx->slot_size = offsets.size;