init private data

diff --git a/libringbuffer/ring_buffer_frontend.c b/libringbuffer/ring_buffer_frontend.c
index bd774dfa4645633708dc82c68f35a074436e73c2..6de8336861e4dadc43d77cf4f77140c54f67f64f 100644
--- a/libringbuffer/ring_buffer_frontend.c
+++ b/libringbuffer/ring_buffer_frontend.c
@@ -46,7 +46,7 @@
 #include <urcu/ref.h>
 
 #include "smp.h"
-#include <ust/ringbuffer-config.h>
+#include <lttng/ringbuffer-config.h>
 #include "backend.h"
 #include "frontend.h"
 #include "shm.h"
@@ -85,14 +85,14 @@ __thread unsigned int lib_ring_buffer_nesting;
 
 static
 void lib_ring_buffer_print_errors(struct channel *chan,
-                                 struct lib_ring_buffer *buf, int cpu,
-                                 struct shm_handle *handle);
+                                 struct lttng_ust_lib_ring_buffer *buf, int cpu,
+                                 struct lttng_ust_shm_handle *handle);
 
 /*
  * Must be called under cpu hotplug protection.
  */
-void lib_ring_buffer_free(struct lib_ring_buffer *buf,
-                         struct shm_handle *handle)
+void lib_ring_buffer_free(struct lttng_ust_lib_ring_buffer *buf,
+                         struct lttng_ust_shm_handle *handle)
 {
        struct channel *chan = shmp(handle, buf->backend.chan);
 
@@ -112,11 +112,11 @@ void lib_ring_buffer_free(struct lib_ring_buffer *buf,
  * should not be using the iterator concurrently with reset. The previous
  * current iterator record is reset.
  */
-void lib_ring_buffer_reset(struct lib_ring_buffer *buf,
-                          struct shm_handle *handle)
+void lib_ring_buffer_reset(struct lttng_ust_lib_ring_buffer *buf,
+                          struct lttng_ust_shm_handle *handle)
 {
        struct channel *chan = shmp(handle, buf->backend.chan);
-       const struct lib_ring_buffer_config *config = chan->backend.config;
+       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
        unsigned int i;
 
        /*
@@ -167,14 +167,14 @@ void channel_reset(struct channel *chan)
 /*
  * Must be called under cpu hotplug protection.
  */
-int lib_ring_buffer_create(struct lib_ring_buffer *buf,
+int lib_ring_buffer_create(struct lttng_ust_lib_ring_buffer *buf,
                           struct channel_backend *chanb, int cpu,
-                          struct shm_handle *handle,
+                          struct lttng_ust_shm_handle *handle,
                           struct shm_object *shmobj)
 {
-       const struct lib_ring_buffer_config *config = chanb->config;
+       const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
        struct channel *chan = caa_container_of(chanb, struct channel, backend);
-       void *priv = chanb->priv;
+       void *priv = channel_get_private(chan);
        unsigned int num_subbuf;
        size_t subbuf_header_size;
        u64 tsc;
@@ -242,14 +242,14 @@ free_chanbuf:
 #if 0
 static void switch_buffer_timer(unsigned long data)
 {
-       struct lib_ring_buffer *buf = (struct lib_ring_buffer *)data;
+       struct lttng_ust_lib_ring_buffer *buf = (struct lttng_ust_lib_ring_buffer *)data;
        struct channel *chan = shmp(handle, buf->backend.chan);
-       const struct lib_ring_buffer_config *config = chan->backend.config;
+       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
 
        /*
         * Only flush buffers periodically if readers are active.
         */
-       if (uatomic_read(&buf->active_readers))
+       if (uatomic_read(&buf->active_readers) || uatomic_read(&buf->active_shadow_readers))
                lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE, handle);
 
        //TODO timers
@@ -262,11 +262,11 @@ static void switch_buffer_timer(unsigned long data)
 }
 #endif //0
 
-static void lib_ring_buffer_start_switch_timer(struct lib_ring_buffer *buf,
-                          struct shm_handle *handle)
+static void lib_ring_buffer_start_switch_timer(struct lttng_ust_lib_ring_buffer *buf,
+                          struct lttng_ust_shm_handle *handle)
 {
        struct channel *chan = shmp(handle, buf->backend.chan);
-       const struct lib_ring_buffer_config *config = chan->backend.config;
+       //const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
 
        if (!chan->switch_timer_interval || buf->switch_timer_enabled)
                return;
@@ -282,8 +282,8 @@ static void lib_ring_buffer_start_switch_timer(struct lib_ring_buffer *buf,
        buf->switch_timer_enabled = 1;
 }
 
-static void lib_ring_buffer_stop_switch_timer(struct lib_ring_buffer *buf,
-                          struct shm_handle *handle)
+static void lib_ring_buffer_stop_switch_timer(struct lttng_ust_lib_ring_buffer *buf,
+                          struct lttng_ust_shm_handle *handle)
 {
        struct channel *chan = shmp(handle, buf->backend.chan);
 
@@ -301,13 +301,13 @@ static void lib_ring_buffer_stop_switch_timer(struct lib_ring_buffer *buf,
  */
 static void read_buffer_timer(unsigned long data)
 {
-       struct lib_ring_buffer *buf = (struct lib_ring_buffer *)data;
+       struct lttng_ust_lib_ring_buffer *buf = (struct lttng_ust_lib_ring_buffer *)data;
        struct channel *chan = shmp(handle, buf->backend.chan);
-       const struct lib_ring_buffer_config *config = chan->backend.config;
+       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
 
        CHAN_WARN_ON(chan, !buf->backend.allocated);
 
-       if (uatomic_read(&buf->active_readers)
+       if ((uatomic_read(&buf->active_readers) || uatomic_read(&buf->active_shadow_readers))
            && lib_ring_buffer_poll_deliver(config, buf, chan)) {
                //TODO
                //wake_up_interruptible(&buf->read_wait);
@@ -324,11 +324,11 @@ static void read_buffer_timer(unsigned long data)
 }
 #endif //0
 
-static void lib_ring_buffer_start_read_timer(struct lib_ring_buffer *buf,
-                          struct shm_handle *handle)
+static void lib_ring_buffer_start_read_timer(struct lttng_ust_lib_ring_buffer *buf,
+                          struct lttng_ust_shm_handle *handle)
 {
        struct channel *chan = shmp(handle, buf->backend.chan);
-       const struct lib_ring_buffer_config *config = chan->backend.config;
+       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
 
        if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER
            || !chan->read_timer_interval
@@ -348,11 +348,11 @@ static void lib_ring_buffer_start_read_timer(struct lib_ring_buffer *buf,
        buf->read_timer_enabled = 1;
 }
 
-static void lib_ring_buffer_stop_read_timer(struct lib_ring_buffer *buf,
-                          struct shm_handle *handle)
+static void lib_ring_buffer_stop_read_timer(struct lttng_ust_lib_ring_buffer *buf,
+                          struct lttng_ust_shm_handle *handle)
 {
        struct channel *chan = shmp(handle, buf->backend.chan);
-       const struct lib_ring_buffer_config *config = chan->backend.config;
+       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
 
        if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER
            || !chan->read_timer_interval
@@ -374,20 +374,20 @@ static void lib_ring_buffer_stop_read_timer(struct lib_ring_buffer *buf,
 }
 
 static void channel_unregister_notifiers(struct channel *chan,
-                          struct shm_handle *handle)
+                          struct lttng_ust_shm_handle *handle)
 {
-       const struct lib_ring_buffer_config *config = chan->backend.config;
+       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
        int cpu;
 
        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
                for_each_possible_cpu(cpu) {
-                       struct lib_ring_buffer *buf = shmp(handle, chan->backend.buf[cpu].shmp);
+                       struct lttng_ust_lib_ring_buffer *buf = shmp(handle, chan->backend.buf[cpu].shmp);
 
                        lib_ring_buffer_stop_switch_timer(buf, handle);
                        lib_ring_buffer_stop_read_timer(buf, handle);
                }
        } else {
-               struct lib_ring_buffer *buf = shmp(handle, chan->backend.buf[0].shmp);
+               struct lttng_ust_lib_ring_buffer *buf = shmp(handle, chan->backend.buf[0].shmp);
 
                lib_ring_buffer_stop_switch_timer(buf, handle);
                lib_ring_buffer_stop_read_timer(buf, handle);
@@ -395,11 +395,11 @@ static void channel_unregister_notifiers(struct channel *chan,
        //channel_backend_unregister_notifiers(&chan->backend);
 }
 
-static void channel_free(struct channel *chan, struct shm_handle *handle)
+static void channel_free(struct channel *chan, struct lttng_ust_shm_handle *handle,
+               int shadow)
 {
-       int ret;
-
-       channel_backend_free(&chan->backend, handle);
+       if (!shadow)
+               channel_backend_free(&chan->backend, handle);
        /* chan is freed by shm teardown */
        shm_object_table_destroy(handle->table);
        free(handle);
@@ -409,7 +409,10 @@ static void channel_free(struct channel *chan, struct shm_handle *handle)
  * channel_create - Create channel.
  * @config: ring buffer instance configuration
  * @name: name of the channel
- * @priv: ring buffer client private data
+ * @priv_data: ring buffer client private data area pointer (output)
+ * @priv_data_align: alignment, in bytes, of the private data area.
+ * @priv_data_size: length, in bytes, of the private data area.
+ * @priv_data_init: initialization data for private data.
 * @buf_addr: pointer to the beginning of the preallocated buffer contiguous
  *            address mapping. It is used only by RING_BUFFER_STATIC
  *            configuration. It can be set to NULL for other backends.
@@ -423,23 +425,29 @@ static void channel_free(struct channel *chan, struct shm_handle *handle)
  * Holds cpu hotplug.
  * Returns NULL on failure.
  */
-struct shm_handle *channel_create(const struct lib_ring_buffer_config *config,
-                  const char *name, void *priv, void *buf_addr,
-                  size_t subbuf_size,
+struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_lib_ring_buffer_config *config,
+                  const char *name,
+                  void **priv_data,
+                  size_t priv_data_align,
+                  size_t priv_data_size,
+                  void *priv_data_init,
+                  void *buf_addr, size_t subbuf_size,
                   size_t num_subbuf, unsigned int switch_timer_interval,
-                  unsigned int read_timer_interval)
+                  unsigned int read_timer_interval,
+                  int *shm_fd, int *wait_fd, uint64_t *memory_map_size)
 {
        int ret, cpu;
-       size_t shmsize;
+       size_t shmsize, chansize;
        struct channel *chan;
-       struct shm_handle *handle;
+       struct lttng_ust_shm_handle *handle;
        struct shm_object *shmobj;
+       struct shm_ref *ref;
 
        if (lib_ring_buffer_check_config(config, switch_timer_interval,
                                         read_timer_interval))
                return NULL;
 
-       handle = zmalloc(sizeof(struct shm_handle));
+       handle = zmalloc(sizeof(struct lttng_ust_shm_handle));
        if (!handle)
                return NULL;
 
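For orientation, a minimal caller sketch of the revised channel_create(): the
private data template is copied into shared memory, a pointer to the mapped
copy comes back through priv_data, and the shm/wait file descriptors are
returned for hand-off to a consumer. struct client_priv and the helper below
are illustrative assumptions, not names from this file:

#include <stdint.h>
#include "frontend.h"	/* channel_create() and handle types (assumed) */

struct client_priv {		/* hypothetical client private data */
	uint64_t session_id;
};

static struct lttng_ust_shm_handle *create_example_channel(
		const struct lttng_ust_lib_ring_buffer_config *client_config)
{
	struct client_priv init = { .session_id = 42 };
	void *priv;
	int shm_fd, wait_fd;
	uint64_t memory_map_size;

	return channel_create(client_config, "example-chan",
			&priv, __alignof__(struct client_priv),
			sizeof(struct client_priv), &init,
			NULL,		/* buf_addr: RING_BUFFER_STATIC only */
			4096, 4,	/* subbuf_size, num_subbuf */
			0, 0,		/* switch/read timer intervals */
			&shm_fd, &wait_fd, &memory_map_size);
}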
@@ -450,20 +458,43 @@ struct shm_handle *channel_create(const struct lib_ring_buffer_config *config,
 
        /* Calculate the shm allocation layout */
        shmsize = sizeof(struct channel);
+       shmsize += offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_shmp));
        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
-               shmsize += sizeof(struct lib_ring_buffer_shmp) * num_possible_cpus();
+               shmsize += sizeof(struct lttng_ust_lib_ring_buffer_shmp) * num_possible_cpus();
        else
-               shmsize += sizeof(struct lib_ring_buffer_shmp);
+               shmsize += sizeof(struct lttng_ust_lib_ring_buffer_shmp);
+       chansize = shmsize;
+       shmsize += offset_align(shmsize, priv_data_align);
+       shmsize += priv_data_size;
 
        shmobj = shm_object_table_append(handle->table, shmsize);
        if (!shmobj)
                goto error_append;
-       set_shmp(handle->chan, zalloc_shm(shmobj, sizeof(struct channel)));
+       /* struct channel is at object 0, offset 0 (hardcoded) */
+       set_shmp(handle->chan, zalloc_shm(shmobj, chansize));
+       assert(handle->chan._ref.index == 0);
+       assert(handle->chan._ref.offset == 0);
        chan = shmp(handle, handle->chan);
        if (!chan)
                goto error_append;
 
-       ret = channel_backend_init(&chan->backend, name, config, priv,
+       /* space for private data */
+       if (priv_data_size) {
+               DECLARE_SHMP(void, priv_data_alloc);
+
+               align_shm(shmobj, priv_data_align);
+               chan->priv_data_offset = shmobj->allocated_len;
+               set_shmp(priv_data_alloc, zalloc_shm(shmobj, priv_data_size));
+               if (!shmp(handle, priv_data_alloc))
+                       goto error_append;
+               *priv_data = channel_get_private(chan);
+               memcpy(*priv_data, priv_data_init, priv_data_size);
+       } else {
+               chan->priv_data_offset = -1;
+               *priv_data = NULL;
+       }
+
+       ret = channel_backend_init(&chan->backend, name, config,
                                   subbuf_size, num_subbuf, handle);
        if (ret)
                goto error_backend_init;
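channel_get_private(), used here and in lib_ring_buffer_create() above,
resolves the offset recorded in chan->priv_data_offset. A sketch consistent
with this hunk (the real helper lives in the frontend headers); it relies on
struct channel sitting at offset 0 of shm object 0, so the recorded shm-object
offset is also an offset from the channel itself:

static inline
void *channel_get_private(struct channel *chan)
{
	/* priv_data_offset is set to shmobj->allocated_len above. */
	return ((char *) chan) + chan->priv_data_offset;
}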
@@ -483,17 +514,18 @@ struct shm_handle *channel_create(const struct lib_ring_buffer_config *config,
                 * In that off case, we need to allocate for all possible cpus.
                 */
                for_each_possible_cpu(cpu) {
-                       struct lib_ring_buffer *buf = shmp(handle, chan->backend.buf[cpu].shmp);
+                       struct lttng_ust_lib_ring_buffer *buf = shmp(handle, chan->backend.buf[cpu].shmp);
                        lib_ring_buffer_start_switch_timer(buf, handle);
                        lib_ring_buffer_start_read_timer(buf, handle);
                }
        } else {
-               struct lib_ring_buffer *buf = shmp(handle, chan->backend.buf[0].shmp);
+               struct lttng_ust_lib_ring_buffer *buf = shmp(handle, chan->backend.buf[0].shmp);
 
                lib_ring_buffer_start_switch_timer(buf, handle);
                lib_ring_buffer_start_read_timer(buf, handle);
        }
-
+       ref = &handle->chan._ref;
+       shm_get_object_data(handle, ref, shm_fd, wait_fd, memory_map_size);
        return handle;
 
 error_backend_init:
@@ -504,10 +536,55 @@ error_table_alloc:
        return NULL;
 }
 
+struct lttng_ust_shm_handle *channel_handle_create(int shm_fd, int wait_fd,
+                                       uint64_t memory_map_size)
+{
+       struct lttng_ust_shm_handle *handle;
+       struct shm_object *object;
+
+       handle = zmalloc(sizeof(struct lttng_ust_shm_handle));
+       if (!handle)
+               return NULL;
+
+       /* Allocate table for channel + per-cpu buffers */
+       handle->table = shm_object_table_create(1 + num_possible_cpus());
+       if (!handle->table)
+               goto error_table_alloc;
+       /* Add channel object */
+       object = shm_object_table_append_shadow(handle->table,
+                       shm_fd, wait_fd, memory_map_size);
+       if (!object)
+               goto error_table_object;
+       /* struct channel is at object 0, offset 0 (hardcoded) */
+       handle->chan._ref.index = 0;
+       handle->chan._ref.offset = 0;
+       return handle;
+
+error_table_object:
+       shm_object_table_destroy(handle->table);
+error_table_alloc:
+       free(handle);
+       return NULL;
+}
+
+int channel_handle_add_stream(struct lttng_ust_shm_handle *handle,
+               int shm_fd, int wait_fd, uint64_t memory_map_size)
+{
+       struct shm_object *object;
+
+       /* Add stream object */
+       object = shm_object_table_append_shadow(handle->table,
+                       shm_fd, wait_fd, memory_map_size);
+       if (!object)
+               return -1;
+       return 0;
+}
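Together, channel_handle_create() and channel_handle_add_stream() let a
consumer process rebuild a shadow view of a channel from file descriptors
received over IPC. A hedged consumer-side sketch (fd transport and error
teardown omitted; function and parameter names are illustrative):

static struct lttng_ust_shm_handle *map_shadow_channel(int chan_shm_fd,
		int chan_wait_fd, uint64_t chan_map_size,
		int buf_shm_fd, int buf_wait_fd, uint64_t buf_map_size)
{
	struct lttng_ust_shm_handle *handle;

	handle = channel_handle_create(chan_shm_fd, chan_wait_fd,
			chan_map_size);
	if (!handle)
		return NULL;
	/* One add_stream call per stream (per-cpu buffer) received. */
	if (channel_handle_add_stream(handle, buf_shm_fd, buf_wait_fd,
			buf_map_size) < 0)
		return NULL;
	return handle;
}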
+
 static
-void channel_release(struct channel *chan, struct shm_handle *handle)
+void channel_release(struct channel *chan, struct lttng_ust_shm_handle *handle,
+               int shadow)
 {
-       channel_free(chan, handle);
+       channel_free(chan, handle, shadow);
 }
 
 /**
@@ -518,24 +595,28 @@ void channel_release(struct channel *chan, struct shm_handle *handle)
  * Call "destroy" callback, finalize channels, decrement the channel
  * reference count. Note that when readers have completed data
  * consumption of finalized channels, get_subbuf() will return -ENODATA.
- * They should release their handle at that point.  Returns the private
- * data pointer.
+ * They should release their handle at that point.
  */
-void *channel_destroy(struct channel *chan, struct shm_handle *handle)
+void channel_destroy(struct channel *chan, struct lttng_ust_shm_handle *handle,
+               int shadow)
 {
-       const struct lib_ring_buffer_config *config = chan->backend.config;
-       void *priv;
+       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
        int cpu;
 
+       if (shadow) {
+               channel_release(chan, handle, shadow);
+               return;
+       }
+
        channel_unregister_notifiers(chan, handle);
 
        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
                for_each_channel_cpu(cpu, chan) {
-                       struct lib_ring_buffer *buf = shmp(handle, chan->backend.buf[cpu].shmp);
+                       struct lttng_ust_lib_ring_buffer *buf = shmp(handle, chan->backend.buf[cpu].shmp);
 
                        if (config->cb.buffer_finalize)
                                config->cb.buffer_finalize(buf,
-                                                          chan->backend.priv,
+                                                          channel_get_private(chan),
                                                           cpu, handle);
                        if (buf->backend.allocated)
                                lib_ring_buffer_switch_slow(buf, SWITCH_FLUSH,
@@ -548,10 +629,10 @@ void *channel_destroy(struct channel *chan, struct shm_handle *handle)
                        //wake_up_interruptible(&buf->read_wait);
                }
        } else {
-               struct lib_ring_buffer *buf = shmp(handle, chan->backend.buf[0].shmp);
+               struct lttng_ust_lib_ring_buffer *buf = shmp(handle, chan->backend.buf[0].shmp);
 
                if (config->cb.buffer_finalize)
-                       config->cb.buffer_finalize(buf, chan->backend.priv, -1, handle);
+                       config->cb.buffer_finalize(buf, channel_get_private(chan), -1, handle);
                if (buf->backend.allocated)
                        lib_ring_buffer_switch_slow(buf, SWITCH_FLUSH,
                                                handle);
@@ -569,38 +650,62 @@ void *channel_destroy(struct channel *chan, struct shm_handle *handle)
         * sessiond/consumer are keeping a reference on the shm file
         * descriptor directly. No need to refcount.
         */
-       priv = chan->backend.priv;
-       channel_release(chan, handle);
-       return priv;
+       channel_release(chan, handle, shadow);
+       return;
 }
 
-struct lib_ring_buffer *channel_get_ring_buffer(
-                                       const struct lib_ring_buffer_config *config,
+struct lttng_ust_lib_ring_buffer *channel_get_ring_buffer(
+                                       const struct lttng_ust_lib_ring_buffer_config *config,
                                        struct channel *chan, int cpu,
-                                       struct shm_handle *handle)
+                                       struct lttng_ust_shm_handle *handle,
+                                       int *shm_fd, int *wait_fd,
+                                       uint64_t *memory_map_size)
 {
-       if (config->alloc == RING_BUFFER_ALLOC_GLOBAL)
+       struct shm_ref *ref;
+
+       if (config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
+               ref = &chan->backend.buf[0].shmp._ref;
+               shm_get_object_data(handle, ref, shm_fd, wait_fd,
+                       memory_map_size);
                return shmp(handle, chan->backend.buf[0].shmp);
-       else
+       } else {
+               if (cpu >= num_possible_cpus())
+                       return NULL;
+               ref = &chan->backend.buf[cpu].shmp._ref;
+               shm_get_object_data(handle, ref, shm_fd, wait_fd,
+                       memory_map_size);
                return shmp(handle, chan->backend.buf[cpu].shmp);
+       }
 }
 
-int lib_ring_buffer_open_read(struct lib_ring_buffer *buf,
-                             struct shm_handle *handle)
+int lib_ring_buffer_open_read(struct lttng_ust_lib_ring_buffer *buf,
+                             struct lttng_ust_shm_handle *handle,
+                             int shadow)
 {
-       struct channel *chan = shmp(handle, buf->backend.chan);
-
+       if (shadow) {
+               if (uatomic_cmpxchg(&buf->active_shadow_readers, 0, 1) != 0)
+                       return -EBUSY;
+               cmm_smp_mb();
+               return 0;
+       }
        if (uatomic_cmpxchg(&buf->active_readers, 0, 1) != 0)
                return -EBUSY;
        cmm_smp_mb();
        return 0;
 }
 
-void lib_ring_buffer_release_read(struct lib_ring_buffer *buf,
-                                 struct shm_handle *handle)
+void lib_ring_buffer_release_read(struct lttng_ust_lib_ring_buffer *buf,
+                                 struct lttng_ust_shm_handle *handle,
+                                 int shadow)
 {
        struct channel *chan = shmp(handle, buf->backend.chan);
 
+       if (shadow) {
+               CHAN_WARN_ON(chan, uatomic_read(&buf->active_shadow_readers) != 1);
+               cmm_smp_mb();
+               uatomic_dec(&buf->active_shadow_readers);
+               return;
+       }
        CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1);
        cmm_smp_mb();
        uatomic_dec(&buf->active_readers);
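With the shadow flag plumbed through open/release, a shadow reader follows the
usual snapshot/get/put cycle. A sketch of one consumption pass, assuming one
full sub-buffer is consumed per get/put pair (the data copy itself would go
through the backend read API):

static void consume_one_subbuf(struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle)
{
	struct channel *chan = shmp(handle, buf->backend.chan);
	unsigned long consumed, produced;

	if (lib_ring_buffer_open_read(buf, handle, 1))	/* 1: shadow */
		return;
	if (!lib_ring_buffer_snapshot(buf, &consumed, &produced, handle)
	    && !lib_ring_buffer_get_subbuf(buf, consumed, handle)) {
		/* ... read the sub-buffer contents here ... */
		lib_ring_buffer_put_subbuf(buf, handle);
		lib_ring_buffer_move_consumer(buf,
				consumed + chan->backend.subbuf_size, handle);
	}
	lib_ring_buffer_release_read(buf, handle, 1);
}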
@@ -616,12 +721,12 @@ void lib_ring_buffer_release_read(struct lib_ring_buffer *buf,
  * data to read at consumed position, or 0 if the get operation succeeds.
  */
 
-int lib_ring_buffer_snapshot(struct lib_ring_buffer *buf,
+int lib_ring_buffer_snapshot(struct lttng_ust_lib_ring_buffer *buf,
                             unsigned long *consumed, unsigned long *produced,
-                            struct shm_handle *handle)
+                            struct lttng_ust_shm_handle *handle)
 {
        struct channel *chan = shmp(handle, buf->backend.chan);
-       const struct lib_ring_buffer_config *config = chan->backend.config;
+       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
        unsigned long consumed_cur, write_offset;
        int finalized;
 
@@ -670,15 +775,16 @@ nodata:
  * @buf: ring buffer
  * @consumed_new: new consumed count value
  */
-void lib_ring_buffer_move_consumer(struct lib_ring_buffer *buf,
+void lib_ring_buffer_move_consumer(struct lttng_ust_lib_ring_buffer *buf,
                                   unsigned long consumed_new,
-                                  struct shm_handle *handle)
+                                  struct lttng_ust_shm_handle *handle)
 {
-       struct lib_ring_buffer_backend *bufb = &buf->backend;
+       struct lttng_ust_lib_ring_buffer_backend *bufb = &buf->backend;
        struct channel *chan = shmp(handle, bufb->chan);
        unsigned long consumed;
 
-       CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1);
+       CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1
+                       && uatomic_read(&buf->active_shadow_readers) != 1);
 
        /*
         * Only push the consumed value forward.
@@ -699,12 +805,12 @@ void lib_ring_buffer_move_consumer(struct lib_ring_buffer *buf,
  * Returns -ENODATA if buffer is finalized, -EAGAIN if there is currently no
  * data to read at consumed position, or 0 if the get operation succeeds.
  */
-int lib_ring_buffer_get_subbuf(struct lib_ring_buffer *buf,
+int lib_ring_buffer_get_subbuf(struct lttng_ust_lib_ring_buffer *buf,
                               unsigned long consumed,
-                              struct shm_handle *handle)
+                              struct lttng_ust_shm_handle *handle)
 {
        struct channel *chan = shmp(handle, buf->backend.chan);
-       const struct lib_ring_buffer_config *config = chan->backend.config;
+       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
        unsigned long consumed_cur, consumed_idx, commit_count, write_offset;
        int ret;
        int finalized;
@@ -793,15 +899,16 @@ nodata:
  * lib_ring_buffer_put_subbuf - release exclusive subbuffer access
  * @buf: ring buffer
  */
-void lib_ring_buffer_put_subbuf(struct lib_ring_buffer *buf,
-                               struct shm_handle *handle)
+void lib_ring_buffer_put_subbuf(struct lttng_ust_lib_ring_buffer *buf,
+                               struct lttng_ust_shm_handle *handle)
 {
-       struct lib_ring_buffer_backend *bufb = &buf->backend;
+       struct lttng_ust_lib_ring_buffer_backend *bufb = &buf->backend;
        struct channel *chan = shmp(handle, bufb->chan);
-       const struct lib_ring_buffer_config *config = chan->backend.config;
+       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
        unsigned long read_sb_bindex, consumed_idx, consumed;
 
-       CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1);
+       CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1
+                       && uatomic_read(&buf->active_shadow_readers) != 1);
 
        if (!buf->get_subbuf) {
                /*
@@ -852,13 +959,13 @@ void lib_ring_buffer_put_subbuf(struct lib_ring_buffer *buf,
  * position and the writer position. (inclusive)
  */
 static
-void lib_ring_buffer_print_subbuffer_errors(struct lib_ring_buffer *buf,
+void lib_ring_buffer_print_subbuffer_errors(struct lttng_ust_lib_ring_buffer *buf,
                                            struct channel *chan,
                                            unsigned long cons_offset,
                                            int cpu,
-                                           struct shm_handle *handle)
+                                           struct lttng_ust_shm_handle *handle)
 {
-       const struct lib_ring_buffer_config *config = chan->backend.config;
+       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
        unsigned long cons_idx, commit_count, commit_count_sb;
 
        cons_idx = subbuf_index(cons_offset, chan);
@@ -879,12 +986,12 @@ void lib_ring_buffer_print_subbuffer_errors(struct lib_ring_buffer *buf,
 }
 
 static
-void lib_ring_buffer_print_buffer_errors(struct lib_ring_buffer *buf,
+void lib_ring_buffer_print_buffer_errors(struct lttng_ust_lib_ring_buffer *buf,
                                         struct channel *chan,
                                         void *priv, int cpu,
-                                        struct shm_handle *handle)
+                                        struct lttng_ust_shm_handle *handle)
 {
-       const struct lib_ring_buffer_config *config = chan->backend.config;
+       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
        unsigned long write_offset, cons_offset;
 
        /*
@@ -917,11 +1024,11 @@ void lib_ring_buffer_print_buffer_errors(struct lib_ring_buffer *buf,
 
 static
 void lib_ring_buffer_print_errors(struct channel *chan,
-                                 struct lib_ring_buffer *buf, int cpu,
-                                 struct shm_handle *handle)
+                                 struct lttng_ust_lib_ring_buffer *buf, int cpu,
+                                 struct lttng_ust_shm_handle *handle)
 {
-       const struct lib_ring_buffer_config *config = chan->backend.config;
-       void *priv = chan->backend.priv;
+       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+       void *priv = channel_get_private(chan);
 
        ERRMSG("ring buffer %s, cpu %d: %lu records written, "
                          "%lu records overrun\n",
@@ -949,13 +1056,13 @@ void lib_ring_buffer_print_errors(struct channel *chan,
  * Only executed when the buffer is finalized, in SWITCH_FLUSH.
  */
 static
-void lib_ring_buffer_switch_old_start(struct lib_ring_buffer *buf,
+void lib_ring_buffer_switch_old_start(struct lttng_ust_lib_ring_buffer *buf,
                                      struct channel *chan,
                                      struct switch_offsets *offsets,
                                      u64 tsc,
-                                     struct shm_handle *handle)
+                                     struct lttng_ust_shm_handle *handle)
 {
-       const struct lib_ring_buffer_config *config = chan->backend.config;
+       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
        unsigned long oldidx = subbuf_index(offsets->old, chan);
        unsigned long commit_count;
 
@@ -987,13 +1094,13 @@ void lib_ring_buffer_switch_old_start(struct lib_ring_buffer *buf,
  * subbuffer.
  */
 static
-void lib_ring_buffer_switch_old_end(struct lib_ring_buffer *buf,
+void lib_ring_buffer_switch_old_end(struct lttng_ust_lib_ring_buffer *buf,
                                    struct channel *chan,
                                    struct switch_offsets *offsets,
                                    u64 tsc,
-                                   struct shm_handle *handle)
+                                   struct lttng_ust_shm_handle *handle)
 {
-       const struct lib_ring_buffer_config *config = chan->backend.config;
+       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
        unsigned long oldidx = subbuf_index(offsets->old - 1, chan);
        unsigned long commit_count, padding_size, data_size;
 
@@ -1024,13 +1131,13 @@ void lib_ring_buffer_switch_old_end(struct lib_ring_buffer *buf,
  * that this code is executed before the deliver of this sub-buffer.
  */
 static
-void lib_ring_buffer_switch_new_start(struct lib_ring_buffer *buf,
+void lib_ring_buffer_switch_new_start(struct lttng_ust_lib_ring_buffer *buf,
                                      struct channel *chan,
                                      struct switch_offsets *offsets,
                                      u64 tsc,
-                                     struct shm_handle *handle)
+                                     struct lttng_ust_shm_handle *handle)
 {
-       const struct lib_ring_buffer_config *config = chan->backend.config;
+       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
        unsigned long beginidx = subbuf_index(offsets->begin, chan);
        unsigned long commit_count;
 
@@ -1060,13 +1167,13 @@ void lib_ring_buffer_switch_new_start(struct lib_ring_buffer *buf,
  * have to do the deliver themselves.
  */
 static
-void lib_ring_buffer_switch_new_end(struct lib_ring_buffer *buf,
+void lib_ring_buffer_switch_new_end(struct lttng_ust_lib_ring_buffer *buf,
                                    struct channel *chan,
                                    struct switch_offsets *offsets,
                                    u64 tsc,
-                                   struct shm_handle *handle)
+                                   struct lttng_ust_shm_handle *handle)
 {
-       const struct lib_ring_buffer_config *config = chan->backend.config;
+       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
        unsigned long endidx = subbuf_index(offsets->end - 1, chan);
        unsigned long commit_count, padding_size, data_size;
 
@@ -1096,12 +1203,12 @@ void lib_ring_buffer_switch_new_end(struct lib_ring_buffer *buf,
  */
 static
 int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
-                                   struct lib_ring_buffer *buf,
+                                   struct lttng_ust_lib_ring_buffer *buf,
                                    struct channel *chan,
                                    struct switch_offsets *offsets,
                                    u64 *tsc)
 {
-       const struct lib_ring_buffer_config *config = chan->backend.config;
+       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
        unsigned long off;
 
        offsets->begin = v_read(config, &buf->offset);
@@ -1128,7 +1235,7 @@ int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
         * quiescence guarantees for the fusion merge.
         */
        if (mode == SWITCH_FLUSH || off > 0) {
-               if (unlikely(off == 0)) {
+               if (caa_unlikely(off == 0)) {
                        /*
                         * The client does not save any header information.
                         * Don't switch empty subbuffer on finalize, because it
@@ -1157,11 +1264,11 @@ int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
  * operations, this function must be called from the CPU which owns the buffer
  * for a ACTIVE flush.
  */
-void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf, enum switch_mode mode,
-                                struct shm_handle *handle)
+void lib_ring_buffer_switch_slow(struct lttng_ust_lib_ring_buffer *buf, enum switch_mode mode,
+                                struct lttng_ust_shm_handle *handle)
 {
        struct channel *chan = shmp(handle, buf->backend.chan);
-       const struct lib_ring_buffer_config *config = chan->backend.config;
+       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
        struct switch_offsets offsets;
        unsigned long oldidx;
        u64 tsc;
@@ -1216,13 +1323,13 @@ void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf, enum switch_mode m
  * -EIO if data cannot be written into the buffer for any other reason.
  */
 static
-int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
+int lib_ring_buffer_try_reserve_slow(struct lttng_ust_lib_ring_buffer *buf,
                                     struct channel *chan,
                                     struct switch_offsets *offsets,
-                                    struct lib_ring_buffer_ctx *ctx)
+                                    struct lttng_ust_lib_ring_buffer_ctx *ctx)
 {
-       const struct lib_ring_buffer_config *config = chan->backend.config;
-       struct shm_handle *handle = ctx->handle;
+       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+       struct lttng_ust_shm_handle *handle = ctx->handle;
        unsigned long reserve_commit_diff;
 
        offsets->begin = v_read(config, &buf->offset);
@@ -1239,7 +1346,7 @@ int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
        if (last_tsc_overflow(config, buf, ctx->tsc))
                ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
 
-       if (unlikely(subbuf_offset(offsets->begin, ctx->chan) == 0)) {
+       if (caa_unlikely(subbuf_offset(offsets->begin, ctx->chan) == 0)) {
                offsets->switch_new_start = 1;          /* For offsets->begin */
        } else {
                offsets->size = config->cb.record_header_size(config, chan,
@@ -1250,19 +1357,19 @@ int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
                        lib_ring_buffer_align(offsets->begin + offsets->size,
                                              ctx->largest_align)
                        + ctx->data_size;
-               if (unlikely(subbuf_offset(offsets->begin, chan) +
+               if (caa_unlikely(subbuf_offset(offsets->begin, chan) +
                             offsets->size > chan->backend.subbuf_size)) {
                        offsets->switch_old_end = 1;    /* For offsets->old */
                        offsets->switch_new_start = 1;  /* For offsets->begin */
                }
        }
-       if (unlikely(offsets->switch_new_start)) {
+       if (caa_unlikely(offsets->switch_new_start)) {
                unsigned long sb_index;
 
                /*
                 * We are typically not filling the previous buffer completely.
                 */
-               if (likely(offsets->switch_old_end))
+               if (caa_likely(offsets->switch_old_end))
                        offsets->begin = subbuf_align(offsets->begin, chan);
                offsets->begin = offsets->begin
                                 + config->cb.subbuffer_header_size();
@@ -1274,9 +1381,9 @@ int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
                  - ((unsigned long) v_read(config,
                                            &shmp_index(handle, buf->commit_cold, sb_index)->cc_sb)
                     & chan->commit_count_mask);
-               if (likely(reserve_commit_diff == 0)) {
+               if (caa_likely(reserve_commit_diff == 0)) {
                        /* Next subbuffer not being written to. */
-                       if (unlikely(config->mode != RING_BUFFER_OVERWRITE &&
+                       if (caa_unlikely(config->mode != RING_BUFFER_OVERWRITE &&
                                subbuf_trunc(offsets->begin, chan)
                                 - subbuf_trunc((unsigned long)
                                     uatomic_read(&buf->consumed), chan)
@@ -1314,7 +1421,7 @@ int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
                        lib_ring_buffer_align(offsets->begin + offsets->size,
                                              ctx->largest_align)
                        + ctx->data_size;
-               if (unlikely(subbuf_offset(offsets->begin, chan)
+               if (caa_unlikely(subbuf_offset(offsets->begin, chan)
                             + offsets->size > chan->backend.subbuf_size)) {
                        /*
                         * Record too big for subbuffers, report error, don't
@@ -1336,7 +1443,7 @@ int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
        }
        offsets->end = offsets->begin + offsets->size;
 
-       if (unlikely(subbuf_offset(offsets->end, chan) == 0)) {
+       if (caa_unlikely(subbuf_offset(offsets->end, chan) == 0)) {
                /*
                 * The offset_end will fall at the very beginning of the next
                 * subbuffer.
@@ -1354,12 +1461,12 @@ int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
  * -EIO for other errors, else returns 0.
  * It will take care of sub-buffer switching.
  */
-int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx)
+int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx)
 {
        struct channel *chan = ctx->chan;
-       struct shm_handle *handle = ctx->handle;
-       const struct lib_ring_buffer_config *config = chan->backend.config;
-       struct lib_ring_buffer *buf;
+       struct lttng_ust_shm_handle *handle = ctx->handle;
+       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+       struct lttng_ust_lib_ring_buffer *buf;
        struct switch_offsets offsets;
        int ret;
 
@@ -1374,9 +1481,9 @@ int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx)
        do {
                ret = lib_ring_buffer_try_reserve_slow(buf, chan, &offsets,
                                                       ctx);
-               if (unlikely(ret))
+               if (caa_unlikely(ret))
                        return ret;
-       } while (unlikely(v_cmpxchg(config, &buf->offset, offsets.old,
+       } while (caa_unlikely(v_cmpxchg(config, &buf->offset, offsets.old,
                                    offsets.end)
                          != offsets.old));
 
@@ -1403,7 +1510,7 @@ int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx)
        /*
         * Switch old subbuffer if needed.
         */
-       if (unlikely(offsets.switch_old_end)) {
+       if (caa_unlikely(offsets.switch_old_end)) {
                lib_ring_buffer_clear_noref(config, &buf->backend,
                                            subbuf_index(offsets.old - 1, chan),
                                            handle);
@@ -1413,10 +1520,10 @@ int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx)
        /*
         * Populate new subbuffer.
         */
-       if (unlikely(offsets.switch_new_start))
+       if (caa_unlikely(offsets.switch_new_start))
                lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx->tsc, handle);
 
-       if (unlikely(offsets.switch_new_end))
+       if (caa_unlikely(offsets.switch_new_end))
                lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx->tsc, handle);
 
        ctx->slot_size = offsets.size;
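The unlikely()/likely() to caa_unlikely()/caa_likely() substitutions in the
hunks above move from the kernel-style hints to the userspace-RCU ones from
urcu/compiler.h, which are (paraphrased) thin wrappers over __builtin_expect:

#define caa_likely(x)	__builtin_expect(!!(x), 1)
#define caa_unlikely(x)	__builtin_expect(!!(x), 0)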