Rename struct lib_ring_buffer* to struct lttng_ust_lib_ring_buffer*
[lttng-ust.git] / libringbuffer / frontend_api.h
index 75146e60bef33a23937f71d27e254b486ae1f7c9..31072b6b3895edae8d61bb86a2affc3f3caa1b92 100644
@@ -37,7 +37,7 @@
  * section.
  */
 static inline
-int lib_ring_buffer_get_cpu(const struct lib_ring_buffer_config *config)
+int lib_ring_buffer_get_cpu(const struct lttng_ust_lib_ring_buffer_config *config)
 {
        int cpu, nesting;
 
@@ -59,7 +59,7 @@ int lib_ring_buffer_get_cpu(const struct lib_ring_buffer_config *config)
  * lib_ring_buffer_put_cpu - Follows ring buffer reserve/commit.
  */
 static inline
-void lib_ring_buffer_put_cpu(const struct lib_ring_buffer_config *config)
+void lib_ring_buffer_put_cpu(const struct lttng_ust_lib_ring_buffer_config *config)
 {
        cmm_barrier();
        lib_ring_buffer_nesting--;              /* TLS */
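
Together these two helpers bracket every reserve/commit pair: get_cpu() enters a per-thread nesting section (bounded at a depth of 4 so a probe firing recursively cannot run away) and put_cpu() leaves it. A minimal standalone model of the idea, with invented names, assuming glibc's sched_getcpu(); the real helpers additionally take the RCU read lock and use the library's TLS counter:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

static __thread int nesting;            /* stand-in for lib_ring_buffer_nesting (TLS) */

static int tracer_get_cpu(void)
{
        int cpu = sched_getcpu();       /* CPU hint; only stable while not migrated */

        if (++nesting > 4) {            /* nesting limit mirrors the header's guard */
                --nesting;
                return -1;              /* refuse to reserve: recursion too deep */
        }
        return cpu;
}

static void tracer_put_cpu(void)
{
        --nesting;                      /* leave the reserve/commit section */
}

int main(void)
{
        int cpu = tracer_get_cpu();

        if (cpu >= 0) {
                printf("nesting section entered on cpu %d\n", cpu);
                tracer_put_cpu();
        }
        return 0;
}
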
@@ -73,13 +73,13 @@ void lib_ring_buffer_put_cpu(const struct lib_ring_buffer_config *config)
  * returns 0 if reserve ok, or 1 if the slow path must be taken.
  */
 static inline
-int lib_ring_buffer_try_reserve(const struct lib_ring_buffer_config *config,
-                               struct lib_ring_buffer_ctx *ctx,
+int lib_ring_buffer_try_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
+                               struct lttng_ust_lib_ring_buffer_ctx *ctx,
                                unsigned long *o_begin, unsigned long *o_end,
                                unsigned long *o_old, size_t *before_hdr_pad)
 {
        struct channel *chan = ctx->chan;
-       struct lib_ring_buffer *buf = ctx->buf;
+       struct lttng_ust_lib_ring_buffer *buf = ctx->buf;
        *o_begin = v_read(config, &buf->offset);
        *o_old = *o_begin;
 
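
The fast path can only succeed when the whole record fits in the current sub-buffer; a record that would cross a sub-buffer boundary must take the slow path, which handles the sub-buffer switch. Because sub-buffer sizes are powers of two, the test is plain mask arithmetic; a hedged sketch with invented names:

#include <stddef.h>

/* Nonzero means the slow path must be taken. Assumes subbuf_size is a
 * power of two, as the ring buffer requires. */
static int must_take_slow_path(size_t subbuf_size,
                               size_t o_begin, size_t slot_size)
{
        size_t offset_in_subbuf = o_begin & (subbuf_size - 1);

        /* The record does not fit in what remains of the current
         * sub-buffer, so a sub-buffer switch is needed out of line. */
        return offset_in_subbuf + slot_size > subbuf_size;
}
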
@@ -142,11 +142,12 @@ int lib_ring_buffer_try_reserve(const struct lib_ring_buffer_config *config,
  */
 
 static inline
-int lib_ring_buffer_reserve(const struct lib_ring_buffer_config *config,
-                           struct lib_ring_buffer_ctx *ctx)
+int lib_ring_buffer_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
+                           struct lttng_ust_lib_ring_buffer_ctx *ctx)
 {
        struct channel *chan = ctx->chan;
-       struct lib_ring_buffer *buf;
+       struct lttng_ust_shm_handle *handle = ctx->handle;
+       struct lttng_ust_lib_ring_buffer *buf;
        unsigned long o_begin, o_end, o_old;
        size_t before_hdr_pad = 0;
 
@@ -154,9 +155,9 @@ int lib_ring_buffer_reserve(const struct lib_ring_buffer_config *config,
                return -EAGAIN;
 
        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
-               buf = &shmp(chan->backend.buf)[ctx->cpu];
+               buf = shmp(handle, chan->backend.buf[ctx->cpu].shmp);
        else
-               buf = shmp(chan->backend.buf);
+               buf = shmp(handle, chan->backend.buf[0].shmp);
        if (uatomic_read(&buf->record_disabled))
                return -EAGAIN;
        ctx->buf = buf;
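
The notable change in this hunk is not the rename: chan->backend.buf is no longer a plain pointer array but a table of shared-memory references, and shmp() needs the per-process handle to resolve one into a local address, because the traced application and the consumer map the same segment at different base addresses. A toy model of that translation, not the real shmp()/shm_ref types:

#include <stddef.h>

struct shm_ref_sketch {
        size_t offset;                  /* position inside the shm segment */
};

struct shm_handle_sketch {
        char *base;                     /* this process's mapping of the segment */
        size_t size;                    /* mapped length, used for bounds checks */
};

static void *shmp_sketch(const struct shm_handle_sketch *handle,
                         struct shm_ref_sketch ref)
{
        if (ref.offset >= handle->size)
                return NULL;            /* reject an out-of-bounds reference */
        return handle->base + ref.offset;
}
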
@@ -189,7 +190,7 @@ int lib_ring_buffer_reserve(const struct lib_ring_buffer_config *config,
         * Clear noref flag for this subbuffer.
         */
        lib_ring_buffer_clear_noref(config, &ctx->buf->backend,
-                               subbuf_index(o_end - 1, chan));
+                               subbuf_index(o_end - 1, chan), handle);
 
        ctx->pre_offset = o_begin;
        ctx->buf_offset = o_begin + before_hdr_pad;
@@ -213,10 +214,11 @@ slow_path:
  * disabled, for RING_BUFFER_SYNC_PER_CPU configuration.
  */
 static inline
-void lib_ring_buffer_switch(const struct lib_ring_buffer_config *config,
-                           struct lib_ring_buffer *buf, enum switch_mode mode)
+void lib_ring_buffer_switch(const struct lttng_ust_lib_ring_buffer_config *config,
+                           struct lttng_ust_lib_ring_buffer *buf, enum switch_mode mode,
+                           struct lttng_ust_shm_handle *handle)
 {
-       lib_ring_buffer_switch_slow(buf, mode);
+       lib_ring_buffer_switch_slow(buf, mode, handle);
 }
 
 /* See ring_buffer_frontend_api.h for lib_ring_buffer_reserve(). */
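
The switch wrapper now forwards the handle to the slow path, so flush callers must have it on hand. A hedged usage sketch, assuming SWITCH_FLUSH is among this tree's enum switch_mode values and that frontend_api.h is reachable as an include:

#include "frontend_api.h"

/* Force the current sub-buffer out to readers immediately. */
static void flush_now(const struct lttng_ust_lib_ring_buffer_config *config,
                      struct lttng_ust_lib_ring_buffer *buf,
                      struct lttng_ust_shm_handle *handle)
{
        lib_ring_buffer_switch(config, buf, SWITCH_FLUSH, handle);
}
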
@@ -230,11 +232,12 @@ void lib_ring_buffer_switch(const struct lib_ring_buffer_config *config,
  * specified sub-buffer, and delivers it if necessary.
  */
 static inline
-void lib_ring_buffer_commit(const struct lib_ring_buffer_config *config,
-                           const struct lib_ring_buffer_ctx *ctx)
+void lib_ring_buffer_commit(const struct lttng_ust_lib_ring_buffer_config *config,
+                           const struct lttng_ust_lib_ring_buffer_ctx *ctx)
 {
        struct channel *chan = ctx->chan;
-       struct lib_ring_buffer *buf = ctx->buf;
+       struct lttng_ust_shm_handle *handle = ctx->handle;
+       struct lttng_ust_lib_ring_buffer *buf = ctx->buf;
        unsigned long offset_end = ctx->buf_offset;
        unsigned long endidx = subbuf_index(offset_end - 1, chan);
        unsigned long commit_count;
@@ -242,7 +245,7 @@ void lib_ring_buffer_commit(const struct lib_ring_buffer_config *config,
        /*
         * Must count record before incrementing the commit count.
         */
-       subbuffer_count_record(config, &buf->backend, endidx);
+       subbuffer_count_record(config, &buf->backend, endidx, handle);
 
        /*
         * Order all writes to buffer before the commit count update that will
@@ -250,7 +253,7 @@ void lib_ring_buffer_commit(const struct lib_ring_buffer_config *config,
         */
        cmm_smp_wmb();
 
-       v_add(config, ctx->slot_size, &shmp(buf->commit_hot)[endidx].cc);
+       v_add(config, ctx->slot_size, &shmp_index(handle, buf->commit_hot, endidx)->cc);
 
        /*
         * commit count read can race with concurrent OOO commit count updates.
@@ -270,17 +273,17 @@ void lib_ring_buffer_commit(const struct lib_ring_buffer_config *config,
         *   count reaches back the reserve offset for a specific sub-buffer,
         *   which is completely independent of the order.
         */
-       commit_count = v_read(config, &shmp(buf->commit_hot)[endidx].cc);
+       commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, endidx)->cc);
 
        lib_ring_buffer_check_deliver(config, buf, chan, offset_end - 1,
-                                     commit_count, endidx);
+                                     commit_count, endidx, handle);
        /*
         * Update used size at each commit. It's needed only for extracting
         * ring_buffer buffers from vmcore, after crash.
         */
        lib_ring_buffer_write_commit_counter(config, buf, chan, endidx,
                                             ctx->buf_offset, commit_count,
-                                        ctx->slot_size);
+                                            ctx->slot_size, handle);
 }
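
The wmb-then-update sequence in commit is the classic publish pattern: every payload store must be visible before the commit count that announces it, or a reader could consume uninitialized bytes. A self-contained model using C11 release/acquire in place of liburcu's cmm_smp_wmb(); all names here are illustrative:

#include <stdatomic.h>
#include <string.h>

struct slot_sketch {
        char payload[64];
        atomic_size_t committed;        /* bytes the reader may consume */
};

/* Writer side: fill the payload, then publish it. len must be <= 64. */
static void commit_sketch(struct slot_sketch *slot, const char *data, size_t len)
{
        memcpy(slot->payload, data, len);
        /* Release ordering stands in for cmm_smp_wmb(): no payload store
         * above can be reordered past this counter update. */
        atomic_fetch_add_explicit(&slot->committed, len, memory_order_release);
}

/* Reader side: pairs with the release above, so once a count is
 * observed, the payload bytes it covers are guaranteed visible. */
static size_t reader_peek(struct slot_sketch *slot)
{
        return atomic_load_explicit(&slot->committed, memory_order_acquire);
}
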
 
 /**
@@ -294,10 +297,10 @@ void lib_ring_buffer_commit(const struct lib_ring_buffer_config *config,
  * Returns 0 upon success, -EPERM if the record cannot be discarded.
  */
 static inline
-int lib_ring_buffer_try_discard_reserve(const struct lib_ring_buffer_config *config,
-                                       const struct lib_ring_buffer_ctx *ctx)
+int lib_ring_buffer_try_discard_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
+                                       const struct lttng_ust_lib_ring_buffer_ctx *ctx)
 {
-       struct lib_ring_buffer *buf = ctx->buf;
+       struct lttng_ust_lib_ring_buffer *buf = ctx->buf;
        unsigned long end_offset = ctx->pre_offset + ctx->slot_size;
 
        /*
@@ -322,29 +325,29 @@ int lib_ring_buffer_try_discard_reserve(const struct lib_ring_buffer_config *con
 }
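
A typical caller commits on success and discards when the record could not be filled in, so no partial record is ever published. A hedged sketch; lib_ring_buffer_write() is assumed to keep the copy-helper signature it has in backend.h:

#include <stddef.h>
#include "frontend_api.h"

static int commit_or_discard(const struct lttng_ust_lib_ring_buffer_config *config,
                             struct lttng_ust_lib_ring_buffer_ctx *ctx,
                             const void *data, size_t len, int prep_ok)
{
        if (!prep_ok)
                /* Back out the reservation; fails with -EPERM if a later
                 * writer already reserved behind us. */
                return lib_ring_buffer_try_discard_reserve(config, ctx);
        lib_ring_buffer_write(config, ctx, data, len);
        lib_ring_buffer_commit(config, ctx);
        return 0;
}
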
 
 static inline
-void channel_record_disable(const struct lib_ring_buffer_config *config,
+void channel_record_disable(const struct lttng_ust_lib_ring_buffer_config *config,
                            struct channel *chan)
 {
        uatomic_inc(&chan->record_disabled);
 }
 
 static inline
-void channel_record_enable(const struct lib_ring_buffer_config *config,
+void channel_record_enable(const struct lttng_ust_lib_ring_buffer_config *config,
                           struct channel *chan)
 {
        uatomic_dec(&chan->record_disabled);
 }
 
 static inline
-void lib_ring_buffer_record_disable(const struct lib_ring_buffer_config *config,
-                                   struct lib_ring_buffer *buf)
+void lib_ring_buffer_record_disable(const struct lttng_ust_lib_ring_buffer_config *config,
+                                   struct lttng_ust_lib_ring_buffer *buf)
 {
        uatomic_inc(&buf->record_disabled);
 }
 
 static inline
-void lib_ring_buffer_record_enable(const struct lib_ring_buffer_config *config,
-                                  struct lib_ring_buffer *buf)
+void lib_ring_buffer_record_enable(const struct lttng_ust_lib_ring_buffer_config *config,
+                                  struct lttng_ust_lib_ring_buffer *buf)
 {
        uatomic_dec(&buf->record_disabled);
 }
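
Putting the renamed API together, the fast path after this change looks roughly as follows. This is a hedged end-to-end sketch: lib_ring_buffer_ctx_init() and lib_ring_buffer_write() come from this tree's backend headers, and the exact ctx_init parameter list (in particular the trailing handle argument) is an assumption to check against backend.h:

#include <stddef.h>
#include <errno.h>
#include "frontend_api.h"

static int trace_record(const struct lttng_ust_lib_ring_buffer_config *config,
                        struct channel *chan,
                        struct lttng_ust_shm_handle *handle,
                        const void *payload, size_t len)
{
        struct lttng_ust_lib_ring_buffer_ctx ctx;
        int cpu, ret;

        cpu = lib_ring_buffer_get_cpu(config);  /* enter nesting section */
        if (cpu < 0)
                return -EPERM;
        /* Assumed signature: ctx carries the handle from here on, which is
         * why reserve/commit callers need no extra parameter. */
        lib_ring_buffer_ctx_init(&ctx, chan, NULL, len, 1, cpu, handle);
        ret = lib_ring_buffer_reserve(config, &ctx);
        if (ret)
                goto put;
        lib_ring_buffer_write(config, &ctx, payload, len);
        lib_ring_buffer_commit(config, &ctx);
put:
        lib_ring_buffer_put_cpu(config);        /* leave nesting section */
        return ret;
}
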