Fix: pass proper args when writing commit counter
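
At commit time, ctx->buf_offset already points one past the record that
was just written: lib_ring_buffer_commit() initializes offset_end from
it. Passing it to lib_ring_buffer_write_commit_counter() as a start
offset together with ctx->slot_size therefore lets the helper derive an
end position one slot past the real end of the record, throwing off the
"used size" bookkeeping that is only needed to extract ring buffers
from a crash dump. Pass offset_end directly and drop the redundant
slot_size argument.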
diff --git a/libringbuffer/frontend_api.h b/libringbuffer/frontend_api.h
index f570cc168174de626d134998571120e8e10a81a7..56dbef2aa4326c8a4067ecc96ada66b887358afd 100644
--- a/libringbuffer/frontend_api.h
+++ b/libringbuffer/frontend_api.h
@@ -1,24 +1,36 @@
-#ifndef _LINUX_RING_BUFFER_FRONTEND_API_H
-#define _LINUX_RING_BUFFER_FRONTEND_API_H
+#ifndef _LTTNG_RING_BUFFER_FRONTEND_API_H
+#define _LTTNG_RING_BUFFER_FRONTEND_API_H
 
 /*
- * linux/ringbuffer/frontend_api.h
+ * libringbuffer/frontend_api.h
  *
- * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  *
  * Ring Buffer Library Synchronization Header (buffer write API).
  *
  * Author:
- *     Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * See ring_buffer_frontend.c for more information on wait-free algorithms.
- * See linux/ringbuffer/frontend.h for channel allocation and read-side API.
+ *      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
  *
- * Dual LGPL v2.1/GPL v2 license.
+ * See ring_buffer_frontend.c for more information on wait-free
+ * algorithms.
+ * See frontend.h for channel allocation and read-side API.
  */
 
 #include "frontend.h"
-#include "ust/core.h"
 #include <urcu-bp.h>
 #include <urcu/compiler.h>
 
  * section.
  */
 static inline
-int lib_ring_buffer_get_cpu(const struct lib_ring_buffer_config *config)
+int lib_ring_buffer_get_cpu(const struct lttng_ust_lib_ring_buffer_config *config)
 {
        int cpu, nesting;
 
        rcu_read_lock();
-       cpu = ust_get_cpu();
-       nesting = ++lib_ring_buffer_nesting;    /* TLS */
+       cpu = lttng_ust_get_cpu();
+       nesting = ++URCU_TLS(lib_ring_buffer_nesting);
        cmm_barrier();
 
-       if (unlikely(nesting > 4)) {
+       if (caa_unlikely(nesting > 4)) {
                WARN_ON_ONCE(1);
-               lib_ring_buffer_nesting--;      /* TLS */
+               URCU_TLS(lib_ring_buffer_nesting)--;
                rcu_read_unlock();
                return -EPERM;
        } else
@@ -59,10 +71,10 @@ int lib_ring_buffer_get_cpu(const struct lib_ring_buffer_config *config)
  * lib_ring_buffer_put_cpu - Follows ring buffer reserve/commit.
  */
 static inline
-void lib_ring_buffer_put_cpu(const struct lib_ring_buffer_config *config)
+void lib_ring_buffer_put_cpu(const struct lttng_ust_lib_ring_buffer_config *config)
 {
        cmm_barrier();
-       lib_ring_buffer_nesting--;              /* TLS */
+       URCU_TLS(lib_ring_buffer_nesting)--;            /* TLS */
        rcu_read_unlock();
 }
 
@@ -73,13 +85,13 @@ void lib_ring_buffer_put_cpu(const struct lib_ring_buffer_config *config)
  * returns 0 if reserve ok, or 1 if the slow path must be taken.
  */
 static inline
-int lib_ring_buffer_try_reserve(const struct lib_ring_buffer_config *config,
-                               struct lib_ring_buffer_ctx *ctx,
+int lib_ring_buffer_try_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
+                               struct lttng_ust_lib_ring_buffer_ctx *ctx,
                                unsigned long *o_begin, unsigned long *o_end,
                                unsigned long *o_old, size_t *before_hdr_pad)
 {
        struct channel *chan = ctx->chan;
-       struct lib_ring_buffer *buf = ctx->buf;
+       struct lttng_ust_lib_ring_buffer *buf = ctx->buf;
        *o_begin = v_read(config, &buf->offset);
        *o_old = *o_begin;
 
@@ -97,7 +109,7 @@ int lib_ring_buffer_try_reserve(const struct lib_ring_buffer_config *config,
        if (last_tsc_overflow(config, buf, ctx->tsc))
                ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
 
-       if (unlikely(subbuf_offset(*o_begin, chan) == 0))
+       if (caa_unlikely(subbuf_offset(*o_begin, chan) == 0))
                return 1;
 
        ctx->slot_size = record_header_size(config, chan, *o_begin,
@@ -105,7 +117,7 @@ int lib_ring_buffer_try_reserve(const struct lib_ring_buffer_config *config,
        ctx->slot_size +=
                lib_ring_buffer_align(*o_begin + ctx->slot_size,
                                      ctx->largest_align) + ctx->data_size;
-       if (unlikely((subbuf_offset(*o_begin, chan) + ctx->slot_size)
+       if (caa_unlikely((subbuf_offset(*o_begin, chan) + ctx->slot_size)
                     > chan->backend.subbuf_size))
                return 1;
 
@@ -115,7 +127,7 @@ int lib_ring_buffer_try_reserve(const struct lib_ring_buffer_config *config,
         */
        *o_end = *o_begin + ctx->slot_size;
 
-       if (unlikely((subbuf_offset(*o_end, chan)) == 0))
+       if (caa_unlikely((subbuf_offset(*o_end, chan)) == 0))
                /*
                 * The offset_end will fall at the very beginning of the next
                 * subbuffer.
@@ -142,12 +154,12 @@ int lib_ring_buffer_try_reserve(const struct lib_ring_buffer_config *config,
  */
 
 static inline
-int lib_ring_buffer_reserve(const struct lib_ring_buffer_config *config,
-                           struct lib_ring_buffer_ctx *ctx)
+int lib_ring_buffer_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
+                           struct lttng_ust_lib_ring_buffer_ctx *ctx)
 {
        struct channel *chan = ctx->chan;
-       struct shm_handle *handle = ctx->handle;
-       struct lib_ring_buffer *buf;
+       struct lttng_ust_shm_handle *handle = ctx->handle;
+       struct lttng_ust_lib_ring_buffer *buf;
        unsigned long o_begin, o_end, o_old;
        size_t before_hdr_pad = 0;
 
@@ -165,11 +177,11 @@ int lib_ring_buffer_reserve(const struct lib_ring_buffer_config *config,
        /*
         * Perform retryable operations.
         */
-       if (unlikely(lib_ring_buffer_try_reserve(config, ctx, &o_begin,
+       if (caa_unlikely(lib_ring_buffer_try_reserve(config, ctx, &o_begin,
                                                 &o_end, &o_old, &before_hdr_pad)))
                goto slow_path;
 
-       if (unlikely(v_cmpxchg(config, &ctx->buf->offset, o_old, o_end)
+       if (caa_unlikely(v_cmpxchg(config, &ctx->buf->offset, o_old, o_end)
                     != o_old))
                goto slow_path;
 
@@ -214,9 +226,9 @@ slow_path:
  * disabled, for RING_BUFFER_SYNC_PER_CPU configuration.
  */
 static inline
-void lib_ring_buffer_switch(const struct lib_ring_buffer_config *config,
-                           struct lib_ring_buffer *buf, enum switch_mode mode,
-                           struct shm_handle *handle)
+void lib_ring_buffer_switch(const struct lttng_ust_lib_ring_buffer_config *config,
+                           struct lttng_ust_lib_ring_buffer *buf, enum switch_mode mode,
+                           struct lttng_ust_shm_handle *handle)
 {
        lib_ring_buffer_switch_slow(buf, mode, handle);
 }
@@ -232,12 +244,12 @@ void lib_ring_buffer_switch(const struct lib_ring_buffer_config *config,
  * specified sub-buffer, and delivers it if necessary.
  */
 static inline
-void lib_ring_buffer_commit(const struct lib_ring_buffer_config *config,
-                           const struct lib_ring_buffer_ctx *ctx)
+void lib_ring_buffer_commit(const struct lttng_ust_lib_ring_buffer_config *config,
+                           const struct lttng_ust_lib_ring_buffer_ctx *ctx)
 {
        struct channel *chan = ctx->chan;
-       struct shm_handle *handle = ctx->handle;
-       struct lib_ring_buffer *buf = ctx->buf;
+       struct lttng_ust_shm_handle *handle = ctx->handle;
+       struct lttng_ust_lib_ring_buffer *buf = ctx->buf;
        unsigned long offset_end = ctx->buf_offset;
        unsigned long endidx = subbuf_index(offset_end - 1, chan);
        unsigned long commit_count;
@@ -276,14 +288,13 @@ void lib_ring_buffer_commit(const struct lib_ring_buffer_config *config,
        commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, endidx)->cc);
 
        lib_ring_buffer_check_deliver(config, buf, chan, offset_end - 1,
-                                     commit_count, endidx, handle);
+                                     commit_count, endidx, handle, ctx->tsc);
        /*
         * Update used size at each commit. It's needed only for extracting
         * ring_buffer buffers from vmcore, after crash.
         */
        lib_ring_buffer_write_commit_counter(config, buf, chan, endidx,
-                                            ctx->buf_offset, commit_count,
-                                            ctx->slot_size, handle);
+                       offset_end, commit_count, handle);
 }
 
 /**
@@ -297,10 +308,10 @@ void lib_ring_buffer_commit(const struct lib_ring_buffer_config *config,
  * Returns 0 upon success, -EPERM if the record cannot be discarded.
  */
 static inline
-int lib_ring_buffer_try_discard_reserve(const struct lib_ring_buffer_config *config,
-                                       const struct lib_ring_buffer_ctx *ctx)
+int lib_ring_buffer_try_discard_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
+                                       const struct lttng_ust_lib_ring_buffer_ctx *ctx)
 {
-       struct lib_ring_buffer *buf = ctx->buf;
+       struct lttng_ust_lib_ring_buffer *buf = ctx->buf;
        unsigned long end_offset = ctx->pre_offset + ctx->slot_size;
 
        /*
@@ -317,7 +328,7 @@ int lib_ring_buffer_try_discard_reserve(const struct lib_ring_buffer_config *con
         */
        save_last_tsc(config, buf, 0ULL);
 
-       if (likely(v_cmpxchg(config, &buf->offset, end_offset, ctx->pre_offset)
+       if (caa_likely(v_cmpxchg(config, &buf->offset, end_offset, ctx->pre_offset)
                   != end_offset))
                return -EPERM;
        else
@@ -325,31 +336,31 @@ int lib_ring_buffer_try_discard_reserve(const struct lib_ring_buffer_config *con
 }
 
 static inline
-void channel_record_disable(const struct lib_ring_buffer_config *config,
+void channel_record_disable(const struct lttng_ust_lib_ring_buffer_config *config,
                            struct channel *chan)
 {
        uatomic_inc(&chan->record_disabled);
 }
 
 static inline
-void channel_record_enable(const struct lib_ring_buffer_config *config,
+void channel_record_enable(const struct lttng_ust_lib_ring_buffer_config *config,
                           struct channel *chan)
 {
        uatomic_dec(&chan->record_disabled);
 }
 
 static inline
-void lib_ring_buffer_record_disable(const struct lib_ring_buffer_config *config,
-                                   struct lib_ring_buffer *buf)
+void lib_ring_buffer_record_disable(const struct lttng_ust_lib_ring_buffer_config *config,
+                                   struct lttng_ust_lib_ring_buffer *buf)
 {
        uatomic_inc(&buf->record_disabled);
 }
 
 static inline
-void lib_ring_buffer_record_enable(const struct lib_ring_buffer_config *config,
-                                  struct lib_ring_buffer *buf)
+void lib_ring_buffer_record_enable(const struct lttng_ust_lib_ring_buffer_config *config,
+                                  struct lttng_ust_lib_ring_buffer *buf)
 {
        uatomic_dec(&buf->record_disabled);
 }
 
-#endif /* _LINUX_RING_BUFFER_FRONTEND_API_H */
+#endif /* _LTTNG_RING_BUFFER_FRONTEND_API_H */
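
For context, here is a minimal sketch of the write-side sequence this
header exposes, pieced together from the functions touched by the diff.
It is illustrative only, not part of the patch: "client_config" is a
hypothetical client-supplied configuration, and lib_ring_buffer_ctx_init()
and lib_ring_buffer_write() are assumed to be available from
frontend_types.h and backend.h with the signatures used below.

	#include "frontend_api.h"

	/* Assumed to be defined by the ring buffer client (hypothetical). */
	extern const struct lttng_ust_lib_ring_buffer_config *client_config;

	static
	void trace_payload_sketch(struct channel *chan,
				  struct lttng_ust_shm_handle *handle,
				  const void *payload, size_t len)
	{
		struct lttng_ust_lib_ring_buffer_ctx ctx;
		int cpu;

		/* RCU read-side lock + per-thread nesting check. */
		cpu = lib_ring_buffer_get_cpu(client_config);
		if (cpu < 0)
			return;		/* nesting too deep: drop the record */

		/*
		 * Assumed helper from frontend_types.h: fills the reserve
		 * context. A largest_align of 1 suits a raw byte payload.
		 */
		lib_ring_buffer_ctx_init(&ctx, chan, NULL, len, 1, cpu, handle);

		if (lib_ring_buffer_reserve(client_config, &ctx))
			goto put;	/* buffer full in discard mode: drop */

		/*
		 * Assumed backend helper: copies the payload at
		 * ctx.buf_offset and advances it, which is what makes
		 * offset_end == ctx->buf_offset in lib_ring_buffer_commit().
		 */
		lib_ring_buffer_write(client_config, &ctx, payload, len);

		lib_ring_buffer_commit(client_config, &ctx);
	put:
		lib_ring_buffer_put_cpu(client_config);
	}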