Move to kernel style SPDX license identifiers
diff --git a/libringbuffer/frontend_api.h b/libringbuffer/frontend_api.h
index 1d8e29493655da9709cec47909167177a7c1bcc6..f746c98ec88028008779033e8d558c2693fa72fb 100644
--- a/libringbuffer/frontend_api.h
+++ b/libringbuffer/frontend_api.h
@@ -1,34 +1,29 @@
-#ifndef _LINUX_RING_BUFFER_FRONTEND_API_H
-#define _LINUX_RING_BUFFER_FRONTEND_API_H
-
 /*
- * linux/ringbuffer/frontend_api.h
- *
- * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * Ring Buffer Library Synchronization Header (buffer write API).
+ * SPDX-License-Identifier: LGPL-2.1-only
  *
- * Author:
- *     Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
  *
- * See ring_buffer_frontend.c for more information on wait-free algorithms.
- * See linux/ringbuffer/frontend.h for channel allocation and read-side API.
- *
- * Dual LGPL v2.1/GPL v2 license.
+ * See ring_buffer_frontend.c for more information on wait-free
+ * algorithms.
+ * See frontend.h for channel allocation and read-side API.
  */
 
-#include "frontend.h"
-#include "ust/core.h"
-#include <urcu-bp.h>
+#ifndef _LTTNG_RING_BUFFER_FRONTEND_API_H
+#define _LTTNG_RING_BUFFER_FRONTEND_API_H
+
+#include <stddef.h>
+
 #include <urcu/compiler.h>
 
+#include "frontend.h"
+
 /**
  * lib_ring_buffer_get_cpu - Precedes ring buffer reserve/commit.
  *
- * Grabs RCU read-side lock and keeps a ring buffer nesting count as
- * supplementary safety net to ensure tracer client code will never
- * trigger an endless recursion. Returns the processor ID on success,
- * -EPERM on failure (nesting count too high).
+ * Keeps a ring buffer nesting count as supplementary safety net to
+ * ensure tracer client code will never trigger an endless recursion.
+ * Returns the processor ID on success, -EPERM on failure (nesting count
+ * too high).
  *
  * asm volatile and "memory" clobber prevent the compiler from moving
  * instructions out of the ring buffer nesting count. This is required to ensure
@@ -37,19 +32,17 @@
  * section.
  */
 static inline
-int lib_ring_buffer_get_cpu(const struct lib_ring_buffer_config *config)
+int lib_ring_buffer_get_cpu(const struct lttng_ust_lib_ring_buffer_config *config)
 {
        int cpu, nesting;
 
-       rcu_read_lock();
-       cpu = ust_get_cpu();
-       nesting = ++lib_ring_buffer_nesting;    /* TLS */
+       cpu = lttng_ust_get_cpu();
+       nesting = ++URCU_TLS(lib_ring_buffer_nesting);
        cmm_barrier();
 
-       if (unlikely(nesting > 4)) {
+       if (caa_unlikely(nesting > 4)) {
                WARN_ON_ONCE(1);
-               lib_ring_buffer_nesting--;      /* TLS */
-               rcu_read_unlock();
+               URCU_TLS(lib_ring_buffer_nesting)--;
                return -EPERM;
        } else
                return cpu;
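
Taken together, the get_cpu/put_cpu pair is meant to bracket every reserve/commit cycle, with the TLS nesting count capping recursion at four levels. A minimal usage sketch under that contract follows; the helper name is hypothetical, the payload write is elided, and the ctx is assumed to be already populated (channel, data size, alignment):

static inline
int trace_record_sketch(const struct lttng_ust_lib_ring_buffer_config *config,
			struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
	int cpu, ret;

	cpu = lib_ring_buffer_get_cpu(config);	/* bumps the TLS nesting count */
	if (cpu < 0)
		return -EPERM;	/* nesting too deep: count already rolled back */
	ctx->cpu = cpu;
	ret = lib_ring_buffer_reserve(config, ctx, NULL);
	if (ret)
		goto put;	/* buffer full or disabled: nothing reserved */
	/* ... write the record payload at ctx->buf_offset ... */
	lib_ring_buffer_commit(config, ctx);
put:
	lib_ring_buffer_put_cpu(config);	/* drops the TLS nesting count */
	return ret;
}
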
@@ -59,11 +52,10 @@ int lib_ring_buffer_get_cpu(const struct lib_ring_buffer_config *config)
  * lib_ring_buffer_put_cpu - Follows ring buffer reserve/commit.
  */
 static inline
-void lib_ring_buffer_put_cpu(const struct lib_ring_buffer_config *config)
+void lib_ring_buffer_put_cpu(const struct lttng_ust_lib_ring_buffer_config *config)
 {
        cmm_barrier();
-       lib_ring_buffer_nesting--;              /* TLS */
-       rcu_read_unlock();
+       URCU_TLS(lib_ring_buffer_nesting)--;            /* TLS */
 }
 
 /*
@@ -73,13 +65,14 @@ void lib_ring_buffer_put_cpu(const struct lib_ring_buffer_config *config)
  * returns 0 if reserve ok, or 1 if the slow path must be taken.
  */
 static inline
-int lib_ring_buffer_try_reserve(const struct lib_ring_buffer_config *config,
-                               struct lib_ring_buffer_ctx *ctx,
+int lib_ring_buffer_try_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
+                               struct lttng_ust_lib_ring_buffer_ctx *ctx,
+                               void *client_ctx,
                                unsigned long *o_begin, unsigned long *o_end,
                                unsigned long *o_old, size_t *before_hdr_pad)
 {
        struct channel *chan = ctx->chan;
-       struct lib_ring_buffer *buf = ctx->buf;
+       struct lttng_ust_lib_ring_buffer *buf = ctx->buf;
        *o_begin = v_read(config, &buf->offset);
        *o_old = *o_begin;
 
@@ -97,15 +90,15 @@ int lib_ring_buffer_try_reserve(const struct lib_ring_buffer_config *config,
        if (last_tsc_overflow(config, buf, ctx->tsc))
                ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
 
-       if (unlikely(subbuf_offset(*o_begin, chan) == 0))
+       if (caa_unlikely(subbuf_offset(*o_begin, chan) == 0))
                return 1;
 
        ctx->slot_size = record_header_size(config, chan, *o_begin,
-                                           before_hdr_pad, ctx);
+                                           before_hdr_pad, ctx, client_ctx);
        ctx->slot_size +=
                lib_ring_buffer_align(*o_begin + ctx->slot_size,
                                      ctx->largest_align) + ctx->data_size;
-       if (unlikely((subbuf_offset(*o_begin, chan) + ctx->slot_size)
+       if (caa_unlikely((subbuf_offset(*o_begin, chan) + ctx->slot_size)
                     > chan->backend.subbuf_size))
                return 1;
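
The slot size computed above is the record header (including any before_hdr_pad) followed by padding that aligns the payload on ctx->largest_align. For power-of-two alignments, the padding arithmetic behind a helper like lib_ring_buffer_align() reduces to the following illustration (a sketch, not the library's exact implementation):

#include <stddef.h>

/* Bytes of padding needed to bring offset up to the next multiple of
 * align (align must be a power of two).  E.g. offset 13, align 8 ->
 * 3 bytes of padding, so the aligned payload starts at offset 16. */
static inline size_t align_pad(size_t offset, size_t align)
{
	return (-offset) & (align - 1);
}
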
 
@@ -115,7 +108,7 @@ int lib_ring_buffer_try_reserve(const struct lib_ring_buffer_config *config,
         */
        *o_end = *o_begin + ctx->slot_size;
 
-       if (unlikely((subbuf_offset(*o_end, chan)) == 0))
+       if (caa_unlikely((subbuf_offset(*o_end, chan)) == 0))
                /*
                 * The offset_end will fall at the very beginning of the next
                 * subbuffer.
@@ -142,34 +135,37 @@ int lib_ring_buffer_try_reserve(const struct lib_ring_buffer_config *config,
  */
 
 static inline
-int lib_ring_buffer_reserve(const struct lib_ring_buffer_config *config,
-                           struct lib_ring_buffer_ctx *ctx)
+int lib_ring_buffer_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
+                           struct lttng_ust_lib_ring_buffer_ctx *ctx,
+                           void *client_ctx)
 {
        struct channel *chan = ctx->chan;
-       struct shm_handle *handle = ctx->handle;
-       struct lib_ring_buffer *buf;
+       struct lttng_ust_shm_handle *handle = ctx->handle;
+       struct lttng_ust_lib_ring_buffer *buf;
        unsigned long o_begin, o_end, o_old;
        size_t before_hdr_pad = 0;
 
-       if (uatomic_read(&chan->record_disabled))
+       if (caa_unlikely(uatomic_read(&chan->record_disabled)))
                return -EAGAIN;
 
        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
                buf = shmp(handle, chan->backend.buf[ctx->cpu].shmp);
        else
                buf = shmp(handle, chan->backend.buf[0].shmp);
-       if (uatomic_read(&buf->record_disabled))
+       if (caa_unlikely(!buf))
+               return -EIO;
+       if (caa_unlikely(uatomic_read(&buf->record_disabled)))
                return -EAGAIN;
        ctx->buf = buf;
 
        /*
         * Perform retryable operations.
         */
-       if (unlikely(lib_ring_buffer_try_reserve(config, ctx, &o_begin,
+       if (caa_unlikely(lib_ring_buffer_try_reserve(config, ctx, client_ctx, &o_begin,
                                                 &o_end, &o_old, &before_hdr_pad)))
                goto slow_path;
 
-       if (unlikely(v_cmpxchg(config, &ctx->buf->offset, o_old, o_end)
+       if (caa_unlikely(v_cmpxchg(config, &ctx->buf->offset, o_old, o_end)
                     != o_old))
                goto slow_path;
 
@@ -196,7 +192,7 @@ int lib_ring_buffer_reserve(const struct lib_ring_buffer_config *config,
        ctx->buf_offset = o_begin + before_hdr_pad;
        return 0;
 slow_path:
-       return lib_ring_buffer_reserve_slow(ctx);
+       return lib_ring_buffer_reserve_slow(ctx, client_ctx);
 }
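
The fast path above is a single compare-and-swap on the buffer's write offset: each writer snapshots the offset, computes where its slot would end, and publishes the new offset atomically, falling back to the slow path on contention or sub-buffer boundaries. Stripped of the library's v_cmpxchg() wrapper, the core idea looks like this C11 sketch:

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct toy_buf {
	_Atomic unsigned long offset;	/* write head, in bytes */
};

/* Returns true and sets *slot_begin on success; false means another
 * writer moved the head first and the caller must retry or bail out. */
static bool toy_try_reserve(struct toy_buf *buf, size_t slot_size,
			    unsigned long *slot_begin)
{
	unsigned long old = atomic_load_explicit(&buf->offset,
						 memory_order_relaxed);

	*slot_begin = old;
	return atomic_compare_exchange_strong(&buf->offset, &old,
					      old + slot_size);
}
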
 
 /**
@@ -214,9 +210,9 @@ slow_path:
  * disabled, for RING_BUFFER_SYNC_PER_CPU configuration.
  */
 static inline
-void lib_ring_buffer_switch(const struct lib_ring_buffer_config *config,
-                           struct lib_ring_buffer *buf, enum switch_mode mode,
-                           struct shm_handle *handle)
+void lib_ring_buffer_switch(const struct lttng_ust_lib_ring_buffer_config *config,
+                           struct lttng_ust_lib_ring_buffer *buf, enum switch_mode mode,
+                           struct lttng_ust_shm_handle *handle)
 {
        lib_ring_buffer_switch_slow(buf, mode, handle);
 }
@@ -232,20 +228,25 @@ void lib_ring_buffer_switch(const struct lib_ring_buffer_config *config,
  * specified sub-buffer, and delivers it if necessary.
  */
 static inline
-void lib_ring_buffer_commit(const struct lib_ring_buffer_config *config,
-                           const struct lib_ring_buffer_ctx *ctx)
+void lib_ring_buffer_commit(const struct lttng_ust_lib_ring_buffer_config *config,
+                           const struct lttng_ust_lib_ring_buffer_ctx *ctx)
 {
        struct channel *chan = ctx->chan;
-       struct shm_handle *handle = ctx->handle;
-       struct lib_ring_buffer *buf = ctx->buf;
+       struct lttng_ust_shm_handle *handle = ctx->handle;
+       struct lttng_ust_lib_ring_buffer *buf = ctx->buf;
        unsigned long offset_end = ctx->buf_offset;
        unsigned long endidx = subbuf_index(offset_end - 1, chan);
        unsigned long commit_count;
+       struct commit_counters_hot *cc_hot = shmp_index(handle,
+                                               buf->commit_hot, endidx);
+
+       if (caa_unlikely(!cc_hot))
+               return;
 
        /*
         * Must count record before incrementing the commit count.
         */
-       subbuffer_count_record(config, &buf->backend, endidx, handle);
+       subbuffer_count_record(config, ctx, &buf->backend, endidx, handle);
 
        /*
         * Order all writes to buffer before the commit count update that will
@@ -253,7 +254,7 @@ void lib_ring_buffer_commit(const struct lib_ring_buffer_config *config,
         */
        cmm_smp_wmb();
 
-       v_add(config, ctx->slot_size, &shmp(handle, buf->commit_hot)[endidx].cc);
+       v_add(config, ctx->slot_size, &cc_hot->cc);
 
        /*
         * commit count read can race with concurrent OOO commit count updates.
@@ -273,17 +274,16 @@ void lib_ring_buffer_commit(const struct lib_ring_buffer_config *config,
         *   count reaches back the reserve offset for a specific sub-buffer,
         *   which is completely independent of the order.
         */
-       commit_count = v_read(config, &shmp(handle, buf->commit_hot)[endidx].cc);
+       commit_count = v_read(config, &cc_hot->cc);
 
        lib_ring_buffer_check_deliver(config, buf, chan, offset_end - 1,
-                                     commit_count, endidx, handle);
+                                     commit_count, endidx, handle, ctx->tsc);
        /*
         * Update used size at each commit. It's needed only for extracting
         * ring_buffer buffers from vmcore, after crash.
         */
-       lib_ring_buffer_write_commit_counter(config, buf, chan, endidx,
-                                            ctx->buf_offset, commit_count,
-                                            ctx->slot_size, handle);
+       lib_ring_buffer_write_commit_counter(config, buf, chan,
+                       offset_end, commit_count, handle, cc_hot);
 }
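
The cmm_smp_wmb() in the commit path is half of a write/read barrier pairing: a consumer that observes the updated commit count is guaranteed to also observe the record data written before it. In C11 release/acquire terms (the library itself uses explicit barriers, not atomics), the contract is roughly:

#include <stdatomic.h>

static unsigned long record_data;		/* stands in for the slot payload */
static _Atomic unsigned long commit_count;	/* stands in for cc_hot->cc */

static void writer(unsigned long payload)
{
	record_data = payload;			/* write the record... */
	atomic_fetch_add_explicit(&commit_count, 1,
				  memory_order_release);	/* ...then publish */
}

static int reader(unsigned long *out)
{
	if (atomic_load_explicit(&commit_count, memory_order_acquire) == 0)
		return 0;	/* nothing published yet */
	*out = record_data;	/* ordered after the counter read */
	return 1;
}
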
 
 /**
@@ -297,10 +297,10 @@ void lib_ring_buffer_commit(const struct lib_ring_buffer_config *config,
  * Returns 0 upon success, -EPERM if the record cannot be discarded.
  */
 static inline
-int lib_ring_buffer_try_discard_reserve(const struct lib_ring_buffer_config *config,
-                                       const struct lib_ring_buffer_ctx *ctx)
+int lib_ring_buffer_try_discard_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
+                                       const struct lttng_ust_lib_ring_buffer_ctx *ctx)
 {
-       struct lib_ring_buffer *buf = ctx->buf;
+       struct lttng_ust_lib_ring_buffer *buf = ctx->buf;
        unsigned long end_offset = ctx->pre_offset + ctx->slot_size;
 
        /*
@@ -317,7 +317,7 @@ int lib_ring_buffer_try_discard_reserve(const struct lib_ring_buffer_config *con
         */
        save_last_tsc(config, buf, 0ULL);
 
-       if (likely(v_cmpxchg(config, &buf->offset, end_offset, ctx->pre_offset)
+       if (caa_likely(v_cmpxchg(config, &buf->offset, end_offset, ctx->pre_offset)
                   != end_offset))
                return -EPERM;
        else
@@ -325,31 +325,31 @@ int lib_ring_buffer_try_discard_reserve(const struct lib_ring_buffer_config *con
 }
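
A writer typically reaches for try_discard_reserve when filling the payload fails after a successful reserve. The sketch below is hedged: copy_payload() is a hypothetical helper, and the assumed invariant is that a slot which cannot be discarded must still be committed so the commit count catches up with the reserve offset:

/* Hypothetical payload writer; returns negative on failure. */
static int copy_payload(struct lttng_ust_lib_ring_buffer_ctx *ctx);

static inline
int write_record_or_discard(const struct lttng_ust_lib_ring_buffer_config *config,
			    struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
	int ret;

	ret = lib_ring_buffer_reserve(config, ctx, NULL);
	if (ret)
		return ret;
	if (copy_payload(ctx) < 0) {
		/* Roll back if no later record was reserved meanwhile;
		 * if the discard fails (-EPERM), commit the slot as-is
		 * so the commit count still reaches the reserve offset. */
		if (lib_ring_buffer_try_discard_reserve(config, ctx))
			lib_ring_buffer_commit(config, ctx);
		return -EIO;	/* hypothetical error code for a failed write */
	}
	lib_ring_buffer_commit(config, ctx);
	return 0;
}
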
 
 static inline
-void channel_record_disable(const struct lib_ring_buffer_config *config,
+void channel_record_disable(const struct lttng_ust_lib_ring_buffer_config *config,
                            struct channel *chan)
 {
        uatomic_inc(&chan->record_disabled);
 }
 
 static inline
-void channel_record_enable(const struct lib_ring_buffer_config *config,
+void channel_record_enable(const struct lttng_ust_lib_ring_buffer_config *config,
                           struct channel *chan)
 {
        uatomic_dec(&chan->record_disabled);
 }
 
 static inline
-void lib_ring_buffer_record_disable(const struct lib_ring_buffer_config *config,
-                                   struct lib_ring_buffer *buf)
+void lib_ring_buffer_record_disable(const struct lttng_ust_lib_ring_buffer_config *config,
+                                   struct lttng_ust_lib_ring_buffer *buf)
 {
        uatomic_inc(&buf->record_disabled);
 }
 
 static inline
-void lib_ring_buffer_record_enable(const struct lib_ring_buffer_config *config,
-                                  struct lib_ring_buffer *buf)
+void lib_ring_buffer_record_enable(const struct lttng_ust_lib_ring_buffer_config *config,
+                                  struct lttng_ust_lib_ring_buffer *buf)
 {
        uatomic_dec(&buf->record_disabled);
 }
 
-#endif /* _LINUX_RING_BUFFER_FRONTEND_API_H */
+#endif /* _LTTNG_RING_BUFFER_FRONTEND_API_H */