Move to kernel style SPDX license identifiers
[lttng-ust.git] / libringbuffer / frontend_api.h
index 93f6760b28b938238171dda1cb3bd395352d362e..f746c98ec88028008779033e8d558c2693fa72fb 100644
@@ -1,39 +1,22 @@
-#ifndef _LTTNG_RING_BUFFER_FRONTEND_API_H
-#define _LTTNG_RING_BUFFER_FRONTEND_API_H
-
 /*
- * libringbuffer/frontend_api.h
+ * SPDX-License-Identifier: LGPL-2.1-only
  *
  * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
  *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; only
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * Ring Buffer Library Synchronization Header (buffer write API).
- *
- * Author:
- *      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
  * See ring_buffer_frontend.c for more information on wait-free
  * algorithms.
  * See frontend.h for channel allocation and read-side API.
  */
 
-#include "frontend.h"
-#include <urcu-bp.h>
+#ifndef _LTTNG_RING_BUFFER_FRONTEND_API_H
+#define _LTTNG_RING_BUFFER_FRONTEND_API_H
+
+#include <stddef.h>
+
 #include <urcu/compiler.h>
 
+#include "frontend.h"
+
 /**
  * lib_ring_buffer_get_cpu - Precedes ring buffer reserve/commit.
  *
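
Note: lib_ring_buffer_get_cpu() and lib_ring_buffer_put_cpu() bracket every reserve/commit pair. The former takes the urcu-bp read-side lock (this header originally included <urcu-bp.h> directly) and samples the current CPU so the writer can pick its per-CPU buffer; the latter drops the lock, which keeps buffer teardown from racing with a write in flight. A minimal standalone sketch of that bracket, using sched_getcpu() as a stand-in for the library's internal CPU lookup:

#define _GNU_SOURCE
#include <urcu-bp.h>	/* liburcu "bulletproof" flavor: rcu_read_lock() */
#include <sched.h>	/* sched_getcpu() */
#include <stdio.h>

static void write_one_record(void)
{
	int cpu;

	rcu_read_lock();	/* what lib_ring_buffer_get_cpu() does */
	cpu = sched_getcpu();	/* select this CPU's buffer */
	if (cpu >= 0) {
		/* ... reserve a slot in buffer[cpu], copy payload, commit ... */
		printf("would write to per-CPU buffer %d\n", cpu);
	}
	rcu_read_unlock();	/* what lib_ring_buffer_put_cpu() does */
}
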
@@ -84,6 +67,7 @@ void lib_ring_buffer_put_cpu(const struct lttng_ust_lib_ring_buffer_config *config)
 static inline
 int lib_ring_buffer_try_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
                                struct lttng_ust_lib_ring_buffer_ctx *ctx,
+                               void *client_ctx,
                                unsigned long *o_begin, unsigned long *o_end,
                                unsigned long *o_old, size_t *before_hdr_pad)
 {
@@ -110,7 +94,7 @@ int lib_ring_buffer_try_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
                return 1;
 
        ctx->slot_size = record_header_size(config, chan, *o_begin,
-                                           before_hdr_pad, ctx);
+                                           before_hdr_pad, ctx, client_ctx);
        ctx->slot_size +=
                lib_ring_buffer_align(*o_begin + ctx->slot_size,
                                      ctx->largest_align) + ctx->data_size;
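
The slot layout computed above is: record header at *o_begin, then padding so the payload meets ctx->largest_align, then data_size bytes of payload. For a power-of-two alignment the padding is ((align - offset) & (align - 1)), which is what the library's lib_ring_buffer_align()/offset_align() helpers reduce to. A worked example with made-up sizes (12-byte header at offset 64, 8-byte payload alignment, 16 bytes of data):

#include <stdio.h>
#include <stddef.h>

/* Padding needed to raise offset to the next multiple of the
 * power-of-two align; 0 if already aligned. */
static size_t align_pad(size_t offset, size_t align)
{
	return (align - offset) & (align - 1);
}

int main(void)
{
	size_t o_begin = 64, header = 12, align = 8, data = 16;
	size_t slot = header;

	slot += align_pad(o_begin + slot, align) + data;
	/* header ends at 76, pad 4 brings the payload to 80, +16 data:
	 * slot_size = 32 */
	printf("pad=%zu slot_size=%zu\n",
	       align_pad(o_begin + header, align), slot);
	return 0;
}
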
@@ -152,7 +136,8 @@ int lib_ring_buffer_try_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
 
 static inline
 int lib_ring_buffer_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
-                           struct lttng_ust_lib_ring_buffer_ctx *ctx)
+                           struct lttng_ust_lib_ring_buffer_ctx *ctx,
+                           void *client_ctx)
 {
        struct channel *chan = ctx->chan;
        struct lttng_ust_shm_handle *handle = ctx->handle;
@@ -160,21 +145,23 @@ int lib_ring_buffer_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
        unsigned long o_begin, o_end, o_old;
        size_t before_hdr_pad = 0;
 
-       if (uatomic_read(&chan->record_disabled))
+       if (caa_unlikely(uatomic_read(&chan->record_disabled)))
                return -EAGAIN;
 
        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
                buf = shmp(handle, chan->backend.buf[ctx->cpu].shmp);
        else
                buf = shmp(handle, chan->backend.buf[0].shmp);
-       if (uatomic_read(&buf->record_disabled))
+       if (caa_unlikely(!buf))
+               return -EIO;
+       if (caa_unlikely(uatomic_read(&buf->record_disabled)))
                return -EAGAIN;
        ctx->buf = buf;
 
        /*
         * Perform retryable operations.
         */
-       if (caa_unlikely(lib_ring_buffer_try_reserve(config, ctx, &o_begin,
+       if (caa_unlikely(lib_ring_buffer_try_reserve(config, ctx, client_ctx, &o_begin,
                                                 &o_end, &o_old, &before_hdr_pad)))
                goto slow_path;
 
@@ -205,7 +192,7 @@ int lib_ring_buffer_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
        ctx->buf_offset = o_begin + before_hdr_pad;
        return 0;
 slow_path:
-       return lib_ring_buffer_reserve_slow(ctx);
+       return lib_ring_buffer_reserve_slow(ctx, client_ctx);
 }
 
 /**
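
The fast path in lib_ring_buffer_reserve() amounts to: snapshot the write offset, compute the slot speculatively in lib_ring_buffer_try_reserve(), publish it with a single compare-and-swap, and defer everything awkward (sub-buffer switch, full-buffer policy, lost races) to lib_ring_buffer_reserve_slow(). A standalone sketch of that shape using C11 atomics instead of the library's v_cmpxchg wrappers; all names below are illustrative, not the library's API:

#include <stdatomic.h>
#include <stddef.h>

/* Illustrative fast-path reserve: returns the reserved begin offset,
 * or (size_t)-1 when the caller must fall back to a slow path. */
static size_t try_reserve_fast(_Atomic size_t *write_offset,
			       size_t slot_size, size_t subbuf_size)
{
	size_t o_begin = atomic_load_explicit(write_offset,
					      memory_order_relaxed);
	size_t o_end = o_begin + slot_size;

	/* A record crossing a sub-buffer boundary needs the slow path,
	 * which handles the sub-buffer switch and full-buffer policy. */
	if (o_begin / subbuf_size != (o_end - 1) / subbuf_size)
		return (size_t)-1;

	/* Single atomic publish; failure means another writer raced us. */
	if (!atomic_compare_exchange_strong_explicit(write_offset, &o_begin,
			o_end, memory_order_relaxed, memory_order_relaxed))
		return (size_t)-1;

	return o_begin;	/* payload goes at o_begin + header + padding */
}
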
@@ -250,11 +237,16 @@ void lib_ring_buffer_commit(const struct lttng_ust_lib_ring_buffer_config *config,
        unsigned long offset_end = ctx->buf_offset;
        unsigned long endidx = subbuf_index(offset_end - 1, chan);
        unsigned long commit_count;
+       struct commit_counters_hot *cc_hot = shmp_index(handle,
+                                               buf->commit_hot, endidx);
+
+       if (caa_unlikely(!cc_hot))
+               return;
 
        /*
         * Must count record before incrementing the commit count.
         */
-       subbuffer_count_record(config, &buf->backend, endidx, handle);
+       subbuffer_count_record(config, ctx, &buf->backend, endidx, handle);
 
        /*
         * Order all writes to buffer before the commit count update that will
@@ -262,7 +254,7 @@ void lib_ring_buffer_commit(const struct lttng_ust_lib_ring_buffer_config *config,
         */
        cmm_smp_wmb();
 
-       v_add(config, ctx->slot_size, &shmp_index(handle, buf->commit_hot, endidx)->cc);
+       v_add(config, ctx->slot_size, &cc_hot->cc);
 
        /*
         * commit count read can race with concurrent OOO commit count updates.
@@ -282,7 +274,7 @@ void lib_ring_buffer_commit(const struct lttng_ust_lib_ring_buffer_config *config,
         *   count reaches back the reserve offset for a specific sub-buffer,
         *   which is completely independent of the order.
         */
-       commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, endidx)->cc);
+       commit_count = v_read(config, &cc_hot->cc);
 
        lib_ring_buffer_check_deliver(config, buf, chan, offset_end - 1,
                                      commit_count, endidx, handle, ctx->tsc);
@@ -290,8 +282,8 @@ void lib_ring_buffer_commit(const struct lttng_ust_lib_ring_buffer_config *config,
         * Update used size at each commit. It's needed only for extracting
         * ring_buffer buffers from vmcore, after crash.
         */
-       lib_ring_buffer_write_commit_counter(config, buf, chan, endidx,
-                       offset_end, commit_count, handle);
+       lib_ring_buffer_write_commit_counter(config, buf, chan,
+                       offset_end, commit_count, handle, cc_hot);
 }
 
 /**
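
The ordering in lib_ring_buffer_commit() is the classic publish pattern: all payload stores, then cmm_smp_wmb(), then the commit-count increment that lets a reader consume the record (the read side pairs it with a matching read barrier). The same contract rendered standalone with C11 atomics, where a release increment subsumes the explicit write barrier; types and names here are illustrative:

#include <stdatomic.h>
#include <stddef.h>
#include <string.h>

struct subbuf {
	char data[4096];
	_Atomic unsigned long commit_count;	/* bytes made visible */
};

/* Writer: fill the slot, then bump the commit count with release
 * semantics, mirroring "payload writes; cmm_smp_wmb(); v_add(cc)". */
static void commit_slot(struct subbuf *sb, size_t off,
			const void *payload, size_t len)
{
	memcpy(sb->data + off, payload, len);
	atomic_fetch_add_explicit(&sb->commit_count, len,
				  memory_order_release);
}

/* Reader: an acquire load pairs with the writer's release, so every
 * byte below the returned count is safe to read. */
static size_t committed_bytes(struct subbuf *sb)
{
	return atomic_load_explicit(&sb->commit_count,
				    memory_order_acquire);
}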