Move to kernel style SPDX license identifiers
[lttng-ust.git] / libringbuffer / backend.h
index 6eb0cf0b4e8feb4ab3f69f2f51f6e0be874b22f8..98c194ca5bf988603646071b778ac936b1add00d 100644 (file)
@@ -1,22 +1,19 @@
-#ifndef _LINUX_RING_BUFFER_BACKEND_H
-#define _LINUX_RING_BUFFER_BACKEND_H
-
 /*
- * linux/ringbuffer/backend.h
+ * SPDX-License-Identifier: LGPL-2.1-only
  *
- * Copyright (C) 2008-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
  *
  * Ring buffer backend (API).
  *
- * Dual LGPL v2.1/GPL v2 license.
- *
  * Credits to Steven Rostedt for proposing to use an extra-subbuffer owned by
  * the reader in flight recorder mode.
  */
 
-#include <unistd.h>
+#ifndef _LTTNG_RING_BUFFER_BACKEND_H
+#define _LTTNG_RING_BUFFER_BACKEND_H
 
-#include "ust/core.h"
+#include <stddef.h>
+#include <unistd.h>
 
 /* Internal helpers */
 #include "backend_internal.h"
 
 /* Ring buffer backend access (read/write) */
 
-extern size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb,
+extern size_t lib_ring_buffer_read(struct lttng_ust_lib_ring_buffer_backend *bufb,
                                   size_t offset, void *dest, size_t len,
                                   struct lttng_ust_shm_handle *handle);
 
-extern int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb,
+extern int lib_ring_buffer_read_cstr(struct lttng_ust_lib_ring_buffer_backend *bufb,
                                     size_t offset, void *dest, size_t len,
                                     struct lttng_ust_shm_handle *handle);
 
@@ -41,11 +38,11 @@ extern int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb,
  * as long as the write is never bigger than a page size.
  */
 extern void *
-lib_ring_buffer_offset_address(struct lib_ring_buffer_backend *bufb,
+lib_ring_buffer_offset_address(struct lttng_ust_lib_ring_buffer_backend *bufb,
                               size_t offset,
                               struct lttng_ust_shm_handle *handle);
 extern void *
-lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
+lib_ring_buffer_read_offset_address(struct lttng_ust_lib_ring_buffer_backend *bufb,
                                    size_t offset,
                                    struct lttng_ust_shm_handle *handle);
 
@@ -61,35 +58,122 @@ lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
  * backend-specific memcpy() operation. Calls the slow path (_ring_buffer_write)
  * if copy is crossing a page boundary.
  */
static inline __attribute__((always_inline))
void lib_ring_buffer_write(const struct lttng_ust_lib_ring_buffer_config *config,
			   struct lttng_ust_lib_ring_buffer_ctx *ctx,
			   const void *src, size_t len)
{
	struct channel_backend *chanb = &ctx->chan->backend;
	struct lttng_ust_shm_handle *handle = ctx->handle;
	size_t offset = ctx->buf_offset;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
	void *p;

	/* Zero-length writes are a no-op. */
	if (caa_unlikely(!len))
		return;
	/*
	 * Underlying layer should never ask for writes across
	 * subbuffers.
	 */
	CHAN_WARN_ON(chanb, (offset & (chanb->buf_size - 1)) + len > chanb->buf_size);
	/*
	 * Fast path: backend pages cached in the context. Fall back to
	 * the slower lookup only when the cache is unpopulated; bail out
	 * silently if the pages still cannot be resolved (shared-memory
	 * mapping may be invalid, e.g. torn down by the consumer).
	 */
	backend_pages = lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
	if (caa_unlikely(!backend_pages)) {
		if (lib_ring_buffer_backend_get_pages(config, ctx, &backend_pages))
			return;
	}
	/* Translate the sub-buffer-relative offset into a local pointer. */
	p = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
	if (caa_unlikely(!p))
		return;
	lib_ring_buffer_do_copy(config, p, src, len);
	/* Advance the context write position past the copied payload. */
	ctx->buf_offset += len;
}
+
/*
 * Copy up to @len string bytes from @src to @dest. Stop whenever a NULL
 * terminating character is found in @src. Returns the number of bytes
 * copied. Does *not* terminate @dest with NULL terminating character.
 */
static inline __attribute__((always_inline))
size_t lib_ring_buffer_do_strcpy(const struct lttng_ust_lib_ring_buffer_config *config,
		char *dest, const char *src, size_t len)
{
	size_t copied = 0;

	while (copied < len) {
		char tmp;

		/*
		 * Read each source byte exactly once, since the source
		 * string may be modified concurrently.
		 */
		tmp = CMM_LOAD_SHARED(src[copied]);
		if (tmp == '\0')
			break;
		lib_ring_buffer_do_copy(config, &dest[copied], &tmp, 1);
		copied++;
	}
	return copied;
}
+
/**
 * lib_ring_buffer_strcpy - write string data to a buffer backend
 * @config : ring buffer instance configuration
 * @ctx: ring buffer context. (input arguments only)
 * @src : source pointer to copy from
 * @len : length of data to copy
 * @pad : character to use for padding
 *
 * This function copies @len - 1 bytes of string data from a source
 * pointer to a buffer backend, followed by a terminating '\0'
 * character, at the current context offset. This is more or less a
 * buffer backend-specific strncpy() operation. If a terminating '\0'
 * character is found in @src before @len - 1 characters are copied, pad
 * the buffer with @pad characters (e.g. '#'). Exactly @len bytes are
 * always consumed in the buffer, so record layout stays fixed-size.
 */
static inline __attribute__((always_inline))
void lib_ring_buffer_strcpy(const struct lttng_ust_lib_ring_buffer_config *config,
			   struct lttng_ust_lib_ring_buffer_ctx *ctx,
			   const char *src, size_t len, int pad)
{
	struct channel_backend *chanb = &ctx->chan->backend;
	struct lttng_ust_shm_handle *handle = ctx->handle;
	size_t count;
	size_t offset = ctx->buf_offset;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
	void *p;

	/* Nothing to do (and len - 1 below would underflow) for len == 0. */
	if (caa_unlikely(!len))
		return;
	/*
	 * Underlying layer should never ask for writes across
	 * subbuffers.
	 */
	CHAN_WARN_ON(chanb, (offset & (chanb->buf_size - 1)) + len > chanb->buf_size);
	/* Resolve backend pages: cached in ctx, or slow-path lookup. */
	backend_pages = lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
	if (caa_unlikely(!backend_pages)) {
		if (lib_ring_buffer_backend_get_pages(config, ctx, &backend_pages))
			return;
	}
	p = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
	if (caa_unlikely(!p))
		return;

	/* Copy at most len - 1 bytes, stopping at src's '\0' if earlier. */
	count = lib_ring_buffer_do_strcpy(config, p, src, len - 1);
	offset += count;
	/* Padding */
	if (caa_unlikely(count < len - 1)) {
		size_t pad_len = len - 1 - count;

		/* Re-resolve the write pointer at the advanced offset. */
		p = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
		if (caa_unlikely(!p))
			return;
		lib_ring_buffer_do_memset(p, pad, pad_len);
		offset += pad_len;
	}
	/* Final '\0' */
	p = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
	if (caa_unlikely(!p))
		return;
	lib_ring_buffer_do_memset(p, '\0', 1);
	/* Always advance by the full reserved length, padding included. */
	ctx->buf_offset += len;
}
 
@@ -100,28 +184,49 @@ void lib_ring_buffer_write(const struct lib_ring_buffer_config *config,
  */
static inline
unsigned long lib_ring_buffer_get_records_unread(
				const struct lttng_ust_lib_ring_buffer_config *config,
				struct lttng_ust_lib_ring_buffer *buf,
				struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend *bufb = &buf->backend;
	unsigned long records_unread = 0, sb_bindex;
	unsigned int i;
	struct channel *chan;

	/*
	 * Every shared-memory pointer translation below is validated;
	 * any failure returns 0 rather than dereferencing an invalid
	 * mapping.
	 */
	chan = shmp(handle, bufb->chan);
	if (!chan)
		return 0;
	/* Sum the unread record counts of all writer-side sub-buffers. */
	for (i = 0; i < chan->backend.num_subbuf; i++) {
		struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
		struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
		struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

		wsb = shmp_index(handle, bufb->buf_wsb, i);
		if (!wsb)
			return 0;
		sb_bindex = subbuffer_id_get_index(config, wsb->id);
		rpages = shmp_index(handle, bufb->array, sb_bindex);
		if (!rpages)
			return 0;
		backend_pages = shmp(handle, rpages->shmp);
		if (!backend_pages)
			return 0;
		records_unread += v_read(config, &backend_pages->records_unread);
	}
	if (config->mode == RING_BUFFER_OVERWRITE) {
		/*
		 * In overwrite (flight-recorder) mode, also count the
		 * extra reader-owned sub-buffer.
		 */
		struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
		struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

		sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
		rpages = shmp_index(handle, bufb->array, sb_bindex);
		if (!rpages)
			return 0;
		backend_pages = shmp(handle, rpages->shmp);
		if (!backend_pages)
			return 0;
		records_unread += v_read(config, &backend_pages->records_unread);
	}
	return records_unread;
}
 
-#endif /* _LINUX_RING_BUFFER_BACKEND_H */
+#endif /* _LTTNG_RING_BUFFER_BACKEND_H */
This page took 0.025685 seconds and 4 git commands to generate.