Cleanup: apply `include-what-you-use` guideline for `size_t`
diff --git a/libringbuffer/backend_internal.h b/libringbuffer/backend_internal.h
index 36d53dfd531357037501a539cd8c991c9d2ca4d9..f492cc3eaff18cb36197f8cf0b0ed0161964cc60 100644
--- a/libringbuffer/backend_internal.h
+++ b/libringbuffer/backend_internal.h
@@ -1,16 +1,29 @@
-#ifndef _LINUX_RING_BUFFER_BACKEND_INTERNAL_H
-#define _LINUX_RING_BUFFER_BACKEND_INTERNAL_H
+#ifndef _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H
+#define _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H
 
 /*
- * linux/ringbuffer/backend_internal.h
- *
- * Copyright (C) 2008-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * libringbuffer/backend_internal.h
  *
  * Ring buffer backend (internal helpers).
  *
- * Dual LGPL v2.1/GPL v2 license.
+ * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
+#include <stddef.h>
 #include <unistd.h>
 #include <urcu/compiler.h>
 
@@ -33,7 +46,8 @@ int channel_backend_init(struct channel_backend *chanb,
                         const char *name,
                         const struct lttng_ust_lib_ring_buffer_config *config,
                         size_t subbuf_size,
-                        size_t num_subbuf, struct lttng_ust_shm_handle *handle);
+                        size_t num_subbuf, struct lttng_ust_shm_handle *handle,
+                        const int *stream_fds);
 void channel_backend_free(struct channel_backend *chanb,
                          struct lttng_ust_shm_handle *handle);
 
@@ -183,16 +197,82 @@ int subbuffer_id_check_index(const struct lttng_ust_lib_ring_buffer_config *conf
                return 0;
 }
 
+static inline
+int lib_ring_buffer_backend_get_pages(const struct lttng_ust_lib_ring_buffer_config *config,
+                       struct lttng_ust_lib_ring_buffer_ctx *ctx,
+                       struct lttng_ust_lib_ring_buffer_backend_pages **backend_pages)
+{
+       struct lttng_ust_lib_ring_buffer_backend *bufb = &ctx->buf->backend;
+       struct channel_backend *chanb = &ctx->chan->backend;
+       struct lttng_ust_shm_handle *handle = ctx->handle;
+       size_t sbidx;
+       size_t offset = ctx->buf_offset;
+       struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
+       struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
+       unsigned long sb_bindex, id;
+       struct lttng_ust_lib_ring_buffer_backend_pages *_backend_pages;
+
+       offset &= chanb->buf_size - 1;
+       sbidx = offset >> chanb->subbuf_size_order;
+       wsb = shmp_index(handle, bufb->buf_wsb, sbidx);
+       if (caa_unlikely(!wsb))
+               return -1;
+       id = wsb->id;
+       sb_bindex = subbuffer_id_get_index(config, id);
+       rpages = shmp_index(handle, bufb->array, sb_bindex);
+       if (caa_unlikely(!rpages))
+               return -1;
+       CHAN_WARN_ON(ctx->chan,
+                    config->mode == RING_BUFFER_OVERWRITE
+                    && subbuffer_id_is_noref(config, id));
+       _backend_pages = shmp(handle, rpages->shmp);
+       if (caa_unlikely(!_backend_pages))
+               return -1;
+       *backend_pages = _backend_pages;
+       return 0;
+}
+
+/* Get backend pages from cache. */
+static inline
+struct lttng_ust_lib_ring_buffer_backend_pages *
+       lib_ring_buffer_get_backend_pages_from_ctx(const struct lttng_ust_lib_ring_buffer_config *config,
+               struct lttng_ust_lib_ring_buffer_ctx *ctx)
+{
+       if (caa_unlikely(ctx->ctx_len
+                       < sizeof(struct lttng_ust_lib_ring_buffer_ctx)))
+               return NULL;
+       return ctx->backend_pages;
+}
+
+/*
+ * The ring buffer can count events recorded and overwritten per buffer,
+ * but it is disabled by default due to its performance overhead.
+ */
+#ifdef LTTNG_RING_BUFFER_COUNT_EVENTS
 static inline
 void subbuffer_count_record(const struct lttng_ust_lib_ring_buffer_config *config,
+                           const struct lttng_ust_lib_ring_buffer_ctx *ctx,
                            struct lttng_ust_lib_ring_buffer_backend *bufb,
                            unsigned long idx, struct lttng_ust_shm_handle *handle)
 {
-       unsigned long sb_bindex;
+       struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
 
-       sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
-       v_inc(config, &shmp(handle, shmp_index(handle, bufb->array, sb_bindex)->shmp)->records_commit);
+       backend_pages = lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
+       if (caa_unlikely(!backend_pages)) {
+               if (lib_ring_buffer_backend_get_pages(config, ctx, &backend_pages))
+                       return;
+       }
+       v_inc(config, &backend_pages->records_commit);
 }
+#else /* LTTNG_RING_BUFFER_COUNT_EVENTS */
+static inline
+void subbuffer_count_record(const struct lttng_ust_lib_ring_buffer_config *config,
+                           const struct lttng_ust_lib_ring_buffer_ctx *ctx,
+                           struct lttng_ust_lib_ring_buffer_backend *bufb,
+                           unsigned long idx, struct lttng_ust_shm_handle *handle)
+{
+}
+#endif /* #else LTTNG_RING_BUFFER_COUNT_EVENTS */
 
 /*
  * Reader has exclusive subbuffer access for record consumption. No need to
@@ -204,12 +284,23 @@ void subbuffer_consume_record(const struct lttng_ust_lib_ring_buffer_config *con
                              struct lttng_ust_shm_handle *handle)
 {
        unsigned long sb_bindex;
+       struct channel *chan;
+       struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages_shmp;
+       struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
 
        sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
-       CHAN_WARN_ON(shmp(handle, bufb->chan),
-                    !v_read(config, &shmp(handle, shmp_index(handle, bufb->array, sb_bindex)->shmp)->records_unread));
+       chan = shmp(handle, bufb->chan);
+       if (!chan)
+               return;
+       pages_shmp = shmp_index(handle, bufb->array, sb_bindex);
+       if (!pages_shmp)
+               return;
+       backend_pages = shmp(handle, pages_shmp->shmp);
+       if (!backend_pages)
+               return;
+       CHAN_WARN_ON(chan, !v_read(config, &backend_pages->records_unread));
        /* Non-atomic decrement protected by exclusive subbuffer access */
-       _v_dec(config, &shmp(handle, shmp_index(handle, bufb->array, sb_bindex)->shmp)->records_unread);
+       _v_dec(config, &backend_pages->records_unread);
        v_inc(config, &bufb->records_read);
 }
 
@@ -221,16 +312,29 @@ unsigned long subbuffer_get_records_count(
                                struct lttng_ust_shm_handle *handle)
 {
        unsigned long sb_bindex;
+       struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
+       struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
+       struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
 
-       sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
-       return v_read(config, &shmp(handle, shmp_index(handle, bufb->array, sb_bindex)->shmp)->records_commit);
+       wsb = shmp_index(handle, bufb->buf_wsb, idx);
+       if (!wsb)
+               return 0;
+       sb_bindex = subbuffer_id_get_index(config, wsb->id);
+       rpages = shmp_index(handle, bufb->array, sb_bindex);
+       if (!rpages)
+               return 0;
+       backend_pages = shmp(handle, rpages->shmp);
+       if (!backend_pages)
+               return 0;
+       return v_read(config, &backend_pages->records_commit);
 }
 
 /*
  * Must be executed at subbuffer delivery when the writer has _exclusive_
- * subbuffer access. See ring_buffer_check_deliver() for details.
- * ring_buffer_get_records_count() must be called to get the records count
- * before this function, because it resets the records_commit count.
+ * subbuffer access. See lib_ring_buffer_check_deliver() for details.
+ * lib_ring_buffer_get_records_count() must be called to get the records
+ * count before this function, because it resets the records_commit
+ * count.
  */
 static inline
 unsigned long subbuffer_count_records_overrun(
@@ -239,15 +343,25 @@ unsigned long subbuffer_count_records_overrun(
                                unsigned long idx,
                                struct lttng_ust_shm_handle *handle)
 {
-       struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages;
        unsigned long overruns, sb_bindex;
+       struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
+       struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
+       struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
 
-       sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
-       pages = shmp_index(handle, bufb->array, sb_bindex);
-       overruns = v_read(config, &shmp(handle, pages->shmp)->records_unread);
-       v_set(config, &shmp(handle, pages->shmp)->records_unread,
-             v_read(config, &shmp(handle, pages->shmp)->records_commit));
-       v_set(config, &shmp(handle, pages->shmp)->records_commit, 0);
+       wsb = shmp_index(handle, bufb->buf_wsb, idx);
+       if (!wsb)
+               return 0;
+       sb_bindex = subbuffer_id_get_index(config, wsb->id);
+       rpages = shmp_index(handle, bufb->array, sb_bindex);
+       if (!rpages)
+               return 0;
+       backend_pages = shmp(handle, rpages->shmp);
+       if (!backend_pages)
+               return 0;
+       overruns = v_read(config, &backend_pages->records_unread);
+       v_set(config, &backend_pages->records_unread,
+             v_read(config, &backend_pages->records_commit));
+       v_set(config, &backend_pages->records_commit, 0);
 
        return overruns;
 }
@@ -259,12 +373,22 @@ void subbuffer_set_data_size(const struct lttng_ust_lib_ring_buffer_config *conf
                             unsigned long data_size,
                             struct lttng_ust_shm_handle *handle)
 {
-       struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages;
        unsigned long sb_bindex;
+       struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
+       struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
+       struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
 
-       sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
-       pages = shmp_index(handle, bufb->array, sb_bindex);
-       shmp(handle, pages->shmp)->data_size = data_size;
+       wsb = shmp_index(handle, bufb->buf_wsb, idx);
+       if (!wsb)
+               return;
+       sb_bindex = subbuffer_id_get_index(config, wsb->id);
+       rpages = shmp_index(handle, bufb->array, sb_bindex);
+       if (!rpages)
+               return;
+       backend_pages = shmp(handle, rpages->shmp);
+       if (!backend_pages)
+               return;
+       backend_pages->data_size = data_size;
 }
 
 static inline
@@ -273,12 +397,18 @@ unsigned long subbuffer_get_read_data_size(
                                struct lttng_ust_lib_ring_buffer_backend *bufb,
                                struct lttng_ust_shm_handle *handle)
 {
-       struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages;
        unsigned long sb_bindex;
+       struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages_shmp;
+       struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
 
        sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
-       pages = shmp_index(handle, bufb->array, sb_bindex);
-       return shmp(handle, pages->shmp)->data_size;
+       pages_shmp = shmp_index(handle, bufb->array, sb_bindex);
+       if (!pages_shmp)
+               return 0;
+       backend_pages = shmp(handle, pages_shmp->shmp);
+       if (!backend_pages)
+               return 0;
+       return backend_pages->data_size;
 }
 
 static inline
@@ -288,12 +418,35 @@ unsigned long subbuffer_get_data_size(
                                unsigned long idx,
                                struct lttng_ust_shm_handle *handle)
 {
-       struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages;
        unsigned long sb_bindex;
+       struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
+       struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
+       struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
 
-       sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
-       pages = shmp_index(handle, bufb->array, sb_bindex);
-       return shmp(handle, pages->shmp)->data_size;
+       wsb = shmp_index(handle, bufb->buf_wsb, idx);
+       if (!wsb)
+               return 0;
+       sb_bindex = subbuffer_id_get_index(config, wsb->id);
+       rpages = shmp_index(handle, bufb->array, sb_bindex);
+       if (!rpages)
+               return 0;
+       backend_pages = shmp(handle, rpages->shmp);
+       if (!backend_pages)
+               return 0;
+       return backend_pages->data_size;
+}
+
+static inline
+void subbuffer_inc_packet_count(const struct lttng_ust_lib_ring_buffer_config *config,
+               struct lttng_ust_lib_ring_buffer_backend *bufb,
+               unsigned long idx, struct lttng_ust_shm_handle *handle)
+{
+       struct lttng_ust_lib_ring_buffer_backend_counts *counts;
+
+       counts = shmp_index(handle, bufb->buf_cnt, idx);
+       if (!counts)
+               return;
+       counts->seq_cnt++;
 }
 
 /**
@@ -307,6 +460,7 @@ void lib_ring_buffer_clear_noref(const struct lttng_ust_lib_ring_buffer_config *
                                 struct lttng_ust_shm_handle *handle)
 {
        unsigned long id, new_id;
+       struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
 
        if (config->mode != RING_BUFFER_OVERWRITE)
                return;
@@ -315,7 +469,10 @@ void lib_ring_buffer_clear_noref(const struct lttng_ust_lib_ring_buffer_config *
         * Performing a volatile access to read the sb_pages, because we want to
         * read a coherent version of the pointer and the associated noref flag.
         */
-       id = CMM_ACCESS_ONCE(shmp_index(handle, bufb->buf_wsb, idx)->id);
+       wsb = shmp_index(handle, bufb->buf_wsb, idx);
+       if (!wsb)
+               return;
+       id = CMM_ACCESS_ONCE(wsb->id);
        for (;;) {
                /* This check is called on the fast path for each record. */
                if (caa_likely(!subbuffer_id_is_noref(config, id))) {
@@ -329,7 +486,7 @@ void lib_ring_buffer_clear_noref(const struct lttng_ust_lib_ring_buffer_config *
                }
                new_id = id;
                subbuffer_id_clear_noref(config, &new_id);
-               new_id = uatomic_cmpxchg(&shmp_index(handle, bufb->buf_wsb, idx)->id, id, new_id);
+               new_id = uatomic_cmpxchg(&wsb->id, id, new_id);
                if (caa_likely(new_id == id))
                        break;
                id = new_id;
@@ -346,9 +503,15 @@ void lib_ring_buffer_set_noref_offset(const struct lttng_ust_lib_ring_buffer_con
                                      unsigned long idx, unsigned long offset,
                                      struct lttng_ust_shm_handle *handle)
 {
+       struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
+       struct channel *chan;
+
        if (config->mode != RING_BUFFER_OVERWRITE)
                return;
 
+       wsb = shmp_index(handle, bufb->buf_wsb, idx);
+       if (!wsb)
+               return;
        /*
         * Because ring_buffer_set_noref() is only called by a single thread
         * (the one which updated the cc_sb value), there are no concurrent
@@ -360,14 +523,16 @@ void lib_ring_buffer_set_noref_offset(const struct lttng_ust_lib_ring_buffer_con
         * subbuffer_set_noref() uses a volatile store to deal with concurrent
         * readers of the noref flag.
         */
-       CHAN_WARN_ON(shmp(handle, bufb->chan),
-                    subbuffer_id_is_noref(config, shmp_index(handle, bufb->buf_wsb, idx)->id));
+       chan = shmp(handle, bufb->chan);
+       if (!chan)
+               return;
+       CHAN_WARN_ON(chan, subbuffer_id_is_noref(config, wsb->id));
        /*
         * Memory barrier that ensures counter stores are ordered before set
         * noref and offset.
         */
        cmm_smp_mb();
-       subbuffer_id_set_noref_offset(config, &shmp_index(handle, bufb->buf_wsb, idx)->id, offset);
+       subbuffer_id_set_noref_offset(config, &wsb->id, offset);
 }
 
 /**
@@ -381,16 +546,23 @@ int update_read_sb_index(const struct lttng_ust_lib_ring_buffer_config *config,
                         unsigned long consumed_count,
                         struct lttng_ust_shm_handle *handle)
 {
+       struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
        unsigned long old_id, new_id;
 
+       wsb = shmp_index(handle, bufb->buf_wsb, consumed_idx);
+       if (caa_unlikely(!wsb))
+               return -EPERM;
+
        if (config->mode == RING_BUFFER_OVERWRITE) {
+               struct channel *chan;
+
                /*
                 * Exchange the target writer subbuffer with our own unused
                 * subbuffer. No need to use CMM_ACCESS_ONCE() here to read the
                 * old_wpage, because the value read will be confirmed by the
                 * following cmpxchg().
                 */
-               old_id = shmp_index(handle, bufb->buf_wsb, consumed_idx)->id;
+               old_id = wsb->id;
                if (caa_unlikely(!subbuffer_id_is_noref(config, old_id)))
                        return -EAGAIN;
                /*
@@ -400,18 +572,19 @@ int update_read_sb_index(const struct lttng_ust_lib_ring_buffer_config *config,
                if (caa_unlikely(!subbuffer_id_compare_offset(config, old_id,
                                                          consumed_count)))
                        return -EAGAIN;
-               CHAN_WARN_ON(shmp(handle, bufb->chan),
-                            !subbuffer_id_is_noref(config, bufb->buf_rsb.id));
+               chan = shmp(handle, bufb->chan);
+               if (caa_unlikely(!chan))
+                       return -EPERM;
+               CHAN_WARN_ON(chan, !subbuffer_id_is_noref(config, bufb->buf_rsb.id));
                subbuffer_id_set_noref_offset(config, &bufb->buf_rsb.id,
                                              consumed_count);
-               new_id = uatomic_cmpxchg(&shmp_index(handle, bufb->buf_wsb, consumed_idx)->id, old_id,
-                                bufb->buf_rsb.id);
+               new_id = uatomic_cmpxchg(&wsb->id, old_id, bufb->buf_rsb.id);
                if (caa_unlikely(old_id != new_id))
                        return -EAGAIN;
                bufb->buf_rsb.id = new_id;
        } else {
                /* No page exchange, use the writer page directly */
-               bufb->buf_rsb.id = shmp_index(handle, bufb->buf_wsb, consumed_idx)->id;
+               bufb->buf_rsb.id = wsb->id;
        }
        return 0;
 }
@@ -420,6 +593,28 @@ int update_read_sb_index(const struct lttng_ust_lib_ring_buffer_config *config,
 #define inline_memcpy(dest, src, n)    memcpy(dest, src, n)
 #endif
 
+static inline __attribute__((always_inline))
+void lttng_inline_memcpy(void *dest, const void *src,
+               unsigned long len)
+{
+       switch (len) {
+       case 1:
+               *(uint8_t *) dest = *(const uint8_t *) src;
+               break;
+       case 2:
+               *(uint16_t *) dest = *(const uint16_t *) src;
+               break;
+       case 4:
+               *(uint32_t *) dest = *(const uint32_t *) src;
+               break;
+       case 8:
+               *(uint64_t *) dest = *(const uint64_t *) src;
+               break;
+       default:
+               inline_memcpy(dest, src, len);
+       }
+}
+
 /*
  * Use the architecture-specific memcpy implementation for constant-sized
  * inputs, but rely on an inline memcpy for length statically unknown.
@@ -431,12 +626,24 @@ do {                                                              \
        if (__builtin_constant_p(len))                          \
                memcpy(dest, src, __len);                       \
        else                                                    \
-               inline_memcpy(dest, src, __len);                \
+               lttng_inline_memcpy(dest, src, __len);          \
 } while (0)
 
+/*
+ * write len bytes to dest with c
+ */
+static inline
+void lib_ring_buffer_do_memset(char *dest, int c, unsigned long len)
+{
+       unsigned long i;
+
+       for (i = 0; i < len; i++)
+               dest[i] = c;
+}
+
 /* arch-agnostic implementation */
 
-static inline int fls(unsigned int x)
+static inline int lttng_ust_fls(unsigned int x)
 {
        int r = 32;
 
@@ -459,7 +666,7 @@ static inline int fls(unsigned int x)
                r -= 2;
        }
        if (!(x & 0x80000000U)) {
-               x <<= 1;
+               /* No need to bit shift on last operation */
                r -= 1;
        }
        return r;
@@ -469,26 +676,10 @@ static inline int get_count_order(unsigned int count)
 {
        int order;
 
-       order = fls(count) - 1;
+       order = lttng_ust_fls(count) - 1;
        if (count & (count - 1))
                order++;
        return order;
 }
 
-static inline
-unsigned int hweight32(unsigned int value)
-{
-       unsigned int r;
-
-       r = value;
-       r = r - ((r >> 1) & 0x55555555);
-       r = (r & 0x33333333) + ((r >> 2) & 0x33333333);
-       r += r >> 4;
-       r &= 0x0F0F0F0F;
-       r += r >> 8;
-       r += r >> 16;
-       r &= 0x000000FF;
-       return r;
-}
-
-#endif /* _LINUX_RING_BUFFER_BACKEND_INTERNAL_H */
+#endif /* _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H */
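For illustration only (not part of the patch): the hunks above replace chained shmp()/shmp_index() dereferences with explicit NULL-checked lookups and a backend-pages pointer cached in the ring buffer context. The sketch below shows how a write-path caller might use that pattern; the helper name record_one_event is hypothetical, and the sketch assumes the libringbuffer internal headers from this tree are on the include path.

/*
 * Hypothetical write-path helper (not part of this patch) showing the
 * lookup order introduced above: use the backend pages cached in the
 * context when the context is recent enough to carry them, and fall
 * back to lib_ring_buffer_backend_get_pages() otherwise.
 */
#include "backend_internal.h"

static int record_one_event(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	/* Fast path: pages pointer cached in the (large enough) context. */
	backend_pages = lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
	if (caa_unlikely(!backend_pages)) {
		/* Slow path: recompute from the current buffer offset. */
		if (lib_ring_buffer_backend_get_pages(config, ctx, &backend_pages))
			return -1;	/* shared memory mapping unavailable */
	}
	v_inc(config, &backend_pages->records_commit);
	return 0;
}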