Performance: cache the backend pages pointer in context
diff --git a/lib/ringbuffer/backend_internal.h b/lib/ringbuffer/backend_internal.h
index 35b26f77190edb9d1588f9ace92866f304fe1053..e03d8c0b1b23a1363fdebbe5ed8589223226983c 100644
--- a/lib/ringbuffer/backend_internal.h
+++ b/lib/ringbuffer/backend_internal.h
@@ -23,9 +23,9 @@
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
-#include "../../wrapper/ringbuffer/config.h"
-#include "../../wrapper/ringbuffer/backend_types.h"
-#include "../../wrapper/ringbuffer/frontend_types.h"
+#include <wrapper/ringbuffer/config.h>
+#include <wrapper/ringbuffer/backend_types.h>
+#include <wrapper/ringbuffer/frontend_types.h>
 #include <linux/string.h>
 #include <linux/uaccess.h>
 
@@ -201,6 +201,42 @@ int subbuffer_id_check_index(const struct lib_ring_buffer_config *config,
                return 0;
 }
 
+static inline
+void lib_ring_buffer_backend_get_pages(const struct lib_ring_buffer_config *config,
+                       struct lib_ring_buffer_ctx *ctx,
+                       struct lib_ring_buffer_backend_pages **backend_pages)
+{
+       struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
+       struct channel_backend *chanb = &ctx->chan->backend;
+       size_t sbidx, offset = ctx->buf_offset;
+       unsigned long sb_bindex, id;
+       struct lib_ring_buffer_backend_pages *rpages;
+
+       offset &= chanb->buf_size - 1;
+       sbidx = offset >> chanb->subbuf_size_order;
+       id = bufb->buf_wsb[sbidx].id;
+       sb_bindex = subbuffer_id_get_index(config, id);
+       rpages = bufb->array[sb_bindex];
+       CHAN_WARN_ON(ctx->chan,
+                    config->mode == RING_BUFFER_OVERWRITE
+                    && subbuffer_id_is_noref(config, id));
+       *backend_pages = rpages;
+}
+
+/* Get backend pages from cache. */
+static inline
+struct lib_ring_buffer_backend_pages *
+       lib_ring_buffer_get_backend_pages_from_ctx(const struct lib_ring_buffer_config *config,
+               struct lib_ring_buffer_ctx *ctx)
+{
+       return ctx->backend_pages;
+}
+
+/*
+ * The ring buffer can count events recorded and overwritten per buffer,
+ * but this counting is disabled by default due to its performance overhead.
+ */
+#ifdef LTTNG_RING_BUFFER_COUNT_EVENTS
 static inline
 void subbuffer_count_record(const struct lib_ring_buffer_config *config,
                            struct lib_ring_buffer_backend *bufb,
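The two new helpers above split the work: lib_ring_buffer_backend_get_pages does the expensive derivation (offset mask, sub-buffer index shift, id lookup, backend array dereference), while the _from_ctx getter is a single load from the context. A minimal sketch of how the two presumably pair up; the call sites below are hypothetical and not part of this diff:

        /* Hypothetical reserve-time call site: derive the backend pages
         * pointer once and cache it in the context. */
        lib_ring_buffer_backend_get_pages(config, ctx, &ctx->backend_pages);

        /* Hypothetical write fast path: one load of the cached pointer
         * replaces the mask/shift/array-lookup sequence on every field. */
        backend_pages = lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
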
@@ -211,6 +247,14 @@ void subbuffer_count_record(const struct lib_ring_buffer_config *config,
        sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
        v_inc(config, &bufb->array[sb_bindex]->records_commit);
 }
+#else /* LTTNG_RING_BUFFER_COUNT_EVENTS */
+static inline
+void subbuffer_count_record(const struct lib_ring_buffer_config *config,
+                           struct lib_ring_buffer_backend *bufb,
+                           unsigned long idx)
+{
+}
+#endif /* #else LTTNG_RING_BUFFER_COUNT_EVENTS */
 
 /*
  * Reader has exclusive subbuffer access for record consumption. No need to
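With LTTNG_RING_BUFFER_COUNT_EVENTS undefined, the empty stub compiles away and the fast path pays nothing for the bookkeeping. Enabling it is a compile-time switch; a hedged sketch (how the define actually gets set is an assumption, not shown in this diff):

        /* Hypothetical: define before the ring buffer headers are included
         * (or pass -DLTTNG_RING_BUFFER_COUNT_EVENTS to the compiler) to
         * count records committed per sub-buffer. */
        #define LTTNG_RING_BUFFER_COUNT_EVENTS
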
@@ -309,6 +353,14 @@ unsigned long subbuffer_get_data_size(
        return pages->data_size;
 }
 
+static inline
+void subbuffer_inc_packet_count(const struct lib_ring_buffer_config *config,
+                               struct lib_ring_buffer_backend *bufb,
+                               unsigned long idx)
+{
+       bufb->buf_cnt[idx].seq_cnt++;
+}
+
 /**
  * lib_ring_buffer_clear_noref - Clear the noref subbuffer flag, called by
  *                               writer.
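The new subbuffer_inc_packet_count helper bumps a per-sub-buffer sequence counter (buf_cnt[idx].seq_cnt). A hedged sketch of the intent; the call site and header field below are assumptions, not part of this diff:

        /* Hypothetical call site: bump the packet sequence counter when a
         * sub-buffer is delivered, and publish it in the packet header so
         * a reader can spot discarded packets as gaps in the sequence. */
        subbuffer_inc_packet_count(config, bufb, idx);
        header->packet_seq_num = bufb->buf_cnt[idx].seq_cnt; /* hypothetical field */
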
@@ -443,6 +495,8 @@ do {                                                                \
 /*
  * We use __copy_from_user_inatomic to copy userspace data since we already
  * did the access_ok for the whole range.
+ *
+ * Return 0 if OK, nonzero on error.
  */
 static inline
 unsigned long lib_ring_buffer_do_copy_from_user_inatomic(void *dest,
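The added comment pins down the return convention: zero on success, nonzero when the inatomic copy faults. A hypothetical caller under that convention (not part of this diff):

        /* Hypothetical caller: a nonzero return means the copy faulted,
         * so take the failure path instead of trusting dest. */
        if (lib_ring_buffer_do_copy_from_user_inatomic(dest, src, len))
                goto fault;     /* hypothetical error label */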