Performance: cache the backend pages pointer in context
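This change moves the backend pages pointer lookup out of the per-field
write path: the pointer is resolved once when space is reserved and
cached in the ring buffer context, so every subsequent write to that
record reuses it instead of re-deriving it from the buffer offset. A
minimal sketch of the intended call pattern, using the two helpers added
below (the reserve-time and write-time call sites shown here are
illustrative, not the exact patched callers):

    /*
     * At space-reservation time, resolve the backend pages once and
     * cache the pointer in the context.
     */
    lib_ring_buffer_backend_get_pages(config, ctx, &ctx->backend_pages);

    /*
     * In the write fast path, fetch the cached pointer instead of
     * recomputing the subbuffer index and id for each field.
     */
    backend_pages = lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);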
diff --git a/lib/ringbuffer/backend_internal.h b/lib/ringbuffer/backend_internal.h
index d18967d6738d48721eed9d1471c0140d02b99629..e03d8c0b1b23a1363fdebbe5ed8589223226983c 100644
@@ -201,6 +201,42 @@ int subbuffer_id_check_index(const struct lib_ring_buffer_config *config,
                return 0;
 }
 
+static inline
+void lib_ring_buffer_backend_get_pages(const struct lib_ring_buffer_config *config,
+                       struct lib_ring_buffer_ctx *ctx,
+                       struct lib_ring_buffer_backend_pages **backend_pages)
+{
+       struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
+       struct channel_backend *chanb = &ctx->chan->backend;
+       size_t sbidx, offset = ctx->buf_offset;
+       unsigned long sb_bindex, id;
+       struct lib_ring_buffer_backend_pages *rpages;
+
+       offset &= chanb->buf_size - 1;
+       sbidx = offset >> chanb->subbuf_size_order;
+       id = bufb->buf_wsb[sbidx].id;
+       sb_bindex = subbuffer_id_get_index(config, id);
+       rpages = bufb->array[sb_bindex];
+       CHAN_WARN_ON(ctx->chan,
+                    config->mode == RING_BUFFER_OVERWRITE
+                    && subbuffer_id_is_noref(config, id));
+       *backend_pages = rpages;
+}
+
+/* Return the backend pages pointer cached in the context. */
+static inline
+struct lib_ring_buffer_backend_pages *
+       lib_ring_buffer_get_backend_pages_from_ctx(const struct lib_ring_buffer_config *config,
+               struct lib_ring_buffer_ctx *ctx)
+{
+       return ctx->backend_pages;
+}
+
+/*
+ * The ring buffer can count events recorded and overwritten per buffer,
+ * but this counting is disabled by default due to its performance overhead.
+ */
+#ifdef LTTNG_RING_BUFFER_COUNT_EVENTS
 static inline
 void subbuffer_count_record(const struct lib_ring_buffer_config *config,
                            struct lib_ring_buffer_backend *bufb,
@@ -211,6 +247,14 @@ void subbuffer_count_record(const struct lib_ring_buffer_config *config,
        sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
        v_inc(config, &bufb->array[sb_bindex]->records_commit);
 }
+#else /* LTTNG_RING_BUFFER_COUNT_EVENTS */
+static inline
+void subbuffer_count_record(const struct lib_ring_buffer_config *config,
+                           struct lib_ring_buffer_backend *bufb,
+                           unsigned long idx)
+{
+}
+#endif /* #else LTTNG_RING_BUFFER_COUNT_EVENTS */
 
 /*
  * Reader has exclusive subbuffer access for record consumption. No need to
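With subbuffer_count_record() compiled to an empty inline by default,
builds that want the per-subbuffer record counts back have to define
LTTNG_RING_BUFFER_COUNT_EVENTS before this header is included, for
example via the module build flags. A minimal sketch (the include path
is illustrative):

    /*
     * Opt back into per-subbuffer event counting; this re-enables the
     * v_inc() in subbuffer_count_record() and its overhead.
     */
    #define LTTNG_RING_BUFFER_COUNT_EVENTS
    #include "backend_internal.h"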
@@ -451,6 +495,8 @@ do {                                                                \
 /*
  * We use __copy_from_user_inatomic to copy userspace data since we already
  * did the access_ok for the whole range.
+ *
+ * Return 0 if OK, nonzero on error.
  */
 static inline
 unsigned long lib_ring_buffer_do_copy_from_user_inatomic(void *dest,
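The new comment makes the calling convention explicit:
__copy_from_user_inatomic() returns the number of bytes that could not
be copied, so 0 means success and any nonzero value means the copy
faulted partway through. A hypothetical caller-side sketch of how such
a failure might be handled (the zero-fill fallback is illustrative, not
the code in this patch):

    ret = lib_ring_buffer_do_copy_from_user_inatomic(dest, src, len);
    if (ret) {
        /*
         * Hypothetical fallback: zero-fill the reserved slot so no
         * uninitialized data leaks into the trace buffer.
         */
        memset(dest, 0, len);
    }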