#include <lttng/align.h>
#include <lttng/tracer-core.h>
-struct lib_ring_buffer;
-struct channel;
-struct lib_ring_buffer_config;
-struct lib_ring_buffer_ctx;
+struct lttng_kernel_ring_buffer;
+struct lttng_kernel_ring_buffer_channel;
+struct lttng_kernel_ring_buffer_config;
+struct lttng_kernel_ring_buffer_ctx;
+struct lttng_kernel_ring_buffer_ctx_private;
/*
* Ring buffer client callbacks. Only used by slow path, never on fast path.
* provided as inline functions too. These may simply return 0 if not used by
* the client.
*/
-struct lib_ring_buffer_client_cb {
+struct lttng_kernel_ring_buffer_client_cb {
/* Mandatory callbacks */
/* A static inline version is also required for fast path */
- u64 (*ring_buffer_clock_read) (struct channel *chan);
- size_t (*record_header_size) (const struct lib_ring_buffer_config *config,
- struct channel *chan, size_t offset,
+ u64 (*ring_buffer_clock_read) (struct lttng_kernel_ring_buffer_channel *chan);
+ size_t (*record_header_size) (const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer_channel *chan, size_t offset,
size_t *pre_header_padding,
- struct lib_ring_buffer_ctx *ctx,
+ struct lttng_kernel_ring_buffer_ctx *ctx,
void *client_ctx);
/* Slow path only, at subbuffer switch */
size_t (*subbuffer_header_size) (void);
- void (*buffer_begin) (struct lib_ring_buffer *buf, u64 tsc,
+ void (*buffer_begin) (struct lttng_kernel_ring_buffer *buf, u64 tsc,
unsigned int subbuf_idx);
- void (*buffer_end) (struct lib_ring_buffer *buf, u64 tsc,
- unsigned int subbuf_idx, unsigned long data_size);
+ void (*buffer_end) (struct lttng_kernel_ring_buffer *buf, u64 tsc,
+ unsigned int subbuf_idx, unsigned long data_size,
+ const struct lttng_kernel_ring_buffer_ctx *ctx);
/* Optional callbacks (can be set to NULL) */
/* Called at buffer creation/finalize */
- int (*buffer_create) (struct lib_ring_buffer *buf, void *priv,
+ int (*buffer_create) (struct lttng_kernel_ring_buffer *buf, void *priv,
int cpu, const char *name);
/*
* Clients should guarantee that no new reader handle can be opened
* after finalize.
*/
- void (*buffer_finalize) (struct lib_ring_buffer *buf, void *priv, int cpu);
+ void (*buffer_finalize) (struct lttng_kernel_ring_buffer *buf, void *priv, int cpu);
/*
* Extract header length, payload length and timestamp from event
* record. Used by buffer iterators. Timestamp is only used by channel
* iterator.
*/
- void (*record_get) (const struct lib_ring_buffer_config *config,
- struct channel *chan, struct lib_ring_buffer *buf,
+ void (*record_get) (const struct lttng_kernel_ring_buffer_config *config,
+ struct lttng_kernel_ring_buffer_channel *chan, struct lttng_kernel_ring_buffer *buf,
size_t offset, size_t *header_len,
size_t *payload_len, u64 *timestamp);
};
* RING_BUFFER_WAKEUP_NONE does not perform any wakeup whatsoever. The client
* has the responsibility to perform wakeups.
*/
-struct lib_ring_buffer_config {
+struct lttng_kernel_ring_buffer_config {
enum {
RING_BUFFER_ALLOC_PER_CPU,
RING_BUFFER_ALLOC_GLOBAL,
* 0 and 64 disable the timestamp compression scheme.
*/
unsigned int tsc_bits;
- struct lib_ring_buffer_client_cb cb;
+ struct lttng_kernel_ring_buffer_client_cb cb;
+};
+
+/*
+ * ring buffer private context
+ *
+ * Private context passed to lib_ring_buffer_reserve(), lib_ring_buffer_commit(),
+ * lib_ring_buffer_try_discard_reserve(), lib_ring_buffer_align_ctx() and
+ * lib_ring_buffer_write().
+ *
+ * Get struct lttng_kernel_ring_buffer_ctx parent with container_of().
+ */
+
+struct lttng_kernel_ring_buffer_ctx_private {
+ /* input received by lib_ring_buffer_reserve(). */
+ struct lttng_kernel_ring_buffer_channel *chan; /* ring buffer channel */
+
+ /* output from lib_ring_buffer_reserve() */
+ int reserve_cpu; /* processor id updated by the reserve */
+ size_t slot_size; /* size of the reserved slot */
+ unsigned long buf_offset; /* offset following the record header */
+ unsigned long pre_offset; /*
+ * Initial offset position _before_
+ * the record is written. Positioned
+ * prior to record header alignment
+ * padding.
+ */
+ u64 tsc; /* time-stamp counter value */
+ unsigned int rflags; /* reservation flags */
+
+ struct lttng_kernel_ring_buffer *buf; /*
+ * buffer corresponding to processor id
+ * for this channel
+ */
+ struct lttng_kernel_ring_buffer_backend_pages *backend_pages;
+
+ /*
+ * Records lost counts are only loaded into these fields before
+ * reserving the last bytes from the ring buffer.
+ */
+ unsigned long records_lost_full;
+ unsigned long records_lost_wrap;
+ unsigned long records_lost_big;
};
/*
* lib_ring_buffer_try_discard_reserve(), lib_ring_buffer_align_ctx() and
* lib_ring_buffer_write().
*/
-struct lib_ring_buffer_ctx {
+struct lttng_kernel_ring_buffer_ctx {
+ /* Private ring buffer context, set by reserve callback. */
+ struct lttng_kernel_ring_buffer_ctx_private priv;
+
/* input received by lib_ring_buffer_reserve(), saved here. */
- struct channel *chan; /* channel */
- void *priv; /* client private data */
+ void *client_priv; /* Ring buffer client private data */
+
size_t data_size; /* size of payload */
int largest_align; /*
* alignment of the largest element
* in the payload
*/
- int cpu; /* processor id */
-
- /* output from lib_ring_buffer_reserve() */
- struct lib_ring_buffer *buf; /*
- * buffer corresponding to processor id
- * for this channel
- */
- size_t slot_size; /* size of the reserved slot */
- unsigned long buf_offset; /* offset following the record header */
- unsigned long pre_offset; /*
- * Initial offset position _before_
- * the record is written. Positioned
- * prior to record header alignment
- * padding.
- */
- u64 tsc; /* time-stamp counter value */
- unsigned int rflags; /* reservation flags */
- /* Cache backend pages pointer chasing. */
- struct lib_ring_buffer_backend_pages *backend_pages;
+ struct lttng_kernel_probe_ctx *probe_ctx; /* Probe context */
};
/**
* lib_ring_buffer_ctx_init - initialize ring buffer context
* @ctx: ring buffer context to initialize
- * @chan: channel
- * @priv: client private data
+ * @client_priv: client private data
* @data_size: size of record data payload. It must be greater than 0.
 * @largest_align: largest alignment within data payload types
+ * @probe_ctx: probe context attached to the event record
- * @cpu: processor id
*/
static inline
-void lib_ring_buffer_ctx_init(struct lib_ring_buffer_ctx *ctx,
- struct channel *chan,
+void lib_ring_buffer_ctx_init(struct lttng_kernel_ring_buffer_ctx *ctx,
+ void *client_priv,
size_t data_size, int largest_align,
- int cpu, void *priv)
+ struct lttng_kernel_probe_ctx *probe_ctx)
{
- ctx->chan = chan;
- ctx->priv = priv;
+ ctx->client_priv = client_priv;
ctx->data_size = data_size;
ctx->largest_align = largest_align;
- ctx->cpu = cpu;
- ctx->rflags = 0;
- ctx->backend_pages = NULL;
+ ctx->probe_ctx = probe_ctx;
}
/*
* @ctx: ring buffer context.
*/
static inline
-void lib_ring_buffer_align_ctx(struct lib_ring_buffer_ctx *ctx,
+void lib_ring_buffer_align_ctx(struct lttng_kernel_ring_buffer_ctx *ctx,
size_t alignment)
{
- ctx->buf_offset += lib_ring_buffer_align(ctx->buf_offset,
+ ctx->priv.buf_offset += lib_ring_buffer_align(ctx->priv.buf_offset,
alignment);
}
* Used internally to check for valid configurations at channel creation.
*/
static inline
-int lib_ring_buffer_check_config(const struct lib_ring_buffer_config *config,
+int lib_ring_buffer_check_config(const struct lttng_kernel_ring_buffer_config *config,
unsigned int switch_timer_interval,
unsigned int read_timer_interval)
{