/*
 * lttng-ust.git: include/lttng/ringbuffer-config.h
 * (page extracted from gitweb; blame commit subject:
 * "Tracepoint: evaluate arguments within test block", commit a6352fd4)
 */
1#ifndef _LINUX_RING_BUFFER_CONFIG_H
2#define _LINUX_RING_BUFFER_CONFIG_H
3
4/*
5 * linux/ringbuffer/config.h
6 *
7 * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 *
9 * Ring buffer configuration header. Note: after declaring the standard inline
10 * functions, clients should also include linux/ringbuffer/api.h.
11 *
12 * Dual LGPL v2.1/GPL v2 license.
13 */
14
15#include <errno.h>
4318ae1b
MD
16#include "lttng/ust-tracer.h"
17#include "lttng/usterr-signal-safe.h"
18#include "lttng/kcompat/kcompat.h"
19#include "lttng/align.h"
a6352fd4 20
4cfec15c 21struct lttng_ust_lib_ring_buffer;
a6352fd4 22struct channel;
4cfec15c
MD
23struct lttng_ust_lib_ring_buffer_config;
24struct lttng_ust_lib_ring_buffer_ctx;
38fae1d3 25struct lttng_ust_shm_handle *handle;
a6352fd4
MD
26
27/*
28 * Ring buffer client callbacks. Only used by slow path, never on fast path.
29 * For the fast path, record_header_size(), ring_buffer_clock_read() should be
30 * provided as inline functions too. These may simply return 0 if not used by
31 * the client.
32 */
4cfec15c 33struct lttng_ust_lib_ring_buffer_client_cb {
a6352fd4
MD
34 /* Mandatory callbacks */
35
36 /* A static inline version is also required for fast path */
37 u64 (*ring_buffer_clock_read) (struct channel *chan);
4cfec15c 38 size_t (*record_header_size) (const struct lttng_ust_lib_ring_buffer_config *config,
a6352fd4
MD
39 struct channel *chan, size_t offset,
40 size_t *pre_header_padding,
4cfec15c 41 struct lttng_ust_lib_ring_buffer_ctx *ctx);
a6352fd4
MD
42
43 /* Slow path only, at subbuffer switch */
44 size_t (*subbuffer_header_size) (void);
4cfec15c 45 void (*buffer_begin) (struct lttng_ust_lib_ring_buffer *buf, u64 tsc,
1d498196 46 unsigned int subbuf_idx,
38fae1d3 47 struct lttng_ust_shm_handle *handle);
4cfec15c 48 void (*buffer_end) (struct lttng_ust_lib_ring_buffer *buf, u64 tsc,
1d498196 49 unsigned int subbuf_idx, unsigned long data_size,
38fae1d3 50 struct lttng_ust_shm_handle *handle);
a6352fd4
MD
51
52 /* Optional callbacks (can be set to NULL) */
53
54 /* Called at buffer creation/finalize */
4cfec15c 55 int (*buffer_create) (struct lttng_ust_lib_ring_buffer *buf, void *priv,
1d498196 56 int cpu, const char *name,
38fae1d3 57 struct lttng_ust_shm_handle *handle);
a6352fd4
MD
58 /*
59 * Clients should guarantee that no new reader handle can be opened
60 * after finalize.
61 */
4cfec15c 62 void (*buffer_finalize) (struct lttng_ust_lib_ring_buffer *buf,
1d498196 63 void *priv, int cpu,
38fae1d3 64 struct lttng_ust_shm_handle *handle);
a6352fd4
MD
65
66 /*
67 * Extract header length, payload length and timestamp from event
68 * record. Used by buffer iterators. Timestamp is only used by channel
69 * iterator.
70 */
4cfec15c
MD
71 void (*record_get) (const struct lttng_ust_lib_ring_buffer_config *config,
72 struct channel *chan, struct lttng_ust_lib_ring_buffer *buf,
a6352fd4 73 size_t offset, size_t *header_len,
1d498196 74 size_t *payload_len, u64 *timestamp,
38fae1d3 75 struct lttng_ust_shm_handle *handle);
a6352fd4
MD
76};
77
78/*
79 * Ring buffer instance configuration.
80 *
81 * Declare as "static const" within the client object to ensure the inline fast
82 * paths can be optimized.
83 *
84 * alloc/sync pairs:
85 *
86 * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_PER_CPU :
87 * Per-cpu buffers with per-cpu synchronization. Tracing must be performed
88 * with preemption disabled (lib_ring_buffer_get_cpu() and
89 * lib_ring_buffer_put_cpu()).
90 *
91 * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_GLOBAL :
92 * Per-cpu buffer with global synchronization. Tracing can be performed with
93 * preemption enabled, statistically stays on the local buffers.
94 *
95 * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_PER_CPU :
96 * Should only be used for buffers belonging to a single thread or protected
97 * by mutual exclusion by the client. Note that periodical sub-buffer switch
98 * should be disabled in this kind of configuration.
99 *
100 * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_GLOBAL :
101 * Global shared buffer with global synchronization.
102 *
103 * wakeup:
104 *
105 * RING_BUFFER_WAKEUP_BY_TIMER uses per-cpu deferrable timers to poll the
106 * buffers and wake up readers if data is ready. Mainly useful for tracers which
107 * don't want to call into the wakeup code on the tracing path. Use in
108 * combination with "read_timer_interval" channel_create() argument.
109 *
110 * RING_BUFFER_WAKEUP_BY_WRITER directly wakes up readers when a subbuffer is
111 * ready to read. Lower latencies before the reader is woken up. Mainly suitable
112 * for drivers.
113 *
114 * RING_BUFFER_WAKEUP_NONE does not perform any wakeup whatsoever. The client
115 * has the responsibility to perform wakeups.
116 */
4cfec15c 117struct lttng_ust_lib_ring_buffer_config {
a6352fd4
MD
118 enum {
119 RING_BUFFER_ALLOC_PER_CPU,
120 RING_BUFFER_ALLOC_GLOBAL,
121 } alloc;
122 enum {
123 RING_BUFFER_SYNC_PER_CPU, /* Wait-free */
124 RING_BUFFER_SYNC_GLOBAL, /* Lock-free */
125 } sync;
126 enum {
127 RING_BUFFER_OVERWRITE, /* Overwrite when buffer full */
128 RING_BUFFER_DISCARD, /* Discard when buffer full */
129 } mode;
130 enum {
131 RING_BUFFER_SPLICE,
132 RING_BUFFER_MMAP,
133 RING_BUFFER_READ, /* TODO */
134 RING_BUFFER_ITERATOR,
135 RING_BUFFER_NONE,
136 } output;
137 enum {
138 RING_BUFFER_PAGE,
139 RING_BUFFER_VMAP, /* TODO */
140 RING_BUFFER_STATIC, /* TODO */
141 } backend;
142 enum {
143 RING_BUFFER_NO_OOPS_CONSISTENCY,
144 RING_BUFFER_OOPS_CONSISTENCY,
145 } oops;
146 enum {
147 RING_BUFFER_IPI_BARRIER,
148 RING_BUFFER_NO_IPI_BARRIER,
149 } ipi;
150 enum {
151 RING_BUFFER_WAKEUP_BY_TIMER, /* wake up performed by timer */
152 RING_BUFFER_WAKEUP_BY_WRITER, /*
153 * writer wakes up reader,
154 * not lock-free
155 * (takes spinlock).
156 */
157 } wakeup;
158 /*
159 * tsc_bits: timestamp bits saved at each record.
160 * 0 and 64 disable the timestamp compression scheme.
161 */
162 unsigned int tsc_bits;
4cfec15c 163 struct lttng_ust_lib_ring_buffer_client_cb cb;
c1fca457
MD
164 /*
165 * client_type is used by the consumer process (which is in a
166 * different address space) to lookup the appropriate client
167 * callbacks and update the cb pointers.
168 */
169 int client_type;
a6352fd4
MD
170};
171
172/*
173 * ring buffer context
174 *
175 * Context passed to lib_ring_buffer_reserve(), lib_ring_buffer_commit(),
176 * lib_ring_buffer_try_discard_reserve(), lib_ring_buffer_align_ctx() and
177 * lib_ring_buffer_write().
178 */
4cfec15c 179struct lttng_ust_lib_ring_buffer_ctx {
a6352fd4
MD
180 /* input received by lib_ring_buffer_reserve(), saved here. */
181 struct channel *chan; /* channel */
182 void *priv; /* client private data */
38fae1d3 183 struct lttng_ust_shm_handle *handle; /* shared-memory handle */
a6352fd4
MD
184 size_t data_size; /* size of payload */
185 int largest_align; /*
186 * alignment of the largest element
187 * in the payload
188 */
189 int cpu; /* processor id */
190
191 /* output from lib_ring_buffer_reserve() */
4cfec15c 192 struct lttng_ust_lib_ring_buffer *buf; /*
a6352fd4
MD
193 * buffer corresponding to processor id
194 * for this channel
195 */
196 size_t slot_size; /* size of the reserved slot */
197 unsigned long buf_offset; /* offset following the record header */
198 unsigned long pre_offset; /*
199 * Initial offset position _before_
200 * the record is written. Positioned
201 * prior to record header alignment
202 * padding.
203 */
204 u64 tsc; /* time-stamp counter value */
205 unsigned int rflags; /* reservation flags */
206};
207
208/**
209 * lib_ring_buffer_ctx_init - initialize ring buffer context
210 * @ctx: ring buffer context to initialize
211 * @chan: channel
212 * @priv: client private data
213 * @data_size: size of record data payload
214 * @largest_align: largest alignment within data payload types
215 * @cpu: processor id
216 */
217static inline
4cfec15c 218void lib_ring_buffer_ctx_init(struct lttng_ust_lib_ring_buffer_ctx *ctx,
a6352fd4
MD
219 struct channel *chan, void *priv,
220 size_t data_size, int largest_align,
38fae1d3 221 int cpu, struct lttng_ust_shm_handle *handle)
a6352fd4
MD
222{
223 ctx->chan = chan;
224 ctx->priv = priv;
225 ctx->data_size = data_size;
226 ctx->largest_align = largest_align;
227 ctx->cpu = cpu;
228 ctx->rflags = 0;
1d498196 229 ctx->handle = handle;
a6352fd4
MD
230}
231
232/*
233 * Reservation flags.
234 *
235 * RING_BUFFER_RFLAG_FULL_TSC
236 *
237 * This flag is passed to record_header_size() and to the primitive used to
238 * write the record header. It indicates that the full 64-bit time value is
239 * needed in the record header. If this flag is not set, the record header needs
240 * only to contain "tsc_bits" bit of time value.
241 *
242 * Reservation flags can be added by the client, starting from
243 * "(RING_BUFFER_FLAGS_END << 0)". It can be used to pass information from
244 * record_header_size() to lib_ring_buffer_write_record_header().
245 */
246#define RING_BUFFER_RFLAG_FULL_TSC (1U << 0)
247#define RING_BUFFER_RFLAG_END (1U << 1)
248
249/*
250 * We need to define RING_BUFFER_ALIGN_ATTR so it is known early at
251 * compile-time. We have to duplicate the "config->align" information and the
252 * definition here because config->align is used both in the slow and fast
253 * paths, but RING_BUFFER_ALIGN_ATTR is only available for the client code.
254 */
255#ifdef RING_BUFFER_ALIGN
256
257# define RING_BUFFER_ALIGN_ATTR /* Default arch alignment */
258
259/*
260 * Calculate the offset needed to align the type.
261 * size_of_type must be non-zero.
262 */
263static inline
264unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
265{
266 return offset_align(align_drift, size_of_type);
267}
268
269#else
270
271# define RING_BUFFER_ALIGN_ATTR __attribute__((packed))
272
273/*
274 * Calculate the offset needed to align the type.
275 * size_of_type must be non-zero.
276 */
277static inline
278unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
279{
280 return 0;
281}
282
283#endif
284
285/**
286 * lib_ring_buffer_align_ctx - Align context offset on "alignment"
287 * @ctx: ring buffer context.
288 */
289static inline
4cfec15c 290void lib_ring_buffer_align_ctx(struct lttng_ust_lib_ring_buffer_ctx *ctx,
a6352fd4
MD
291 size_t alignment)
292{
293 ctx->buf_offset += lib_ring_buffer_align(ctx->buf_offset,
294 alignment);
295}
296
297/*
298 * lib_ring_buffer_check_config() returns 0 on success.
299 * Used internally to check for valid configurations at channel creation.
300 */
301static inline
4cfec15c 302int lib_ring_buffer_check_config(const struct lttng_ust_lib_ring_buffer_config *config,
a6352fd4
MD
303 unsigned int switch_timer_interval,
304 unsigned int read_timer_interval)
305{
306 if (config->alloc == RING_BUFFER_ALLOC_GLOBAL
307 && config->sync == RING_BUFFER_SYNC_PER_CPU
308 && switch_timer_interval)
309 return -EINVAL;
310 return 0;
311}
312
4318ae1b 313#include <lttng/vatomic.h>
a6352fd4
MD
314
315#endif /* _LINUX_RING_BUFFER_CONFIG_H */
/* This page took 0.03604 seconds and 4 git commands to generate. */