/* SPDX-License-Identifier: (GPL-2.0 OR LGPL-2.1)
 *
 * lib/ringbuffer/config.h
 *
 * Ring buffer configuration header. Note: after declaring the standard inline
 * functions, clients should also include linux/ringbuffer/api.h.
 *
 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#ifndef _LIB_RING_BUFFER_CONFIG_H
#define _LIB_RING_BUFFER_CONFIG_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/errno.h>
#include <lib/align.h>
#include <lttng-tracer-core.h>

struct lib_ring_buffer;
struct channel;
struct lib_ring_buffer_config;
struct lib_ring_buffer_ctx;

/*
 * Ring buffer client callbacks. Only used by the slow path, never on the fast
 * path. For the fast path, record_header_size() and ring_buffer_clock_read()
 * should also be provided as static inline functions (an example pairing is
 * sketched after this structure). These may simply return 0 if not used by
 * the client.
 */
struct lib_ring_buffer_client_cb {
	/* Mandatory callbacks */

	/* A static inline version is also required for fast path */
	u64 (*ring_buffer_clock_read) (struct channel *chan);
	size_t (*record_header_size) (const struct lib_ring_buffer_config *config,
				      struct channel *chan, size_t offset,
				      size_t *pre_header_padding,
				      struct lib_ring_buffer_ctx *ctx,
				      void *client_ctx);

	/* Slow path only, at subbuffer switch */
	size_t (*subbuffer_header_size) (void);
	void (*buffer_begin) (struct lib_ring_buffer *buf, u64 tsc,
			      unsigned int subbuf_idx);
	void (*buffer_end) (struct lib_ring_buffer *buf, u64 tsc,
			    unsigned int subbuf_idx, unsigned long data_size);

	/* Optional callbacks (can be set to NULL) */

	/* Called at buffer creation/finalize */
	int (*buffer_create) (struct lib_ring_buffer *buf, void *priv,
			      int cpu, const char *name);
	/*
	 * Clients should guarantee that no new reader handle can be opened
	 * after finalize.
	 */
	void (*buffer_finalize) (struct lib_ring_buffer *buf, void *priv, int cpu);

	/*
	 * Extract header length, payload length and timestamp from event
	 * record. Used by buffer iterators. Timestamp is only used by the
	 * channel iterator.
	 */
	void (*record_get) (const struct lib_ring_buffer_config *config,
			    struct channel *chan, struct lib_ring_buffer *buf,
			    size_t offset, size_t *header_len,
			    size_t *payload_len, u64 *timestamp);
};
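
/*
 * Example: the fast-path/slow-path pairing described above, applied to the
 * clock read. The first function is the static inline fast-path version, the
 * second is the callback stored in lib_ring_buffer_client_cb. Illustrative
 * sketch only; "client_clock_read", "client_ring_buffer_clock_read" and
 * trace_clock_read64() are assumed client-side/wrapper names, not part of
 * this header.
 *
 *	static inline u64 client_clock_read(struct channel *chan)
 *	{
 *		return trace_clock_read64();
 *	}
 *
 *	static u64 client_ring_buffer_clock_read(struct channel *chan)
 *	{
 *		return client_clock_read(chan);
 *	}
 */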

/*
 * Ring buffer instance configuration.
 *
 * Declare as "static const" within the client object to ensure the inline fast
 * paths can be optimized (see the example following this structure).
 *
 * alloc/sync pairs:
 *
 * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_PER_CPU :
 * Per-cpu buffers with per-cpu synchronization. Tracing must be performed
 * with preemption disabled (lib_ring_buffer_get_cpu() and
 * lib_ring_buffer_put_cpu()).
 *
 * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_GLOBAL :
 * Per-cpu buffers with global synchronization. Tracing can be performed with
 * preemption enabled, statistically staying on the local buffers.
 *
 * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_PER_CPU :
 * Should only be used for buffers belonging to a single thread or protected
 * by mutual exclusion by the client. Note that periodic sub-buffer switching
 * should be disabled in this kind of configuration.
 *
 * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_GLOBAL :
 * Global shared buffer with global synchronization.
 *
 * wakeup:
 *
 * RING_BUFFER_WAKEUP_BY_TIMER uses per-cpu timers to poll the buffers and
 * wake up readers if data is ready. Mainly useful for tracers which do not
 * want to call into the wakeup code on the tracing path. Use in combination
 * with the "read_timer_interval" channel_create() argument.
 *
 * RING_BUFFER_WAKEUP_BY_WRITER directly wakes up readers when a subbuffer is
 * ready to read. Provides lower latency before the reader is woken up.
 * Mainly suitable for drivers.
 *
 * RING_BUFFER_WAKEUP_NONE does not perform any wakeup whatsoever. The client
 * has the responsibility to perform wakeups.
 */
struct lib_ring_buffer_config {
	enum {
		RING_BUFFER_ALLOC_PER_CPU,
		RING_BUFFER_ALLOC_GLOBAL,
	} alloc;
	enum {
		RING_BUFFER_SYNC_PER_CPU,	/* Wait-free */
		RING_BUFFER_SYNC_GLOBAL,	/* Lock-free */
	} sync;
	enum {
		RING_BUFFER_OVERWRITE,		/* Overwrite when buffer full */
		RING_BUFFER_DISCARD,		/* Discard when buffer full */
	} mode;
	enum {
		RING_BUFFER_SPLICE,
		RING_BUFFER_MMAP,
		RING_BUFFER_READ,		/* TODO */
		RING_BUFFER_ITERATOR,
		RING_BUFFER_NONE,
	} output;
	enum {
		RING_BUFFER_PAGE,
		RING_BUFFER_VMAP,		/* TODO */
		RING_BUFFER_STATIC,		/* TODO */
	} backend;
	enum {
		RING_BUFFER_NO_OOPS_CONSISTENCY,
		RING_BUFFER_OOPS_CONSISTENCY,
	} oops;
	enum {
		RING_BUFFER_IPI_BARRIER,
		RING_BUFFER_NO_IPI_BARRIER,
	} ipi;
	enum {
		RING_BUFFER_WAKEUP_BY_TIMER,	/* wake up performed by timer */
		RING_BUFFER_WAKEUP_BY_WRITER,	/*
						 * writer wakes up reader,
						 * not lock-free
						 * (takes spinlock).
						 */
	} wakeup;
	/*
	 * tsc_bits: timestamp bits saved at each record.
	 * 0 and 64 disable the timestamp compression scheme.
	 */
	unsigned int tsc_bits;
	struct lib_ring_buffer_client_cb cb;
};
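
/*
 * Example: a "static const" configuration as recommended above, for a
 * hypothetical per-cpu, overwrite-mode, mmap-output client. Illustrative
 * sketch only; the client_* callbacks are assumed client-side functions (see
 * the clock read sketch earlier) and 27 timestamp bits is an arbitrary value.
 *
 *	static const struct lib_ring_buffer_config client_config = {
 *		.cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
 *		.cb.record_header_size = client_record_header_size,
 *		.cb.subbuffer_header_size = client_packet_header_size,
 *		.cb.buffer_begin = client_buffer_begin,
 *		.cb.buffer_end = client_buffer_end,
 *		.tsc_bits = 27,
 *		.alloc = RING_BUFFER_ALLOC_PER_CPU,
 *		.sync = RING_BUFFER_SYNC_PER_CPU,
 *		.mode = RING_BUFFER_OVERWRITE,
 *		.backend = RING_BUFFER_PAGE,
 *		.output = RING_BUFFER_MMAP,
 *		.oops = RING_BUFFER_OOPS_CONSISTENCY,
 *		.ipi = RING_BUFFER_IPI_BARRIER,
 *		.wakeup = RING_BUFFER_WAKEUP_BY_TIMER,
 *	};
 */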

/*
 * ring buffer context
 *
 * Context passed to lib_ring_buffer_reserve(), lib_ring_buffer_commit(),
 * lib_ring_buffer_try_discard_reserve(), lib_ring_buffer_align_ctx() and
 * lib_ring_buffer_write().
 */
struct lib_ring_buffer_ctx {
	/* input received by lib_ring_buffer_reserve(), saved here. */
	struct channel *chan;		/* channel */
	void *priv;			/* client private data */
	size_t data_size;		/* size of payload */
	int largest_align;		/*
					 * alignment of the largest element
					 * in the payload
					 */
	int cpu;			/* processor id */

	/* output from lib_ring_buffer_reserve() */
	struct lib_ring_buffer *buf;	/*
					 * buffer corresponding to processor id
					 * for this channel
					 */
	size_t slot_size;		/* size of the reserved slot */
	unsigned long buf_offset;	/* offset following the record header */
	unsigned long pre_offset;	/*
					 * Initial offset position _before_
					 * the record is written. Positioned
					 * prior to record header alignment
					 * padding.
					 */
	u64 tsc;			/* time-stamp counter value */
	unsigned int rflags;		/* reservation flags */
	/* Cache backend pages pointer chasing. */
	struct lib_ring_buffer_backend_pages *backend_pages;
};

/**
 * lib_ring_buffer_ctx_init - initialize ring buffer context
 * @ctx: ring buffer context to initialize
 * @chan: channel
 * @priv: client private data
 * @data_size: size of record data payload. It must be greater than 0.
 * @largest_align: largest alignment within data payload types
 * @cpu: processor id
 */
static inline
void lib_ring_buffer_ctx_init(struct lib_ring_buffer_ctx *ctx,
			      struct channel *chan, void *priv,
			      size_t data_size, int largest_align,
			      int cpu)
{
	ctx->chan = chan;
	ctx->priv = priv;
	ctx->data_size = data_size;
	ctx->largest_align = largest_align;
	ctx->cpu = cpu;
	ctx->rflags = 0;
	ctx->backend_pages = NULL;
}
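
/*
 * Example: a typical record write path built on this context. Illustrative
 * sketch only; lib_ring_buffer_get_cpu()/put_cpu(), lib_ring_buffer_reserve(),
 * lib_ring_buffer_write() and lib_ring_buffer_commit() are declared by the
 * ring buffer API headers (see the note at the top of this file), and
 * "client_config", "chan" and "payload" are hypothetical client-side names.
 * The exact reserve() signature should be checked against the API header in
 * use (a trailing client_ctx argument matches the record_header_size()
 * callback above).
 *
 *	struct lib_ring_buffer_ctx ctx;
 *	int ret, cpu;
 *
 *	cpu = lib_ring_buffer_get_cpu(&client_config);
 *	if (cpu < 0)
 *		return -EPERM;
 *	lib_ring_buffer_ctx_init(&ctx, chan, NULL, sizeof(payload),
 *				 __alignof__(payload), cpu);
 *	ret = lib_ring_buffer_reserve(&client_config, &ctx, NULL);
 *	if (!ret) {
 *		lib_ring_buffer_write(&client_config, &ctx, &payload,
 *				      sizeof(payload));
 *		lib_ring_buffer_commit(&client_config, &ctx);
 *	}
 *	lib_ring_buffer_put_cpu(&client_config);
 *	return ret;
 */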

/*
 * Reservation flags.
 *
 * RING_BUFFER_RFLAG_FULL_TSC
 *
 * This flag is passed to record_header_size() and to the primitive used to
 * write the record header. It indicates that the full 64-bit time value is
 * needed in the record header. If this flag is not set, the record header
 * only needs to contain "tsc_bits" bits of time value.
 *
 * Reservation flags can be added by the client, starting from
 * "(RING_BUFFER_RFLAG_END << 0)". They can be used to pass information from
 * record_header_size() to lib_ring_buffer_write_record_header().
 */
#define RING_BUFFER_RFLAG_FULL_TSC	(1U << 0)
#define RING_BUFFER_RFLAG_END		(1U << 1)
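
/*
 * Example: how a client record_header_size() callback might honour
 * RING_BUFFER_RFLAG_FULL_TSC. Illustrative sketch only; the header layout
 * (a 32-bit word holding the compressed timestamp and event id, followed by
 * an optional full 64-bit timestamp) is a hypothetical client format, not
 * something mandated by this library.
 *
 *	static size_t client_record_header_size(
 *			const struct lib_ring_buffer_config *config,
 *			struct channel *chan, size_t offset,
 *			size_t *pre_header_padding,
 *			struct lib_ring_buffer_ctx *ctx, void *client_ctx)
 *	{
 *		size_t orig_offset = offset;
 *		size_t padding;
 *
 *		padding = lib_ring_buffer_align(offset, sizeof(u32));
 *		offset += padding;
 *		offset += sizeof(u32);
 *		if (ctx->rflags & RING_BUFFER_RFLAG_FULL_TSC) {
 *			offset += lib_ring_buffer_align(offset, sizeof(u64));
 *			offset += sizeof(u64);
 *		}
 *		*pre_header_padding = padding;
 *		return offset - orig_offset;
 *	}
 */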

#ifndef LTTNG_TRACER_CORE_H
#error "lttng-tracer-core.h is needed for RING_BUFFER_ALIGN define"
#endif

/*
 * We need to define RING_BUFFER_ALIGN_ATTR so it is known early at
 * compile-time. We have to duplicate the "config->align" information and the
 * definition here because config->align is used both in the slow and fast
 * paths, but RING_BUFFER_ALIGN_ATTR is only available for the client code.
 */
#ifdef RING_BUFFER_ALIGN

# define RING_BUFFER_ALIGN_ATTR		/* Default arch alignment */

/*
 * Calculate the offset needed to align the type.
 * size_of_type must be non-zero.
 */
static inline
unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
{
	return offset_align(align_drift, size_of_type);
}

#else

# define RING_BUFFER_ALIGN_ATTR __attribute__((packed))

/*
 * Calculate the offset needed to align the type.
 * size_of_type must be non-zero.
 */
static inline
unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
{
	return 0;
}

#endif
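
/*
 * For example, with RING_BUFFER_ALIGN defined and the usual offset_align()
 * semantics from lib/align.h (padding needed to reach the next naturally
 * aligned offset), lib_ring_buffer_align(5, 8) returns 3: an 8-byte field
 * reserved at buffer offset 5 needs 3 bytes of padding. Without
 * RING_BUFFER_ALIGN, records are packed and the function always returns 0.
 */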

/**
 * lib_ring_buffer_align_ctx - Align context offset on "alignment"
 * @ctx: ring buffer context.
 * @alignment: alignment to apply to the context buffer offset.
 */
static inline
void lib_ring_buffer_align_ctx(struct lib_ring_buffer_ctx *ctx,
			       size_t alignment)
{
	ctx->buf_offset += lib_ring_buffer_align(ctx->buf_offset,
						 alignment);
}
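
/*
 * Example: aligning the context between two payload fields before writing
 * them. Illustrative sketch only; lib_ring_buffer_write() is declared by the
 * ring buffer backend API, and "client_config", "ctx", "id" and "timestamp"
 * are hypothetical client-side names for an already-reserved context.
 *
 *	lib_ring_buffer_align_ctx(&ctx, __alignof__(id));
 *	lib_ring_buffer_write(&client_config, &ctx, &id, sizeof(id));
 *	lib_ring_buffer_align_ctx(&ctx, __alignof__(timestamp));
 *	lib_ring_buffer_write(&client_config, &ctx, &timestamp,
 *			      sizeof(timestamp));
 */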

/*
 * lib_ring_buffer_check_config() returns 0 on success.
 * Used internally to check for valid configurations at channel creation.
 */
static inline
int lib_ring_buffer_check_config(const struct lib_ring_buffer_config *config,
				 unsigned int switch_timer_interval,
				 unsigned int read_timer_interval)
{
	if (config->alloc == RING_BUFFER_ALLOC_GLOBAL
	    && config->sync == RING_BUFFER_SYNC_PER_CPU
	    && switch_timer_interval)
		return -EINVAL;
	return 0;
}

#include <wrapper/ringbuffer/vatomic.h>

#endif /* _LIB_RING_BUFFER_CONFIG_H */