lttng-ust: include/lttng/ringbuffer-config.h
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright (C) 2010 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Ring buffer configuration header. Note: after declaring the standard inline
 * functions, clients should also include linux/ringbuffer/api.h.
 */

#ifndef _LTTNG_RING_BUFFER_CONFIG_H
#define _LTTNG_RING_BUFFER_CONFIG_H

#include <errno.h>
#include "lttng/ust-tracer.h"
#include <stdint.h>
#include <stddef.h>
#include <urcu/arch.h>
#include <string.h>
#include "lttng/align.h"
#include <lttng/ust-compiler.h>

struct lttng_ust_lib_ring_buffer;
struct channel;
struct lttng_ust_lib_ring_buffer_config;
struct lttng_ust_lib_ring_buffer_ctx;
struct lttng_ust_shm_handle;

/*
 * Ring buffer client callbacks. Only used by the slow path, never on the fast
 * path. For the fast path, record_header_size() and ring_buffer_clock_read()
 * should also be provided as inline functions. These may simply return 0 if
 * not used by the client.
 */
struct lttng_ust_lib_ring_buffer_client_cb {
	/* Mandatory callbacks */

	/* A static inline version is also required for fast path */
	uint64_t (*ring_buffer_clock_read) (struct channel *chan);
	size_t (*record_header_size) (const struct lttng_ust_lib_ring_buffer_config *config,
				struct channel *chan, size_t offset,
				size_t *pre_header_padding,
				struct lttng_ust_lib_ring_buffer_ctx *ctx,
				void *client_ctx);

	/* Slow path only, at subbuffer switch */
	size_t (*subbuffer_header_size) (void);
	void (*buffer_begin) (struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc,
				unsigned int subbuf_idx,
				struct lttng_ust_shm_handle *handle);
	void (*buffer_end) (struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc,
				unsigned int subbuf_idx, unsigned long data_size,
				struct lttng_ust_shm_handle *handle);

	/* Optional callbacks (can be set to NULL) */

	/* Called at buffer creation/finalize */
	int (*buffer_create) (struct lttng_ust_lib_ring_buffer *buf, void *priv,
				int cpu, const char *name,
				struct lttng_ust_shm_handle *handle);
	/*
	 * Clients should guarantee that no new reader handle can be opened
	 * after finalize.
	 */
	void (*buffer_finalize) (struct lttng_ust_lib_ring_buffer *buf,
				void *priv, int cpu,
				struct lttng_ust_shm_handle *handle);

	/*
	 * Extract header length, payload length and timestamp from event
	 * record. Used by buffer iterators. Timestamp is only used by channel
	 * iterator.
	 */
	void (*record_get) (const struct lttng_ust_lib_ring_buffer_config *config,
				struct channel *chan, struct lttng_ust_lib_ring_buffer *buf,
				size_t offset, size_t *header_len,
				size_t *payload_len, uint64_t *timestamp,
				struct lttng_ust_shm_handle *handle);
	/*
	 * Offset and size of content size field in client.
	 */
	void (*content_size_field) (const struct lttng_ust_lib_ring_buffer_config *config,
				size_t *offset, size_t *length);
	void (*packet_size_field) (const struct lttng_ust_lib_ring_buffer_config *config,
				size_t *offset, size_t *length);
};
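
/*
 * Illustrative sketch (not part of the original header): a minimal set of
 * mandatory callbacks for a hypothetical client. The "client_"-prefixed names
 * are placeholders; a real client must match the prototypes above and also
 * provide static inline fast-path versions of record_header_size() and
 * ring_buffer_clock_read().
 *
 *	static uint64_t client_ring_buffer_clock_read(struct channel *chan)
 *	{
 *		return 0;	// hypothetical: timestamps unused by this client
 *	}
 *
 *	static size_t client_record_header_size(
 *			const struct lttng_ust_lib_ring_buffer_config *config,
 *			struct channel *chan, size_t offset,
 *			size_t *pre_header_padding,
 *			struct lttng_ust_lib_ring_buffer_ctx *ctx,
 *			void *client_ctx)
 *	{
 *		return 0;	// hypothetical: records carry no extra header
 *	}
 *
 *	static const struct lttng_ust_lib_ring_buffer_client_cb client_cb = {
 *		.ring_buffer_clock_read = client_ring_buffer_clock_read,
 *		.record_header_size = client_record_header_size,
 *		// subbuffer_header_size, buffer_begin and buffer_end are also
 *		// mandatory; elided here for brevity.
 *		.buffer_create = NULL,	// optional callbacks may be NULL
 *		.buffer_finalize = NULL,
 *	};
 */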

/*
 * Ring buffer instance configuration.
 *
 * Declare as "static const" within the client object to ensure the inline fast
 * paths can be optimized.
 *
 * alloc/sync pairs:
 *
 * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_PER_CPU :
 * Per-cpu buffers with per-cpu synchronization. Tracing must be performed
 * with preemption disabled (lib_ring_buffer_get_cpu() and
 * lib_ring_buffer_put_cpu()).
 *
 * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_GLOBAL :
 * Per-cpu buffer with global synchronization. Tracing can be performed with
 * preemption enabled; it statistically stays on the local buffers.
 *
 * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_PER_CPU :
 * Should only be used for buffers belonging to a single thread or protected
 * by mutual exclusion by the client. Note that periodic sub-buffer switch
 * should be disabled in this kind of configuration.
 *
 * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_GLOBAL :
 * Global shared buffer with global synchronization.
 *
 * wakeup:
 *
 * RING_BUFFER_WAKEUP_BY_TIMER uses per-cpu deferrable timers to poll the
 * buffers and wake up readers if data is ready. Mainly useful for tracers which
 * don't want to call into the wakeup code on the tracing path. Use in
 * combination with the "read_timer_interval" channel_create() argument.
 *
 * RING_BUFFER_WAKEUP_BY_WRITER directly wakes up readers when a subbuffer is
 * ready to read. Lower latencies before the reader is woken up. Mainly suitable
 * for drivers.
 *
 * RING_BUFFER_WAKEUP_NONE does not perform any wakeup whatsoever. The client
 * has the responsibility to perform wakeups.
 */
#define LTTNG_UST_RING_BUFFER_CONFIG_PADDING	20

enum lttng_ust_lib_ring_buffer_alloc_types {
	RING_BUFFER_ALLOC_PER_CPU,
	RING_BUFFER_ALLOC_GLOBAL,
};

enum lttng_ust_lib_ring_buffer_sync_types {
	RING_BUFFER_SYNC_PER_CPU,	/* Wait-free */
	RING_BUFFER_SYNC_GLOBAL,	/* Lock-free */
};

enum lttng_ust_lib_ring_buffer_mode_types {
	RING_BUFFER_OVERWRITE = 0,	/* Overwrite when buffer full */
	RING_BUFFER_DISCARD = 1,	/* Discard when buffer full */
};

enum lttng_ust_lib_ring_buffer_output_types {
	RING_BUFFER_SPLICE,
	RING_BUFFER_MMAP,
	RING_BUFFER_READ,		/* TODO */
	RING_BUFFER_ITERATOR,
	RING_BUFFER_NONE,
};

enum lttng_ust_lib_ring_buffer_backend_types {
	RING_BUFFER_PAGE,
	RING_BUFFER_VMAP,		/* TODO */
	RING_BUFFER_STATIC,		/* TODO */
};

enum lttng_ust_lib_ring_buffer_oops_types {
	RING_BUFFER_NO_OOPS_CONSISTENCY,
	RING_BUFFER_OOPS_CONSISTENCY,
};

enum lttng_ust_lib_ring_buffer_ipi_types {
	RING_BUFFER_IPI_BARRIER,
	RING_BUFFER_NO_IPI_BARRIER,
};

enum lttng_ust_lib_ring_buffer_wakeup_types {
	RING_BUFFER_WAKEUP_BY_TIMER,	/* wake up performed by timer */
	RING_BUFFER_WAKEUP_BY_WRITER,	/*
					 * writer wakes up reader,
					 * not lock-free
					 * (takes spinlock).
					 */
};

struct lttng_ust_lib_ring_buffer_config {
	enum lttng_ust_lib_ring_buffer_alloc_types alloc;
	enum lttng_ust_lib_ring_buffer_sync_types sync;
	enum lttng_ust_lib_ring_buffer_mode_types mode;
	enum lttng_ust_lib_ring_buffer_output_types output;
	enum lttng_ust_lib_ring_buffer_backend_types backend;
	enum lttng_ust_lib_ring_buffer_oops_types oops;
	enum lttng_ust_lib_ring_buffer_ipi_types ipi;
	enum lttng_ust_lib_ring_buffer_wakeup_types wakeup;
	/*
	 * tsc_bits: timestamp bits saved at each record.
	 * 0 and 64 disable the timestamp compression scheme.
	 */
	unsigned int tsc_bits;
	struct lttng_ust_lib_ring_buffer_client_cb cb;
	/*
	 * client_type is used by the consumer process (which is in a
	 * different address space) to look up the appropriate client
	 * callbacks and update the cb pointers.
	 */
	int client_type;
	int _unused1;
	const struct lttng_ust_lib_ring_buffer_client_cb *cb_ptr;
	char padding[LTTNG_UST_RING_BUFFER_CONFIG_PADDING];
};
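
/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * "static const" client configuration in the spirit of the lttng-ust ring
 * buffer clients. The field values and the "client_"-prefixed callbacks are
 * assumptions chosen for the example, not a prescribed configuration.
 *
 *	static const struct lttng_ust_lib_ring_buffer_config client_config = {
 *		.cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
 *		.cb.record_header_size = client_record_header_size,
 *		.tsc_bits = 27,		// assumption: 27-bit timestamp compression
 *		.alloc = RING_BUFFER_ALLOC_PER_CPU,
 *		.sync = RING_BUFFER_SYNC_GLOBAL,
 *		.mode = RING_BUFFER_DISCARD,
 *		.backend = RING_BUFFER_PAGE,
 *		.output = RING_BUFFER_MMAP,
 *		.oops = RING_BUFFER_OOPS_CONSISTENCY,
 *		.ipi = RING_BUFFER_NO_IPI_BARRIER,
 *		.wakeup = RING_BUFFER_WAKEUP_BY_WRITER,
 *		.client_type = 0,	// placeholder client identifier
 *	};
 */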

/*
 * ring buffer context
 *
 * Context passed to lib_ring_buffer_reserve(), lib_ring_buffer_commit(),
 * lib_ring_buffer_try_discard_reserve(), lib_ring_buffer_align_ctx() and
 * lib_ring_buffer_write().
 *
 * IMPORTANT: this structure is part of the ABI between the probe and
 * UST. Fields must only be added at the end, never reordered, never
 * removed.
 */
#define LTTNG_UST_RING_BUFFER_CTX_PADDING	\
		(24 - sizeof(int) - sizeof(void *) - sizeof(void *))
struct lttng_ust_lib_ring_buffer_ctx {
	/* input received by lib_ring_buffer_reserve(), saved here. */
	struct channel *chan;		/* channel */
	void *priv;			/* client private data */
	struct lttng_ust_shm_handle *handle;	/* shared-memory handle */
	size_t data_size;		/* size of payload */
	int largest_align;		/*
					 * alignment of the largest element
					 * in the payload
					 */
	int cpu;			/* processor id */

	/* output from lib_ring_buffer_reserve() */
	struct lttng_ust_lib_ring_buffer *buf;	/*
					 * buffer corresponding to processor id
					 * for this channel
					 */
	size_t slot_size;		/* size of the reserved slot */
	unsigned long buf_offset;	/* offset following the record header */
	unsigned long pre_offset;	/*
					 * Initial offset position _before_
					 * the record is written. Positioned
					 * prior to record header alignment
					 * padding.
					 */
	uint64_t tsc;			/* time-stamp counter value */
	unsigned int rflags;		/* reservation flags */
	/*
	 * The field ctx_len is the length of struct
	 * lttng_ust_lib_ring_buffer_ctx as known by the user of
	 * lib_ring_buffer_ctx_init.
	 */
	unsigned int ctx_len;
	void *ip;			/* caller ip address */
	void *priv2;			/* 2nd priv data */
	char padding2[LTTNG_UST_RING_BUFFER_CTX_PADDING];
	/*
	 * This is the end of the initial fields expected by the original ABI
	 * between probes and UST. Only the fields above can be used if
	 * ctx_len is 0. Use the value of ctx_len to find out which of the
	 * following fields may be used.
	 */
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
};

/**
 * lib_ring_buffer_ctx_init - initialize ring buffer context
 * @ctx: ring buffer context to initialize
 * @chan: channel
 * @priv: client private data
 * @data_size: size of record data payload
 * @largest_align: largest alignment within data payload types
 * @cpu: processor id
 * @handle: ring buffer shared-memory handle
 * @priv2: second client private data
 */
static inline lttng_ust_notrace
void lib_ring_buffer_ctx_init(struct lttng_ust_lib_ring_buffer_ctx *ctx,
			struct channel *chan, void *priv,
			size_t data_size, int largest_align,
			int cpu, struct lttng_ust_shm_handle *handle,
			void *priv2);
static inline
void lib_ring_buffer_ctx_init(struct lttng_ust_lib_ring_buffer_ctx *ctx,
			struct channel *chan, void *priv,
			size_t data_size, int largest_align,
			int cpu, struct lttng_ust_shm_handle *handle,
			void *priv2)
{
	ctx->chan = chan;
	ctx->priv = priv;
	ctx->data_size = data_size;
	ctx->largest_align = largest_align;
	ctx->cpu = cpu;
	ctx->rflags = 0;
	ctx->handle = handle;
	ctx->ctx_len = sizeof(struct lttng_ust_lib_ring_buffer_ctx);
	ctx->ip = NULL;
	ctx->priv2 = priv2;
	memset(ctx->padding2, 0, LTTNG_UST_RING_BUFFER_CTX_PADDING);
}
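
/*
 * Illustrative sketch (not part of the original header): a typical record
 * reservation flow from a probe, assuming a channel "chan" and shared-memory
 * handle "handle" obtained at channel creation, and the hypothetical
 * client_config above. The exact prototypes of lib_ring_buffer_reserve(),
 * lib_ring_buffer_write() and lib_ring_buffer_commit() are declared by the
 * ring buffer frontend and backend headers; this only shows how the context
 * ties them together.
 *
 *	struct lttng_ust_lib_ring_buffer_ctx ctx;
 *	uint32_t payload = 42;
 *	int ret;
 *
 *	lib_ring_buffer_ctx_init(&ctx, chan, NULL, sizeof(payload),
 *				 lttng_alignof(payload), -1, handle, NULL);
 *	// cpu = -1: assumption that reservation picks the current cpu
 *	ret = lib_ring_buffer_reserve(&client_config, &ctx, NULL);
 *	if (ret)
 *		return;		// buffer full (discard mode) or error
 *	lib_ring_buffer_align_ctx(&ctx, lttng_alignof(payload));
 *	lib_ring_buffer_write(&client_config, &ctx, &payload, sizeof(payload));
 *	lib_ring_buffer_commit(&client_config, &ctx);
 */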

/*
 * Reservation flags.
 *
 * RING_BUFFER_RFLAG_FULL_TSC
 *
 * This flag is passed to record_header_size() and to the primitive used to
 * write the record header. It indicates that the full 64-bit time value is
 * needed in the record header. If this flag is not set, the record header
 * needs only to contain "tsc_bits" bits of time value.
 *
 * Reservation flags can be added by the client, starting from
 * "(RING_BUFFER_RFLAG_END << 0)". They can be used to pass information from
 * record_header_size() to lib_ring_buffer_write_record_header().
 */
#define RING_BUFFER_RFLAG_FULL_TSC	(1U << 0)
#define RING_BUFFER_RFLAG_END		(1U << 1)

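/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * client-defined reservation flag allocated after RING_BUFFER_RFLAG_END,
 * as the comment above describes.
 *
 *	#define CLIENT_RFLAG_EXTENDED	(RING_BUFFER_RFLAG_END << 0)
 */
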
/*
 * We need to define RING_BUFFER_ALIGN_ATTR so it is known early at
 * compile-time. We have to duplicate the "config->align" information and the
 * definition here because config->align is used both in the slow and fast
 * paths, but RING_BUFFER_ALIGN_ATTR is only available for the client code.
 */
#ifdef RING_BUFFER_ALIGN

# define RING_BUFFER_ALIGN_ATTR		/* Default arch alignment */

/*
 * Calculate the offset needed to align the type.
 * size_of_type must be non-zero.
 */
static inline lttng_ust_notrace
unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type);
static inline
unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
{
	return lttng_ust_offset_align(align_drift, size_of_type);
}

#else

# define RING_BUFFER_ALIGN_ATTR __attribute__((packed))

/*
 * Calculate the offset needed to align the type.
 * size_of_type must be non-zero.
 */
static inline lttng_ust_notrace
unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type);
static inline
unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
{
	return 0;
}

#endif
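
/*
 * Illustrative example (not part of the original header): with natural
 * alignment enabled (RING_BUFFER_ALIGN), lttng_ust_offset_align() returns
 * the padding needed to bring the current offset up to the type's
 * alignment. For instance, aligning a 4-byte integer written at offset 6:
 *
 *	lib_ring_buffer_align(6, sizeof(uint32_t)) == 2
 *
 * so the field lands at offset 8. When RING_BUFFER_ALIGN is not defined,
 * records are packed and the function always returns 0.
 */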

/**
 * lib_ring_buffer_align_ctx - Align context offset on "alignment"
 * @ctx: ring buffer context.
 * @alignment: alignment to apply to the context's buffer offset.
 */
static inline lttng_ust_notrace
void lib_ring_buffer_align_ctx(struct lttng_ust_lib_ring_buffer_ctx *ctx,
			size_t alignment);
static inline
void lib_ring_buffer_align_ctx(struct lttng_ust_lib_ring_buffer_ctx *ctx,
			size_t alignment)
{
	ctx->buf_offset += lib_ring_buffer_align(ctx->buf_offset,
						 alignment);
}

/*
 * lib_ring_buffer_check_config() returns 0 on success.
 * Used internally to check for valid configurations at channel creation.
 */
static inline lttng_ust_notrace
int lib_ring_buffer_check_config(const struct lttng_ust_lib_ring_buffer_config *config,
			unsigned int switch_timer_interval,
			unsigned int read_timer_interval);
static inline
int lib_ring_buffer_check_config(const struct lttng_ust_lib_ring_buffer_config *config,
			unsigned int switch_timer_interval,
			unsigned int read_timer_interval)
{
	if (config->alloc == RING_BUFFER_ALLOC_GLOBAL
	    && config->sync == RING_BUFFER_SYNC_PER_CPU
	    && switch_timer_interval)
		return -EINVAL;
	return 0;
}
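
/*
 * Illustrative example (not part of the original header): the only rejected
 * combination is a global buffer with per-cpu synchronization and a periodic
 * sub-buffer switch timer, matching the alloc/sync pairing rules documented
 * above. Assuming a config "cfg" with .alloc = RING_BUFFER_ALLOC_GLOBAL and
 * .sync = RING_BUFFER_SYNC_PER_CPU:
 *
 *	lib_ring_buffer_check_config(&cfg, 1000, 0);	// returns -EINVAL
 *	lib_ring_buffer_check_config(&cfg, 0, 1000);	// returns 0
 */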

#endif /* _LTTNG_RING_BUFFER_CONFIG_H */