/*
 * lttng-ust: include/lttng/ringbuffer-config.h
 * (extracted from commit "Remove all LGPL-licensed headers")
 */
1 #ifndef _LINUX_RING_BUFFER_CONFIG_H
2 #define _LINUX_RING_BUFFER_CONFIG_H
3
4 /*
5 * linux/ringbuffer/config.h
6 *
7 * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 *
9 * Ring buffer configuration header. Note: after declaring the standard inline
10 * functions, clients should also include linux/ringbuffer/api.h.
11 *
12 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
13 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
14 *
15 * Permission is hereby granted to use or copy this program
16 * for any purpose, provided the above notices are retained on all copies.
17 * Permission to modify the code and to distribute modified code is granted,
18 * provided the above notices are retained, and a notice that the code was
19 * modified is included with the above copyright notice.
20 */
21
22 #include <errno.h>
23 #include "lttng/ust-tracer.h"
24 #include "lttng/usterr-signal-safe.h"
25 #include "lttng/kcompat/kcompat.h"
26 #include "lttng/align.h"
27
28 struct lttng_ust_lib_ring_buffer;
29 struct channel;
30 struct lttng_ust_lib_ring_buffer_config;
31 struct lttng_ust_lib_ring_buffer_ctx;
32 struct lttng_ust_shm_handle *handle;
33
34 /*
35 * Ring buffer client callbacks. Only used by slow path, never on fast path.
36 * For the fast path, record_header_size(), ring_buffer_clock_read() should be
37 * provided as inline functions too. These may simply return 0 if not used by
38 * the client.
39 */
40 struct lttng_ust_lib_ring_buffer_client_cb {
41 /* Mandatory callbacks */
42
43 /* A static inline version is also required for fast path */
44 u64 (*ring_buffer_clock_read) (struct channel *chan);
45 size_t (*record_header_size) (const struct lttng_ust_lib_ring_buffer_config *config,
46 struct channel *chan, size_t offset,
47 size_t *pre_header_padding,
48 struct lttng_ust_lib_ring_buffer_ctx *ctx);
49
50 /* Slow path only, at subbuffer switch */
51 size_t (*subbuffer_header_size) (void);
52 void (*buffer_begin) (struct lttng_ust_lib_ring_buffer *buf, u64 tsc,
53 unsigned int subbuf_idx,
54 struct lttng_ust_shm_handle *handle);
55 void (*buffer_end) (struct lttng_ust_lib_ring_buffer *buf, u64 tsc,
56 unsigned int subbuf_idx, unsigned long data_size,
57 struct lttng_ust_shm_handle *handle);
58
59 /* Optional callbacks (can be set to NULL) */
60
61 /* Called at buffer creation/finalize */
62 int (*buffer_create) (struct lttng_ust_lib_ring_buffer *buf, void *priv,
63 int cpu, const char *name,
64 struct lttng_ust_shm_handle *handle);
65 /*
66 * Clients should guarantee that no new reader handle can be opened
67 * after finalize.
68 */
69 void (*buffer_finalize) (struct lttng_ust_lib_ring_buffer *buf,
70 void *priv, int cpu,
71 struct lttng_ust_shm_handle *handle);
72
73 /*
74 * Extract header length, payload length and timestamp from event
75 * record. Used by buffer iterators. Timestamp is only used by channel
76 * iterator.
77 */
78 void (*record_get) (const struct lttng_ust_lib_ring_buffer_config *config,
79 struct channel *chan, struct lttng_ust_lib_ring_buffer *buf,
80 size_t offset, size_t *header_len,
81 size_t *payload_len, u64 *timestamp,
82 struct lttng_ust_shm_handle *handle);
83 };
84
85 /*
86 * Ring buffer instance configuration.
87 *
88 * Declare as "static const" within the client object to ensure the inline fast
89 * paths can be optimized.
90 *
91 * alloc/sync pairs:
92 *
93 * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_PER_CPU :
94 * Per-cpu buffers with per-cpu synchronization. Tracing must be performed
95 * with preemption disabled (lib_ring_buffer_get_cpu() and
96 * lib_ring_buffer_put_cpu()).
97 *
98 * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_GLOBAL :
99 * Per-cpu buffer with global synchronization. Tracing can be performed with
100 * preemption enabled, statistically stays on the local buffers.
101 *
102 * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_PER_CPU :
103 * Should only be used for buffers belonging to a single thread or protected
104 * by mutual exclusion by the client. Note that periodical sub-buffer switch
105 * should be disabled in this kind of configuration.
106 *
107 * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_GLOBAL :
108 * Global shared buffer with global synchronization.
109 *
110 * wakeup:
111 *
112 * RING_BUFFER_WAKEUP_BY_TIMER uses per-cpu deferrable timers to poll the
113 * buffers and wake up readers if data is ready. Mainly useful for tracers which
114 * don't want to call into the wakeup code on the tracing path. Use in
115 * combination with "read_timer_interval" channel_create() argument.
116 *
117 * RING_BUFFER_WAKEUP_BY_WRITER directly wakes up readers when a subbuffer is
118 * ready to read. Lower latencies before the reader is woken up. Mainly suitable
119 * for drivers.
120 *
121 * RING_BUFFER_WAKEUP_NONE does not perform any wakeup whatsoever. The client
122 * has the responsibility to perform wakeups.
123 */
124 struct lttng_ust_lib_ring_buffer_config {
125 enum {
126 RING_BUFFER_ALLOC_PER_CPU,
127 RING_BUFFER_ALLOC_GLOBAL,
128 } alloc;
129 enum {
130 RING_BUFFER_SYNC_PER_CPU, /* Wait-free */
131 RING_BUFFER_SYNC_GLOBAL, /* Lock-free */
132 } sync;
133 enum {
134 RING_BUFFER_OVERWRITE, /* Overwrite when buffer full */
135 RING_BUFFER_DISCARD, /* Discard when buffer full */
136 } mode;
137 enum {
138 RING_BUFFER_SPLICE,
139 RING_BUFFER_MMAP,
140 RING_BUFFER_READ, /* TODO */
141 RING_BUFFER_ITERATOR,
142 RING_BUFFER_NONE,
143 } output;
144 enum {
145 RING_BUFFER_PAGE,
146 RING_BUFFER_VMAP, /* TODO */
147 RING_BUFFER_STATIC, /* TODO */
148 } backend;
149 enum {
150 RING_BUFFER_NO_OOPS_CONSISTENCY,
151 RING_BUFFER_OOPS_CONSISTENCY,
152 } oops;
153 enum {
154 RING_BUFFER_IPI_BARRIER,
155 RING_BUFFER_NO_IPI_BARRIER,
156 } ipi;
157 enum {
158 RING_BUFFER_WAKEUP_BY_TIMER, /* wake up performed by timer */
159 RING_BUFFER_WAKEUP_BY_WRITER, /*
160 * writer wakes up reader,
161 * not lock-free
162 * (takes spinlock).
163 */
164 } wakeup;
165 /*
166 * tsc_bits: timestamp bits saved at each record.
167 * 0 and 64 disable the timestamp compression scheme.
168 */
169 unsigned int tsc_bits;
170 struct lttng_ust_lib_ring_buffer_client_cb cb;
171 /*
172 * client_type is used by the consumer process (which is in a
173 * different address space) to lookup the appropriate client
174 * callbacks and update the cb pointers.
175 */
176 int client_type;
177 };
178
179 /*
180 * ring buffer context
181 *
182 * Context passed to lib_ring_buffer_reserve(), lib_ring_buffer_commit(),
183 * lib_ring_buffer_try_discard_reserve(), lib_ring_buffer_align_ctx() and
184 * lib_ring_buffer_write().
185 */
186 struct lttng_ust_lib_ring_buffer_ctx {
187 /* input received by lib_ring_buffer_reserve(), saved here. */
188 struct channel *chan; /* channel */
189 void *priv; /* client private data */
190 struct lttng_ust_shm_handle *handle; /* shared-memory handle */
191 size_t data_size; /* size of payload */
192 int largest_align; /*
193 * alignment of the largest element
194 * in the payload
195 */
196 int cpu; /* processor id */
197
198 /* output from lib_ring_buffer_reserve() */
199 struct lttng_ust_lib_ring_buffer *buf; /*
200 * buffer corresponding to processor id
201 * for this channel
202 */
203 size_t slot_size; /* size of the reserved slot */
204 unsigned long buf_offset; /* offset following the record header */
205 unsigned long pre_offset; /*
206 * Initial offset position _before_
207 * the record is written. Positioned
208 * prior to record header alignment
209 * padding.
210 */
211 u64 tsc; /* time-stamp counter value */
212 unsigned int rflags; /* reservation flags */
213 };
214
215 /**
216 * lib_ring_buffer_ctx_init - initialize ring buffer context
217 * @ctx: ring buffer context to initialize
218 * @chan: channel
219 * @priv: client private data
220 * @data_size: size of record data payload
221 * @largest_align: largest alignment within data payload types
222 * @cpu: processor id
223 */
224 static inline
225 void lib_ring_buffer_ctx_init(struct lttng_ust_lib_ring_buffer_ctx *ctx,
226 struct channel *chan, void *priv,
227 size_t data_size, int largest_align,
228 int cpu, struct lttng_ust_shm_handle *handle)
229 {
230 ctx->chan = chan;
231 ctx->priv = priv;
232 ctx->data_size = data_size;
233 ctx->largest_align = largest_align;
234 ctx->cpu = cpu;
235 ctx->rflags = 0;
236 ctx->handle = handle;
237 }
238
239 /*
240 * Reservation flags.
241 *
242 * RING_BUFFER_RFLAG_FULL_TSC
243 *
244 * This flag is passed to record_header_size() and to the primitive used to
245 * write the record header. It indicates that the full 64-bit time value is
246 * needed in the record header. If this flag is not set, the record header needs
247 * only to contain "tsc_bits" bit of time value.
248 *
249 * Reservation flags can be added by the client, starting from
250 * "(RING_BUFFER_FLAGS_END << 0)". It can be used to pass information from
251 * record_header_size() to lib_ring_buffer_write_record_header().
252 */
253 #define RING_BUFFER_RFLAG_FULL_TSC (1U << 0)
254 #define RING_BUFFER_RFLAG_END (1U << 1)
255
256 /*
257 * We need to define RING_BUFFER_ALIGN_ATTR so it is known early at
258 * compile-time. We have to duplicate the "config->align" information and the
259 * definition here because config->align is used both in the slow and fast
260 * paths, but RING_BUFFER_ALIGN_ATTR is only available for the client code.
261 */
262 #ifdef RING_BUFFER_ALIGN
263
264 # define RING_BUFFER_ALIGN_ATTR /* Default arch alignment */
265
266 /*
267 * Calculate the offset needed to align the type.
268 * size_of_type must be non-zero.
269 */
270 static inline
271 unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
272 {
273 return offset_align(align_drift, size_of_type);
274 }
275
276 #else
277
278 # define RING_BUFFER_ALIGN_ATTR __attribute__((packed))
279
280 /*
281 * Calculate the offset needed to align the type.
282 * size_of_type must be non-zero.
283 */
284 static inline
285 unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
286 {
287 return 0;
288 }
289
290 #endif
291
292 /**
293 * lib_ring_buffer_align_ctx - Align context offset on "alignment"
294 * @ctx: ring buffer context.
295 */
296 static inline
297 void lib_ring_buffer_align_ctx(struct lttng_ust_lib_ring_buffer_ctx *ctx,
298 size_t alignment)
299 {
300 ctx->buf_offset += lib_ring_buffer_align(ctx->buf_offset,
301 alignment);
302 }
303
304 /*
305 * lib_ring_buffer_check_config() returns 0 on success.
306 * Used internally to check for valid configurations at channel creation.
307 */
308 static inline
309 int lib_ring_buffer_check_config(const struct lttng_ust_lib_ring_buffer_config *config,
310 unsigned int switch_timer_interval,
311 unsigned int read_timer_interval)
312 {
313 if (config->alloc == RING_BUFFER_ALLOC_GLOBAL
314 && config->sync == RING_BUFFER_SYNC_PER_CPU
315 && switch_timer_interval)
316 return -EINVAL;
317 return 0;
318 }
319
320 #include <lttng/vatomic.h>
321
322 #endif /* _LINUX_RING_BUFFER_CONFIG_H */
/* (gitweb page-generation footer removed) */