include/lttng/ringbuffer-config.h (lttng-ust.git)
#ifndef _LINUX_RING_BUFFER_CONFIG_H
#define _LINUX_RING_BUFFER_CONFIG_H

/*
 * linux/ringbuffer/config.h
 *
 * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Ring buffer configuration header. Note: after declaring the standard inline
 * functions, clients should also include linux/ringbuffer/api.h.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#include <errno.h>
#include "lttng/ust-tracer.h"
#include <stdint.h>
#include <stddef.h>
#include <urcu/arch.h>
#include "lttng/align.h"

struct lttng_ust_lib_ring_buffer;
struct channel;
struct lttng_ust_lib_ring_buffer_config;
struct lttng_ust_lib_ring_buffer_ctx;
struct lttng_ust_shm_handle;

/*
 * Ring buffer client callbacks. Only used by slow path, never on fast path.
 * For the fast path, record_header_size(), ring_buffer_clock_read() should be
 * provided as inline functions too. These may simply return 0 if not used by
 * the client.
 */
struct lttng_ust_lib_ring_buffer_client_cb {
        /* Mandatory callbacks */

        /* A static inline version is also required for fast path */
        uint64_t (*ring_buffer_clock_read) (struct channel *chan);
        size_t (*record_header_size) (const struct lttng_ust_lib_ring_buffer_config *config,
                                      struct channel *chan, size_t offset,
                                      size_t *pre_header_padding,
                                      struct lttng_ust_lib_ring_buffer_ctx *ctx);

        /* Slow path only, at subbuffer switch */
        size_t (*subbuffer_header_size) (void);
        void (*buffer_begin) (struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc,
                              unsigned int subbuf_idx,
                              struct lttng_ust_shm_handle *handle);
        void (*buffer_end) (struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc,
                            unsigned int subbuf_idx, unsigned long data_size,
                            struct lttng_ust_shm_handle *handle);

        /* Optional callbacks (can be set to NULL) */

        /* Called at buffer creation/finalize */
        int (*buffer_create) (struct lttng_ust_lib_ring_buffer *buf, void *priv,
                              int cpu, const char *name,
                              struct lttng_ust_shm_handle *handle);
        /*
         * Clients should guarantee that no new reader handle can be opened
         * after finalize.
         */
        void (*buffer_finalize) (struct lttng_ust_lib_ring_buffer *buf,
                                 void *priv, int cpu,
                                 struct lttng_ust_shm_handle *handle);

        /*
         * Extract header length, payload length and timestamp from event
         * record. Used by buffer iterators. Timestamp is only used by channel
         * iterator.
         */
        void (*record_get) (const struct lttng_ust_lib_ring_buffer_config *config,
                            struct channel *chan, struct lttng_ust_lib_ring_buffer *buf,
                            size_t offset, size_t *header_len,
                            size_t *payload_len, uint64_t *timestamp,
                            struct lttng_ust_shm_handle *handle);
};
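
/*
 * Example: a minimal client callback table (sketch only; the client_*
 * functions are hypothetical names that a client would define itself).
 * The mandatory callbacks above must all be provided; the optional ones
 * may be left NULL. The client must also provide static inline versions
 * of ring_buffer_clock_read() and record_header_size() for the fast path.
 *
 *      static const struct lttng_ust_lib_ring_buffer_client_cb client_cb = {
 *              .ring_buffer_clock_read = client_clock_read,
 *              .record_header_size = client_record_header_size,
 *              .subbuffer_header_size = client_packet_header_size,
 *              .buffer_begin = client_buffer_begin,
 *              .buffer_end = client_buffer_end,
 *              .buffer_create = client_buffer_create,
 *              .buffer_finalize = NULL,
 *              .record_get = NULL,
 *      };
 */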

/*
 * Ring buffer instance configuration.
 *
 * Declare as "static const" within the client object to ensure the inline fast
 * paths can be optimized.
 *
 * alloc/sync pairs:
 *
 * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_PER_CPU :
 * Per-cpu buffers with per-cpu synchronization. Tracing must be performed
 * with preemption disabled (lib_ring_buffer_get_cpu() and
 * lib_ring_buffer_put_cpu()).
 *
 * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_GLOBAL :
 * Per-cpu buffer with global synchronization. Tracing can be performed with
 * preemption enabled, statistically stays on the local buffers.
 *
 * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_PER_CPU :
 * Should only be used for buffers belonging to a single thread or protected
 * by mutual exclusion by the client. Note that periodical sub-buffer switch
 * should be disabled in this kind of configuration.
 *
 * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_GLOBAL :
 * Global shared buffer with global synchronization.
 *
 * wakeup:
 *
 * RING_BUFFER_WAKEUP_BY_TIMER uses per-cpu deferrable timers to poll the
 * buffers and wake up readers if data is ready. Mainly useful for tracers which
 * don't want to call into the wakeup code on the tracing path. Use in
 * combination with "read_timer_interval" channel_create() argument.
 *
 * RING_BUFFER_WAKEUP_BY_WRITER directly wakes up readers when a subbuffer is
 * ready to read. Lower latencies before the reader is woken up. Mainly suitable
 * for drivers.
 *
 * RING_BUFFER_WAKEUP_NONE does not perform any wakeup whatsoever. The client
 * has the responsibility to perform wakeups.
 */
struct lttng_ust_lib_ring_buffer_config {
        enum {
                RING_BUFFER_ALLOC_PER_CPU,
                RING_BUFFER_ALLOC_GLOBAL,
        } alloc;
        enum {
                RING_BUFFER_SYNC_PER_CPU,       /* Wait-free */
                RING_BUFFER_SYNC_GLOBAL,        /* Lock-free */
        } sync;
        enum {
                RING_BUFFER_OVERWRITE,          /* Overwrite when buffer full */
                RING_BUFFER_DISCARD,            /* Discard when buffer full */
        } mode;
        enum {
                RING_BUFFER_SPLICE,
                RING_BUFFER_MMAP,
                RING_BUFFER_READ,               /* TODO */
                RING_BUFFER_ITERATOR,
                RING_BUFFER_NONE,
        } output;
        enum {
                RING_BUFFER_PAGE,
                RING_BUFFER_VMAP,               /* TODO */
                RING_BUFFER_STATIC,             /* TODO */
        } backend;
        enum {
                RING_BUFFER_NO_OOPS_CONSISTENCY,
                RING_BUFFER_OOPS_CONSISTENCY,
        } oops;
        enum {
                RING_BUFFER_IPI_BARRIER,
                RING_BUFFER_NO_IPI_BARRIER,
        } ipi;
        enum {
                RING_BUFFER_WAKEUP_BY_TIMER,    /* wake up performed by timer */
                RING_BUFFER_WAKEUP_BY_WRITER,   /*
                                                 * writer wakes up reader,
                                                 * not lock-free
                                                 * (takes spinlock).
                                                 */
        } wakeup;
        /*
         * tsc_bits: timestamp bits saved at each record.
         * 0 and 64 disable the timestamp compression scheme.
         */
        unsigned int tsc_bits;
        struct lttng_ust_lib_ring_buffer_client_cb cb;
        /*
         * client_type is used by the consumer process (which is in a
         * different address space) to lookup the appropriate client
         * callbacks and update the cb pointers.
         */
        int client_type;
};
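
/*
 * Example: a typical "static const" client configuration (sketch only; the
 * client_* callback names are the client's own, and client_type, 0 here as a
 * placeholder, identifies the client to the consumer). This picks per-cpu
 * buffers with per-cpu synchronization, discard mode, mmap output, and 27
 * compressed timestamp bits per record. Declaring the object "static const"
 * lets the compiler specialize the inline fast paths.
 *
 *      static const struct lttng_ust_lib_ring_buffer_config client_config = {
 *              .cb.ring_buffer_clock_read = client_clock_read,
 *              .cb.record_header_size = client_record_header_size,
 *              .cb.subbuffer_header_size = client_packet_header_size,
 *              .cb.buffer_begin = client_buffer_begin,
 *              .cb.buffer_end = client_buffer_end,
 *
 *              .tsc_bits = 27,
 *              .alloc = RING_BUFFER_ALLOC_PER_CPU,
 *              .sync = RING_BUFFER_SYNC_PER_CPU,
 *              .mode = RING_BUFFER_DISCARD,
 *              .backend = RING_BUFFER_PAGE,
 *              .output = RING_BUFFER_MMAP,
 *              .oops = RING_BUFFER_OOPS_CONSISTENCY,
 *              .ipi = RING_BUFFER_NO_IPI_BARRIER,
 *              .wakeup = RING_BUFFER_WAKEUP_BY_WRITER,
 *              .client_type = 0,
 *      };
 */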

/*
 * ring buffer context
 *
 * Context passed to lib_ring_buffer_reserve(), lib_ring_buffer_commit(),
 * lib_ring_buffer_try_discard_reserve(), lib_ring_buffer_align_ctx() and
 * lib_ring_buffer_write().
 */
struct lttng_ust_lib_ring_buffer_ctx {
        /* input received by lib_ring_buffer_reserve(), saved here. */
        struct channel *chan;                   /* channel */
        void *priv;                             /* client private data */
        struct lttng_ust_shm_handle *handle;    /* shared-memory handle */
        size_t data_size;                       /* size of payload */
        int largest_align;                      /*
                                                 * alignment of the largest element
                                                 * in the payload
                                                 */
        int cpu;                                /* processor id */

        /* output from lib_ring_buffer_reserve() */
        struct lttng_ust_lib_ring_buffer *buf;  /*
                                                 * buffer corresponding to processor id
                                                 * for this channel
                                                 */
        size_t slot_size;                       /* size of the reserved slot */
        unsigned long buf_offset;               /* offset following the record header */
        unsigned long pre_offset;               /*
                                                 * Initial offset position _before_
                                                 * the record is written. Positioned
                                                 * prior to record header alignment
                                                 * padding.
                                                 */
        uint64_t tsc;                           /* time-stamp counter value */
        unsigned int rflags;                    /* reservation flags */
};

/**
 * lib_ring_buffer_ctx_init - initialize ring buffer context
 * @ctx: ring buffer context to initialize
 * @chan: channel
 * @priv: client private data
 * @data_size: size of record data payload
 * @largest_align: largest alignment within data payload types
 * @cpu: processor id
 * @handle: shared-memory handle
 */
static inline
void lib_ring_buffer_ctx_init(struct lttng_ust_lib_ring_buffer_ctx *ctx,
                              struct channel *chan, void *priv,
                              size_t data_size, int largest_align,
                              int cpu, struct lttng_ust_shm_handle *handle)
{
        ctx->chan = chan;
        ctx->priv = priv;
        ctx->data_size = data_size;
        ctx->largest_align = largest_align;
        ctx->cpu = cpu;
        ctx->rflags = 0;
        ctx->handle = handle;
}
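
/*
 * Example: typical use of the context around one record (sketch only).
 * lib_ring_buffer_reserve(), lib_ring_buffer_write() and
 * lib_ring_buffer_commit() are declared by the ring buffer API headers, not
 * here; "client_config", "chan", "handle" and "cpu" are the client's own, and
 * lttng_alignof() is assumed to come from "lttng/align.h".
 *
 *      struct lttng_ust_lib_ring_buffer_ctx ctx;
 *      uint32_t payload = 42;
 *      int ret;
 *
 *      lib_ring_buffer_ctx_init(&ctx, chan, NULL, sizeof(payload),
 *                               lttng_alignof(payload), cpu, handle);
 *      ret = lib_ring_buffer_reserve(&client_config, &ctx);
 *      if (ret)
 *              return ret;
 *      lib_ring_buffer_write(&client_config, &ctx, &payload, sizeof(payload));
 *      lib_ring_buffer_commit(&client_config, &ctx);
 */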

/*
 * Reservation flags.
 *
 * RING_BUFFER_RFLAG_FULL_TSC
 *
 * This flag is passed to record_header_size() and to the primitive used to
 * write the record header. It indicates that the full 64-bit time value is
 * needed in the record header. If this flag is not set, the record header needs
 * only to contain "tsc_bits" bits of time value.
 *
 * Reservation flags can be added by the client, starting from
 * "(RING_BUFFER_RFLAG_END << 0)". They can be used to pass information from
 * record_header_size() to lib_ring_buffer_write_record_header().
 */
#define RING_BUFFER_RFLAG_FULL_TSC      (1U << 0)
#define RING_BUFFER_RFLAG_END           (1U << 1)
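
/*
 * Example: how a client record_header_size() callback may use the flag
 * (sketch only; the header layout is hypothetical). When
 * RING_BUFFER_RFLAG_FULL_TSC is set, room is reserved for the full 64-bit
 * timestamp; otherwise only config->tsc_bits bits of timestamp need to be
 * stored, typically packed with other header fields.
 *
 *      if (ctx->rflags & RING_BUFFER_RFLAG_FULL_TSC)
 *              size += sizeof(uint64_t);
 *      else
 *              size += sizeof(uint32_t);
 */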

/*
 * We need to define RING_BUFFER_ALIGN_ATTR so it is known early at
 * compile-time. We have to duplicate the "config->align" information and the
 * definition here because config->align is used both in the slow and fast
 * paths, but RING_BUFFER_ALIGN_ATTR is only available for the client code.
 */
#ifdef RING_BUFFER_ALIGN

# define RING_BUFFER_ALIGN_ATTR         /* Default arch alignment */

/*
 * Calculate the offset needed to align the type.
 * size_of_type must be non-zero.
 */
static inline
unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
{
        return offset_align(align_drift, size_of_type);
}

#else

# define RING_BUFFER_ALIGN_ATTR __attribute__((packed))

/*
 * Calculate the offset needed to align the type.
 * size_of_type must be non-zero.
 */
static inline
unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
{
        return 0;
}

#endif
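
/*
 * Example: with RING_BUFFER_ALIGN defined, aligning a uint64_t field that
 * would start at offset 12 yields lib_ring_buffer_align(12, sizeof(uint64_t))
 * == 4 bytes of padding, so the field starts at offset 16. Without
 * RING_BUFFER_ALIGN, the function always returns 0 and client structures
 * marked RING_BUFFER_ALIGN_ATTR are packed to match the unaligned layout.
 */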

/**
 * lib_ring_buffer_align_ctx - Align context offset on "alignment"
 * @ctx: ring buffer context.
 * @alignment: alignment, in bytes, to apply to the context offset.
 */
static inline
void lib_ring_buffer_align_ctx(struct lttng_ust_lib_ring_buffer_ctx *ctx,
                               size_t alignment)
{
        ctx->buf_offset += lib_ring_buffer_align(ctx->buf_offset,
                                                 alignment);
}
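
/*
 * Example: align the context on the natural alignment of each payload field
 * before writing it (sketch only; lib_ring_buffer_write() comes from the ring
 * buffer API headers, and lttng_alignof() is assumed from "lttng/align.h").
 *
 *      lib_ring_buffer_align_ctx(&ctx, lttng_alignof(value));
 *      lib_ring_buffer_write(&client_config, &ctx, &value, sizeof(value));
 */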

/*
 * lib_ring_buffer_check_config() returns 0 on success.
 * Used internally to check for valid configurations at channel creation.
 */
static inline
int lib_ring_buffer_check_config(const struct lttng_ust_lib_ring_buffer_config *config,
                                 unsigned int switch_timer_interval,
                                 unsigned int read_timer_interval)
{
        if (config->alloc == RING_BUFFER_ALLOC_GLOBAL
            && config->sync == RING_BUFFER_SYNC_PER_CPU
            && switch_timer_interval)
                return -EINVAL;
        return 0;
}
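
/*
 * Example: with a RING_BUFFER_ALLOC_GLOBAL + RING_BUFFER_SYNC_PER_CPU
 * configuration, the periodical sub-buffer switch timer must be disabled:
 * lib_ring_buffer_check_config(&client_config, 0, read_timer_interval)
 * returns 0, while any non-zero switch_timer_interval makes it return
 * -EINVAL.
 */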

#endif /* _LINUX_RING_BUFFER_CONFIG_H */