/*
 * Fix ABI: add padding to tracepoint and ring buffer config public structures
 * [lttng-ust.git] / include / lttng / ringbuffer-config.h
 */
1 #ifndef _LINUX_RING_BUFFER_CONFIG_H
2 #define _LINUX_RING_BUFFER_CONFIG_H
3
4 /*
5 * linux/ringbuffer/config.h
6 *
7 * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 *
9 * Ring buffer configuration header. Note: after declaring the standard inline
10 * functions, clients should also include linux/ringbuffer/api.h.
11 *
12 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
13 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
14 *
15 * Permission is hereby granted to use or copy this program
16 * for any purpose, provided the above notices are retained on all copies.
17 * Permission to modify the code and to distribute modified code is granted,
18 * provided the above notices are retained, and a notice that the code was
19 * modified is included with the above copyright notice.
20 */
21
22 #include <errno.h>
23 #include "lttng/ust-tracer.h"
24 #include <stdint.h>
25 #include <stddef.h>
26 #include <urcu/arch.h>
27 #include <string.h>
28 #include "lttng/align.h"
29
/* Opaque types used by the ring buffer API; defined in implementation files. */
struct lttng_ust_lib_ring_buffer;
struct channel;
struct lttng_ust_lib_ring_buffer_config;
struct lttng_ust_lib_ring_buffer_ctx;
/*
 * Fix: this line previously read "struct lttng_ust_shm_handle *handle;",
 * which tentatively defines a global variable named "handle" in every
 * translation unit including this public header (namespace pollution and
 * duplicate tentative definitions). Only the type forward declaration is
 * intended here, matching the declarations above.
 */
struct lttng_ust_shm_handle;
35
36 /*
37 * Ring buffer client callbacks. Only used by slow path, never on fast path.
38 * For the fast path, record_header_size(), ring_buffer_clock_read() should be
39 * provided as inline functions too. These may simply return 0 if not used by
40 * the client.
41 */
struct lttng_ust_lib_ring_buffer_client_cb {
	/* Mandatory callbacks */

	/* A static inline version is also required for fast path */
	uint64_t (*ring_buffer_clock_read) (struct channel *chan);
	/*
	 * Compute the record header size for a record starting at "offset".
	 * Writes the padding required before the header into
	 * *pre_header_padding.
	 */
	size_t (*record_header_size) (const struct lttng_ust_lib_ring_buffer_config *config,
				      struct channel *chan, size_t offset,
				      size_t *pre_header_padding,
				      struct lttng_ust_lib_ring_buffer_ctx *ctx);

	/* Slow path only, at subbuffer switch */
	size_t (*subbuffer_header_size) (void);
	/* Called when switching into sub-buffer "subbuf_idx". */
	void (*buffer_begin) (struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc,
			      unsigned int subbuf_idx,
			      struct lttng_ust_shm_handle *handle);
	/* Called when switching out of sub-buffer "subbuf_idx". */
	void (*buffer_end) (struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc,
			    unsigned int subbuf_idx, unsigned long data_size,
			    struct lttng_ust_shm_handle *handle);

	/* Optional callbacks (can be set to NULL) */

	/* Called at buffer creation/finalize */
	int (*buffer_create) (struct lttng_ust_lib_ring_buffer *buf, void *priv,
			      int cpu, const char *name,
			      struct lttng_ust_shm_handle *handle);
	/*
	 * Clients should guarantee that no new reader handle can be opened
	 * after finalize.
	 */
	void (*buffer_finalize) (struct lttng_ust_lib_ring_buffer *buf,
				 void *priv, int cpu,
				 struct lttng_ust_shm_handle *handle);

	/*
	 * Extract header length, payload length and timestamp from event
	 * record. Used by buffer iterators. Timestamp is only used by channel
	 * iterator.
	 */
	void (*record_get) (const struct lttng_ust_lib_ring_buffer_config *config,
			    struct channel *chan, struct lttng_ust_lib_ring_buffer *buf,
			    size_t offset, size_t *header_len,
			    size_t *payload_len, uint64_t *timestamp,
			    struct lttng_ust_shm_handle *handle);
};
86
87 /*
88 * Ring buffer instance configuration.
89 *
90 * Declare as "static const" within the client object to ensure the inline fast
91 * paths can be optimized.
92 *
93 * alloc/sync pairs:
94 *
95 * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_PER_CPU :
96 * Per-cpu buffers with per-cpu synchronization. Tracing must be performed
97 * with preemption disabled (lib_ring_buffer_get_cpu() and
98 * lib_ring_buffer_put_cpu()).
99 *
100 * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_GLOBAL :
101 * Per-cpu buffer with global synchronization. Tracing can be performed with
102 * preemption enabled, statistically stays on the local buffers.
103 *
104 * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_PER_CPU :
105 * Should only be used for buffers belonging to a single thread or protected
106 * by mutual exclusion by the client. Note that periodical sub-buffer switch
107 * should be disabled in this kind of configuration.
108 *
109 * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_GLOBAL :
110 * Global shared buffer with global synchronization.
111 *
112 * wakeup:
113 *
114 * RING_BUFFER_WAKEUP_BY_TIMER uses per-cpu deferrable timers to poll the
115 * buffers and wake up readers if data is ready. Mainly useful for tracers which
116 * don't want to call into the wakeup code on the tracing path. Use in
117 * combination with "read_timer_interval" channel_create() argument.
118 *
119 * RING_BUFFER_WAKEUP_BY_WRITER directly wakes up readers when a subbuffer is
120 * ready to read. Lower latencies before the reader is woken up. Mainly suitable
121 * for drivers.
122 *
123 * RING_BUFFER_WAKEUP_NONE does not perform any wakeup whatsoever. The client
124 * has the responsibility to perform wakeups.
125 */
#define LTTNG_UST_RING_BUFFER_CONFIG_PADDING	32
struct lttng_ust_lib_ring_buffer_config {
	enum {
		RING_BUFFER_ALLOC_PER_CPU,
		RING_BUFFER_ALLOC_GLOBAL,
	} alloc;
	enum {
		RING_BUFFER_SYNC_PER_CPU,	/* Wait-free */
		RING_BUFFER_SYNC_GLOBAL,	/* Lock-free */
	} sync;
	enum {
		RING_BUFFER_OVERWRITE,		/* Overwrite when buffer full */
		RING_BUFFER_DISCARD,		/* Discard when buffer full */
	} mode;
	enum {
		RING_BUFFER_SPLICE,
		RING_BUFFER_MMAP,
		RING_BUFFER_READ,		/* TODO */
		RING_BUFFER_ITERATOR,
		RING_BUFFER_NONE,
	} output;
	enum {
		RING_BUFFER_PAGE,
		RING_BUFFER_VMAP,		/* TODO */
		RING_BUFFER_STATIC,		/* TODO */
	} backend;
	enum {
		RING_BUFFER_NO_OOPS_CONSISTENCY,
		RING_BUFFER_OOPS_CONSISTENCY,
	} oops;
	enum {
		RING_BUFFER_IPI_BARRIER,
		RING_BUFFER_NO_IPI_BARRIER,
	} ipi;
	enum {
		RING_BUFFER_WAKEUP_BY_TIMER,	/* wake up performed by timer */
		RING_BUFFER_WAKEUP_BY_WRITER,	/*
						 * writer wakes up reader,
						 * not lock-free
						 * (takes spinlock).
						 */
	} wakeup;
	/*
	 * tsc_bits: timestamp bits saved at each record.
	 *   0 and 64 disable the timestamp compression scheme.
	 */
	unsigned int tsc_bits;
	struct lttng_ust_lib_ring_buffer_client_cb cb;
	/*
	 * client_type is used by the consumer process (which is in a
	 * different address space) to lookup the appropriate client
	 * callbacks and update the cb pointers.
	 */
	int client_type;
	/*
	 * Reserved space so the size of this public structure can remain
	 * stable when fields are added, preserving the ABI (see the
	 * "Fix ABI" padding change this header carries).
	 */
	char padding[LTTNG_UST_RING_BUFFER_CONFIG_PADDING];
};
182
183 /*
184 * ring buffer context
185 *
186 * Context passed to lib_ring_buffer_reserve(), lib_ring_buffer_commit(),
187 * lib_ring_buffer_try_discard_reserve(), lib_ring_buffer_align_ctx() and
188 * lib_ring_buffer_write().
189 */
#define LTTNG_UST_RING_BUFFER_CTX_PADDING	24
struct lttng_ust_lib_ring_buffer_ctx {
	/* input received by lib_ring_buffer_reserve(), saved here. */
	struct channel *chan;		/* channel */
	void *priv;			/* client private data */
	struct lttng_ust_shm_handle *handle;	/* shared-memory handle */
	size_t data_size;		/* size of payload */
	int largest_align;		/*
					 * alignment of the largest element
					 * in the payload
					 */
	int cpu;			/* processor id */

	/* output from lib_ring_buffer_reserve() */
	struct lttng_ust_lib_ring_buffer *buf;	/*
					 * buffer corresponding to processor id
					 * for this channel
					 */
	size_t slot_size;		/* size of the reserved slot */
	unsigned long buf_offset;	/* offset following the record header */
	unsigned long pre_offset;	/*
					 * Initial offset position _before_
					 * the record is written. Positioned
					 * prior to record header alignment
					 * padding.
					 */
	uint64_t tsc;			/* time-stamp counter value */
	unsigned int rflags;		/* reservation flags */
	/*
	 * Reserved space so the public context structure size can remain
	 * stable across versions (ABI padding); zeroed by
	 * lib_ring_buffer_ctx_init().
	 */
	char padding[LTTNG_UST_RING_BUFFER_CTX_PADDING];
};
220
221 /**
222 * lib_ring_buffer_ctx_init - initialize ring buffer context
223 * @ctx: ring buffer context to initialize
224 * @chan: channel
225 * @priv: client private data
226 * @data_size: size of record data payload
227 * @largest_align: largest alignment within data payload types
228 * @cpu: processor id
229 */
230 static inline
231 void lib_ring_buffer_ctx_init(struct lttng_ust_lib_ring_buffer_ctx *ctx,
232 struct channel *chan, void *priv,
233 size_t data_size, int largest_align,
234 int cpu, struct lttng_ust_shm_handle *handle)
235 {
236 ctx->chan = chan;
237 ctx->priv = priv;
238 ctx->data_size = data_size;
239 ctx->largest_align = largest_align;
240 ctx->cpu = cpu;
241 ctx->rflags = 0;
242 ctx->handle = handle;
243 memset(ctx->padding, 0, LTTNG_UST_RING_BUFFER_CTX_PADDING);
244 }
245
246 /*
247 * Reservation flags.
248 *
249 * RING_BUFFER_RFLAG_FULL_TSC
250 *
251 * This flag is passed to record_header_size() and to the primitive used to
252 * write the record header. It indicates that the full 64-bit time value is
253 * needed in the record header. If this flag is not set, the record header needs
254 * only to contain "tsc_bits" bit of time value.
255 *
256 * Reservation flags can be added by the client, starting from
257 * "(RING_BUFFER_FLAGS_END << 0)". It can be used to pass information from
258 * record_header_size() to lib_ring_buffer_write_record_header().
259 */
260 #define RING_BUFFER_RFLAG_FULL_TSC (1U << 0)
261 #define RING_BUFFER_RFLAG_END (1U << 1)
262
263 /*
264 * We need to define RING_BUFFER_ALIGN_ATTR so it is known early at
265 * compile-time. We have to duplicate the "config->align" information and the
266 * definition here because config->align is used both in the slow and fast
267 * paths, but RING_BUFFER_ALIGN_ATTR is only available for the client code.
268 */
#ifdef RING_BUFFER_ALIGN

# define RING_BUFFER_ALIGN_ATTR		/* Default arch alignment */

/*
 * Calculate the offset needed to align the type.
 * size_of_type must be non-zero.
 */
static inline
unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
{
	return offset_align(align_drift, size_of_type);
}

#else

# define RING_BUFFER_ALIGN_ATTR __attribute__((packed))

/*
 * Calculate the offset needed to align the type.
 * size_of_type must be non-zero.
 * Event layout is packed in this configuration (see
 * RING_BUFFER_ALIGN_ATTR above), so no alignment padding is needed.
 */
static inline
unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
{
	return 0;
}

#endif
298
299 /**
300 * lib_ring_buffer_align_ctx - Align context offset on "alignment"
301 * @ctx: ring buffer context.
302 */
303 static inline
304 void lib_ring_buffer_align_ctx(struct lttng_ust_lib_ring_buffer_ctx *ctx,
305 size_t alignment)
306 {
307 ctx->buf_offset += lib_ring_buffer_align(ctx->buf_offset,
308 alignment);
309 }
310
311 /*
312 * lib_ring_buffer_check_config() returns 0 on success.
313 * Used internally to check for valid configurations at channel creation.
314 */
315 static inline
316 int lib_ring_buffer_check_config(const struct lttng_ust_lib_ring_buffer_config *config,
317 unsigned int switch_timer_interval,
318 unsigned int read_timer_interval)
319 {
320 if (config->alloc == RING_BUFFER_ALLOC_GLOBAL
321 && config->sync == RING_BUFFER_SYNC_PER_CPU
322 && switch_timer_interval)
323 return -EINVAL;
324 return 0;
325 }
326
327 #endif /* _LINUX_RING_BUFFER_CONFIG_H */