Commit | Line | Data |
---|---|---|
b7cdc182 | 1 | /* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only) |
9f36eaed | 2 | * |
24591303 | 3 | * ringbuffer/config.h |
f3bc08c5 MD |
4 | * |
5 | * Ring buffer configuration header. Note: after declaring the standard inline | |
6 | * functions, clients should also include linux/ringbuffer/api.h. | |
7 | * | |
886d51a3 | 8 | * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com> |
f3bc08c5 MD |
9 | */ |
10 | ||
9f36eaed MJ |
11 | #ifndef _LIB_RING_BUFFER_CONFIG_H |
12 | #define _LIB_RING_BUFFER_CONFIG_H | |
13 | ||
f3bc08c5 MD |
14 | #include <linux/types.h> |
15 | #include <linux/percpu.h> | |
a071f25d | 16 | #include <lttng/align.h> |
2df37e95 | 17 | #include <lttng/tracer-core.h> |
f3bc08c5 MD |
18 | |
19 | struct lib_ring_buffer; | |
860c213b | 20 | struct lttng_kernel_ring_buffer_channel; |
f3bc08c5 | 21 | struct lib_ring_buffer_config; |
8a57ec02 | 22 | struct lttng_kernel_ring_buffer_ctx; |
b1199bd3 | 23 | struct lttng_kernel_ring_buffer_ctx_private; |
f3bc08c5 MD |
24 | |
/*
 * Ring buffer client callbacks. Only used by slow path, never on fast path.
 * For the fast path, record_header_size(), ring_buffer_clock_read() should be
 * provided as inline functions too. These may simply return 0 if not used by
 * the client.
 */
struct lib_ring_buffer_client_cb {
	/* Mandatory callbacks */

	/* A static inline version is also required for fast path */
	u64 (*ring_buffer_clock_read) (struct lttng_kernel_ring_buffer_channel *chan);
	/*
	 * Compute the size of the record header for the record about to be
	 * reserved at @offset, returning the padding needed before the header
	 * through @pre_header_padding.
	 */
	size_t (*record_header_size) (const struct lib_ring_buffer_config *config,
				      struct lttng_kernel_ring_buffer_channel *chan, size_t offset,
				      size_t *pre_header_padding,
				      struct lttng_kernel_ring_buffer_ctx *ctx,
				      void *client_ctx);

	/* Slow path only, at subbuffer switch */
	size_t (*subbuffer_header_size) (void);
	/* Called when a sub-buffer is opened for writing. */
	void (*buffer_begin) (struct lib_ring_buffer *buf, u64 tsc,
			      unsigned int subbuf_idx);
	/* Called when a sub-buffer is closed, with the payload size written. */
	void (*buffer_end) (struct lib_ring_buffer *buf, u64 tsc,
			    unsigned int subbuf_idx, unsigned long data_size);

	/* Optional callbacks (can be set to NULL) */

	/* Called at buffer creation/finalize */
	int (*buffer_create) (struct lib_ring_buffer *buf, void *priv,
			      int cpu, const char *name);
	/*
	 * Clients should guarantee that no new reader handle can be opened
	 * after finalize.
	 */
	void (*buffer_finalize) (struct lib_ring_buffer *buf, void *priv, int cpu);

	/*
	 * Extract header length, payload length and timestamp from event
	 * record. Used by buffer iterators. Timestamp is only used by channel
	 * iterator.
	 */
	void (*record_get) (const struct lib_ring_buffer_config *config,
			    struct lttng_kernel_ring_buffer_channel *chan, struct lib_ring_buffer *buf,
			    size_t offset, size_t *header_len,
			    size_t *payload_len, u64 *timestamp);
};
70 | ||
/*
 * Ring buffer instance configuration.
 *
 * Declare as "static const" within the client object to ensure the inline fast
 * paths can be optimized.
 *
 * alloc/sync pairs:
 *
 * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_PER_CPU :
 *   Per-cpu buffers with per-cpu synchronization. Tracing must be performed
 *   with preemption disabled (lib_ring_buffer_get_cpu() and
 *   lib_ring_buffer_put_cpu()).
 *
 * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_GLOBAL :
 *   Per-cpu buffer with global synchronization. Tracing can be performed with
 *   preemption enabled, statistically stays on the local buffers.
 *
 * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_PER_CPU :
 *   Should only be used for buffers belonging to a single thread or protected
 *   by mutual exclusion by the client. Note that periodical sub-buffer switch
 *   should be disabled in this kind of configuration.
 *
 * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_GLOBAL :
 *   Global shared buffer with global synchronization.
 *
 * wakeup:
 *
 * RING_BUFFER_WAKEUP_BY_TIMER uses per-cpu timers to poll the
 * buffers and wake up readers if data is ready. Mainly useful for tracers which
 * don't want to call into the wakeup code on the tracing path. Use in
 * combination with "read_timer_interval" channel_create() argument.
 *
 * RING_BUFFER_WAKEUP_BY_WRITER directly wakes up readers when a subbuffer is
 * ready to read. Lower latencies before the reader is woken up. Mainly suitable
 * for drivers. Going through an "irq_work" allows triggering this type of wakeup
 * even from NMI context: the wakeup will be slightly delayed until the next
 * interrupts are handled.
 *
 * RING_BUFFER_WAKEUP_NONE does not perform any wakeup whatsoever. The client
 * has the responsibility to perform wakeups.
 *
 * NOTE(review): RING_BUFFER_WAKEUP_NONE is documented above but is not
 * declared in the "wakeup" enum below — presumably removed at some point;
 * confirm against clients and drop the stale paragraph if so.
 */
struct lib_ring_buffer_config {
	/* Buffer allocation scheme: one buffer per cpu, or a single buffer. */
	enum {
		RING_BUFFER_ALLOC_PER_CPU,
		RING_BUFFER_ALLOC_GLOBAL,
	} alloc;
	/* Writer synchronization scheme (see alloc/sync pairs above). */
	enum {
		RING_BUFFER_SYNC_PER_CPU,	/* Wait-free */
		RING_BUFFER_SYNC_GLOBAL,	/* Lock-free */
	} sync;
	/* Behavior when a buffer is full. */
	enum {
		RING_BUFFER_OVERWRITE,		/* Overwrite when buffer full */
		RING_BUFFER_DISCARD,		/* Discard when buffer full */
	} mode;
	/* Mechanism used by readers to extract data. */
	enum {
		RING_BUFFER_SPLICE,
		RING_BUFFER_MMAP,
		RING_BUFFER_READ,		/* TODO */
		RING_BUFFER_ITERATOR,
		RING_BUFFER_NONE,
	} output;
	/* Memory backend for the buffer pages. */
	enum {
		RING_BUFFER_PAGE,
		RING_BUFFER_VMAP,		/* TODO */
		RING_BUFFER_STATIC,		/* TODO */
	} backend;
	/* Whether buffer content must stay readable after a kernel oops. */
	enum {
		RING_BUFFER_NO_OOPS_CONSISTENCY,
		RING_BUFFER_OOPS_CONSISTENCY,
	} oops;
	/* Whether inter-processor interrupts are used as memory barriers. */
	enum {
		RING_BUFFER_IPI_BARRIER,
		RING_BUFFER_NO_IPI_BARRIER,
	} ipi;
	/* Reader wakeup scheme (see "wakeup" discussion above). */
	enum {
		RING_BUFFER_WAKEUP_BY_TIMER,	/* wake up performed by timer */
		RING_BUFFER_WAKEUP_BY_WRITER,	/*
						 * writer wakes up reader through
						 * irq_work.
						 */
	} wakeup;
	/*
	 * tsc_bits: timestamp bits saved at each record.
	 * 0 and 64 disable the timestamp compression scheme.
	 */
	unsigned int tsc_bits;
	/* Client callbacks, only used on the slow path. */
	struct lib_ring_buffer_client_cb cb;
};
159 | ||
b1199bd3 MD |
/*
 * ring buffer private context
 *
 * Private context passed to lib_ring_buffer_reserve(), lib_ring_buffer_commit(),
 * lib_ring_buffer_try_discard_reserve(), lib_ring_buffer_align_ctx() and
 * lib_ring_buffer_write().
 *
 * Get struct lttng_kernel_ring_buffer_ctx parent with container_of().
 */

struct lttng_kernel_ring_buffer_ctx_private {
	/* input received by lib_ring_buffer_reserve(). */
	struct lttng_kernel_ring_buffer_channel *chan;	/* ring buffer channel */

	/* output from lib_ring_buffer_reserve() */
	int reserve_cpu;		/* processor id updated by the reserve */
	size_t slot_size;		/* size of the reserved slot */
	unsigned long buf_offset;	/* offset following the record header */
	unsigned long pre_offset;	/*
					 * Initial offset position _before_
					 * the record is written. Positioned
					 * prior to record header alignment
					 * padding.
					 */
	u64 tsc;			/* time-stamp counter value */
	unsigned int rflags;		/* reservation flags (RING_BUFFER_RFLAG_*) */

	struct lib_ring_buffer *buf;	/*
					 * buffer corresponding to processor id
					 * for this channel
					 */
	/* NOTE(review): presumably the backend pages backing the reserved
	 * slot, cached here by the reserve path — confirm against callers. */
	struct lib_ring_buffer_backend_pages *backend_pages;
};
193 | ||
f3bc08c5 MD |
/*
 * ring buffer context
 *
 * Context passed to lib_ring_buffer_reserve(), lib_ring_buffer_commit(),
 * lib_ring_buffer_try_discard_reserve(), lib_ring_buffer_align_ctx() and
 * lib_ring_buffer_write().
 */
struct lttng_kernel_ring_buffer_ctx {
	/* Private ring buffer context, set by reserve callback. */
	struct lttng_kernel_ring_buffer_ctx_private priv;

	/* input received by lib_ring_buffer_reserve(), saved here. */
	void *client_priv;		/* Ring buffer client private data */

	size_t data_size;		/* size of payload */
	int largest_align;		/*
					 * alignment of the largest element
					 * in the payload
					 */
	struct lttng_kernel_probe_ctx *probe_ctx;	/* Probe context */
};
215 | ||
216 | /** | |
217 | * lib_ring_buffer_ctx_init - initialize ring buffer context | |
218 | * @ctx: ring buffer context to initialize | |
b1199bd3 | 219 | * @client_priv: client private data |
3cae7f22 | 220 | * @data_size: size of record data payload. It must be greater than 0. |
f3bc08c5 | 221 | * @largest_align: largest alignment within data payload types |
f3bc08c5 MD |
222 | */ |
223 | static inline | |
8a57ec02 | 224 | void lib_ring_buffer_ctx_init(struct lttng_kernel_ring_buffer_ctx *ctx, |
b1199bd3 | 225 | void *client_priv, |
f3bc08c5 | 226 | size_t data_size, int largest_align, |
a92e844e | 227 | struct lttng_kernel_probe_ctx *probe_ctx) |
f3bc08c5 | 228 | { |
b1199bd3 | 229 | ctx->client_priv = client_priv; |
f3bc08c5 MD |
230 | ctx->data_size = data_size; |
231 | ctx->largest_align = largest_align; | |
b1199bd3 | 232 | ctx->probe_ctx = probe_ctx; |
f3bc08c5 MD |
233 | } |
234 | ||
/*
 * Reservation flags.
 *
 * RING_BUFFER_RFLAG_FULL_TSC
 *
 * This flag is passed to record_header_size() and to the primitive used to
 * write the record header. It indicates that the full 64-bit time value is
 * needed in the record header. If this flag is not set, the record header needs
 * only to contain "tsc_bits" bit of time value.
 *
 * Reservation flags can be added by the client, starting from
 * "(RING_BUFFER_FLAGS_END << 0)". It can be used to pass information from
 * record_header_size() to lib_ring_buffer_write_record_header().
 */
#define RING_BUFFER_RFLAG_FULL_TSC		(1U << 0)
#define RING_BUFFER_RFLAG_END			(1U << 1)

/* lttng/tracer-core.h provides the RING_BUFFER_ALIGN client define. */
#ifndef LTTNG_TRACER_CORE_H
#error "lttng/tracer-core.h is needed for RING_BUFFER_ALIGN define"
#endif
255 | ||
f3bc08c5 MD |
/*
 * We need to define RING_BUFFER_ALIGN_ATTR so it is known early at
 * compile-time. We have to duplicate the "config->align" information and the
 * definition here because config->align is used both in the slow and fast
 * paths, but RING_BUFFER_ALIGN_ATTR is only available for the client code.
 *
 * NOTE(review): "config->align" looks historical — the visible
 * struct lib_ring_buffer_config has no "align" member; the RING_BUFFER_ALIGN
 * client define appears to carry this information instead. Confirm and
 * update the wording.
 */
#ifdef RING_BUFFER_ALIGN

# define RING_BUFFER_ALIGN_ATTR		/* Default arch alignment */

/*
 * Calculate the offset needed to align the type.
 * size_of_type must be non-zero.
 */
static inline
unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
{
	return offset_align(align_drift, size_of_type);
}

#else

# define RING_BUFFER_ALIGN_ATTR __attribute__((packed))

/*
 * Calculate the offset needed to align the type.
 * size_of_type must be non-zero.
 * Packed layout: records are never padded, so no alignment drift to absorb.
 */
static inline
unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
{
	return 0;
}

#endif
291 | ||
292 | /** | |
293 | * lib_ring_buffer_align_ctx - Align context offset on "alignment" | |
294 | * @ctx: ring buffer context. | |
295 | */ | |
296 | static inline | |
8a57ec02 | 297 | void lib_ring_buffer_align_ctx(struct lttng_kernel_ring_buffer_ctx *ctx, |
f3bc08c5 MD |
298 | size_t alignment) |
299 | { | |
b1199bd3 | 300 | ctx->priv.buf_offset += lib_ring_buffer_align(ctx->priv.buf_offset, |
f3bc08c5 MD |
301 | alignment); |
302 | } | |
303 | ||
304 | /* | |
305 | * lib_ring_buffer_check_config() returns 0 on success. | |
306 | * Used internally to check for valid configurations at channel creation. | |
307 | */ | |
308 | static inline | |
309 | int lib_ring_buffer_check_config(const struct lib_ring_buffer_config *config, | |
310 | unsigned int switch_timer_interval, | |
311 | unsigned int read_timer_interval) | |
312 | { | |
313 | if (config->alloc == RING_BUFFER_ALLOC_GLOBAL | |
314 | && config->sync == RING_BUFFER_SYNC_PER_CPU | |
315 | && switch_timer_interval) | |
316 | return -EINVAL; | |
317 | return 0; | |
318 | } | |
319 | ||
24591303 | 320 | #include <ringbuffer/vatomic.h> |
f3bc08c5 | 321 | |
886d51a3 | 322 | #endif /* _LIB_RING_BUFFER_CONFIG_H */ |