Commit | Line | Data |
---|---|---|
a6352fd4 MD |
1 | #ifndef _LINUX_RING_BUFFER_CONFIG_H |
2 | #define _LINUX_RING_BUFFER_CONFIG_H | |
3 | ||
4 | /* | |
5 | * linux/ringbuffer/config.h | |
6 | * | |
7 | * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com> | |
8 | * | |
9 | * Ring buffer configuration header. Note: after declaring the standard inline | |
10 | * functions, clients should also include linux/ringbuffer/api.h. | |
11 | * | |
12 | * Dual LGPL v2.1/GPL v2 license. | |
13 | */ | |
14 | ||
15 | #include <errno.h> | |
16 | #include "ust/kcompat/kcompat.h" | |
17 | #include "ust/align.h" | |
18 | ||
/*
 * Opaque types: this header only manipulates pointers to them, so
 * forward declarations are sufficient; the definitions live elsewhere.
 */
struct lib_ring_buffer;
struct channel;
struct lib_ring_buffer_config;
struct lib_ring_buffer_ctx;
/*
 * Fix: the previous line read "struct shm_handle *handle;", which is not a
 * forward declaration but a tentative definition of a file-scope variable
 * named "handle" — every translation unit including this header would get
 * its own global object (or clash at link time), and nothing in this header
 * uses such a global. A plain forward declaration, matching the four
 * declarations above, is what the struct members and parameters below need.
 */
struct shm_handle;
24 | |
/*
 * Ring buffer client callbacks. Only used by slow path, never on fast path.
 * For the fast path, record_header_size(), ring_buffer_clock_read() should be
 * provided as inline functions too. These may simply return 0 if not used by
 * the client.
 */
struct lib_ring_buffer_client_cb {
	/* Mandatory callbacks */

	/* A static inline version is also required for fast path */
	u64 (*ring_buffer_clock_read) (struct channel *chan);
	/*
	 * Returns the size of the record header for the record about to be
	 * written at "offset"; *pre_header_padding receives the alignment
	 * padding inserted before the header.
	 */
	size_t (*record_header_size) (const struct lib_ring_buffer_config *config,
				      struct channel *chan, size_t offset,
				      size_t *pre_header_padding,
				      struct lib_ring_buffer_ctx *ctx);

	/* Slow path only, at subbuffer switch */
	size_t (*subbuffer_header_size) (void);
	/* Called when sub-buffer "subbuf_idx" is opened for writing. */
	void (*buffer_begin) (struct lib_ring_buffer *buf, u64 tsc,
			      unsigned int subbuf_idx,
			      struct shm_handle *handle);
	/*
	 * Called when sub-buffer "subbuf_idx" is closed; data_size is the
	 * amount of payload it contains.
	 */
	void (*buffer_end) (struct lib_ring_buffer *buf, u64 tsc,
			    unsigned int subbuf_idx, unsigned long data_size,
			    struct shm_handle *handle);

	/* Optional callbacks (can be set to NULL) */

	/* Called at buffer creation/finalize */
	int (*buffer_create) (struct lib_ring_buffer *buf, void *priv,
			      int cpu, const char *name,
			      struct shm_handle *handle);
	/*
	 * Clients should guarantee that no new reader handle can be opened
	 * after finalize.
	 */
	void (*buffer_finalize) (struct lib_ring_buffer *buf,
				 void *priv, int cpu,
				 struct shm_handle *handle);

	/*
	 * Extract header length, payload length and timestamp from event
	 * record. Used by buffer iterators. Timestamp is only used by channel
	 * iterator.
	 */
	void (*record_get) (const struct lib_ring_buffer_config *config,
			    struct channel *chan, struct lib_ring_buffer *buf,
			    size_t offset, size_t *header_len,
			    size_t *payload_len, u64 *timestamp,
			    struct shm_handle *handle);
};
75 | ||
/*
 * Ring buffer instance configuration.
 *
 * Declare as "static const" within the client object to ensure the inline fast
 * paths can be optimized.
 *
 * alloc/sync pairs:
 *
 * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_PER_CPU :
 *   Per-cpu buffers with per-cpu synchronization. Tracing must be performed
 *   with preemption disabled (lib_ring_buffer_get_cpu() and
 *   lib_ring_buffer_put_cpu()).
 *
 * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_GLOBAL :
 *   Per-cpu buffer with global synchronization. Tracing can be performed with
 *   preemption enabled, statistically stays on the local buffers.
 *
 * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_PER_CPU :
 *   Should only be used for buffers belonging to a single thread or protected
 *   by mutual exclusion by the client. Note that periodical sub-buffer switch
 *   should be disabled in this kind of configuration.
 *
 * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_GLOBAL :
 *   Global shared buffer with global synchronization.
 *
 * wakeup:
 *
 * RING_BUFFER_WAKEUP_BY_TIMER uses per-cpu deferrable timers to poll the
 * buffers and wake up readers if data is ready. Mainly useful for tracers which
 * don't want to call into the wakeup code on the tracing path. Use in
 * combination with "read_timer_interval" channel_create() argument.
 *
 * RING_BUFFER_WAKEUP_BY_WRITER directly wakes up readers when a subbuffer is
 * ready to read. Lower latencies before the reader is woken up. Mainly suitable
 * for drivers.
 *
 * RING_BUFFER_WAKEUP_NONE does not perform any wakeup whatsoever. The client
 * has the responsibility to perform wakeups.
 */
struct lib_ring_buffer_config {
	/* Buffer allocation scheme: see the alloc/sync pairing rules above. */
	enum {
		RING_BUFFER_ALLOC_PER_CPU,
		RING_BUFFER_ALLOC_GLOBAL,
	} alloc;
	/* Writer synchronization scheme. */
	enum {
		RING_BUFFER_SYNC_PER_CPU,	/* Wait-free */
		RING_BUFFER_SYNC_GLOBAL,	/* Lock-free */
	} sync;
	/* Behavior when the buffer is full. */
	enum {
		RING_BUFFER_OVERWRITE,		/* Overwrite when buffer full */
		RING_BUFFER_DISCARD,		/* Discard when buffer full */
	} mode;
	/* How readers consume the data. */
	enum {
		RING_BUFFER_SPLICE,
		RING_BUFFER_MMAP,
		RING_BUFFER_READ,		/* TODO */
		RING_BUFFER_ITERATOR,
		RING_BUFFER_NONE,
	} output;
	/* Backing memory type. */
	enum {
		RING_BUFFER_PAGE,
		RING_BUFFER_VMAP,		/* TODO */
		RING_BUFFER_STATIC,		/* TODO */
	} backend;
	/* Whether buffer state must stay consistent across an oops/crash. */
	enum {
		RING_BUFFER_NO_OOPS_CONSISTENCY,
		RING_BUFFER_OOPS_CONSISTENCY,
	} oops;
	/* Whether cross-CPU ordering is enforced with an IPI barrier. */
	enum {
		RING_BUFFER_IPI_BARRIER,
		RING_BUFFER_NO_IPI_BARRIER,
	} ipi;
	/* Reader wakeup policy: see header comment above. */
	enum {
		RING_BUFFER_WAKEUP_BY_TIMER,	/* wake up performed by timer */
		RING_BUFFER_WAKEUP_BY_WRITER,	/*
						 * writer wakes up reader,
						 * not lock-free
						 * (takes spinlock).
						 */
	} wakeup;
	/*
	 * tsc_bits: timestamp bits saved at each record.
	 *   0 and 64 disable the timestamp compression scheme.
	 */
	unsigned int tsc_bits;
	/* Client callbacks (see struct lib_ring_buffer_client_cb). */
	struct lib_ring_buffer_client_cb cb;
};
163 | ||
/*
 * ring buffer context
 *
 * Context passed to lib_ring_buffer_reserve(), lib_ring_buffer_commit(),
 * lib_ring_buffer_try_discard_reserve(), lib_ring_buffer_align_ctx() and
 * lib_ring_buffer_write().
 */
struct lib_ring_buffer_ctx {
	/* input received by lib_ring_buffer_reserve(), saved here. */
	struct channel *chan;		/* channel */
	void *priv;			/* client private data */
	struct shm_handle *handle;	/* shared-memory handle */
	size_t data_size;		/* size of payload */
	int largest_align;		/*
					 * alignment of the largest element
					 * in the payload
					 */
	int cpu;			/* processor id */

	/* output from lib_ring_buffer_reserve() */
	struct lib_ring_buffer *buf;	/*
					 * buffer corresponding to processor id
					 * for this channel
					 */
	size_t slot_size;		/* size of the reserved slot */
	unsigned long buf_offset;	/* offset following the record header */
	unsigned long pre_offset;	/*
					 * Initial offset position _before_
					 * the record is written. Positioned
					 * prior to record header alignment
					 * padding.
					 */
	u64 tsc;			/* time-stamp counter value */
	unsigned int rflags;		/* reservation flags */
};
199 | ||
200 | /** | |
201 | * lib_ring_buffer_ctx_init - initialize ring buffer context | |
202 | * @ctx: ring buffer context to initialize | |
203 | * @chan: channel | |
204 | * @priv: client private data | |
205 | * @data_size: size of record data payload | |
206 | * @largest_align: largest alignment within data payload types | |
207 | * @cpu: processor id | |
208 | */ | |
209 | static inline | |
210 | void lib_ring_buffer_ctx_init(struct lib_ring_buffer_ctx *ctx, | |
211 | struct channel *chan, void *priv, | |
212 | size_t data_size, int largest_align, | |
1d498196 | 213 | int cpu, struct shm_handle *handle) |
a6352fd4 MD |
214 | { |
215 | ctx->chan = chan; | |
216 | ctx->priv = priv; | |
217 | ctx->data_size = data_size; | |
218 | ctx->largest_align = largest_align; | |
219 | ctx->cpu = cpu; | |
220 | ctx->rflags = 0; | |
1d498196 | 221 | ctx->handle = handle; |
a6352fd4 MD |
222 | } |
223 | ||
/*
 * Reservation flags.
 *
 * RING_BUFFER_RFLAG_FULL_TSC
 *
 * This flag is passed to record_header_size() and to the primitive used to
 * write the record header. It indicates that the full 64-bit time value is
 * needed in the record header. If this flag is not set, the record header needs
 * only to contain "tsc_bits" bit of time value.
 *
 * Reservation flags can be added by the client, starting from
 * "(RING_BUFFER_FLAGS_END << 0)". It can be used to pass information from
 * record_header_size() to lib_ring_buffer_write_record_header().
 */
#define RING_BUFFER_RFLAG_FULL_TSC	(1U << 0)
/* First bit available for client-defined reservation flags. */
#define RING_BUFFER_RFLAG_END		(1U << 1)
240 | ||
/*
 * We need to define RING_BUFFER_ALIGN_ATTR so it is known early at
 * compile-time. We have to duplicate the "config->align" information and the
 * definition here because config->align is used both in the slow and fast
 * paths, but RING_BUFFER_ALIGN_ATTR is only available for the client code.
 */
#ifdef RING_BUFFER_ALIGN

# define RING_BUFFER_ALIGN_ATTR		/* Default arch alignment */

/*
 * Calculate the offset needed to align the type.
 * size_of_type must be non-zero.
 */
static inline
unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
{
	return offset_align(align_drift, size_of_type);
}

#else

# define RING_BUFFER_ALIGN_ATTR __attribute__((packed))

/*
 * Calculate the offset needed to align the type.
 * size_of_type must be non-zero.
 * Records are packed (see RING_BUFFER_ALIGN_ATTR above), so no alignment
 * padding is ever required: always returns 0.
 */
static inline
unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
{
	return 0;
}

#endif
276 | ||
277 | /** | |
278 | * lib_ring_buffer_align_ctx - Align context offset on "alignment" | |
279 | * @ctx: ring buffer context. | |
280 | */ | |
281 | static inline | |
282 | void lib_ring_buffer_align_ctx(struct lib_ring_buffer_ctx *ctx, | |
283 | size_t alignment) | |
284 | { | |
285 | ctx->buf_offset += lib_ring_buffer_align(ctx->buf_offset, | |
286 | alignment); | |
287 | } | |
288 | ||
289 | /* | |
290 | * lib_ring_buffer_check_config() returns 0 on success. | |
291 | * Used internally to check for valid configurations at channel creation. | |
292 | */ | |
293 | static inline | |
294 | int lib_ring_buffer_check_config(const struct lib_ring_buffer_config *config, | |
295 | unsigned int switch_timer_interval, | |
296 | unsigned int read_timer_interval) | |
297 | { | |
298 | if (config->alloc == RING_BUFFER_ALLOC_GLOBAL | |
299 | && config->sync == RING_BUFFER_SYNC_PER_CPU | |
300 | && switch_timer_interval) | |
301 | return -EINVAL; | |
302 | return 0; | |
303 | } | |
304 | ||
8d8a24c8 | 305 | #include <ust/vatomic.h> |
a6352fd4 MD |
306 | |
307 | #endif /* _LINUX_RING_BUFFER_CONFIG_H */ |