#ifndef _LINUX_RING_BUFFER_FRONTEND_API_H
#define _LINUX_RING_BUFFER_FRONTEND_API_H

/*
 * linux/ringbuffer/frontend_api.h
 *
 * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Ring Buffer Library Synchronization Header (buffer write API).
 *
 * Author:
 *	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * See ring_buffer_frontend.c for more information on wait-free algorithms.
 * See linux/ringbuffer/frontend.h for channel allocation and read-side API.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

#include "../../wrapper/ringbuffer/frontend.h"
#include <linux/errno.h>

/**
 * lib_ring_buffer_get_cpu - Precedes ring buffer reserve/commit.
 *
 * Disables preemption (acts as an RCU read-side critical section) and keeps a
 * ring buffer nesting count as a supplementary safety net ensuring that tracer
 * client code can never trigger an endless recursion. Returns the processor ID
 * on success, -EPERM on failure (nesting count too high).
 *
 * The barrier() (asm volatile with a "memory" clobber) prevents the compiler
 * from moving instructions out of the ring buffer nesting count section. This
 * is required to ensure that probe side-effects which can cause recursion
 * (e.g. unforeseen traps, divisions by 0, ...) are triggered within the
 * incremented nesting count section.
 */
static inline
int lib_ring_buffer_get_cpu(const struct lib_ring_buffer_config *config)
{
	int cpu, nesting;

	rcu_read_lock_sched_notrace();
	cpu = smp_processor_id();
	nesting = ++per_cpu(lib_ring_buffer_nesting, cpu);
	barrier();

	if (unlikely(nesting > 4)) {
		WARN_ON_ONCE(1);
		per_cpu(lib_ring_buffer_nesting, cpu)--;
		rcu_read_unlock_sched_notrace();
		return -EPERM;
	} else
		return cpu;
}

/**
 * lib_ring_buffer_put_cpu - Follows ring buffer reserve/commit.
 */
static inline
void lib_ring_buffer_put_cpu(const struct lib_ring_buffer_config *config)
{
	barrier();
	__get_cpu_var(lib_ring_buffer_nesting)--;
	rcu_read_unlock_sched_notrace();
}
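
/*
 * Example usage (illustrative sketch only, not part of this header's API): a
 * tracer client brackets its whole write path with lib_ring_buffer_get_cpu()
 * and lib_ring_buffer_put_cpu(). "client_config" is a hypothetical client
 * configuration; a full reserve/commit sequence is sketched after
 * lib_ring_buffer_commit() below.
 */
#if 0
static void client_write_path(struct channel *chan)
{
	int cpu;

	cpu = lib_ring_buffer_get_cpu(&client_config);
	if (cpu < 0)
		return;		/* Nesting count too high: drop the event. */
	/* ... reserve, write payload, commit (see sketch below) ... */
	lib_ring_buffer_put_cpu(&client_config);
}
#endif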

/*
 * lib_ring_buffer_try_reserve is called by lib_ring_buffer_reserve(). It is
 * not part of the API per se.
 *
 * Returns 0 if the reservation succeeds, or 1 if the slow path must be taken.
 */
static inline
int lib_ring_buffer_try_reserve(const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer_ctx *ctx,
				unsigned long *o_begin, unsigned long *o_end,
				unsigned long *o_old, size_t *before_hdr_pad)
{
	struct channel *chan = ctx->chan;
	struct lib_ring_buffer *buf = ctx->buf;

	*o_begin = v_read(config, &buf->offset);
	*o_old = *o_begin;

	ctx->tsc = lib_ring_buffer_clock_read(chan);

	/*
	 * Prefetch cacheline for read because we have to read the previous
	 * commit counter to increment it and the commit seq value to compare
	 * it to the commit counter.
	 */
	prefetch(&buf->commit_hot[subbuf_index(*o_begin, chan)]);

	if (last_tsc_overflow(config, buf, ctx->tsc))
		ctx->rflags = RING_BUFFER_RFLAG_FULL_TSC;

	if (unlikely(subbuf_offset(*o_begin, chan) == 0))
		return 1;

	ctx->slot_size = record_header_size(config, chan, *o_begin,
					    ctx->data_size, before_hdr_pad,
					    ctx->rflags, ctx);
	ctx->slot_size +=
		lib_ring_buffer_align(*o_begin + ctx->slot_size,
				      ctx->largest_align) + ctx->data_size;
	if (unlikely((subbuf_offset(*o_begin, chan) + ctx->slot_size)
		     > chan->backend.subbuf_size))
		return 1;

	/*
	 * Record fits in the current buffer and we are not on a switch
	 * boundary. It's safe to write.
	 */
	*o_end = *o_begin + ctx->slot_size;

	if (unlikely((subbuf_offset(*o_end, chan)) == 0))
		/*
		 * The offset_end will fall at the very beginning of the next
		 * subbuffer.
		 */
		return 1;

	return 0;
}

/**
 * lib_ring_buffer_reserve - Reserve space in a ring buffer.
 * @config: ring buffer instance configuration.
 * @ctx: ring buffer context. (input and output) Must be already initialized.
 *
 * Atomic wait-free slot reservation. The reserved space starts at the context
 * "pre_offset". Its length is "slot_size". The associated timestamp is "tsc".
 *
 * Returns 0 on success, -ENOSPC if there is not enough space, or -EAGAIN if
 * the channel is disabled.
 */
static inline
int lib_ring_buffer_reserve(const struct lib_ring_buffer_config *config,
			    struct lib_ring_buffer_ctx *ctx)
{
	struct channel *chan = ctx->chan;
	struct lib_ring_buffer *buf;
	unsigned long o_begin, o_end, o_old;
	size_t before_hdr_pad = 0;

	if (atomic_read(&chan->record_disabled))
		return -EAGAIN;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
		buf = per_cpu_ptr(chan->backend.buf, ctx->cpu);
	else
		buf = chan->backend.buf;
	if (atomic_read(&buf->record_disabled))
		return -EAGAIN;
	ctx->buf = buf;

	/*
	 * Perform retryable operations.
	 */
	if (unlikely(lib_ring_buffer_try_reserve(config, ctx, &o_begin,
						 &o_end, &o_old, &before_hdr_pad)))
		goto slow_path;

	if (unlikely(v_cmpxchg(config, &ctx->buf->offset, o_old, o_end)
		     != o_old))
		goto slow_path;

	/*
	 * Atomically update last_tsc. This update races against concurrent
	 * atomic updates, but the race will always cause supplementary full
	 * TSC record headers, never the opposite (missing a full TSC record
	 * header when it would be needed).
	 */
	save_last_tsc(config, ctx->buf, ctx->tsc);

	/*
	 * Push the reader if necessary.
	 */
	lib_ring_buffer_reserve_push_reader(ctx->buf, chan, o_end - 1);

	/*
	 * Clear noref flag for this subbuffer.
	 */
	lib_ring_buffer_clear_noref(config, &ctx->buf->backend,
				    subbuf_index(o_end - 1, chan));

	ctx->pre_offset = o_begin;
	ctx->buf_offset = o_begin + before_hdr_pad;
	return 0;
slow_path:
	return lib_ring_buffer_reserve_slow(ctx);
}

/**
 * lib_ring_buffer_switch - Perform a sub-buffer switch for a per-cpu buffer.
 * @config: ring buffer instance configuration.
 * @buf: buffer
 * @mode: buffer switch mode (SWITCH_ACTIVE or SWITCH_FLUSH)
 *
 * This operation is completely reentrant: it can be called while tracing is
 * active with absolutely no lock held.
 *
 * Note, however, that because a v_cmpxchg is used for some atomic operations
 * and must be executed locally for per-CPU buffers, this function must, in
 * the RING_BUFFER_SYNC_PER_CPU configuration, be called for an ACTIVE flush
 * from the CPU which owns the buffer, with preemption disabled.
 */
static inline
void lib_ring_buffer_switch(const struct lib_ring_buffer_config *config,
			    struct lib_ring_buffer *buf, enum switch_mode mode)
{
	lib_ring_buffer_switch_slow(buf, mode);
}
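
/*
 * Example (illustrative sketch only): forcing a flush of the current
 * sub-buffer. Per the note above, for RING_BUFFER_SYNC_PER_CPU the caller
 * must already run on the CPU which owns the buffer; disabling preemption
 * keeps it there across the switch. "client_config" is a hypothetical
 * client configuration.
 */
#if 0
static void client_flush_local_buffer(struct lib_ring_buffer *buf)
{
	preempt_disable();	/* Stay on the buffer's owner CPU. */
	lib_ring_buffer_switch(&client_config, buf, SWITCH_FLUSH);
	preempt_enable();
}
#endif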

/* lib_ring_buffer_commit() pairs with lib_ring_buffer_reserve(), defined above. */

/**
 * lib_ring_buffer_commit - Commit a record.
 * @config: ring buffer instance configuration.
 * @ctx: ring buffer context. (input arguments only)
 *
 * Atomic unordered slot commit. Increments the commit count in the
 * specified sub-buffer, and delivers it if necessary.
 */
static inline
void lib_ring_buffer_commit(const struct lib_ring_buffer_config *config,
			    const struct lib_ring_buffer_ctx *ctx)
{
	struct channel *chan = ctx->chan;
	struct lib_ring_buffer *buf = ctx->buf;
	unsigned long offset_end = ctx->buf_offset;
	unsigned long endidx = subbuf_index(offset_end - 1, chan);
	unsigned long commit_count;

	/*
	 * Must count record before incrementing the commit count.
	 */
	subbuffer_count_record(config, &buf->backend, endidx);

	/*
	 * Order all writes to buffer before the commit count update that will
	 * determine that the subbuffer is full.
	 */
	if (config->ipi == RING_BUFFER_IPI_BARRIER) {
		/*
		 * Must write slot data before incrementing commit count. This
		 * compiler barrier is upgraded into a smp_mb() by the IPI sent
		 * by get_subbuf().
		 */
		barrier();
	} else
		smp_wmb();

	v_add(config, ctx->slot_size, &buf->commit_hot[endidx].cc);

	/*
	 * The commit count read can race with concurrent out-of-order commit
	 * count updates. This is only needed for lib_ring_buffer_check_deliver
	 * (for non-polling delivery only) and for
	 * lib_ring_buffer_write_commit_counter. The race can only cause the
	 * counter to be read with the same value more than once, which could
	 * cause:
	 * - Multiple deliveries of the same sub-buffer (handled gracefully by
	 *   the reader code) if the value corresponds to a full sub-buffer.
	 *   It's important that we can never miss a sub-buffer delivery.
	 *   Re-reading the value after the v_add ensures this.
	 * - Reading a commit_count with a higher value than what was actually
	 *   added to it for the lib_ring_buffer_write_commit_counter call
	 *   (again caused by a concurrent committer). It does not matter,
	 *   because this function is interested in the fact that the commit
	 *   count catches up with the reserve offset for a specific
	 *   sub-buffer, which is completely independent of the order.
	 */
	commit_count = v_read(config, &buf->commit_hot[endidx].cc);

	lib_ring_buffer_check_deliver(config, buf, chan, offset_end - 1,
				      commit_count, endidx);
	/*
	 * Update used size at each commit. It's needed only for extracting
	 * ring_buffer buffers from vmcore, after crash.
	 */
	lib_ring_buffer_write_commit_counter(config, buf, chan, endidx,
					     ctx->buf_offset, commit_count,
					     ctx->slot_size);
}
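
/*
 * Example usage (illustrative sketch only): a complete client write path.
 * It assumes the context initializer and backend write helper of this ring
 * buffer library (lib_ring_buffer_ctx_init(), lib_ring_buffer_write(),
 * declared in the companion headers); "client_config" and the priv and
 * alignment arguments are hypothetical placeholders.
 */
#if 0
static void client_trace_event(struct channel *chan, const void *payload,
			       size_t len)
{
	struct lib_ring_buffer_ctx ctx;
	int ret, cpu;

	cpu = lib_ring_buffer_get_cpu(&client_config);
	if (cpu < 0)
		return;		/* Nesting count too high: drop the event. */
	lib_ring_buffer_ctx_init(&ctx, chan, NULL, len, sizeof(char), cpu);
	ret = lib_ring_buffer_reserve(&client_config, &ctx);
	if (ret)
		goto put;	/* -EAGAIN or -ENOSPC: the event is lost. */
	lib_ring_buffer_write(&client_config, &ctx, payload, len);
	lib_ring_buffer_commit(&client_config, &ctx);
put:
	lib_ring_buffer_put_cpu(&client_config);
}
#endif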

/**
 * lib_ring_buffer_try_discard_reserve - Try discarding a record.
 * @config: ring buffer instance configuration.
 * @ctx: ring buffer context. (input arguments only)
 *
 * Only succeeds if no other record has been written after the record to
 * discard. If discard fails, the record must be committed to the buffer.
 *
 * Returns 0 upon success, -EPERM if the record cannot be discarded.
 */
static inline
int lib_ring_buffer_try_discard_reserve(const struct lib_ring_buffer_config *config,
					const struct lib_ring_buffer_ctx *ctx)
{
	struct lib_ring_buffer *buf = ctx->buf;
	unsigned long end_offset = ctx->pre_offset + ctx->slot_size;

	/*
	 * We need to ensure that if the cmpxchg succeeds and discards the
	 * record, the next record will record a full TSC, because it cannot
	 * rely on the last_tsc associated with the discarded record to detect
	 * overflows. The only way to ensure this is to set the last_tsc to 0
	 * (assuming no 64-bit TSC overflow), which forces writing a 64-bit
	 * timestamp in the next record.
	 *
	 * Note: if discard fails, we must leave the TSC in the record header.
	 * It is needed to keep track of TSC overflows for the following
	 * records.
	 */
	save_last_tsc(config, buf, 0ULL);

	if (likely(v_cmpxchg(config, &buf->offset, end_offset, ctx->pre_offset)
		   != end_offset))
		return -EPERM;
	else
		return 0;
}
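
/*
 * Example (illustrative sketch only): the discard-after-reserve pattern,
 * e.g. when a filter evaluated after reservation rejects the event.
 * "client_config", "client_filter_rejects", lib_ring_buffer_ctx_init() and
 * lib_ring_buffer_write() are as in the write-path sketch above. If the
 * discard fails, the reserved slot must still be committed.
 */
#if 0
static void client_trace_filtered(struct channel *chan, void *priv,
				  const void *payload, size_t len)
{
	struct lib_ring_buffer_ctx ctx;
	int ret, cpu;

	cpu = lib_ring_buffer_get_cpu(&client_config);
	if (cpu < 0)
		return;
	lib_ring_buffer_ctx_init(&ctx, chan, priv, len, sizeof(char), cpu);
	ret = lib_ring_buffer_reserve(&client_config, &ctx);
	if (ret)
		goto put;
	if (client_filter_rejects(priv)
	    && !lib_ring_buffer_try_discard_reserve(&client_config, &ctx))
		goto put;	/* Record discarded: nothing to commit. */
	lib_ring_buffer_write(&client_config, &ctx, payload, len);
	lib_ring_buffer_commit(&client_config, &ctx);
put:
	lib_ring_buffer_put_cpu(&client_config);
}
#endif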

static inline
void channel_record_disable(const struct lib_ring_buffer_config *config,
			    struct channel *chan)
{
	atomic_inc(&chan->record_disabled);
}

static inline
void channel_record_enable(const struct lib_ring_buffer_config *config,
			   struct channel *chan)
{
	atomic_dec(&chan->record_disabled);
}

static inline
void lib_ring_buffer_record_disable(const struct lib_ring_buffer_config *config,
				    struct lib_ring_buffer *buf)
{
	atomic_inc(&buf->record_disabled);
}

static inline
void lib_ring_buffer_record_enable(const struct lib_ring_buffer_config *config,
				   struct lib_ring_buffer *buf)
{
	atomic_dec(&buf->record_disabled);
}

#endif /* _LINUX_RING_BUFFER_FRONTEND_API_H */