#ifndef _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H
#define _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H

/*
 * libringbuffer/frontend_internal.h
 *
 * Ring Buffer Library Synchronization Header (internal helpers).
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Author:
 *	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * See ring_buffer_frontend.c for more information on wait-free algorithms.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

#include <urcu/compiler.h>
#include <urcu/tls-compat.h>
#include <signal.h>
#include <pthread.h>

#include <lttng/ringbuffer-config.h>
#include "backend_types.h"
#include "frontend_types.h"
#include "shm.h"

/* Buffer offset macros */

/* buf_trunc mask selects only the buffer number. */
static inline
unsigned long buf_trunc(unsigned long offset, struct channel *chan)
{
	return offset & ~(chan->backend.buf_size - 1);
}

/* Select the buffer number value (counter). */
static inline
unsigned long buf_trunc_val(unsigned long offset, struct channel *chan)
{
	return buf_trunc(offset, chan) >> chan->backend.buf_size_order;
}

/* buf_offset mask selects only the offset within the current buffer. */
static inline
unsigned long buf_offset(unsigned long offset, struct channel *chan)
{
	return offset & (chan->backend.buf_size - 1);
}

/* subbuf_offset mask selects the offset within the current subbuffer. */
static inline
unsigned long subbuf_offset(unsigned long offset, struct channel *chan)
{
	return offset & (chan->backend.subbuf_size - 1);
}

/* subbuf_trunc mask selects the subbuffer number. */
static inline
unsigned long subbuf_trunc(unsigned long offset, struct channel *chan)
{
	return offset & ~(chan->backend.subbuf_size - 1);
}

/* subbuf_align aligns the offset to the next subbuffer. */
static inline
unsigned long subbuf_align(unsigned long offset, struct channel *chan)
{
	return (offset + chan->backend.subbuf_size)
	       & ~(chan->backend.subbuf_size - 1);
}

/* subbuf_index returns the index of the current subbuffer within the buffer. */
static inline
unsigned long subbuf_index(unsigned long offset, struct channel *chan)
{
	return buf_offset(offset, chan) >> chan->backend.subbuf_size_order;
}

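/*
 * Worked example (illustrative values only, not mandated by this
 * header): with buf_size = 4 MB (buf_size_order = 22) and
 * subbuf_size = 1 MB (subbuf_size_order = 20, num_subbuf = 4), the
 * free-running offset 0x523456 decomposes as:
 *
 *   buf_trunc(0x523456, chan)     == 0x400000  (wrap count * buf_size)
 *   buf_trunc_val(0x523456, chan) == 1         (buffer wrap count)
 *   buf_offset(0x523456, chan)    == 0x123456  (offset within the buffer)
 *   subbuf_trunc(0x523456, chan)  == 0x500000  (subbuffer number * subbuf_size)
 *   subbuf_offset(0x523456, chan) == 0x023456  (offset within the subbuffer)
 *   subbuf_index(0x523456, chan)  == 1         (subbuffer index in the buffer)
 *   subbuf_align(0x523456, chan)  == 0x600000  (start of the next subbuffer)
 *
 * Because offsets are free-running, buf_trunc_val() effectively counts
 * how many times the write position has wrapped around the buffer.
 */
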
/*
 * Last TSC comparison functions. Check if the current TSC overflows tsc_bits
 * bits from the last TSC read. When overflows are detected, the full 64-bit
 * timestamp counter should be written in the record header. Reads and writes
 * last_tsc atomically.
 */

#if (CAA_BITS_PER_LONG == 32)
static inline
void save_last_tsc(const struct lttng_ust_lib_ring_buffer_config *config,
		   struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
{
	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return;

	/*
	 * Ensure the compiler performs this update in a single instruction.
	 */
	v_set(config, &buf->last_tsc, (unsigned long)(tsc >> config->tsc_bits));
}

static inline
int last_tsc_overflow(const struct lttng_ust_lib_ring_buffer_config *config,
		      struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
{
	unsigned long tsc_shifted;

	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return 0;

	tsc_shifted = (unsigned long)(tsc >> config->tsc_bits);
	if (caa_unlikely(tsc_shifted
			 - (unsigned long)v_read(config, &buf->last_tsc)))
		return 1;
	else
		return 0;
}
#else
static inline
void save_last_tsc(const struct lttng_ust_lib_ring_buffer_config *config,
		   struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
{
	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return;

	v_set(config, &buf->last_tsc, (unsigned long)tsc);
}

static inline
int last_tsc_overflow(const struct lttng_ust_lib_ring_buffer_config *config,
		      struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
{
	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return 0;

	if (caa_unlikely((tsc - v_read(config, &buf->last_tsc))
			 >> config->tsc_bits))
		return 1;
	else
		return 0;
}
#endif
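
/*
 * Illustration (tsc_bits = 27 is an example value, not mandated by this
 * header): record headers carry only the low-order tsc_bits of each
 * timestamp. If last_tsc tracks 0x00000100 and the new tsc is
 * 0x08000100, the delta (0x08000000) shifted right by 27 bits is
 * non-zero, so last_tsc_overflow() returns 1 and the full 64-bit
 * timestamp must be written in the record header for the trace reader
 * to resynchronize. The 32-bit variant stores and compares
 * tsc >> tsc_bits instead, because an unsigned long cannot hold the
 * full timestamp; it thus flags any crossing of a 2^tsc_bits boundary.
 */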

extern
int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx,
				 void *client_ctx);

extern
void lib_ring_buffer_switch_slow(struct lttng_ust_lib_ring_buffer *buf,
				 enum switch_mode mode,
				 struct lttng_ust_shm_handle *handle);

void lib_ring_buffer_check_deliver_slow(const struct lttng_ust_lib_ring_buffer_config *config,
					struct lttng_ust_lib_ring_buffer *buf,
					struct channel *chan,
					unsigned long offset,
					unsigned long commit_count,
					unsigned long idx,
					struct lttng_ust_shm_handle *handle,
					uint64_t tsc);

/* Buffer write helpers */

static inline
void lib_ring_buffer_reserve_push_reader(struct lttng_ust_lib_ring_buffer *buf,
					 struct channel *chan,
					 unsigned long offset)
{
	unsigned long consumed_old, consumed_new;

	do {
		consumed_old = uatomic_read(&buf->consumed);
		/*
		 * If the buffer is in overwrite mode, push the reader consumed
		 * count if the write position has reached it and we are not
		 * at the first iteration (don't push the reader farther than
		 * the writer). Many writers can update this concurrently in
		 * the same buffer; the writer at the farthest write position
		 * sub-buffer index is the one which wins this loop.
		 */
		if (caa_unlikely(subbuf_trunc(offset, chan)
				 - subbuf_trunc(consumed_old, chan)
				 >= chan->backend.buf_size))
			consumed_new = subbuf_align(consumed_old, chan);
		else
			return;
	} while (caa_unlikely(uatomic_cmpxchg(&buf->consumed, consumed_old,
					      consumed_new) != consumed_old));
}
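
/*
 * Example of the push condition above (illustrative values): with
 * buf_size = 4 MB and subbuf_size = 1 MB, a writer reserving at offset
 * 0x900000 while consumed still holds 0x4fffff computes
 * subbuf_trunc(0x900000) - subbuf_trunc(0x4fffff) = 0x500000, which is
 * >= buf_size, so consumed is pushed to subbuf_align(0x4fffff) =
 * 0x500000: the reader may no longer hold a subbuffer the writer is
 * about to overwrite. A failed cmpxchg means another writer (or the
 * reader itself) moved consumed concurrently; the loop then
 * re-evaluates the condition with the fresh value.
 */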

/*
 * Move the consumed position to the beginning of the subbuffer that
 * contains the write offset. This should only be used on ring buffers
 * that are not actively being written into, because clear_reader does
 * not take the commit counters into account when moving the consumed
 * position, which can make concurrent trace producers or consumers
 * observe a consumed position beyond the write offset, breaking the
 * ring buffer algorithm guarantees.
 */
static inline
void lib_ring_buffer_clear_reader(struct lttng_ust_lib_ring_buffer *buf,
				  struct lttng_ust_shm_handle *handle)
{
	struct channel *chan;
	const struct lttng_ust_lib_ring_buffer_config *config;
	unsigned long offset, consumed_old, consumed_new;

	chan = shmp(handle, buf->backend.chan);
	if (!chan)
		return;
	config = &chan->backend.config;

	do {
		offset = v_read(config, &buf->offset);
		consumed_old = uatomic_read(&buf->consumed);
		CHAN_WARN_ON(chan, (long) (subbuf_trunc(offset, chan)
					   - subbuf_trunc(consumed_old, chan))
			     < 0);
		consumed_new = subbuf_trunc(offset, chan);
	} while (caa_unlikely(uatomic_cmpxchg(&buf->consumed, consumed_old,
					      consumed_new) != consumed_old));
}

static inline
int lib_ring_buffer_pending_data(const struct lttng_ust_lib_ring_buffer_config *config,
				 struct lttng_ust_lib_ring_buffer *buf,
				 struct channel *chan)
{
	return !!subbuf_offset(v_read(config, &buf->offset), chan);
}

static inline
unsigned long lib_ring_buffer_get_data_size(const struct lttng_ust_lib_ring_buffer_config *config,
					    struct lttng_ust_lib_ring_buffer *buf,
					    unsigned long idx,
					    struct lttng_ust_shm_handle *handle)
{
	return subbuffer_get_data_size(config, &buf->backend, idx, handle);
}

/*
 * Check if all space reservations in a buffer have been committed. This
 * helps knowing if an execution context is nested (for per-cpu buffers
 * only). This is a very specific ftrace use-case, so we keep this as an
 * "internal" API.
 */
static inline
int lib_ring_buffer_reserve_committed(const struct lttng_ust_lib_ring_buffer_config *config,
				      struct lttng_ust_lib_ring_buffer *buf,
				      struct channel *chan,
				      struct lttng_ust_shm_handle *handle)
{
	unsigned long offset, idx, commit_count;
	struct commit_counters_hot *cc_hot;

	CHAN_WARN_ON(chan, config->alloc != RING_BUFFER_ALLOC_PER_CPU);
	CHAN_WARN_ON(chan, config->sync != RING_BUFFER_SYNC_PER_CPU);

	/*
	 * Read offset and commit count in a loop so they are both read
	 * atomically wrt interrupts. We deal with interrupt concurrency by
	 * restarting both reads if the offset has been pushed. Note that given
	 * we only have to deal with interrupt concurrency here, an interrupt
	 * modifying the commit count will also modify "offset", so it is safe
	 * to only check for offset modifications.
	 */
	do {
		offset = v_read(config, &buf->offset);
		idx = subbuf_index(offset, chan);
		cc_hot = shmp_index(handle, buf->commit_hot, idx);
		if (caa_unlikely(!cc_hot))
			return 0;
		commit_count = v_read(config, &cc_hot->cc);
	} while (offset != v_read(config, &buf->offset));

	return ((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
		- (commit_count & chan->commit_count_mask) == 0);
}
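
/*
 * Note on the arithmetic above: buf_trunc(offset, chan) equals the
 * buffer wrap count multiplied by buf_size, so shifting it right by
 * num_subbuf_order yields wrap count * subbuf_size, which is the
 * commit count a subbuffer accumulates on each full pass over the
 * buffer. Masking with commit_count_mask keeps the two free-running
 * counters comparable, since the commit count and the offset wrap the
 * unsigned long range at different rates.
 */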

/*
 * Receive the end-of-subbuffer TSC as parameter. It has been read in the
 * space reservation loop of either reserve or switch, which ensures it
 * progresses monotonically with event records in the buffer. Therefore,
 * it ensures that the end timestamp of a subbuffer is <= the begin
 * timestamp of the following subbuffer.
 */
static inline
void lib_ring_buffer_check_deliver(const struct lttng_ust_lib_ring_buffer_config *config,
				   struct lttng_ust_lib_ring_buffer *buf,
				   struct channel *chan,
				   unsigned long offset,
				   unsigned long commit_count,
				   unsigned long idx,
				   struct lttng_ust_shm_handle *handle,
				   uint64_t tsc)
{
	unsigned long old_commit_count = commit_count
					 - chan->backend.subbuf_size;

	/* Check if all commits have been done. */
	if (caa_unlikely((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
			 - (old_commit_count & chan->commit_count_mask) == 0))
		lib_ring_buffer_check_deliver_slow(config, buf, chan, offset,
						   commit_count, idx, handle, tsc);
}
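
/*
 * Note: old_commit_count is the commit count one full subbuffer ago, so
 * the equality above succeeds exactly once per filled subbuffer: for
 * the writer whose commit brings the count from "one subbuffer short"
 * to "fully committed". Only that writer takes the slow path and
 * delivers the subbuffer to the reader.
 */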

/*
 * lib_ring_buffer_write_commit_counter
 *
 * For flight recording. Must be called after commit.
 * This function updates the subbuffer's commit_seq counter each time the
 * commit count catches back up to the reserve offset (modulo subbuffer
 * size). It is useful for crash dumps.
 */
static inline
void lib_ring_buffer_write_commit_counter(const struct lttng_ust_lib_ring_buffer_config *config,
					  struct lttng_ust_lib_ring_buffer *buf,
					  struct channel *chan,
					  unsigned long buf_offset,
					  unsigned long commit_count,
					  struct lttng_ust_shm_handle *handle,
					  struct commit_counters_hot *cc_hot)
{
	unsigned long commit_seq_old;

	if (config->oops != RING_BUFFER_OOPS_CONSISTENCY)
		return;

	/*
	 * subbuf_offset includes commit_count_mask. We can simply
	 * compare the offsets within the subbuffer without caring about
	 * buffer full/empty mismatch because offset is never zero here
	 * (subbuffer header and record headers have non-zero length).
	 */
	if (caa_unlikely(subbuf_offset(buf_offset - commit_count, chan)))
		return;

	commit_seq_old = v_read(config, &cc_hot->seq);
	if (caa_likely((long) (commit_seq_old - commit_count) < 0))
		v_set(config, &cc_hot->seq, commit_count);
}
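
/*
 * Illustration of the monotonic update above (hypothetical numbers):
 * nested writers may reach this point out of order. If commit_seq
 * currently holds 0x300000 and a writer computes commit_count =
 * 0x200000 (an older state), the signed comparison
 * (long) (0x300000 - 0x200000) < 0 is false and the stale value is
 * discarded; only a commit_count ahead of commit_seq moves it forward.
 * This keeps commit_seq usable by a crash-dump reader as a lower bound
 * on the bytes committed for sure.
 */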

extern int lib_ring_buffer_create(struct lttng_ust_lib_ring_buffer *buf,
				  struct channel_backend *chanb, int cpu,
				  struct lttng_ust_shm_handle *handle,
				  struct shm_object *shmobj);
extern void lib_ring_buffer_free(struct lttng_ust_lib_ring_buffer *buf,
				 struct lttng_ust_shm_handle *handle);

/* Keep track of trap nesting inside ring buffer code. */
extern DECLARE_URCU_TLS(unsigned int, lib_ring_buffer_nesting);

#endif /* _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H */