Cleanup: apply `include-what-you-use` guideline for `uint*_t`
[lttng-ust.git] / libringbuffer / frontend.h
... / ...
CommitLineData
1#ifndef _LTTNG_RING_BUFFER_FRONTEND_H
2#define _LTTNG_RING_BUFFER_FRONTEND_H
3
4/*
5 * libringbuffer/frontend.h
6 *
7 * Ring Buffer Library Synchronization Header (API).
8 *
9 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
10 *
11 * This library is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public
13 * License as published by the Free Software Foundation; only
14 * version 2.1 of the License.
15 *
16 * This library is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
20 *
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with this library; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24 *
25 *
26 * Author:
27 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
28 *
29 * See ring_buffer_frontend.c for more information on wait-free algorithms.
30 */
31#include <stddef.h>
32#include <stdint.h>
33
34#include <urcu/compiler.h>
35#include <urcu/uatomic.h>
36
37#include "smp.h"
38/* Internal helpers */
39#include "frontend_internal.h"
40
/* Buffer creation/removal and setup operations */

/*
 * switch_timer_interval is the time interval (in us) to fill sub-buffers with
 * padding to let readers get those sub-buffers. Used for live streaming.
 *
 * read_timer_interval is the time interval (in us) to wake up pending readers.
 *
 * buf_addr is a pointer to the beginning of the preallocated buffer contiguous
 * address mapping. It is used only by RING_BUFFER_STATIC configuration. It can
 * be set to NULL for other backends.
 *
 * priv_data (output) is set to a pointer into a "priv_data_size"-sized
 * memory area for client-specific data. This memory is managed by lib
 * ring buffer. priv_data_align is the alignment required for the
 * private data area.
 */

extern
struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_lib_ring_buffer_config *config,
				const char *name,
				void **priv_data,
				size_t priv_data_align,
				size_t priv_data_size,
				void *priv_data_init,
				void *buf_addr,
				size_t subbuf_size, size_t num_subbuf,
				unsigned int switch_timer_interval,
				unsigned int read_timer_interval,
				const int *stream_fds, int nr_stream_fds,
				int64_t blocking_timeout);
72
/*
 * channel_destroy finalizes all channel's buffers, waits for readers to
 * release all references, and destroys the channel.
 *
 * NOTE(review): "consumer" appears to select consumer-side vs. application-side
 * teardown behavior -- confirm against ring_buffer_frontend.c.
 */
extern
void channel_destroy(struct channel *chan, struct lttng_ust_shm_handle *handle,
		int consumer);
80
81
/* Buffer read operations */

/*
 * Iteration on channel cpumask needs to issue a read barrier to match the write
 * barrier in cpu hotplug. It orders the cpumask read before read of per-cpu
 * buffer data. The per-cpu buffer is never removed by cpu hotplug; teardown is
 * only performed at channel destruction.
 *
 * Note: the "chan" argument is unused by this expansion; it is kept in the
 * macro signature for API symmetry with callers that have a channel in hand.
 */
#define for_each_channel_cpu(cpu, chan)		\
	for_each_possible_cpu(cpu)
92
/*
 * Look up the ring buffer of "chan" associated with "cpu", and expose its
 * shared memory descriptors through the output parameters (shm_fd, wait_fd,
 * wakeup_fd) and its mapping length through memory_map_size.
 */
extern struct lttng_ust_lib_ring_buffer *channel_get_ring_buffer(
		const struct lttng_ust_lib_ring_buffer_config *config,
		struct channel *chan, int cpu,
		struct lttng_ust_shm_handle *handle,
		int *shm_fd, int *wait_fd,
		int *wakeup_fd,
		uint64_t *memory_map_size);
/*
 * Close the wait/wakeup file descriptors, either channel-wide
 * (ring_buffer_channel_*) or for a single per-cpu stream
 * (ring_buffer_stream_*, selected by "cpu").
 */
extern
int ring_buffer_channel_close_wait_fd(const struct lttng_ust_lib_ring_buffer_config *config,
		struct channel *chan,
		struct lttng_ust_shm_handle *handle);
extern
int ring_buffer_channel_close_wakeup_fd(const struct lttng_ust_lib_ring_buffer_config *config,
		struct channel *chan,
		struct lttng_ust_shm_handle *handle);
extern
int ring_buffer_stream_close_wait_fd(const struct lttng_ust_lib_ring_buffer_config *config,
		struct channel *chan,
		struct lttng_ust_shm_handle *handle,
		int cpu);
extern
int ring_buffer_stream_close_wakeup_fd(const struct lttng_ust_lib_ring_buffer_config *config,
		struct channel *chan,
		struct lttng_ust_shm_handle *handle,
		int cpu);
118
/*
 * Acquire/release read-side access to a buffer.
 * NOTE(review): exact reference-counting semantics are defined in
 * ring_buffer_frontend.c -- confirm there before relying on them.
 */
extern int lib_ring_buffer_open_read(struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle);
extern void lib_ring_buffer_release_read(struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle);

/*
 * Initialize signals for ring buffer. Should be called early e.g. by
 * main() in the program to affect all threads.
 */
void lib_ringbuffer_signal_init(void);
129
/*
 * Read sequence: snapshot, many get_subbuf/put_subbuf, move_consumer.
 */
/*
 * Sample the current consumed and produced positions of "buf" into the
 * output parameters. Returns 0 on success, nonzero on error (convention
 * visible in lib_ring_buffer_get_next_subbuf below).
 */
extern int lib_ring_buffer_snapshot(struct lttng_ust_lib_ring_buffer *buf,
		unsigned long *consumed,
		unsigned long *produced,
		struct lttng_ust_shm_handle *handle);
/*
 * Variant of lib_ring_buffer_snapshot; NOTE(review): see
 * ring_buffer_frontend.c for how position sampling differs here.
 */
extern int lib_ring_buffer_snapshot_sample_positions(
		struct lttng_ust_lib_ring_buffer *buf,
		unsigned long *consumed,
		unsigned long *produced,
		struct lttng_ust_shm_handle *handle);
/* Move the consumer position of "buf" forward to "consumed_new". */
extern void lib_ring_buffer_move_consumer(struct lttng_ust_lib_ring_buffer *buf,
		unsigned long consumed_new,
		struct lttng_ust_shm_handle *handle);

/* Acquire for reading the sub-buffer containing position "consumed". */
extern int lib_ring_buffer_get_subbuf(struct lttng_ust_lib_ring_buffer *buf,
		unsigned long consumed,
		struct lttng_ust_shm_handle *handle);
/* Release a sub-buffer previously acquired with lib_ring_buffer_get_subbuf. */
extern void lib_ring_buffer_put_subbuf(struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle);
151
152/*
153 * lib_ring_buffer_get_next_subbuf/lib_ring_buffer_put_next_subbuf are helpers
154 * to read sub-buffers sequentially.
155 */
156static inline int lib_ring_buffer_get_next_subbuf(struct lttng_ust_lib_ring_buffer *buf,
157 struct lttng_ust_shm_handle *handle)
158{
159 int ret;
160
161 ret = lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
162 &buf->prod_snapshot, handle);
163 if (ret)
164 return ret;
165 ret = lib_ring_buffer_get_subbuf(buf, buf->cons_snapshot, handle);
166 return ret;
167}
168
169static inline
170void lib_ring_buffer_put_next_subbuf(struct lttng_ust_lib_ring_buffer *buf,
171 struct lttng_ust_shm_handle *handle)
172{
173 struct channel *chan;
174
175 chan = shmp(handle, buf->backend.chan);
176 if (!chan)
177 return;
178 lib_ring_buffer_put_subbuf(buf, handle);
179 lib_ring_buffer_move_consumer(buf, subbuf_align(buf->cons_snapshot, chan),
180 handle);
181}
182
/*
 * Reset helpers. NOTE(review): presumably reinitialize channel/buffer
 * positions and counters -- confirm in ring_buffer_frontend.c.
 */
extern void channel_reset(struct channel *chan);
extern void lib_ring_buffer_reset(struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle);
186
187static inline
188unsigned long lib_ring_buffer_get_offset(const struct lttng_ust_lib_ring_buffer_config *config,
189 struct lttng_ust_lib_ring_buffer *buf)
190{
191 return v_read(config, &buf->offset);
192}
193
194static inline
195unsigned long lib_ring_buffer_get_consumed(const struct lttng_ust_lib_ring_buffer_config *config,
196 struct lttng_ust_lib_ring_buffer *buf)
197{
198 return uatomic_read(&buf->consumed);
199}
200
/*
 * Must call lib_ring_buffer_is_finalized before reading counters (memory
 * ordering enforced with respect to trace teardown).
 *
 * The single volatile read (CMM_ACCESS_ONCE) followed by cmm_smp_rmb()
 * orders the "finalized" load before any subsequent counter loads done
 * by the caller. "config" is unused; kept for accessor signature symmetry.
 */
static inline
int lib_ring_buffer_is_finalized(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer *buf)
{
	int finalized = CMM_ACCESS_ONCE(buf->finalized);
	/*
	 * Read finalized before counters.
	 */
	cmm_smp_rmb();
	return finalized;
}
216
217static inline
218int lib_ring_buffer_channel_is_finalized(const struct channel *chan)
219{
220 return chan->finalized;
221}
222
223static inline
224int lib_ring_buffer_channel_is_disabled(const struct channel *chan)
225{
226 return uatomic_read(&chan->record_disabled);
227}
228
229static inline
230unsigned long lib_ring_buffer_get_read_data_size(
231 const struct lttng_ust_lib_ring_buffer_config *config,
232 struct lttng_ust_lib_ring_buffer *buf,
233 struct lttng_ust_shm_handle *handle)
234{
235 return subbuffer_get_read_data_size(config, &buf->backend, handle);
236}
237
238static inline
239unsigned long lib_ring_buffer_get_records_count(
240 const struct lttng_ust_lib_ring_buffer_config *config,
241 struct lttng_ust_lib_ring_buffer *buf)
242{
243 return v_read(config, &buf->records_count);
244}
245
246static inline
247unsigned long lib_ring_buffer_get_records_overrun(
248 const struct lttng_ust_lib_ring_buffer_config *config,
249 struct lttng_ust_lib_ring_buffer *buf)
250{
251 return v_read(config, &buf->records_overrun);
252}
253
254static inline
255unsigned long lib_ring_buffer_get_records_lost_full(
256 const struct lttng_ust_lib_ring_buffer_config *config,
257 struct lttng_ust_lib_ring_buffer *buf)
258{
259 return v_read(config, &buf->records_lost_full);
260}
261
262static inline
263unsigned long lib_ring_buffer_get_records_lost_wrap(
264 const struct lttng_ust_lib_ring_buffer_config *config,
265 struct lttng_ust_lib_ring_buffer *buf)
266{
267 return v_read(config, &buf->records_lost_wrap);
268}
269
270static inline
271unsigned long lib_ring_buffer_get_records_lost_big(
272 const struct lttng_ust_lib_ring_buffer_config *config,
273 struct lttng_ust_lib_ring_buffer *buf)
274{
275 return v_read(config, &buf->records_lost_big);
276}
277
278static inline
279unsigned long lib_ring_buffer_get_records_read(
280 const struct lttng_ust_lib_ring_buffer_config *config,
281 struct lttng_ust_lib_ring_buffer *buf)
282{
283 return v_read(config, &buf->backend.records_read);
284}
285
286#endif /* _LTTNG_RING_BUFFER_FRONTEND_H */
This page took 0.022778 seconds and 4 git commands to generate.