Add ustctl_snapshot_sample_positions ustctl command
[lttng-ust.git] / libringbuffer / frontend.h
1 #ifndef _LTTNG_RING_BUFFER_FRONTEND_H
2 #define _LTTNG_RING_BUFFER_FRONTEND_H
3
4 /*
5 * libringbuffer/frontend.h
6 *
7 * Ring Buffer Library Synchronization Header (API).
8 *
9 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
10 *
11 * This library is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public
13 * License as published by the Free Software Foundation; only
14 * version 2.1 of the License.
15 *
16 * This library is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
20 *
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with this library; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24 *
25 *
26 * Author:
27 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
28 *
29 * See ring_buffer_frontend.c for more information on wait-free algorithms.
30 */
31
32 #include <urcu/compiler.h>
33 #include <urcu/uatomic.h>
34
35 #include "smp.h"
36 /* Internal helpers */
37 #include "frontend_internal.h"
38
39 /* Buffer creation/removal and setup operations */
40
41 /*
42 * switch_timer_interval is the time interval (in us) to fill sub-buffers with
43 * padding to let readers get those sub-buffers. Used for live streaming.
44 *
45 * read_timer_interval is the time interval (in us) to wake up pending readers.
46 *
47  * buf_addr is a pointer to the beginning of the preallocated buffer contiguous
48 * address mapping. It is used only by RING_BUFFER_STATIC configuration. It can
49 * be set to NULL for other backends.
50 *
51 * priv_data (output) is set to a pointer into a "priv_data_len"-sized
52 * memory area for client-specific data. This memory is managed by lib
53 * ring buffer. priv_data_align is the alignment required for the
54 * private data area.
55 */
56
/*
 * Create a channel and its stream ring buffers in shared memory.
 * Parameters are described in the comment block above; stream_fds is an
 * array of nr_stream_fds file descriptors backing the streams' shared
 * memory. Returns the shared-memory handle, presumably NULL on error
 * (NOTE(review): confirm error convention in ring_buffer_frontend.c).
 */
extern
struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_lib_ring_buffer_config *config,
				const char *name,
				void **priv_data,
				size_t priv_data_align,
				size_t priv_data_size,
				void *priv_data_init,
				void *buf_addr,
				size_t subbuf_size, size_t num_subbuf,
				unsigned int switch_timer_interval,
				unsigned int read_timer_interval,
				const int *stream_fds, int nr_stream_fds);
69
70 /*
71 * channel_destroy finalizes all channel's buffers, waits for readers to
72 * release all references, and destroys the channel.
73 */
/*
 * channel_destroy finalizes all channel's buffers, waits for readers to
 * release all references, and destroys the channel. The "consumer" flag
 * presumably distinguishes a consumer-side teardown from an
 * application-side one (NOTE(review): confirm at call sites).
 */
extern
void channel_destroy(struct channel *chan, struct lttng_ust_shm_handle *handle,
		int consumer);
77
78
79 /* Buffer read operations */
80
/*
 * Iteration on channel cpumask needs to issue a read barrier to match the
 * write barrier in cpu hotplug. It orders the cpumask read before read of
 * per-cpu buffer data. The per-cpu buffer is never removed by cpu hotplug;
 * teardown is only performed at channel destruction.
 */
#define for_each_channel_cpu(cpu, chan)			\
	for_each_possible_cpu(cpu)
89
/*
 * Look up the ring buffer of @chan for @cpu, reporting its shm fd, wait
 * fd, wakeup fd and memory map size through the output pointers.
 */
extern struct lttng_ust_lib_ring_buffer *channel_get_ring_buffer(
				const struct lttng_ust_lib_ring_buffer_config *config,
				struct channel *chan, int cpu,
				struct lttng_ust_shm_handle *handle,
				int *shm_fd, int *wait_fd,
				int *wakeup_fd,
				uint64_t *memory_map_size);
/* Close the channel-level wait-side file descriptor. */
extern
int ring_buffer_channel_close_wait_fd(const struct lttng_ust_lib_ring_buffer_config *config,
		struct channel *chan,
		struct lttng_ust_shm_handle *handle);
/* Close the channel-level wakeup-side file descriptor. */
extern
int ring_buffer_channel_close_wakeup_fd(const struct lttng_ust_lib_ring_buffer_config *config,
		struct channel *chan,
		struct lttng_ust_shm_handle *handle);
/* Close the wait-side file descriptor of the stream for @cpu. */
extern
int ring_buffer_stream_close_wait_fd(const struct lttng_ust_lib_ring_buffer_config *config,
		struct channel *chan,
		struct lttng_ust_shm_handle *handle,
		int cpu);
/* Close the wakeup-side file descriptor of the stream for @cpu. */
extern
int ring_buffer_stream_close_wakeup_fd(const struct lttng_ust_lib_ring_buffer_config *config,
		struct channel *chan,
		struct lttng_ust_shm_handle *handle,
		int cpu);
115
/*
 * Acquire, respectively release, read-side access to a buffer.
 * open_read returns nonzero on failure (NOTE(review): confirm error
 * convention in ring_buffer_frontend.c).
 */
extern int lib_ring_buffer_open_read(struct lttng_ust_lib_ring_buffer *buf,
				struct lttng_ust_shm_handle *handle);
extern void lib_ring_buffer_release_read(struct lttng_ust_lib_ring_buffer *buf,
				struct lttng_ust_shm_handle *handle);
120
/*
 * Initialize signals for ring buffer. Should be called early e.g. by
 * main() in the program to affect all threads.
 */
void lib_ringbuffer_signal_init(void);
126
/*
 * Read sequence: snapshot, many get_subbuf/put_subbuf, move_consumer.
 */
/* Sample the current consumed and produced positions of @buf. */
extern int lib_ring_buffer_snapshot(struct lttng_ust_lib_ring_buffer *buf,
		unsigned long *consumed,
		unsigned long *produced,
		struct lttng_ust_shm_handle *handle);
/*
 * Variant of lib_ring_buffer_snapshot backing the
 * snapshot_sample_positions ustctl command (NOTE(review): confirm how
 * its sampling differs from plain snapshot in ring_buffer_frontend.c).
 */
extern int lib_ring_buffer_snapshot_sample_positions(
		struct lttng_ust_lib_ring_buffer *buf,
		unsigned long *consumed,
		unsigned long *produced,
		struct lttng_ust_shm_handle *handle);
/* Advance the consumed position to @consumed_new. */
extern void lib_ring_buffer_move_consumer(struct lttng_ust_lib_ring_buffer *buf,
		unsigned long consumed_new,
		struct lttng_ust_shm_handle *handle);

/* Get read access to the sub-buffer at position @consumed. */
extern int lib_ring_buffer_get_subbuf(struct lttng_ust_lib_ring_buffer *buf,
		unsigned long consumed,
		struct lttng_ust_shm_handle *handle);
/* Release the sub-buffer previously obtained with get_subbuf. */
extern void lib_ring_buffer_put_subbuf(struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle);
148
149 /*
150 * lib_ring_buffer_get_next_subbuf/lib_ring_buffer_put_next_subbuf are helpers
151 * to read sub-buffers sequentially.
152 */
153 static inline int lib_ring_buffer_get_next_subbuf(struct lttng_ust_lib_ring_buffer *buf,
154 struct lttng_ust_shm_handle *handle)
155 {
156 int ret;
157
158 ret = lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
159 &buf->prod_snapshot, handle);
160 if (ret)
161 return ret;
162 ret = lib_ring_buffer_get_subbuf(buf, buf->cons_snapshot, handle);
163 return ret;
164 }
165
166 static inline
167 void lib_ring_buffer_put_next_subbuf(struct lttng_ust_lib_ring_buffer *buf,
168 struct lttng_ust_shm_handle *handle)
169 {
170 struct channel *chan;
171
172 chan = shmp(handle, buf->backend.chan);
173 if (!chan)
174 return;
175 lib_ring_buffer_put_subbuf(buf, handle);
176 lib_ring_buffer_move_consumer(buf, subbuf_align(buf->cons_snapshot, chan),
177 handle);
178 }
179
/* Reset the channel's state, resp. one ring buffer's state. */
extern void channel_reset(struct channel *chan);
extern void lib_ring_buffer_reset(struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle);
183
184 static inline
185 unsigned long lib_ring_buffer_get_offset(const struct lttng_ust_lib_ring_buffer_config *config,
186 struct lttng_ust_lib_ring_buffer *buf)
187 {
188 return v_read(config, &buf->offset);
189 }
190
191 static inline
192 unsigned long lib_ring_buffer_get_consumed(const struct lttng_ust_lib_ring_buffer_config *config,
193 struct lttng_ust_lib_ring_buffer *buf)
194 {
195 return uatomic_read(&buf->consumed);
196 }
197
198 /*
199 * Must call lib_ring_buffer_is_finalized before reading counters (memory
200 * ordering enforced with respect to trace teardown).
201 */
202 static inline
203 int lib_ring_buffer_is_finalized(const struct lttng_ust_lib_ring_buffer_config *config,
204 struct lttng_ust_lib_ring_buffer *buf)
205 {
206 int finalized = CMM_ACCESS_ONCE(buf->finalized);
207 /*
208 * Read finalized before counters.
209 */
210 cmm_smp_rmb();
211 return finalized;
212 }
213
214 static inline
215 int lib_ring_buffer_channel_is_finalized(const struct channel *chan)
216 {
217 return chan->finalized;
218 }
219
220 static inline
221 int lib_ring_buffer_channel_is_disabled(const struct channel *chan)
222 {
223 return uatomic_read(&chan->record_disabled);
224 }
225
226 static inline
227 unsigned long lib_ring_buffer_get_read_data_size(
228 const struct lttng_ust_lib_ring_buffer_config *config,
229 struct lttng_ust_lib_ring_buffer *buf,
230 struct lttng_ust_shm_handle *handle)
231 {
232 return subbuffer_get_read_data_size(config, &buf->backend, handle);
233 }
234
235 static inline
236 unsigned long lib_ring_buffer_get_records_count(
237 const struct lttng_ust_lib_ring_buffer_config *config,
238 struct lttng_ust_lib_ring_buffer *buf)
239 {
240 return v_read(config, &buf->records_count);
241 }
242
243 static inline
244 unsigned long lib_ring_buffer_get_records_overrun(
245 const struct lttng_ust_lib_ring_buffer_config *config,
246 struct lttng_ust_lib_ring_buffer *buf)
247 {
248 return v_read(config, &buf->records_overrun);
249 }
250
251 static inline
252 unsigned long lib_ring_buffer_get_records_lost_full(
253 const struct lttng_ust_lib_ring_buffer_config *config,
254 struct lttng_ust_lib_ring_buffer *buf)
255 {
256 return v_read(config, &buf->records_lost_full);
257 }
258
259 static inline
260 unsigned long lib_ring_buffer_get_records_lost_wrap(
261 const struct lttng_ust_lib_ring_buffer_config *config,
262 struct lttng_ust_lib_ring_buffer *buf)
263 {
264 return v_read(config, &buf->records_lost_wrap);
265 }
266
267 static inline
268 unsigned long lib_ring_buffer_get_records_lost_big(
269 const struct lttng_ust_lib_ring_buffer_config *config,
270 struct lttng_ust_lib_ring_buffer *buf)
271 {
272 return v_read(config, &buf->records_lost_big);
273 }
274
275 static inline
276 unsigned long lib_ring_buffer_get_records_read(
277 const struct lttng_ust_lib_ring_buffer_config *config,
278 struct lttng_ust_lib_ring_buffer *buf)
279 {
280 return v_read(config, &buf->backend.records_read);
281 }
282
283 #endif /* _LTTNG_RING_BUFFER_FRONTEND_H */
This page took 0.034036 seconds and 4 git commands to generate.