Fix: ensure all probe providers have their symbols
[lttng-ust.git] / libringbuffer / frontend.h
CommitLineData
e92f3e28
MD
1#ifndef _LTTNG_RING_BUFFER_FRONTEND_H
2#define _LTTNG_RING_BUFFER_FRONTEND_H
852c2936
MD
3
4/*
e92f3e28 5 * libringbuffer/frontend.h
852c2936
MD
6 *
7 * Ring Buffer Library Synchronization Header (API).
8 *
e92f3e28
MD
9 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
10 *
11 * This library is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public
13 * License as published by the Free Software Foundation; only
14 * version 2.1 of the License.
15 *
16 * This library is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
20 *
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with this library; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24 *
25 *
852c2936
MD
26 * Author:
27 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
28 *
29 * See ring_buffer_frontend.c for more information on wait-free algorithms.
852c2936
MD
30 */
31
14641deb
MD
32#include <urcu/compiler.h>
33#include <urcu/uatomic.h>
34
a6352fd4 35#include "smp.h"
852c2936 36/* Internal helpers */
4931a13e 37#include "frontend_internal.h"
852c2936
MD
38
39/* Buffer creation/removal and setup operations */
40
/*
 * switch_timer_interval is the time interval (in us) to fill sub-buffers with
 * padding to let readers get those sub-buffers. Used for live streaming.
 *
 * read_timer_interval is the time interval (in us) to wake up pending readers.
 *
 * buf_addr is a pointer to the beginning of the preallocated buffer contiguous
 * address mapping. It is used only by RING_BUFFER_STATIC configuration. It can
 * be set to NULL for other backends.
 *
 * priv_data (output) is set to a pointer into a "priv_data_len"-sized
 * memory area for client-specific data. This memory is managed by lib
 * ring buffer. priv_data_align is the alignment required for the
 * private data area.
 */

extern
struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_lib_ring_buffer_config *config,
				const char *name,
				void **priv_data,
				size_t priv_data_align,
				size_t priv_data_size,
				void *priv_data_init,
				void *buf_addr,
				size_t subbuf_size, size_t num_subbuf,
				unsigned int switch_timer_interval,
				unsigned int read_timer_interval);
852c2936
MD
68
/*
 * channel_destroy finalizes all channel's buffers, waits for readers to
 * release all references, and destroys the channel.
 */
extern
void channel_destroy(struct channel *chan, struct lttng_ust_shm_handle *handle,
		int consumer);
852c2936
MD
76
77
78/* Buffer read operations */
79
/*
 * Iteration on channel cpumask needs to issue a read barrier to match the write
 * barrier in cpu hotplug. It orders the cpumask read before read of per-cpu
 * buffer data. The per-cpu buffer is never removed by cpu hotplug; teardown is
 * only performed at channel destruction.
 */
#define for_each_channel_cpu(cpu, chan)			\
	for_each_possible_cpu(cpu)
852c2936 88
4cfec15c
MD
/* Per-buffer accessor and read-side file-descriptor management. */
extern struct lttng_ust_lib_ring_buffer *channel_get_ring_buffer(
				const struct lttng_ust_lib_ring_buffer_config *config,
				struct channel *chan, int cpu,
				struct lttng_ust_shm_handle *handle,
				int *shm_fd, int *wait_fd,
				int *wakeup_fd,
				uint64_t *memory_map_size);
extern
int ring_buffer_close_wait_fd(const struct lttng_ust_lib_ring_buffer_config *config,
		struct channel *chan,
		struct lttng_ust_shm_handle *handle,
		int cpu);
extern
int ring_buffer_close_wakeup_fd(const struct lttng_ust_lib_ring_buffer_config *config,
		struct channel *chan,
		struct lttng_ust_shm_handle *handle,
		int cpu);

extern int lib_ring_buffer_open_read(struct lttng_ust_lib_ring_buffer *buf,
				struct lttng_ust_shm_handle *handle);
extern void lib_ring_buffer_release_read(struct lttng_ust_lib_ring_buffer *buf,
				struct lttng_ust_shm_handle *handle);
852c2936
MD
111
/*
 * Read sequence: snapshot, many get_subbuf/put_subbuf, move_consumer.
 */
extern int lib_ring_buffer_snapshot(struct lttng_ust_lib_ring_buffer *buf,
				unsigned long *consumed,
				unsigned long *produced,
				struct lttng_ust_shm_handle *handle);
extern void lib_ring_buffer_move_consumer(struct lttng_ust_lib_ring_buffer *buf,
				unsigned long consumed_new,
				struct lttng_ust_shm_handle *handle);

extern int lib_ring_buffer_get_subbuf(struct lttng_ust_lib_ring_buffer *buf,
				unsigned long consumed,
				struct lttng_ust_shm_handle *handle);
extern void lib_ring_buffer_put_subbuf(struct lttng_ust_lib_ring_buffer *buf,
				struct lttng_ust_shm_handle *handle);
852c2936
MD
128
129/*
130 * lib_ring_buffer_get_next_subbuf/lib_ring_buffer_put_next_subbuf are helpers
131 * to read sub-buffers sequentially.
132 */
4cfec15c 133static inline int lib_ring_buffer_get_next_subbuf(struct lttng_ust_lib_ring_buffer *buf,
38fae1d3 134 struct lttng_ust_shm_handle *handle)
852c2936
MD
135{
136 int ret;
137
138 ret = lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
1d498196 139 &buf->prod_snapshot, handle);
852c2936
MD
140 if (ret)
141 return ret;
1d498196 142 ret = lib_ring_buffer_get_subbuf(buf, buf->cons_snapshot, handle);
852c2936
MD
143 return ret;
144}
145
1d498196 146static inline
4cfec15c 147void lib_ring_buffer_put_next_subbuf(struct lttng_ust_lib_ring_buffer *buf,
38fae1d3 148 struct lttng_ust_shm_handle *handle)
852c2936 149{
1d498196 150 lib_ring_buffer_put_subbuf(buf, handle);
852c2936 151 lib_ring_buffer_move_consumer(buf, subbuf_align(buf->cons_snapshot,
1d498196 152 shmp(handle, buf->backend.chan)), handle);
852c2936
MD
153}
154
/* Reset channel/buffer state without freeing memory (counters, positions). */
extern void channel_reset(struct channel *chan);
extern void lib_ring_buffer_reset(struct lttng_ust_lib_ring_buffer *buf,
				struct lttng_ust_shm_handle *handle);
852c2936
MD
158
159static inline
4cfec15c
MD
160unsigned long lib_ring_buffer_get_offset(const struct lttng_ust_lib_ring_buffer_config *config,
161 struct lttng_ust_lib_ring_buffer *buf)
852c2936
MD
162{
163 return v_read(config, &buf->offset);
164}
165
166static inline
4cfec15c
MD
167unsigned long lib_ring_buffer_get_consumed(const struct lttng_ust_lib_ring_buffer_config *config,
168 struct lttng_ust_lib_ring_buffer *buf)
852c2936 169{
14641deb 170 return uatomic_read(&buf->consumed);
852c2936
MD
171}
172
173/*
174 * Must call lib_ring_buffer_is_finalized before reading counters (memory
175 * ordering enforced with respect to trace teardown).
176 */
177static inline
4cfec15c
MD
178int lib_ring_buffer_is_finalized(const struct lttng_ust_lib_ring_buffer_config *config,
179 struct lttng_ust_lib_ring_buffer *buf)
852c2936 180{
14641deb 181 int finalized = CMM_ACCESS_ONCE(buf->finalized);
852c2936
MD
182 /*
183 * Read finalized before counters.
184 */
14641deb 185 cmm_smp_rmb();
852c2936
MD
186 return finalized;
187}
188
189static inline
190int lib_ring_buffer_channel_is_finalized(const struct channel *chan)
191{
192 return chan->finalized;
193}
194
195static inline
196int lib_ring_buffer_channel_is_disabled(const struct channel *chan)
197{
14641deb 198 return uatomic_read(&chan->record_disabled);
852c2936
MD
199}
200
201static inline
202unsigned long lib_ring_buffer_get_read_data_size(
4cfec15c
MD
203 const struct lttng_ust_lib_ring_buffer_config *config,
204 struct lttng_ust_lib_ring_buffer *buf,
38fae1d3 205 struct lttng_ust_shm_handle *handle)
852c2936 206{
1d498196 207 return subbuffer_get_read_data_size(config, &buf->backend, handle);
852c2936
MD
208}
209
210static inline
211unsigned long lib_ring_buffer_get_records_count(
4cfec15c
MD
212 const struct lttng_ust_lib_ring_buffer_config *config,
213 struct lttng_ust_lib_ring_buffer *buf)
852c2936
MD
214{
215 return v_read(config, &buf->records_count);
216}
217
218static inline
219unsigned long lib_ring_buffer_get_records_overrun(
4cfec15c
MD
220 const struct lttng_ust_lib_ring_buffer_config *config,
221 struct lttng_ust_lib_ring_buffer *buf)
852c2936
MD
222{
223 return v_read(config, &buf->records_overrun);
224}
225
226static inline
227unsigned long lib_ring_buffer_get_records_lost_full(
4cfec15c
MD
228 const struct lttng_ust_lib_ring_buffer_config *config,
229 struct lttng_ust_lib_ring_buffer *buf)
852c2936
MD
230{
231 return v_read(config, &buf->records_lost_full);
232}
233
234static inline
235unsigned long lib_ring_buffer_get_records_lost_wrap(
4cfec15c
MD
236 const struct lttng_ust_lib_ring_buffer_config *config,
237 struct lttng_ust_lib_ring_buffer *buf)
852c2936
MD
238{
239 return v_read(config, &buf->records_lost_wrap);
240}
241
242static inline
243unsigned long lib_ring_buffer_get_records_lost_big(
4cfec15c
MD
244 const struct lttng_ust_lib_ring_buffer_config *config,
245 struct lttng_ust_lib_ring_buffer *buf)
852c2936
MD
246{
247 return v_read(config, &buf->records_lost_big);
248}
249
250static inline
251unsigned long lib_ring_buffer_get_records_read(
4cfec15c
MD
252 const struct lttng_ust_lib_ring_buffer_config *config,
253 struct lttng_ust_lib_ring_buffer *buf)
852c2936
MD
254{
255 return v_read(config, &buf->backend.records_read);
256}
257
e92f3e28 258#endif /* _LTTNG_RING_BUFFER_FRONTEND_H */
This page took 0.037423 seconds and 4 git commands to generate.