b1ccddec1e8bfd3367a53f08714f5f153ed4a9b4
[lttng-ust.git] / libringbuffer / frontend.h
1 #ifndef _LTTNG_RING_BUFFER_FRONTEND_H
2 #define _LTTNG_RING_BUFFER_FRONTEND_H
3
4 /*
5 * libringbuffer/frontend.h
6 *
7 * Ring Buffer Library Synchronization Header (API).
8 *
9 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
10 *
11 * This library is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public
13 * License as published by the Free Software Foundation; only
14 * version 2.1 of the License.
15 *
16 * This library is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
20 *
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with this library; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24 *
25 *
26 * Author:
27 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
28 *
29 * See ring_buffer_frontend.c for more information on wait-free algorithms.
30 */
31
32 #include <urcu/compiler.h>
33 #include <urcu/uatomic.h>
34
35 #include "smp.h"
36 /* Internal helpers */
37 #include "frontend_internal.h"
38
39 /* Buffer creation/removal and setup operations */
40
41 /*
42 * switch_timer_interval is the time interval (in us) to fill sub-buffers with
43 * padding to let readers get those sub-buffers. Used for live streaming.
44 *
45 * read_timer_interval is the time interval (in us) to wake up pending readers.
46 *
 * buf_addr is a pointer to the beginning of the preallocated buffer contiguous
48 * address mapping. It is used only by RING_BUFFER_STATIC configuration. It can
49 * be set to NULL for other backends.
50 *
51 * priv_data (output) is set to a pointer into a "priv_data_len"-sized
52 * memory area for client-specific data. This memory is managed by lib
53 * ring buffer. priv_data_align is the alignment required for the
54 * private data area.
55 */
56
57 extern
58 struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_lib_ring_buffer_config *config,
59 const char *name,
60 void **priv_data,
61 size_t priv_data_align,
62 size_t priv_data_size,
63 void *priv_data_init,
64 void *buf_addr,
65 size_t subbuf_size, size_t num_subbuf,
66 unsigned int switch_timer_interval,
67 unsigned int read_timer_interval,
68 int **shm_fd, int **wait_fd,
69 uint64_t **memory_map_size);
70
71 /* channel_handle_create - for consumer. */
72 extern
73 struct lttng_ust_shm_handle *channel_handle_create(int shm_fd, int wait_fd,
74 uint64_t memory_map_size);
75
76 /* channel_handle_add_stream - for consumer. */
77 extern
78 int channel_handle_add_stream(struct lttng_ust_shm_handle *handle,
79 int shm_fd, int wait_fd, uint64_t memory_map_size);
80
81 /*
82 * channel_destroy finalizes all channel's buffers, waits for readers to
83 * release all references, and destroys the channel.
84 */
85 extern
86 void channel_destroy(struct channel *chan, struct lttng_ust_shm_handle *handle,
87 int shadow);
88
89
90 /* Buffer read operations */
91
92 /*
93 * Iteration on channel cpumask needs to issue a read barrier to match the write
94 * barrier in cpu hotplug. It orders the cpumask read before read of per-cpu
95 * buffer data. The per-cpu buffer is never removed by cpu hotplug; teardown is
96 * only performed at channel destruction.
97 */
98 #define for_each_channel_cpu(cpu, chan) \
99 for_each_possible_cpu(cpu)
100
101 extern struct lttng_ust_lib_ring_buffer *channel_get_ring_buffer(
102 const struct lttng_ust_lib_ring_buffer_config *config,
103 struct channel *chan, int cpu,
104 struct lttng_ust_shm_handle *handle,
105 int **shm_fd, int **wait_fd,
106 uint64_t **memory_map_size);
107 extern int lib_ring_buffer_open_read(struct lttng_ust_lib_ring_buffer *buf,
108 struct lttng_ust_shm_handle *handle,
109 int shadow);
110 extern void lib_ring_buffer_release_read(struct lttng_ust_lib_ring_buffer *buf,
111 struct lttng_ust_shm_handle *handle,
112 int shadow);
113
114 /*
115 * Read sequence: snapshot, many get_subbuf/put_subbuf, move_consumer.
116 */
117 extern int lib_ring_buffer_snapshot(struct lttng_ust_lib_ring_buffer *buf,
118 unsigned long *consumed,
119 unsigned long *produced,
120 struct lttng_ust_shm_handle *handle);
121 extern void lib_ring_buffer_move_consumer(struct lttng_ust_lib_ring_buffer *buf,
122 unsigned long consumed_new,
123 struct lttng_ust_shm_handle *handle);
124
125 extern int lib_ring_buffer_get_subbuf(struct lttng_ust_lib_ring_buffer *buf,
126 unsigned long consumed,
127 struct lttng_ust_shm_handle *handle);
128 extern void lib_ring_buffer_put_subbuf(struct lttng_ust_lib_ring_buffer *buf,
129 struct lttng_ust_shm_handle *handle);
130
131 /*
132 * lib_ring_buffer_get_next_subbuf/lib_ring_buffer_put_next_subbuf are helpers
133 * to read sub-buffers sequentially.
134 */
135 static inline int lib_ring_buffer_get_next_subbuf(struct lttng_ust_lib_ring_buffer *buf,
136 struct lttng_ust_shm_handle *handle)
137 {
138 int ret;
139
140 ret = lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
141 &buf->prod_snapshot, handle);
142 if (ret)
143 return ret;
144 ret = lib_ring_buffer_get_subbuf(buf, buf->cons_snapshot, handle);
145 return ret;
146 }
147
148 static inline
149 void lib_ring_buffer_put_next_subbuf(struct lttng_ust_lib_ring_buffer *buf,
150 struct lttng_ust_shm_handle *handle)
151 {
152 lib_ring_buffer_put_subbuf(buf, handle);
153 lib_ring_buffer_move_consumer(buf, subbuf_align(buf->cons_snapshot,
154 shmp(handle, buf->backend.chan)), handle);
155 }
156
157 extern void channel_reset(struct channel *chan);
158 extern void lib_ring_buffer_reset(struct lttng_ust_lib_ring_buffer *buf,
159 struct lttng_ust_shm_handle *handle);
160
161 static inline
162 unsigned long lib_ring_buffer_get_offset(const struct lttng_ust_lib_ring_buffer_config *config,
163 struct lttng_ust_lib_ring_buffer *buf)
164 {
165 return v_read(config, &buf->offset);
166 }
167
168 static inline
169 unsigned long lib_ring_buffer_get_consumed(const struct lttng_ust_lib_ring_buffer_config *config,
170 struct lttng_ust_lib_ring_buffer *buf)
171 {
172 return uatomic_read(&buf->consumed);
173 }
174
175 /*
176 * Must call lib_ring_buffer_is_finalized before reading counters (memory
177 * ordering enforced with respect to trace teardown).
178 */
179 static inline
180 int lib_ring_buffer_is_finalized(const struct lttng_ust_lib_ring_buffer_config *config,
181 struct lttng_ust_lib_ring_buffer *buf)
182 {
183 int finalized = CMM_ACCESS_ONCE(buf->finalized);
184 /*
185 * Read finalized before counters.
186 */
187 cmm_smp_rmb();
188 return finalized;
189 }
190
191 static inline
192 int lib_ring_buffer_channel_is_finalized(const struct channel *chan)
193 {
194 return chan->finalized;
195 }
196
197 static inline
198 int lib_ring_buffer_channel_is_disabled(const struct channel *chan)
199 {
200 return uatomic_read(&chan->record_disabled);
201 }
202
203 static inline
204 unsigned long lib_ring_buffer_get_read_data_size(
205 const struct lttng_ust_lib_ring_buffer_config *config,
206 struct lttng_ust_lib_ring_buffer *buf,
207 struct lttng_ust_shm_handle *handle)
208 {
209 return subbuffer_get_read_data_size(config, &buf->backend, handle);
210 }
211
212 static inline
213 unsigned long lib_ring_buffer_get_records_count(
214 const struct lttng_ust_lib_ring_buffer_config *config,
215 struct lttng_ust_lib_ring_buffer *buf)
216 {
217 return v_read(config, &buf->records_count);
218 }
219
220 static inline
221 unsigned long lib_ring_buffer_get_records_overrun(
222 const struct lttng_ust_lib_ring_buffer_config *config,
223 struct lttng_ust_lib_ring_buffer *buf)
224 {
225 return v_read(config, &buf->records_overrun);
226 }
227
228 static inline
229 unsigned long lib_ring_buffer_get_records_lost_full(
230 const struct lttng_ust_lib_ring_buffer_config *config,
231 struct lttng_ust_lib_ring_buffer *buf)
232 {
233 return v_read(config, &buf->records_lost_full);
234 }
235
236 static inline
237 unsigned long lib_ring_buffer_get_records_lost_wrap(
238 const struct lttng_ust_lib_ring_buffer_config *config,
239 struct lttng_ust_lib_ring_buffer *buf)
240 {
241 return v_read(config, &buf->records_lost_wrap);
242 }
243
244 static inline
245 unsigned long lib_ring_buffer_get_records_lost_big(
246 const struct lttng_ust_lib_ring_buffer_config *config,
247 struct lttng_ust_lib_ring_buffer *buf)
248 {
249 return v_read(config, &buf->records_lost_big);
250 }
251
252 static inline
253 unsigned long lib_ring_buffer_get_records_read(
254 const struct lttng_ust_lib_ring_buffer_config *config,
255 struct lttng_ust_lib_ring_buffer *buf)
256 {
257 return v_read(config, &buf->backend.records_read);
258 }
259
260 #endif /* _LTTNG_RING_BUFFER_FRONTEND_H */
This page took 0.033424 seconds and 3 git commands to generate.