Move to kernel style SPDX license identifiers
[lttng-ust.git] / libringbuffer / frontend.h
1 /*
2 * SPDX-License-Identifier: LGPL-2.1-only
3 *
4 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 *
6 * Ring Buffer Library Synchronization Header (API).
7 *
8 * See ring_buffer_frontend.c for more information on wait-free algorithms.
9 */
10
11 #ifndef _LTTNG_RING_BUFFER_FRONTEND_H
12 #define _LTTNG_RING_BUFFER_FRONTEND_H
13
14 #include <stddef.h>
15 #include <stdint.h>
16
17 #include <urcu/compiler.h>
18 #include <urcu/uatomic.h>
19
20 #include "smp.h"
21 /* Internal helpers */
22 #include "frontend_internal.h"
23
24 /* Buffer creation/removal and setup operations */
25
26 /*
27 * switch_timer_interval is the time interval (in us) to fill sub-buffers with
28 * padding to let readers get those sub-buffers. Used for live streaming.
29 *
30 * read_timer_interval is the time interval (in us) to wake up pending readers.
31 *
 * buf_addr is a pointer to the beginning of the preallocated buffer contiguous
33 * address mapping. It is used only by RING_BUFFER_STATIC configuration. It can
34 * be set to NULL for other backends.
35 *
36 * priv_data (output) is set to a pointer into a "priv_data_len"-sized
37 * memory area for client-specific data. This memory is managed by lib
38 * ring buffer. priv_data_align is the alignment required for the
39 * private data area.
40 */
41
42 extern
43 struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_lib_ring_buffer_config *config,
44 const char *name,
45 void **priv_data,
46 size_t priv_data_align,
47 size_t priv_data_size,
48 void *priv_data_init,
49 void *buf_addr,
50 size_t subbuf_size, size_t num_subbuf,
51 unsigned int switch_timer_interval,
52 unsigned int read_timer_interval,
53 const int *stream_fds, int nr_stream_fds,
54 int64_t blocking_timeout);
55
56 /*
57 * channel_destroy finalizes all channel's buffers, waits for readers to
58 * release all references, and destroys the channel.
59 */
60 extern
61 void channel_destroy(struct channel *chan, struct lttng_ust_shm_handle *handle,
62 int consumer);
63
64
65 /* Buffer read operations */
66
67 /*
68 * Iteration on channel cpumask needs to issue a read barrier to match the write
69 * barrier in cpu hotplug. It orders the cpumask read before read of per-cpu
70 * buffer data. The per-cpu buffer is never removed by cpu hotplug; teardown is
71 * only performed at channel destruction.
72 */
73 #define for_each_channel_cpu(cpu, chan) \
74 for_each_possible_cpu(cpu)
75
76 extern struct lttng_ust_lib_ring_buffer *channel_get_ring_buffer(
77 const struct lttng_ust_lib_ring_buffer_config *config,
78 struct channel *chan, int cpu,
79 struct lttng_ust_shm_handle *handle,
80 int *shm_fd, int *wait_fd,
81 int *wakeup_fd,
82 uint64_t *memory_map_size);
83 extern
84 int ring_buffer_channel_close_wait_fd(const struct lttng_ust_lib_ring_buffer_config *config,
85 struct channel *chan,
86 struct lttng_ust_shm_handle *handle);
87 extern
88 int ring_buffer_channel_close_wakeup_fd(const struct lttng_ust_lib_ring_buffer_config *config,
89 struct channel *chan,
90 struct lttng_ust_shm_handle *handle);
91 extern
92 int ring_buffer_stream_close_wait_fd(const struct lttng_ust_lib_ring_buffer_config *config,
93 struct channel *chan,
94 struct lttng_ust_shm_handle *handle,
95 int cpu);
96 extern
97 int ring_buffer_stream_close_wakeup_fd(const struct lttng_ust_lib_ring_buffer_config *config,
98 struct channel *chan,
99 struct lttng_ust_shm_handle *handle,
100 int cpu);
101
102 extern int lib_ring_buffer_open_read(struct lttng_ust_lib_ring_buffer *buf,
103 struct lttng_ust_shm_handle *handle);
104 extern void lib_ring_buffer_release_read(struct lttng_ust_lib_ring_buffer *buf,
105 struct lttng_ust_shm_handle *handle);
106
107 /*
108 * Initialize signals for ring buffer. Should be called early e.g. by
109 * main() in the program to affect all threads.
110 */
111 void lib_ringbuffer_signal_init(void);
112
113 /*
114 * Read sequence: snapshot, many get_subbuf/put_subbuf, move_consumer.
115 */
116 extern int lib_ring_buffer_snapshot(struct lttng_ust_lib_ring_buffer *buf,
117 unsigned long *consumed,
118 unsigned long *produced,
119 struct lttng_ust_shm_handle *handle);
120 extern int lib_ring_buffer_snapshot_sample_positions(
121 struct lttng_ust_lib_ring_buffer *buf,
122 unsigned long *consumed,
123 unsigned long *produced,
124 struct lttng_ust_shm_handle *handle);
125 extern void lib_ring_buffer_move_consumer(struct lttng_ust_lib_ring_buffer *buf,
126 unsigned long consumed_new,
127 struct lttng_ust_shm_handle *handle);
128
129 extern int lib_ring_buffer_get_subbuf(struct lttng_ust_lib_ring_buffer *buf,
130 unsigned long consumed,
131 struct lttng_ust_shm_handle *handle);
132 extern void lib_ring_buffer_put_subbuf(struct lttng_ust_lib_ring_buffer *buf,
133 struct lttng_ust_shm_handle *handle);
134
135 /*
136 * lib_ring_buffer_get_next_subbuf/lib_ring_buffer_put_next_subbuf are helpers
137 * to read sub-buffers sequentially.
138 */
139 static inline int lib_ring_buffer_get_next_subbuf(struct lttng_ust_lib_ring_buffer *buf,
140 struct lttng_ust_shm_handle *handle)
141 {
142 int ret;
143
144 ret = lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
145 &buf->prod_snapshot, handle);
146 if (ret)
147 return ret;
148 ret = lib_ring_buffer_get_subbuf(buf, buf->cons_snapshot, handle);
149 return ret;
150 }
151
152 static inline
153 void lib_ring_buffer_put_next_subbuf(struct lttng_ust_lib_ring_buffer *buf,
154 struct lttng_ust_shm_handle *handle)
155 {
156 struct channel *chan;
157
158 chan = shmp(handle, buf->backend.chan);
159 if (!chan)
160 return;
161 lib_ring_buffer_put_subbuf(buf, handle);
162 lib_ring_buffer_move_consumer(buf, subbuf_align(buf->cons_snapshot, chan),
163 handle);
164 }
165
166 extern void channel_reset(struct channel *chan);
167 extern void lib_ring_buffer_reset(struct lttng_ust_lib_ring_buffer *buf,
168 struct lttng_ust_shm_handle *handle);
169
170 static inline
171 unsigned long lib_ring_buffer_get_offset(const struct lttng_ust_lib_ring_buffer_config *config,
172 struct lttng_ust_lib_ring_buffer *buf)
173 {
174 return v_read(config, &buf->offset);
175 }
176
177 static inline
178 unsigned long lib_ring_buffer_get_consumed(const struct lttng_ust_lib_ring_buffer_config *config,
179 struct lttng_ust_lib_ring_buffer *buf)
180 {
181 return uatomic_read(&buf->consumed);
182 }
183
184 /*
185 * Must call lib_ring_buffer_is_finalized before reading counters (memory
186 * ordering enforced with respect to trace teardown).
187 */
188 static inline
189 int lib_ring_buffer_is_finalized(const struct lttng_ust_lib_ring_buffer_config *config,
190 struct lttng_ust_lib_ring_buffer *buf)
191 {
192 int finalized = CMM_ACCESS_ONCE(buf->finalized);
193 /*
194 * Read finalized before counters.
195 */
196 cmm_smp_rmb();
197 return finalized;
198 }
199
200 static inline
201 int lib_ring_buffer_channel_is_finalized(const struct channel *chan)
202 {
203 return chan->finalized;
204 }
205
206 static inline
207 int lib_ring_buffer_channel_is_disabled(const struct channel *chan)
208 {
209 return uatomic_read(&chan->record_disabled);
210 }
211
212 static inline
213 unsigned long lib_ring_buffer_get_read_data_size(
214 const struct lttng_ust_lib_ring_buffer_config *config,
215 struct lttng_ust_lib_ring_buffer *buf,
216 struct lttng_ust_shm_handle *handle)
217 {
218 return subbuffer_get_read_data_size(config, &buf->backend, handle);
219 }
220
221 static inline
222 unsigned long lib_ring_buffer_get_records_count(
223 const struct lttng_ust_lib_ring_buffer_config *config,
224 struct lttng_ust_lib_ring_buffer *buf)
225 {
226 return v_read(config, &buf->records_count);
227 }
228
229 static inline
230 unsigned long lib_ring_buffer_get_records_overrun(
231 const struct lttng_ust_lib_ring_buffer_config *config,
232 struct lttng_ust_lib_ring_buffer *buf)
233 {
234 return v_read(config, &buf->records_overrun);
235 }
236
237 static inline
238 unsigned long lib_ring_buffer_get_records_lost_full(
239 const struct lttng_ust_lib_ring_buffer_config *config,
240 struct lttng_ust_lib_ring_buffer *buf)
241 {
242 return v_read(config, &buf->records_lost_full);
243 }
244
245 static inline
246 unsigned long lib_ring_buffer_get_records_lost_wrap(
247 const struct lttng_ust_lib_ring_buffer_config *config,
248 struct lttng_ust_lib_ring_buffer *buf)
249 {
250 return v_read(config, &buf->records_lost_wrap);
251 }
252
253 static inline
254 unsigned long lib_ring_buffer_get_records_lost_big(
255 const struct lttng_ust_lib_ring_buffer_config *config,
256 struct lttng_ust_lib_ring_buffer *buf)
257 {
258 return v_read(config, &buf->records_lost_big);
259 }
260
261 static inline
262 unsigned long lib_ring_buffer_get_records_read(
263 const struct lttng_ust_lib_ring_buffer_config *config,
264 struct lttng_ust_lib_ring_buffer *buf)
265 {
266 return v_read(config, &buf->backend.records_read);
267 }
268
269 #endif /* _LTTNG_RING_BUFFER_FRONTEND_H */
This page took 0.034672 seconds and 4 git commands to generate.