Define max nesting count constant
[lttng-modules.git] / lib / ringbuffer / frontend.h
1 #ifndef _LIB_RING_BUFFER_FRONTEND_H
2 #define _LIB_RING_BUFFER_FRONTEND_H
3
4 /*
5 * lib/ringbuffer/frontend.h
6 *
7 * Ring Buffer Library Synchronization Header (API).
8 *
9 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
10 *
11 * This library is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public
13 * License as published by the Free Software Foundation; only
14 * version 2.1 of the License.
15 *
16 * This library is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
20 *
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with this library; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24 *
25 * Author:
26 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
27 *
28 * See ring_buffer_frontend.c for more information on wait-free algorithms.
29 */
30
31 #include <linux/pipe_fs_i.h>
32 #include <linux/rcupdate.h>
33 #include <linux/cpumask.h>
34 #include <linux/module.h>
35 #include <linux/bitops.h>
36 #include <linux/splice.h>
37 #include <linux/string.h>
38 #include <linux/timer.h>
39 #include <linux/sched.h>
40 #include <linux/cache.h>
41 #include <linux/time.h>
42 #include <linux/slab.h>
43 #include <linux/init.h>
44 #include <linux/stat.h>
45 #include <linux/cpu.h>
46 #include <linux/fs.h>
47
48 #include <asm/atomic.h>
49 #include <asm/local.h>
50
51 /* Internal helpers */
52 #include <wrapper/ringbuffer/frontend_internal.h>
53
/*
 * Max ring buffer nesting count, see lib_ring_buffer_get_cpu().
 * NOTE(review): presumably bounds a per-cpu nesting counter used to
 * reject overly deep recursive tracing contexts -- confirm against
 * frontend_internal.h.
 */
#define RING_BUFFER_MAX_NESTING 4
56
57 /* Buffer creation/removal and setup operations */
58
/*
 * channel_create - allocate and initialize a channel and its buffers.
 *
 * switch_timer_interval is the time interval (in us) to fill sub-buffers with
 * padding to let readers get those sub-buffers. Used for live streaming.
 *
 * read_timer_interval is the time interval (in us) to wake up pending readers.
 *
 * buf_addr is a pointer to the beginning of the preallocated buffer contiguous
 * address mapping. It is used only by RING_BUFFER_STATIC configuration. It can
 * be set to NULL for other backends.
 *
 * priv is an opaque private data pointer kept for the caller; it is handed
 * back by channel_destroy().
 *
 * NOTE(review): return value on failure (NULL vs ERR_PTR) is defined in
 * ring_buffer_frontend.c -- confirm before relying on it.
 */

extern
struct channel *channel_create(const struct lib_ring_buffer_config *config,
			       const char *name, void *priv,
			       void *buf_addr,
			       size_t subbuf_size, size_t num_subbuf,
			       unsigned int switch_timer_interval,
			       unsigned int read_timer_interval);
77
/*
 * channel_destroy returns the private data pointer (the priv argument that
 * was given to channel_create). It finalizes all channel's buffers, waits
 * for readers to release all references, and destroys the channel.
 */
extern
void *channel_destroy(struct channel *chan);
85
86
87 /* Buffer read operations */
88
/*
 * for_each_channel_cpu - iterate over every cpu present in a channel's
 * backend cpumask.
 * @cpu: int iteration variable, assigned each cpu in turn (starts at -1,
 *       loop ends when cpumask_next() returns >= nr_cpu_ids).
 * @chan: struct channel * whose backend.cpumask is walked.
 *
 * Iteration on channel cpumask needs to issue a read barrier to match the write
 * barrier in cpu hotplug. It orders the cpumask read before read of per-cpu
 * buffer data. The per-cpu buffer is never removed by cpu hotplug; teardown is
 * only performed at channel destruction.
 */
#define for_each_channel_cpu(cpu, chan)					\
	for ((cpu) = -1;						\
		({ (cpu) = cpumask_next(cpu, (chan)->backend.cpumask);	\
		   smp_read_barrier_depends(); (cpu) < nr_cpu_ids; });)
99
/* Look up the ring buffer of @chan associated with @cpu. */
extern struct lib_ring_buffer *channel_get_ring_buffer(
				const struct lib_ring_buffer_config *config,
				struct channel *chan, int cpu);
/*
 * Acquire/release a read-side reference on a buffer.
 * NOTE(review): open_read presumably returns 0 on success and a negative
 * errno otherwise -- confirm in ring_buffer_frontend.c.
 */
extern int lib_ring_buffer_open_read(struct lib_ring_buffer *buf);
extern void lib_ring_buffer_release_read(struct lib_ring_buffer *buf);
105
/*
 * Read sequence: snapshot, many get_subbuf/put_subbuf, move_consumer.
 */

/* Sample the current consumed and produced positions of @buf. */
extern int lib_ring_buffer_snapshot(struct lib_ring_buffer *buf,
				    unsigned long *consumed,
				    unsigned long *produced);
/* Snapshot variant used when sampling positions for all buffers. */
extern int lib_ring_buffer_snapshot_sample_positions(
				    struct lib_ring_buffer *buf,
				    unsigned long *consumed,
				    unsigned long *produced);
/* Advance the consumer position of @buf to @consumed_new. */
extern void lib_ring_buffer_move_consumer(struct lib_ring_buffer *buf,
					  unsigned long consumed_new);

/* Acquire for reading the sub-buffer containing position @consumed. */
extern int lib_ring_buffer_get_subbuf(struct lib_ring_buffer *buf,
				      unsigned long consumed);
/* Release the sub-buffer previously acquired with get_subbuf. */
extern void lib_ring_buffer_put_subbuf(struct lib_ring_buffer *buf);

void lib_ring_buffer_set_quiescent_channel(struct channel *chan);
void lib_ring_buffer_clear_quiescent_channel(struct channel *chan);
125
126 /*
127 * lib_ring_buffer_get_next_subbuf/lib_ring_buffer_put_next_subbuf are helpers
128 * to read sub-buffers sequentially.
129 */
130 static inline int lib_ring_buffer_get_next_subbuf(struct lib_ring_buffer *buf)
131 {
132 int ret;
133
134 ret = lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
135 &buf->prod_snapshot);
136 if (ret)
137 return ret;
138 ret = lib_ring_buffer_get_subbuf(buf, buf->cons_snapshot);
139 return ret;
140 }
141
142 static inline void lib_ring_buffer_put_next_subbuf(struct lib_ring_buffer *buf)
143 {
144 lib_ring_buffer_put_subbuf(buf);
145 lib_ring_buffer_move_consumer(buf, subbuf_align(buf->cons_snapshot,
146 buf->backend.chan));
147 }
148
/* Reset a whole channel; see ring_buffer_frontend.c for exact semantics. */
extern void channel_reset(struct channel *chan);
/* Reset a single ring buffer; see ring_buffer_frontend.c for exact semantics. */
extern void lib_ring_buffer_reset(struct lib_ring_buffer *buf);
151
152 static inline
153 unsigned long lib_ring_buffer_get_offset(const struct lib_ring_buffer_config *config,
154 struct lib_ring_buffer *buf)
155 {
156 return v_read(config, &buf->offset);
157 }
158
159 static inline
160 unsigned long lib_ring_buffer_get_consumed(const struct lib_ring_buffer_config *config,
161 struct lib_ring_buffer *buf)
162 {
163 return atomic_long_read(&buf->consumed);
164 }
165
/*
 * Must call lib_ring_buffer_is_finalized before reading counters (memory
 * ordering enforced with respect to trace teardown).
 *
 * Returns the buffer's finalized flag as sampled before the barrier.
 */
static inline
int lib_ring_buffer_is_finalized(const struct lib_ring_buffer_config *config,
				 struct lib_ring_buffer *buf)
{
	int finalized = READ_ONCE(buf->finalized);
	/*
	 * Read finalized before counters.
	 * NOTE(review): this smp_rmb() presumably pairs with a write
	 * barrier on the finalization path -- confirm in
	 * ring_buffer_frontend.c.
	 */
	smp_rmb();
	return finalized;
}
181
182 static inline
183 int lib_ring_buffer_channel_is_finalized(const struct channel *chan)
184 {
185 return chan->finalized;
186 }
187
188 static inline
189 int lib_ring_buffer_channel_is_disabled(const struct channel *chan)
190 {
191 return atomic_read(&chan->record_disabled);
192 }
193
194 static inline
195 unsigned long lib_ring_buffer_get_read_data_size(
196 const struct lib_ring_buffer_config *config,
197 struct lib_ring_buffer *buf)
198 {
199 return subbuffer_get_read_data_size(config, &buf->backend);
200 }
201
202 static inline
203 unsigned long lib_ring_buffer_get_records_count(
204 const struct lib_ring_buffer_config *config,
205 struct lib_ring_buffer *buf)
206 {
207 return v_read(config, &buf->records_count);
208 }
209
210 static inline
211 unsigned long lib_ring_buffer_get_records_overrun(
212 const struct lib_ring_buffer_config *config,
213 struct lib_ring_buffer *buf)
214 {
215 return v_read(config, &buf->records_overrun);
216 }
217
218 static inline
219 unsigned long lib_ring_buffer_get_records_lost_full(
220 const struct lib_ring_buffer_config *config,
221 struct lib_ring_buffer *buf)
222 {
223 return v_read(config, &buf->records_lost_full);
224 }
225
226 static inline
227 unsigned long lib_ring_buffer_get_records_lost_wrap(
228 const struct lib_ring_buffer_config *config,
229 struct lib_ring_buffer *buf)
230 {
231 return v_read(config, &buf->records_lost_wrap);
232 }
233
234 static inline
235 unsigned long lib_ring_buffer_get_records_lost_big(
236 const struct lib_ring_buffer_config *config,
237 struct lib_ring_buffer *buf)
238 {
239 return v_read(config, &buf->records_lost_big);
240 }
241
242 static inline
243 unsigned long lib_ring_buffer_get_records_read(
244 const struct lib_ring_buffer_config *config,
245 struct lib_ring_buffer *buf)
246 {
247 return v_read(config, &buf->backend.records_read);
248 }
249
250 #endif /* _LIB_RING_BUFFER_FRONTEND_H */
This page took 0.033773 seconds and 4 git commands to generate.