Port ring buffer to userspace, part 1
[lttng-ust.git] / libringbuffer / frontend.h
1 #ifndef _LINUX_RING_BUFFER_FRONTEND_H
2 #define _LINUX_RING_BUFFER_FRONTEND_H
3
4 /*
5 * linux/ringbuffer/frontend.h
6 *
7 * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 *
9 * Ring Buffer Library Synchronization Header (API).
10 *
11 * Author:
12 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
13 *
14 * See ring_buffer_frontend.c for more information on wait-free algorithms.
15 *
16 * Dual LGPL v2.1/GPL v2 license.
17 */
18
19 #include <urcu/compiler.h>
20 #include <urcu/uatomic.h>
21
22 /* Internal helpers */
23 #include "frontend_internal.h"
24
25 /* Buffer creation/removal and setup operations */
26
/*
 * channel_create - create a ring buffer channel.
 *
 * switch_timer_interval is the time interval (in us) to fill sub-buffers with
 * padding to let readers get those sub-buffers. Used for live streaming.
 *
 * read_timer_interval is the time interval (in us) to wake up pending readers.
 *
 * buf_addr is a pointer to the beginning of the preallocated buffer contiguous
 * address mapping. It is used only by RING_BUFFER_STATIC configuration. It can
 * be set to NULL for other backends.
 *
 * priv is an opaque private data pointer kept with the channel (returned
 * later by channel_destroy()).
 *
 * NOTE(review): returns the new channel, presumably NULL on error — confirm
 * against ring_buffer_frontend.c.
 */

extern
struct channel *channel_create(const struct lib_ring_buffer_config *config,
			       const char *name, void *priv,
			       void *buf_addr,
			       size_t subbuf_size, size_t num_subbuf,
			       unsigned int switch_timer_interval,
			       unsigned int read_timer_interval);
45
/*
 * channel_destroy finalizes all channel's buffers, waits for readers to
 * release all references, and destroys the channel. Returns the private
 * data pointer (the priv argument originally given to channel_create()).
 */
extern
void *channel_destroy(struct channel *chan);
53
54
55 /* Buffer read operations */
56
/*
 * for_each_channel_cpu - iterate over each cpu present in the channel's
 * backend cpumask. "cpu" must be a signed integer lvalue (iteration starts
 * from -1 so that cpumask_next() yields the first set bit).
 *
 * Iteration on channel cpumask needs to issue a read barrier to match the
 * write barrier in cpu hotplug. It orders the cpumask read before read of
 * per-cpu buffer data. The per-cpu buffer is never removed by cpu hotplug;
 * teardown is only performed at channel destruction.
 */
#define for_each_channel_cpu(cpu, chan)					\
	for ((cpu) = -1;						\
		({ (cpu) = cpumask_next(cpu, (chan)->backend.cpumask); \
		   cmm_smp_read_barrier_depends(); (cpu) < nr_cpu_ids; });)
67
/* Get the ring buffer of a channel for a given cpu. */
extern struct lib_ring_buffer *channel_get_ring_buffer(
				const struct lib_ring_buffer_config *config,
				struct channel *chan, int cpu);
/*
 * Acquire/release read-side access to a buffer: open before reading,
 * release when done. open_read returns an int status (nonzero presumably
 * meaning failure — confirm against ring_buffer_frontend.c).
 */
extern int lib_ring_buffer_open_read(struct lib_ring_buffer *buf);
extern void lib_ring_buffer_release_read(struct lib_ring_buffer *buf);
73
/*
 * Read sequence: snapshot, many get_subbuf/put_subbuf, move_consumer.
 */
/* Snapshot the current consumed and produced positions into *consumed
 * and *produced. Returns an int status. */
extern int lib_ring_buffer_snapshot(struct lib_ring_buffer *buf,
				    unsigned long *consumed,
				    unsigned long *produced);
/* Move the consumer position forward to consumed_new. */
extern void lib_ring_buffer_move_consumer(struct lib_ring_buffer *buf,
					  unsigned long consumed_new);

/* Take (get) / release (put) the sub-buffer at the consumed position
 * for reading. get returns an int status. */
extern int lib_ring_buffer_get_subbuf(struct lib_ring_buffer *buf,
				      unsigned long consumed);
extern void lib_ring_buffer_put_subbuf(struct lib_ring_buffer *buf);
86
87 /*
88 * lib_ring_buffer_get_next_subbuf/lib_ring_buffer_put_next_subbuf are helpers
89 * to read sub-buffers sequentially.
90 */
91 static inline int lib_ring_buffer_get_next_subbuf(struct lib_ring_buffer *buf)
92 {
93 int ret;
94
95 ret = lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
96 &buf->prod_snapshot);
97 if (ret)
98 return ret;
99 ret = lib_ring_buffer_get_subbuf(buf, buf->cons_snapshot);
100 return ret;
101 }
102
103 static inline void lib_ring_buffer_put_next_subbuf(struct lib_ring_buffer *buf)
104 {
105 lib_ring_buffer_put_subbuf(buf);
106 lib_ring_buffer_move_consumer(buf, subbuf_align(buf->cons_snapshot,
107 buf->backend.chan));
108 }
109
/* Reset a whole channel / a single buffer. NOTE(review): presumably
 * discards pending data — confirm against ring_buffer_frontend.c. */
extern void channel_reset(struct channel *chan);
extern void lib_ring_buffer_reset(struct lib_ring_buffer *buf);
112
/*
 * Return the current write offset of the buffer, read through the
 * config-specific v_read() accessor.
 */
static inline
unsigned long lib_ring_buffer_get_offset(const struct lib_ring_buffer_config *config,
					 struct lib_ring_buffer *buf)
{
	return v_read(config, &buf->offset);
}
119
/*
 * Return the consumed counter of the buffer, read atomically with
 * uatomic_read(). (config is unused here; kept for API symmetry with the
 * other accessors.)
 */
static inline
unsigned long lib_ring_buffer_get_consumed(const struct lib_ring_buffer_config *config,
					   struct lib_ring_buffer *buf)
{
	return uatomic_read(&buf->consumed);
}
126
/*
 * Must call lib_ring_buffer_is_finalized before reading counters (memory
 * ordering enforced with respect to trace teardown).
 *
 * Returns the buffer's finalized flag. CMM_ACCESS_ONCE forces a single
 * volatile read of the flag; the read barrier below orders that read
 * before any subsequent counter reads by the caller.
 */
static inline
int lib_ring_buffer_is_finalized(const struct lib_ring_buffer_config *config,
				 struct lib_ring_buffer *buf)
{
	int finalized = CMM_ACCESS_ONCE(buf->finalized);
	/*
	 * Read finalized before counters.
	 */
	cmm_smp_rmb();
	return finalized;
}
142
/*
 * Return the channel's finalized flag. Plain (non-atomic, unordered) read —
 * unlike lib_ring_buffer_is_finalized, no barrier is issued here.
 */
static inline
int lib_ring_buffer_channel_is_finalized(const struct channel *chan)
{
	return chan->finalized;
}
148
/*
 * Return nonzero when recording is disabled on the channel
 * (atomic read of chan->record_disabled).
 */
static inline
int lib_ring_buffer_channel_is_disabled(const struct channel *chan)
{
	return uatomic_read(&chan->record_disabled);
}
154
/*
 * Return the data size available for reading, as reported by the backend
 * (delegates to subbuffer_get_read_data_size()).
 */
static inline
unsigned long lib_ring_buffer_get_read_data_size(
				const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer *buf)
{
	return subbuffer_get_read_data_size(config, &buf->backend);
}
162
/*
 * Return the buffer's records_count counter (number of records written,
 * per the field name) via v_read().
 */
static inline
unsigned long lib_ring_buffer_get_records_count(
				const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer *buf)
{
	return v_read(config, &buf->records_count);
}
170
/*
 * Return the buffer's records_overrun counter (records overwritten before
 * being read, per the field name) via v_read().
 */
static inline
unsigned long lib_ring_buffer_get_records_overrun(
				const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer *buf)
{
	return v_read(config, &buf->records_overrun);
}
178
/*
 * Return the buffer's records_lost_full counter (records dropped because
 * the buffer was full, per the field name) via v_read().
 */
static inline
unsigned long lib_ring_buffer_get_records_lost_full(
				const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer *buf)
{
	return v_read(config, &buf->records_lost_full);
}
186
/*
 * Return the buffer's records_lost_wrap counter via v_read().
 * NOTE(review): per the field name this counts records lost to a
 * wrap-around condition — confirm exact semantics in the frontend
 * implementation.
 */
static inline
unsigned long lib_ring_buffer_get_records_lost_wrap(
				const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer *buf)
{
	return v_read(config, &buf->records_lost_wrap);
}
194
/*
 * Return the buffer's records_lost_big counter via v_read().
 * NOTE(review): per the field name this counts records too large to fit
 * (e.g. bigger than a sub-buffer) — confirm in the frontend implementation.
 */
static inline
unsigned long lib_ring_buffer_get_records_lost_big(
				const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer *buf)
{
	return v_read(config, &buf->records_lost_big);
}
202
/*
 * Return the backend's records_read counter (number of records consumed
 * by readers, per the field name) via v_read().
 */
static inline
unsigned long lib_ring_buffer_get_records_read(
				const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer *buf)
{
	return v_read(config, &buf->backend.records_read);
}
210
211 #endif /* _LINUX_RING_BUFFER_FRONTEND_H */
This page took 0.035936 seconds and 5 git commands to generate.