Implement shm object table
[lttng-ust.git] / libringbuffer / frontend.h
1 #ifndef _LINUX_RING_BUFFER_FRONTEND_H
2 #define _LINUX_RING_BUFFER_FRONTEND_H
3
4 /*
5 * linux/ringbuffer/frontend.h
6 *
7 * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 *
9 * Ring Buffer Library Synchronization Header (API).
10 *
11 * Author:
12 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
13 *
14 * See ring_buffer_frontend.c for more information on wait-free algorithms.
15 *
16 * Dual LGPL v2.1/GPL v2 license.
17 */
18
19 #include <urcu/compiler.h>
20 #include <urcu/uatomic.h>
21
22 #include "smp.h"
23 /* Internal helpers */
24 #include "frontend_internal.h"
25
26 /* Buffer creation/removal and setup operations */
27
28 /*
29 * switch_timer_interval is the time interval (in us) to fill sub-buffers with
30 * padding to let readers get those sub-buffers. Used for live streaming.
31 *
32 * read_timer_interval is the time interval (in us) to wake up pending readers.
33 *
34 * buf_addr is a pointer to the beginning of the preallocated buffer contiguous
35 * address mapping. It is used only by RING_BUFFER_STATIC configuration. It can
36 * be set to NULL for other backends.
37 */
38
/*
 * Create a channel with num_subbuf sub-buffers of subbuf_size bytes each.
 * Timer intervals are described in the comment block above. priv is an
 * opaque private data pointer kept for the caller (returned by
 * channel_destroy). Returns the shm handle owning the channel;
 * presumably NULL on error — TODO confirm in ring_buffer_frontend.c.
 */
extern
struct shm_handle *channel_create(const struct lib_ring_buffer_config *config,
				  const char *name, void *priv,
				  void *buf_addr,
				  size_t subbuf_size, size_t num_subbuf,
				  unsigned int switch_timer_interval,
				  unsigned int read_timer_interval);

/*
 * channel_destroy returns the private data pointer (the priv argument given
 * to channel_create). It finalizes all channel's buffers, waits for readers
 * to release all references, and destroys the channel.
 */
extern
void *channel_destroy(struct channel *chan, struct shm_handle *handle);
54
55
56 /* Buffer read operations */
57
/*
 * Iteration on channel cpumask needs to issue a read barrier to match the write
 * barrier in cpu hotplug. It orders the cpumask read before read of per-cpu
 * buffer data. The per-cpu buffer is never removed by cpu hotplug; teardown is
 * only performed at channel destruction.
 */
#define for_each_channel_cpu(cpu, chan)					\
	for_each_possible_cpu(cpu)

/*
 * Look up the ring buffer of chan for the given cpu (for per-cpu
 * configurations; presumably cpu is ignored for global buffers — confirm
 * against ring_buffer_frontend.c).
 */
extern struct lib_ring_buffer *channel_get_ring_buffer(
				const struct lib_ring_buffer_config *config,
				struct channel *chan, int cpu,
				struct shm_handle *handle);
/*
 * Acquire/release a read-side reference on a buffer. open_read returns
 * non-zero on failure; every successful open must be paired with a
 * release_read.
 */
extern int lib_ring_buffer_open_read(struct lib_ring_buffer *buf,
				     struct shm_handle *handle);
extern void lib_ring_buffer_release_read(struct lib_ring_buffer *buf,
					 struct shm_handle *handle);
75
/*
 * Read sequence: snapshot, many get_subbuf/put_subbuf, move_consumer.
 */
/*
 * Sample the current consumed and produced positions into *consumed and
 * *produced. Returns non-zero on error.
 */
extern int lib_ring_buffer_snapshot(struct lib_ring_buffer *buf,
				    unsigned long *consumed,
				    unsigned long *produced,
				    struct shm_handle *handle);
/* Advance the consumer position to consumed_new. */
extern void lib_ring_buffer_move_consumer(struct lib_ring_buffer *buf,
					  unsigned long consumed_new,
					  struct shm_handle *handle);

/*
 * Grab the sub-buffer at position "consumed" for reading (non-zero return
 * on error); put_subbuf releases it. Each successful get must be paired
 * with a put before the consumer can be moved past that sub-buffer.
 */
extern int lib_ring_buffer_get_subbuf(struct lib_ring_buffer *buf,
				      unsigned long consumed,
				      struct shm_handle *handle);
extern void lib_ring_buffer_put_subbuf(struct lib_ring_buffer *buf,
				       struct shm_handle *handle);
93 /*
94 * lib_ring_buffer_get_next_subbuf/lib_ring_buffer_put_next_subbuf are helpers
95 * to read sub-buffers sequentially.
96 */
97 static inline int lib_ring_buffer_get_next_subbuf(struct lib_ring_buffer *buf,
98 struct shm_handle *handle)
99 {
100 int ret;
101
102 ret = lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
103 &buf->prod_snapshot, handle);
104 if (ret)
105 return ret;
106 ret = lib_ring_buffer_get_subbuf(buf, buf->cons_snapshot, handle);
107 return ret;
108 }
109
110 static inline
111 void lib_ring_buffer_put_next_subbuf(struct lib_ring_buffer *buf,
112 struct shm_handle *handle)
113 {
114 lib_ring_buffer_put_subbuf(buf, handle);
115 lib_ring_buffer_move_consumer(buf, subbuf_align(buf->cons_snapshot,
116 shmp(handle, buf->backend.chan)), handle);
117 }
118
/*
 * Reset a channel / a single buffer to its initial state. NOTE(review):
 * exact reset semantics (which counters/positions are cleared) are defined
 * in ring_buffer_frontend.c — confirm before relying on them.
 */
extern void channel_reset(struct channel *chan);
extern void lib_ring_buffer_reset(struct lib_ring_buffer *buf,
				  struct shm_handle *handle);
122
123 static inline
124 unsigned long lib_ring_buffer_get_offset(const struct lib_ring_buffer_config *config,
125 struct lib_ring_buffer *buf)
126 {
127 return v_read(config, &buf->offset);
128 }
129
130 static inline
131 unsigned long lib_ring_buffer_get_consumed(const struct lib_ring_buffer_config *config,
132 struct lib_ring_buffer *buf)
133 {
134 return uatomic_read(&buf->consumed);
135 }
136
/*
 * Must call lib_ring_buffer_is_finalized before reading counters (memory
 * ordering enforced with respect to trace teardown).
 */
static inline
int lib_ring_buffer_is_finalized(const struct lib_ring_buffer_config *config,
				 struct lib_ring_buffer *buf)
{
	/* Single load of the flag; config is unused, kept for API symmetry. */
	int finalized = CMM_ACCESS_ONCE(buf->finalized);
	/*
	 * Read finalized before counters.
	 */
	cmm_smp_rmb();
	return finalized;
}
152
153 static inline
154 int lib_ring_buffer_channel_is_finalized(const struct channel *chan)
155 {
156 return chan->finalized;
157 }
158
159 static inline
160 int lib_ring_buffer_channel_is_disabled(const struct channel *chan)
161 {
162 return uatomic_read(&chan->record_disabled);
163 }
164
165 static inline
166 unsigned long lib_ring_buffer_get_read_data_size(
167 const struct lib_ring_buffer_config *config,
168 struct lib_ring_buffer *buf,
169 struct shm_handle *handle)
170 {
171 return subbuffer_get_read_data_size(config, &buf->backend, handle);
172 }
173
174 static inline
175 unsigned long lib_ring_buffer_get_records_count(
176 const struct lib_ring_buffer_config *config,
177 struct lib_ring_buffer *buf)
178 {
179 return v_read(config, &buf->records_count);
180 }
181
182 static inline
183 unsigned long lib_ring_buffer_get_records_overrun(
184 const struct lib_ring_buffer_config *config,
185 struct lib_ring_buffer *buf)
186 {
187 return v_read(config, &buf->records_overrun);
188 }
189
190 static inline
191 unsigned long lib_ring_buffer_get_records_lost_full(
192 const struct lib_ring_buffer_config *config,
193 struct lib_ring_buffer *buf)
194 {
195 return v_read(config, &buf->records_lost_full);
196 }
197
198 static inline
199 unsigned long lib_ring_buffer_get_records_lost_wrap(
200 const struct lib_ring_buffer_config *config,
201 struct lib_ring_buffer *buf)
202 {
203 return v_read(config, &buf->records_lost_wrap);
204 }
205
206 static inline
207 unsigned long lib_ring_buffer_get_records_lost_big(
208 const struct lib_ring_buffer_config *config,
209 struct lib_ring_buffer *buf)
210 {
211 return v_read(config, &buf->records_lost_big);
212 }
213
214 static inline
215 unsigned long lib_ring_buffer_get_records_read(
216 const struct lib_ring_buffer_config *config,
217 struct lib_ring_buffer *buf)
218 {
219 return v_read(config, &buf->backend.records_read);
220 }
221
222 #endif /* _LINUX_RING_BUFFER_FRONTEND_H */
This page took 0.034371 seconds and 5 git commands to generate.