libust ABI: export streams
[lttng-ust.git] / libringbuffer / frontend.h
CommitLineData
852c2936
MD
1#ifndef _LINUX_RING_BUFFER_FRONTEND_H
2#define _LINUX_RING_BUFFER_FRONTEND_H
3
4/*
5 * linux/ringbuffer/frontend.h
6 *
7 * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 *
9 * Ring Buffer Library Synchronization Header (API).
10 *
11 * Author:
12 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
13 *
14 * See ring_buffer_frontend.c for more information on wait-free algorithms.
15 *
16 * Dual LGPL v2.1/GPL v2 license.
17 */
18
14641deb
MD
19#include <urcu/compiler.h>
20#include <urcu/uatomic.h>
21
a6352fd4 22#include "smp.h"
852c2936 23/* Internal helpers */
4931a13e 24#include "frontend_internal.h"
852c2936
MD
25
26/* Buffer creation/removal and setup operations */
27
/*
 * switch_timer_interval is the time interval (in us) to fill sub-buffers with
 * padding to let readers get those sub-buffers. Used for live streaming.
 *
 * read_timer_interval is the time interval (in us) to wake up pending readers.
 *
 * buf_addr is a pointer to the beginning of the preallocated buffer contiguous
 * address mapping. It is used only by RING_BUFFER_STATIC configuration. It can
 * be set to NULL for other backends.
 */
38
/*
 * Create a channel with the given name, private data and buffer geometry
 * (subbuf_size bytes per sub-buffer, num_subbuf sub-buffers), and the
 * switch/read timer intervals documented above. Returns the shm_handle
 * owning the channel's shared memory.
 * NOTE(review): error convention (presumably NULL on failure) is not
 * visible here — confirm against ring_buffer_frontend.c.
 */
extern
struct shm_handle *channel_create(const struct lib_ring_buffer_config *config,
				  const char *name, void *priv,
				  void *buf_addr,
				  size_t subbuf_size, size_t num_subbuf,
				  unsigned int switch_timer_interval,
				  unsigned int read_timer_interval);
852c2936
MD
46
/*
 * channel_destroy returns the private data pointer (presumably the "priv"
 * passed to channel_create — confirm in ring_buffer_frontend.c). It
 * finalizes all channel's buffers, waits for readers to release all
 * references, and destroys the channel.
 */
extern
void *channel_destroy(struct channel *chan, struct shm_handle *handle);
852c2936
MD
54
55
56/* Buffer read operations */
57
/*
 * Iteration on channel cpumask needs to issue a read barrier to match the write
 * barrier in cpu hotplug. It orders the cpumask read before read of per-cpu
 * buffer data. The per-cpu buffer is never removed by cpu hotplug; teardown is
 * only performed at channel destruction.
 *
 * In this userspace port the "chan" argument is ignored by the expansion:
 * we simply iterate over every possible cpu (for_each_possible_cpu comes
 * from "smp.h", included above).
 */
#define for_each_channel_cpu(cpu, chan)					\
	for_each_possible_cpu(cpu)
852c2936
MD
66
/*
 * Return the ring buffer of "chan" for the given cpu, filling in the
 * shm_fd/wait_fd/memory_map_size output parameters for that buffer.
 * NOTE(review): the fd/size outputs suggest these are exported so a
 * reader process can map the stream — confirm against callers.
 */
extern struct lib_ring_buffer *channel_get_ring_buffer(
				const struct lib_ring_buffer_config *config,
				struct channel *chan, int cpu,
				struct shm_handle *handle,
				int *shm_fd, int *wait_fd,
				uint64_t *memory_map_size);
1d498196
MD
/*
 * Acquire and release a read-side reference on a buffer.
 * NOTE(review): return convention of open_read (0 on success?) is defined
 * in ring_buffer_frontend.c — confirm there.
 */
extern int lib_ring_buffer_open_read(struct lib_ring_buffer *buf,
				     struct shm_handle *handle);
extern void lib_ring_buffer_release_read(struct lib_ring_buffer *buf,
					 struct shm_handle *handle);
852c2936
MD
77
/*
 * Read sequence: snapshot, many get_subbuf/put_subbuf, move_consumer.
 */
/* Take a snapshot of the current consumed and produced positions. */
extern int lib_ring_buffer_snapshot(struct lib_ring_buffer *buf,
				    unsigned long *consumed,
				    unsigned long *produced,
				    struct shm_handle *handle);
/* Move the consumer position forward to consumed_new. */
extern void lib_ring_buffer_move_consumer(struct lib_ring_buffer *buf,
					  unsigned long consumed_new,
					  struct shm_handle *handle);
852c2936
MD
88
/* Grab, for reading, the sub-buffer containing position "consumed". */
extern int lib_ring_buffer_get_subbuf(struct lib_ring_buffer *buf,
				      unsigned long consumed,
				      struct shm_handle *handle);
/* Release the sub-buffer previously obtained with get_subbuf. */
extern void lib_ring_buffer_put_subbuf(struct lib_ring_buffer *buf,
				       struct shm_handle *handle);
852c2936
MD
94
95/*
96 * lib_ring_buffer_get_next_subbuf/lib_ring_buffer_put_next_subbuf are helpers
97 * to read sub-buffers sequentially.
98 */
1d498196
MD
99static inline int lib_ring_buffer_get_next_subbuf(struct lib_ring_buffer *buf,
100 struct shm_handle *handle)
852c2936
MD
101{
102 int ret;
103
104 ret = lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
1d498196 105 &buf->prod_snapshot, handle);
852c2936
MD
106 if (ret)
107 return ret;
1d498196 108 ret = lib_ring_buffer_get_subbuf(buf, buf->cons_snapshot, handle);
852c2936
MD
109 return ret;
110}
111
1d498196
MD
112static inline
113void lib_ring_buffer_put_next_subbuf(struct lib_ring_buffer *buf,
114 struct shm_handle *handle)
852c2936 115{
1d498196 116 lib_ring_buffer_put_subbuf(buf, handle);
852c2936 117 lib_ring_buffer_move_consumer(buf, subbuf_align(buf->cons_snapshot,
1d498196 118 shmp(handle, buf->backend.chan)), handle);
852c2936
MD
119}
120
/* Reset a channel or a single buffer to its initial state. */
extern void channel_reset(struct channel *chan);
extern void lib_ring_buffer_reset(struct lib_ring_buffer *buf,
				  struct shm_handle *handle);
852c2936
MD
124
125static inline
126unsigned long lib_ring_buffer_get_offset(const struct lib_ring_buffer_config *config,
127 struct lib_ring_buffer *buf)
128{
129 return v_read(config, &buf->offset);
130}
131
132static inline
133unsigned long lib_ring_buffer_get_consumed(const struct lib_ring_buffer_config *config,
134 struct lib_ring_buffer *buf)
135{
14641deb 136 return uatomic_read(&buf->consumed);
852c2936
MD
137}
138
/*
 * Must call lib_ring_buffer_is_finalized before reading counters (memory
 * ordering enforced with respect to trace teardown).
 */
/*
 * Single read of the finalized flag, ordered before any later counter
 * reads by the read barrier below. Do not reorder these statements.
 * (config is unused here; kept for API symmetry with the other accessors.)
 */
static inline
int lib_ring_buffer_is_finalized(const struct lib_ring_buffer_config *config,
				 struct lib_ring_buffer *buf)
{
	int finalized = CMM_ACCESS_ONCE(buf->finalized);

	/*
	 * Read finalized before counters.
	 */
	cmm_smp_rmb();
	return finalized;
}
154
155static inline
156int lib_ring_buffer_channel_is_finalized(const struct channel *chan)
157{
158 return chan->finalized;
159}
160
161static inline
162int lib_ring_buffer_channel_is_disabled(const struct channel *chan)
163{
14641deb 164 return uatomic_read(&chan->record_disabled);
852c2936
MD
165}
166
167static inline
168unsigned long lib_ring_buffer_get_read_data_size(
169 const struct lib_ring_buffer_config *config,
1d498196
MD
170 struct lib_ring_buffer *buf,
171 struct shm_handle *handle)
852c2936 172{
1d498196 173 return subbuffer_get_read_data_size(config, &buf->backend, handle);
852c2936
MD
174}
175
176static inline
177unsigned long lib_ring_buffer_get_records_count(
178 const struct lib_ring_buffer_config *config,
179 struct lib_ring_buffer *buf)
180{
181 return v_read(config, &buf->records_count);
182}
183
184static inline
185unsigned long lib_ring_buffer_get_records_overrun(
186 const struct lib_ring_buffer_config *config,
187 struct lib_ring_buffer *buf)
188{
189 return v_read(config, &buf->records_overrun);
190}
191
192static inline
193unsigned long lib_ring_buffer_get_records_lost_full(
194 const struct lib_ring_buffer_config *config,
195 struct lib_ring_buffer *buf)
196{
197 return v_read(config, &buf->records_lost_full);
198}
199
200static inline
201unsigned long lib_ring_buffer_get_records_lost_wrap(
202 const struct lib_ring_buffer_config *config,
203 struct lib_ring_buffer *buf)
204{
205 return v_read(config, &buf->records_lost_wrap);
206}
207
208static inline
209unsigned long lib_ring_buffer_get_records_lost_big(
210 const struct lib_ring_buffer_config *config,
211 struct lib_ring_buffer *buf)
212{
213 return v_read(config, &buf->records_lost_big);
214}
215
216static inline
217unsigned long lib_ring_buffer_get_records_read(
218 const struct lib_ring_buffer_config *config,
219 struct lib_ring_buffer *buf)
220{
221 return v_read(config, &buf->backend.records_read);
222}
223
224#endif /* _LINUX_RING_BUFFER_FRONTEND_H */
This page took 0.030945 seconds and 4 git commands to generate.