Move struct ltt_channel to shm for consumer flush
[lttng-ust.git] / libringbuffer / frontend.h
... / ...
CommitLineData
1#ifndef _LINUX_RING_BUFFER_FRONTEND_H
2#define _LINUX_RING_BUFFER_FRONTEND_H
3
4/*
5 * linux/ringbuffer/frontend.h
6 *
7 * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 *
9 * Ring Buffer Library Synchronization Header (API).
10 *
11 * Author:
12 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
13 *
14 * See ring_buffer_frontend.c for more information on wait-free algorithms.
15 *
16 * Dual LGPL v2.1/GPL v2 license.
17 */
18
19#include <urcu/compiler.h>
20#include <urcu/uatomic.h>
21
22#include "smp.h"
23/* Internal helpers */
24#include "frontend_internal.h"
25
26/* Buffer creation/removal and setup operations */
27
28/*
29 * switch_timer_interval is the time interval (in us) to fill sub-buffers with
30 * padding to let readers get those sub-buffers. Used for live streaming.
31 *
32 * read_timer_interval is the time interval (in us) to wake up pending readers.
33 *
 * buf_addr is a pointer to the beginning of the preallocated buffer contiguous
35 * address mapping. It is used only by RING_BUFFER_STATIC configuration. It can
36 * be set to NULL for other backends.
37 *
 * priv_data (output) is set to a pointer into a "priv_data_size"-sized
39 * memory area for client-specific data. This memory is managed by lib
40 * ring buffer. priv_data_align is the alignment required for the
41 * private data area.
42 */
43
44extern
45struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_lib_ring_buffer_config *config,
46 const char *name,
47 void **priv_data,
48 size_t priv_data_align,
49 size_t priv_data_size,
50 void *buf_addr,
51 size_t subbuf_size, size_t num_subbuf,
52 unsigned int switch_timer_interval,
53 unsigned int read_timer_interval,
54 int *shm_fd, int *wait_fd,
55 uint64_t *memory_map_size);
56
57/* channel_handle_create - for consumer. */
58extern
59struct lttng_ust_shm_handle *channel_handle_create(int shm_fd, int wait_fd,
60 uint64_t memory_map_size);
61
62/* channel_handle_add_stream - for consumer. */
63extern
64int channel_handle_add_stream(struct lttng_ust_shm_handle *handle,
65 int shm_fd, int wait_fd, uint64_t memory_map_size);
66
67/*
68 * channel_destroy finalizes all channel's buffers, waits for readers to
69 * release all references, and destroys the channel.
70 */
71extern
72void channel_destroy(struct channel *chan, struct lttng_ust_shm_handle *handle,
73 int shadow);
74
75
76/* Buffer read operations */
77
78/*
79 * Iteration on channel cpumask needs to issue a read barrier to match the write
80 * barrier in cpu hotplug. It orders the cpumask read before read of per-cpu
81 * buffer data. The per-cpu buffer is never removed by cpu hotplug; teardown is
82 * only performed at channel destruction.
83 */
84#define for_each_channel_cpu(cpu, chan) \
85 for_each_possible_cpu(cpu)
86
87extern struct lttng_ust_lib_ring_buffer *channel_get_ring_buffer(
88 const struct lttng_ust_lib_ring_buffer_config *config,
89 struct channel *chan, int cpu,
90 struct lttng_ust_shm_handle *handle,
91 int *shm_fd, int *wait_fd,
92 uint64_t *memory_map_size);
93extern int lib_ring_buffer_open_read(struct lttng_ust_lib_ring_buffer *buf,
94 struct lttng_ust_shm_handle *handle,
95 int shadow);
96extern void lib_ring_buffer_release_read(struct lttng_ust_lib_ring_buffer *buf,
97 struct lttng_ust_shm_handle *handle,
98 int shadow);
99
100/*
101 * Read sequence: snapshot, many get_subbuf/put_subbuf, move_consumer.
102 */
103extern int lib_ring_buffer_snapshot(struct lttng_ust_lib_ring_buffer *buf,
104 unsigned long *consumed,
105 unsigned long *produced,
106 struct lttng_ust_shm_handle *handle);
107extern void lib_ring_buffer_move_consumer(struct lttng_ust_lib_ring_buffer *buf,
108 unsigned long consumed_new,
109 struct lttng_ust_shm_handle *handle);
110
111extern int lib_ring_buffer_get_subbuf(struct lttng_ust_lib_ring_buffer *buf,
112 unsigned long consumed,
113 struct lttng_ust_shm_handle *handle);
114extern void lib_ring_buffer_put_subbuf(struct lttng_ust_lib_ring_buffer *buf,
115 struct lttng_ust_shm_handle *handle);
116
117/*
118 * lib_ring_buffer_get_next_subbuf/lib_ring_buffer_put_next_subbuf are helpers
119 * to read sub-buffers sequentially.
120 */
121static inline int lib_ring_buffer_get_next_subbuf(struct lttng_ust_lib_ring_buffer *buf,
122 struct lttng_ust_shm_handle *handle)
123{
124 int ret;
125
126 ret = lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
127 &buf->prod_snapshot, handle);
128 if (ret)
129 return ret;
130 ret = lib_ring_buffer_get_subbuf(buf, buf->cons_snapshot, handle);
131 return ret;
132}
133
134static inline
135void lib_ring_buffer_put_next_subbuf(struct lttng_ust_lib_ring_buffer *buf,
136 struct lttng_ust_shm_handle *handle)
137{
138 lib_ring_buffer_put_subbuf(buf, handle);
139 lib_ring_buffer_move_consumer(buf, subbuf_align(buf->cons_snapshot,
140 shmp(handle, buf->backend.chan)), handle);
141}
142
143extern void channel_reset(struct channel *chan);
144extern void lib_ring_buffer_reset(struct lttng_ust_lib_ring_buffer *buf,
145 struct lttng_ust_shm_handle *handle);
146
147static inline
148unsigned long lib_ring_buffer_get_offset(const struct lttng_ust_lib_ring_buffer_config *config,
149 struct lttng_ust_lib_ring_buffer *buf)
150{
151 return v_read(config, &buf->offset);
152}
153
154static inline
155unsigned long lib_ring_buffer_get_consumed(const struct lttng_ust_lib_ring_buffer_config *config,
156 struct lttng_ust_lib_ring_buffer *buf)
157{
158 return uatomic_read(&buf->consumed);
159}
160
161/*
162 * Must call lib_ring_buffer_is_finalized before reading counters (memory
163 * ordering enforced with respect to trace teardown).
164 */
165static inline
166int lib_ring_buffer_is_finalized(const struct lttng_ust_lib_ring_buffer_config *config,
167 struct lttng_ust_lib_ring_buffer *buf)
168{
169 int finalized = CMM_ACCESS_ONCE(buf->finalized);
170 /*
171 * Read finalized before counters.
172 */
173 cmm_smp_rmb();
174 return finalized;
175}
176
177static inline
178int lib_ring_buffer_channel_is_finalized(const struct channel *chan)
179{
180 return chan->finalized;
181}
182
183static inline
184int lib_ring_buffer_channel_is_disabled(const struct channel *chan)
185{
186 return uatomic_read(&chan->record_disabled);
187}
188
189static inline
190unsigned long lib_ring_buffer_get_read_data_size(
191 const struct lttng_ust_lib_ring_buffer_config *config,
192 struct lttng_ust_lib_ring_buffer *buf,
193 struct lttng_ust_shm_handle *handle)
194{
195 return subbuffer_get_read_data_size(config, &buf->backend, handle);
196}
197
198static inline
199unsigned long lib_ring_buffer_get_records_count(
200 const struct lttng_ust_lib_ring_buffer_config *config,
201 struct lttng_ust_lib_ring_buffer *buf)
202{
203 return v_read(config, &buf->records_count);
204}
205
206static inline
207unsigned long lib_ring_buffer_get_records_overrun(
208 const struct lttng_ust_lib_ring_buffer_config *config,
209 struct lttng_ust_lib_ring_buffer *buf)
210{
211 return v_read(config, &buf->records_overrun);
212}
213
214static inline
215unsigned long lib_ring_buffer_get_records_lost_full(
216 const struct lttng_ust_lib_ring_buffer_config *config,
217 struct lttng_ust_lib_ring_buffer *buf)
218{
219 return v_read(config, &buf->records_lost_full);
220}
221
222static inline
223unsigned long lib_ring_buffer_get_records_lost_wrap(
224 const struct lttng_ust_lib_ring_buffer_config *config,
225 struct lttng_ust_lib_ring_buffer *buf)
226{
227 return v_read(config, &buf->records_lost_wrap);
228}
229
230static inline
231unsigned long lib_ring_buffer_get_records_lost_big(
232 const struct lttng_ust_lib_ring_buffer_config *config,
233 struct lttng_ust_lib_ring_buffer *buf)
234{
235 return v_read(config, &buf->records_lost_big);
236}
237
238static inline
239unsigned long lib_ring_buffer_get_records_read(
240 const struct lttng_ust_lib_ring_buffer_config *config,
241 struct lttng_ust_lib_ring_buffer *buf)
242{
243 return v_read(config, &buf->backend.records_read);
244}
245
246#endif /* _LINUX_RING_BUFFER_FRONTEND_H */
This page took 0.02307 seconds and 4 git commands to generate.