Only flush when there are readers active
[lttng-ust.git] / libringbuffer / frontend_types.h
#ifndef _LTTNG_RING_BUFFER_FRONTEND_TYPES_H
#define _LTTNG_RING_BUFFER_FRONTEND_TYPES_H

/*
 * libringbuffer/frontend_types.h
 *
 * Ring Buffer Library Synchronization Header (types).
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Author:
 *	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * See ring_buffer_frontend.c for more information on wait-free algorithms.
 */

#include <string.h>
#include <time.h>	/* for timer_t */

#include <urcu/list.h>
#include <urcu/uatomic.h>

#include <lttng/ringbuffer-config.h>
#include <usterr-signal-safe.h>
#include "backend_types.h"
#include "shm_internal.h"
#include "shm_types.h"
#include "vatomic.h"

/*
 * A switch is done during tracing or as a final flush after tracing (so it
 * won't write in the new sub-buffer).
 */
enum switch_mode { SWITCH_ACTIVE, SWITCH_FLUSH };

/* channel: collection of per-cpu ring buffers. */
#define RB_CHANNEL_PADDING	32
struct channel {
	int record_disabled;
	unsigned long commit_count_mask;	/*
						 * Commit count mask, removing
						 * the MSBs corresponding to
						 * bits used to represent the
						 * subbuffer index.
						 */

	unsigned long switch_timer_interval;	/* Buffer flush (us) */
	timer_t switch_timer;
	int switch_timer_enabled;

	unsigned long read_timer_interval;	/* Reader wakeup (us) */
	//timer_t read_timer;
	//wait_queue_head_t read_wait;		/* reader wait queue */
	int finalized;				/* Has channel been finalized */
	size_t priv_data_offset;
	unsigned int nr_streams;		/* Number of streams */
	struct lttng_ust_shm_handle *handle;
	char padding[RB_CHANNEL_PADDING];
	/*
	 * Associated backend contains a variable-length array. Needs to
	 * be last member.
	 */
	struct channel_backend backend;		/* Associated backend */
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
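
/*
 * Illustration of the commit count mask described above (a sketch only,
 * assuming num_subbuf is a power of two with subbuf_index_bits =
 * log2(num_subbuf)): a mask of this form clears the most-significant
 * bits that are reserved for the sub-buffer index:
 *
 *	unsigned long commit_count_mask = ~0UL >> subbuf_index_bits;
 *
 * See ring_buffer_frontend.c for the actual channel initialization.
 */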

/* Per-subbuffer commit counters used on the hot path */
#define RB_COMMIT_COUNT_HOT_PADDING	16
struct commit_counters_hot {
	union v_atomic cc;		/* Commit counter */
	union v_atomic seq;		/* Consecutive commits */
	char padding[RB_COMMIT_COUNT_HOT_PADDING];
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));

/* Per-subbuffer commit counters used only on cold paths */
#define RB_COMMIT_COUNT_COLD_PADDING	24
struct commit_counters_cold {
	union v_atomic cc_sb;		/* Incremented _once_ at sb switch */
	char padding[RB_COMMIT_COUNT_COLD_PADDING];
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));

/* ring buffer state */
#define RB_RING_BUFFER_PADDING	64
struct lttng_ust_lib_ring_buffer {
	/* First 32 bytes cache-hot cacheline */
	union v_atomic offset;		/* Current offset in the buffer */
	DECLARE_SHMP(struct commit_counters_hot, commit_hot);
					/* Commit count per sub-buffer */
	long consumed;			/*
					 * Current offset in the buffer
					 * standard atomic access (shared)
					 */
	int record_disabled;
	/* End of first 32 bytes cacheline */
	union v_atomic last_tsc;	/*
					 * Last timestamp written in the buffer.
					 */

	struct lttng_ust_lib_ring_buffer_backend backend;	/* Associated backend */

	DECLARE_SHMP(struct commit_counters_cold, commit_cold);
					/* Commit count per sub-buffer */
	long active_readers;		/*
					 * Active readers count
					 * standard atomic access (shared)
					 */
	/* Dropped records */
	union v_atomic records_lost_full;	/* Buffer full */
	union v_atomic records_lost_wrap;	/* Nested wrap-around */
	union v_atomic records_lost_big;	/* Events too big */
	union v_atomic records_count;	/* Number of records written */
	union v_atomic records_overrun;	/* Number of overwritten records */
	//wait_queue_head_t read_wait;	/* reader buffer-level wait queue */
	int finalized;			/* buffer has been finalized */
	unsigned long get_subbuf_consumed;	/* Read-side consumed */
	unsigned long prod_snapshot;	/* Producer count snapshot */
	unsigned long cons_snapshot;	/* Consumer count snapshot */
	unsigned int get_subbuf:1,	/* Sub-buffer being held by reader */
		switch_timer_enabled:1,	/* Protected by ring_buffer_nohz_lock */
		read_timer_enabled:1;	/* Protected by ring_buffer_nohz_lock */
	/* shmp pointer to self */
	DECLARE_SHMP(struct lttng_ust_lib_ring_buffer, self);
	char padding[RB_RING_BUFFER_PADDING];
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
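
/*
 * Sketch of how the "shared" counters above are meant to be accessed
 * (illustrative only): readers and writers go through the uatomic
 * helpers from <urcu/uatomic.h> rather than plain loads/stores, e.g.:
 *
 *	long consumed = uatomic_read(&buf->consumed);
 *	uatomic_inc(&buf->active_readers);
 *	...
 *	uatomic_dec(&buf->active_readers);
 */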

static inline
void *channel_get_private(struct channel *chan)
{
	return ((char *) chan) + chan->priv_data_offset;
}
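
/*
 * Hypothetical usage sketch: a client that reserved its own state at
 * priv_data_offset (just past the channel structure in the mapping) can
 * retrieve it as:
 *
 *	struct my_channel_priv *priv = channel_get_private(chan);
 *
 * where my_channel_priv stands for whatever private type the client
 * placed there.
 */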

#ifndef __rb_same_type
#define __rb_same_type(a, b)	__builtin_types_compatible_p(typeof(a), typeof(b))
#endif
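
/*
 * Compile-time example of the type check (sketch):
 *
 *	struct channel ch;
 *	__rb_same_type(ch, struct channel);		evaluates to 1
 *	__rb_same_type(ch, struct channel_backend);	evaluates to 0
 */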

/*
 * Issue warnings and disable channels upon internal error.
 * Can receive struct channel or struct channel_backend parameters.
 */
#define CHAN_WARN_ON(c, cond)	\
	({	\
		struct channel *__chan;	\
		int _____ret = caa_unlikely(cond);	\
		if (_____ret) {	\
			if (__rb_same_type(*(c), struct channel_backend))	\
				__chan = caa_container_of((void *) (c),	\
						struct channel,	\
						backend);	\
			else if (__rb_same_type(*(c), struct channel))	\
				__chan = (void *) (c);	\
			else	\
				BUG_ON(1);	\
			uatomic_inc(&__chan->record_disabled);	\
			WARN_ON(1);	\
		}	\
		_____ret;	\
	})
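
/*
 * Hypothetical usage sketch: the caller passes the channel (or its
 * backend) together with the invariant being checked, e.g.:
 *
 *	if (CHAN_WARN_ON(chan, offset >= chan->backend.buf_size))
 *		return;
 *
 * When the condition is true, record_disabled is incremented so that
 * further tracing into the channel is dropped, and WARN_ON() reports it.
 */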

#endif /* _LTTNG_RING_BUFFER_FRONTEND_TYPES_H */