/*
 * SPDX-License-Identifier: LGPL-2.1-only
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Ring Buffer Library Synchronization Header (types).
 *
 * See ring_buffer_frontend.c for more information on wait-free algorithms.
 */

#ifndef _LTTNG_RING_BUFFER_FRONTEND_TYPES_H
#define _LTTNG_RING_BUFFER_FRONTEND_TYPES_H

#include <stdint.h>
#include <string.h>
#include <time.h>	/* for timer_t */

#include <urcu/list.h>
#include <urcu/uatomic.h>

#include <lttng/ust-ringbuffer-context.h>
#include "ringbuffer-config.h"
#include "common/logging.h"
#include "backend_types.h"
#include "shm_internal.h"
#include "shm_types.h"
#include "vatomic.h"

#define LIB_RING_BUFFER_MAX_NESTING	5

/*
 * A switch is done either during tracing (SWITCH_ACTIVE) or as a final
 * flush after tracing (SWITCH_FLUSH, which must not write into the new
 * sub-buffer).
 */
enum switch_mode { SWITCH_ACTIVE, SWITCH_FLUSH };

/* channel: collection of per-cpu ring buffers. */
#define RB_CHANNEL_PADDING	32
struct lttng_ust_ring_buffer_channel {
	int record_disabled;
	unsigned long commit_count_mask;	/*
						 * Commit count mask, removing
						 * the MSBs corresponding to
						 * bits used to represent the
						 * subbuffer index.
						 */

	unsigned long switch_timer_interval;	/* Buffer flush (us) */
	timer_t switch_timer;
	int switch_timer_enabled;

	unsigned long read_timer_interval;	/* Reader wakeup (us) */
	timer_t read_timer;
	int read_timer_enabled;

	int finalized;				/* Has channel been finalized */
	size_t priv_data_offset;		/* Offset of private data channel config */
	unsigned int nr_streams;		/* Number of streams */
	struct lttng_ust_shm_handle *handle;
	/* Extended options. */
	union {
		struct {
			int32_t blocking_timeout_ms;
			void *priv;		/* Private data pointer. */
		} s;
		char padding[RB_CHANNEL_PADDING];
	} u;
	/*
	 * Associated backend contains a variable-length array. Needs to
	 * be last member.
	 */
	struct channel_backend backend;		/* Associated backend */
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));

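/*
 * Illustrative sketch (an assumption, not this library's code): a commit
 * count mask of the shape described above can be derived by dropping the
 * top bits that encode the sub-buffer index, given the log2 of the
 * sub-buffer count (mirroring the backend's num_subbuf_order field).
 */
static inline
unsigned long example_commit_count_mask(unsigned int num_subbuf_order)
{
	/* Keep only the counter bits below the sub-buffer index bits. */
	return ~0UL >> num_subbuf_order;
}
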
/* Per-subbuffer commit counters used on the hot path */
#define RB_COMMIT_COUNT_HOT_PADDING	16
struct commit_counters_hot {
	union v_atomic cc;		/* Commit counter */
	union v_atomic seq;		/* Consecutive commits */
	char padding[RB_COMMIT_COUNT_HOT_PADDING];
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));

/* Per-subbuffer commit counters used only on cold paths */
#define RB_COMMIT_COUNT_COLD_PADDING	24
struct commit_counters_cold {
	union v_atomic cc_sb;		/* Incremented _once_ at sb switch */
	char padding[RB_COMMIT_COUNT_COLD_PADDING];
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
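
/*
 * Illustrative compile-time check (not part of the original header): the
 * aligned attribute rounds each counters structure up to a whole cache
 * line, so entries of the per-sub-buffer hot and cold counter arrays
 * never share a cache line, separating writer and reader traffic.
 */
_Static_assert(sizeof(struct commit_counters_hot) % CAA_CACHE_LINE_SIZE == 0,
		"hot counters occupy whole cache lines");
_Static_assert(sizeof(struct commit_counters_cold) % CAA_CACHE_LINE_SIZE == 0,
		"cold counters occupy whole cache lines");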

/* ring buffer state */
#define RB_CRASH_DUMP_ABI_LEN		256
#define RB_RING_BUFFER_PADDING		60

#define RB_CRASH_DUMP_ABI_MAGIC_LEN	16

/*
 * The 128-bit magic number is xor'd in the process data so it does not
 * cause a false positive when searching for buffers by scanning memory.
 * The actual magic number is:
 *   0x17, 0x7B, 0xF1, 0x77, 0xBF, 0x17, 0x7B, 0xF1,
 *   0x77, 0xBF, 0x17, 0x7B, 0xF1, 0x77, 0xBF, 0x17,
 */
#define RB_CRASH_DUMP_ABI_MAGIC_XOR					\
	{								\
		0x17 ^ 0xFF, 0x7B ^ 0xFF, 0xF1 ^ 0xFF, 0x77 ^ 0xFF,	\
		0xBF ^ 0xFF, 0x17 ^ 0xFF, 0x7B ^ 0xFF, 0xF1 ^ 0xFF,	\
		0x77 ^ 0xFF, 0xBF ^ 0xFF, 0x17 ^ 0xFF, 0x7B ^ 0xFF,	\
		0xF1 ^ 0xFF, 0x77 ^ 0xFF, 0xBF ^ 0xFF, 0x17 ^ 0xFF,	\
	}
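
/*
 * Hypothetical helper (illustration only, not provided by this header):
 * recover the real magic number from the XOR'd constant when filling in a
 * crash ABI record, so the plain magic only ever appears inside live
 * buffers, never in the program image itself.
 */
static inline
void example_fill_magic(uint8_t magic[RB_CRASH_DUMP_ABI_MAGIC_LEN])
{
	static const uint8_t xored[RB_CRASH_DUMP_ABI_MAGIC_LEN] =
			RB_CRASH_DUMP_ABI_MAGIC_XOR;
	size_t i;

	for (i = 0; i < RB_CRASH_DUMP_ABI_MAGIC_LEN; i++)
		magic[i] = xored[i] ^ 0xFF;	/* Undo the 0xFF XOR. */
}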

#define RB_CRASH_ENDIAN			0x1234

#define RB_CRASH_DUMP_ABI_MAJOR		0
#define RB_CRASH_DUMP_ABI_MINOR		0

enum lttng_crash_type {
	LTTNG_CRASH_TYPE_UST = 0,
	LTTNG_CRASH_TYPE_KERNEL = 1,
};

struct lttng_crash_abi {
	uint8_t magic[RB_CRASH_DUMP_ABI_MAGIC_LEN];
	uint64_t mmap_length;	/* Overall length of crash record */
	uint16_t endian;	/*
				 * { 0x12, 0x34 }: big endian
				 * { 0x34, 0x12 }: little endian
				 */
	uint16_t major;		/* Major number. */
	uint16_t minor;		/* Minor number. */
	uint8_t word_size;	/* Word size (bytes). */
	uint8_t layout_type;	/* enum lttng_crash_type */

	struct {
		uint32_t prod_offset;
		uint32_t consumed_offset;
		uint32_t commit_hot_array;
		uint32_t commit_hot_seq;
		uint32_t buf_wsb_array;
		uint32_t buf_wsb_id;
		uint32_t sb_array;
		uint32_t sb_array_shmp_offset;
		uint32_t sb_backend_p_offset;
		uint32_t content_size;
		uint32_t packet_size;
	} __attribute__((packed)) offset;
	struct {
		uint8_t prod_offset;
		uint8_t consumed_offset;
		uint8_t commit_hot_seq;
		uint8_t buf_wsb_id;
		uint8_t sb_array_shmp_offset;
		uint8_t sb_backend_p_offset;
		uint8_t content_size;
		uint8_t packet_size;
	} __attribute__((packed)) length;
	struct {
		uint32_t commit_hot_array;
		uint32_t buf_wsb_array;
		uint32_t sb_array;
	} __attribute__((packed)) stride;

	uint64_t buf_size;	/* Size of the buffer */
	uint64_t subbuf_size;	/* Sub-buffer size */
	uint64_t num_subbuf;	/* Number of sub-buffers for writer */
	uint32_t mode;		/* Buffer mode: 0: overwrite, 1: discard */
} __attribute__((packed));
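
/*
 * Illustrative reader-side sketch (an assumption about how a post-mortem
 * tool could consume this ABI, not actual viewer code): fields are found
 * through the self-describing offset/length tables, so readers tolerate
 * layout growth in future minor versions.  A real reader would also
 * honour the endian and word_size fields before interpreting values.
 */
static inline
uint64_t example_read_prod_offset(const char *mapped_buf)
{
	const struct lttng_crash_abi *abi =
			(const struct lttng_crash_abi *) mapped_buf;
	uint64_t v = 0;

	memcpy(&v, mapped_buf + abi->offset.prod_offset,
			abi->length.prod_offset);	/* Recorded field size. */
	return v;
}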

struct lttng_ust_ring_buffer {
	/* First 32 bytes are for the buffer crash dump ABI */
	struct lttng_crash_abi crash_abi;

	/* 32 bytes cache-hot cacheline */
	union v_atomic __attribute__((aligned(32))) offset;
					/* Current offset in the buffer */
	DECLARE_SHMP(struct commit_counters_hot, commit_hot);
					/* Commit count per sub-buffer */
	long consumed;			/*
					 * Current offset in the buffer
					 * standard atomic access (shared)
					 */
	int record_disabled;
	/* End of cache-hot 32 bytes cacheline */

	union v_atomic last_tsc;	/*
					 * Last timestamp written in the buffer.
					 */

	struct lttng_ust_ring_buffer_backend backend;
					/* Associated backend */

	DECLARE_SHMP(struct commit_counters_cold, commit_cold);
					/* Commit count per sub-buffer */
	DECLARE_SHMP(uint64_t, ts_end);	/*
					 * timestamp_end per sub-buffer.
					 * Time is sampled by the
					 * switch_*_end() callbacks
					 * which are the last space
					 * reservation performed in the
					 * sub-buffer before it can be
					 * fully committed and
					 * delivered. This time value is
					 * then read by the deliver
					 * callback, performed by the
					 * last commit before the buffer
					 * becomes readable.
					 */
	long active_readers;		/*
					 * Active readers count
					 * standard atomic access (shared)
					 */
	/* Dropped records */
	union v_atomic records_lost_full;	/* Buffer full */
	union v_atomic records_lost_wrap;	/* Nested wrap-around */
	union v_atomic records_lost_big;	/* Events too big */
	union v_atomic records_count;	/* Number of records written */
	union v_atomic records_overrun;	/* Number of overwritten records */
	//wait_queue_head_t read_wait;	/* reader buffer-level wait queue */
	int finalized;			/* buffer has been finalized */
	unsigned long get_subbuf_consumed;	/* Read-side consumed */
	unsigned long prod_snapshot;	/* Producer count snapshot */
	unsigned long cons_snapshot;	/* Consumer count snapshot */
	unsigned int get_subbuf:1;	/* Sub-buffer being held by reader */
	/* shmp pointer to self */
	DECLARE_SHMP(struct lttng_ust_ring_buffer, self);
	char padding[RB_RING_BUFFER_PADDING];
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
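
/*
 * Illustrative accessor (an assumption mirroring the v_read() accessor of
 * the vatomic layer, not a function this header provides): total records
 * dropped on a buffer is the sum of the three loss counters above.
 */
static inline
long example_records_lost(const struct lttng_ust_ring_buffer_config *config,
		struct lttng_ust_ring_buffer *buf)
{
	return v_read(config, &buf->records_lost_full)
		+ v_read(config, &buf->records_lost_wrap)
		+ v_read(config, &buf->records_lost_big);
}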

/*
 * ring buffer private context
 *
 * Private context passed to lib_ring_buffer_reserve(), lib_ring_buffer_commit(),
 * lib_ring_buffer_try_discard_reserve(), lttng_ust_ring_buffer_align_ctx() and
 * lib_ring_buffer_write().
 *
 * This context is allocated on an internal shadow-stack by a successful reserve
 * operation, used by align/write, and freed by commit.
 */

struct lttng_ust_ring_buffer_ctx_private {
	/* input received by lib_ring_buffer_reserve(). */
	struct lttng_ust_ring_buffer_ctx *pub;
	struct lttng_ust_ring_buffer_channel *chan;	/* channel */

	/* output from lib_ring_buffer_reserve() */
	int reserve_cpu;		/* processor id updated by the reserve */
	size_t slot_size;		/* size of the reserved slot */
	unsigned long buf_offset;	/* offset following the record header */
	unsigned long pre_offset;	/*
					 * Initial offset position _before_
					 * the record is written. Positioned
					 * prior to record header alignment
					 * padding.
					 */
	uint64_t tsc;			/* time-stamp counter value */
	unsigned int rflags;		/* reservation flags */

	struct lttng_ust_ring_buffer *buf;	/*
					 * buffer corresponding to processor id
					 * for this channel
					 */
	struct lttng_ust_ring_buffer_backend_pages *backend_pages;
};
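
/*
 * Illustrative call sequence (a sketch only: the argument lists of the
 * frontend_api.h entry points named above are assumed, not quoted): a
 * successful reserve fills in this private context, align/write operate
 * on it, and commit releases it.
 *
 *	if (lib_ring_buffer_reserve(config, ctx, client_ctx) < 0)
 *		return;		// e.g. buffer full in discard mode
 *	lttng_ust_ring_buffer_align_ctx(ctx, __alignof__(uint64_t));
 *	lib_ring_buffer_write(config, ctx, &payload, sizeof(payload));
 *	lib_ring_buffer_commit(config, ctx);
 */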

static inline
void *channel_get_private_config(struct lttng_ust_ring_buffer_channel *chan)
{
	return ((char *) chan) + chan->priv_data_offset;
}

static inline
void *channel_get_private(struct lttng_ust_ring_buffer_channel *chan)
{
	return chan->u.s.priv;
}

static inline
void channel_set_private(struct lttng_ust_ring_buffer_channel *chan, void *priv)
{
	chan->u.s.priv = priv;
}
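
/*
 * Illustrative usage (hypothetical client type, not part of this API): a
 * client stashes its own state on the channel with channel_set_private()
 * at creation time, then retrieves it from callbacks.
 */
struct example_client_state {
	int events_enabled;
};

static inline
struct example_client_state *
example_get_state(struct lttng_ust_ring_buffer_channel *chan)
{
	return (struct example_client_state *) channel_get_private(chan);
}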

#ifndef __rb_same_type
#define __rb_same_type(a, b)	__builtin_types_compatible_p(typeof(a), typeof(b))
#endif
285
852c2936
MD
286/*
287 * Issue warnings and disable channels upon internal error.
b5457df5 288 * Can receive struct lttng_ust_ring_buffer or struct lttng_ust_ring_buffer_backend
852c2936
MD
289 * parameters.
290 */
291#define CHAN_WARN_ON(c, cond) \
292 ({ \
b5457df5 293 struct lttng_ust_ring_buffer_channel *__chan; \
b5a3dfa5 294 int _____ret = caa_unlikely(cond); \
852c2936 295 if (_____ret) { \
0d4aa2df 296 if (__rb_same_type(*(c), struct channel_backend)) \
14641deb 297 __chan = caa_container_of((void *) (c), \
b5457df5 298 struct lttng_ust_ring_buffer_channel, \
5198080d
MJ
299 backend); \
300 else if (__rb_same_type(*(c), \
b5457df5 301 struct lttng_ust_ring_buffer_channel)) \
852c2936
MD
302 __chan = (void *) (c); \
303 else \
304 BUG_ON(1); \
14641deb 305 uatomic_inc(&__chan->record_disabled); \
852c2936
MD
306 WARN_ON(1); \
307 } \
257ecc17 308 _____ret = _____ret; /* For clang "unused result". */ \
852c2936
MD
309 })
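
/*
 * Illustrative use (hypothetical bounds check, assuming the buf_size
 * field of struct channel_backend): on internal inconsistency, tracing on
 * the channel is disabled and a warning is emitted rather than corrupting
 * buffer contents.
 */
static inline
void example_check_offset(struct lttng_ust_ring_buffer_channel *chan,
		size_t offset)
{
	CHAN_WARN_ON(chan, offset >= chan->backend.buf_size);
}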

/**
 * lttng_ust_ring_buffer_align_ctx - Align context offset on "alignment"
 * @ctx: ring buffer context.
 * @alignment: alignment to apply to the context's buffer offset.
 */
static inline
void lttng_ust_ring_buffer_align_ctx(struct lttng_ust_ring_buffer_ctx *ctx,
		size_t alignment)
	lttng_ust_notrace;
static inline
void lttng_ust_ring_buffer_align_ctx(struct lttng_ust_ring_buffer_ctx *ctx,
		size_t alignment)
{
	struct lttng_ust_ring_buffer_ctx_private *ctx_private = ctx->priv;

	ctx_private->buf_offset += lttng_ust_ring_buffer_align(ctx_private->buf_offset,
						alignment);
}
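
/*
 * Worked example (illustrative): with ctx_private->buf_offset == 13,
 * aligning on __alignof__(uint64_t) (8 on typical 64-bit ABIs) adds
 * 3 bytes of padding, so buf_offset becomes 16 before the next field
 * is written:
 *
 *	lttng_ust_ring_buffer_align_ctx(ctx, __alignof__(uint64_t));
 */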

#endif /* _LTTNG_RING_BUFFER_FRONTEND_TYPES_H */