Move to kernel style SPDX license identifiers
[lttng-ust.git] / libringbuffer / frontend_types.h
1 /*
2 * SPDX-License-Identifier: LGPL-2.1-only
3 *
4 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 *
6 * Ring Buffer Library Synchronization Header (types).
7 *
8 * See ring_buffer_frontend.c for more information on wait-free algorithms.
9 */
10
11 #ifndef _LTTNG_RING_BUFFER_FRONTEND_TYPES_H
12 #define _LTTNG_RING_BUFFER_FRONTEND_TYPES_H
13
14 #include <stdint.h>
15 #include <string.h>
16 #include <time.h> /* for timer_t */
17
18 #include <urcu/list.h>
19 #include <urcu/uatomic.h>
20
21 #include <lttng/ringbuffer-config.h>
22 #include <usterr-signal-safe.h>
23 #include "backend_types.h"
24 #include "shm_internal.h"
25 #include "shm_types.h"
26 #include "vatomic.h"
27
28 /*
29 * A switch is done during tracing or as a final flush after tracing (so it
30 * won't write in the new sub-buffer).
31 */
32 enum switch_mode { SWITCH_ACTIVE, SWITCH_FLUSH };
33
/* channel: collection of per-cpu ring buffers. */
#define RB_CHANNEL_PADDING	32
struct channel {
	int record_disabled;		/* Non-zero disables recording; bumped
					 * atomically by CHAN_WARN_ON() on
					 * internal error. */
	unsigned long commit_count_mask;	/*
					 * Commit count mask, removing
					 * the MSBs corresponding to
					 * bits used to represent the
					 * subbuffer index.
					 */

	unsigned long switch_timer_interval;	/* Buffer flush (us) */
	timer_t switch_timer;
	int switch_timer_enabled;	/* Non-zero when switch_timer is armed */

	unsigned long read_timer_interval;	/* Reader wakeup (us) */
	timer_t read_timer;
	int read_timer_enabled;		/* Non-zero when read_timer is armed */

	int finalized;			/* Has channel been finalized */
	size_t priv_data_offset;	/* Byte offset of private data from the
					 * start of this struct; see
					 * channel_get_private(). */
	unsigned int nr_streams;	/* Number of streams */
	struct lttng_ust_shm_handle *handle;	/* Shared-memory handle */
	/* Extended options. */
	union {
		struct {
			int32_t blocking_timeout_ms;
		} s;
		char padding[RB_CHANNEL_PADDING];	/* Reserve room for future options */
	} u;
	/*
	 * Associated backend contains a variable-length array. Needs to
	 * be last member.
	 */
	struct channel_backend backend;	/* Associated backend */
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
70
/*
 * Per-subbuffer commit counters used on the hot path.
 * Cache-line aligned and padded so that per-subbuffer counters do not
 * share cache lines.
 */
#define RB_COMMIT_COUNT_HOT_PADDING	16
struct commit_counters_hot {
	union v_atomic cc;		/* Commit counter */
	union v_atomic seq;		/* Consecutive commits */
	char padding[RB_COMMIT_COUNT_HOT_PADDING];
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
78
/*
 * Per-subbuffer commit counters used only on cold paths.
 * Kept separate from the hot counters to avoid false sharing.
 */
#define RB_COMMIT_COUNT_COLD_PADDING	24
struct commit_counters_cold {
	union v_atomic cc_sb;		/* Incremented _once_ at sb switch */
	char padding[RB_COMMIT_COUNT_COLD_PADDING];
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
85
/* ring buffer state */
#define RB_CRASH_DUMP_ABI_LEN		256
#define RB_RING_BUFFER_PADDING		60

#define RB_CRASH_DUMP_ABI_MAGIC_LEN	16

/*
 * The 128-bit magic number is xor'd in the process data so it does not
 * cause a false positive when searching for buffers by scanning memory.
 * The actual magic number is:
 *   0x17, 0x7B, 0xF1, 0x77, 0xBF, 0x17, 0x7B, 0xF1,
 *   0x77, 0xBF, 0x17, 0x7B, 0xF1, 0x77, 0xBF, 0x17,
 */
#define RB_CRASH_DUMP_ABI_MAGIC_XOR					\
	{								\
		0x17 ^ 0xFF, 0x7B ^ 0xFF, 0xF1 ^ 0xFF, 0x77 ^ 0xFF,	\
		0xBF ^ 0xFF, 0x17 ^ 0xFF, 0x7B ^ 0xFF, 0xF1 ^ 0xFF,	\
		0x77 ^ 0xFF, 0xBF ^ 0xFF, 0x17 ^ 0xFF, 0x7B ^ 0xFF,	\
		0xF1 ^ 0xFF, 0x77 ^ 0xFF, 0xBF ^ 0xFF, 0x17 ^ 0xFF,	\
	}

/* Endianness marker: stored value reveals the writer's byte order. */
#define RB_CRASH_ENDIAN			0x1234

/* Crash dump ABI version, bumped on layout changes. */
#define RB_CRASH_DUMP_ABI_MAJOR		0
#define RB_CRASH_DUMP_ABI_MINOR		0
111
/* Identifies which tracer produced the crash dump layout. */
enum lttng_crash_type {
	LTTNG_CRASH_TYPE_UST = 0,	/* User-space tracer (lttng-ust) */
	LTTNG_CRASH_TYPE_KERNEL = 1,	/* Kernel tracer */
};
116
/*
 * Self-describing crash dump record placed at the start of each ring
 * buffer so post-mortem tools can locate and decode buffer contents by
 * scanning memory. Packed: field offsets are part of the ABI.
 */
struct lttng_crash_abi {
	uint8_t magic[RB_CRASH_DUMP_ABI_MAGIC_LEN];	/* Xor'd magic, see RB_CRASH_DUMP_ABI_MAGIC_XOR */
	uint64_t mmap_length;	/* Overall length of crash record */
	uint16_t endian;	/*
				 * { 0x12, 0x34 }: big endian
				 * { 0x34, 0x12 }: little endian
				 */
	uint16_t major;		/* Major number. */
	uint16_t minor;		/* Minor number. */
	uint8_t word_size;	/* Word size (bytes). */
	uint8_t layout_type;	/* enum lttng_crash_type */

	/* Byte offsets of key fields within the dumped structures. */
	struct {
		uint32_t prod_offset;
		uint32_t consumed_offset;
		uint32_t commit_hot_array;
		uint32_t commit_hot_seq;
		uint32_t buf_wsb_array;
		uint32_t buf_wsb_id;
		uint32_t sb_array;
		uint32_t sb_array_shmp_offset;
		uint32_t sb_backend_p_offset;
		uint32_t content_size;
		uint32_t packet_size;
	} __attribute__((packed)) offset;
	/* Sizes (bytes) of the fields referenced above. */
	struct {
		uint8_t prod_offset;
		uint8_t consumed_offset;
		uint8_t commit_hot_seq;
		uint8_t buf_wsb_id;
		uint8_t sb_array_shmp_offset;
		uint8_t sb_backend_p_offset;
		uint8_t content_size;
		uint8_t packet_size;
	} __attribute__((packed)) length;
	/* Array element strides (bytes) for the per-subbuffer arrays. */
	struct {
		uint32_t commit_hot_array;
		uint32_t buf_wsb_array;
		uint32_t sb_array;
	} __attribute__((packed)) stride;

	uint64_t buf_size;	/* Size of the buffer */
	uint64_t subbuf_size;	/* Sub-buffer size */
	uint64_t num_subbuf;	/* Number of sub-buffers for writer */
	uint32_t mode;		/* Buffer mode: 0: overwrite, 1: discard */
} __attribute__((packed));
163
/*
 * Per-buffer ring buffer state, shared between producer and consumer
 * through shared memory (hence the shmp references). Layout is
 * carefully ordered: crash ABI first, then a 32-byte cache-hot region.
 */
struct lttng_ust_lib_ring_buffer {
	/* First 32 bytes are for the buffer crash dump ABI */
	struct lttng_crash_abi crash_abi;

	/* 32 bytes cache-hot cacheline */
	union v_atomic __attribute__((aligned(32))) offset;
					/* Current offset in the buffer */
	DECLARE_SHMP(struct commit_counters_hot, commit_hot);
					/* Commit count per sub-buffer */
	long consumed;		/*
				 * Current offset in the buffer
				 * standard atomic access (shared)
				 */
	int record_disabled;	/* Non-zero disables recording in this buffer */
	/* End of cache-hot 32 bytes cacheline */

	union v_atomic last_tsc;	/*
					 * Last timestamp written in the buffer.
					 */

	struct lttng_ust_lib_ring_buffer_backend backend;
					/* Associated backend */

	DECLARE_SHMP(struct commit_counters_cold, commit_cold);
					/* Commit count per sub-buffer */
	DECLARE_SHMP(uint64_t, ts_end);	/*
					 * timestamp_end per sub-buffer.
					 * Time is sampled by the
					 * switch_*_end() callbacks
					 * which are the last space
					 * reservation performed in the
					 * sub-buffer before it can be
					 * fully committed and
					 * delivered. This time value is
					 * then read by the deliver
					 * callback, performed by the
					 * last commit before the buffer
					 * becomes readable.
					 */
	long active_readers;	/*
				 * Active readers count
				 * standard atomic access (shared)
				 */
	/* Dropped records */
	union v_atomic records_lost_full;	/* Buffer full */
	union v_atomic records_lost_wrap;	/* Nested wrap-around */
	union v_atomic records_lost_big;	/* Events too big */
	union v_atomic records_count;	/* Number of records written */
	union v_atomic records_overrun;	/* Number of overwritten records */
	//wait_queue_head_t read_wait;	/* reader buffer-level wait queue */
	int finalized;			/* buffer has been finalized */
	unsigned long get_subbuf_consumed;	/* Read-side consumed */
	unsigned long prod_snapshot;	/* Producer count snapshot */
	unsigned long cons_snapshot;	/* Consumer count snapshot */
	unsigned int get_subbuf:1;	/* Sub-buffer being held by reader */
	/* shmp pointer to self */
	DECLARE_SHMP(struct lttng_ust_lib_ring_buffer, self);
	char padding[RB_RING_BUFFER_PADDING];	/* Reserved for ABI evolution */
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
223
224 static inline
225 void *channel_get_private(struct channel *chan)
226 {
227 return ((char *) chan) + chan->priv_data_offset;
228 }
229
/*
 * Compile-time check that two expressions have compatible types
 * (evaluates to 1 when compatible, 0 otherwise). Used by CHAN_WARN_ON()
 * to dispatch on the argument's actual struct type.
 */
#ifndef __rb_same_type
#define __rb_same_type(a, b)	__builtin_types_compatible_p(typeof(a), typeof(b))
#endif
233
234 /*
235 * Issue warnings and disable channels upon internal error.
236 * Can receive struct lttng_ust_lib_ring_buffer or struct lttng_ust_lib_ring_buffer_backend
237 * parameters.
238 */
239 #define CHAN_WARN_ON(c, cond) \
240 ({ \
241 struct channel *__chan; \
242 int _____ret = caa_unlikely(cond); \
243 if (_____ret) { \
244 if (__rb_same_type(*(c), struct channel_backend)) \
245 __chan = caa_container_of((void *) (c), \
246 struct channel, \
247 backend); \
248 else if (__rb_same_type(*(c), struct channel)) \
249 __chan = (void *) (c); \
250 else \
251 BUG_ON(1); \
252 uatomic_inc(&__chan->record_disabled); \
253 WARN_ON(1); \
254 } \
255 _____ret = _____ret; /* For clang "unused result". */ \
256 })
257
258 #endif /* _LTTNG_RING_BUFFER_FRONTEND_TYPES_H */
This page took 0.034705 seconds and 4 git commands to generate.