/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
 *
 * lib/ringbuffer/frontend_types.h
 *
 * Ring Buffer Library Synchronization Header (types).
 *
 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * See ring_buffer_frontend.c for more information on wait-free algorithms.
 */

#ifndef _LIB_RING_BUFFER_FRONTEND_TYPES_H
#define _LIB_RING_BUFFER_FRONTEND_TYPES_H

#include <linux/kref.h>
#include <include/ringbuffer/config.h>
#include <include/ringbuffer/backend_types.h>
#include <lib/prio_heap/lttng_prio_heap.h>	/* For per-CPU read-side iterator */
#include <lttng-cpuhotplug.h>

/*
 * A switch is done during tracing or as a final flush after tracing (so it
 * won't write in the new sub-buffer).
 */
enum switch_mode { SWITCH_ACTIVE, SWITCH_FLUSH };
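
/*
 * Illustrative sketch (not part of the original header; the helper name
 * is hypothetical): per the comment above, a writer requests SWITCH_ACTIVE
 * while tracing is still recording, and SWITCH_FLUSH at teardown so the
 * final, partially filled sub-buffer is pushed out to readers rather than
 * written into again.
 */
static inline
enum switch_mode example_choose_switch_mode(int tracing_stopped)
{
	return tracing_stopped ? SWITCH_FLUSH : SWITCH_ACTIVE;
}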

/* channel-level read-side iterator */
struct channel_iter {
	/* Prio heap of buffers. Lowest timestamps at the top. */
	struct lttng_ptr_heap heap;	/* Heap of struct lib_ring_buffer ptrs */
	struct list_head empty_head;	/* Empty buffers linked-list head */
	int read_open;			/* Opened for reading? */
	u64 last_qs;			/* Last quiescent state timestamp */
	u64 last_timestamp;		/* Last timestamp (for WARN_ON) */
	int last_cpu;			/* Last timestamp cpu */
	/*
	 * read() file operation state.
	 */
	unsigned long len_left;
};

/* channel: collection of per-cpu ring buffers. */
struct channel {
	atomic_t record_disabled;
	unsigned long commit_count_mask;	/*
						 * Commit count mask, removing
						 * the MSBs corresponding to
						 * bits used to represent the
						 * subbuffer index.
						 */

	struct channel_backend backend;		/* Associated backend */

	unsigned long switch_timer_interval;	/* Buffer flush (jiffies) */
	unsigned long read_timer_interval;	/* Reader wakeup (jiffies) */
	struct lttng_cpuhp_node cpuhp_prepare;
	struct lttng_cpuhp_node cpuhp_online;
	struct lttng_cpuhp_node cpuhp_iter_online;
	struct notifier_block tick_nohz_notifier;	/* CPU nohz notifier */
	wait_queue_head_t read_wait;		/* reader wait queue */
	wait_queue_head_t hp_wait;		/* CPU hotplug wait queue */
	int finalized;				/* Has channel been finalized */
	struct channel_iter iter;		/* Channel read-side iterator */
	struct kref ref;			/* Reference count */
};
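
/*
 * Illustrative sketch (not part of the original header; the helper name
 * is hypothetical): with 2^order sub-buffers per buffer, the `order`
 * most significant bits of a commit counter encode the sub-buffer index,
 * so the mask that keeps only the commit count bits can be derived from
 * the sub-buffer count order as below.
 */
static inline
unsigned long example_commit_count_mask(unsigned int num_subbuf_order)
{
	/* Clear the num_subbuf_order most significant bits. */
	return ~0UL >> num_subbuf_order;
}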

/* Per-subbuffer commit counters used on the hot path */
struct commit_counters_hot {
	union v_atomic cc;		/* Commit counter */
	union v_atomic seq;		/* Consecutive commits */
};

/* Per-subbuffer commit counters used only on cold paths */
struct commit_counters_cold {
	union v_atomic cc_sb;		/* Incremented _once_ at sb switch */
};

/* Per-buffer read iterator */
struct lib_ring_buffer_iter {
	u64 timestamp;			/* Current record timestamp */
	size_t header_len;		/* Current record header length */
	size_t payload_len;		/* Current record payload length */

	struct list_head empty_node;	/* Linked list of empty buffers */
	unsigned long consumed, read_offset, data_size;
	enum {
		ITER_GET_SUBBUF = 0,
		ITER_TEST_RECORD,
		ITER_NEXT_RECORD,
		ITER_PUT_SUBBUF,
	} state;
	unsigned int allocated:1;
	unsigned int read_open:1;	/* Opened for reading? */
};

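/*
 * Illustrative sketch (not part of the original header; everything but
 * the ITER_* constants is hypothetical): the state field walks each
 * sub-buffer through a get/test/next/put cycle, which a read loop can
 * advance one step at a time.
 */
static inline
void example_iter_step(struct lib_ring_buffer_iter *iter)
{
	switch (iter->state) {
	case ITER_GET_SUBBUF:		/* Acquire the next sub-buffer. */
		iter->state = ITER_TEST_RECORD;
		break;
	case ITER_TEST_RECORD:		/* Probe for an available record. */
		iter->state = ITER_NEXT_RECORD;
		break;
	case ITER_NEXT_RECORD:		/* Consume records one by one. */
		iter->state = ITER_PUT_SUBBUF;
		break;
	case ITER_PUT_SUBBUF:		/* Release and restart the cycle. */
		iter->state = ITER_GET_SUBBUF;
		break;
	}
}
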
/* ring buffer state */
struct lib_ring_buffer {
	/* First 32 bytes cache-hot cacheline */
	union v_atomic offset;		/* Current offset in the buffer */
	struct commit_counters_hot *commit_hot;
					/* Commit count per sub-buffer */
	atomic_long_t consumed;		/*
					 * Current offset in the buffer
					 * standard atomic access (shared)
					 */
	atomic_t record_disabled;
	/* End of first 32 bytes cacheline */
	union v_atomic last_tsc;	/*
					 * Last timestamp written in the buffer.
					 */

	struct lib_ring_buffer_backend backend;	/* Associated backend */

	struct commit_counters_cold *commit_cold;
					/* Commit count per sub-buffer */
	u64 *ts_end;			/*
					 * timestamp_end per sub-buffer.
					 * Time is sampled by the
					 * switch_*_end() callbacks which
					 * are the last space reservation
					 * performed in the sub-buffer
					 * before it can be fully
					 * committed and delivered. This
					 * time value is then read by
					 * the deliver callback,
					 * performed by the last commit
					 * before the buffer becomes
					 * readable.
					 */
	atomic_long_t active_readers;	/*
					 * Active readers count
					 * standard atomic access (shared)
					 */
	/* Dropped records */
	union v_atomic records_lost_full;	/* Buffer full */
	union v_atomic records_lost_wrap;	/* Nested wrap-around */
	union v_atomic records_lost_big;	/* Events too big */
	union v_atomic records_count;	/* Number of records written */
	union v_atomic records_overrun;	/* Number of overwritten records */
	wait_queue_head_t read_wait;	/* reader buffer-level wait queue */
	wait_queue_head_t write_wait;	/* writer buffer-level wait queue (for metadata only) */
	int finalized;			/* buffer has been finalized */
	struct timer_list switch_timer;	/* timer for periodic switch */
	struct timer_list read_timer;	/* timer for read poll */
	raw_spinlock_t raw_tick_nohz_spinlock;	/* nohz entry lock/trylock */
	struct lib_ring_buffer_iter iter;	/* read-side iterator */
	unsigned long get_subbuf_consumed;	/* Read-side consumed */
	unsigned long prod_snapshot;	/* Producer count snapshot */
	unsigned long cons_snapshot;	/* Consumer count snapshot */
	unsigned int get_subbuf:1,	/* Sub-buffer being held by reader */
		switch_timer_enabled:1,	/* Protected by ring_buffer_nohz_lock */
		read_timer_enabled:1,	/* Protected by ring_buffer_nohz_lock */
		quiescent:1;
};

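/*
 * Illustrative sketch (not part of the original header; the helper name
 * is hypothetical, and v_read() is assumed to be the ring buffer's
 * vatomic accessor): the three records_lost_* counters split dropped
 * records by cause, so a total drop count for a buffer is their sum.
 */
static inline
unsigned long example_records_lost_total(const struct lib_ring_buffer_config *config,
					 struct lib_ring_buffer *buf)
{
	return (unsigned long) (v_read(config, &buf->records_lost_full)
			+ v_read(config, &buf->records_lost_wrap)
			+ v_read(config, &buf->records_lost_big));
}
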
static inline
void *channel_get_private(struct channel *chan)
{
	return chan->backend.priv;
}

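/*
 * Illustrative usage sketch (not part of the original header): the
 * private pointer returned above is whatever the client stored in
 * backend.priv at channel creation time. `struct example_client_state`
 * is a hypothetical client-side type.
 */
struct example_client_state;

static inline
struct example_client_state *example_client_state_of(struct channel *chan)
{
	return (struct example_client_state *) channel_get_private(chan);
}
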
void lib_ring_buffer_lost_event_too_big(struct channel *chan);

/*
 * Issue warnings and disable channels upon internal error.
 * Can receive struct channel or struct channel_backend parameters
 * (checked with __same_type() below).
 */
#define CHAN_WARN_ON(c, cond)						\
	({								\
		struct channel *__chan;					\
		int _____ret = unlikely(cond);				\
		if (_____ret) {						\
			if (__same_type(*(c), struct channel_backend))	\
				__chan = container_of((void *) (c),	\
						      struct channel,	\
						      backend);		\
			else if (__same_type(*(c), struct channel))	\
				__chan = (void *) (c);			\
			else						\
				BUG_ON(1);				\
			atomic_inc(&__chan->record_disabled);		\
			WARN_ON(1);					\
		}							\
		_____ret;						\
	})

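/*
 * Illustrative usage sketch (not part of the original header; the
 * function and parameter names are hypothetical): CHAN_WARN_ON()
 * evaluates to the (unlikely) condition, so a caller can both disable
 * the channel on an internal inconsistency and bail out in one test.
 */
static inline
void example_validate_offset(struct channel *chan, unsigned long offset,
			     unsigned long bound)
{
	if (CHAN_WARN_ON(chan, offset >= bound))
		return;	/* Channel is now disabled; drop the operation. */
	/* ... proceed knowing offset < bound ... */
}
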
#endif /* _LIB_RING_BUFFER_FRONTEND_TYPES_H */