Commit | Line | Data |
---|---|---|
b7cdc182 | 1 | /* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only) |
9f36eaed | 2 | * |
24591303 | 3 | * ringbuffer/frontend_types.h |
f3bc08c5 MD |
4 | * |
5 | * Ring Buffer Library Synchronization Header (types). | |
6 | * | |
886d51a3 MD |
7 | * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com> |
8 | * | |
f3bc08c5 | 9 | * See ring_buffer_frontend.c for more information on wait-free algorithms. |
f3bc08c5 MD |
10 | */ |
11 | ||
9f36eaed MJ |
12 | #ifndef _LIB_RING_BUFFER_FRONTEND_TYPES_H |
13 | #define _LIB_RING_BUFFER_FRONTEND_TYPES_H | |
14 | ||
f40270ad | 15 | #include <linux/kref.h> |
fbd4d558 | 16 | #include <linux/irq_work.h> |
24591303 MD |
17 | #include <ringbuffer/config.h> |
18 | #include <ringbuffer/backend_types.h> | |
a071f25d | 19 | #include <lttng/prio_heap.h> /* For per-CPU read-side iterator */ |
2df37e95 | 20 | #include <lttng/cpuhotplug.h> |
f3bc08c5 MD |
21 | |
22 | /* | |
23 | * A switch is done during tracing or as a final flush after tracing (so it | |
24 | * won't write in the new sub-buffer). | |
25 | */ | |
26 | enum switch_mode { SWITCH_ACTIVE, SWITCH_FLUSH }; | |
27 | ||
/*
 * Channel-level read-side iterator: merges the per-CPU buffer iterators,
 * always delivering the record with the lowest timestamp next.
 */
struct channel_iter {
	/* Prio heap of buffers. Lowest timestamps at the top. */
	struct lttng_ptr_heap heap;	/* Heap of struct lttng_kernel_ring_buffer ptrs */
	struct list_head empty_head;	/* Empty buffers linked-list head */
	int read_open;			/* Opened for reading ? */
	u64 last_qs;			/* Last quiescent state timestamp */
	u64 last_timestamp;		/* Last timestamp (for WARN_ON) */
	int last_cpu;			/* Last timestamp cpu */
	/*
	 * read() file operation state: bytes remaining from the current
	 * record that were not consumed by the previous read() call.
	 */
	unsigned long len_left;
};
42 | ||
/*
 * Channel: collection of per-cpu ring buffers.
 * Field order is deliberate; do not reorder without checking cache/ABI
 * impact against the rest of the ring buffer library.
 */
struct lttng_kernel_ring_buffer_channel {
	atomic_t record_disabled;	/* Non-zero: drop new records channel-wide */
	unsigned long commit_count_mask;	/*
						 * Commit count mask, removing
						 * the MSBs corresponding to
						 * bits used to represent the
						 * subbuffer index.
						 */

	struct channel_backend backend;	/* Associated backend */

	unsigned long switch_timer_interval;	/* Buffer flush (jiffies) */
	unsigned long read_timer_interval;	/* Reader wakeup (jiffies) */
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
	/* Modern CPU hotplug state-machine nodes (>= 4.10). */
	struct lttng_cpuhp_node cpuhp_prepare;
	struct lttng_cpuhp_node cpuhp_online;
	struct lttng_cpuhp_node cpuhp_iter_online;
#else
	/* Legacy notifier-based CPU hotplug (< 4.10). */
	struct notifier_block cpu_hp_notifier;	/* CPU hotplug notifier */
	struct notifier_block hp_iter_notifier;	/* hotplug iterator notifier */
	unsigned int cpu_hp_enable:1;		/* Enable CPU hotplug notif. */
	unsigned int hp_iter_enable:1;		/* Enable hp iter notif. */
#endif
	struct notifier_block tick_nohz_notifier;	/* CPU nohz notifier */
	wait_queue_head_t read_wait;		/* reader wait queue */
	wait_queue_head_t hp_wait;		/* CPU hotplug wait queue */
	struct irq_work wakeup_pending;		/* Pending wakeup irq work */
	int finalized;				/* Has channel been finalized */
	struct channel_iter iter;		/* Channel read-side iterator */
	struct kref ref;			/* Reference count */
};
75 | ||
/*
 * Per-subbuffer commit counters used on the hot (space reservation/commit)
 * path. Kept separate from the cold counters to avoid false sharing.
 */
struct commit_counters_hot {
	union v_atomic cc;		/* Commit counter */
	union v_atomic seq;		/* Consecutive commits */
};
81 | ||
/* Per-subbuffer commit counters used only on cold paths */
struct commit_counters_cold {
	union v_atomic cc_sb;		/* Incremented _once_ at sb switch */
};
86 | ||
/*
 * Per-buffer read iterator. Walks records of one ring buffer in order,
 * driving the get-subbuf / test / next / put-subbuf state machine below.
 */
struct lttng_kernel_ring_buffer_iter {
	u64 timestamp;			/* Current record timestamp */
	size_t header_len;		/* Current record header length */
	size_t payload_len;		/* Current record payload length */

	struct list_head empty_node;	/* Linked list of empty buffers */
	unsigned long consumed, read_offset, data_size;
	enum {
		ITER_GET_SUBBUF = 0,	/* Need to acquire a sub-buffer */
		ITER_TEST_RECORD,	/* Check whether a record is available */
		ITER_NEXT_RECORD,	/* Advance to the following record */
		ITER_PUT_SUBBUF,	/* Release the exhausted sub-buffer */
	} state;
	unsigned int allocated:1;	/* Iterator resources allocated ? */
	unsigned int read_open:1;	/* Opened for reading ? */
};
104 | ||
/*
 * Ring buffer state (one instance per CPU per channel).
 * The leading fields are deliberately grouped into the first cache line of
 * the structure for the hot write path; do not reorder.
 */
struct lttng_kernel_ring_buffer {
	/* First 32 bytes cache-hot cacheline */
	union v_atomic offset;		/* Current offset in the buffer */
	struct commit_counters_hot *commit_hot;
					/* Commit count per sub-buffer */
	atomic_long_t consumed;		/*
					 * Current offset in the buffer
					 * standard atomic access (shared)
					 */
	atomic_t record_disabled;	/* Non-zero: drop new records in this buffer */
	/* End of first 32 bytes cacheline */
	union v_atomic last_tsc;	/*
					 * Last timestamp written in the buffer.
					 */

	struct lttng_kernel_ring_buffer_backend backend;	/* Associated backend */

	struct commit_counters_cold *commit_cold;
					/* Commit count per sub-buffer */
	u64 *ts_end;			/*
					 * timestamp_end per sub-buffer.
					 * Time is sampled by the
					 * switch_*_end() callbacks which
					 * are the last space reservation
					 * performed in the sub-buffer
					 * before it can be fully
					 * committed and delivered. This
					 * time value is then read by
					 * the deliver callback,
					 * performed by the last commit
					 * before the buffer becomes
					 * readable.
					 */
	atomic_long_t active_readers;	/*
					 * Active readers count
					 * standard atomic access (shared)
					 */
	/* Dropped records */
	union v_atomic records_lost_full;	/* Buffer full */
	union v_atomic records_lost_wrap;	/* Nested wrap-around */
	union v_atomic records_lost_big;	/* Events too big */
	union v_atomic records_count;	/* Number of records written */
	union v_atomic records_overrun;	/* Number of overwritten records */
	wait_queue_head_t read_wait;	/* reader buffer-level wait queue */
	wait_queue_head_t write_wait;	/* writer buffer-level wait queue (for metadata only) */
	struct irq_work wakeup_pending;	/* Pending wakeup irq work */
	int finalized;			/* buffer has been finalized */
	struct timer_list switch_timer;	/* timer for periodical switch */
	struct timer_list read_timer;	/* timer for read poll */
	raw_spinlock_t raw_tick_nohz_spinlock;	/* nohz entry lock/trylock */
	struct lttng_kernel_ring_buffer_iter iter;	/* read-side iterator */
	unsigned long get_subbuf_consumed;	/* Read-side consumed */
	unsigned long prod_snapshot;	/* Producer count snapshot */
	unsigned long cons_snapshot;	/* Consumer count snapshot */
	unsigned int get_subbuf:1,	/* Sub-buffer being held by reader */
		switch_timer_enabled:1,	/* Protected by ring_buffer_nohz_lock */
		read_timer_enabled:1,	/* Protected by ring_buffer_nohz_lock */
		quiescent:1;		/* Buffer quiescent (flushed, timers off) — NOTE(review): confirm exact semantics in frontend.c */
};
165 | ||
9115fbdc | 166 | static inline |
860c213b | 167 | void *channel_get_private(struct lttng_kernel_ring_buffer_channel *chan) |
9115fbdc MD |
168 | { |
169 | return chan->backend.priv; | |
170 | } | |
171 | ||
860c213b | 172 | void lib_ring_buffer_lost_event_too_big(struct lttng_kernel_ring_buffer_channel *chan); |
d7e74017 | 173 | |
f3bc08c5 MD |
/*
 * Issue warnings and disable channels upon internal error.
 * Can receive struct lttng_kernel_ring_buffer_channel or
 * struct channel_backend parameters: the type is dispatched at compile time
 * with __same_type(); any other pointer type hits BUG_ON().
 * On error, the whole channel is disabled (record_disabled) and a single
 * WARN_ON fires. Evaluates to the (boolean) result of `cond`.
 * NOTE(review): `_____ret` is a reserved-looking identifier, kept as-is to
 * match the existing macro-hygiene convention of this library.
 */
#define CHAN_WARN_ON(c, cond)						\
	({								\
		struct lttng_kernel_ring_buffer_channel *__chan;	\
		int _____ret = unlikely(cond);				\
		if (_____ret) {						\
			if (__same_type(*(c), struct channel_backend))	\
				__chan = container_of((void *) (c),	\
					struct lttng_kernel_ring_buffer_channel, \
					backend);			\
			else if (__same_type(*(c), struct lttng_kernel_ring_buffer_channel)) \
				__chan = (void *) (c);			\
			else						\
				BUG_ON(1);				\
			atomic_inc(&__chan->record_disabled);		\
			WARN_ON(1);					\
		}							\
		_____ret;						\
	})
197 | ||
886d51a3 | 198 | #endif /* _LIB_RING_BUFFER_FRONTEND_TYPES_H */ |