/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
 *
 * lib/ringbuffer/frontend_types.h
 *
 * Ring Buffer Library Synchronization Header (types).
 *
 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * See ring_buffer_frontend.c for more information on wait-free algorithms.
 */

#ifndef _LIB_RING_BUFFER_FRONTEND_TYPES_H
#define _LIB_RING_BUFFER_FRONTEND_TYPES_H

#include <linux/kref.h>
#include <include/ringbuffer/config.h>
#include <include/ringbuffer/backend_types.h>
#include <lib/prio_heap/lttng_prio_heap.h>	/* For per-CPU read-side iterator */
#include <lttng-cpuhotplug.h>

/*
 * A switch is done during tracing or as a final flush after tracing (so it
 * won't write in the new sub-buffer).
 */
enum switch_mode { SWITCH_ACTIVE, SWITCH_FLUSH };
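
/*
 * Usage sketch (illustrative, not part of this header): SWITCH_ACTIVE is
 * the mode for a sub-buffer switch performed while tracing continues;
 * SWITCH_FLUSH is the final flush, after which nothing is written in the
 * new sub-buffer. Assuming the frontend switch entry point takes this
 * mode (its exact declaration lives in the frontend headers), a final
 * flush at teardown would look roughly like:
 *
 *	lib_ring_buffer_switch_slow(buf, SWITCH_FLUSH);
 */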

/* channel-level read-side iterator */
struct channel_iter {
	/* Prio heap of buffers. Lowest timestamps at the top. */
	struct lttng_ptr_heap heap;	/* Heap of struct lib_ring_buffer ptrs */
	struct list_head empty_head;	/* Empty buffers linked-list head */
	int read_open;			/* Opened for reading ? */
	u64 last_qs;			/* Last quiescent state timestamp */
	u64 last_timestamp;		/* Last timestamp (for WARN_ON) */
	int last_cpu;			/* Last timestamp cpu */
	/*
	 * read() file operation state.
	 */
	unsigned long len_left;
};

/* channel: collection of per-cpu ring buffers. */
struct channel {
	atomic_t record_disabled;
	unsigned long commit_count_mask;	/*
						 * Commit count mask, removing
						 * the MSBs corresponding to
						 * bits used to represent the
						 * subbuffer index.
						 */

	struct channel_backend backend;		/* Associated backend */

	unsigned long switch_timer_interval;	/* Buffer flush (jiffies) */
	unsigned long read_timer_interval;	/* Reader wakeup (jiffies) */
	struct lttng_cpuhp_node cpuhp_prepare;
	struct lttng_cpuhp_node cpuhp_online;
	struct lttng_cpuhp_node cpuhp_iter_online;
	struct notifier_block tick_nohz_notifier;	/* CPU nohz notifier */
	wait_queue_head_t read_wait;		/* reader wait queue */
	wait_queue_head_t hp_wait;		/* CPU hotplug wait queue */
	int finalized;				/* Has channel been finalized */
	struct channel_iter iter;		/* Channel read-side iterator */
	struct kref ref;			/* Reference count */
};

/* Per-subbuffer commit counters used on the hot path */
struct commit_counters_hot {
	union v_atomic cc;		/* Commit counter */
	union v_atomic seq;		/* Consecutive commits */
};

/* Per-subbuffer commit counters used only on cold paths */
struct commit_counters_cold {
	union v_atomic cc_sb;		/* Incremented _once_ at sb switch */
};

/* Per-buffer read iterator */
struct lib_ring_buffer_iter {
	u64 timestamp;			/* Current record timestamp */
	size_t header_len;		/* Current record header length */
	size_t payload_len;		/* Current record payload length */
	struct list_head empty_node;	/* Linked list of empty buffers */
	unsigned long consumed, read_offset, data_size;
	unsigned int allocated:1;
	unsigned int read_open:1;	/* Opened for reading ? */
};

/* ring buffer state */
struct lib_ring_buffer {
	/* First 32 bytes cache-hot cacheline */
	union v_atomic offset;		/* Current offset in the buffer */
	struct commit_counters_hot *commit_hot;
					/* Commit count per sub-buffer */
	atomic_long_t consumed;		/*
					 * Current offset in the buffer
					 * standard atomic access (shared)
					 */
	atomic_t record_disabled;
	/* End of first 32 bytes cacheline */
	union v_atomic last_tsc;	/*
					 * Last timestamp written in the buffer.
					 */

	struct lib_ring_buffer_backend backend;	/* Associated backend */
	struct commit_counters_cold *commit_cold;
					/* Commit count per sub-buffer */
	u64 *ts_end;			/*
					 * timestamp_end per sub-buffer.
					 * Time is sampled by the
					 * switch_*_end() callbacks which
					 * are the last space reservation
					 * performed in the sub-buffer
					 * before it can be fully
					 * committed and delivered. This
					 * time value is then read by
					 * the deliver callback,
					 * performed by the last commit
					 * before the buffer becomes
					 * readable.
					 */
	atomic_long_t active_readers;	/*
					 * Active readers count
					 * standard atomic access (shared)
					 */
					/* Dropped records */
	union v_atomic records_lost_full;	/* Buffer full */
	union v_atomic records_lost_wrap;	/* Nested wrap-around */
	union v_atomic records_lost_big;	/* Events too big */
	union v_atomic records_count;		/* Number of records written */
	union v_atomic records_overrun;		/* Number of overwritten records */
	wait_queue_head_t read_wait;		/* reader buffer-level wait queue */
	wait_queue_head_t write_wait;		/* writer buffer-level wait queue (for metadata only) */
	int finalized;				/* buffer has been finalized */
	struct timer_list switch_timer;		/* timer for periodical switch */
	struct timer_list read_timer;		/* timer for read poll */
	raw_spinlock_t raw_tick_nohz_spinlock;	/* nohz entry lock/trylock */
	struct lib_ring_buffer_iter iter;	/* read-side iterator */
	unsigned long get_subbuf_consumed;	/* Read-side consumed */
	unsigned long prod_snapshot;		/* Producer count snapshot */
	unsigned long cons_snapshot;		/* Consumer count snapshot */
	unsigned int get_subbuf:1,		/* Sub-buffer being held by reader */
		switch_timer_enabled:1,		/* Protected by ring_buffer_nohz_lock */
		read_timer_enabled:1,		/* Protected by ring_buffer_nohz_lock */
		quiescent:1;
};

static inline
void *channel_get_private(struct channel *chan)
{
	return chan->backend.priv;
}
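
/*
 * Usage sketch (illustrative only): a client that stored its own state in
 * the channel backend's priv pointer at channel creation can get it back
 * from any code path holding the channel. The my_client_state type here is
 * hypothetical, not something defined by this library:
 *
 *	struct my_client_state *state = channel_get_private(chan);
 */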

void lib_ring_buffer_lost_event_too_big(struct channel *chan);

/*
 * Issue warnings and disable channels upon internal error.
 * Can receive struct lib_ring_buffer or struct lib_ring_buffer_backend
 * parameters.
 */
#define CHAN_WARN_ON(c, cond)						\
	({								\
		struct channel *__chan;					\
		int _____ret = unlikely(cond);				\
		if (_____ret) {						\
			if (__same_type(*(c), struct channel_backend))	\
				__chan = container_of((void *) (c),	\
						struct channel,		\
						backend);		\
			else if (__same_type(*(c), struct channel))	\
				__chan = (void *) (c);			\
			else						\
				BUG_ON(1);				\
			atomic_inc(&__chan->record_disabled);		\
			WARN_ON(1);					\
		}							\
		_____ret;						\
	})
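
/*
 * Usage sketch (illustrative only): on an internal consistency failure the
 * macro warns, disables tracing on the channel by incrementing
 * record_disabled, and evaluates to the (nonzero) condition so callers can
 * bail out. Assuming a backend field such as buf_size (see the backend
 * types header), a check could look roughly like:
 *
 *	if (CHAN_WARN_ON(chan, offset > chan->backend.buf_size))
 *		return;
 */
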
#endif /* _LIB_RING_BUFFER_FRONTEND_TYPES_H */