| 1 | /* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only) |
| 2 | * |
| 3 | * ringbuffer/frontend_types.h |
| 4 | * |
| 5 | * Ring Buffer Library Synchronization Header (types). |
| 6 | * |
| 7 | * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com> |
| 8 | * |
| 9 | * See ring_buffer_frontend.c for more information on wait-free algorithms. |
| 10 | */ |
| 11 | |
| 12 | #ifndef _LIB_RING_BUFFER_FRONTEND_TYPES_H |
| 13 | #define _LIB_RING_BUFFER_FRONTEND_TYPES_H |
| 14 | |
| 15 | #include <linux/kref.h> |
| 16 | #include <linux/irq_work.h> |
| 17 | #include <ringbuffer/config.h> |
| 18 | #include <ringbuffer/backend_types.h> |
| 19 | #include <lttng/prio_heap.h> /* For per-CPU read-side iterator */ |
| 20 | #include <lttng/cpuhotplug.h> |
| 21 | |
| 22 | /* |
| 23 | * A switch is done during tracing or as a final flush after tracing (so it |
| 24 | * won't write in the new sub-buffer). |
| 25 | */ |
| 26 | enum switch_mode { SWITCH_ACTIVE, SWITCH_FLUSH }; |
| 27 | |
/*
 * channel_iter - channel-level read-side iterator state.
 *
 * Merges the channel's per-CPU buffers so records can be delivered to the
 * reader ordered by timestamp (lowest first, via the prio heap).
 */
struct channel_iter {
	/* Prio heap of buffers. Lowest timestamps at the top. */
	struct lttng_ptr_heap heap;	/* Heap of struct lttng_kernel_ring_buffer ptrs */
	struct list_head empty_head;	/* Empty buffers linked-list head */
	int read_open;			/* Opened for reading ? */
	u64 last_qs;			/* Last quiescent state timestamp */
	u64 last_timestamp;		/* Last timestamp (for WARN_ON) */
	int last_cpu;			/* Last timestamp cpu */
	/*
	 * read() file operation state: bytes still pending from the record
	 * currently being consumed.
	 */
	unsigned long len_left;
};
| 42 | |
/*
 * channel: collection of per-cpu ring buffers.
 *
 * Owns the backend (buffer memory), the CPU hotplug hooks that create and
 * tear down per-CPU buffers, the flush/reader timers, and the channel-wide
 * read-side iterator. Lifetime is managed through @ref.
 */
struct lttng_kernel_ring_buffer_channel {
	atomic_t record_disabled;		/* Non-zero stops record writes channel-wide */
	unsigned long commit_count_mask;	/*
						 * Commit count mask, removing
						 * the MSBs corresponding to
						 * bits used to represent the
						 * subbuffer index.
						 */

	struct channel_backend backend;		/* Associated backend */

	unsigned long switch_timer_interval;	/* Buffer flush (jiffies) */
	unsigned long read_timer_interval;	/* Reader wakeup (jiffies) */
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
	/* CPU hotplug state-machine nodes (kernels with cpuhp API) */
	struct lttng_cpuhp_node cpuhp_prepare;
	struct lttng_cpuhp_node cpuhp_online;
	struct lttng_cpuhp_node cpuhp_iter_online;
#else
	/* Legacy notifier-based CPU hotplug (pre-4.10 kernels) */
	struct notifier_block cpu_hp_notifier;	/* CPU hotplug notifier */
	struct notifier_block hp_iter_notifier;	/* hotplug iterator notifier */
	unsigned int cpu_hp_enable:1;		/* Enable CPU hotplug notif. */
	unsigned int hp_iter_enable:1;		/* Enable hp iter notif. */
#endif
	struct notifier_block tick_nohz_notifier; /* CPU nohz notifier */
	wait_queue_head_t read_wait;		/* reader wait queue */
	wait_queue_head_t hp_wait;		/* CPU hotplug wait queue */
	struct irq_work wakeup_pending;		/* Pending wakeup irq work */
	int finalized;				/* Has channel been finalized */
	struct channel_iter iter;		/* Channel read-side iterator */
	struct kref ref;			/* Reference count */
};
| 75 | |
/*
 * Per-subbuffer commit counters used on the hot path.
 * Kept separate from the cold counters so the write fast path only
 * touches this structure.
 */
struct commit_counters_hot {
	union v_atomic cc;		/* Commit counter */
	union v_atomic seq;		/* Consecutive commits */
};
| 81 | |
/* Per-subbuffer commit counters used only on cold paths */
struct commit_counters_cold {
	union v_atomic cc_sb;		/* Incremented _once_ at sb switch */
};
| 86 | |
/*
 * Per-buffer read iterator.
 *
 * Tracks the position of the reader within one ring buffer: the current
 * record's timestamp and lengths, consumed/read offsets, and a small state
 * machine (@state) driving get-subbuffer / read-record / put-subbuffer
 * progression.
 */
struct lttng_kernel_ring_buffer_iter {
	u64 timestamp;			/* Current record timestamp */
	size_t header_len;		/* Current record header length */
	size_t payload_len;		/* Current record payload length */

	struct list_head empty_node;	/* Linked list of empty buffers */
	unsigned long consumed, read_offset, data_size;
	enum {
		ITER_GET_SUBBUF = 0,	/* Need to acquire a sub-buffer */
		ITER_TEST_RECORD,	/* Check whether a record is available */
		ITER_NEXT_RECORD,	/* Advance to the next record */
		ITER_PUT_SUBBUF,	/* Release the current sub-buffer */
	} state;
	unsigned int allocated:1;	/* Iterator has been initialized */
	unsigned int read_open:1;	/* Opened for reading ? */
};
| 104 | |
/*
 * ring buffer state.
 *
 * Per-buffer (typically per-CPU) producer/consumer state. Field order is
 * deliberate: the members accessed on the write fast path are grouped in
 * the first cache line — do not reorder.
 */
struct lttng_kernel_ring_buffer {
	/* First 32 bytes cache-hot cacheline */
	union v_atomic offset;		/* Current offset in the buffer */
	struct commit_counters_hot *commit_hot;
					/* Commit count per sub-buffer */
	atomic_long_t consumed;		/*
					 * Current offset in the buffer
					 * standard atomic access (shared)
					 */
	atomic_t record_disabled;	/* Non-zero stops writes to this buffer */
	/* End of first 32 bytes cacheline */
	union v_atomic last_tsc;	/*
					 * Last timestamp written in the buffer.
					 */

	struct lttng_kernel_ring_buffer_backend backend;	/* Associated backend */

	struct commit_counters_cold *commit_cold;
					/* Commit count per sub-buffer */
	u64 *ts_end;			/*
					 * timestamp_end per sub-buffer.
					 * Time is sampled by the
					 * switch_*_end() callbacks which
					 * are the last space reservation
					 * performed in the sub-buffer
					 * before it can be fully
					 * committed and delivered. This
					 * time value is then read by
					 * the deliver callback,
					 * performed by the last commit
					 * before the buffer becomes
					 * readable.
					 */
	atomic_long_t active_readers;	/*
					 * Active readers count
					 * standard atomic access (shared)
					 */
	/* Dropped records */
	union v_atomic records_lost_full;	/* Buffer full */
	union v_atomic records_lost_wrap;	/* Nested wrap-around */
	union v_atomic records_lost_big;	/* Events too big */
	union v_atomic records_count;	/* Number of records written */
	union v_atomic records_overrun;	/* Number of overwritten records */
	wait_queue_head_t read_wait;	/* reader buffer-level wait queue */
	wait_queue_head_t write_wait;	/* writer buffer-level wait queue (for metadata only) */
	struct irq_work wakeup_pending;		/* Pending wakeup irq work */
	int finalized;			/* buffer has been finalized */
	struct timer_list switch_timer;	/* timer for periodical switch */
	struct timer_list read_timer;	/* timer for read poll */
	raw_spinlock_t raw_tick_nohz_spinlock;	/* nohz entry lock/trylock */
	struct lttng_kernel_ring_buffer_iter iter;	/* read-side iterator */
	unsigned long get_subbuf_consumed;	/* Read-side consumed */
	unsigned long prod_snapshot;	/* Producer count snapshot */
	unsigned long cons_snapshot;	/* Consumer count snapshot */
	unsigned int get_subbuf:1,	/* Sub-buffer being held by reader */
		switch_timer_enabled:1,	/* Protected by ring_buffer_nohz_lock */
		read_timer_enabled:1,	/* Protected by ring_buffer_nohz_lock */
		quiescent:1;		/* Buffer quiescence requested */
};
| 165 | |
| 166 | static inline |
| 167 | void *channel_get_private(struct lttng_kernel_ring_buffer_channel *chan) |
| 168 | { |
| 169 | return chan->backend.priv; |
| 170 | } |
| 171 | |
/*
 * Record that an event was dropped because it was too large.
 * NOTE(review): presumably bumps the channel's "lost big" accounting —
 * confirm against the frontend implementation.
 */
void lib_ring_buffer_lost_event_too_big(struct lttng_kernel_ring_buffer_channel *chan);
| 173 | |
| 174 | /* |
| 175 | * Issue warnings and disable channels upon internal error. |
| 176 | * Can receive struct lttng_kernel_ring_buffer or struct lttng_kernel_ring_buffer_backend |
| 177 | * parameters. |
| 178 | */ |
| 179 | #define CHAN_WARN_ON(c, cond) \ |
| 180 | ({ \ |
| 181 | struct lttng_kernel_ring_buffer_channel *__chan; \ |
| 182 | int _____ret = unlikely(cond); \ |
| 183 | if (_____ret) { \ |
| 184 | if (__same_type(*(c), struct channel_backend)) \ |
| 185 | __chan = container_of((void *) (c), \ |
| 186 | struct lttng_kernel_ring_buffer_channel, \ |
| 187 | backend); \ |
| 188 | else if (__same_type(*(c), struct lttng_kernel_ring_buffer_channel)) \ |
| 189 | __chan = (void *) (c); \ |
| 190 | else \ |
| 191 | BUG_ON(1); \ |
| 192 | atomic_inc(&__chan->record_disabled); \ |
| 193 | WARN_ON(1); \ |
| 194 | } \ |
| 195 | _____ret; \ |
| 196 | }) |
| 197 | |
| 198 | #endif /* _LIB_RING_BUFFER_FRONTEND_TYPES_H */ |