/* channel-level read-side iterator */
struct channel_iter {
/* Prio heap of buffers. Lowest timestamps at the top. */
- struct lttng_ptr_heap heap; /* Heap of struct lib_ring_buffer ptrs */
+ struct lttng_ptr_heap heap; /* Heap of struct lttng_kernel_ring_buffer ptrs */
struct list_head empty_head; /* Empty buffers linked-list head */
int read_open; /* Opened for reading? */
u64 last_qs; /* Last quiescent state timestamp */
};
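/*
 * Illustrative sketch, not part of the patch: the iterator's priority
 * heap keeps the buffer whose next record carries the lowest timestamp
 * at the top, so a "peek top, consume one record, sift down" loop
 * yields a single time-ordered stream across all per-CPU buffers.
 * Plain arrays stand in for ring buffers here; every name below is
 * hypothetical.
 */
struct demo_buf {
	const u64 *ts;		/* pending record timestamps, ascending */
	size_t len;		/* number of records */
	size_t pos;		/* next unread record */
};

static u64 demo_top_ts(const struct demo_buf *b)
{
	return b->ts[b->pos];
}

static void demo_sift_down(struct demo_buf **heap, size_t n, size_t i)
{
	for (;;) {
		size_t min = i, l = 2 * i + 1, r = 2 * i + 2;
		struct demo_buf *tmp;

		if (l < n && demo_top_ts(heap[l]) < demo_top_ts(heap[min]))
			min = l;
		if (r < n && demo_top_ts(heap[r]) < demo_top_ts(heap[min]))
			min = r;
		if (min == i)
			return;
		tmp = heap[i];
		heap[i] = heap[min];
		heap[min] = tmp;
		i = min;
	}
}

/* Emit every record of every buffer in global timestamp order. */
static void demo_merge(struct demo_buf **heap, size_t n, void (*emit)(u64 ts))
{
	size_t i;

	for (i = n / 2; i-- > 0; )	/* heapify: lowest timestamp on top */
		demo_sift_down(heap, n, i);
	while (n) {
		emit(demo_top_ts(heap[0]));
		if (++heap[0]->pos == heap[0]->len)	/* buffer drained */
			heap[0] = heap[--n];	/* unlink it, as empty_head does */
		demo_sift_down(heap, n, 0);
	}
}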
/* channel: collection of per-cpu ring buffers. */
-struct channel {
+struct lttng_kernel_ring_buffer_channel {
atomic_t record_disabled;
unsigned long commit_count_mask; /*
* Commit count mask, removing
* the MSBs corresponding to
* bits used to represent the
* subbuffer index.
*/
unsigned long switch_timer_interval; /* Buffer flush (jiffies) */
unsigned long read_timer_interval; /* Reader wakeup (jiffies) */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
struct lttng_cpuhp_node cpuhp_prepare;
struct lttng_cpuhp_node cpuhp_online;
struct lttng_cpuhp_node cpuhp_iter_online;
#endif
};
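/*
 * Sketch of why the patch swaps KERNEL_VERSION() for LTTng wrappers.
 * Both the motivation and the definition below are assumptions, not the
 * actual lttng-modules code: the kernel encodes the sublevel in 8 bits,
 * which overflows on long-term-support releases such as 4.9.256, so a
 * wrapper can re-encode the version with a wider sublevel field. (Real
 * code would take the version parts from the build system rather than
 * the already-truncated LINUX_VERSION_CODE used here for brevity.)
 */
#include <linux/version.h>

#define LTTNG_KERNEL_VERSION(a, b, c)	(((a) << 24) + ((b) << 16) + (c))
#define LTTNG_LINUX_VERSION_CODE					\
	LTTNG_KERNEL_VERSION((LINUX_VERSION_CODE >> 16) & 0xff,		\
			     (LINUX_VERSION_CODE >> 8) & 0xff,		\
			     LINUX_VERSION_CODE & 0xff)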
/* Per-buffer read iterator */
-struct lib_ring_buffer_iter {
+struct lttng_kernel_ring_buffer_iter {
u64 timestamp; /* Current record timestamp */
size_t header_len; /* Current record header length */
size_t payload_len; /* Current record payload length */
};
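/*
 * Usage sketch: the three iterator fields fully describe the record
 * under the read cursor; the header sits at the record offset and the
 * payload immediately follows it. demo_payload_bounds() and its
 * record_offset parameter are hypothetical.
 */
static void demo_payload_bounds(const struct lttng_kernel_ring_buffer_iter *iter,
				const char *base, size_t record_offset,
				const char **payload, size_t *payload_len)
{
	*payload = base + record_offset + iter->header_len;
	*payload_len = iter->payload_len;
}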
/* ring buffer state */
-struct lib_ring_buffer {
+struct lttng_kernel_ring_buffer {
/* First 32 bytes cache-hot cacheline */
union v_atomic offset; /* Current offset in the buffer */
struct commit_counters_hot *commit_hot;
union v_atomic last_timestamp; /*
* Last timestamp written in the buffer.
*/
- struct lib_ring_buffer_backend backend; /* Associated backend */
+ struct lttng_kernel_ring_buffer_backend backend; /* Associated backend */
struct commit_counters_cold *commit_cold;
/* Commit count per sub-buffer */
struct timer_list switch_timer; /* timer for periodical switch */
struct timer_list read_timer; /* timer for read poll */
raw_spinlock_t raw_tick_nohz_spinlock; /* nohz entry lock/trylock */
- struct lib_ring_buffer_iter iter; /* read-side iterator */
+ struct lttng_kernel_ring_buffer_iter iter; /* read-side iterator */
unsigned long get_subbuf_consumed; /* Read-side consumed */
unsigned long prod_snapshot; /* Producer count snapshot */
unsigned long cons_snapshot; /* Consumer count snapshot */
};
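/*
 * Sketch (hypothetical helper): the producer/consumer snapshots are
 * counter values captured at one instant, so their difference is the
 * amount of data that was readable at snapshot time, independent of the
 * writer advancing afterwards.
 */
static inline unsigned long
demo_snapshot_available(const struct lttng_kernel_ring_buffer *buf)
{
	return buf->prod_snapshot - buf->cons_snapshot;
}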
static inline
-void *channel_get_private(struct channel *chan)
+void *channel_get_private(struct lttng_kernel_ring_buffer_channel *chan)
{
return chan->backend.priv;
}
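/*
 * Usage sketch: a ring-buffer client reads back the private state it
 * registered at channel creation. struct demo_client and
 * demo_on_buffer_full() are hypothetical stand-ins.
 */
struct demo_client {
	atomic_t buffer_full_count;
};

static void demo_on_buffer_full(struct lttng_kernel_ring_buffer_channel *chan)
{
	struct demo_client *client = channel_get_private(chan);

	atomic_inc(&client->buffer_full_count);
}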
-void lib_ring_buffer_lost_event_too_big(struct channel *chan);
+void lib_ring_buffer_lost_event_too_big(struct lttng_kernel_ring_buffer_channel *chan);
/*
* Issue warnings and disable channels upon internal error.
- * Can receive struct lib_ring_buffer or struct lib_ring_buffer_backend
+ * Can receive struct lttng_kernel_ring_buffer or struct lttng_kernel_ring_buffer_backend
* parameters.
*/
#define CHAN_WARN_ON(c, cond) \
({ \
- struct channel *__chan; \
+ struct lttng_kernel_ring_buffer_channel *__chan; \
int _____ret = unlikely(cond); \
if (_____ret) { \
if (__same_type(*(c), struct channel_backend)) \
__chan = container_of((void *) (c), \
- struct channel, \
+ struct lttng_kernel_ring_buffer_channel, \
backend); \
- else if (__same_type(*(c), struct channel)) \
+ else if (__same_type(*(c), struct lttng_kernel_ring_buffer_channel)) \
__chan = (void *) (c); \
else \
BUG_ON(1); \
atomic_inc(&__chan->record_disabled); \
WARN_ON(1); \
} \
_____ret; \
})
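/*
 * Usage sketch: guard an internal invariant; on failure the channel is
 * disabled and a warning is issued instead of crashing the machine, as
 * the comment above the macro describes. The invariant shown here is
 * illustrative only.
 */
static inline void demo_check_offset(struct lttng_kernel_ring_buffer_channel *chan,
				     unsigned long offset)
{
	CHAN_WARN_ON(chan, offset >= chan->backend.buf_size);
}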