Implement lttng_ust_get_cpu()
[lttng-ust.git] / libringbuffer / frontend_types.h
CommitLineData
852c2936
MD
1#ifndef _LINUX_RING_BUFFER_FRONTEND_TYPES_H
2#define _LINUX_RING_BUFFER_FRONTEND_TYPES_H
3
4/*
5 * linux/ringbuffer/frontend_types.h
6 *
7 * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 *
9 * Ring Buffer Library Synchronization Header (types).
10 *
11 * Author:
12 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
13 *
14 * See ring_buffer_frontend.c for more information on wait-free algorithms.
15 *
16 * Dual LGPL v2.1/GPL v2 license.
17 */
18
a6352fd4
MD
19#include <string.h>
20
14641deb
MD
21#include <urcu/list.h>
22#include <urcu/uatomic.h>
14641deb 23
4318ae1b 24#include "lttng/core.h"
14641deb 25
4318ae1b
MD
26#include <lttng/usterr-signal-safe.h>
27#include <lttng/ringbuffer-config.h>
4931a13e 28#include "backend_types.h"
1d498196 29#include "shm_internal.h"
a3bb4b27 30#include "vatomic.h"
852c2936
MD
31
/*
 * Sub-buffer switch modes.
 *
 * A switch is done during tracing (SWITCH_ACTIVE) or as a final flush after
 * tracing (SWITCH_FLUSH, so it won't write in the new sub-buffer).
 */
enum switch_mode { SWITCH_ACTIVE, SWITCH_FLUSH };
37
852c2936
MD
/* channel: collection of per-cpu ring buffers. */
struct channel {
	int record_disabled;		/*
					 * Non-zero disables tracing on this
					 * channel; also incremented by
					 * CHAN_WARN_ON() on internal error.
					 */
	unsigned long commit_count_mask;	/*
					 * Commit count mask, removing
					 * the MSBs corresponding to
					 * bits used to represent the
					 * subbuffer index.
					 */

	unsigned long switch_timer_interval;	/* Buffer flush (jiffies) */
	unsigned long read_timer_interval;	/* Reader wakeup (jiffies) */
	//wait_queue_head_t read_wait;		/* reader wait queue */
	int finalized;			/* Has channel been finalized */
	size_t priv_data_offset;	/*
					 * Byte offset of the private data area
					 * from the start of this struct; see
					 * channel_get_private().
					 */
	/*
	 * Associated backend contains a variable-length array. Needs to
	 * be last member.
	 */
	struct channel_backend backend;	/* Associated backend */
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
852c2936
MD
59
/*
 * Per-subbuffer commit counters used on the hot path.
 * Aligned on a cache line (CAA_CACHE_LINE_SIZE).
 */
struct commit_counters_hot {
	union v_atomic cc;		/* Commit counter */
	union v_atomic seq;		/* Consecutive commits */
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
852c2936
MD
65
/*
 * Per-subbuffer commit counters used only on cold paths.
 * Aligned on a cache line (CAA_CACHE_LINE_SIZE).
 */
struct commit_counters_cold {
	union v_atomic cc_sb;		/* Incremented _once_ at sb switch */
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
852c2936 70
/* ring buffer state */
struct lttng_ust_lib_ring_buffer {
	/* First 32 bytes cache-hot cacheline */
	union v_atomic offset;		/* Current offset in the buffer */
	DECLARE_SHMP(struct commit_counters_hot, commit_hot);
					/* Commit count per sub-buffer */
	long consumed;			/*
					 * Current offset in the buffer
					 * standard atomic access (shared)
					 */
	int record_disabled;		/* Non-zero drops records on this buffer */
	/* End of first 32 bytes cacheline */
	union v_atomic last_tsc;	/*
					 * Last timestamp written in the buffer.
					 */

	struct lttng_ust_lib_ring_buffer_backend backend;	/* Associated backend */

	DECLARE_SHMP(struct commit_counters_cold, commit_cold);
					/* Commit count per sub-buffer */
	long active_readers;		/*
					 * Active readers count
					 * standard atomic access (shared)
					 */
	long active_shadow_readers;	/* Readers on a shadow buffer mapping */
	/* Dropped records */
	union v_atomic records_lost_full;	/* Buffer full */
	union v_atomic records_lost_wrap;	/* Nested wrap-around */
	union v_atomic records_lost_big;	/* Events too big */
	union v_atomic records_count;	/* Number of records written */
	union v_atomic records_overrun;	/* Number of overwritten records */
	//wait_queue_head_t read_wait;	/* reader buffer-level wait queue */
	int finalized;			/* buffer has been finalized */
	//struct timer_list switch_timer;	/* timer for periodical switch */
	//struct timer_list read_timer;	/* timer for read poll */
	unsigned long get_subbuf_consumed;	/* Read-side consumed */
	unsigned long prod_snapshot;	/* Producer count snapshot */
	unsigned long cons_snapshot;	/* Consumer count snapshot */
	int get_subbuf:1;		/* Sub-buffer being held by reader */
	int switch_timer_enabled:1;	/* Protected by ring_buffer_nohz_lock */
	int read_timer_enabled:1;	/* Protected by ring_buffer_nohz_lock */
	/* shmp pointer to self */
	DECLARE_SHMP(struct lttng_ust_lib_ring_buffer, self);
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
852c2936
MD
115
116static inline
117void *channel_get_private(struct channel *chan)
118{
a3f61e7f 119 return ((char *) chan) + chan->priv_data_offset;
852c2936
MD
120}
121
/*
 * Issue a warning and disable the channel upon internal error.
 * Accepts either a struct channel or a struct channel_backend pointer as
 * first parameter (dispatched at compile time with __same_type); any other
 * type hits BUG_ON(1). Evaluates to the (unlikely) truth value of cond.
 *
 * Note: internal locals are prefixed _chan_warn_ to avoid both clashes with
 * caller identifiers and C reserved names (identifiers starting with a
 * double underscore are reserved for the implementation, C11 7.1.3).
 */
#define CHAN_WARN_ON(c, cond)						\
	({								\
		struct channel *_chan_warn_chan;			\
		int _chan_warn_ret = caa_unlikely(cond);		\
									\
		if (_chan_warn_ret) {					\
			if (__same_type(*(c), struct channel_backend))	\
				_chan_warn_chan = caa_container_of((void *) (c), \
						struct channel,		\
						backend);		\
			else if (__same_type(*(c), struct channel))	\
				_chan_warn_chan = (void *) (c);		\
			else						\
				BUG_ON(1);				\
			/* Stop tracing on this channel, then warn. */	\
			uatomic_inc(&_chan_warn_chan->record_disabled);	\
			WARN_ON(1);					\
		}							\
		_chan_warn_ret;						\
	})
145
146#endif /* _LINUX_RING_BUFFER_FRONTEND_TYPES_H */
This page took 0.029513 seconds and 4 git commands to generate.