#ifndef _LINUX_RING_BUFFER_FRONTEND_TYPES_H
#define _LINUX_RING_BUFFER_FRONTEND_TYPES_H

/*
 * libringbuffer/frontend_types.h
 *
 * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Ring Buffer Library Synchronization Header (types).
 *
 * Author:
 *	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * See ring_buffer_frontend.c for more information on wait-free algorithms.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

#include <string.h>

#include <urcu/list.h>
#include <urcu/uatomic.h>

#include <lttng/ringbuffer-config.h>
#include <usterr-signal-safe.h>
#include "backend_types.h"
#include "shm_internal.h"
#include "vatomic.h"

/*
 * A switch is done during tracing or as a final flush after tracing (so it
 * won't write in the new sub-buffer).
 */
enum switch_mode { SWITCH_ACTIVE, SWITCH_FLUSH };

/* channel: collection of per-cpu ring buffers. */
struct channel {
	int record_disabled;
	unsigned long commit_count_mask;	/*
						 * Commit count mask, removing
						 * the MSBs corresponding to
						 * bits used to represent the
						 * subbuffer index.
						 */

	unsigned long switch_timer_interval;	/* Buffer flush (jiffies) */
	unsigned long read_timer_interval;	/* Reader wakeup (jiffies) */
	//wait_queue_head_t read_wait;		/* reader wait queue */
	int finalized;				/* Has channel been finalized */
	size_t priv_data_offset;
	/*
	 * Associated backend contains a variable-length array. Needs to
	 * be last member.
	 */
	struct channel_backend backend;		/* Associated backend */
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
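
/*
 * Illustrative sketch (not part of the original header): assuming the
 * sub-buffer count is a power of two of order "num_subbuf_order", the
 * commit count mask could be derived at channel creation roughly as:
 *
 *	chan->commit_count_mask = (~0UL >> num_subbuf_order);
 *
 * i.e. clearing the top bits that encode the sub-buffer index, so that
 * commit counter comparisons wrap consistently.
 */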

/* Per-subbuffer commit counters used on the hot path */
struct commit_counters_hot {
	union v_atomic cc;	/* Commit counter */
	union v_atomic seq;	/* Consecutive commits */
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));

/* Per-subbuffer commit counters used only on cold paths */
struct commit_counters_cold {
	union v_atomic cc_sb;	/* Incremented _once_ at sb switch */
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
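
/*
 * Design note: keeping the counters written on every commit (cc, seq)
 * and the counter written once per sub-buffer switch (cc_sb) in
 * separate cache-line-aligned structures avoids false sharing between
 * the producer fast path and the consumer. Hypothetical access sketch,
 * assuming a valid shm handle "handle" and sub-buffer index "idx":
 *
 *	struct commit_counters_hot *cc_hot =
 *		shmp_index(handle, buf->commit_hot, idx);
 *	v_add(config, slot_size, &cc_hot->cc);
 */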

/* ring buffer state */
struct lttng_ust_lib_ring_buffer {
	/* First 32 bytes cache-hot cacheline */
	union v_atomic offset;		/* Current offset in the buffer */
	DECLARE_SHMP(struct commit_counters_hot, commit_hot);
					/* Commit count per sub-buffer */
	long consumed;			/*
					 * Consumed position in the buffer,
					 * standard atomic access (shared)
					 */
	int record_disabled;
	/* End of first 32 bytes cacheline */
	union v_atomic last_tsc;	/*
					 * Last timestamp written in the buffer.
					 */

	struct lttng_ust_lib_ring_buffer_backend backend;	/* Associated backend */

	DECLARE_SHMP(struct commit_counters_cold, commit_cold);
					/* Commit count per sub-buffer */
	long active_readers;		/*
					 * Active readers count,
					 * standard atomic access (shared)
					 */
	long active_shadow_readers;
	/* Dropped records */
	union v_atomic records_lost_full;	/* Buffer full */
	union v_atomic records_lost_wrap;	/* Nested wrap-around */
	union v_atomic records_lost_big;	/* Events too big */
	union v_atomic records_count;		/* Number of records written */
	union v_atomic records_overrun;		/* Number of overwritten records */
	//wait_queue_head_t read_wait;		/* reader buffer-level wait queue */
	int finalized;				/* buffer has been finalized */
	//struct timer_list switch_timer;	/* timer for periodical switch */
	//struct timer_list read_timer;		/* timer for read poll */
	unsigned long get_subbuf_consumed;	/* Read-side consumed */
	unsigned long prod_snapshot;		/* Producer count snapshot */
	unsigned long cons_snapshot;		/* Consumer count snapshot */
	unsigned int get_subbuf:1,		/* Sub-buffer being held by reader */
		switch_timer_enabled:1,		/* Protected by ring_buffer_nohz_lock */
		read_timer_enabled:1;		/* Protected by ring_buffer_nohz_lock */
	/* shmp pointer to self */
	DECLARE_SHMP(struct lttng_ust_lib_ring_buffer, self);
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
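
/*
 * Sketch of a typical consumer sequence over the snapshot and
 * sub-buffer fields above. The helper names are assumed from the
 * frontend API (frontend.h) and shown only as an illustration:
 *
 *	unsigned long consumed, produced;
 *
 *	lib_ring_buffer_snapshot(buf, &consumed, &produced, handle);
 *	if (lib_ring_buffer_get_subbuf(buf, consumed, handle) == 0) {
 *		... read the held sub-buffer through the backend ...
 *		lib_ring_buffer_put_subbuf(buf, handle);
 *	}
 */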

static inline
void *channel_get_private(struct channel *chan)
{
	return ((char *) chan) + chan->priv_data_offset;
}
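
/*
 * The private data area lives at the end of the channel allocation,
 * priv_data_offset bytes from its start. Hypothetical usage, where
 * struct my_priv is an illustrative client type, not part of this
 * library:
 *
 *	struct my_priv *priv = channel_get_private(chan);
 */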

#ifndef __rb_same_type
#define __rb_same_type(a, b)	__builtin_types_compatible_p(typeof(a), typeof(b))
#endif
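
/*
 * Example: __rb_same_type(*(c), struct channel) is a compile-time
 * constant equal to 1 when c points to a struct channel and 0
 * otherwise, which is what lets CHAN_WARN_ON below dispatch on the
 * type of its first parameter.
 */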

/*
 * Issue warnings and disable channels upon internal error.
 * Can receive struct channel or struct channel_backend parameters,
 * the two types handled by the type dispatch below.
 */
#define CHAN_WARN_ON(c, cond)						\
	({								\
		struct channel *__chan;					\
		int _____ret = caa_unlikely(cond);			\
		if (_____ret) {						\
			if (__rb_same_type(*(c), struct channel_backend)) \
				__chan = caa_container_of((void *) (c),	\
						struct channel,		\
						backend);		\
			else if (__rb_same_type(*(c), struct channel))	\
				__chan = (void *) (c);			\
			else						\
				BUG_ON(1);				\
			uatomic_inc(&__chan->record_disabled);		\
			WARN_ON(1);					\
		}							\
		_____ret;						\
	})
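
/*
 * Hypothetical usage sketch (the condition is illustrative only):
 *
 *	if (CHAN_WARN_ON(chan, write_offset > chan->backend.buf_size))
 *		return;
 *
 * When the condition holds, the channel is disabled by incrementing
 * record_disabled, a warning is emitted, and the macro evaluates to
 * non-zero so the caller can bail out.
 */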

#endif /* _LINUX_RING_BUFFER_FRONTEND_TYPES_H */