Implement __rb_same_type
[lttng-ust.git] / libringbuffer / frontend_types.h
#ifndef _LINUX_RING_BUFFER_FRONTEND_TYPES_H
#define _LINUX_RING_BUFFER_FRONTEND_TYPES_H

/*
 * linux/ringbuffer/frontend_types.h
 *
 * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Ring Buffer Library Synchronization Header (types).
 *
 * Author:
 *	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * See ring_buffer_frontend.c for more information on wait-free algorithms.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

#include <string.h>

#include <urcu/list.h>
#include <urcu/uatomic.h>

#include "lttng/core.h"

#include <lttng/usterr-signal-safe.h>
#include <lttng/ringbuffer-config.h>
#include "backend_types.h"
#include "shm_internal.h"
#include "vatomic.h"

/*
 * A switch is done during tracing or as a final flush after tracing (so it
 * won't write in the new sub-buffer).
 */
enum switch_mode { SWITCH_ACTIVE, SWITCH_FLUSH };

/* channel: collection of per-cpu ring buffers. */
struct channel {
	int record_disabled;
	unsigned long commit_count_mask;	/*
						 * Commit count mask, removing
						 * the MSBs corresponding to
						 * bits used to represent the
						 * subbuffer index.
						 */

	unsigned long switch_timer_interval;	/* Buffer flush (jiffies) */
	unsigned long read_timer_interval;	/* Reader wakeup (jiffies) */
	//wait_queue_head_t read_wait;		/* reader wait queue */
	int finalized;				/* Has channel been finalized */
	size_t priv_data_offset;
	/*
	 * Associated backend contains a variable-length array. Needs to
	 * be last member.
	 */
	struct channel_backend backend;		/* Associated backend */
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
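
/*
 * Hypothetical sketch, not part of the original header: commit_count_mask
 * is derived at channel creation time in ring_buffer_frontend.c. Assuming
 * subbuf_order holds log2(number of sub-buffers), the most significant
 * subbuf_order bits of each commit counter carry the sub-buffer index, so
 * the mask keeps only the low-order count bits.
 */
static inline
unsigned long __rb_sketch_commit_count_mask(unsigned int subbuf_order)
{
	/* e.g. 4 sub-buffers (order 2) on 64-bit: 0x3fffffffffffffff */
	return ~0UL >> subbuf_order;
}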

/* Per-subbuffer commit counters used on the hot path */
struct commit_counters_hot {
	union v_atomic cc;		/* Commit counter */
	union v_atomic seq;		/* Consecutive commits */
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));

/* Per-subbuffer commit counters used only on cold paths */
struct commit_counters_cold {
	union v_atomic cc_sb;		/* Incremented _once_ at sb switch */
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));

/* ring buffer state */
struct lttng_ust_lib_ring_buffer {
	/* First 32 bytes cache-hot cacheline */
	union v_atomic offset;		/* Current offset in the buffer */
	DECLARE_SHMP(struct commit_counters_hot, commit_hot);
					/* Commit count per sub-buffer */
	long consumed;			/*
					 * Consumed offset in the buffer;
					 * standard atomic access (shared)
					 */
	int record_disabled;
	/* End of first 32 bytes cacheline */
	union v_atomic last_tsc;	/*
					 * Last timestamp written in the
					 * buffer (see the sketch after
					 * this struct).
					 */

	struct lttng_ust_lib_ring_buffer_backend backend;
					/* Associated backend */

	DECLARE_SHMP(struct commit_counters_cold, commit_cold);
					/* Commit count per sub-buffer */
	long active_readers;		/*
					 * Active readers count;
					 * standard atomic access (shared)
					 */
	long active_shadow_readers;
	/* Dropped records */
	union v_atomic records_lost_full;	/* Buffer full */
	union v_atomic records_lost_wrap;	/* Nested wrap-around */
	union v_atomic records_lost_big;	/* Events too big */
	union v_atomic records_count;	/* Number of records written */
	union v_atomic records_overrun;	/* Number of overwritten records */
	//wait_queue_head_t read_wait;	/* reader buffer-level wait queue */
	int finalized;			/* buffer has been finalized */
	//struct timer_list switch_timer;	/* timer for periodical switch */
	//struct timer_list read_timer;	/* timer for read poll */
	unsigned long get_subbuf_consumed;	/* Read-side consumed */
	unsigned long prod_snapshot;	/* Producer count snapshot */
	unsigned long cons_snapshot;	/* Consumer count snapshot */
	int get_subbuf:1;		/* Sub-buffer being held by reader */
	int switch_timer_enabled:1;	/* Protected by ring_buffer_nohz_lock */
	int read_timer_enabled:1;	/* Protected by ring_buffer_nohz_lock */
	/* shmp pointer to self */
	DECLARE_SHMP(struct lttng_ust_lib_ring_buffer, self);
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
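
/*
 * Hypothetical sketch, not part of the original header, of how last_tsc
 * supports timestamp compression: event headers record only the low
 * config->tsc_bits bits of the clock, so the writer tracks the high-order
 * bits of the last timestamp and emits a full 64-bit clock value whenever
 * they change. See save_last_tsc()/last_tsc_overflow() in
 * frontend_internal.h for the real implementation.
 */
static inline
int __rb_sketch_tsc_overflow(unsigned long last_tsc_shifted, uint64_t tsc,
		unsigned int tsc_bits)
{
	/* Did the high-order bits change since the last record? */
	return (unsigned long)(tsc >> tsc_bits) != last_tsc_shifted;
}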

static inline
void *channel_get_private(struct channel *chan)
{
	return ((char *) chan) + chan->priv_data_offset;
}
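
/*
 * Hypothetical usage sketch, not part of the original header: the private
 * area lives in the same allocation as the channel, priv_data_offset bytes
 * from its start, which is why channel_get_private() is plain pointer
 * arithmetic. "struct example_priv" and the helper below are purely
 * illustrative.
 */
struct example_priv {
	unsigned long events_handled;	/* arbitrary client-side state */
};

static inline
struct example_priv *channel_get_example_priv(struct channel *chan)
{
	return (struct example_priv *) channel_get_private(chan);
}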

#ifndef __rb_same_type
#define __rb_same_type(a, b)	__builtin_types_compatible_p(typeof(a), typeof(b))
#endif
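
/*
 * Illustrative self-check, not part of the original header:
 * __builtin_types_compatible_p() is evaluated at compile time, which is
 * what lets CHAN_WARN_ON() below dispatch on the type of its first
 * argument at zero run-time cost. The negative array sizes break the
 * build if these expected results ever change.
 */
static inline
void __rb_same_type_sketch(struct channel *chan)
{
	char same_type[__rb_same_type(*chan, struct channel) ? 1 : -1]
			__attribute__((unused));
	char diff_type[__rb_same_type(*chan, struct channel_backend) ? -1 : 1]
			__attribute__((unused));
}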

/*
 * Issue warnings and disable channels upon internal error.
 * Can receive struct channel or struct channel_backend parameters.
 */
#define CHAN_WARN_ON(c, cond)						\
	({								\
		struct channel *__chan;					\
		int _____ret = caa_unlikely(cond);			\
		if (_____ret) {						\
			if (__rb_same_type(*(c), struct channel_backend)) \
				__chan = caa_container_of((void *) (c),	\
						struct channel,		\
						backend);		\
			else if (__rb_same_type(*(c), struct channel))	\
				__chan = (void *) (c);			\
			else						\
				BUG_ON(1);				\
			uatomic_inc(&__chan->record_disabled);		\
			WARN_ON(1);					\
		}							\
		_____ret;						\
	})
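
/*
 * Hypothetical usage sketch, not part of the original header; the field
 * name buf_size is assumed from struct channel_backend in backend_types.h.
 * The first argument may point to either a struct channel or a struct
 * channel_backend; in the backend case the macro recovers the enclosing
 * channel before disabling it.
 */
static inline
void __rb_chan_warn_on_sketch(struct channel *chan,
		struct channel_backend *chanb, size_t offset)
{
	/* Called with a channel pointer... */
	CHAN_WARN_ON(chan, offset > chan->backend.buf_size);
	/* ...or with a channel backend pointer. */
	CHAN_WARN_ON(chanb, offset > chanb->buf_size);
}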

#endif /* _LINUX_RING_BUFFER_FRONTEND_TYPES_H */