Fix ABI: add padding to structures shared between UST and consumer
[lttng-ust.git] / libringbuffer / frontend_types.h
1 #ifndef _LINUX_RING_BUFFER_FRONTEND_TYPES_H
2 #define _LINUX_RING_BUFFER_FRONTEND_TYPES_H
3
4 /*
5 * linux/ringbuffer/frontend_types.h
6 *
7 * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 *
9 * Ring Buffer Library Synchronization Header (types).
10 *
11 * Author:
12 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
13 *
14 * See ring_buffer_frontend.c for more information on wait-free algorithms.
15 *
16 * Dual LGPL v2.1/GPL v2 license.
17 */
18
19 #include <string.h>
20
21 #include <urcu/list.h>
22 #include <urcu/uatomic.h>
23
24 #include <lttng/ringbuffer-config.h>
25 #include <usterr-signal-safe.h>
26 #include "backend_types.h"
27 #include "shm_internal.h"
28 #include "vatomic.h"
29
/*
 * A switch is done during tracing or as a final flush after tracing (so it
 * won't write in the new sub-buffer).
 * SWITCH_ACTIVE: sub-buffer switch while tracing continues.
 * SWITCH_FLUSH: final flush at teardown; no writes to the new sub-buffer.
 */
enum switch_mode { SWITCH_ACTIVE, SWITCH_FLUSH };
35
36 /* channel: collection of per-cpu ring buffers. */
37 #define RB_CHANNEL_PADDING 64
38 struct channel {
39 int record_disabled;
40 unsigned long commit_count_mask; /*
41 * Commit count mask, removing
42 * the MSBs corresponding to
43 * bits used to represent the
44 * subbuffer index.
45 */
46
47 unsigned long switch_timer_interval; /* Buffer flush (jiffies) */
48 unsigned long read_timer_interval; /* Reader wakeup (jiffies) */
49 //wait_queue_head_t read_wait; /* reader wait queue */
50 int finalized; /* Has channel been finalized */
51 size_t priv_data_offset;
52 /*
53 * Associated backend contains a variable-length array. Needs to
54 * be last member.
55 */
56 struct channel_backend backend; /* Associated backend */
57 char padding[RB_CHANNEL_PADDING];
58 } __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
59
/*
 * Per-subbuffer commit counters used on the hot path.
 * Shared between UST and consumer; padding keeps the layout extensible
 * without breaking the ABI.
 */
#define RB_COMMIT_COUNT_HOT_PADDING	16
struct commit_counters_hot {
	union v_atomic cc;		/* Commit counter */
	union v_atomic seq;		/* Consecutive commits */
	/* Reserved for future ABI extension. */
	char padding[RB_COMMIT_COUNT_HOT_PADDING];
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
67
/*
 * Per-subbuffer commit counters used only on cold paths.
 * Shared between UST and consumer; padding keeps the layout extensible
 * without breaking the ABI.
 */
#define RB_COMMIT_COUNT_COLD_PADDING	24
struct commit_counters_cold {
	union v_atomic cc_sb;		/* Incremented _once_ at sb switch */
	/* Reserved for future ABI extension. */
	char padding[RB_COMMIT_COUNT_COLD_PADDING];
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
74
/*
 * ring buffer state: per-buffer control structure, shared between UST and
 * consumer through shared memory (cross-process pointers use shmp
 * offsets via DECLARE_SHMP).  Layout and padding are ABI.
 */
#define RB_RING_BUFFER_PADDING	64
struct lttng_ust_lib_ring_buffer {
	/* First 32 bytes cache-hot cacheline */
	union v_atomic offset;			/* Current offset in the buffer */
	DECLARE_SHMP(struct commit_counters_hot, commit_hot);
						/* Commit count per sub-buffer */
	long consumed;				/*
						 * Current offset in the buffer
						 * standard atomic access (shared)
						 */
	int record_disabled;			/* Non-zero disables writes to this buffer */
	/* End of first 32 bytes cacheline */
	union v_atomic last_tsc;		/*
						 * Last timestamp written in the buffer.
						 */

	struct lttng_ust_lib_ring_buffer_backend backend;	/* Associated backend */

	DECLARE_SHMP(struct commit_counters_cold, commit_cold);
						/* Commit count per sub-buffer */
	long active_readers;			/*
						 * Active readers count
						 * standard atomic access (shared)
						 */
	long active_shadow_readers;		/* Shadow (consumer-side) reader count */
	/* Dropped records */
	union v_atomic records_lost_full;	/* Buffer full */
	union v_atomic records_lost_wrap;	/* Nested wrap-around */
	union v_atomic records_lost_big;	/* Events too big */
	union v_atomic records_count;		/* Number of records written */
	union v_atomic records_overrun;		/* Number of overwritten records */
	//wait_queue_head_t read_wait;		/* reader buffer-level wait queue */
	int finalized;				/* buffer has been finalized */
	//struct timer_list switch_timer;	/* timer for periodical switch */
	//struct timer_list read_timer;		/* timer for read poll */
	unsigned long get_subbuf_consumed;	/* Read-side consumed */
	unsigned long prod_snapshot;		/* Producer count snapshot */
	unsigned long cons_snapshot;		/* Consumer count snapshot */
	unsigned int get_subbuf:1,		/* Sub-buffer being held by reader */
		switch_timer_enabled:1,		/* Protected by ring_buffer_nohz_lock */
		read_timer_enabled:1;		/* Protected by ring_buffer_nohz_lock */
	/* shmp pointer to self */
	DECLARE_SHMP(struct lttng_ust_lib_ring_buffer, self);
	/* Reserved for future ABI extension. */
	char padding[RB_RING_BUFFER_PADDING];
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
121
122 static inline
123 void *channel_get_private(struct channel *chan)
124 {
125 return ((char *) chan) + chan->priv_data_offset;
126 }
127
#ifndef __rb_same_type
/*
 * Compile-time check: non-zero iff a and b have compatible types
 * (GCC/Clang __builtin_types_compatible_p).  Used by CHAN_WARN_ON below
 * to dispatch on the argument's static type.
 */
#define __rb_same_type(a, b)	__builtin_types_compatible_p(typeof(a), typeof(b))
#endif
131
/*
 * Issue warnings and disable channels upon internal error.
 * Can receive struct channel or struct channel_backend parameters
 * (dispatched at compile time via __rb_same_type; any other type hits
 * BUG_ON).
 *
 * Evaluates to the truth value of "cond".  When the condition holds,
 * the owning channel's record_disabled count is atomically incremented,
 * stopping further tracing on that channel instead of corrupting state,
 * and a warning is emitted.  Implemented as a GCC statement expression.
 */
#define CHAN_WARN_ON(c, cond)					\
	({							\
		struct channel *__chan;				\
		int _____ret = caa_unlikely(cond);		\
		if (_____ret) {					\
			if (__rb_same_type(*(c), struct channel_backend))	\
				__chan = caa_container_of((void *) (c),	\
						struct channel,	\
						backend);	\
			else if (__rb_same_type(*(c), struct channel))	\
				__chan = (void *) (c);		\
			else					\
				BUG_ON(1);			\
			uatomic_inc(&__chan->record_disabled);	\
			WARN_ON(1);				\
		}						\
		_____ret;					\
	})
155
156 #endif /* _LINUX_RING_BUFFER_FRONTEND_TYPES_H */
This page took 0.032154 seconds and 4 git commands to generate.