Implement shm object table
[lttng-ust.git] / libringbuffer / frontend_types.h
#ifndef _LINUX_RING_BUFFER_FRONTEND_TYPES_H
#define _LINUX_RING_BUFFER_FRONTEND_TYPES_H

/*
 * linux/ringbuffer/frontend_types.h
 *
 * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Ring Buffer Library Synchronization Header (types).
 *
 * Author:
 *	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * See ring_buffer_frontend.c for more information on wait-free algorithms.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

#include <string.h>

#include <urcu/list.h>
#include <urcu/uatomic.h>

#include "ust/core.h"

#include <ust/usterr-signal-safe.h>
#include <ust/ringbuffer-config.h>
#include "backend_types.h"
#include "shm_internal.h"

/*
 * A sub-buffer switch is done either while tracing is active (SWITCH_ACTIVE)
 * or as a final flush once tracing is done (SWITCH_FLUSH, which does not
 * write into the new sub-buffer).
 */
enum switch_mode { SWITCH_ACTIVE, SWITCH_FLUSH };

/* channel: collection of per-cpu ring buffers. */
struct channel {
	int record_disabled;
	unsigned long commit_count_mask;	/*
						 * Commit count mask, removing
						 * the MSBs corresponding to
						 * bits used to represent the
						 * subbuffer index.
						 */

	struct channel_backend backend;		/* Associated backend */

	unsigned long switch_timer_interval;	/* Buffer flush (jiffies) */
	unsigned long read_timer_interval;	/* Reader wakeup (jiffies) */
	//wait_queue_head_t read_wait;		/* reader wait queue */
	int finalized;				/* Has channel been finalized */
} ____cacheline_aligned;

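/*
 * A minimal sketch (not part of this header) of how commit_count_mask is
 * typically derived at channel creation time, assuming a power-of-2
 * sub-buffer count and a hypothetical num_subbuf_order holding its log2:
 *
 *	chan->commit_count_mask = (~0UL >> num_subbuf_order);
 *
 * The masked-out MSBs are the bits of the commit counter that encode the
 * sub-buffer index, so comparisons only look at the count itself.
 */
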
/* Per-subbuffer commit counters used on the hot path */
struct commit_counters_hot {
	union v_atomic cc;		/* Commit counter */
	union v_atomic seq;		/* Consecutive commits */
} ____cacheline_aligned;

/* Per-subbuffer commit counters used only on cold paths */
struct commit_counters_cold {
	union v_atomic cc_sb;		/* Incremented _once_ at sb switch */
} ____cacheline_aligned;

/* ring buffer state */
struct lib_ring_buffer {
	/* First 32 bytes cache-hot cacheline */
	union v_atomic offset;		/* Current offset in the buffer */
	DECLARE_SHMP(struct commit_counters_hot, commit_hot);
					/* Commit count per sub-buffer */
	long consumed;			/*
					 * Consumed (read-side) position in
					 * the buffer,
					 * standard atomic access (shared)
					 */
	int record_disabled;
	/* End of first 32 bytes cacheline */
	union v_atomic last_tsc;	/*
					 * Last timestamp written in the buffer.
					 */

	struct lib_ring_buffer_backend backend;	/* Associated backend */

	DECLARE_SHMP(struct commit_counters_cold, commit_cold);
					/* Commit count per sub-buffer */
	long active_readers;		/*
					 * Active readers count,
					 * standard atomic access (shared)
					 */
	/* Dropped records */
	union v_atomic records_lost_full;	/* Buffer full */
	union v_atomic records_lost_wrap;	/* Nested wrap-around */
	union v_atomic records_lost_big;	/* Events too big */
	union v_atomic records_count;		/* Number of records written */
	union v_atomic records_overrun;		/* Number of overwritten records */
	//wait_queue_head_t read_wait;		/* reader buffer-level wait queue */
	int finalized;				/* buffer has been finalized */
	//struct timer_list switch_timer;	/* timer for periodical switch */
	//struct timer_list read_timer;		/* timer for read poll */
	unsigned long get_subbuf_consumed;	/* Read-side consumed */
	unsigned long prod_snapshot;		/* Producer count snapshot */
	unsigned long cons_snapshot;		/* Consumer count snapshot */
	int get_subbuf:1;			/* Sub-buffer being held by reader */
	int switch_timer_enabled:1;		/* Protected by ring_buffer_nohz_lock */
	int read_timer_enabled:1;		/* Protected by ring_buffer_nohz_lock */
} ____cacheline_aligned;

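/*
 * The reader-side fields above (get_subbuf, get_subbuf_consumed,
 * prod_snapshot, cons_snapshot) back the consume protocol declared in
 * frontend.h. A rough, non-authoritative sketch of that flow (exact
 * signatures live in frontend.h and may take additional shared-memory
 * handle arguments):
 *
 *	unsigned long consumed, produced;
 *
 *	if (!lib_ring_buffer_snapshot(buf, &consumed, &produced)) {
 *		if (!lib_ring_buffer_get_subbuf(buf, consumed)) {
 *			... read the held sub-buffer through the backend ...
 *			lib_ring_buffer_put_subbuf(buf);
 *		}
 *	}
 */
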
static inline
void *channel_get_private(struct channel *chan)
{
	return chan->backend.priv;
}

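/*
 * Usage sketch for channel_get_private(): a client that stored its own state
 * in the channel backend's private pointer at channel creation time can
 * retrieve it from any context holding the channel. The struct
 * my_client_state type is purely illustrative.
 *
 *	struct my_client_state *state = channel_get_private(chan);
 */
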
/*
 * Issue warnings and disable channels upon internal error.
 * Can receive struct channel or struct channel_backend parameters
 * (see the type checks in the macro below).
 */
#define CHAN_WARN_ON(c, cond)						\
	({								\
		struct channel *__chan;					\
		int _____ret = unlikely(cond);				\
		if (_____ret) {						\
			if (__same_type(*(c), struct channel_backend))	\
				__chan = caa_container_of((void *) (c),	\
						struct channel,		\
						backend);		\
			else if (__same_type(*(c), struct channel))	\
				__chan = (void *) (c);			\
			else						\
				BUG_ON(1);				\
			uatomic_inc(&__chan->record_disabled);		\
			WARN_ON(1);					\
		}							\
		_____ret;						\
	})

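/*
 * Usage sketch: CHAN_WARN_ON() takes either a struct channel pointer or a
 * struct channel_backend pointer plus the invariant being checked. When the
 * condition is non-zero, record_disabled is incremented on the owning
 * channel (disabling further record writes there) and a warning is emitted.
 * The condition names below are purely illustrative:
 *
 *	CHAN_WARN_ON(chan, cur_offset >= buf_size);
 */
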
#endif /* _LINUX_RING_BUFFER_FRONTEND_TYPES_H */