[ust.git] / libtracing / relay.h
/*
 * linux/include/linux/ltt-relay.h
 *
 * Copyright (C) 2002, 2003 - Tom Zanussi (zanussi@us.ibm.com), IBM Corp
 * Copyright (C) 1999, 2000, 2001, 2002 - Karim Yaghmour (karim@opersys.com)
 * Copyright (C) 2008 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 *
 * CONFIG_RELAY definitions and declarations
 */

#ifndef _LINUX_LTT_RELAY_H
#define _LINUX_LTT_RELAY_H

//ust// #include <linux/types.h>
//ust// #include <linux/sched.h>
//ust// #include <linux/timer.h>
//ust// #include <linux/wait.h>
//ust// #include <linux/list.h>
//ust// #include <linux/fs.h>
//ust// #include <linux/poll.h>
//ust// #include <linux/kref.h>
//ust// #include <linux/mm.h>
//ust// #include <linux/ltt-core.h>
#include "kref.h"
#include "list.h"

/* Needs a _much_ better name... */
#define FIX_SIZE(x) ((((x) - 1) & PAGE_MASK) + PAGE_SIZE)
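
/*
 * For example, assuming 4 KiB pages (PAGE_SIZE == 4096, PAGE_MASK == ~4095UL),
 * FIX_SIZE() rounds a byte count up to the next page-size multiple:
 * FIX_SIZE(1) == 4096, FIX_SIZE(4096) == 4096, FIX_SIZE(4097) == 8192.
 */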

/*
 * Tracks changes to rchan/rchan_buf structs
 */
#define LTT_RELAY_CHANNEL_VERSION	8

struct rchan_buf;

struct buf_page {
	struct page *page;
	struct rchan_buf *buf;		/* buffer the page belongs to */
	size_t offset;			/* page offset in the buffer */
	struct list_head list;		/* buffer linked list */
};

/*
 * Per-cpu relay channel buffer
 */
struct rchan_buf {
	struct rchan *chan;		/* associated channel */
//ust//	wait_queue_head_t read_wait;	/* reader wait queue */
//ust//	struct timer_list timer;	/* reader wake-up timer */
//ust//	struct dentry *dentry;		/* channel file dentry */
	struct kref kref;		/* channel buffer refcount */
//ust//	struct list_head pages;		/* list of buffer pages */
	void *buf_data;			//ust//
	size_t buf_size;
//ust//	struct buf_page *wpage;		/* current write page (cache) */
//ust//	struct buf_page *hpage[2];	/* current subbuf header page (cache) */
//ust//	struct buf_page *rpage;		/* current subbuf read page (cache) */
//ust//	unsigned int page_count;	/* number of current buffer pages */
	unsigned int finalized;		/* buffer has been finalized */
//ust//	unsigned int cpu;		/* this buf's cpu */
} ____cacheline_aligned;

/*
 * Relay channel data structure
 */
struct rchan {
	u32 version;			/* the version of this struct */
	size_t subbuf_size;		/* sub-buffer size */
	size_t n_subbufs;		/* number of sub-buffers per buffer */
	size_t alloc_size;		/* total buffer size allocated */
	struct rchan_callbacks *cb;	/* client callbacks */
	struct kref kref;		/* channel refcount */
	void *private_data;		/* for user-defined data */
//ust//	struct rchan_buf *buf[NR_CPUS];	/* per-cpu channel buffers */
	struct rchan_buf *buf;
	struct list_head list;		/* for channel list */
	struct dentry *parent;		/* parent dentry passed to open */
	int subbuf_size_order;		/* order of sub-buffer size */
//ust//	char base_filename[NAME_MAX];	/* saved base filename */
};

/*
 * Relay channel client callbacks
 */
struct rchan_callbacks {
	/*
	 * subbuf_start - called on buffer-switch to a new sub-buffer
	 * @buf: the channel buffer containing the new sub-buffer
	 * @subbuf: the start of the new sub-buffer
	 * @prev_subbuf: the start of the previous sub-buffer
	 * @prev_padding: unused space at the end of previous sub-buffer
	 *
	 * The client should return 1 to continue logging, 0 to stop
	 * logging.
	 *
	 * NOTE: subbuf_start will also be invoked when the buffer is
	 * created, so that the first sub-buffer can be initialized
	 * if necessary. In this case, prev_subbuf will be NULL.
	 *
	 * NOTE: the client can reserve bytes at the beginning of the new
	 * sub-buffer by calling subbuf_start_reserve() in this callback.
	 */
	int (*subbuf_start) (struct rchan_buf *buf,
			     void *subbuf,
			     void *prev_subbuf,
			     size_t prev_padding);
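
	/*
	 * A minimal sketch of a client subbuf_start callback (the name
	 * "example_subbuf_start" is hypothetical, not part of this header):
	 * it reserves nothing in the new sub-buffer and always asks the
	 * relay code to continue logging, whether or not a previous
	 * sub-buffer exists.
	 *
	 *	static int example_subbuf_start(struct rchan_buf *buf,
	 *					void *subbuf,
	 *					void *prev_subbuf,
	 *					size_t prev_padding)
	 *	{
	 *		return 1;
	 *	}
	 */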

	/*
	 * create_buf_file - create file to represent a relay channel buffer
	 * @filename: the name of the file to create
	 * @parent: the parent of the file to create
	 * @mode: the mode of the file to create
	 * @buf: the channel buffer
	 *
	 * Called during relay_open(), once for each per-cpu buffer,
	 * to allow the client to create a file to be used to
	 * represent the corresponding channel buffer. If the file is
	 * created outside of relay, the parent must also exist in
	 * that filesystem.
	 *
	 * The callback should return the dentry of the file created
	 * to represent the relay buffer.
	 *
	 * Setting the is_global outparam to a non-zero value will
	 * cause relay_open() to create a single global buffer rather
	 * than the default set of per-cpu buffers.
	 *
	 * See Documentation/filesystems/relayfs.txt for more info.
	 */
	struct dentry *(*create_buf_file)(const char *filename,
					  struct dentry *parent,
					  int mode,
					  struct rchan_buf *buf);
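
	/*
	 * A sketch of the expected shape of a client implementation (the
	 * names "example_create_buf_file" and "example_make_file" are
	 * hypothetical, not part of this header): the client creates
	 * whatever file it wants to represent the buffer, by whatever
	 * means it chooses, and returns that file's dentry as described
	 * above.
	 *
	 *	static struct dentry *example_create_buf_file(const char *filename,
	 *						      struct dentry *parent,
	 *						      int mode,
	 *						      struct rchan_buf *buf)
	 *	{
	 *		return example_make_file(filename, parent, mode, buf);
	 *	}
	 */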

	/*
	 * remove_buf_file - remove file representing a relay channel buffer
	 * @dentry: the dentry of the file to remove
	 *
	 * Called during relay_close(), once for each per-cpu buffer,
	 * to allow the client to remove a file used to represent a
	 * channel buffer.
	 *
	 * The callback should return 0 if successful, negative if not.
	 */
//ust//	int (*remove_buf_file)(struct rchan_buf *buf);
};

extern struct buf_page *ltt_relay_find_prev_page(struct rchan_buf *buf,
		struct buf_page *page, size_t offset, ssize_t diff_offset);

extern struct buf_page *ltt_relay_find_next_page(struct rchan_buf *buf,
		struct buf_page *page, size_t offset, ssize_t diff_offset);

extern void _ltt_relay_write(struct rchan_buf *buf, size_t offset,
		const void *src, size_t len, ssize_t cpy);

extern int ltt_relay_read(struct rchan_buf *buf, size_t offset,
		void *dest, size_t len);

extern struct buf_page *ltt_relay_read_get_page(struct rchan_buf *buf,
		size_t offset);
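
/*
 * Example sketch of the read side (the names "buf", "read_offset" and
 * "struct example_event" are hypothetical): copy an event out of the
 * channel buffer into a local structure with ltt_relay_read(), which is
 * declared above to read len bytes at a given buffer offset into dest.
 *
 *	struct example_event ev;
 *
 *	ltt_relay_read(buf, read_offset, &ev, sizeof(ev));
 */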

/*
 * Return the address where a given offset is located.
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's never on a page boundary, it's safe to write directly to this address,
 * as long as the write is never bigger than a page size.
 */
extern void *ltt_relay_offset_address(struct rchan_buf *buf,
		size_t offset);
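
/*
 * Example sketch (the type "struct example_subbuf_header", its field and
 * the variables are hypothetical): since a sub-buffer header sits at the
 * start of a sub-buffer and never straddles a page boundary, its fields
 * can be updated in place through the returned address.
 *
 *	struct example_subbuf_header *header;
 *
 *	header = ltt_relay_offset_address(buf,
 *			subbuf_index * buf->chan->subbuf_size);
 *	header->events_count++;
 */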

/*
 * Find the page containing "offset". Cache it if it is after the currently
 * cached page.
 */
static inline struct buf_page *ltt_relay_cache_page(struct rchan_buf *buf,
		struct buf_page **page_cache,
		struct buf_page *page, size_t offset)
{
	ssize_t diff_offset;
	ssize_t half_buf_size = buf->chan->alloc_size >> 1;

	/*
	 * Make sure this is the page we want to write into. The current
	 * page is changed concurrently by other writers. The [wrh]page
	 * pointers are used as caches remembering the last page written
	 * to, read from, or looked up for a header address. No
	 * synchronization is done; we may have to find the previous page
	 * if a nested write occurred. Finding the right page is done by
	 * comparing the dest_offset with the buf_page offsets.
	 * When at the exact opposite side of the buffer, bias towards the
	 * forward search because its result will be cached.
	 */

	diff_offset = (ssize_t)offset - (ssize_t)page->offset;
	if (diff_offset <= -(ssize_t)half_buf_size)
		diff_offset += buf->chan->alloc_size;
	else if (diff_offset > half_buf_size)
		diff_offset -= buf->chan->alloc_size;

	if (unlikely(diff_offset >= (ssize_t)PAGE_SIZE)) {
		page = ltt_relay_find_next_page(buf, page, offset, diff_offset);
		*page_cache = page;
	} else if (unlikely(diff_offset < 0)) {
		page = ltt_relay_find_prev_page(buf, page, offset, diff_offset);
	}
	return page;
}
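
/*
 * A worked example of the wrap-around distance computation above, assuming
 * 4 KiB pages and an 8-page buffer (alloc_size == 32768, so half_buf_size ==
 * 16384): with the cached page at offset 28672 and a target offset of 0,
 * diff_offset starts at -28672, gets 32768 added because it is below
 * -half_buf_size, and ends up at 4096. That is >= PAGE_SIZE, so the lookup
 * proceeds forward across the buffer wrap and the resulting page is cached.
 */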

//ust// #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
static inline void ltt_relay_do_copy(void *dest, const void *src, size_t len)
{
	switch (len) {
	case 0:	break;
	case 1:	*(u8 *)dest = *(const u8 *)src;
		break;
	case 2:	*(u16 *)dest = *(const u16 *)src;
		break;
	case 4:	*(u32 *)dest = *(const u32 *)src;
		break;
//ust// #if (BITS_PER_LONG == 64)
	case 8:	*(u64 *)dest = *(const u64 *)src;
		break;
//ust// #endif
	default:
		memcpy(dest, src, len);
	}
}
//ust// #else
//ust// /*
//ust//  * Returns whether the dest and src addresses are aligned on
//ust//  * min(sizeof(void *), len). Call this with statically known len for efficiency.
//ust//  */
//ust// static inline int addr_aligned(const void *dest, const void *src, size_t len)
//ust// {
//ust// 	if (ltt_align((size_t)dest, len))
//ust// 		return 0;
//ust// 	if (ltt_align((size_t)src, len))
//ust// 		return 0;
//ust// 	return 1;
//ust// }
//ust//
//ust// static inline void ltt_relay_do_copy(void *dest, const void *src, size_t len)
//ust// {
//ust// 	switch (len) {
//ust// 	case 0:	break;
//ust// 	case 1:	*(u8 *)dest = *(const u8 *)src;
//ust// 		break;
//ust// 	case 2:	if (unlikely(!addr_aligned(dest, src, 2)))
//ust// 			goto memcpy_fallback;
//ust// 		*(u16 *)dest = *(const u16 *)src;
//ust// 		break;
//ust// 	case 4:	if (unlikely(!addr_aligned(dest, src, 4)))
//ust// 			goto memcpy_fallback;
//ust// 		*(u32 *)dest = *(const u32 *)src;
//ust// 		break;
//ust// #if (BITS_PER_LONG == 64)
//ust// 	case 8:	if (unlikely(!addr_aligned(dest, src, 8)))
//ust// 			goto memcpy_fallback;
//ust// 		*(u64 *)dest = *(const u64 *)src;
//ust// 		break;
//ust// #endif
//ust// 	default:
//ust// 		goto memcpy_fallback;
//ust// 	}
//ust// 	return;
//ust// memcpy_fallback:
//ust// 	memcpy(dest, src, len);
//ust// }
//ust// #endif

static inline int ltt_relay_write(struct rchan_buf *buf, size_t offset,
		const void *src, size_t len)
{
//ust//	struct buf_page *page;
//ust//	ssize_t pagecpy;
//ust//
//ust//	offset &= buf->chan->alloc_size - 1;
//ust//	page = buf->wpage;
//ust//
//ust//	page = ltt_relay_cache_page(buf, &buf->wpage, page, offset);
//ust//	pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
//ust//	ltt_relay_do_copy(page_address(page->page)
//ust//			  + (offset & ~PAGE_MASK), src, pagecpy);
//ust//
//ust//	if (unlikely(len != pagecpy))
//ust//		_ltt_relay_write(buf, offset, src, len, page, pagecpy);
//ust//	return len;

	size_t cpy;

	cpy = min_t(size_t, len, buf->buf_size - offset);
	ltt_relay_do_copy(buf->buf_data + offset, src, cpy);

	if (unlikely(len != cpy))
		_ltt_relay_write(buf, offset, src, len, cpy);
	return len;
}
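
/*
 * Example sketch (the variables "buf", "write_offset" and the type
 * "struct example_event" are hypothetical, and the offset is assumed to
 * have already been reserved in the buffer by the caller): copy an event
 * into the channel buffer; _ltt_relay_write() finishes the copy when the
 * event does not fit contiguously before the end of the buffer.
 *
 *	struct example_event ev = { 0 };
 *
 *	ltt_relay_write(buf, write_offset, &ev, sizeof(ev));
 */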

/*
 * CONFIG_LTT_RELAY kernel API, ltt/ltt-relay-alloc.c
 */

struct rchan *ltt_relay_open(const char *base_filename,
			     struct dentry *parent,
			     size_t subbuf_size,
			     size_t n_subbufs,
			     void *private_data);
extern void ltt_relay_close(struct rchan *chan);
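
/*
 * Example sketch (the channel name and sizes are hypothetical, and the
 * NULL-on-failure check is an assumption about the allocator): open a
 * channel of 8 sub-buffers of 4 KiB each with no parent dentry and no
 * private data, use it, then close it.
 *
 *	struct rchan *chan;
 *
 *	chan = ltt_relay_open("example_chan", NULL, 4096, 8, NULL);
 *	if (!chan)
 *		goto error;
 *	...
 *	ltt_relay_close(chan);
 */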

/*
 * exported ltt_relay file operations, ltt/ltt-relay-alloc.c
 */
extern const struct file_operations ltt_relay_file_operations;


/* LTTng lockless logging buffer info */
struct ltt_channel_buf_struct {
	/* First 32 bytes cache-hot cacheline */
	local_t offset;			/* Current offset in the buffer */
	local_t *commit_count;		/* Commit count per sub-buffer */
	atomic_long_t consumed;		/*
					 * Current offset in the buffer
					 * standard atomic access (shared)
					 */
	unsigned long last_tsc;		/*
					 * Last timestamp written in the buffer.
					 */
	/* End of first 32 bytes cacheline */
	atomic_long_t active_readers;	/*
					 * Active readers count
					 * standard atomic access (shared)
					 */
	local_t events_lost;
	local_t corrupted_subbuffers;
	spinlock_t full_lock;		/*
					 * Buffer full condition spinlock, only
					 * for userspace tracing blocking mode
					 * synchronization with reader.
					 */
//ust//	wait_queue_head_t write_wait;	/*
//ust//					 * Wait queue for blocking user space
//ust//					 * writers
//ust//					 */
	atomic_t wakeup_readers;	/* Boolean: wake up waiting readers? */
} ____cacheline_aligned;

int ltt_do_get_subbuf(struct rchan_buf *buf,
		struct ltt_channel_buf_struct *ltt_buf, long *pconsumed_old);

int ltt_do_put_subbuf(struct rchan_buf *buf,
		struct ltt_channel_buf_struct *ltt_buf, u32 uconsumed_old);
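
/*
 * Example sketch of one consumer iteration (the variables "buf", "ltt_buf"
 * and "consumed_old" are hypothetical, and the negative-return-on-failure
 * convention is an assumption): take a sub-buffer for reading, consume it,
 * then hand it back with the consumed count obtained from the get call.
 *
 *	long consumed_old;
 *
 *	if (ltt_do_get_subbuf(buf, ltt_buf, &consumed_old) < 0)
 *		return;
 *	... read the sub-buffer starting at "consumed_old" ...
 *	ltt_do_put_subbuf(buf, ltt_buf, consumed_old);
 */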


#endif /* _LINUX_LTT_RELAY_H */