libtracing/relay.h (ust.git)
/*
 * linux/include/linux/ltt-relay.h
 *
 * Copyright (C) 2002, 2003 - Tom Zanussi (zanussi@us.ibm.com), IBM Corp
 * Copyright (C) 1999, 2000, 2001, 2002 - Karim Yaghmour (karim@opersys.com)
 * Copyright (C) 2008 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 *
 * CONFIG_RELAY definitions and declarations
 */

#ifndef _LINUX_LTT_RELAY_H
#define _LINUX_LTT_RELAY_H

//ust// #include <linux/types.h>
//ust// #include <linux/sched.h>
//ust// #include <linux/timer.h>
//ust// #include <linux/wait.h>
//ust// #include <linux/list.h>
//ust// #include <linux/fs.h>
//ust// #include <linux/poll.h>
//ust// #include <linux/kref.h>
//ust// #include <linux/mm.h>
//ust// #include <linux/ltt-core.h>
#include "kref.h"
#include "list.h"

/* Needs a _much_ better name... */
#define FIX_SIZE(x) ((((x) - 1) & PAGE_MASK) + PAGE_SIZE)
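/*
 * Worked example (illustrative only, assuming the usual definition
 * PAGE_MASK == ~(PAGE_SIZE - 1) and 4 KiB pages): FIX_SIZE() rounds a
 * requested size up to a whole number of pages.
 *
 *	FIX_SIZE(1)    == 4096
 *	FIX_SIZE(4096) == 4096
 *	FIX_SIZE(4097) == 8192
 */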

/*
 * Tracks changes to rchan/rchan_buf structs
 */
#define LTT_RELAY_CHANNEL_VERSION 8

struct rchan_buf;

struct buf_page {
	struct page *page;
	struct rchan_buf *buf;	/* buffer the page belongs to */
	size_t offset;		/* page offset in the buffer */
	struct list_head list;	/* buffer linked list */
};

/*
 * Per-cpu relay channel buffer
 */
struct rchan_buf {
	struct rchan *chan;		/* associated channel */
//ust//	wait_queue_head_t read_wait;	/* reader wait queue */
//ust//	struct timer_list timer;	/* reader wake-up timer */
//ust//	struct dentry *dentry;		/* channel file dentry */
	struct kref kref;		/* channel buffer refcount */
//ust//	struct list_head pages;		/* list of buffer pages */
	void *buf_data;			//ust//
	size_t buf_size;
//ust//	struct buf_page *wpage;		/* current write page (cache) */
//ust//	struct buf_page *hpage[2];	/* current subbuf header page (cache) */
//ust//	struct buf_page *rpage;		/* current subbuf read page (cache) */
//ust//	unsigned int page_count;	/* number of current buffer pages */
	unsigned int finalized;		/* buffer has been finalized */
//ust//	unsigned int cpu;		/* this buf's cpu */
	int shmid;
} ____cacheline_aligned;

/*
 * Relay channel data structure
 */
struct rchan {
	u32 version;			/* the version of this struct */
	size_t subbuf_size;		/* sub-buffer size */
	size_t n_subbufs;		/* number of sub-buffers per buffer */
	size_t alloc_size;		/* total buffer size allocated */
	struct rchan_callbacks *cb;	/* client callbacks */
	struct kref kref;		/* channel refcount */
	void *private_data;		/* for user-defined data */
//ust//	struct rchan_buf *buf[NR_CPUS];	/* per-cpu channel buffers */
	struct rchan_buf *buf;
	struct list_head list;		/* for channel list */
	struct dentry *parent;		/* parent dentry passed to open */
	int subbuf_size_order;		/* order of sub-buffer size */
//ust//	char base_filename[NAME_MAX];	/* saved base filename */
};

/*
 * Relay channel client callbacks
 */
struct rchan_callbacks {
	/*
	 * subbuf_start - called on buffer-switch to a new sub-buffer
	 * @buf: the channel buffer containing the new sub-buffer
	 * @subbuf: the start of the new sub-buffer
	 * @prev_subbuf: the start of the previous sub-buffer
	 * @prev_padding: unused space at the end of previous sub-buffer
	 *
	 * The client should return 1 to continue logging, 0 to stop
	 * logging.
	 *
	 * NOTE: subbuf_start will also be invoked when the buffer is
	 *       created, so that the first sub-buffer can be initialized
	 *       if necessary. In this case, prev_subbuf will be NULL.
	 *
	 * NOTE: the client can reserve bytes at the beginning of the new
	 *       sub-buffer by calling subbuf_start_reserve() in this callback.
	 */
	int (*subbuf_start) (struct rchan_buf *buf,
			     void *subbuf,
			     void *prev_subbuf,
			     size_t prev_padding);

	/*
	 * create_buf_file - create file to represent a relay channel buffer
	 * @filename: the name of the file to create
	 * @parent: the parent of the file to create
	 * @mode: the mode of the file to create
	 * @buf: the channel buffer
	 *
	 * Called during relay_open(), once for each per-cpu buffer,
	 * to allow the client to create a file to be used to
	 * represent the corresponding channel buffer. If the file is
	 * created outside of relay, the parent must also exist in
	 * that filesystem.
	 *
	 * The callback should return the dentry of the file created
	 * to represent the relay buffer.
	 *
	 * Setting the is_global outparam to a non-zero value will
	 * cause relay_open() to create a single global buffer rather
	 * than the default set of per-cpu buffers.
	 *
	 * See Documentation/filesystems/relayfs.txt for more info.
	 */
	struct dentry *(*create_buf_file)(const char *filename,
					  struct dentry *parent,
					  int mode,
					  struct rchan_buf *buf);

	/*
	 * remove_buf_file - remove file representing a relay channel buffer
	 * @dentry: the dentry of the file to remove
	 *
	 * Called during relay_close(), once for each per-cpu buffer,
	 * to allow the client to remove a file used to represent a
	 * channel buffer.
	 *
	 * The callback should return 0 if successful, negative if not.
	 */
//ust//	int (*remove_buf_file)(struct rchan_buf *buf);
};
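/*
 * Illustrative sketch only (not part of this header): a minimal client
 * could fill in rchan_callbacks along these lines. The names
 * example_subbuf_start and example_callbacks are hypothetical.
 *
 *	static int example_subbuf_start(struct rchan_buf *buf, void *subbuf,
 *					void *prev_subbuf, size_t prev_padding)
 *	{
 *		return 1;	// keep logging into the new sub-buffer
 *	}
 *
 *	static struct rchan_callbacks example_callbacks = {
 *		.subbuf_start = example_subbuf_start,
 *	};
 */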

extern struct buf_page *ltt_relay_find_prev_page(struct rchan_buf *buf,
	struct buf_page *page, size_t offset, ssize_t diff_offset);

extern struct buf_page *ltt_relay_find_next_page(struct rchan_buf *buf,
	struct buf_page *page, size_t offset, ssize_t diff_offset);

extern void _ltt_relay_write(struct rchan_buf *buf, size_t offset,
	const void *src, size_t len, ssize_t cpy);

extern int ltt_relay_read(struct rchan_buf *buf, size_t offset,
	void *dest, size_t len);

extern struct buf_page *ltt_relay_read_get_page(struct rchan_buf *buf,
	size_t offset);

/*
 * Return the address where a given offset is located.
 * Should be used to get the current sub-buffer header pointer. Since we
 * know the header never sits across a page boundary, it is safe to write
 * directly to this address, as long as the write is never bigger than a
 * page size.
 */
extern void *ltt_relay_offset_address(struct rchan_buf *buf,
	size_t offset);
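/*
 * Illustrative usage (hypothetical names: "struct my_subbuf_header",
 * "subbuf_offset" and "tsc" are not part of this header): a sub-buffer
 * header can be written through the returned pointer directly.
 *
 *	struct my_subbuf_header *hdr =
 *		ltt_relay_offset_address(buf, subbuf_offset);
 *	hdr->begin_tsc = tsc;
 */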

/*
 * Find the page containing "offset". Cache it if it is after the currently
 * cached page.
 */
static inline struct buf_page *ltt_relay_cache_page(struct rchan_buf *buf,
		struct buf_page **page_cache,
		struct buf_page *page, size_t offset)
{
	ssize_t diff_offset;
	ssize_t half_buf_size = buf->chan->alloc_size >> 1;

	/*
	 * Make sure this is the page we want to write into. The current
	 * page is changed concurrently by other writers. [wrh]page are
	 * used as a cache remembering the last page written to, read or
	 * looked up for the header address. No synchronization; we may
	 * have to find the previous page if a nested write occurred.
	 * Finding the right page is done by comparing the dest_offset
	 * with the buf_page offsets.
	 * When at the exact opposite of the buffer, bias towards forward
	 * search because it will be cached.
	 */

	diff_offset = (ssize_t)offset - (ssize_t)page->offset;
	if (diff_offset <= -(ssize_t)half_buf_size)
		diff_offset += buf->chan->alloc_size;
	else if (diff_offset > half_buf_size)
		diff_offset -= buf->chan->alloc_size;

	if (unlikely(diff_offset >= (ssize_t)PAGE_SIZE)) {
		page = ltt_relay_find_next_page(buf, page, offset, diff_offset);
		*page_cache = page;
	} else if (unlikely(diff_offset < 0)) {
		page = ltt_relay_find_prev_page(buf, page, offset, diff_offset);
	}
	return page;
}
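/*
 * Worked example (illustrative numbers): with alloc_size == 64 KiB,
 * half_buf_size == 32 KiB. If the cached page starts at offset 60 KiB
 * and the requested offset is 1 KiB, the raw difference is -59 KiB,
 * which is <= -half_buf_size, so alloc_size is added back and
 * diff_offset becomes +5 KiB. Since that is >= PAGE_SIZE, the lookup
 * searches forward across the wrap-around (and caches the result)
 * instead of walking almost the whole buffer backwards.
 */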

//ust// #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
static inline void ltt_relay_do_copy(void *dest, const void *src, size_t len)
{
	switch (len) {
	case 0:	break;
	case 1:	*(u8 *)dest = *(const u8 *)src;
		break;
	case 2:	*(u16 *)dest = *(const u16 *)src;
		break;
	case 4:	*(u32 *)dest = *(const u32 *)src;
		break;
//ust// #if (BITS_PER_LONG == 64)
	case 8:	*(u64 *)dest = *(const u64 *)src;
		break;
//ust// #endif
	default:
		memcpy(dest, src, len);
	}
}
//ust// #else
//ust// /*
//ust//  * Returns whether the dest and src addresses are aligned on
//ust//  * min(sizeof(void *), len). Call this with statically known len for efficiency.
//ust//  */
//ust// static inline int addr_aligned(const void *dest, const void *src, size_t len)
//ust// {
//ust// 	if (ltt_align((size_t)dest, len))
//ust// 		return 0;
//ust// 	if (ltt_align((size_t)src, len))
//ust// 		return 0;
//ust// 	return 1;
//ust// }
//ust//
//ust// static inline void ltt_relay_do_copy(void *dest, const void *src, size_t len)
//ust// {
//ust// 	switch (len) {
//ust// 	case 0:	break;
//ust// 	case 1:	*(u8 *)dest = *(const u8 *)src;
//ust// 		break;
//ust// 	case 2:	if (unlikely(!addr_aligned(dest, src, 2)))
//ust// 			goto memcpy_fallback;
//ust// 		*(u16 *)dest = *(const u16 *)src;
//ust// 		break;
//ust// 	case 4:	if (unlikely(!addr_aligned(dest, src, 4)))
//ust// 			goto memcpy_fallback;
//ust// 		*(u32 *)dest = *(const u32 *)src;
//ust// 		break;
//ust// #if (BITS_PER_LONG == 64)
//ust// 	case 8:	if (unlikely(!addr_aligned(dest, src, 8)))
//ust// 			goto memcpy_fallback;
//ust// 		*(u64 *)dest = *(const u64 *)src;
//ust// 		break;
//ust// #endif
//ust// 	default:
//ust// 		goto memcpy_fallback;
//ust// 	}
//ust// 	return;
//ust// memcpy_fallback:
//ust// 	memcpy(dest, src, len);
//ust// }
//ust// #endif

static inline int ltt_relay_write(struct rchan_buf *buf, size_t offset,
	const void *src, size_t len)
{
//ust//	struct buf_page *page;
//ust//	ssize_t pagecpy;
//ust//
//ust//	offset &= buf->chan->alloc_size - 1;
//ust//	page = buf->wpage;
//ust//
//ust//	page = ltt_relay_cache_page(buf, &buf->wpage, page, offset);
//ust//	pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
//ust//	ltt_relay_do_copy(page_address(page->page)
//ust//		+ (offset & ~PAGE_MASK), src, pagecpy);
//ust//
//ust//	if (unlikely(len != pagecpy))
//ust//		_ltt_relay_write(buf, offset, src, len, page, pagecpy);
//ust//	return len;

	size_t cpy;

	cpy = min_t(size_t, len, buf->buf_size - offset);
	ltt_relay_do_copy(buf->buf_data + offset, src, cpy);

	if (unlikely(len != cpy))
		_ltt_relay_write(buf, offset, src, len, cpy);
	return len;
}
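/*
 * Illustrative usage (hypothetical names; "ev" and "reserved_offset"
 * would normally come from the slot-reservation path, which is not
 * declared in this header): copy an event payload into the channel
 * buffer at a previously reserved offset.
 *
 *	struct my_event ev = { 0 };
 *	ltt_relay_write(buf, reserved_offset, &ev, sizeof(ev));
 */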

/*
 * CONFIG_LTT_RELAY kernel API, ltt/ltt-relay-alloc.c
 */

struct rchan *ltt_relay_open(const char *base_filename,
			 struct dentry *parent,
			 size_t subbuf_size,
			 size_t n_subbufs,
			 void *private_data);
extern void ltt_relay_close(struct rchan *chan);
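/*
 * Illustrative usage (values are assumptions, not requirements): open a
 * channel of 8 sub-buffers of 4 KiB each, then tear it down. The parent
 * dentry is passed as NULL here on the assumption that the userspace
 * port does not use it.
 *
 *	struct rchan *chan = ltt_relay_open("mychan", NULL, 4096, 8, NULL);
 *	if (chan)
 *		ltt_relay_close(chan);
 */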

/*
 * exported ltt_relay file operations, ltt/ltt-relay-alloc.c
 */
extern const struct file_operations ltt_relay_file_operations;


/* LTTng lockless logging buffer info */
struct ltt_channel_buf_struct {
	/* First 32 bytes cache-hot cacheline */
	local_t offset;			/* Current offset in the buffer */
	local_t *commit_count;		/* Commit count per sub-buffer */
	atomic_long_t consumed;		/*
					 * Current offset in the buffer
					 * standard atomic access (shared)
					 */
	unsigned long last_tsc;		/*
					 * Last timestamp written in the buffer.
					 */
	/* End of first 32 bytes cacheline */
	atomic_long_t active_readers;	/*
					 * Active readers count
					 * standard atomic access (shared)
					 */
	local_t events_lost;
	local_t corrupted_subbuffers;
	spinlock_t full_lock;		/*
					 * buffer full condition spinlock, only
					 * for userspace tracing blocking mode
					 * synchronization with reader.
					 */
//ust//	wait_queue_head_t write_wait;	/*
//ust//					 * Wait queue for blocking user space
//ust//					 * writers
//ust//					 */
	atomic_t wakeup_readers;	/* Boolean: wakeup readers waiting? */
} ____cacheline_aligned;

int ltt_do_get_subbuf(struct rchan_buf *buf, struct ltt_channel_buf_struct *ltt_buf, long *pconsumed_old);

int ltt_do_put_subbuf(struct rchan_buf *buf, struct ltt_channel_buf_struct *ltt_buf, u32 uconsumed_old);

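/*
 * Consumer-side sketch (illustrative only; the 0-on-success return
 * convention is an assumption, not documented in this header):
 *
 *	long consumed_old;
 *
 *	if (!ltt_do_get_subbuf(buf, ltt_buf, &consumed_old)) {
 *		// ... read the acquired sub-buffer from the channel ...
 *		ltt_do_put_subbuf(buf, ltt_buf, (u32)consumed_old);
 *	}
 */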
#endif /* _LINUX_LTT_RELAY_H */