ust: add kernel files: relay-alloc.c (ltt-relay-alloc.c), relay.c (ltt-relay.c)
[ust.git] / libtracing / relay-alloc.c
1 /*
2 * Public API and common code for kernel->userspace relay file support.
3 *
4 * Copyright (C) 2002-2005 - Tom Zanussi (zanussi@us.ibm.com), IBM Corp
5 * Copyright (C) 1999-2005 - Karim Yaghmour (karim@opersys.com)
6 * Copyright (C) 2008 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
7 *
8 * Moved to kernel/relay.c by Paul Mundt, 2006.
9 * November 2006 - CPU hotplug support by Mathieu Desnoyers
10 * (mathieu.desnoyers@polymtl.ca)
11 *
12 * This file is released under the GPL.
13 */
14 #include <linux/errno.h>
15 #include <linux/stddef.h>
16 #include <linux/slab.h>
17 #include <linux/module.h>
18 #include <linux/string.h>
19 #include <linux/ltt-relay.h>
20 #include <linux/vmalloc.h>
21 #include <linux/mm.h>
22 #include <linux/cpu.h>
23 #include <linux/splice.h>
24 #include <linux/bitops.h>
25
/* list of open channels, for cpu hotplug */
static DEFINE_MUTEX(relay_channels_mutex);	/* protects relay_channels */
static LIST_HEAD(relay_channels);		/* all open rchan, linked via rchan->list */
29
30 /**
31 * relay_alloc_buf - allocate a channel buffer
32 * @buf: the buffer struct
33 * @size: total size of the buffer
34 */
35 static int relay_alloc_buf(struct rchan_buf *buf, size_t *size)
36 {
37 unsigned int i, n_pages;
38 struct buf_page *buf_page, *n;
39
40 *size = PAGE_ALIGN(*size);
41 n_pages = *size >> PAGE_SHIFT;
42
43 INIT_LIST_HEAD(&buf->pages);
44
45 for (i = 0; i < n_pages; i++) {
46 buf_page = kmalloc_node(sizeof(*buf_page), GFP_KERNEL,
47 cpu_to_node(buf->cpu));
48 if (unlikely(!buf_page))
49 goto depopulate;
50 buf_page->page = alloc_pages_node(cpu_to_node(buf->cpu),
51 GFP_KERNEL | __GFP_ZERO, 0);
52 if (unlikely(!buf_page->page)) {
53 kfree(buf_page);
54 goto depopulate;
55 }
56 list_add_tail(&buf_page->list, &buf->pages);
57 buf_page->offset = (size_t)i << PAGE_SHIFT;
58 buf_page->buf = buf;
59 set_page_private(buf_page->page, (unsigned long)buf_page);
60 if (i == 0) {
61 buf->wpage = buf_page;
62 buf->hpage[0] = buf_page;
63 buf->hpage[1] = buf_page;
64 buf->rpage = buf_page;
65 }
66 }
67 buf->page_count = n_pages;
68 return 0;
69
70 depopulate:
71 list_for_each_entry_safe(buf_page, n, &buf->pages, list) {
72 list_del_init(&buf_page->list);
73 __free_page(buf_page->page);
74 kfree(buf_page);
75 }
76 return -ENOMEM;
77 }
78
79 /**
80 * relay_create_buf - allocate and initialize a channel buffer
81 * @chan: the relay channel
82 * @cpu: cpu the buffer belongs to
83 *
84 * Returns channel buffer if successful, %NULL otherwise.
85 */
86 static struct rchan_buf *relay_create_buf(struct rchan *chan, int cpu)
87 {
88 int ret;
89 struct rchan_buf *buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
90 if (!buf)
91 return NULL;
92
93 buf->cpu = cpu;
94 ret = relay_alloc_buf(buf, &chan->alloc_size);
95 if (ret)
96 goto free_buf;
97
98 buf->chan = chan;
99 kref_get(&buf->chan->kref);
100 return buf;
101
102 free_buf:
103 kfree(buf);
104 return NULL;
105 }
106
/**
 * relay_destroy_channel - free the channel struct
 * @kref: target kernel reference that contains the relay channel
 *
 * Should only be called from kref_put(). Runs when the last reference
 * (each per-cpu buffer holds one, plus the opener's) is dropped.
 */
static void relay_destroy_channel(struct kref *kref)
{
	struct rchan *chan = container_of(kref, struct rchan, kref);
	kfree(chan);
}
118
119 /**
120 * relay_destroy_buf - destroy an rchan_buf struct and associated buffer
121 * @buf: the buffer struct
122 */
123 static void relay_destroy_buf(struct rchan_buf *buf)
124 {
125 struct rchan *chan = buf->chan;
126 struct buf_page *buf_page, *n;
127
128 list_for_each_entry_safe(buf_page, n, &buf->pages, list) {
129 list_del_init(&buf_page->list);
130 __free_page(buf_page->page);
131 kfree(buf_page);
132 }
133 chan->buf[buf->cpu] = NULL;
134 kfree(buf);
135 kref_put(&chan->kref, relay_destroy_channel);
136 }
137
/**
 * relay_remove_buf - remove a channel buffer
 * @kref: target kernel reference that contains the relay buffer
 *
 * Removes the file from the filesystem, which also frees the
 * rchan_buf_struct and the channel buffer. Should only be called from
 * kref_put().
 */
static void relay_remove_buf(struct kref *kref)
{
	struct rchan_buf *buf = container_of(kref, struct rchan_buf, kref);
	/* Client callback tears down the file created in relay_open_buf(). */
	buf->chan->cb->remove_buf_file(buf->dentry);
	relay_destroy_buf(buf);
}
152
153 /*
154 * High-level relay kernel API and associated functions.
155 */
156
157 /*
158 * rchan_callback implementations defining default channel behavior. Used
159 * in place of corresponding NULL values in client callback struct.
160 */
161
/*
 * create_buf_file_create() default callback. Does nothing: returning
 * %NULL makes relay_open_buf() fail, so clients that want buffer files
 * must supply their own create_buf_file callback.
 */
static struct dentry *create_buf_file_default_callback(const char *filename,
						       struct dentry *parent,
						       int mode,
						       struct rchan_buf *buf)
{
	return NULL;
}
172
/*
 * remove_buf_file() default callback. Performs no teardown and returns
 * -EINVAL, since the default create_buf_file() callback never creates
 * a file to remove.
 */
static int remove_buf_file_default_callback(struct dentry *dentry)
{
	return -EINVAL;
}
180
/* relay channel default callbacks, used when the client passes cb == NULL */
static struct rchan_callbacks default_channel_callbacks = {
	.create_buf_file = create_buf_file_default_callback,
	.remove_buf_file = remove_buf_file_default_callback,
};
186
187 /**
188 * wakeup_readers - wake up readers waiting on a channel
189 * @data: contains the channel buffer
190 *
191 * This is the timer function used to defer reader waking.
192 */
193 static void wakeup_readers(unsigned long data)
194 {
195 struct rchan_buf *buf = (struct rchan_buf *)data;
196 wake_up_interruptible(&buf->read_wait);
197 }
198
199 /**
200 * __relay_reset - reset a channel buffer
201 * @buf: the channel buffer
202 * @init: 1 if this is a first-time initialization
203 *
204 * See relay_reset() for description of effect.
205 */
206 static void __relay_reset(struct rchan_buf *buf, unsigned int init)
207 {
208 if (init) {
209 init_waitqueue_head(&buf->read_wait);
210 kref_init(&buf->kref);
211 setup_timer(&buf->timer, wakeup_readers, (unsigned long)buf);
212 } else
213 del_timer_sync(&buf->timer);
214
215 buf->finalized = 0;
216 }
217
218 /*
219 * relay_open_buf - create a new relay channel buffer
220 *
221 * used by relay_open() and CPU hotplug.
222 */
223 static struct rchan_buf *relay_open_buf(struct rchan *chan, unsigned int cpu)
224 {
225 struct rchan_buf *buf = NULL;
226 struct dentry *dentry;
227 char *tmpname;
228
229 tmpname = kzalloc(NAME_MAX + 1, GFP_KERNEL);
230 if (!tmpname)
231 goto end;
232 snprintf(tmpname, NAME_MAX, "%s%d", chan->base_filename, cpu);
233
234 buf = relay_create_buf(chan, cpu);
235 if (!buf)
236 goto free_name;
237
238 __relay_reset(buf, 1);
239
240 /* Create file in fs */
241 dentry = chan->cb->create_buf_file(tmpname, chan->parent, S_IRUSR,
242 buf);
243 if (!dentry)
244 goto free_buf;
245
246 buf->dentry = dentry;
247
248 goto free_name;
249
250 free_buf:
251 relay_destroy_buf(buf);
252 buf = NULL;
253 free_name:
254 kfree(tmpname);
255 end:
256 return buf;
257 }
258
/**
 * relay_close_buf - close a channel buffer
 * @buf: channel buffer
 *
 * Stops the deferred reader-wakeup timer and drops the buffer
 * reference. The rchan_buf struct and the channel buffer data are
 * freed automatically (via relay_remove_buf) when the last reference
 * is given up.
 */
static void relay_close_buf(struct rchan_buf *buf)
{
	del_timer_sync(&buf->timer);
	/* May trigger relay_remove_buf() if this was the last reference. */
	kref_put(&buf->kref, relay_remove_buf);
}
272
273 static void setup_callbacks(struct rchan *chan,
274 struct rchan_callbacks *cb)
275 {
276 if (!cb) {
277 chan->cb = &default_channel_callbacks;
278 return;
279 }
280
281 if (!cb->create_buf_file)
282 cb->create_buf_file = create_buf_file_default_callback;
283 if (!cb->remove_buf_file)
284 cb->remove_buf_file = remove_buf_file_default_callback;
285 chan->cb = cb;
286 }
287
288 /**
289 * relay_hotcpu_callback - CPU hotplug callback
290 * @nb: notifier block
291 * @action: hotplug action to take
292 * @hcpu: CPU number
293 *
294 * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
295 */
296 static int __cpuinit relay_hotcpu_callback(struct notifier_block *nb,
297 unsigned long action,
298 void *hcpu)
299 {
300 unsigned int hotcpu = (unsigned long)hcpu;
301 struct rchan *chan;
302
303 switch (action) {
304 case CPU_UP_PREPARE:
305 case CPU_UP_PREPARE_FROZEN:
306 mutex_lock(&relay_channels_mutex);
307 list_for_each_entry(chan, &relay_channels, list) {
308 if (chan->buf[hotcpu])
309 continue;
310 chan->buf[hotcpu] = relay_open_buf(chan, hotcpu);
311 if (!chan->buf[hotcpu]) {
312 printk(KERN_ERR
313 "relay_hotcpu_callback: cpu %d buffer "
314 "creation failed\n", hotcpu);
315 mutex_unlock(&relay_channels_mutex);
316 return NOTIFY_BAD;
317 }
318 }
319 mutex_unlock(&relay_channels_mutex);
320 break;
321 case CPU_DEAD:
322 case CPU_DEAD_FROZEN:
323 /* No need to flush the cpu : will be flushed upon
324 * final relay_flush() call. */
325 break;
326 }
327 return NOTIFY_OK;
328 }
329
330 /**
331 * ltt_relay_open - create a new relay channel
332 * @base_filename: base name of files to create
333 * @parent: dentry of parent directory, %NULL for root directory
334 * @subbuf_size: size of sub-buffers
335 * @n_subbufs: number of sub-buffers
336 * @cb: client callback functions
337 * @private_data: user-defined data
338 *
339 * Returns channel pointer if successful, %NULL otherwise.
340 *
341 * Creates a channel buffer for each cpu using the sizes and
342 * attributes specified. The created channel buffer files
343 * will be named base_filename0...base_filenameN-1. File
344 * permissions will be %S_IRUSR.
345 */
346 struct rchan *ltt_relay_open(const char *base_filename,
347 struct dentry *parent,
348 size_t subbuf_size,
349 size_t n_subbufs,
350 struct rchan_callbacks *cb,
351 void *private_data)
352 {
353 unsigned int i;
354 struct rchan *chan;
355 if (!base_filename)
356 return NULL;
357
358 if (!(subbuf_size && n_subbufs))
359 return NULL;
360
361 chan = kzalloc(sizeof(struct rchan), GFP_KERNEL);
362 if (!chan)
363 return NULL;
364
365 chan->version = LTT_RELAY_CHANNEL_VERSION;
366 chan->n_subbufs = n_subbufs;
367 chan->subbuf_size = subbuf_size;
368 chan->subbuf_size_order = get_count_order(subbuf_size);
369 chan->alloc_size = FIX_SIZE(subbuf_size * n_subbufs);
370 chan->parent = parent;
371 chan->private_data = private_data;
372 strlcpy(chan->base_filename, base_filename, NAME_MAX);
373 setup_callbacks(chan, cb);
374 kref_init(&chan->kref);
375
376 mutex_lock(&relay_channels_mutex);
377 for_each_online_cpu(i) {
378 chan->buf[i] = relay_open_buf(chan, i);
379 if (!chan->buf[i])
380 goto free_bufs;
381 }
382 list_add(&chan->list, &relay_channels);
383 mutex_unlock(&relay_channels_mutex);
384
385 return chan;
386
387 free_bufs:
388 for_each_possible_cpu(i) {
389 if (!chan->buf[i])
390 break;
391 relay_close_buf(chan->buf[i]);
392 }
393
394 kref_put(&chan->kref, relay_destroy_channel);
395 mutex_unlock(&relay_channels_mutex);
396 return NULL;
397 }
398 EXPORT_SYMBOL_GPL(ltt_relay_open);
399
400 /**
401 * ltt_relay_close - close the channel
402 * @chan: the channel
403 *
404 * Closes all channel buffers and frees the channel.
405 */
406 void ltt_relay_close(struct rchan *chan)
407 {
408 unsigned int i;
409
410 if (!chan)
411 return;
412
413 mutex_lock(&relay_channels_mutex);
414 for_each_possible_cpu(i)
415 if (chan->buf[i])
416 relay_close_buf(chan->buf[i]);
417
418 list_del(&chan->list);
419 kref_put(&chan->kref, relay_destroy_channel);
420 mutex_unlock(&relay_channels_mutex);
421 }
422 EXPORT_SYMBOL_GPL(ltt_relay_close);
423
/*
 * Start iteration at the previous element. Skip the real list head.
 *
 * Walks backwards from @page through the circular page list until the
 * buf_page containing @offset is found. Returns that page, or NULL
 * (with a warning) if @offset belongs to no page in the list.
 * @diff_offset is only used in the diagnostic printout below.
 */
struct buf_page *ltt_relay_find_prev_page(struct rchan_buf *buf,
	struct buf_page *page, size_t offset, ssize_t diff_offset)
{
	struct buf_page *iter;
	size_t orig_iter_off;		/* starting offset, for diagnostics */
	unsigned int i = 0;		/* pages walked so far */

	orig_iter_off = page->offset;
	list_for_each_entry_reverse(iter, &page->list, list) {
		/*
		 * Skip the real list head.
		 */
		if (&iter->list == &buf->pages)
			continue;
		i++;
		/* Does this page's [offset, offset+PAGE_SIZE) span contain
		 * the target offset? */
		if (offset >= iter->offset
		    && offset < iter->offset + PAGE_SIZE) {
#ifdef CONFIG_LTT_RELAY_CHECK_RANDOM_ACCESS
			/*
			 * More than one backward step means the caller's
			 * access pattern is not sequential; report it.
			 */
			if (i > 1) {
				printk(KERN_WARNING
					"Backward random access detected in "
					"ltt_relay. Iterations %u, "
					"offset %zu, orig iter->off %zu, "
					"iter->off %zu diff_offset %zd.\n", i,
					offset, orig_iter_off, iter->offset,
					diff_offset);
				WARN_ON(1);
			}
#endif
			return iter;
		}
	}
	/* Offset not found in any page: should not happen. */
	WARN_ON(1);
	return NULL;
}
EXPORT_SYMBOL_GPL(ltt_relay_find_prev_page);
463
/*
 * Start iteration at the next element. Skip the real list head.
 *
 * Forward counterpart of ltt_relay_find_prev_page(): walks forward from
 * @page through the circular page list until the buf_page containing
 * @offset is found. Returns that page, or NULL (with a warning) if
 * @offset belongs to no page in the list. @diff_offset is only used in
 * the diagnostic printout below.
 */
struct buf_page *ltt_relay_find_next_page(struct rchan_buf *buf,
	struct buf_page *page, size_t offset, ssize_t diff_offset)
{
	struct buf_page *iter;
	unsigned int i = 0;		/* pages walked so far */
	size_t orig_iter_off;		/* starting offset, for diagnostics */

	orig_iter_off = page->offset;
	list_for_each_entry(iter, &page->list, list) {
		/*
		 * Skip the real list head.
		 */
		if (&iter->list == &buf->pages)
			continue;
		i++;
		/* Does this page's [offset, offset+PAGE_SIZE) span contain
		 * the target offset? */
		if (offset >= iter->offset
		    && offset < iter->offset + PAGE_SIZE) {
#ifdef CONFIG_LTT_RELAY_CHECK_RANDOM_ACCESS
			/*
			 * More than one forward step means the caller's
			 * access pattern is not sequential; report it.
			 */
			if (i > 1) {
				printk(KERN_WARNING
					"Forward random access detected in "
					"ltt_relay. Iterations %u, "
					"offset %zu, orig iter->off %zu, "
					"iter->off %zu diff_offset %zd.\n", i,
					offset, orig_iter_off, iter->offset,
					diff_offset);
				WARN_ON(1);
			}
#endif
			return iter;
		}
	}
	/* Offset not found in any page: should not happen. */
	WARN_ON(1);
	return NULL;
}
EXPORT_SYMBOL_GPL(ltt_relay_find_next_page);
503
/**
 * _ltt_relay_write - write data to a ltt_relay buffer.
 * @buf : buffer
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @page : cached buffer page
 * @pagecpy : page size copied so far
 *
 * Cross-page write path. On entry, @pagecpy bytes at @offset are
 * accounted as already copied (presumably by the caller's single-page
 * fast path -- confirm against the inline caller in the header), so
 * each loop iteration first advances past the previous copy before
 * copying the next page-bounded chunk.
 */
void _ltt_relay_write(struct rchan_buf *buf, size_t offset,
	const void *src, size_t len, struct buf_page *page, ssize_t pagecpy)
{
	do {
		/* Advance past the chunk copied in the previous step. */
		len -= pagecpy;
		src += pagecpy;
		offset += pagecpy;
		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		WARN_ON(offset >= buf->chan->alloc_size);

		/* Look up (and cache in buf->wpage) the page at @offset. */
		page = ltt_relay_cache_page(buf, &buf->wpage, page, offset);
		/* Copy at most up to the end of the current page. */
		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		ltt_relay_do_copy(page_address(page->page)
			+ (offset & ~PAGE_MASK), src, pagecpy);
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_ltt_relay_write);
533
/**
 * ltt_relay_read - read data from ltt_relay_buffer.
 * @buf : buffer
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : length to read
 *
 * Copies @len bytes starting at @offset (wrapped into the buffer) into
 * @dest, crossing page boundaries as needed. Returns the number of
 * bytes read (@len), or 0 if @len is 0.
 */
int ltt_relay_read(struct rchan_buf *buf, size_t offset,
	void *dest, size_t len)
{
	struct buf_page *page;
	ssize_t pagecpy, orig_len;

	orig_len = len;
	/* Wrap into the buffer; assumes alloc_size is a power of two
	 * (see FIX_SIZE in ltt_relay_open) -- confirm. */
	offset &= buf->chan->alloc_size - 1;
	page = buf->rpage;
	if (unlikely(!len))
		return 0;
	for (;;) {
		/* Look up (and cache in buf->rpage) the page at @offset. */
		page = ltt_relay_cache_page(buf, &buf->rpage, page, offset);
		/* Copy at most up to the end of the current page. */
		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		memcpy(dest, page_address(page->page) + (offset & ~PAGE_MASK),
			pagecpy);
		len -= pagecpy;
		if (likely(!len))
			break;
		dest += pagecpy;
		offset += pagecpy;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		WARN_ON(offset >= buf->chan->alloc_size);
	}
	return orig_len;
}
EXPORT_SYMBOL_GPL(ltt_relay_read);
571
572 /**
573 * ltt_relay_read_get_page - Get a whole page to read from
574 * @buf : buffer
575 * @offset : offset within the buffer
576 */
577 struct buf_page *ltt_relay_read_get_page(struct rchan_buf *buf, size_t offset)
578 {
579 struct buf_page *page;
580
581 offset &= buf->chan->alloc_size - 1;
582 page = buf->rpage;
583 page = ltt_relay_cache_page(buf, &buf->rpage, page, offset);
584 return page;
585 }
586 EXPORT_SYMBOL_GPL(ltt_relay_read_get_page);
587
/**
 * ltt_relay_offset_address - get address of a location within the buffer
 * @buf : buffer
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located.
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's never on a page boundary, it's safe to write directly to this address,
 * as long as the write is never bigger than a page size.
 */
void *ltt_relay_offset_address(struct rchan_buf *buf, size_t offset)
{
	struct buf_page *page;
	unsigned int odd;

	/* Wrap into the buffer; assumes alloc_size is a power of two
	 * (see FIX_SIZE in ltt_relay_open) -- confirm. */
	offset &= buf->chan->alloc_size - 1;
	/*
	 * Two cached header pages, selected by the subbuf_size bit of the
	 * offset -- presumably distinguishing alternating subbuffers so
	 * consecutive headers don't evict each other's cache slot; confirm
	 * against callers.
	 */
	odd = !!(offset & buf->chan->subbuf_size);
	page = buf->hpage[odd];
	/* Cache miss: restart the lookup from the current write page. */
	if (offset < page->offset || offset >= page->offset + PAGE_SIZE)
		buf->hpage[odd] = page = buf->wpage;
	page = ltt_relay_cache_page(buf, &buf->hpage[odd], page, offset);
	return page_address(page->page) + (offset & ~PAGE_MASK);
}
EXPORT_SYMBOL_GPL(ltt_relay_offset_address);
612
613 /**
614 * relay_file_open - open file op for relay files
615 * @inode: the inode
616 * @filp: the file
617 *
618 * Increments the channel buffer refcount.
619 */
620 static int relay_file_open(struct inode *inode, struct file *filp)
621 {
622 struct rchan_buf *buf = inode->i_private;
623 kref_get(&buf->kref);
624 filp->private_data = buf;
625
626 return nonseekable_open(inode, filp);
627 }
628
629 /**
630 * relay_file_release - release file op for relay files
631 * @inode: the inode
632 * @filp: the file
633 *
634 * Decrements the channel refcount, as the filesystem is
635 * no longer using it.
636 */
637 static int relay_file_release(struct inode *inode, struct file *filp)
638 {
639 struct rchan_buf *buf = filp->private_data;
640 kref_put(&buf->kref, relay_remove_buf);
641
642 return 0;
643 }
644
/*
 * File operations for relay buffer files. Only open/release are handled
 * here; read/splice paths are presumably provided by higher layers
 * (e.g. ltt-relay.c) -- confirm against users of this symbol.
 */
const struct file_operations ltt_relay_file_operations = {
	.open = relay_file_open,
	.release = relay_file_release,
};
EXPORT_SYMBOL_GPL(ltt_relay_file_operations);
650
/*
 * Register the CPU hotplug notifier so per-cpu buffers get created for
 * CPUs brought online after a channel is opened. The priority value 5
 * orders this notifier relative to others -- presumably matching the
 * in-kernel relay implementation; confirm before changing.
 */
static __init int relay_init(void)
{
	hotcpu_notifier(relay_hotcpu_callback, 5);
	return 0;
}

module_init(relay_init);
This page took 0.043405 seconds and 4 git commands to generate.