ust: continue work
[ust.git] / libtracing / relay-alloc.c
CommitLineData
e1152c37
PMF
1/*
2 * Public API and common code for kernel->userspace relay file support.
3 *
4 * Copyright (C) 2002-2005 - Tom Zanussi (zanussi@us.ibm.com), IBM Corp
5 * Copyright (C) 1999-2005 - Karim Yaghmour (karim@opersys.com)
6 * Copyright (C) 2008 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
7 *
8 * Moved to kernel/relay.c by Paul Mundt, 2006.
9 * November 2006 - CPU hotplug support by Mathieu Desnoyers
10 * (mathieu.desnoyers@polymtl.ca)
11 *
12 * This file is released under the GPL.
13 */
5f54827b
PMF
14//ust// #include <linux/errno.h>
15//ust// #include <linux/stddef.h>
16//ust// #include <linux/slab.h>
17//ust// #include <linux/module.h>
18//ust// #include <linux/string.h>
19//ust// #include <linux/ltt-relay.h>
20//ust// #include <linux/vmalloc.h>
21//ust// #include <linux/mm.h>
22//ust// #include <linux/cpu.h>
23//ust// #include <linux/splice.h>
24//ust// #include <linux/bitops.h>
25#include <sys/mman.h>
26#include "kernelcompat.h"
27#include "list.h"
28#include "relay.h"
29#include "channels.h"
30#include "kref.h"
e1152c37
PMF
31
/* list of open channels, for cpu hotplug */
static DEFINE_MUTEX(relay_channels_mutex);	/* protects relay_channels */
static LIST_HEAD(relay_channels);
35
36/**
37 * relay_alloc_buf - allocate a channel buffer
38 * @buf: the buffer struct
39 * @size: total size of the buffer
40 */
5f54827b
PMF
41//ust// static int relay_alloc_buf(struct rchan_buf *buf, size_t *size)
42//ust//{
43//ust// unsigned int i, n_pages;
44//ust// struct buf_page *buf_page, *n;
45//ust//
46//ust// *size = PAGE_ALIGN(*size);
47//ust// n_pages = *size >> PAGE_SHIFT;
48//ust//
49//ust// INIT_LIST_HEAD(&buf->pages);
50//ust//
51//ust// for (i = 0; i < n_pages; i++) {
52//ust// buf_page = kmalloc_node(sizeof(*buf_page), GFP_KERNEL,
53//ust// cpu_to_node(buf->cpu));
54//ust// if (unlikely(!buf_page))
55//ust// goto depopulate;
56//ust// buf_page->page = alloc_pages_node(cpu_to_node(buf->cpu),
57//ust// GFP_KERNEL | __GFP_ZERO, 0);
58//ust// if (unlikely(!buf_page->page)) {
59//ust// kfree(buf_page);
60//ust// goto depopulate;
61//ust// }
62//ust// list_add_tail(&buf_page->list, &buf->pages);
63//ust// buf_page->offset = (size_t)i << PAGE_SHIFT;
64//ust// buf_page->buf = buf;
65//ust// set_page_private(buf_page->page, (unsigned long)buf_page);
66//ust// if (i == 0) {
67//ust// buf->wpage = buf_page;
68//ust// buf->hpage[0] = buf_page;
69//ust// buf->hpage[1] = buf_page;
70//ust// buf->rpage = buf_page;
71//ust// }
72//ust// }
73//ust// buf->page_count = n_pages;
74//ust// return 0;
75//ust//
76//ust//depopulate:
77//ust// list_for_each_entry_safe(buf_page, n, &buf->pages, list) {
78//ust// list_del_init(&buf_page->list);
79//ust// __free_page(buf_page->page);
80//ust// kfree(buf_page);
81//ust// }
82//ust// return -ENOMEM;
83//ust//}
84
e1152c37
PMF
85static int relay_alloc_buf(struct rchan_buf *buf, size_t *size)
86{
5f54827b 87 unsigned int n_pages;
e1152c37
PMF
88 struct buf_page *buf_page, *n;
89
5f54827b
PMF
90 void *result;
91
e1152c37 92 *size = PAGE_ALIGN(*size);
e1152c37 93
5f54827b
PMF
94 /* Maybe do read-ahead */
95 result = mmap(NULL, *size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS, -1, 0);
96 if(result == MAP_FAILED) {
97 PERROR("mmap");
98 return -1;
e1152c37 99 }
5f54827b
PMF
100
101 buf->buf_data = result;
102 buf->buf_size = *size;
103
104 return 0;
e1152c37
PMF
105}
106
107/**
108 * relay_create_buf - allocate and initialize a channel buffer
109 * @chan: the relay channel
110 * @cpu: cpu the buffer belongs to
111 *
112 * Returns channel buffer if successful, %NULL otherwise.
113 */
114static struct rchan_buf *relay_create_buf(struct rchan *chan, int cpu)
115{
116 int ret;
117 struct rchan_buf *buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
118 if (!buf)
119 return NULL;
120
5f54827b 121// buf->cpu = cpu;
e1152c37
PMF
122 ret = relay_alloc_buf(buf, &chan->alloc_size);
123 if (ret)
124 goto free_buf;
125
126 buf->chan = chan;
127 kref_get(&buf->chan->kref);
128 return buf;
129
130free_buf:
131 kfree(buf);
132 return NULL;
133}
134
135/**
136 * relay_destroy_channel - free the channel struct
137 * @kref: target kernel reference that contains the relay channel
138 *
139 * Should only be called from kref_put().
140 */
141static void relay_destroy_channel(struct kref *kref)
142{
143 struct rchan *chan = container_of(kref, struct rchan, kref);
144 kfree(chan);
145}
146
147/**
148 * relay_destroy_buf - destroy an rchan_buf struct and associated buffer
149 * @buf: the buffer struct
150 */
151static void relay_destroy_buf(struct rchan_buf *buf)
152{
153 struct rchan *chan = buf->chan;
154 struct buf_page *buf_page, *n;
5f54827b 155 int result;
e1152c37 156
5f54827b
PMF
157 result = munmap(buf->buf_data, buf->buf_size);
158 if(result == -1) {
159 PERROR("munmap");
160
161//ust// chan->buf[buf->cpu] = NULL;
e1152c37
PMF
162 kfree(buf);
163 kref_put(&chan->kref, relay_destroy_channel);
164}
165
/**
 * relay_remove_buf - remove a channel buffer
 * @kref: target kernel reference that contains the relay buffer
 *
 * Removes the file from the filesystem, which also frees the
 * rchan_buf_struct and the channel buffer. Should only be called from
 * kref_put().
 */
static void relay_remove_buf(struct kref *kref)
{
	struct rchan_buf *buf = container_of(kref, struct rchan_buf, kref);
	/* Let the client tear down its backing file before freeing the buffer. */
	buf->chan->cb->remove_buf_file(buf->dentry);
	relay_destroy_buf(buf);
}
180
181/*
182 * High-level relay kernel API and associated functions.
183 */
184
185/*
186 * rchan_callback implementations defining default channel behavior. Used
187 * in place of corresponding NULL values in client callback struct.
188 */
189
/*
 * create_buf_file_create() default callback. Does nothing: returns no
 * dentry, so no file is associated with the buffer.
 */
static struct dentry *create_buf_file_default_callback(const char *filename,
						       struct dentry *parent,
						       int mode,
						       struct rchan_buf *buf)
{
	return NULL;
}
200
/*
 * remove_buf_file() default callback. Does nothing: reports -EINVAL since
 * there is no file to remove.
 */
static int remove_buf_file_default_callback(struct dentry *dentry)
{
	return -EINVAL;
}
208
/* relay channel default callbacks, used when the client supplies none */
static struct rchan_callbacks default_channel_callbacks = {
	.create_buf_file = create_buf_file_default_callback,
	.remove_buf_file = remove_buf_file_default_callback,
};
214
/**
 * wakeup_readers - wake up readers waiting on a channel
 * @data: contains the channel buffer
 *
 * This is the timer function used to defer reader waking.
 */
static void wakeup_readers(unsigned long data)
{
	/* Timer callbacks receive the buffer pointer cast through a long. */
	struct rchan_buf *buf = (struct rchan_buf *)data;
	wake_up_interruptible(&buf->read_wait);
}
226
227/**
228 * __relay_reset - reset a channel buffer
229 * @buf: the channel buffer
230 * @init: 1 if this is a first-time initialization
231 *
232 * See relay_reset() for description of effect.
233 */
234static void __relay_reset(struct rchan_buf *buf, unsigned int init)
235{
236 if (init) {
237 init_waitqueue_head(&buf->read_wait);
238 kref_init(&buf->kref);
239 setup_timer(&buf->timer, wakeup_readers, (unsigned long)buf);
240 } else
241 del_timer_sync(&buf->timer);
242
243 buf->finalized = 0;
244}
245
/*
 * relay_open_buf - create a new relay channel buffer
 *
 * used by relay_open() and CPU hotplug.
 *
 * NOTE: with file creation disabled below, the success path intentionally
 * exits through free_name (the temp name is always freed, the buffer is
 * returned); free_buf is currently unreachable and dentry is unused.
 */
static struct rchan_buf *relay_open_buf(struct rchan *chan, unsigned int cpu)
{
	struct rchan_buf *buf = NULL;
	struct dentry *dentry;
	char *tmpname;

	tmpname = kzalloc(NAME_MAX + 1, GFP_KERNEL);
	if (!tmpname)
		goto end;
	/* Per-cpu file name: "<base_filename><cpu>". */
	snprintf(tmpname, NAME_MAX, "%s%d", chan->base_filename, cpu);

	buf = relay_create_buf(chan, cpu);
	if (!buf)
		goto free_name;

	__relay_reset(buf, 1);

	/* Create file in fs */
//ust//	dentry = chan->cb->create_buf_file(tmpname, chan->parent, S_IRUSR,
//ust//			buf);
//ust//	if (!dentry)
//ust//		goto free_buf;
//ust//
//ust//	buf->dentry = dentry;

	goto free_name;

free_buf:
	relay_destroy_buf(buf);
	buf = NULL;
free_name:
	kfree(tmpname);
end:
	return buf;
}
286
/**
 * relay_close_buf - close a channel buffer
 * @buf: channel buffer
 *
 * Marks the buffer finalized and restores the default callbacks.
 * The channel buffer and channel buffer data structure are then freed
 * automatically when the last reference is given up.
 */
static void relay_close_buf(struct rchan_buf *buf)
{
	/* Stop the deferred-wakeup timer before dropping the last ref. */
	del_timer_sync(&buf->timer);
	kref_put(&buf->kref, relay_remove_buf);
}
300
301static void setup_callbacks(struct rchan *chan,
302 struct rchan_callbacks *cb)
303{
304 if (!cb) {
305 chan->cb = &default_channel_callbacks;
306 return;
307 }
308
309 if (!cb->create_buf_file)
310 cb->create_buf_file = create_buf_file_default_callback;
311 if (!cb->remove_buf_file)
312 cb->remove_buf_file = remove_buf_file_default_callback;
313 chan->cb = cb;
314}
315
316/**
317 * relay_hotcpu_callback - CPU hotplug callback
318 * @nb: notifier block
319 * @action: hotplug action to take
320 * @hcpu: CPU number
321 *
322 * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
323 */
5f54827b
PMF
324//ust// static int __cpuinit relay_hotcpu_callback(struct notifier_block *nb,
325//ust// unsigned long action,
326//ust// void *hcpu)
327//ust// {
328//ust// unsigned int hotcpu = (unsigned long)hcpu;
329//ust// struct rchan *chan;
330//ust//
331//ust// switch (action) {
332//ust// case CPU_UP_PREPARE:
333//ust// case CPU_UP_PREPARE_FROZEN:
334//ust// mutex_lock(&relay_channels_mutex);
335//ust// list_for_each_entry(chan, &relay_channels, list) {
336//ust// if (chan->buf[hotcpu])
337//ust// continue;
338//ust// chan->buf[hotcpu] = relay_open_buf(chan, hotcpu);
339//ust// if (!chan->buf[hotcpu]) {
340//ust// printk(KERN_ERR
341//ust// "relay_hotcpu_callback: cpu %d buffer "
342//ust// "creation failed\n", hotcpu);
343//ust// mutex_unlock(&relay_channels_mutex);
344//ust// return NOTIFY_BAD;
345//ust// }
346//ust// }
347//ust// mutex_unlock(&relay_channels_mutex);
348//ust// break;
349//ust// case CPU_DEAD:
350//ust// case CPU_DEAD_FROZEN:
351//ust// /* No need to flush the cpu : will be flushed upon
352//ust// * final relay_flush() call. */
353//ust// break;
354//ust// }
355//ust// return NOTIFY_OK;
356//ust// }
e1152c37
PMF
357
/**
 * ltt_relay_open - create a new relay channel
 * @base_filename: base name of files to create
 * @parent: dentry of parent directory, %NULL for root directory
 * @subbuf_size: size of sub-buffers
 * @n_subbufs: number of sub-buffers
 * @cb: client callback functions
 * @private_data: user-defined data
 *
 * Returns channel pointer if successful, %NULL otherwise.
 *
 * Creates a channel buffer for each cpu using the sizes and
 * attributes specified. The created channel buffer files
 * will be named base_filename0...base_filenameN-1. File
 * permissions will be %S_IRUSR.
 */
struct rchan *ltt_relay_open(const char *base_filename,
			 struct dentry *parent,
			 size_t subbuf_size,
			 size_t n_subbufs,
			 struct rchan_callbacks *cb,
			 void *private_data)
{
	unsigned int i;
	struct rchan *chan;
	if (!base_filename)
		return NULL;

	/* Both dimensions must be non-zero for a usable buffer. */
	if (!(subbuf_size && n_subbufs))
		return NULL;

	chan = kzalloc(sizeof(struct rchan), GFP_KERNEL);
	if (!chan)
		return NULL;

	chan->version = LTT_RELAY_CHANNEL_VERSION;
	chan->n_subbufs = n_subbufs;
	chan->subbuf_size = subbuf_size;
	chan->subbuf_size_order = get_count_order(subbuf_size);
	chan->alloc_size = FIX_SIZE(subbuf_size * n_subbufs);
	chan->parent = parent;
	chan->private_data = private_data;
	strlcpy(chan->base_filename, base_filename, NAME_MAX);
	setup_callbacks(chan, cb);
	kref_init(&chan->kref);

	mutex_lock(&relay_channels_mutex);
	for_each_online_cpu(i) {
		chan->buf[i] = relay_open_buf(chan, i);
		if (!chan->buf[i])
			goto free_bufs;
	}
	list_add(&chan->list, &relay_channels);
	mutex_unlock(&relay_channels_mutex);

	return chan;

free_bufs:
	/*
	 * NOTE(review): buffers are opened over online cpus but freed over
	 * possible cpus, stopping at the first NULL slot; if cpu ids are
	 * non-contiguous some buffers could be missed — confirm.
	 */
	for_each_possible_cpu(i) {
		if (!chan->buf[i])
			break;
		relay_close_buf(chan->buf[i]);
	}

	kref_put(&chan->kref, relay_destroy_channel);
	mutex_unlock(&relay_channels_mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(ltt_relay_open);
427
/**
 * ltt_relay_close - close the channel
 * @chan: the channel
 *
 * Closes all channel buffers and frees the channel.
 */
void ltt_relay_close(struct rchan *chan)
{
	unsigned int i;

	if (!chan)
		return;

	mutex_lock(&relay_channels_mutex);
	/* Drop each per-cpu buffer's initial reference. */
	for_each_possible_cpu(i)
		if (chan->buf[i])
			relay_close_buf(chan->buf[i]);

	list_del(&chan->list);
	kref_put(&chan->kref, relay_destroy_channel);
	mutex_unlock(&relay_channels_mutex);
}
EXPORT_SYMBOL_GPL(ltt_relay_close);
451
/*
 * Start iteration at the previous element. Skip the real list head.
 *
 * Walks the page list backwards from @page looking for the page whose
 * [offset, offset + PAGE_SIZE) range contains @offset. Returns that page,
 * or NULL (with a warning) if no page contains it — a caller bug.
 */
struct buf_page *ltt_relay_find_prev_page(struct rchan_buf *buf,
	struct buf_page *page, size_t offset, ssize_t diff_offset)
{
	struct buf_page *iter;
	size_t orig_iter_off;
	unsigned int i = 0;	/* pages walked; >1 indicates random access */

	orig_iter_off = page->offset;
	list_for_each_entry_reverse(iter, &page->list, list) {
		/*
		 * Skip the real list head.
		 */
		if (&iter->list == &buf->pages)
			continue;
		i++;
		if (offset >= iter->offset
		    && offset < iter->offset + PAGE_SIZE) {
#ifdef CONFIG_LTT_RELAY_CHECK_RANDOM_ACCESS
			/* More than one hop back means non-sequential access. */
			if (i > 1) {
				printk(KERN_WARNING
					"Backward random access detected in "
					"ltt_relay. Iterations %u, "
					"offset %zu, orig iter->off %zu, "
					"iter->off %zu diff_offset %zd.\n", i,
					offset, orig_iter_off, iter->offset,
					diff_offset);
				WARN_ON(1);
			}
#endif
			return iter;
		}
	}
	/* No page contains @offset: should never happen. */
	WARN_ON(1);
	return NULL;
}
EXPORT_SYMBOL_GPL(ltt_relay_find_prev_page);
491
/*
 * Start iteration at the next element. Skip the real list head.
 *
 * Walks the page list forwards from @page looking for the page whose
 * [offset, offset + PAGE_SIZE) range contains @offset. Returns that page,
 * or NULL (with a warning) if no page contains it — a caller bug.
 */
struct buf_page *ltt_relay_find_next_page(struct rchan_buf *buf,
	struct buf_page *page, size_t offset, ssize_t diff_offset)
{
	struct buf_page *iter;
	unsigned int i = 0;	/* pages walked; >1 indicates random access */
	size_t orig_iter_off;

	orig_iter_off = page->offset;
	list_for_each_entry(iter, &page->list, list) {
		/*
		 * Skip the real list head.
		 */
		if (&iter->list == &buf->pages)
			continue;
		i++;
		if (offset >= iter->offset
		    && offset < iter->offset + PAGE_SIZE) {
#ifdef CONFIG_LTT_RELAY_CHECK_RANDOM_ACCESS
			/* More than one hop forward means non-sequential access. */
			if (i > 1) {
				printk(KERN_WARNING
					"Forward random access detected in "
					"ltt_relay. Iterations %u, "
					"offset %zu, orig iter->off %zu, "
					"iter->off %zu diff_offset %zd.\n", i,
					offset, orig_iter_off, iter->offset,
					diff_offset);
				WARN_ON(1);
			}
#endif
			return iter;
		}
	}
	/* No page contains @offset: should never happen. */
	WARN_ON(1);
	return NULL;
}
EXPORT_SYMBOL_GPL(ltt_relay_find_next_page);
531
/**
 * ltt_relay_write - write data to a ltt_relay buffer.
 * @buf : buffer
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @page : cached buffer page
 * @pagecpy : page size copied so far
 *
 * Copies @len bytes page by page, starting after the @pagecpy bytes the
 * caller already handled on @page.
 */
void _ltt_relay_write(struct rchan_buf *buf, size_t offset,
	const void *src, size_t len, struct buf_page *page, ssize_t pagecpy)
{
	do {
		/* Advance past what was copied in the previous iteration. */
		len -= pagecpy;
		src += pagecpy;
		offset += pagecpy;
		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		WARN_ON(offset >= buf->chan->alloc_size);

		page = ltt_relay_cache_page(buf, &buf->wpage, page, offset);
		/* Copy at most up to the end of the current page. */
		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		ltt_relay_do_copy(page_address(page->page)
			+ (offset & ~PAGE_MASK), src, pagecpy);
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_ltt_relay_write);
561
/**
 * ltt_relay_read - read data from ltt_relay_buffer.
 * @buf : buffer
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : length to write
 *
 * Copies @len bytes out of the buffer, page by page, starting from the
 * cached read page. Returns the number of bytes read (== @len), or 0 for
 * a zero-length request.
 */
int ltt_relay_read(struct rchan_buf *buf, size_t offset,
	void *dest, size_t len)
{
	struct buf_page *page;
	ssize_t pagecpy, orig_len;

	orig_len = len;
	/* Wrap the offset into the buffer (alloc_size is a power of two). */
	offset &= buf->chan->alloc_size - 1;
	page = buf->rpage;
	if (unlikely(!len))
		return 0;
	for (;;) {
		page = ltt_relay_cache_page(buf, &buf->rpage, page, offset);
		/* Copy at most up to the end of the current page. */
		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		memcpy(dest, page_address(page->page) + (offset & ~PAGE_MASK),
			pagecpy);
		len -= pagecpy;
		if (likely(!len))
			break;
		dest += pagecpy;
		offset += pagecpy;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		WARN_ON(offset >= buf->chan->alloc_size);
	}
	return orig_len;
}
EXPORT_SYMBOL_GPL(ltt_relay_read);
599
600/**
601 * ltt_relay_read_get_page - Get a whole page to read from
602 * @buf : buffer
603 * @offset : offset within the buffer
604 */
5f54827b
PMF
605//ust// struct buf_page *ltt_relay_read_get_page(struct rchan_buf *buf, size_t offset)
606//ust// {
607//ust// struct buf_page *page;
e1152c37 608
5f54827b
PMF
609//ust// offset &= buf->chan->alloc_size - 1;
610//ust// page = buf->rpage;
611//ust// page = ltt_relay_cache_page(buf, &buf->rpage, page, offset);
612//ust// return page;
613//ust// }
614//ust// EXPORT_SYMBOL_GPL(ltt_relay_read_get_page);
e1152c37
PMF
615
/**
 * ltt_relay_offset_address - get address of a location within the buffer
 * @buf : buffer
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located.
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's never on a page boundary, it's safe to write directly to this address,
 * as long as the write is never bigger than a page size.
 */
void *ltt_relay_offset_address(struct rchan_buf *buf, size_t offset)
{
	struct buf_page *page;
	unsigned int odd;

	offset &= buf->chan->alloc_size - 1;
	/* Select the even/odd subbuffer header-page cache slot. */
	odd = !!(offset & buf->chan->subbuf_size);
	page = buf->hpage[odd];
	/* Cached page stale for this offset: restart from the write page. */
	if (offset < page->offset || offset >= page->offset + PAGE_SIZE)
		buf->hpage[odd] = page = buf->wpage;
	page = ltt_relay_cache_page(buf, &buf->hpage[odd], page, offset);
	return page_address(page->page) + (offset & ~PAGE_MASK);
}
//ust// EXPORT_SYMBOL_GPL(ltt_relay_offset_address);
e1152c37
PMF
640
641/**
642 * relay_file_open - open file op for relay files
643 * @inode: the inode
644 * @filp: the file
645 *
646 * Increments the channel buffer refcount.
647 */
5f54827b
PMF
648//ust// static int relay_file_open(struct inode *inode, struct file *filp)
649//ust// {
650//ust// struct rchan_buf *buf = inode->i_private;
651//ust// kref_get(&buf->kref);
652//ust// filp->private_data = buf;
653//ust//
654//ust// return nonseekable_open(inode, filp);
655//ust// }
e1152c37
PMF
656
/**
 * relay_file_release - release file op for relay files
 * @inode: the inode
 * @filp: the file
 *
 * Decrements the channel refcount, as the filesystem is
 * no longer using it.
 */
static int relay_file_release(struct inode *inode, struct file *filp)
{
	struct rchan_buf *buf = filp->private_data;
	/* May free the buffer if this was the last reference. */
	kref_put(&buf->kref, relay_remove_buf);

	return 0;
}
672
5f54827b
PMF
673//ust// const struct file_operations ltt_relay_file_operations = {
674//ust// .open = relay_file_open,
675//ust// .release = relay_file_release,
676//ust// };
677//ust// EXPORT_SYMBOL_GPL(ltt_relay_file_operations);
e1152c37 678
5f54827b
PMF
679//ust// static __init int relay_init(void)
680//ust// {
681//ust// hotcpu_notifier(relay_hotcpu_callback, 5);
682//ust// return 0;
683//ust// }
e1152c37 684
5f54827b 685//ust// module_init(relay_init);
This page took 0.046284 seconds and 4 git commands to generate.