/*
 * ltt-relay-alloc.c
 *
 * Copyright (C) 2008,2009 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

#include <linux/errno.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/delay.h>

#include "ltt-relay.h"
#include "ltt-tracer.h"
#include "ltt-relay-lockless.h"        /* for cpu hotplug */

/**
 * ltt_chanbuf_allocate - allocate a channel buffer
 * @buf: the buffer struct
 * @size: total size of the buffer
 * @n_sb: number of subbuffers
 * @extra_reader_sb: need extra subbuffer for reader
 */
static
int ltt_chanbuf_allocate(struct ltt_chanbuf_alloc *buf, size_t size,
                         size_t n_sb, int extra_reader_sb)
{
        long i, j, n_pages, n_pages_per_sb, page_idx = 0;
        struct page **pages;
        void **virt;

        n_pages = size >> PAGE_SHIFT;
        n_pages_per_sb = n_pages >> get_count_order(n_sb);
        if (extra_reader_sb)
                n_pages += n_pages_per_sb;        /* Add pages for reader */
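        /*
         * Illustrative example (hypothetical values, not from the original
         * source): size = 64kB, n_sb = 4 and PAGE_SIZE = 4kB give
         * n_pages = 16 and n_pages_per_sb = 4; with an extra reader
         * subbuffer, n_pages becomes 20.
         */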

        pages = kmalloc_node(max_t(size_t, sizeof(*pages) * n_pages,
                                   1 << INTERNODE_CACHE_SHIFT),
                             GFP_KERNEL, cpu_to_node(buf->cpu));
        if (unlikely(!pages))
                goto pages_error;

        virt = kmalloc_node(ALIGN(sizeof(*virt) * n_pages,
                                  1 << INTERNODE_CACHE_SHIFT),
                            GFP_KERNEL, cpu_to_node(buf->cpu));
        if (unlikely(!virt))
                goto virt_error;

        for (i = 0; i < n_pages; i++) {
                pages[i] = alloc_pages_node(cpu_to_node(buf->cpu),
                                            GFP_KERNEL | __GFP_ZERO, 0);
                if (unlikely(!pages[i]))
                        goto depopulate;
                virt[i] = page_address(pages[i]);
        }
        buf->nr_pages = n_pages;
        buf->_pages = pages;
        buf->_virt = virt;

        /* Allocate write-side page index */
        buf->buf_wsb = kzalloc_node(max_t(size_t,
                                          sizeof(struct chanbuf_sb) * n_sb,
                                          1 << INTERNODE_CACHE_SHIFT),
                                    GFP_KERNEL, cpu_to_node(buf->cpu));
        if (unlikely(!buf->buf_wsb))
                goto depopulate;

        for (i = 0; i < n_sb; i++) {
                buf->buf_wsb[i].pages =
                        kzalloc_node(max_t(size_t,
                                           sizeof(struct chanbuf_page) * n_pages_per_sb,
                                           1 << INTERNODE_CACHE_SHIFT),
                                     GFP_KERNEL, cpu_to_node(buf->cpu));
                if (!buf->buf_wsb[i].pages)
                        goto free_buf_wsb;
        }

        if (extra_reader_sb) {
                /* Allocate read-side page index */
                buf->buf_rsb.pages =
                        kzalloc_node(max_t(size_t,
                                           sizeof(struct chanbuf_page) * n_pages_per_sb,
                                           1 << INTERNODE_CACHE_SHIFT),
                                     GFP_KERNEL, cpu_to_node(buf->cpu));
                if (unlikely(!buf->buf_rsb.pages))
                        goto free_buf_wsb;
        } else {
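                /*
                 * No extra reader subbuffer: the read-side index initially
                 * points at the pages of write-side subbuffer 0.
                 */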
                buf->buf_rsb.pages = buf->buf_wsb[0].pages;
        }

        /* Assign pages to write-side page index */
        for (i = 0; i < n_sb; i++) {
                for (j = 0; j < n_pages_per_sb; j++) {
                        WARN_ON(page_idx > n_pages);
                        buf->buf_wsb[i].pages[j].virt = virt[page_idx];
                        buf->buf_wsb[i].pages[j].page = pages[page_idx];
                        page_idx++;
                }
                RCHAN_SB_SET_NOREF(buf->buf_wsb[i].pages);
        }

        if (extra_reader_sb) {
                for (j = 0; j < n_pages_per_sb; j++) {
                        WARN_ON(page_idx > n_pages);
                        buf->buf_rsb.pages[j].virt = virt[page_idx];
                        buf->buf_rsb.pages[j].page = pages[page_idx];
                        page_idx++;
                }
                RCHAN_SB_SET_NOREF(buf->buf_rsb.pages);
        }

        /*
         * If kmalloc ever uses vmalloc underneath, make sure the buffer pages
         * will not fault.
         */
        vmalloc_sync_all();
        return 0;

free_buf_wsb:
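        /*
         * Write-side page indexes that were never allocated are still NULL
         * here; kfree(NULL) is a no-op, so the whole array can be walked.
         */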
        for (i = 0; i < n_sb; i++) {
                RCHAN_SB_CLEAR_NOREF(buf->buf_wsb[i].pages);
                kfree(buf->buf_wsb[i].pages);
        }
        kfree(buf->buf_wsb);
depopulate:
        /*
         * Free all pages from [ i - 1 down to 0 ].
         * If i = 0, don't free anything.
         */
        for (i--; i >= 0; i--)
                __free_page(pages[i]);
        kfree(virt);
virt_error:
        kfree(pages);
pages_error:
        return -ENOMEM;
}

int ltt_chanbuf_alloc_create(struct ltt_chanbuf_alloc *buf,
                             struct ltt_chan_alloc *chan, int cpu)
{
        int ret = 0;

        ret = ltt_chanbuf_allocate(buf, chan->buf_size, chan->n_sb,
                                   chan->extra_reader_sb);
        if (ret)
                goto end;

        buf->chan = chan;
        buf->cpu = cpu;
end:
        return ret;
}

void ltt_chanbuf_alloc_free(struct ltt_chanbuf_alloc *buf)
{
        struct ltt_chan_alloc *chan = buf->chan;
        struct page **pages;
        long i;

        /* Destroy index */
        if (chan->extra_reader_sb) {
                RCHAN_SB_CLEAR_NOREF(buf->buf_rsb.pages);
                kfree(buf->buf_rsb.pages);
        }
        for (i = 0; i < chan->n_sb; i++) {
                RCHAN_SB_CLEAR_NOREF(buf->buf_wsb[i].pages);
                kfree(buf->buf_wsb[i].pages);
        }
        kfree(buf->buf_wsb);

        /* Destroy pages */
        pages = buf->_pages;
        for (i = 0; i < buf->nr_pages; i++)
                __free_page(pages[i]);
        kfree(buf->_pages);
        kfree(buf->_virt);
        buf->allocated = 0;
}

/**
 * ltt_relay_hotcpu_callback - CPU hotplug callback
 * @nb: notifier block
 * @action: hotplug action to take
 * @hcpu: CPU number
 *
 * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
 */
static
int __cpuinit ltt_relay_hotcpu_callback(struct notifier_block *nb,
                                        unsigned long action,
                                        void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;
        struct ltt_trace *trace;
        struct ltt_chan *chan;
        struct ltt_chanbuf *buf;
        int ret, i;

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                /*
                 * CPU hotplug lock protects trace lock from this callback.
                 */
                __list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
                        for (i = 0; i < trace->nr_channels; i++) {
                                chan = &trace->channels[i];
                                buf = per_cpu_ptr(chan->a.buf, cpu);
                                ret = ltt_chanbuf_create(buf, &chan->a, cpu);
                                if (ret) {
                                        printk(KERN_ERR
                                               "ltt_relay_hotcpu_callback: cpu %d "
                                               "buffer creation failed\n", cpu);
                                        return NOTIFY_BAD;
                                }
                        }
                }
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                /*
                 * No need to do a buffer switch here, because it will happen
                 * when tracing is stopped, or will be done by switch timer CPU
                 * DEAD callback.
                 */
                break;
        }
        return NOTIFY_OK;
}

/*
 * Must be called with either trace lock or rcu read lock sched held.
 */
void ltt_chan_for_each_channel(void (*cb) (struct ltt_chanbuf *buf), int cpu)
{
        struct ltt_trace *trace;
        struct ltt_chan *chan;
        struct ltt_chanbuf *buf;
        int i;

        __list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
                for (i = 0; i < trace->nr_channels; i++) {
                        chan = &trace->channels[i];
                        if (!chan->active)
                                continue;
                        buf = per_cpu_ptr(chan->a.buf, cpu);
                        cb(buf);
                }
        }
}

/**
 * ltt_chan_alloc_init - allocate and initialize a relay channel
 * @chan: channel
 * @trace: trace
 * @base_filename: base name of files to create
 * @parent: dentry of parent directory, %NULL for root directory
 * @sb_size: size of sub-buffers (> PAGE_SIZE, power of 2)
 * @n_sb: number of sub-buffers (power of 2)
 * @extra_reader_sb: allocate an extra subbuffer for the reader
 * @overwrite: channel is in overwrite mode
 *
 * Returns 0 if successful, a negative error value otherwise.
 *
 * Creates per-cpu channel buffers using the sizes and attributes
 * specified. The created channel buffer files will be named
 * base_filename_0...base_filename_N-1. File permissions will
 * be %S_IRUSR.
 */
int ltt_chan_alloc_init(struct ltt_chan_alloc *chan, struct ltt_trace *trace,
                        const char *base_filename,
                        struct dentry *parent, size_t sb_size,
                        size_t n_sb, int extra_reader_sb, int overwrite)
{
        unsigned int i;
        int ret;

        if (!base_filename)
                return -EPERM;

        if (!(sb_size && n_sb))
                return -EPERM;

        /* Check that the subbuffer size is at least a page. */
        WARN_ON_ONCE(sb_size < PAGE_SIZE);

        /*
         * Make sure the number of subbuffers and the subbuffer size are
         * powers of 2.
         */
        WARN_ON_ONCE(hweight32(sb_size) != 1);
        WARN_ON(hweight32(n_sb) != 1);

        chan->trace = trace;
        chan->buf_size = n_sb * sb_size;
        chan->sb_size = sb_size;
        chan->sb_size_order = get_count_order(sb_size);
        chan->n_sb_order = get_count_order(n_sb);
        chan->extra_reader_sb = extra_reader_sb;
        chan->n_sb = n_sb;
        chan->parent = parent;
        strlcpy(chan->filename, base_filename, NAME_MAX);
        kref_init(&chan->kref);
        kref_get(&chan->trace->kref);

        /* Allocating the child structure */
        chan->buf = alloc_percpu(struct ltt_chanbuf);
        if (!chan->buf)
                goto free_chan;

        for_each_online_cpu(i) {
                ret = ltt_chanbuf_create(per_cpu_ptr(chan->buf, i), chan, i);
                if (ret)
                        goto free_bufs;
        }

        return 0;

free_bufs:
        for_each_possible_cpu(i) {
                struct ltt_chanbuf *buf = per_cpu_ptr(chan->buf, i);

                if (!buf->a.allocated)
                        continue;
                ltt_chanbuf_remove_file(buf);
                ltt_chanbuf_free(buf);
        }
        free_percpu(chan->buf);
free_chan:
        kref_put(&chan->kref, ltt_chan_free);
        return -ENOMEM;
}

/**
 * ltt_chan_alloc_remove_files - remove channel files.
 * @chan: the channel
 *
 * Remove all channel files and wait for dentry use counts to become zero.
 */
void ltt_chan_alloc_remove_files(struct ltt_chan_alloc *chan)
{
        unsigned int i;
        struct dentry *dentry;

        for_each_possible_cpu(i) {
                struct ltt_chanbuf *buf = per_cpu_ptr(chan->buf, i);

                if (!buf->a.allocated)
                        continue;
                dentry = dget(buf->a.dentry);
                ltt_chanbuf_remove_file(buf);
                /* TODO: wait / wakeup instead */
                /*
                 * Wait for every reference to the dentry to be gone,
                 * except us.
                 */
                while (ACCESS_ONCE(dentry->d_count) != 1)
                        msleep(100);
                dput(dentry);
        }
}

/**
 * ltt_chan_alloc_free - destroy the channel
 * @chan: the channel
 *
 * Destroys all channel buffers and frees the channel.
 */
void ltt_chan_alloc_free(struct ltt_chan_alloc *chan)
{
        unsigned int i;

        for_each_possible_cpu(i) {
                struct ltt_chanbuf *buf = per_cpu_ptr(chan->buf, i);

                if (!buf->a.allocated)
                        continue;
                ltt_chanbuf_free(buf);
        }
        free_percpu(chan->buf);
        kref_put(&chan->trace->kref, ltt_release_trace);
        wake_up_interruptible(&chan->trace->kref_wq);
}

/**
 * _ltt_relay_write - write data to a ltt_relay buffer.
 * @bufa : buffer
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : page size copied so far
 */
void _ltt_relay_write(struct ltt_chanbuf_alloc *bufa, size_t offset,
                      const void *src, size_t len, ssize_t pagecpy)
{
        struct ltt_chan_alloc *chana = bufa->chan;
        size_t sbidx, index;
        struct chanbuf_page *rpages;

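        /*
         * Copy page by page: sbidx selects the sub-buffer, index the page
         * within it, and each copy is clamped so it never crosses a page
         * boundary.
         */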
        do {
                len -= pagecpy;
                src += pagecpy;
                offset += pagecpy;
                sbidx = offset >> chana->sb_size_order;
                index = (offset & (chana->sb_size - 1)) >> PAGE_SHIFT;

                /*
                 * Underlying layer should never ask for writes across
                 * subbuffers.
                 */
                WARN_ON(offset >= chana->buf_size);

                pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
                rpages = bufa->buf_wsb[sbidx].pages;
                WARN_ON_ONCE(RCHAN_SB_IS_NOREF(rpages));
                ltt_relay_do_copy(rpages[index].virt + (offset & ~PAGE_MASK),
                                  src, pagecpy);
        } while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_ltt_relay_write);

/**
 * _ltt_relay_strncpy_fixup - Fix an incomplete string in a ltt_relay buffer.
 * @bufa : buffer
 * @offset : offset within the buffer
 * @len : length to write
 * @copied: string actually copied
 * @terminated: does string end with \0
 *
 * Fills string with "X" if incomplete.
 */
void _ltt_relay_strncpy_fixup(struct ltt_chanbuf_alloc *bufa, size_t offset,
                              size_t len, size_t copied, int terminated)
{
        struct ltt_chan_alloc *chana = bufa->chan;
        size_t sbidx, index;
        ssize_t pagecpy;
        struct chanbuf_page *rpages;

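        /*
         * Two cases: either the whole reserved length was written but the
         * string was not terminated (force a '\0' at the last byte), or the
         * copy stopped short (pad the remainder with 'X' and terminate).
         */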
        if (copied == len) {
                /*
                 * Deal with non-terminated string.
                 */
                WARN_ON_ONCE(terminated);
                offset += copied - 1;
                sbidx = offset >> chana->sb_size_order;
                index = (offset & (chana->sb_size - 1)) >> PAGE_SHIFT;
                /*
                 * Underlying layer should never ask for writes across
                 * subbuffers.
                 */
                WARN_ON(offset >= chana->buf_size);
                rpages = bufa->buf_wsb[sbidx].pages;
                WARN_ON_ONCE(RCHAN_SB_IS_NOREF(rpages));
                ltt_relay_do_memset(rpages[index].virt + (offset & ~PAGE_MASK),
                                    '\0', 1);
                return;
        }

        /*
         * Deal with incomplete string.
         * Overwrite string's \0 with X too.
         */
        pagecpy = copied - 1;
        do {
                WARN_ON_ONCE(!terminated);
                len -= pagecpy;
                offset += pagecpy;
                sbidx = offset >> chana->sb_size_order;
                index = (offset & (chana->sb_size - 1)) >> PAGE_SHIFT;

                /*
                 * Underlying layer should never ask for writes across
                 * subbuffers.
                 */
                WARN_ON(offset >= chana->buf_size);

                pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
                rpages = bufa->buf_wsb[sbidx].pages;
                WARN_ON_ONCE(RCHAN_SB_IS_NOREF(rpages));
                ltt_relay_do_memset(rpages[index].virt + (offset & ~PAGE_MASK),
                                    'X', pagecpy);
        } while (unlikely(len != pagecpy));
        /*
         * Overwrite last 'X' with '\0'.
         */
        offset += pagecpy - 1;
        sbidx = offset >> chana->sb_size_order;
        index = (offset & (chana->sb_size - 1)) >> PAGE_SHIFT;
        /*
         * Underlying layer should never ask for writes across subbuffers.
         */
        WARN_ON(offset >= chana->buf_size);
        rpages = bufa->buf_wsb[sbidx].pages;
        WARN_ON_ONCE(RCHAN_SB_IS_NOREF(rpages));
        ltt_relay_do_memset(rpages[index].virt + (offset & ~PAGE_MASK),
                            '\0', 1);
}
EXPORT_SYMBOL_GPL(_ltt_relay_strncpy_fixup);

/**
 * _ltt_relay_strncpy - copy a string to a ltt_relay buffer.
 * @bufa : buffer
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : page size copied so far
 */
void _ltt_relay_strncpy(struct ltt_chanbuf_alloc *bufa, size_t offset,
                        const void *src, size_t len, ssize_t pagecpy)
{
        struct ltt_chan_alloc *chana = bufa->chan;
        size_t sbidx, index, copied;
        struct chanbuf_page *rpages;
        int terminated;

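        /*
         * Copy the string page by page; if the source ends early or is not
         * terminated within the reserved length, hand off to
         * _ltt_relay_strncpy_fixup() to pad and terminate the record.
         */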
        do {
                len -= pagecpy;
                src += pagecpy;
                offset += pagecpy;
                sbidx = offset >> chana->sb_size_order;
                index = (offset & (chana->sb_size - 1)) >> PAGE_SHIFT;

                /*
                 * Underlying layer should never ask for writes across
                 * subbuffers.
                 */
                WARN_ON(offset >= chana->buf_size);

                pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
                rpages = bufa->buf_wsb[sbidx].pages;
                WARN_ON_ONCE(RCHAN_SB_IS_NOREF(rpages));
                copied = ltt_relay_do_strncpy(rpages[index].virt
                                              + (offset & ~PAGE_MASK),
                                              src, pagecpy, &terminated);
                if (copied < pagecpy || ((len == pagecpy) && !terminated)) {
                        _ltt_relay_strncpy_fixup(bufa, offset, len, copied,
                                                 terminated);
                        break;
                }
        } while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_ltt_relay_strncpy);

/**
 * ltt_relay_read - read data from an ltt_relay buffer.
 * @bufa : buffer
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : length to read
 *
 * Should be protected by get_subbuf/put_subbuf.
 */
int ltt_relay_read(struct ltt_chanbuf_alloc *bufa, size_t offset, void *dest,
                   size_t len)
{
        struct ltt_chan_alloc *chana = bufa->chan;
        size_t index;
        ssize_t pagecpy, orig_len;
        struct chanbuf_page *rpages;

        orig_len = len;
        offset &= chana->buf_size - 1;
        index = (offset & (chana->sb_size - 1)) >> PAGE_SHIFT;
        if (unlikely(!len))
                return 0;
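        /*
         * Copy page by page from the read-side page index until len bytes
         * have been read.
         */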
        for (;;) {
                pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
                rpages = bufa->buf_rsb.pages;
                WARN_ON_ONCE(RCHAN_SB_IS_NOREF(rpages));
                memcpy(dest, rpages[index].virt + (offset & ~PAGE_MASK),
                       pagecpy);
                len -= pagecpy;
                if (likely(!len))
                        break;
                dest += pagecpy;
                offset += pagecpy;
                index = (offset & (chana->sb_size - 1)) >> PAGE_SHIFT;
                /*
                 * Underlying layer should never ask for reads across
                 * subbuffers.
                 */
                WARN_ON(offset >= chana->buf_size);
        }
        return orig_len;
}
EXPORT_SYMBOL_GPL(ltt_relay_read);

/**
 * ltt_relay_read_cstr - read a C-style string from an ltt_relay buffer.
 * @bufa : buffer
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : destination's length
 *
 * Returns the string's length.
 * Should be protected by get_subbuf/put_subbuf.
 */
int ltt_relay_read_cstr(struct ltt_chanbuf_alloc *bufa, size_t offset,
                        void *dest, size_t len)
{
        struct ltt_chan_alloc *chana = bufa->chan;
        size_t index;
        ssize_t pagecpy, pagelen, strpagelen, orig_offset;
        char *str;
        struct chanbuf_page *rpages;

        offset &= chana->buf_size - 1;
        index = (offset & (chana->sb_size - 1)) >> PAGE_SHIFT;
        orig_offset = offset;
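        /*
         * Scan the string page by page with strnlen(); stop at the first
         * page where a terminating '\0' is found (strpagelen < pagelen),
         * copying at most len bytes into dest along the way.
         */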
        for (;;) {
                rpages = bufa->buf_rsb.pages;
                WARN_ON_ONCE(RCHAN_SB_IS_NOREF(rpages));
                str = (char *)rpages[index].virt + (offset & ~PAGE_MASK);
                pagelen = PAGE_SIZE - (offset & ~PAGE_MASK);
                strpagelen = strnlen(str, pagelen);
                if (len) {
                        pagecpy = min_t(size_t, len, strpagelen);
                        if (dest) {
                                memcpy(dest, str, pagecpy);
                                dest += pagecpy;
                        }
                        len -= pagecpy;
                }
                offset += strpagelen;
                index = (offset & (chana->sb_size - 1)) >> PAGE_SHIFT;
                if (strpagelen < pagelen)
                        break;
                /*
                 * Underlying layer should never ask for reads across
                 * subbuffers.
                 */
                WARN_ON(offset >= chana->buf_size);
        }
        if (dest && len)
                ((char *)dest)[0] = 0;
        return offset - orig_offset;
}
EXPORT_SYMBOL_GPL(ltt_relay_read_cstr);

/**
 * ltt_relay_read_get_page - Get a whole page to read from
 * @bufa : buffer
 * @offset : offset within the buffer
 *
 * Should be protected by get_subbuf/put_subbuf.
 */
struct page *ltt_relay_read_get_page(struct ltt_chanbuf_alloc *bufa,
                                     size_t offset)
{
        size_t index;
        struct chanbuf_page *rpages;
        struct ltt_chan_alloc *chana = bufa->chan;

        offset &= chana->buf_size - 1;
        index = (offset & (chana->sb_size - 1)) >> PAGE_SHIFT;
        rpages = bufa->buf_rsb.pages;
        WARN_ON_ONCE(RCHAN_SB_IS_NOREF(rpages));
        return rpages[index].page;
}
EXPORT_SYMBOL_GPL(ltt_relay_read_get_page);

/**
 * ltt_relay_read_offset_address - get address of a location within the buffer
 * @bufa : buffer
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located (for read).
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's never on a page boundary, it's safe to write directly to this address,
 * as long as the write is never bigger than a page size.
 */
void *ltt_relay_read_offset_address(struct ltt_chanbuf_alloc *bufa,
                                    size_t offset)
{
        size_t index;
        struct chanbuf_page *rpages;
        struct ltt_chan_alloc *chana = bufa->chan;

        offset &= chana->buf_size - 1;
        index = (offset & (chana->sb_size - 1)) >> PAGE_SHIFT;
        rpages = bufa->buf_rsb.pages;
        WARN_ON_ONCE(RCHAN_SB_IS_NOREF(rpages));
        return rpages[index].virt + (offset & ~PAGE_MASK);
}
EXPORT_SYMBOL_GPL(ltt_relay_read_offset_address);

/**
 * ltt_relay_offset_address - get address of a location within the buffer
 * @bufa : buffer
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located.
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's never on a page boundary, it's safe to write directly to this address,
 * as long as the write is never bigger than a page size.
 */
void *ltt_relay_offset_address(struct ltt_chanbuf_alloc *bufa, size_t offset)
{
        size_t sbidx, index;
        struct chanbuf_page *rpages;
        struct ltt_chan_alloc *chana = bufa->chan;

        offset &= chana->buf_size - 1;
        sbidx = offset >> chana->sb_size_order;
        index = (offset & (chana->sb_size - 1)) >> PAGE_SHIFT;
        rpages = bufa->buf_wsb[sbidx].pages;
        WARN_ON_ONCE(RCHAN_SB_IS_NOREF(rpages));
        return rpages[index].virt + (offset & ~PAGE_MASK);
}
EXPORT_SYMBOL_GPL(ltt_relay_offset_address);

static struct notifier_block ltt_relay_hotcpu = {
        .notifier_call = ltt_relay_hotcpu_callback,
        .priority = 5,
};

static __init int ltt_relay_alloc_init(void)
{
        register_cpu_notifier(&ltt_relay_hotcpu);
        ltt_relay_init();
        ltt_ascii_init();
        return 0;
}

static void __exit ltt_relay_alloc_exit(void)
{
        ltt_ascii_exit();
        ltt_relay_exit();
        unregister_cpu_notifier(&ltt_relay_hotcpu);
}

module_init(ltt_relay_alloc_init);
module_exit(ltt_relay_alloc_exit);