/*
 * ring_buffer_backend.c
 *
 * Copyright (C) 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

/**
 * lib_ring_buffer_backend_allocate - allocate a channel buffer
 * @config: ring buffer instance configuration
 * @bufb: the buffer struct
 * @size: total size of the buffer
 * @num_subbuf: number of subbuffers
 * @extra_reader_sb: need an extra subbuffer for the reader
 */
static
int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config,
				     struct lib_ring_buffer_backend *bufb,
				     size_t size, size_t num_subbuf,
				     int extra_reader_sb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	unsigned long j, num_pages, num_pages_per_subbuf, page_idx = 0;
	unsigned long subbuf_size, mmap_offset = 0;
	unsigned long num_subbuf_alloc;
	struct page **pages;
	void **virt;
	unsigned long i;
	num_pages = size >> get_count_order(PAGE_SIZE);
	num_pages_per_subbuf = num_pages >> get_count_order(num_subbuf);
	subbuf_size = chanb->subbuf_size;
	num_subbuf_alloc = num_subbuf;
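
	/*
	 * Example with illustrative numbers (not from the original source):
	 * size = 64 kB, num_subbuf = 4 and PAGE_SIZE = 4 kB give
	 * num_pages = 16 and num_pages_per_subbuf = 4. The extra reader
	 * subbuffer handled below then raises num_pages to 20 and
	 * num_subbuf_alloc to 5.
	 */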

	if (extra_reader_sb) {
		num_pages += num_pages_per_subbuf; /* Add pages for reader */
		num_subbuf_alloc++;
	}

	pages = malloc_align(sizeof(*pages) * num_pages);
	if (unlikely(!pages))
		goto pages_error;

	virt = malloc_align(sizeof(*virt) * num_pages);
	if (unlikely(!virt))
		goto virt_error;

	bufb->array = malloc_align(sizeof(*bufb->array) * num_subbuf_alloc);
	if (unlikely(!bufb->array))
		goto array_error;

	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_pages_node(cpu_to_node(max(bufb->cpu, 0)),
					    GFP_KERNEL | __GFP_ZERO, 0);
		if (unlikely(!pages[i]))
			goto depopulate;
		virt[i] = page_address(pages[i]);
	}
	bufb->num_pages_per_subbuf = num_pages_per_subbuf;

	/* Allocate backend pages array elements */
	for (i = 0; i < num_subbuf_alloc; i++) {
		bufb->array[i] = zmalloc_align(
			sizeof(struct lib_ring_buffer_backend_pages) +
			sizeof(struct lib_ring_buffer_backend_page)
			* num_pages_per_subbuf);
		if (unlikely(!bufb->array[i]))
			goto free_array;
	}

	/* Allocate write-side subbuffer table */
	bufb->buf_wsb = zmalloc_align(
		sizeof(struct lib_ring_buffer_backend_subbuffer)
		* num_subbuf);
	if (unlikely(!bufb->buf_wsb))
		goto free_array;

	for (i = 0; i < num_subbuf; i++)
		bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);
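
	/*
	 * Each write-side entry initially maps subbuffer i to backend pages
	 * array slot i, with the noref flag (third argument) set: no reader
	 * holds a reference to it yet.
	 */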

	/* Assign read-side subbuffer table */
	if (extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);
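
	/*
	 * In overwrite mode the reader starts out owning the extra subbuffer
	 * allocated above, and later exchanges it against the subbuffer it
	 * wants to read; without an extra reader subbuffer it simply points
	 * at subbuffer 0.
	 */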

	/* Assign pages to page index */
	for (i = 0; i < num_subbuf_alloc; i++) {
		for (j = 0; j < num_pages_per_subbuf; j++) {
			CHAN_WARN_ON(chanb, page_idx > num_pages);
			bufb->array[i]->p[j].virt = virt[page_idx];
			bufb->array[i]->p[j].page = pages[page_idx];
			page_idx++;
		}
		if (config->output == RING_BUFFER_MMAP) {
			bufb->array[i]->mmap_offset = mmap_offset;
			mmap_offset += subbuf_size;
		}
	}

	kfree(virt);
	kfree(pages);
	return 0;

free_array:
	for (i = 0; (i < num_subbuf_alloc && bufb->array[i]); i++)
		kfree(bufb->array[i]);
depopulate:
	/* Free all allocated pages */
	for (i = 0; (i < num_pages && pages[i]); i++)
		__free_page(pages[i]);
	kfree(bufb->array);
array_error:
	kfree(virt);
virt_error:
	kfree(pages);
pages_error:
	return -ENOMEM;
}

int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
				   struct channel_backend *chanb, int cpu)
{
	const struct lib_ring_buffer_config *config = chanb->config;

	bufb->chan = caa_container_of(chanb, struct channel, backend);
	bufb->cpu = cpu;

	return lib_ring_buffer_backend_allocate(config, bufb, chanb->buf_size,
						chanb->num_subbuf,
						chanb->extra_reader_sb);
}

void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	unsigned long i, j, num_subbuf_alloc;

	num_subbuf_alloc = chanb->num_subbuf;
	if (chanb->extra_reader_sb)
		num_subbuf_alloc++;

	kfree(bufb->buf_wsb);
	for (i = 0; i < num_subbuf_alloc; i++) {
		for (j = 0; j < bufb->num_pages_per_subbuf; j++)
			__free_page(bufb->array[i]->p[j].page);
		kfree(bufb->array[i]);
	}
	kfree(bufb->array);
	bufb->allocated = 0;
}

void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = chanb->config;
	unsigned long num_subbuf_alloc;
	unsigned int i;

	num_subbuf_alloc = chanb->num_subbuf;
	if (chanb->extra_reader_sb)
		num_subbuf_alloc++;

	for (i = 0; i < chanb->num_subbuf; i++)
		bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);
	if (chanb->extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	for (i = 0; i < num_subbuf_alloc; i++) {
		/* Don't reset mmap_offset */
		v_set(config, &bufb->array[i]->records_commit, 0);
		v_set(config, &bufb->array[i]->records_unread, 0);
		bufb->array[i]->data_size = 0;
		/* Don't reset backend page and virt addresses */
	}
	/* Don't reset num_pages_per_subbuf, cpu, allocated */
	v_set(config, &bufb->records_read, 0);
}

/*
 * The frontend is responsible for also calling ring_buffer_backend_reset for
 * each buffer when calling channel_backend_reset.
 */
void channel_backend_reset(struct channel_backend *chanb)
{
	struct channel *chan = caa_container_of(chanb, struct channel, backend);
	const struct lib_ring_buffer_config *config = chanb->config;

	/*
	 * Don't reset buf_size, subbuf_size, subbuf_size_order,
	 * num_subbuf_order, buf_size_order, extra_reader_sb, num_subbuf,
	 * priv, notifiers, config, cpumask and name.
	 */
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
}

#ifdef CONFIG_HOTPLUG_CPU
/**
 * lib_ring_buffer_cpu_hp_callback - CPU hotplug callback
 * @nb: notifier block
 * @action: hotplug action to take
 * @hcpu: CPU number
 *
 * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
 */
static
int __cpuinit lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
					      unsigned long action,
					      void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct channel_backend *chanb = caa_container_of(nb,
					struct channel_backend, cpu_hp_notifier);
	const struct lib_ring_buffer_config *config = chanb->config;
	struct lib_ring_buffer *buf;
	int ret;

	CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		buf = per_cpu_ptr(chanb->buf, cpu);
		ret = lib_ring_buffer_create(buf, chanb, cpu);
		if (ret) {
			printk(KERN_ERR
			       "ring_buffer_cpu_hp_callback: cpu %d "
			       "buffer creation failed\n", cpu);
			return NOTIFY_BAD;
		}
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		/*
		 * No need to do a buffer switch here, because it will happen
		 * when tracing is stopped, or will be done by switch timer
		 * CPU DEAD callback.
		 */
		break;
	}
	return NOTIFY_OK;
}
#endif

/**
 * channel_backend_init - initialize a channel backend
 * @chanb: channel backend
 * @name: channel name
 * @config: client ring buffer configuration
 * @priv: client private data
 * @subbuf_size: size of sub-buffers (at least PAGE_SIZE, power of 2)
 * @num_subbuf: number of sub-buffers (power of 2)
 *
 * Returns 0 on success, a negative error value otherwise.
 *
 * Creates per-cpu channel buffers using the sizes and attributes
 * specified. The created channel buffer files will be named
 * name_0...name_N-1. File permissions will be %S_IRUSR.
 *
 * Called with CPU hotplug disabled.
 */
int channel_backend_init(struct channel_backend *chanb,
			 const char *name,
			 const struct lib_ring_buffer_config *config,
			 void *priv, size_t subbuf_size, size_t num_subbuf)
{
	struct channel *chan = caa_container_of(chanb, struct channel, backend);
	unsigned int i;
	int ret;

	if (!name)
		return -EPERM;

	if (!(subbuf_size && num_subbuf))
		return -EPERM;

	/* Check that the subbuffer size is at least a page. */
	if (subbuf_size < PAGE_SIZE)
		return -EINVAL;

	/*
	 * Make sure the number of subbuffers and subbuffer size are power of 2.
	 */
	CHAN_WARN_ON(chanb, hweight32(subbuf_size) != 1);
	CHAN_WARN_ON(chanb, hweight32(num_subbuf) != 1);

	ret = subbuffer_id_check_index(config, num_subbuf);
	if (ret)
		return ret;

	chanb->priv = priv;
	chanb->buf_size = num_subbuf * subbuf_size;
	chanb->subbuf_size = subbuf_size;
	chanb->buf_size_order = get_count_order(chanb->buf_size);
	chanb->subbuf_size_order = get_count_order(subbuf_size);
	chanb->num_subbuf_order = get_count_order(num_subbuf);
	chanb->extra_reader_sb =
		(config->mode == RING_BUFFER_OVERWRITE) ? 1 : 0;
	chanb->num_subbuf = num_subbuf;
	strlcpy(chanb->name, name, NAME_MAX);
	chanb->config = config;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		if (!zalloc_cpumask_var(&chanb->cpumask, GFP_KERNEL))
			return -ENOMEM;
	}

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		/* Allocating the buffer per-cpu structures */
		chanb->buf = alloc_percpu(struct lib_ring_buffer);
		if (!chanb->buf)
			goto free_cpumask;
		/*
		 * In case of non-hotplug cpu, if the ring-buffer is allocated
		 * in early initcall, it will not be notified of secondary cpus.
		 * In that off case, we need to allocate for all possible cpus.
		 */
#ifdef CONFIG_HOTPLUG_CPU
		/*
		 * buf->backend.allocated test takes care of concurrent CPU
		 * hotplug.
		 * Priority higher than frontend, so we create the ring buffer
		 * before we start the timer.
		 */
		chanb->cpu_hp_notifier.notifier_call =
			lib_ring_buffer_cpu_hp_callback;
		chanb->cpu_hp_notifier.priority = 5;
		register_hotcpu_notifier(&chanb->cpu_hp_notifier);

		get_online_cpus();
		for_each_online_cpu(i) {
			ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
						     chanb, i);
			if (ret)
				goto free_bufs;	/* cpu hotplug locked */
		}
		put_online_cpus();
#else
		for_each_possible_cpu(i) {
			ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
						     chanb, i);
			if (ret)
				goto free_bufs;	/* cpu hotplug locked */
		}
#endif
	} else {
		chanb->buf = kzalloc(sizeof(struct lib_ring_buffer), GFP_KERNEL);
		if (!chanb->buf)
			goto free_cpumask;
		ret = lib_ring_buffer_create(chanb->buf, chanb, -1);
		if (ret)
			goto free_bufs;
	}
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);

	return 0;

free_bufs:
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		for_each_possible_cpu(i) {
			struct lib_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);

			if (!buf->backend.allocated)
				continue;
			lib_ring_buffer_free(buf);
		}
#ifdef CONFIG_HOTPLUG_CPU
		put_online_cpus();
#endif
		free_percpu(chanb->buf);
	} else
		kfree(chanb->buf);
free_cpumask:
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
		free_cpumask_var(chanb->cpumask);
	return -ENOMEM;
}

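/*
 * Hypothetical usage sketch (the embedding structures and values are
 * illustrative, not taken from this file): a client embedding a channel
 * backend would typically initialize it with power-of-2 geometry, e.g.
 *
 *	ret = channel_backend_init(&chan->backend, "chan0", config, priv,
 *				   2 * PAGE_SIZE, 8);
 *
 * which creates a 16-page buffer per cpu: 8 subbuffers of 2 pages each
 * (plus one extra reader subbuffer in overwrite mode).
 */
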
/**
 * channel_backend_unregister_notifiers - unregister notifiers
 * @chanb: the channel backend
 */
void channel_backend_unregister_notifiers(struct channel_backend *chanb)
{
	const struct lib_ring_buffer_config *config = chanb->config;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
		unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
}

/**
 * channel_backend_free - destroy the channel
 * @chanb: the channel backend
 *
 * Destroys all channel buffers and frees the channel.
 */
void channel_backend_free(struct channel_backend *chanb)
{
	const struct lib_ring_buffer_config *config = chanb->config;
	unsigned int i;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		for_each_possible_cpu(i) {
			struct lib_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);

			if (!buf->backend.allocated)
				continue;
			lib_ring_buffer_free(buf);
		}
		free_cpumask_var(chanb->cpumask);
		free_percpu(chanb->buf);
	} else {
		struct lib_ring_buffer *buf = chanb->buf;

		CHAN_WARN_ON(chanb, !buf->backend.allocated);
		lib_ring_buffer_free(buf);
		kfree(buf);
	}
}

/**
 * _lib_ring_buffer_write - write data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : number of bytes already copied within the current page
 */
void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb, size_t offset,
			    const void *src, size_t len, ssize_t pagecpy)
{
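	/*
	 * Slow path: taken when a record crosses a page boundary. On entry,
	 * pagecpy bytes of the record have already been copied into the
	 * current backend page by the caller's fast path; the loop below
	 * copies the remainder page by page.
	 */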
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	do {
		len -= pagecpy;
		src += pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> get_count_order(PAGE_SIZE);

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		lib_ring_buffer_do_copy(config,
					rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					src, pagecpy);
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_write);

/**
 * lib_ring_buffer_read - read data from a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : length to copy to destination
 *
 * Should be protected by get_subbuf/put_subbuf.
 * Returns the length copied.
 */
size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb, size_t offset,
			    void *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = chanb->config;
	size_t index;
	ssize_t pagecpy, orig_len;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	orig_len = len;
	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> get_count_order(PAGE_SIZE);
	if (unlikely(!len))
		return 0;
	for (;;) {
		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		memcpy(dest, rpages->p[index].virt + (offset & ~PAGE_MASK),
		       pagecpy);
		len -= pagecpy;
		if (likely(!len))
			break;
		dest += pagecpy;
		offset += pagecpy;
		index = (offset & (chanb->subbuf_size - 1)) >> get_count_order(PAGE_SIZE);
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	return orig_len;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read);

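/*
 * Hedged read-side sketch (variable names are illustrative): a consumer
 * that holds the subbuffer reference, e.g. taken via the frontend's
 * lib_ring_buffer_get_subbuf(), could drain one record with:
 *
 *	copied = lib_ring_buffer_read(&buf->backend, read_offset,
 *				      record, record_len);
 */
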
/**
 * lib_ring_buffer_read_cstr - read a C-style string from ring_buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : destination's length
 *
 * Returns the string's length.
 * Should be protected by get_subbuf/put_subbuf.
 */
int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb, size_t offset,
			      void *dest, size_t len)
{
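	/*
	 * Walks the string page by page: strnlen() bounds the scan to the
	 * current page, and the loop ends at the first page containing a
	 * NUL terminator (strpagelen < pagelen).
	 */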
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = chanb->config;
	size_t index;
	ssize_t pagecpy, pagelen, strpagelen, orig_offset;
	char *str;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> get_count_order(PAGE_SIZE);
	orig_offset = offset;
	for (;;) {
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		str = (char *)rpages->p[index].virt + (offset & ~PAGE_MASK);
		pagelen = PAGE_SIZE - (offset & ~PAGE_MASK);
		strpagelen = strnlen(str, pagelen);
		if (len) {
			pagecpy = min_t(size_t, len, strpagelen);
			if (dest) {
				memcpy(dest, str, pagecpy);
				dest += pagecpy;
			}
			len -= pagecpy;
		}
		offset += strpagelen;
		index = (offset & (chanb->subbuf_size - 1)) >> get_count_order(PAGE_SIZE);
		if (strpagelen < pagelen)
			break;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	if (dest && len)
		((char *)dest)[0] = 0;
	return offset - orig_offset;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_cstr);

/**
 * lib_ring_buffer_read_get_page - Get a whole page to read from
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @virt : pointer to page address (output)
 *
 * Should be protected by get_subbuf/put_subbuf.
 * Returns the pointer to the page struct pointer.
 */
struct page **lib_ring_buffer_read_get_page(struct lib_ring_buffer_backend *bufb,
					    size_t offset, void ***virt)
{
	size_t index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> get_count_order(PAGE_SIZE);
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	*virt = &rpages->p[index].virt;
	return &rpages->p[index].page;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_get_page);

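/*
 * Note (hedged): this page-level accessor is aimed at zero-copy output
 * paths (e.g. an mmap or splice reader) that hand whole backend pages to
 * user space instead of copying through lib_ring_buffer_read().
 */
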
/**
 * lib_ring_buffer_read_offset_address - get address of a buffer location
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located (for read).
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's never on a page boundary, it's safe to write directly to this address,
 * as long as the write is never bigger than a page size.
 */
void *lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
					  size_t offset)
{
	size_t index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> get_count_order(PAGE_SIZE);
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	return rpages->p[index].virt + (offset & ~PAGE_MASK);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_offset_address);

/**
 * lib_ring_buffer_offset_address - get address of a location within the buffer
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located.
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's always at the beginning of a page, it's safe to write directly to this
 * address, as long as the write is never bigger than a page size.
 */
void *lib_ring_buffer_offset_address(struct lib_ring_buffer_backend *bufb,
				     size_t offset)
{
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	sbidx = offset >> chanb->subbuf_size_order;
	index = (offset & (chanb->subbuf_size - 1)) >> get_count_order(PAGE_SIZE);
	id = bufb->buf_wsb[sbidx].id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	return rpages->p[index].virt + (offset & ~PAGE_MASK);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_offset_address);