/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
 *
 * ring_buffer_backend.c
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include <wrapper/cpu.h>
#include <wrapper/mm.h>
#include <wrapper/vmalloc.h>	/* for wrapper_vmalloc_sync_mappings() */
#include <ringbuffer/config.h>
#include <ringbuffer/backend.h>
#include <ringbuffer/frontend.h>
/**
 * lib_ring_buffer_backend_allocate - allocate a channel buffer
 * @config: ring buffer instance configuration
 * @bufb: the buffer backend struct
 * @size: total size of the buffer
 * @num_subbuf: number of subbuffers
 * @extra_reader_sb: need extra subbuffer for reader
 */
static
int lib_ring_buffer_backend_allocate(const struct lttng_kernel_ring_buffer_config *config,
				     struct lttng_kernel_ring_buffer_backend *bufb,
				     size_t size, size_t num_subbuf,
				     int extra_reader_sb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	unsigned long j, num_pages, num_pages_per_subbuf, page_idx = 0;
	unsigned long subbuf_size, mmap_offset = 0;
	unsigned long num_subbuf_alloc;
	struct page **pages;
	unsigned long i;
	num_pages = size >> PAGE_SHIFT;

	/*
	 * Verify that there are enough free pages available on the system for
	 * the current allocation request.
	 * wrapper_check_enough_free_pages uses si_mem_available() if available
	 * and returns if there should be enough free pages based on the
	 * current estimate.
	 */
	if (!wrapper_check_enough_free_pages(num_pages))
		goto not_enough_pages;

	/*
	 * Set the current user thread as the first target of the OOM killer.
	 * If the estimate received by si_mem_available() was off, and we do
	 * end up running out of memory because of this buffer allocation, we
	 * want to kill the offending app first.
	 */
	wrapper_set_current_oom_origin();
	num_pages_per_subbuf = num_pages >> get_count_order(num_subbuf);
	subbuf_size = chanb->subbuf_size;
	num_subbuf_alloc = num_subbuf;

	if (extra_reader_sb) {
		num_pages += num_pages_per_subbuf; /* Add pages for reader */
		num_subbuf_alloc++;
	}

	pages = vmalloc_node(ALIGN(sizeof(*pages) * num_pages,
				   1 << INTERNODE_CACHE_SHIFT),
			cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!pages))
		goto pages_error;
	bufb->array = lttng_kvmalloc_node(ALIGN(sizeof(*bufb->array)
					* num_subbuf_alloc,
				  1 << INTERNODE_CACHE_SHIFT),
			GFP_KERNEL | __GFP_NOWARN,
			cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->array))
		goto array_error;
	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_pages_node(cpu_to_node(max(bufb->cpu, 0)),
					    GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 0);
		if (unlikely(!pages[i]))
			goto depopulate;
	}
	bufb->num_pages_per_subbuf = num_pages_per_subbuf;
	/* Allocate backend pages array elements */
	for (i = 0; i < num_subbuf_alloc; i++) {
		bufb->array[i] =
			lttng_kvzalloc_node(ALIGN(
				sizeof(struct lttng_kernel_ring_buffer_backend_pages) +
				sizeof(struct lttng_kernel_ring_buffer_backend_page)
				* num_pages_per_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
		if (!bufb->array[i])
			goto free_array;
	}
	/* Allocate write-side subbuffer table */
	bufb->buf_wsb = lttng_kvzalloc_node(ALIGN(
				sizeof(struct lttng_kernel_ring_buffer_backend_subbuffer)
				* num_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->buf_wsb))
		goto free_array;

	for (i = 0; i < num_subbuf; i++)
		bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);
	/* Assign read-side subbuffer table */
	if (extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);
	/* Allocate subbuffer packet counter table */
	bufb->buf_cnt = lttng_kvzalloc_node(ALIGN(
				sizeof(struct lttng_kernel_ring_buffer_backend_counts)
				* num_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->buf_cnt))
		goto free_wsb;
	/* Assign pages to page index */
	for (i = 0; i < num_subbuf_alloc; i++) {
		for (j = 0; j < num_pages_per_subbuf; j++) {
			CHAN_WARN_ON(chanb, page_idx > num_pages);
			bufb->array[i]->p[j].virt = page_address(pages[page_idx]);
			bufb->array[i]->p[j].pfn = page_to_pfn(pages[page_idx]);
			page_idx++;
		}
		if (config->output == RING_BUFFER_MMAP) {
			bufb->array[i]->mmap_offset = mmap_offset;
			mmap_offset += subbuf_size;
		}
	}

	/*
	 * If kmalloc ever uses vmalloc underneath, make sure the buffer pages
	 * will not fault.
	 */
	wrapper_vmalloc_sync_mappings();
	wrapper_clear_current_oom_origin();
	vfree(pages);
	return 0;
free_wsb:
	lttng_kvfree(bufb->buf_wsb);
free_array:
	for (i = 0; (i < num_subbuf_alloc && bufb->array[i]); i++)
		lttng_kvfree(bufb->array[i]);
depopulate:
	/* Free all allocated pages */
	for (i = 0; (i < num_pages && pages[i]); i++)
		__free_page(pages[i]);
	lttng_kvfree(bufb->array);
array_error:
	vfree(pages);
pages_error:
	wrapper_clear_current_oom_origin();
not_enough_pages:
	return -ENOMEM;
}
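/*
 * Illustrative sizing example (not from the original source): with
 * size = 64 kB, num_subbuf = 4 and PAGE_SIZE = 4 kB, the function above
 * computes num_pages = 16 and num_pages_per_subbuf = 16 >> 2 = 4.
 * When extra_reader_sb is set (overwrite mode), one extra sub-buffer is
 * allocated for the reader, so num_subbuf_alloc = 5 and num_pages grows
 * to 20 pages.
 */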
int lib_ring_buffer_backend_create(struct lttng_kernel_ring_buffer_backend *bufb,
				   struct channel_backend *chanb, int cpu)
{
	const struct lttng_kernel_ring_buffer_config *config = &chanb->config;

	bufb->chan = container_of(chanb, struct lttng_kernel_ring_buffer_channel, backend);
	bufb->cpu = cpu;

	return lib_ring_buffer_backend_allocate(config, bufb, chanb->buf_size,
						chanb->num_subbuf,
						chanb->extra_reader_sb);
}
void lib_ring_buffer_backend_free(struct lttng_kernel_ring_buffer_backend *bufb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	unsigned long i, j, num_subbuf_alloc;

	num_subbuf_alloc = chanb->num_subbuf;
	if (chanb->extra_reader_sb)
		num_subbuf_alloc++;

	lttng_kvfree(bufb->buf_wsb);
	lttng_kvfree(bufb->buf_cnt);
	for (i = 0; i < num_subbuf_alloc; i++) {
		for (j = 0; j < bufb->num_pages_per_subbuf; j++)
			__free_page(pfn_to_page(bufb->array[i]->p[j].pfn));
		lttng_kvfree(bufb->array[i]);
	}
	lttng_kvfree(bufb->array);
	bufb->allocated = 0;
}
void lib_ring_buffer_backend_reset(struct lttng_kernel_ring_buffer_backend *bufb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
	unsigned long num_subbuf_alloc;
	unsigned int i;

	num_subbuf_alloc = chanb->num_subbuf;
	if (chanb->extra_reader_sb)
		num_subbuf_alloc++;

	for (i = 0; i < chanb->num_subbuf; i++)
		bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);
	if (chanb->extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	for (i = 0; i < num_subbuf_alloc; i++) {
		/* Don't reset mmap_offset */
		v_set(config, &bufb->array[i]->records_commit, 0);
		v_set(config, &bufb->array[i]->records_unread, 0);
		bufb->array[i]->data_size = 0;
		/* Don't reset backend page and virt addresses */
	}
	/* Don't reset num_pages_per_subbuf, cpu, allocated */
	v_set(config, &bufb->records_read, 0);
}
/*
 * The frontend is responsible for also calling ring_buffer_backend_reset for
 * each buffer when calling channel_backend_reset.
 */
void channel_backend_reset(struct channel_backend *chanb)
{
	struct lttng_kernel_ring_buffer_channel *chan = container_of(chanb,
			struct lttng_kernel_ring_buffer_channel, backend);
	const struct lttng_kernel_ring_buffer_config *config = &chanb->config;

	/*
	 * Don't reset buf_size, subbuf_size, subbuf_size_order,
	 * num_subbuf_order, buf_size_order, extra_reader_sb, num_subbuf,
	 * priv, notifiers, config, cpumask and name.
	 */
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
}
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))

/*
 * No need to implement a "dead" callback to do a buffer switch here,
 * because it will happen when tracing is stopped, or will be done by
 * switch timer CPU DEAD callback.
 * We don't free buffers when CPUs go away, because it would make trace
 * data vanish, which is unwanted.
 */
int lttng_cpuhp_rb_backend_prepare(unsigned int cpu,
		struct lttng_cpuhp_node *node)
{
	struct channel_backend *chanb = container_of(node,
			struct channel_backend, cpuhp_prepare);
	const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
	struct lttng_kernel_ring_buffer *buf;
	int ret;

	CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);

	buf = per_cpu_ptr(chanb->buf, cpu);
	ret = lib_ring_buffer_create(buf, chanb, cpu);
	if (ret) {
		printk(KERN_ERR
			"LTTng: ring_buffer_cpu_hp_callback: cpu %d "
			"buffer creation failed\n", cpu);
		return ret;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_backend_prepare);
#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */

#ifdef CONFIG_HOTPLUG_CPU

/**
 * lib_ring_buffer_cpu_hp_callback - CPU hotplug callback
 * @nb: notifier block
 * @action: hotplug action to take
 * @hcpu: CPU number
 *
 * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
 */
static
int lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
					unsigned long action,
					void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct channel_backend *chanb = container_of(nb, struct channel_backend,
						     cpu_hp_notifier);
	const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
	struct lttng_kernel_ring_buffer *buf;
	int ret;

	CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		buf = per_cpu_ptr(chanb->buf, cpu);
		ret = lib_ring_buffer_create(buf, chanb, cpu);
		if (ret) {
			printk(KERN_ERR
				"LTTng: ring_buffer_cpu_hp_callback: cpu %d "
				"buffer creation failed\n", cpu);
			return NOTIFY_BAD;
		}
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		/* No need to do a buffer switch here, because it will happen
		 * when tracing is stopped, or will be done by switch timer CPU
		 * DEAD callback. */
		break;
	}
	return NOTIFY_OK;
}

#endif /* CONFIG_HOTPLUG_CPU */

#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
/**
 * channel_backend_init - initialize a channel backend
 * @chanb: channel backend
 * @name: channel name
 * @config: client ring buffer configuration
 * @priv: client private data
 * @subbuf_size: size of sub-buffers (>= PAGE_SIZE, power of 2)
 * @num_subbuf: number of sub-buffers (power of 2)
 *
 * Returns 0 on success, a negative error value otherwise.
 *
 * Creates per-cpu channel buffers using the sizes and attributes
 * specified. The created channel buffer files will be named
 * name_0...name_N-1. File permissions will be %S_IRUSR.
 *
 * Called with CPU hotplug disabled.
 */
int channel_backend_init(struct channel_backend *chanb,
			 const char *name,
			 const struct lttng_kernel_ring_buffer_config *config,
			 void *priv, size_t subbuf_size, size_t num_subbuf)
{
	struct lttng_kernel_ring_buffer_channel *chan = container_of(chanb,
			struct lttng_kernel_ring_buffer_channel, backend);
	unsigned int i;
	int ret;

	if (!name)
		return -EPERM;
	/* Check that the subbuffer size is at least a page. */
	if (subbuf_size < PAGE_SIZE)
		return -EINVAL;

	/*
	 * Make sure the number of subbuffers and subbuffer size are
	 * power of 2 and nonzero.
	 */
	if (!subbuf_size || (subbuf_size & (subbuf_size - 1)))
		return -EINVAL;
	if (!num_subbuf || (num_subbuf & (num_subbuf - 1)))
		return -EINVAL;

	/*
	 * Overwrite mode buffers require at least 2 subbuffers per
	 * buffer.
	 */
	if (config->mode == RING_BUFFER_OVERWRITE && num_subbuf < 2)
		return -EINVAL;

	ret = subbuffer_id_check_index(config, num_subbuf);
	if (ret)
		return ret;
	chanb->priv = priv;
	chanb->buf_size = num_subbuf * subbuf_size;
	chanb->subbuf_size = subbuf_size;
	chanb->buf_size_order = get_count_order(chanb->buf_size);
	chanb->subbuf_size_order = get_count_order(subbuf_size);
	chanb->num_subbuf_order = get_count_order(num_subbuf);
	chanb->extra_reader_sb =
		(config->mode == RING_BUFFER_OVERWRITE) ? 1 : 0;
	chanb->num_subbuf = num_subbuf;
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(6,8,0))
	strscpy(chanb->name, name, NAME_MAX);
#else
	strlcpy(chanb->name, name, NAME_MAX);
#endif
	memcpy(&chanb->config, config, sizeof(chanb->config));
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		if (!zalloc_cpumask_var(&chanb->cpumask, GFP_KERNEL))
			return -ENOMEM;
	}
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		/* Allocating the buffer per-cpu structures */
		chanb->buf = alloc_percpu(struct lttng_kernel_ring_buffer);
		if (!chanb->buf)
			goto free_cpumask;

#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
		chanb->cpuhp_prepare.component = LTTNG_RING_BUFFER_BACKEND;
		ret = cpuhp_state_add_instance(lttng_rb_hp_prepare,
			&chanb->cpuhp_prepare.node);
		if (ret)
			goto free_bufs;
#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
		{
			/*
			 * In case of non-hotplug cpu, if the ring-buffer is allocated
			 * in early initcall, it will not be notified of secondary cpus.
			 * In that case, we need to allocate for all possible cpus.
			 */
#ifdef CONFIG_HOTPLUG_CPU
			/*
			 * buf->backend.allocated test takes care of concurrent CPU
			 * hotplug.
			 * Priority higher than frontend, so we create the ring buffer
			 * before we start the timer.
			 */
			chanb->cpu_hp_notifier.notifier_call =
					lib_ring_buffer_cpu_hp_callback;
			chanb->cpu_hp_notifier.priority = 5;
			register_hotcpu_notifier(&chanb->cpu_hp_notifier);

			lttng_cpus_read_lock();
			for_each_online_cpu(i) {
				ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
							     chanb, i);
				if (ret)
					goto free_bufs;	/* cpu hotplug locked */
			}
			lttng_cpus_read_unlock();
#else
			for_each_possible_cpu(i) {
				ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
							     chanb, i);
				if (ret)
					goto free_bufs;
			}
#endif
		}
#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
	} else {
		chanb->buf = kzalloc(sizeof(struct lttng_kernel_ring_buffer), GFP_KERNEL);
		if (!chanb->buf)
			goto free_cpumask;
		ret = lib_ring_buffer_create(chanb->buf, chanb, -1);
		if (ret)
			goto free_bufs;
	}
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);

	return 0;
free_bufs:
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
		/*
		 * Teardown of lttng_rb_hp_prepare instance
		 * on "add" error is handled within cpu hotplug,
		 * no teardown to do from the caller.
		 */
#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
#ifdef CONFIG_HOTPLUG_CPU
		lttng_cpus_read_unlock();
		unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
#endif
#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
		for_each_possible_cpu(i) {
			struct lttng_kernel_ring_buffer *buf =
				per_cpu_ptr(chanb->buf, i);

			if (!buf->backend.allocated)
				continue;
			lib_ring_buffer_free(buf);
		}
		free_percpu(chanb->buf);
	} else
		kfree(chanb->buf);
free_cpumask:
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
		free_cpumask_var(chanb->cpumask);
	return -ENOMEM;
}
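/*
 * Illustrative note (not from the original source): with PAGE_SIZE =
 * 4 kB, a call such as channel_backend_init(chanb, "chan", config,
 * priv, 8192, 4) yields buf_size = 32 kB, subbuf_size_order = 13 and
 * num_subbuf_order = 2. A subbuf_size of 12288 (not a power of 2) or
 * a num_subbuf of 3 would be rejected with -EINVAL by the checks
 * above.
 */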
/**
 * channel_backend_unregister_notifiers - unregister notifiers
 * @chanb: channel backend
 */
void channel_backend_unregister_notifiers(struct channel_backend *chanb)
{
	const struct lttng_kernel_ring_buffer_config *config = &chanb->config;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
		int ret;

		ret = cpuhp_state_remove_instance(lttng_rb_hp_prepare,
				&chanb->cpuhp_prepare.node);
		WARN_ON(ret);
#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
		unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
	}
}
/**
 * channel_backend_free - destroy the channel
 * @chanb: channel backend
 *
 * Destroy all channel buffers and free the channel.
 */
void channel_backend_free(struct channel_backend *chanb)
{
	const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
	unsigned int i;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		for_each_possible_cpu(i) {
			struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);

			if (!buf->backend.allocated)
				continue;
			lib_ring_buffer_free(buf);
		}
		free_cpumask_var(chanb->cpumask);
		free_percpu(chanb->buf);
	} else {
		struct lttng_kernel_ring_buffer *buf = chanb->buf;

		CHAN_WARN_ON(chanb, !buf->backend.allocated);
		lib_ring_buffer_free(buf);
		kfree(buf);
	}
}
/**
 * lib_ring_buffer_write - write data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 */
void _lib_ring_buffer_write(struct lttng_kernel_ring_buffer_backend *bufb, size_t offset,
			    const void *src, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index, bytes_left_in_page;
	struct lttng_kernel_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	do {
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		bytes_left_in_page = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		lib_ring_buffer_do_copy(config,
					rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					src, bytes_left_in_page);
		len -= bytes_left_in_page;
		src += bytes_left_in_page;
		offset += bytes_left_in_page;
	} while (unlikely(len));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_write);
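/*
 * Illustrative sketch (not part of the original file): how a client
 * could push a record payload through the slow-path write above. The
 * frontend's space reservation guarantees the record fits within the
 * current sub-buffer, so the copy loop only ever splits across pages.
 * The example_* name is hypothetical, not part of the API.
 */
static inline __maybe_unused
void example_backend_write(struct lttng_kernel_ring_buffer_backend *bufb,
		size_t offset, const void *payload, size_t len)
{
	if (len)
		_lib_ring_buffer_write(bufb, offset, payload, len);
}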
/**
 * lib_ring_buffer_memset - write len bytes of c to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @c : the byte to write
 * @len : length to write
 */
void _lib_ring_buffer_memset(struct lttng_kernel_ring_buffer_backend *bufb,
			     size_t offset, int c, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index, bytes_left_in_page;
	struct lttng_kernel_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	do {
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		bytes_left_in_page = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		lib_ring_buffer_do_memset(rpages->p[index].virt
					  + (offset & ~PAGE_MASK),
					  c, bytes_left_in_page);
		len -= bytes_left_in_page;
		offset += bytes_left_in_page;
	} while (unlikely(len));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_memset);
/**
 * lib_ring_buffer_strcpy - write string data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pad : character to use for padding
 */
void _lib_ring_buffer_strcpy(struct lttng_kernel_ring_buffer_backend *bufb,
			size_t offset, const char *src, size_t len, int pad)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index, bytes_left_in_page;
	struct lttng_kernel_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	bool src_terminated = false;

	CHAN_WARN_ON(chanb, !len);
	do {
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		bytes_left_in_page = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		if (likely(!src_terminated)) {
			size_t count, to_copy;

			to_copy = bytes_left_in_page;
			if (bytes_left_in_page == len)
				to_copy--;	/* Final '\0' */
			count = lib_ring_buffer_do_strcpy(config,
					rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					src, to_copy);
			offset += count;
			/* Padding */
			if (unlikely(count < to_copy)) {
				size_t pad_len = to_copy - count;

				/* Next pages will have padding */
				src_terminated = true;
				lib_ring_buffer_do_memset(rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					pad, pad_len);
				offset += pad_len;
			}
		} else {
			size_t pad_len;

			pad_len = bytes_left_in_page;
			if (bytes_left_in_page == len)
				pad_len--;	/* Final '\0' */
			lib_ring_buffer_do_memset(rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
				pad, pad_len);
			offset += pad_len;
		}
		len -= bytes_left_in_page;
		if (!src_terminated)
			src += bytes_left_in_page;
	} while (unlikely(len));

	/* Ending '\0' */
	lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
			'\0', 1);
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy);
/**
 * _lib_ring_buffer_pstrcpy - write to a buffer backend P-string
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source pointer to copy from
 * @len : length of data to copy
 * @pad : character to use for padding
 *
 * This function copies up to @len bytes of data from a source pointer
 * to a Pascal String into the buffer backend. If a terminating '\0'
 * character is found in @src before @len characters are copied, pad the
 * buffer with @pad characters (e.g. '\0').
 *
 * The length of the pascal strings in the ring buffer is explicit: it
 * is either the array or sequence length.
 */
void _lib_ring_buffer_pstrcpy(struct lttng_kernel_ring_buffer_backend *bufb,
			size_t offset, const char *src, size_t len, int pad)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index, bytes_left_in_page;
	struct lttng_kernel_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	bool src_terminated = false;

	CHAN_WARN_ON(chanb, !len);
	do {
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		bytes_left_in_page = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		if (likely(!src_terminated)) {
			size_t count, to_copy;

			to_copy = bytes_left_in_page;
			count = lib_ring_buffer_do_strcpy(config,
					rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					src, to_copy);
			offset += count;
			/* Padding */
			if (unlikely(count < to_copy)) {
				size_t pad_len = to_copy - count;

				/* Next pages will have padding */
				src_terminated = true;
				lib_ring_buffer_do_memset(rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					pad, pad_len);
				offset += pad_len;
			}
		} else {
			size_t pad_len;

			pad_len = bytes_left_in_page;
			lib_ring_buffer_do_memset(rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
				pad, pad_len);
			offset += pad_len;
		}
		len -= bytes_left_in_page;
		if (!src_terminated)
			src += bytes_left_in_page;
	} while (unlikely(len));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_pstrcpy);
/**
 * lib_ring_buffer_copy_from_user_inatomic - write user data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 *
 * This function deals with userspace pointers, it should never be called
 * directly without having the src pointer checked with access_ok()
 * previously.
 */
void _lib_ring_buffer_copy_from_user_inatomic(struct lttng_kernel_ring_buffer_backend *bufb,
				      size_t offset,
				      const void __user *src, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index, bytes_left_in_page;
	struct lttng_kernel_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	int ret;

	do {
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		bytes_left_in_page = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
				&& subbuffer_id_is_noref(config, id));
		ret = lib_ring_buffer_do_copy_from_user_inatomic(rpages->p[index].virt
							+ (offset & ~PAGE_MASK),
							src, bytes_left_in_page) != 0;
		if (ret > 0) {
			/* Copy failed. */
			_lib_ring_buffer_memset(bufb, offset, 0, len);
			break; /* stop copy */
		}
		len -= bytes_left_in_page;
		src += bytes_left_in_page;
		offset += bytes_left_in_page;
	} while (unlikely(len));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_copy_from_user_inatomic);
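/*
 * Illustrative sketch (not part of the original file): validating the
 * user pointer before the in-atomic copy, mirroring the fallback above
 * that zeroes the record when the copy cannot be performed. Assumes a
 * kernel >= 5.0 where access_ok() takes two arguments. The example_*
 * name is hypothetical, not part of the API.
 */
static inline __maybe_unused
void example_copy_user_payload(struct lttng_kernel_ring_buffer_backend *bufb,
		size_t offset, const void __user *usrc, size_t len)
{
	if (!access_ok(usrc, len)) {
		/* Invalid user pointer: pad the record with zeroes. */
		_lib_ring_buffer_memset(bufb, offset, 0, len);
		return;
	}
	_lib_ring_buffer_copy_from_user_inatomic(bufb, offset, usrc, len);
}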
/**
 * lib_ring_buffer_strcpy_from_user_inatomic - write userspace string data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pad : character to use for padding
 *
 * This function deals with userspace pointers, it should never be called
 * directly without having the src pointer checked with access_ok()
 * previously.
 */
void _lib_ring_buffer_strcpy_from_user_inatomic(struct lttng_kernel_ring_buffer_backend *bufb,
		size_t offset, const char __user *src, size_t len, int pad)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index, bytes_left_in_page;
	struct lttng_kernel_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	bool src_terminated = false;

	CHAN_WARN_ON(chanb, !len);
	do {
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		bytes_left_in_page = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
				&& subbuffer_id_is_noref(config, id));
		if (likely(!src_terminated)) {
			size_t count, to_copy;

			to_copy = bytes_left_in_page;
			if (bytes_left_in_page == len)
				to_copy--;	/* Final '\0' */
			count = lib_ring_buffer_do_strcpy_from_user_inatomic(config,
					rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					src, to_copy);
			offset += count;
			/* Padding */
			if (unlikely(count < to_copy)) {
				size_t pad_len = to_copy - count;

				/* Next pages will have padding */
				src_terminated = true;
				lib_ring_buffer_do_memset(rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					pad, pad_len);
				offset += pad_len;
			}
		} else {
			size_t pad_len;

			pad_len = bytes_left_in_page;
			if (bytes_left_in_page == len)
				pad_len--;	/* Final '\0' */
			lib_ring_buffer_do_memset(rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
				pad, pad_len);
			offset += pad_len;
		}
		len -= bytes_left_in_page;
		if (!src_terminated)
			src += bytes_left_in_page;
	} while (unlikely(len));

	/* Ending '\0' */
	lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
			'\0', 1);
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy_from_user_inatomic);
/**
 * _lib_ring_buffer_pstrcpy_from_user_inatomic - write userspace string to a buffer backend P-string
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source pointer to copy from
 * @len : length of data to copy
 * @pad : character to use for padding
 *
 * This function copies up to @len bytes of data from a source pointer
 * to a Pascal String into the buffer backend. If a terminating '\0'
 * character is found in @src before @len characters are copied, pad the
 * buffer with @pad characters (e.g. '\0').
 *
 * The length of the pascal strings in the ring buffer is explicit: it
 * is either the array or sequence length.
 *
 * This function deals with userspace pointers, it should never be called
 * directly without having the src pointer checked with access_ok()
 * previously.
 */
void _lib_ring_buffer_pstrcpy_from_user_inatomic(struct lttng_kernel_ring_buffer_backend *bufb,
		size_t offset, const char __user *src, size_t len, int pad)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index, bytes_left_in_page;
	struct lttng_kernel_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	bool src_terminated = false;

	CHAN_WARN_ON(chanb, !len);
	do {
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		bytes_left_in_page = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
				&& subbuffer_id_is_noref(config, id));
		if (likely(!src_terminated)) {
			size_t count, to_copy;

			to_copy = bytes_left_in_page;
			count = lib_ring_buffer_do_strcpy_from_user_inatomic(config,
					rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					src, to_copy);
			offset += count;
			/* Padding */
			if (unlikely(count < to_copy)) {
				size_t pad_len = to_copy - count;

				/* Next pages will have padding */
				src_terminated = true;
				lib_ring_buffer_do_memset(rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					pad, pad_len);
				offset += pad_len;
			}
		} else {
			size_t pad_len;

			pad_len = bytes_left_in_page;
			lib_ring_buffer_do_memset(rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
				pad, pad_len);
			offset += pad_len;
		}
		len -= bytes_left_in_page;
		if (!src_terminated)
			src += bytes_left_in_page;
	} while (unlikely(len));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_pstrcpy_from_user_inatomic);
/**
 * lib_ring_buffer_read - read data from ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : length to copy to destination
 *
 * Should be protected by get_subbuf/put_subbuf.
 * Returns the length copied.
 */
size_t lib_ring_buffer_read(struct lttng_kernel_ring_buffer_backend *bufb, size_t offset,
			    void *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
	size_t index, bytes_left_in_page, orig_len;
	struct lttng_kernel_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	orig_len = len;
	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	if (unlikely(!len))
		return 0;
	for (;;) {
		bytes_left_in_page = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		memcpy(dest, rpages->p[index].virt + (offset & ~PAGE_MASK),
		       bytes_left_in_page);
		len -= bytes_left_in_page;
		if (likely(!len))
			break;
		dest += bytes_left_in_page;
		offset += bytes_left_in_page;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	return orig_len;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read);
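/*
 * Illustrative sketch (not part of the original file): copying record
 * data out of the reader sub-buffer into a kernel buffer. The caller
 * must hold the sub-buffer (get_subbuf/put_subbuf); the offset is
 * masked to the buffer size internally. The example_* name is
 * hypothetical, not part of the API.
 */
static inline __maybe_unused
size_t example_read_record(struct lttng_kernel_ring_buffer_backend *bufb,
		size_t offset, void *dest, size_t len)
{
	/* Returns the number of bytes copied, or 0 when len is 0. */
	return lib_ring_buffer_read(bufb, offset, dest, len);
}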
/**
 * __lib_ring_buffer_copy_to_user - read data from ring_buffer to userspace
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination userspace address
 * @len : length to copy to destination
 *
 * Should be protected by get_subbuf/put_subbuf.
 * access_ok() must have been performed on dest addresses prior to calling
 * this function.
 * Returns -EFAULT on error, 0 if ok.
 */
int __lib_ring_buffer_copy_to_user(struct lttng_kernel_ring_buffer_backend *bufb,
				   size_t offset, void __user *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
	size_t index;
	ssize_t bytes_left_in_page;
	struct lttng_kernel_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	if (unlikely(!len))
		return 0;
	for (;;) {
		bytes_left_in_page = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		if (__copy_to_user(dest,
			       rpages->p[index].virt + (offset & ~PAGE_MASK),
			       bytes_left_in_page))
			return -EFAULT;
		len -= bytes_left_in_page;
		if (likely(!len))
			break;
		dest += bytes_left_in_page;
		offset += bytes_left_in_page;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__lib_ring_buffer_copy_to_user);
/**
 * lib_ring_buffer_read_cstr - read a C-style string from ring_buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : destination's length
 *
 * Return string's length, or -EINVAL on error.
 * Should be protected by get_subbuf/put_subbuf.
 * Destination length should be at least 1 to hold '\0'.
 */
int lib_ring_buffer_read_cstr(struct lttng_kernel_ring_buffer_backend *bufb, size_t offset,
			      void *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
	size_t index;
	ssize_t bytes_left_in_page, pagelen, strpagelen, orig_offset;
	char *str;
	struct lttng_kernel_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	orig_offset = offset;
	if (unlikely(!len))
		return -EINVAL;
	for (;;) {
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		str = (char *)rpages->p[index].virt + (offset & ~PAGE_MASK);
		pagelen = PAGE_SIZE - (offset & ~PAGE_MASK);
		strpagelen = strnlen(str, pagelen);
		if (len) {
			bytes_left_in_page = min_t(size_t, len, strpagelen);
			if (dest) {
				memcpy(dest, str, bytes_left_in_page);
				dest += bytes_left_in_page;
			}
			len -= bytes_left_in_page;
		}
		offset += strpagelen;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		if (strpagelen < pagelen)
			break;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	if (dest && len)
		((char *)dest)[0] = 0;
	return offset - orig_offset;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_cstr);
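/*
 * Illustrative sketch (not part of the original file): extracting a
 * C string recorded in the trace buffer. Must be called between
 * get_subbuf/put_subbuf. Note that the destination is only
 * NUL-terminated when the string is shorter than the destination
 * buffer, hence the explicit termination below. The example_* name is
 * hypothetical, not part of the API.
 */
static inline __maybe_unused
void example_read_string(struct lttng_kernel_ring_buffer_backend *bufb,
		size_t offset)
{
	char name[64];
	int string_len;

	string_len = lib_ring_buffer_read_cstr(bufb, offset, name, sizeof(name));
	if (string_len <= 0)
		return;
	name[sizeof(name) - 1] = '\0';	/* Ensure termination if truncated. */
	printk(KERN_DEBUG "LTTng: string at offset %zu (len %d): %s\n",
		offset, string_len, name);
}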
/**
 * lib_ring_buffer_read_get_pfn - Get a page frame number to read from
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @virt : pointer to page address (output)
 *
 * Should be protected by get_subbuf/put_subbuf.
 * Returns the pointer to the page frame number unsigned long.
 */
unsigned long *lib_ring_buffer_read_get_pfn(struct lttng_kernel_ring_buffer_backend *bufb,
					    size_t offset, void ***virt)
{
	size_t index;
	struct lttng_kernel_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	*virt = &rpages->p[index].virt;
	return &rpages->p[index].pfn;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_get_pfn);
/**
 * lib_ring_buffer_read_offset_address - get address of a buffer location
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located (for read).
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's never on a page boundary, it's safe to read/write directly
 * from/to this address, as long as the read/write is never bigger than a
 * page size.
 */
void *lib_ring_buffer_read_offset_address(struct lttng_kernel_ring_buffer_backend *bufb,
					  size_t offset)
{
	size_t index;
	struct lttng_kernel_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	return rpages->p[index].virt + (offset & ~PAGE_MASK);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_offset_address);
/**
 * lib_ring_buffer_offset_address - get address of a location within the buffer
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located.
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's always at the beginning of a page, it's safe to write directly to this
 * address, as long as the write is never bigger than a page size.
 */
void *lib_ring_buffer_offset_address(struct lttng_kernel_ring_buffer_backend *bufb,
				     size_t offset)
{
	size_t sbidx, index;
	struct lttng_kernel_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	sbidx = offset >> chanb->subbuf_size_order;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_wsb[sbidx].id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	return rpages->p[index].virt + (offset & ~PAGE_MASK);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_offset_address);