Implement ring buffer Pascal string copy
[lttng-modules.git] / src / lib / ringbuffer / ring_buffer_backend.c
/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
 *
 * ring_buffer_backend.c
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include <wrapper/mm.h>
#include <wrapper/vmalloc.h>	/* for wrapper_vmalloc_sync_mappings() */
#include <ringbuffer/config.h>
#include <ringbuffer/backend.h>
#include <ringbuffer/frontend.h>

/**
 * lib_ring_buffer_backend_allocate - allocate a channel buffer
 * @config: ring buffer instance configuration
 * @bufb: the buffer backend struct
 * @size: total size of the buffer
 * @num_subbuf: number of subbuffers
 * @extra_reader_sb: need extra subbuffer for reader
 */
static
int lib_ring_buffer_backend_allocate(const struct lttng_kernel_ring_buffer_config *config,
				     struct lttng_kernel_ring_buffer_backend *bufb,
				     size_t size, size_t num_subbuf,
				     int extra_reader_sb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	unsigned long j, num_pages, num_pages_per_subbuf, page_idx = 0;
	unsigned long subbuf_size, mmap_offset = 0;
	unsigned long num_subbuf_alloc;
	struct page **pages;
	unsigned long i;

	num_pages = size >> PAGE_SHIFT;

	/*
	 * Verify that there are enough free pages available on the system
	 * for the current allocation request.
	 * wrapper_check_enough_free_pages uses si_mem_available() when it is
	 * available, and returns whether there should be enough free pages
	 * based on the current estimate.
	 */
	if (!wrapper_check_enough_free_pages(num_pages))
		goto not_enough_pages;

	/*
	 * Set the current user thread as the first target of the OOM killer.
	 * If the estimate received by si_mem_available() was off, and we do
	 * end up running out of memory because of this buffer allocation, we
	 * want to kill the offending app first.
	 */
	wrapper_set_current_oom_origin();

	num_pages_per_subbuf = num_pages >> get_count_order(num_subbuf);
	subbuf_size = chanb->subbuf_size;
	num_subbuf_alloc = num_subbuf;

	if (extra_reader_sb) {
		num_pages += num_pages_per_subbuf; /* Add pages for reader */
		num_subbuf_alloc++;
	}
74
df388b78 75 pages = vmalloc_node(ALIGN(sizeof(*pages) * num_pages,
f3bc08c5 76 1 << INTERNODE_CACHE_SHIFT),
df388b78 77 cpu_to_node(max(bufb->cpu, 0)));
f3bc08c5
MD
78 if (unlikely(!pages))
79 goto pages_error;
80
48f5e0b5 81 bufb->array = lttng_kvmalloc_node(ALIGN(sizeof(*bufb->array)
f3bc08c5
MD
82 * num_subbuf_alloc,
83 1 << INTERNODE_CACHE_SHIFT),
df388b78
MD
84 GFP_KERNEL | __GFP_NOWARN,
85 cpu_to_node(max(bufb->cpu, 0)));
f3bc08c5
MD
86 if (unlikely(!bufb->array))
87 goto array_error;
88
89 for (i = 0; i < num_pages; i++) {
90 pages[i] = alloc_pages_node(cpu_to_node(max(bufb->cpu, 0)),
df388b78 91 GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 0);
f3bc08c5
MD
92 if (unlikely(!pages[i]))
93 goto depopulate;
f3bc08c5
MD
94 }
95 bufb->num_pages_per_subbuf = num_pages_per_subbuf;
96
97 /* Allocate backend pages array elements */
98 for (i = 0; i < num_subbuf_alloc; i++) {
99 bufb->array[i] =
48f5e0b5 100 lttng_kvzalloc_node(ALIGN(
e20c0fec
MD
101 sizeof(struct lttng_kernel_ring_buffer_backend_pages) +
102 sizeof(struct lttng_kernel_ring_buffer_backend_page)
f3bc08c5
MD
103 * num_pages_per_subbuf,
104 1 << INTERNODE_CACHE_SHIFT),
df388b78
MD
105 GFP_KERNEL | __GFP_NOWARN,
106 cpu_to_node(max(bufb->cpu, 0)));
f3bc08c5
MD
107 if (!bufb->array[i])
108 goto free_array;
109 }
110
111 /* Allocate write-side subbuffer table */
48f5e0b5 112 bufb->buf_wsb = lttng_kvzalloc_node(ALIGN(
e20c0fec 113 sizeof(struct lttng_kernel_ring_buffer_backend_subbuffer)
f3bc08c5
MD
114 * num_subbuf,
115 1 << INTERNODE_CACHE_SHIFT),
df388b78
MD
116 GFP_KERNEL | __GFP_NOWARN,
117 cpu_to_node(max(bufb->cpu, 0)));
f3bc08c5
MD
118 if (unlikely(!bufb->buf_wsb))
119 goto free_array;
120
121 for (i = 0; i < num_subbuf; i++)
122 bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);
123
124 /* Assign read-side subbuffer table */
125 if (extra_reader_sb)
126 bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
127 num_subbuf_alloc - 1);
128 else
129 bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);
130
5b3cf4f9 131 /* Allocate subbuffer packet counter table */
48f5e0b5 132 bufb->buf_cnt = lttng_kvzalloc_node(ALIGN(
e20c0fec 133 sizeof(struct lttng_kernel_ring_buffer_backend_counts)
5b3cf4f9
JD
134 * num_subbuf,
135 1 << INTERNODE_CACHE_SHIFT),
df388b78
MD
136 GFP_KERNEL | __GFP_NOWARN,
137 cpu_to_node(max(bufb->cpu, 0)));
5b3cf4f9
JD
138 if (unlikely(!bufb->buf_cnt))
139 goto free_wsb;
140
f3bc08c5
MD
141 /* Assign pages to page index */
142 for (i = 0; i < num_subbuf_alloc; i++) {
143 for (j = 0; j < num_pages_per_subbuf; j++) {
144 CHAN_WARN_ON(chanb, page_idx > num_pages);
0112cb7b
MD
145 bufb->array[i]->p[j].virt = page_address(pages[page_idx]);
146 bufb->array[i]->p[j].pfn = page_to_pfn(pages[page_idx]);
f3bc08c5
MD
147 page_idx++;
148 }
149 if (config->output == RING_BUFFER_MMAP) {
150 bufb->array[i]->mmap_offset = mmap_offset;
151 mmap_offset += subbuf_size;
152 }
153 }
154
155 /*
156 * If kmalloc ever uses vmalloc underneath, make sure the buffer pages
157 * will not fault.
158 */
263b6c88 159 wrapper_vmalloc_sync_mappings();
7502f47a 160 wrapper_clear_current_oom_origin();
df388b78 161 vfree(pages);
f3bc08c5
MD
162 return 0;
163
5b3cf4f9 164free_wsb:
48f5e0b5 165 lttng_kvfree(bufb->buf_wsb);
f3bc08c5
MD
166free_array:
167 for (i = 0; (i < num_subbuf_alloc && bufb->array[i]); i++)
48f5e0b5 168 lttng_kvfree(bufb->array[i]);
f3bc08c5
MD
169depopulate:
170 /* Free all allocated pages */
171 for (i = 0; (i < num_pages && pages[i]); i++)
172 __free_page(pages[i]);
48f5e0b5 173 lttng_kvfree(bufb->array);
f3bc08c5 174array_error:
df388b78 175 vfree(pages);
f3bc08c5 176pages_error:
7502f47a 177 wrapper_clear_current_oom_origin();
1f0ab1eb 178not_enough_pages:
f3bc08c5
MD
179 return -ENOMEM;
180}
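/*
 * Sizing sketch (illustrative, assuming 4 kB pages): for a 64 kB buffer
 * split into 4 sub-buffers, the allocation above computes
 *
 *	num_pages            = 64 kB >> PAGE_SHIFT          = 16
 *	num_pages_per_subbuf = 16 >> get_count_order(4)     = 4
 *	num_subbuf_alloc     = 4  (+1 with extra_reader_sb) = 5
 *	num_pages            = 16 (+4 reader pages)         = 20
 *
 * so an overwrite-mode channel allocates one spare sub-buffer's worth of
 * pages for the reader on top of the requested buffer size.
 */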
181
e20c0fec 182int lib_ring_buffer_backend_create(struct lttng_kernel_ring_buffer_backend *bufb,
f3bc08c5
MD
183 struct channel_backend *chanb, int cpu)
184{
e20c0fec 185 const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
f3bc08c5 186
860c213b 187 bufb->chan = container_of(chanb, struct lttng_kernel_ring_buffer_channel, backend);
f3bc08c5
MD
188 bufb->cpu = cpu;
189
190 return lib_ring_buffer_backend_allocate(config, bufb, chanb->buf_size,
191 chanb->num_subbuf,
192 chanb->extra_reader_sb);
193}
194
e20c0fec 195void lib_ring_buffer_backend_free(struct lttng_kernel_ring_buffer_backend *bufb)
f3bc08c5
MD
196{
197 struct channel_backend *chanb = &bufb->chan->backend;
198 unsigned long i, j, num_subbuf_alloc;
199
200 num_subbuf_alloc = chanb->num_subbuf;
201 if (chanb->extra_reader_sb)
202 num_subbuf_alloc++;
203
48f5e0b5
MJ
204 lttng_kvfree(bufb->buf_wsb);
205 lttng_kvfree(bufb->buf_cnt);
f3bc08c5
MD
206 for (i = 0; i < num_subbuf_alloc; i++) {
207 for (j = 0; j < bufb->num_pages_per_subbuf; j++)
0112cb7b 208 __free_page(pfn_to_page(bufb->array[i]->p[j].pfn));
48f5e0b5 209 lttng_kvfree(bufb->array[i]);
f3bc08c5 210 }
48f5e0b5 211 lttng_kvfree(bufb->array);
f3bc08c5
MD
212 bufb->allocated = 0;
213}
214
e20c0fec 215void lib_ring_buffer_backend_reset(struct lttng_kernel_ring_buffer_backend *bufb)
f3bc08c5
MD
216{
217 struct channel_backend *chanb = &bufb->chan->backend;
e20c0fec 218 const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
f3bc08c5
MD
219 unsigned long num_subbuf_alloc;
220 unsigned int i;
221
222 num_subbuf_alloc = chanb->num_subbuf;
223 if (chanb->extra_reader_sb)
224 num_subbuf_alloc++;
225
226 for (i = 0; i < chanb->num_subbuf; i++)
227 bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);
228 if (chanb->extra_reader_sb)
229 bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
230 num_subbuf_alloc - 1);
231 else
232 bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);
233
234 for (i = 0; i < num_subbuf_alloc; i++) {
235 /* Don't reset mmap_offset */
236 v_set(config, &bufb->array[i]->records_commit, 0);
237 v_set(config, &bufb->array[i]->records_unread, 0);
238 bufb->array[i]->data_size = 0;
239 /* Don't reset backend page and virt addresses */
240 }
241 /* Don't reset num_pages_per_subbuf, cpu, allocated */
242 v_set(config, &bufb->records_read, 0);
243}
244
245/*
246 * The frontend is responsible for also calling ring_buffer_backend_reset for
247 * each buffer when calling channel_backend_reset.
248 */
249void channel_backend_reset(struct channel_backend *chanb)
250{
860c213b 251 struct lttng_kernel_ring_buffer_channel *chan = container_of(chanb, struct lttng_kernel_ring_buffer_channel, backend);
e20c0fec 252 const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
f3bc08c5
MD
253
254 /*
255 * Don't reset buf_size, subbuf_size, subbuf_size_order,
256 * num_subbuf_order, buf_size_order, extra_reader_sb, num_subbuf,
257 * priv, notifiers, config, cpumask and name.
258 */
259 chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
260}
261
5f4c791e 262#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
1e367326
MD
263
/*
 * No need to implement a "dead" callback to do a buffer switch here,
 * because it will happen when tracing is stopped, or will be done by
 * the switch timer CPU DEAD callback.
 * We don't free buffers when CPUs go away, because that would make
 * trace data vanish, which is unwanted.
 */
271int lttng_cpuhp_rb_backend_prepare(unsigned int cpu,
272 struct lttng_cpuhp_node *node)
273{
274 struct channel_backend *chanb = container_of(node,
275 struct channel_backend, cpuhp_prepare);
e20c0fec
MD
276 const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
277 struct lttng_kernel_ring_buffer *buf;
1e367326
MD
278 int ret;
279
280 CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
281
282 buf = per_cpu_ptr(chanb->buf, cpu);
283 ret = lib_ring_buffer_create(buf, chanb, cpu);
284 if (ret) {
285 printk(KERN_ERR
5a15f70c 286 "LTTng: ring_buffer_cpu_hp_callback: cpu %d "
1e367326
MD
287 "buffer creation failed\n", cpu);
288 return ret;
289 }
290 return 0;
291}
292EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_backend_prepare);
293
5f4c791e 294#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
1e367326 295
f3bc08c5 296#ifdef CONFIG_HOTPLUG_CPU
1e367326 297
f3bc08c5
MD
298/**
299 * lib_ring_buffer_cpu_hp_callback - CPU hotplug callback
300 * @nb: notifier block
301 * @action: hotplug action to take
302 * @hcpu: CPU number
303 *
304 * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
305 */
306static
e8f071d5 307int lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
f3bc08c5
MD
308 unsigned long action,
309 void *hcpu)
310{
311 unsigned int cpu = (unsigned long)hcpu;
312 struct channel_backend *chanb = container_of(nb, struct channel_backend,
313 cpu_hp_notifier);
e20c0fec
MD
314 const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
315 struct lttng_kernel_ring_buffer *buf;
f3bc08c5
MD
316 int ret;
317
318 CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
319
320 switch (action) {
321 case CPU_UP_PREPARE:
322 case CPU_UP_PREPARE_FROZEN:
323 buf = per_cpu_ptr(chanb->buf, cpu);
324 ret = lib_ring_buffer_create(buf, chanb, cpu);
325 if (ret) {
326 printk(KERN_ERR
5a15f70c 327 "LTTng: ring_buffer_cpu_hp_callback: cpu %d "
f3bc08c5
MD
328 "buffer creation failed\n", cpu);
329 return NOTIFY_BAD;
330 }
331 break;
332 case CPU_DEAD:
333 case CPU_DEAD_FROZEN:
		/*
		 * No need to do a buffer switch here, because it will happen
		 * when tracing is stopped, or will be done by the switch
		 * timer CPU DEAD callback.
		 */
337 break;
338 }
339 return NOTIFY_OK;
340}
1e367326 341
f3bc08c5
MD
342#endif
343
5f4c791e 344#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
1e367326 345
/**
 * channel_backend_init - initialize a channel backend
 * @chanb: channel backend
 * @name: channel name
 * @config: client ring buffer configuration
 * @priv: client private data
 * @subbuf_size: size of sub-buffers (at least PAGE_SIZE, power of 2)
 * @num_subbuf: number of sub-buffers (power of 2)
 *
 * Returns 0 on success, a negative error code otherwise.
 *
 * Creates per-cpu channel buffers using the sizes and attributes
 * specified. The created channel buffer files will be named
 * name_0...name_N-1. File permissions will be %S_IRUSR.
 *
 * Called with CPU hotplug disabled.
 */
364int channel_backend_init(struct channel_backend *chanb,
365 const char *name,
e20c0fec 366 const struct lttng_kernel_ring_buffer_config *config,
f3bc08c5
MD
367 void *priv, size_t subbuf_size, size_t num_subbuf)
368{
860c213b 369 struct lttng_kernel_ring_buffer_channel *chan = container_of(chanb, struct lttng_kernel_ring_buffer_channel, backend);
f3bc08c5
MD
370 unsigned int i;
371 int ret;
372
373 if (!name)
374 return -EPERM;
375
	/* Check that the subbuffer size is at least a page. */
	if (subbuf_size < PAGE_SIZE)
		return -EINVAL;

	/*
	 * Make sure the number of subbuffers and the subbuffer size are
	 * powers of 2 and nonzero.
	 */
	if (!subbuf_size || (subbuf_size & (subbuf_size - 1)))
		return -EINVAL;
	if (!num_subbuf || (num_subbuf & (num_subbuf - 1)))
		return -EINVAL;
	/*
	 * Overwrite mode buffers require at least 2 subbuffers per
	 * buffer.
	 */
	if (config->mode == RING_BUFFER_OVERWRITE && num_subbuf < 2)
		return -EINVAL;
f3bc08c5
MD
394
395 ret = subbuffer_id_check_index(config, num_subbuf);
396 if (ret)
397 return ret;
398
399 chanb->priv = priv;
400 chanb->buf_size = num_subbuf * subbuf_size;
401 chanb->subbuf_size = subbuf_size;
402 chanb->buf_size_order = get_count_order(chanb->buf_size);
403 chanb->subbuf_size_order = get_count_order(subbuf_size);
404 chanb->num_subbuf_order = get_count_order(num_subbuf);
405 chanb->extra_reader_sb =
406 (config->mode == RING_BUFFER_OVERWRITE) ? 1 : 0;
407 chanb->num_subbuf = num_subbuf;
408 strlcpy(chanb->name, name, NAME_MAX);
5a8fd222 409 memcpy(&chanb->config, config, sizeof(chanb->config));
f3bc08c5
MD
410
411 if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
412 if (!zalloc_cpumask_var(&chanb->cpumask, GFP_KERNEL))
413 return -ENOMEM;
414 }
415
416 if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
417 /* Allocating the buffer per-cpu structures */
e20c0fec 418 chanb->buf = alloc_percpu(struct lttng_kernel_ring_buffer);
f3bc08c5
MD
419 if (!chanb->buf)
420 goto free_cpumask;
421
5f4c791e 422#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
1e367326
MD
423 chanb->cpuhp_prepare.component = LTTNG_RING_BUFFER_BACKEND;
424 ret = cpuhp_state_add_instance(lttng_rb_hp_prepare,
425 &chanb->cpuhp_prepare.node);
426 if (ret)
427 goto free_bufs;
5f4c791e 428#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
1e367326
MD
429
430 {
		/*
		 * In the case of a non-hotplug CPU, if the ring buffer is
		 * allocated in an early initcall, it will not be notified of
		 * secondary CPUs. In that case, we need to allocate for all
		 * possible CPUs.
		 */
f3bc08c5 436#ifdef CONFIG_HOTPLUG_CPU
1e367326
MD
437 /*
438 * buf->backend.allocated test takes care of concurrent CPU
439 * hotplug.
440 * Priority higher than frontend, so we create the ring buffer
441 * before we start the timer.
442 */
443 chanb->cpu_hp_notifier.notifier_call =
444 lib_ring_buffer_cpu_hp_callback;
445 chanb->cpu_hp_notifier.priority = 5;
446 register_hotcpu_notifier(&chanb->cpu_hp_notifier);
447
448 get_online_cpus();
449 for_each_online_cpu(i) {
450 ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
451 chanb, i);
452 if (ret)
453 goto free_bufs; /* cpu hotplug locked */
454 }
455 put_online_cpus();
f3bc08c5 456#else
1e367326
MD
457 for_each_possible_cpu(i) {
458 ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
459 chanb, i);
460 if (ret)
461 goto free_bufs;
462 }
f3bc08c5 463#endif
1e367326 464 }
5f4c791e 465#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
f3bc08c5 466 } else {
e20c0fec 467 chanb->buf = kzalloc(sizeof(struct lttng_kernel_ring_buffer), GFP_KERNEL);
f3bc08c5
MD
468 if (!chanb->buf)
469 goto free_cpumask;
470 ret = lib_ring_buffer_create(chanb->buf, chanb, -1);
471 if (ret)
472 goto free_bufs;
473 }
474 chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
475
476 return 0;
477
478free_bufs:
479 if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
5f4c791e 480#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
5f14d8ae
MD
481 /*
482 * Teardown of lttng_rb_hp_prepare instance
483 * on "add" error is handled within cpu hotplug,
484 * no teardown to do from the caller.
485 */
5f4c791e 486#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
1e367326
MD
487#ifdef CONFIG_HOTPLUG_CPU
488 put_online_cpus();
489 unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
490#endif
5f4c791e 491#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
f3bc08c5 492 for_each_possible_cpu(i) {
e20c0fec 493 struct lttng_kernel_ring_buffer *buf =
1e367326 494 per_cpu_ptr(chanb->buf, i);
f3bc08c5
MD
495
496 if (!buf->backend.allocated)
497 continue;
498 lib_ring_buffer_free(buf);
499 }
f3bc08c5
MD
500 free_percpu(chanb->buf);
501 } else
502 kfree(chanb->buf);
503free_cpumask:
504 if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
505 free_cpumask_var(chanb->cpumask);
506 return -ENOMEM;
507}
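/*
 * Usage sketch (hypothetical caller): a ring-buffer client embedding a
 * struct channel_backend would typically initialize it as
 *
 *	ret = channel_backend_init(&chan->backend, "my_chan", &client_config,
 *				   priv, 2 * PAGE_SIZE, 8);
 *	if (ret)
 *		return ret;
 *
 * where "my_chan", client_config and priv are caller-provided; both the
 * sub-buffer size (2 * PAGE_SIZE) and the sub-buffer count (8) must be
 * powers of 2, as checked above.
 */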
508
/**
 * channel_backend_unregister_notifiers - unregister notifiers
 * @chanb: the channel backend
 *
 * Holds CPU hotplug.
 */
515void channel_backend_unregister_notifiers(struct channel_backend *chanb)
516{
e20c0fec 517 const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
f3bc08c5 518
1e367326 519 if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
5f4c791e 520#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
1e367326
MD
521 int ret;
522
523 ret = cpuhp_state_remove_instance(lttng_rb_hp_prepare,
524 &chanb->cpuhp_prepare.node);
525 WARN_ON(ret);
5f4c791e 526#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
f3bc08c5 527 unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
5f4c791e 528#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
1e367326 529 }
f3bc08c5
MD
530}
531
/**
 * channel_backend_free - destroy the channel
 * @chanb: the channel backend
 *
 * Destroys all channel buffers and frees the channel.
 */
538void channel_backend_free(struct channel_backend *chanb)
539{
e20c0fec 540 const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
f3bc08c5
MD
541 unsigned int i;
542
543 if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
544 for_each_possible_cpu(i) {
e20c0fec 545 struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);
f3bc08c5
MD
546
547 if (!buf->backend.allocated)
548 continue;
549 lib_ring_buffer_free(buf);
550 }
551 free_cpumask_var(chanb->cpumask);
552 free_percpu(chanb->buf);
553 } else {
e20c0fec 554 struct lttng_kernel_ring_buffer *buf = chanb->buf;
f3bc08c5
MD
555
556 CHAN_WARN_ON(chanb, !buf->backend.allocated);
557 lib_ring_buffer_free(buf);
558 kfree(buf);
559 }
560}
561
562/**
563 * lib_ring_buffer_write - write data to a ring_buffer buffer.
564 * @bufb : buffer backend
565 * @offset : offset within the buffer
566 * @src : source address
567 * @len : length to write
568 * @pagecpy : page size copied so far
569 */
e20c0fec 570void _lib_ring_buffer_write(struct lttng_kernel_ring_buffer_backend *bufb, size_t offset,
bfe529f9 571 const void *src, size_t len, size_t pagecpy)
f3bc08c5
MD
572{
573 struct channel_backend *chanb = &bufb->chan->backend;
e20c0fec 574 const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
f3bc08c5 575 size_t sbidx, index;
e20c0fec 576 struct lttng_kernel_ring_buffer_backend_pages *rpages;
f3bc08c5
MD
577 unsigned long sb_bindex, id;
578
579 do {
580 len -= pagecpy;
581 src += pagecpy;
582 offset += pagecpy;
583 sbidx = offset >> chanb->subbuf_size_order;
584 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
585
586 /*
587 * Underlying layer should never ask for writes across
588 * subbuffers.
589 */
590 CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
591
592 pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
593 id = bufb->buf_wsb[sbidx].id;
594 sb_bindex = subbuffer_id_get_index(config, id);
595 rpages = bufb->array[sb_bindex];
596 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
597 && subbuffer_id_is_noref(config, id));
598 lib_ring_buffer_do_copy(config,
599 rpages->p[index].virt
600 + (offset & ~PAGE_MASK),
601 src, pagecpy);
602 } while (unlikely(len != pagecpy));
603}
604EXPORT_SYMBOL_GPL(_lib_ring_buffer_write);
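/*
 * Index arithmetic sketch (illustrative, assuming 4 kB pages and 16 kB
 * sub-buffers, i.e. subbuf_size_order == 14): for offset == 0x5100,
 *
 *	sbidx   = 0x5100 >> 14                    = 1	(2nd sub-buffer)
 *	index   = (0x5100 & 0x3fff) >> PAGE_SHIFT = 1	(2nd page of it)
 *	pagecpy = min(len, 4096 - 0x100)          = at most 3840 bytes
 *
 * so each loop iteration copies at most up to the end of the current
 * backend page, then recomputes the target page for the remainder.
 */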
605
4ea00e4f
JD
606
607/**
608 * lib_ring_buffer_memset - write len bytes of c to a ring_buffer buffer.
609 * @bufb : buffer backend
610 * @offset : offset within the buffer
611 * @c : the byte to write
612 * @len : length to write
613 * @pagecpy : page size copied so far
614 */
e20c0fec 615void _lib_ring_buffer_memset(struct lttng_kernel_ring_buffer_backend *bufb,
4ea00e4f 616 size_t offset,
bfe529f9 617 int c, size_t len, size_t pagecpy)
4ea00e4f
JD
618{
619 struct channel_backend *chanb = &bufb->chan->backend;
e20c0fec 620 const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
4ea00e4f 621 size_t sbidx, index;
e20c0fec 622 struct lttng_kernel_ring_buffer_backend_pages *rpages;
4ea00e4f
JD
623 unsigned long sb_bindex, id;
624
625 do {
626 len -= pagecpy;
627 offset += pagecpy;
628 sbidx = offset >> chanb->subbuf_size_order;
629 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
630
631 /*
632 * Underlying layer should never ask for writes across
633 * subbuffers.
634 */
635 CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
636
637 pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
638 id = bufb->buf_wsb[sbidx].id;
639 sb_bindex = subbuffer_id_get_index(config, id);
640 rpages = bufb->array[sb_bindex];
641 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
642 && subbuffer_id_is_noref(config, id));
643 lib_ring_buffer_do_memset(rpages->p[index].virt
644 + (offset & ~PAGE_MASK),
645 c, pagecpy);
646 } while (unlikely(len != pagecpy));
647}
648EXPORT_SYMBOL_GPL(_lib_ring_buffer_memset);
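/*
 * Usage sketch: the copy-from-user path below falls back on this helper
 * to clear the remainder of a record when the user copy faults, e.g.
 *
 *	_lib_ring_buffer_memset(bufb, offset, 0, len, 0);
 *
 * which zeroes @len bytes starting at @offset, crossing backend pages as
 * needed within the current sub-buffer.
 */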
649
16f78f3a
MD
650/**
651 * lib_ring_buffer_strcpy - write string data to a ring_buffer buffer.
652 * @bufb : buffer backend
653 * @offset : offset within the buffer
654 * @src : source address
655 * @len : length to write
656 * @pagecpy : page size copied so far
657 * @pad : character to use for padding
658 */
e20c0fec 659void _lib_ring_buffer_strcpy(struct lttng_kernel_ring_buffer_backend *bufb,
16f78f3a
MD
660 size_t offset, const char *src, size_t len,
661 size_t pagecpy, int pad)
662{
663 struct channel_backend *chanb = &bufb->chan->backend;
e20c0fec 664 const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
16f78f3a 665 size_t sbidx, index;
e20c0fec 666 struct lttng_kernel_ring_buffer_backend_pages *rpages;
16f78f3a
MD
667 unsigned long sb_bindex, id;
668 int src_terminated = 0;
669
670 CHAN_WARN_ON(chanb, !len);
671 offset += pagecpy;
672 do {
673 len -= pagecpy;
674 if (!src_terminated)
675 src += pagecpy;
676 sbidx = offset >> chanb->subbuf_size_order;
677 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
678
679 /*
680 * Underlying layer should never ask for writes across
681 * subbuffers.
682 */
683 CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
684
685 pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
686 id = bufb->buf_wsb[sbidx].id;
687 sb_bindex = subbuffer_id_get_index(config, id);
688 rpages = bufb->array[sb_bindex];
689 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
690 && subbuffer_id_is_noref(config, id));
691
692 if (likely(!src_terminated)) {
693 size_t count, to_copy;
694
695 to_copy = pagecpy;
696 if (pagecpy == len)
697 to_copy--; /* Final '\0' */
698 count = lib_ring_buffer_do_strcpy(config,
699 rpages->p[index].virt
700 + (offset & ~PAGE_MASK),
701 src, to_copy);
702 offset += count;
703 /* Padding */
704 if (unlikely(count < to_copy)) {
705 size_t pad_len = to_copy - count;
706
707 /* Next pages will have padding */
708 src_terminated = 1;
709 lib_ring_buffer_do_memset(rpages->p[index].virt
710 + (offset & ~PAGE_MASK),
711 pad, pad_len);
712 offset += pad_len;
713 }
714 } else {
715 size_t pad_len;
716
717 pad_len = pagecpy;
718 if (pagecpy == len)
719 pad_len--; /* Final '\0' */
720 lib_ring_buffer_do_memset(rpages->p[index].virt
721 + (offset & ~PAGE_MASK),
722 pad, pad_len);
723 offset += pad_len;
724 }
725 } while (unlikely(len != pagecpy));
726 /* Ending '\0' */
727 lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
728 '\0', 1);
729}
730EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy);
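/*
 * Semantics sketch (hypothetical values, assuming the 8 bytes fit in the
 * current page and no bytes were copied yet, i.e. pagecpy == 0):
 *
 *	_lib_ring_buffer_strcpy(bufb, offset, "abc", 8, 0, '#');
 *
 * stores the bytes 'a' 'b' 'c' '#' '#' '#' '#' '\0': the copy stops at the
 * source's '\0', the field is padded with @pad, and the last of the @len
 * bytes is always a terminating '\0'.
 */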
4ea00e4f 731
/**
 * _lib_ring_buffer_pstrcpy - write to a buffer backend P-string
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source pointer to copy from
 * @len : length of data to copy
 * @pad : character to use for padding
 *
 * This function copies up to @len bytes of data from a source pointer
 * into a Pascal string in the buffer backend. If a terminating '\0'
 * character is found in @src before @len characters are copied, pad the
 * buffer with @pad characters (e.g. '\0').
 *
 * The length of the Pascal strings in the ring buffer is explicit: it
 * is either the array or sequence length.
 */
747void _lib_ring_buffer_pstrcpy(struct lttng_kernel_ring_buffer_backend *bufb,
748 size_t offset, const char *src, size_t len, int pad)
749{
750 struct channel_backend *chanb = &bufb->chan->backend;
751 const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
752 size_t sbidx, index, bytes_left_in_page;
753 struct lttng_kernel_ring_buffer_backend_pages *rpages;
754 unsigned long sb_bindex, id;
755 bool src_terminated = false;
756
757 CHAN_WARN_ON(chanb, !len);
758 do {
759 sbidx = offset >> chanb->subbuf_size_order;
760 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
761
762 /*
763 * Underlying layer should never ask for writes across
764 * subbuffers.
765 */
766 CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
767
768 bytes_left_in_page = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
769 id = bufb->buf_wsb[sbidx].id;
770 sb_bindex = subbuffer_id_get_index(config, id);
771 rpages = bufb->array[sb_bindex];
772 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
773 && subbuffer_id_is_noref(config, id));
774
775 if (likely(!src_terminated)) {
776 size_t count, to_copy;
777
778 to_copy = bytes_left_in_page;
779 count = lib_ring_buffer_do_strcpy(config,
780 rpages->p[index].virt
781 + (offset & ~PAGE_MASK),
782 src, to_copy);
783 offset += count;
784 /* Padding */
785 if (unlikely(count < to_copy)) {
786 size_t pad_len = to_copy - count;
787
788 /* Next pages will have padding */
789 src_terminated = true;
790 lib_ring_buffer_do_memset(rpages->p[index].virt
791 + (offset & ~PAGE_MASK),
792 pad, pad_len);
793 offset += pad_len;
794 }
795 } else {
796 size_t pad_len;
797
798 pad_len = bytes_left_in_page;
799 lib_ring_buffer_do_memset(rpages->p[index].virt
800 + (offset & ~PAGE_MASK),
801 pad, pad_len);
802 offset += pad_len;
803 }
804 len -= bytes_left_in_page;
805 if (!src_terminated)
806 src += bytes_left_in_page;
807 } while (unlikely(len));
808}
809EXPORT_SYMBOL_GPL(_lib_ring_buffer_pstrcpy);
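/*
 * Usage sketch (hypothetical values): to emit a fixed-size 8-byte string
 * field whose length is carried by the event layout rather than by a
 * terminator,
 *
 *	_lib_ring_buffer_pstrcpy(bufb, offset, "abc", 8, '\0');
 *
 * writes 'a' 'b' 'c' followed by five '\0' padding bytes. Unlike
 * _lib_ring_buffer_strcpy() above, no byte is reserved for a trailing
 * '\0': a source of 8 or more characters fills all 8 bytes with data, and
 * the reader relies on the explicit array/sequence length.
 */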
810
4ea00e4f 811/**
7b8ea3a5 812 * lib_ring_buffer_copy_from_user_inatomic - write user data to a ring_buffer buffer.
4ea00e4f
JD
813 * @bufb : buffer backend
814 * @offset : offset within the buffer
815 * @src : source address
816 * @len : length to write
817 * @pagecpy : page size copied so far
818 *
819 * This function deals with userspace pointers, it should never be called
820 * directly without having the src pointer checked with access_ok()
821 * previously.
822 */
e20c0fec 823void _lib_ring_buffer_copy_from_user_inatomic(struct lttng_kernel_ring_buffer_backend *bufb,
4ea00e4f
JD
824 size_t offset,
825 const void __user *src, size_t len,
bfe529f9 826 size_t pagecpy)
4ea00e4f
JD
827{
828 struct channel_backend *chanb = &bufb->chan->backend;
e20c0fec 829 const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
4ea00e4f 830 size_t sbidx, index;
e20c0fec 831 struct lttng_kernel_ring_buffer_backend_pages *rpages;
4ea00e4f
JD
832 unsigned long sb_bindex, id;
833 int ret;
834
835 do {
836 len -= pagecpy;
837 src += pagecpy;
838 offset += pagecpy;
839 sbidx = offset >> chanb->subbuf_size_order;
840 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
841
842 /*
843 * Underlying layer should never ask for writes across
844 * subbuffers.
845 */
846 CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
847
848 pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
849 id = bufb->buf_wsb[sbidx].id;
850 sb_bindex = subbuffer_id_get_index(config, id);
851 rpages = bufb->array[sb_bindex];
852 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
853 && subbuffer_id_is_noref(config, id));
7b8ea3a5 854 ret = lib_ring_buffer_do_copy_from_user_inatomic(rpages->p[index].virt
4ea00e4f
JD
855 + (offset & ~PAGE_MASK),
856 src, pagecpy) != 0;
857 if (ret > 0) {
d87a9f03 858 /* Copy failed. */
4ea00e4f
JD
859 _lib_ring_buffer_memset(bufb, offset, 0, len, 0);
860 break; /* stop copy */
861 }
862 } while (unlikely(len != pagecpy));
863}
7b8ea3a5 864EXPORT_SYMBOL_GPL(_lib_ring_buffer_copy_from_user_inatomic);
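/*
 * Caller-side sketch (hypothetical, using the 2-argument access_ok() of
 * recent kernels): the tracepoint probe validates the user pointer once,
 * then performs the in-atomic copy into the reserved slot:
 *
 *	if (!access_ok(usrc, len))
 *		goto fill_zeroes;
 *	_lib_ring_buffer_copy_from_user_inatomic(bufb, offset, usrc, len, 0);
 *
 * If the copy still faults (e.g. the page was swapped out), the function
 * itself zeroes the remaining bytes so the record stays well-formed.
 */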
4ea00e4f 865
16f78f3a
MD
866/**
867 * lib_ring_buffer_strcpy_from_user_inatomic - write userspace string data to a ring_buffer buffer.
868 * @bufb : buffer backend
869 * @offset : offset within the buffer
870 * @src : source address
871 * @len : length to write
872 * @pagecpy : page size copied so far
873 * @pad : character to use for padding
874 *
875 * This function deals with userspace pointers, it should never be called
876 * directly without having the src pointer checked with access_ok()
877 * previously.
878 */
e20c0fec 879void _lib_ring_buffer_strcpy_from_user_inatomic(struct lttng_kernel_ring_buffer_backend *bufb,
16f78f3a
MD
880 size_t offset, const char __user *src, size_t len,
881 size_t pagecpy, int pad)
882{
883 struct channel_backend *chanb = &bufb->chan->backend;
e20c0fec 884 const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
16f78f3a 885 size_t sbidx, index;
e20c0fec 886 struct lttng_kernel_ring_buffer_backend_pages *rpages;
16f78f3a
MD
887 unsigned long sb_bindex, id;
888 int src_terminated = 0;
889
890 offset += pagecpy;
891 do {
892 len -= pagecpy;
893 if (!src_terminated)
894 src += pagecpy;
895 sbidx = offset >> chanb->subbuf_size_order;
896 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
897
898 /*
899 * Underlying layer should never ask for writes across
900 * subbuffers.
901 */
902 CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
903
904 pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
905 id = bufb->buf_wsb[sbidx].id;
906 sb_bindex = subbuffer_id_get_index(config, id);
907 rpages = bufb->array[sb_bindex];
908 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
909 && subbuffer_id_is_noref(config, id));
910
911 if (likely(!src_terminated)) {
912 size_t count, to_copy;
913
914 to_copy = pagecpy;
915 if (pagecpy == len)
916 to_copy--; /* Final '\0' */
917 count = lib_ring_buffer_do_strcpy_from_user_inatomic(config,
918 rpages->p[index].virt
919 + (offset & ~PAGE_MASK),
920 src, to_copy);
921 offset += count;
922 /* Padding */
923 if (unlikely(count < to_copy)) {
924 size_t pad_len = to_copy - count;
925
926 /* Next pages will have padding */
927 src_terminated = 1;
928 lib_ring_buffer_do_memset(rpages->p[index].virt
929 + (offset & ~PAGE_MASK),
930 pad, pad_len);
931 offset += pad_len;
932 }
933 } else {
934 size_t pad_len;
935
936 pad_len = pagecpy;
937 if (pagecpy == len)
938 pad_len--; /* Final '\0' */
939 lib_ring_buffer_do_memset(rpages->p[index].virt
940 + (offset & ~PAGE_MASK),
941 pad, pad_len);
942 offset += pad_len;
943 }
944 } while (unlikely(len != pagecpy));
945 /* Ending '\0' */
946 lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
947 '\0', 1);
948}
949EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy_from_user_inatomic);
950
/**
 * _lib_ring_buffer_pstrcpy_from_user_inatomic - write userspace string to a buffer backend P-string
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source pointer to copy from
 * @len : length of data to copy
 * @pad : character to use for padding
 *
 * This function copies up to @len bytes of data from a userspace source
 * pointer into a Pascal string in the buffer backend. If a terminating
 * '\0' character is found in @src before @len characters are copied, pad
 * the buffer with @pad characters (e.g. '\0').
 *
 * The length of the Pascal strings in the ring buffer is explicit: it
 * is either the array or sequence length.
 *
 * This function deals with userspace pointers, it should never be called
 * directly without having the src pointer checked with access_ok()
 * previously.
 */
970void _lib_ring_buffer_pstrcpy_from_user_inatomic(struct lttng_kernel_ring_buffer_backend *bufb,
971 size_t offset, const char __user *src, size_t len, int pad)
972{
973 struct channel_backend *chanb = &bufb->chan->backend;
974 const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
975 size_t sbidx, index, bytes_left_in_page;
976 struct lttng_kernel_ring_buffer_backend_pages *rpages;
977 unsigned long sb_bindex, id;
978 bool src_terminated = false;
979
980 CHAN_WARN_ON(chanb, !len);
981 do {
982 sbidx = offset >> chanb->subbuf_size_order;
983 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
984
985 /*
986 * Underlying layer should never ask for writes across
987 * subbuffers.
988 */
989 CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
990
991 bytes_left_in_page = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
992 id = bufb->buf_wsb[sbidx].id;
993 sb_bindex = subbuffer_id_get_index(config, id);
994 rpages = bufb->array[sb_bindex];
995 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
996 && subbuffer_id_is_noref(config, id));
997
998 if (likely(!src_terminated)) {
999 size_t count, to_copy;
1000
1001 to_copy = bytes_left_in_page;
1002 count = lib_ring_buffer_do_strcpy_from_user_inatomic(config,
1003 rpages->p[index].virt
1004 + (offset & ~PAGE_MASK),
1005 src, to_copy);
1006 offset += count;
1007 /* Padding */
1008 if (unlikely(count < to_copy)) {
1009 size_t pad_len = to_copy - count;
1010
1011 /* Next pages will have padding */
1012 src_terminated = true;
1013 lib_ring_buffer_do_memset(rpages->p[index].virt
1014 + (offset & ~PAGE_MASK),
1015 pad, pad_len);
1016 offset += pad_len;
1017 }
1018 } else {
1019 size_t pad_len;
1020
1021 pad_len = bytes_left_in_page;
1022 lib_ring_buffer_do_memset(rpages->p[index].virt
1023 + (offset & ~PAGE_MASK),
1024 pad, pad_len);
1025 offset += pad_len;
1026 }
1027 len -= bytes_left_in_page;
1028 if (!src_terminated)
1029 src += bytes_left_in_page;
1030 } while (unlikely(len));
1031}
1032EXPORT_SYMBOL_GPL(_lib_ring_buffer_pstrcpy_from_user_inatomic);
1033
f3bc08c5
MD
1034/**
1035 * lib_ring_buffer_read - read data from ring_buffer_buffer.
1036 * @bufb : buffer backend
1037 * @offset : offset within the buffer
1038 * @dest : destination address
1039 * @len : length to copy to destination
1040 *
1041 * Should be protected by get_subbuf/put_subbuf.
1042 * Returns the length copied.
1043 */
e20c0fec 1044size_t lib_ring_buffer_read(struct lttng_kernel_ring_buffer_backend *bufb, size_t offset,
f3bc08c5
MD
1045 void *dest, size_t len)
1046{
1047 struct channel_backend *chanb = &bufb->chan->backend;
e20c0fec 1048 const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
bfe529f9 1049 size_t index, pagecpy, orig_len;
e20c0fec 1050 struct lttng_kernel_ring_buffer_backend_pages *rpages;
f3bc08c5
MD
1051 unsigned long sb_bindex, id;
1052
1053 orig_len = len;
1054 offset &= chanb->buf_size - 1;
1055 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
1056 if (unlikely(!len))
1057 return 0;
1058 for (;;) {
1059 pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
1060 id = bufb->buf_rsb.id;
1061 sb_bindex = subbuffer_id_get_index(config, id);
1062 rpages = bufb->array[sb_bindex];
1063 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
1064 && subbuffer_id_is_noref(config, id));
1065 memcpy(dest, rpages->p[index].virt + (offset & ~PAGE_MASK),
1066 pagecpy);
1067 len -= pagecpy;
1068 if (likely(!len))
1069 break;
1070 dest += pagecpy;
1071 offset += pagecpy;
1072 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
1073 /*
1074 * Underlying layer should never ask for reads across
1075 * subbuffers.
1076 */
1077 CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
1078 }
1079 return orig_len;
1080}
1081EXPORT_SYMBOL_GPL(lib_ring_buffer_read);
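/*
 * Reader-side sketch (hypothetical offsets): once the consumer owns the
 * sub-buffer (get_subbuf/put_subbuf, as noted above), a record body can
 * be pulled out with
 *
 *	len = lib_ring_buffer_read(&buf->backend, record_offset,
 *				   payload, payload_len);
 *
 * which copies across backend pages as needed and returns the number of
 * bytes copied (payload_len here).
 */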
1082
1083/**
1084 * __lib_ring_buffer_copy_to_user - read data from ring_buffer to userspace
1085 * @bufb : buffer backend
1086 * @offset : offset within the buffer
1087 * @dest : destination userspace address
1088 * @len : length to copy to destination
1089 *
1090 * Should be protected by get_subbuf/put_subbuf.
1091 * access_ok() must have been performed on dest addresses prior to call this
1092 * function.
1093 * Returns -EFAULT on error, 0 if ok.
1094 */
e20c0fec 1095int __lib_ring_buffer_copy_to_user(struct lttng_kernel_ring_buffer_backend *bufb,
f3bc08c5
MD
1096 size_t offset, void __user *dest, size_t len)
1097{
1098 struct channel_backend *chanb = &bufb->chan->backend;
e20c0fec 1099 const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
f3bc08c5 1100 size_t index;
88dfd899 1101 ssize_t pagecpy;
e20c0fec 1102 struct lttng_kernel_ring_buffer_backend_pages *rpages;
f3bc08c5
MD
1103 unsigned long sb_bindex, id;
1104
f3bc08c5
MD
1105 offset &= chanb->buf_size - 1;
1106 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
1107 if (unlikely(!len))
1108 return 0;
1109 for (;;) {
1110 pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
1111 id = bufb->buf_rsb.id;
1112 sb_bindex = subbuffer_id_get_index(config, id);
1113 rpages = bufb->array[sb_bindex];
1114 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
1115 && subbuffer_id_is_noref(config, id));
1116 if (__copy_to_user(dest,
1117 rpages->p[index].virt + (offset & ~PAGE_MASK),
1118 pagecpy))
1119 return -EFAULT;
1120 len -= pagecpy;
1121 if (likely(!len))
1122 break;
1123 dest += pagecpy;
1124 offset += pagecpy;
1125 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
1126 /*
1127 * Underlying layer should never ask for reads across
1128 * subbuffers.
1129 */
1130 CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
1131 }
1132 return 0;
1133}
1134EXPORT_SYMBOL_GPL(__lib_ring_buffer_copy_to_user);
1135
1136/**
1137 * lib_ring_buffer_read_cstr - read a C-style string from ring_buffer.
1138 * @bufb : buffer backend
1139 * @offset : offset within the buffer
1140 * @dest : destination address
1141 * @len : destination's length
1142 *
61eb4c39 1143 * Return string's length, or -EINVAL on error.
f3bc08c5 1144 * Should be protected by get_subbuf/put_subbuf.
61eb4c39 1145 * Destination length should be at least 1 to hold '\0'.
f3bc08c5 1146 */
e20c0fec 1147int lib_ring_buffer_read_cstr(struct lttng_kernel_ring_buffer_backend *bufb, size_t offset,
f3bc08c5
MD
1148 void *dest, size_t len)
1149{
1150 struct channel_backend *chanb = &bufb->chan->backend;
e20c0fec 1151 const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
f3bc08c5
MD
1152 size_t index;
1153 ssize_t pagecpy, pagelen, strpagelen, orig_offset;
1154 char *str;
e20c0fec 1155 struct lttng_kernel_ring_buffer_backend_pages *rpages;
f3bc08c5
MD
1156 unsigned long sb_bindex, id;
1157
1158 offset &= chanb->buf_size - 1;
1159 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
1160 orig_offset = offset;
61eb4c39
MD
1161 if (unlikely(!len))
1162 return -EINVAL;
f3bc08c5
MD
1163 for (;;) {
1164 id = bufb->buf_rsb.id;
1165 sb_bindex = subbuffer_id_get_index(config, id);
1166 rpages = bufb->array[sb_bindex];
1167 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
1168 && subbuffer_id_is_noref(config, id));
1169 str = (char *)rpages->p[index].virt + (offset & ~PAGE_MASK);
1170 pagelen = PAGE_SIZE - (offset & ~PAGE_MASK);
1171 strpagelen = strnlen(str, pagelen);
1172 if (len) {
1173 pagecpy = min_t(size_t, len, strpagelen);
1174 if (dest) {
1175 memcpy(dest, str, pagecpy);
1176 dest += pagecpy;
1177 }
1178 len -= pagecpy;
1179 }
1180 offset += strpagelen;
1181 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
1182 if (strpagelen < pagelen)
1183 break;
1184 /*
1185 * Underlying layer should never ask for reads across
1186 * subbuffers.
1187 */
1188 CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
1189 }
1190 if (dest && len)
1191 ((char *)dest)[0] = 0;
1192 return offset - orig_offset;
1193}
1194EXPORT_SYMBOL_GPL(lib_ring_buffer_read_cstr);
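/*
 * Reader-side sketch (hypothetical offsets): to extract a C string field
 * into a bounded scratch buffer,
 *
 *	char name[64];
 *	int ret;
 *
 *	ret = lib_ring_buffer_read_cstr(&buf->backend, field_offset,
 *					name, sizeof(name));
 *
 * ret is the length of the string stored in the ring buffer, which may
 * exceed sizeof(name); at most sizeof(name) bytes are written to name, so
 * the caller should treat ret >= sizeof(name) as truncation.
 */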
1195
1196/**
0112cb7b 1197 * lib_ring_buffer_read_get_pfn - Get a page frame number to read from
f3bc08c5
MD
1198 * @bufb : buffer backend
1199 * @offset : offset within the buffer
1200 * @virt : pointer to page address (output)
1201 *
1202 * Should be protected by get_subbuf/put_subbuf.
0112cb7b 1203 * Returns the pointer to the page frame number unsigned long.
f3bc08c5 1204 */
e20c0fec 1205unsigned long *lib_ring_buffer_read_get_pfn(struct lttng_kernel_ring_buffer_backend *bufb,
f3bc08c5
MD
1206 size_t offset, void ***virt)
1207{
1208 size_t index;
e20c0fec 1209 struct lttng_kernel_ring_buffer_backend_pages *rpages;
f3bc08c5 1210 struct channel_backend *chanb = &bufb->chan->backend;
e20c0fec 1211 const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
f3bc08c5
MD
1212 unsigned long sb_bindex, id;
1213
1214 offset &= chanb->buf_size - 1;
1215 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
1216 id = bufb->buf_rsb.id;
1217 sb_bindex = subbuffer_id_get_index(config, id);
1218 rpages = bufb->array[sb_bindex];
1219 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
1220 && subbuffer_id_is_noref(config, id));
1221 *virt = &rpages->p[index].virt;
0112cb7b 1222 return &rpages->p[index].pfn;
f3bc08c5 1223}
0112cb7b 1224EXPORT_SYMBOL_GPL(lib_ring_buffer_read_get_pfn);
f3bc08c5
MD
1225
1226/**
1227 * lib_ring_buffer_read_offset_address - get address of a buffer location
1228 * @bufb : buffer backend
1229 * @offset : offset within the buffer.
1230 *
1231 * Return the address where a given offset is located (for read).
1232 * Should be used to get the current subbuffer header pointer. Given we know
759d02c1
MD
1233 * it's never on a page boundary, it's safe to read/write directly
1234 * from/to this address, as long as the read/write is never bigger than a
1235 * page size.
f3bc08c5 1236 */
e20c0fec 1237void *lib_ring_buffer_read_offset_address(struct lttng_kernel_ring_buffer_backend *bufb,
f3bc08c5
MD
1238 size_t offset)
1239{
1240 size_t index;
e20c0fec 1241 struct lttng_kernel_ring_buffer_backend_pages *rpages;
f3bc08c5 1242 struct channel_backend *chanb = &bufb->chan->backend;
e20c0fec 1243 const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
f3bc08c5
MD
1244 unsigned long sb_bindex, id;
1245
1246 offset &= chanb->buf_size - 1;
1247 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
1248 id = bufb->buf_rsb.id;
1249 sb_bindex = subbuffer_id_get_index(config, id);
1250 rpages = bufb->array[sb_bindex];
1251 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
1252 && subbuffer_id_is_noref(config, id));
1253 return rpages->p[index].virt + (offset & ~PAGE_MASK);
1254}
1255EXPORT_SYMBOL_GPL(lib_ring_buffer_read_offset_address);
1256
1257/**
1258 * lib_ring_buffer_offset_address - get address of a location within the buffer
1259 * @bufb : buffer backend
1260 * @offset : offset within the buffer.
1261 *
1262 * Return the address where a given offset is located.
1263 * Should be used to get the current subbuffer header pointer. Given we know
1264 * it's always at the beginning of a page, it's safe to write directly to this
1265 * address, as long as the write is never bigger than a page size.
1266 */
e20c0fec 1267void *lib_ring_buffer_offset_address(struct lttng_kernel_ring_buffer_backend *bufb,
f3bc08c5
MD
1268 size_t offset)
1269{
1270 size_t sbidx, index;
e20c0fec 1271 struct lttng_kernel_ring_buffer_backend_pages *rpages;
f3bc08c5 1272 struct channel_backend *chanb = &bufb->chan->backend;
e20c0fec 1273 const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
f3bc08c5
MD
1274 unsigned long sb_bindex, id;
1275
1276 offset &= chanb->buf_size - 1;
1277 sbidx = offset >> chanb->subbuf_size_order;
1278 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
1279 id = bufb->buf_wsb[sbidx].id;
1280 sb_bindex = subbuffer_id_get_index(config, id);
1281 rpages = bufb->array[sb_bindex];
1282 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
1283 && subbuffer_id_is_noref(config, id));
1284 return rpages->p[index].virt + (offset & ~PAGE_MASK);
1285}
1286EXPORT_SYMBOL_GPL(lib_ring_buffer_offset_address);
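/*
 * Note on the two address helpers (summary of the code above):
 * lib_ring_buffer_read_offset_address() resolves @offset through the
 * read-side table (buf_rsb), so it is what a consumer uses to inspect the
 * sub-buffer it currently owns, while lib_ring_buffer_offset_address()
 * goes through the write-side table (buf_wsb) and is used, for instance,
 * to fill in a sub-buffer header at the start of a packet:
 *
 *	header = lib_ring_buffer_offset_address(&buf->backend,
 *			subbuf_index * chan->backend.subbuf_size);
 */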