Fix: strlcpy removed in linux 6.8.0-rc1
[lttng-modules.git] / src / lib / ringbuffer / ring_buffer_backend.c
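This commit handles the removal of strlcpy() in Linux 6.8.0-rc1 by guarding the channel-name copy with a kernel version check: kernels 6.8.0 and newer use strscpy(), older kernels keep strlcpy(). The pattern, as it appears in channel_backend_init() below (source lines 408-412):

	#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(6,8,0))
		strscpy(chanb->name, name, NAME_MAX);
	#else
		strlcpy(chanb->name, name, NAME_MAX);
	#endif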
b7cdc182 1/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
9f36eaed 2 *
f3bc08c5
MD
3 * ring_buffer_backend.c
4 *
886d51a3 5 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
f3bc08c5
MD
6 */
7
f3bc08c5
MD
8#include <linux/stddef.h>
9#include <linux/module.h>
10#include <linux/string.h>
11#include <linux/bitops.h>
12#include <linux/delay.h>
13#include <linux/errno.h>
14#include <linux/slab.h>
f3bc08c5 15#include <linux/mm.h>
df388b78 16#include <linux/vmalloc.h>
f3bc08c5 17
ffcc8734 18#include <wrapper/cpu.h>
7502f47a 19#include <wrapper/mm.h>
263b6c88 20#include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_mappings() */
24591303
MD
21#include <ringbuffer/config.h>
22#include <ringbuffer/backend.h>
23#include <ringbuffer/frontend.h>
f3bc08c5
MD
24
25/**
26 * lib_ring_buffer_backend_allocate - allocate a channel buffer
27 * @config: ring buffer instance configuration
28 * @buf: the buffer struct
29 * @size: total size of the buffer
30 * @num_subbuf: number of subbuffers
31 * @extra_reader_sb: need extra subbuffer for reader
32 */
33static
e20c0fec
MD
34int lib_ring_buffer_backend_allocate(const struct lttng_kernel_ring_buffer_config *config,
35 struct lttng_kernel_ring_buffer_backend *bufb,
f3bc08c5
MD
36 size_t size, size_t num_subbuf,
37 int extra_reader_sb)
38{
39 struct channel_backend *chanb = &bufb->chan->backend;
40 unsigned long j, num_pages, num_pages_per_subbuf, page_idx = 0;
41 unsigned long subbuf_size, mmap_offset = 0;
42 unsigned long num_subbuf_alloc;
43 struct page **pages;
f3bc08c5
MD
44 unsigned long i;
45
46 num_pages = size >> PAGE_SHIFT;
1f0ab1eb
FD
47
48 /*
7502f47a
FD
 49 * Verify that there are enough free pages available on the system for
50 * the current allocation request.
51 * wrapper_check_enough_free_pages uses si_mem_available() if available
 52 * and returns whether there should be enough free pages based on the
53 * current estimate.
1f0ab1eb 54 */
7502f47a 55 if (!wrapper_check_enough_free_pages(num_pages))
1f0ab1eb
FD
56 goto not_enough_pages;
57
58 /*
59 * Set the current user thread as the first target of the OOM killer.
60 * If the estimate received by si_mem_available() was off, and we do
61 * end up running out of memory because of this buffer allocation, we
62 * want to kill the offending app first.
63 */
686eb005 64 set_current_oom_origin();
1f0ab1eb 65
f3bc08c5
MD
66 num_pages_per_subbuf = num_pages >> get_count_order(num_subbuf);
67 subbuf_size = chanb->subbuf_size;
68 num_subbuf_alloc = num_subbuf;
69
70 if (extra_reader_sb) {
71 num_pages += num_pages_per_subbuf; /* Add pages for reader */
72 num_subbuf_alloc++;
73 }
74
df388b78 75 pages = vmalloc_node(ALIGN(sizeof(*pages) * num_pages,
f3bc08c5 76 1 << INTERNODE_CACHE_SHIFT),
df388b78 77 cpu_to_node(max(bufb->cpu, 0)));
f3bc08c5
MD
78 if (unlikely(!pages))
79 goto pages_error;
80
48f5e0b5 81 bufb->array = lttng_kvmalloc_node(ALIGN(sizeof(*bufb->array)
f3bc08c5
MD
82 * num_subbuf_alloc,
83 1 << INTERNODE_CACHE_SHIFT),
df388b78
MD
84 GFP_KERNEL | __GFP_NOWARN,
85 cpu_to_node(max(bufb->cpu, 0)));
f3bc08c5
MD
86 if (unlikely(!bufb->array))
87 goto array_error;
88
89 for (i = 0; i < num_pages; i++) {
90 pages[i] = alloc_pages_node(cpu_to_node(max(bufb->cpu, 0)),
df388b78 91 GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 0);
f3bc08c5
MD
92 if (unlikely(!pages[i]))
93 goto depopulate;
f3bc08c5
MD
94 }
95 bufb->num_pages_per_subbuf = num_pages_per_subbuf;
96
97 /* Allocate backend pages array elements */
98 for (i = 0; i < num_subbuf_alloc; i++) {
99 bufb->array[i] =
48f5e0b5 100 lttng_kvzalloc_node(ALIGN(
e20c0fec
MD
101 sizeof(struct lttng_kernel_ring_buffer_backend_pages) +
102 sizeof(struct lttng_kernel_ring_buffer_backend_page)
f3bc08c5
MD
103 * num_pages_per_subbuf,
104 1 << INTERNODE_CACHE_SHIFT),
df388b78
MD
105 GFP_KERNEL | __GFP_NOWARN,
106 cpu_to_node(max(bufb->cpu, 0)));
f3bc08c5
MD
107 if (!bufb->array[i])
108 goto free_array;
109 }
110
111 /* Allocate write-side subbuffer table */
48f5e0b5 112 bufb->buf_wsb = lttng_kvzalloc_node(ALIGN(
e20c0fec 113 sizeof(struct lttng_kernel_ring_buffer_backend_subbuffer)
f3bc08c5
MD
114 * num_subbuf,
115 1 << INTERNODE_CACHE_SHIFT),
df388b78
MD
116 GFP_KERNEL | __GFP_NOWARN,
117 cpu_to_node(max(bufb->cpu, 0)));
f3bc08c5
MD
118 if (unlikely(!bufb->buf_wsb))
119 goto free_array;
120
121 for (i = 0; i < num_subbuf; i++)
122 bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);
123
124 /* Assign read-side subbuffer table */
125 if (extra_reader_sb)
126 bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
127 num_subbuf_alloc - 1);
128 else
129 bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);
130
5b3cf4f9 131 /* Allocate subbuffer packet counter table */
48f5e0b5 132 bufb->buf_cnt = lttng_kvzalloc_node(ALIGN(
e20c0fec 133 sizeof(struct lttng_kernel_ring_buffer_backend_counts)
5b3cf4f9
JD
134 * num_subbuf,
135 1 << INTERNODE_CACHE_SHIFT),
df388b78
MD
136 GFP_KERNEL | __GFP_NOWARN,
137 cpu_to_node(max(bufb->cpu, 0)));
5b3cf4f9
JD
138 if (unlikely(!bufb->buf_cnt))
139 goto free_wsb;
140
f3bc08c5
MD
141 /* Assign pages to page index */
142 for (i = 0; i < num_subbuf_alloc; i++) {
143 for (j = 0; j < num_pages_per_subbuf; j++) {
144 CHAN_WARN_ON(chanb, page_idx > num_pages);
0112cb7b
MD
145 bufb->array[i]->p[j].virt = page_address(pages[page_idx]);
146 bufb->array[i]->p[j].pfn = page_to_pfn(pages[page_idx]);
f3bc08c5
MD
147 page_idx++;
148 }
149 if (config->output == RING_BUFFER_MMAP) {
150 bufb->array[i]->mmap_offset = mmap_offset;
151 mmap_offset += subbuf_size;
152 }
153 }
154
155 /*
156 * If kmalloc ever uses vmalloc underneath, make sure the buffer pages
157 * will not fault.
158 */
263b6c88 159 wrapper_vmalloc_sync_mappings();
686eb005 160 clear_current_oom_origin();
df388b78 161 vfree(pages);
f3bc08c5
MD
162 return 0;
163
5b3cf4f9 164free_wsb:
48f5e0b5 165 lttng_kvfree(bufb->buf_wsb);
f3bc08c5
MD
166free_array:
167 for (i = 0; (i < num_subbuf_alloc && bufb->array[i]); i++)
48f5e0b5 168 lttng_kvfree(bufb->array[i]);
f3bc08c5
MD
169depopulate:
170 /* Free all allocated pages */
171 for (i = 0; (i < num_pages && pages[i]); i++)
172 __free_page(pages[i]);
48f5e0b5 173 lttng_kvfree(bufb->array);
f3bc08c5 174array_error:
df388b78 175 vfree(pages);
f3bc08c5 176pages_error:
686eb005 177 clear_current_oom_origin();
1f0ab1eb 178not_enough_pages:
f3bc08c5
MD
179 return -ENOMEM;
180}
181
e20c0fec 182int lib_ring_buffer_backend_create(struct lttng_kernel_ring_buffer_backend *bufb,
f3bc08c5
MD
183 struct channel_backend *chanb, int cpu)
184{
e20c0fec 185 const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
f3bc08c5 186
860c213b 187 bufb->chan = container_of(chanb, struct lttng_kernel_ring_buffer_channel, backend);
f3bc08c5
MD
188 bufb->cpu = cpu;
189
190 return lib_ring_buffer_backend_allocate(config, bufb, chanb->buf_size,
191 chanb->num_subbuf,
192 chanb->extra_reader_sb);
193}
194
e20c0fec 195void lib_ring_buffer_backend_free(struct lttng_kernel_ring_buffer_backend *bufb)
f3bc08c5
MD
196{
197 struct channel_backend *chanb = &bufb->chan->backend;
198 unsigned long i, j, num_subbuf_alloc;
199
200 num_subbuf_alloc = chanb->num_subbuf;
201 if (chanb->extra_reader_sb)
202 num_subbuf_alloc++;
203
48f5e0b5
MJ
204 lttng_kvfree(bufb->buf_wsb);
205 lttng_kvfree(bufb->buf_cnt);
f3bc08c5
MD
206 for (i = 0; i < num_subbuf_alloc; i++) {
207 for (j = 0; j < bufb->num_pages_per_subbuf; j++)
0112cb7b 208 __free_page(pfn_to_page(bufb->array[i]->p[j].pfn));
48f5e0b5 209 lttng_kvfree(bufb->array[i]);
f3bc08c5 210 }
48f5e0b5 211 lttng_kvfree(bufb->array);
f3bc08c5
MD
212 bufb->allocated = 0;
213}
214
e20c0fec 215void lib_ring_buffer_backend_reset(struct lttng_kernel_ring_buffer_backend *bufb)
f3bc08c5
MD
216{
217 struct channel_backend *chanb = &bufb->chan->backend;
e20c0fec 218 const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
f3bc08c5
MD
219 unsigned long num_subbuf_alloc;
220 unsigned int i;
221
222 num_subbuf_alloc = chanb->num_subbuf;
223 if (chanb->extra_reader_sb)
224 num_subbuf_alloc++;
225
226 for (i = 0; i < chanb->num_subbuf; i++)
227 bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);
228 if (chanb->extra_reader_sb)
229 bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
230 num_subbuf_alloc - 1);
231 else
232 bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);
233
234 for (i = 0; i < num_subbuf_alloc; i++) {
235 /* Don't reset mmap_offset */
236 v_set(config, &bufb->array[i]->records_commit, 0);
237 v_set(config, &bufb->array[i]->records_unread, 0);
238 bufb->array[i]->data_size = 0;
239 /* Don't reset backend page and virt addresses */
240 }
241 /* Don't reset num_pages_per_subbuf, cpu, allocated */
242 v_set(config, &bufb->records_read, 0);
243}
244
245/*
246 * The frontend is responsible for also calling ring_buffer_backend_reset for
247 * each buffer when calling channel_backend_reset.
248 */
249void channel_backend_reset(struct channel_backend *chanb)
250{
860c213b 251 struct lttng_kernel_ring_buffer_channel *chan = container_of(chanb, struct lttng_kernel_ring_buffer_channel, backend);
e20c0fec 252 const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
f3bc08c5
MD
253
254 /*
255 * Don't reset buf_size, subbuf_size, subbuf_size_order,
256 * num_subbuf_order, buf_size_order, extra_reader_sb, num_subbuf,
257 * priv, notifiers, config, cpumask and name.
258 */
259 chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
260}
261
5f4c791e 262#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
1e367326
MD
263
264/*
265 * No need to implement a "dead" callback to do a buffer switch here,
266 * because it will happen when tracing is stopped, or will be done by
267 * switch timer CPU DEAD callback.
 268 * We don't free buffers when CPUs go away, because it would make trace
269 * data vanish, which is unwanted.
270 */
271int lttng_cpuhp_rb_backend_prepare(unsigned int cpu,
272 struct lttng_cpuhp_node *node)
273{
274 struct channel_backend *chanb = container_of(node,
275 struct channel_backend, cpuhp_prepare);
e20c0fec
MD
276 const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
277 struct lttng_kernel_ring_buffer *buf;
1e367326
MD
278 int ret;
279
280 CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
281
282 buf = per_cpu_ptr(chanb->buf, cpu);
283 ret = lib_ring_buffer_create(buf, chanb, cpu);
284 if (ret) {
285 printk(KERN_ERR
5a15f70c 286 "LTTng: ring_buffer_cpu_hp_callback: cpu %d "
1e367326
MD
287 "buffer creation failed\n", cpu);
288 return ret;
289 }
290 return 0;
291}
292EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_backend_prepare);
293
5f4c791e 294#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
1e367326 295
f3bc08c5 296#ifdef CONFIG_HOTPLUG_CPU
1e367326 297
f3bc08c5
MD
298/**
299 * lib_ring_buffer_cpu_hp_callback - CPU hotplug callback
300 * @nb: notifier block
301 * @action: hotplug action to take
302 * @hcpu: CPU number
303 *
304 * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
305 */
306static
e8f071d5 307int lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
f3bc08c5
MD
308 unsigned long action,
309 void *hcpu)
310{
311 unsigned int cpu = (unsigned long)hcpu;
312 struct channel_backend *chanb = container_of(nb, struct channel_backend,
313 cpu_hp_notifier);
e20c0fec
MD
314 const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
315 struct lttng_kernel_ring_buffer *buf;
f3bc08c5
MD
316 int ret;
317
318 CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
319
320 switch (action) {
321 case CPU_UP_PREPARE:
322 case CPU_UP_PREPARE_FROZEN:
323 buf = per_cpu_ptr(chanb->buf, cpu);
324 ret = lib_ring_buffer_create(buf, chanb, cpu);
325 if (ret) {
326 printk(KERN_ERR
5a15f70c 327 "LTTng: ring_buffer_cpu_hp_callback: cpu %d "
f3bc08c5
MD
328 "buffer creation failed\n", cpu);
329 return NOTIFY_BAD;
330 }
331 break;
332 case CPU_DEAD:
333 case CPU_DEAD_FROZEN:
334 /* No need to do a buffer switch here, because it will happen
335 * when tracing is stopped, or will be done by switch timer CPU
336 * DEAD callback. */
337 break;
338 }
339 return NOTIFY_OK;
340}
1e367326 341
f3bc08c5
MD
342#endif
343
5f4c791e 344#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
1e367326 345
f3bc08c5
MD
346/**
347 * channel_backend_init - initialize a channel backend
348 * @chanb: channel backend
349 * @name: channel name
350 * @config: client ring buffer configuration
351 * @priv: client private data
352 * @parent: dentry of parent directory, %NULL for root directory
353 * @subbuf_size: size of sub-buffers (> PAGE_SIZE, power of 2)
354 * @num_subbuf: number of sub-buffers (power of 2)
355 *
 356 * Returns 0 on success, a negative error value otherwise.
357 *
358 * Creates per-cpu channel buffers using the sizes and attributes
359 * specified. The created channel buffer files will be named
360 * name_0...name_N-1. File permissions will be %S_IRUSR.
361 *
362 * Called with CPU hotplug disabled.
363 */
364int channel_backend_init(struct channel_backend *chanb,
365 const char *name,
e20c0fec 366 const struct lttng_kernel_ring_buffer_config *config,
f3bc08c5
MD
367 void *priv, size_t subbuf_size, size_t num_subbuf)
368{
860c213b 369 struct lttng_kernel_ring_buffer_channel *chan = container_of(chanb, struct lttng_kernel_ring_buffer_channel, backend);
f3bc08c5
MD
370 unsigned int i;
371 int ret;
372
373 if (!name)
374 return -EPERM;
375
f3bc08c5 376 /* Check that the subbuffer size is larger than a page. */
2fb46300
MD
377 if (subbuf_size < PAGE_SIZE)
378 return -EINVAL;
f3bc08c5
MD
379
380 /*
bbda3a00
MD
381 * Make sure the number of subbuffers and subbuffer size are
382 * power of 2 and nonzero.
f3bc08c5 383 */
bbda3a00 384 if (!subbuf_size || (subbuf_size & (subbuf_size - 1)))
863497fa 385 return -EINVAL;
bbda3a00 386 if (!num_subbuf || (num_subbuf & (num_subbuf - 1)))
863497fa 387 return -EINVAL;
5140d2b3
MD
388 /*
389 * Overwrite mode buffers require at least 2 subbuffers per
390 * buffer.
391 */
392 if (config->mode == RING_BUFFER_OVERWRITE && num_subbuf < 2)
393 return -EINVAL;
f3bc08c5
MD
394
395 ret = subbuffer_id_check_index(config, num_subbuf);
396 if (ret)
397 return ret;
398
399 chanb->priv = priv;
400 chanb->buf_size = num_subbuf * subbuf_size;
401 chanb->subbuf_size = subbuf_size;
402 chanb->buf_size_order = get_count_order(chanb->buf_size);
403 chanb->subbuf_size_order = get_count_order(subbuf_size);
404 chanb->num_subbuf_order = get_count_order(num_subbuf);
405 chanb->extra_reader_sb =
406 (config->mode == RING_BUFFER_OVERWRITE) ? 1 : 0;
407 chanb->num_subbuf = num_subbuf;
a3d0aa68
KS
408#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(6,8,0))
409 strscpy(chanb->name, name, NAME_MAX);
410#else
f3bc08c5 411 strlcpy(chanb->name, name, NAME_MAX);
a3d0aa68 412#endif
5a8fd222 413 memcpy(&chanb->config, config, sizeof(chanb->config));
f3bc08c5
MD
414
415 if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
416 if (!zalloc_cpumask_var(&chanb->cpumask, GFP_KERNEL))
417 return -ENOMEM;
418 }
419
420 if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
421 /* Allocating the buffer per-cpu structures */
e20c0fec 422 chanb->buf = alloc_percpu(struct lttng_kernel_ring_buffer);
f3bc08c5
MD
423 if (!chanb->buf)
424 goto free_cpumask;
425
5f4c791e 426#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
1e367326
MD
427 chanb->cpuhp_prepare.component = LTTNG_RING_BUFFER_BACKEND;
428 ret = cpuhp_state_add_instance(lttng_rb_hp_prepare,
429 &chanb->cpuhp_prepare.node);
430 if (ret)
431 goto free_bufs;
5f4c791e 432#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
1e367326
MD
433
434 {
435 /*
 436 * In the non-hotplug CPU case, if the ring buffer is allocated
 437 * in an early initcall, it will not be notified of secondary CPUs.
 438 * In that case, we need to allocate for all possible CPUs.
439 */
f3bc08c5 440#ifdef CONFIG_HOTPLUG_CPU
1e367326
MD
441 /*
442 * buf->backend.allocated test takes care of concurrent CPU
443 * hotplug.
444 * Priority higher than frontend, so we create the ring buffer
445 * before we start the timer.
446 */
447 chanb->cpu_hp_notifier.notifier_call =
448 lib_ring_buffer_cpu_hp_callback;
449 chanb->cpu_hp_notifier.priority = 5;
450 register_hotcpu_notifier(&chanb->cpu_hp_notifier);
451
ffcc8734 452 lttng_cpus_read_lock();
1e367326
MD
453 for_each_online_cpu(i) {
454 ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
455 chanb, i);
456 if (ret)
457 goto free_bufs; /* cpu hotplug locked */
458 }
ffcc8734 459 lttng_cpus_read_unlock();
f3bc08c5 460#else
1e367326
MD
461 for_each_possible_cpu(i) {
462 ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
463 chanb, i);
464 if (ret)
465 goto free_bufs;
466 }
f3bc08c5 467#endif
1e367326 468 }
5f4c791e 469#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
f3bc08c5 470 } else {
e20c0fec 471 chanb->buf = kzalloc(sizeof(struct lttng_kernel_ring_buffer), GFP_KERNEL);
f3bc08c5
MD
472 if (!chanb->buf)
473 goto free_cpumask;
474 ret = lib_ring_buffer_create(chanb->buf, chanb, -1);
475 if (ret)
476 goto free_bufs;
477 }
478 chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
479
480 return 0;
481
482free_bufs:
483 if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
5f4c791e 484#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
5f14d8ae
MD
485 /*
486 * Teardown of lttng_rb_hp_prepare instance
487 * on "add" error is handled within cpu hotplug,
488 * no teardown to do from the caller.
489 */
5f4c791e 490#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
1e367326 491#ifdef CONFIG_HOTPLUG_CPU
ffcc8734 492 lttng_cpus_read_unlock();
1e367326
MD
493 unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
494#endif
5f4c791e 495#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
f3bc08c5 496 for_each_possible_cpu(i) {
e20c0fec 497 struct lttng_kernel_ring_buffer *buf =
1e367326 498 per_cpu_ptr(chanb->buf, i);
f3bc08c5
MD
499
500 if (!buf->backend.allocated)
501 continue;
502 lib_ring_buffer_free(buf);
503 }
f3bc08c5
MD
504 free_percpu(chanb->buf);
505 } else
506 kfree(chanb->buf);
507free_cpumask:
508 if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
509 free_cpumask_var(chanb->cpumask);
510 return -ENOMEM;
511}
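/*
 * Usage sketch (hypothetical caller, for illustration only): a client
 * initializing a backend passes a non-NULL name and power-of-2 sizes
 * satisfying the checks above, with CPU hotplug disabled. On failure
 * the function returns -EPERM, -EINVAL or -ENOMEM.
 *
 *	ret = channel_backend_init(&chan->backend, "chan0", config,
 *				   priv, 2 * PAGE_SIZE, 4);
 *	if (ret)
 *		return ret;
 */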
512
513/**
514 * channel_backend_unregister_notifiers - unregister notifiers
 515 * @chanb: the channel backend
516 *
517 * Holds CPU hotplug.
518 */
519void channel_backend_unregister_notifiers(struct channel_backend *chanb)
520{
e20c0fec 521 const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
f3bc08c5 522
1e367326 523 if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
5f4c791e 524#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
1e367326
MD
525 int ret;
526
527 ret = cpuhp_state_remove_instance(lttng_rb_hp_prepare,
528 &chanb->cpuhp_prepare.node);
529 WARN_ON(ret);
5f4c791e 530#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
f3bc08c5 531 unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
5f4c791e 532#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
1e367326 533 }
f3bc08c5
MD
534}
535
536/**
537 * channel_backend_free - destroy the channel
 538 * @chanb: the channel backend
539 *
 540 * Destroys all channel buffers and frees the channel.
541 */
542void channel_backend_free(struct channel_backend *chanb)
543{
e20c0fec 544 const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
f3bc08c5
MD
545 unsigned int i;
546
547 if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
548 for_each_possible_cpu(i) {
e20c0fec 549 struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);
f3bc08c5
MD
550
551 if (!buf->backend.allocated)
552 continue;
553 lib_ring_buffer_free(buf);
554 }
555 free_cpumask_var(chanb->cpumask);
556 free_percpu(chanb->buf);
557 } else {
e20c0fec 558 struct lttng_kernel_ring_buffer *buf = chanb->buf;
f3bc08c5
MD
559
560 CHAN_WARN_ON(chanb, !buf->backend.allocated);
561 lib_ring_buffer_free(buf);
562 kfree(buf);
563 }
564}
565
566/**
567 * lib_ring_buffer_write - write data to a ring_buffer buffer.
568 * @bufb : buffer backend
569 * @offset : offset within the buffer
570 * @src : source address
571 * @len : length to write
f3bc08c5 572 */
e20c0fec 573void _lib_ring_buffer_write(struct lttng_kernel_ring_buffer_backend *bufb, size_t offset,
4774817f 574 const void *src, size_t len)
f3bc08c5
MD
575{
576 struct channel_backend *chanb = &bufb->chan->backend;
e20c0fec 577 const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
4774817f 578 size_t sbidx, index, bytes_left_in_page;
e20c0fec 579 struct lttng_kernel_ring_buffer_backend_pages *rpages;
f3bc08c5
MD
580 unsigned long sb_bindex, id;
581
582 do {
f3bc08c5
MD
583 sbidx = offset >> chanb->subbuf_size_order;
584 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
585
586 /*
587 * Underlying layer should never ask for writes across
588 * subbuffers.
589 */
590 CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
591
4774817f 592 bytes_left_in_page = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
f3bc08c5
MD
593 id = bufb->buf_wsb[sbidx].id;
594 sb_bindex = subbuffer_id_get_index(config, id);
595 rpages = bufb->array[sb_bindex];
596 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
597 && subbuffer_id_is_noref(config, id));
598 lib_ring_buffer_do_copy(config,
599 rpages->p[index].virt
600 + (offset & ~PAGE_MASK),
4774817f
MD
601 src, bytes_left_in_page);
602 len -= bytes_left_in_page;
603 src += bytes_left_in_page;
604 offset += bytes_left_in_page;
605 } while (unlikely(len));
f3bc08c5
MD
606}
607EXPORT_SYMBOL_GPL(_lib_ring_buffer_write);
608
4ea00e4f
JD
609/**
610 * lib_ring_buffer_memset - write len bytes of c to a ring_buffer buffer.
611 * @bufb : buffer backend
612 * @offset : offset within the buffer
613 * @c : the byte to write
614 * @len : length to write
4ea00e4f 615 */
e20c0fec 616void _lib_ring_buffer_memset(struct lttng_kernel_ring_buffer_backend *bufb,
4774817f 617 size_t offset, int c, size_t len)
4ea00e4f
JD
618{
619 struct channel_backend *chanb = &bufb->chan->backend;
e20c0fec 620 const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
4774817f 621 size_t sbidx, index, bytes_left_in_page;
e20c0fec 622 struct lttng_kernel_ring_buffer_backend_pages *rpages;
4ea00e4f
JD
623 unsigned long sb_bindex, id;
624
625 do {
4ea00e4f
JD
626 sbidx = offset >> chanb->subbuf_size_order;
627 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
628
629 /*
630 * Underlying layer should never ask for writes across
631 * subbuffers.
632 */
633 CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
634
4774817f 635 bytes_left_in_page = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
4ea00e4f
JD
636 id = bufb->buf_wsb[sbidx].id;
637 sb_bindex = subbuffer_id_get_index(config, id);
638 rpages = bufb->array[sb_bindex];
639 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
640 && subbuffer_id_is_noref(config, id));
641 lib_ring_buffer_do_memset(rpages->p[index].virt
642 + (offset & ~PAGE_MASK),
4774817f
MD
643 c, bytes_left_in_page);
644 len -= bytes_left_in_page;
645 offset += bytes_left_in_page;
646 } while (unlikely(len));
4ea00e4f
JD
647}
648EXPORT_SYMBOL_GPL(_lib_ring_buffer_memset);
649
16f78f3a
MD
650/**
651 * lib_ring_buffer_strcpy - write string data to a ring_buffer buffer.
652 * @bufb : buffer backend
653 * @offset : offset within the buffer
654 * @src : source address
655 * @len : length to write
16f78f3a
MD
656 * @pad : character to use for padding
657 */
e20c0fec 658void _lib_ring_buffer_strcpy(struct lttng_kernel_ring_buffer_backend *bufb,
e5f1eb9a 659 size_t offset, const char *src, size_t len, int pad)
16f78f3a
MD
660{
661 struct channel_backend *chanb = &bufb->chan->backend;
e20c0fec 662 const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
e5f1eb9a 663 size_t sbidx, index, bytes_left_in_page;
e20c0fec 664 struct lttng_kernel_ring_buffer_backend_pages *rpages;
16f78f3a 665 unsigned long sb_bindex, id;
e5f1eb9a 666 bool src_terminated = false;
16f78f3a
MD
667
668 CHAN_WARN_ON(chanb, !len);
16f78f3a 669 do {
16f78f3a
MD
670 sbidx = offset >> chanb->subbuf_size_order;
671 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
672
673 /*
674 * Underlying layer should never ask for writes across
675 * subbuffers.
676 */
677 CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
678
e5f1eb9a 679 bytes_left_in_page = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
16f78f3a
MD
680 id = bufb->buf_wsb[sbidx].id;
681 sb_bindex = subbuffer_id_get_index(config, id);
682 rpages = bufb->array[sb_bindex];
683 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
684 && subbuffer_id_is_noref(config, id));
685
686 if (likely(!src_terminated)) {
687 size_t count, to_copy;
688
e5f1eb9a
MD
689 to_copy = bytes_left_in_page;
690 if (bytes_left_in_page == len)
16f78f3a
MD
691 to_copy--; /* Final '\0' */
692 count = lib_ring_buffer_do_strcpy(config,
693 rpages->p[index].virt
694 + (offset & ~PAGE_MASK),
695 src, to_copy);
696 offset += count;
697 /* Padding */
698 if (unlikely(count < to_copy)) {
699 size_t pad_len = to_copy - count;
700
701 /* Next pages will have padding */
e5f1eb9a 702 src_terminated = true;
16f78f3a
MD
703 lib_ring_buffer_do_memset(rpages->p[index].virt
704 + (offset & ~PAGE_MASK),
705 pad, pad_len);
706 offset += pad_len;
707 }
708 } else {
709 size_t pad_len;
710
e5f1eb9a
MD
711 pad_len = bytes_left_in_page;
712 if (bytes_left_in_page == len)
16f78f3a
MD
713 pad_len--; /* Final '\0' */
714 lib_ring_buffer_do_memset(rpages->p[index].virt
715 + (offset & ~PAGE_MASK),
716 pad, pad_len);
717 offset += pad_len;
718 }
e5f1eb9a
MD
719 len -= bytes_left_in_page;
720 if (!src_terminated)
721 src += bytes_left_in_page;
722 } while (unlikely(len));
723
16f78f3a
MD
724 /* Ending '\0' */
725 lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
726 '\0', 1);
727}
728EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy);
4ea00e4f 729
0d1080f4
MD
730/**
731 * _lib_ring_buffer_pstrcpy - write to a buffer backend P-string
732 * @bufb : buffer backend
733 * @src : source pointer to copy from
734 * @len : length of data to copy
735 * @pad : character to use for padding
736 *
737 * This function copies up to @len bytes of data from a source pointer
 738 * into a Pascal String in the buffer backend. If a terminating '\0'
739 * character is found in @src before @len characters are copied, pad the
740 * buffer with @pad characters (e.g. '\0').
741 *
742 * The length of the pascal strings in the ring buffer is explicit: it
743 * is either the array or sequence length.
744 */
745void _lib_ring_buffer_pstrcpy(struct lttng_kernel_ring_buffer_backend *bufb,
746 size_t offset, const char *src, size_t len, int pad)
747{
748 struct channel_backend *chanb = &bufb->chan->backend;
749 const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
750 size_t sbidx, index, bytes_left_in_page;
751 struct lttng_kernel_ring_buffer_backend_pages *rpages;
752 unsigned long sb_bindex, id;
753 bool src_terminated = false;
754
755 CHAN_WARN_ON(chanb, !len);
756 do {
757 sbidx = offset >> chanb->subbuf_size_order;
758 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
759
760 /*
761 * Underlying layer should never ask for writes across
762 * subbuffers.
763 */
764 CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
765
766 bytes_left_in_page = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
767 id = bufb->buf_wsb[sbidx].id;
768 sb_bindex = subbuffer_id_get_index(config, id);
769 rpages = bufb->array[sb_bindex];
770 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
771 && subbuffer_id_is_noref(config, id));
772
773 if (likely(!src_terminated)) {
774 size_t count, to_copy;
775
776 to_copy = bytes_left_in_page;
777 count = lib_ring_buffer_do_strcpy(config,
778 rpages->p[index].virt
779 + (offset & ~PAGE_MASK),
780 src, to_copy);
781 offset += count;
782 /* Padding */
783 if (unlikely(count < to_copy)) {
784 size_t pad_len = to_copy - count;
785
786 /* Next pages will have padding */
787 src_terminated = true;
788 lib_ring_buffer_do_memset(rpages->p[index].virt
789 + (offset & ~PAGE_MASK),
790 pad, pad_len);
791 offset += pad_len;
792 }
793 } else {
794 size_t pad_len;
795
796 pad_len = bytes_left_in_page;
797 lib_ring_buffer_do_memset(rpages->p[index].virt
798 + (offset & ~PAGE_MASK),
799 pad, pad_len);
800 offset += pad_len;
801 }
802 len -= bytes_left_in_page;
803 if (!src_terminated)
804 src += bytes_left_in_page;
805 } while (unlikely(len));
806}
807EXPORT_SYMBOL_GPL(_lib_ring_buffer_pstrcpy);
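/*
 * Illustration (not part of the original source): assuming the write fits
 * within one page, with len = 8 and pad = '\0', copying src = "abc" writes
 * 'a', 'b', 'c' followed by five '\0' pad bytes, so exactly @len bytes land
 * in the backend; the string length itself is carried by the event layout
 * as the array or sequence length.
 */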
808
4ea00e4f 809/**
7b8ea3a5 810 * lib_ring_buffer_copy_from_user_inatomic - write user data to a ring_buffer buffer.
4ea00e4f
JD
811 * @bufb : buffer backend
812 * @offset : offset within the buffer
813 * @src : source address
814 * @len : length to write
4ea00e4f
JD
815 *
816 * This function deals with userspace pointers, it should never be called
817 * directly without having the src pointer checked with access_ok()
818 * previously.
819 */
e20c0fec 820void _lib_ring_buffer_copy_from_user_inatomic(struct lttng_kernel_ring_buffer_backend *bufb,
4774817f 821 size_t offset, const void __user *src, size_t len)
4ea00e4f
JD
822{
823 struct channel_backend *chanb = &bufb->chan->backend;
e20c0fec 824 const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
4774817f 825 size_t sbidx, index, bytes_left_in_page;
e20c0fec 826 struct lttng_kernel_ring_buffer_backend_pages *rpages;
4ea00e4f
JD
827 unsigned long sb_bindex, id;
828 int ret;
829
830 do {
4ea00e4f
JD
831 sbidx = offset >> chanb->subbuf_size_order;
832 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
833
834 /*
835 * Underlying layer should never ask for writes across
836 * subbuffers.
837 */
838 CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
839
4774817f 840 bytes_left_in_page = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
4ea00e4f
JD
841 id = bufb->buf_wsb[sbidx].id;
842 sb_bindex = subbuffer_id_get_index(config, id);
843 rpages = bufb->array[sb_bindex];
844 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
845 && subbuffer_id_is_noref(config, id));
7b8ea3a5 846 ret = lib_ring_buffer_do_copy_from_user_inatomic(rpages->p[index].virt
4ea00e4f 847 + (offset & ~PAGE_MASK),
4774817f 848 src, bytes_left_in_page) != 0;
4ea00e4f 849 if (ret > 0) {
d87a9f03 850 /* Copy failed. */
4774817f 851 _lib_ring_buffer_memset(bufb, offset, 0, len);
4ea00e4f
JD
852 break; /* stop copy */
853 }
4774817f
MD
854 len -= bytes_left_in_page;
855 src += bytes_left_in_page;
856 offset += bytes_left_in_page;
857 } while (unlikely(len));
4ea00e4f 858}
7b8ea3a5 859EXPORT_SYMBOL_GPL(_lib_ring_buffer_copy_from_user_inatomic);
4ea00e4f 860
16f78f3a
MD
861/**
862 * lib_ring_buffer_strcpy_from_user_inatomic - write userspace string data to a ring_buffer buffer.
863 * @bufb : buffer backend
864 * @offset : offset within the buffer
865 * @src : source address
866 * @len : length to write
16f78f3a
MD
867 * @pad : character to use for padding
868 *
869 * This function deals with userspace pointers, it should never be called
870 * directly without having the src pointer checked with access_ok()
871 * previously.
872 */
e20c0fec 873void _lib_ring_buffer_strcpy_from_user_inatomic(struct lttng_kernel_ring_buffer_backend *bufb,
e5f1eb9a 874 size_t offset, const char __user *src, size_t len, int pad)
16f78f3a
MD
875{
876 struct channel_backend *chanb = &bufb->chan->backend;
e20c0fec 877 const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
e5f1eb9a 878 size_t sbidx, index, bytes_left_in_page;
e20c0fec 879 struct lttng_kernel_ring_buffer_backend_pages *rpages;
16f78f3a 880 unsigned long sb_bindex, id;
e5f1eb9a 881 bool src_terminated = false;
16f78f3a 882
16f78f3a 883 do {
16f78f3a
MD
884 sbidx = offset >> chanb->subbuf_size_order;
885 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
886
887 /*
888 * Underlying layer should never ask for writes across
889 * subbuffers.
890 */
891 CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
892
e5f1eb9a 893 bytes_left_in_page = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
16f78f3a
MD
894 id = bufb->buf_wsb[sbidx].id;
895 sb_bindex = subbuffer_id_get_index(config, id);
896 rpages = bufb->array[sb_bindex];
897 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
898 && subbuffer_id_is_noref(config, id));
899
900 if (likely(!src_terminated)) {
901 size_t count, to_copy;
902
e5f1eb9a
MD
903 to_copy = bytes_left_in_page;
904 if (bytes_left_in_page == len)
16f78f3a
MD
905 to_copy--; /* Final '\0' */
906 count = lib_ring_buffer_do_strcpy_from_user_inatomic(config,
907 rpages->p[index].virt
908 + (offset & ~PAGE_MASK),
909 src, to_copy);
910 offset += count;
911 /* Padding */
912 if (unlikely(count < to_copy)) {
913 size_t pad_len = to_copy - count;
914
915 /* Next pages will have padding */
e5f1eb9a 916 src_terminated = true;
16f78f3a
MD
917 lib_ring_buffer_do_memset(rpages->p[index].virt
918 + (offset & ~PAGE_MASK),
919 pad, pad_len);
920 offset += pad_len;
921 }
922 } else {
923 size_t pad_len;
924
e5f1eb9a
MD
925 pad_len = bytes_left_in_page;
926 if (bytes_left_in_page == len)
16f78f3a
MD
927 pad_len--; /* Final '\0' */
928 lib_ring_buffer_do_memset(rpages->p[index].virt
929 + (offset & ~PAGE_MASK),
930 pad, pad_len);
931 offset += pad_len;
932 }
e5f1eb9a
MD
933 len -= bytes_left_in_page;
934 if (!src_terminated)
935 src += bytes_left_in_page;
936 } while (unlikely(len));
937
16f78f3a
MD
938 /* Ending '\0' */
939 lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
940 '\0', 1);
941}
942EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy_from_user_inatomic);
943
0d1080f4
MD
944/**
945 * _lib_ring_buffer_pstrcpy_from_user_inatomic - write userspace string to a buffer backend P-string
946 * @bufb : buffer backend
947 * @src : source pointer to copy from
948 * @len : length of data to copy
949 * @pad : character to use for padding
950 *
951 * This function copies up to @len bytes of data from a source pointer
 952 * into a Pascal String in the buffer backend. If a terminating '\0'
953 * character is found in @src before @len characters are copied, pad the
954 * buffer with @pad characters (e.g. '\0').
955 *
956 * The length of the pascal strings in the ring buffer is explicit: it
957 * is either the array or sequence length.
958 *
959 * This function deals with userspace pointers, it should never be called
960 * directly without having the src pointer checked with access_ok()
961 * previously.
962 */
963void _lib_ring_buffer_pstrcpy_from_user_inatomic(struct lttng_kernel_ring_buffer_backend *bufb,
964 size_t offset, const char __user *src, size_t len, int pad)
965{
966 struct channel_backend *chanb = &bufb->chan->backend;
967 const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
968 size_t sbidx, index, bytes_left_in_page;
969 struct lttng_kernel_ring_buffer_backend_pages *rpages;
970 unsigned long sb_bindex, id;
971 bool src_terminated = false;
972
973 CHAN_WARN_ON(chanb, !len);
974 do {
975 sbidx = offset >> chanb->subbuf_size_order;
976 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
977
978 /*
979 * Underlying layer should never ask for writes across
980 * subbuffers.
981 */
982 CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
983
984 bytes_left_in_page = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
985 id = bufb->buf_wsb[sbidx].id;
986 sb_bindex = subbuffer_id_get_index(config, id);
987 rpages = bufb->array[sb_bindex];
988 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
989 && subbuffer_id_is_noref(config, id));
990
991 if (likely(!src_terminated)) {
992 size_t count, to_copy;
993
994 to_copy = bytes_left_in_page;
995 count = lib_ring_buffer_do_strcpy_from_user_inatomic(config,
996 rpages->p[index].virt
997 + (offset & ~PAGE_MASK),
998 src, to_copy);
999 offset += count;
1000 /* Padding */
1001 if (unlikely(count < to_copy)) {
1002 size_t pad_len = to_copy - count;
1003
1004 /* Next pages will have padding */
1005 src_terminated = true;
1006 lib_ring_buffer_do_memset(rpages->p[index].virt
1007 + (offset & ~PAGE_MASK),
1008 pad, pad_len);
1009 offset += pad_len;
1010 }
1011 } else {
1012 size_t pad_len;
1013
1014 pad_len = bytes_left_in_page;
1015 lib_ring_buffer_do_memset(rpages->p[index].virt
1016 + (offset & ~PAGE_MASK),
1017 pad, pad_len);
1018 offset += pad_len;
1019 }
1020 len -= bytes_left_in_page;
1021 if (!src_terminated)
1022 src += bytes_left_in_page;
1023 } while (unlikely(len));
1024}
1025EXPORT_SYMBOL_GPL(_lib_ring_buffer_pstrcpy_from_user_inatomic);
1026
f3bc08c5
MD
1027/**
 1028 * lib_ring_buffer_read - read data from a ring_buffer buffer.
1029 * @bufb : buffer backend
1030 * @offset : offset within the buffer
1031 * @dest : destination address
1032 * @len : length to copy to destination
1033 *
1034 * Should be protected by get_subbuf/put_subbuf.
1035 * Returns the length copied.
1036 */
e20c0fec 1037size_t lib_ring_buffer_read(struct lttng_kernel_ring_buffer_backend *bufb, size_t offset,
f3bc08c5
MD
1038 void *dest, size_t len)
1039{
1040 struct channel_backend *chanb = &bufb->chan->backend;
e20c0fec 1041 const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
4774817f 1042 size_t index, bytes_left_in_page, orig_len;
e20c0fec 1043 struct lttng_kernel_ring_buffer_backend_pages *rpages;
f3bc08c5
MD
1044 unsigned long sb_bindex, id;
1045
1046 orig_len = len;
1047 offset &= chanb->buf_size - 1;
1048 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
1049 if (unlikely(!len))
1050 return 0;
1051 for (;;) {
4774817f 1052 bytes_left_in_page = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
f3bc08c5
MD
1053 id = bufb->buf_rsb.id;
1054 sb_bindex = subbuffer_id_get_index(config, id);
1055 rpages = bufb->array[sb_bindex];
1056 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
1057 && subbuffer_id_is_noref(config, id));
1058 memcpy(dest, rpages->p[index].virt + (offset & ~PAGE_MASK),
4774817f
MD
1059 bytes_left_in_page);
1060 len -= bytes_left_in_page;
f3bc08c5
MD
1061 if (likely(!len))
1062 break;
4774817f
MD
1063 dest += bytes_left_in_page;
1064 offset += bytes_left_in_page;
f3bc08c5
MD
1065 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
1066 /*
1067 * Underlying layer should never ask for reads across
1068 * subbuffers.
1069 */
1070 CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
1071 }
1072 return orig_len;
1073}
1074EXPORT_SYMBOL_GPL(lib_ring_buffer_read);
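/*
 * Usage sketch (hypothetical reader, for illustration only): with the
 * sub-buffer held via get_subbuf(), a reader can copy data out of the
 * backend into a local buffer:
 *
 *	char data[64];
 *	size_t copied;
 *
 *	copied = lib_ring_buffer_read(&buf->backend, offset, data,
 *				      sizeof(data));
 */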
1075
1076/**
1077 * __lib_ring_buffer_copy_to_user - read data from ring_buffer to userspace
1078 * @bufb : buffer backend
1079 * @offset : offset within the buffer
1080 * @dest : destination userspace address
1081 * @len : length to copy to destination
1082 *
1083 * Should be protected by get_subbuf/put_subbuf.
1084 * access_ok() must have been performed on dest addresses prior to call this
1085 * function.
1086 * Returns -EFAULT on error, 0 if ok.
1087 */
e20c0fec 1088int __lib_ring_buffer_copy_to_user(struct lttng_kernel_ring_buffer_backend *bufb,
f3bc08c5
MD
1089 size_t offset, void __user *dest, size_t len)
1090{
1091 struct channel_backend *chanb = &bufb->chan->backend;
e20c0fec 1092 const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
f3bc08c5 1093 size_t index;
4774817f 1094 ssize_t bytes_left_in_page;
e20c0fec 1095 struct lttng_kernel_ring_buffer_backend_pages *rpages;
f3bc08c5
MD
1096 unsigned long sb_bindex, id;
1097
f3bc08c5
MD
1098 offset &= chanb->buf_size - 1;
1099 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
1100 if (unlikely(!len))
1101 return 0;
1102 for (;;) {
4774817f 1103 bytes_left_in_page = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
f3bc08c5
MD
1104 id = bufb->buf_rsb.id;
1105 sb_bindex = subbuffer_id_get_index(config, id);
1106 rpages = bufb->array[sb_bindex];
1107 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
1108 && subbuffer_id_is_noref(config, id));
1109 if (__copy_to_user(dest,
1110 rpages->p[index].virt + (offset & ~PAGE_MASK),
4774817f 1111 bytes_left_in_page))
f3bc08c5 1112 return -EFAULT;
4774817f 1113 len -= bytes_left_in_page;
f3bc08c5
MD
1114 if (likely(!len))
1115 break;
4774817f
MD
1116 dest += bytes_left_in_page;
1117 offset += bytes_left_in_page;
f3bc08c5
MD
1118 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
1119 /*
1120 * Underlying layer should never ask for reads across
1121 * subbuffers.
1122 */
1123 CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
1124 }
1125 return 0;
1126}
1127EXPORT_SYMBOL_GPL(__lib_ring_buffer_copy_to_user);
1128
1129/**
1130 * lib_ring_buffer_read_cstr - read a C-style string from ring_buffer.
1131 * @bufb : buffer backend
1132 * @offset : offset within the buffer
1133 * @dest : destination address
1134 * @len : destination's length
1135 *
61eb4c39 1136 * Return string's length, or -EINVAL on error.
f3bc08c5 1137 * Should be protected by get_subbuf/put_subbuf.
61eb4c39 1138 * Destination length should be at least 1 to hold '\0'.
f3bc08c5 1139 */
e20c0fec 1140int lib_ring_buffer_read_cstr(struct lttng_kernel_ring_buffer_backend *bufb, size_t offset,
f3bc08c5
MD
1141 void *dest, size_t len)
1142{
1143 struct channel_backend *chanb = &bufb->chan->backend;
e20c0fec 1144 const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
f3bc08c5 1145 size_t index;
4774817f 1146 ssize_t bytes_left_in_page, pagelen, strpagelen, orig_offset;
f3bc08c5 1147 char *str;
e20c0fec 1148 struct lttng_kernel_ring_buffer_backend_pages *rpages;
f3bc08c5
MD
1149 unsigned long sb_bindex, id;
1150
1151 offset &= chanb->buf_size - 1;
1152 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
1153 orig_offset = offset;
61eb4c39
MD
1154 if (unlikely(!len))
1155 return -EINVAL;
f3bc08c5
MD
1156 for (;;) {
1157 id = bufb->buf_rsb.id;
1158 sb_bindex = subbuffer_id_get_index(config, id);
1159 rpages = bufb->array[sb_bindex];
1160 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
1161 && subbuffer_id_is_noref(config, id));
1162 str = (char *)rpages->p[index].virt + (offset & ~PAGE_MASK);
1163 pagelen = PAGE_SIZE - (offset & ~PAGE_MASK);
1164 strpagelen = strnlen(str, pagelen);
1165 if (len) {
4774817f 1166 bytes_left_in_page = min_t(size_t, len, strpagelen);
f3bc08c5 1167 if (dest) {
4774817f
MD
1168 memcpy(dest, str, bytes_left_in_page);
1169 dest += bytes_left_in_page;
f3bc08c5 1170 }
4774817f 1171 len -= bytes_left_in_page;
f3bc08c5
MD
1172 }
1173 offset += strpagelen;
1174 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
1175 if (strpagelen < pagelen)
1176 break;
1177 /*
1178 * Underlying layer should never ask for reads across
1179 * subbuffers.
1180 */
1181 CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
1182 }
1183 if (dest && len)
1184 ((char *)dest)[0] = 0;
1185 return offset - orig_offset;
1186}
1187EXPORT_SYMBOL_GPL(lib_ring_buffer_read_cstr);
1188
1189/**
0112cb7b 1190 * lib_ring_buffer_read_get_pfn - Get a page frame number to read from
f3bc08c5
MD
1191 * @bufb : buffer backend
1192 * @offset : offset within the buffer
1193 * @virt : pointer to page address (output)
1194 *
1195 * Should be protected by get_subbuf/put_subbuf.
0112cb7b 1196 * Returns the pointer to the page frame number unsigned long.
f3bc08c5 1197 */
e20c0fec 1198unsigned long *lib_ring_buffer_read_get_pfn(struct lttng_kernel_ring_buffer_backend *bufb,
f3bc08c5
MD
1199 size_t offset, void ***virt)
1200{
1201 size_t index;
e20c0fec 1202 struct lttng_kernel_ring_buffer_backend_pages *rpages;
f3bc08c5 1203 struct channel_backend *chanb = &bufb->chan->backend;
e20c0fec 1204 const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
f3bc08c5
MD
1205 unsigned long sb_bindex, id;
1206
1207 offset &= chanb->buf_size - 1;
1208 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
1209 id = bufb->buf_rsb.id;
1210 sb_bindex = subbuffer_id_get_index(config, id);
1211 rpages = bufb->array[sb_bindex];
1212 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
1213 && subbuffer_id_is_noref(config, id));
1214 *virt = &rpages->p[index].virt;
0112cb7b 1215 return &rpages->p[index].pfn;
f3bc08c5 1216}
0112cb7b 1217EXPORT_SYMBOL_GPL(lib_ring_buffer_read_get_pfn);
f3bc08c5
MD
1218
1219/**
1220 * lib_ring_buffer_read_offset_address - get address of a buffer location
1221 * @bufb : buffer backend
1222 * @offset : offset within the buffer.
1223 *
1224 * Return the address where a given offset is located (for read).
1225 * Should be used to get the current subbuffer header pointer. Given we know
759d02c1
MD
1226 * it's never on a page boundary, it's safe to read/write directly
1227 * from/to this address, as long as the read/write is never bigger than a
1228 * page size.
f3bc08c5 1229 */
e20c0fec 1230void *lib_ring_buffer_read_offset_address(struct lttng_kernel_ring_buffer_backend *bufb,
f3bc08c5
MD
1231 size_t offset)
1232{
1233 size_t index;
e20c0fec 1234 struct lttng_kernel_ring_buffer_backend_pages *rpages;
f3bc08c5 1235 struct channel_backend *chanb = &bufb->chan->backend;
e20c0fec 1236 const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
f3bc08c5
MD
1237 unsigned long sb_bindex, id;
1238
1239 offset &= chanb->buf_size - 1;
1240 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
1241 id = bufb->buf_rsb.id;
1242 sb_bindex = subbuffer_id_get_index(config, id);
1243 rpages = bufb->array[sb_bindex];
1244 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
1245 && subbuffer_id_is_noref(config, id));
1246 return rpages->p[index].virt + (offset & ~PAGE_MASK);
1247}
1248EXPORT_SYMBOL_GPL(lib_ring_buffer_read_offset_address);
1249
1250/**
1251 * lib_ring_buffer_offset_address - get address of a location within the buffer
1252 * @bufb : buffer backend
1253 * @offset : offset within the buffer.
1254 *
1255 * Return the address where a given offset is located.
1256 * Should be used to get the current subbuffer header pointer. Given we know
1257 * it's always at the beginning of a page, it's safe to write directly to this
1258 * address, as long as the write is never bigger than a page size.
1259 */
e20c0fec 1260void *lib_ring_buffer_offset_address(struct lttng_kernel_ring_buffer_backend *bufb,
f3bc08c5
MD
1261 size_t offset)
1262{
1263 size_t sbidx, index;
e20c0fec 1264 struct lttng_kernel_ring_buffer_backend_pages *rpages;
f3bc08c5 1265 struct channel_backend *chanb = &bufb->chan->backend;
e20c0fec 1266 const struct lttng_kernel_ring_buffer_config *config = &chanb->config;
f3bc08c5
MD
1267 unsigned long sb_bindex, id;
1268
1269 offset &= chanb->buf_size - 1;
1270 sbidx = offset >> chanb->subbuf_size_order;
1271 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
1272 id = bufb->buf_wsb[sbidx].id;
1273 sb_bindex = subbuffer_id_get_index(config, id);
1274 rpages = bufb->array[sb_bindex];
1275 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
1276 && subbuffer_id_is_noref(config, id));
1277 return rpages->p[index].virt + (offset & ~PAGE_MASK);
1278}
1279EXPORT_SYMBOL_GPL(lib_ring_buffer_offset_address);