/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
 *
 * ring_buffer_backend.c
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/oom.h>

#include <wrapper/ringbuffer/config.h>
#include <wrapper/ringbuffer/backend.h>
#include <wrapper/ringbuffer/frontend.h>

/**
 * lib_ring_buffer_backend_allocate - allocate a channel buffer
 * @config: ring buffer instance configuration
 * @bufb: buffer backend
 * @size: total size of the buffer
 * @num_subbuf: number of subbuffers
 * @extra_reader_sb: need extra subbuffer for reader
 */
static
int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config,
				     struct lib_ring_buffer_backend *bufb,
				     size_t size, size_t num_subbuf,
				     int extra_reader_sb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	unsigned long j, num_pages, num_pages_per_subbuf, page_idx = 0;
	unsigned long subbuf_size, mmap_offset = 0;
	unsigned long num_subbuf_alloc;
	struct page **pages;
	unsigned long i;

	num_pages = size >> PAGE_SHIFT;

	/*
	 * Verify that there are enough free pages available on the system
	 * for the current allocation request. si_mem_available() only
	 * returns an estimate, but it is good enough to decide whether the
	 * allocation should be attempted at all.
	 */
	if (num_pages >= si_mem_available())
		goto not_enough_pages;

	/*
	 * Set the current user thread as the first target of the OOM killer.
	 * If the estimate returned by si_mem_available() was off, and we do
	 * end up running out of memory because of this buffer allocation, we
	 * want to kill the offending app first.
	 */
	set_current_oom_origin();

	num_pages_per_subbuf = num_pages >> get_count_order(num_subbuf);
	subbuf_size = chanb->subbuf_size;
	num_subbuf_alloc = num_subbuf;

	if (extra_reader_sb) {
		num_pages += num_pages_per_subbuf; /* Add pages for reader */
		num_subbuf_alloc++;
	}

	pages = vmalloc_node(ALIGN(sizeof(*pages) * num_pages,
				   1 << INTERNODE_CACHE_SHIFT),
			     cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!pages))
		goto pages_error;

	bufb->array = kvmalloc_node(ALIGN(sizeof(*bufb->array)
					  * num_subbuf_alloc,
					  1 << INTERNODE_CACHE_SHIFT),
				    GFP_KERNEL | __GFP_NOWARN,
				    cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->array))
		goto array_error;

	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_pages_node(cpu_to_node(max(bufb->cpu, 0)),
					    GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 0);
		if (unlikely(!pages[i]))
			goto depopulate;
	}
	bufb->num_pages_per_subbuf = num_pages_per_subbuf;

	/* Allocate backend pages array elements */
	for (i = 0; i < num_subbuf_alloc; i++) {
		bufb->array[i] =
			kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_pages) +
				sizeof(struct lib_ring_buffer_backend_page)
				* num_pages_per_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
		if (!bufb->array[i])
			goto free_array;
	}

	/* Allocate write-side subbuffer table */
	bufb->buf_wsb = kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_subbuffer)
				* num_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->buf_wsb))
		goto free_array;

	for (i = 0; i < num_subbuf; i++)
		bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);

	/* Assign read-side subbuffer table */
	if (extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	/* Allocate subbuffer packet counter table */
	bufb->buf_cnt = kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_counts)
				* num_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->buf_cnt))
		goto free_wsb;

	/* Assign pages to page index */
	for (i = 0; i < num_subbuf_alloc; i++) {
		for (j = 0; j < num_pages_per_subbuf; j++) {
			CHAN_WARN_ON(chanb, page_idx > num_pages);
			bufb->array[i]->p[j].virt = page_address(pages[page_idx]);
			bufb->array[i]->p[j].pfn = page_to_pfn(pages[page_idx]);
			page_idx++;
		}
		if (config->output == RING_BUFFER_MMAP) {
			bufb->array[i]->mmap_offset = mmap_offset;
			mmap_offset += subbuf_size;
		}
	}

	clear_current_oom_origin();
	vfree(pages);
	return 0;

free_wsb:
	kvfree(bufb->buf_wsb);
free_array:
	for (i = 0; (i < num_subbuf_alloc && bufb->array[i]); i++)
		kvfree(bufb->array[i]);
depopulate:
	/* Free all allocated pages */
	for (i = 0; (i < num_pages && pages[i]); i++)
		__free_page(pages[i]);
	kvfree(bufb->array);
array_error:
	vfree(pages);
pages_error:
	clear_current_oom_origin();
not_enough_pages:
	return -ENOMEM;
}

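/*
 * Worked example of the sizing math above (illustrative only, not part of
 * the original file): with 4 kB pages, a 64 kB buffer split into 4
 * sub-buffers gives num_pages = 65536 >> PAGE_SHIFT = 16 and
 * num_pages_per_subbuf = 16 >> get_count_order(4) = 4. In overwrite mode,
 * the extra reader sub-buffer raises the allocation to 5 sub-buffers,
 * i.e. 20 pages.
 */
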
int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
				   struct channel_backend *chanb, int cpu)
{
	const struct lib_ring_buffer_config *config = &chanb->config;

	bufb->chan = container_of(chanb, struct channel, backend);
	bufb->cpu = cpu;

	return lib_ring_buffer_backend_allocate(config, bufb, chanb->buf_size,
						chanb->num_subbuf,
						chanb->extra_reader_sb);
}

void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	unsigned long i, j, num_subbuf_alloc;

	num_subbuf_alloc = chanb->num_subbuf;
	if (chanb->extra_reader_sb)
		num_subbuf_alloc++;

	kvfree(bufb->buf_wsb);
	kvfree(bufb->buf_cnt);
	for (i = 0; i < num_subbuf_alloc; i++) {
		for (j = 0; j < bufb->num_pages_per_subbuf; j++)
			__free_page(pfn_to_page(bufb->array[i]->p[j].pfn));
		kvfree(bufb->array[i]);
	}
	kvfree(bufb->array);
	bufb->allocated = 0;
}

void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long num_subbuf_alloc;
	unsigned int i;

	num_subbuf_alloc = chanb->num_subbuf;
	if (chanb->extra_reader_sb)
		num_subbuf_alloc++;

	for (i = 0; i < chanb->num_subbuf; i++)
		bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);
	if (chanb->extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	for (i = 0; i < num_subbuf_alloc; i++) {
		/* Don't reset mmap_offset */
		v_set(config, &bufb->array[i]->records_commit, 0);
		v_set(config, &bufb->array[i]->records_unread, 0);
		bufb->array[i]->data_size = 0;
		/* Don't reset backend page and virt addresses */
	}
	/* Don't reset num_pages_per_subbuf, cpu, allocated */
	v_set(config, &bufb->records_read, 0);
}

/*
 * The frontend is responsible for also calling ring_buffer_backend_reset for
 * each buffer when calling channel_backend_reset.
 */
void channel_backend_reset(struct channel_backend *chanb)
{
	struct channel *chan = container_of(chanb, struct channel, backend);
	const struct lib_ring_buffer_config *config = &chanb->config;

	/*
	 * Don't reset buf_size, subbuf_size, subbuf_size_order,
	 * num_subbuf_order, buf_size_order, extra_reader_sb, num_subbuf,
	 * priv, notifiers, config, cpumask and name.
	 */
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
}

/*
 * No need to implement a "dead" callback to do a buffer switch here,
 * because it will happen when tracing is stopped, or will be done by the
 * switch timer's CPU DEAD callback.
 * We don't free buffers when a CPU goes away, because that would make
 * trace data vanish, which is unwanted.
 */
int lttng_cpuhp_rb_backend_prepare(unsigned int cpu,
		struct lttng_cpuhp_node *node)
{
	struct channel_backend *chanb = container_of(node,
			struct channel_backend, cpuhp_prepare);
	const struct lib_ring_buffer_config *config = &chanb->config;
	struct lib_ring_buffer *buf;
	int ret;

	CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);

	buf = per_cpu_ptr(chanb->buf, cpu);
	ret = lib_ring_buffer_create(buf, chanb, cpu);
	if (ret) {
		printk(KERN_ERR
			"ring_buffer_cpu_hp_callback: cpu %d "
			"buffer creation failed\n", cpu);
		return ret;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_backend_prepare);

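/*
 * Note: lttng_rb_hp_prepare is a dynamic cpuhp state assumed to be set up
 * once at module load, roughly along these lines (a sketch only;
 * lttng_rb_dispatch_prepare is a hypothetical name for the dispatcher that
 * maps the hlist_node back to the lttng_cpuhp_node and routes it to the
 * callback above, which lives elsewhere in the tree):
 *
 *	ret = cpuhp_setup_state_multi(CPUHP_BP_PREPARE_DYN,
 *			"lttng/rb:prepare", lttng_rb_dispatch_prepare, NULL);
 *	if (ret < 0)
 *		return ret;
 *	lttng_rb_hp_prepare = ret;
 */
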
/**
 * channel_backend_init - initialize a channel backend
 * @chanb: channel backend
 * @name: channel name
 * @config: client ring buffer configuration
 * @priv: client private data
 * @subbuf_size: size of sub-buffers (>= PAGE_SIZE, power of 2)
 * @num_subbuf: number of sub-buffers (power of 2)
 *
 * Returns 0 on success, a negative error value otherwise.
 *
 * Creates per-cpu channel buffers using the sizes and attributes
 * specified.
 *
 * Called with CPU hotplug disabled.
 */
int channel_backend_init(struct channel_backend *chanb,
			 const char *name,
			 const struct lib_ring_buffer_config *config,
			 void *priv, size_t subbuf_size, size_t num_subbuf)
{
	struct channel *chan = container_of(chanb, struct channel, backend);
	unsigned int i;
	int ret;

	if (!name)
		return -EPERM;

	/* Check that the subbuffer size is at least a page. */
	if (subbuf_size < PAGE_SIZE)
		return -EINVAL;

	/*
	 * Make sure the number of subbuffers and subbuffer size are
	 * power of 2 and nonzero.
	 */
	if (!subbuf_size || (subbuf_size & (subbuf_size - 1)))
		return -EINVAL;
	if (!num_subbuf || (num_subbuf & (num_subbuf - 1)))
		return -EINVAL;
	/*
	 * Overwrite mode buffers require at least 2 subbuffers per
	 * buffer.
	 */
	if (config->mode == RING_BUFFER_OVERWRITE && num_subbuf < 2)
		return -EINVAL;

	ret = subbuffer_id_check_index(config, num_subbuf);
	if (ret)
		return ret;

	chanb->priv = priv;
	chanb->buf_size = num_subbuf * subbuf_size;
	chanb->subbuf_size = subbuf_size;
	chanb->buf_size_order = get_count_order(chanb->buf_size);
	chanb->subbuf_size_order = get_count_order(subbuf_size);
	chanb->num_subbuf_order = get_count_order(num_subbuf);
	chanb->extra_reader_sb =
		(config->mode == RING_BUFFER_OVERWRITE) ? 1 : 0;
	chanb->num_subbuf = num_subbuf;
	strlcpy(chanb->name, name, NAME_MAX);
	memcpy(&chanb->config, config, sizeof(chanb->config));

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		if (!zalloc_cpumask_var(&chanb->cpumask, GFP_KERNEL))
			return -ENOMEM;

		/* Allocating the buffer per-cpu structures */
		chanb->buf = alloc_percpu(struct lib_ring_buffer);
		if (!chanb->buf)
			goto free_cpumask;

		chanb->cpuhp_prepare.component = LTTNG_RING_BUFFER_BACKEND;
		ret = cpuhp_state_add_instance(lttng_rb_hp_prepare,
				&chanb->cpuhp_prepare.node);
		if (ret)
			goto free_bufs;
	} else {
		chanb->buf = kzalloc(sizeof(struct lib_ring_buffer), GFP_KERNEL);
		if (!chanb->buf)
			goto free_cpumask;
		ret = lib_ring_buffer_create(chanb->buf, chanb, -1);
		if (ret)
			goto free_bufs;
	}
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);

	return 0;

free_bufs:
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		/*
		 * Teardown of the lttng_rb_hp_prepare instance on "add"
		 * error is handled within cpu hotplug; there is no teardown
		 * to do from the caller.
		 */
		for_each_possible_cpu(i) {
			struct lib_ring_buffer *buf =
				per_cpu_ptr(chanb->buf, i);

			if (!buf->backend.allocated)
				continue;
			lib_ring_buffer_free(buf);
		}
		free_percpu(chanb->buf);
	} else
		kfree(chanb->buf);
free_cpumask:
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
		free_cpumask_var(chanb->cpumask);
	return -ENOMEM;
}

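/*
 * Usage sketch (hypothetical client code, illustrative only; "my_chan",
 * client_config and priv are assumptions): a power-of-2 geometry such as
 * 4 sub-buffers of 16 pages each satisfies the checks above.
 *
 *	ret = channel_backend_init(&chan->backend, "my_chan", &client_config,
 *			priv, 16 * PAGE_SIZE, 4);
 *	if (ret)
 *		return ret;
 */
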
/**
 * channel_backend_unregister_notifiers - unregister notifiers
 * @chanb: the channel backend
 *
 * Holds CPU hotplug.
 */
void channel_backend_unregister_notifiers(struct channel_backend *chanb)
{
	const struct lib_ring_buffer_config *config = &chanb->config;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		int ret;

		ret = cpuhp_state_remove_instance(lttng_rb_hp_prepare,
				&chanb->cpuhp_prepare.node);
		WARN_ON(ret);
	}
}

/**
 * channel_backend_free - destroy the channel
 * @chanb: the channel backend
 *
 * Destroys all channel buffers and frees the channel backend.
 */
void channel_backend_free(struct channel_backend *chanb)
{
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned int i;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		for_each_possible_cpu(i) {
			struct lib_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);

			if (!buf->backend.allocated)
				continue;
			lib_ring_buffer_free(buf);
		}
		free_cpumask_var(chanb->cpumask);
		free_percpu(chanb->buf);
	} else {
		struct lib_ring_buffer *buf = chanb->buf;

		CHAN_WARN_ON(chanb, !buf->backend.allocated);
		lib_ring_buffer_free(buf);
		kfree(buf);
	}
}

/**
 * lib_ring_buffer_write - write data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : number of bytes already copied on the current page
 */
void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb, size_t offset,
			    const void *src, size_t len, size_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	do {
		len -= pagecpy;
		src += pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		lib_ring_buffer_do_copy(config,
					rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
					src, pagecpy);
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_write);

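/*
 * Worked example of the index math above (illustrative): with a 16 kB
 * sub-buffer size (subbuf_size_order = 14) and 4 kB pages, offset 0x5000
 * yields sbidx = 0x5000 >> 14 = 1 and
 * index = (0x5000 & 0x3fff) >> PAGE_SHIFT = 1, i.e. the second page of
 * the second sub-buffer.
 */
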
/**
 * lib_ring_buffer_memset - write len bytes of c to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @c : the byte to write
 * @len : length to write
 * @pagecpy : number of bytes already copied on the current page
 */
void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
			     size_t offset,
			     int c, size_t len, size_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	do {
		len -= pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		lib_ring_buffer_do_memset(rpages->p[index].virt
					  + (offset & ~PAGE_MASK),
					  c, pagecpy);
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_memset);

/**
 * lib_ring_buffer_strcpy - write string data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : number of bytes already copied on the current page
 * @pad : character to use for padding
 */
void _lib_ring_buffer_strcpy(struct lib_ring_buffer_backend *bufb,
			     size_t offset, const char *src, size_t len,
			     size_t pagecpy, int pad)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	int src_terminated = 0;

	CHAN_WARN_ON(chanb, !len);
	offset += pagecpy;
	do {
		len -= pagecpy;
		if (!src_terminated)
			src += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));

		if (likely(!src_terminated)) {
			size_t count, to_copy;

			to_copy = pagecpy;
			if (pagecpy == len)
				to_copy--;	/* Final '\0' */
			count = lib_ring_buffer_do_strcpy(config,
					rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
					src, to_copy);
			offset += count;
			/* Padding */
			if (unlikely(count < to_copy)) {
				size_t pad_len = to_copy - count;

				/* Next pages will have padding */
				src_terminated = 1;
				lib_ring_buffer_do_memset(rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
						pad, pad_len);
				offset += pad_len;
			}
		} else {
			size_t pad_len;

			pad_len = pagecpy;
			if (pagecpy == len)
				pad_len--;	/* Final '\0' */
			lib_ring_buffer_do_memset(rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
					pad, pad_len);
			offset += pad_len;
		}
	} while (unlikely(len != pagecpy));
	/* Ending '\0' */
	lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
				  '\0', 1);
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy);

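/*
 * Example of the resulting layout (illustrative): writing src = "hi" with
 * len = 8 and pad = '#' stores the bytes 'h' 'i' '#' '#' '#' '#' '#' '\0',
 * so the record always occupies exactly len bytes and stays nul-terminated
 * even when src is shorter than the reserved space.
 */
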
/**
 * lib_ring_buffer_copy_from_user_inatomic - write user data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : number of bytes already copied on the current page
 *
 * This function deals with userspace pointers; it should never be called
 * directly without the src pointer having been checked with access_ok()
 * beforehand.
 */
void _lib_ring_buffer_copy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
				      size_t offset,
				      const void __user *src, size_t len,
				      size_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	int ret;

	do {
		len -= pagecpy;
		src += pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		ret = lib_ring_buffer_do_copy_from_user_inatomic(rpages->p[index].virt
							+ (offset & ~PAGE_MASK),
							src, pagecpy) != 0;
		if (ret > 0) {
			/* Copy failed. */
			_lib_ring_buffer_memset(bufb, offset, 0, len, 0);
			break; /* stop copy */
		}
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_copy_from_user_inatomic);

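/*
 * Illustrative call shape (a sketch, not the actual fast path): the caller
 * validates the user pointer first, then the slow path finishes a copy
 * whose first pagecpy bytes were already written; passing pagecpy = 0
 * copies everything, as the memset fallback above does.
 *
 *	if (access_ok(src, len))	// argument list varies by kernel version
 *		_lib_ring_buffer_copy_from_user_inatomic(bufb, offset, src,
 *							 len, 0);
 */
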
/**
 * lib_ring_buffer_strcpy_from_user_inatomic - write userspace string data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : number of bytes already copied on the current page
 * @pad : character to use for padding
 *
 * This function deals with userspace pointers; it should never be called
 * directly without the src pointer having been checked with access_ok()
 * beforehand.
 */
void _lib_ring_buffer_strcpy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
		size_t offset, const char __user *src, size_t len,
		size_t pagecpy, int pad)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	int src_terminated = 0;

	offset += pagecpy;
	do {
		len -= pagecpy;
		if (!src_terminated)
			src += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));

		if (likely(!src_terminated)) {
			size_t count, to_copy;

			to_copy = pagecpy;
			if (pagecpy == len)
				to_copy--;	/* Final '\0' */
			count = lib_ring_buffer_do_strcpy_from_user_inatomic(config,
					rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
					src, to_copy);
			offset += count;
			/* Padding */
			if (unlikely(count < to_copy)) {
				size_t pad_len = to_copy - count;

				/* Next pages will have padding */
				src_terminated = 1;
				lib_ring_buffer_do_memset(rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
						pad, pad_len);
				offset += pad_len;
			}
		} else {
			size_t pad_len;

			pad_len = pagecpy;
			if (pagecpy == len)
				pad_len--;	/* Final '\0' */
			lib_ring_buffer_do_memset(rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
					pad, pad_len);
			offset += pad_len;
		}
	} while (unlikely(len != pagecpy));
	/* Ending '\0' */
	lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
				  '\0', 1);
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy_from_user_inatomic);

/**
 * lib_ring_buffer_read - read data from a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : length to copy to destination
 *
 * Should be protected by get_subbuf/put_subbuf.
 * Returns the length copied.
 */
size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb, size_t offset,
			    void *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t index, pagecpy, orig_len;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	orig_len = len;
	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	if (unlikely(!len))
		return 0;
	for (;;) {
		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		memcpy(dest, rpages->p[index].virt + (offset & ~PAGE_MASK),
		       pagecpy);
		len -= pagecpy;
		if (likely(!len))
			break;
		dest += pagecpy;
		offset += pagecpy;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	return orig_len;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read);

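/*
 * Read-side usage sketch (illustrative; lib_ring_buffer_get_subbuf and
 * lib_ring_buffer_put_subbuf are frontend calls, and the real consumer
 * code lives outside this file):
 *
 *	if (!lib_ring_buffer_get_subbuf(buf, consumed)) {
 *		len = lib_ring_buffer_read(&buf->backend, read_offset,
 *					   dest, len);
 *		lib_ring_buffer_put_subbuf(buf);
 *	}
 */
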
/**
 * __lib_ring_buffer_copy_to_user - read data from ring_buffer to userspace
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination userspace address
 * @len : length to copy to destination
 *
 * Should be protected by get_subbuf/put_subbuf.
 * access_ok() must have been performed on dest addresses prior to calling
 * this function.
 * Returns -EFAULT on error, 0 if ok.
 */
int __lib_ring_buffer_copy_to_user(struct lib_ring_buffer_backend *bufb,
				   size_t offset, void __user *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t index;
	ssize_t pagecpy;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	if (unlikely(!len))
		return 0;
	for (;;) {
		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		if (__copy_to_user(dest,
				   rpages->p[index].virt + (offset & ~PAGE_MASK),
				   pagecpy))
			return -EFAULT;
		len -= pagecpy;
		if (likely(!len))
			break;
		dest += pagecpy;
		offset += pagecpy;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__lib_ring_buffer_copy_to_user);

/**
 * lib_ring_buffer_read_cstr - read a C-style string from ring_buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : destination's length
 *
 * Returns the string's length, or -EINVAL on error.
 * Should be protected by get_subbuf/put_subbuf.
 * Destination length should be at least 1 to hold '\0'.
 */
int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb, size_t offset,
			      void *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t index;
	ssize_t pagecpy, pagelen, strpagelen, orig_offset;
	char *str;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	orig_offset = offset;
	if (unlikely(!len))
		return -EINVAL;
	for (;;) {
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		str = (char *)rpages->p[index].virt + (offset & ~PAGE_MASK);
		pagelen = PAGE_SIZE - (offset & ~PAGE_MASK);
		strpagelen = strnlen(str, pagelen);
		if (len) {
			pagecpy = min_t(size_t, len, strpagelen);
			if (dest) {
				memcpy(dest, str, pagecpy);
				dest += pagecpy;
			}
			len -= pagecpy;
		}
		offset += strpagelen;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		if (strpagelen < pagelen)
			break;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	if (dest && len)
		((char *)dest)[0] = 0;
	return offset - orig_offset;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_cstr);

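/*
 * Example (illustrative): copying a string field into a fixed-size buffer.
 * The return value is the string's length in the ring buffer, so a value
 * >= sizeof(namebuf) indicates truncation (and, per the loop above, only
 * strings that fit with room to spare receive the trailing '\0').
 *
 *	char namebuf[64];
 *	int slen;
 *
 *	slen = lib_ring_buffer_read_cstr(&buf->backend, offset,
 *					 namebuf, sizeof(namebuf));
 */
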
/**
 * lib_ring_buffer_read_get_pfn - Get a page frame number to read from
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @virt : pointer to page address (output)
 *
 * Should be protected by get_subbuf/put_subbuf.
 * Returns a pointer to the page frame number (an unsigned long).
 */
unsigned long *lib_ring_buffer_read_get_pfn(struct lib_ring_buffer_backend *bufb,
					    size_t offset, void ***virt)
{
	size_t index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	*virt = &rpages->p[index].virt;
	return &rpages->p[index].pfn;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_get_pfn);

/**
 * lib_ring_buffer_read_offset_address - get address of a buffer location
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located (for read).
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's never on a page boundary, it's safe to read/write directly
 * from/to this address, as long as the read/write is never bigger than a
 * page size.
 */
void *lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
					  size_t offset)
{
	size_t index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	return rpages->p[index].virt + (offset & ~PAGE_MASK);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_offset_address);

/**
 * lib_ring_buffer_offset_address - get address of a location within the buffer
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located.
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's always at the beginning of a page, it's safe to write directly to this
 * address, as long as the write is never bigger than a page size.
 */
void *lib_ring_buffer_offset_address(struct lib_ring_buffer_backend *bufb,
				     size_t offset)
{
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	sbidx = offset >> chanb->subbuf_size_order;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_wsb[sbidx].id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	return rpages->p[index].virt + (offset & ~PAGE_MASK);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_offset_address);