lttng-ust.git: libringbuffer/ring_buffer_backend.c (commit 412180bdfc8c16ab2252410ec12971cee7a075f6)
/*
 * ring_buffer_backend.c
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _LGPL_SOURCE
#include <stddef.h>
#include <stdint.h>
#include <unistd.h>
#include <urcu/arch.h>
#include <limits.h>

#include <lttng/align.h>
#include <lttng/ringbuffer-config.h>
#include "vatomic.h"
#include "backend.h"
#include "frontend.h"
#include "smp.h"
#include "shm.h"

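/*
 * Note: the ring buffer data structures live in shared memory that is
 * mapped at different addresses in the tracer and consumer processes.
 * The shmp(), shmp_index() and set_shmp() helpers convert the
 * (object, offset) references stored in shared memory into pointers
 * valid in the current mapping, returning NULL when a reference cannot
 * be resolved, hence the systematic NULL checks throughout this file.
 */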
/**
 * lib_ring_buffer_backend_allocate - allocate a channel buffer
 * @config: ring buffer instance configuration
 * @bufb: the buffer backend
 * @size: total size of the buffer
 * @num_subbuf: number of subbuffers
 * @extra_reader_sb: need extra subbuffer for reader
 * @handle: shared memory handle
 * @shmobj: shared memory object backing this buffer
 */
static
int lib_ring_buffer_backend_allocate(const struct lttng_ust_lib_ring_buffer_config *config,
				     struct lttng_ust_lib_ring_buffer_backend *bufb,
				     size_t size, size_t num_subbuf,
				     int extra_reader_sb,
				     struct lttng_ust_shm_handle *handle,
				     struct shm_object *shmobj)
{
	struct channel_backend *chanb;
	unsigned long subbuf_size, mmap_offset = 0;
	unsigned long num_subbuf_alloc;
	unsigned long i;
	long page_size;

	chanb = &shmp(handle, bufb->chan)->backend;
	if (!chanb)
		return -EINVAL;

	subbuf_size = chanb->subbuf_size;
	num_subbuf_alloc = num_subbuf;

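	/*
	 * In overwrite (flight recorder) mode, one spare sub-buffer is
	 * allocated beyond what the writer uses: the reader takes
	 * ownership of a full sub-buffer by exchanging it against this
	 * spare one rather than copying the data out.
	 */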
	if (extra_reader_sb)
		num_subbuf_alloc++;

	page_size = LTTNG_UST_PAGE_SIZE;
	if (page_size <= 0) {
		goto page_size_error;
	}

	align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages_shmp));
	set_shmp(bufb->array, zalloc_shm(shmobj,
			sizeof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp) * num_subbuf_alloc));
	if (caa_unlikely(!shmp(handle, bufb->array)))
		goto array_error;

	/*
	 * This is the largest element (the buffer pages) which needs to
	 * be aligned on page size.
	 */
	align_shm(shmobj, page_size);
	set_shmp(bufb->memory_map, zalloc_shm(shmobj,
			subbuf_size * num_subbuf_alloc));
	if (caa_unlikely(!shmp(handle, bufb->memory_map)))
		goto memory_map_error;

	/* Allocate backend pages array elements */
	for (i = 0; i < num_subbuf_alloc; i++) {
		align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages));
		set_shmp(shmp_index(handle, bufb->array, i)->shmp,
			zalloc_shm(shmobj,
				sizeof(struct lttng_ust_lib_ring_buffer_backend_pages)));
		if (!shmp(handle, shmp_index(handle, bufb->array, i)->shmp))
			goto free_array;
	}

	/* Allocate write-side subbuffer table */
	align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_subbuffer));
	set_shmp(bufb->buf_wsb, zalloc_shm(shmobj,
			sizeof(struct lttng_ust_lib_ring_buffer_backend_subbuffer)
			* num_subbuf));
	if (caa_unlikely(!shmp(handle, bufb->buf_wsb)))
		goto free_array;

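	/*
	 * Each write-side table entry holds a packed sub-buffer id;
	 * subbuffer_id() encodes the sub-buffer index together with
	 * bookkeeping bits (such as the noref flag) in a single word.
	 */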
	for (i = 0; i < num_subbuf; i++) {
		struct lttng_ust_lib_ring_buffer_backend_subbuffer *sb;

		sb = shmp_index(handle, bufb->buf_wsb, i);
		if (!sb)
			goto free_array;
		sb->id = subbuffer_id(config, 0, 1, i);
	}

	/* Assign read-side subbuffer table */
	if (extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	/* Allocate subbuffer packet counter table */
	align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_counts));
	set_shmp(bufb->buf_cnt, zalloc_shm(shmobj,
			sizeof(struct lttng_ust_lib_ring_buffer_backend_counts)
			* num_subbuf));
	if (caa_unlikely(!shmp(handle, bufb->buf_cnt)))
		goto free_wsb;

	/* Assign pages to page index */
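	/*
	 * Every sub-buffer's pages descriptor references its own slice
	 * of the single contiguous memory_map allocation made above,
	 * at offset i * subbuf_size.
	 */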
	for (i = 0; i < num_subbuf_alloc; i++) {
		struct lttng_ust_lib_ring_buffer_backend_pages_shmp *sbp;
		struct lttng_ust_lib_ring_buffer_backend_pages *pages;
		struct shm_ref ref;

		ref.index = bufb->memory_map._ref.index;
		ref.offset = bufb->memory_map._ref.offset;
		ref.offset += i * subbuf_size;

		sbp = shmp_index(handle, bufb->array, i);
		if (!sbp)
			goto free_array;
		pages = shmp(handle, sbp->shmp);
		if (!pages)
			goto free_array;
		set_shmp(pages->p, ref);
		if (config->output == RING_BUFFER_MMAP) {
			pages->mmap_offset = mmap_offset;
			mmap_offset += subbuf_size;
		}
	}
	return 0;

free_wsb:
	/* bufb->buf_wsb will be freed by shm teardown */
free_array:
	/* bufb->array[i] will be freed by shm teardown */
memory_map_error:
	/* bufb->array will be freed by shm teardown */
array_error:
page_size_error:
	return -ENOMEM;
}

int lib_ring_buffer_backend_create(struct lttng_ust_lib_ring_buffer_backend *bufb,
				   struct channel_backend *chanb, int cpu,
				   struct lttng_ust_shm_handle *handle,
				   struct shm_object *shmobj)
{
	const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;

	set_shmp(bufb->chan, handle->chan._ref);
	bufb->cpu = cpu;

	return lib_ring_buffer_backend_allocate(config, bufb, chanb->buf_size,
						chanb->num_subbuf,
						chanb->extra_reader_sb,
						handle, shmobj);
}

void lib_ring_buffer_backend_reset(struct lttng_ust_lib_ring_buffer_backend *bufb,
				   struct lttng_ust_shm_handle *handle)
{
	struct channel_backend *chanb;
	const struct lttng_ust_lib_ring_buffer_config *config;
	unsigned long num_subbuf_alloc;
	unsigned int i;

	chanb = &shmp(handle, bufb->chan)->backend;
	if (!chanb)
		return;
	config = &chanb->config;

	num_subbuf_alloc = chanb->num_subbuf;
	if (chanb->extra_reader_sb)
		num_subbuf_alloc++;

	for (i = 0; i < chanb->num_subbuf; i++) {
		struct lttng_ust_lib_ring_buffer_backend_subbuffer *sb;

		sb = shmp_index(handle, bufb->buf_wsb, i);
		if (!sb)
			return;
		sb->id = subbuffer_id(config, 0, 1, i);
	}
	if (chanb->extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	for (i = 0; i < num_subbuf_alloc; i++) {
		struct lttng_ust_lib_ring_buffer_backend_pages_shmp *sbp;
		struct lttng_ust_lib_ring_buffer_backend_pages *pages;

		sbp = shmp_index(handle, bufb->array, i);
		if (!sbp)
			return;
		pages = shmp(handle, sbp->shmp);
		if (!pages)
			return;
		/* Don't reset mmap_offset */
		v_set(config, &pages->records_commit, 0);
		v_set(config, &pages->records_unread, 0);
		pages->data_size = 0;
		/* Don't reset backend page and virt addresses */
	}
	/* Don't reset num_pages_per_subbuf, cpu, allocated */
	v_set(config, &bufb->records_read, 0);
}

/*
 * The frontend is responsible for also calling ring_buffer_backend_reset for
 * each buffer when calling channel_backend_reset.
 */
void channel_backend_reset(struct channel_backend *chanb)
{
	struct channel *chan = caa_container_of(chanb, struct channel, backend);
	const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;

	/*
	 * Don't reset buf_size, subbuf_size, subbuf_size_order,
	 * num_subbuf_order, buf_size_order, extra_reader_sb, num_subbuf,
	 * priv, notifiers, config, cpumask and name.
	 */
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
}

/**
 * channel_backend_init - initialize a channel backend
 * @chanb: channel backend
 * @name: channel name
 * @config: client ring buffer configuration
 * @subbuf_size: size of sub-buffers (>= page size, power of 2)
 * @num_subbuf: number of sub-buffers (power of 2)
 * @handle: shared memory handle
 * @stream_fds: stream file descriptors.
 *
 * Returns 0 on success, a negative error code otherwise.
 *
 * Creates per-cpu channel buffers using the sizes and attributes
 * specified. The created channel buffers are named name_0...name_N-1.
 *
 * Called with CPU hotplug disabled.
 */
int channel_backend_init(struct channel_backend *chanb,
			 const char *name,
			 const struct lttng_ust_lib_ring_buffer_config *config,
			 size_t subbuf_size, size_t num_subbuf,
			 struct lttng_ust_shm_handle *handle,
			 const int *stream_fds)
{
	struct channel *chan = caa_container_of(chanb, struct channel, backend);
	unsigned int i;
	int ret;
	size_t shmsize = 0, num_subbuf_alloc;
	long page_size;

	if (!name)
		return -EPERM;

	page_size = LTTNG_UST_PAGE_SIZE;
	if (page_size <= 0) {
		return -ENOMEM;
	}
	/* Check that the subbuffer size is at least a page. */
	if (subbuf_size < page_size)
		return -EINVAL;

	/*
	 * Make sure the number of subbuffers and subbuffer size are
	 * powers of 2, and nonzero.
	 */
	if (!subbuf_size || (subbuf_size & (subbuf_size - 1)))
		return -EINVAL;
	if (!num_subbuf || (num_subbuf & (num_subbuf - 1)))
		return -EINVAL;
	/*
	 * Overwrite mode buffers require at least 2 subbuffers per
	 * buffer.
	 */
	if (config->mode == RING_BUFFER_OVERWRITE && num_subbuf < 2)
		return -EINVAL;

	ret = subbuffer_id_check_index(config, num_subbuf);
	if (ret)
		return ret;

	chanb->buf_size = num_subbuf * subbuf_size;
	chanb->subbuf_size = subbuf_size;
	chanb->buf_size_order = get_count_order(chanb->buf_size);
	chanb->subbuf_size_order = get_count_order(subbuf_size);
	chanb->num_subbuf_order = get_count_order(num_subbuf);
	chanb->extra_reader_sb =
		(config->mode == RING_BUFFER_OVERWRITE) ? 1 : 0;
	chanb->num_subbuf = num_subbuf;
	strncpy(chanb->name, name, NAME_MAX);
	chanb->name[NAME_MAX - 1] = '\0';
	memcpy(&chanb->config, config, sizeof(*config));

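	/*
	 * Size the shm object so it can hold the worst case of
	 * everything that will later be carved out of it: the control
	 * structures allocated by lib_ring_buffer_create() and the
	 * backend arrays and pages allocated by
	 * lib_ring_buffer_backend_allocate() above, with matching
	 * alignment at each step.
	 */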
	/* Per-cpu buffer size: control (prior to backend) */
	shmsize = lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer));
	shmsize += sizeof(struct lttng_ust_lib_ring_buffer);
	shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct commit_counters_hot));
	shmsize += sizeof(struct commit_counters_hot) * num_subbuf;
	shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct commit_counters_cold));
	shmsize += sizeof(struct commit_counters_cold) * num_subbuf;
	/* Sampled timestamp end */
	shmsize += lttng_ust_offset_align(shmsize, __alignof__(uint64_t));
	shmsize += sizeof(uint64_t) * num_subbuf;

	/* Per-cpu buffer size: backend */
	/* num_subbuf + 1 is the worst case */
	num_subbuf_alloc = num_subbuf + 1;
	shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages_shmp));
	shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp) * num_subbuf_alloc;
	shmsize += lttng_ust_offset_align(shmsize, page_size);
	shmsize += subbuf_size * num_subbuf_alloc;
	shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages));
	shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_pages) * num_subbuf_alloc;
	shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_subbuffer));
	shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_subbuffer) * num_subbuf;
	shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_counts));
	shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_counts) * num_subbuf;

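	/*
	 * Per-cpu channels get one shm object per possible CPU, each
	 * backed by its own stream file descriptor; global channels
	 * use a single shm object backed by stream_fds[0].
	 */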
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		struct lttng_ust_lib_ring_buffer *buf;
		/*
		 * We need to allocate for all possible cpus.
		 */
		for_each_possible_cpu(i) {
			struct shm_object *shmobj;

			shmobj = shm_object_table_alloc(handle->table, shmsize,
					SHM_OBJECT_SHM, stream_fds[i], i);
			if (!shmobj)
				goto end;
			align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer));
			set_shmp(chanb->buf[i].shmp, zalloc_shm(shmobj, sizeof(struct lttng_ust_lib_ring_buffer)));
			buf = shmp(handle, chanb->buf[i].shmp);
			if (!buf)
				goto end;
			set_shmp(buf->self, chanb->buf[i].shmp._ref);
			ret = lib_ring_buffer_create(buf, chanb, i,
					handle, shmobj);
			if (ret)
				goto free_bufs;	/* cpu hotplug locked */
		}
	} else {
		struct shm_object *shmobj;
		struct lttng_ust_lib_ring_buffer *buf;

		shmobj = shm_object_table_alloc(handle->table, shmsize,
				SHM_OBJECT_SHM, stream_fds[0], -1);
		if (!shmobj)
			goto end;
		align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer));
		set_shmp(chanb->buf[0].shmp, zalloc_shm(shmobj, sizeof(struct lttng_ust_lib_ring_buffer)));
		buf = shmp(handle, chanb->buf[0].shmp);
		if (!buf)
			goto end;
		set_shmp(buf->self, chanb->buf[0].shmp._ref);
		ret = lib_ring_buffer_create(buf, chanb, -1,
				handle, shmobj);
		if (ret)
			goto free_bufs;
	}
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);

	return 0;

free_bufs:
	/* We only free the buffer data upon shm teardown */
end:
	return -ENOMEM;
}

/**
 * channel_backend_free - destroy the channel
 * @chanb: the channel backend
 * @handle: shared memory handle
 *
 * Destroys all channel buffers and frees the channel.
 */
void channel_backend_free(struct channel_backend *chanb,
			  struct lttng_ust_shm_handle *handle)
{
	/* SHM teardown takes care of everything */
}

/**
 * lib_ring_buffer_read - read data from the ring buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : length to copy to destination
 * @handle : shared memory handle
 *
 * Should be protected by get_subbuf/put_subbuf.
 * Returns the length copied.
 */
size_t lib_ring_buffer_read(struct lttng_ust_lib_ring_buffer_backend *bufb, size_t offset,
			    void *dest, size_t len, struct lttng_ust_shm_handle *handle)
{
	struct channel_backend *chanb;
	const struct lttng_ust_lib_ring_buffer_config *config;
	ssize_t orig_len;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
	unsigned long sb_bindex, id;
	void *src;

	chanb = &shmp(handle, bufb->chan)->backend;
	if (!chanb)
		return 0;
	config = &chanb->config;
	orig_len = len;
	offset &= chanb->buf_size - 1;

	if (caa_unlikely(!len))
		return 0;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return 0;
	/*
	 * Underlying layer should never ask for reads across
	 * subbuffers.
	 */
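	/*
	 * In overwrite mode, the read-side sub-buffer is expected to
	 * have its noref flag cleared while the reader owns it
	 * (between get_subbuf and put_subbuf), hence the second
	 * warning below.
	 */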
	CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return 0;
	src = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
	if (caa_unlikely(!src))
		return 0;
	memcpy(dest, src, len);
	return orig_len;
}

/**
 * lib_ring_buffer_read_cstr - read a C-style string from the ring buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : destination's length
 *
 * Return string's length, or -EINVAL on error.
 * Should be protected by get_subbuf/put_subbuf.
 * Destination length should be at least 1 to hold '\0'.
 */
int lib_ring_buffer_read_cstr(struct lttng_ust_lib_ring_buffer_backend *bufb, size_t offset,
			      void *dest, size_t len, struct lttng_ust_shm_handle *handle)
{
	struct channel_backend *chanb;
	const struct lttng_ust_lib_ring_buffer_config *config;
	ssize_t string_len, orig_offset;
	char *str;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
	unsigned long sb_bindex, id;

	chanb = &shmp(handle, bufb->chan)->backend;
	if (!chanb)
		return -EINVAL;
	config = &chanb->config;
	if (caa_unlikely(!len))
		return -EINVAL;
	offset &= chanb->buf_size - 1;
	orig_offset = offset;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return -EINVAL;
	/*
	 * Underlying layer should never ask for reads across
	 * subbuffers.
	 */
	CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return -EINVAL;
	str = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
	if (caa_unlikely(!str))
		return -EINVAL;
	/* Copy at most len - 1 bytes so dest is always NUL-terminated. */
	string_len = strnlen(str, len - 1);
	if (dest && len) {
		memcpy(dest, str, string_len);
		((char *)dest)[string_len] = '\0';
	}
	return offset - orig_offset;
}

/**
 * lib_ring_buffer_read_offset_address - get address of a buffer location
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located (for read).
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's never on a page boundary, it's safe to read/write directly
 * from/to this address, as long as the read/write is never bigger than
 * a page size.
 */
void *lib_ring_buffer_read_offset_address(struct lttng_ust_lib_ring_buffer_backend *bufb,
					  size_t offset,
					  struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
	struct channel_backend *chanb;
	const struct lttng_ust_lib_ring_buffer_config *config;
	unsigned long sb_bindex, id;

	chanb = &shmp(handle, bufb->chan)->backend;
	if (!chanb)
		return NULL;
	config = &chanb->config;
	offset &= chanb->buf_size - 1;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return NULL;
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return NULL;
	return shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
}

/**
 * lib_ring_buffer_offset_address - get address of a location within the buffer
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located.
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's always at the beginning of a page, it's safe to write directly to this
 * address, as long as the write is never bigger than a page size.
 */
void *lib_ring_buffer_offset_address(struct lttng_ust_lib_ring_buffer_backend *bufb,
				     size_t offset,
				     struct lttng_ust_shm_handle *handle)
{
	size_t sbidx;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
	struct channel_backend *chanb;
	const struct lttng_ust_lib_ring_buffer_config *config;
	unsigned long sb_bindex, id;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *sb;

	chanb = &shmp(handle, bufb->chan)->backend;
	if (!chanb)
		return NULL;
	config = &chanb->config;
	offset &= chanb->buf_size - 1;
	sbidx = offset >> chanb->subbuf_size_order;
	sb = shmp_index(handle, bufb->buf_wsb, sbidx);
	if (!sb)
		return NULL;
	id = sb->id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return NULL;
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return NULL;
	return shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
}