/*
 * ring_buffer_backend.c
 *
 * Copyright (C) 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

#include <errno.h>	/* ENOMEM, EPERM, EINVAL */
#include <string.h>	/* memcpy(), strnlen(), strncpy() */
#include <urcu/arch.h>

#include "ust/core.h"

#include <ust/ringbuffer-config.h>
#include "backend.h"
#include "frontend.h"
#include "smp.h"
#include "shm.h"

/**
 * lib_ring_buffer_backend_allocate - allocate a channel buffer
 * @config: ring buffer instance configuration
 * @bufb: the buffer backend struct
 * @size: total size of the buffer
 * @num_subbuf: number of subbuffers
 * @extra_reader_sb: non-zero if an extra subbuffer is needed for the reader
 * @handle: shared memory handle
 * @shmobj: shared memory object backing this buffer
 */
static
int lib_ring_buffer_backend_allocate(const struct lttng_ust_lib_ring_buffer_config *config,
				     struct lttng_ust_lib_ring_buffer_backend *bufb,
				     size_t size, size_t num_subbuf,
				     int extra_reader_sb,
				     struct lttng_ust_shm_handle *handle,
				     struct shm_object *shmobj)
{
	struct channel_backend *chanb = &shmp(handle, bufb->chan)->backend;
	unsigned long subbuf_size, mmap_offset = 0;
	unsigned long num_subbuf_alloc;
	unsigned long i;

	subbuf_size = chanb->subbuf_size;
	num_subbuf_alloc = num_subbuf;

	if (extra_reader_sb)
		num_subbuf_alloc++;
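
	/*
	 * In overwrite mode, the reader owns one subbuffer in addition to
	 * those used by the writer: taking a subbuffer for reading
	 * exchanges the reader's subbuffer id with the one it wants to
	 * consume, so the writer keeps producing while the reader holds a
	 * stable snapshot. That is what the extra allocation above
	 * accounts for.
	 */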

	align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages_shmp));
	set_shmp(bufb->array, zalloc_shm(shmobj,
			sizeof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp) * num_subbuf_alloc));
	if (caa_unlikely(!shmp(handle, bufb->array)))
		goto array_error;

	/*
	 * The buffer pages are the largest element, and must be aligned
	 * on PAGE_SIZE.
	 */
	align_shm(shmobj, PAGE_SIZE);
	set_shmp(bufb->memory_map, zalloc_shm(shmobj,
			subbuf_size * num_subbuf_alloc));
	if (caa_unlikely(!shmp(handle, bufb->memory_map)))
		goto memory_map_error;

	/* Allocate backend pages array elements */
	for (i = 0; i < num_subbuf_alloc; i++) {
		align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages));
		set_shmp(shmp_index(handle, bufb->array, i)->shmp,
			 zalloc_shm(shmobj,
				sizeof(struct lttng_ust_lib_ring_buffer_backend_pages)));
		if (!shmp(handle, shmp_index(handle, bufb->array, i)->shmp))
			goto free_array;
	}

	/* Allocate write-side subbuffer table */
	align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_subbuffer));
	set_shmp(bufb->buf_wsb, zalloc_shm(shmobj,
			sizeof(struct lttng_ust_lib_ring_buffer_backend_subbuffer)
			* num_subbuf));
	if (caa_unlikely(!shmp(handle, bufb->buf_wsb)))
		goto free_array;

	for (i = 0; i < num_subbuf; i++)
		shmp_index(handle, bufb->buf_wsb, i)->id = subbuffer_id(config, 0, 1, i);

	/* Assign read-side subbuffer table */
	if (extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);
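
	/*
	 * Illustrative layout: with num_subbuf = 4 and an extra reader
	 * subbuffer, 5 subbuffers are allocated. buf_wsb[0..3] initially
	 * map to backend subbuffers 0..3 and buf_rsb to backend subbuffer
	 * 4; in overwrite mode the reader later swaps its id with a
	 * write-side id when it takes a subbuffer for reading.
	 */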

	/* Point each backend pages entry at its slice of the memory map */
	for (i = 0; i < num_subbuf_alloc; i++) {
		struct shm_ref ref;

		ref.index = bufb->memory_map._ref.index;
		ref.offset = bufb->memory_map._ref.offset;
		ref.offset += i * subbuf_size;

		set_shmp(shmp(handle, shmp_index(handle, bufb->array, i)->shmp)->p,
			 ref);
		if (config->output == RING_BUFFER_MMAP) {
			shmp(handle, shmp_index(handle, bufb->array, i)->shmp)->mmap_offset = mmap_offset;
			mmap_offset += subbuf_size;
		}
	}
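
	/*
	 * At this point each array entry i holds a shm reference into the
	 * single memory_map allocation, at byte offset i * subbuf_size:
	 * e.g. with subbuf_size = 4096 (illustrative), entry 2 refers to
	 * the bytes starting 8192 bytes into the memory map.
	 */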

	return 0;

free_array:
	/* bufb->array[i] will be freed by shm teardown */
memory_map_error:
	/* bufb->array will be freed by shm teardown */
array_error:
	return -ENOMEM;
}

int lib_ring_buffer_backend_create(struct lttng_ust_lib_ring_buffer_backend *bufb,
				   struct channel_backend *chanb, int cpu,
				   struct lttng_ust_shm_handle *handle,
				   struct shm_object *shmobj)
{
	const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;

	set_shmp(bufb->chan, handle->chan._ref);
	bufb->cpu = cpu;

	return lib_ring_buffer_backend_allocate(config, bufb, chanb->buf_size,
						chanb->num_subbuf,
						chanb->extra_reader_sb,
						handle, shmobj);
}

void lib_ring_buffer_backend_free(struct lttng_ust_lib_ring_buffer_backend *bufb)
{
	/* bufb->buf_wsb will be freed by shm teardown */
	/* bufb->array[i] will be freed by shm teardown */
	/* bufb->array will be freed by shm teardown */
	bufb->allocated = 0;
}

void lib_ring_buffer_backend_reset(struct lttng_ust_lib_ring_buffer_backend *bufb,
				   struct lttng_ust_shm_handle *handle)
{
	struct channel_backend *chanb = &shmp(handle, bufb->chan)->backend;
	const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
	unsigned long num_subbuf_alloc;
	unsigned int i;

	num_subbuf_alloc = chanb->num_subbuf;
	if (chanb->extra_reader_sb)
		num_subbuf_alloc++;

	for (i = 0; i < chanb->num_subbuf; i++)
		shmp_index(handle, bufb->buf_wsb, i)->id = subbuffer_id(config, 0, 1, i);
	if (chanb->extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	for (i = 0; i < num_subbuf_alloc; i++) {
		/* Don't reset mmap_offset */
		v_set(config, &shmp(handle, shmp_index(handle, bufb->array, i)->shmp)->records_commit, 0);
		v_set(config, &shmp(handle, shmp_index(handle, bufb->array, i)->shmp)->records_unread, 0);
		shmp(handle, shmp_index(handle, bufb->array, i)->shmp)->data_size = 0;
		/* Don't reset backend page and virt addresses */
	}
	/* Don't reset num_pages_per_subbuf, cpu, allocated */
	v_set(config, &bufb->records_read, 0);
}

/*
 * The frontend is responsible for also calling lib_ring_buffer_backend_reset
 * on each buffer when calling channel_backend_reset.
 */
void channel_backend_reset(struct channel_backend *chanb)
{
	struct channel *chan = caa_container_of(chanb, struct channel, backend);
	const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;

	/*
	 * Don't reset buf_size, subbuf_size, subbuf_size_order,
	 * num_subbuf_order, buf_size_order, extra_reader_sb, num_subbuf,
	 * priv, notifiers, config, cpumask and name.
	 */
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
}

/**
 * channel_backend_init - initialize a channel backend
 * @chanb: channel backend
 * @name: channel name
 * @config: client ring buffer configuration
 * @priv: client private data
 * @subbuf_size: size of sub-buffers (>= PAGE_SIZE, power of 2)
 * @num_subbuf: number of sub-buffers (power of 2)
 * @handle: shared memory handle
 *
 * Returns 0 on success, a negative error code otherwise.
 *
 * Creates per-cpu channel buffers (or a single global buffer) in shared
 * memory, using the sizes and attributes specified.
 */
int channel_backend_init(struct channel_backend *chanb,
			 const char *name,
			 const struct lttng_ust_lib_ring_buffer_config *config,
			 void *priv, size_t subbuf_size, size_t num_subbuf,
			 struct lttng_ust_shm_handle *handle)
{
	struct channel *chan = caa_container_of(chanb, struct channel, backend);
	unsigned int i;
	int ret;
	size_t shmsize = 0, num_subbuf_alloc;

	if (!name)
		return -EPERM;

	if (!(subbuf_size && num_subbuf))
		return -EPERM;

	/* Check that the subbuffer size is at least a page. */
	if (subbuf_size < PAGE_SIZE)
		return -EINVAL;

	/*
	 * Make sure the number of subbuffers and the subbuffer size are
	 * powers of 2.
	 */
	CHAN_WARN_ON(chanb, hweight32(subbuf_size) != 1);
	CHAN_WARN_ON(chanb, hweight32(num_subbuf) != 1);

	ret = subbuffer_id_check_index(config, num_subbuf);
	if (ret)
		return ret;

	chanb->priv = priv;
	chanb->buf_size = num_subbuf * subbuf_size;
	chanb->subbuf_size = subbuf_size;
	chanb->buf_size_order = get_count_order(chanb->buf_size);
	chanb->subbuf_size_order = get_count_order(subbuf_size);
	chanb->num_subbuf_order = get_count_order(num_subbuf);
	chanb->extra_reader_sb =
		(config->mode == RING_BUFFER_OVERWRITE) ? 1 : 0;
	chanb->num_subbuf = num_subbuf;
	strncpy(chanb->name, name, NAME_MAX);
	chanb->name[NAME_MAX - 1] = '\0';
	memcpy(&chanb->config, config, sizeof(*config));

	/* Per-cpu buffer size: control (prior to backend) */
	shmsize = offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer));
	shmsize += sizeof(struct lttng_ust_lib_ring_buffer);

	/* Per-cpu buffer size: backend */
	/* num_subbuf + 1 is the worst case */
	num_subbuf_alloc = num_subbuf + 1;
	shmsize += offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages_shmp));
	shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp) * num_subbuf_alloc;
	shmsize += offset_align(shmsize, PAGE_SIZE);
	shmsize += subbuf_size * num_subbuf_alloc;
	shmsize += offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages));
	shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_pages) * num_subbuf_alloc;
	shmsize += offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_subbuffer));
	shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_subbuffer) * num_subbuf;
	/* Per-cpu buffer size: control (after backend) */
	shmsize += offset_align(shmsize, __alignof__(struct commit_counters_hot));
	shmsize += sizeof(struct commit_counters_hot) * num_subbuf;
	shmsize += offset_align(shmsize, __alignof__(struct commit_counters_cold));
	shmsize += sizeof(struct commit_counters_cold) * num_subbuf;
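
	/*
	 * Illustrative sizing (assuming PAGE_SIZE = 4096): with
	 * subbuf_size = 4096 and num_subbuf = 2, num_subbuf_alloc = 3, so
	 * the page-aligned data area alone contributes 3 * 4096 = 12288
	 * bytes, on top of the comparatively small control structures
	 * accounted for above.
	 */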

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		struct lttng_ust_lib_ring_buffer *buf;
		/*
		 * We need to allocate for all possible cpus.
		 */
		for_each_possible_cpu(i) {
			struct shm_object *shmobj;

			shmobj = shm_object_table_append(handle->table, shmsize);
			if (!shmobj)
				goto end;
			align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer));
			set_shmp(chanb->buf[i].shmp, zalloc_shm(shmobj, sizeof(struct lttng_ust_lib_ring_buffer)));
			buf = shmp(handle, chanb->buf[i].shmp);
			if (!buf)
				goto end;
			set_shmp(buf->self, chanb->buf[i].shmp._ref);
			ret = lib_ring_buffer_create(buf, chanb, i,
						     handle, shmobj);
			if (ret)
				goto free_bufs;
		}
	} else {
		struct shm_object *shmobj;
		struct lttng_ust_lib_ring_buffer *buf;

		shmobj = shm_object_table_append(handle->table, shmsize);
		if (!shmobj)
			goto end;
		align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer));
		set_shmp(chanb->buf[0].shmp, zalloc_shm(shmobj, sizeof(struct lttng_ust_lib_ring_buffer)));
		buf = shmp(handle, chanb->buf[0].shmp);
		if (!buf)
			goto end;
		set_shmp(buf->self, chanb->buf[0].shmp._ref);
		ret = lib_ring_buffer_create(buf, chanb, -1,
					     handle, shmobj);
		if (ret)
			goto free_bufs;
	}
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);

	return 0;

free_bufs:
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		for_each_possible_cpu(i) {
			struct lttng_ust_lib_ring_buffer *buf = shmp(handle, chanb->buf[i].shmp);

			if (!buf->backend.allocated)
				continue;
			lib_ring_buffer_free(buf, handle);
		}
	}
	/* We only free the buffer data upon shm teardown */
end:
	return -ENOMEM;
}
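
/*
 * Illustrative call (hypothetical client code; my_config, my_priv and
 * my_handle are assumptions, not part of this file):
 *
 *	ret = channel_backend_init(&chan->backend, "my_chan", &my_config,
 *				   my_priv, 2 * PAGE_SIZE, 4, my_handle);
 *
 * This would create buffers of four 8 kB subbuffers each (assuming 4 kB
 * pages), returning 0 on success or a negative errno value on failure.
 */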

/**
 * channel_backend_free - destroy the channel backend
 * @chanb: the channel backend
 * @handle: shared memory handle
 *
 * Destroys all channel buffers and frees the channel backend.
 */
void channel_backend_free(struct channel_backend *chanb,
			  struct lttng_ust_shm_handle *handle)
{
	const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
	unsigned int i;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		for_each_possible_cpu(i) {
			struct lttng_ust_lib_ring_buffer *buf = shmp(handle, chanb->buf[i].shmp);

			if (!buf->backend.allocated)
				continue;
			lib_ring_buffer_free(buf, handle);
		}
	} else {
		struct lttng_ust_lib_ring_buffer *buf = shmp(handle, chanb->buf[0].shmp);

		CHAN_WARN_ON(chanb, !buf->backend.allocated);
		lib_ring_buffer_free(buf, handle);
	}
	/* We only free the buffer data upon shm teardown */
}

/**
 * lib_ring_buffer_read - read data from the ring buffer
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : length to copy to destination
 * @handle : shared memory handle
 *
 * Should be protected by get_subbuf/put_subbuf.
 * Returns the length copied.
 */
size_t lib_ring_buffer_read(struct lttng_ust_lib_ring_buffer_backend *bufb, size_t offset,
			    void *dest, size_t len, struct lttng_ust_shm_handle *handle)
{
	struct channel_backend *chanb = &shmp(handle, bufb->chan)->backend;
	const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
	ssize_t orig_len;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	unsigned long sb_bindex, id;

	orig_len = len;
	offset &= chanb->buf_size - 1;

	if (caa_unlikely(!len))
		return 0;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	/*
	 * Underlying layer should never ask for reads across
	 * subbuffers.
	 */
	CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	memcpy(dest, shmp_index(handle, shmp(handle, rpages->shmp)->p, offset & (chanb->subbuf_size - 1)), len);
	return orig_len;
}
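
/*
 * Illustrative consumer sequence (hypothetical code: buf, read_offset,
 * dest and count are assumptions, and the exact get_subbuf/put_subbuf
 * signatures should be taken from frontend.h rather than from this
 * sketch):
 *
 *	if (!lib_ring_buffer_get_subbuf(buf, handle)) {
 *		lib_ring_buffer_read(&buf->backend, read_offset,
 *				     dest, count, handle);
 *		lib_ring_buffer_put_subbuf(buf, handle);
 *	}
 */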

/**
 * lib_ring_buffer_read_cstr - read a C-style string from the ring buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : destination's length
 * @handle : shared memory handle
 *
 * Returns the string's length plus one (for the terminating '\0').
 * Should be protected by get_subbuf/put_subbuf.
 * @dest should be able to hold @len + 1 bytes, to accommodate the
 * terminating '\0' when the string length reaches @len.
 */
int lib_ring_buffer_read_cstr(struct lttng_ust_lib_ring_buffer_backend *bufb, size_t offset,
			      void *dest, size_t len, struct lttng_ust_shm_handle *handle)
{
	struct channel_backend *chanb = &shmp(handle, bufb->chan)->backend;
	const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
	ssize_t string_len, orig_offset;
	char *str;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	orig_offset = offset;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	/*
	 * Underlying layer should never ask for reads across
	 * subbuffers.
	 */
	CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	str = shmp_index(handle, shmp(handle, rpages->shmp)->p, offset & (chanb->subbuf_size - 1));
	string_len = strnlen(str, len);
	if (dest && len) {
		memcpy(dest, str, string_len);
		((char *)dest)[string_len] = '\0';
	}
	offset += string_len + 1;
	return offset - orig_offset;
}

/**
 * lib_ring_buffer_read_offset_address - get address of a buffer location
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @handle : shared memory handle
 *
 * Return the address where a given offset is located (for read).
 * Should be used to get the current subbuffer header pointer. Given we know
 * the header never crosses a page boundary, it's safe to write directly to
 * this address, as long as the write is never bigger than a page size.
 */
void *lib_ring_buffer_read_offset_address(struct lttng_ust_lib_ring_buffer_backend *bufb,
					  size_t offset,
					  struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct channel_backend *chanb = &shmp(handle, bufb->chan)->backend;
	const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	return shmp_index(handle, shmp(handle, rpages->shmp)->p, offset & (chanb->subbuf_size - 1));
}

/**
 * lib_ring_buffer_offset_address - get address of a location within the buffer
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @handle : shared memory handle
 *
 * Return the address where a given offset is located.
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's always at the beginning of a page, it's safe to write directly to this
 * address, as long as the write is never bigger than a page size.
 */
void *lib_ring_buffer_offset_address(struct lttng_ust_lib_ring_buffer_backend *bufb,
				     size_t offset,
				     struct lttng_ust_shm_handle *handle)
{
	size_t sbidx;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct channel_backend *chanb = &shmp(handle, bufb->chan)->backend;
	const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	sbidx = offset >> chanb->subbuf_size_order;
	id = shmp_index(handle, bufb->buf_wsb, sbidx)->id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	return shmp_index(handle, shmp(handle, rpages->shmp)->p, offset & (chanb->subbuf_size - 1));
}
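
/*
 * Offset math example (illustrative): with subbuf_size = 4096
 * (subbuf_size_order = 12) and offset = 5000, sbidx = 5000 >> 12 = 1,
 * and the returned address is 5000 & 4095 = 904 bytes into that
 * subbuffer's backend pages.
 */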