X-Git-Url: http://git.lttng.org/?a=blobdiff_plain;f=libringbuffer%2Fring_buffer_backend.c;h=466552debaf89c997a27a9a5679aceb52ce8f691;hb=4cfec15c93af7e0cfe3ce769ee90486bb8ab7c37;hp=18dd1b01073027efc9219bf01d905b1f3049c5a3;hpb=5d61a504c6d395914d78f97e82f6fd0fdf0f98a0;p=lttng-ust.git

diff --git a/libringbuffer/ring_buffer_backend.c b/libringbuffer/ring_buffer_backend.c
index 18dd1b01..466552de 100644
--- a/libringbuffer/ring_buffer_backend.c
+++ b/libringbuffer/ring_buffer_backend.c
@@ -25,11 +25,11 @@
  * @extra_reader_sb: need extra subbuffer for reader
  */
 static
-int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config,
-				     struct lib_ring_buffer_backend *bufb,
+int lib_ring_buffer_backend_allocate(const struct lttng_ust_lib_ring_buffer_config *config,
+				     struct lttng_ust_lib_ring_buffer_backend *bufb,
 				     size_t size, size_t num_subbuf,
 				     int extra_reader_sb,
-				     struct shm_handle *handle,
+				     struct lttng_ust_shm_handle *handle,
 				     struct shm_object *shmobj)
 {
 	struct channel_backend *chanb = &shmp(handle, bufb->chan)->backend;
@@ -43,9 +43,9 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
 	if (extra_reader_sb)
 		num_subbuf_alloc++;
 
-	align_shm(shmobj, __alignof__(struct lib_ring_buffer_backend_pages_shmp));
+	align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages_shmp));
 	set_shmp(bufb->array, zalloc_shm(shmobj,
-			sizeof(struct lib_ring_buffer_backend_pages_shmp) * num_subbuf_alloc));
+			sizeof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp) * num_subbuf_alloc));
 	if (unlikely(!shmp(handle, bufb->array)))
 		goto array_error;
 
@@ -61,24 +61,24 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
 
 	/* Allocate backend pages array elements */
 	for (i = 0; i < num_subbuf_alloc; i++) {
-		align_shm(shmobj, __alignof__(struct lib_ring_buffer_backend_pages));
-		set_shmp(shmp(handle, bufb->array)[i].shmp,
+		align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages));
+		set_shmp(shmp_index(handle, bufb->array, i)->shmp,
 			zalloc_shm(shmobj,
-				sizeof(struct lib_ring_buffer_backend_pages)));
-		if (!shmp(handle, shmp(handle, bufb->array)[i].shmp))
+				sizeof(struct lttng_ust_lib_ring_buffer_backend_pages)));
+		if (!shmp(handle, shmp_index(handle, bufb->array, i)->shmp))
 			goto free_array;
 	}
 
 	/* Allocate write-side subbuffer table */
-	align_shm(shmobj, __alignof__(struct lib_ring_buffer_backend_subbuffer));
+	align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_subbuffer));
 	set_shmp(bufb->buf_wsb, zalloc_shm(shmobj,
-			sizeof(struct lib_ring_buffer_backend_subbuffer)
+			sizeof(struct lttng_ust_lib_ring_buffer_backend_subbuffer)
 			* num_subbuf));
 	if (unlikely(!shmp(handle, bufb->buf_wsb)))
 		goto free_array;
 
 	for (i = 0; i < num_subbuf; i++)
-		shmp(handle, bufb->buf_wsb)[i].id = subbuffer_id(config, 0, 1, i);
+		shmp_index(handle, bufb->buf_wsb, i)->id = subbuffer_id(config, 0, 1, i);
 
 	/* Assign read-side subbuffer table */
 	if (extra_reader_sb)
@@ -95,10 +95,10 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
 		ref.offset = bufb->memory_map._ref.offset;
 		ref.offset += i * subbuf_size;
 
-		set_shmp(shmp(handle, shmp(handle, bufb->array)[i].shmp)->p,
+		set_shmp(shmp(handle, shmp_index(handle, bufb->array, i)->shmp)->p,
 			 ref);
 		if (config->output == RING_BUFFER_MMAP) {
-			shmp(handle, shmp(handle, bufb->array)[i].shmp)->mmap_offset = mmap_offset;
+			shmp(handle, shmp_index(handle, bufb->array, i)->shmp)->mmap_offset = mmap_offset;
 			mmap_offset += subbuf_size;
 		}
 	}
@@ -113,12 +113,12 @@ array_error:
 	return -ENOMEM;
 }
 
-int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
+int lib_ring_buffer_backend_create(struct lttng_ust_lib_ring_buffer_backend *bufb,
 				   struct channel_backend *chanb, int cpu,
-				   struct shm_handle *handle,
+				   struct lttng_ust_shm_handle *handle,
 				   struct shm_object *shmobj)
 {
-	const struct lib_ring_buffer_config *config = chanb->config;
+	const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
 
 	set_shmp(bufb->chan, handle->chan._ref);
 	bufb->cpu = cpu;
@@ -129,7 +129,7 @@ int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
 				       handle, shmobj);
 }
 
-void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb)
+void lib_ring_buffer_backend_free(struct lttng_ust_lib_ring_buffer_backend *bufb)
 {
 	/* bufb->buf_wsb will be freed by shm teardown */
 	/* bufb->array[i] will be freed by shm teardown */
@@ -137,11 +137,11 @@ void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb)
 	bufb->allocated = 0;
 }
 
-void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb,
-				   struct shm_handle *handle)
+void lib_ring_buffer_backend_reset(struct lttng_ust_lib_ring_buffer_backend *bufb,
+				   struct lttng_ust_shm_handle *handle)
 {
 	struct channel_backend *chanb = &shmp(handle, bufb->chan)->backend;
-	const struct lib_ring_buffer_config *config = chanb->config;
+	const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
 	unsigned long num_subbuf_alloc;
 	unsigned int i;
 
@@ -150,7 +150,7 @@ void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb,
 		num_subbuf_alloc++;
 
 	for (i = 0; i < chanb->num_subbuf; i++)
-		shmp(handle, bufb->buf_wsb)[i].id = subbuffer_id(config, 0, 1, i);
+		shmp_index(handle, bufb->buf_wsb, i)->id = subbuffer_id(config, 0, 1, i);
 	if (chanb->extra_reader_sb)
 		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
 						num_subbuf_alloc - 1);
@@ -159,9 +159,9 @@
 
 	for (i = 0; i < num_subbuf_alloc; i++) {
 		/* Don't reset mmap_offset */
-		v_set(config, &shmp(handle, shmp(handle, bufb->array)[i].shmp)->records_commit, 0);
-		v_set(config, &shmp(handle, shmp(handle, bufb->array)[i].shmp)->records_unread, 0);
-		shmp(handle, shmp(handle, bufb->array)[i].shmp)->data_size = 0;
+		v_set(config, &shmp(handle, shmp_index(handle, bufb->array, i)->shmp)->records_commit, 0);
+		v_set(config, &shmp(handle, shmp_index(handle, bufb->array, i)->shmp)->records_unread, 0);
+		shmp(handle, shmp_index(handle, bufb->array, i)->shmp)->data_size = 0;
 		/* Don't reset backend page and virt addresses */
 	}
 	/* Don't reset num_pages_per_subbuf, cpu, allocated */
@@ -175,7 +175,7 @@
 void channel_backend_reset(struct channel_backend *chanb)
 {
 	struct channel *chan = caa_container_of(chanb, struct channel, backend);
-	const struct lib_ring_buffer_config *config = chanb->config;
+	const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
 
 	/*
 	 * Don't reset buf_size, subbuf_size, subbuf_size_order,
@@ -194,7 +194,7 @@ void channel_backend_reset(struct channel_backend *chanb)
  * @parent: dentry of parent directory, %NULL for root directory
  * @subbuf_size: size of sub-buffers (> PAGE_SIZE, power of 2)
  * @num_subbuf: number of sub-buffers (power of 2)
- * @shm_handle: shared memory handle
+ * @lttng_ust_shm_handle: shared memory handle
  *
 * Returns channel pointer if successful, %NULL otherwise.
 *
@@ -206,14 +206,14 @@ void channel_backend_reset(struct channel_backend *chanb)
  */
 int channel_backend_init(struct channel_backend *chanb,
 			 const char *name,
-			 const struct lib_ring_buffer_config *config,
+			 const struct lttng_ust_lib_ring_buffer_config *config,
 			 void *priv, size_t subbuf_size, size_t num_subbuf,
-			 struct shm_handle *handle)
+			 struct lttng_ust_shm_handle *handle)
 {
 	struct channel *chan = caa_container_of(chanb, struct channel, backend);
 	unsigned int i;
 	int ret;
-	size_t shmsize = 0, bufshmsize = 0, num_subbuf_alloc;
+	size_t shmsize = 0, num_subbuf_alloc;
 
 	if (!name)
 		return -EPERM;
@@ -246,23 +246,23 @@ int channel_backend_init(struct channel_backend *chanb,
 	chanb->num_subbuf = num_subbuf;
 	strncpy(chanb->name, name, NAME_MAX);
 	chanb->name[NAME_MAX - 1] = '\0';
-	chanb->config = config;
+	memcpy(&chanb->config, config, sizeof(*config));
 
 	/* Per-cpu buffer size: control (prior to backend) */
-	shmsize = offset_align(shmsize, __alignof__(struct lib_ring_buffer));
-	shmsize += sizeof(struct lib_ring_buffer);
+	shmsize = offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer));
+	shmsize += sizeof(struct lttng_ust_lib_ring_buffer);
 
 	/* Per-cpu buffer size: backend */
 	/* num_subbuf + 1 is the worse case */
 	num_subbuf_alloc = num_subbuf + 1;
-	shmsize += offset_align(shmsize, __alignof__(struct lib_ring_buffer_backend_pages_shmp));
-	shmsize += sizeof(struct lib_ring_buffer_backend_pages_shmp) * num_subbuf_alloc;
-	shmsize += offset_align(bufshmsize, PAGE_SIZE);
+	shmsize += offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages_shmp));
+	shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp) * num_subbuf_alloc;
+	shmsize += offset_align(shmsize, PAGE_SIZE);
 	shmsize += subbuf_size * num_subbuf_alloc;
-	shmsize += offset_align(bufshmsize, __alignof__(struct lib_ring_buffer_backend_pages));
-	shmsize += sizeof(struct lib_ring_buffer_backend_pages) * num_subbuf_alloc;
-	shmsize += offset_align(bufshmsize, __alignof__(struct lib_ring_buffer_backend_subbuffer));
-	shmsize += sizeof(struct lib_ring_buffer_backend_subbuffer) * num_subbuf;
+	shmsize += offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages));
+	shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_pages) * num_subbuf_alloc;
+	shmsize += offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_subbuffer));
+	shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_subbuffer) * num_subbuf;
 	/* Per-cpu buffer size: control (after backend) */
 	shmsize += offset_align(shmsize, __alignof__(struct commit_counters_hot));
 	shmsize += sizeof(struct commit_counters_hot) * num_subbuf;
@@ -270,7 +270,7 @@ int channel_backend_init(struct channel_backend *chanb,
 	shmsize += sizeof(struct commit_counters_cold) * num_subbuf;
 
 	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-		struct lib_ring_buffer *buf;
+		struct lttng_ust_lib_ring_buffer *buf;
 		/*
 		 * We need to allocate for all possible cpus.
 		 */
@@ -278,8 +278,10 @@ int channel_backend_init(struct channel_backend *chanb,
 			struct shm_object *shmobj;
 
 			shmobj = shm_object_table_append(handle->table, shmsize);
-			align_shm(shmobj, __alignof__(struct lib_ring_buffer));
-			set_shmp(chanb->buf[i].shmp, zalloc_shm(shmobj, sizeof(struct lib_ring_buffer)));
+			if (!shmobj)
+				goto end;
+			align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer));
+			set_shmp(chanb->buf[i].shmp, zalloc_shm(shmobj, sizeof(struct lttng_ust_lib_ring_buffer)));
 			buf = shmp(handle, chanb->buf[i].shmp);
 			if (!buf)
 				goto end;
@@ -291,11 +293,13 @@ int channel_backend_init(struct channel_backend *chanb,
 		}
 	} else {
 		struct shm_object *shmobj;
-		struct lib_ring_buffer *buf;
+		struct lttng_ust_lib_ring_buffer *buf;
 
 		shmobj = shm_object_table_append(handle->table, shmsize);
-		align_shm(shmobj, __alignof__(struct lib_ring_buffer));
-		set_shmp(chanb->buf[0].shmp, zalloc_shm(shmobj, sizeof(struct lib_ring_buffer)));
+		if (!shmobj)
+			goto end;
+		align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer));
+		set_shmp(chanb->buf[0].shmp, zalloc_shm(shmobj, sizeof(struct lttng_ust_lib_ring_buffer)));
 		buf = shmp(handle, chanb->buf[0].shmp);
 		if (!buf)
 			goto end;
@@ -311,7 +315,7 @@ int channel_backend_init(struct channel_backend *chanb,
 free_bufs:
 	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
 		for_each_possible_cpu(i) {
-			struct lib_ring_buffer *buf = shmp(handle, chanb->buf[i].shmp);
+			struct lttng_ust_lib_ring_buffer *buf = shmp(handle, chanb->buf[i].shmp);
 
 			if (!buf->backend.allocated)
 				continue;
@@ -330,21 +334,21 @@ end:
  * Destroy all channel buffers and frees the channel.
  */
 void channel_backend_free(struct channel_backend *chanb,
-			  struct shm_handle *handle)
+			  struct lttng_ust_shm_handle *handle)
 {
-	const struct lib_ring_buffer_config *config = chanb->config;
+	const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
 	unsigned int i;
 
 	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
 		for_each_possible_cpu(i) {
-			struct lib_ring_buffer *buf = shmp(handle, chanb->buf[i].shmp);
+			struct lttng_ust_lib_ring_buffer *buf = shmp(handle, chanb->buf[i].shmp);
 
 			if (!buf->backend.allocated)
 				continue;
 			lib_ring_buffer_free(buf, handle);
 		}
 	} else {
-		struct lib_ring_buffer *buf = shmp(handle, chanb->buf[0].shmp);
+		struct lttng_ust_lib_ring_buffer *buf = shmp(handle, chanb->buf[0].shmp);
 
 		CHAN_WARN_ON(chanb, !buf->backend.allocated);
 		lib_ring_buffer_free(buf, handle);
@@ -362,13 +366,13 @@ void channel_backend_free(struct channel_backend *chanb,
 * Should be protected by get_subbuf/put_subbuf.
 * Returns the length copied.
 */
-size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb, size_t offset,
-			    void *dest, size_t len, struct shm_handle *handle)
+size_t lib_ring_buffer_read(struct lttng_ust_lib_ring_buffer_backend *bufb, size_t offset,
+			    void *dest, size_t len, struct lttng_ust_shm_handle *handle)
 {
 	struct channel_backend *chanb = &shmp(handle, bufb->chan)->backend;
-	const struct lib_ring_buffer_config *config = chanb->config;
+	const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
 	ssize_t orig_len;
-	struct lib_ring_buffer_backend_pages_shmp *rpages;
+	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
 	unsigned long sb_bindex, id;
 
 	orig_len = len;
@@ -378,7 +382,7 @@ size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb, size_t offset,
 		return 0;
 	id = bufb->buf_rsb.id;
 	sb_bindex = subbuffer_id_get_index(config, id);
-	rpages = &shmp(handle, bufb->array)[sb_bindex];
+	rpages = shmp_index(handle, bufb->array, sb_bindex);
 	/*
 	 * Underlying layer should never ask for reads across
 	 * subbuffers.
@@ -386,7 +390,7 @@ size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb, size_t offset,
 	CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
 	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
 		     && subbuffer_id_is_noref(config, id));
-	memcpy(dest, shmp(handle, shmp(handle, rpages->shmp)->p) + (offset & ~(chanb->subbuf_size - 1)), len);
+	memcpy(dest, shmp_index(handle, shmp(handle, rpages->shmp)->p, offset & (chanb->subbuf_size - 1)), len);
 	return orig_len;
 }
 
@@ -400,21 +404,21 @@ size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb, size_t offset,
  * return string's length
  * Should be protected by get_subbuf/put_subbuf.
  */
-int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb, size_t offset,
-			      void *dest, size_t len, struct shm_handle *handle)
+int lib_ring_buffer_read_cstr(struct lttng_ust_lib_ring_buffer_backend *bufb, size_t offset,
+			      void *dest, size_t len, struct lttng_ust_shm_handle *handle)
 {
 	struct channel_backend *chanb = &shmp(handle, bufb->chan)->backend;
-	const struct lib_ring_buffer_config *config = chanb->config;
+	const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
 	ssize_t string_len, orig_offset;
 	char *str;
-	struct lib_ring_buffer_backend_pages_shmp *rpages;
+	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
 	unsigned long sb_bindex, id;
 
 	offset &= chanb->buf_size - 1;
 	orig_offset = offset;
 	id = bufb->buf_rsb.id;
 	sb_bindex = subbuffer_id_get_index(config, id);
-	rpages = &shmp(handle, bufb->array)[sb_bindex];
+	rpages = shmp_index(handle, bufb->array, sb_bindex);
 	/*
 	 * Underlying layer should never ask for reads across
 	 * subbuffers.
@@ -422,7 +426,7 @@ int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb, size_t offse
 	CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
 	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
 		     && subbuffer_id_is_noref(config, id));
-	str = (char *)shmp(handle, shmp(handle, rpages->shmp)->p) + (offset & ~(chanb->subbuf_size - 1));
+	str = shmp_index(handle, shmp(handle, rpages->shmp)->p, offset & (chanb->subbuf_size - 1));
 	string_len = strnlen(str, len);
 	if (dest && len) {
 		memcpy(dest, str, string_len);
@@ -441,22 +445,22 @@ int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb, size_t offse
 * it's never on a page boundary, it's safe to write directly to this address,
 * as long as the write is never bigger than a page size.
 */
-void *lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
+void *lib_ring_buffer_read_offset_address(struct lttng_ust_lib_ring_buffer_backend *bufb,
 					  size_t offset,
-					  struct shm_handle *handle)
+					  struct lttng_ust_shm_handle *handle)
 {
-	struct lib_ring_buffer_backend_pages_shmp *rpages;
+	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
 	struct channel_backend *chanb = &shmp(handle, bufb->chan)->backend;
-	const struct lib_ring_buffer_config *config = chanb->config;
+	const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
 	unsigned long sb_bindex, id;
 
 	offset &= chanb->buf_size - 1;
 	id = bufb->buf_rsb.id;
 	sb_bindex = subbuffer_id_get_index(config, id);
-	rpages = &shmp(handle, bufb->array)[sb_bindex];
+	rpages = shmp_index(handle, bufb->array, sb_bindex);
 	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
 		     && subbuffer_id_is_noref(config, id));
-	return shmp(handle, shmp(handle, rpages->shmp)->p) + (offset & ~(chanb->subbuf_size - 1));
+	return shmp_index(handle, shmp(handle, rpages->shmp)->p, offset & (chanb->subbuf_size - 1));
 }
 
 /**
@@ -469,22 +473,22 @@ void *lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
 * it's always at the beginning of a page, it's safe to write directly to this
 * address, as long as the write is never bigger than a page size.
 */
-void *lib_ring_buffer_offset_address(struct lib_ring_buffer_backend *bufb,
+void *lib_ring_buffer_offset_address(struct lttng_ust_lib_ring_buffer_backend *bufb,
 				     size_t offset,
-				     struct shm_handle *handle)
+				     struct lttng_ust_shm_handle *handle)
 {
 	size_t sbidx;
-	struct lib_ring_buffer_backend_pages_shmp *rpages;
+	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
 	struct channel_backend *chanb = &shmp(handle, bufb->chan)->backend;
-	const struct lib_ring_buffer_config *config = chanb->config;
+	const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
 	unsigned long sb_bindex, id;
 
 	offset &= chanb->buf_size - 1;
 	sbidx = offset >> chanb->subbuf_size_order;
-	id = shmp(handle, bufb->buf_wsb)[sbidx].id;
+	id = shmp_index(handle, bufb->buf_wsb, sbidx)->id;
 	sb_bindex = subbuffer_id_get_index(config, id);
-	rpages = &shmp(handle, bufb->array)[sb_bindex];
+	rpages = shmp_index(handle, bufb->array, sb_bindex);
 	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
 		     && subbuffer_id_is_noref(config, id));
-	return shmp(handle, shmp(handle, rpages->shmp)->p) + (offset & ~(chanb->subbuf_size - 1));
+	return shmp_index(handle, shmp(handle, rpages->shmp)->p, offset & (chanb->subbuf_size - 1));
 }
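
Note on the read-path hunks above (lib_ring_buffer_read, lib_ring_buffer_read_cstr and the two *_address helpers): besides switching to shmp_index(), they change the offset mask from "offset & ~(chanb->subbuf_size - 1)" to "offset & (chanb->subbuf_size - 1)". For a power-of-two subbuffer size these masks select complementary parts of the offset: the first yields the subbuffer-aligned base, the second the byte position inside the subbuffer, which is what an index into one subbuffer's backing pages (rpages) needs. A minimal standalone sketch of the two masks, with hypothetical values not taken from the patch:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	const size_t subbuf_size = 4096;	/* power of two, as the backend requires */
	size_t offset = 5000;			/* hypothetical offset into the buffer */

	size_t subbuf_base = offset & ~(subbuf_size - 1);	/* 4096: start of the subbuffer */
	size_t in_subbuf   = offset &  (subbuf_size - 1);	/*  904: position within it */

	printf("base=%zu within=%zu\n", subbuf_base, in_subbuf);
	return 0;
}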
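
Similarly, channel_backend_init() no longer stores the caller's config pointer ("chanb->config = config") but embeds a copy ("memcpy(&chanb->config, config, sizeof(*config))"), with every reader switching from "chanb->config" to "&chanb->config". Reduced to a sketch (struct names hypothetical, for illustration only), the ownership difference between the two patterns is:

#include <stdio.h>
#include <string.h>

struct cfg { int mode; };

struct backend_ref  { const struct cfg *config; };	/* before: borrows the caller's config */
struct backend_copy { struct cfg config; };		/* after: owns a private copy */

int main(void)
{
	struct cfg c = { .mode = 1 };
	struct backend_ref br = { .config = &c };	/* c must outlive br */
	struct backend_copy bc;

	memcpy(&bc.config, &c, sizeof(c));		/* bc is now independent of c */
	c.mode = 2;					/* br observes 2, bc still holds 1 */
	printf("ref=%d copy=%d\n", br.config->mode, bc.config.mode);
	return 0;
}

With the copy, the backend presumably no longer depends on the caller-supplied configuration outliving the channel; the trade-off is that later changes to the caller's config are not visible through the channel.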