X-Git-Url: http://git.lttng.org/?a=blobdiff_plain;f=libringbuffer%2Fring_buffer_backend.c;h=72ddacf3d54e5abd395e9acb4ab999e9ab744153;hb=381c0f1ef474e0ae8a96b3753470ca4bda45c764;hp=badde80f0b4c40cea03ec04a63e99e201fd0a65d;hpb=1d4981969313da002983ca979bd85c95493f7316;p=lttng-ust.git

diff --git a/libringbuffer/ring_buffer_backend.c b/libringbuffer/ring_buffer_backend.c
index badde80f..72ddacf3 100644
--- a/libringbuffer/ring_buffer_backend.c
+++ b/libringbuffer/ring_buffer_backend.c
@@ -62,10 +62,10 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
 	/* Allocate backend pages array elements */
 	for (i = 0; i < num_subbuf_alloc; i++) {
 		align_shm(shmobj, __alignof__(struct lib_ring_buffer_backend_pages));
-		set_shmp(shmp(handle, bufb->array)[i].shmp,
+		set_shmp(shmp_index(handle, bufb->array, i)->shmp,
 			zalloc_shm(shmobj,
 				sizeof(struct lib_ring_buffer_backend_pages)));
-		if (!shmp(handle, shmp(handle, bufb->array)[i].shmp))
+		if (!shmp(handle, shmp_index(handle, bufb->array, i)->shmp))
 			goto free_array;
 	}
 
@@ -78,7 +78,7 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
 		goto free_array;
 
 	for (i = 0; i < num_subbuf; i++)
-		shmp(handle, bufb->buf_wsb)[i].id = subbuffer_id(config, 0, 1, i);
+		shmp_index(handle, bufb->buf_wsb, i)->id = subbuffer_id(config, 0, 1, i);
 
 	/* Assign read-side subbuffer table */
 	if (extra_reader_sb)
@@ -95,10 +95,10 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
 		ref.offset = bufb->memory_map._ref.offset;
 		ref.offset += i * subbuf_size;
-		set_shmp(shmp(handle, shmp(handle, bufb->array)[i].shmp)->p,
+		set_shmp(shmp(handle, shmp_index(handle, bufb->array, i)->shmp)->p,
 			 ref);
 		if (config->output == RING_BUFFER_MMAP) {
-			shmp(handle, shmp(handle, bufb->array)[i].shmp)->mmap_offset = mmap_offset;
+			shmp(handle, shmp_index(handle, bufb->array, i)->shmp)->mmap_offset = mmap_offset;
 			mmap_offset += subbuf_size;
 		}
 	}
 
@@ -150,7 +150,7 @@ void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb,
 		num_subbuf_alloc++;
 
 	for (i = 0; i < chanb->num_subbuf; i++)
-		shmp(handle, bufb->buf_wsb)[i].id = subbuffer_id(config, 0, 1, i);
+		shmp_index(handle, bufb->buf_wsb, i)->id = subbuffer_id(config, 0, 1, i);
 	if (chanb->extra_reader_sb)
 		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
 						num_subbuf_alloc - 1);
@@ -159,9 +159,9 @@ void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb,
 
 	for (i = 0; i < num_subbuf_alloc; i++) {
 		/* Don't reset mmap_offset */
-		v_set(config, &shmp(handle, shmp(handle, bufb->array)[i].shmp)->records_commit, 0);
-		v_set(config, &shmp(handle, shmp(handle, bufb->array)[i].shmp)->records_unread, 0);
-		shmp(handle, shmp(handle, bufb->array)[i].shmp)->data_size = 0;
+		v_set(config, &shmp(handle, shmp_index(handle, bufb->array, i)->shmp)->records_commit, 0);
+		v_set(config, &shmp(handle, shmp_index(handle, bufb->array, i)->shmp)->records_unread, 0);
+		shmp(handle, shmp_index(handle, bufb->array, i)->shmp)->data_size = 0;
 		/* Don't reset backend page and virt addresses */
 	}
 	/* Don't reset num_pages_per_subbuf, cpu, allocated */
@@ -213,7 +213,7 @@ int channel_backend_init(struct channel_backend *chanb,
 	struct channel *chan = caa_container_of(chanb, struct channel, backend);
 	unsigned int i;
 	int ret;
-	size_t shmsize = 0, bufshmsize = 0, num_subbuf_alloc;
+	size_t shmsize = 0, num_subbuf_alloc;
 
 	if (!name)
 		return -EPERM;
@@ -257,11 +257,11 @@ int channel_backend_init(struct channel_backend *chanb,
 	num_subbuf_alloc = num_subbuf + 1;
 	shmsize += offset_align(shmsize, __alignof__(struct lib_ring_buffer_backend_pages_shmp));
 	shmsize += sizeof(struct lib_ring_buffer_backend_pages_shmp) * num_subbuf_alloc;
-	shmsize += offset_align(bufshmsize, PAGE_SIZE);
+	shmsize += offset_align(shmsize, PAGE_SIZE);
 	shmsize += subbuf_size * num_subbuf_alloc;
-	shmsize += offset_align(bufshmsize, __alignof__(struct lib_ring_buffer_backend_pages));
+	shmsize += offset_align(shmsize, __alignof__(struct lib_ring_buffer_backend_pages));
 	shmsize += sizeof(struct lib_ring_buffer_backend_pages) * num_subbuf_alloc;
-	shmsize += offset_align(bufshmsize, __alignof__(struct lib_ring_buffer_backend_subbuffer));
+	shmsize += offset_align(shmsize, __alignof__(struct lib_ring_buffer_backend_subbuffer));
 	shmsize += sizeof(struct lib_ring_buffer_backend_subbuffer) * num_subbuf;
 	/* Per-cpu buffer size: control (after backend) */
 	shmsize += offset_align(shmsize, __alignof__(struct commit_counters_hot));
@@ -278,11 +278,14 @@ int channel_backend_init(struct channel_backend *chanb,
 			struct shm_object *shmobj;
 
 			shmobj = shm_object_table_append(handle->table, shmsize);
+			if (!shmobj)
+				goto end;
 			align_shm(shmobj, __alignof__(struct lib_ring_buffer));
 			set_shmp(chanb->buf[i].shmp,
 				 zalloc_shm(shmobj, sizeof(struct lib_ring_buffer)));
 			buf = shmp(handle, chanb->buf[i].shmp);
 			if (!buf)
 				goto end;
+			set_shmp(buf->self, chanb->buf[i].shmp._ref);
 			ret = lib_ring_buffer_create(buf, chanb, i, handle, shmobj);
 			if (ret)
@@ -293,6 +296,8 @@ int channel_backend_init(struct channel_backend *chanb,
 		struct lib_ring_buffer *buf;
 
 		shmobj = shm_object_table_append(handle->table, shmsize);
+		if (!shmobj)
+			goto end;
 		align_shm(shmobj, __alignof__(struct lib_ring_buffer));
 		set_shmp(chanb->buf[0].shmp, zalloc_shm(shmobj, sizeof(struct lib_ring_buffer)));
 		buf = shmp(handle, chanb->buf[0].shmp);
@@ -377,7 +382,7 @@ size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb, size_t offset,
 		return 0;
 	id = bufb->buf_rsb.id;
 	sb_bindex = subbuffer_id_get_index(config, id);
-	rpages = &shmp(handle, bufb->array)[sb_bindex];
+	rpages = shmp_index(handle, bufb->array, sb_bindex);
 	/*
 	 * Underlying layer should never ask for reads across
 	 * subbuffers.
@@ -385,7 +390,7 @@ size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb, size_t offset,
 	CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
 	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
 		     && subbuffer_id_is_noref(config, id));
-	memcpy(dest, shmp(handle, shmp(handle, rpages->shmp)->p) + (offset & ~(chanb->subbuf_size - 1)), len);
+	memcpy(dest, shmp_index(handle, shmp(handle, rpages->shmp)->p, offset & (chanb->subbuf_size - 1)), len);
 	return orig_len;
 }
 
@@ -413,7 +418,7 @@ int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb, size_t offse
 	orig_offset = offset;
 	id = bufb->buf_rsb.id;
 	sb_bindex = subbuffer_id_get_index(config, id);
-	rpages = &shmp(handle, bufb->array)[sb_bindex];
+	rpages = shmp_index(handle, bufb->array, sb_bindex);
 	/*
 	 * Underlying layer should never ask for reads across
 	 * subbuffers.
@@ -421,7 +426,7 @@ int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb, size_t offse
 	CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
 	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
 		     && subbuffer_id_is_noref(config, id));
-	str = (char *)shmp(handle, shmp(handle, rpages->shmp)->p) + (offset & ~(chanb->subbuf_size - 1));
+	str = shmp_index(handle, shmp(handle, rpages->shmp)->p, offset & (chanb->subbuf_size - 1));
 	string_len = strnlen(str, len);
 	if (dest && len) {
 		memcpy(dest, str, string_len);
@@ -452,10 +457,10 @@ void *lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
 	offset &= chanb->buf_size - 1;
 	id = bufb->buf_rsb.id;
 	sb_bindex = subbuffer_id_get_index(config, id);
-	rpages = &shmp(handle, bufb->array)[sb_bindex];
+	rpages = shmp_index(handle, bufb->array, sb_bindex);
 	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
 		     && subbuffer_id_is_noref(config, id));
-	return shmp(handle, shmp(handle, rpages->shmp)->p) + (offset & ~(chanb->subbuf_size - 1));
+	return shmp_index(handle, shmp(handle, rpages->shmp)->p, offset & (chanb->subbuf_size - 1));
 }
 
 /**
@@ -480,10 +485,10 @@ void *lib_ring_buffer_offset_address(struct lib_ring_buffer_backend *bufb,
 
 	offset &= chanb->buf_size - 1;
 	sbidx = offset >> chanb->subbuf_size_order;
-	id = shmp(handle, bufb->buf_wsb)[sbidx].id;
+	id = shmp_index(handle, bufb->buf_wsb, sbidx)->id;
 	sb_bindex = subbuffer_id_get_index(config, id);
-	rpages = &shmp(handle, bufb->array)[sb_bindex];
+	rpages = shmp_index(handle, bufb->array, sb_bindex);
 	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
 		     && subbuffer_id_is_noref(config, id));
-	return shmp(handle, shmp(handle, rpages->shmp)->p) + (offset & ~(chanb->subbuf_size - 1));
+	return shmp_index(handle, shmp(handle, rpages->shmp)->p, offset & (chanb->subbuf_size - 1));
 }
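Note on the patch (not part of the diff above): the recurring substitution replaces open-coded pointer arithmetic on a shared-memory reference, shmp(handle, ref)[i], with the shmp_index(handle, ref, i) accessor, which yields a pointer to element i directly. In the read paths this also changes the offset computation: the old code masked with ~(chanb->subbuf_size - 1), which produces the aligned *base* of the containing subbuffer, while the new code passes offset & (chanb->subbuf_size - 1), the byte offset *within* the subbuffer, to shmp_index() on the subbuffer's backing pages. The sketch below is a minimal standalone illustration of that mask distinction for a power-of-two subbuffer size; the constants are illustrative and not taken from the patch:

	#include <assert.h>
	#include <stddef.h>

	int main(void)
	{
		size_t subbuf_size = 4096;	/* power of two, as the ring buffer requires */
		size_t offset = 4096 + 100;	/* 100 bytes into subbuffer 1 */

		/* aligned base of the containing subbuffer (what the old mask computed) */
		assert((offset & ~(subbuf_size - 1)) == 4096);
		/* byte offset within the subbuffer (what the fixed read path uses) */
		assert((offset & (subbuf_size - 1)) == 100);
		return 0;
	}

The two added "if (!shmobj) goto end;" checks make channel_backend_init() fail cleanly when shm_object_table_append() cannot allocate a shared-memory object, rather than handing a NULL object to align_shm() and zalloc_shm(). The added set_shmp(buf->self, chanb->buf[i].shmp._ref) line appears to store a back-reference from each ring buffer to its own shared-memory descriptor.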