shm: introduce shmp_index
Author:     Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
AuthorDate: Sat, 20 Aug 2011 09:20:40 -0400
Commit:     Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
CommitDate: Sat, 20 Aug 2011 09:20:40 -0400
Introduce the shmp_index() helper, which adds the array element offset to the
shared memory reference before checking the target pointer range. The old
pattern, shmp(handle, ref)[idx], range-checked only the base pointer, so a
non-zero index could still fall outside the backing shared memory object.
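
The change is mechanical at call sites; for example, in the
lib_ring_buffer_write() fast path (taken verbatim from the first hunk below):

	/* Before: the range check covers only the base pointer,
	 * and [sbidx] is applied after it. */
	id = shmp(handle, bufb->buf_wsb)[sbidx].id;

	/* After: the element offset takes part in the range check. */
	id = shmp_index(handle, bufb->buf_wsb, sbidx)->id;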

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
libringbuffer/backend.h
libringbuffer/backend_internal.h
libringbuffer/frontend_api.h
libringbuffer/frontend_internal.h
libringbuffer/ring_buffer_backend.c
libringbuffer/ring_buffer_frontend.c
libringbuffer/shm.h

diff --git a/libringbuffer/backend.h b/libringbuffer/backend.h
index e26045af2c7318f8bff93813cd9beaae8085db81..d93f6d1fd09ef2da267ce0b0a7307202a3f9427c 100644
@@ -76,9 +76,9 @@ void lib_ring_buffer_write(const struct lib_ring_buffer_config *config,
 
        offset &= chanb->buf_size - 1;
        sbidx = offset >> chanb->subbuf_size_order;
-       id = shmp(handle, bufb->buf_wsb)[sbidx].id;
+       id = shmp_index(handle, bufb->buf_wsb, sbidx)->id;
        sb_bindex = subbuffer_id_get_index(config, id);
-       rpages = &shmp(handle, bufb->array)[sb_bindex];
+       rpages = shmp_index(handle, bufb->array, sb_bindex);
        CHAN_WARN_ON(ctx->chan,
                     config->mode == RING_BUFFER_OVERWRITE
                     && subbuffer_id_is_noref(config, id));
@@ -88,7 +88,7 @@ void lib_ring_buffer_write(const struct lib_ring_buffer_config *config,
         */
        CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
        lib_ring_buffer_do_copy(config,
-                               shmp(handle, shmp(handle, rpages->shmp)->p) + (offset & ~(chanb->subbuf_size - 1)),
+                               shmp_index(handle, shmp(handle, rpages->shmp)->p, offset & ~(chanb->subbuf_size - 1)),
                                src, len);
        ctx->buf_offset += len;
 }
@@ -110,15 +110,15 @@ unsigned long lib_ring_buffer_get_records_unread(
        unsigned int i;
 
        for (i = 0; i < shmp(handle, bufb->chan)->backend.num_subbuf; i++) {
-               id = shmp(handle, bufb->buf_wsb)[i].id;
+               id = shmp_index(handle, bufb->buf_wsb, i)->id;
                sb_bindex = subbuffer_id_get_index(config, id);
-               pages = &shmp(handle, bufb->array)[sb_bindex];
+               pages = shmp_index(handle, bufb->array, sb_bindex);
                records_unread += v_read(config, &shmp(handle, pages->shmp)->records_unread);
        }
        if (config->mode == RING_BUFFER_OVERWRITE) {
                id = bufb->buf_rsb.id;
                sb_bindex = subbuffer_id_get_index(config, id);
-               pages = &shmp(handle, bufb->array)[sb_bindex];
+               pages = shmp_index(handle, bufb->array, sb_bindex);
                records_unread += v_read(config, &shmp(handle, pages->shmp)->records_unread);
        }
        return records_unread;
diff --git a/libringbuffer/backend_internal.h b/libringbuffer/backend_internal.h
index 30e32098af0de0832461dcdfb3df31c5f1b54ff8..ba0497f9f089f3cb6714f2e59603524eab5c725b 100644
@@ -190,8 +190,8 @@ void subbuffer_count_record(const struct lib_ring_buffer_config *config,
 {
        unsigned long sb_bindex;
 
-       sb_bindex = subbuffer_id_get_index(config, shmp(handle, bufb->buf_wsb)[idx].id);
-       v_inc(config, &shmp(handle, (shmp(handle, bufb->array)[sb_bindex]).shmp)->records_commit);
+       sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
+       v_inc(config, &shmp(handle, shmp_index(handle, bufb->array, sb_bindex)->shmp)->records_commit);
 }
 
 /*
@@ -207,9 +207,9 @@ void subbuffer_consume_record(const struct lib_ring_buffer_config *config,
 
        sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
        CHAN_WARN_ON(shmp(handle, bufb->chan),
-                    !v_read(config, &shmp(handle, (shmp(handle, bufb->array)[sb_bindex]).shmp)->records_unread));
+                    !v_read(config, &shmp(handle, shmp_index(handle, bufb->array, sb_bindex)->shmp)->records_unread));
        /* Non-atomic decrement protected by exclusive subbuffer access */
-       _v_dec(config, &shmp(handle, (shmp(handle, bufb->array)[sb_bindex]).shmp)->records_unread);
+       _v_dec(config, &shmp(handle, shmp_index(handle, bufb->array, sb_bindex)->shmp)->records_unread);
        v_inc(config, &bufb->records_read);
 }
 
@@ -222,8 +222,8 @@ unsigned long subbuffer_get_records_count(
 {
        unsigned long sb_bindex;
 
-       sb_bindex = subbuffer_id_get_index(config, shmp(handle, bufb->buf_wsb)[idx].id);
-       return v_read(config, &shmp(handle, (shmp(handle, bufb->array)[sb_bindex]).shmp)->records_commit);
+       sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
+       return v_read(config, &shmp(handle, shmp_index(handle, bufb->array, sb_bindex)->shmp)->records_commit);
 }
 
 /*
@@ -242,8 +242,8 @@ unsigned long subbuffer_count_records_overrun(
        struct lib_ring_buffer_backend_pages_shmp *pages;
        unsigned long overruns, sb_bindex;
 
-       sb_bindex = subbuffer_id_get_index(config, shmp(handle, bufb->buf_wsb)[idx].id);
-       pages = &shmp(handle, bufb->array)[sb_bindex];
+       sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
+       pages = shmp_index(handle, bufb->array, sb_bindex);
        overruns = v_read(config, &shmp(handle, pages->shmp)->records_unread);
        v_set(config, &shmp(handle, pages->shmp)->records_unread,
              v_read(config, &shmp(handle, pages->shmp)->records_commit));
@@ -262,8 +262,8 @@ void subbuffer_set_data_size(const struct lib_ring_buffer_config *config,
        struct lib_ring_buffer_backend_pages_shmp *pages;
        unsigned long sb_bindex;
 
-       sb_bindex = subbuffer_id_get_index(config, shmp(handle, bufb->buf_wsb)[idx].id);
-       pages = &shmp(handle, bufb->array)[sb_bindex];
+       sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
+       pages = shmp_index(handle, bufb->array, sb_bindex);
        shmp(handle, pages->shmp)->data_size = data_size;
 }
 
@@ -277,7 +277,7 @@ unsigned long subbuffer_get_read_data_size(
        unsigned long sb_bindex;
 
        sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
-       pages = &shmp(handle, bufb->array)[sb_bindex];
+       pages = shmp_index(handle, bufb->array, sb_bindex);
        return shmp(handle, pages->shmp)->data_size;
 }
 
@@ -291,8 +291,8 @@ unsigned long subbuffer_get_data_size(
        struct lib_ring_buffer_backend_pages_shmp *pages;
        unsigned long sb_bindex;
 
-       sb_bindex = subbuffer_id_get_index(config, shmp(handle, bufb->buf_wsb)[idx].id);
-       pages = &shmp(handle, bufb->array)[sb_bindex];
+       sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
+       pages = shmp_index(handle, bufb->array, sb_bindex);
        return shmp(handle, pages->shmp)->data_size;
 }
 
@@ -315,7 +315,7 @@ void lib_ring_buffer_clear_noref(const struct lib_ring_buffer_config *config,
         * Performing a volatile access to read the sb_pages, because we want to
         * read a coherent version of the pointer and the associated noref flag.
         */
-       id = CMM_ACCESS_ONCE(shmp(handle, bufb->buf_wsb)[idx].id);
+       id = CMM_ACCESS_ONCE(shmp_index(handle, bufb->buf_wsb, idx)->id);
        for (;;) {
                /* This check is called on the fast path for each record. */
                if (likely(!subbuffer_id_is_noref(config, id))) {
@@ -329,7 +329,7 @@ void lib_ring_buffer_clear_noref(const struct lib_ring_buffer_config *config,
                }
                new_id = id;
                subbuffer_id_clear_noref(config, &new_id);
-               new_id = uatomic_cmpxchg(&shmp(handle, bufb->buf_wsb)[idx].id, id, new_id);
+               new_id = uatomic_cmpxchg(&shmp_index(handle, bufb->buf_wsb, idx)->id, id, new_id);
                if (likely(new_id == id))
                        break;
                id = new_id;
@@ -361,13 +361,13 @@ void lib_ring_buffer_set_noref_offset(const struct lib_ring_buffer_config *config,
         * readers of the noref flag.
         */
        CHAN_WARN_ON(shmp(handle, bufb->chan),
-                    subbuffer_id_is_noref(config, shmp(handle, bufb->buf_wsb)[idx].id));
+                    subbuffer_id_is_noref(config, shmp_index(handle, bufb->buf_wsb, idx)->id));
        /*
         * Memory barrier that ensures counter stores are ordered before set
         * noref and offset.
         */
        cmm_smp_mb();
-       subbuffer_id_set_noref_offset(config, &shmp(handle, bufb->buf_wsb)[idx].id, offset);
+       subbuffer_id_set_noref_offset(config, &shmp_index(handle, bufb->buf_wsb, idx)->id, offset);
 }
 
 /**
@@ -390,7 +390,7 @@ int update_read_sb_index(const struct lib_ring_buffer_config *config,
                 * old_wpage, because the value read will be confirmed by the
                 * following cmpxchg().
                 */
-               old_id = shmp(handle, bufb->buf_wsb)[consumed_idx].id;
+               old_id = shmp_index(handle, bufb->buf_wsb, consumed_idx)->id;
                if (unlikely(!subbuffer_id_is_noref(config, old_id)))
                        return -EAGAIN;
                /*
@@ -404,14 +404,14 @@ int update_read_sb_index(const struct lib_ring_buffer_config *config,
                             !subbuffer_id_is_noref(config, bufb->buf_rsb.id));
                subbuffer_id_set_noref_offset(config, &bufb->buf_rsb.id,
                                              consumed_count);
-               new_id = uatomic_cmpxchg(&shmp(handle, bufb->buf_wsb)[consumed_idx].id, old_id,
+               new_id = uatomic_cmpxchg(&shmp_index(handle, bufb->buf_wsb, consumed_idx)->id, old_id,
                                 bufb->buf_rsb.id);
                if (unlikely(old_id != new_id))
                        return -EAGAIN;
                bufb->buf_rsb.id = new_id;
        } else {
                /* No page exchange, use the writer page directly */
-               bufb->buf_rsb.id = shmp(handle, bufb->buf_wsb)[consumed_idx].id;
+               bufb->buf_rsb.id = shmp_index(handle, bufb->buf_wsb, consumed_idx)->id;
        }
        return 0;
 }
diff --git a/libringbuffer/frontend_api.h b/libringbuffer/frontend_api.h
index 1d8e29493655da9709cec47909167177a7c1bcc6..f570cc168174de626d134998571120e8e10a81a7 100644
@@ -253,7 +253,7 @@ void lib_ring_buffer_commit(const struct lib_ring_buffer_config *config,
         */
        cmm_smp_wmb();
 
-       v_add(config, ctx->slot_size, &shmp(handle, buf->commit_hot)[endidx].cc);
+       v_add(config, ctx->slot_size, &shmp_index(handle, buf->commit_hot, endidx)->cc);
 
        /*
         * commit count read can race with concurrent OOO commit count updates.
@@ -273,7 +273,7 @@ void lib_ring_buffer_commit(const struct lib_ring_buffer_config *config,
         *   count reaches back the reserve offset for a specific sub-buffer,
         *   which is completely independent of the order.
         */
-       commit_count = v_read(config, &shmp(handle, buf->commit_hot)[endidx].cc);
+       commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, endidx)->cc);
 
        lib_ring_buffer_check_deliver(config, buf, chan, offset_end - 1,
                                      commit_count, endidx, handle);
diff --git a/libringbuffer/frontend_internal.h b/libringbuffer/frontend_internal.h
index c60affe07d94dd4e549af3c4abc1dbcfc6eb6c3e..02cb4d4a75d37c4b70f1f515be7fe5a1e18443d9 100644
@@ -185,7 +185,7 @@ void lib_ring_buffer_vmcore_check_deliver(const struct lib_ring_buffer_config *config,
                                          struct shm_handle *handle)
 {
        if (config->oops == RING_BUFFER_OOPS_CONSISTENCY)
-               v_set(config, &shmp(handle, buf->commit_hot)[idx].seq, commit_count);
+               v_set(config, &shmp_index(handle, buf->commit_hot, idx)->seq, commit_count);
 }
 
 static inline
@@ -198,7 +198,7 @@ int lib_ring_buffer_poll_deliver(const struct lib_ring_buffer_config *config,
 
        consumed_old = uatomic_read(&buf->consumed);
        consumed_idx = subbuf_index(consumed_old, chan);
-       commit_count = v_read(config, &shmp(handle, buf->commit_cold)[consumed_idx].cc_sb);
+       commit_count = v_read(config, &shmp_index(handle, buf->commit_cold, consumed_idx)->cc_sb);
        /*
         * No memory barrier here, since we are only interested
         * in a statistically correct polling result. The next poll will
@@ -275,7 +275,7 @@ int lib_ring_buffer_reserve_committed(const struct lib_ring_buffer_config *config,
        do {
                offset = v_read(config, &buf->offset);
                idx = subbuf_index(offset, chan);
-               commit_count = v_read(config, &shmp(handle, buf->commit_hot)[idx].cc);
+               commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, idx)->cc);
        } while (offset != v_read(config, &buf->offset));
 
        return ((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
@@ -324,7 +324,7 @@ void lib_ring_buffer_check_deliver(const struct lib_ring_buffer_config *config,
                 * The subbuffer size is at least 2 bytes (minimum size: 1 page).
                 * This guarantees that old_commit_count + 1 != commit_count.
                 */
-               if (likely(v_cmpxchg(config, &shmp(handle, buf->commit_cold)[idx].cc_sb,
+               if (likely(v_cmpxchg(config, &shmp_index(handle, buf->commit_cold, idx)->cc_sb,
                                         old_commit_count, old_commit_count + 1)
                           == old_commit_count)) {
                        /*
@@ -367,7 +367,7 @@ void lib_ring_buffer_check_deliver(const struct lib_ring_buffer_config *config,
                         */
                        cmm_smp_mb();
                        /* End of exclusive subbuffer access */
-                       v_set(config, &shmp(handle, buf->commit_cold)[idx].cc_sb,
+                       v_set(config, &shmp_index(handle, buf->commit_cold, idx)->cc_sb,
                              commit_count);
                        lib_ring_buffer_vmcore_check_deliver(config, buf,
                                                 commit_count, idx, handle);
@@ -431,9 +431,9 @@ void lib_ring_buffer_write_commit_counter(const struct lib_ring_buffer_config *config,
        if (unlikely(subbuf_offset(offset - commit_count, chan)))
                return;
 
-       commit_seq_old = v_read(config, &shmp(handle, buf->commit_hot)[idx].seq);
+       commit_seq_old = v_read(config, &shmp_index(handle, buf->commit_hot, idx)->seq);
        while ((long) (commit_seq_old - commit_count) < 0)
-               commit_seq_old = v_cmpxchg(config, &shmp(handle, buf->commit_hot)[idx].seq,
+               commit_seq_old = v_cmpxchg(config, &shmp_index(handle, buf->commit_hot, idx)->seq,
                                           commit_seq_old, commit_count);
 }
 
diff --git a/libringbuffer/ring_buffer_backend.c b/libringbuffer/ring_buffer_backend.c
index 18dd1b01073027efc9219bf01d905b1f3049c5a3..816c99cb2b3a2b4c9af29886c4e0322744aec936 100644
@@ -65,7 +65,7 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
                set_shmp(shmp(handle, bufb->array)[i].shmp,
                        zalloc_shm(shmobj,
                                sizeof(struct lib_ring_buffer_backend_pages)));
-               if (!shmp(handle, shmp(handle, bufb->array)[i].shmp))
+               if (!shmp(handle, shmp_index(handle, bufb->array, i)->shmp))
                        goto free_array;
        }
 
@@ -78,7 +78,7 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
                goto free_array;
 
        for (i = 0; i < num_subbuf; i++)
-               shmp(handle, bufb->buf_wsb)[i].id = subbuffer_id(config, 0, 1, i);
+               shmp_index(handle, bufb->buf_wsb, i)->id = subbuffer_id(config, 0, 1, i);
 
        /* Assign read-side subbuffer table */
        if (extra_reader_sb)
@@ -95,10 +95,10 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
                ref.offset = bufb->memory_map._ref.offset;
                ref.offset += i * subbuf_size;
 
-               set_shmp(shmp(handle, shmp(handle, bufb->array)[i].shmp)->p,
+               set_shmp(shmp(handle, shmp_index(handle, bufb->array, i)->shmp)->p,
                         ref);
                if (config->output == RING_BUFFER_MMAP) {
-                       shmp(handle, shmp(handle, bufb->array)[i].shmp)->mmap_offset = mmap_offset;
+                       shmp(handle, shmp_index(handle, bufb->array, i)->shmp)->mmap_offset = mmap_offset;
                        mmap_offset += subbuf_size;
                }
        }
@@ -150,7 +150,7 @@ void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb,
                num_subbuf_alloc++;
 
        for (i = 0; i < chanb->num_subbuf; i++)
-               shmp(handle, bufb->buf_wsb)[i].id = subbuffer_id(config, 0, 1, i);
+               shmp_index(handle, bufb->buf_wsb, i)->id = subbuffer_id(config, 0, 1, i);
        if (chanb->extra_reader_sb)
                bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
                                                num_subbuf_alloc - 1);
@@ -159,9 +159,9 @@ void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb,
 
        for (i = 0; i < num_subbuf_alloc; i++) {
                /* Don't reset mmap_offset */
-               v_set(config, &shmp(handle, shmp(handle, bufb->array)[i].shmp)->records_commit, 0);
-               v_set(config, &shmp(handle, shmp(handle, bufb->array)[i].shmp)->records_unread, 0);
-               shmp(handle, shmp(handle, bufb->array)[i].shmp)->data_size = 0;
+               v_set(config, &shmp(handle, shmp_index(handle, bufb->array, i)->shmp)->records_commit, 0);
+               v_set(config, &shmp(handle, shmp_index(handle, bufb->array, i)->shmp)->records_unread, 0);
+               shmp(handle, shmp_index(handle, bufb->array, i)->shmp)->data_size = 0;
                /* Don't reset backend page and virt addresses */
        }
        /* Don't reset num_pages_per_subbuf, cpu, allocated */
@@ -378,7 +378,7 @@ size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb, size_t offset,
                return 0;
        id = bufb->buf_rsb.id;
        sb_bindex = subbuffer_id_get_index(config, id);
-       rpages = &shmp(handle, bufb->array)[sb_bindex];
+       rpages = shmp_index(handle, bufb->array, sb_bindex);
        /*
         * Underlying layer should never ask for reads across
         * subbuffers.
@@ -386,7 +386,7 @@ size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb, size_t offset,
        CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
        CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
                     && subbuffer_id_is_noref(config, id));
-       memcpy(dest, shmp(handle, shmp(handle, rpages->shmp)->p) + (offset & ~(chanb->subbuf_size - 1)), len);
+       memcpy(dest, shmp_index(handle, shmp(handle, rpages->shmp)->p, offset & ~(chanb->subbuf_size - 1)), len);
        return orig_len;
 }
 
@@ -414,7 +414,7 @@ int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb, size_t offset,
        orig_offset = offset;
        id = bufb->buf_rsb.id;
        sb_bindex = subbuffer_id_get_index(config, id);
-       rpages = &shmp(handle, bufb->array)[sb_bindex];
+       rpages = shmp_index(handle, bufb->array, sb_bindex);
        /*
         * Underlying layer should never ask for reads across
         * subbuffers.
@@ -422,7 +422,7 @@ int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb, size_t offset,
        CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
        CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
                     && subbuffer_id_is_noref(config, id));
-       str = (char *)shmp(handle, shmp(handle, rpages->shmp)->p) + (offset & ~(chanb->subbuf_size - 1));
+       str = shmp_index(handle, shmp(handle, rpages->shmp)->p, offset & ~(chanb->subbuf_size - 1));
        string_len = strnlen(str, len);
        if (dest && len) {
                memcpy(dest, str, string_len);
@@ -453,10 +453,10 @@ void *lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
        offset &= chanb->buf_size - 1;
        id = bufb->buf_rsb.id;
        sb_bindex = subbuffer_id_get_index(config, id);
-       rpages = &shmp(handle, bufb->array)[sb_bindex];
+       rpages = shmp_index(handle, bufb->array, sb_bindex);
        CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
                     && subbuffer_id_is_noref(config, id));
-       return shmp(handle, shmp(handle, rpages->shmp)->p) + (offset & ~(chanb->subbuf_size - 1));
+       return shmp_index(handle, shmp(handle, rpages->shmp)->p, offset & ~(chanb->subbuf_size - 1));
 }
 
 /**
@@ -481,10 +481,10 @@ void *lib_ring_buffer_offset_address(struct lib_ring_buffer_backend *bufb,
 
        offset &= chanb->buf_size - 1;
        sbidx = offset >> chanb->subbuf_size_order;
-       id = shmp(handle, bufb->buf_wsb)[sbidx].id;
+       id = shmp_index(handle, bufb->buf_wsb, sbidx)->id;
        sb_bindex = subbuffer_id_get_index(config, id);
-       rpages = &shmp(handle, bufb->array)[sb_bindex];
+       rpages = shmp_index(handle, bufb->array, sb_bindex);
        CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
                     && subbuffer_id_is_noref(config, id));
-       return shmp(handle, shmp(handle, rpages->shmp)->p) + (offset & ~(chanb->subbuf_size - 1));
+       return shmp_index(handle, shmp(handle, rpages->shmp)->p, offset & ~(chanb->subbuf_size - 1));
 }
diff --git a/libringbuffer/ring_buffer_frontend.c b/libringbuffer/ring_buffer_frontend.c
index 6cd869c38c9e839c77aca6b322eb46fc7f0af3fc..ee205e86fbf0389b06d7bee31f8f59e55cb5db5a 100644
@@ -125,9 +125,9 @@ void lib_ring_buffer_reset(struct lib_ring_buffer *buf,
         */
        v_set(config, &buf->offset, 0);
        for (i = 0; i < chan->backend.num_subbuf; i++) {
-               v_set(config, &shmp(handle, buf->commit_hot)[i].cc, 0);
-               v_set(config, &shmp(handle, buf->commit_hot)[i].seq, 0);
-               v_set(config, &shmp(handle, buf->commit_cold)[i].cc_sb, 0);
+               v_set(config, &shmp_index(handle, buf->commit_hot, i)->cc, 0);
+               v_set(config, &shmp_index(handle, buf->commit_hot, i)->seq, 0);
+               v_set(config, &shmp_index(handle, buf->commit_cold, i)->cc_sb, 0);
        }
        uatomic_set(&buf->consumed, 0);
        uatomic_set(&buf->record_disabled, 0);
@@ -216,10 +216,10 @@ int lib_ring_buffer_create(struct lib_ring_buffer *buf,
         */
        subbuf_header_size = config->cb.subbuffer_header_size();
        v_set(config, &buf->offset, subbuf_header_size);
-       subbuffer_id_clear_noref(config, &shmp(handle, buf->backend.buf_wsb)[0].id);
+       subbuffer_id_clear_noref(config, &shmp_index(handle, buf->backend.buf_wsb, 0)->id);
        tsc = config->cb.ring_buffer_clock_read(shmp(handle, buf->backend.chan));
        config->cb.buffer_begin(buf, tsc, 0, handle);
-       v_add(config, subbuf_header_size, &shmp(handle, buf->commit_hot)[0].cc);
+       v_add(config, subbuf_header_size, &shmp_index(handle, buf->commit_hot, 0)->cc);
 
        if (config->cb.buffer_create) {
                ret = config->cb.buffer_create(buf, priv, cpu, chanb->name, handle);
@@ -715,7 +715,7 @@ retry:
        cmm_smp_rmb();
        consumed_cur = uatomic_read(&buf->consumed);
        consumed_idx = subbuf_index(consumed, chan);
-       commit_count = v_read(config, &shmp(handle, buf->commit_cold)[consumed_idx].cc_sb);
+       commit_count = v_read(config, &shmp_index(handle, buf->commit_cold, consumed_idx)->cc_sb);
        /*
         * Make sure we read the commit count before reading the buffer
         * data and the write offset. Correct consumed offset ordering
@@ -820,9 +820,9 @@ void lib_ring_buffer_put_subbuf(struct lib_ring_buffer *buf,
         */
        read_sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
        v_add(config, v_read(config,
-                            &shmp(handle, shmp(handle, bufb->array)[read_sb_bindex].shmp)->records_unread),
+                            &shmp(handle, shmp_index(handle, bufb->array, read_sb_bindex)->shmp)->records_unread),
              &bufb->records_read);
-       v_set(config, &shmp(handle, shmp(handle, bufb->array)[read_sb_bindex].shmp)->records_unread, 0);
+       v_set(config, &shmp(handle, shmp_index(handle, bufb->array, read_sb_bindex)->shmp)->records_unread, 0);
        CHAN_WARN_ON(chan, config->mode == RING_BUFFER_OVERWRITE
                     && subbuffer_id_is_noref(config, bufb->buf_rsb.id));
        subbuffer_id_set_noref(config, &bufb->buf_rsb.id);
@@ -860,8 +860,8 @@ void lib_ring_buffer_print_subbuffer_errors(struct lib_ring_buffer *buf,
        unsigned long cons_idx, commit_count, commit_count_sb;
 
        cons_idx = subbuf_index(cons_offset, chan);
-       commit_count = v_read(config, &shmp(handle, buf->commit_hot)[cons_idx].cc);
-       commit_count_sb = v_read(config, &shmp(handle, buf->commit_cold)[cons_idx].cc_sb);
+       commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, cons_idx)->cc);
+       commit_count_sb = v_read(config, &shmp_index(handle, buf->commit_cold, cons_idx)->cc_sb);
 
        if (subbuf_offset(commit_count, chan) != 0)
                ERRMSG("ring buffer %s, cpu %d: "
@@ -965,8 +965,8 @@ void lib_ring_buffer_switch_old_start(struct lib_ring_buffer *buf,
         */
        cmm_smp_wmb();
        v_add(config, config->cb.subbuffer_header_size(),
-             &shmp(handle, buf->commit_hot)[oldidx].cc);
-       commit_count = v_read(config, &shmp(handle, buf->commit_hot)[oldidx].cc);
+             &shmp_index(handle, buf->commit_hot, oldidx)->cc);
+       commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, oldidx)->cc);
        /* Check if the written buffer has to be delivered */
        lib_ring_buffer_check_deliver(config, buf, chan, offsets->old,
                                      commit_count, oldidx, handle);
@@ -1005,8 +1005,8 @@ void lib_ring_buffer_switch_old_end(struct lib_ring_buffer *buf,
         * determine that the subbuffer is full.
         */
        cmm_smp_wmb();
-       v_add(config, padding_size, &shmp(handle, buf->commit_hot)[oldidx].cc);
-       commit_count = v_read(config, &shmp(handle, buf->commit_hot)[oldidx].cc);
+       v_add(config, padding_size, &shmp_index(handle, buf->commit_hot, oldidx)->cc);
+       commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, oldidx)->cc);
        lib_ring_buffer_check_deliver(config, buf, chan, offsets->old - 1,
                                      commit_count, oldidx, handle);
        lib_ring_buffer_write_commit_counter(config, buf, chan, oldidx,
@@ -1040,8 +1040,8 @@ void lib_ring_buffer_switch_new_start(struct lib_ring_buffer *buf,
         */
        cmm_smp_wmb();
        v_add(config, config->cb.subbuffer_header_size(),
-             &shmp(handle, buf->commit_hot)[beginidx].cc);
-       commit_count = v_read(config, &shmp(handle, buf->commit_hot)[beginidx].cc);
+             &shmp_index(handle, buf->commit_hot, beginidx)->cc);
+       commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, beginidx)->cc);
        /* Check if the written buffer has to be delivered */
        lib_ring_buffer_check_deliver(config, buf, chan, offsets->begin,
                                      commit_count, beginidx, handle);
@@ -1078,8 +1078,8 @@ void lib_ring_buffer_switch_new_end(struct lib_ring_buffer *buf,
         * determine that the subbuffer is full.
         */
        cmm_smp_wmb();
-       v_add(config, padding_size, &shmp(handle, buf->commit_hot)[endidx].cc);
-       commit_count = v_read(config, &shmp(handle, buf->commit_hot)[endidx].cc);
+       v_add(config, padding_size, &shmp_index(handle, buf->commit_hot, endidx)->cc);
+       commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, endidx)->cc);
        lib_ring_buffer_check_deliver(config, buf, chan, offsets->end - 1,
                                  commit_count, endidx, handle);
        lib_ring_buffer_write_commit_counter(config, buf, chan, endidx,
@@ -1270,7 +1270,7 @@ int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
                  (buf_trunc(offsets->begin, chan)
                   >> chan->backend.num_subbuf_order)
                  - ((unsigned long) v_read(config,
-                                           &shmp(handle, buf->commit_cold)[sb_index].cc_sb)
+                                           &shmp_index(handle, buf->commit_cold, sb_index)->cc_sb)
                     & chan->commit_count_mask);
                if (likely(reserve_commit_diff == 0)) {
                        /* Next subbuffer not being written to. */
diff --git a/libringbuffer/shm.h b/libringbuffer/shm.h
index da466df734c0f7b9a51ad54f3c41fbaf70fe5cb5..f5cc8e69a2b7a62faf9f614dec2489f99ccee96d 100644
  * both the index and offset with known boundaries.
  */
 static inline
-char *_shmp(struct shm_object_table *table, struct shm_ref *ref)
+char *_shmp_offset(struct shm_object_table *table, struct shm_ref *ref,
+                  size_t offset)
 {
        struct shm_object *obj;
-       size_t index, offset;
+       size_t index, ref_offset;
 
        index = (size_t) ref->index;
        if (unlikely(index >= table->allocated_len))
                return NULL;
        obj = &table->objects[index];
-       offset = (size_t) ref->offset;
-       if (unlikely(offset >= obj->memory_map_size))
+       ref_offset = (size_t) ref->offset;
+       ref_offset += offset;
+       if (unlikely(ref_offset >= obj->memory_map_size))
                return NULL;
-       return &obj->memory_map[offset];
+       return &obj->memory_map[ref_offset];
 }
 
-#define shmp(handle, ref)                                              \
+#define shmp_index(handle, ref, offset)                                        \
        ({                                                              \
                __typeof__((ref)._type) ____ptr_ret;                    \
-               ____ptr_ret = (__typeof__(____ptr_ret)) _shmp((handle)->table, &(ref)._ref);    \
+               ____ptr_ret = (__typeof__(____ptr_ret)) _shmp_offset((handle)->table, &(ref)._ref, ((offset) * sizeof(*____ptr_ret)));  \
                ____ptr_ret;                                            \
        })
 
+#define shmp(handle, ref)      shmp_index(handle, ref, 0)
+
 static inline
 void _set_shmp(struct shm_ref *ref, struct shm_ref src)
 {
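
As a closing note, here is a minimal standalone sketch of the bounds-check
ordering this commit establishes (struct and function names are hypothetical
stand-ins; only _shmp_offset(), shmp_index(), and shmp() above come from the
patch). shmp_index() scales the index by the element size, adds it to the
base reference offset, and only then compares the sum against the size of
the mapped object:

	#include <stddef.h>

	/* Hypothetical stand-in for a shm object table entry. */
	struct object {
		char *memory_map;
		size_t memory_map_size;
	};

	/* Mirrors _shmp_offset(): add the element offset first, then
	 * range-check the sum, so an out-of-range index yields NULL
	 * instead of a pointer past the end of the mapping. */
	static char *checked_index(struct object *obj, size_t base_offset,
				   size_t elem_size, size_t idx)
	{
		size_t offset = base_offset + idx * elem_size;

		if (offset >= obj->memory_map_size)
			return NULL;
		return &obj->memory_map[offset];
	}

With shmp() now defined as shmp_index(handle, ref, 0), the zero-offset case
keeps its old behaviour while every indexed access gains the stronger check.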