convert to uatomic ops
diff --git a/ustd/lowlevel.c b/ustd/lowlevel.c
index 65657490d6044d13ce3aa31aa1797d7637aab3ef..57a9f6631c1ba53ce035a17237a4718b2e7bd802 100644
--- a/ustd/lowlevel.c
+++ b/ustd/lowlevel.c
@@ -65,14 +65,14 @@ void finish_consuming_dead_subbuffer(struct buffer_info *buf)
 {
        struct ust_buffer *ustbuf = buf->bufstruct_mem;
 
-       long write_offset = local_read(&ustbuf->offset);
-       long consumed_offset = atomic_long_read(&ustbuf->consumed);
+       long write_offset = uatomic_read(&ustbuf->offset);
+       long consumed_offset = uatomic_read(&ustbuf->consumed);
 
        long i_subbuf;
 
-       DBG("processing died buffer");
-       DBG("consumed offset is %ld", consumed_offset);
-       DBG("write offset is %ld", write_offset);
+       DBG("processing dead buffer (%s)", buf->name);
+       DBG("consumed offset is %ld (%s)", consumed_offset, buf->name);
+       DBG("write offset is %ld (%s)", write_offset, buf->name);
 
        /* First subbuf that we need to consume now. It is not modulo'd.
         * Consumed_offset is the next byte to consume.  */
@@ -95,7 +95,7 @@ void finish_consuming_dead_subbuffer(struct buffer_info *buf)
                void *tmp;
                /* commit_seq is the offset in the buffer of the end of the last sequential commit.
                 * Bytes beyond this limit cannot be recovered. This is a free-running counter. */
-               long commit_seq = local_read(&ustbuf->commit_seq[i_subbuf]);
+               long commit_seq = uatomic_read(&ustbuf->commit_seq[i_subbuf]);
 
                unsigned long valid_length = buf->subbuf_size;
                long n_subbufs_order = get_count_order(buf->n_subbufs);
@@ -112,12 +112,15 @@ void finish_consuming_dead_subbuffer(struct buffer_info *buf)
                }
 
                /* Check if subbuf was fully written. This is from Mathieu's algorithm/paper. */
+               /* FIXME: not sure data_size is always 0xffffffff when the subbuffer is not
+                * full; it might initially hold the header size instead. */
                if (((commit_seq - buf->subbuf_size) & commit_seq_mask)
                    - (USTD_BUFFER_TRUNC(consumed_offset, buf) >> n_subbufs_order) == 0
                     && header->data_size != 0xffffffff && header->sb_size != 0xffffffff) {
                        /* If it was, we only check the data_size. This is the amount of valid data at
                         * the beginning of the subbuffer. */
                        valid_length = header->data_size;
+                       DBG("writing full subbuffer (%ld) with valid_length = %lu", i_subbuf, valid_length);
                }
                else {
                        /* If the subbuffer was not fully written, then we don't check data_size because
@@ -126,6 +129,7 @@ void finish_consuming_dead_subbuffer(struct buffer_info *buf)
                         */
 
                        valid_length = commit_seq & (buf->subbuf_size-1);
+                       DBG("writing partial subbuffer (%ld) with valid_length = %lu", i_subbuf, valid_length);
                        header->data_size = valid_length;
                        header->sb_size = PAGE_ALIGN(valid_length);
                        assert(i_subbuf == (last_subbuf % buf->n_subbufs));
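
For context, a minimal sketch of what this conversion does, assuming liburcu's
urcu/uatomic.h; the struct below is an illustrative stand-in, not the real
struct ust_buffer layout:

	#include <urcu/uatomic.h>

	/* Illustrative stand-in for the tracer buffer bookkeeping fields. */
	struct example_buffer {
		long offset;          /* free-running write head */
		long consumed;        /* next byte to consume (free-running) */
		long commit_seq[4];   /* per-subbuffer sequential commit counters */
	};

	static void read_counters(struct example_buffer *b)
	{
		/* local_read(&b->offset)          -> uatomic_read(&b->offset) */
		long write_offset = uatomic_read(&b->offset);
		/* atomic_long_read(&b->consumed)  -> uatomic_read(&b->consumed) */
		long consumed_offset = uatomic_read(&b->consumed);
		/* local_read(&b->commit_seq[i])   -> uatomic_read(&b->commit_seq[i]) */
		long commit_seq = uatomic_read(&b->commit_seq[0]);

		(void) write_offset;
		(void) consumed_offset;
		(void) commit_seq;
	}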
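
The valid_length computation for a subbuffer that was not fully committed can be
illustrated with a hedged, standalone sketch; SUBBUF_SIZE and the example value
are assumptions, and the in-tree check additionally compares the masked
commit_seq against the consumed position before trusting header->data_size:

	#define SUBBUF_SIZE 4096UL   /* assumed power-of-two subbuffer size */

	/* Only the bytes up to the last sequential commit are recoverable, and
	 * since commit_seq is a free-running counter, its low-order bits give
	 * the number of valid bytes inside the current subbuffer. */
	static unsigned long partial_valid_length(long commit_seq)
	{
		return commit_seq & (SUBBUF_SIZE - 1);
	}

	/* e.g. commit_seq == 3 * 4096 + 100 means 100 valid bytes here. */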