kmalloc, kfree, etc => malloc, free, etc
[ust.git] / ustd / lowlevel.c
index 3509bf58799ab932d27d0618dcae002477ab5084..5c145fa3f9e9ffae7cb947ef9ab76643eada4290 100644 (file)
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA
  */
 
+#include <stdlib.h>
 #include <assert.h>
+#include <byteswap.h>
 
+#include "buffers.h"
 #include "tracer.h"
 #include "ustd.h"
-#include "localerr.h"
+#include "usterr.h"
 
 /* Round an offset down to the start of the buffer cycle it lies in,
  * i.e. clear the low-order bits covering one full buffer
  * (subbuf_size * n_subbufs, assumed to be a power of 2). The result
  * counts whole buffer wraps, in bytes. */
 #define USTD_BUFFER_TRUNC(offset, bufinfo) \
        ((offset) & (~(((bufinfo)->subbuf_size*(bufinfo)->n_subbufs)-1)))
 
+/* Magic number found in a subbuffer header, in native byte order and in
+ * byte-swapped (opposite-endianness) form. Used to detect whether the
+ * trace data was produced on a machine of the opposite endianness. */
+#define LTT_MAGIC_NUMBER 0x00D6B7ED
+#define LTT_REV_MAGIC_NUMBER 0xEDB7D600
+
+/* Returns the on-disk size of a subbuffer, as recorded in the sb_size
+ * field of its header. This is the number of bytes that will need to
+ * be written to disk for this subbuffer.
+ *
+ * NOTE(review): despite the function's name, this reads sb_size (the
+ * padded subbuffer size), not data_size — confirm this is intended.
+ *
+ * @subbuf: pointer to the beginning of the subbuffer (the
+ *          beginning of its header)
+ *
+ * Returns (size_t)-1 if the header's magic number is not recognized
+ * in either byte order.
+ */
+
+size_t subbuffer_data_size(void *subbuf)
+{
+       struct ltt_subbuffer_header *header = subbuf;
+       int reverse;
+       u32 data_size;
+
+       /* The magic number tells us whether the subbuffer was written
+        * with our endianness or the opposite one. */
+       if(header->magic_number == LTT_MAGIC_NUMBER) {
+               reverse = 0;
+       }
+       else if(header->magic_number == LTT_REV_MAGIC_NUMBER) {
+               reverse = 1;
+       }
+       else {
+               /* Unknown magic: corrupt header or not a subbuffer.
+                * -1 converts to SIZE_MAX in the size_t return type. */
+               return -1;
+       }
+
+       data_size = header->sb_size;
+       if(reverse)
+               data_size = bswap_32(data_size);
+
+       return data_size;
+}
+
+
 void finish_consuming_dead_subbuffer(struct buffer_info *buf)
 {
-       struct ltt_channel_buf_struct *ltt_buf = buf->bufstruct_mem;
+       struct ust_buffer *ustbuf = buf->bufstruct_mem;
 
-       long write_offset = local_read(&ltt_buf->offset);
-       long consumed_offset = atomic_long_read(&ltt_buf->consumed);
+       long write_offset = uatomic_read(&ustbuf->offset);
+       long consumed_offset = uatomic_read(&ustbuf->consumed);
 
        long i_subbuf;
 
-       DBG("processing died buffer");
-       DBG("consumed offset is %ld", consumed_offset);
-       DBG("write offset is %ld", write_offset);
+       DBG("processing dead buffer (%s)", buf->name);
+       DBG("consumed offset is %ld (%s)", consumed_offset, buf->name);
+       DBG("write offset is %ld (%s)", write_offset, buf->name);
 
        /* First subbuf that we need to consume now. It is not modulo'd.
         * Consumed_offset is the next byte to consume.  */
@@ -55,11 +92,11 @@ void finish_consuming_dead_subbuffer(struct buffer_info *buf)
        }
 
        /* Iterate on subbuffers to recover. */
-       for(i_subbuf=first_subbuf; ; i_subbuf++, i_subbuf %= buf->n_subbufs) {
+       for(i_subbuf = first_subbuf % buf->n_subbufs; ; i_subbuf++, i_subbuf %= buf->n_subbufs) {
                void *tmp;
                /* commit_seq is the offset in the buffer of the end of the last sequential commit.
                 * Bytes beyond this limit cannot be recovered. This is a free-running counter. */
-               long commit_seq = local_read(&ltt_buf->commit_seq[i_subbuf]);
+               long commit_seq = uatomic_read(&ustbuf->commit_seq[i_subbuf]);
 
                unsigned long valid_length = buf->subbuf_size;
                long n_subbufs_order = get_count_order(buf->n_subbufs);
@@ -67,6 +104,8 @@ void finish_consuming_dead_subbuffer(struct buffer_info *buf)
 
                struct ltt_subbuffer_header *header = (struct ltt_subbuffer_header *)((char *)buf->mem+i_subbuf*buf->subbuf_size);
 
+               int pad_size;
+
                if((commit_seq & commit_seq_mask) == 0) {
                        /* There is nothing to do. */
                        /* FIXME: is this needed? */
@@ -74,21 +113,26 @@ void finish_consuming_dead_subbuffer(struct buffer_info *buf)
                }
 
                /* Check if subbuf was fully written. This is from Mathieu's algorithm/paper. */
+               /* FIXME: not sure data_size = 0xffffffff when the buffer is not full. It might
+                * take the value of the header size initially */
                if (((commit_seq - buf->subbuf_size) & commit_seq_mask)
-                   - (USTD_BUFFER_TRUNC(consumed_offset, buf) >> n_subbufs_order)
-                   == 0) {
-                       /* If it was, we only check the lost_size. This is the lost padding at the end of
-                        * the subbuffer. */
-                       valid_length = (unsigned long)buf->subbuf_size - header->lost_size;
+                   - (USTD_BUFFER_TRUNC(consumed_offset, buf) >> n_subbufs_order) == 0
+                    && header->data_size != 0xffffffff && header->sb_size != 0xffffffff) {
+                       /* If it was, we only check the data_size. This is the amount of valid data at
+                        * the beginning of the subbuffer. */
+                       valid_length = header->data_size;
+                       DBG("writing full subbuffer (%d) with valid_length = %ld", i_subbuf, valid_length);
                }
                else {
-                       /* If the subbuffer was not fully written, then we don't check lost_size because
+                       /* If the subbuffer was not fully written, then we don't check data_size because
                         * it hasn't been written yet. Instead we check commit_seq and use it to choose
-                        * a value for lost_size. The viewer will need this value when parsing.
+                        * a value for data_size. The viewer will need this value when parsing.
                         */
 
                        valid_length = commit_seq & (buf->subbuf_size-1);
-                       header->lost_size = buf->subbuf_size-valid_length;
+                       DBG("writing unfull subbuffer (%d) with valid_length = %ld", i_subbuf, valid_length);
+                       header->data_size = valid_length;
+                       header->sb_size = PAGE_ALIGN(valid_length);
                        assert(i_subbuf == (last_subbuf % buf->n_subbufs));
                }
 
@@ -96,10 +140,13 @@ void finish_consuming_dead_subbuffer(struct buffer_info *buf)
                patient_write(buf->file_fd, buf->mem + i_subbuf * buf->subbuf_size, valid_length);
 
                /* pad with empty bytes */
-               tmp = malloc(buf->subbuf_size-valid_length);
-               memset(tmp, 0, buf->subbuf_size-valid_length);
-               patient_write(buf->file_fd, tmp, buf->subbuf_size-valid_length);
-               free(tmp);
+               pad_size = PAGE_ALIGN(valid_length)-valid_length;
+               if(pad_size) {
+                       tmp = malloc(pad_size);
+                       memset(tmp, 0, pad_size);
+                       patient_write(buf->file_fd, tmp, pad_size);
+                       free(tmp);
+               }
 
                if(i_subbuf == last_subbuf % buf->n_subbufs)
                        break;
This page took 0.024051 seconds and 4 git commands to generate.