#include <ltt/ltt-types.h>
#include <ltt/marker.h>
+#define DEFAULT_N_BLOCKS 32
+
/* from marker.c */
extern long marker_update_fields_offsets(struct marker_info *info, const char *data);
#define PAGE_MASK (~(page_size-1))
#define PAGE_ALIGN(addr) (((addr)+page_size-1)&PAGE_MASK)
-LttTrace *father_trace = NULL;
-
/* set the offset of the fields belonging to the event,
need the information of the archecture */
//void set_fields_offsets(LttTracefile *tf, LttEventType *event_type);
break;
case 2:
switch(header->minor_version) {
- case 3:
+ case 5:
{
- struct ltt_subbuffer_header_2_3 *vheader = header;
+ struct ltt_subbuffer_header_2_5 *vheader = header;
tf->buffer_header_size = ltt_subbuffer_header_size();
tf->tscbits = 27;
tf->eventbits = 5;
&vheader->start_freq);
t->freq_scale = ltt_get_uint32(LTT_GET_BO(tf),
&vheader->freq_scale);
- if(father_trace) {
- t->start_freq = father_trace->start_freq;
- t->freq_scale = father_trace->freq_scale;
- } else {
- father_trace = t;
- }
t->start_tsc = ltt_get_uint64(LTT_GET_BO(tf),
&vheader->cycle_count_begin);
t->start_monotonic = 0;
&vheader->start_time_usec);
t->start_time.tv_nsec *= 1000; /* microsec to nanosec */
- t->start_time_from_tsc = ltt_time_from_uint64(
- (double)t->start_tsc
- * 1000000000.0 * tf->trace->freq_scale
- / (double)t->start_freq);
+ t->start_time_from_tsc =
+ ltt_time_from_uint64(tsc_to_uint64(t->freq_scale,
+ t->start_freq, t->start_tsc));
}
}
break;
return 0;
}
+/*
+ * Look up the file offset and size of sub-buffer block_num, using the
+ * per-tracefile block index built by ltt_trace_create_block_index().
+ *
+ * The index stores only start offsets, so the size of each block is
+ * derived from the next block's offset; the last block extends to the
+ * end of the file.
+ *
+ * Returns 0 on success, -1 if block_num is out of range.
+ */
+int get_block_offset_size(LttTracefile *tf, guint block_num,
+		uint64_t *offset, uint32_t *size)
+{
+	uint64_t offa, offb;
+
+	if (unlikely(block_num >= tf->num_blocks))
+		return -1;
+
+	offa = g_array_index(tf->buf_index, uint64_t, block_num);
+	if (likely(block_num < tf->num_blocks - 1))
+		offb = g_array_index(tf->buf_index, uint64_t, block_num + 1);
+	else
+		offb = tf->file_size;
+	*offset = offa;
+	*size = offb - offa;
+	return 0;
+}
+
+/*
+ * Build tf->buf_index, an array of the file offsets at which each
+ * sub-buffer starts, by walking the whole file: each sub-buffer header
+ * is mmap()ed just long enough to read its sb_size field, which gives
+ * the offset of the next sub-buffer.  This allows variable-size
+ * sub-buffers.  Also sets tf->num_blocks.
+ *
+ * Returns 0 on success, -1 on mmap/munmap failure (errno set).
+ *
+ * NOTE(review): a corrupted header with sb_size == 0 would make this
+ * loop forever — TODO consider validating sb_size > 0 before advancing.
+ * NOTE(review): mmap() requires a page-aligned file offset, so this
+ * assumes every sub-buffer starts on a page boundary — confirm against
+ * the trace writer.
+ */
+int ltt_trace_create_block_index(LttTracefile *tf)
+{
+	int page_size = getpagesize();
+	uint64_t offset = 0;
+	unsigned long i = 0;
+	/* Map whole pages, enough to cover one sub-buffer header. */
+	unsigned int header_map_size = PAGE_ALIGN(ltt_subbuffer_header_size());
+
+	tf->buf_index = g_array_sized_new(FALSE, TRUE, sizeof(uint64_t),
+			DEFAULT_N_BLOCKS);
+
+	g_assert(tf->buf_index->len == i);
+	while (offset < tf->file_size) {
+		ltt_subbuffer_header_t *header;
+		uint64_t *off;
+
+		/* Record the start offset of sub-buffer i in the index. */
+		tf->buf_index = g_array_set_size(tf->buf_index, i + 1);
+		off = &g_array_index(tf->buf_index, uint64_t, i);
+		*off = offset;
+
+		/* map block header */
+		header = mmap(0, header_map_size, PROT_READ,
+			MAP_PRIVATE, tf->fd, (off_t)offset);
+		if(header == MAP_FAILED) {
+			perror("Error in allocating memory for buffer of tracefile");
+			return -1;
+		}
+
+		/* read len, offset += len */
+		offset += ltt_get_uint32(LTT_GET_BO(tf), &header->sb_size);
+
+		/* unmap block header */
+		if(munmap(header, header_map_size)) {
+			g_warning("unmap size : %u\n", header_map_size);
+			perror("munmap error");
+			return -1;
+		}
+		++i;
+	}
+	tf->num_blocks = i;
+
+	return 0;
+}
/*****************************************************************************
*Function name
tf->long_name = g_quark_from_string(fileName);
tf->trace = t;
tf->fd = open(fileName, O_RDONLY);
+ tf->buf_index = NULL;
if(tf->fd < 0){
g_warning("Unable to open input data file %s\n", fileName);
goto end;
//store the size of the file
tf->file_size = lTDFStat.st_size;
- tf->buf_size = ltt_get_uint32(LTT_GET_BO(tf), &header->buf_size);
- tf->num_blocks = tf->file_size / tf->buf_size;
tf->events_lost = 0;
tf->subbuf_corrupt = 0;
}
tf->buffer.head = NULL;
+ /* Create block index */
+ ltt_trace_create_block_index(tf);
+
//read the first block
if(map_block(tf,0)) {
perror("Cannot map block for tracefile");
close_file:
close(tf->fd);
end:
+ if (tf->buf_index)
+ g_array_free(tf->buf_index, TRUE);
return -1;
}
int page_size = getpagesize();
if(t->buffer.head != NULL)
- if(munmap(t->buffer.head, PAGE_ALIGN(t->buf_size))) {
+ if(munmap(t->buffer.head, PAGE_ALIGN(t->buffer.size))) {
g_warning("unmap size : %u\n",
- PAGE_ALIGN(t->buf_size));
+ PAGE_ALIGN(t->buffer.size));
perror("munmap error");
g_assert(0);
}
close(t->fd);
+ if (t->buf_index)
+ g_array_free(t->buf_index, TRUE);
}
/****************************************************************************
get_absolute_pathname(pathname, abs_path);
t->pathname = g_quark_from_string(abs_path);
- t->start_tsc = 0;
- t->freq_scale = 1;
- t->start_freq = 1;
- t->start_time_from_tsc = ltt_time_zero;
-
g_datalist_init(&t->tracefiles);
/* Test to see if it looks like a trace */
closedir(dir);
/* Open all the tracefiles */
+ t->start_freq= 0;
if(open_tracefiles(t, abs_path, "")) {
g_warning("Error opening tracefile %s", abs_path);
goto find_error;
g_assert(!ret);
t->num_cpu = group->len;
+ t->drift = 1.;
+ t->offset = 0.;
//ret = allocate_marker_data(t);
//if (ret)
*end = ltt_time_zero;
} else
*end = tf->buffer.end.timestamp;
+
+ g_assert(end->tv_sec <= G_MAXUINT);
}
struct tracefile_time_span_get_args {
return 1;
}
+/*
+ * Convert a value in "TSC scale" to a value in nanoseconds:
+ *   ns = tsc * NANOSECONDS_PER_SECOND * freq_scale / start_freq
+ *
+ * The computation is carried out in double precision, so results may be
+ * rounded for very large tsc values (above 2^53 the double mantissa can
+ * no longer represent every integer exactly).
+ */
+guint64 tsc_to_uint64(guint32 freq_scale, uint64_t start_freq, guint64 tsc)
+{
+	return (double) tsc * NANOSECONDS_PER_SECOND * freq_scale / start_freq;
+}
+
/* Given a TSC value, return the LttTime (seconds,nanoseconds) it
* corresponds to.
*/
-
LttTime ltt_interpolate_time_from_tsc(LttTracefile *tf, guint64 tsc)
{
-	LttTime time;
-
-	if(tsc > tf->trace->start_tsc) {
-		time = ltt_time_from_uint64(
-				(double)(tsc - tf->trace->start_tsc)
-				* 1000000000.0 * tf->trace->freq_scale
-				/ (double)tf->trace->start_freq);
-		time = ltt_time_add(tf->trace->start_time_from_tsc, time);
-	} else {
-		time = ltt_time_from_uint64(
-				(double)(tf->trace->start_tsc - tsc)
-				* 1000000000.0 * tf->trace->freq_scale
-				/ (double)tf->trace->start_freq);
-		time = ltt_time_sub(tf->trace->start_time_from_tsc, time);
-	}
-	return time;
+	/* Apply the trace's linear clock correction (drift * tsc + offset)
+	 * before scaling the corrected TSC to nanoseconds. */
+	return ltt_time_from_uint64(tsc_to_uint64(tf->trace->freq_scale,
+				tf->trace->start_freq, tf->trace->drift * tsc +
+				tf->trace->offset));
}
/* Calculate the real event time based on the buffer boundaries */
g_printf("Event header (tracefile %s offset %" PRIx64 "):\n",
g_quark_to_string(ev->tracefile->long_name),
- ((uint64_t)ev->tracefile->buffer.index * ev->tracefile->buf_size)
- + (long)start_pos - (long)ev->tracefile->buffer.head);
+ (uint64_t)ev->tracefile->buffer.offset +
+ (long)start_pos - (long)ev->tracefile->buffer.head);
while (offset < (long)end_pos - (long)start_pos) {
g_printf("%8lx", (long)start_pos - (long)ev->tracefile->buffer.head + offset);
{
int page_size = getpagesize();
ltt_subbuffer_header_t *header;
+ uint64_t offset;
+ uint32_t size;
+ int ret;
g_assert(block_num < tf->num_blocks);
if(tf->buffer.head != NULL) {
- if(munmap(tf->buffer.head, PAGE_ALIGN(tf->buf_size))) {
+ if(munmap(tf->buffer.head, PAGE_ALIGN(tf->buffer.size))) {
g_warning("unmap size : %u\n",
- PAGE_ALIGN(tf->buf_size));
+ PAGE_ALIGN(tf->buffer.size));
perror("munmap error");
g_assert(0);
}
}
-
+
+ ret = get_block_offset_size(tf, block_num, &offset, &size);
+ g_assert(!ret);
+
+ g_debug("Map block %u, offset %llu, size %u\n", block_num,
+ (unsigned long long)offset, (unsigned int)size);
+
/* Multiple of pages aligned head */
- tf->buffer.head = mmap(0,
- PAGE_ALIGN(tf->buf_size),
- PROT_READ, MAP_PRIVATE, tf->fd,
- PAGE_ALIGN((off_t)tf->buf_size * (off_t)block_num));
+ tf->buffer.head = mmap(0, (size_t)size, PROT_READ, MAP_PRIVATE,
+ tf->fd, (off_t)offset);
if(tf->buffer.head == MAP_FAILED) {
perror("Error in allocating memory for buffer of tracefile");
goto map_error;
}
g_assert( ( (gulong)tf->buffer.head&(8-1) ) == 0); // make sure it's aligned.
-
tf->buffer.index = block_num;
tf->buffer.begin.cycle_count = ltt_get_uint64(LTT_GET_BO(tf),
&header->cycle_count_begin);
- tf->buffer.begin.freq = tf->trace->start_freq;
-
- tf->buffer.begin.timestamp = ltt_interpolate_time_from_tsc(tf,
- tf->buffer.begin.cycle_count);
tf->buffer.end.cycle_count = ltt_get_uint64(LTT_GET_BO(tf),
&header->cycle_count_end);
- tf->buffer.end.freq = tf->trace->start_freq;
-
- tf->buffer.lost_size = ltt_get_uint32(LTT_GET_BO(tf),
- &header->lost_size);
- tf->buffer.end.timestamp = ltt_interpolate_time_from_tsc(tf,
- tf->buffer.end.cycle_count);
+ tf->buffer.offset = offset;
+ tf->buffer.size = ltt_get_uint32(LTT_GET_BO(tf),
+ &header->sb_size);
+ tf->buffer.data_size = ltt_get_uint32(LTT_GET_BO(tf),
+ &header->data_size);
tf->buffer.tsc = tf->buffer.begin.cycle_count;
tf->event.tsc = tf->buffer.tsc;
tf->buffer.freq = tf->buffer.begin.freq;
- /* FIXME
- * eventually support variable buffer size : will need a partial pre-read of
- * the headers to create an index when we open the trace... eventually. */
- g_assert(tf->buf_size == ltt_get_uint32(LTT_GET_BO(tf),
- &header->buf_size));
-
+ g_assert(size == tf->buffer.size);
+ g_assert(tf->buffer.data_size <= tf->buffer.size);
+
+ if (tf->trace->start_freq)
+ {
+ tf->buffer.begin.freq = tf->trace->start_freq;
+ tf->buffer.begin.timestamp = ltt_interpolate_time_from_tsc(tf,
+ tf->buffer.begin.cycle_count);
+ tf->buffer.end.freq = tf->trace->start_freq;
+ tf->buffer.end.timestamp = ltt_interpolate_time_from_tsc(tf,
+ tf->buffer.end.cycle_count);
+ }
+
/* Make the current event point to the beginning of the buffer :
* it means that the event read must get the first event. */
tf->event.tracefile = tf;
g_printf("Event data (tracefile %s offset %" PRIx64 "):\n",
g_quark_to_string(ev->tracefile->long_name),
- ((uint64_t)ev->tracefile->buffer.index * ev->tracefile->buf_size)
+ (uint64_t)ev->tracefile->buffer.offset
+ (long)ev->data - (long)ev->tracefile->buffer.head);
while (offset < max(ev->event_size, ev->data_size)) {
if(tf->event.offset == 0) {
tf->event.offset += tf->buffer_header_size;
- if(tf->event.offset == tf->buf_size - tf->buffer.lost_size) {
+ if(tf->event.offset == tf->buffer.data_size) {
ret = ERANGE;
}
goto found;
tf->event.offset = pos - tf->buffer.head;
- if(tf->event.offset == tf->buf_size - tf->buffer.lost_size) {
+ if(tf->event.offset == tf->buffer.data_size) {
ret = ERANGE;
goto found;
}
- g_assert(tf->event.offset < tf->buf_size - tf->buffer.lost_size);
+ g_assert(tf->event.offset < tf->buffer.data_size);
found:
return ret;