//store the size of the file
tf->file_size = lTDFStat.st_size;
- tf->block_size = ltt_get_uint32(LTT_GET_BO(tf), &header->buf_size);
- tf->num_blocks = tf->file_size / tf->block_size;
-
- munmap(tf->buffer.head, PAGE_ALIGN(sizeof(struct ltt_block_start_header)));
+ tf->buf_size = ltt_get_uint32(LTT_GET_BO(tf), &header->buf_size);
+ tf->num_blocks = tf->file_size / tf->buf_size;
+
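+  /* Release the initial, header-sized mapping (only the block start header
+   * was mapped, just enough to read buf_size); a munmap failure here is
+   * treated as fatal. */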
+ if(munmap(tf->buffer.head,
+ PAGE_ALIGN(sizeof(struct ltt_block_start_header)))) {
+    g_warning("unmap size : %zu",
+        (size_t)PAGE_ALIGN(sizeof(struct ltt_block_start_header)));
+ perror("munmap error");
+ g_assert(0);
+ }
tf->buffer.head = NULL;
//read the first block
/* Error */
unmap_file:
- munmap(tf->buffer.head, PAGE_ALIGN(sizeof(struct ltt_block_start_header)));
+ if(munmap(tf->buffer.head,
+ PAGE_ALIGN(sizeof(struct ltt_block_start_header)))) {
+    g_warning("unmap size : %zu",
+        (size_t)PAGE_ALIGN(sizeof(struct ltt_block_start_header)));
+ perror("munmap error");
+ g_assert(0);
+ }
close_file:
close(tf->fd);
end:
void ltt_tracefile_close(LttTracefile *t)
{
+ int page_size = getpagesize();
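+  /* needed by PAGE_ALIGN below, whose expansion refers to a page_size
+   * variable in scope */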
+
-  if(t->buffer.head != NULL)
-    munmap(t->buffer.head, t->buf_size);
+  if(t->buffer.head != NULL) {
+    if(munmap(t->buffer.head, PAGE_ALIGN(t->buf_size))) {
+      g_warning("unmap size : %zu",
+          (size_t)PAGE_ALIGN(t->buf_size));
+      perror("munmap error");
+      g_assert(0);
+    }
+  }
+
close(t->fd);
}
strncpy(char_name, raw_name, underscore_pos);
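+  /* strncpy() does not null-terminate when the source is at least
+   * underscore_pos characters long, so terminate explicitly. */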
+ char_name[underscore_pos] = '\0';
+
*name = g_quark_from_string(char_name);
*num = cpu_num;
 * A tracefile group is simply an array where all the per-cpu tracefiles sit.
*/
-static int open_tracefiles(LttTrace *trace, char *root_path,
- char *relative_path)
+static int open_tracefiles(LttTrace *trace, gchar *root_path,
+ gchar *relative_path)
{
DIR *dir = opendir(root_path);
struct dirent *entry;
struct stat stat_buf;
int ret;
- char path[PATH_MAX];
+ gchar path[PATH_MAX];
int path_len;
- char *path_ptr;
+ gchar *path_ptr;
int rel_path_len;
- char rel_path[PATH_MAX];
- char *rel_path_ptr;
+ gchar rel_path[PATH_MAX];
+ gchar *rel_path_ptr;
LttTracefile tmp_tf;
if(dir == NULL) {
g_assert(tf->trace->has_tsc);
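+  /* Interpolate: cycles elapsed since the block start, scaled by the
+   * per-cycle nanosecond factor, added to the block start timestamp. */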
time = ltt_time_from_uint64(
- (guint64)tf->buffer.tsc*tf->buffer.nsecs_per_cycle);
+ (guint64)(tf->buffer.tsc - tf->buffer.begin.cycle_count) *
+ tf->buffer.nsecs_per_cycle);
time = ltt_time_add(tf->buffer.begin.timestamp, time);
return time;
*Return value
*
* Returns 0 if an event can be used in tf->event.
- * Returns ERANGE on end of trace. The event in tf->event still can be used.
+ * Returns ERANGE on end of trace. The event in tf->event can still be used
+ * (if the last block was not empty).
* Returns EPERM on error.
*
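+ *
+ * A typical caller loop might look like this (a sketch only; the reader
+ * function name and consume() are assumptions, not shown in this hunk):
+ *
+ *   int ret;
+ *   while((ret = ltt_tracefile_read(tf)) == 0)
+ *     consume(&tf->event);
+ *   if(ret == EPERM)
+ *     goto error;
+ *
+ * where ERANGE only marks the end of the trace.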
 * This function makes the tracefile event structure point to the event
g_assert(block_num < tf->num_blocks);
- if(tf->buffer.head != NULL)
- munmap(tf->buffer.head, PAGE_ALIGN(tf->buf_size));
+ if(tf->buffer.head != NULL) {
+ if(munmap(tf->buffer.head, PAGE_ALIGN(tf->buf_size))) {
+      g_warning("unmap size : %zu",
+          (size_t)PAGE_ALIGN(tf->buf_size));
+ perror("munmap error");
+ g_assert(0);
+ }
+ }
+
/* Multiple of pages aligned head */
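+  /* Block block_num starts at byte offset block_num * buf_size; the off_t
+   * casts keep the offset from overflowing 32 bits on large traces. */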
tf->buffer.head = mmap(0,
- PAGE_ALIGN(tf->block_size),
+ PAGE_ALIGN(tf->buf_size),
PROT_READ, MAP_PRIVATE, tf->fd,
- PAGE_ALIGN((off_t)tf->block_size * (off_t)block_num));
+ PAGE_ALIGN((off_t)tf->buf_size * (off_t)block_num));
if(tf->buffer.head == MAP_FAILED) {
perror("Error in allocating memory for buffer of tracefile");
tf->buffer.begin.timestamp = ltt_get_time(LTT_GET_BO(tf),
&header->begin.timestamp);
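+  /* tv_nsec is stored in microseconds in the header; scale to nanoseconds. */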
tf->buffer.begin.timestamp.tv_nsec *= NSEC_PER_USEC;
+ g_warning("block %u begin : %lu.%lu", block_num,
+ tf->buffer.begin.timestamp.tv_sec, tf->buffer.begin.timestamp.tv_nsec);
tf->buffer.begin.cycle_count = ltt_get_uint64(LTT_GET_BO(tf),
&header->begin.cycle_count);
tf->buffer.end.timestamp = ltt_get_time(LTT_GET_BO(tf),
&header->end.timestamp);
tf->buffer.end.timestamp.tv_nsec *= NSEC_PER_USEC;
+ g_warning("block %u end : %lu.%lu", block_num,
+ tf->buffer.end.timestamp.tv_sec, tf->buffer.end.timestamp.tv_nsec);
tf->buffer.end.cycle_count = ltt_get_uint64(LTT_GET_BO(tf),
&header->end.cycle_count);
tf->buffer.lost_size = ltt_get_uint32(LTT_GET_BO(tf),
/* FIXME
* eventually support variable buffer size : will need a partial pre-read of
* the headers to create an index when we open the trace... eventually. */
- g_assert(tf->block_size == ltt_get_uint32(LTT_GET_BO(tf),
+ g_assert(tf->buf_size == ltt_get_uint32(LTT_GET_BO(tf),
&header->buf_size));
/* Now that the buffer is mapped, calculate the time interpolation for the
else
size = 0;
- g_debug("Event root field : f.e %hhu.%hhu size %lu", tf->event.facility_id,
+ g_debug("Event root field : f.e %hhu.%hhu size %zd",
+ tf->event.facility_id,
tf->event.event_id, size);
}
if(tf->event.offset == 0) {
tf->event.offset += sizeof(struct ltt_block_start_header);
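+    /* lost_size is the unused tail of the block: an offset of
+     * buf_size - lost_size means there are no more events in this block. */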
- if(tf->event.offset == tf->block_size - tf->buffer.lost_size) {
+ if(tf->event.offset == tf->buf_size - tf->buffer.lost_size) {
ret = ERANGE;
}
goto found;
tf->event.offset = pos - tf->buffer.head;
- if(tf->event.offset == tf->block_size - tf->buffer.lost_size) {
+ if(tf->event.offset == tf->buf_size - tf->buffer.lost_size) {
ret = ERANGE;
goto found;
}
  LttCycleCount lBufTotalCycle; /* Total cycles for this buffer */
/* Calculate the total time for this buffer */
- lBufTotalTime = ltt_time_sub(
- ltt_get_time(LTT_GET_BO(tf), &tf->buffer.end.timestamp),
- ltt_get_time(LTT_GET_BO(tf), &tf->buffer.begin.timestamp));
+ lBufTotalTime = ltt_time_sub(tf->buffer.end.timestamp,
+ tf->buffer.begin.timestamp);
  /* Calculate the total cycles for this buffer */
- lBufTotalCycle = ltt_get_uint64(LTT_GET_BO(tf), &tf->buffer.end.cycle_count);
- lBufTotalCycle -= ltt_get_uint64(LTT_GET_BO(tf),
- &tf->buffer.begin.cycle_count);
+ lBufTotalCycle = tf->buffer.end.cycle_count;
+ lBufTotalCycle -= tf->buffer.begin.cycle_count;
/* Convert the total time to double */
lBufTotalNSec = ltt_time_to_double(lBufTotalTime);
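+  /* nsecs_per_cycle presumably follows as lBufTotalNSec / lBufTotalCycle,
+   * feeding the tsc-to-time interpolation done at event read time. */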