#define PAGE_MASK (~(page_size-1))
#define PAGE_ALIGN(addr) (((addr)+page_size-1)&PAGE_MASK)
-/* obtain the time of an event */
-
-static inline LttTime getEventTime(LttTracefile * tf);
-
-
/* set the offset of the fields belonging to the event,
need the information of the archecture */
void set_fields_offsets(LttTracefile *tf, LttEventType *event_type);
}
+/* ltt_trace_get_num_cpu : number of per-cpu tracefiles in this trace.
+ * t->num_cpu is set at open time from the size of the tracefile group. */
+guint ltt_trace_get_num_cpu(LttTrace *t)
+{
+ return t->num_cpu;
+}
+
+
+/* parse_trace_header : decode the raw trace header mapped at *header.
+ *
+ * Sets the integer byte order (tf->reverse_bo), the float word order
+ * (tf->float_word_order) and the version-dependent buffer header size
+ * (tf->buffer_header_size) on the tracefile.  When t is non-NULL, also
+ * copies the per-trace information (arch type/variant/size, LTT
+ * major/minor version, flight recorder / heartbeat / alignment / tsc
+ * flags) into the trace.
+ *
+ * trace (t) can be NULL.
+ *
+ * Return value : 0 success, 1 bad tracefile (unknown magic number or
+ * unsupported trace version).
+ */
+int parse_trace_header(void *header, LttTracefile *tf, LttTrace *t)
+{
+ guint32 *magic_number = (guint32*)header;
+ struct ltt_trace_header_any *any = (struct ltt_trace_header_any *)header;
+
+ /* The magic number both identifies the file as a trace and reveals
+ * whether it was written in the reader's byte order or the reverse. */
+ if(*magic_number == LTT_MAGIC_NUMBER)
+ tf->reverse_bo = 0;
+ else if(*magic_number == LTT_REV_MAGIC_NUMBER)
+ tf->reverse_bo = 1;
+ else /* invalid magic number, bad tracefile ! */
+ return 1;
+
+ /* Get float byte order : might be different from int byte order
+ * (or is set to 0 if the trace has no float (kernel trace)) */
+ tf->float_word_order = any->float_word_order;
+
+ if(t) {
+ /* Multi-byte fields go through ltt_get_uint32 so they are swapped
+ * when reverse_bo is set; single-byte fields are copied directly. */
+ t->arch_type = ltt_get_uint32(LTT_GET_BO(tf),
+ &any->arch_type);
+ t->arch_variant = ltt_get_uint32(LTT_GET_BO(tf),
+ &any->arch_variant);
+ t->arch_size = any->arch_size;
+ t->ltt_major_version = any->major_version;
+ t->ltt_minor_version = any->minor_version;
+ t->flight_recorder = any->flight_recorder;
+ t->has_heartbeat = any->has_heartbeat;
+ t->has_alignment = any->has_alignment;
+ t->has_tsc = any->has_tsc;
+ }
+
+
+ /* Per-version buffer header size : block start header plus the
+ * version-specific trace header.  Only 0.3 is supported here. */
+ switch(any->major_version) {
+
+ case 0:
+ switch(any->minor_version) {
+ case 3:
+ {
+ tf->buffer_header_size =
+ sizeof(struct ltt_block_start_header)
+ + sizeof(struct ltt_trace_header_0_3);
+ }
+ break;
+ default:
+ g_warning("Unsupported trace version : %hhu.%hhu",
+ any->major_version, any->minor_version);
+ return 1;
+ }
+ break;
+
+ default:
+ g_warning("Unsupported trace version : %hhu.%hhu",
+ any->major_version, any->minor_version);
+ return 1;
+ }
+
+
+ return 0;
+}
+
+
/*****************************************************************************
*Function name
int page_size = getpagesize();
//open the file
- tf->name = g_quark_from_string(fileName);
+ tf->long_name = g_quark_from_string(fileName);
tf->trace = t;
tf->fd = open(fileName, O_RDONLY);
if(tf->fd < 0){
}
// Is the file large enough to contain a trace
- if(lTDFStat.st_size < (off_t)(sizeof(struct ltt_block_start_header))){
+ if(lTDFStat.st_size <
+ (off_t)(sizeof(struct ltt_block_start_header)
+ + sizeof(struct ltt_trace_header_any))){
g_print("The input data file %s does not contain a trace\n", fileName);
goto close_file;
}
/* Temporarily map the buffer start header to get trace information */
/* Multiple of pages aligned head */
tf->buffer.head = mmap(0,
- PAGE_ALIGN(sizeof(struct ltt_block_start_header)), PROT_READ,
+ PAGE_ALIGN(sizeof(struct ltt_block_start_header)
+ + sizeof(struct ltt_trace_header_any)), PROT_READ,
MAP_PRIVATE, tf->fd, 0);
if(tf->buffer.head == MAP_FAILED) {
perror("Error in allocating memory for buffer of tracefile");
header = (struct ltt_block_start_header*)tf->buffer.head;
- if(header->trace.magic_number == LTT_MAGIC_NUMBER)
- tf->reverse_bo = 0;
- else if(header->trace.magic_number == LTT_REV_MAGIC_NUMBER)
- tf->reverse_bo = 1;
- else /* invalid magic number, bad tracefile ! */
+ /* NOTE(review): header->trace is passed to parse_trace_header's void*
+ * parameter.  The removed code accessed header->trace.magic_number
+ * (embedded-struct access); if `trace` is still declared as a struct
+ * member this must be &header->trace to compile.  Confirm that
+ * ltt_block_start_header now declares `trace` as an array (e.g.
+ * trace[0]) so it decays to a pointer here. */
+ if(parse_trace_header(header->trace, tf, NULL)) {
+ g_warning("parse_trace_header error");
goto unmap_file;
+ }
//store the size of the file
tf->file_size = lTDFStat.st_size;
- tf->block_size = ltt_get_uint32(LTT_GET_BO(tf), &header->buf_size);
- tf->num_blocks = tf->file_size / tf->block_size;
-
- munmap(tf->buffer.head, PAGE_ALIGN(sizeof(struct ltt_block_start_header)));
+ tf->buf_size = ltt_get_uint32(LTT_GET_BO(tf), &header->buf_size);
+ tf->num_blocks = tf->file_size / tf->buf_size;
+
+ if(munmap(tf->buffer.head,
+ PAGE_ALIGN(sizeof(struct ltt_block_start_header)
+ + sizeof(struct ltt_trace_header_any)))) {
+ g_warning("unmap size : %u\n",
+ PAGE_ALIGN(sizeof(struct ltt_block_start_header)
+ + sizeof(struct ltt_trace_header_any)));
+ perror("munmap error");
+ g_assert(0);
+ }
tf->buffer.head = NULL;
//read the first block
/* Error */
unmap_file:
- munmap(tf->buffer.head, PAGE_ALIGN(sizeof(struct ltt_block_start_header)));
+ if(munmap(tf->buffer.head,
+ PAGE_ALIGN(sizeof(struct ltt_block_start_header)
+ + sizeof(struct ltt_trace_header_any)))) {
+ g_warning("unmap size : %u\n",
+ PAGE_ALIGN(sizeof(struct ltt_block_start_header)
+ + sizeof(struct ltt_trace_header_any)));
+ perror("munmap error");
+ g_assert(0);
+ }
close_file:
close(tf->fd);
end:
return -1;
}
+/* ltt_tracefile_get_trace : back-pointer to the trace that owns this
+ * tracefile (set when the tracefile is opened). */
+LttTrace *ltt_tracefile_get_trace(LttTracefile *tf)
+{
+ return tf->trace;
+}
+
#if 0
/*****************************************************************************
*Open control and per cpu tracefiles
+/* Close a tracefile : unmap its currently mapped buffer (if any) and
+ * close its file descriptor. */
void ltt_tracefile_close(LttTracefile *t)
{
+ /* page_size is referenced by the PAGE_ALIGN() macro below. */
+ int page_size = getpagesize();
+
 if(t->buffer.head != NULL)
- munmap(t->buffer.head, t->buf_size);
+ /* munmap length must match the page-aligned length given to mmap;
+ * a failure here indicates internal corruption, hence the assert. */
+ if(munmap(t->buffer.head, PAGE_ALIGN(t->buf_size))) {
+ g_warning("unmap size : %u\n",
+ PAGE_ALIGN(t->buf_size));
+ perror("munmap error");
+ g_assert(0);
+ }
+
 close(t->fd);
}
{
guint raw_name_len = strlen(raw_name);
gchar char_name[PATH_MAX];
- gchar *digit_begin;
int i;
int underscore_pos;
long int cpu_num;
strncpy(char_name, raw_name, underscore_pos);
+ char_name[underscore_pos] = '\0';
+
*name = g_quark_from_string(char_name);
*num = cpu_num;
* A tracefile group is simply an array where all the per cpu tracefiles sits.
*/
-static int open_tracefiles(LttTrace *trace, char *root_path,
- char *relative_path)
+static int open_tracefiles(LttTrace *trace, gchar *root_path,
+ gchar *relative_path)
{
DIR *dir = opendir(root_path);
struct dirent *entry;
struct stat stat_buf;
int ret;
- char path[PATH_MAX];
+ gchar path[PATH_MAX];
int path_len;
- char *path_ptr;
+ gchar *path_ptr;
int rel_path_len;
- char rel_path[PATH_MAX];
- char *rel_path_ptr;
+ gchar rel_path[PATH_MAX];
+ gchar *rel_path_ptr;
+ LttTracefile tmp_tf;
if(dir == NULL) {
perror(root_path);
g_debug("Tracefile file or directory : %s\n", path);
+ if(strcmp(rel_path, "/eventdefs") == 0) continue;
+
if(S_ISDIR(stat_buf.st_mode)) {
g_debug("Entering subdirectory...\n");
ret = open_tracefiles(trace, path, rel_path);
if(ret < 0) continue;
} else if(S_ISREG(stat_buf.st_mode)) {
- g_debug("Opening file.\n");
-
GQuark name;
guint num;
GArray *group;
- LttTracefile *tf;
- guint len;
if(get_tracefile_name_number(rel_path, &name, &num))
continue; /* invalid name */
+ g_debug("Opening file.\n");
+ if(ltt_tracefile_open(trace, path, &tmp_tf)) {
+ g_info("Error opening tracefile %s", path);
+
+ continue; /* error opening the tracefile : bad magic number ? */
+ }
+
g_debug("Tracefile name is %s and number is %u",
g_quark_to_string(name), num);
+ tmp_tf.cpu_online = 1;
+ tmp_tf.cpu_num = num;
+ tmp_tf.name = name;
+
group = g_datalist_id_get_data(&trace->tracefiles, name);
if(group == NULL) {
/* Elements are automatically cleared when the array is allocated.
g_datalist_id_set_data_full(&trace->tracefiles, name,
group, ltt_tracefile_group_destroy);
}
+
/* Add the per cpu tracefile to the named group */
unsigned int old_len = group->len;
if(num+1 > old_len)
group = g_array_set_size(group, num+1);
- tf = &g_array_index (group, LttTracefile, num);
+ g_array_index (group, LttTracefile, num) = tmp_tf;
- if(ltt_tracefile_open(trace, path, tf)) {
- g_info("Error opening tracefile %s", path);
- g_array_set_size(group, old_len);
-
- if(!ltt_tracefile_group_has_cpu_online(group))
- g_datalist_id_remove_data(&trace->tracefiles, name);
-
- continue; /* error opening the tracefile : bad magic number ? */
- }
- tf->cpu_online = 1;
- tf->cpu_num = num;
}
}
const gchar *text;
guint textlen;
gint err;
- int i, j;
- LttEventType *et;
text = g_quark_to_string(t->pathname);
textlen = strlen(text);
err = snprintf(desc_file_name+textlen, PATH_MAX-textlen-1,
"%u", f->checksum);
- if(err) goto name_error;
+ if(err < 0) goto name_error;
textlen=strlen(desc_file_name);
err = ltt_facility_open(f, t, desc_file_name);
if(err) goto facility_error;
-
- for(i=0;i<t->facilities_by_num->len;i++){
- f = &g_array_index(t->facilities_by_num, LttFacility, i);
- if(f->exists) {
- for(j=0; j<f->events->len; j++){
- et = &g_array_index(f->events, LttEventType, j);
- set_fields_offsets(fac_tf, et);
- }
- }
- }
-
return 0;
static void ltt_fac_ids_destroy(gpointer data)
{
GArray *fac_ids = (GArray *)data;
- int i;
- LttFacility *fac;
-
- for(i=0; i<fac_ids->len; i++) {
- fac = &g_array_index (fac_ids, LttFacility, i);
- ltt_facility_close(fac);
- }
g_array_free(fac_ids, TRUE);
}
int err;
LttFacility *fac;
GArray *fac_ids;
+ guint i;
+ LttEventType *et;
while(1) {
err = ltt_tracefile_read_seek(tf);
* 0 : facility load
* 1 : facility unload
* 2 : state dump facility load
- * Facility 1 : (heartbeat)
- * 0 : heartbeat
+ * 3 : heartbeat
*/
- if(tf->event.facility_id > 1) { /* Should only contain core and heartbeat
- facilities */
+ if(tf->event.facility_id != LTT_FACILITY_CORE) {
+ /* Should only contain core facility */
g_warning("Error in processing facility file %s, "
"should not contain facility id %u.", g_quark_to_string(tf->name),
tf->event.facility_id);
err = EPERM;
goto fac_id_error;
- } else if(tf->event.facility_id == LTT_FACILITY_CORE) {
+ } else {
struct LttFacilityLoad *fac_load_data;
struct LttStateDumpFacilityLoad *fac_state_dump_load_data;
switch((enum ltt_core_events)tf->event.event_id) {
case LTT_EVENT_FACILITY_LOAD:
fac_name = (char*)(tf->event.data);
+ g_debug("Doing LTT_EVENT_FACILITY_LOAD of facility %s",
+ fac_name);
fac_load_data =
(struct LttFacilityLoad *)
(tf->event.data + strlen(fac_name) + 1);
fac->id = ltt_get_uint32(LTT_GET_BO(tf), &fac_load_data->id);
fac->pointer_size = ltt_get_uint32(LTT_GET_BO(tf),
&fac_load_data->pointer_size);
+ fac->long_size = ltt_get_uint32(LTT_GET_BO(tf),
+ &fac_load_data->long_size);
fac->size_t_size = ltt_get_uint32(LTT_GET_BO(tf),
&fac_load_data->size_t_size);
fac->alignment = ltt_get_uint32(LTT_GET_BO(tf),
&fac_load_data->alignment);
if(ltt_get_facility_description(fac, tf->trace, tf))
- goto facility_error;
+ continue; /* error opening description */
fac->trace = tf->trace;
+
+ /* Preset the field offsets */
+ for(i=0; i<fac->events->len; i++){
+ et = &g_array_index(fac->events, LttEventType, i);
+ set_fields_offsets(tf, et);
+ }
+
fac->exists = 1;
fac_ids = g_datalist_id_get_data(&tf->trace->facilities_by_name,
break;
case LTT_EVENT_FACILITY_UNLOAD:
+ g_debug("Doing LTT_EVENT_FACILITY_UNLOAD");
/* We don't care about unload : facilities ID are valid for the whole
* trace. They simply won't be used after the unload. */
break;
case LTT_EVENT_STATE_DUMP_FACILITY_LOAD:
fac_name = (char*)(tf->event.data);
+ g_debug("Doing LTT_EVENT_STATE_DUMP_FACILITY_LOAD of facility %s",
+ fac_name);
fac_state_dump_load_data =
- (struct LtttStateDumpFacilityLoad *)
+ (struct LttStateDumpFacilityLoad *)
(tf->event.data + strlen(fac_name) + 1);
fac = &g_array_index (tf->trace->facilities_by_num, LttFacility,
ltt_get_uint32(LTT_GET_BO(tf), &fac_state_dump_load_data->id));
g_assert(fac->exists == 0);
fac->name = g_quark_from_string(fac_name);
fac->checksum = ltt_get_uint32(LTT_GET_BO(tf),
- &fac_load_data->checksum);
- fac->id = fac_load_data->id;
+ &fac_state_dump_load_data->checksum);
+ fac->id = fac_state_dump_load_data->id;
fac->pointer_size = ltt_get_uint32(LTT_GET_BO(tf),
- &fac_load_data->pointer_size);
+ &fac_state_dump_load_data->pointer_size);
+ fac->long_size = ltt_get_uint32(LTT_GET_BO(tf),
+ &fac_state_dump_load_data->long_size);
fac->size_t_size = ltt_get_uint32(LTT_GET_BO(tf),
- &fac_load_data->size_t_size);
+ &fac_state_dump_load_data->size_t_size);
fac->alignment = ltt_get_uint32(LTT_GET_BO(tf),
- &fac_load_data->alignment);
+ &fac_state_dump_load_data->alignment);
if(ltt_get_facility_description(fac, tf->trace, tf))
- goto facility_error;
+ continue; /* error opening description */
fac->trace = tf->trace;
+ /* Preset the field offsets */
+ for(i=0; i<fac->events->len; i++){
+ et = &g_array_index(fac->events, LttEventType, i);
+ set_fields_offsets(tf, et);
+ }
+
fac->exists = 1;
fac_ids = g_datalist_id_get_data(&tf->trace->facilities_by_name,
return 0;
/* Error handling */
-facility_error:
event_id_error:
fac_id_error:
update_error:
seek_error:
+ g_warning("An error occured in facility tracefile parsing");
return err;
}
LttTrace * t;
LttTracefile *tf;
GArray *group;
- int i;
+ int i, ret;
struct ltt_block_start_header *header;
+ DIR *dir;
+ struct dirent *entry;
+ guint control_found = 0;
+ guint eventdefs_found = 0;
+ struct stat stat_buf;
+ gchar path[PATH_MAX];
t = g_new(LttTrace, 1);
if(!t) goto alloc_error;
get_absolute_pathname(pathname, abs_path);
t->pathname = g_quark_from_string(abs_path);
- /* Open all the tracefiles */
g_datalist_init(&t->tracefiles);
- if(open_tracefiles(t, abs_path, ""))
- goto open_error;
+
+ /* Test to see if it looks like a trace */
+ dir = opendir(abs_path);
+ if(dir == NULL) {
+ perror(abs_path);
+ goto open_error;
+ }
+ while((entry = readdir(dir)) != NULL) {
+ strcpy(path, abs_path);
+ strcat(path, "/");
+ strcat(path, entry->d_name);
+ ret = stat(path, &stat_buf);
+ if(ret == -1) {
+ perror(path);
+ continue;
+ }
+ if(S_ISDIR(stat_buf.st_mode)) {
+ if(strcmp(entry->d_name, "control") == 0) {
+ control_found = 1;
+ }
+ if(strcmp(entry->d_name, "eventdefs") == 0) {
+ eventdefs_found = 1;
+ }
+ }
+ }
+ closedir(dir);
+
+ if(!control_found || !eventdefs_found) goto find_error;
+
+ /* Open all the tracefiles */
+ if(open_tracefiles(t, abs_path, "")) {
+ g_warning("Error opening tracefile %s", abs_path);
+ goto find_error;
+ }
/* Prepare the facilities containers : array and mapping */
/* Array is zeroed : the "exists" field is set to false by default */
g_assert(group->len > 0);
tf = &g_array_index (group, LttTracefile, 0);
header = (struct ltt_block_start_header*)tf->buffer.head;
- t->arch_type = ltt_get_uint32(LTT_GET_BO(tf), &header->trace.arch_type);
- t->arch_variant = ltt_get_uint32(LTT_GET_BO(tf), &header->trace.arch_variant);
- t->arch_size = header->trace.arch_size;
- t->ltt_major_version = header->trace.major_version;
- t->ltt_minor_version = header->trace.minor_version;
- t->flight_recorder = header->trace.flight_recorder;
- t->has_heartbeat = header->trace.has_heartbeat;
- t->has_alignment = header->trace.has_alignment;
- t->has_tsc = header->trace.has_tsc;
-
+ g_assert(parse_trace_header(header->trace,
+ tf, t) == 0);
+
+ t->num_cpu = group->len;
for(i=0; i<group->len; i++) {
tf = &g_array_index (group, LttTracefile, i);
facilities_error:
g_datalist_clear(&t->facilities_by_name);
g_array_free(t->facilities_by_num, TRUE);
-open_error:
+find_error:
g_datalist_clear(&t->tracefiles);
+open_error:
g_free(t);
alloc_error:
return NULL;
}
+/* ltt_trace_name : quark of the trace's absolute pathname
+ * (t->pathname, set from the path given at open time). */
-GQuark ltt_trace_name(LttTrace *t)
+GQuark ltt_trace_name(const LttTrace *t)
{
return t->pathname;
}
void ltt_trace_close(LttTrace *t)
{
+ guint i;
+ LttFacility *fac;
+
+ for(i=0; i<t->facilities_by_num->len; i++) {
+ fac = &g_array_index (t->facilities_by_num, LttFacility, i);
+ if(fac->exists)
+ ltt_facility_close(fac);
+ }
+
g_datalist_clear(&t->facilities_by_name);
g_array_free(t->facilities_by_num, TRUE);
g_datalist_clear(&t->tracefiles);
static void ltt_tracefile_time_span_get(LttTracefile *tf,
LttTime *start, LttTime *end)
{
- struct ltt_block_start_header * header;
int err;
err = map_block(tf, 0);
*Get the name of a tracefile
****************************************************************************/
+/* Returns the tracefile's short name quark (parsed from the file name,
+ * without the cpu number suffix) — contrast ltt_tracefile_long_name. */
-GQuark ltt_tracefile_name(LttTracefile *tf)
+GQuark ltt_tracefile_name(const LttTracefile *tf)
{
return tf->name;
}
+/* ltt_tracefile_long_name : quark of the tracefile's full path name
+ * (tf->long_name, set from fileName when the tracefile is opened). */
+GQuark ltt_tracefile_long_name(const LttTracefile *tf)
+{
+ return tf->long_name;
+}
+
+
+
+/* ltt_tracefile_num : cpu number of this per-cpu tracefile
+ * (tf->cpu_num, parsed from the numeric suffix of the file name). */
+guint ltt_tracefile_num(LttTracefile *tf)
+{
+ return tf->cpu_num;
+}
+
/*****************************************************************************
* Get the number of blocks in the tracefile
****************************************************************************/
* the time passed in parameter.
*
* If the time parameter is outside the tracefile time span, seek to the first
- * or the last event of the tracefile.
+ * event or if after, return ERANGE.
*
* If the time parameter is before the first event, we have to seek specially to
* there.
*
- * If the time is after the end of the trace, get the last event.
+ * If the time is after the end of the trace, return ERANGE.
*
* Do a binary search to find the right block, then a sequential search in the
* block to find the event.
* you will jump over an event if you do.
*
* Return value : 0 : no error, the tf->event can be used
+ * ERANGE : time if after the last event of the trace
* otherwise : this is an error.
*
* */
* go to the first event. */
if(ltt_time_compare(time, tf->buffer.begin.timestamp) <= 0) {
ret = ltt_tracefile_read(tf);
+ if(ret == ERANGE) goto range;
+ else if (ret) goto fail;
goto found; /* There is either no event in the trace or the event points
to the first event in the trace */
}
goto fail;
}
- /* If the time is after the end of the trace, get the last event. */
- if(ltt_time_compare(time, tf->buffer.end.timestamp) >= 0) {
- /* While the ltt_tracefile_read doesn't return ERANGE or EPERM,
- * continue reading.
- */
- while(1) {
- ret = ltt_tracefile_read(tf);
- if(ret == ERANGE) goto found; /* ERANGE or EPERM */
- else if(ret) goto fail;
- }
+ /* If the time is after the end of the trace, return ERANGE. */
+ if(ltt_time_compare(time, tf->buffer.end.timestamp) > 0) {
+ goto range;
}
/* Binary search the block */
* (or in the next buffer first event) */
while(1) {
ret = ltt_tracefile_read(tf);
- if(ret == ERANGE) goto found; /* ERANGE or EPERM */
+ if(ret == ERANGE) goto range; /* ERANGE or EPERM */
else if(ret) goto fail;
if(ltt_time_compare(time, tf->event.event_time) >= 0)
- break;
+ goto found;
}
- } if(ltt_time_compare(time, tf->buffer.begin.timestamp) < 0) {
+ } else if(ltt_time_compare(time, tf->buffer.begin.timestamp) < 0) {
/* go to lower part */
- high = block_num;
+ high = block_num - 1;
} else if(ltt_time_compare(time, tf->buffer.end.timestamp) > 0) {
/* go to higher part */
- low = block_num;
+ low = block_num + 1;
} else {/* The event is right in the buffer!
(or in the next buffer first event) */
while(1) {
- ltt_tracefile_read(tf);
- if(ret == ERANGE) goto found; /* ERANGE or EPERM */
+ ret = ltt_tracefile_read(tf);
+ if(ret == ERANGE) goto range; /* ERANGE or EPERM */
else if(ret) goto fail;
if(ltt_time_compare(time, tf->event.event_time) >= 0)
found:
return 0;
+range:
+ return ERANGE;
/* Error handling */
fail:
err = ltt_tracefile_read_op(tf);
if(err) goto fail;
- return;
+ return 0;
fail:
g_error("ltt_tracefile_seek_time failed on tracefile %s",
g_quark_to_string(tf->name));
+ return 1;
}
/* Calculate the real event time based on the buffer boundaries */
g_assert(tf->trace->has_tsc);
time = ltt_time_from_uint64(
- (guint64)tf->buffer.tsc*tf->buffer.nsecs_per_cycle);
+ (guint64)(tf->buffer.tsc - tf->buffer.begin.cycle_count) *
+ tf->buffer.nsecs_per_cycle);
time = ltt_time_add(tf->buffer.begin.timestamp, time);
return time;
*Return value
*
* Returns 0 if an event can be used in tf->event.
- * Returns ERANGE on end of trace. The event in tf->event still can be used.
+ * Returns ERANGE on end of trace. The event in tf->event still can be used
+ * (if the last block was not empty).
* Returns EPERM on error.
*
* This function does make the tracefile event structure point to the event
/* do specific operation on events */
int ltt_tracefile_read_op(LttTracefile *tf)
{
- int err;
- LttFacility *f;
- void * pos;
LttEvent *event;
event = &tf->event;
* event specific operation. */
int ltt_tracefile_read_update_event(LttTracefile *tf)
{
- int err;
- LttFacility *f;
void * pos;
LttEvent *event;
g_assert(block_num < tf->num_blocks);
- if(tf->buffer.head != NULL)
- munmap(tf->buffer.head, PAGE_ALIGN(tf->buf_size));
+ if(tf->buffer.head != NULL) {
+ if(munmap(tf->buffer.head, PAGE_ALIGN(tf->buf_size))) {
+ g_warning("unmap size : %u\n",
+ PAGE_ALIGN(tf->buf_size));
+ perror("munmap error");
+ g_assert(0);
+ }
+ }
+
/* Multiple of pages aligned head */
tf->buffer.head = mmap(0,
- PAGE_ALIGN(tf->block_size),
+ PAGE_ALIGN(tf->buf_size),
PROT_READ, MAP_PRIVATE, tf->fd,
- PAGE_ALIGN((off_t)tf->block_size * (off_t)block_num));
+ PAGE_ALIGN((off_t)tf->buf_size * (off_t)block_num));
if(tf->buffer.head == MAP_FAILED) {
perror("Error in allocating memory for buffer of tracefile");
tf->buffer.begin.timestamp = ltt_get_time(LTT_GET_BO(tf),
&header->begin.timestamp);
tf->buffer.begin.timestamp.tv_nsec *= NSEC_PER_USEC;
+ //g_debug("block %u begin : %lu.%lu", block_num,
+ // tf->buffer.begin.timestamp.tv_sec, tf->buffer.begin.timestamp.tv_nsec);
tf->buffer.begin.cycle_count = ltt_get_uint64(LTT_GET_BO(tf),
&header->begin.cycle_count);
tf->buffer.end.timestamp = ltt_get_time(LTT_GET_BO(tf),
&header->end.timestamp);
tf->buffer.end.timestamp.tv_nsec *= NSEC_PER_USEC;
+ //g_debug("block %u end : %lu.%lu", block_num,
+ // tf->buffer.end.timestamp.tv_sec, tf->buffer.end.timestamp.tv_nsec);
tf->buffer.end.cycle_count = ltt_get_uint64(LTT_GET_BO(tf),
&header->end.cycle_count);
tf->buffer.lost_size = ltt_get_uint32(LTT_GET_BO(tf),
/* FIXME
* eventually support variable buffer size : will need a partial pre-read of
* the headers to create an index when we open the trace... eventually. */
- g_assert(tf->block_size == ltt_get_uint32(LTT_GET_BO(tf),
+ g_assert(tf->buf_size == ltt_get_uint32(LTT_GET_BO(tf),
&header->buf_size));
/* Now that the buffer is mapped, calculate the time interpolation for the
switch((enum ltt_core_events)tf->event.event_id) {
case LTT_EVENT_FACILITY_LOAD:
size = strlen((char*)tf->event.data) + 1;
- g_debug("Event facility load of facility %s", (char*)tf->event.data);
+ //g_debug("Update Event facility load of facility %s", (char*)tf->event.data);
size += sizeof(struct LttFacilityLoad);
break;
case LTT_EVENT_FACILITY_UNLOAD:
+ //g_debug("Update Event facility unload");
size = sizeof(struct LttFacilityUnload);
break;
case LTT_EVENT_STATE_DUMP_FACILITY_LOAD:
size = strlen((char*)tf->event.data) + 1;
- g_debug("Event facility load state dump of facility %s",
- (char*)tf->event.data);
+ //g_debug("Update Event facility load state dump of facility %s",
+ // (char*)tf->event.data);
size += sizeof(struct LttStateDumpFacilityLoad);
break;
case LTT_EVENT_HEARTBEAT:
+ //g_debug("Update Event heartbeat");
size = sizeof(TimeHeartbeat);
break;
default:
g_quark_to_string(tf->name));
goto event_type_error;
}
+
+ if(event_type->root_field)
+ size = get_field_type_size(tf, event_type,
+ 0, 0, event_type->root_field, tf->event.data);
+ else
+ size = 0;
- size = get_field_type_size(tf, event_type,
- 0, 0, event_type->root_field, tf->event.data);
- g_debug("Event root field : f.e %hhu.%hhu size %lu", tf->event.facility_id,
- tf->event.event_id, size);
+ //g_debug("Event root field : f.e %hhu.%hhu size %zd",
+ // tf->event.facility_id,
+ // tf->event.event_id, size);
}
tf->event.data_size = size;
/* Check consistency between kernel and LTTV structure sizes */
g_assert(tf->event.data_size == tf->event.event_size);
-
+
return;
facility_error:
{
int ret = 0;
void *pos;
- ssize_t event_size;
/* seek over the buffer header if we are at the buffer start */
if(tf->event.offset == 0) {
- tf->event.offset += sizeof(struct ltt_block_start_header);
+ tf->event.offset += tf->buffer_header_size;
- if(tf->event.offset == tf->block_size - tf->buffer.lost_size) {
+ if(tf->event.offset == tf->buf_size - tf->buffer.lost_size) {
ret = ERANGE;
}
goto found;
}
- if(tf->event.offset == tf->block_size - tf->buffer.lost_size) {
- ret = ERANGE;
- goto found;
- }
-
pos = tf->event.data;
if(tf->event.data_size < 0) goto error;
pos += (size_t)tf->event.data_size;
tf->event.offset = pos - tf->buffer.head;
+
+ if(tf->event.offset == tf->buf_size - tf->buffer.lost_size) {
+ ret = ERANGE;
+ goto found;
+ }
+ g_assert(tf->event.offset < tf->buf_size - tf->buffer.lost_size);
found:
return ret;
LttCycleCount lBufTotalCycle;/* Total cycles for this buffer */
/* Calculate the total time for this buffer */
- lBufTotalTime = ltt_time_sub(
- ltt_get_time(LTT_GET_BO(tf), &tf->buffer.end.timestamp),
- ltt_get_time(LTT_GET_BO(tf), &tf->buffer.begin.timestamp));
+ lBufTotalTime = ltt_time_sub(tf->buffer.end.timestamp,
+ tf->buffer.begin.timestamp);
/* Calculate the total cycles for this bufffer */
- lBufTotalCycle = ltt_get_uint64(LTT_GET_BO(tf), &tf->buffer.end.cycle_count);
- lBufTotalCycle -= ltt_get_uint64(LTT_GET_BO(tf),
- &tf->buffer.begin.cycle_count);
+ lBufTotalCycle = tf->buffer.end.cycle_count;
+ lBufTotalCycle -= tf->buffer.begin.cycle_count;
/* Convert the total time to double */
lBufTotalNSec = ltt_time_to_double(lBufTotalTime);
break;
case LTT_LONG:
case LTT_ULONG:
- field->field_size = (off_t)event_type->facility->pointer_size;
+ field->field_size = (off_t)event_type->facility->long_size;
field->fixed_size = FIELD_FIXED;
break;
case LTT_SIZE_T:
field->child[0]);
field->fixed_size = FIELD_VARIABLE;
field->field_size = 0;
+ *fixed_root = FIELD_VARIABLE;
+ *fixed_parent = FIELD_VARIABLE;
break;
case LTT_STRING:
field->fixed_size = FIELD_VARIABLE;
field->field_size = 0;
+ *fixed_root = FIELD_VARIABLE;
+ *fixed_parent = FIELD_VARIABLE;
break;
case LTT_ARRAY:
local_fixed_root = FIELD_VARIABLE;
&local_fixed_root, &local_fixed_parent,
field->child[0]);
field->fixed_size = field->child[0]->fixed_size;
- if(field->fixed_size == FIELD_FIXED)
+ if(field->fixed_size == FIELD_FIXED) {
field->field_size = type->element_number * field->child[0]->field_size;
- else
+ } else {
field->field_size = 0;
+ *fixed_root = FIELD_VARIABLE;
+ *fixed_parent = FIELD_VARIABLE;
+ }
break;
case LTT_STRUCT:
current_root_offset = field->offset_root;
LttField *field1, LttField *field2)
{
guint different = 0;
- enum field_status local_fixed_root, local_fixed_parent;
guint i;
LttType *type1;
LttType *type2;
type1 = field1->field_type;
type2 = field2->field_type;
- size_t current_root_offset;
- size_t current_offset;
- enum field_status current_child_status, final_child_status;
- size_t max_size;
-
if(type1->type_class != type2->type_class) {
different = 1;
goto end;