#define PAGE_MASK (~(page_size-1))
#define PAGE_ALIGN(addr) (((addr)+page_size-1)&PAGE_MASK)
-/* obtain the time of an event */
-
-static inline LttTime getEventTime(LttTracefile * tf);
-
-
/* set the offset of the fields belonging to the event,
need the information of the archecture */
void set_fields_offsets(LttTracefile *tf, LttEventType *event_type);
static gint map_block(LttTracefile * tf, guint block_num);
/* calculate nsec per cycles for current block */
-static double calc_nsecs_per_cycle(LttTracefile * t);
+#if 0
+static guint32 calc_nsecs_per_cycle(LttTracefile * t);
+static guint64 cycles_2_ns(LttTracefile *tf, guint64 cycles);
+#endif //0
/* go to the next event */
static int ltt_seek_next_event(LttTracefile *tf);
}
+/* Return the number of CPUs of this trace (set from the length of the
+ * per-cpu tracefile group when the trace is opened). */
+guint ltt_trace_get_num_cpu(LttTrace *t)
+{
+  return t->num_cpu;
+}
+
+
+/* parse_trace_header
+ *
+ * Parse a mapped trace header (any version), setting the tracefile byte
+ * order fields and buffer_header_size, and — when a trace is supplied —
+ * the per-trace architecture / version / start-time information.
+ *
+ * header : pointer to the mapped trace header.
+ * tf     : tracefile being opened (byte order and header size are set here).
+ * t      : trace, can be NULL when only tracefile information is wanted.
+ *
+ * Return value : 0 success, 1 bad tracefile
+ */
+int parse_trace_header(void *header, LttTracefile *tf, LttTrace *t)
+{
+  guint32 *magic_number = (guint32*)header;
+  struct ltt_trace_header_any *any = (struct ltt_trace_header_any *)header;
+
+  /* The magic number encodes the writer's byte order : the reversed
+   * constant means the trace must be byte-swapped on read. */
+  if(*magic_number == LTT_MAGIC_NUMBER)
+    tf->reverse_bo = 0;
+  else if(*magic_number == LTT_REV_MAGIC_NUMBER)
+    tf->reverse_bo = 1;
+  else /* invalid magic number, bad tracefile ! */
+    return 1;
+
+  /* Get float byte order : might be different from int byte order
+   * (or is set to 0 if the trace has no float (kernel trace)) */
+  tf->float_word_order = any->float_word_order;
+
+  if(t) {
+    t->arch_type = ltt_get_uint32(LTT_GET_BO(tf),
+                          &any->arch_type);
+    t->arch_variant = ltt_get_uint32(LTT_GET_BO(tf),
+                          &any->arch_variant);
+    t->arch_size = any->arch_size;
+    t->ltt_major_version = any->major_version;
+    t->ltt_minor_version = any->minor_version;
+    t->flight_recorder = any->flight_recorder;
+    t->has_heartbeat = any->has_heartbeat;
+    t->has_alignment = any->has_alignment;
+    t->has_tsc = any->has_tsc;
+  }
+
+
+  /* Version-specific part of the header : only 0.4 is supported. */
+  switch(any->major_version) {
+
+    case 0:
+      switch(any->minor_version) {
+      case 3:
+        {
+          /* 0.3 headers lack the start_freq/start_tsc fields used for
+           * time interpolation : report them as unsupported. */
+          tf->buffer_header_size =
+           sizeof(struct ltt_block_start_header) 
+              + sizeof(struct ltt_trace_header_0_3);
+          g_warning("Unsupported trace version : %hhu.%hhu",
+              any->major_version, any->minor_version);
+          return 1;
+        }
+      case 4:
+        {
+          struct ltt_trace_header_0_4 *vheader =
+            (struct ltt_trace_header_0_4 *)header;
+          tf->buffer_header_size =
+           sizeof(struct ltt_block_start_header) 
+              + sizeof(struct ltt_trace_header_0_4);
+          if(t) {
+            /* Reference point for cycle-count -> wall time conversion. */
+            t->start_freq = ltt_get_uint64(LTT_GET_BO(tf),
+                                           &vheader->start_freq);
+            t->start_tsc = ltt_get_uint64(LTT_GET_BO(tf),
+                                          &vheader->start_tsc);
+            t->start_monotonic = ltt_get_uint64(LTT_GET_BO(tf),
+                                                &vheader->start_monotonic);
+            t->start_time = ltt_get_time(LTT_GET_BO(tf),
+                                         &vheader->start_time);
+          }
+        }
+        break;
+      default:
+        g_warning("Unsupported trace version : %hhu.%hhu",
+            any->major_version, any->minor_version);
+        return 1;
+      }
+      break;
+
+    default:
+      g_warning("Unsupported trace version : %hhu.%hhu",
+          any->major_version, any->minor_version);
+      return 1;
+  }
+
+
+  return 0;
+}
+
+
/*****************************************************************************
*Function name
}
// Is the file large enough to contain a trace
- if(lTDFStat.st_size < (off_t)(sizeof(struct ltt_block_start_header))){
+ if(lTDFStat.st_size <
+ (off_t)(sizeof(struct ltt_block_start_header)
+ + sizeof(struct ltt_trace_header_any))){
g_print("The input data file %s does not contain a trace\n", fileName);
goto close_file;
}
/* Temporarily map the buffer start header to get trace information */
/* Multiple of pages aligned head */
tf->buffer.head = mmap(0,
- PAGE_ALIGN(sizeof(struct ltt_block_start_header)), PROT_READ,
+ PAGE_ALIGN(sizeof(struct ltt_block_start_header)
+ + sizeof(struct ltt_trace_header_any)), PROT_READ,
MAP_PRIVATE, tf->fd, 0);
if(tf->buffer.head == MAP_FAILED) {
perror("Error in allocating memory for buffer of tracefile");
header = (struct ltt_block_start_header*)tf->buffer.head;
- if(header->trace.magic_number == LTT_MAGIC_NUMBER)
- tf->reverse_bo = 0;
- else if(header->trace.magic_number == LTT_REV_MAGIC_NUMBER)
- tf->reverse_bo = 1;
- else /* invalid magic number, bad tracefile ! */
+  if(parse_trace_header(&header->trace, tf, NULL)) {
+ g_warning("parse_trace_header error");
goto unmap_file;
+ }
//store the size of the file
tf->file_size = lTDFStat.st_size;
- tf->block_size = ltt_get_uint32(LTT_GET_BO(tf), &header->buf_size);
- tf->num_blocks = tf->file_size / tf->block_size;
-
- munmap(tf->buffer.head, PAGE_ALIGN(sizeof(struct ltt_block_start_header)));
+ tf->buf_size = ltt_get_uint32(LTT_GET_BO(tf), &header->buf_size);
+ tf->num_blocks = tf->file_size / tf->buf_size;
+
+ if(munmap(tf->buffer.head,
+ PAGE_ALIGN(sizeof(struct ltt_block_start_header)
+ + sizeof(struct ltt_trace_header_any)))) {
+ g_warning("unmap size : %u\n",
+ PAGE_ALIGN(sizeof(struct ltt_block_start_header)
+ + sizeof(struct ltt_trace_header_any)));
+ perror("munmap error");
+ g_assert(0);
+ }
tf->buffer.head = NULL;
//read the first block
/* Error */
unmap_file:
- munmap(tf->buffer.head, PAGE_ALIGN(sizeof(struct ltt_block_start_header)));
+ if(munmap(tf->buffer.head,
+ PAGE_ALIGN(sizeof(struct ltt_block_start_header)
+ + sizeof(struct ltt_trace_header_any)))) {
+ g_warning("unmap size : %u\n",
+ PAGE_ALIGN(sizeof(struct ltt_block_start_header)
+ + sizeof(struct ltt_trace_header_any)));
+ perror("munmap error");
+ g_assert(0);
+ }
close_file:
close(tf->fd);
end:
+/* ltt_tracefile_close
+ *
+ * Unmap the tracefile's currently mapped buffer (if any) and close its
+ * file descriptor. */
void ltt_tracefile_close(LttTracefile *t)
{
+  int page_size = getpagesize();  /* read by the PAGE_ALIGN macro below */
+
+  /* The buffer was mmapped with a page-aligned length : unmap that same
+   * page-aligned length, and fail loudly if munmap refuses. */
  if(t->buffer.head != NULL)
-    munmap(t->buffer.head, t->buf_size);
+    if(munmap(t->buffer.head, PAGE_ALIGN(t->buf_size))) {
+    g_warning("unmap size : %u\n",
+        PAGE_ALIGN(t->buf_size));
+    perror("munmap error");
+    g_assert(0);
+  }
+
  close(t->fd);
}
{
guint raw_name_len = strlen(raw_name);
gchar char_name[PATH_MAX];
- gchar *digit_begin;
int i;
int underscore_pos;
long int cpu_num;
g_debug("Tracefile file or directory : %s\n", path);
+ if(strcmp(rel_path, "/eventdefs") == 0) continue;
+
if(S_ISDIR(stat_buf.st_mode)) {
g_debug("Entering subdirectory...\n");
GQuark name;
guint num;
GArray *group;
- LttTracefile *tf;
- guint len;
if(get_tracefile_name_number(rel_path, &name, &num))
continue; /* invalid name */
/* ltt_get_facility_description
*
- * Opens the trace corresponding to the requested facility (identified by fac_id
+ * Opens the file corresponding to the requested facility (identified by fac_id
* and checksum).
*
* The name searched is : %trace root%/eventdefs/facname_checksum.xml
textlen+=strlen(text);
if(textlen >= PATH_MAX) goto name_error;
strcat(desc_file_name, text);
-
+#if 0
text = "_";
textlen+=strlen(text);
if(textlen >= PATH_MAX) goto name_error;
textlen=strlen(desc_file_name);
+#endif //0
text = ".xml";
textlen+=strlen(text);
if(textlen >= PATH_MAX) goto name_error;
strcat(desc_file_name, text);
-
+
err = ltt_facility_open(f, t, desc_file_name);
if(err) goto facility_error;
return 0;
/* Error handling */
-facility_error:
event_id_error:
fac_id_error:
update_error:
LttTrace * t;
LttTracefile *tf;
GArray *group;
- int i;
+ int i, ret;
struct ltt_block_start_header *header;
+ DIR *dir;
+ struct dirent *entry;
+ guint control_found = 0;
+ guint eventdefs_found = 0;
+ struct stat stat_buf;
+ gchar path[PATH_MAX];
t = g_new(LttTrace, 1);
if(!t) goto alloc_error;
get_absolute_pathname(pathname, abs_path);
t->pathname = g_quark_from_string(abs_path);
- /* Open all the tracefiles */
g_datalist_init(&t->tracefiles);
- if(open_tracefiles(t, abs_path, ""))
- goto open_error;
+
+ /* Test to see if it looks like a trace */
+ dir = opendir(abs_path);
+ if(dir == NULL) {
+ perror(abs_path);
+ goto open_error;
+ }
+ while((entry = readdir(dir)) != NULL) {
+ strcpy(path, abs_path);
+ strcat(path, "/");
+ strcat(path, entry->d_name);
+ ret = stat(path, &stat_buf);
+ if(ret == -1) {
+ perror(path);
+ continue;
+ }
+ if(S_ISDIR(stat_buf.st_mode)) {
+ if(strcmp(entry->d_name, "control") == 0) {
+ control_found = 1;
+ }
+ if(strcmp(entry->d_name, "eventdefs") == 0) {
+ eventdefs_found = 1;
+ }
+ }
+ }
+ closedir(dir);
+
+ if(!control_found || !eventdefs_found) goto find_error;
+
+ /* Open all the tracefiles */
+ if(open_tracefiles(t, abs_path, "")) {
+ g_warning("Error opening tracefile %s", abs_path);
+ goto find_error;
+ }
/* Prepare the facilities containers : array and mapping */
/* Array is zeroed : the "exists" field is set to false by default */
g_assert(group->len > 0);
tf = &g_array_index (group, LttTracefile, 0);
header = (struct ltt_block_start_header*)tf->buffer.head;
- t->arch_type = ltt_get_uint32(LTT_GET_BO(tf), &header->trace.arch_type);
- t->arch_variant = ltt_get_uint32(LTT_GET_BO(tf), &header->trace.arch_variant);
- t->arch_size = header->trace.arch_size;
- t->ltt_major_version = header->trace.major_version;
- t->ltt_minor_version = header->trace.minor_version;
- t->flight_recorder = header->trace.flight_recorder;
- t->has_heartbeat = header->trace.has_heartbeat;
- t->has_alignment = header->trace.has_alignment;
- t->has_tsc = header->trace.has_tsc;
-
+  g_assert(parse_trace_header(&header->trace,
+        tf, t) == 0);
+
+ t->num_cpu = group->len;
for(i=0; i<group->len; i++) {
tf = &g_array_index (group, LttTracefile, i);
facilities_error:
g_datalist_clear(&t->facilities_by_name);
g_array_free(t->facilities_by_num, TRUE);
-open_error:
+find_error:
g_datalist_clear(&t->tracefiles);
+open_error:
g_free(t);
alloc_error:
return NULL;
}
+/* Return the quark holding the trace's absolute pathname. */
-GQuark ltt_trace_name(LttTrace *t)
+GQuark ltt_trace_name(const LttTrace *t)
{
  return t->pathname;
}
static void ltt_tracefile_time_span_get(LttTracefile *tf,
LttTime *start, LttTime *end)
{
- struct ltt_block_start_header * header;
int err;
err = map_block(tf, 0);
*Get the name of a tracefile
****************************************************************************/
+/* Return the tracefile's name quark. */
-GQuark ltt_tracefile_name(LttTracefile *tf)
+GQuark ltt_tracefile_name(const LttTracefile *tf)
{
  return tf->name;
}
+/* Return the tracefile's long_name quark (companion accessor to
+ * ltt_tracefile_name). */
+GQuark ltt_tracefile_long_name(const LttTracefile *tf)
+{
+  return tf->long_name;
+}
+
+
guint ltt_tracefile_num(LttTracefile *tf)
{
if(ret == ERANGE) goto range; /* ERANGE or EPERM */
else if(ret) goto fail;
- if(ltt_time_compare(time, tf->event.event_time) >= 0)
- break;
+ if(ltt_time_compare(time, tf->event.event_time) <= 0)
+ goto found;
}
} else if(ltt_time_compare(time, tf->buffer.begin.timestamp) < 0) {
/* go to lower part */
- high = block_num;
+ high = block_num - 1;
} else if(ltt_time_compare(time, tf->buffer.end.timestamp) > 0) {
/* go to higher part */
- low = block_num;
+ low = block_num + 1;
} else {/* The event is right in the buffer!
(or in the next buffer first event) */
while(1) {
if(ret == ERANGE) goto range; /* ERANGE or EPERM */
else if(ret) goto fail;
- if(ltt_time_compare(time, tf->event.event_time) >= 0)
+ if(ltt_time_compare(time, tf->event.event_time) <= 0)
break;
}
goto found;
err = ltt_tracefile_read_op(tf);
if(err) goto fail;
- return;
+ return 0;
fail:
g_error("ltt_tracefile_seek_time failed on tracefile %s",
g_quark_to_string(tf->name));
+ return 1;
}
/* Calculate the real event time based on the buffer boundaries */
g_assert(tf->trace->has_tsc);
- time = ltt_time_from_uint64(
- (guint64)(tf->buffer.tsc - tf->buffer.begin.cycle_count) *
- tf->buffer.nsecs_per_cycle);
- time = ltt_time_add(tf->buffer.begin.timestamp, time);
+// time = ltt_time_from_uint64(
+// cycles_2_ns(tf, (guint64)(tf->buffer.tsc - tf->buffer.begin.cycle_count)));
+ time = ltt_time_from_uint64((tf->buffer.tsc - tf->trace->start_tsc) * 1000000
+ / (double)tf->trace->start_freq);
+ //time = ltt_time_add(tf->buffer.begin.timestamp, time);
+ time = ltt_time_add(tf->trace->start_time, time);
return time;
}
*Return value
*
* Returns 0 if an event can be used in tf->event.
- * Returns ERANGE on end of trace. The event in tf->event still can be used.
+ * Returns ERANGE on end of trace. The event in tf->event still can be used
+ * (if the last block was not empty).
* Returns EPERM on error.
*
* This function does make the tracefile event structure point to the event
/* do specific operation on events */
int ltt_tracefile_read_op(LttTracefile *tf)
{
- int err;
- LttFacility *f;
- void * pos;
LttEvent *event;
event = &tf->event;
* event specific operation. */
int ltt_tracefile_read_update_event(LttTracefile *tf)
{
- int err;
- LttFacility *f;
void * pos;
LttEvent *event;
g_assert(block_num < tf->num_blocks);
- if(tf->buffer.head != NULL)
- munmap(tf->buffer.head, PAGE_ALIGN(tf->buf_size));
+ if(tf->buffer.head != NULL) {
+ if(munmap(tf->buffer.head, PAGE_ALIGN(tf->buf_size))) {
+ g_warning("unmap size : %u\n",
+ PAGE_ALIGN(tf->buf_size));
+ perror("munmap error");
+ g_assert(0);
+ }
+ }
+
/* Multiple of pages aligned head */
tf->buffer.head = mmap(0,
- PAGE_ALIGN(tf->block_size),
+ PAGE_ALIGN(tf->buf_size),
PROT_READ, MAP_PRIVATE, tf->fd,
- PAGE_ALIGN((off_t)tf->block_size * (off_t)block_num));
+ PAGE_ALIGN((off_t)tf->buf_size * (off_t)block_num));
if(tf->buffer.head == MAP_FAILED) {
perror("Error in allocating memory for buffer of tracefile");
header = (struct ltt_block_start_header*)tf->buffer.head;
- tf->buffer.begin.timestamp = ltt_get_time(LTT_GET_BO(tf),
- &header->begin.timestamp);
- tf->buffer.begin.timestamp.tv_nsec *= NSEC_PER_USEC;
- g_warning("block %u begin : %lu.%lu", block_num,
- tf->buffer.begin.timestamp.tv_sec, tf->buffer.begin.timestamp.tv_nsec);
+#if 0
+ tf->buffer.begin.timestamp = ltt_time_add(
+ ltt_time_from_uint64(
+ ltt_get_uint64(LTT_GET_BO(tf),
+ &header->begin.timestamp)
+ - tf->trace->start_monotonic),
+ tf->trace->start_time);
+#endif //0
+ //g_debug("block %u begin : %lu.%lu", block_num,
+ // tf->buffer.begin.timestamp.tv_sec, tf->buffer.begin.timestamp.tv_nsec);
tf->buffer.begin.cycle_count = ltt_get_uint64(LTT_GET_BO(tf),
&header->begin.cycle_count);
- tf->buffer.end.timestamp = ltt_get_time(LTT_GET_BO(tf),
- &header->end.timestamp);
- tf->buffer.end.timestamp.tv_nsec *= NSEC_PER_USEC;
- g_warning("block %u end : %lu.%lu", block_num,
- tf->buffer.end.timestamp.tv_sec, tf->buffer.end.timestamp.tv_nsec);
+ tf->buffer.begin.freq = ltt_get_uint64(LTT_GET_BO(tf),
+ &header->begin.freq);
+ tf->buffer.begin.timestamp = ltt_time_add(
+ ltt_time_from_uint64(
+ (tf->buffer.begin.cycle_count
+ - tf->trace->start_tsc) * 1000000
+ / (double)tf->trace->start_freq),
+ tf->trace->start_time);
+#if 0
+
+ tf->buffer.end.timestamp = ltt_time_add(
+ ltt_time_from_uint64(
+ ltt_get_uint64(LTT_GET_BO(tf),
+ &header->end.timestamp)
+ - tf->trace->start_monotonic),
+ tf->trace->start_time);
+#endif //0
+ //g_debug("block %u end : %lu.%lu", block_num,
+ // tf->buffer.end.timestamp.tv_sec, tf->buffer.end.timestamp.tv_nsec);
tf->buffer.end.cycle_count = ltt_get_uint64(LTT_GET_BO(tf),
&header->end.cycle_count);
+ tf->buffer.end.freq = ltt_get_uint64(LTT_GET_BO(tf),
+ &header->end.freq);
tf->buffer.lost_size = ltt_get_uint32(LTT_GET_BO(tf),
- &header->lost_size);
-
+ &header->lost_size);
+ tf->buffer.end.timestamp = ltt_time_add(
+ ltt_time_from_uint64(
+ (tf->buffer.end.cycle_count
+ - tf->trace->start_tsc) * 1000000
+ / (double)tf->trace->start_freq),
+ tf->trace->start_time);
+
tf->buffer.tsc = tf->buffer.begin.cycle_count;
tf->event.tsc = tf->buffer.tsc;
+ tf->buffer.freq = tf->buffer.begin.freq;
/* FIXME
* eventually support variable buffer size : will need a partial pre-read of
* the headers to create an index when we open the trace... eventually. */
- g_assert(tf->block_size == ltt_get_uint32(LTT_GET_BO(tf),
+ g_assert(tf->buf_size == ltt_get_uint32(LTT_GET_BO(tf),
&header->buf_size));
/* Now that the buffer is mapped, calculate the time interpolation for the
* block. */
- tf->buffer.nsecs_per_cycle = calc_nsecs_per_cycle(tf);
+// tf->buffer.nsecs_per_cycle = calc_nsecs_per_cycle(tf);
+ //tf->buffer.cyc2ns_scale = calc_nsecs_per_cycle(tf);
/* Make the current event point to the beginning of the buffer :
* it means that the event read must get the first event. */
switch((enum ltt_core_events)tf->event.event_id) {
case LTT_EVENT_FACILITY_LOAD:
size = strlen((char*)tf->event.data) + 1;
- g_debug("Update Event facility load of facility %s", (char*)tf->event.data);
+ //g_debug("Update Event facility load of facility %s", (char*)tf->event.data);
size += sizeof(struct LttFacilityLoad);
break;
case LTT_EVENT_FACILITY_UNLOAD:
- g_debug("Update Event facility unload");
+ //g_debug("Update Event facility unload");
size = sizeof(struct LttFacilityUnload);
break;
case LTT_EVENT_STATE_DUMP_FACILITY_LOAD:
size = strlen((char*)tf->event.data) + 1;
- g_debug("Update Event facility load state dump of facility %s",
- (char*)tf->event.data);
+ //g_debug("Update Event facility load state dump of facility %s",
+ // (char*)tf->event.data);
size += sizeof(struct LttStateDumpFacilityLoad);
break;
case LTT_EVENT_HEARTBEAT:
- g_debug("Update Event heartbeat");
+ //g_debug("Update Event heartbeat");
size = sizeof(TimeHeartbeat);
break;
default:
g_quark_to_string(tf->name));
goto event_type_error;
}
-
+
if(event_type->root_field)
size = get_field_type_size(tf, event_type,
0, 0, event_type->root_field, tf->event.data);
else
size = 0;
- g_debug("Event root field : f.e %hhu.%hhu size %lu", tf->event.facility_id,
- tf->event.event_id, size);
+ //g_debug("Event root field : f.e %hhu.%hhu size %zd",
+ // tf->event.facility_id,
+ // tf->event.event_id, size);
}
tf->event.data_size = size;
/* Check consistency between kernel and LTTV structure sizes */
g_assert(tf->event.data_size == tf->event.event_size);
-
+
return;
facility_error:
{
int ret = 0;
void *pos;
- ssize_t event_size;
/* seek over the buffer header if we are at the buffer start */
if(tf->event.offset == 0) {
- tf->event.offset += sizeof(struct ltt_block_start_header);
+ tf->event.offset += tf->buffer_header_size;
- if(tf->event.offset == tf->block_size - tf->buffer.lost_size) {
+ if(tf->event.offset == tf->buf_size - tf->buffer.lost_size) {
ret = ERANGE;
}
goto found;
tf->event.offset = pos - tf->buffer.head;
- if(tf->event.offset == tf->block_size - tf->buffer.lost_size) {
+ if(tf->event.offset == tf->buf_size - tf->buffer.lost_size) {
ret = ERANGE;
goto found;
}
+ g_assert(tf->event.offset < tf->buf_size - tf->buffer.lost_size);
found:
return ret;
return ENOPROTOOPT;
}
-
+#if 0
/*****************************************************************************
*Function name
* calc_nsecs_per_cycle : calculate nsecs per cycle for current block
+ *
+ * 1.0 / (freq(khz) *1000) * 1000000000
*Input Params
* t : tracefile
****************************************************************************/
-
-static double calc_nsecs_per_cycle(LttTracefile * tf)
+/* from timer_tsc.c */
+#define CYC2NS_SCALE_FACTOR 10
+static guint32 calc_nsecs_per_cycle(LttTracefile * tf)
{
- LttTime lBufTotalTime; /* Total time for this buffer */
- double lBufTotalNSec; /* Total time for this buffer in nsecs */
- LttCycleCount lBufTotalCycle;/* Total cycles for this buffer */
-
- /* Calculate the total time for this buffer */
- lBufTotalTime = ltt_time_sub(
- ltt_get_time(LTT_GET_BO(tf), &tf->buffer.end.timestamp),
- ltt_get_time(LTT_GET_BO(tf), &tf->buffer.begin.timestamp));
-
- /* Calculate the total cycles for this bufffer */
- lBufTotalCycle = ltt_get_uint64(LTT_GET_BO(tf), &tf->buffer.end.cycle_count);
- lBufTotalCycle -= ltt_get_uint64(LTT_GET_BO(tf),
- &tf->buffer.begin.cycle_count);
-
- /* Convert the total time to double */
- lBufTotalNSec = ltt_time_to_double(lBufTotalTime);
+ //return 1e6 / (double)tf->buffer.freq;
+ guint32 cpu_mhz = tf->buffer.freq / 1000;
+ guint32 cyc2ns_scale = (1000 << CYC2NS_SCALE_FACTOR)/cpu_mhz;
- return lBufTotalNSec / (double)lBufTotalCycle;
+ return cyc2ns_scale;
+ // return 1e6 / (double)tf->buffer.freq;
+}
+static guint64 cycles_2_ns(LttTracefile *tf, guint64 cycles)
+{
+ return (cycles * tf->buffer.cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
}
+#endif //0
+
#if 0
void setFieldsOffset(LttTracefile *tf, LttEventType *evT,void *evD)
{
LttField *field1, LttField *field2)
{
guint different = 0;
- enum field_status local_fixed_root, local_fixed_parent;
guint i;
LttType *type1;
LttType *type2;
type1 = field1->field_type;
type2 = field2->field_type;
- size_t current_root_offset;
- size_t current_offset;
- enum field_status current_child_status, final_child_status;
- size_t max_size;
-
if(type1->type_class != type2->type_class) {
different = 1;
goto end;