#define g_close close
+/* These macros must only be used inside a function where a variable named
+ * page_size is defined (e.g. int page_size = getpagesize();). */
+#define PAGE_MASK (~(page_size-1))
+#define PAGE_ALIGN(addr) (((addr)+page_size-1)&PAGE_MASK)
+
/* obtain the time of an event */
static inline LttTime getEventTime(LttTracefile * tf);
{
struct stat lTDFStat; /* Trace data file status */
struct ltt_block_start_header *header;
+ int page_size = getpagesize();
//open the file
- tf->name = g_quark_from_string(fileName);
+ tf->long_name = g_quark_from_string(fileName);
tf->trace = t;
tf->fd = open(fileName, O_RDONLY);
if(tf->fd < 0){
/* Temporarily map the buffer start header to get trace information */
/* Multiple of pages aligned head */
- tf->buffer.head = mmap(0, sizeof(struct ltt_block_start_header), PROT_READ,
+ tf->buffer.head = mmap(0,
+ PAGE_ALIGN(sizeof(struct ltt_block_start_header)), PROT_READ,
MAP_PRIVATE, tf->fd, 0);
if(tf->buffer.head == MAP_FAILED) {
perror("Error in allocating memory for buffer of tracefile");
//store the size of the file
tf->file_size = lTDFStat.st_size;
- tf->block_size = header->buf_size;
- tf->num_blocks = tf->file_size / tf->block_size;
-
- munmap(tf->buffer.head, sizeof(struct ltt_block_start_header));
+ tf->buf_size = ltt_get_uint32(LTT_GET_BO(tf), &header->buf_size);
+ tf->num_blocks = tf->file_size / tf->buf_size;
+
+ if(munmap(tf->buffer.head,
+ PAGE_ALIGN(sizeof(struct ltt_block_start_header)))) {
+ g_warning("unmap size : %u\n",
+ PAGE_ALIGN(sizeof(struct ltt_block_start_header)));
+ perror("munmap error");
+ g_assert(0);
+ }
tf->buffer.head = NULL;
//read the first block
/* Error */
unmap_file:
- munmap(tf->buffer.head, sizeof(struct ltt_block_start_header));
+ if(munmap(tf->buffer.head,
+ PAGE_ALIGN(sizeof(struct ltt_block_start_header)))) {
+ g_warning("unmap size : %u\n",
+ PAGE_ALIGN(sizeof(struct ltt_block_start_header)));
+ perror("munmap error");
+ g_assert(0);
+ }
close_file:
close(tf->fd);
end:
return -1;
}
+/* Accessor: return the LttTrace that owns this tracefile. */
+LttTrace *ltt_tracefile_get_trace(LttTracefile *tf)
+{
+  return tf->trace;
+}
+
#if 0
/*****************************************************************************
*Open control and per cpu tracefiles
+/* Close a per-cpu tracefile: unmap its currently mapped buffer (if any)
+ * and close the underlying file descriptor. */
void ltt_tracefile_close(LttTracefile *t)
{
+  int page_size = getpagesize();
+
  if(t->buffer.head != NULL)
-    munmap(t->buffer.head, t->buf_size);
+    /* The munmap length must match the page-aligned length used at mmap
+     * time; a failure here indicates a bookkeeping bug, so log and abort. */
+    if(munmap(t->buffer.head, PAGE_ALIGN(t->buf_size))) {
+      g_warning("unmap size : %u\n",
+          PAGE_ALIGN(t->buf_size));
+      perror("munmap error");
+      g_assert(0);
+    }
+
  close(t->fd);
}
strncpy(char_name, raw_name, underscore_pos);
+ char_name[underscore_pos] = '\0';
+
*name = g_quark_from_string(char_name);
*num = cpu_num;
* A tracefile group is simply an array where all the per cpu tracefiles sits.
*/
-static int open_tracefiles(LttTrace *trace, char *root_path,
- char *relative_path)
+static int open_tracefiles(LttTrace *trace, gchar *root_path,
+ gchar *relative_path)
{
DIR *dir = opendir(root_path);
struct dirent *entry;
struct stat stat_buf;
int ret;
- char path[PATH_MAX];
+ gchar path[PATH_MAX];
int path_len;
- char *path_ptr;
+ gchar *path_ptr;
int rel_path_len;
- char rel_path[PATH_MAX];
- char *rel_path_ptr;
+ gchar rel_path[PATH_MAX];
+ gchar *rel_path_ptr;
+ LttTracefile tmp_tf;
if(dir == NULL) {
perror(root_path);
ret = open_tracefiles(trace, path, rel_path);
if(ret < 0) continue;
} else if(S_ISREG(stat_buf.st_mode)) {
- g_debug("Opening file.\n");
-
GQuark name;
guint num;
GArray *group;
if(get_tracefile_name_number(rel_path, &name, &num))
continue; /* invalid name */
+ g_debug("Opening file.\n");
+ if(ltt_tracefile_open(trace, path, &tmp_tf)) {
+ g_info("Error opening tracefile %s", path);
+
+ continue; /* error opening the tracefile : bad magic number ? */
+ }
+
g_debug("Tracefile name is %s and number is %u",
g_quark_to_string(name), num);
+ tmp_tf.cpu_online = 1;
+ tmp_tf.cpu_num = num;
+ tmp_tf.name = name;
+
group = g_datalist_id_get_data(&trace->tracefiles, name);
if(group == NULL) {
/* Elements are automatically cleared when the array is allocated.
g_datalist_id_set_data_full(&trace->tracefiles, name,
group, ltt_tracefile_group_destroy);
}
+
/* Add the per cpu tracefile to the named group */
unsigned int old_len = group->len;
if(num+1 > old_len)
group = g_array_set_size(group, num+1);
- tf = &g_array_index (group, LttTracefile, num);
-
- if(ltt_tracefile_open(trace, path, tf)) {
- g_info("Error opening tracefile %s", path);
- g_array_set_size(group, old_len);
-
- if(!ltt_tracefile_group_has_cpu_online(group))
- g_datalist_id_remove_data(&trace->tracefiles, name);
+ g_array_index (group, LttTracefile, num) = tmp_tf;
- continue; /* error opening the tracefile : bad magic number ? */
- }
- tf->cpu_online = 1;
- tf->cpu_num = num;
}
}
const gchar *text;
guint textlen;
gint err;
- int i, j;
- LttEventType *et;
text = g_quark_to_string(t->pathname);
textlen = strlen(text);
err = snprintf(desc_file_name+textlen, PATH_MAX-textlen-1,
"%u", f->checksum);
- if(err) goto name_error;
+ if(err < 0) goto name_error;
textlen=strlen(desc_file_name);
err = ltt_facility_open(f, t, desc_file_name);
if(err) goto facility_error;
-
- for(i=0;i<t->facilities_by_num->len;i++){
- f = &g_array_index(t->facilities_by_num, LttFacility, i);
- if(f->exists) {
- for(j=0; j<f->events->len; j++){
- et = &g_array_index(f->events, LttEventType, j);
- set_fields_offsets(fac_tf, et);
- }
- }
- }
-
return 0;
+/* GData destroy notifier for a facility-id GArray: frees the array only.
+ * NOTE(review): the facilities themselves are now closed in
+ * ltt_trace_close(), which iterates facilities_by_num — hence the removal
+ * of the per-facility ltt_facility_close() loop below. */
static void ltt_fac_ids_destroy(gpointer data)
{
  GArray *fac_ids = (GArray *)data;
-  int i;
-  LttFacility *fac;
-
-  for(i=0; i<fac_ids->len; i++) {
-    fac = &g_array_index (fac_ids, LttFacility, i);
-    ltt_facility_close(fac);
-  }
  g_array_free(fac_ids, TRUE);
}
int err;
LttFacility *fac;
GArray *fac_ids;
+ guint i;
+ LttEventType *et;
while(1) {
err = ltt_tracefile_read_seek(tf);
* 0 : facility load
* 1 : facility unload
* 2 : state dump facility load
- * Facility 1 : (heartbeat)
- * 0 : heartbeat
+ * 3 : heartbeat
*/
- if(tf->event.facility_id > 1) { /* Should only contain core and heartbeat
- facilities */
+ if(tf->event.facility_id != LTT_FACILITY_CORE) {
+ /* Should only contain core facility */
g_warning("Error in processing facility file %s, "
"should not contain facility id %u.", g_quark_to_string(tf->name),
tf->event.facility_id);
err = EPERM;
goto fac_id_error;
- } else if(tf->event.facility_id == LTT_FACILITY_CORE) {
+ } else {
struct LttFacilityLoad *fac_load_data;
+ struct LttStateDumpFacilityLoad *fac_state_dump_load_data;
char *fac_name;
// FIXME align
switch((enum ltt_core_events)tf->event.event_id) {
case LTT_EVENT_FACILITY_LOAD:
+ fac_name = (char*)(tf->event.data);
+ g_debug("Doing LTT_EVENT_FACILITY_LOAD of facility %s",
+ fac_name);
fac_load_data =
- (struct LttFacilityLoad *)tf->event.data;
- fac_name =
- (char*)(tf->event.data + sizeof(struct LttFacilityLoad));
+ (struct LttFacilityLoad *)
+ (tf->event.data + strlen(fac_name) + 1);
fac = &g_array_index (tf->trace->facilities_by_num, LttFacility,
ltt_get_uint32(LTT_GET_BO(tf), &fac_load_data->id));
g_assert(fac->exists == 0);
fac->id = ltt_get_uint32(LTT_GET_BO(tf), &fac_load_data->id);
fac->pointer_size = ltt_get_uint32(LTT_GET_BO(tf),
&fac_load_data->pointer_size);
+ fac->long_size = ltt_get_uint32(LTT_GET_BO(tf),
+ &fac_load_data->long_size);
fac->size_t_size = ltt_get_uint32(LTT_GET_BO(tf),
&fac_load_data->size_t_size);
fac->alignment = ltt_get_uint32(LTT_GET_BO(tf),
&fac_load_data->alignment);
if(ltt_get_facility_description(fac, tf->trace, tf))
- goto facility_error;
+ continue; /* error opening description */
fac->trace = tf->trace;
+
+ /* Preset the field offsets */
+ for(i=0; i<fac->events->len; i++){
+ et = &g_array_index(fac->events, LttEventType, i);
+ set_fields_offsets(tf, et);
+ }
+
fac->exists = 1;
fac_ids = g_datalist_id_get_data(&tf->trace->facilities_by_name,
break;
case LTT_EVENT_FACILITY_UNLOAD:
+ g_debug("Doing LTT_EVENT_FACILITY_UNLOAD");
/* We don't care about unload : facilities ID are valid for the whole
* trace. They simply won't be used after the unload. */
break;
case LTT_EVENT_STATE_DUMP_FACILITY_LOAD:
- fac_load_data =
- (struct LttFacilityLoad *)tf->event.data;
- fac_name =
- (char*)(tf->event.data + sizeof(struct LttFacilityLoad));
+ fac_name = (char*)(tf->event.data);
+ g_debug("Doing LTT_EVENT_STATE_DUMP_FACILITY_LOAD of facility %s",
+ fac_name);
+ fac_state_dump_load_data =
+ (struct LttStateDumpFacilityLoad *)
+ (tf->event.data + strlen(fac_name) + 1);
fac = &g_array_index (tf->trace->facilities_by_num, LttFacility,
- ltt_get_uint32(LTT_GET_BO(tf), &fac_load_data->id));
+ ltt_get_uint32(LTT_GET_BO(tf), &fac_state_dump_load_data->id));
g_assert(fac->exists == 0);
fac->name = g_quark_from_string(fac_name);
fac->checksum = ltt_get_uint32(LTT_GET_BO(tf),
- &fac_load_data->checksum);
- fac->id = fac_load_data->id;
+ &fac_state_dump_load_data->checksum);
+ fac->id = fac_state_dump_load_data->id;
fac->pointer_size = ltt_get_uint32(LTT_GET_BO(tf),
- &fac_load_data->pointer_size);
+ &fac_state_dump_load_data->pointer_size);
+ fac->long_size = ltt_get_uint32(LTT_GET_BO(tf),
+ &fac_state_dump_load_data->long_size);
fac->size_t_size = ltt_get_uint32(LTT_GET_BO(tf),
- &fac_load_data->size_t_size);
+ &fac_state_dump_load_data->size_t_size);
fac->alignment = ltt_get_uint32(LTT_GET_BO(tf),
- &fac_load_data->alignment);
+ &fac_state_dump_load_data->alignment);
if(ltt_get_facility_description(fac, tf->trace, tf))
- goto facility_error;
+ continue; /* error opening description */
fac->trace = tf->trace;
+ /* Preset the field offsets */
+ for(i=0; i<fac->events->len; i++){
+ et = &g_array_index(fac->events, LttEventType, i);
+ set_fields_offsets(tf, et);
+ }
+
fac->exists = 1;
fac_ids = g_datalist_id_get_data(&tf->trace->facilities_by_name,
fac_id_error:
update_error:
seek_error:
+ g_warning("An error occured in facility tracefile parsing");
return err;
}
/* Open all the tracefiles */
g_datalist_init(&t->tracefiles);
- if(open_tracefiles(t, abs_path, ""))
+ if(open_tracefiles(t, abs_path, "")) {
+ g_warning("Error opening tracefile %s", abs_path);
goto open_error;
+ }
/* Prepare the facilities containers : array and mapping */
/* Array is zeroed : the "exists" field is set to false by default */
void ltt_trace_close(LttTrace *t)
{
+ guint i;
+ LttFacility *fac;
+
+ for(i=0; i<t->facilities_by_num->len; i++) {
+ fac = &g_array_index (t->facilities_by_num, LttFacility, i);
+ if(fac->exists)
+ ltt_facility_close(fac);
+ }
+
g_datalist_clear(&t->facilities_by_name);
g_array_free(t->facilities_by_num, TRUE);
g_datalist_clear(&t->tracefiles);
return tf->name;
}
+
+/* Accessor: return the CPU number this per-cpu tracefile belongs to. */
+guint ltt_tracefile_num(LttTracefile *tf)
+{
+  return tf->cpu_num;
+}
+
/*****************************************************************************
* Get the number of blocks in the tracefile
****************************************************************************/
* the time passed in parameter.
*
* If the time parameter is outside the tracefile time span, seek to the first
- * or the last event of the tracefile.
+ * event or if after, return ERANGE.
*
* If the time parameter is before the first event, we have to seek specially to
* there.
*
- * If the time is after the end of the trace, get the last event.
+ * If the time is after the end of the trace, return ERANGE.
*
* Do a binary search to find the right block, then a sequential search in the
* block to find the event.
* you will jump over an event if you do.
*
* Return value : 0 : no error, the tf->event can be used
+ * ERANGE : time if after the last event of the trace
* otherwise : this is an error.
*
* */
* go to the first event. */
if(ltt_time_compare(time, tf->buffer.begin.timestamp) <= 0) {
ret = ltt_tracefile_read(tf);
+ if(ret == ERANGE) goto range;
+ else if (ret) goto fail;
goto found; /* There is either no event in the trace or the event points
to the first event in the trace */
}
goto fail;
}
- /* If the time is after the end of the trace, get the last event. */
- if(ltt_time_compare(time, tf->buffer.end.timestamp) >= 0) {
- /* While the ltt_tracefile_read doesn't return ERANGE or EPERM,
- * continue reading.
- */
- while(1) {
- ret = ltt_tracefile_read(tf);
- if(ret == ERANGE) goto found; /* ERANGE or EPERM */
- else if(ret) goto fail;
- }
+ /* If the time is after the end of the trace, return ERANGE. */
+ if(ltt_time_compare(time, tf->buffer.end.timestamp) > 0) {
+ goto range;
}
/* Binary search the block */
* (or in the next buffer first event) */
while(1) {
ret = ltt_tracefile_read(tf);
- if(ret == ERANGE) goto found; /* ERANGE or EPERM */
+ if(ret == ERANGE) goto range; /* ERANGE or EPERM */
else if(ret) goto fail;
if(ltt_time_compare(time, tf->event.event_time) >= 0)
- break;
+ goto found;
}
- } if(ltt_time_compare(time, tf->buffer.begin.timestamp) < 0) {
+ } else if(ltt_time_compare(time, tf->buffer.begin.timestamp) < 0) {
/* go to lower part */
high = block_num;
} else if(ltt_time_compare(time, tf->buffer.end.timestamp) > 0) {
} else {/* The event is right in the buffer!
(or in the next buffer first event) */
while(1) {
- ltt_tracefile_read(tf);
- if(ret == ERANGE) goto found; /* ERANGE or EPERM */
+ ret = ltt_tracefile_read(tf);
+ if(ret == ERANGE) goto range; /* ERANGE or EPERM */
else if(ret) goto fail;
if(ltt_time_compare(time, tf->event.event_time) >= 0)
found:
return 0;
+range:
+ return ERANGE;
/* Error handling */
fail:
g_assert(tf->trace->has_tsc);
time = ltt_time_from_uint64(
- (guint64)tf->buffer.tsc*tf->buffer.nsecs_per_cycle);
+ (guint64)(tf->buffer.tsc - tf->buffer.begin.cycle_count) *
+ tf->buffer.nsecs_per_cycle);
time = ltt_time_add(tf->buffer.begin.timestamp, time);
return time;
*Return value
*
* Returns 0 if an event can be used in tf->event.
- * Returns ERANGE on end of trace. The event in tf->event still can be used.
+ * Returns ERANGE on end of trace. The event in tf->event still can be used
+ * (if the last block was not empty).
* Returns EPERM on error.
*
* This function does make the tracefile event structure point to the event
event->event_id = *(guint8*)pos;
pos += sizeof(guint8);
+ event->event_size = ltt_get_uint16(LTT_GET_BO(tf), pos);
+ pos += sizeof(guint16);
+
event->data = pos;
/* get the data size and update the event fields with the current
static gint map_block(LttTracefile * tf, guint block_num)
{
+ int page_size = getpagesize();
struct ltt_block_start_header *header;
g_assert(block_num < tf->num_blocks);
- if(tf->buffer.head != NULL)
- munmap(tf->buffer.head, tf->buf_size);
+ if(tf->buffer.head != NULL) {
+ if(munmap(tf->buffer.head, PAGE_ALIGN(tf->buf_size))) {
+ g_warning("unmap size : %u\n",
+ PAGE_ALIGN(tf->buf_size));
+ perror("munmap error");
+ g_assert(0);
+ }
+ }
+
/* Multiple of pages aligned head */
- tf->buffer.head = mmap(0, tf->block_size, PROT_READ, MAP_PRIVATE, tf->fd,
- (off_t)tf->block_size * (off_t)block_num);
+ tf->buffer.head = mmap(0,
+ PAGE_ALIGN(tf->buf_size),
+ PROT_READ, MAP_PRIVATE, tf->fd,
+ PAGE_ALIGN((off_t)tf->buf_size * (off_t)block_num));
if(tf->buffer.head == MAP_FAILED) {
perror("Error in allocating memory for buffer of tracefile");
tf->buffer.begin.timestamp = ltt_get_time(LTT_GET_BO(tf),
&header->begin.timestamp);
tf->buffer.begin.timestamp.tv_nsec *= NSEC_PER_USEC;
+ g_debug("block %u begin : %lu.%lu", block_num,
+ tf->buffer.begin.timestamp.tv_sec, tf->buffer.begin.timestamp.tv_nsec);
tf->buffer.begin.cycle_count = ltt_get_uint64(LTT_GET_BO(tf),
&header->begin.cycle_count);
tf->buffer.end.timestamp = ltt_get_time(LTT_GET_BO(tf),
&header->end.timestamp);
tf->buffer.end.timestamp.tv_nsec *= NSEC_PER_USEC;
+ g_debug("block %u end : %lu.%lu", block_num,
+ tf->buffer.end.timestamp.tv_sec, tf->buffer.end.timestamp.tv_nsec);
tf->buffer.end.cycle_count = ltt_get_uint64(LTT_GET_BO(tf),
&header->end.cycle_count);
tf->buffer.lost_size = ltt_get_uint32(LTT_GET_BO(tf),
/* FIXME
* eventually support variable buffer size : will need a partial pre-read of
* the headers to create an index when we open the trace... eventually. */
- g_assert(tf->block_size == ltt_get_uint32(LTT_GET_BO(tf),
+ g_assert(tf->buf_size == ltt_get_uint32(LTT_GET_BO(tf),
&header->buf_size));
/* Now that the buffer is mapped, calculate the time interpolation for the
if(likely(tf->event.facility_id == LTT_FACILITY_CORE)) {
switch((enum ltt_core_events)tf->event.event_id) {
case LTT_EVENT_FACILITY_LOAD:
- size = strlen((char*)tf->event.data);
+ size = strlen((char*)tf->event.data) + 1;
+ g_debug("Update Event facility load of facility %s", (char*)tf->event.data);
size += sizeof(struct LttFacilityLoad);
break;
case LTT_EVENT_FACILITY_UNLOAD:
+ g_debug("Update Event facility unload");
size = sizeof(struct LttFacilityUnload);
break;
case LTT_EVENT_STATE_DUMP_FACILITY_LOAD:
- size = strlen((char*)tf->event.data);
+ size = strlen((char*)tf->event.data) + 1;
+ g_debug("Update Event facility load state dump of facility %s",
+ (char*)tf->event.data);
size += sizeof(struct LttStateDumpFacilityLoad);
break;
case LTT_EVENT_HEARTBEAT:
+ g_debug("Update Event heartbeat");
size = sizeof(TimeHeartbeat);
break;
default:
g_quark_to_string(tf->name));
goto event_type_error;
}
+
+ if(event_type->root_field)
+ size = get_field_type_size(tf, event_type,
+ 0, 0, event_type->root_field, tf->event.data);
+ else
+ size = 0;
- size = get_field_type_size(tf, event_type,
- 0, 0, event_type->root_field, tf->event.data);
- g_debug("Event root field : f.e %hhu.%hhu size %lu", tf->event.facility_id,
+ g_debug("Event root field : f.e %hhu.%hhu size %zd",
+ tf->event.facility_id,
tf->event.event_id, size);
}
tf->event.data_size = size;
+ /* Check consistency between kernel and LTTV structure sizes */
+ g_assert(tf->event.data_size == tf->event.event_size);
+
return;
facility_error:
/* seek over the buffer header if we are at the buffer start */
if(tf->event.offset == 0) {
tf->event.offset += sizeof(struct ltt_block_start_header);
- goto found;
- }
-
- if(tf->event.offset == tf->buffer.lost_size) {
- ret = ERANGE;
+ if(tf->event.offset == tf->buf_size - tf->buffer.lost_size) {
+ ret = ERANGE;
+ }
goto found;
}
+
pos = tf->event.data;
if(tf->event.data_size < 0) goto error;
pos += (size_t)tf->event.data_size;
tf->event.offset = pos - tf->buffer.head;
+
+ if(tf->event.offset == tf->buf_size - tf->buffer.lost_size) {
+ ret = ERANGE;
+ goto found;
+ }
found:
return ret;
LttCycleCount lBufTotalCycle;/* Total cycles for this buffer */
/* Calculate the total time for this buffer */
- lBufTotalTime = ltt_time_sub(
- ltt_get_time(LTT_GET_BO(tf), &tf->buffer.end.timestamp),
- ltt_get_time(LTT_GET_BO(tf), &tf->buffer.begin.timestamp));
+ lBufTotalTime = ltt_time_sub(tf->buffer.end.timestamp,
+ tf->buffer.begin.timestamp);
/* Calculate the total cycles for this bufffer */
- lBufTotalCycle = ltt_get_uint64(LTT_GET_BO(tf), &tf->buffer.end.cycle_count);
- lBufTotalCycle -= ltt_get_uint64(LTT_GET_BO(tf),
- &tf->buffer.begin.cycle_count);
+ lBufTotalCycle = tf->buffer.end.cycle_count;
+ lBufTotalCycle -= tf->buffer.begin.cycle_count;
/* Convert the total time to double */
lBufTotalNSec = ltt_time_to_double(lBufTotalTime);
break;
case LTT_LONG:
case LTT_ULONG:
- field->field_size = (off_t)event_type->facility->pointer_size;
+ field->field_size = (off_t)event_type->facility->long_size;
field->fixed_size = FIELD_FIXED;
break;
case LTT_SIZE_T:
field->child[0]);
field->fixed_size = FIELD_VARIABLE;
field->field_size = 0;
+ *fixed_root = FIELD_VARIABLE;
+ *fixed_parent = FIELD_VARIABLE;
break;
case LTT_STRING:
field->fixed_size = FIELD_VARIABLE;
field->field_size = 0;
+ *fixed_root = FIELD_VARIABLE;
+ *fixed_parent = FIELD_VARIABLE;
break;
case LTT_ARRAY:
local_fixed_root = FIELD_VARIABLE;
&local_fixed_root, &local_fixed_parent,
field->child[0]);
field->fixed_size = field->child[0]->fixed_size;
- if(field->fixed_size == FIELD_FIXED)
+ if(field->fixed_size == FIELD_FIXED) {
field->field_size = type->element_number * field->child[0]->field_size;
- else
+ } else {
field->field_size = 0;
+ *fixed_root = FIELD_VARIABLE;
+ *fixed_parent = FIELD_VARIABLE;
+ }
break;
case LTT_STRUCT:
current_root_offset = field->offset_root;