self->nb_event = 0;
/* Seek time to beginning */
- g_tree_destroy(self->parent.ts_context->pqueue);
- self->parent.ts_context->pqueue = g_tree_new(compare_tracefile);
+ // Mathieu : fix : don't seek the traceset here : it causes inconsistency in
+ // seek closest. It's the tracecontext's job to seek the trace to the beginning
+ // anyway : the init state might be used in the middle of the trace as well...
+ //g_tree_destroy(self->parent.ts_context->pqueue);
+ //self->parent.ts_context->pqueue = g_tree_new(compare_tracefile);
- lttv_process_trace_seek_time(&self->parent, ltt_time_zero);
+
+ //lttv_process_trace_seek_time(&self->parent, ltt_time_zero);
nb_cpus = ltt_trace_get_num_cpu(self->parent.t);
nb_tracefile = self->parent.tracefiles->len;
- g_tree_destroy(tsc->pqueue);
- tsc->pqueue = g_tree_new(compare_tracefile);
+ //g_tree_destroy(tsc->pqueue);
+ //tsc->pqueue = g_tree_new(compare_tracefile);
for(i = 0 ; i < nb_tracefile ; i++) {
tfcs =
g_assert(tfcs->parent.t_context != NULL);
LttvTracefileContext *tfc = LTTV_TRACEFILE_CONTEXT(tfcs);
+ g_tree_remove(tsc->pqueue, tfc);
if(ep != NULL) {
g_assert(ltt_tracefile_seek_position(tfc->tf, ep) == 0);
g_free(name_tables);
}
+#ifdef HASH_TABLE_DEBUG
+
+/* Per-entry callback for hash_table_check() : dereferences the
+ * LttvProcessState stored as the hash table value so that a corrupted
+ * pointer faults (or trips a memory checker) at a known location.
+ * stack_len is deliberately unused ; the read itself is the check. */
+static void test_process(gpointer key, gpointer value, gpointer user_data)
+{
+ LttvProcessState *process = (LttvProcessState *)value;
+
+ /* Test for process corruption */
+ guint stack_len = process->execution_stack->len;
+}
+
+/* Debug-only sanity pass : touches every entry of the process hash
+ * table to surface corruption early. Compiled in only when
+ * HASH_TABLE_DEBUG is defined. */
+static void hash_table_check(GHashTable *table)
+{
+ g_hash_table_foreach(table, test_process, NULL);
+}
+
+
+#endif
+
+
static void push_state(LttvTracefileState *tfs, LttvExecutionMode t,
guint state_id)
{
guint cpu = ltt_tracefile_num(tfs->parent.tf);
LttvTraceState *ts = (LttvTraceState*)tfs->parent.t_context;
+
+#ifdef HASH_TABLE_DEBUG
+ hash_table_check(ts->processes);
+#endif
LttvProcessState *process = ts->running_process[cpu];
guint depth = process->execution_stack->len;
//process->last_cpu_index = ltt_tracefile_num(((LttvTracefileContext*)tfs)->tf);
process->execution_stack = g_array_sized_new(FALSE, FALSE,
sizeof(LttvExecutionState), PREALLOCATED_EXECUTION_STACK);
- process->execution_stack = g_array_set_size(process->execution_stack, 1);
+ process->execution_stack = g_array_set_size(process->execution_stack, 2);
es = process->state = &g_array_index(process->execution_stack,
LttvExecutionState, 0);
es->t = LTTV_STATE_USER_MODE;
es->entry = *timestamp;
//g_assert(timestamp->tv_sec != 0);
es->change = *timestamp;
+ es->s = LTTV_STATE_RUN;
+
+ es = process->state = &g_array_index(process->execution_stack,
+ LttvExecutionState, 1);
+ es->t = LTTV_STATE_SYSCALL;
+ es->n = LTTV_STATE_SUBMODE_NONE;
+ es->entry = *timestamp;
+ //g_assert(timestamp->tv_sec != 0);
+ es->change = *timestamp;
es->s = LTTV_STATE_WAIT_FORK;
return process;
guint cpu = ltt_tracefile_num(s->parent.tf);
LttvTraceState *ts = (LttvTraceState*)s->parent.t_context;
LttvProcessState *process = ts->running_process[cpu];
+ LttvProcessState *child_process;
/* Parent PID */
f = thf->f1;
f = thf->f2;
child_pid = ltt_event_get_unsigned(e, f);
+ /* Mathieu : it seems like the process might have been scheduled in before the
+ * fork, and, in a rare case, might be the current process. This might happen
+ * in an SMP case where we don't have enough precision on the clocks.
+ *
+ * Test reenabled after precision fixes on time. (Mathieu) */
+
zombie_process = lttv_state_find_process(ts, ANY_CPU, child_pid);
if(unlikely(zombie_process != NULL)) {
/* Reutilisation of PID. Only now we are sure that the old PID
* has been released. FIXME : should know when release_task happens instead.
*/
+ guint num_cpus = ltt_trace_get_num_cpu(ts->parent.t);
+ guint i;
+ for(i=0; i< num_cpus; i++) {
+ g_assert(zombie_process != ts->running_process[i]);
+ }
+
exit_process(s, zombie_process);
}
+
g_assert(process->pid != child_pid);
// FIXME : Add this test in the "known state" section
// g_assert(process->pid == parent_pid);
- lttv_state_create_process(ts, process, cpu, child_pid, &s->parent.timestamp);
+ child_process = lttv_state_find_process(ts, ANY_CPU, child_pid);
+ if(child_process == NULL) {
+ lttv_state_create_process(ts, process, cpu,
+ child_pid, &s->parent.timestamp);
+ } else {
+ /* The process has already been created : due to time imprecision between
+ * multiple CPUs, it has been scheduled in before its creation.
+ *
+ * Simply set the correct parent.
+ */
+ child_process->ppid = process->pid;
+ }
return FALSE;
}
/* PID of the process to release */
release_pid = ltt_event_get_unsigned(e, thf->f1);
+
+ g_assert(release_pid != 0);
process = lttv_state_find_process(ts, ANY_CPU, release_pid);
if(likely(process != NULL)) {
/* release_task is happening at kernel level : we can now safely release
* the data structure of the process */
+ //This test is tricky, though, as it may happen that
+ //at time t : CPU 0 : process_free
+ //at time t+150ns : CPU 1 : schedule out
+ //Clearly due to time imprecision, we disable it. (Mathieu)
+ //If this weird case happens, we have no choice but to set the
+ //currently running process on that CPU to process 0.
+ guint num_cpus = ltt_trace_get_num_cpu(ts->parent.t);
+ guint i;
+ for(i=0; i< num_cpus; i++) {
+ //g_assert(process != ts->running_process[i]);
+ if(process == ts->running_process[i]) {
+ ts->running_process[i] = lttv_state_find_process(ts, i, 0);
+ }
+ }
exit_process(s, process);
}
return 0;
}
-static guint test_event_count = 0;
void lttv_state_remove_event_hooks(LttvTracesetState *self)
{
LttvTraceset *traceset = self->parent.ts;
lttv_trace_hook_destroy(&g_array_index(hooks, LttvTraceHook, k));
g_array_free(hooks, TRUE);
}
- g_info("EVENT COUNT TEST : %u", test_event_count);
}
static gboolean state_save_event_hook(void *hook_data, void *call_data)
{
guint *event_count = (guint*)hook_data;
- test_event_count++;
/* Only save at LTTV_STATE_SAVE_INTERVAL */
if(likely((*event_count)++ < LTTV_STATE_SAVE_INTERVAL))
return FALSE;
return FALSE;
}
+/* After-trace hook : once the whole trace has been processed, record
+ * that the recomputed state is valid up to the end of the trace's time
+ * span, so later seek-closest operations can trust every saved state.
+ * hook_data is unused ; call_data is the LttvTraceState.
+ * Always returns FALSE so the hook chain continues. */
+static gboolean state_save_after_trace_hook(void *hook_data, void *call_data)
+{
+ LttvTraceState *tcs = (LttvTraceState *)(call_data);
+
+ *(tcs->max_time_state_recomputed_in_seek) = tcs->parent.time_span.end_time;
+
+ return FALSE;
+}
+
#if 0
static gboolean block_start(void *hook_data, void *call_data)
{
}
}
+
+ lttv_process_traceset_begin(&self->parent,
+ NULL, NULL, NULL, NULL, NULL);
+
}
gint lttv_state_save_hook_add_event_hooks(void *hook_data, void *call_data)
LttvTracefileState *tfs;
+ LttvHooks *after_trace = lttv_hooks_new();
+
+ lttv_hooks_add(after_trace,
+ state_save_after_trace_hook,
+ NULL,
+ LTTV_PRIO_STATE);
+
+
+ lttv_process_traceset_end(&self->parent,
+ NULL, after_trace, NULL, NULL, NULL);
+ lttv_hooks_destroy(after_trace);
+
nb_trace = lttv_traceset_number(traceset);
for(i = 0 ; i < nb_trace ; i++) {
LttvTracefileContext*, j));
event_count = lttv_hooks_remove(tfs->parent.event,
state_save_event_hook);
-
}
g_free(event_count);
}
LttvAttribute *saved_states_tree, *saved_state_tree, *closest_tree;
- g_tree_destroy(self->parent.pqueue);
- self->parent.pqueue = g_tree_new(compare_tracefile);
+ //g_tree_destroy(self->parent.pqueue);
+ //self->parent.pqueue = g_tree_new(compare_tracefile);
g_info("Entering seek_time_closest for time %lu.%lu", t.tv_sec, t.tv_nsec);