self->nb_event = 0;
/* Seek time to beginning */
- g_tree_destroy(self->parent.ts_context->pqueue);
- self->parent.ts_context->pqueue = g_tree_new(compare_tracefile);
+ // Mathieu : fix : don't seek the traceset here : it causes inconsistency in
+ // seek closest. It's the trace context's job to seek the trace to the beginning
+ // anyway : the init state might be used in the middle of the trace as well...
+ //g_tree_destroy(self->parent.ts_context->pqueue);
+ //self->parent.ts_context->pqueue = g_tree_new(compare_tracefile);
- lttv_process_trace_seek_time(&self->parent, ltt_time_zero);
+
+ //lttv_process_trace_seek_time(&self->parent, ltt_time_zero);
nb_cpus = ltt_trace_get_num_cpu(self->parent.t);
g_free(name_tables);
}
+#ifdef HASH_TABLE_DEBUG
+
+/* GHFunc callback for hash_table_check() : dereference each
+ * LttvProcessState stored in the table so that a corrupted entry
+ * faults (or trips a memory checker) as early as possible.
+ * key and user_data are unused. */
+static void test_process(gpointer key, gpointer value, gpointer user_data)
+{
+ LttvProcessState *process = (LttvProcessState *)value;
+
+ /* Test for process corruption : the read below is the whole point of
+  * this function — touching execution_stack->len crashes on a corrupted
+  * entry. stack_len is intentionally never used afterwards. */
+ guint stack_len = process->execution_stack->len;
+}
+
+/* Walk every process in the state hash table, probing each entry for
+ * corruption. Debug builds only (guarded by HASH_TABLE_DEBUG). */
+static void hash_table_check(GHashTable *table)
+{
+ g_hash_table_foreach(table, test_process, NULL);
+}
+
+
+#endif
+
+
static void push_state(LttvTracefileState *tfs, LttvExecutionMode t,
guint state_id)
{
guint cpu = ltt_tracefile_num(tfs->parent.tf);
LttvTraceState *ts = (LttvTraceState*)tfs->parent.t_context;
+
+#ifdef HASH_TABLE_DEBUG
+ hash_table_check(ts->processes);
+#endif
LttvProcessState *process = ts->running_process[cpu];
guint depth = process->execution_stack->len;
guint cpu = ltt_tracefile_num(s->parent.tf);
LttvTraceState *ts = (LttvTraceState*)s->parent.t_context;
LttvProcessState *process = ts->running_process[cpu];
+ LttvProcessState *child_process;
/* Parent PID */
f = thf->f1;
f = thf->f2;
child_pid = ltt_event_get_unsigned(e, f);
+ /* Mathieu : it seems like the process might have been scheduled in before the
+ * fork, and, in a rare case, might be the current process. This might happen
+ * in an SMP case where we don't have enough precision on the clocks */
+#if 0
zombie_process = lttv_state_find_process(ts, ANY_CPU, child_pid);
if(unlikely(zombie_process != NULL)) {
/* Reutilisation of PID. Only now we are sure that the old PID
* has been released. FIXME : should know when release_task happens instead.
*/
+ guint num_cpus = ltt_trace_get_num_cpu(ts->parent.t);
+ guint i;
+ for(i=0; i< num_cpus; i++) {
+ g_assert(process != ts->running_process[i]);
+ }
+
exit_process(s, zombie_process);
}
+#endif //0
g_assert(process->pid != child_pid);
// FIXME : Add this test in the "known state" section
// g_assert(process->pid == parent_pid);
- lttv_state_create_process(ts, process, cpu, child_pid, &s->parent.timestamp);
+ child_process = lttv_state_find_process(ts, ANY_CPU, child_pid);
+ if(child_process == NULL) {
+ lttv_state_create_process(ts, process, cpu,
+ child_pid, &s->parent.timestamp);
+ } else {
+ /* The process has already been created : due to time imprecision between
+ * multiple CPUs : it has been scheduled in before creation.
+ *
+ * Simply put a correct parent.
+ */
+ child_process->ppid = process->pid;
+ }
return FALSE;
}
/* PID of the process to release */
release_pid = ltt_event_get_unsigned(e, thf->f1);
+
+ g_assert(release_pid != 0);
process = lttv_state_find_process(ts, ANY_CPU, release_pid);
if(likely(process != NULL)) {
/* release_task is happening at kernel level : we can now safely release
* the data structure of the process */
+ guint num_cpus = ltt_trace_get_num_cpu(ts->parent.t);
+ guint i;
+ for(i=0; i< num_cpus; i++) {
+ g_assert(process != ts->running_process[i]);
+ }
exit_process(s, process);
}