discuss marker types
author    compudj <compudj@04897980-b3bd-0310-b5e0-8ef037075253>
          Sat, 24 Feb 2007 07:57:57 +0000 (07:57 +0000)
committer compudj <compudj@04897980-b3bd-0310-b5e0-8ef037075253>
          Sat, 24 Feb 2007 07:57:57 +0000 (07:57 +0000)
git-svn-id: http://ltt.polymtl.ca/svn@2399 04897980-b3bd-0310-b5e0-8ef037075253

ltt/branches/poly/doc/developer/time-monotonic-accurate.txt

index 2fb657e49aa0c60eef946ee32b728182a0bc0646..52de9ae84b3ae044298bdd16f39c85afd8274e50 100644
@@ -27,6 +27,9 @@ static struct time_struct {
 
 DECLARE_PERCPU(struct time_struct, cpu_time);
 
+/* Number of times the scheduler is called on each CPU */
+DECLARE_PERCPU(unsigned long, sched_nr);
+
 /* On frequency change event */
 /* In irq context */
 void freq_change_cb(unsigned int new_freq)
@@ -83,9 +86,9 @@ wake_from_hlt()
 /* If the update_count changes while we read the context, it may be invalid.
  * This would happen if we are scheduled out for a period of time long enough to
  * permit 2 frequency changes. We simply start the loop again if it happens.
- * We detect it by comparing the update_count running counter. */
-/* FIXME : if thread is migrated to another CPU, get_cycles() is bad */
-/* Pb with get cpu id / migrate / get_cycles() / migrate / get cpu id and check
+ * We detect it by comparing the update_count running counter.
+ * We detect preemption by incrementing a per-CPU counter, sched_nr, within
+ * schedule(). This counter is readable by user space through the vsyscall page.
  */
 u64 read_time(void)
 {
@@ -94,16 +97,21 @@ u64 read_time(void)
        struct time_struct this_cpu_time;
        struct time_info *current_time;
        unsigned int cpu;
+       unsigned long prev_sched_nr;
        do {
                cpu = _smp_processor_id();
+               prev_sched_nr = per_cpu(sched_nr, cpu);
+               /* If we changed CPU between reading the CPU id and sched_nr,
+                * the checks in the loop condition below force a retry. */
                this_cpu_time = per_cpu(cpu_time, cpu);
                update_count = this_cpu_time->update_count;
                current_time = this_cpu_time->time_sel[update_count&1];
                walltime = current_time->walltime + 
                                (get_cycles() - current_time->tsc) /
                                current_time->freq;
-       } while(this_cpu_time->update_count != update_count
-               || cpu != _smp_processor_id());
+       } while(this_cpu_time->update_count != update_count
+               || per_cpu(sched_nr, cpu) != prev_sched_nr /* been preempted */
+               || cpu != _smp_processor_id()); /* migrated during the read */
        return walltime;
 }
 
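For readers unfamiliar with the pattern used above, here is a minimal
user-space sketch of the same double-buffered, sequence-counted read path.
It is illustrative only: a pthread stands in for the frequency-change
interrupt, get_cycles() is faked with an atomic counter, and the per-CPU
sched_nr machinery has no direct user-space equivalent, so it is omitted.
All identifiers (time_info, time_sel, freq_changer, ...) are invented for
this sketch and are not part of the design document above.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Double-buffered time parameters: the writer fills the inactive slot,
 * then bumps update_count so readers switch to it (update_count & 1). */
struct time_info {
	unsigned long long tsc;		/* cycle count at last update */
	unsigned long long walltime;	/* wall time at last update */
	unsigned long long freq;	/* cycles per time unit */
};

static struct time_info time_sel[2] = { { 0, 0, 1 }, { 0, 0, 1 } };
static atomic_uint update_count;	/* even/odd selects the active slot */

/* Stand-in for the hardware cycle counter. */
static unsigned long long get_cycles(void)
{
	static atomic_ullong fake_tsc = 0;
	return atomic_fetch_add(&fake_tsc, 1);
}

/* Writer side, playing the role of freq_change_cb(): precompute the new
 * parameters in the inactive slot, then publish them atomically. */
static void *freq_changer(void *arg)
{
	(void)arg;
	for (int i = 0; i < 1000; i++) {
		unsigned int count = atomic_load(&update_count);
		struct time_info *cur = &time_sel[count & 1];
		struct time_info *next = &time_sel[(count + 1) & 1];
		unsigned long long now = get_cycles();

		next->walltime = cur->walltime + (now - cur->tsc) / cur->freq;
		next->tsc = now;
		next->freq = 1 + (i % 3);	/* pretend the frequency changed */
		atomic_store(&update_count, count + 1);	/* publish */
	}
	return NULL;
}

/* Reader side, mirroring the read_time() retry loop: if update_count moved
 * while walltime was being computed, two updates may have overlapped the
 * read, so the snapshot is discarded and the read restarted. */
static unsigned long long read_time(void)
{
	unsigned long long walltime;
	unsigned int count;

	do {
		count = atomic_load(&update_count);
		struct time_info *cur = &time_sel[count & 1];
		walltime = cur->walltime + (get_cycles() - cur->tsc) / cur->freq;
	} while (atomic_load(&update_count) != count);
	return walltime;
}

int main(void)
{
	pthread_t writer;

	pthread_create(&writer, NULL, freq_changer, NULL);
	for (int i = 0; i < 5; i++)
		printf("time: %llu\n", read_time());
	pthread_join(writer, NULL);
	return 0;
}

The double buffering is what keeps the reader loop cheap: a single update
never touches the slot a reader is using, so a reader can only observe a
half-written slot if two updates overlap its read, and that is exactly the
case the update_count comparison (plus, in the kernel design, the sched_nr
comparison) detects and retries. A strictly conforming C11 version would
also need fences or atomic accesses on the slot fields; this sketch leans
on seq_cst atomics on update_count only, as the kernel relies on its own
memory barriers.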