update script
[lttv.git] / ltt / branches / poly / doc / developer / time-monotonic-accurate.txt
index c1bc89d4d267a9a13550c056f42d943728c9e947..52de9ae84b3ae044298bdd16f39c85afd8274e50 100644 (file)
@@ -27,6 +27,9 @@ static struct time_struct {
 
 DECLARE_PERCPU(struct time_struct, cpu_time);
 
+/* Number of times the scheduler is called on each CPU */
+DECLARE_PERCPU(unsigned long, sched_nr);
+
 /* On frequency change event */
 /* In irq context */
 void freq_change_cb(unsigned int new_freq)
@@ -83,20 +86,31 @@ wake_from_hlt()
 
 /* If the update_count changes while we read the context, it may be invalid.
  * This would happen if we are scheduled out for a period of time long enough to
  * permit 2 frequency changes. We simply start the loop again if it happens.
- * We detect it by comparing the update_count running counter. */
+ * We detect it by comparing the update_count running counter.
+ * We detect preemption by incrementing a counter sched_nr within schedule().
+ * This counter is readable by user space through the vsyscall page. */
 u64 read_time(void)
 {
        u64 walltime;
        long update_count;
-       struct time_struct this_cpu_time = 
-               per_cpu(cpu_time, smp_processor_id());
+       struct time_struct this_cpu_time;
        struct time_info *current_time;
+       unsigned int cpu;
+       unsigned long prev_sched_nr;
        do {
+               cpu = _smp_processor_id();
+               prev_sched_nr = per_cpu(sched_nr, cpu);
+               if(cpu != _smp_processor_id())
+                       continue;       /* changed CPU between CPUID and getting
+                                          sched_nr */
+               this_cpu_time = per_cpu(cpu_time, cpu);
                update_count = this_cpu_time.update_count;
                current_time = this_cpu_time.time_sel[update_count&1];
                walltime = current_time->walltime + 
                                (get_cycles() - current_time->tsc) /
                                current_time->freq;
+               if(per_cpu(sched_nr, cpu) != prev_sched_nr)
+                       continue;       /* been preempted */
        } while(per_cpu(cpu_time, cpu).update_count != update_count);
        return walltime;
 }
This page took 0.0238 seconds and 4 git commands to generate.