 static DEFINE_SPINLOCK(time_sync_lock);
 static unsigned long long prev_global_time;
 
-static unsigned long long __sync_cpu_clock(cycles_t time, int cpu)
+static unsigned long long __sync_cpu_clock(unsigned long long time, int cpu)
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(&time_sync_lock, flags);
+       /*
+        * We want this inlined, to not get tracer function calls
+        * in this critical section:
+        */
+       spin_acquire(&time_sync_lock.dep_map, 0, 0, _THIS_IP_);
+       __raw_spin_lock(&time_sync_lock.raw_lock);
 
        if (time < prev_global_time) {
                per_cpu(time_offset, cpu) += prev_global_time - time;
                prev_global_time = time;
        }
 
-       spin_unlock_irqrestore(&time_sync_lock, flags);
+       __raw_spin_unlock(&time_sync_lock.raw_lock);
+       spin_release(&time_sync_lock.dep_map, 1, _THIS_IP_);
 
        return time;
 }
 
 static unsigned long long __cpu_clock(int cpu)
 {
        unsigned long long now;
-       unsigned long flags;
        struct rq *rq;
 
        /*
         * Only call sched_clock() if the scheduler has already been
         * initialized (some code might call cpu_clock() very early):
         */
        if (unlikely(!scheduler_running))
                return 0;
 
-       local_irq_save(flags);
        rq = cpu_rq(cpu);
        update_rq_clock(rq);
        now = rq->clock;
-       local_irq_restore(flags);
 
        return now;
 }
 
 unsigned long long cpu_clock(int cpu)
 {
        unsigned long long prev_cpu_time, time, delta_time;
+       unsigned long flags;
 
+       local_irq_save(flags);
        prev_cpu_time = per_cpu(prev_cpu_time, cpu);
        time = __cpu_clock(cpu) + per_cpu(time_offset, cpu);
        delta_time = time-prev_cpu_time;
 
-       if (unlikely(delta_time > time_sync_thresh))
+       if (unlikely(delta_time > time_sync_thresh)) {
                time = __sync_cpu_clock(time, cpu);
+               per_cpu(prev_cpu_time, cpu) = time;
+       }
+       local_irq_restore(flags);
 
        return time;
 }
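
A note on why the locking is open-coded: spin_lock_irqsave()/spin_unlock_irqrestore()
are out-of-line function calls, so a function tracer that hooks every function would
see calls inside this critical section - exactly what the added comment wants to
avoid. Splitting the lock into its lockdep annotation plus the raw, inlined lock
operation keeps the critical section free of traceable calls without losing lockdep
coverage. Below is a minimal sketch of the pattern, assuming a kernel of this
generation (spinlock_t exposing .raw_lock and .dep_map; the spin_acquire()/
spin_release() annotations compile away without CONFIG_DEBUG_LOCK_ALLOC).
example_lock, example_state and example_update() are hypothetical names, not part
of the patch:

static DEFINE_SPINLOCK(example_lock);	/* hypothetical, for illustration */
static unsigned long long example_state;

static void example_update(unsigned long long val)
{
	/*
	 * The caller is expected to have IRQs disabled already
	 * (as cpu_clock() does via local_irq_save() above), so
	 * no _irqsave variant is needed here.
	 *
	 * Lockdep bookkeeping only - a no-op unless
	 * CONFIG_DEBUG_LOCK_ALLOC is enabled:
	 */
	spin_acquire(&example_lock.dep_map, 0, 0, _THIS_IP_);
	/* The actual lock operation, inlined - no tracer hook: */
	__raw_spin_lock(&example_lock.raw_lock);

	example_state = val;

	__raw_spin_unlock(&example_lock.raw_lock);
	spin_release(&example_lock.dep_map, 1, _THIS_IP_);
}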
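
For reference, this is how cpu_clock() reads once the patch is applied,
reconstructed from the hunks above - no new logic, only explanatory comments:

unsigned long long cpu_clock(int cpu)
{
	unsigned long long prev_cpu_time, time, delta_time;
	unsigned long flags;

	/*
	 * IRQ disabling now covers the whole read/compare/resync
	 * sequence (it used to cover only the rq->clock read inside
	 * __cpu_clock()), so the per-cpu time_offset/prev_cpu_time
	 * pair cannot change underneath us from an interrupt:
	 */
	local_irq_save(flags);
	prev_cpu_time = per_cpu(prev_cpu_time, cpu);
	time = __cpu_clock(cpu) + per_cpu(time_offset, cpu);
	delta_time = time - prev_cpu_time;

	if (unlikely(delta_time > time_sync_thresh)) {
		time = __sync_cpu_clock(time, cpu);
		/*
		 * Record the synced value: without this, delta_time
		 * stays above the threshold forever and every later
		 * call would take the global time_sync_lock:
		 */
		per_cpu(prev_cpu_time, cpu) = time;
	}
	local_irq_restore(flags);

	return time;
}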