 #include <linux/time.h>
 #include <asm/uaccess.h>
 
-static DEFINE_PER_CPU(unsigned long long, blk_trace_cpu_offset) = { 0, };
 static unsigned int blktrace_seq __read_mostly = 1;
 
 /*
                const int cpu = smp_processor_id();
 
                t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
-               t->time = cpu_clock(cpu) - per_cpu(blk_trace_cpu_offset, cpu);
+               t->time = ktime_to_ns(ktime_get());
                t->device = bt->dev;
                t->action = action;
                t->pid = pid;
 
                t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
                t->sequence = ++(*sequence);
-               t->time = cpu_clock(cpu) - per_cpu(blk_trace_cpu_offset, cpu);
+               t->time = ktime_to_ns(ktime_get());
                t->sector = sector;
                t->bytes = bytes;
                t->action = what;
        }
 }
 
-/*
- * Average offset over two calls to cpu_clock() with a gettimeofday()
- * in the middle
- */
-static void blk_check_time(unsigned long long *t, int this_cpu)
-{
-       unsigned long long a, b;
-       struct timeval tv;
-
-       a = cpu_clock(this_cpu);
-       do_gettimeofday(&tv);
-       b = cpu_clock(this_cpu);
-
-       *t = tv.tv_sec * 1000000000 + tv.tv_usec * 1000;
-       *t -= (a + b) / 2;
-}
-
-/*
- * calibrate our inter-CPU timings
- */
-static void blk_trace_check_cpu_time(void *data)
-{
-       unsigned long long *t;
-       int this_cpu = get_cpu();
-
-       t = &per_cpu(blk_trace_cpu_offset, this_cpu);
-
-       /*
-        * Just call it twice, hopefully the second call will be cache hot
-        * and a little more precise
-        */
-       blk_check_time(t, this_cpu);
-       blk_check_time(t, this_cpu);
-
-       put_cpu();
-}
-
-static void blk_trace_set_ht_offsets(void)
-{
-#if defined(CONFIG_SCHED_SMT)
-       int cpu, i;
-
-       /*
-        * now make sure HT siblings have the same time offset
-        */
-       preempt_disable();
-       for_each_online_cpu(cpu) {
-               unsigned long long *cpu_off, *sibling_off;
-
-               for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu)) {
-                       if (i == cpu)
-                               continue;
-
-                       cpu_off = &per_cpu(blk_trace_cpu_offset, cpu);
-                       sibling_off = &per_cpu(blk_trace_cpu_offset, i);
-                       *sibling_off = *cpu_off;
-               }
-       }
-       preempt_enable();
-#endif
-}
-
 static __init int blk_trace_init(void)
 {
        mutex_init(&blk_tree_mutex);
-       on_each_cpu(blk_trace_check_cpu_time, NULL, 1, 1);
-       blk_trace_set_ht_offsets();
 
        return 0;
 }
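
For reference: the replacement timestamp, ktime_to_ns(ktime_get()), reads the kernel's monotonic clock in nanoseconds, which is already consistent across CPUs, so the per-CPU offset calibration removed above (blk_check_time(), blk_trace_check_cpu_time(), blk_trace_set_ht_offsets()) is no longer needed. A minimal userspace sketch of the same idea (illustrative only; the struct and helper names below are made up and are not part of the patch) uses CLOCK_MONOTONIC:

/*
 * Userspace sketch of the timestamping scheme the patch switches to:
 * a single monotonic nanosecond clock shared by all CPUs, so no
 * per-CPU offset calibration is required.  Names here are illustrative.
 */
#define _POSIX_C_SOURCE 200809L
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct trace_record {
	uint64_t time;		/* nanoseconds, monotonic */
	uint32_t action;
};

/* Rough analogue of ktime_to_ns(ktime_get()) in kernel code. */
static uint64_t monotonic_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

int main(void)
{
	struct trace_record t = { .time = monotonic_ns(), .action = 1 };

	printf("t.time = %llu ns\n", (unsigned long long)t.time);
	return 0;
}

The design point is the same in both cases: one shared monotonic time base removes the need to measure and propagate per-CPU clock offsets.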