x86: merge the TSC cpu-freq code
diff --git a/arch/x86/kernel/tsc_32.c b/arch/x86/kernel/tsc_32.c
index 65b70637ad9796d8256b9305c4dfd4ed3c378e6d..bbc153d36f84eeb3eaa6001d5ac088563ccf2015 100644
--- a/arch/x86/kernel/tsc_32.c
+++ b/arch/x86/kernel/tsc_32.c
@@ -1,6 +1,7 @@
 #include <linux/sched.h>
 #include <linux/clocksource.h>
 #include <linux/workqueue.h>
+#include <linux/delay.h>
 #include <linux/cpufreq.h>
 #include <linux/jiffies.h>
 #include <linux/init.h>
 
 #include "mach_timer.h"
 
-/* native_sched_clock() is called before tsc_init(), so
-   we must start with the TSC soft disabled to prevent
-   erroneous rdtsc usage on !cpu_has_tsc processors */
-static int tsc_disabled = -1;
-
-/*
- * On some systems the TSC frequency does not
- * change with the cpu frequency. So we need
- * an extra value to store the TSC freq
- */
-unsigned int tsc_khz;
-EXPORT_SYMBOL_GPL(tsc_khz);
-
-#ifdef CONFIG_X86_TSC
-static int __init tsc_setup(char *str)
-{
-       printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
-              "cannot disable TSC completely.\n");
-       tsc_disabled = 1;
-       return 1;
-}
-#else
-/*
- * disable flag for tsc. Takes effect by clearing the TSC cpu flag
- * in cpu/common.c
- */
-static int __init tsc_setup(char *str)
-{
-       setup_clear_cpu_cap(X86_FEATURE_TSC);
-       return 1;
-}
-#endif
-
-__setup("notsc", tsc_setup);
-
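(In practice this means booting with "notsc" on the kernel command line either clears X86_FEATURE_TSC outright, or, when the kernel was built with CONFIG_X86_TSC, only soft-disables the TSC and prints the warning above.)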
-/*
- * code to mark and check if the TSC is unstable
- * due to cpufreq or due to unsynced TSCs
- */
-static int tsc_unstable;
-
-int check_tsc_unstable(void)
-{
-       return tsc_unstable;
-}
-EXPORT_SYMBOL_GPL(check_tsc_unstable);
-
-/* Accelerators for sched_clock()
- * convert from cycles(64bits) => nanoseconds (64bits)
- *  basic equation:
- *             ns = cycles / (freq / ns_per_sec)
- *             ns = cycles * (ns_per_sec / freq)
- *             ns = cycles * (10^9 / (cpu_khz * 10^3))
- *             ns = cycles * (10^6 / cpu_khz)
- *
- *     Then we use scaling math (suggested by george@mvista.com) to get:
- *             ns = cycles * (10^6 * SC / cpu_khz) / SC
- *             ns = cycles * cyc2ns_scale / SC
- *
- *     And since SC is a constant power of two, we can convert the div
- *  into a shift.
- *
- *  We can use khz divisor instead of mhz to keep a better precision, since
- *  cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
- *  (mathieu.desnoyers@polymtl.ca)
- *
- *                     -johnstul@us.ibm.com "math is hard, lets go shopping!"
- */
-
-DEFINE_PER_CPU(unsigned long, cyc2ns);
-
-static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
-{
-       unsigned long long tsc_now, ns_now;
-       unsigned long flags, *scale;
-
-       local_irq_save(flags);
-       sched_clock_idle_sleep_event();
-
-       scale = &per_cpu(cyc2ns, cpu);
-
-       rdtscll(tsc_now);
-       ns_now = __cycles_2_ns(tsc_now);
-
-       if (cpu_khz)
-               *scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR)/cpu_khz;
-
-       /*
-        * Start smoothly with the new frequency:
-        */
-       sched_clock_idle_wakeup_event(0);
-       local_irq_restore(flags);
-}
-
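The cyc2ns comment block above reduces the conversion to one multiply and one shift. A minimal stand-alone sketch of the same math (not the kernel code; it assumes CYC2NS_SCALE_FACTOR == 10, i.e. SC == 2^10, as in the x86 headers of this era):

    #include <stdio.h>
    #include <stdint.h>

    #define CYC2NS_SCALE_FACTOR 10          /* assumed: SC == 2^10 */
    #define NSEC_PER_MSEC 1000000ULL

    static uint64_t cycles_2_ns(uint64_t cycles, unsigned long cpu_khz)
    {
            /* scale = 10^6 * 2^10 / cpu_khz, which fits in 32 bits */
            unsigned long scale =
                    (unsigned long)((NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR) / cpu_khz);

            return (cycles * scale) >> CYC2NS_SCALE_FACTOR;
    }

    int main(void)
    {
            /* 2 GHz part: scale == 512, so 2,000,000 cycles -> 1,000,000 ns */
            printf("%llu ns\n",
                   (unsigned long long)cycles_2_ns(2000000ULL, 2000000UL));
            return 0;
    }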
-/*
- * Scheduler clock - returns current time in nanosec units.
- */
-unsigned long long native_sched_clock(void)
-{
-       unsigned long long this_offset;
-
-       /*
-        * Fall back to jiffies if there's no TSC available:
-        * ( But note that we still use it if the TSC is marked
-        *   unstable. We do this because unlike Time Of Day,
-        *   the scheduler clock tolerates small errors and it's
-        *   very important for it to be as fast as the platform
-        *   can achieve it. )
-        */
-       if (unlikely(tsc_disabled))
-               /* No locking but a rare wrong value is not a big deal: */
-               return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
-
-       /* read the Time Stamp Counter: */
-       rdtscll(this_offset);
-
-       /* return the value in ns */
-       return cycles_2_ns(this_offset);
-}
-
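Worth noting: the jiffies fallback only advances in steps of 10^9 / HZ ns, i.e. 1 ms at HZ=1000, whereas a usable TSC gives per-cycle granularity (0.5 ns on a 2 GHz part). That is why an unstable-but-present TSC is still preferred for sched_clock().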
-/* We need to define a real function for sched_clock, to override the
-   weak default version */
-#ifdef CONFIG_PARAVIRT
-unsigned long long sched_clock(void)
-{
-       return paravirt_sched_clock();
-}
-#else
-unsigned long long sched_clock(void)
-       __attribute__((alias("native_sched_clock")));
-#endif
-
-unsigned long native_calculate_cpu_khz(void)
-{
-       unsigned long long start, end;
-       unsigned long count;
-       u64 delta64 = (u64)ULLONG_MAX;
-       int i;
-       unsigned long flags;
-
-       local_irq_save(flags);
-
-       /* run 3 times to ensure the cache is warm and to get an accurate reading */
-       for (i = 0; i < 3; i++) {
-               mach_prepare_counter();
-               rdtscll(start);
-               mach_countup(&count);
-               rdtscll(end);
-
-               /*
-                * Error: ECTCNEVERSET
-                * The CTC wasn't reliable: we got a hit on the very first read,
-                * or the CPU was so fast/slow that the quotient wouldn't fit in
-                * 32 bits..
-                */
-               if (count <= 1)
-                       continue;
-
-               /* cpu freq too slow: */
-               if ((end - start) <= CALIBRATE_TIME_MSEC)
-                       continue;
-
-               /*
-                * We want the minimum time of all runs in case one of them
-                * is inaccurate due to SMI or other delay
-                */
-               delta64 = min(delta64, (end - start));
-       }
-
-       /* cpu freq too fast (or every run was bad): */
-       if (delta64 > (1ULL<<32))
-               goto err;
-
-       delta64 += CALIBRATE_TIME_MSEC/2; /* round for do_div */
-       do_div(delta64,CALIBRATE_TIME_MSEC);
-
-       local_irq_restore(flags);
-       return (unsigned long)delta64;
-err:
-       local_irq_restore(flags);
-       return 0;
-}
-
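To make the final divide concrete, a sketch with hypothetical numbers, assuming CALIBRATE_TIME_MSEC == 30 as defined in mach_timer.h:

    /* a 2 GHz part accumulates ~60,000,000 TSC cycles in the 30 ms PIT window */
    uint64_t delta64 = 60000000ULL;

    delta64 += 30 / 2;                          /* round to nearest for the divide */
    unsigned long cpu_khz = (unsigned long)(delta64 / 30);   /* 2,000,000 kHz */

The two guards above bracket the plausible range: a delta at or below CALIBRATE_TIME_MSEC would mean under one cycle per millisecond, and a delta above 2^32 would overflow the 32-bit quotient (roughly a 143 GHz part over a 30 ms window).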
-int recalibrate_cpu_khz(void)
-{
-#ifndef CONFIG_SMP
-       unsigned long cpu_khz_old = cpu_khz;
-
-       if (cpu_has_tsc) {
-               cpu_khz = calculate_cpu_khz();
-               tsc_khz = cpu_khz;
-               cpu_data(0).loops_per_jiffy =
-                       cpufreq_scale(cpu_data(0).loops_per_jiffy,
-                                       cpu_khz_old, cpu_khz);
-               return 0;
-       } else
-               return -ENODEV;
-#else
-       return -ENODEV;
-#endif
-}
-
-EXPORT_SYMBOL(recalibrate_cpu_khz);
-
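cpufreq_scale() is plain ratio arithmetic, new = old * new_freq / old_freq (the real helper uses fixed-point intermediates to avoid overflow). A sketch with hypothetical numbers:

    /* loops_per_jiffy measured at 1,000,000 kHz; CPU now at 2,000,000 kHz */
    unsigned long lpj_old = 4000000UL;
    unsigned long lpj_new =
            (unsigned long)((uint64_t)lpj_old * 2000000UL / 1000000UL);
    /* lpj_new == 8,000,000: twice the spins now fit in one jiffy */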
-#ifdef CONFIG_CPU_FREQ
-
-/*
- * if the CPU frequency is scaled, TSC-based delays will need a different
- * loops_per_jiffy value to function properly.
- */
-static unsigned int ref_freq;
-static unsigned long loops_per_jiffy_ref;
-static unsigned long cpu_khz_ref;
-
-static int
-time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
-{
-       struct cpufreq_freqs *freq = data;
-
-       if (!ref_freq) {
-               if (!freq->old){
-                       ref_freq = freq->new;
-                       return 0;
-               }
-               ref_freq = freq->old;
-               loops_per_jiffy_ref = cpu_data(freq->cpu).loops_per_jiffy;
-               cpu_khz_ref = cpu_khz;
-       }
-
-       if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
-           (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
-           (val == CPUFREQ_RESUMECHANGE)) {
-               if (!(freq->flags & CPUFREQ_CONST_LOOPS))
-                       cpu_data(freq->cpu).loops_per_jiffy =
-                               cpufreq_scale(loops_per_jiffy_ref,
-                                               ref_freq, freq->new);
-
-               if (cpu_khz) {
-
-                       if (num_online_cpus() == 1)
-                               cpu_khz = cpufreq_scale(cpu_khz_ref,
-                                               ref_freq, freq->new);
-                       if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
-                               tsc_khz = cpu_khz;
-                               set_cyc2ns_scale(cpu_khz, freq->cpu);
-                               /*
-                                * TSC based sched_clock turns
-                                * to junk w/ cpufreq
-                                */
-                               mark_tsc_unstable("cpufreq changes");
-                       }
-               }
-       }
-
-       return 0;
-}
-
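Note the asymmetry in the transition check above: loops_per_jiffy is rescaled before a frequency increase and after a decrease, so the delay loop never runs with a value too small for the current clock and udelay() can only err on the long side.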
-static struct notifier_block time_cpufreq_notifier_block = {
-       .notifier_call  = time_cpufreq_notifier
-};
-
-static int __init cpufreq_tsc(void)
-{
-       return cpufreq_register_notifier(&time_cpufreq_notifier_block,
-                                        CPUFREQ_TRANSITION_NOTIFIER);
-}
-core_initcall(cpufreq_tsc);
-
-#endif
+extern int tsc_unstable;
+extern int tsc_disabled;
 
 /* clock source code */
 
-static unsigned long current_tsc_khz;
 static struct clocksource clocksource_tsc;
 
 /*
@@ -404,6 +137,7 @@ static inline void check_geode_tsc_reliable(void) { }
 void __init tsc_init(void)
 {
        int cpu;
+       u64 lpj;
 
        if (!cpu_has_tsc || tsc_disabled > 0)
                return;
@@ -416,6 +150,10 @@ void __init tsc_init(void)
                return;
        }
 
+       lpj = ((u64)tsc_khz * 1000);
+       do_div(lpj, HZ);
+       lpj_fine = lpj;
+
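With hypothetical values, tsc_khz = 2,000,000 (a 2 GHz TSC) and HZ = 250, this works out to lpj_fine = 2,000,000 * 1000 / 250 = 8,000,000 cycles per tick, giving the delay-loop calibration a TSC-derived preset instead of having to time itself.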
        /* now allow native_sched_clock() to use rdtsc */
        tsc_disabled = 0;
 
@@ -439,9 +177,8 @@ void __init tsc_init(void)
 
        unsynchronized_tsc();
        check_geode_tsc_reliable();
-       current_tsc_khz = tsc_khz;
-       clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
-                                                       clocksource_tsc.shift);
+       clocksource_tsc.mult = clocksource_khz2mult(tsc_khz,
+                                                   clocksource_tsc.shift);
        /* lower the rating if we already know it's unstable: */
        if (check_tsc_unstable()) {
                clocksource_tsc.rating = 0;
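clocksource_khz2mult() picks mult so that ns = (cycles * mult) >> shift. As a worked example with hypothetical numbers, and assuming the TSC clocksource's shift of 22: tsc_khz = 2,000,000 yields mult = (10^6 << 22) / 2,000,000 = 2,097,152, so each cycle contributes 2,097,152 / 2^22 = 0.5 ns, as expected for a 2 GHz counter.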