]> pilppa.org Git - linux-2.6-omap-h63xx.git/blob - arch/x86/kernel/time_64.c
64cd03ed9bfcc3ed35fae0cba900ed75213de3b5
[linux-2.6-omap-h63xx.git] / arch / x86 / kernel / time_64.c
1 /*
2  *  "High Precision Event Timer" based timekeeping.
3  *
4  *  Copyright (c) 1991,1992,1995  Linus Torvalds
5  *  Copyright (c) 1994  Alan Modra
6  *  Copyright (c) 1995  Markus Kuhn
7  *  Copyright (c) 1996  Ingo Molnar
8  *  Copyright (c) 1998  Andrea Arcangeli
9  *  Copyright (c) 2002,2006  Vojtech Pavlik
10  *  Copyright (c) 2003  Andi Kleen
11  *  RTC support code taken from arch/i386/kernel/timers/time_hpet.c
12  */
13
14 #include <linux/kernel.h>
15 #include <linux/sched.h>
16 #include <linux/interrupt.h>
17 #include <linux/init.h>
18 #include <linux/mc146818rtc.h>
19 #include <linux/time.h>
20 #include <linux/ioport.h>
21 #include <linux/module.h>
22 #include <linux/device.h>
23 #include <linux/sysdev.h>
24 #include <linux/bcd.h>
25 #include <linux/notifier.h>
26 #include <linux/cpu.h>
27 #include <linux/kallsyms.h>
28 #include <linux/acpi.h>
29 #include <linux/clockchips.h>
30
31 #ifdef CONFIG_ACPI
32 #include <acpi/achware.h>       /* for PM timer frequency */
33 #include <acpi/acpi_bus.h>
34 #endif
35 #include <asm/i8253.h>
36 #include <asm/pgtable.h>
37 #include <asm/vsyscall.h>
38 #include <asm/timex.h>
39 #include <asm/proto.h>
40 #include <asm/hpet.h>
41 #include <asm/sections.h>
42 #include <linux/hpet.h>
43 #include <asm/apic.h>
44 #include <asm/hpet.h>
45 #include <asm/mpspec.h>
46 #include <asm/nmi.h>
47 #include <asm/vgtod.h>
48
/* The kernel's jiffies tick counter, placed in a dedicated section via
   __section_jiffies — presumably so the vsyscall/vgtod code can map it
   for fast userspace time reads; confirm against asm/vsyscall.h. */
volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
50
/*
 * Return the program counter a profiling tick should be attributed to.
 *
 * If the interrupt landed inside a lock function, attributing the tick
 * there would charge lock-primitive overhead to the lock itself rather
 * than its caller, so peek at the stack to find the caller's address.
 */
unsigned long profile_pc(struct pt_regs *regs)
{
        unsigned long pc = instruction_pointer(regs);

        /* Assume the lock function has either no stack frame or a copy
           of eflags from PUSHF
           Eflags always has bits 22 and up cleared unlike kernel addresses. */
        if (!user_mode(regs) && in_lock_functions(pc)) {
                unsigned long *sp = (unsigned long *)regs->rsp;
                /* sp[0] (or sp[1], if a saved EFLAGS word sits on top) is
                   taken as the return address when its high bits are set —
                   kernel text addresses have bits >= 22 set, EFLAGS does
                   not, per the comment above. */
                if (sp[0] >> 22)
                        return sp[0];
                if (sp[1] >> 22)
                        return sp[1];
        }
        return pc;
}
EXPORT_SYMBOL(profile_pc);
68
69 static irqreturn_t timer_event_interrupt(int irq, void *dev_id)
70 {
71         add_pda(irq0_irqs, 1);
72
73         global_clock_event->event_handler(global_clock_event);
74
75         return IRQ_HANDLED;
76 }
77
78 /* calibrate_cpu is used on systems with fixed rate TSCs to determine
79  * processor frequency */
80 #define TICK_COUNT 100000000
81 static unsigned int __init tsc_calibrate_cpu_khz(void)
82 {
83         int tsc_start, tsc_now;
84         int i, no_ctr_free;
85         unsigned long evntsel3 = 0, pmc3 = 0, pmc_now = 0;
86         unsigned long flags;
87
88         for (i = 0; i < 4; i++)
89                 if (avail_to_resrv_perfctr_nmi_bit(i))
90                         break;
91         no_ctr_free = (i == 4);
92         if (no_ctr_free) {
93                 i = 3;
94                 rdmsrl(MSR_K7_EVNTSEL3, evntsel3);
95                 wrmsrl(MSR_K7_EVNTSEL3, 0);
96                 rdmsrl(MSR_K7_PERFCTR3, pmc3);
97         } else {
98                 reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i);
99                 reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
100         }
101         local_irq_save(flags);
102         /* start meauring cycles, incrementing from 0 */
103         wrmsrl(MSR_K7_PERFCTR0 + i, 0);
104         wrmsrl(MSR_K7_EVNTSEL0 + i, 1 << 22 | 3 << 16 | 0x76);
105         rdtscl(tsc_start);
106         do {
107                 rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now);
108                 tsc_now = get_cycles_sync();
109         } while ((tsc_now - tsc_start) < TICK_COUNT);
110
111         local_irq_restore(flags);
112         if (no_ctr_free) {
113                 wrmsrl(MSR_K7_EVNTSEL3, 0);
114                 wrmsrl(MSR_K7_PERFCTR3, pmc3);
115                 wrmsrl(MSR_K7_EVNTSEL3, evntsel3);
116         } else {
117                 release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
118                 release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
119         }
120
121         return pmc_now * tsc_khz / (tsc_now - tsc_start);
122 }
123
/*
 * IRQ 0 (legacy timer) action: routes the tick to timer_event_interrupt.
 * IRQF_DISABLED runs the handler with interrupts off; IRQF_IRQPOLL marks
 * it usable by irqpoll; IRQF_NOBALANCING excludes it from IRQ balancing.
 */
static struct irqaction irq0 = {
        .handler        = timer_event_interrupt,
        .flags          = IRQF_DISABLED | IRQF_IRQPOLL | IRQF_NOBALANCING,
        .mask           = CPU_MASK_NONE,
        .name           = "timer"
};
130
/*
 * Boot-time timer setup: select a tick source (HPET if it enables,
 * otherwise the legacy PIT), install the IRQ 0 handler, calibrate the
 * TSC, pick the vsyscall getcpu mechanism, and register the TSC
 * clocksource.  Call order matters: the tick source must exist before
 * the IRQ is wired up, and calibration must precede clocksource init.
 */
void __init time_init(void)
{
        if (!hpet_enable())
                setup_pit_timer();

        setup_irq(0, &irq0);

        tsc_calibrate();

        cpu_khz = tsc_khz;
        /* On constant-TSC AMD family 16 (0x10) parts, derive the core
           clock with a performance counter instead — presumably because
           the fixed-rate TSC need not run at the core frequency there. */
        if (cpu_has(&boot_cpu_data, X86_FEATURE_CONSTANT_TSC) &&
                boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
                boot_cpu_data.x86 == 16)
                cpu_khz = tsc_calibrate_cpu_khz();

        if (unsynchronized_tsc())
                mark_tsc_unstable("TSCs unsynchronized");

        /* vgetcpu(): prefer the RDTSCP instruction when the CPU has it,
           otherwise fall back to the LSL-based method. */
        if (cpu_has(&boot_cpu_data, X86_FEATURE_RDTSCP))
                vgetcpu_mode = VGETCPU_RDTSCP;
        else
                vgetcpu_mode = VGETCPU_LSL;

        printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n",
                cpu_khz / 1000, cpu_khz % 1000);
        init_tsc_clocksource();
}
157 }