2 * "High Precision Event Timer" based timekeeping.
4 * Copyright (c) 1991,1992,1995 Linus Torvalds
5 * Copyright (c) 1994 Alan Modra
6 * Copyright (c) 1995 Markus Kuhn
7 * Copyright (c) 1996 Ingo Molnar
8 * Copyright (c) 1998 Andrea Arcangeli
9 * Copyright (c) 2002,2006 Vojtech Pavlik
10 * Copyright (c) 2003 Andi Kleen
11 * RTC support code taken from arch/i386/kernel/timers/time_hpet.c
14 #include <linux/kernel.h>
15 #include <linux/sched.h>
16 #include <linux/interrupt.h>
17 #include <linux/init.h>
18 #include <linux/mc146818rtc.h>
19 #include <linux/time.h>
20 #include <linux/ioport.h>
21 #include <linux/module.h>
22 #include <linux/device.h>
23 #include <linux/sysdev.h>
24 #include <linux/bcd.h>
25 #include <linux/notifier.h>
26 #include <linux/cpu.h>
27 #include <linux/kallsyms.h>
28 #include <linux/acpi.h>
29 #include <linux/clockchips.h>
32 #include <acpi/achware.h> /* for PM timer frequency */
33 #include <acpi/acpi_bus.h>
35 #include <asm/i8253.h>
36 #include <asm/pgtable.h>
37 #include <asm/vsyscall.h>
38 #include <asm/timex.h>
39 #include <asm/proto.h>
41 #include <asm/sections.h>
42 #include <linux/hpet.h>
45 #include <asm/mpspec.h>
47 #include <asm/vgtod.h>
/* Jiffies counter mapped into the vsyscall page (__section_jiffies) so
 * userspace gettimeofday/vsyscall code can read it without a syscall. */
49 volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
/*
 * profile_pc - return the program counter to attribute a profiling tick to.
 * @regs: interrupted register state
 *
 * If the kernel was interrupted inside a lock function, the sample is
 * attributed to the caller of the lock function instead (recovered from the
 * stack), so profiles show lock contention at the call site rather than in
 * the lock primitive itself.
 *
 * NOTE(review): interior lines of this excerpt are missing (the caller-PC
 * recovery and return path are not visible here).
 */
51 unsigned long profile_pc(struct pt_regs *regs)
53 unsigned long pc = instruction_pointer(regs);
55 /* Assume the lock function has either no stack frame or a copy
57 Eflags always has bits 22 and up cleared unlike kernel addresses. */
58 if (!user_mode(regs) && in_lock_functions(pc)) {
59 unsigned long *sp = (unsigned long *)regs->rsp;
67 EXPORT_SYMBOL(profile_pc);
/*
 * timer_event_interrupt - IRQ0 (timer) interrupt handler.
 * @irq: interrupt number (unused here)
 * @dev_id: device cookie (unused here)
 *
 * Bumps the per-CPU irq0 counter in the PDA, then hands the tick to the
 * registered global clock event device's event handler.
 */
69 static irqreturn_t timer_event_interrupt(int irq, void *dev_id)
71 add_pda(irq0_irqs, 1);
73 global_clock_event->event_handler(global_clock_event);
78 /* calibrate_cpu is used on systems with fixed rate TSCs to determine
79 * processor frequency */
80 #define TICK_COUNT 100000000
/*
 * tsc_calibrate_cpu_khz - derive the CPU core frequency in kHz.
 *
 * Programs a free K7-family performance counter to count CPU cycles
 * (event 0x76, enabled via EVNTSEL bit 22, OS+USR in bits 16-17), spins
 * until the TSC has advanced by TICK_COUNT, then scales the cycle count
 * by the known TSC frequency:  cpu_khz = cycles * tsc_khz / tsc_delta.
 *
 * NOTE(review): interior lines are missing from this excerpt (counter
 * selection loop body, the fallback when no counter is free, and the
 * do-loop head are not visible here).
 */
81 static unsigned int __init tsc_calibrate_cpu_khz(void)
83 int tsc_start, tsc_now;
85 unsigned long evntsel3 = 0, pmc3 = 0, pmc_now = 0;
/* Find a free performance counter slot (0-3); i == 4 means none free. */
88 for (i = 0; i < 4; i++)
89 if (avail_to_resrv_perfctr_nmi_bit(i))
91 no_ctr_free = (i == 4);
/* Save counter 3 state so it can be restored if we had to borrow it. */
94 rdmsrl(MSR_K7_EVNTSEL3, evntsel3);
95 wrmsrl(MSR_K7_EVNTSEL3, 0);
96 rdmsrl(MSR_K7_PERFCTR3, pmc3);
98 reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i);
99 reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
/* Disable interrupts so nothing perturbs the cycle/TSC measurement. */
101 local_irq_save(flags);
102 /* start measuring cycles, incrementing from 0 */
103 wrmsrl(MSR_K7_PERFCTR0 + i, 0);
104 wrmsrl(MSR_K7_EVNTSEL0 + i, 1 << 22 | 3 << 16 | 0x76);
107 rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now);
108 tsc_now = get_cycles_sync();
109 } while ((tsc_now - tsc_start) < TICK_COUNT);
111 local_irq_restore(flags);
/* Restore the borrowed counter's saved event select and count. */
113 wrmsrl(MSR_K7_EVNTSEL3, 0);
114 wrmsrl(MSR_K7_PERFCTR3, pmc3);
115 wrmsrl(MSR_K7_EVNTSEL3, evntsel3);
117 release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
118 release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
/* cycles counted, scaled by the known TSC rate over the same interval */
121 return pmc_now * tsc_khz / (tsc_now - tsc_start);
/*
 * IRQ0 action: timer tick. IRQF_IRQPOLL marks it usable for irq polling,
 * IRQF_NOBALANCING keeps it pinned (excluded from IRQ balancing), and
 * IRQF_DISABLED runs the handler with interrupts off.
 * NOTE(review): the initializer's tail (e.g. .name, closing brace) is not
 * visible in this excerpt.
 */
124 static struct irqaction irq0 = {
125 .handler = timer_event_interrupt,
126 .flags = IRQF_DISABLED | IRQF_IRQPOLL | IRQF_NOBALANCING,
127 .mask = CPU_MASK_NONE,
/*
 * time_init - boot-time timekeeping setup.
 *
 * On family-16 AMD CPUs with a constant-rate TSC, measures cpu_khz via a
 * performance counter; marks the TSC unstable when cores are known to be
 * unsynchronized; selects the vgetcpu implementation (RDTSCP if the CPU
 * has it, LSL otherwise); then registers the TSC clocksource.
 *
 * NOTE(review): interior lines are missing from this excerpt (earlier
 * cpu_khz/hpet setup and the "else" line before VGETCPU_LSL are not
 * visible here).
 */
131 void __init time_init(void)
141 if (cpu_has(&boot_cpu_data, X86_FEATURE_CONSTANT_TSC) &&
142 boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
143 boot_cpu_data.x86 == 16)
144 cpu_khz = tsc_calibrate_cpu_khz();
146 if (unsynchronized_tsc())
147 mark_tsc_unstable("TSCs unsynchronized");
/* Pick how vgetcpu() reads the CPU number from userspace. */
149 if (cpu_has(&boot_cpu_data, X86_FEATURE_RDTSCP))
150 vgetcpu_mode = VGETCPU_RDTSCP;
152 vgetcpu_mode = VGETCPU_LSL;
154 printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n",
155 cpu_khz / 1000, cpu_khz % 1000);
156 init_tsc_clocksource();