#include <linux/kernel_stat.h>
 #include <linux/sysdev.h>
 #include <linux/cpu.h>
+#include <linux/module.h>
 
 #include <asm/atomic.h>
 #include <asm/smp.h>
 #include <asm/i8253.h>
 
 #include <mach_apic.h>
+#include <mach_ipi.h>
 
 #include "io_ports.h"
 
+/*
+ * cpu_mask denoting the CPUs that need the timer interrupt delivered as an
+ * IPI in place of their local APIC timer
+ */
+static cpumask_t timer_bcast_ipi;
+
 /*
  * Knob to control our willingness to enable the local APIC.
  */
 static void __setup_APIC_LVTT(unsigned int clocks)
 {
        unsigned int lvtt_value, tmp_value, ver;
+       int cpu = smp_processor_id();
 
        ver = GET_APIC_VERSION(apic_read(APIC_LVR));
        lvtt_value = APIC_LVT_TIMER_PERIODIC | LOCAL_TIMER_VECTOR;
        if (!APIC_INTEGRATED(ver))
                lvtt_value |= SET_APIC_TIMER_BASE(APIC_TIMER_BASE_DIV);
+
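+       /* this CPU gets its tick as a broadcast IPI, so keep the LVT timer masked */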
+       if (cpu_isset(cpu, timer_bcast_ipi))
+               lvtt_value |= APIC_LVT_MASKED;
+
        apic_write_around(APIC_LVTT, lvtt_value);
 
        /*
        setup_APIC_timer(calibration_result);
 }
 
-void __devinit disable_APIC_timer(void)
+void disable_APIC_timer(void)
 {
        if (using_apic_timer) {
                unsigned long v;
 
 void enable_APIC_timer(void)
 {
-       if (using_apic_timer) {
+       int cpu = smp_processor_id();
+
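+       /* don't unmask the local timer while this CPU still gets broadcast IPIs */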
+       if (using_apic_timer &&
+           !cpu_isset(cpu, timer_bcast_ipi)) {
                unsigned long v;
 
                v = apic_read(APIC_LVTT);
        }
 }
 
+void switch_APIC_timer_to_ipi(void *cpumask)
+{
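+       /*
+        * Meant to run on each CPU (e.g. via on_each_cpu()): if this CPU is
+        * in *cpumask, mask its local APIC timer and mark it to receive its
+        * ticks as broadcast IPIs instead.
+        */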
+       cpumask_t mask = *(cpumask_t *)cpumask;
+       int cpu = smp_processor_id();
+
+       if (cpu_isset(cpu, mask) &&
+           !cpu_isset(cpu, timer_bcast_ipi)) {
+               disable_APIC_timer();
+               cpu_set(cpu, timer_bcast_ipi);
+       }
+}
+EXPORT_SYMBOL(switch_APIC_timer_to_ipi);
+
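+/*
+ * Inverse of switch_APIC_timer_to_ipi(): if the calling CPU is in *cpumask,
+ * stop relying on broadcast IPIs and re-enable its local APIC timer.
+ */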
+void switch_ipi_to_APIC_timer(void *cpumask)
+{
+       cpumask_t mask = *(cpumask_t *)cpumask;
+       int cpu = smp_processor_id();
+
+       if (cpu_isset(cpu, mask) &&
+           cpu_isset(cpu, timer_bcast_ipi)) {
+               cpu_clear(cpu, timer_bcast_ipi);
+               enable_APIC_timer();
+       }
+}
+EXPORT_SYMBOL(switch_ipi_to_APIC_timer);
+
 #undef APIC_DIVISOR
 
 /*
        irq_exit();
 }
 
+#ifndef CONFIG_SMP
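+/*
+ * On UP there is nobody to send an IPI to; the "broadcast" degenerates into
+ * calling the local APIC timer handler directly.
+ */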
+static void up_apic_timer_interrupt_call(struct pt_regs *regs)
+{
+       int cpu = smp_processor_id();
+
+       /*
+        * the NMI deadlock-detector uses this per-CPU interrupt count.
+        */
+       per_cpu(irq_stat, cpu).apic_timer_irqs++;
+
+       smp_local_timer_interrupt(regs);
+}
+#endif
+
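+/*
+ * Relay the global timer tick as a LOCAL_TIMER_VECTOR IPI to every online
+ * CPU whose local APIC timer is masked (i.e. is in timer_bcast_ipi).
+ */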
+void smp_send_timer_broadcast_ipi(struct pt_regs *regs)
+{
+       cpumask_t mask;
+
+       cpus_and(mask, cpu_online_map, timer_bcast_ipi);
+       if (!cpus_empty(mask)) {
+#ifdef CONFIG_SMP
+               send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
+#else
+               /*
+                * In the UP case we can call the APIC timer interrupt
+                * handler directly, without the irq entry/exit handling.
+                */
+               up_apic_timer_interrupt_call(regs);
+#endif
+       }
+}
+
 int setup_profiling_timer(unsigned int multiplier)
 {
        return -EINVAL;
 
        unsigned int i;
        unsigned int working = 0;
 
+#ifdef ARCH_APICTIMER_STOPS_ON_C3
+       struct cpuinfo_x86 *c = cpu_data + pr->id;
+       cpumask_t mask = cpumask_of_cpu(pr->id);
+
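+       /*
+        * The local APIC timer stops in C3 on Intel CPUs.  Switch this CPU
+        * back to its local APIC timer here; if a C3 state is found below,
+        * it is switched over to broadcast IPIs again.
+        */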
+       if (c->x86_vendor == X86_VENDOR_INTEL) {
+               on_each_cpu(switch_ipi_to_APIC_timer, &mask, 1, 1);
+       }
+#endif
+
        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
                struct acpi_processor_cx *cx = &pr->power.states[i];
 
 
                case ACPI_STATE_C3:
                        acpi_processor_power_verify_c3(pr, cx);
+#ifdef ARCH_APICTIMER_STOPS_ON_C3
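+                       /*
+                        * C3 stops the local APIC timer, so deliver this
+                        * CPU's ticks as broadcast IPIs instead.
+                        */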
+                       if (c->x86_vendor == X86_VENDOR_INTEL) {
+                               on_each_cpu(switch_APIC_timer_to_ipi,
+                                               &mask, 1, 1);
+                       }
+#endif
                        break;
                }