#include <linux/threads.h>
 #include <linux/irq.h>
-#include <asm/pda.h>
 #include <asm/apic.h>
 
+typedef struct {
+       unsigned int __softirq_pending;
+       unsigned int __nmi_count;       /* arch dependent */
+       unsigned int apic_timer_irqs;   /* arch dependent */
+       unsigned int irq0_irqs;
+       unsigned int irq_resched_count;
+       unsigned int irq_call_count;
+       unsigned int irq_tlb_count;
+       unsigned int irq_thermal_count;
+       unsigned int irq_spurious_count;
+       unsigned int irq_threshold_count;
+} ____cacheline_aligned irq_cpustat_t;
+
+DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
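With irq_stat declared like this, any CPU's statistics are reachable through the
generic per-CPU accessors rather than the 64-bit-only PDA helpers. A minimal
sketch (hypothetical helper, not part of the patch; assumes <linux/percpu.h>
and <linux/cpumask.h>):

	/* Count CPUs with softirqs pending, reading the per-CPU
	 * irq_stat declared above for each possible CPU. */
	static unsigned int count_pending_softirq_cpus(void)
	{
		unsigned int cpu, n = 0;

		for_each_possible_cpu(cpu)
			if (per_cpu(irq_stat, cpu).__softirq_pending)
				n++;
		return n;
	}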
+
 /* We can have at most NR_VECTORS irqs routed to a cpu at a time */
 #define MAX_HARDIRQS_PER_CPU NR_VECTORS
 
 #define __ARCH_IRQ_STAT 1
 
-#define inc_irq_stat(member)   add_pda(member, 1)
+#define inc_irq_stat(member)   percpu_add(irq_stat.member, 1)
 
-#define local_softirq_pending() read_pda(__softirq_pending)
+#define local_softirq_pending() percpu_read(irq_stat.__softirq_pending)
 
 #define __ARCH_SET_SOFTIRQ_PENDING 1
 
-#define set_softirq_pending(x) write_pda(__softirq_pending, (x))
-#define or_softirq_pending(x)  or_pda(__softirq_pending, (x))
+#define set_softirq_pending(x) percpu_write(irq_stat.__softirq_pending, (x))
+#define or_softirq_pending(x)  percpu_or(irq_stat.__softirq_pending, (x))
 
 extern void ack_bad_irq(unsigned int irq);
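For illustration, a hypothetical IPI handler (not part of this patch) written
against the rewritten macro; on x86-64, percpu_add() compiles to a single
%gs-relative add, so no explicit preempt_disable()/preempt_enable() pair is
needed around the increment:

	/* Hypothetical handler: the same source now works on both
	 * 32-bit and 64-bit kernels. */
	static irqreturn_t example_ipi_interrupt(int irq, void *dev_id)
	{
		inc_irq_stat(irq_call_count);
		return IRQ_HANDLED;
	}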
 
 
        char *irqstackptr;
        short nodenumber;               /* number of current node (32k max) */
        short in_bootmem;               /* pda lives in bootmem */
-       unsigned int __softirq_pending;
-       unsigned int __nmi_count;       /* number of NMIs on this CPU */
        short mmu_state;
        short isidle;
        struct mm_struct *active_mm;
-       unsigned apic_timer_irqs;
-       unsigned irq0_irqs;
-       unsigned irq_resched_count;
-       unsigned irq_call_count;
-       unsigned irq_tlb_count;
-       unsigned irq_thermal_count;
-       unsigned irq_threshold_count;
-       unsigned irq_spurious_count;
 } ____cacheline_aligned_in_smp;
 
 DECLARE_PER_CPU(struct x8664_pda, __pda);
 
 #endif

}
 
-#ifdef CONFIG_X86_32
-# define irq_stats(x)          (&per_cpu(irq_stat, x))
-#else
-# define irq_stats(x)          cpu_pda(x)
-#endif
+#define irq_stats(x)           (&per_cpu(irq_stat, x))
 /*
  * /proc/interrupts printing:
  */
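The printing code itself is outside this hunk; as a sketch of what the single
irq_stats() definition enables (hypothetical excerpt in the style of
show_interrupts(), assuming <linux/seq_file.h> and <linux/cpumask.h>):

	/* One ifdef-free path for both 32- and 64-bit kernels. */
	static void show_resched_counts(struct seq_file *p)
	{
		int j;

		for_each_online_cpu(j)
			seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
		seq_printf(p, "  Rescheduling interrupts\n");
	}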
 
 #include <asm/io_apic.h>
 #include <asm/idle.h>
 
+DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
+EXPORT_PER_CPU_SYMBOL(irq_stat);
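DEFINE_PER_CPU_SHARED_ALIGNED keeps each CPU's statistics on their own cache
line, and the export makes the variable visible to modules; e.g. a module
could do (hypothetical snippet, relying on the export above):

	/* Works from modular code only because irq_stat is exported;
	 * local_softirq_pending() expands to a percpu_read() of it. */
	static bool softirqs_pending_here(void)
	{
		return local_softirq_pending() != 0;
	}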
+
 /*
  * Probabilistic stack overflow check:
  *
 
 
 static inline unsigned int get_nmi_count(int cpu)
 {
-#ifdef CONFIG_X86_64
-       return cpu_pda(cpu)->__nmi_count;
-#else
-       return nmi_count(cpu);
-#endif
+       return per_cpu(irq_stat, cpu).__nmi_count;
 }
 
 static inline int mce_in_progress(void)

  */
 static inline unsigned int get_timer_irqs(int cpu)
 {
-#ifdef CONFIG_X86_64
-       return read_pda(apic_timer_irqs) + read_pda(irq0_irqs);
-#else
        return per_cpu(irq_stat, cpu).apic_timer_irqs +
                per_cpu(irq_stat, cpu).irq0_irqs;
-#endif
 }
 
 #ifdef CONFIG_SMP
 
  */
 static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
 {
-#ifdef CONFIG_X86_32
-       __get_cpu_var(irq_stat).irq_resched_count++;
-#else
-       add_pda(irq_resched_count, 1);
-#endif
+       inc_irq_stat(irq_resched_count);
 
        return IRQ_HANDLED;
 }
 {
        irq_enter();
        generic_smp_call_function_interrupt();
-#ifdef CONFIG_X86_32
-       __get_cpu_var(irq_stat).irq_call_count++;
-#else
-       add_pda(irq_call_count, 1);
-#endif
+       inc_irq_stat(irq_call_count);
        irq_exit();
 
        return IRQ_HANDLED;
 {
        irq_enter();
        generic_smp_call_function_single_interrupt();
-#ifdef CONFIG_X86_32
-       __get_cpu_var(irq_stat).irq_call_count++;
-#else
-       add_pda(irq_call_count, 1);
-#endif
+       inc_irq_stat(irq_call_count);
        irq_exit();
 
        return IRQ_HANDLED;