Fix the build on non-sparseirq architectures: make the kstat_irqs bookkeeping conditional on CONFIG_HAVE_DYN_ARRAY instead of CONFIG_GENERIC_HARDIRQS.
Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
 #endif
 #ifdef CONFIG_HAVE_DYN_ARRAY
        unsigned int            *kstat_irqs;
-#else
-       unsigned int            kstat_irqs[NR_CPUS];
 #endif
 #if defined(CONFIG_INTR_REMAP) && defined(CONFIG_HAVE_SPARSE_IRQ)
        struct irq_2_iommu      *irq_2_iommu;
 
 #endif
 
+#ifdef CONFIG_HAVE_DYN_ARRAY
 #define kstat_irqs_this_cpu(DESC) \
        ((DESC)->kstat_irqs[smp_processor_id()])
+#endif
 
 /*
  * Migration helpers for obsolete names, they will go away:
 
 
 struct kernel_stat {
        struct cpu_usage_stat   cpustat;
-#ifndef CONFIG_GENERIC_HARDIRQS
+#ifndef CONFIG_HAVE_DYN_ARRAY
        unsigned int irqs[NR_IRQS];
 #endif
 };
 
 extern unsigned long long nr_context_switches(void);
 
-#ifndef CONFIG_GENERIC_HARDIRQS
+#ifndef CONFIG_HAVE_DYN_ARRAY
+#define kstat_irqs_this_cpu(irq) \
+       (kstat_this_cpu.irqs[irq])
+#endif
+
+
+#ifndef CONFIG_HAVE_DYN_ARRAY
 static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
 {
        return kstat_cpu(cpu).irqs[irq];
 
        if (unlikely(desc->status & IRQ_INPROGRESS))
                goto out_unlock;
        desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
+#ifdef CONFIG_HAVE_DYN_ARRAY
        kstat_irqs_this_cpu(desc)++;
+#else
+       kstat_irqs_this_cpu(irq)++;
+#endif
 
        action = desc->action;
        if (unlikely(!action || (desc->status & IRQ_DISABLED)))
        if (unlikely(desc->status & IRQ_INPROGRESS))
                goto out_unlock;
        desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
+#ifdef CONFIG_HAVE_DYN_ARRAY
        kstat_irqs_this_cpu(desc)++;
+#else
+       kstat_irqs_this_cpu(irq)++;
+#endif
 
        /*
         * If its disabled or no action available
                goto out;
 
        desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
+#ifdef CONFIG_HAVE_DYN_ARRAY
        kstat_irqs_this_cpu(desc)++;
+#else
+       kstat_irqs_this_cpu(irq)++;
+#endif
 
        /*
         * If its disabled or no action available
                mask_ack_irq(desc, irq);
                goto out_unlock;
        }
-
+#ifdef CONFIG_HAVE_DYN_ARRAY
        kstat_irqs_this_cpu(desc)++;
+#else
+       kstat_irqs_this_cpu(irq)++;
+#endif
 
        /* Start handling the irq */
        desc->chip->ack(irq);
 {
        irqreturn_t action_ret;
 
+#ifdef CONFIG_HAVE_DYN_ARRAY
        kstat_irqs_this_cpu(desc)++;
+#else
+       kstat_irqs_this_cpu(irq)++;
+#endif
 
        if (desc->chip->ack)
                desc->chip->ack(irq);
 
 handle_bad_irq(unsigned int irq, struct irq_desc *desc)
 {
        print_irq_desc(irq, desc);
+#ifdef CONFIG_HAVE_DYN_ARRAY
        kstat_irqs_this_cpu(desc)++;
+#else
+       kstat_irqs_this_cpu(irq)++;
+#endif
        ack_bad_irq(irq);
 }
 
        struct irqaction *action;
        unsigned int status;
 
+#ifdef CONFIG_HAVE_DYN_ARRAY
        kstat_irqs_this_cpu(desc)++;
+#else
+       kstat_irqs_this_cpu(irq)++;
+#endif
        if (CHECK_IRQ_PER_CPU(desc->status)) {
                irqreturn_t action_ret;
 
 }
 #endif
 
+#ifdef CONFIG_HAVE_DYN_ARRAY
+/*
+ * Per-cpu irq statistics accessor for the dyn-array case, where the
+ * counters live in the irq_desc itself. When CONFIG_HAVE_DYN_ARRAY is
+ * not set, kstat_irqs_cpu() is provided as a static inline in
+ * <linux/kernel_stat.h> reading kstat_cpu(cpu).irqs[irq], so both the
+ * definition AND the EXPORT_SYMBOL must stay inside this #ifdef;
+ * exporting a symbol that does not exist in this translation unit
+ * would break the build on !CONFIG_HAVE_DYN_ARRAY configurations.
+ */
 unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
 {
        struct irq_desc *desc = irq_to_desc(irq);
        return desc->kstat_irqs[cpu];
 }
 EXPORT_SYMBOL(kstat_irqs_cpu);
+#endif