Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
[linux-2.6-omap-h63xx.git] / arch/sparc64/kernel/irq.c
index b441a26b73b03ab514aa772609c92e8a179d2e12..9b6689d9d57097239641d51653dc921e2607b997 100644 (file)
@@ -621,8 +621,9 @@ unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
 unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
 {
        struct irq_handler_data *data;
-       struct ino_bucket *bucket;
        unsigned long hv_err, cookie;
+       struct ino_bucket *bucket;
+       struct irq_desc *desc;
        unsigned int virt_irq;
 
        bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
@@ -643,6 +644,13 @@ unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
        if (unlikely(!data))
                return 0;
 
+       /* In order to make the LDC channel startup sequence easier,
+        * especially wrt. locking, we do not let request_irq() enable
+        * the interrupt.
+        */
+       desc = irq_desc + virt_irq;
+       desc->status |= IRQ_NOAUTOEN;
+
        set_irq_chip_data(virt_irq, data);
 
        /* Catch accidental accesses to these things.  IMAP/ICLR handling
@@ -674,10 +682,32 @@ void ack_bad_irq(unsigned int virt_irq)
               ino, virt_irq);
 }
 
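+/* Per-cpu base addresses of the dedicated hard and soft IRQ stacks. */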
+void *hardirq_stack[NR_CPUS];
+void *softirq_stack[NR_CPUS];
+
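+/* Switch %sp onto this cpu's hardirq stack unless it already points
+ * into that stack; the original %sp is returned so that
+ * restore_hardirq_stack() can switch back when the handlers are done.
+ */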
+static __attribute__((always_inline)) void *set_hardirq_stack(void)
+{
+       void *orig_sp, *sp = hardirq_stack[smp_processor_id()];
+
+       __asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
+       if (orig_sp < sp ||
+           orig_sp > (sp + THREAD_SIZE)) {
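+               /* Top of the new stack, leaving 192 bytes of headroom
+                * and applying the sparc64 stack bias.
+                */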
+               sp += THREAD_SIZE - 192 - STACK_BIAS;
+               __asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
+       }
+
+       return orig_sp;
+}
+static __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
+{
+       __asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
+}
+
 void handler_irq(int irq, struct pt_regs *regs)
 {
        unsigned long pstate, bucket_pa;
        struct pt_regs *old_regs;
+       void *orig_sp;
 
        clear_softint(1 << irq);
 
@@ -695,6 +725,8 @@ void handler_irq(int irq, struct pt_regs *regs)
                               "i" (PSTATE_IE)
                             : "memory");
 
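+       /* Run the IRQ handlers below on this cpu's hardirq stack. */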
+       orig_sp = set_hardirq_stack();
+
        while (bucket_pa) {
                struct irq_desc *desc;
                unsigned long next_pa;
@@ -711,10 +743,38 @@ void handler_irq(int irq, struct pt_regs *regs)
                bucket_pa = next_pa;
        }
 
+       restore_hardirq_stack(orig_sp);
+
        irq_exit();
        set_irq_regs(old_regs);
 }
 
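+/* Process any pending softirqs on this cpu's dedicated softirq stack
+ * instead of on the current kernel stack.
+ */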
+void do_softirq(void)
+{
+       unsigned long flags;
+
+       if (in_interrupt())
+               return;
+
+       local_irq_save(flags);
+
+       if (local_softirq_pending()) {
+               void *orig_sp, *sp = softirq_stack[smp_processor_id()];
+
+               sp += THREAD_SIZE - 192 - STACK_BIAS;
+
+               __asm__ __volatile__("mov %%sp, %0\n\t"
+                                    "mov %1, %%sp"
+                                    : "=&r" (orig_sp)
+                                    : "r" (sp));
+               __do_softirq();
+               __asm__ __volatile__("mov %0, %%sp"
+                                    : : "r" (orig_sp));
+       }
+
+       local_irq_restore(flags);
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 void fixup_irqs(void)
 {
@@ -907,12 +967,18 @@ static void __init sun4v_init_mondo_queues(void)
                alloc_one_mondo(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
                alloc_one_kbuf(&tb->nonresum_kernel_buf_pa,
                               tb->nonresum_qmask);
+       }
+}
+
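+/* Set up the mondo send state for every possible cpu; unlike the
+ * sun4v mondo queues, this is done regardless of tlb_type.
+ */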
+static void __init init_send_mondo_info(void)
+{
+       int cpu;
+
+       for_each_possible_cpu(cpu) {
+               struct trap_per_cpu *tb = &trap_block[cpu];
 
                init_cpu_send_mondo_info(tb);
        }
-
-       /* Load up the boot cpu's entries.  */
-       sun4v_register_mondo_queues(hard_smp_processor_id());
 }
 
 static struct irqaction timer_irq_action = {
@@ -941,6 +1007,13 @@ void __init init_IRQ(void)
        if (tlb_type == hypervisor)
                sun4v_init_mondo_queues();
 
+       init_send_mondo_info();
+
+       if (tlb_type == hypervisor) {
+               /* Load up the boot cpu's entries.  */
+               sun4v_register_mondo_queues(hard_smp_processor_id());
+       }
+
        /* We need to clear any IRQ's pending in the soft interrupt
         * registers, a spurious one could be left around from the
         * PROM timer which we just disabled.