X-Git-Url: http://pilppa.org/gitweb/gitweb.cgi?a=blobdiff_plain;f=arch%2Fsparc64%2Fkernel%2Firq.c;h=9b6689d9d57097239641d51653dc921e2607b997;hb=6a23312bd31878db555afad98b44705fc1590896;hp=30431bd24e1efbe7ea2153043d2d06605433febe;hpb=46015977e70f672ae6b20a1b5fb1e361208365ba;p=linux-2.6-omap-h63xx.git

diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 30431bd24e1..9b6689d9d57 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -1,6 +1,6 @@
 /* irq.c: UltraSparc IRQ handling/init/registry.
  *
- * Copyright (C) 1997, 2007 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
  * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
  * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
  */
@@ -44,27 +44,10 @@
 #include <asm/hypervisor.h>
 #include <asm/cacheflush.h>
 
-/* UPA nodes send interrupt packet to UltraSparc with first data reg
- * value low 5 (7 on Starfire) bits holding the IRQ identifier being
- * delivered. We must translate this into a non-vector IRQ so we can
- * set the softint on this cpu.
- *
- * To make processing these packets efficient and race free we use
- * an array of irq buckets below. The interrupt vector handler in
- * entry.S feeds incoming packets into per-cpu pil-indexed lists.
- *
- * If you make changes to ino_bucket, please update hand coded assembler
- * of the vectored interrupt trap handler(s) in entry.S and sun4v_ivec.S
- */
-struct ino_bucket {
-/*0x00*/unsigned long __irq_chain_pa;
-
-        /* Virtual interrupt number assigned to this INO. */
-/*0x08*/unsigned int __virt_irq;
-/*0x0c*/unsigned int __pad;
-};
+#include "entry.h"
 
 #define NUM_IVECS       (IMAP_INR + 1)
+
 struct ino_bucket *ivector_table;
 unsigned long ivector_table_pa;
 
@@ -325,6 +308,7 @@ static void sun4u_irq_enable(unsigned int virt_irq)
                         IMAP_AID_SAFARI | IMAP_NID_SAFARI);
                val |= tid | IMAP_VALID;
                upa_writeq(val, imap);
+               upa_writeq(ICLR_IDLE, data->iclr);
        }
 }
 
@@ -522,7 +506,7 @@ static struct irq_chip sun4v_virq = {
        .set_affinity   = sun4v_virt_set_affinity,
 };
 
-static void fastcall pre_flow_handler(unsigned int virt_irq,
+static void pre_flow_handler(unsigned int virt_irq,
                                      struct irq_desc *desc)
 {
        struct irq_handler_data *data = get_irq_chip_data(virt_irq);
@@ -637,8 +621,9 @@ unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
 unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
 {
        struct irq_handler_data *data;
-       struct ino_bucket *bucket;
        unsigned long hv_err, cookie;
+       struct ino_bucket *bucket;
+       struct irq_desc *desc;
        unsigned int virt_irq;
 
        bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
@@ -659,6 +644,13 @@ unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
        if (unlikely(!data))
                return 0;
 
+       /* In order to make the LDC channel startup sequence easier,
+        * especially wrt. locking, we do not let request_irq() enable
+        * the interrupt.
+        */
+       desc = irq_desc + virt_irq;
+       desc->status |= IRQ_NOAUTOEN;
+
        set_irq_chip_data(virt_irq, data);
 
        /* Catch accidental accesses to these things.  IMAP/ICLR handling
@@ -690,10 +682,32 @@ void ack_bad_irq(unsigned int virt_irq)
               ino, virt_irq);
 }
 
+void *hardirq_stack[NR_CPUS];
+void *softirq_stack[NR_CPUS];
+
+static __attribute__((always_inline)) void *set_hardirq_stack(void)
+{
+       void *orig_sp, *sp = hardirq_stack[smp_processor_id()];
+
+       __asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
+       if (orig_sp < sp ||
+           orig_sp > (sp + THREAD_SIZE)) {
+               sp += THREAD_SIZE - 192 - STACK_BIAS;
+               __asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
+       }
+
+       return orig_sp;
+}
+static __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
+{
+       __asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
+}
+
 void handler_irq(int irq, struct pt_regs *regs)
 {
        unsigned long pstate, bucket_pa;
        struct pt_regs *old_regs;
+       void *orig_sp;
 
        clear_softint(1 << irq);
 
@@ -711,6 +725,8 @@ void handler_irq(int irq, struct pt_regs *regs)
                       "i" (PSTATE_IE)
                     : "memory");
 
+       orig_sp = set_hardirq_stack();
+
        while (bucket_pa) {
                struct irq_desc *desc;
                unsigned long next_pa;
@@ -727,10 +743,38 @@ void handler_irq(int irq, struct pt_regs *regs)
                bucket_pa = next_pa;
        }
 
+       restore_hardirq_stack(orig_sp);
+
        irq_exit();
        set_irq_regs(old_regs);
 }
 
+void do_softirq(void)
+{
+       unsigned long flags;
+
+       if (in_interrupt())
+               return;
+
+       local_irq_save(flags);
+
+       if (local_softirq_pending()) {
+               void *orig_sp, *sp = softirq_stack[smp_processor_id()];
+
+               sp += THREAD_SIZE - 192 - STACK_BIAS;
+
+               __asm__ __volatile__("mov %%sp, %0\n\t"
+                                    "mov %1, %%sp"
+                                    : "=&r" (orig_sp)
+                                    : "r" (sp));
+               __do_softirq();
+               __asm__ __volatile__("mov %0, %%sp"
+                                    : : "r" (orig_sp));
+       }
+
+       local_irq_restore(flags);
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 void fixup_irqs(void)
 {
@@ -923,12 +967,18 @@ static void __init sun4v_init_mondo_queues(void)
                alloc_one_mondo(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
                alloc_one_kbuf(&tb->nonresum_kernel_buf_pa,
                               tb->nonresum_qmask);
+       }
+}
+
+static void __init init_send_mondo_info(void)
+{
+       int cpu;
+
+       for_each_possible_cpu(cpu) {
+               struct trap_per_cpu *tb = &trap_block[cpu];
 
                init_cpu_send_mondo_info(tb);
        }
-
-       /* Load up the boot cpu's entries. */
-       sun4v_register_mondo_queues(hard_smp_processor_id());
 }
 
 static struct irqaction timer_irq_action = {
@@ -957,6 +1007,13 @@ void __init init_IRQ(void)
        if (tlb_type == hypervisor)
                sun4v_init_mondo_queues();
 
+       init_send_mondo_info();
+
+       if (tlb_type == hypervisor) {
+               /* Load up the boot cpu's entries. */
+               sun4v_register_mondo_queues(hard_smp_processor_id());
+       }
+
        /* We need to clear any IRQ's pending in the soft interrupt
         * registers, a spurious one could be left around from the
         * PROM timer which we just disabled.
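
The interesting part of the IRQ-stack hunks above is the range test in set_hardirq_stack(): an interrupt may arrive while the cpu is already running on the per-cpu hardirq stack (a nested interrupt), in which case %sp must stay where it is; otherwise %sp is pointed at the top of that stack, minus a 192-byte minimal frame and the sparc64 STACK_BIAS of 2047. Below is a minimal user-space C sketch of just that decision, not from the patch; the function names, the malloc()ed stand-in stacks, and the 16K THREAD_SIZE are assumptions for illustration only.

/* Sketch: user-space model of the set_hardirq_stack() range check.
 * All names are hypothetical; the 192/STACK_BIAS arithmetic mirrors
 * the diff above.
 */
#include <stdio.h>
#include <stdlib.h>

#define THREAD_SIZE (2 * 8192)  /* assumed: two 8K pages per kernel stack */
#define STACK_BIAS  2047        /* sparc64 ABI bias applied to %sp */

static char *irq_stack_base;    /* stand-in for hardirq_stack[cpu] */

/* Decide what %sp should become on IRQ entry: hop onto the per-cpu
 * IRQ stack unless the current stack pointer already falls inside it,
 * in which case a nested interrupt keeps the stack it is on.
 */
static char *pick_stack(char *cur_sp)
{
        char *sp = irq_stack_base;

        if (cur_sp < sp || cur_sp > sp + THREAD_SIZE)
                return sp + THREAD_SIZE - 192 - STACK_BIAS;
        return cur_sp;
}

int main(void)
{
        irq_stack_base = malloc(THREAD_SIZE);
        char *task_sp = malloc(THREAD_SIZE);    /* pretend task stack */

        /* Coming from a task stack: switched to the IRQ stack top. */
        printf("switched:  %p\n", (void *)pick_stack(task_sp + 1024));
        /* Already on the IRQ stack: pointer left alone. */
        printf("unchanged: %p\n", (void *)pick_stack(irq_stack_base + 4096));
        return 0;
}

Note that do_softirq() in the diff needs no such test: it bails out via in_interrupt() when called from IRQ context, so whenever its stack switch runs it cannot already be on the softirq stack and can switch unconditionally.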
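The IRQ_NOAUTOEN hunk in sun4v_build_virq() changes the contract for these virtual IRQs: request_irq() installs the handler but leaves the source masked, so the LDC channel code can finish its locking setup before interrupts flow. A hedged sketch of the consumer-side pattern this enables; the handler, channel, and function names are invented, while request_irq() and enable_irq() are the stock kernel APIs of this era.

#include <linux/interrupt.h>

/* Hypothetical receive handler for an LDC-style channel. */
static irqreturn_t ldc_rx_handler(int irq, void *dev_id)
{
        /* ... drain the channel queue ... */
        return IRQ_HANDLED;
}

/* Hypothetical startup path for a virt_irq built by sun4v_build_virq(). */
static int ldc_channel_start(unsigned int virt_irq, void *chan)
{
        int err;

        /* Because of IRQ_NOAUTOEN, this registers the handler but
         * leaves the interrupt source masked.
         */
        err = request_irq(virt_irq, ldc_rx_handler, 0, "ldc-rx", chan);
        if (err)
                return err;

        /* ... take channel locks, program the queues, etc. ... */

        /* Unmask only once the channel is fully set up. */
        enable_irq(virt_irq);
        return 0;
}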