;;
        SAVE_REST
        ;;
+       MCA_RECOVER_RANGE(interrupt)
        alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
        mov out0=cr.ivr         // pass cr.ivr as first arg
        add out1=16,sp          // pass pointer to pt_regs as second arg
 
 #include <asm/irq.h>
 #include <asm/hw_irq.h>
 
+#include "mca_drv.h"
 #include "entry.h"
 
 #if defined(IA64_MCA_DEBUG_INFO)
                ia64_sal_clear_state_info(sal_info_type);
 }
 
+/*
+ * search_mca_table
+ *  See if the MCA surfaced in an instruction range
+ *  that has been tagged as recoverable.
+ *
+ *  Inputs
+ *     first   First address range to check
+ *     last    Last address range to check
+ *     ip      Instruction pointer, address we are looking for
+ *
+ * Return value:
+ *      1 on Success (in the table) / 0 on Failure (not in the table)
+ */
+int
+search_mca_table (const struct mca_table_entry *first,
+                const struct mca_table_entry *last,
+                unsigned long ip)
+{
+        const struct mca_table_entry *curr;
+        u64 curr_start, curr_end;
+
+        curr = first;
+        while (curr <= last) {
+                /* Table entries hold self-relative offsets (emitted by the
+                 * MCA_RECOVER_RANGE macro); add each field's own address to
+                 * recover the absolute start/end of the tagged range. */
+                curr_start = (u64) &curr->start_addr + curr->start_addr;
+                curr_end = (u64) &curr->end_addr + curr->end_addr;
+
+                /* Range check is inclusive at both ends. */
+                if ((ip >= curr_start) && (ip <= curr_end)) {
+                        return 1;
+                }
+                curr++;
+        }
+        return 0;
+}
+
+/* Given an address, look for it in the mca tables. */
+int mca_recover_range(unsigned long addr)
+{
+       extern struct mca_table_entry __start___mca_table[];
+       extern struct mca_table_entry __stop___mca_table[];
+
+       return search_mca_table(__start___mca_table, __stop___mca_table-1, addr);
+}
+EXPORT_SYMBOL_GPL(mca_recover_range);
+
 #ifdef CONFIG_ACPI
 
 int cpe_vector = -1;
                ia64_mca_modify_comm(previous_current);
                goto no_mod;
        }
-       if (r13 != sos->prev_IA64_KR_CURRENT) {
-               msg = "inconsistent previous current and r13";
-               goto no_mod;
-       }
-       if ((r12 - r13) >= KERNEL_STACK_SIZE) {
-               msg = "inconsistent r12 and r13";
-               goto no_mod;
-       }
-       if ((ar_bspstore - r13) >= KERNEL_STACK_SIZE) {
-               msg = "inconsistent ar.bspstore and r13";
-               goto no_mod;
-       }
-       va.p = old_bspstore;
-       if (va.f.reg < 5) {
-               msg = "old_bspstore is in the wrong region";
-               goto no_mod;
-       }
-       if ((ar_bsp - r13) >= KERNEL_STACK_SIZE) {
-               msg = "inconsistent ar.bsp and r13";
-               goto no_mod;
-       }
-       size += (ia64_rse_skip_regs(old_bspstore, slots) - old_bspstore) * 8;
-       if (ar_bspstore + size > r12) {
-               msg = "no room for blocked state";
-               goto no_mod;
+
+       if (!mca_recover_range(ms->pmsa_iip)) {
+               if (r13 != sos->prev_IA64_KR_CURRENT) {
+                       msg = "inconsistent previous current and r13";
+                       goto no_mod;
+               }
+               if ((r12 - r13) >= KERNEL_STACK_SIZE) {
+                       msg = "inconsistent r12 and r13";
+                       goto no_mod;
+               }
+               if ((ar_bspstore - r13) >= KERNEL_STACK_SIZE) {
+                       msg = "inconsistent ar.bspstore and r13";
+                       goto no_mod;
+               }
+               va.p = old_bspstore;
+               if (va.f.reg < 5) {
+                       msg = "old_bspstore is in the wrong region";
+                       goto no_mod;
+               }
+               if ((ar_bsp - r13) >= KERNEL_STACK_SIZE) {
+                       msg = "inconsistent ar.bsp and r13";
+                       goto no_mod;
+               }
+               size += (ia64_rse_skip_regs(old_bspstore, slots) - old_bspstore) * 8;
+               if (ar_bspstore + size > r12) {
+                       msg = "no room for blocked state";
+                       goto no_mod;
+               }
        }
 
        ia64_mca_modify_comm(previous_current);
 
  * Copyright (C) Hidetoshi Seto (seto.hidetoshi@jp.fujitsu.com)
  * Copyright (C) 2005 Silicon Graphics, Inc
  * Copyright (C) 2005 Keith Owens <kaos@sgi.com>
+ * Copyright (C) 2006 Russ Anderson <rja@sgi.com>
  */
 #include <linux/config.h>
 #include <linux/types.h>
  */
 
 void
-mca_handler_bh(unsigned long paddr)
+mca_handler_bh(unsigned long paddr, void *iip, unsigned long ipsr)
 {
-       printk(KERN_ERR
-               "OS_MCA: process [pid: %d](%s) encounters MCA (paddr=%lx)\n",
-               current->pid, current->comm, paddr);
+       printk(KERN_ERR "OS_MCA: process [cpu %d, pid: %d, uid: %d, "
+               "iip: %p, psr: 0x%lx,paddr: 0x%lx](%s) encounters MCA.\n",
+               raw_smp_processor_id(), current->pid, current->uid,
+               iip, ipsr, paddr, current->comm);
 
        spin_lock(&mca_bh_lock);
        switch (mca_page_isolate(paddr)) {
        if (!peidx_bottom(peidx) || !(peidx_bottom(peidx)->valid.minstate))
                return 0;
        psr1 =(struct ia64_psr *)&(peidx_minstate_area(peidx)->pmsa_ipsr);
+       psr2 =(struct ia64_psr *)&(peidx_minstate_area(peidx)->pmsa_xpsr);
 
        /*
         *  Check the privilege level of interrupted context.
         *   If it is user-mode, then terminate affected process.
         */
-       if (psr1->cpl != 0) {
+
+       pmsa = sos->pal_min_state;
+       if (psr1->cpl != 0 ||
+          ((psr2->cpl != 0) && mca_recover_range(pmsa->pmsa_iip))) {
                smei = peidx_bus_check(peidx, 0);
                if (smei->valid.target_identifier) {
                        /*
                         *  setup for resume to bottom half of MCA,
                         * "mca_handler_bhhook"
                         */
-                       pmsa = sos->pal_min_state;
-                       /* pass to bhhook as 1st argument (gr8) */
+                       /* pass to bhhook as argument (gr8, ...) */
                        pmsa->pmsa_gr[8-1] = smei->target_identifier;
+                       pmsa->pmsa_gr[9-1] = pmsa->pmsa_iip;
+                       pmsa->pmsa_gr[10-1] = pmsa->pmsa_ipsr;
                        /* set interrupted return address (but no use) */
                        pmsa->pmsa_br0 = pmsa->pmsa_iip;
                        /* change resume address to bottom half */
                        psr2 = (struct ia64_psr *)&pmsa->pmsa_ipsr;
                        psr2->cpl = 0;
                        psr2->ri  = 0;
+                       psr2->bn  = 1;
                        psr2->i  = 0;
 
                        return 1;
 
        slidx_foreach_entry(__pos, &((slidx)->sec)) { __count++; }\
        __count; })
 
+struct mca_table_entry {
+       int start_addr; /* location-relative starting address of MCA recoverable range */
+       int end_addr;   /* location-relative ending address of MCA recoverable range */
+};
+
+/* Walk [first, last] (inclusive) for an entry whose range covers ip;
+ * prototype must match the definition in mca.c. */
+extern int search_mca_table(const struct mca_table_entry *first,
+                           const struct mca_table_entry *last,
+                           unsigned long ip);
+extern int mca_recover_range(unsigned long);
 
 
 GLOBAL_ENTRY(mca_handler_bhhook)
 	invala				// clear RSE ?
-	;;
 	cover
 	;;
 	clrrrb
 	;;                                              
-	alloc	r16=ar.pfs,0,2,1,0	// make a new frame
-	;;
+	// frame now has 3 outputs: address, iip and psr for mca_handler_bh
+	alloc	r16=ar.pfs,0,2,3,0	// make a new frame
 	mov	ar.rsc=0
-	;;
 	mov	r13=IA64_KR(CURRENT)	// current task pointer
 	;;
 	mov	r2=r13
 	addl	r22=IA64_RBS_OFFSET,r2
 	;;
 	mov	ar.bspstore=r22
-	;;
 	addl	sp=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r2
 	;;
 	adds	r2=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
 	movl	loc1=mca_handler_bh	// recovery C function
 	;;
 	mov	out0=r8			// poisoned address
+	mov	out1=r9			// iip
+	mov	out2=r10		// psr
 	mov	b6=loc1
 	;;
 	mov	loc1=rp
-	;;
-	ssm	psr.i
-	;;
+	// reenable interrupts (psr.i) and interruption collection (psr.ic)
+	ssm	psr.i | psr.ic
 	br.call.sptk.many rp=b6		// does not return ...
 	;;
 	mov	ar.pfs=loc0
 	;;
 	mov	r8=r0
 	br.ret.sptk.many rp
-	;;
 END(mca_handler_bhhook)
 
          __initcall_end = .;
        }
 
+  /* MCA table */
+  . = ALIGN(16);
+  __mca_table : AT(ADDR(__mca_table) - LOAD_OFFSET)
+       {
+         __start___mca_table = .;
+         *(__mca_table)
+         __stop___mca_table = .;
+       }
+
   .data.patch.vtop : AT(ADDR(.data.patch.vtop) - LOAD_OFFSET)
        {
          __start___vtop_patchlist = .;
 
        .xdata4 "__ex_table", 99f-., y-.+4;     \
   [99:]        x
 
+/*
+ * Tag MCA recoverable instruction ranges.
+ *
+ * MCA_RECOVER_RANGE(y) emits a pair of 4-byte self-relative offsets
+ * into the "__mca_table" section: the start of the range (label y)
+ * and its end (local label 99, defined at the use site).  The table
+ * is consumed at MCA time to decide whether the interrupted ip lies
+ * in a range tagged as recoverable.
+ */
+
+	.section "__mca_table", "a"		// declare section & section attributes
+	.previous
+
+# define MCA_RECOVER_RANGE(y)			\
+	.xdata4 "__mca_table", y-., 99f-.;	\
+  [99:]
+
 /*
  * Mark instructions that need a load of a virtual address patched to be
  * a load of a physical address.  We use this either in critical performance