}
 EXPORT_SYMBOL_GPL(spu_64k_pages_available);
 
-static int __spu_trap_invalid_dma(struct spu *spu)
-{
-       pr_debug("%s\n", __FUNCTION__);
-       spu->dma_callback(spu, SPE_EVENT_INVALID_DMA);
-       return 0;
-}
-
-static int __spu_trap_dma_align(struct spu *spu)
-{
-       pr_debug("%s\n", __FUNCTION__);
-       spu->dma_callback(spu, SPE_EVENT_DMA_ALIGNMENT);
-       return 0;
-}
-
-static int __spu_trap_error(struct spu *spu)
-{
-       pr_debug("%s\n", __FUNCTION__);
-       spu->dma_callback(spu, SPE_EVENT_SPE_ERROR);
-       return 0;
-}
-
 static void spu_restart_dma(struct spu *spu)
 {
        struct spu_priv2 __iomem *priv2 = spu->priv2;
                return 1;
        }
 
+       spu->class_0_pending = 0;
        spu->dar = ea;
        spu->dsisr = dsisr;
-       mb();
+
        spu->stop_callback(spu);
+
        return 0;
 }
 
 
        spu = data;
 
+       spin_lock(&spu->register_lock);
        mask = spu_int_mask_get(spu, 0);
-       stat = spu_int_stat_get(spu, 0);
-       stat &= mask;
+       stat = spu_int_stat_get(spu, 0) & mask;
 
-       spin_lock(&spu->register_lock);
        spu->class_0_pending |= stat;
+       spu->dsisr = spu_mfc_dsisr_get(spu);
+       spu->dar = spu_mfc_dar_get(spu);
        spin_unlock(&spu->register_lock);
 
        spu->stop_callback(spu);
        return IRQ_HANDLED;
 }
 
-int
-spu_irq_class_0_bottom(struct spu *spu)
-{
-       unsigned long flags;
-       unsigned long stat;
-
-       spin_lock_irqsave(&spu->register_lock, flags);
-       stat = spu->class_0_pending;
-       spu->class_0_pending = 0;
-
-       if (stat & CLASS0_DMA_ALIGNMENT_INTR)
-               __spu_trap_dma_align(spu);
-
-       if (stat & CLASS0_INVALID_DMA_COMMAND_INTR)
-               __spu_trap_invalid_dma(spu);
-
-       if (stat & CLASS0_SPU_ERROR_INTR)
-               __spu_trap_error(spu);
-
-       spin_unlock_irqrestore(&spu->register_lock, flags);
-
-       return (stat & CLASS0_INTR_MASK) ? -EIO : 0;
-}
-EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);
-
 static irqreturn_t
 spu_irq_class_1(int irq, void *data)
 {
 
 
 #include "spufs.h"
 
-static void spufs_handle_dma_error(struct spu_context *ctx,
+/**
+ * Handle an SPE event, depending on the context's SPU_CREATE_EVENTS_ENABLED
+ * flag.
+ *
+ * If the context was created with events enabled, we just set the return
+ * event.  Otherwise, send an appropriate signal to the process.
+ */
+static void spufs_handle_event(struct spu_context *ctx,
                                unsigned long ea, int type)
 {
+       siginfo_t info;
+
        if (ctx->flags & SPU_CREATE_EVENTS_ENABLED) {
                ctx->event_return |= type;
                wake_up_all(&ctx->stop_wq);
-       } else {
-               siginfo_t info;
-               memset(&info, 0, sizeof(info));
-
-               switch (type) {
-               case SPE_EVENT_INVALID_DMA:
-                       info.si_signo = SIGBUS;
-                       info.si_code = BUS_OBJERR;
-                       break;
-               case SPE_EVENT_SPE_DATA_STORAGE:
-                       info.si_signo = SIGBUS;
-                       info.si_addr = (void __user *)ea;
-                       info.si_code = BUS_ADRERR;
-                       break;
-               case SPE_EVENT_DMA_ALIGNMENT:
-                       info.si_signo = SIGBUS;
-                       /* DAR isn't set for an alignment fault :( */
-                       info.si_code = BUS_ADRALN;
-                       break;
-               case SPE_EVENT_SPE_ERROR:
-                       info.si_signo = SIGILL;
-                       info.si_addr = (void __user *)(unsigned long)
-                               ctx->ops->npc_read(ctx) - 4;
-                       info.si_code = ILL_ILLOPC;
-                       break;
-               }
-               if (info.si_signo)
-                       force_sig_info(info.si_signo, &info, current);
+               return;
        }
+
+       memset(&info, 0, sizeof(info));
+
+       switch (type) {
+       case SPE_EVENT_INVALID_DMA:
+               info.si_signo = SIGBUS;
+               info.si_code = BUS_OBJERR;
+               break;
+       case SPE_EVENT_SPE_DATA_STORAGE:
+               info.si_signo = SIGBUS;
+               info.si_addr = (void __user *)ea;
+               info.si_code = BUS_ADRERR;
+               break;
+       case SPE_EVENT_DMA_ALIGNMENT:
+               info.si_signo = SIGBUS;
+               /* DAR isn't set for an alignment fault :( */
+               info.si_code = BUS_ADRALN;
+               break;
+       case SPE_EVENT_SPE_ERROR:
+               info.si_signo = SIGILL;
+               info.si_addr = (void __user *)(unsigned long)
+                       ctx->ops->npc_read(ctx) - 4;
+               info.si_code = ILL_ILLOPC;
+               break;
+       }
+
+       if (info.si_signo)
+               force_sig_info(info.si_signo, &info, current);
 }
 
-void spufs_dma_callback(struct spu *spu, int type)
+int spufs_handle_class0(struct spu_context *ctx)
 {
-       spufs_handle_dma_error(spu->ctx, spu->dar, type);
+       unsigned long stat = ctx->csa.class_0_pending & CLASS0_INTR_MASK;
+
+       if (likely(!stat))
+               return 0;
+
+       if (stat & CLASS0_DMA_ALIGNMENT_INTR)
+               spufs_handle_event(ctx, ctx->csa.dar, SPE_EVENT_DMA_ALIGNMENT);
+
+       if (stat & CLASS0_INVALID_DMA_COMMAND_INTR)
+               spufs_handle_event(ctx, ctx->csa.dar, SPE_EVENT_INVALID_DMA);
+
+       if (stat & CLASS0_SPU_ERROR_INTR)
+               spufs_handle_event(ctx, ctx->csa.dar, SPE_EVENT_SPE_ERROR);
+
+       return -EIO;
 }
 
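For a context created without SPU_CREATE_EVENTS_ENABLED, the faults above now reach the controlling thread as signals carrying the si_code values set in spufs_handle_event(). A minimal userspace sketch (illustrative only, not part of this patch; the handler and its messages are hypothetical) showing how the SIGBUS cases can be told apart:

        #define _GNU_SOURCE
        #include <signal.h>
        #include <string.h>
        #include <unistd.h>

        /* Hypothetical SIGBUS handler for an SPE program run without events. */
        static void spe_sigbus_handler(int sig, siginfo_t *info, void *uctx)
        {
                const char *msg;

                switch (info->si_code) {
                case BUS_ADRERR:
                        /* SPE_EVENT_SPE_DATA_STORAGE: si_addr holds the faulting address */
                        msg = "SPE data storage fault\n";
                        break;
                case BUS_ADRALN:
                        /* SPE_EVENT_DMA_ALIGNMENT: no address available */
                        msg = "SPE DMA alignment fault\n";
                        break;
                case BUS_OBJERR:
                        /* SPE_EVENT_INVALID_DMA */
                        msg = "invalid SPE DMA command\n";
                        break;
                default:
                        msg = "unexpected SIGBUS\n";
                        break;
                }
                write(STDERR_FILENO, msg, strlen(msg)); /* async-signal-safe */
        }

        static void install_spe_sigbus_handler(void)
        {
                struct sigaction sa;

                memset(&sa, 0, sizeof(sa));
                sa.sa_sigaction = spe_sigbus_handler;
                sa.sa_flags = SA_SIGINFO;
                sigemptyset(&sa.sa_mask);
                sigaction(SIGBUS, &sa, NULL);
        }

SPE_EVENT_SPE_ERROR is delivered separately as SIGILL/ILL_ILLOPC, with si_addr pointing four bytes before the saved npc, and would need a SIGILL handler along the same lines.
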
 /*
         * in time, we can still expect to get the same fault
         * immediately after the context restore.
         */
-       if (ctx->state == SPU_STATE_RUNNABLE) {
-               ea = ctx->spu->dar;
-               dsisr = ctx->spu->dsisr;
-               ctx->spu->dar= ctx->spu->dsisr = 0;
-       } else {
-               ea = ctx->csa.priv1.mfc_dar_RW;
-               dsisr = ctx->csa.priv1.mfc_dsisr_RW;
-               ctx->csa.priv1.mfc_dar_RW = 0;
-               ctx->csa.priv1.mfc_dsisr_RW = 0;
-       }
+       ea = ctx->csa.dar;
+       dsisr = ctx->csa.dsisr;
 
        if (!(dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)))
                return 0;
                ret = spu_handle_mm_fault(current->mm, ea, dsisr, &flt);
 
        spu_acquire(ctx);
+
+       /*
+        * Clear dsisr under the context lock after handling the fault, so
+        * that time slicing will not preempt the context while the page
+        * fault handler is running.  The context switch code removes the
+        * mappings.
+        */
+       ctx->csa.dar = ctx->csa.dsisr = 0;
+
        /*
         * If we handled the fault successfully and are in runnable
         * state, restart the DMA.
                if (ctx->spu)
                        ctx->ops->restart_dma(ctx);
        } else
-               spufs_handle_dma_error(ctx, ea, SPE_EVENT_SPE_DATA_STORAGE);
+               spufs_handle_event(ctx, ea, SPE_EVENT_SPE_DATA_STORAGE);
 
        spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
        return ret;
 
 {
        struct spu_context *ctx = spu->ctx;
 
-       wake_up_all(&ctx->stop_wq);
+       /*
+        * It should be impossible to preempt a context while an exception
+        * is being processed, since the context switch code is specially
+        * coded to deal with interrupts.  But, just in case, sanity-check
+        * the context pointer.  It is OK to do nothing and return, since
+        * the exception will be regenerated when the context is resumed.
+        */
+       if (ctx) {
+               /* Copy exception arguments into the module-specific structure */
+               ctx->csa.class_0_pending = spu->class_0_pending;
+               ctx->csa.dsisr = spu->dsisr;
+               ctx->csa.dar = spu->dar;
+
+               /* ensure that the exception status has hit memory before a
+                * thread waiting on the context's stop queue is woken */
+               smp_wmb();
+
+               wake_up_all(&ctx->stop_wq);
+       }
+
+       /* Clear callback arguments from spu structure */
+       spu->class_0_pending = 0;
+       spu->dsisr = 0;
+       spu->dar = 0;
 }
 
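The smp_wmb() in spufs_stop_callback() orders the stores to ctx->csa against the wakeup, so that a thread coming off the context's stop_wq sees the exception state that caused it to be woken. A generic, kernel-style sketch of this publish-then-wake pattern (hypothetical structure and function names, not the actual spufs code paths):

        #include <linux/wait.h>

        struct fault_record {
                unsigned long           stat;   /* published exception status */
                unsigned long           dar;    /* published fault address */
                wait_queue_head_t       wq;
        };

        /* Writer (interrupt/callback side): publish the state, then wake. */
        static void publish_and_wake(struct fault_record *rec,
                                     unsigned long stat, unsigned long dar)
        {
                rec->dar = dar;
                rec->stat = stat;
                smp_wmb();              /* stores visible before the wakeup */
                wake_up_all(&rec->wq);
        }

        /* Waiter (run-loop side): the wait_event() condition re-reads the state. */
        static unsigned long wait_for_fault(struct fault_record *rec)
        {
                wait_event(rec->wq, rec->stat != 0);
                return rec->stat;
        }
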
 static inline int spu_stopped(struct spu_context *ctx, u32 *stat)
        if (ctx->state != SPU_STATE_RUNNABLE ||
            test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags))
                return 1;
-       pte_fault = spu->dsisr &
+       pte_fault = ctx->csa.dsisr &
            (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED);
-       return (!(*stat & SPU_STATUS_RUNNING) || pte_fault || spu->class_0_pending) ?
+       return (!(*stat & SPU_STATUS_RUNNING) || pte_fault || ctx->csa.class_0_pending) ?
                1 : 0;
 }
 
        return ret;
 }
 
-static inline int spu_process_events(struct spu_context *ctx)
-{
-       struct spu *spu = ctx->spu;
-       int ret = 0;
-
-       if (spu->class_0_pending)
-               ret = spu_irq_class_0_bottom(spu);
-       if (!ret && signal_pending(current))
-               ret = -ERESTARTSYS;
-       return ret;
-}
-
 long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
 {
        int ret;
                if (ret)
                        break;
 
+               ret = spufs_handle_class0(ctx);
+               if (ret)
+                       break;
+
                if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
                        ret = spu_reacquire_runnable(ctx, npc, &status);
                        if (ret)
                                goto out2;
                        continue;
                }
-               ret = spu_process_events(ctx);
+
+               if (signal_pending(current))
+                       ret = -ERESTARTSYS;
 
        } while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP |
                                      SPU_STATUS_STOPPED_BY_HALT |
 
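With SPU_CREATE_EVENTS_ENABLED, the same conditions come back through the event word of the spu_run system call (filled from ctx->event_return, which spufs_handle_class0() sets via spufs_handle_event()) rather than as signals. A rough userspace sketch, assuming a powerpc system where __NR_spu_run is defined (glibc has no wrapper; the helper name is made up for illustration):

        #include <stdio.h>
        #include <unistd.h>
        #include <sys/syscall.h>

        /* Hypothetical helper: run an SPE context and report any event bits. */
        static long run_spe_context(int ctx_fd)
        {
                unsigned int npc = 0;           /* entry point / resume address */
                unsigned int event = 0;         /* filled by the kernel on return */
                long status;

                status = syscall(__NR_spu_run, ctx_fd, &npc, &event);
                if (status < 0) {
                        perror("spu_run");
                        return status;
                }
                if (event)
                        fprintf(stderr, "SPE event mask 0x%x, npc 0x%x\n", event, npc);
                return status;                  /* SPU status register value */
        }
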
        spu->wbox_callback = spufs_wbox_callback;
        spu->stop_callback = spufs_stop_callback;
        spu->mfc_callback = spufs_mfc_callback;
-       spu->dma_callback = spufs_dma_callback;
        mb();
        spu_unmap_mappings(ctx);
        spu_restore(&ctx->csa, spu);
        spu->wbox_callback = NULL;
        spu->stop_callback = NULL;
        spu->mfc_callback = NULL;
-       spu->dma_callback = NULL;
        spu_associate_mm(spu, NULL);
        spu->pid = 0;
        spu->tgid = 0;
 
 
 /* fault handling */
 int spufs_handle_class1(struct spu_context *ctx);
+int spufs_handle_class0(struct spu_context *ctx);
 
 /* affinity */
 struct spu *affinity_check(struct spu_context *ctx);
 
        int rc;
 
        acquire_spu_lock(spu);          /* Step 1.     */
-       prev->dar = spu->dar;
-       prev->dsisr = spu->dsisr;
-       spu->dar = 0;
-       spu->dsisr = 0;
        rc = __do_spu_save(prev, spu);  /* Steps 2-53. */
        release_spu_lock(spu);
        if (rc != 0 && rc != 2 && rc != 6) {
        acquire_spu_lock(spu);
        harvest(NULL, spu);
        spu->slb_replace = 0;
-       new->dar = 0;
-       new->dsisr = 0;
-       spu->class_0_pending = 0;
        rc = __do_spu_restore(new, spu);
        release_spu_lock(spu);
        if (rc) {
 
        void (* ibox_callback)(struct spu *spu);
        void (* stop_callback)(struct spu *spu);
        void (* mfc_callback)(struct spu *spu);
-       void (* dma_callback)(struct spu *spu, int type);
 
        char irq_c0[8];
        char irq_c1[8];
 extern struct cbe_spu_info cbe_spu_info[];
 
 void spu_init_channels(struct spu *spu);
-int spu_irq_class_0_bottom(struct spu *spu);
-int spu_irq_class_1_bottom(struct spu *spu);
 void spu_irq_setaffinity(struct spu *spu, int cpu);
 
 void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,
 
        u64 spu_chnldata_RW[32];
        u32 spu_mailbox_data[4];
        u32 pu_mailbox_data[1];
-       u64 dar, dsisr;
+       u64 dar, dsisr, class_0_pending;
        unsigned long suspend_time;
        spinlock_t register_lock;
 };