Merge branch 'for-2.6.28' of git://linux-nfs.org/~bfields/linux
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
index 0c776fdfbddae6800480dc0a3dc7be54103509b3..898d477e47c171e6930832d950d85f0a3bd9a617 100644
--- a/arch/sh/mm/fault_32.c
+++ b/arch/sh/mm/fault_32.c
@@ -2,7 +2,7 @@
  * Page fault handler for SH with an MMU.
  *
  *  Copyright (C) 1999  Niibe Yutaka
- *  Copyright (C) 2003 - 2007  Paul Mundt
+ *  Copyright (C) 2003 - 2008  Paul Mundt
  *
  *  Based on linux/arch/i386/mm/fault.c:
  *   Copyright (C) 1995  Linus Torvalds
@@ -15,6 +15,7 @@
 #include <linux/mm.h>
 #include <linux/hardirq.h>
 #include <linux/kprobes.h>
+#include <linux/marker.h>
 #include <asm/io_trapped.h>
 #include <asm/system.h>
 #include <asm/mmu_context.h>
@@ -37,10 +38,10 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
        int fault;
        siginfo_t info;
 
-#ifdef CONFIG_SH_KGDB
-       if (kgdb_nofault && kgdb_bus_err_hook)
-               kgdb_bus_err_hook();
-#endif
+       /*
+        * We don't bother with any notifier callbacks here, as they are
+        * all handled through the __do_page_fault() fast-path.
+        */
 
        tsk = current;
        si_code = SEGV_MAPERR;
@@ -61,7 +62,6 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
                pgd = get_TTB() + offset;
                pgd_k = swapper_pg_dir + offset;
 
-               /* This will never happen with the folded page table. */
                if (!pgd_present(*pgd)) {
                        if (!pgd_present(*pgd_k))
                                goto bad_area_nosemaphore;
@@ -71,9 +71,13 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 
                pud = pud_offset(pgd, address);
                pud_k = pud_offset(pgd_k, address);
-               if (pud_present(*pud) || !pud_present(*pud_k))
-                       goto bad_area_nosemaphore;
-               set_pud(pud, *pud_k);
+
+               if (!pud_present(*pud)) {
+                       if (!pud_present(*pud_k))
+                               goto bad_area_nosemaphore;
+                       set_pud(pud, *pud_k);
+                       return;
+               }
 
                pmd = pmd_offset(pud, address);
                pmd_k = pmd_offset(pud_k, address);
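
The hunk above reworks the vmalloc-area fix-up: a pud entry missing from the faulting page table is copied from the kernel reference table and the handler returns early, while an already-present pud now falls through to the pmd check instead of being treated as a bad area. A minimal user-space sketch of this "copy from the reference table" pattern, with illustrative names only (entry_t and sync_from_reference() are not kernel API):

    #include <stdio.h>

    typedef struct { unsigned long val; } entry_t;

    static int entry_present(entry_t e) { return e.val != 0; }

    /*
     * Returns -1 for a genuine bad area, 0 when the entry was copied from
     * the reference table (fault fixed up, caller returns early), and 1
     * when the entry was already present and the walk should continue.
     */
    static int sync_from_reference(entry_t *cur, const entry_t *ref)
    {
            if (!entry_present(*cur)) {
                    if (!entry_present(*ref))
                            return -1;      /* bad_area_nosemaphore */
                    *cur = *ref;            /* set_pud(pud, *pud_k) */
                    return 0;               /* early return, fault fixed up */
            }
            return 1;                       /* continue to the pmd level */
    }

    int main(void)
    {
            entry_t cur = { 0 }, ref = { 0x1000 };
            printf("%d\n", sync_from_reference(&cur, &ref));  /* 0: copied */
            printf("%d\n", sync_from_reference(&cur, &ref));  /* 1: present */
            return 0;
    }
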
@@ -242,6 +246,25 @@ do_sigbus:
                goto no_context;
 }
 
+static inline int notify_page_fault(struct pt_regs *regs, int trap)
+{
+       int ret = 0;
+
+       trace_mark(kernel_arch_trap_entry, "trap_id %d ip #p%ld",
+                  trap >> 5, instruction_pointer(regs));
+
+#ifdef CONFIG_KPROBES
+       if (!user_mode(regs)) {
+               preempt_disable();
+               if (kprobe_running() && kprobe_fault_handler(regs, trap))
+                       ret = 1;
+               preempt_enable();
+       }
+#endif
+
+       return ret;
+}
+
 #ifdef CONFIG_SH_STORE_QUEUES
 /*
  * This is a special case for the SH-4 store queues, as pages for this
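
notify_page_fault(), added above, fires the kernel_arch_trap_entry marker and then gives kprobes a chance to consume kernel-mode faults: the check is restricted to !user_mode(regs) and runs under preempt_disable(), since kprobe_running() consults per-CPU state. The entry marker is paired with the kernel_arch_trap_exit marker emitted at the out: label added to __do_page_fault() further down.
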
@@ -265,12 +288,18 @@ asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
        pmd_t *pmd;
        pte_t *pte;
        pte_t entry;
+       int ret = 0;
+
+       if (notify_page_fault(regs, lookup_exception_vector()))
+               goto out;
 
 #ifdef CONFIG_SH_KGDB
        if (kgdb_nofault && kgdb_bus_err_hook)
                kgdb_bus_err_hook();
 #endif
 
+       ret = 1;
+
        /*
         * We don't take page faults for P1, P2, and parts of P4, these
         * are always mapped, whether it be due to legacy behaviour in
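
The comment that continues into the next hunk refers to the SH-4 virtual address-space split: P1 and P2 are fixed identity mappings, P3 is the TLB-mapped kernel region this fast path has to walk, and P4 is control space, of which the store-queue window handled above is the notable fault-taking exception. A small sketch of the conventional 29-bit layout (segment boundaries quoted from the SH-4 architecture, not from this file):

    #include <stdio.h>

    /* Classify an SH-4 virtual address into its segment (illustrative only). */
    static const char *sh4_segment(unsigned long addr)
    {
            if (addr < 0x80000000UL) return "P0/U0 (user, TLB-mapped)";
            if (addr < 0xa0000000UL) return "P1 (identity-mapped, cached)";
            if (addr < 0xc0000000UL) return "P2 (identity-mapped, uncached)";
            if (addr < 0xe0000000UL) return "P3 (kernel, TLB-mapped)";
            return "P4 (control space, incl. store queues)";
    }

    int main(void)
    {
            printf("%s\n", sh4_segment(0xc0001000UL));  /* vmalloc-style address */
            return 0;
    }
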
@@ -280,24 +309,23 @@ asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
                pgd = pgd_offset_k(address);
        } else {
                if (unlikely(address >= TASK_SIZE || !current->mm))
-                       return 1;
+                       goto out;
 
                pgd = pgd_offset(current->mm, address);
        }
 
        pud = pud_offset(pgd, address);
        if (pud_none_or_clear_bad(pud))
-               return 1;
+               goto out;
        pmd = pmd_offset(pud, address);
        if (pmd_none_or_clear_bad(pmd))
-               return 1;
-
+               goto out;
        pte = pte_offset_kernel(pmd, address);
        entry = *pte;
        if (unlikely(pte_none(entry) || pte_not_present(entry)))
-               return 1;
+               goto out;
        if (unlikely(writeaccess && !pte_write(entry)))
-               return 1;
+               goto out;
 
        if (writeaccess)
                entry = pte_mkdirty(entry);
@@ -314,5 +342,8 @@ asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
        set_pte(pte, entry);
        update_mmu_cache(NULL, address, entry);
 
-       return 0;
+       ret = 0;
+out:
+       trace_mark(kernel_arch_trap_exit, MARK_NOARGS);
+       return ret;
 }
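
The final hunks convert every early "return 1" in __do_page_fault() into "goto out", so the kernel_arch_trap_exit marker fires on all paths and always pairs with the entry marker from notify_page_fault(); the return values are unchanged, since the paths that used to return 1 now set ret = 1 (the default after the KGDB check) and jump to out:, and the success path still returns 0. A minimal sketch of that single-exit pattern; trace_enter() and trace_exit() stand in for the trace_mark() calls and are not real kernel API:

    #include <stdio.h>

    static void trace_enter(void) { puts("kernel_arch_trap_entry"); }
    static void trace_exit(void)  { puts("kernel_arch_trap_exit"); }

    static int fast_path(int can_handle)
    {
            int ret = 0;

            trace_enter();

            if (!can_handle) {
                    ret = 1;        /* punt to the slow path */
                    goto out;       /* every early exit funnels through out: */
            }

            /* ... fix up the TLB entry here ... */
            ret = 0;
    out:
            trace_exit();           /* fires on both outcomes */
            return ret;
    }

    int main(void)
    {
            printf("handled=%d\n", fast_path(1) == 0);
            printf("handled=%d\n", fast_path(0) == 0);
            return 0;
    }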