/*
 * Page fault handler for SH with an MMU.
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2003 - 2008  Paul Mundt
 *
 * Based on linux/arch/i386/mm/fault.c:
 *  Copyright (C) 1995  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <asm/io_trapped.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
	/* Let kprobes claim a fault it caused (e.g. while single-stepping) */
	if (!user_mode(regs)) {
		if (kprobe_running() && kprobe_fault_handler(regs, trap))
			return 1;
	}

	return 0;
}
/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
					unsigned long writeaccess,
					unsigned long address)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int si_code;
	int fault;
	siginfo_t info;

	tsk = current;
	mm = tsk->mm;

	if (notify_page_fault(regs, lookup_exception_vector()))
		return;

#ifdef CONFIG_SH_KGDB
	if (kgdb_nofault && kgdb_bus_err_hook)
		kgdb_bus_err_hook();
#endif

	si_code = SEGV_MAPERR;
	if (unlikely(address >= TASK_SIZE)) {
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;

		pgd = get_TTB() + offset;
		pgd_k = swapper_pg_dir + offset;

		if (!pgd_present(*pgd)) {
			if (!pgd_present(*pgd_k))
				goto bad_area_nosemaphore;
			set_pgd(pgd, *pgd_k);
			return;
		}

		pud = pud_offset(pgd, address);
		pud_k = pud_offset(pgd_k, address);

		if (!pud_present(*pud)) {
			if (!pud_present(*pud_k))
				goto bad_area_nosemaphore;
			set_pud(pud, *pud_k);
			return;
		}

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_area_nosemaphore;
		set_pmd(pmd, *pmd_k);
		return;
	}
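	/*
	 * Anything below TASK_SIZE is a genuine user-space fault and
	 * needs the full VMA-based handling below, under mmap_sem.
	 */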
	/* Only enable interrupts if they were on before the fault */
	if ((regs->sr & SR_IMASK) != SR_IMASK) {
		trace_hardirqs_on();
		local_irq_enable();
	}

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto no_context;

	down_read(&mm->mmap_sem);
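	/*
	 * mmap_sem is held for read across the VMA lookup, any stack
	 * expansion, and the call into handle_mm_fault() below.
	 */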
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	si_code = SEGV_ACCERR;
	if (writeaccess) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto bad_area;
	}
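	/*
	 * The protection checks above report SEGV_ACCERR (bad permissions
	 * on an existing mapping); everything before reported SEGV_MAPERR.
	 */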
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
survive:
	fault = handle_mm_fault(mm, vma, address, writeaccess);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;

	up_read(&mm->mmap_sem);
	return;
	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	if (user_mode(regs)) {
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		info.si_code = si_code;
		info.si_addr = (void *)address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}
no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	if (handle_trapped_io(regs, address))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	if (oops_may_print()) {
		unsigned long page;

		if (address < PAGE_SIZE)
			printk(KERN_ALERT "Unable to handle kernel NULL "
			       "pointer dereference");
		else
			printk(KERN_ALERT "Unable to handle kernel paging "
			       "request");
		printk(" at virtual address %08lx\n", address);
		printk(KERN_ALERT "pc = %08lx\n", regs->pc);
		page = (unsigned long)get_TTB();
		if (page) {
			page = ((__typeof__(page) *)page)[address >> PGDIR_SHIFT];
			printk(KERN_ALERT "*pde = %08lx\n", page);
			if (page & _PAGE_PRESENT) {
				page &= PAGE_MASK;
				/* isolate the pte index (4K pages, 2-level) */
				address &= 0x003ff000;
				page = ((__typeof__(page) *)
						__va(page))[address >>
							    PAGE_SHIFT];
				printk(KERN_ALERT "*pte = %08lx\n", page);
			}
		}
	}

	die("Oops", regs, writeaccess);
	bust_spinlocks(0);
	do_exit(SIGKILL);
	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_global_init(current)) {
		/* init must not be killed; back off and retry the fault */
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (user_mode(regs))
		do_group_exit(SIGKILL);
	goto no_context;
do_sigbus:
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void *)address;
	force_sig_info(SIGBUS, &info, tsk);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
}
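/*
 * Everything below this point is the fast-path TLB miss handler. The
 * exception entry code is expected to try __do_page_fault() first and
 * only fall back to the full do_page_fault() above when it returns
 * nonzero (i.e. no valid PTE could be refilled from the page tables).
 */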
#ifdef CONFIG_SH_STORE_QUEUES
/*
 * This is a special case for the SH-4 store queues, as pages for this
 * space still need to be faulted in before it's possible to flush the
 * store queue cache for writeout to the remapped region.
 */
#define P3_ADDR_MAX		(P4SEG_STORE_QUE + 0x04000000)
#else
#define P3_ADDR_MAX		P4SEG
#endif
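/*
 * The 0x04000000 above covers the 64MB store queue mapping area past
 * P4SEG_STORE_QUE, so faults on store queue addresses are refilled too.
 */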
/*
 * Called with interrupts disabled.
 */
asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
					 unsigned long writeaccess,
					 unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;
	int ret = 0;

	if (notify_page_fault(regs, lookup_exception_vector()))
		goto out;

#ifdef CONFIG_SH_KGDB
	if (kgdb_nofault && kgdb_bus_err_hook)
		kgdb_bus_err_hook();
#endif

	ret = 1;
	/*
	 * We don't take page faults for P1, P2, and parts of P4, these
	 * are always mapped, whether it be due to legacy behaviour in
	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
	 */
	if (address >= P3SEG && address < P3_ADDR_MAX) {
		pgd = pgd_offset_k(address);
	} else {
		if (unlikely(address >= TASK_SIZE || !current->mm))
			goto out;

		pgd = pgd_offset(current->mm, address);
	}
	pud = pud_offset(pgd, address);
	if (pud_none_or_clear_bad(pud))
		goto out;
	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		goto out;

	pte = pte_offset_kernel(pmd, address);
	entry = *pte;
	if (unlikely(pte_none(entry) || pte_not_present(entry)))
		goto out;
	if (unlikely(writeaccess && !pte_write(entry)))
		goto out;

	/* TLB refill is software-managed; mark dirty/young bits here */
	if (writeaccess)
		entry = pte_mkdirty(entry);
	entry = pte_mkyoung(entry);

#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SMP)
	/*
	 * ITLB is not affected by "ldtlb" instruction.
	 * So, we need to flush the entry by ourselves.
	 */
	local_flush_tlb_one(get_asid(), address & PAGE_MASK);
#endif

	set_pte(pte, entry);
	update_mmu_cache(NULL, address, entry);

	ret = 0;
out:
	return ret;
}