/*
 * Page fault handler for SH with an MMU.
 *
 * Copyright (C) 1999  Niibe Yutaka
 * Copyright (C) 2003 - 2007  Paul Mundt
 *
 * Based on linux/arch/i386/mm/fault.c:
 *  Copyright (C) 1995  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/kgdb.h>
/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
					unsigned long writeaccess,
					unsigned long address)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int si_code;
	siginfo_t info;

	trace_hardirqs_on();
	local_irq_enable();

#ifdef CONFIG_SH_KGDB
	if (kgdb_nofault && kgdb_bus_err_hook)
		kgdb_bus_err_hook();
#endif

	tsk = current;
	mm = tsk->mm;
	si_code = SEGV_MAPERR;
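
	/*
	 * Addresses at or above TASK_SIZE are kernel-space faults
	 * (typically a vmalloc fault); those are repaired below by
	 * syncing against the reference page table, without touching
	 * the user mm at all.
	 */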
	if (unlikely(address >= TASK_SIZE)) {
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;

		pgd = get_TTB() + offset;
		pgd_k = swapper_pg_dir + offset;

		/* This will never happen with the folded page table. */
		if (!pgd_present(*pgd)) {
			if (!pgd_present(*pgd_k))
				goto bad_area_nosemaphore;
			set_pgd(pgd, *pgd_k);
			return;
		}

		pud = pud_offset(pgd, address);
		pud_k = pud_offset(pgd_k, address);
		if (pud_present(*pud) || !pud_present(*pud_k))
			goto bad_area_nosemaphore;
		set_pud(pud, *pud_k);

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_area_nosemaphore;
		set_pmd(pmd, *pmd_k);

		return;
	}
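
	/*
	 * Note that the lazy sync above copies top-level entries from
	 * swapper_pg_dir, which is kept authoritative for vmalloc
	 * space; it takes no locks, which is what makes it safe from
	 * interrupt context.
	 */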
	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto no_context;

	down_read(&mm->mmap_sem);
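
	/*
	 * find_vma() returns the first vma that ends above the faulting
	 * address; if the address lies below vm_start, the access may
	 * still be a legitimate stack extension (VM_GROWSDOWN).
	 */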
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	si_code = SEGV_ACCERR;
	if (writeaccess) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto bad_area;
	}
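
	/*
	 * si_code switched to SEGV_ACCERR above: past good_area the
	 * mapping exists, so any failure is a rights problem rather
	 * than a missing mapping.
	 */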
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
survive:
	switch (handle_mm_fault(mm, vma, address, writeaccess)) {
	case VM_FAULT_MINOR:
		tsk->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		tsk->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		goto do_sigbus;
	case VM_FAULT_OOM:
		goto out_of_memory;
	default:
		BUG();
	}

	up_read(&mm->mmap_sem);
	return;
/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	if (user_mode(regs)) {
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		info.si_code = si_code;
		info.si_addr = (void *)address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}
no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;
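
	/*
	 * fixup_exception() searches the exception table for the faulting
	 * pc and, on a match, rewrites regs->pc to the fixup handler;
	 * this is how copy_{to,from}_user() survives bad user pointers.
	 */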
/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */
	bust_spinlocks(1);

	if (oops_may_print()) {
		__typeof__(pte_val(__pte(0))) page;

		if (address < PAGE_SIZE)
			printk(KERN_ALERT "Unable to handle kernel NULL "
			       "pointer dereference");
		else
			printk(KERN_ALERT "Unable to handle kernel paging "
			       "request");
		printk(" at virtual address %08lx\n", address);
		printk(KERN_ALERT "pc = %08lx\n", regs->pc);
		page = (unsigned long)get_TTB();
		if (page) {
			page = ((__typeof__(page) *)__va(page))[address >>
								PGDIR_SHIFT];
			printk(KERN_ALERT "*pde = %08lx\n", page);
			if (page & _PAGE_PRESENT) {
				page &= PAGE_MASK;
				address &= 0x003ff000;
				page = ((__typeof__(page) *)
						__va(page))[address >>
							    PAGE_SHIFT];
				printk(KERN_ALERT "*pte = %08lx\n", page);
			}
		}
	}

	die("Oops", regs, writeaccess);
	bust_spinlocks(0);
	do_exit(SIGKILL);
/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_init(current)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (user_mode(regs))
		do_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);
	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void *)address;
	force_sig_info(SIGBUS, &info, tsk);
	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
}
#ifdef CONFIG_SH_STORE_QUEUES
/*
 * This is a special case for the SH-4 store queues, as pages for this
 * space still need to be faulted in before it's possible to flush the
 * store queue cache for writeout to the remapped region.
 */
#define P3_ADDR_MAX		(P4SEG_STORE_QUE + 0x04000000)
#else
#define P3_ADDR_MAX		P4SEG
#endif
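
/*
 * (P4SEG_STORE_QUE + 0x04000000 covers the 64MB store queue window at
 * the bottom of P4; without store queues, fast-path handling stops at
 * the P3/P4 boundary.)
 */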
/*
 * Called with interrupts disabled.
 */
asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
					 unsigned long writeaccess,
					 unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;
	struct mm_struct *mm = current->mm;
	spinlock_t *ptl = NULL;
	int ret = 1;

#ifdef CONFIG_SH_KGDB
	if (kgdb_nofault && kgdb_bus_err_hook)
		kgdb_bus_err_hook();
#endif
	/*
	 * We don't take page faults for P1, P2, and parts of P4, these
	 * are always mapped, whether it be due to legacy behaviour in
	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
	 */
	if (address >= P3SEG && address < P3_ADDR_MAX) {
		pgd = pgd_offset_k(address);
		mm = NULL;
	} else {
		if (unlikely(address >= TASK_SIZE || !mm))
			return 1;

		pgd = pgd_offset(mm, address);
	}
	pud = pud_offset(pgd, address);
	if (pud_none_or_clear_bad(pud))
		return 1;
	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		return 1;
	if (mm)
		pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	else
		pte = pte_offset_kernel(pmd, address);

	entry = *pte;
	if (unlikely(pte_none(entry) || pte_not_present(entry)))
		goto unlock;
	if (unlikely(writeaccess && !pte_write(entry)))
		goto unlock;
	if (writeaccess)
		entry = pte_mkdirty(entry);
	entry = pte_mkyoung(entry);
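
	/*
	 * SH has no hardware-managed dirty/accessed bits: the fast path
	 * emulates them by marking the PTE dirty on a write fault and
	 * young on any fault, before the updated entry is loaded into
	 * the TLB below.
	 */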
#ifdef CONFIG_CPU_SH4
	/*
	 * ITLB is not affected by "ldtlb" instruction.
	 * So, we need to flush the entry by ourselves.
	 */
	local_flush_tlb_one(get_asid(), address & PAGE_MASK);
#endif

	set_pte(pte, entry);
	update_mmu_cache(NULL, address, entry);

	ret = 0;
unlock:
	if (mm)
		pte_unmap_unlock(pte, ptl);

	return ret;
}
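
/*
 * Usage note: the low-level TLB-miss entry code is expected to try
 * __do_page_fault() first as a fast path that only reloads the TLB;
 * a nonzero return falls back to the full do_page_fault() slow path
 * above.
 */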