arch/sh/mm/fault.c
/*
 * Page fault handler for SH with an MMU.
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2003 - 2007  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/fault.c:
 *   Copyright (C) 1995  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/kgdb.h>

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
					unsigned long writeaccess,
					unsigned long address)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int si_code;
	siginfo_t info;

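	/*
	 * The low-level exception entry leaves interrupts disabled;
	 * re-enable them here, as handling the fault below may sleep.
	 */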
	trace_hardirqs_on();
	local_irq_enable();

#ifdef CONFIG_SH_KGDB
	if (kgdb_nofault && kgdb_bus_err_hook)
		kgdb_bus_err_hook();
#endif

	tsk = current;
	mm = tsk->mm;
	si_code = SEGV_MAPERR;

	if (unlikely(address >= TASK_SIZE)) {
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;

		pgd = get_TTB() + offset;
		pgd_k = swapper_pg_dir + offset;

		/* This will never happen with the folded page table. */
		if (!pgd_present(*pgd)) {
			if (!pgd_present(*pgd_k))
				goto bad_area_nosemaphore;
			set_pgd(pgd, *pgd_k);
			return;
		}

		pud = pud_offset(pgd, address);
		pud_k = pud_offset(pgd_k, address);
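		/*
		 * If our entry is already populated, or the reference
		 * table has nothing to copy from, treat this as a real
		 * bad access to a kernel address rather than a missing
		 * sync of the page tables.
		 */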
		if (pud_present(*pud) || !pud_present(*pud_k))
			goto bad_area_nosemaphore;
		set_pud(pud, *pud_k);

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_area_nosemaphore;
		set_pmd(pmd, *pmd_k);

		return;
	}

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto no_context;

	down_read(&mm->mmap_sem);

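	/*
	 * find_vma() returns the first VMA with vm_end above the faulting
	 * address. If the address lies below vm_start, the access is only
	 * valid when the VMA may grow downwards (the stack), in which case
	 * we try to expand it to cover the faulting address.
	 */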
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	si_code = SEGV_ACCERR;
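	/*
	 * A write fault requires a writable VMA; any other fault is fine
	 * as long as the VMA permits some form of access at all (read,
	 * exec or write).
	 */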
	if (writeaccess) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
survive:
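	/*
	 * handle_mm_fault() does the real work: a minor fault was satisfied
	 * without I/O, a major fault needed the page read in, and both are
	 * accounted to the task. SIGBUS and OOM results are handled below.
	 */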
	switch (handle_mm_fault(mm, vma, address, writeaccess)) {
		case VM_FAULT_MINOR:
			tsk->min_flt++;
			break;
		case VM_FAULT_MAJOR:
			tsk->maj_flt++;
			break;
		case VM_FAULT_SIGBUS:
			goto do_sigbus;
		case VM_FAULT_OOM:
			goto out_of_memory;
		default:
			BUG();
	}

	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	if (user_mode(regs)) {
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		info.si_code = si_code;
		info.si_addr = (void *) address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault?  */
	if (fixup_exception(regs))
		return;

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */

	bust_spinlocks(1);

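	/*
	 * Walk the page tables by hand and dump the offending top-level
	 * entry and PTE for the faulting address as part of the oops report.
	 */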
	if (oops_may_print()) {
		__typeof__(pte_val(__pte(0))) page;

		if (address < PAGE_SIZE)
			printk(KERN_ALERT "Unable to handle kernel NULL "
					  "pointer dereference");
		else
			printk(KERN_ALERT "Unable to handle kernel paging "
					  "request");
		printk(" at virtual address %08lx\n", address);
		printk(KERN_ALERT "pc = %08lx\n", regs->pc);
		page = (unsigned long)get_TTB();
		if (page) {
			page = ((__typeof__(page) *) __va(page))[address >>
								 PGDIR_SHIFT];
			printk(KERN_ALERT "*pde = %08lx\n", page);
			if (page & _PAGE_PRESENT) {
				page &= PAGE_MASK;
				address &= 0x003ff000;
				page = ((__typeof__(page) *)
						__va(page))[address >>
							    PAGE_SHIFT];
				printk(KERN_ALERT "*pte = %08lx\n", page);
			}
		}
	}

	die("Oops", regs, writeaccess);
	bust_spinlocks(0);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
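	/*
	 * init cannot be killed: drop mmap_sem, give other tasks a chance
	 * to free some memory, and then retry the fault.
	 */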
	if (is_init(current)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (user_mode(regs))
		do_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void *)address;
	force_sig_info(SIGBUS, &info, tsk);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
}

#ifdef CONFIG_SH_STORE_QUEUES
/*
 * This is a special case for the SH-4 store queues, as pages for this
 * space still need to be faulted in before it's possible to flush the
 * store queue cache for writeout to the remapped region.
 */
#define P3_ADDR_MAX		(P4SEG_STORE_QUE + 0x04000000)
#else
#define P3_ADDR_MAX		P4SEG
#endif

/*
 * Called with interrupts disabled.
 */
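/*
 * This is the fast-path handler for TLB misses: if a valid PTE already
 * exists for the faulting address, it is loaded into the TLB and 0 is
 * returned. A non-zero return tells the low-level caller to fall back
 * to the full do_page_fault() slow path above.
 */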
asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
					 unsigned long writeaccess,
					 unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;
	struct mm_struct *mm = current->mm;
	spinlock_t *ptl = NULL;
	int ret = 1;

#ifdef CONFIG_SH_KGDB
	if (kgdb_nofault && kgdb_bus_err_hook)
		kgdb_bus_err_hook();
#endif

	/*
	 * We don't take page faults for P1, P2, and parts of P4, these
	 * are always mapped, whether it be due to legacy behaviour in
	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
	 */
	if (address >= P3SEG && address < P3_ADDR_MAX) {
		pgd = pgd_offset_k(address);
		mm = NULL;
	} else {
		if (unlikely(address >= TASK_SIZE || !mm))
			return 1;

		pgd = pgd_offset(mm, address);
	}

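	/*
	 * Walk down to the PTE for the faulting address; if any
	 * intermediate level is missing, return non-zero so the slow
	 * path can handle (or reject) the fault.
	 */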
	pud = pud_offset(pgd, address);
	if (pud_none_or_clear_bad(pud))
		return 1;
	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		return 1;

	if (mm)
		pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	else
		pte = pte_offset_kernel(pmd, address);

	entry = *pte;
	if (unlikely(pte_none(entry) || pte_not_present(entry)))
		goto unlock;
	if (unlikely(writeaccess && !pte_write(entry)))
		goto unlock;

	if (writeaccess)
		entry = pte_mkdirty(entry);
	entry = pte_mkyoung(entry);

#ifdef CONFIG_CPU_SH4
	/*
	 * ITLB is not affected by "ldtlb" instruction.
	 * So, we need to flush the entry by ourselves.
	 */
	local_flush_tlb_one(get_asid(), address & PAGE_MASK);
#endif

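	/*
	 * Write the updated (young/dirty) PTE back and let
	 * update_mmu_cache() load the new entry into the TLB.
	 */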
	set_pte(pte, entry);
	update_mmu_cache(NULL, address, entry);
	ret = 0;
unlock:
	if (mm)
		pte_unmap_unlock(pte, ptl);
	return ret;
}