2 * linux/arch/arm/mach-omap2/mmu.c
4 * Support for non-MPU OMAP1 MMUs.
6 * Copyright (C) 2002-2005 Nokia Corporation
8 * Written by Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
9 * and Paul Mundt <paul.mundt@nokia.com>
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 #include <linux/types.h>
26 #include <linux/init.h>
27 #include <linux/rwsem.h>
28 #include <linux/device.h>
29 #include <linux/kernel.h>
31 #include <linux/interrupt.h>
32 #include <linux/err.h>
34 #include <asm/tlbflush.h>
/* One DMA-capable page backing the DSP exception-vector area; allocated
 * in omap1_mmu_startup() and freed in omap1_mmu_shutdown(). */
36 static void *dspvect_page;
/* Address at which the vector page is mapped for the DSP (presumably a
 * DSP-side byte address -- confirm against omap_mmu_to_virt() usage). */
37 #define DSP_INIT_PAGE 0xfff000
/*
 * Fault-status bits treated as real faults.  Status bits outside this
 * mask are logged at debug level and otherwise ignored (see
 * omap1_mmu_interrupt()).
 */
39 #define MMUFAULT_MASK (OMAP_MMU_FAULT_ST_PERM |\
40 OMAP_MMU_FAULT_ST_TLB_MISS |\
41 OMAP_MMU_FAULT_ST_TRANS)
/*
 * get_cam_l_va_mask - return the CAM_L register's VA tag mask for the
 * given page size.
 * @pgsz: one of the OMAP_MMU_CAM_PAGESIZE_* values.
 *
 * Smaller pages need more low-order VA tag bits, so each size selects a
 * different L2 tag mask in addition to the common L1 tag mask.
 */
43 static unsigned int get_cam_l_va_mask(u16 pgsz)
46 case OMAP_MMU_CAM_PAGESIZE_1MB:
47 return OMAP_MMU_CAM_L_VA_TAG_L1_MASK |
48 OMAP_MMU_CAM_L_VA_TAG_L2_MASK_1MB;
49 case OMAP_MMU_CAM_PAGESIZE_64KB:
50 return OMAP_MMU_CAM_L_VA_TAG_L1_MASK |
51 OMAP_MMU_CAM_L_VA_TAG_L2_MASK_64KB;
52 case OMAP_MMU_CAM_PAGESIZE_4KB:
53 return OMAP_MMU_CAM_L_VA_TAG_L1_MASK |
54 OMAP_MMU_CAM_L_VA_TAG_L2_MASK_4KB;
55 case OMAP_MMU_CAM_PAGESIZE_1KB:
56 return OMAP_MMU_CAM_L_VA_TAG_L1_MASK |
57 OMAP_MMU_CAM_L_VA_TAG_L2_MASK_1KB;
/*
 * get_cam_va_mask - full 32-bit virtual-address mask for a page size:
 * CAM_H tags VA[31:22] (hence the << 22), CAM_L holds the
 * size-dependent tag bits starting at VA bit 6.
 */
62 #define get_cam_va_mask(pgsz) \
63 ((u32)OMAP_MMU_CAM_H_VA_TAG_H_MASK << 22 | \
64 (u32)get_cam_l_va_mask(pgsz) << 6)
/* Reference count of outstanding requests for DSP internal memory;
 * balanced by omap1_mmu_mem_enable()/omap1_mmu_mem_disable(). */
66 static int intmem_usecount;
/*
 * dsp_mem_usecount_clear - recover from unbalanced internal-memory
 * request/release pairs: if the refcount is nonzero, warn and release
 * the DSP memory so the count is effectively reset.
 */
69 void dsp_mem_usecount_clear(void)
71 if (intmem_usecount != 0) {
73 "MMU: unbalanced memory request/release detected.\n"
74 " intmem_usecount is not zero at where "
75 "it should be! ... fixed to be zero.\n");
/* release the leaked request on the DSP side */
77 omap_dsp_release_mem();
80 EXPORT_SYMBOL_GPL(dsp_mem_usecount_clear);
/*
 * omap1_mmu_mem_enable - take a reference on DSP internal memory when
 * @addr falls inside it; the first user actually requests the memory
 * from the DSP side via omap_dsp_request_mem().
 */
82 static int omap1_mmu_mem_enable(struct omap_mmu *mmu, void *addr)
86 if (omap_mmu_internal_memory(mmu, addr)) {
/* first user claims the memory; later users just bump the count */
87 if (intmem_usecount++ == 0)
88 ret = omap_dsp_request_mem();
/*
 * omap1_mmu_mem_disable - drop a reference taken by
 * omap1_mmu_mem_enable(); the last user releases the DSP memory.
 */
95 static int omap1_mmu_mem_disable(struct omap_mmu *mmu, void *addr)
99 if (omap_mmu_internal_memory(mmu, addr)) {
/* last user returns the memory to the DSP */
100 if (--intmem_usecount == 0)
101 omap_dsp_release_mem();
/*
 * omap1_mmu_read_tlb - latch the currently-selected TLB entry into the
 * read registers and copy its CAM/RAM halves into @cr.
 */
109 omap1_mmu_read_tlb(struct omap_mmu *mmu, struct cam_ram_regset *cr)
111 /* read a TLB entry */
112 omap_mmu_write_reg(mmu, OMAP_MMU_LD_TLB_RD, OMAP_MMU_LD_TLB);
114 cr->cam_h = omap_mmu_read_reg(mmu, OMAP_MMU_READ_CAM_H);
115 cr->cam_l = omap_mmu_read_reg(mmu, OMAP_MMU_READ_CAM_L);
116 cr->ram_h = omap_mmu_read_reg(mmu, OMAP_MMU_READ_RAM_H);
117 cr->ram_l = omap_mmu_read_reg(mmu, OMAP_MMU_READ_RAM_L);
/*
 * omap1_mmu_load_tlb - write the CAM/RAM halves of @cr into the MMU's
 * entry registers so the entry can be loaded into the TLB.
 */
121 omap1_mmu_load_tlb(struct omap_mmu *mmu, struct cam_ram_regset *cr)
123 /* Set the CAM and RAM entries */
124 omap_mmu_write_reg(mmu, cr->cam_h, OMAP_MMU_CAM_H);
125 omap_mmu_write_reg(mmu, cr->cam_l, OMAP_MMU_CAM_L);
126 omap_mmu_write_reg(mmu, cr->ram_h, OMAP_MMU_RAM_H);
127 omap_mmu_write_reg(mmu, cr->ram_l, OMAP_MMU_RAM_L);
/*
 * omap1_mmu_show - dump every TLB entry into @buf (one line per entry:
 * preserved/valid flags, page size, tagged VA, PA and access
 * permission), annotating the lock base and victim positions from
 * @tlb_lock.  Returns the number of bytes written into @buf.
 */
130 static ssize_t omap1_mmu_show(struct omap_mmu *mmu, char *buf,
131 struct omap_mmu_tlb_lock *tlb_lock)
135 len = sprintf(buf, "P: preserved, V: valid\n"
136 "ety P V size cam_va ram_pa ap\n");
137 /* 00: P V 4KB 0x300000 0x10171800 FA */
/* select each entry in turn via the victim field and read it back */
139 for (i = 0; i < mmu->nr_tlb_entries; i++) {
140 struct omap_mmu_tlb_entry ent;
141 struct cam_ram_regset cr;
142 struct omap_mmu_tlb_lock entry_lock;
143 char *pgsz_str, *ap_str;
145 /* read a TLB entry */
146 entry_lock.base = tlb_lock->base;
147 entry_lock.victim = i;
148 omap_mmu_read_tlb(mmu, &entry_lock, &cr);
/* decode the raw CAM/RAM register pair into a tlb_entry */
150 ent.pgsz = cr.cam_l & OMAP_MMU_CAM_PAGESIZE_MASK;
151 ent.prsvd = cr.cam_l & OMAP_MMU_CAM_P;
152 ent.valid = cr.cam_l & OMAP_MMU_CAM_V;
153 ent.ap = cr.ram_l & OMAP_MMU_RAM_L_AP_MASK;
154 ent.va = (u32)(cr.cam_h & OMAP_MMU_CAM_H_VA_TAG_H_MASK) << 22 |
155 (u32)(cr.cam_l & get_cam_l_va_mask(ent.pgsz)) << 6;
156 ent.pa = (unsigned long)cr.ram_h << 16 |
157 (cr.ram_l & OMAP_MMU_RAM_L_RAM_LSB_MASK);
/* human-readable page-size and access-permission labels */
159 pgsz_str = (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_1MB) ? " 1MB":
160 (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_64KB) ? "64KB":
161 (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_4KB) ? " 4KB":
162 (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_1KB) ? " 1KB":
164 ap_str = (ent.ap == OMAP_MMU_RAM_L_AP_RO) ? "RO":
165 (ent.ap == OMAP_MMU_RAM_L_AP_FA) ? "FA":
166 (ent.ap == OMAP_MMU_RAM_L_AP_NA) ? "NA":
169 if (i == tlb_lock->base)
170 len += sprintf(buf + len, "lock base = %d\n",
172 if (i == tlb_lock->victim)
173 len += sprintf(buf + len, "victim = %d\n",
175 len += sprintf(buf + len,
176 /* 00: P V 4KB 0x300000 0x10171800 FA */
177 "%02d: %c %c %s 0x%06lx 0x%08lx %s\n",
179 ent.prsvd ? 'P' : ' ',
180 ent.valid ? 'V' : ' ',
181 pgsz_str, ent.va, ent.pa, ap_str);
/*
 * exmap_setup_preserved_entries - install the preserved TLB mappings
 * the DSP always needs, including the exception-vector page at
 * DSP_INIT_PAGE; the caller records the returned entry count as
 * mmu->nr_exmap_preserved.
 */
187 static int exmap_setup_preserved_entries(struct omap_mmu *mmu)
191 exmap_setup_preserved_mem_page(mmu, dspvect_page, DSP_INIT_PAGE, n++);
/*
 * exmap_clear_preserved_entries - undo exmap_setup_preserved_entries()
 * by unmapping the preserved vector page.
 */
196 static void exmap_clear_preserved_entries(struct omap_mmu *mmu)
198 exmap_clear_mem_page(mmu, DSP_INIT_PAGE);
/*
 * omap1_mmu_startup - per-MMU init: allocate one DMA page for the DSP
 * vector table and install the preserved TLB entries that map it.
 */
201 static int omap1_mmu_startup(struct omap_mmu *mmu)
203 dspvect_page = (void *)__get_dma_pages(GFP_KERNEL, 0);
204 if (dspvect_page == NULL) {
205 printk(KERN_ERR "MMU: failed to allocate memory "
206 "for dsp vector table\n");
/* remember how many preserved entries exist for later teardown */
210 mmu->nr_exmap_preserved = exmap_setup_preserved_entries(mmu);
/*
 * omap1_mmu_shutdown - reverse omap1_mmu_startup(): clear the preserved
 * TLB entries and free the vector page, flushing the kernel's own TLB
 * mapping of it first.  Holds exmap_sem while touching the mapping.
 */
215 static void omap1_mmu_shutdown(struct omap_mmu *mmu)
217 exmap_clear_preserved_entries(mmu);
219 if (dspvect_page != NULL) {
222 down_read(&mmu->exmap_sem);
/* drop any stale kernel-side translation before freeing the page */
224 virt = (unsigned long)omap_mmu_to_virt(mmu, DSP_INIT_PAGE);
225 flush_tlb_kernel_range(virt, virt + PAGE_SIZE);
226 free_page((unsigned long)dspvect_page);
229 up_read(&mmu->exmap_sem);
/*
 * omap1_mmu_cam_va - reconstruct the 32-bit virtual address tagged in a
 * CAM register pair: high tag from CAM_H, low tag from CAM_L using the
 * mask appropriate for the entry's page size.
 */
233 static inline unsigned long omap1_mmu_cam_va(struct cam_ram_regset *cr)
235 unsigned int page_size = cr->cam_l & OMAP_MMU_CAM_PAGESIZE_MASK;
237 return (u32)(cr->cam_h & OMAP_MMU_CAM_H_VA_TAG_H_MASK) << 22 |
238 (u32)(cr->cam_l & get_cam_l_va_mask(page_size)) << 6;
/*
 * omap1_mmu_cam_ram_alloc - build a CAM/RAM register set describing
 * @entry (VA, PA, page size, preserved bit, access permission).
 * Returns ERR_PTR(-EINVAL) if the VA is not aligned to the page size;
 * on success the caller owns the kmalloc'd regset and must free it.
 */
241 static struct cam_ram_regset *
242 omap1_mmu_cam_ram_alloc(struct omap_mmu_tlb_entry *entry)
244 struct cam_ram_regset *cr;
/* VA must have no bits below the page-size granularity */
246 if (entry->va & ~(get_cam_va_mask(entry->pgsz))) {
247 printk(KERN_ERR "MMU: mapping vadr (0x%06lx) is not on an "
248 "aligned boundary\n", entry->va);
249 return ERR_PTR(-EINVAL);
252 cr = kmalloc(sizeof(struct cam_ram_regset), GFP_KERNEL);
/* NOTE(review): cr appears to be dereferenced without a NULL check
 * after kmalloc() -- confirm allocation failure is handled. */
254 cr->cam_h = entry->va >> 22;
255 cr->cam_l = (entry->va >> 6 & get_cam_l_va_mask(entry->pgsz)) |
256 entry->prsvd | entry->pgsz;
257 cr->ram_h = entry->pa >> 16;
258 cr->ram_l = (entry->pa & OMAP_MMU_RAM_L_RAM_LSB_MASK) | entry->ap;
/* omap1_mmu_cam_ram_valid - nonzero iff the entry's CAM valid bit is set. */
263 static inline int omap1_mmu_cam_ram_valid(struct cam_ram_regset *cr)
265 return cr->cam_l & OMAP_MMU_CAM_V;
/*
 * omap1_mmu_interrupt - MMU fault handler.  Reads the fault status and
 * the split (high/low) fault address; faults masked out by
 * MMUFAULT_MASK are only logged at debug level and the IRQ re-enabled,
 * while real faults are reported and deferred to mmu->irq_work.
 */
268 static void omap1_mmu_interrupt(struct omap_mmu *mmu)
270 unsigned long status;
271 unsigned long adh, adl;
/* latch fault status and reassemble the 32-bit faulting address */
275 status = omap_mmu_read_reg(mmu, OMAP_MMU_FAULT_ST);
276 adh = omap_mmu_read_reg(mmu, OMAP_MMU_FAULT_AD_H);
277 adl = omap_mmu_read_reg(mmu, OMAP_MMU_FAULT_AD_L);
278 dp = adh & OMAP_MMU_FAULT_AD_H_DP;
279 va = (((adh & OMAP_MMU_FAULT_AD_H_ADR_MASK) << 16) | adl);
281 /* if the fault is masked, nothing to do */
282 if ((status & MMUFAULT_MASK) == 0) {
283 pr_debug( "MMU interrupt, but ignoring.\n");
286 * when CACHE + DMA domain gets out of idle in DSP,
287 * MMU interrupt occurs but MMU_FAULT_ST is not set.
288 * in this case, we just ignore the interrupt.
291 pr_debug( "%s%s%s%s\n",
292 (status & OMAP_MMU_FAULT_ST_PREF)?
293 " (prefetch err)" : "",
294 (status & OMAP_MMU_FAULT_ST_PERM)?
295 " (permission fault)" : "",
296 (status & OMAP_MMU_FAULT_ST_TLB_MISS)?
298 (status & OMAP_MMU_FAULT_ST_TRANS) ?
299 " (translation fault)": "");
300 pr_debug( "fault address = %#08lx\n", va);
/* masked fault: re-arm the interrupt and bail out */
302 enable_irq(mmu->irq);
/* real fault: report which status bits fired (parenthesised labels
 * mark bits that are not in MMUFAULT_MASK) */
306 pr_info("%s%s%s%s\n",
307 (status & OMAP_MMU_FAULT_ST_PREF)?
308 (MMUFAULT_MASK & OMAP_MMU_FAULT_ST_PREF)?
312 (status & OMAP_MMU_FAULT_ST_PERM)?
313 (MMUFAULT_MASK & OMAP_MMU_FAULT_ST_PERM)?
315 " (permission fault)":
317 (status & OMAP_MMU_FAULT_ST_TLB_MISS)?
318 (MMUFAULT_MASK & OMAP_MMU_FAULT_ST_TLB_MISS)?
322 (status & OMAP_MMU_FAULT_ST_TRANS)?
323 (MMUFAULT_MASK & OMAP_MMU_FAULT_ST_TRANS)?
324 " translation fault":
325 " (translation fault)":
327 pr_info("fault address = %#08lx\n", va);
/* hand the fault to process context via the MMU's workqueue item */
329 mmu->fault_address = va;
330 schedule_work(&mmu->irq_work);
/*
 * omap1_mmu_ops - OMAP1 implementation of the generic omap_mmu
 * operations table, wiring the handlers above into the shared MMU core.
 */
333 struct omap_mmu_ops omap1_mmu_ops = {
334 .startup = omap1_mmu_startup,
335 .shutdown = omap1_mmu_shutdown,
336 .mem_enable = omap1_mmu_mem_enable,
337 .mem_disable = omap1_mmu_mem_disable,
338 .read_tlb = omap1_mmu_read_tlb,
339 .load_tlb = omap1_mmu_load_tlb,
340 .show = omap1_mmu_show,
341 .cam_va = omap1_mmu_cam_va,
342 .cam_ram_alloc = omap1_mmu_cam_ram_alloc,
343 .cam_ram_valid = omap1_mmu_cam_ram_valid,
344 .interrupt = omap1_mmu_interrupt,
346 EXPORT_SYMBOL_GPL(omap1_mmu_ops);