1 /*
2  * linux/arch/arm/plat-omap/dsp/dsp_mem.c
3  *
4  * OMAP DSP memory driver
5  *
6  * Copyright (C) 2002-2005 Nokia Corporation
7  *
8  * Written by Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
9  *
10  * Conversion to mempool API and ARM MMU section mapping
11  * by Paul Mundt <paul.mundt@nokia.com>
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU General Public License as published by
15  * the Free Software Foundation; either version 2 of the License, or
16  * (at your option) any later version.
17  *
18  * This program is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21  * GNU General Public License for more details.
22  *
23  * You should have received a copy of the GNU General Public License
24  * along with this program; if not, write to the Free Software
25  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26  *
27  * Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
28  * 2005/06/09:  DSP Gateway version 3.3
29  */
30
31 #include <linux/module.h>
32 #include <linux/init.h>
33 #include <linux/major.h>
34 #include <linux/fs.h>
35 #include <linux/bootmem.h>
36 #include <linux/fb.h>
37 #include <linux/interrupt.h>
38 #include <linux/delay.h>
39 #include <linux/mempool.h>
40 #include <linux/platform_device.h>
41 #include <linux/clk.h>
42 #include <asm/uaccess.h>
43 #include <asm/io.h>
44 #include <asm/ioctls.h>
45 #include <asm/irq.h>
46 #include <asm/pgalloc.h>
47 #include <asm/pgtable.h>
48 #include <asm/arch/tc.h>
49 #include <asm/arch/omapfb.h>
50 #include <asm/arch/dsp.h>
51 #include <asm/arch/dsp_common.h>
52 #include "uaccess_dsp.h"
53 #include "ipbuf.h"
54 #include "dsp.h"
55
56 #define SZ_1MB  0x100000
57 #define SZ_64KB 0x10000
58 #define SZ_4KB  0x1000
59 #define SZ_1KB  0x400
60 #define is_aligned(adr,align)   (!((adr)&((align)-1)))
61 #define ORDER_1MB       (20 - PAGE_SHIFT)
62 #define ORDER_64KB      (16 - PAGE_SHIFT)
63 #define ORDER_4KB       (12 - PAGE_SHIFT)
64
65 #define PGDIR_MASK              (~(PGDIR_SIZE-1))
66 #define PGDIR_ALIGN(addr)       (((addr)+PGDIR_SIZE-1)&(PGDIR_MASK))
67
68 #define dsp_mmu_enable() \
69         do { \
70                 omap_writew(DSPMMU_CNTL_MMU_EN | DSPMMU_CNTL_RESET_SW, \
71                             DSPMMU_CNTL); \
72         } while(0)
73 #define dsp_mmu_disable() \
74         do { omap_writew(0, DSPMMU_CNTL); } while(0)
75 #define dsp_mmu_flush() \
76         do { \
77                 omap_writew(DSPMMU_FLUSH_ENTRY_FLUSH_ENTRY, \
78                             DSPMMU_FLUSH_ENTRY); \
79         } while(0)
80 #define __dsp_mmu_gflush() \
81         do { omap_writew(DSPMMU_GFLUSH_GFLUSH, DSPMMU_GFLUSH); } while(0)
82 #define __dsp_mmu_itack() \
83         do { omap_writew(DSPMMU_IT_ACK_IT_ACK, DSPMMU_IT_ACK); } while(0)
84
85 #define EMIF_PRIO_LB_MASK       0x0000f000
86 #define EMIF_PRIO_LB_SHIFT      12
87 #define EMIF_PRIO_DMA_MASK      0x00000f00
88 #define EMIF_PRIO_DMA_SHIFT     8
89 #define EMIF_PRIO_DSP_MASK      0x00000070
90 #define EMIF_PRIO_DSP_SHIFT     4
91 #define EMIF_PRIO_MPU_MASK      0x00000007
92 #define EMIF_PRIO_MPU_SHIFT     0
93 #define set_emiff_dma_prio(prio) \
94         do { \
95                 omap_writel((omap_readl(OMAP_TC_OCPT1_PRIOR) & \
96                              ~EMIF_PRIO_DMA_MASK) | \
97                             ((prio) << EMIF_PRIO_DMA_SHIFT), \
98                             OMAP_TC_OCPT1_PRIOR); \
99         } while(0)
100
101 enum exmap_type {
102         EXMAP_TYPE_MEM,
103         EXMAP_TYPE_FB
104 };
105
106 struct exmap_tbl {
107         unsigned int valid:1;
108         unsigned int cntnu:1;   /* grouping */
109         int usecount;           /* reference count by mmap */
110         enum exmap_type type;
111         void *buf;              /* virtual address of the buffer,
112                                  * i.e. 0xc0000000 - */
113         void *vadr;             /* DSP shadow space,
114                                  * i.e. 0xe0000000 - 0xe0ffffff */
115         unsigned int order;
116 };
117 #define DSPMMU_TLB_LINES        32
118 static struct exmap_tbl exmap_tbl[DSPMMU_TLB_LINES];
119 static DECLARE_RWSEM(exmap_sem);
120
121 #ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
122 static struct omapfb_notifier_block *omapfb_nb;
123 static int omapfb_ready;
124 #endif
125
126 static int dsp_exunmap(unsigned long dspadr);
127
128 static void *dspvect_page;
129 static unsigned long dsp_fault_adr;
130 static struct mem_sync_struct mem_sync;
131
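/*
 * Grab an element straight from the pool's reserved set if one is
 * available; otherwise fall back to mempool_alloc(), which may go to
 * the page allocator.
 */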
132 static void *mempool_alloc_from_pool(mempool_t *pool,
133                                      unsigned int __nocast gfp_mask)
134 {
135         spin_lock_irq(&pool->lock);
136         if (likely(pool->curr_nr)) {
137                 void *element = pool->elements[--pool->curr_nr];
138                 spin_unlock_irq(&pool->lock);
139                 return element;
140         }
141
142         spin_unlock_irq(&pool->lock);
143         return mempool_alloc(pool, gfp_mask);
144 }
145
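/*
 * Return 'adr' with its low-order bits (selected by 'mask') replaced by
 * those of 'ref', rounding up by one block if the result would otherwise
 * fall below 'adr'.
 */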
146 static __inline__ unsigned long lineup_offset(unsigned long adr,
147                                               unsigned long ref,
148                                               unsigned long mask)
149 {
150         unsigned long newadr;
151
152         newadr = (adr & ~mask) | (ref & mask);
153         if (newadr < adr)
154                 newadr += mask + 1;
155         return newadr;
156 }
157
158 void dsp_mem_sync_inc(void)
159 {
160         /*
161          * FIXME: dsp_mem_enable()!!!
162          */
163         if (mem_sync.DARAM)
164                 mem_sync.DARAM->ad_arm++;
165         if (mem_sync.SARAM)
166                 mem_sync.SARAM->ad_arm++;
167         if (mem_sync.SDRAM)
168                 mem_sync.SDRAM->ad_arm++;
169 }
170
171 /*
172  * dsp_mem_sync_config() is called from mbx1 workqueue
173  */
174 int dsp_mem_sync_config(struct mem_sync_struct *sync)
175 {
176         size_t sync_seq_sz = sizeof(struct sync_seq);
177
178 #ifdef OLD_BINARY_SUPPORT
179         if (sync == NULL) {
180                 memset(&mem_sync, 0, sizeof(struct mem_sync_struct));
181                 return 0;
182         }
183 #endif
184         if ((dsp_mem_type(sync->DARAM, sync_seq_sz) != MEM_TYPE_DARAM) ||
185             (dsp_mem_type(sync->SARAM, sync_seq_sz) != MEM_TYPE_SARAM) ||
186             (dsp_mem_type(sync->SDRAM, sync_seq_sz) != MEM_TYPE_EXTERN)) {
187                 printk(KERN_ERR
188                        "omapdsp: mem_sync address validation failure!\n"
189                        "  mem_sync.DARAM = 0x%p,\n"
190                        "  mem_sync.SARAM = 0x%p,\n"
191                        "  mem_sync.SDRAM = 0x%p\n",
192                        sync->DARAM, sync->SARAM, sync->SDRAM);
193                 return -1;
194         }
195         memcpy(&mem_sync, sync, sizeof(struct mem_sync_struct));
196         return 0;
197 }
198
199 static mempool_t *kmem_pool_1M;
200 static mempool_t *kmem_pool_64K;
201
202 static void *dsp_pool_alloc(unsigned int __nocast gfp, void *order)
203 {
204         return (void *)__get_dma_pages(gfp, (unsigned int)order);
205 }
206
207 static void dsp_pool_free(void *buf, void *order)
208 {
209         free_pages((unsigned long)buf, (unsigned int)order);
210 }
211
212 static void dsp_kmem_release(void)
213 {
214         if (kmem_pool_64K) {
215                 mempool_destroy(kmem_pool_64K);
216                 kmem_pool_64K = NULL;
217         }
218
219         if (kmem_pool_1M) {
220                 mempool_destroy(kmem_pool_1M);
221                 kmem_pool_1M = NULL;
222         }
223 }
224
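/*
 * Reserve kernel memory in advance for later dsp_exmap() allocations:
 * 'size' (a multiple of 64KB) is split into 1MB and 64KB chunks and the
 * corresponding mempools are created or grown to cover them.
 * Returns the number of bytes reserved.
 */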
225 static int dsp_kmem_reserve(unsigned long size)
226 {
227         unsigned long len = size;
228
229         /* alignment check */
230         if (!is_aligned(size, SZ_64KB)) {
231                 printk(KERN_ERR
232                        "omapdsp: size(0x%lx) is not multiple of 64KB.\n", size);
233                 return -EINVAL;
234         }
235         if (size > DSPSPACE_SIZE) {
236                 printk(KERN_ERR
237                        "omapdsp: size(0x%lx) is larger than DSP memory space "
238                        "size (0x%x).\n", size, DSPSPACE_SIZE);
239                 return -EINVAL;
240         }
241
242         if (size >= SZ_1MB) {
243                 int nr = size >> 20;
244
245                 if (likely(!kmem_pool_1M))
246                         kmem_pool_1M = mempool_create(nr,
247                                                       dsp_pool_alloc,
248                                                       dsp_pool_free,
249                                                       (void *)ORDER_1MB);
250                 else
251                         mempool_resize(kmem_pool_1M, kmem_pool_1M->min_nr + nr,
252                                        GFP_KERNEL);
253
254                 size &= ~(0xf << 20);
255         }
256
257         if (size >= SZ_64KB) {
258                 int nr = size >> 16;
259
260                 if (likely(!kmem_pool_64K))
261                         kmem_pool_64K = mempool_create(nr,
262                                                        dsp_pool_alloc,
263                                                        dsp_pool_free,
264                                                        (void *)ORDER_64KB);
265                 else
266                         mempool_resize(kmem_pool_64K,
267                                        kmem_pool_64K->min_nr + nr, GFP_KERNEL);
268
269                 size &= ~(0xf << 16);
270         }
271
272         if (size)
273                 len -= size;
274
275         return len;
276 }
277
278 static void dsp_mem_free_pages(unsigned long buf, unsigned int order)
279 {
280         struct page *page, *ps, *pe;
281
282         ps = virt_to_page(buf);
283         pe = virt_to_page(buf + (1 << (PAGE_SHIFT + order)));
284
285         for (page = ps; page < pe; page++)
286                 ClearPageReserved(page);
287
288         if (buf) {
289                 if ((order == ORDER_64KB) && likely(kmem_pool_64K))
290                         mempool_free((void *)buf, kmem_pool_64K);
291                 else if ((order == ORDER_1MB) && likely(kmem_pool_1M))
292                         mempool_free((void *)buf, kmem_pool_1M);
293                 else
294                         free_pages(buf, order);
295         }
296 }
297
298 static inline void
299 exmap_alloc_pte(unsigned long virt, unsigned long phys, pgprot_t prot)
300 {
301         pgd_t *pgd;
302         pud_t *pud;
303         pmd_t *pmd;
304         pte_t *pte;
305
306         pgd = pgd_offset_k(virt);
307         pud = pud_offset(pgd, virt);
308         pmd = pmd_offset(pud, virt);
309
310         if (pmd_none(*pmd)) {
311                 pte = pte_alloc_one_kernel(&init_mm, 0);
312                 if (!pte)
313                         return;
314
315                 /* note: two PMDs will be set  */
316                 pmd_populate_kernel(&init_mm, pmd, pte);
317         }
318
319         pte = pte_offset_kernel(pmd, virt);
320         set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
321 }
322
323 static inline int
324 exmap_alloc_sect(unsigned long virt, unsigned long phys, int prot)
325 {
326         pgd_t *pgd;
327         pud_t *pud;
328         pmd_t *pmd;
329
330         pgd = pgd_offset_k(virt);
331         pud = pud_alloc(&init_mm, pgd, virt);
332         pmd = pmd_alloc(&init_mm, pud, virt);
333
334         if (virt & (1 << 20))
335                 pmd++;
336
337         if (!pmd_none(*pmd))
338                 /* No good, fall back on smaller mappings. */
339                 return -EINVAL;
340
341         *pmd = __pmd(phys | prot);
342         flush_pmd_entry(pmd);
343
344         return 0;
345 }
346
347 /*
348  * ARM MMU operations
349  */
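/*
 * exmap_set_armmmu():
 * map 'size' bytes at kernel virtual address 'virt' to physical address
 * 'phys' with uncached, writable page table entries.  Section (1MB)
 * mappings are currently disabled (see the #if 0 below), so everything
 * is mapped with 4KB PTEs.
 */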
350 static int exmap_set_armmmu(unsigned long virt, unsigned long phys,
351                             unsigned long size)
352 {
353         long off;
354         pgprot_t prot_pte;
355         int prot_sect;
356
357         printk(KERN_DEBUG
358                "omapdsp: mapping in ARM MMU, v=0x%08lx, p=0x%08lx, sz=0x%lx\n",
359                virt, phys, size);
360
361         prot_pte = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
362                             L_PTE_DIRTY | L_PTE_WRITE);
363
364         prot_sect = PMD_TYPE_SECT | PMD_SECT_UNCACHED |
365                     PMD_SECT_AP_WRITE | PMD_DOMAIN(DOMAIN_IO);
366
367         if (cpu_architecture() <= CPU_ARCH_ARMv5)
368                 prot_sect |= PMD_BIT4;
369
370         off = phys - virt;
371
372         while ((virt & 0xfffff || (virt + off) & 0xfffff) && size >= PAGE_SIZE) {
373                 exmap_alloc_pte(virt, virt + off, prot_pte);
374
375                 virt += PAGE_SIZE;
376                 size -= PAGE_SIZE;
377         }
378
379         /* XXX: Not yet.. confuses dspfb -- PFM. */
380 #if 0
381         while (size >= (PGDIR_SIZE / 2)) {
382                 if (exmap_alloc_sect(virt, virt + off, prot_sect) < 0)
383                         break;
384
385                 virt += (PGDIR_SIZE / 2);
386                 size -= (PGDIR_SIZE / 2);
387         }
388 #endif
389
390         while (size >= PAGE_SIZE) {
391                 exmap_alloc_pte(virt, virt + off, prot_pte);
392
393                 virt += PAGE_SIZE;
394                 size -= PAGE_SIZE;
395         }
396
397         BUG_ON(size);
398
399         return 0;
400 }
401
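/*
 * exmap_clear_pte/pmd/pud_range() and exmap_clear_armmmu() walk the
 * kernel page tables top-down and clear whatever exmap_set_armmmu()
 * installed, handling both section and page mappings.  The PTE pages
 * themselves are left allocated.
 */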
402 static inline void
403 exmap_clear_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
404 {
405         pte_t *pte;
406
407         pte = pte_offset_map(pmd, addr);
408         do {
409                 if (pte_none(*pte))
410                         continue;
411
412                 pte_clear(&init_mm, addr, pte);
413         } while (pte++, addr += PAGE_SIZE, addr != end);
414
415         pte_unmap(pte - 1);
416 }
417
418 static inline void
419 exmap_clear_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
420 {
421         pmd_t *pmd;
422         unsigned long next;
423
424         pmd = pmd_offset(pud, addr);
425         do {
426                 next = pmd_addr_end(addr, end);
427
428                 if (addr & (1 << 20))
429                         pmd++;
430
431                 if ((pmd_val(*pmd) & PMD_TYPE_MASK) == PMD_TYPE_SECT) {
432                         *pmd = __pmd(0);
433                         clean_pmd_entry(pmd);
434                         continue;
435                 }
436
437                 if (pmd_none_or_clear_bad(pmd))
438                         continue;
439
440                 exmap_clear_pte_range(pmd, addr, next);
441         } while (pmd++, addr = next, addr != end);
442 }
443
444 static inline void
445 exmap_clear_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
446 {
447         pud_t *pud;
448         unsigned long next;
449
450         pud = pud_offset(pgd, addr);
451         do {
452                 next = pud_addr_end(addr, end);
453                 if (pud_none_or_clear_bad(pud))
454                         continue;
455
456                 exmap_clear_pmd_range(pud, addr, next);
457         } while (pud++, addr = next, addr != end);
458 }
459
460 static void exmap_clear_armmmu(unsigned long virt, unsigned long size)
461 {
462         unsigned long next, end;
463         pgd_t *pgd;
464
465         printk(KERN_DEBUG
466                "omapdsp: unmapping in ARM MMU, v=0x%08lx, sz=0x%lx\n",
467                virt, size);
468
469         pgd = pgd_offset_k(virt);
470         end = virt + size;
471         do {
472                 next = pgd_addr_end(virt, end);
473                 if (pgd_none_or_clear_bad(pgd))
474                         continue;
475
476                 exmap_clear_pud_range(pgd, virt, next);
477         } while (pgd++, virt = next, virt != end);
478 }
479
480 static int exmap_valid(void *vadr, size_t len)
481 {
482         /* exmap_sem should be held before calling this function */
483         int i;
484
485 start:
486         for (i = 0; i < DSPMMU_TLB_LINES; i++) {
487                 void *mapadr;
488                 unsigned long mapsize;
489                 struct exmap_tbl *ent = &exmap_tbl[i];
490
491                 if (!ent->valid)
492                         continue;
493                 mapadr = (void *)ent->vadr;
494                 mapsize = 1 << (ent->order + PAGE_SHIFT);
495                 if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
496                         if (vadr + len <= mapadr + mapsize) {
497                                 /* this map covers whole address. */
498                                 return 1;
499                         } else {
500                                 /*
501                                  * this map covers partially.
502                                  * check rest portion.
503                                  */
504                                 len -= mapadr + mapsize - vadr;
505                                 vadr = mapadr + mapsize;
506                                 goto start;
507                         }
508                 }
509         }
510
511         return 0;
512 }
513
514 enum dsp_mem_type_e dsp_mem_type(void *vadr, size_t len)
515 {
516         void *ds = (void *)daram_base;
517         void *de = (void *)daram_base + daram_size;
518         void *ss = (void *)saram_base;
519         void *se = (void *)saram_base + saram_size;
520         int ret;
521
522         if ((vadr >= ds) && (vadr < de)) {
523                 if (vadr + len > de)
524                         return MEM_TYPE_CROSSING;
525                 else
526                         return MEM_TYPE_DARAM;
527         } else if ((vadr >= ss) && (vadr < se)) {
528                 if (vadr + len > se)
529                         return MEM_TYPE_CROSSING;
530                 else
531                         return MEM_TYPE_SARAM;
532         } else {
533                 down_read(&exmap_sem);
534                 if (exmap_valid(vadr, len))
535                         ret = MEM_TYPE_EXTERN;
536                 else
537                         ret = MEM_TYPE_NONE;
538                 up_read(&exmap_sem);
539                 return ret;
540         }
541 }
542
543 int dsp_address_validate(void *p, size_t len, char *fmt, ...)
544 {
545         if (dsp_mem_type(p, len) <= 0) {
546                 if (fmt != NULL) {
547                         char s[64];
548                         va_list args;
549
550                         va_start(args, fmt);
551                         vsprintf(s, fmt, args);
552                         va_end(args);
553                         printk(KERN_ERR
554                "omapdsp: %s address(0x%p) and size(0x%x) are "
555                "not valid!\n"
556                "         (crossing different types of memory, or\n"
557                                "          external memory space where no "
558                                "actual memory is mapped)\n",
559                                s, p, len);
560                 }
561                 return -1;
562         }
563
564         return 0;
565 }
566
567 /*
568  * exmap_use(), unuse():
569  * When the mapped area is exported to user space with mmap,
570  * the usecount is incremented.
571  * While the usecount is > 0, that area cannot be released.
572  */
573 void exmap_use(void *vadr, size_t len)
574 {
575         int i;
576
577         down_write(&exmap_sem);
578         for (i = 0; i < DSPMMU_TLB_LINES; i++) {
579                 void *mapadr;
580                 unsigned long mapsize;
581                 struct exmap_tbl *ent = &exmap_tbl[i];
582
583                 if (!ent->valid)
584                         continue;
585                 mapadr = (void *)ent->vadr;
586                 mapsize = 1 << (ent->order + PAGE_SHIFT);
587                 if ((vadr + len > mapadr) && (vadr < mapadr + mapsize)) {
588                         ent->usecount++;
589                 }
590         }
591         up_write(&exmap_sem);
592 }
593
594 void exmap_unuse(void *vadr, size_t len)
595 {
596         int i;
597
598         down_write(&exmap_sem);
599         for (i = 0; i < DSPMMU_TLB_LINES; i++) {
600                 void *mapadr;
601                 unsigned long mapsize;
602                 struct exmap_tbl *ent = &exmap_tbl[i];
603
604                 if (!ent->valid)
605                         continue;
606                 mapadr = (void *)ent->vadr;
607                 mapsize = 1 << (ent->order + PAGE_SHIFT);
608                 if ((vadr + len > mapadr) && (vadr < mapadr + mapsize)) {
609                         ent->usecount--;
610                 }
611         }
612         up_write(&exmap_sem);
613 }
614
615 /*
616  * dsp_virt_to_phys()
617  * returns physical address, and sets len to valid length
618  */
619 unsigned long dsp_virt_to_phys(void *vadr, size_t *len)
620 {
621         int i;
622
623         if (is_dsp_internal_mem(vadr)) {
624                 /* DARAM or SARAM */
625                 *len = dspmem_base + dspmem_size - (unsigned long)vadr;
626                 return (unsigned long)vadr;
627         }
628
629         /* EXRAM */
630         for (i = 0; i < DSPMMU_TLB_LINES; i++) {
631                 void *mapadr;
632                 unsigned long mapsize;
633                 struct exmap_tbl *ent = &exmap_tbl[i];
634
635                 if (!ent->valid)
636                         continue;
637                 mapadr = (void *)ent->vadr;
638                 mapsize = 1 << (ent->order + PAGE_SHIFT);
639                 if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
640                         *len = mapadr + mapsize - vadr;
641                         return __pa(ent->buf) + vadr - mapadr;
642                 }
643         }
644
645         /* valid mapping not found */
646         return 0;
647 }
648
649 /*
650  * DSP MMU operations
651  */
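/*
 * Return the CAM_L virtual-address tag mask for the page size encoded
 * in the 'slst' field.
 */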
652 static __inline__ unsigned short get_cam_l_va_mask(unsigned short slst)
653 {
654         switch (slst) {
655         case DSPMMU_CAM_L_SLST_1MB:
656                 return DSPMMU_CAM_L_VA_TAG_L1_MASK |
657                        DSPMMU_CAM_L_VA_TAG_L2_MASK_1MB;
658         case DSPMMU_CAM_L_SLST_64KB:
659                 return DSPMMU_CAM_L_VA_TAG_L1_MASK |
660                        DSPMMU_CAM_L_VA_TAG_L2_MASK_64KB;
661         case DSPMMU_CAM_L_SLST_4KB:
662                 return DSPMMU_CAM_L_VA_TAG_L1_MASK |
663                        DSPMMU_CAM_L_VA_TAG_L2_MASK_4KB;
664         case DSPMMU_CAM_L_SLST_1KB:
665                 return DSPMMU_CAM_L_VA_TAG_L1_MASK |
666                        DSPMMU_CAM_L_VA_TAG_L2_MASK_1KB;
667         }
668         return 0;
669 }
670
671 static __inline__ void get_tlb_lock(int *base, int *victim)
672 {
673         unsigned short lock = omap_readw(DSPMMU_LOCK);
674         if (base != NULL)
675                 *base = (lock & DSPMMU_LOCK_BASE_MASK)
676                         >> DSPMMU_LOCK_BASE_SHIFT;
677         if (victim != NULL)
678                 *victim = (lock & DSPMMU_LOCK_VICTIM_MASK)
679                           >> DSPMMU_LOCK_VICTIM_SHIFT;
680 }
681
682 static __inline__ void set_tlb_lock(int base, int victim)
683 {
684         omap_writew((base   << DSPMMU_LOCK_BASE_SHIFT) |
685                     (victim << DSPMMU_LOCK_VICTIM_SHIFT), DSPMMU_LOCK);
686 }
687
688 static __inline__ void __read_tlb(unsigned short lbase, unsigned short victim,
689                                   unsigned short *cam_h, unsigned short *cam_l,
690                                   unsigned short *ram_h, unsigned short *ram_l)
691 {
692         /* set victim */
693         set_tlb_lock(lbase, victim);
694
695         /* read a TLB entry */
696         omap_writew(DSPMMU_LD_TLB_RD, DSPMMU_LD_TLB);
697
698         if (cam_h != NULL)
699                 *cam_h = omap_readw(DSPMMU_READ_CAM_H);
700         if (cam_l != NULL)
701                 *cam_l = omap_readw(DSPMMU_READ_CAM_L);
702         if (ram_h != NULL)
703                 *ram_h = omap_readw(DSPMMU_READ_RAM_H);
704         if (ram_l != NULL)
705                 *ram_l = omap_readw(DSPMMU_READ_RAM_L);
706 }
707
708 static __inline__ void __load_tlb(unsigned short cam_h, unsigned short cam_l,
709                                   unsigned short ram_h, unsigned short ram_l)
710 {
711         omap_writew(cam_h, DSPMMU_CAM_H);
712         omap_writew(cam_l, DSPMMU_CAM_L);
713         omap_writew(ram_h, DSPMMU_RAM_H);
714         omap_writew(ram_l, DSPMMU_RAM_L);
715
716         /* flush the entry */
717         dsp_mmu_flush();
718
719         /* load a TLB entry */
720         omap_writew(DSPMMU_LD_TLB_LD, DSPMMU_LD_TLB);
721 }
722
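/*
 * Load one DSP MMU TLB entry translating DSP address 'vadr' to physical
 * address 'padr', with page size 'slst', preserved bit 'prsvd' and
 * access permission 'ap'.  The first invalid entry below the current
 * lock base is reused (or the entry at the lock base itself), and the
 * lock base is raised afterwards so the new entry survives replacement.
 */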
723 static int dsp_mmu_load_tlb(unsigned long vadr, unsigned long padr,
724                             unsigned short slst, unsigned short prsvd,
725                             unsigned short ap)
726 {
727         int lbase, victim;
728         unsigned short cam_l_va_mask;
729
730         clk_enable(dsp_ck_handle);
731
732         get_tlb_lock(&lbase, NULL);
733         for (victim = 0; victim < lbase; victim++) {
734                 unsigned short cam_l;
735
736                 /* read a TLB entry */
737                 __read_tlb(lbase, victim, NULL, &cam_l, NULL, NULL);
738                 if (!(cam_l & DSPMMU_CAM_L_V))
739                         goto found_victim;
740         }
741         set_tlb_lock(lbase, victim);
742
743 found_victim:
744         /* The last (31st) entry cannot be locked? */
745         if (victim == 31) {
746                 printk(KERN_ERR "omapdsp: TLB is full.\n");
747                 return -EBUSY;
748         }
749
750         cam_l_va_mask = get_cam_l_va_mask(slst);
751         if (vadr &
752             ~(DSPMMU_CAM_H_VA_TAG_H_MASK << 22 |
753               (unsigned long)cam_l_va_mask << 6)) {
754                 printk(KERN_ERR
755                        "omapdsp: mapping vadr (0x%06lx) is not "
756                        "on an aligned boundary\n", vadr);
757                 return -EINVAL;
758         }
759
760         __load_tlb(vadr >> 22, (vadr >> 6 & cam_l_va_mask) | prsvd | slst,
761                    padr >> 16, (padr & DSPMMU_RAM_L_RAM_LSB_MASK) | ap);
762
763         /* update lock base */
764         if (victim == lbase)
765                 lbase++;
766         set_tlb_lock(lbase, lbase);
767
768         clk_disable(dsp_ck_handle);
769         return 0;
770 }
771
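/*
 * Scan the locked TLB entries, flush the one whose virtual tag matches
 * 'vadr', and set the lock base to just above the highest remaining
 * valid entry.
 */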
772 static int dsp_mmu_clear_tlb(unsigned long vadr)
773 {
774         int lbase;
775         int i;
776         int max_valid = 0;
777
778         clk_enable(dsp_ck_handle);
779
780         get_tlb_lock(&lbase, NULL);
781         for (i = 0; i < lbase; i++) {
782                 unsigned short cam_h, cam_l;
783                 unsigned short cam_l_va_mask, cam_vld, slst;
784                 unsigned long cam_va;
785
786                 /* read a TLB entry */
787                 __read_tlb(lbase, i, &cam_h, &cam_l, NULL, NULL);
788
789                 cam_vld = cam_l & DSPMMU_CAM_L_V;
790                 if (!cam_vld)
791                         continue;
792
793                 slst = cam_l & DSPMMU_CAM_L_SLST_MASK;
794                 cam_l_va_mask = get_cam_l_va_mask(slst);
795                 cam_va = (unsigned long)(cam_h & DSPMMU_CAM_H_VA_TAG_H_MASK) << 22 |
796                          (unsigned long)(cam_l & cam_l_va_mask) << 6;
797
798                 if (cam_va == vadr)
799                         /* flush the entry */
800                         dsp_mmu_flush();
801                 else
802                         max_valid = i;
803         }
804
805         /* set new lock base */
806         set_tlb_lock(max_valid+1, max_valid+1);
807
808         clk_disable(dsp_ck_handle);
809         return 0;
810 }
811
812 static void dsp_mmu_gflush(void)
813 {
814         clk_enable(dsp_ck_handle);
815
816         __dsp_mmu_gflush();
817         set_tlb_lock(1, 1);
818
819         clk_disable(dsp_ck_handle);
820 }
821
822 /*
823  * dsp_exmap()
824  *
825  * OMAP_DSP_MEM_IOCTL_EXMAP ioctl calls this function with padr=0.
826  * In this case, the buffer for DSP is allocated in this routine,
827  * then it is mapped.
828  * On the other hand, for example - frame buffer sharing, calls
829  * this function with padr set. It means some known address space
830  * pointed with padr is going to be shared with DSP.
831  */
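/*
 * The region is mapped in 1MB, 64KB or 4KB units depending on its size
 * and alignment; each unit gets its own exmap_tbl entry and DSP TLB
 * line, with 'cntnu' set on follow-on entries to mark them as part of
 * one group.
 */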
832 static int dsp_exmap(unsigned long dspadr, unsigned long padr,
833                      unsigned long size, enum exmap_type type)
834 {
835         unsigned short slst;
836         void *buf;
837         unsigned int order = 0;
838         unsigned long unit;
839         unsigned int cntnu = 0;
840         unsigned long _dspadr = dspadr;
841         unsigned long _padr = padr;
842         void *_vadr = dspbyte_to_virt(dspadr);
843         unsigned long _size = size;
844         struct exmap_tbl *exmap_ent;
845         int status;
846         int i;
847
848 #define MINIMUM_PAGESZ  SZ_4KB
849         /*
850          * alignment check
851          */
852         if (!is_aligned(size, MINIMUM_PAGESZ)) {
853                 printk(KERN_ERR
854                        "omapdsp: size(0x%lx) is not multiple of 4KB.\n", size);
855                 return -EINVAL;
856         }
857         if (!is_aligned(dspadr, MINIMUM_PAGESZ)) {
858                 printk(KERN_ERR
859                        "omapdsp: DSP address(0x%lx) is not aligned.\n", dspadr);
860                 return -EINVAL;
861         }
862         if (!is_aligned(padr, MINIMUM_PAGESZ)) {
863                 printk(KERN_ERR
864                        "omapdsp: physical address(0x%lx) is not aligned.\n",
865                        padr);
866                 return -EINVAL;
867         }
868
869         /* address validity check */
870         if ((dspadr < dspmem_size) ||
871             (dspadr >= DSPSPACE_SIZE) ||
872             ((dspadr + size > DSP_INIT_PAGE) &&
873              (dspadr < DSP_INIT_PAGE + PAGE_SIZE))) {
874                 printk(KERN_ERR
875                        "omapdsp: illegal address/size for dsp_exmap().\n");
876                 return -EINVAL;
877         }
878
879         down_write(&exmap_sem);
880
881         /* overlap check */
882         for (i = 0; i < DSPMMU_TLB_LINES; i++) {
883                 unsigned long mapsize;
884                 struct exmap_tbl *tmp_ent = &exmap_tbl[i];
885
886                 if (!tmp_ent->valid)
887                         continue;
888                 mapsize = 1 << (tmp_ent->order + PAGE_SHIFT);
889                 if ((_vadr + size > tmp_ent->vadr) &&
890                     (_vadr < tmp_ent->vadr + mapsize)) {
891                         printk(KERN_ERR "omapdsp: exmap page overlap!\n");
892                         up_write(&exmap_sem);
893                         return -EINVAL;
894                 }
895         }
896
897 start:
898         buf = NULL;
899         /* Are there any free TLB lines?  */
900         for (i = 0; i < DSPMMU_TLB_LINES; i++) {
901                 if (!exmap_tbl[i].valid)
902                         goto found_free;
903         }
904         printk(KERN_ERR "omapdsp: DSP TLB is full.\n");
905         status = -EBUSY;
906         goto fail;
907
908 found_free:
909         exmap_ent = &exmap_tbl[i];
910
911         if ((_size >= SZ_1MB) &&
912             (is_aligned(_padr, SZ_1MB) || (padr == 0)) &&
913             is_aligned(_dspadr, SZ_1MB)) {
914                 unit = SZ_1MB;
915                 slst = DSPMMU_CAM_L_SLST_1MB;
916         } else if ((_size >= SZ_64KB) &&
917                    (is_aligned(_padr, SZ_64KB) || (padr == 0)) &&
918                    is_aligned(_dspadr, SZ_64KB)) {
919                 unit = SZ_64KB;
920                 slst = DSPMMU_CAM_L_SLST_64KB;
921         } else {
922                 unit = SZ_4KB;
923                 slst = DSPMMU_CAM_L_SLST_4KB;
924         }
925
926         order = get_order(unit);
927
928         /* buffer allocation */
929         if (type == EXMAP_TYPE_MEM) {
930                 struct page *page, *ps, *pe;
931
932                 if ((order == ORDER_1MB) && likely(kmem_pool_1M))
933                         buf = mempool_alloc_from_pool(kmem_pool_1M, GFP_KERNEL);
934                 else if ((order == ORDER_64KB) && likely(kmem_pool_64K))
935                         buf = mempool_alloc_from_pool(kmem_pool_64K,GFP_KERNEL);
936                 else {
937                         buf = (void *)__get_dma_pages(GFP_KERNEL, order);
938                         if (buf == NULL) {
939                                 status = -ENOMEM;
940                                 goto fail;
941                         }
942                 }
943
944                 /* mark the pages as reserved; this is needed for mmap */
945                 ps = virt_to_page(buf);
946                 pe = virt_to_page(buf + unit);
947
948                 for (page = ps; page < pe; page++)
949                         SetPageReserved(page);
950
951                 _padr = __pa(buf);
952         }
953
954         /*
955          * mapping for ARM MMU:
956          * we should not access to the allocated memory through 'buf'
957          * since this area should not be cashed.
958          */
959         status = exmap_set_armmmu((unsigned long)_vadr, _padr, unit);
960         if (status < 0)
961                 goto fail;
962
963         /* loading DSP TLB entry */
964         status = dsp_mmu_load_tlb(_dspadr, _padr, slst, 0, DSPMMU_RAM_L_AP_FA);
965         if (status < 0) {
966                 exmap_clear_armmmu((unsigned long)_vadr, unit);
967                 goto fail;
968         }
969
970         exmap_ent->buf      = buf;
971         exmap_ent->vadr     = _vadr;
972         exmap_ent->order    = order;
973         exmap_ent->valid    = 1;
974         exmap_ent->cntnu    = cntnu;
975         exmap_ent->type     = type;
976         exmap_ent->usecount = 0;
977
978         if ((_size -= unit) == 0) {     /* normal completion */
979                 up_write(&exmap_sem);
980                 return size;
981         }
982
983         _dspadr += unit;
984         _vadr   += unit;
985         _padr = padr ? _padr + unit : 0;
986         cntnu = 1;
987         goto start;
988
989 fail:
990         up_write(&exmap_sem);
991         if (buf)
992                 dsp_mem_free_pages((unsigned long)buf, order);
993         dsp_exunmap(dspadr);
994         return status;
995 }
996
997 static unsigned long unmap_free_arm(struct exmap_tbl *ent)
998 {
999         unsigned long size;
1000
1001         /* clearing ARM MMU */
1002         size = 1 << (ent->order + PAGE_SHIFT);
1003         exmap_clear_armmmu((unsigned long)ent->vadr, size);
1004
1005         /* freeing allocated memory */
1006         if (ent->type == EXMAP_TYPE_MEM) {
1007                 dsp_mem_free_pages((unsigned long)ent->buf, ent->order);
1008                 printk(KERN_DEBUG
1009                        "omapdsp: freeing 0x%lx bytes @ adr 0x%8p\n",
1010                        size, ent->buf);
1011         }
1012 #ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
1013         else if (ent->type == EXMAP_TYPE_FB) {
1014                 int status;
1015                 if (omapfb_nb) {
1016                         status = omapfb_unregister_client(omapfb_nb);
1017                         if (!status)
1018                                 printk("omapfb_unregister_client(): "
1019                                        "success\n");
1020                         else
1021                                 printk("omapfb_unregister_client(): "
1022                                        "failure(%d)\n", status);
1023                         kfree(omapfb_nb);
1024                         omapfb_nb = NULL;
1025                         omapfb_ready = 0;
1026                 }
1027         }
1028 #endif
1029
1030         return size;
1031 }
1032
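/*
 * dsp_exunmap():
 * unmap the mapping group starting at 'dspadr'.  For each entry in the
 * group the DSP TLB line is cleared and the ARM mapping and buffer are
 * released; the next entry is processed as long as its 'cntnu' flag is
 * set.  Returns the total number of bytes unmapped.
 */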
1033 static int dsp_exunmap(unsigned long dspadr)
1034 {
1035         void *vadr;
1036         unsigned long size;
1037         int total = 0;
1038         struct exmap_tbl *ent;
1039         int idx;
1040
1041         vadr = dspbyte_to_virt(dspadr);
1042         down_write(&exmap_sem);
1043         for (idx = 0; idx < DSPMMU_TLB_LINES; idx++) {
1044                 ent = &exmap_tbl[idx];
1045                 if (!ent->valid)
1046                         continue;
1047                 if (ent->vadr == vadr)
1048                         goto found_map;
1049         }
1050         up_write(&exmap_sem);
1051         printk(KERN_WARNING
1052                "omapdsp: address %06lx not found in exmap_tbl.\n", dspadr);
1053         return -EINVAL;
1054
1055 found_map:
1056         if (ent->usecount > 0) {
1057                 printk(KERN_ERR
1058                        "omapdsp: exmap reference count is not 0.\n"
1059                        "   idx=%d, vadr=%p, order=%d, usecount=%d\n",
1060                        idx, ent->vadr, ent->order, ent->usecount);
1061                 up_write(&exmap_sem);
1062                 return -EINVAL;
1063         }
1064         /* clearing DSP TLB entry */
1065         dsp_mmu_clear_tlb(dspadr);
1066
1067         /* clear ARM MMU and free buffer */
1068         size = unmap_free_arm(ent);
1069         ent->valid = 0;
1070         total += size;
1071
1072         /* we don't free PTEs */
1073
1074         /* flush TLB */
1075         flush_tlb_kernel_range((unsigned long)vadr, (unsigned long)vadr + size);
1076
1077         /* check if next mapping is in same group */
1078         if (++idx == DSPMMU_TLB_LINES)
1079                 goto up_out;    /* normal completion */
1080         ent = &exmap_tbl[idx];
1081         if (!ent->valid || !ent->cntnu)
1082                 goto up_out;    /* normal completion */
1083
1084         dspadr += size;
1085         vadr   += size;
1086         if (ent->vadr == vadr)
1087                 goto found_map; /* continue */
1088
1089         printk(KERN_ERR
1090                "omapdsp: illegal exmap_tbl grouping!\n"
1091                "expected vadr = %p, exmap_tbl[%d].vadr = %p\n",
1092                vadr, idx, ent->vadr);
1093         up_write(&exmap_sem);
1094         return -EINVAL;
1095
1096 up_out:
1097         up_write(&exmap_sem);
1098         return total;
1099 }
1100
1101 static void exmap_flush(void)
1102 {
1103         struct exmap_tbl *ent;
1104         int i;
1105
1106         down_write(&exmap_sem);
1107
1108         /* clearing DSP TLB entry */
1109         dsp_mmu_gflush();
1110
1111         /* exmap_tbl[0] should be preserved */
1112         for (i = 1; i < DSPMMU_TLB_LINES; i++) {
1113                 ent = &exmap_tbl[i];
1114                 if (ent->valid) {
1115                         unmap_free_arm(ent);
1116                         ent->valid = 0;
1117                 }
1118         }
1119
1120         /* flush TLB */
1121         flush_tlb_kernel_range(dspmem_base + dspmem_size,
1122                                dspmem_base + DSPSPACE_SIZE);
1123         up_write(&exmap_sem);
1124 }
1125
1126 #ifdef CONFIG_OMAP_DSP_FBEXPORT
1127 #ifndef CONFIG_FB
1128 #error You configured OMAP_DSP_FBEXPORT, but FB was not configured!
1129 #endif /* CONFIG_FB */
1130
1131 #ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
1132 static int omapfb_notifier_cb(struct omapfb_notifier_block *omapfb_nb,
1133                                unsigned long event, struct omapfb_device *fbdev)
1134 {
1135         /* XXX */
1136         printk("omapfb_notifier_cb(): event = %s\n",
1137                (event == OMAPFB_EVENT_READY)    ? "READY" :
1138                (event == OMAPFB_EVENT_DISABLED) ? "DISABLED" : "Unknown");
1139         if (event == OMAPFB_EVENT_READY)
1140                 omapfb_ready = 1;
1141         else if (event == OMAPFB_EVENT_DISABLED)
1142                 omapfb_ready = 0;
1143         return 0;
1144 }
1145 #endif
1146
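/*
 * dsp_fbexport():
 * export the first registered frame buffer to DSP space.  The physical
 * range is rounded to 4KB boundaries, *dspadr is lined up with the
 * physical address so that larger mapping units can be used, and the
 * region is mapped with dsp_exmap() as EXMAP_TYPE_FB.  The EMIFF DMA
 * priority is raised afterwards.
 */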
1147 static int dsp_fbexport(unsigned long *dspadr)
1148 {
1149         unsigned long dspadr_actual;
1150         unsigned long padr_sys, padr, fbsz_sys, fbsz;
1151         int cnt;
1152 #ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
1153         int status;
1154 #endif
1155
1156         printk(KERN_DEBUG "omapdsp: frame buffer export\n");
1157
1158 #ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
1159         if (omapfb_nb) {
1160                 printk(KERN_WARNING
1161                        "omapdsp: frame buffer has been exported already!\n");
1162                 return -EBUSY;
1163         }
1164 #endif
1165
1166         if (num_registered_fb == 0) {
1167                 printk(KERN_INFO "omapdsp: frame buffer not registered.\n");
1168                 return -EINVAL;
1169         }
1170         if (num_registered_fb != 1) {
1171                 printk(KERN_INFO
1172                        "omapdsp: %d frame buffers found; using the first one.\n",
1173                        num_registered_fb);
1174         }
1175         padr_sys = registered_fb[0]->fix.smem_start;
1176         fbsz_sys = registered_fb[0]->fix.smem_len;
1177         if (fbsz_sys == 0) {
1178                 printk(KERN_ERR
1179                        "omapdsp: framebuffer doesn't seem to be configured "
1180                        "correctly! (size=0)\n");
1181                 return -EINVAL;
1182         }
1183
1184         /*
1185          * align padr and fbsz to 4kB boundary
1186          * (should be noted to the user afterwards!)
1187          */
1188         padr = padr_sys & ~(SZ_4KB-1);
1189         fbsz = (fbsz_sys + padr_sys - padr + SZ_4KB-1) & ~(SZ_4KB-1);
1190
1191         /* line up dspadr offset with padr */
1192         dspadr_actual =
1193                 (fbsz > SZ_1MB) ?  lineup_offset(*dspadr, padr, SZ_1MB-1) :
1194                 (fbsz > SZ_64KB) ? lineup_offset(*dspadr, padr, SZ_64KB-1) :
1195                 /* (fbsz > SZ_4KB) ? */ *dspadr;
1196         if (dspadr_actual != *dspadr)
1197                 printk(KERN_DEBUG
1198                        "omapdsp: actual dspadr for FBEXPORT = %08lx\n",
1199                        dspadr_actual);
1200         *dspadr = dspadr_actual;
1201
1202         cnt = dsp_exmap(dspadr_actual, padr, fbsz, EXMAP_TYPE_FB);
1203         if (cnt < 0) {
1204                 printk(KERN_ERR "omapdsp: exmap failure.\n");
1205                 return cnt;
1206         }
1207
1208         if ((padr != padr_sys) || (fbsz != fbsz_sys)) {
1209                 printk(KERN_WARNING
1210 "  !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
1211 "  !!  screen base address or size is not aligned to 4kB:           !!\n"
1212 "  !!    actual screen  adr = %08lx, size = %08lx             !!\n"
1213 "  !!    exporting      adr = %08lx, size = %08lx             !!\n"
1214 "  !!  Make sure that the framebuffer is allocated with 4kB-order!  !!\n"
1215 "  !!  Otherwise DSP can corrupt the kernel memory.                 !!\n"
1216 "  !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n",
1217                        padr_sys, fbsz_sys, padr, fbsz);
1218         }
1219
1220         /* increase the DMA priority */
1221         set_emiff_dma_prio(15);
1222
1223 #ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
1224         omapfb_nb = kmalloc(sizeof(struct omapfb_notifier_block), GFP_KERNEL);
1225         if (omapfb_nb == NULL) {
1226                 printk(KERN_ERR
1227                        "omapdsp: failed to allocate memory for omapfb_nb!\n");
1228                 dsp_exunmap(dspadr_actual);
1229                 return -ENOMEM;
1230         }
1231         status = omapfb_register_client(omapfb_nb, omapfb_notifier_cb, NULL);
1232         if (!status)
1233                 printk("omapfb_register_client(): success\n");
1234         else
1235                 printk("omapfb_register_client(): failure(%d)\n", status);
1236 #endif
1237
1238         return cnt;
1239 }
1240
1241 #else /* CONFIG_OMAP_DSP_FBEXPORT */
1242
1243 static int dsp_fbexport(unsigned long *dspadr)
1244 {
1245         printk(KERN_ERR "omapdsp: FBEXPORT function is not enabled.\n");
1246         return -EINVAL;
1247 }
1248
1249 #endif /* CONFIG_OMAP_DSP_FBEXPORT */
1250
1251 static int dsp_mmu_itack(void)
1252 {
1253         unsigned long dspadr;
1254
1255         printk(KERN_INFO "omapdsp: sending DSP MMU interrupt ack.\n");
1256         if (!dsp_err_mmu_isset()) {
1257                 printk(KERN_ERR "omapdsp: DSP MMU error has not been set.\n");
1258                 return -EINVAL;
1259         }
1260         dspadr = dsp_fault_adr & ~(SZ_4K-1);
1261         dsp_exmap(dspadr, 0, SZ_4K, EXMAP_TYPE_MEM);    /* FIXME: reserve TLB entry for this */
1262         printk(KERN_INFO "omapdsp: falling into recovery runlevel...\n");
1263         dsp_runlevel(OMAP_DSP_MBCMD_RUNLEVEL_RECOVERY);
1264         __dsp_mmu_itack();
1265         udelay(100);
1266         dsp_exunmap(dspadr);
1267         dsp_err_mmu_clear();
1268         return 0;
1269 }
1270
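/*
 * dsp_mmu_init():
 * reset and re-enable the DSP MMU, map the DSP vector page into the ARM
 * address space at the DSP_INIT_PAGE shadow address, record it as
 * exmap_tbl[0], and load it into the DSP TLB as preserved entry 0 with
 * full access.
 */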
1271 static void dsp_mmu_init(void)
1272 {
1273         unsigned long phys;
1274         void *virt;
1275
1276         clk_enable(dsp_ck_handle);
1277         down_write(&exmap_sem);
1278
1279         dsp_mmu_disable();      /* clear all */
1280         udelay(100);
1281         dsp_mmu_enable();
1282
1283         /* mapping for ARM MMU */
1284         phys = __pa(dspvect_page);
1285         virt = dspbyte_to_virt(DSP_INIT_PAGE);  /* 0xe0fff000 */
1286         exmap_set_armmmu((unsigned long)virt, phys, PAGE_SIZE);
1287         exmap_tbl[0].buf      = dspvect_page;
1288         exmap_tbl[0].vadr     = virt;
1289         exmap_tbl[0].usecount = 0;
1290         exmap_tbl[0].order    = 0;
1291         exmap_tbl[0].valid    = 1;
1292         exmap_tbl[0].cntnu    = 0;
1293
1294         /* DSP TLB initialization */
1295         set_tlb_lock(0, 0);
1296         /* preserved, full access */
1297         dsp_mmu_load_tlb(DSP_INIT_PAGE, phys, DSPMMU_CAM_L_SLST_4KB,
1298                          DSPMMU_CAM_L_P, DSPMMU_RAM_L_AP_FA);
1299         up_write(&exmap_sem);
1300         clk_disable(dsp_ck_handle);
1301 }
1302
1303 static void dsp_mmu_shutdown(void)
1304 {
1305         exmap_flush();
1306         dsp_mmu_disable();      /* clear all */
1307 }
1308
1309 /*
1310  * intmem_enable() / disable():
1311  * if the address is in DSP internal memories,
1312  * we send PM mailbox commands so that DSP DMA domain won't go in idle
1313  * when ARM is accessing to those memories.
1314  */
1315 static int intmem_enable(void)
1316 {
1317         int ret = 0;
1318
1319         if (dsp_is_ready())
1320                 ret = dsp_mbsend(MBCMD(PM), OMAP_DSP_MBCMD_PM_ENABLE,
1321                                  DSPREG_ICR_DMA_IDLE_DOMAIN);
1322
1323         return ret;
1324 }
1325
1326 static void intmem_disable(void) {
1327         if (dsp_is_ready())
1328                 dsp_mbsend(MBCMD(PM), OMAP_DSP_MBCMD_PM_DISABLE,
1329                            DSPREG_ICR_DMA_IDLE_DOMAIN);
1330 }
1331
1332 /*
1333  * dsp_mem_enable() / disable()
1334  */
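/*
 * For DSP internal memory, omap_dsp_request_mem() is called on the
 * first enable and omap_dsp_release_mem() on the last disable (tracked
 * by intmem_usecount).  For external memory, exmap_sem is held for read
 * across the access so the mapping cannot be removed meanwhile.
 */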
1335 int intmem_usecount;
1336
1337 int dsp_mem_enable(void *adr)
1338 {
1339         int ret = 0;
1340
1341         if (is_dsp_internal_mem(adr)) {
1342                 if (intmem_usecount++ == 0)
1343                         ret = omap_dsp_request_mem();
1344         } else
1345                 down_read(&exmap_sem);
1346
1347         return ret;
1348 }
1349
1350 void dsp_mem_disable(void *adr)
1351 {
1352         if (is_dsp_internal_mem(adr)) {
1353                 if (--intmem_usecount == 0)
1354                         omap_dsp_release_mem();
1355         } else
1356                 up_read(&exmap_sem);
1357 }
1358
1359 /* for safety */
1360 void dsp_mem_usecount_clear(void)
1361 {
1362         if (intmem_usecount != 0) {
1363                 printk(KERN_WARNING
1364                        "omapdsp: unbalanced memory request/release detected.\n"
1365                        "         intmem_usecount is not zero where "
1366                        "it should be! ... resetting it to zero.\n");
1367                 intmem_usecount = 0;
1368                 omap_dsp_release_mem();
1369         }
1370 }
1371
1372 /*
1373  * dsp_mem file operations
1374  */
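/*
 * The read/write handlers dispatch to intmem_*() for DSP internal
 * memory and to exmem_*() for external (exmap'ed) memory, with
 * dsp_mem_enable()/dsp_mem_disable() bracketing each access.
 */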
1375 static loff_t dsp_mem_lseek(struct file *file, loff_t offset, int orig)
1376 {
1377         loff_t ret;
1378
1379         mutex_lock(&file->f_dentry->d_inode->i_mutex);
1380         switch (orig) {
1381         case 0:
1382                 file->f_pos = offset;
1383                 ret = file->f_pos;
1384                 break;
1385         case 1:
1386                 file->f_pos += offset;
1387                 ret = file->f_pos;
1388                 break;
1389         default:
1390                 ret = -EINVAL;
1391         }
1392         mutex_unlock(&file->f_dentry->d_inode->i_mutex);
1393         return ret;
1394 }
1395
1396 static ssize_t intmem_read(struct file *file, char *buf, size_t count,
1397                            loff_t *ppos)
1398 {
1399         unsigned long p = *ppos;
1400         void *vadr = dspbyte_to_virt(p);
1401         ssize_t size = dspmem_size;
1402         ssize_t read;
1403
1404         if (p >= size)
1405                 return 0;
1406         clk_enable(api_ck_handle);
1407         read = count;
1408         if (count > size - p)
1409                 read = size - p;
1410         if (copy_to_user(buf, vadr, read)) {
1411                 read = -EFAULT;
1412                 goto out;
1413         }
1414         *ppos += read;
1415 out:
1416         clk_disable(api_ck_handle);
1417         return read;
1418 }
1419
1420 static ssize_t exmem_read(struct file *file, char *buf, size_t count,
1421                           loff_t *ppos)
1422 {
1423         unsigned long p = *ppos;
1424         void *vadr = dspbyte_to_virt(p);
1425
1426         if (!exmap_valid(vadr, count)) {
1427                 printk(KERN_ERR
1428                        "omapdsp: DSP address %08lx / size %08x "
1429                        "is not valid!\n", p, count);
1430                 return -EFAULT;
1431         }
1432         if (count > DSPSPACE_SIZE - p)
1433                 count = DSPSPACE_SIZE - p;
1434         if (copy_to_user(buf, vadr, count))
1435                 return -EFAULT;
1436         *ppos += count;
1437
1438         return count;
1439 }
1440
1441 static ssize_t dsp_mem_read(struct file *file, char *buf, size_t count,
1442                             loff_t *ppos)
1443 {
1444         int ret;
1445         void *vadr = dspbyte_to_virt(*(unsigned long *)ppos);
1446
1447         if (dsp_mem_enable(vadr) < 0)
1448                 return -EBUSY;
1449         if (is_dspbyte_internal_mem(*ppos))
1450                 ret = intmem_read(file, buf, count, ppos);
1451         else
1452                 ret = exmem_read(file, buf, count, ppos);
1453         dsp_mem_disable(vadr);
1454
1455         return ret;
1456 }
1457
1458 static ssize_t intmem_write(struct file *file, const char *buf, size_t count,
1459                             loff_t *ppos)
1460 {
1461         unsigned long p = *ppos;
1462         void *vadr = dspbyte_to_virt(p);
1463         ssize_t size = dspmem_size;
1464         ssize_t written;
1465
1466         if (p >= size)
1467                 return 0;
1468         clk_enable(api_ck_handle);
1469         written = count;
1470         if (count > size - p)
1471                 written = size - p;
1472         if (copy_from_user(vadr, buf, written)) {
1473                 written = -EFAULT;
1474                 goto out;
1475         }
1476         *ppos += written;
1477 out:
1478         clk_disable(api_ck_handle);
1479         return written;
1480 }
1481
1482 static ssize_t exmem_write(struct file *file, const char *buf, size_t count,
1483                            loff_t *ppos)
1484 {
1485         unsigned long p = *ppos;
1486         void *vadr = dspbyte_to_virt(p);
1487
1488         if (!exmap_valid(vadr, count)) {
1489                 printk(KERN_ERR
1490                        "omapdsp: DSP address %08lx / size %08x "
1491                        "is not valid!\n", p, count);
1492                 return -EFAULT;
1493         }
1494         if (count > DSPSPACE_SIZE - p)
1495                 count = DSPSPACE_SIZE - p;
1496         if (copy_from_user(vadr, buf, count))
1497                 return -EFAULT;
1498         *ppos += count;
1499
1500         return count;
1501 }
1502
1503 static ssize_t dsp_mem_write(struct file *file, const char *buf, size_t count,
1504                              loff_t *ppos)
1505 {
1506         int ret;
1507         void *vadr = dspbyte_to_virt(*(unsigned long *)ppos);
1508
1509         if (dsp_mem_enable(vadr) < 0)
1510                 return -EBUSY;
1511         if (is_dspbyte_internal_mem(*ppos))
1512                 ret = intmem_write(file, buf, count, ppos);
1513         else
1514                 ret = exmem_write(file, buf, count, ppos);
1515         dsp_mem_disable(vadr);
1516
1517         return ret;
1518 }
1519
1520 static int dsp_mem_ioctl(struct inode *inode, struct file *file,
1521                          unsigned int cmd, unsigned long arg)
1522 {
1523         switch (cmd) {
1524         case OMAP_DSP_MEM_IOCTL_MMUINIT:
1525                 dsp_mmu_init();
1526                 return 0;
1527
1528         case OMAP_DSP_MEM_IOCTL_EXMAP:
1529                 {
1530                         struct omap_dsp_mapinfo mapinfo;
1531                         if (copy_from_user(&mapinfo, (void *)arg,
1532                                            sizeof(mapinfo)))
1533                                 return -EFAULT;
1534                         return dsp_exmap(mapinfo.dspadr, 0, mapinfo.size,
1535                                          EXMAP_TYPE_MEM);
1536                 }
1537
1538         case OMAP_DSP_MEM_IOCTL_EXUNMAP:
1539                 return dsp_exunmap((unsigned long)arg);
1540
1541         case OMAP_DSP_MEM_IOCTL_EXMAP_FLUSH:
1542                 exmap_flush();
1543                 return 0;
1544
1545         case OMAP_DSP_MEM_IOCTL_FBEXPORT:
1546                 {
1547                         unsigned long dspadr;
1548                         int ret;
1549                         if (copy_from_user(&dspadr, (void *)arg, sizeof(long)))
1550                                 return -EFAULT;
1551                         ret = dsp_fbexport(&dspadr);
1552                         if (copy_to_user((void *)arg, &dspadr, sizeof(long)))
1553                                 return -EFAULT;
1554                         return ret;
1555                 }
1556
1557         case OMAP_DSP_MEM_IOCTL_MMUITACK:
1558                 return dsp_mmu_itack();
1559
1560         case OMAP_DSP_MEM_IOCTL_KMEM_RESERVE:
1561                 {
1562                         unsigned long size;
1563                         if (copy_from_user(&size, (void *)arg, sizeof(long)))
1564                                 return -EFAULT;
1565                         return dsp_kmem_reserve(size);
1566                 }
1567
1568         case OMAP_DSP_MEM_IOCTL_KMEM_RELEASE:
1569                 dsp_kmem_release();
1570                 return 0;
1571
1572         default:
1573                 return -ENOIOCTLCMD;
1574         }
1575 }
1576
1577 static int dsp_mem_mmap(struct file *file, struct vm_area_struct *vma)
1578 {
1579         /*
1580          * FIXME
1581          */
1582         return -ENOSYS;
1583 }
1584
1585 static int dsp_mem_open(struct inode *inode, struct file *file)
1586 {
1587         if (!capable(CAP_SYS_RAWIO))
1588                 return -EPERM;
1589
1590         return 0;
1591 }
1592
1593 static int dsp_mem_release(struct inode *inode, struct file *file)
1594 {
1595         return 0;
1596 }
1597
1598 #ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
1599 /*
1600  * fb update functions:
1601  * fbupd_response() is executed by the workqueue.
1602  * fbupd_cb() is called when fb update is done, in interrupt context.
1603  * mbx1_fbupd() is called when KFUNC:FBCTL:UPD is received from DSP.
1604  */
1605 static void fbupd_response(void *arg)
1606 {
1607         int status;
1608
1609         status = dsp_mbsend(MBCMD(KFUNC), OMAP_DSP_MBCMD_KFUNC_FBCTL,
1610                             OMAP_DSP_MBCMD_FBCTL_UPD);
1611         if (status < 0) {
1612                 /* FIXME: DSP is busy !! */
1613                 printk(KERN_ERR
1614                        "omapdsp: DSP is busy when trying to send FBCTL:UPD "
1615                        "response!\n");
1616         }
1617 }
1618
1619 static DECLARE_WORK(fbupd_response_work, (void (*)(void *))fbupd_response,
1620                     NULL);
1621
1622 static void fbupd_cb(void *arg)
1623 {
1624         schedule_work(&fbupd_response_work);
1625 }
1626
1627 void mbx1_fbctl_upd(void)
1628 {
1629         struct omapfb_update_window win;
1630         volatile unsigned short *buf = ipbuf_sys_da->d;
1631
1632         /* FIXME: try count sometimes exceeds 1000. */
1633         if (sync_with_dsp(&ipbuf_sys_da->s, OMAP_DSP_TID_ANON, 5000) < 0) {
1634                 printk(KERN_ERR "mbx: FBCTL:UPD - IPBUF sync failed!\n");
1635                 return;
1636         }
1637         win.x = buf[0];
1638         win.y = buf[1];
1639         win.width = buf[2];
1640         win.height = buf[3];
1641         win.format = buf[4];
1642         release_ipbuf_pvt(ipbuf_sys_da);
1643
1644         if (!omapfb_ready) {
1645                 printk(KERN_WARNING
1646                        "omapdsp: fbupd() called while HWA742 is not ready!\n");
1647                 return;
1648         }
1649         //printk("calling omapfb_update_window_async()\n");
1650         omapfb_update_window_async(&win, fbupd_cb, NULL);
1651 }
1652
1653 #else /* CONFIG_FB_OMAP_LCDC_EXTERNAL */
1654
1655 void mbx1_fbctl_upd(void)
1656 {
1657 }
1658 #endif /* CONFIG_FB_OMAP_LCDC_EXTERNAL */
1659
1660 /*
1661  * sysfs files
1662  */
1663 static ssize_t mmu_show(struct device *dev, struct device_attribute *attr,
1664                         char *buf)
1665 {
1666         int len;
1667         int lbase, victim;
1668         int i;
1669
1670         clk_enable(dsp_ck_handle);
1671         down_read(&exmap_sem);
1672
1673         get_tlb_lock(&lbase, &victim);
1674
1675         len = sprintf(buf, "p: preserved,  v: valid\n"
1676                            "ety       cam_va     ram_pa   sz ap\n");
1677                         /* 00: p v 0x300000 0x10171800 64KB FA */
1678         for (i = 0; i < 32; i++) {
1679                 unsigned short cam_h, cam_l, ram_h, ram_l;
1680                 unsigned short cam_l_va_mask, prsvd, cam_vld, slst;
1681                 unsigned long cam_va;
1682                 unsigned short ram_l_ap;
1683                 unsigned long ram_pa;
1684                 char *pgsz_str, *ap_str;
1685
1686                 /* read a TLB entry */
1687                 __read_tlb(lbase, i, &cam_h, &cam_l, &ram_h, &ram_l);
1688
1689                 slst = cam_l & DSPMMU_CAM_L_SLST_MASK;
1690                 cam_l_va_mask = get_cam_l_va_mask(slst);
1691                 pgsz_str = (slst == DSPMMU_CAM_L_SLST_1MB) ? " 1MB":
1692                            (slst == DSPMMU_CAM_L_SLST_64KB)? "64KB":
1693                            (slst == DSPMMU_CAM_L_SLST_4KB) ? " 4KB":
1694                                                              " 1KB";
1695                 prsvd    = cam_l & DSPMMU_CAM_L_P;
1696                 cam_vld  = cam_l & DSPMMU_CAM_L_V;
1697                 ram_l_ap = ram_l & DSPMMU_RAM_L_AP_MASK;
1698                 ap_str = (ram_l_ap == DSPMMU_RAM_L_AP_RO) ? "RO":
1699                          (ram_l_ap == DSPMMU_RAM_L_AP_FA) ? "FA":
1700                                                             "NA";
1701                 cam_va = (unsigned long)(cam_h & DSPMMU_CAM_H_VA_TAG_H_MASK) << 22 |
1702                          (unsigned long)(cam_l & cam_l_va_mask) << 6;
1703                 ram_pa = (unsigned long)ram_h << 16 |
1704                          (ram_l & DSPMMU_RAM_L_RAM_LSB_MASK);
1705
1706                 if (i == lbase)
1707                         len += sprintf(buf + len, "lock base = %d\n", lbase);
1708                 if (i == victim)
1709                         len += sprintf(buf + len, "victim    = %d\n", victim);
1710                 /* 00: p v 0x300000 0x10171800 64KB FA */
1711                 len += sprintf(buf + len,
1712                                "%02d: %c %c 0x%06lx 0x%08lx %s %s\n",
1713                                i,
1714                                prsvd   ? 'p' : ' ',
1715                                cam_vld ? 'v' : ' ',
1716                                cam_va, ram_pa, pgsz_str, ap_str);
1717         }
1718
1719         /* restore victim entry */
1720         set_tlb_lock(lbase, victim);
1721
1722         up_read(&exmap_sem);
1723         clk_disable(dsp_ck_handle);
1724         return len;
1725 }
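
The cam_va / ram_pa lines above splice each 32-bit address back together from two 16-bit MMU register halves: the masked CAM high half supplies the VA tag shifted up by 22, the masked CAM low half supplies the rest shifted up by 6, and the RAM high/low halves are combined the same way for the physical address. A stand-alone sketch of that arithmetic, using register values chosen (as an assumption, not read from hardware) so the result matches the sample line in the comment, "00: p v 0x300000 0x10171800 64KB FA":

#include <stdio.h>

/* Illustrative only: the values below stand in for already-masked
 * CAM/RAM register halves of a 64KB TLB entry; they are not the real
 * DSPMMU_* mask constants.
 */
int main(void)
{
        unsigned short cam_h = 0x0000;  /* VA tag, upper bits          */
        unsigned short cam_l = 0xC000;  /* VA tag, lower bits (masked) */
        unsigned short ram_h = 0x1017;  /* PA, upper 16 bits           */
        unsigned short ram_l = 0x1800;  /* PA, lower bits (masked)     */

        unsigned long cam_va = (unsigned long)cam_h << 22 |
                               (unsigned long)cam_l << 6;
        unsigned long ram_pa = (unsigned long)ram_h << 16 | ram_l;

        /* prints: cam_va=0x300000 ram_pa=0x10171800 */
        printf("cam_va=0x%06lx ram_pa=0x%08lx\n", cam_va, ram_pa);
        return 0;
}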
1726
1727 static struct device_attribute dev_attr_mmu = __ATTR_RO(mmu);
1728
1729 static ssize_t exmap_show(struct device *dev, struct device_attribute *attr,
1730                           char *buf)
1731 {
1732         int len;
1733         int i;
1734
1735         down_read(&exmap_sem);
1736         len = sprintf(buf, "v: valid,  c: cntnu\n"
1737                            "ety           vadr        buf od uc\n");
1738                          /* 00: v c 0xe0300000 0xc0171800  0 */
1739         for (i = 0; i < DSPMMU_TLB_LINES; i++) {
1740                 struct exmap_tbl *ent = &exmap_tbl[i];
1741                 /* 00: v c 0xe0300000 0xc0171800  0 */
1742                 len += sprintf(buf + len, "%02d: %c %c 0x%8p 0x%8p %2d %2d\n",
1743                                i,
1744                                ent->valid ? 'v' : ' ',
1745                                ent->cntnu ? 'c' : ' ',
1746                                ent->vadr, ent->buf, ent->order, ent->usecount);
1747         }
1748
1749         up_read(&exmap_sem);
1750         return len;
1751 }
1752
1753 static struct device_attribute dev_attr_exmap = __ATTR_RO(exmap);
1754
1755 static ssize_t kmem_pool_show(struct device *dev,
1756                               struct device_attribute *attr, char *buf)
1757 {
1758         int nr_1M, nr_64K, total;
1759
1760         nr_1M = kmem_pool_1M->min_nr;
1761         nr_64K = kmem_pool_64K->min_nr;
1762         total = nr_1M * SZ_1MB + nr_64K * SZ_64KB;
1763
1764         return sprintf(buf, "0x%x %d %d\n", total, nr_1M, nr_64K);
1765 }
1766
1767 static struct device_attribute dev_attr_kmem_pool = __ATTR_RO(kmem_pool);
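
The three read-only attributes above give a quick view of the driver state: mmu dumps the 32 DSP TLB entries, exmap lists the external mapping table, and kmem_pool prints "0x<total> <nr_1M> <nr_64K>", where total = nr_1M * 1MB + nr_64K * 64KB (e.g. 2 one-MB blocks plus 4 64-KB blocks shows as 0x240000 2 4). A small user-space sketch that reads them; the sysfs directory is an assumption derived from dsp_device being a platform device, and may differ on a given kernel:

/* Illustrative user-space sketch; the sysfs path is assumed, not verified. */
#include <stdio.h>

static void dump_attr(const char *name)
{
        char path[128], line[256];
        FILE *f;

        snprintf(path, sizeof(path), "/sys/devices/platform/dsp/%s", name);
        f = fopen(path, "r");
        if (!f) {
                perror(path);
                return;
        }
        printf("== %s ==\n", name);
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);
        fclose(f);
}

int main(void)
{
        dump_attr("mmu");       /* TLB dump, one line per entry */
        dump_attr("exmap");     /* external mapping table       */
        dump_attr("kmem_pool"); /* "0x<total> <nr_1M> <nr_64K>" */
        return 0;
}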
1768
1769 /*
1770  * DSP MMU interrupt handler
1771  */
1772
1773 /*
1774  * MMU fault mask:
1775  * We ignore prefetch err.
1776  */
1777 #define MMUFAULT_MASK \
1778         (DSPMMU_FAULT_ST_PERM |\
1779          DSPMMU_FAULT_ST_TLB_MISS |\
1780          DSPMMU_FAULT_ST_TRANS)
1781 irqreturn_t dsp_mmu_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1782 {
1783         unsigned short status;
1784         unsigned short adh, adl;
1785         unsigned short dp;
1786
1787         status = omap_readw(DSPMMU_FAULT_ST);
1788         adh = omap_readw(DSPMMU_FAULT_AD_H);
1789         adl = omap_readw(DSPMMU_FAULT_AD_L);
1790         dp = adh & DSPMMU_FAULT_AD_H_DP;
1791         dsp_fault_adr = MKLONG(adh & DSPMMU_FAULT_AD_H_ADR_MASK, adl);
1792         /* if the fault is masked, nothing to do */
1793         if ((status & MMUFAULT_MASK) == 0) {
1794                 printk(KERN_DEBUG "DSP MMU interrupt, but ignoring.\n");
1795                 /*
1796                  * Note: on OMAP1710, when the CACHE + DMA domain
1797                  * comes out of idle on the DSP side, an MMU interrupt
1798                  * occurs but DSPMMU_FAULT_ST is not set. In that
1799                  * case we simply ignore the interrupt.
1800                  */
1801                 if (status) {
1802                         printk(KERN_DEBUG "%s%s%s%s\n",
1803                                (status & DSPMMU_FAULT_ST_PREF)?
1804                                         "  (prefetch err)" : "",
1805                                (status & DSPMMU_FAULT_ST_PERM)?
1806                                         "  (permission fault)" : "",
1807                                (status & DSPMMU_FAULT_ST_TLB_MISS)?
1808                                         "  (TLB miss)" : "",
1809                                (status & DSPMMU_FAULT_ST_TRANS) ?
1810                                         "  (translation fault)": "");
1811                         printk(KERN_DEBUG
1812                                "fault address = %s: 0x%06lx\n",
1813                                dp ? "DATA" : "PROGRAM",
1814                                dsp_fault_adr);
1815                 }
1816                 return IRQ_HANDLED;
1817         }
1818
1819         printk(KERN_INFO "DSP MMU interrupt!\n");
1820         printk(KERN_INFO "%s%s%s%s\n",
1821                (status & DSPMMU_FAULT_ST_PREF)?
1822                         (MMUFAULT_MASK & DSPMMU_FAULT_ST_PREF)?
1823                                 "  prefetch err":
1824                                 "  (prefetch err)":
1825                                 "",
1826                (status & DSPMMU_FAULT_ST_PERM)?
1827                         (MMUFAULT_MASK & DSPMMU_FAULT_ST_PERM)?
1828                                 "  permission fault":
1829                                 "  (permission fault)":
1830                                 "",
1831                (status & DSPMMU_FAULT_ST_TLB_MISS)?
1832                         (MMUFAULT_MASK & DSPMMU_FAULT_ST_TLB_MISS)?
1833                                 "  TLB miss":
1834                                 "  (TLB miss)":
1835                                 "",
1836                (status & DSPMMU_FAULT_ST_TRANS)?
1837                         (MMUFAULT_MASK & DSPMMU_FAULT_ST_TRANS)?
1838                                 "  translation fault":
1839                                 "  (translation fault)":
1840                                 "");
1841         printk(KERN_INFO "fault address = %s: 0x%06lx\n",
1842                dp ? "DATA" : "PROGRAM",
1843                dsp_fault_adr);
1844
1845         if (dsp_is_ready()) {
1846                 /*
1847                  * If we call dsp_exmap() here,
1848                  * "kernel BUG at slab.c" occurs.
1849                  */
1850                 /* FIXME */
1851                 dsp_err_mmu_set(dsp_fault_adr);
1852         } else {
1853                 disable_irq(INT_DSP_MMU);
1854                 __dsp_mmu_itack();
1855                 printk(KERN_INFO "Resetting DSP...\n");
1856                 dsp_cpustat_request(CPUSTAT_RESET);
1857                 enable_irq(INT_DSP_MMU);
1858                 /*
1859                  * if we enable the following calls, the semaphore lock should be avoided.
1860                  *
1861                 printk(KERN_INFO "Flushing DSP MMU...\n");
1862                 exmap_flush();
1863                 dsp_mmu_init();
1864                  */
1865         }
1866
1867         return IRQ_HANDLED;
1868 }
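
dsp_mmu_interrupt() uses the three-argument handler signature (with struct pt_regs) that predates the 2.6.19 IRQ API change, and it is not registered in this file, so the request_irq() call presumably lives elsewhere in the driver. A minimal sketch of how a handler with this signature would be hooked to INT_DSP_MMU, for illustration only:

/* Illustrative only: the real registration is done elsewhere in the driver. */
static int __init dsp_mmu_irq_example(void)
{
        int ret;

        ret = request_irq(INT_DSP_MMU, dsp_mmu_interrupt, 0,
                          "dsp_mmu", NULL);
        if (ret)
                printk(KERN_ERR "omapdsp: could not request DSP MMU irq\n");
        return ret;
}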
1869
1870 /*
1871  * file operations for the DSP memory device
1872  */
1873 struct file_operations dsp_mem_fops = {
1874         .owner   = THIS_MODULE,
1875         .llseek  = dsp_mem_lseek,
1876         .read    = dsp_mem_read,
1877         .write   = dsp_mem_write,
1878         .ioctl   = dsp_mem_ioctl,
1879         .mmap    = dsp_mem_mmap,
1880         .open    = dsp_mem_open,
1881         .release = dsp_mem_release,
1882 };
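
dsp_mem_fops is deliberately non-static, so the character device registration also happens outside this file. For illustration, a typical way a fops table like this gets tied to a device node, assuming a dynamically allocated major number and a hypothetical device name:

/* Illustrative only: a sketch, not the driver's actual registration code. */
static int __init dsp_mem_chrdev_example(void)
{
        int major;

        major = register_chrdev(0, "dspmem", &dsp_mem_fops);
        if (major < 0) {
                printk(KERN_ERR "omapdsp: register_chrdev failed\n");
                return major;
        }
        return 0;
}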
1883
1884 void dsp_mem_start(void)
1885 {
1886         dsp_register_mem_cb(intmem_enable, intmem_disable);
1887 }
1888
1889 void dsp_mem_stop(void)
1890 {
1891         memset(&mem_sync, 0, sizeof(struct mem_sync_struct));
1892         dsp_unregister_mem_cb();
1893 }
1894
1895 int __init dsp_mem_init(void)
1896 {
1897         int i;
1898
1899         for (i = 0; i < DSPMMU_TLB_LINES; i++) {
1900                 exmap_tbl[i].valid = 0;
1901         }
1902
1903         dspvect_page = (void *)__get_dma_pages(GFP_KERNEL, 0);
1904         if (dspvect_page == NULL) {
1905                 printk(KERN_ERR
1906                        "omapdsp: failed to allocate memory "
1907                        "for dsp vector table\n");
1908                 return -ENOMEM;
1909         }
1910         dsp_mmu_init();
1911         dsp_set_idle_boot_base(IDLEPG_BASE, IDLEPG_SIZE);
1912
1913         device_create_file(&dsp_device.dev, &dev_attr_mmu);
1914         device_create_file(&dsp_device.dev, &dev_attr_exmap);
1915         device_create_file(&dsp_device.dev, &dev_attr_kmem_pool);
1916
1917         return 0;
1918 }
1919
1920 void dsp_mem_exit(void)
1921 {
1922         dsp_mmu_shutdown();
1923         dsp_kmem_release();
1924
1925         if (dspvect_page != NULL) {
1926                 unsigned long virt;
1927
1928                 down_read(&exmap_sem);
1929
1930                 virt = (unsigned long)dspbyte_to_virt(DSP_INIT_PAGE);
1931                 flush_tlb_kernel_range(virt, virt + PAGE_SIZE);
1932                 free_page((unsigned long)dspvect_page);
1933                 dspvect_page = NULL;
1934
1935                 up_read(&exmap_sem);
1936         }
1937
1938         device_remove_file(&dsp_device.dev, &dev_attr_mmu);
1939         device_remove_file(&dsp_device.dev, &dev_attr_exmap);
1940         device_remove_file(&dsp_device.dev, &dev_attr_kmem_pool);
1941 }