]> pilppa.org Git - linux-2.6-omap-h63xx.git/blob - arch/arm/plat-omap/dsp/dsp_mem.c
Merge with ../linux-2.6
[linux-2.6-omap-h63xx.git] / arch / arm / plat-omap / dsp / dsp_mem.c
1 /*
2  * linux/arch/arm/mach-omap/dsp/dsp_mem.c
3  *
4  * OMAP DSP memory driver
5  *
6  * Copyright (C) 2002-2005 Nokia Corporation
7  *
8  * Written by Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License as published by
12  * the Free Software Foundation; either version 2 of the License, or
13  * (at your option) any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18  * GNU General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public License
21  * along with this program; if not, write to the Free Software
22  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23  *
24  * Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
25  * 2005/06/09:  DSP Gateway version 3.3
26  */
27
28 #include <linux/module.h>
29 #include <linux/init.h>
30 #include <linux/major.h>
31 #include <linux/fs.h>
32 #include <linux/bootmem.h>
33 #include <linux/fb.h>
34 #include <linux/interrupt.h>
35 #include <linux/delay.h>
36 #include <asm/uaccess.h>
37 #include <asm/io.h>
38 #include <asm/ioctls.h>
39 #include <asm/irq.h>
40 #include <asm/pgalloc.h>
41 #include <asm/pgtable.h>
42 #include <asm/hardware/clock.h>
43 #include <asm/arch/tc.h>
44 #include <asm/arch/dsp.h>
45 #include <asm/arch/dsp_common.h>
46 #include "uaccess_dsp.h"
47 #include "dsp.h"
48
/* memory block sizes handled by exmap and the kmem pools */
#define SZ_1MB  0x100000
#define SZ_64KB 0x10000
#define SZ_4KB  0x1000
#define SZ_1KB  0x400
/* true when adr is a multiple of align (align must be a power of two) */
#define is_aligned(adr,align)   (!((adr)&((align)-1)))
/* page-allocation orders matching the block sizes above */
#define ORDER_1MB       (20 - PAGE_SHIFT)
#define ORDER_64KB      (16 - PAGE_SHIFT)
#define ORDER_4KB       (12 - PAGE_SHIFT)

#define PGDIR_MASK              (~(PGDIR_SIZE-1))
/* round addr up to the next first-level page-table boundary */
#define PGDIR_ALIGN(addr)       (((addr)+PGDIR_SIZE-1)&(PGDIR_MASK))

/* DSP MMU register helpers (see asm/arch/dsp.h for the register layout) */
#define dsp_mmu_enable() \
        do { \
                omap_writew(DSPMMU_CNTL_MMU_EN | DSPMMU_CNTL_RESET_SW, \
                            DSPMMU_CNTL); \
        } while(0)
#define dsp_mmu_disable() \
        do { omap_writew(0, DSPMMU_CNTL); } while(0)
/* flush the TLB entry matching the current CAM registers */
#define dsp_mmu_flush() \
        do { \
                omap_writew(DSPMMU_FLUSH_ENTRY_FLUSH_ENTRY, \
                            DSPMMU_FLUSH_ENTRY); \
        } while(0)
/* global flush: invalidate all non-protected TLB entries */
#define __dsp_mmu_gflush() \
        do { omap_writew(DSPMMU_GFLUSH_GFLUSH, DSPMMU_GFLUSH); } while(0)
/* acknowledge a DSP MMU interrupt */
#define __dsp_mmu_itack() \
        do { omap_writew(DSPMMU_IT_ACK_IT_ACK, DSPMMU_IT_ACK); } while(0)

/* EMIF priority fields within the OMAP_TC_OCPT1_PRIOR register */
#define EMIF_PRIO_LB_MASK       0x0000f000
#define EMIF_PRIO_LB_SHIFT      12
#define EMIF_PRIO_DMA_MASK      0x00000f00
#define EMIF_PRIO_DMA_SHIFT     8
#define EMIF_PRIO_DSP_MASK      0x00000070
#define EMIF_PRIO_DSP_SHIFT     4
#define EMIF_PRIO_MPU_MASK      0x00000007
#define EMIF_PRIO_MPU_SHIFT     0
/* read-modify-write: replace only the DMA priority field */
#define set_emiff_dma_prio(prio) \
        do { \
                omap_writel((omap_readl(OMAP_TC_OCPT1_PRIOR) & \
        ~EMIF_PRIO_DMA_MASK) | \
                            ((prio) << EMIF_PRIO_DMA_SHIFT), \
                            OMAP_TC_OCPT1_PRIOR); \
        } while(0)
93
/* origin of the memory backing an exmap entry */
enum exmap_type {
        EXMAP_TYPE_MEM,         /* pages allocated by dsp_exmap() itself */
        EXMAP_TYPE_FB           /* external memory, e.g. frame buffer sharing */
};

/* software shadow of one DSP MMU TLB line */
struct exmap_tbl {
        unsigned int valid:1;
        unsigned int cntnu:1;   /* grouping */
        int usecount;           /* reference count by mmap */
        enum exmap_type type;
        void *buf;              /* virtual address of the buffer,
                                 * i.e. 0xc0000000 - */
        void *vadr;             /* DSP shadow space,
                                 * i.e. 0xe0000000 - 0xe0ffffff */
        unsigned int order;     /* page-allocation order of this mapping */
};
#define DSPMMU_TLB_LINES        32
static struct exmap_tbl exmap_tbl[DSPMMU_TLB_LINES];
static DECLARE_RWSEM(exmap_sem);        /* protects exmap_tbl[] */

static int dsp_exunmap(unsigned long dspadr);

/* NOTE(review): set/used elsewhere in this file — presumably the DSP
 * vector page and the last MMU fault address; confirm against full file */
static void *dspvect_page;
static unsigned long dsp_fault_adr;
static struct mem_sync_struct mem_sync; /* configured by dsp_mem_sync_config() */
119
/*
 * lineup_offset(): return the smallest address >= adr whose low bits
 * (selected by mask) equal those of ref; the high bits come from adr,
 * stepping up one block (mask+1) if the combination fell below adr.
 */
static __inline__ unsigned long lineup_offset(unsigned long adr,
                                              unsigned long ref,
                                              unsigned long mask)
{
        unsigned long aligned = (adr & ~mask) | (ref & mask);

        /* fell below adr: advance to the next block */
        return (aligned < adr) ? aligned + mask + 1 : aligned;
}
131
/*
 * dsp_mem_sync_inc(): bump the ARM-side counter (ad_arm) of every sync
 * area configured by dsp_mem_sync_config() — presumably so the DSP side
 * can detect ARM writes to shared memory; confirm against the DSP tool.
 */
void dsp_mem_sync_inc(void)
{
        /*
         * FIXME: dsp_mem_enable()!!!
         */
        if (mem_sync.DARAM)
                mem_sync.DARAM->ad_arm++;
        if (mem_sync.SARAM)
                mem_sync.SARAM->ad_arm++;
        if (mem_sync.SDRAM)
                mem_sync.SDRAM->ad_arm++;
}
144
145 /*
146  * dsp_mem_sync_config() is called from mbx1 workqueue
147  */
148 int dsp_mem_sync_config(struct mem_sync_struct *sync)
149 {
150         size_t sync_seq_sz = sizeof(struct sync_seq);
151
152 #ifdef OLD_BINARY_SUPPORT
153         if (sync == NULL) {
154                 memset(&mem_sync, 0, sizeof(struct mem_sync_struct));
155                 return 0;
156         }
157 #endif
158         if ((dsp_mem_type(sync->DARAM, sync_seq_sz) != MEM_TYPE_DARAM) ||
159             (dsp_mem_type(sync->SARAM, sync_seq_sz) != MEM_TYPE_SARAM) ||
160             (dsp_mem_type(sync->SDRAM, sync_seq_sz) != MEM_TYPE_EXTERN)) {
161                 printk(KERN_ERR
162                        "omapdsp: mem_sync address validation failure!\n"
163                        "  mem_sync.DARAM = 0x%p,\n"
164                        "  mem_sync.SARAM = 0x%p,\n"
165                        "  mem_sync.SDRAM = 0x%p,\n",
166                        sync->DARAM, sync->SARAM, sync->SDRAM);
167                 return -1;
168         }
169         memcpy(&mem_sync, sync, sizeof(struct mem_sync_struct));
170         return 0;
171 }
172
173 /*
174  * kmem_reserve(), kmem_release():
175  * reserve or release kernel memory for exmap().
176  *
177  * exmap() might request consecutive 1MB or 64kB,
178  * but it will be difficult after memory pages are fragmented.
179  * So, user can reserve such memory blocks in the early phase
180  * through kmem_reserve().
181  */
/* pool of pre-reserved DMA page blocks of one fixed order */
struct kmem_pool {
        struct semaphore sem;   /* protects buf[] and count */
        unsigned long buf[16];  /* reserved blocks; 0 marks a free slot */
        int count;              /* number of blocks added by kmem_reserve */
};

#define KMEM_POOL_INIT(name) \
{ \
        .sem = __MUTEX_INITIALIZER((name).sem), \
}
#define DECLARE_KMEM_POOL(name) \
        struct kmem_pool name = KMEM_POOL_INIT(name)

DECLARE_KMEM_POOL(kmem_pool_1M);
DECLARE_KMEM_POOL(kmem_pool_64K);
197
198 static void dsp_kmem_release(void)
199 {
200         int i;
201
202         down(&kmem_pool_1M.sem);
203         for (i = 0; i < kmem_pool_1M.count; i++) {
204                 if (kmem_pool_1M.buf[i])
205                         free_pages(kmem_pool_1M.buf[i], ORDER_1MB);
206         }
207         kmem_pool_1M.count = 0;
208         up(&kmem_pool_1M.sem);
209
210         down(&kmem_pool_64K.sem);
211         for (i = 0; i < kmem_pool_64K.count; i++) {
212                 if (kmem_pool_64K.buf[i])
213                         free_pages(kmem_pool_64K.buf[i], ORDER_64KB);
214         }
215         kmem_pool_64K.count = 0;
216         up(&kmem_pool_1M.sem);
217 }
218
219 static int dsp_kmem_reserve(unsigned long size)
220 {
221         unsigned long buf;
222         unsigned int order;
223         unsigned long unit;
224         unsigned long _size;
225         struct kmem_pool *pool;
226         int i;
227
228         /* alignment check */
229         if (!is_aligned(size, SZ_64KB)) {
230                 printk(KERN_ERR
231                        "omapdsp: size(0x%lx) is not multiple of 64KB.\n", size);
232                 return -EINVAL;
233         }
234         if (size > DSPSPACE_SIZE) {
235                 printk(KERN_ERR
236                        "omapdsp: size(0x%lx) is larger than DSP memory space "
237                        "size (0x%x.\n", size, DSPSPACE_SIZE);
238                 return -EINVAL;
239         }
240
241         for (_size = size; _size; _size -= unit) {
242                 if (_size >= SZ_1MB) {
243                         unit = SZ_1MB;
244                         order = ORDER_1MB;
245                         pool = &kmem_pool_1M;
246                 } else {
247                         unit = SZ_64KB;
248                         order = ORDER_64KB;
249                         pool = &kmem_pool_64K;
250                 }
251
252                 buf = __get_dma_pages(GFP_KERNEL, order);
253                 if (!buf)
254                         return size - _size;
255                 down(&pool->sem);
256                 for (i = 0; i < 16; i++) {
257                         if (!pool->buf[i]) {
258                                 pool->buf[i] = buf;
259                                 pool->count++;
260                                 buf = 0;
261                                 break;
262                         }
263                 }
264                 up(&pool->sem);
265
266                 if (buf) {      /* pool is full */
267                         free_pages(buf, order);
268                         return size - _size;
269                 }
270         }
271
272         return size;
273 }
274
275 static unsigned long dsp_mem_get_dma_pages(unsigned int order)
276 {
277         struct kmem_pool *pool;
278         unsigned long buf = 0;
279         int i;
280
281         switch (order) {
282                 case ORDER_1MB:
283                         pool = &kmem_pool_1M;
284                         break;
285                 case ORDER_64KB:
286                         pool = &kmem_pool_64K;
287                         break;
288                 default:
289                         pool = NULL;
290         }
291
292         if (pool) {
293                 down(&pool->sem);
294                 for (i = 0; i < pool->count; i++) {
295                         if (pool->buf[i]) {
296                                 buf = pool->buf[i];
297                                 pool->buf[i] = 0;
298                                 break;
299                         }
300                 }
301                 up(&pool->sem);
302                 if (buf)
303                         return buf;
304         }
305
306         /* other size or not found in pool */
307         return __get_dma_pages(GFP_KERNEL, order);
308 }
309
310 static void dsp_mem_free_pages(unsigned long buf, unsigned int order)
311 {
312         struct kmem_pool *pool;
313         struct page *page, *ps, *pe;
314         int i;
315
316         ps = virt_to_page(buf);
317         pe = virt_to_page(buf + (1 << (PAGE_SHIFT + order)));
318         for (page = ps; page < pe; page++) {
319                 ClearPageReserved(page);
320         }
321
322         /*
323          * return buffer to kmem_pool or paging system
324          */
325         switch (order) {
326                 case ORDER_1MB:
327                         pool = &kmem_pool_1M;
328                         break;
329                 case ORDER_64KB:
330                         pool = &kmem_pool_64K;
331                         break;
332                 default:
333                         pool = NULL;
334         }
335
336         if (pool) {
337                 down(&pool->sem);
338                 for (i = 0; i < pool->count; i++) {
339                         if (!pool->buf[i]) {
340                                 pool->buf[i] = buf;
341                                 buf = 0;
342                         }
343                 }
344                 up(&pool->sem);
345         }
346
347         /* other size or pool is filled */
348         if (buf)
349                 free_pages(buf, order);
350 }
351
352 /*
353  * ARM MMU operations
354  */
/*
 * exmap_set_armmmu(): map the virtual range [virt, virt+size) onto the
 * physical range starting at phys in the kernel (init_mm) page tables,
 * one page at a time.  Returns 0 on success, -ENOMEM if a PTE page
 * cannot be allocated.
 * NOTE(review): pmdp is computed once before the loop, so this assumes
 * the whole range lies under a single PMD — confirm for large units.
 */
static int exmap_set_armmmu(unsigned long virt, unsigned long phys,
                            unsigned long size)
{
        long off;
        unsigned long sz_left;
        pmd_t *pmdp;
        pte_t *ptep;
        int prot_pmd, prot_pte;

        printk(KERN_DEBUG
               "omapdsp: mapping in ARM MMU, v=0x%08lx, p=0x%08lx, sz=0x%lx\n",
               virt, phys, size);

        /* NOTE(review): prot_pmd is computed but never applied here */
        prot_pmd = PMD_TYPE_TABLE | PMD_DOMAIN(DOMAIN_IO);
        prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_WRITE;

        pmdp = pmd_offset(pgd_offset_k(virt), virt);
        if (pmd_none(*pmdp)) {
                ptep = pte_alloc_one_kernel(&init_mm, 0);
                if (ptep == NULL)
                        return -ENOMEM;
                /* note: two PMDs will be set  */
                pmd_populate_kernel(&init_mm, pmdp, ptep);
        }

        /* constant virt->phys delta: each PTE maps virt to virt + off */
        off = phys - virt;
        for (sz_left = size;
             sz_left >= PAGE_SIZE;
             sz_left -= PAGE_SIZE, virt += PAGE_SIZE) {
                ptep = pte_offset_kernel(pmdp, virt);
                set_pte(ptep, __pte((virt + off) | prot_pte));
        }
        /* callers pass page-aligned sizes; anything left over is a bug */
        if (sz_left)
                BUG();

        return 0;
}
392
/*
 * exmap_clear_armmmu(): remove the kernel page-table entries for the
 * range [virt, virt+size) previously installed by exmap_set_armmmu().
 */
static void exmap_clear_armmmu(unsigned long virt, unsigned long size)
{
        unsigned long sz_left;
        pmd_t *pmdp;
        pte_t *ptep;

        printk(KERN_DEBUG
               "omapdsp: unmapping in ARM MMU, v=0x%08lx, sz=0x%lx\n",
               virt, size);

        for (sz_left = size;
             sz_left >= PAGE_SIZE;
             sz_left -= PAGE_SIZE, virt += PAGE_SIZE) {
                pmdp = pmd_offset(pgd_offset_k(virt), virt);
                ptep = pte_offset_kernel(pmdp, virt);
                pte_clear(&init_mm, virt, ptep);
        }
        /* callers pass page-aligned sizes; anything left over is a bug */
        if (sz_left)
                BUG();
}
413
414 static int exmap_valid(void *vadr, size_t len)
415 {
416         /* exmap_sem should be held before calling this function */
417         int i;
418
419 start:
420         for (i = 0; i < DSPMMU_TLB_LINES; i++) {
421                 void *mapadr;
422                 unsigned long mapsize;
423                 struct exmap_tbl *ent = &exmap_tbl[i];
424
425                 if (!ent->valid)
426                         continue;
427                 mapadr = (void *)ent->vadr;
428                 mapsize = 1 << (ent->order + PAGE_SHIFT);
429                 if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
430                         if (vadr + len <= mapadr + mapsize) {
431                                 /* this map covers whole address. */
432                                 return 1;
433                         } else {
434                                 /*
435                                  * this map covers partially.
436                                  * check rest portion.
437                                  */
438                                 len -= mapadr + mapsize - vadr;
439                                 vadr = mapadr + mapsize;
440                                 goto start;
441                         }
442                 }
443         }
444
445         return 0;
446 }
447
448 enum dsp_mem_type_e dsp_mem_type(void *vadr, size_t len)
449 {
450         void *ds = (void *)daram_base;
451         void *de = (void *)daram_base + daram_size;
452         void *ss = (void *)saram_base;
453         void *se = (void *)saram_base + saram_size;
454         int ret;
455
456         if ((vadr >= ds) && (vadr < de)) {
457                 if (vadr + len > de)
458                         return MEM_TYPE_CROSSING;
459                 else
460                         return MEM_TYPE_DARAM;
461         } else if ((vadr >= ss) && (vadr < se)) {
462                 if (vadr + len > se)
463                         return MEM_TYPE_CROSSING;
464                 else
465                         return MEM_TYPE_SARAM;
466         } else {
467                 down_read(&exmap_sem);
468                 if (exmap_valid(vadr, len))
469                         ret = MEM_TYPE_EXTERN;
470                 else
471                         ret = MEM_TYPE_NONE;
472                 up_read(&exmap_sem);
473                 return ret;
474         }
475 }
476
477 int dsp_address_validate(void *p, size_t len, char *fmt, ...)
478 {
479         if (dsp_mem_type(p, len) <= 0) {
480                 if (fmt != NULL) {
481                         char s[64];
482                         va_list args;
483
484                         va_start(args, fmt);
485                         vsprintf(s, fmt, args);
486                         va_end(args);
487                         printk(KERN_ERR
488                                "omapdsp: %s address(0x%p) and size(0x%x) is "
489                                "not valid!\n"
490                                "         (crossing different type of memories, or \n"
491                                "          external memory space where no "
492                                "actual memory is mapped)\n",
493                                s, p, len);
494                 }
495                 return -1;
496         }
497
498         return 0;
499 }
500
501 /*
502  * exmap_use(), unuse(): 
503  * when the mapped area is exported to user space with mmap,
504  * the usecount is incremented.
505  * while the usecount > 0, that area can't be released.
506  */
507 void exmap_use(void *vadr, size_t len)
508 {
509         int i;
510
511         down_write(&exmap_sem);
512         for (i = 0; i < DSPMMU_TLB_LINES; i++) {
513                 void *mapadr;
514                 unsigned long mapsize;
515                 struct exmap_tbl *ent = &exmap_tbl[i];
516
517                 if (!ent->valid)
518                         continue;
519                 mapadr = (void *)ent->vadr;
520                 mapsize = 1 << (ent->order + PAGE_SHIFT);
521                 if ((vadr + len > mapadr) && (vadr < mapadr + mapsize)) {
522                         ent->usecount++;
523                 }
524         }
525         up_write(&exmap_sem);
526 }
527
528 void exmap_unuse(void *vadr, size_t len)
529 {
530         int i;
531
532         down_write(&exmap_sem);
533         for (i = 0; i < DSPMMU_TLB_LINES; i++) {
534                 void *mapadr;
535                 unsigned long mapsize;
536                 struct exmap_tbl *ent = &exmap_tbl[i];
537
538                 if (!ent->valid)
539                         continue;
540                 mapadr = (void *)ent->vadr;
541                 mapsize = 1 << (ent->order + PAGE_SHIFT);
542                 if ((vadr + len > mapadr) && (vadr < mapadr + mapsize)) {
543                         ent->usecount--;
544                 }
545         }
546         up_write(&exmap_sem);
547 }
548
549 /*
550  * dsp_virt_to_phys()
551  * returns physical address, and sets len to valid length
552  */
553 unsigned long dsp_virt_to_phys(void *vadr, size_t *len)
554 {
555         int i;
556
557         if (is_dsp_internal_mem(vadr)) {
558                 /* DSRAM or SARAM */
559                 *len = dspmem_base + dspmem_size - (unsigned long)vadr;
560                 return (unsigned long)vadr;
561         }
562
563         /* EXRAM */
564         for (i = 0; i < DSPMMU_TLB_LINES; i++) {
565                 void *mapadr;
566                 unsigned long mapsize;
567                 struct exmap_tbl *ent = &exmap_tbl[i];
568
569                 if (!ent->valid)
570                         continue;
571                 mapadr = (void *)ent->vadr;
572                 mapsize = 1 << (ent->order + PAGE_SHIFT);
573                 if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
574                         *len = mapadr + mapsize - vadr;
575                         return __pa(ent->buf) + vadr - mapadr;
576                 }
577         }
578
579         /* valid mapping not found */
580         return 0;
581 }
582
583 /*
584  * DSP MMU operations
585  */
586 static __inline__ unsigned short get_cam_l_va_mask(unsigned short slst)
587 {
588         switch (slst) {
589         case DSPMMU_CAM_L_SLST_1MB:
590                 return DSPMMU_CAM_L_VA_TAG_L1_MASK |
591                        DSPMMU_CAM_L_VA_TAG_L2_MASK_1MB;
592         case DSPMMU_CAM_L_SLST_64KB:
593                 return DSPMMU_CAM_L_VA_TAG_L1_MASK |
594                        DSPMMU_CAM_L_VA_TAG_L2_MASK_64KB;
595         case DSPMMU_CAM_L_SLST_4KB:
596                 return DSPMMU_CAM_L_VA_TAG_L1_MASK |
597                        DSPMMU_CAM_L_VA_TAG_L2_MASK_4KB;
598         case DSPMMU_CAM_L_SLST_1KB:
599                 return DSPMMU_CAM_L_VA_TAG_L1_MASK |
600                        DSPMMU_CAM_L_VA_TAG_L2_MASK_1KB;
601         }
602         return 0;
603 }
604
/*
 * get_tlb_lock(): read the DSP MMU lock register and decode the lock
 * base and victim fields; either out-pointer may be NULL.
 */
static __inline__ void get_tlb_lock(int *base, int *victim)
{
        unsigned short lock = omap_readw(DSPMMU_LOCK);
        if (base != NULL)
                *base = (lock & DSPMMU_LOCK_BASE_MASK)
                        >> DSPMMU_LOCK_BASE_SHIFT;
        if (victim != NULL)
                *victim = (lock & DSPMMU_LOCK_VICTIM_MASK)
                          >> DSPMMU_LOCK_VICTIM_SHIFT;
}
615
/*
 * set_tlb_lock(): program the DSP MMU lock register with a new lock
 * base (entries below it are locked) and victim pointer.
 */
static __inline__ void set_tlb_lock(int base, int victim)
{
        omap_writew((base   << DSPMMU_LOCK_BASE_SHIFT) |
                    (victim << DSPMMU_LOCK_VICTIM_SHIFT), DSPMMU_LOCK);
}
621
/*
 * __read_tlb(): read one DSP TLB entry by pointing the victim at it
 * and issuing a TLB-read command; any out-pointer may be NULL to skip
 * that half of the CAM/RAM pair.
 */
static __inline__ void __read_tlb(unsigned short lbase, unsigned short victim,
                                  unsigned short *cam_h, unsigned short *cam_l,
                                  unsigned short *ram_h, unsigned short *ram_l)
{
        /* set victim */
        set_tlb_lock(lbase, victim);

        /* read a TLB entry */
        omap_writew(DSPMMU_LD_TLB_RD, DSPMMU_LD_TLB);

        if (cam_h != NULL)
                *cam_h = omap_readw(DSPMMU_READ_CAM_H);
        if (cam_l != NULL)
                *cam_l = omap_readw(DSPMMU_READ_CAM_L);
        if (ram_h != NULL)
                *ram_h = omap_readw(DSPMMU_READ_RAM_H);
        if (ram_l != NULL)
                *ram_l = omap_readw(DSPMMU_READ_RAM_L);
}
641
/*
 * __load_tlb(): program the CAM/RAM registers with a new translation,
 * flush any stale entry for that address, then load the entry into the
 * TLB at the current victim position.
 */
static __inline__ void __load_tlb(unsigned short cam_h, unsigned short cam_l,
                                  unsigned short ram_h, unsigned short ram_l)
{
        omap_writew(cam_h, DSPMMU_CAM_H);
        omap_writew(cam_l, DSPMMU_CAM_L);
        omap_writew(ram_h, DSPMMU_RAM_H);
        omap_writew(ram_l, DSPMMU_RAM_L);

        /* flush the entry */
        dsp_mmu_flush();

        /* load a TLB entry */
        omap_writew(DSPMMU_LD_TLB_LD, DSPMMU_LD_TLB);
}
656
657 static int dsp_mmu_load_tlb(unsigned long vadr, unsigned long padr,
658                             unsigned short slst, unsigned short prsvd,
659                             unsigned short ap)
660 {
661         int lbase, victim;
662         unsigned short cam_l_va_mask;
663
664         clk_use(dsp_ck_handle);
665
666         get_tlb_lock(&lbase, NULL);
667         for (victim = 0; victim < lbase; victim++) {
668                 unsigned short cam_l;
669
670                 /* read a TLB entry */
671                 __read_tlb(lbase, victim, NULL, &cam_l, NULL, NULL);
672                 if (!(cam_l & DSPMMU_CAM_L_V))
673                         goto found_victim;
674         }
675         set_tlb_lock(lbase, victim);
676
677 found_victim:
678         /* The last (31st) entry cannot be locked? */
679         if (victim == 31) {
680                 printk(KERN_ERR "omapdsp: TLB is full.\n");
681                 return -EBUSY;
682         }
683
684         cam_l_va_mask = get_cam_l_va_mask(slst);
685         if (vadr &
686             ~(DSPMMU_CAM_H_VA_TAG_H_MASK << 22 |
687               (unsigned long)cam_l_va_mask << 6)) {
688                 printk(KERN_ERR
689                        "omapdsp: mapping vadr (0x%06lx) is not "
690                        "aligned boundary\n", vadr);
691                 return -EINVAL;
692         }
693
694         __load_tlb(vadr >> 22, (vadr >> 6 & cam_l_va_mask) | prsvd | slst,
695                    padr >> 16, (padr & DSPMMU_RAM_L_RAM_LSB_MASK) | ap);
696
697         /* update lock base */
698         if (victim == lbase)
699                 lbase++;
700         set_tlb_lock(lbase, lbase);
701
702         clk_unuse(dsp_ck_handle);
703         return 0;
704 }
705
/*
 * dsp_mmu_clear_tlb(): flush the locked DSP TLB entries whose decoded
 * CAM virtual address equals vadr, then rewrite the lock base to sit
 * just above the highest remaining valid entry.  Always returns 0.
 */
static int dsp_mmu_clear_tlb(unsigned long vadr)
{
        int lbase;
        int i;
        int max_valid = 0;

        clk_use(dsp_ck_handle);

        get_tlb_lock(&lbase, NULL);
        for (i = 0; i < lbase; i++) {
                unsigned short cam_h, cam_l;
                unsigned short cam_l_va_mask, cam_vld, slst;
                unsigned long cam_va;

                /* read a TLB entry */
                __read_tlb(lbase, i, &cam_h, &cam_l, NULL, NULL);

                cam_vld = cam_l & DSPMMU_CAM_L_V;
                if (!cam_vld)
                        continue;

                /* reconstruct the entry's virtual address from CAM H/L */
                slst = cam_l & DSPMMU_CAM_L_SLST_MASK;
                cam_l_va_mask = get_cam_l_va_mask(slst);
                cam_va = (unsigned long)(cam_h & DSPMMU_CAM_H_VA_TAG_H_MASK) << 22 |
                         (unsigned long)(cam_l & cam_l_va_mask) << 6;

                if (cam_va == vadr)
                        /* flush the entry */
                        dsp_mmu_flush();
                else
                        max_valid = i;
        }

        /* set new lock base */
        /* NOTE(review): when no valid entries remain, max_valid stays 0
         * and the lock base is still set to 1 — confirm this is intended */
        set_tlb_lock(max_valid+1, max_valid+1);

        clk_unuse(dsp_ck_handle);
        return 0;
}
745
/*
 * dsp_mmu_gflush(): global-flush the DSP TLB (invalidating all
 * non-protected entries) and reset the lock base/victim to 1 —
 * presumably keeping entry 0 locked; confirm against the MMU setup.
 */
static void dsp_mmu_gflush(void)
{
        clk_use(dsp_ck_handle);

        __dsp_mmu_gflush();
        set_tlb_lock(1, 1);

        clk_unuse(dsp_ck_handle);
}
755
756 /*
757  * dsp_exmap()
758  *
759  * OMAP_DSP_MEM_IOCTL_EXMAP ioctl calls this function with padr=0.
760  * In this case, the buffer for DSP is allocated in this routine,
761  * then it is mapped.
762  * On the other hand, for example - frame buffer sharing, calls
763  * this function with padr set. It means some known address space
764  * pointed with padr is going to be shared with DSP.
765  */
static int dsp_exmap(unsigned long dspadr, unsigned long padr,
                     unsigned long size, enum exmap_type type)
{
        unsigned short slst;
        void *buf;
        unsigned int order = 0;
        unsigned long unit;
        unsigned int cntnu = 0;         /* 0 for the first chunk, 1 after */
        unsigned long _dspadr = dspadr; /* per-chunk cursors over the request */
        unsigned long _padr = padr;
        void *_vadr = dspbyte_to_virt(dspadr);
        unsigned long _size = size;
        struct exmap_tbl *exmap_ent;
        int status;
        int i;

#define MINIMUM_PAGESZ  SZ_4KB
        /*
         * alignment check
         */
        if (!is_aligned(size, MINIMUM_PAGESZ)) {
                printk(KERN_ERR
                       "omapdsp: size(0x%lx) is not multiple of 4KB.\n", size);
                return -EINVAL;
        }
        if (!is_aligned(dspadr, MINIMUM_PAGESZ)) {
                printk(KERN_ERR
                       "omapdsp: DSP address(0x%lx) is not aligned.\n", dspadr);
                return -EINVAL;
        }
        if (!is_aligned(padr, MINIMUM_PAGESZ)) {
                printk(KERN_ERR
                       "omapdsp: physical address(0x%lx) is not aligned.\n",
                       padr);
                return -EINVAL;
        }

        /* address validity check: must lie in external DSP space and
         * must not touch the DSP init page */
        if ((dspadr < dspmem_size) ||
            (dspadr >= DSPSPACE_SIZE) ||
            ((dspadr + size > DSP_INIT_PAGE) &&
             (dspadr < DSP_INIT_PAGE + PAGE_SIZE))) {
                printk(KERN_ERR
                       "omapdsp: illegal address/size for dsp_exmap().\n");
                return -EINVAL;
        }

        down_write(&exmap_sem);

        /* overlap check against every existing mapping */
        for (i = 0; i < DSPMMU_TLB_LINES; i++) {
                unsigned long mapsize;
                struct exmap_tbl *tmp_ent = &exmap_tbl[i];

                if (!tmp_ent->valid)
                        continue;
                mapsize = 1 << (tmp_ent->order + PAGE_SHIFT);
                if ((_vadr + size > tmp_ent->vadr) &&
                    (_vadr < tmp_ent->vadr + mapsize)) {
                        printk(KERN_ERR "omapdsp: exmap page overlap!\n");
                        up_write(&exmap_sem);
                        return -EINVAL;
                }
        }

        /* map the request one chunk at a time, looping back to 'start'
         * until the remaining size reaches zero */
start:
        buf = NULL;
        /* Are there any free TLB lines?  */
        for (i = 0; i < DSPMMU_TLB_LINES; i++) {
                if (!exmap_tbl[i].valid)
                        goto found_free;
        }
        printk(KERN_ERR "omapdsp: DSP TLB is full.\n");
        status = -EBUSY;
        goto fail;

found_free:
        exmap_ent = &exmap_tbl[i];

        /* choose the largest chunk size allowed by the remaining size
         * and the alignment of both addresses (padr == 0 means "we
         * allocate", so its alignment is not constrained yet) */
        if ((_size >= SZ_1MB) &&
            (is_aligned(_padr, SZ_1MB) || (padr == 0)) &&
            is_aligned(_dspadr, SZ_1MB)) {
                unit = SZ_1MB;
                slst = DSPMMU_CAM_L_SLST_1MB;
                order = ORDER_1MB;
        } else if ((_size >= SZ_64KB) &&
                   (is_aligned(_padr, SZ_64KB) || (padr == 0)) &&
                   is_aligned(_dspadr, SZ_64KB)) {
                unit = SZ_64KB;
                slst = DSPMMU_CAM_L_SLST_64KB;
                order = ORDER_64KB;
        } else /* if (_size >= SZ_4KB) */ {
                unit = SZ_4KB;
                slst = DSPMMU_CAM_L_SLST_4KB;
                order = ORDER_4KB;
        }
#if 0   /* 1KB is not enabled */
        else if (_size >= SZ_1KB) {
                unit = SZ_1KB;
                slst = DSPMMU_CAM_L_SLST_1KB;
                order = XXX;
        }
#endif

        /* buffer allocation (only when no physical address was given) */
        if (type == EXMAP_TYPE_MEM) {
                struct page *page, *ps, *pe;

                buf = (void *)dsp_mem_get_dma_pages(order);
                if (buf == NULL) {
                        status = -ENOMEM;
                        goto fail;
                }
                /* mark the pages as reserved; this is needed for mmap */
                ps = virt_to_page(buf);
                pe = virt_to_page(buf + unit);
                for (page = ps; page < pe; page++) {
                        SetPageReserved(page);
                }
                _padr = __pa(buf);
        }

        /*
         * mapping for ARM MMU:
         * we should not access to the allocated memory through 'buf'
         * since this area should not be cashed.
         */
        status = exmap_set_armmmu((unsigned long)_vadr, _padr, unit);
        if (status < 0)
                goto fail;

        /* loading DSP TLB entry */
        status = dsp_mmu_load_tlb(_dspadr, _padr, slst, 0, DSPMMU_RAM_L_AP_FA);
        if (status < 0) {
                exmap_clear_armmmu((unsigned long)_vadr, unit);
                goto fail;
        }

        /* record the chunk in the software shadow table */
        exmap_ent->buf      = buf;
        exmap_ent->vadr     = _vadr;
        exmap_ent->order    = order;
        exmap_ent->valid    = 1;
        exmap_ent->cntnu    = cntnu;
        exmap_ent->type     = type;
        exmap_ent->usecount = 0;

        if ((_size -= unit) == 0) {     /* normal completion */
                up_write(&exmap_sem);
                return size;
        }

        /* advance the cursors and map the next chunk; _padr stays 0
         * when we are allocating (padr == 0) so each chunk allocates */
        _dspadr += unit;
        _vadr   += unit;
        _padr = padr ? _padr + unit : 0;
        cntnu = 1;
        goto start;

fail:
        /* free the current chunk's buffer (not yet in the table) and
         * unmap any chunks recorded so far for this request */
        up_write(&exmap_sem);
        if (buf)
                dsp_mem_free_pages((unsigned long)buf, order);
        dsp_exunmap(dspadr);
        return status;
}
930
931 static unsigned long unmap_free_arm(struct exmap_tbl *ent)
932 {
933         unsigned long size;
934
935         /* clearing ARM MMU */
936         size = 1 << (ent->order + PAGE_SHIFT);
937         exmap_clear_armmmu((unsigned long)ent->vadr, size);
938
939         /* freeing allocated memory */
940         if (ent->type == EXMAP_TYPE_MEM) {
941                 dsp_mem_free_pages((unsigned long)ent->buf, ent->order);
942                 printk(KERN_DEBUG
943                        "omapdsp: freeing 0x%lx bytes @ adr 0x%8p\n",
944                        size, ent->buf);
945         }
946
947         return size;
948 }
949
/*
 * dsp_exunmap(): undo a dsp_exmap() for the given DSP byte address.
 *
 * A single exmap request may have been split over several TLB lines
 * (entries chained with ->cntnu set); this walks the whole group,
 * clearing the DSP TLB, the ARM mapping and the backing pages for
 * each member.  Returns the total number of bytes unmapped, or
 * -EINVAL if the address is unknown, still in use, or the group
 * chain is inconsistent.
 */
static int dsp_exunmap(unsigned long dspadr)
{
        void *vadr;
        unsigned long size;
        int total = 0;
        struct exmap_tbl *ent;
        int idx;

        /* locate the exmap entry whose ARM-side vadr matches dspadr */
        vadr = dspbyte_to_virt(dspadr);
        down_write(&exmap_sem);
        for (idx = 0; idx < DSPMMU_TLB_LINES; idx++) {
                ent = &exmap_tbl[idx];
                if (!ent->valid)
                        continue;
                if (ent->vadr == vadr)
                        goto found_map;
        }
        up_write(&exmap_sem);
        printk(KERN_WARNING
               "omapdsp: address %06lx not found in exmap_tbl.\n", dspadr);
        return -EINVAL;

found_map:
        /* refuse to unmap while someone still holds a reference */
        if (ent->usecount > 0) {
                printk(KERN_ERR
                       "omapdsp: exmap reference count is not 0.\n"
                       "   idx=%d, vadr=%p, order=%d, usecount=%d\n",
                       idx, ent->vadr, ent->order, ent->usecount);
                up_write(&exmap_sem);
                return -EINVAL;
        }
        /* clearing DSP TLB entry */
        dsp_mmu_clear_tlb(dspadr);

        /* clear ARM MMU and free buffer */
        size = unmap_free_arm(ent);
        ent->valid = 0;
        total += size;

        /* we don't free PTEs */

        /* flush TLB */
        flush_tlb_kernel_range((unsigned long)vadr, (unsigned long)vadr + size);

        /* check if next mapping is in same group */
        if (++idx == DSPMMU_TLB_LINES)
                goto up_out;    /* normal completion */
        ent = &exmap_tbl[idx];
        if (!ent->valid || !ent->cntnu)
                goto up_out;    /* normal completion */

        /* group continues: the next entry must map the adjacent range */
        dspadr += size;
        vadr   += size;
        if (ent->vadr == vadr)
                goto found_map; /* continue */

        printk(KERN_ERR
               "omapdsp: illegal exmap_tbl grouping!\n"
               "expected vadr = %p, exmap_tbl[%d].vadr = %p\n",
               vadr, idx, ent->vadr);
        up_write(&exmap_sem);
        return -EINVAL;

up_out:
        up_write(&exmap_sem);
        return total;
}
1017
1018 static void exmap_flush(void)
1019 {
1020         struct exmap_tbl *ent;
1021         int i;
1022
1023         down_write(&exmap_sem);
1024
1025         /* clearing DSP TLB entry */
1026         dsp_mmu_gflush();
1027
1028         /* exmap_tbl[0] should be preserved */
1029         for (i = 1; i < DSPMMU_TLB_LINES; i++) {
1030                 ent = &exmap_tbl[i];
1031                 if (ent->valid) {
1032                         unmap_free_arm(ent);
1033                         ent->valid = 0;
1034                 }
1035         }
1036
1037         /* flush TLB */
1038         flush_tlb_kernel_range(dspmem_base + dspmem_size,
1039                                dspmem_base + DSPSPACE_SIZE);
1040         up_write(&exmap_sem);
1041 }
1042
1043 #ifdef CONFIG_OMAP_DSP_FBEXPORT
1044 #ifndef CONFIG_FB
1045 #error You configured OMAP_DSP_FBEXPORT, but FB was not configured!
1046 #endif /* CONFIG_FB */
1047
/*
 * dsp_fbexport(): map the first registered frame buffer into the DSP
 * address space so the DSP can render into it directly.
 *
 * The frame buffer's physical range is aligned down/up to 4kB, the
 * requested DSP address is lined up with the physical address for the
 * chosen mapping page size, and the range is mapped via dsp_exmap()
 * as an EXMAP_TYPE_FB entry.  *dspadr is updated with the address
 * actually used.  Returns the dsp_exmap() result (mapped TLB entry
 * count) or a negative error.
 */
static int dsp_fbexport(unsigned long *dspadr)
{
        unsigned long dspadr_actual;
        unsigned long padr_sys, padr, fbsz_sys, fbsz;
        int cnt;

        printk(KERN_DEBUG "omapdsp: frame buffer export\n");

        if (num_registered_fb == 0) {
                printk(KERN_INFO "omapdsp: frame buffer not registered.\n");
                return -EINVAL;
        }
        if (num_registered_fb != 1) {
                printk(KERN_INFO
                       "omapdsp: %d frame buffers found. we use first one.\n",
                       num_registered_fb);
        }
        padr_sys = registered_fb[0]->fix.smem_start;
        fbsz_sys = registered_fb[0]->fix.smem_len;
        if (fbsz_sys == 0) {
                printk(KERN_ERR
                       "omapdsp: framebuffer doesn't seem to be configured "
                       "correctly! (size=0)\n");
                return -EINVAL;
        }

        /*
         * align padr and fbsz to 4kB boundary
         * (should be noted to the user afterwards!)
         */
        padr = padr_sys & ~(SZ_4KB-1);
        fbsz = (fbsz_sys + padr_sys - padr + SZ_4KB-1) & ~(SZ_4KB-1);

        /* line up dspadr offset with padr */
        dspadr_actual =
                (fbsz > SZ_1MB) ?  lineup_offset(*dspadr, padr, SZ_1MB-1) :
                (fbsz > SZ_64KB) ? lineup_offset(*dspadr, padr, SZ_64KB-1) :
                /* (fbsz > SZ_4KB) ? */ *dspadr;
        if (dspadr_actual != *dspadr)
                printk(KERN_DEBUG
                       "omapdsp: actual dspadr for FBEXPORT = %08lx\n",
                       dspadr_actual);
        *dspadr = dspadr_actual;

        cnt = dsp_exmap(dspadr_actual, padr, fbsz, EXMAP_TYPE_FB);
        if (cnt < 0) {
                printk(KERN_ERR "omapdsp: exmap failure.\n");
                return cnt;
        }

        /* warn loudly: the DSP can now reach memory outside the fb proper */
        if ((padr != padr_sys) || (fbsz != fbsz_sys)) {
                printk(KERN_WARNING
"  !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
"  !!  screen base address or size is not aligned in 4kB:           !!\n"
"  !!    actual screen  adr = %08lx, size = %08lx             !!\n"
"  !!    exporting      adr = %08lx, size = %08lx             !!\n"
"  !!  Make sure that the framebuffer is allocated with 4kB-order!  !!\n"
"  !!  Otherwise DSP can corrupt the kernel memory.                 !!\n"
"  !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n",
                       padr_sys, fbsz_sys, padr, fbsz);
        }

        /* increase the DMA priority */
        set_emiff_dma_prio(15);

        return cnt;
}
1115
1116 #else /* CONFIG_OMAP_DSP_FBEXPORT */
1117
/* Stub used when CONFIG_OMAP_DSP_FBEXPORT is not enabled. */
static int dsp_fbexport(unsigned long *dspadr)
{
        printk(KERN_ERR "omapdsp: FBEXPORT function is not enabled.\n");
        return -EINVAL;
}
1123
1124 #endif /* CONFIG_OMAP_DSP_FBEXPORT */
1125
1126 static int dsp_mmu_itack(void)
1127 {
1128         unsigned long dspadr;
1129
1130         printk(KERN_INFO "omapdsp: sending DSP MMU interrupt ack.\n");
1131         if (!dsp_err_mmu_isset()) {
1132                 printk(KERN_ERR "omapdsp: DSP MMU error has not been set.\n");
1133                 return -EINVAL;
1134         }
1135         dspadr = dsp_fault_adr & ~(SZ_4K-1);
1136         dsp_exmap(dspadr, 0, SZ_4K, EXMAP_TYPE_MEM);    /* FIXME: reserve TLB entry for this */
1137         printk(KERN_INFO "omapdsp: falling into recovery runlevel...\n");
1138         dsp_runlevel(OMAP_DSP_MBCMD_RUNLEVEL_RECOVERY);
1139         __dsp_mmu_itack();
1140         udelay(100);
1141         dsp_exunmap(dspadr);
1142         dsp_err_mmu_clear();
1143         return 0;
1144 }
1145
/*
 * dsp_mmu_init(): (re)initialize the DSP MMU.
 *
 * Resets and re-enables the MMU, maps the DSP vector page into both
 * the ARM and DSP address spaces, and installs it as a preserved,
 * fully-accessible TLB entry.  The exact ordering (disable, delay,
 * enable, then load entry 0) matters for the hardware.
 */
static void dsp_mmu_init(void)
{
        unsigned long phys;
        void *virt;

        /* keep the DSP clock running while poking MMU registers */
        clk_use(dsp_ck_handle);
        down_write(&exmap_sem);

        dsp_mmu_disable();      /* clear all */
        udelay(100);
        dsp_mmu_enable();

        /* mapping for ARM MMU */
        phys = __pa(dspvect_page);
        virt = dspbyte_to_virt(DSP_INIT_PAGE);  /* 0xe0fff000 */
        exmap_set_armmmu((unsigned long)virt, phys, PAGE_SIZE);
        /* entry 0 describes the vector page and is never flushed */
        exmap_tbl[0].buf      = dspvect_page;
        exmap_tbl[0].vadr     = virt;
        exmap_tbl[0].usecount = 0;
        exmap_tbl[0].order    = 0;
        exmap_tbl[0].valid    = 1;
        exmap_tbl[0].cntnu    = 0;

        /* DSP TLB initialization */
        set_tlb_lock(0, 0);
        /* preserved, full access */
        dsp_mmu_load_tlb(DSP_INIT_PAGE, phys, DSPMMU_CAM_L_SLST_4KB,
                         DSPMMU_CAM_L_P, DSPMMU_RAM_L_AP_FA);
        up_write(&exmap_sem);
        clk_unuse(dsp_ck_handle);
}
1177
/* Flush all external mappings and switch the DSP MMU off. */
static void dsp_mmu_shutdown(void)
{
        exmap_flush();
        dsp_mmu_disable();      /* clear all */
}
1183
1184 /*
1185  * intmem_enable() / disable():
1186  * if the address is in DSP internal memories,
1187  * we send PM mailbox commands so that DSP DMA domain won't go in idle
1188  * when ARM is accessing to those memories.
1189  */
1190 static int intmem_enable(void)
1191 {
1192         int ret = 0;
1193
1194         if (dsp_is_ready())
1195                 ret = dsp_mbsend(MBCMD(PM), OMAP_DSP_MBCMD_PM_ENABLE,
1196                                  DSPREG_ICR_DMA_IDLE_DOMAIN);
1197
1198         return ret;
1199 }
1200
1201 static void intmem_disable(void) {
1202         if (dsp_is_ready())
1203                 dsp_mbsend(MBCMD(PM), OMAP_DSP_MBCMD_PM_DISABLE,
1204                            DSPREG_ICR_DMA_IDLE_DOMAIN);
1205 }
1206
1207 /*
1208  * dsp_mem_enable() / disable()
1209  */
/* nesting count of outstanding internal-memory enables (see dsp_mem_enable) */
int intmem_usecount;
1211
/*
 * dsp_mem_enable(): prepare for ARM access to DSP memory at 'adr'.
 *
 * Internal memory: first enabler requests the memory from the DSP
 * (refcounted via intmem_usecount).  External memory: takes exmap_sem
 * for reading so the mapping cannot be torn down while in use.
 * Must be balanced by dsp_mem_disable() with the same address class.
 */
int dsp_mem_enable(void *adr)
{
        int ret = 0;

        if (is_dsp_internal_mem(adr)) {
                if (intmem_usecount++ == 0)
                        ret = omap_dsp_request_mem();
        } else
                /* hold the exmap read lock until dsp_mem_disable() */
                down_read(&exmap_sem);

        return ret;
}
1224
/*
 * dsp_mem_disable(): release an access obtained with dsp_mem_enable().
 * Internal memory: last disabler releases the memory back to the DSP.
 * External memory: drops the exmap_sem read lock.
 */
void dsp_mem_disable(void *adr)
{
        if (is_dsp_internal_mem(adr)) {
                if (--intmem_usecount == 0)
                        omap_dsp_release_mem();
        } else
                up_read(&exmap_sem);
}
1233
1234 /* for safety */
1235 void dsp_mem_usecount_clear(void)
1236 {
1237         if (intmem_usecount != 0) {
1238                 printk(KERN_WARNING
1239                        "omapdsp: unbalanced memory request/release detected.\n"
1240                        "         intmem_usecount is not zero at where "
1241                        "it should be! ... fixed to be zero.\n");
1242                 intmem_usecount = 0;
1243                 omap_dsp_release_mem();
1244         }
1245 }
1246
1247 /*
1248  * dsp_mem file operations
1249  */
/*
 * llseek for the dsp_mem device.
 * Supports absolute (whence 0 / SEEK_SET) and relative (whence 1 /
 * SEEK_CUR) seeks; SEEK_END is rejected with -EINVAL.
 * NOTE(review): no check that the resulting f_pos is non-negative or
 * within the DSP address space — presumably the read/write paths
 * validate the address; confirm before relying on it.
 */
static loff_t dsp_mem_lseek(struct file *file, loff_t offset, int orig)
{
        loff_t ret;

        /* serialize f_pos updates against concurrent seekers */
        down(&file->f_dentry->d_inode->i_sem);
        switch (orig) {
        case 0:
                /* SEEK_SET: absolute position */
                file->f_pos = offset;
                ret = file->f_pos;
                break;
        case 1:
                /* SEEK_CUR: relative to current position */
                file->f_pos += offset;
                ret = file->f_pos;
                break;
        default:
                ret = -EINVAL;
        }
        up(&file->f_dentry->d_inode->i_sem);
        return ret;
}
1270
1271 static ssize_t intmem_read(struct file *file, char *buf, size_t count,
1272                            loff_t *ppos)
1273 {
1274         unsigned long p = *ppos;
1275         void *vadr = dspbyte_to_virt(p);
1276         ssize_t size = dspmem_size;
1277         ssize_t read;
1278
1279         if (p >= size)
1280                 return 0;
1281         clk_use(api_ck_handle);
1282         read = count;
1283         if (count > size - p)
1284                 read = size - p;
1285         if (copy_to_user(buf, vadr, read)) {
1286                 read = -EFAULT;
1287                 goto out;
1288         }
1289         *ppos += read;
1290 out:
1291         clk_unuse(api_ck_handle);
1292         return read;
1293 }
1294
1295 static ssize_t exmem_read(struct file *file, char *buf, size_t count,
1296                           loff_t *ppos)
1297 {
1298         unsigned long p = *ppos;
1299         void *vadr = dspbyte_to_virt(p);
1300
1301         if (!exmap_valid(vadr, count)) {
1302                 printk(KERN_ERR
1303                        "omapdsp: DSP address %08lx / size %08x "
1304                        "is not valid!\n", p, count);
1305                 return -EFAULT;
1306         }
1307         if (count > DSPSPACE_SIZE - p)
1308                 count = DSPSPACE_SIZE - p;
1309         if (copy_to_user(buf, vadr, count))
1310                 return -EFAULT;
1311         *ppos += count;
1312
1313         return count;
1314 }
1315
1316 static ssize_t dsp_mem_read(struct file *file, char *buf, size_t count,
1317                             loff_t *ppos)
1318 {
1319         int ret;
1320         void *vadr = dspbyte_to_virt(*(unsigned long *)ppos);
1321
1322         if (dsp_mem_enable(vadr) < 0)
1323                 return -EBUSY;
1324         if (is_dspbyte_internal_mem(*ppos))
1325                 ret = intmem_read(file, buf, count, ppos);
1326         else
1327                 ret = exmem_read(file, buf, count, ppos);
1328         dsp_mem_disable(vadr);
1329
1330         return ret;
1331 }
1332
1333 static ssize_t intmem_write(struct file *file, const char *buf, size_t count,
1334                             loff_t *ppos)
1335 {
1336         unsigned long p = *ppos;
1337         void *vadr = dspbyte_to_virt(p);
1338         ssize_t size = dspmem_size;
1339         ssize_t written;
1340
1341         if (p >= size)
1342                 return 0;
1343         clk_use(api_ck_handle);
1344         written = count;
1345         if (count > size - p)
1346                 written = size - p;
1347         if (copy_from_user(vadr, buf, written)) {
1348                 written = -EFAULT;
1349                 goto out;
1350         }
1351         *ppos += written;
1352 out:
1353         clk_unuse(api_ck_handle);
1354         return written;
1355 }
1356
1357 static ssize_t exmem_write(struct file *file, const char *buf, size_t count,
1358                            loff_t *ppos)
1359 {
1360         unsigned long p = *ppos;
1361         void *vadr = dspbyte_to_virt(p);
1362
1363         if (!exmap_valid(vadr, count)) {
1364                 printk(KERN_ERR
1365                        "omapdsp: DSP address %08lx / size %08x "
1366                        "is not valid!\n", p, count);
1367                 return -EFAULT;
1368         }
1369         if (count > DSPSPACE_SIZE - p)
1370                 count = DSPSPACE_SIZE - p;
1371         if (copy_from_user(vadr, buf, count))
1372                 return -EFAULT;
1373         *ppos += count;
1374
1375         return count;
1376 }
1377
1378 static ssize_t dsp_mem_write(struct file *file, const char *buf, size_t count,
1379                              loff_t *ppos)
1380 {
1381         int ret;
1382         void *vadr = dspbyte_to_virt(*(unsigned long *)ppos);
1383
1384         if (dsp_mem_enable(vadr) < 0)
1385                 return -EBUSY;
1386         if (is_dspbyte_internal_mem(*ppos))
1387                 ret = intmem_write(file, buf, count, ppos);
1388         else
1389                 ret = exmem_write(file, buf, count, ppos);
1390         dsp_mem_disable(vadr);
1391
1392         return ret;
1393 }
1394
/*
 * ioctl handler for the dsp_mem device: MMU (re)init, explicit
 * map/unmap/flush of external memory, frame-buffer export, MMU fault
 * acknowledge, and kernel-memory pool reserve/release.
 * NOTE(review): the (void *)arg copies rely on copy_from/to_user for
 * access checking; no separate access_ok() is performed here.
 */
static int dsp_mem_ioctl(struct inode *inode, struct file *file,
                         unsigned int cmd, unsigned long arg)
{
        switch (cmd) {
        case OMAP_DSP_MEM_IOCTL_MMUINIT:
                /* reset and re-program the DSP MMU */
                dsp_mmu_init();
                return 0;

        case OMAP_DSP_MEM_IOCTL_EXMAP:
                {
                        struct omap_dsp_mapinfo mapinfo;
                        if (copy_from_user(&mapinfo, (void *)arg,
                                           sizeof(mapinfo)))
                                return -EFAULT;
                        /* padr == 0: kernel pages are allocated internally */
                        return dsp_exmap(mapinfo.dspadr, 0, mapinfo.size,
                                         EXMAP_TYPE_MEM);
                }

        case OMAP_DSP_MEM_IOCTL_EXUNMAP:
                /* arg is the DSP byte address directly, not a pointer */
                return dsp_exunmap((unsigned long)arg);

        case OMAP_DSP_MEM_IOCTL_EXMAP_FLUSH:
                exmap_flush();
                return 0;

        case OMAP_DSP_MEM_IOCTL_FBEXPORT:
                {
                        unsigned long dspadr;
                        int ret;
                        if (copy_from_user(&dspadr, (void *)arg, sizeof(long)))
                                return -EFAULT;
                        ret = dsp_fbexport(&dspadr);
                        /* write back the DSP address actually used */
                        if (copy_to_user((void *)arg, &dspadr, sizeof(long)))
                                return -EFAULT;
                        return ret;
                }

        case OMAP_DSP_MEM_IOCTL_MMUITACK:
                return dsp_mmu_itack();

        case OMAP_DSP_MEM_IOCTL_KMEM_RESERVE:
                {
                        unsigned long size;
                        if (copy_from_user(&size, (void *)arg, sizeof(long)))
                                return -EFAULT;
                        return dsp_kmem_reserve(size);
                }

        case OMAP_DSP_MEM_IOCTL_KMEM_RELEASE:
                dsp_kmem_release();
                return 0;

        default:
                return -ENOIOCTLCMD;
        }
}
1451
/* mmap of DSP memory is not implemented yet. */
static int dsp_mem_mmap(struct file *file, struct vm_area_struct *vma)
{
        /*
         * FIXME
         */
        return -ENOSYS;
}
1459
1460 static int dsp_mem_open(struct inode *inode, struct file *file)
1461 {
1462         if (!capable(CAP_SYS_RAWIO))
1463                 return -EPERM;
1464
1465         return 0;
1466 }
1467
/* Nothing to tear down on close. */
static int dsp_mem_release(struct inode *inode, struct file *file)
{
        return 0;
}
1472
1473 /*
1474  * sysfs files
1475  */
/*
 * sysfs "mmu" show: dump all 32 DSP MMU TLB entries in human-readable
 * form (virtual address, physical address, page size, access
 * permission, preserved/valid flags), plus the lock-base and victim
 * indices.  Reading the TLB disturbs the victim register, so it is
 * restored afterwards.
 */
static ssize_t mmu_show(struct device *dev, struct device_attribute *attr,
                        char *buf)
{
        int len;
        int lbase, victim;
        int i;

        clk_use(dsp_ck_handle);
        down_read(&exmap_sem);

        /* remember lock base / victim so they can be restored below */
        get_tlb_lock(&lbase, &victim);

        len = sprintf(buf, "p: preserved,  v: valid\n"
                           "ety       cam_va     ram_pa   sz ap\n");
                        /* 00: p v 0x300000 0x10171800 64KB FA */
        for (i = 0; i < 32; i++) {
                unsigned short cam_h, cam_l, ram_h, ram_l;
                unsigned short cam_l_va_mask, prsvd, cam_vld, slst;
                unsigned long cam_va;
                unsigned short ram_l_ap;
                unsigned long ram_pa;
                char *pgsz_str, *ap_str;

                /* read a TLB entry */
                __read_tlb(lbase, i, &cam_h, &cam_l, &ram_h, &ram_l);

                /* decode page size from the CAM slice size field */
                slst = cam_l & DSPMMU_CAM_L_SLST_MASK;
                cam_l_va_mask = get_cam_l_va_mask(slst);
                pgsz_str = (slst == DSPMMU_CAM_L_SLST_1MB) ? " 1MB":
                           (slst == DSPMMU_CAM_L_SLST_64KB)? "64KB":
                           (slst == DSPMMU_CAM_L_SLST_4KB) ? " 4KB":
                                                             " 1KB";
                prsvd    = cam_l & DSPMMU_CAM_L_P;
                cam_vld  = cam_l & DSPMMU_CAM_L_V;
                /* decode access permission from the RAM entry */
                ram_l_ap = ram_l & DSPMMU_RAM_L_AP_MASK;
                ap_str = (ram_l_ap == DSPMMU_RAM_L_AP_RO) ? "RO":
                         (ram_l_ap == DSPMMU_RAM_L_AP_FA) ? "FA":
                                                            "NA";
                /* reassemble the split virtual / physical addresses */
                cam_va = (unsigned long)(cam_h & DSPMMU_CAM_H_VA_TAG_H_MASK) << 22 |
                         (unsigned long)(cam_l & cam_l_va_mask) << 6;
                ram_pa = (unsigned long)ram_h << 16 |
                         (ram_l & DSPMMU_RAM_L_RAM_LSB_MASK);

                if (i == lbase)
                        len += sprintf(buf + len, "lock base = %d\n", lbase);
                if (i == victim)
                        len += sprintf(buf + len, "victim    = %d\n", victim);
                /* 00: p v 0x300000 0x10171800 64KB FA */
                len += sprintf(buf + len,
                               "%02d: %c %c 0x%06lx 0x%08lx %s %s\n",
                               i,
                               prsvd   ? 'p' : ' ',
                               cam_vld ? 'v' : ' ',
                               cam_va, ram_pa, pgsz_str, ap_str);
        }

        /* restore victim entry */
        set_tlb_lock(lbase, victim);

        up_read(&exmap_sem);
        clk_unuse(dsp_ck_handle);
        return len;
}
1539
/* read-only sysfs attribute "mmu", backed by mmu_show() */
static struct device_attribute dev_attr_mmu = __ATTR_RO(mmu);
1541
1542 static ssize_t exmap_show(struct device *dev, struct device_attribute *attr,
1543                           char *buf)
1544 {
1545         int len;
1546         int i;
1547
1548         down_read(&exmap_sem);
1549         len = sprintf(buf, "v: valid,  c: cntnu\n"
1550                            "ety           vadr        buf od uc\n");
1551                          /* 00: v c 0xe0300000 0xc0171800  0 */
1552         for (i = 0; i < DSPMMU_TLB_LINES; i++) {
1553                 struct exmap_tbl *ent = &exmap_tbl[i];
1554                 /* 00: v c 0xe0300000 0xc0171800  0 */
1555                 len += sprintf(buf + len, "%02d: %c %c 0x%8p 0x%8p %2d %2d\n",
1556                                i,
1557                                ent->valid ? 'v' : ' ',
1558                                ent->cntnu ? 'c' : ' ',
1559                                ent->vadr, ent->buf, ent->order, ent->usecount);
1560         }
1561
1562         up_read(&exmap_sem);
1563         return len;
1564 }
1565
/* read-only sysfs attribute "exmap", backed by exmap_show() */
static struct device_attribute dev_attr_exmap = __ATTR_RO(exmap);
1567
1568 static ssize_t kmem_pool_show(struct device *dev, char *buf)
1569 {
1570         int count_1M, count_64K, total;
1571
1572         count_1M = kmem_pool_1M.count;
1573         count_64K = kmem_pool_64K.count;
1574         total = count_1M * SZ_1MB + count_64K * SZ_64KB;
1575
1576         return sprintf(buf, "0x%x %d %d\n", total, count_1M, count_64K);
1577 }
1578
/* read-only sysfs attribute "kmem_pool", backed by kmem_pool_show() */
static struct device_attribute dev_attr_kmem_pool = __ATTR_RO(kmem_pool);
1580
1581 /*
1582  * DSP MMU interrupt handler
1583  */
1584
1585 /*
1586  * MMU fault mask:
1587  * We ignore prefetch err.
1588  */
1589 #define MMUFAULT_MASK \
1590         (DSPMMU_FAULT_ST_PERM |\
1591          DSPMMU_FAULT_ST_TLB_MISS |\
1592          DSPMMU_FAULT_ST_TRANS)
/*
 * DSP MMU fault interrupt handler.
 *
 * Reads the fault status and the split (high/low) fault address,
 * records it in dsp_fault_adr, and either:
 *  - ignores the interrupt (fault type masked out, e.g. prefetch err
 *    or the OMAP1710 spurious wakeup case), logging it at debug level;
 *  - if the DSP runtime is up, latches the error for later handling
 *    via dsp_mmu_itack() (calling dsp_exmap() from IRQ context would
 *    crash — see FIXME);
 *  - otherwise acks the fault and resets the DSP.
 */
irqreturn_t dsp_mmu_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
        unsigned short status;
        unsigned short adh, adl;
        unsigned short dp;

        status = omap_readw(DSPMMU_FAULT_ST);
        adh = omap_readw(DSPMMU_FAULT_AD_H);
        adl = omap_readw(DSPMMU_FAULT_AD_L);
        /* DP bit distinguishes data vs program space faults */
        dp = adh & DSPMMU_FAULT_AD_H_DP;
        dsp_fault_adr = MKLONG(adh & DSPMMU_FAULT_AD_H_ADR_MASK, adl);
        /* if the fault is masked, nothing to do */
        if ((status & MMUFAULT_MASK) == 0) {
                printk(KERN_DEBUG "DSP MMU interrupt, but ignoring.\n");
                /*
                 * note: in OMAP1710,
                 * when CACHE + DMA domain gets out of idle in DSP,
                 * MMU interrupt occurs but DSPMMU_FAULT_ST is not set.
                 * in this case, we just ignore the interrupt.
                 */
                if (status) {
                        printk(KERN_DEBUG "%s%s%s%s\n",
                               (status & DSPMMU_FAULT_ST_PREF)?
                                        "  (prefetch err)" : "",
                               (status & DSPMMU_FAULT_ST_PERM)?
                                        "  (permission fault)" : "",
                               (status & DSPMMU_FAULT_ST_TLB_MISS)?
                                        "  (TLB miss)" : "",
                               (status & DSPMMU_FAULT_ST_TRANS) ?
                                        "  (translation fault)": "");
                        printk(KERN_DEBUG
                               "fault address = %s: 0x%06lx\n",
                               dp ? "DATA" : "PROGRAM",
                               dsp_fault_adr);
                }
                return IRQ_HANDLED;
        }

        /* unmasked fault: report each status bit, parenthesized if masked */
        printk(KERN_INFO "DSP MMU interrupt!\n");
        printk(KERN_INFO "%s%s%s%s\n",
               (status & DSPMMU_FAULT_ST_PREF)?
                        (MMUFAULT_MASK & DSPMMU_FAULT_ST_PREF)?
                                "  prefetch err":
                                "  (prefetch err)":
                                "",
               (status & DSPMMU_FAULT_ST_PERM)?
                        (MMUFAULT_MASK & DSPMMU_FAULT_ST_PERM)?
                                "  permission fault":
                                "  (permission fault)":
                                "",
               (status & DSPMMU_FAULT_ST_TLB_MISS)?
                        (MMUFAULT_MASK & DSPMMU_FAULT_ST_TLB_MISS)?
                                "  TLB miss":
                                "  (TLB miss)":
                                "",
               (status & DSPMMU_FAULT_ST_TRANS)?
                        (MMUFAULT_MASK & DSPMMU_FAULT_ST_TRANS)?
                                "  translation fault":
                                "  (translation fault)":
                                "");
        printk(KERN_INFO "fault address = %s: 0x%06lx\n",
               dp ? "DATA" : "PROGRAM",
               dsp_fault_adr);

        if (dsp_is_ready()) {
                /*
                 * If we call dsp_exmap() here,
                 * "kernel BUG at slab.c" occurs.
                 */
                /* FIXME */
                dsp_err_mmu_set(dsp_fault_adr);
        } else {
                /* DSP runtime not up: ack the fault and reset the DSP */
                disable_irq(INT_DSP_MMU);
                __dsp_mmu_itack();
                printk(KERN_INFO "Resetting DSP...\n");
                dsp_cpustat_request(CPUSTAT_RESET);
                enable_irq(INT_DSP_MMU);
                /*
                 * if we enable followings, semaphore lock should be avoided.
                 *
                printk(KERN_INFO "Flushing DSP MMU...\n");
                exmap_flush();
                dsp_mmu_init();
                 */
        }

        return IRQ_HANDLED;
}
1681
1682 /*
1683  *
1684  */
/* file operations for the dsp_mem character device */
struct file_operations dsp_mem_fops = {
        .owner   = THIS_MODULE,
        .llseek  = dsp_mem_lseek,
        .read    = dsp_mem_read,
        .write   = dsp_mem_write,
        .ioctl   = dsp_mem_ioctl,
        .mmap    = dsp_mem_mmap,
        .open    = dsp_mem_open,
        .release = dsp_mem_release,
};
1695
/* Hook the internal-memory enable/disable callbacks into the DSP core. */
void dsp_mem_start(void)
{
        dsp_register_mem_cb(intmem_enable, intmem_disable);
}
1700
/* Unhook the memory callbacks and clear any pending sync state. */
void dsp_mem_stop(void)
{
        memset(&mem_sync, 0, sizeof(struct mem_sync_struct));
        dsp_unregister_mem_cb();
}
1706
/*
 * Module init: clear the exmap table, allocate the DSP vector page,
 * program the MMU, set the idle-boot base and create the sysfs files.
 * Returns 0 on success, -ENOMEM if the vector page cannot be
 * allocated.
 * NOTE(review): device_create_file() return values are ignored —
 * failures there only lose the sysfs files; confirm acceptable.
 */
int __init dsp_mem_init(void)
{
        int i;

        /* no external mappings yet */
        for (i = 0; i < DSPMMU_TLB_LINES; i++) {
                exmap_tbl[i].valid = 0;
        }

        /* one DMA-able page for the DSP vector table */
        dspvect_page = (void *)__get_dma_pages(GFP_KERNEL, 0);
        if (dspvect_page == NULL) {
                printk(KERN_ERR
                       "omapdsp: failed to allocate memory "
                       "for dsp vector table\n");
                return -ENOMEM;
        }
        dsp_mmu_init();
        dsp_set_idle_boot_base(IDLEPG_BASE, IDLEPG_SIZE);

        device_create_file(&dsp_device.dev, &dev_attr_mmu);
        device_create_file(&dsp_device.dev, &dev_attr_exmap);
        device_create_file(&dsp_device.dev, &dev_attr_kmem_pool);

        return 0;
}
1731
/*
 * Module exit: shut the MMU down, release the kernel-memory pool,
 * free the DSP vector page and remove the sysfs files.
 */
void dsp_mem_exit(void)
{
        dsp_mmu_shutdown();
        dsp_kmem_release();

        if (dspvect_page != NULL) {
                unsigned long virt;

                /* keep exmap stable while tearing down the vector page */
                down_read(&exmap_sem);

                virt = (unsigned long)dspbyte_to_virt(DSP_INIT_PAGE);
                flush_tlb_kernel_range(virt, virt + PAGE_SIZE);
                free_page((unsigned long)dspvect_page);
                dspvect_page = NULL;

                up_read(&exmap_sem);
        }

        device_remove_file(&dsp_device.dev, &dev_attr_mmu);
        device_remove_file(&dsp_device.dev, &dev_attr_exmap);
        device_remove_file(&dsp_device.dev, &dev_attr_kmem_pool);
}