/*
 * linux/arch/arm/mach-omap/dsp/dsp_mem.c
 *
 * OMAP DSP memory driver
 *
 * Copyright (C) 2002-2005 Nokia Corporation
 *
 * Written by Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
 * 2005/06/09:  DSP Gateway version 3.3
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/fb.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/ioctls.h>
#include <asm/irq.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/hardware/clock.h>
#include <asm/arch/tc.h>
#include <asm/arch/dsp.h>
#include <asm/arch/dsp_common.h>
#include "uaccess_dsp.h"
#define SZ_1MB	0x100000
#define SZ_64KB	0x10000
#define SZ_4KB	0x1000
#define SZ_1KB	0x400

#define is_aligned(adr,align)	(!((adr)&((align)-1)))
#define ORDER_1MB	(20 - PAGE_SHIFT)
#define ORDER_64KB	(16 - PAGE_SHIFT)
#define ORDER_4KB	(12 - PAGE_SHIFT)

#define PGDIR_MASK		(~(PGDIR_SIZE-1))
#define PGDIR_ALIGN(addr)	(((addr)+PGDIR_SIZE-1)&(PGDIR_MASK))
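/*
 * For reference: with the usual 4KB pages (PAGE_SHIFT == 12),
 * ORDER_1MB = 20 - 12 = 8 (256 pages) and ORDER_64KB = 16 - 12 = 4
 * (16 pages), i.e. each ORDER_* is the page-allocator order matching
 * the corresponding DSP MMU section size.  is_aligned(0x300000, SZ_1MB)
 * evaluates to 1, while is_aligned(0x310000, SZ_1MB) evaluates to 0.
 */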
#define dsp_mmu_enable() \
	do { \
		omap_writew(DSPMMU_CNTL_MMU_EN | DSPMMU_CNTL_RESET_SW, \
			    DSPMMU_CNTL); \
	} while(0)
#define dsp_mmu_disable() \
	do { omap_writew(0, DSPMMU_CNTL); } while(0)
#define dsp_mmu_flush() \
	do { \
		omap_writew(DSPMMU_FLUSH_ENTRY_FLUSH_ENTRY, \
			    DSPMMU_FLUSH_ENTRY); \
	} while(0)
#define __dsp_mmu_gflush() \
	do { omap_writew(DSPMMU_GFLUSH_GFLUSH, DSPMMU_GFLUSH); } while(0)
#define __dsp_mmu_itack() \
	do { omap_writew(DSPMMU_IT_ACK_IT_ACK, DSPMMU_IT_ACK); } while(0)
#define EMIF_PRIO_LB_MASK	0x0000f000
#define EMIF_PRIO_LB_SHIFT	12
#define EMIF_PRIO_DMA_MASK	0x00000f00
#define EMIF_PRIO_DMA_SHIFT	8
#define EMIF_PRIO_DSP_MASK	0x00000070
#define EMIF_PRIO_DSP_SHIFT	4
#define EMIF_PRIO_MPU_MASK	0x00000007
#define EMIF_PRIO_MPU_SHIFT	0
#define set_emiff_dma_prio(prio) \
	do { \
		omap_writel((omap_readl(OMAP_TC_OCPT1_PRIOR) & \
			     ~EMIF_PRIO_DMA_MASK) | \
			    ((prio) << EMIF_PRIO_DMA_SHIFT), \
			    OMAP_TC_OCPT1_PRIOR); \
	} while(0)
enum exmap_type {
	EXMAP_TYPE_MEM,
	EXMAP_TYPE_FB
};

struct exmap_tbl {
	unsigned int valid:1;
	unsigned int cntnu:1;	/* grouping */
	int usecount;		/* reference count by mmap */
	enum exmap_type type;
	void *buf;		/* virtual address of the buffer,
				 * i.e. 0xc0000000 - */
	void *vadr;		/* DSP shadow space,
				 * i.e. 0xe0000000 - 0xe0ffffff */
	unsigned int order;
};
#define DSPMMU_TLB_LINES	32
static struct exmap_tbl exmap_tbl[DSPMMU_TLB_LINES];
static DECLARE_RWSEM(exmap_sem);

static int dsp_exunmap(unsigned long dspadr);

static void *dspvect_page;
static unsigned long dsp_fault_adr;
static struct mem_sync_struct mem_sync;
static __inline__ unsigned long lineup_offset(unsigned long adr,
					      unsigned long ref,
					      unsigned long mask)
{
	unsigned long newadr;

	newadr = (adr & ~mask) | (ref & mask);
	if (newadr < adr)
		newadr += mask + 1;
	return newadr;
}
void dsp_mem_sync_inc(void)
{
	/*
	 * FIXME: dsp_mem_enable()!!!
	 */
	if (mem_sync.DARAM)
		mem_sync.DARAM->ad_arm++;
	if (mem_sync.SARAM)
		mem_sync.SARAM->ad_arm++;
	if (mem_sync.SDRAM)
		mem_sync.SDRAM->ad_arm++;
}
/*
 * dsp_mem_sync_config() is called from the mbx1 workqueue
 */
int dsp_mem_sync_config(struct mem_sync_struct *sync)
{
	size_t sync_seq_sz = sizeof(struct sync_seq);

#ifdef OLD_BINARY_SUPPORT
	if (sync == NULL) {
		memset(&mem_sync, 0, sizeof(struct mem_sync_struct));
		return 0;
	}
#endif
	if ((dsp_mem_type(sync->DARAM, sync_seq_sz) != MEM_TYPE_DARAM) ||
	    (dsp_mem_type(sync->SARAM, sync_seq_sz) != MEM_TYPE_SARAM) ||
	    (dsp_mem_type(sync->SDRAM, sync_seq_sz) != MEM_TYPE_EXTERN)) {
		printk(KERN_ERR
		       "omapdsp: mem_sync address validation failure!\n"
		       "  mem_sync.DARAM = 0x%p,\n"
		       "  mem_sync.SARAM = 0x%p,\n"
		       "  mem_sync.SDRAM = 0x%p,\n",
		       sync->DARAM, sync->SARAM, sync->SDRAM);
		return -1;
	}
	memcpy(&mem_sync, sync, sizeof(struct mem_sync_struct));

	return 0;
}
/*
 * kmem_reserve(), kmem_release():
 * reserve or release kernel memory for exmap().
 *
 * exmap() may need physically consecutive 1MB or 64kB blocks, which
 * are hard to obtain once memory pages have become fragmented.
 * So the user can reserve such memory blocks in the early phase
 * through kmem_reserve().
 */
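/*
 * A minimal userland sketch (hypothetical: the device node name and
 * error handling are assumed, not defined in this file):
 *
 *	unsigned long size = 0x200000;		// reserve 2 x 1MB early
 *	int fd = open("/dev/omap-dsp/mem", O_RDWR);
 *	ioctl(fd, OMAP_DSP_MEM_IOCTL_KMEM_RESERVE, &size);
 *	...
 *	ioctl(fd, OMAP_DSP_MEM_IOCTL_KMEM_RELEASE, 0);
 */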
struct kmem_pool {
	struct semaphore sem;
	unsigned long buf[16];
	int count;
};

#define KMEM_POOL_INIT(name) \
{ \
	.sem = __MUTEX_INITIALIZER((name).sem), \
}
#define DECLARE_KMEM_POOL(name) \
	struct kmem_pool name = KMEM_POOL_INIT(name)

DECLARE_KMEM_POOL(kmem_pool_1M);
DECLARE_KMEM_POOL(kmem_pool_64K);
static void dsp_kmem_release(void)
{
	int i;

	down(&kmem_pool_1M.sem);
	for (i = 0; i < kmem_pool_1M.count; i++) {
		if (kmem_pool_1M.buf[i])
			free_pages(kmem_pool_1M.buf[i], ORDER_1MB);
	}
	kmem_pool_1M.count = 0;
	up(&kmem_pool_1M.sem);

	down(&kmem_pool_64K.sem);
	for (i = 0; i < kmem_pool_64K.count; i++) {
		if (kmem_pool_64K.buf[i])
			free_pages(kmem_pool_64K.buf[i], ORDER_64KB);
	}
	kmem_pool_64K.count = 0;
	up(&kmem_pool_64K.sem);	/* fix: release the 64K pool semaphore, not the 1M one */
}
static int dsp_kmem_reserve(unsigned long size)
{
	unsigned long buf;
	unsigned int order;
	unsigned long unit;
	unsigned long _size;
	struct kmem_pool *pool;
	int i;

	/* alignment check */
	if (!is_aligned(size, SZ_64KB)) {
		printk(KERN_ERR
		       "omapdsp: size(0x%lx) is not multiple of 64KB.\n", size);
		return -EINVAL;
	}
	if (size > DSPSPACE_SIZE) {
		printk(KERN_ERR
		       "omapdsp: size(0x%lx) is larger than DSP memory space "
		       "size (0x%x).\n", size, DSPSPACE_SIZE);
		return -EINVAL;
	}

	for (_size = size; _size; _size -= unit) {
		if (_size >= SZ_1MB) {
			unit = SZ_1MB;
			order = ORDER_1MB;
			pool = &kmem_pool_1M;
		} else {
			unit = SZ_64KB;
			order = ORDER_64KB;
			pool = &kmem_pool_64K;
		}

		buf = __get_dma_pages(GFP_KERNEL, order);
		if (!buf)
			return -ENOMEM;
		down(&pool->sem);
		for (i = 0; i < 16; i++) {
			if (pool->buf[i])
				continue;
			pool->buf[i] = buf;
			pool->count++;
			buf = 0;
			break;
		}
		up(&pool->sem);
		if (buf) {	/* pool is full */
			free_pages(buf, order);
			return -ENOMEM;
		}
	}

	return 0;
}
static unsigned long dsp_mem_get_dma_pages(unsigned int order)
{
	struct kmem_pool *pool;
	unsigned long buf = 0;
	int i;

	switch (order) {
	case ORDER_1MB:
		pool = &kmem_pool_1M;
		break;
	case ORDER_64KB:
		pool = &kmem_pool_64K;
		break;
	default:
		pool = NULL;
	}

	if (pool) {
		down(&pool->sem);
		for (i = 0; i < pool->count; i++) {
			if (pool->buf[i]) {
				buf = pool->buf[i];
				pool->buf[i] = 0;
				break;
			}
		}
		up(&pool->sem);
		if (buf)
			return buf;
	}

	/* other size or not found in pool */
	return __get_dma_pages(GFP_KERNEL, order);
}
static void dsp_mem_free_pages(unsigned long buf, unsigned int order)
{
	struct kmem_pool *pool;
	struct page *page, *ps, *pe;
	int i;

	ps = virt_to_page(buf);
	pe = virt_to_page(buf + (1 << (PAGE_SHIFT + order)));
	for (page = ps; page < pe; page++) {
		ClearPageReserved(page);
	}

	/*
	 * return buffer to kmem_pool or paging system
	 */
	switch (order) {
	case ORDER_1MB:
		pool = &kmem_pool_1M;
		break;
	case ORDER_64KB:
		pool = &kmem_pool_64K;
		break;
	default:
		pool = NULL;
	}

	if (pool) {
		down(&pool->sem);
		for (i = 0; i < pool->count; i++) {
			if (!pool->buf[i]) {
				pool->buf[i] = buf;
				buf = 0;
				break;
			}
		}
		up(&pool->sem);
		if (!buf)
			return;
	}

	/* other size or pool is filled */
	free_pages(buf, order);
}
static int exmap_set_armmmu(unsigned long virt, unsigned long phys,
			    unsigned long size)
{
	long off;
	unsigned long sz_left;
	pmd_t *pmdp;
	pte_t *ptep;
	int prot_pmd, prot_pte;

	printk(KERN_DEBUG
	       "omapdsp: mapping in ARM MMU, v=0x%08lx, p=0x%08lx, sz=0x%lx\n",
	       virt, phys, size);

	prot_pmd = PMD_TYPE_TABLE | PMD_DOMAIN(DOMAIN_IO);
	prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_WRITE;

	pmdp = pmd_offset(pgd_offset_k(virt), virt);
	if (pmd_none(*pmdp)) {
		ptep = pte_alloc_one_kernel(&init_mm, 0);
		if (ptep == NULL)
			return -ENOMEM;
		/* note: two PMDs will be set */
		pmd_populate_kernel(&init_mm, pmdp, ptep);
	}

	off = phys - virt;
	for (sz_left = size;
	     sz_left >= PAGE_SIZE;
	     sz_left -= PAGE_SIZE, virt += PAGE_SIZE) {
		ptep = pte_offset_kernel(pmdp, virt);
		set_pte(ptep, __pte((virt + off) | prot_pte));
	}
	BUG_ON(sz_left);

	return 0;
}
static void exmap_clear_armmmu(unsigned long virt, unsigned long size)
{
	unsigned long sz_left;
	pmd_t *pmdp;
	pte_t *ptep;

	printk(KERN_DEBUG
	       "omapdsp: unmapping in ARM MMU, v=0x%08lx, sz=0x%lx\n",
	       virt, size);

	for (sz_left = size;
	     sz_left >= PAGE_SIZE;
	     sz_left -= PAGE_SIZE, virt += PAGE_SIZE) {
		pmdp = pmd_offset(pgd_offset_k(virt), virt);
		ptep = pte_offset_kernel(pmdp, virt);
		pte_clear(&init_mm, virt, ptep);
	}
	BUG_ON(sz_left);
}
static int exmap_valid(void *vadr, size_t len)
{
	/* exmap_sem should be held before calling this function */
	int i;

start:
	for (i = 0; i < DSPMMU_TLB_LINES; i++) {
		void *mapadr;
		unsigned long mapsize;
		struct exmap_tbl *ent = &exmap_tbl[i];

		if (!ent->valid)
			continue;
		mapadr = (void *)ent->vadr;
		mapsize = 1 << (ent->order + PAGE_SHIFT);
		if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
			if (vadr + len <= mapadr + mapsize) {
				/* this map covers whole address. */
				return 1;
			} else {
				/*
				 * this map covers partially.
				 * check rest portion.
				 */
				len -= mapadr + mapsize - vadr;
				vadr = mapadr + mapsize;
				goto start;
			}
		}
	}

	return 0;
}
enum dsp_mem_type_e dsp_mem_type(void *vadr, size_t len)
{
	void *ds = (void *)daram_base;
	void *de = (void *)daram_base + daram_size;
	void *ss = (void *)saram_base;
	void *se = (void *)saram_base + saram_size;
	int ret;

	if ((vadr >= ds) && (vadr < de)) {
		if (vadr + len > de)
			return MEM_TYPE_CROSSING;
		else
			return MEM_TYPE_DARAM;
	} else if ((vadr >= ss) && (vadr < se)) {
		if (vadr + len > se)
			return MEM_TYPE_CROSSING;
		else
			return MEM_TYPE_SARAM;
	} else {
		down_read(&exmap_sem);
		if (exmap_valid(vadr, len))
			ret = MEM_TYPE_EXTERN;
		else
			ret = 0;
		up_read(&exmap_sem);
	}

	return ret;
}
int dsp_address_validate(void *p, size_t len, char *fmt, ...)
{
	if (dsp_mem_type(p, len) <= 0) {
		if (fmt != NULL) {
			char s[64];
			va_list args;

			va_start(args, fmt);
			vsprintf(s, fmt, args);
			va_end(args);
			printk(KERN_ERR
			       "omapdsp: %s address(0x%p) and size(0x%x) is "
			       "not valid!\n"
			       "         (crossing different type of memories, or \n"
			       "          external memory space where no "
			       "actual memory is mapped)\n",
			       s, p, len);
		}
		return -1;
	}

	return 0;
}
/*
 * exmap_use(), exmap_unuse():
 * when the mapped area is exported to user space with mmap,
 * usecount is incremented.
 * While usecount > 0, that area can't be released.
 */
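/*
 * Sketch of the intended pairing (illustrative only): a driver that
 * exports a mapped area via mmap() would bump the count in its mmap
 * handler and drop it when the VMA goes away, e.g.
 *
 *	exmap_use(vadr, len);		// in mmap / vm_ops->open
 *	...
 *	exmap_unuse(vadr, len);		// in vm_ops->close
 *
 * dsp_exunmap() below refuses to release an entry while usecount > 0.
 */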
void exmap_use(void *vadr, size_t len)
{
	int i;

	down_write(&exmap_sem);
	for (i = 0; i < DSPMMU_TLB_LINES; i++) {
		void *mapadr;
		unsigned long mapsize;
		struct exmap_tbl *ent = &exmap_tbl[i];

		if (!ent->valid)
			continue;
		mapadr = (void *)ent->vadr;
		mapsize = 1 << (ent->order + PAGE_SHIFT);
		if ((vadr + len > mapadr) && (vadr < mapadr + mapsize))
			ent->usecount++;
	}
	up_write(&exmap_sem);
}
void exmap_unuse(void *vadr, size_t len)
{
	int i;

	down_write(&exmap_sem);
	for (i = 0; i < DSPMMU_TLB_LINES; i++) {
		void *mapadr;
		unsigned long mapsize;
		struct exmap_tbl *ent = &exmap_tbl[i];

		if (!ent->valid)
			continue;
		mapadr = (void *)ent->vadr;
		mapsize = 1 << (ent->order + PAGE_SHIFT);
		if ((vadr + len > mapadr) && (vadr < mapadr + mapsize))
			ent->usecount--;
	}
	up_write(&exmap_sem);
}
/*
 * dsp_virt_to_phys():
 * returns physical address, and sets len to the valid length
 */
unsigned long dsp_virt_to_phys(void *vadr, size_t *len)
{
	int i;

	if (is_dsp_internal_mem(vadr)) {
		/* DSP internal memory */
		*len = dspmem_base + dspmem_size - (unsigned long)vadr;
		return (unsigned long)vadr;
	}

	/* external memory */
	for (i = 0; i < DSPMMU_TLB_LINES; i++) {
		void *mapadr;
		unsigned long mapsize;
		struct exmap_tbl *ent = &exmap_tbl[i];

		if (!ent->valid)
			continue;
		mapadr = (void *)ent->vadr;
		mapsize = 1 << (ent->order + PAGE_SHIFT);
		if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
			*len = mapadr + mapsize - vadr;
			return __pa(ent->buf) + vadr - mapadr;
		}
	}

	/* valid mapping not found */
	return 0;
}
static __inline__ unsigned short get_cam_l_va_mask(unsigned short slst)
{
	switch (slst) {
	case DSPMMU_CAM_L_SLST_1MB:
		return DSPMMU_CAM_L_VA_TAG_L1_MASK |
		       DSPMMU_CAM_L_VA_TAG_L2_MASK_1MB;
	case DSPMMU_CAM_L_SLST_64KB:
		return DSPMMU_CAM_L_VA_TAG_L1_MASK |
		       DSPMMU_CAM_L_VA_TAG_L2_MASK_64KB;
	case DSPMMU_CAM_L_SLST_4KB:
		return DSPMMU_CAM_L_VA_TAG_L1_MASK |
		       DSPMMU_CAM_L_VA_TAG_L2_MASK_4KB;
	case DSPMMU_CAM_L_SLST_1KB:
		return DSPMMU_CAM_L_VA_TAG_L1_MASK |
		       DSPMMU_CAM_L_VA_TAG_L2_MASK_1KB;
	}
	return 0;
}
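/*
 * Worked example (addresses assumed for illustration): for a 1MB entry
 * at DSP virtual address 0x300000, the CAM tag is split as
 *	cam_h        = vadr >> 22
 *	cam_l VA tag = (vadr >> 6) & get_cam_l_va_mask(DSPMMU_CAM_L_SLST_1MB)
 * i.e. the bits above bit 21 go into CAM_H and bits [21:6] into CAM_L,
 * matching how __load_tlb() below assembles its arguments.
 */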
static __inline__ void get_tlb_lock(int *base, int *victim)
{
	unsigned short lock = omap_readw(DSPMMU_LOCK);

	if (base != NULL)
		*base = (lock & DSPMMU_LOCK_BASE_MASK)
			>> DSPMMU_LOCK_BASE_SHIFT;
	if (victim != NULL)
		*victim = (lock & DSPMMU_LOCK_VICTIM_MASK)
			  >> DSPMMU_LOCK_VICTIM_SHIFT;
}

static __inline__ void set_tlb_lock(int base, int victim)
{
	omap_writew((base   << DSPMMU_LOCK_BASE_SHIFT) |
		    (victim << DSPMMU_LOCK_VICTIM_SHIFT), DSPMMU_LOCK);
}
static __inline__ void __read_tlb(unsigned short lbase, unsigned short victim,
				  unsigned short *cam_h, unsigned short *cam_l,
				  unsigned short *ram_h, unsigned short *ram_l)
{
	/* set victim */
	set_tlb_lock(lbase, victim);

	/* read a TLB entry */
	omap_writew(DSPMMU_LD_TLB_RD, DSPMMU_LD_TLB);

	if (cam_h != NULL)
		*cam_h = omap_readw(DSPMMU_READ_CAM_H);
	if (cam_l != NULL)
		*cam_l = omap_readw(DSPMMU_READ_CAM_L);
	if (ram_h != NULL)
		*ram_h = omap_readw(DSPMMU_READ_RAM_H);
	if (ram_l != NULL)
		*ram_l = omap_readw(DSPMMU_READ_RAM_L);
}
static __inline__ void __load_tlb(unsigned short cam_h, unsigned short cam_l,
				  unsigned short ram_h, unsigned short ram_l)
{
	omap_writew(cam_h, DSPMMU_CAM_H);
	omap_writew(cam_l, DSPMMU_CAM_L);
	omap_writew(ram_h, DSPMMU_RAM_H);
	omap_writew(ram_l, DSPMMU_RAM_L);

	/* flush the entry */
	dsp_mmu_flush();

	/* load a TLB entry */
	omap_writew(DSPMMU_LD_TLB_LD, DSPMMU_LD_TLB);
}
static int dsp_mmu_load_tlb(unsigned long vadr, unsigned long padr,
			    unsigned short slst, unsigned short prsvd,
			    unsigned short ap)
{
	int lbase, victim;
	unsigned short cam_l_va_mask;

	clk_use(dsp_ck_handle);

	get_tlb_lock(&lbase, NULL);
	for (victim = 0; victim < lbase; victim++) {
		unsigned short cam_l;

		/* read a TLB entry */
		__read_tlb(lbase, victim, NULL, &cam_l, NULL, NULL);
		if (!(cam_l & DSPMMU_CAM_L_V))
			break;	/* found an invalid entry to use */
	}

	/* set victim */
	set_tlb_lock(lbase, victim);

	/* The last (31st) entry cannot be locked? */
	if (victim == 31) {
		printk(KERN_ERR "omapdsp: TLB is full.\n");
		clk_unuse(dsp_ck_handle);
		return -EBUSY;
	}

	cam_l_va_mask = get_cam_l_va_mask(slst);
	if (vadr &
	    ~(DSPMMU_CAM_H_VA_TAG_H_MASK << 22 |
	      (unsigned long)cam_l_va_mask << 6)) {
		printk(KERN_ERR
		       "omapdsp: mapping vadr (0x%06lx) is not on an "
		       "aligned boundary\n", vadr);
		clk_unuse(dsp_ck_handle);
		return -EINVAL;
	}

	__load_tlb(vadr >> 22, (vadr >> 6 & cam_l_va_mask) | prsvd | slst,
		   padr >> 16, (padr & DSPMMU_RAM_L_RAM_LSB_MASK) | ap);

	/* update lock base */
	if (victim == lbase)
		lbase++;
	set_tlb_lock(lbase, lbase);

	clk_unuse(dsp_ck_handle);
	return 0;
}
static int dsp_mmu_clear_tlb(unsigned long vadr)
{
	int lbase, i;
	int max_valid = 0;

	clk_use(dsp_ck_handle);

	get_tlb_lock(&lbase, NULL);
	for (i = 0; i < lbase; i++) {
		unsigned short cam_h, cam_l;
		unsigned short cam_l_va_mask, cam_vld, slst;
		unsigned long cam_va;

		/* read a TLB entry */
		__read_tlb(lbase, i, &cam_h, &cam_l, NULL, NULL);

		cam_vld = cam_l & DSPMMU_CAM_L_V;
		if (!cam_vld)
			continue;

		slst = cam_l & DSPMMU_CAM_L_SLST_MASK;
		cam_l_va_mask = get_cam_l_va_mask(slst);
		cam_va = (unsigned long)(cam_h & DSPMMU_CAM_H_VA_TAG_H_MASK) << 22 |
			 (unsigned long)(cam_l & cam_l_va_mask) << 6;

		if (cam_va == vadr)
			/* flush the entry */
			dsp_mmu_flush();
		else
			max_valid = i;
	}

	/* set new lock base */
	set_tlb_lock(max_valid+1, max_valid+1);

	clk_unuse(dsp_ck_handle);
	return 0;
}
static void dsp_mmu_gflush(void)
{
	clk_use(dsp_ck_handle);

	__dsp_mmu_gflush();
	set_tlb_lock(1, 1);

	clk_unuse(dsp_ck_handle);
}
/*
 * dsp_exmap():
 *
 * The OMAP_DSP_MEM_IOCTL_EXMAP ioctl calls this function with padr=0.
 * In this case, the buffer for the DSP is allocated in this routine,
 * then mapped onto the DSP space.
 * Other callers - frame buffer sharing, for example - call this
 * function with padr set; a known physical region pointed to by padr
 * is then shared with the DSP.
 */
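/*
 * Illustrative calls (a sketch; the addresses are assumed, not taken
 * from this file):
 *
 *	// case 1: anonymous buffer for the DSP, allocated here
 *	dsp_exmap(0x280000, 0, SZ_1MB, EXMAP_TYPE_MEM);
 *
 *	// case 2: share an existing physical region (e.g. frame buffer)
 *	dsp_exmap(dspadr, padr, fbsz, EXMAP_TYPE_FB);
 */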
static int dsp_exmap(unsigned long dspadr, unsigned long padr,
		     unsigned long size, enum exmap_type type)
{
	void *buf;
	unsigned short slst;
	unsigned int order = 0;
	unsigned long unit;
	unsigned int cntnu = 0;
	unsigned long _dspadr = dspadr;
	unsigned long _padr = padr;
	void *_vadr = dspbyte_to_virt(dspadr);
	unsigned long _size = size;
	struct exmap_tbl *exmap_ent;
	int status;
	int i;

#define MINIMUM_PAGESZ	SZ_4KB
	/*
	 * alignment check
	 */
	if (!is_aligned(size, MINIMUM_PAGESZ)) {
		printk(KERN_ERR
		       "omapdsp: size(0x%lx) is not multiple of 4KB.\n", size);
		return -EINVAL;
	}
	if (!is_aligned(dspadr, MINIMUM_PAGESZ)) {
		printk(KERN_ERR
		       "omapdsp: DSP address(0x%lx) is not aligned.\n", dspadr);
		return -EINVAL;
	}
	if (!is_aligned(padr, MINIMUM_PAGESZ)) {
		printk(KERN_ERR
		       "omapdsp: physical address(0x%lx) is not aligned.\n",
		       padr);
		return -EINVAL;
	}

	/* address validity check */
	if ((dspadr < dspmem_size) ||
	    (dspadr >= DSPSPACE_SIZE) ||
	    ((dspadr + size > DSP_INIT_PAGE) &&
	     (dspadr < DSP_INIT_PAGE + PAGE_SIZE))) {
		printk(KERN_ERR
		       "omapdsp: illegal address/size for dsp_exmap().\n");
		return -EINVAL;
	}

	down_write(&exmap_sem);

	/* overlap check */
	for (i = 0; i < DSPMMU_TLB_LINES; i++) {
		unsigned long mapsize;
		struct exmap_tbl *tmp_ent = &exmap_tbl[i];

		if (!tmp_ent->valid)
			continue;
		mapsize = 1 << (tmp_ent->order + PAGE_SHIFT);
		if ((_vadr + size > tmp_ent->vadr) &&
		    (_vadr < tmp_ent->vadr + mapsize)) {
			printk(KERN_ERR "omapdsp: exmap page overlap!\n");
			up_write(&exmap_sem);
			return -EINVAL;
		}
	}

start:
	buf = NULL;
	/* Are there any free TLB lines?  */
	for (i = 0; i < DSPMMU_TLB_LINES; i++) {
		if (!exmap_tbl[i].valid)
			goto found_free;
	}
	printk(KERN_ERR "omapdsp: DSP TLB is full.\n");
	status = -EBUSY;
	goto fail;

found_free:
	exmap_ent = &exmap_tbl[i];

	if ((_size >= SZ_1MB) &&
	    (is_aligned(_padr, SZ_1MB) || (padr == 0)) &&
	    is_aligned(_dspadr, SZ_1MB)) {
		unit = SZ_1MB;
		slst = DSPMMU_CAM_L_SLST_1MB;
		order = ORDER_1MB;
	} else if ((_size >= SZ_64KB) &&
		   (is_aligned(_padr, SZ_64KB) || (padr == 0)) &&
		   is_aligned(_dspadr, SZ_64KB)) {
		unit = SZ_64KB;
		slst = DSPMMU_CAM_L_SLST_64KB;
		order = ORDER_64KB;
	} else /* if (_size >= SZ_4KB) */ {
		unit = SZ_4KB;
		slst = DSPMMU_CAM_L_SLST_4KB;
		order = ORDER_4KB;
	}
#if 0	/* 1KB is not enabled */
	else if (_size >= SZ_1KB) {
		unit = SZ_1KB;
		slst = DSPMMU_CAM_L_SLST_1KB;
	}
#endif

	/* buffer allocation */
	if (type == EXMAP_TYPE_MEM) {
		struct page *page, *ps, *pe;

		buf = (void *)dsp_mem_get_dma_pages(order);
		if (buf == NULL) {
			status = -ENOMEM;
			goto fail;
		}
		/* mark the pages as reserved; this is needed for mmap */
		ps = virt_to_page(buf);
		pe = virt_to_page(buf + unit);
		for (page = ps; page < pe; page++) {
			SetPageReserved(page);
		}
		_padr = __pa(buf);
	}

	/*
	 * mapping for ARM MMU:
	 * we should not access the allocated memory through 'buf'
	 * since this area should not be cached.
	 */
	status = exmap_set_armmmu((unsigned long)_vadr, _padr, unit);
	if (status < 0)
		goto fail;

	/* loading DSP TLB entry */
	status = dsp_mmu_load_tlb(_dspadr, _padr, slst, 0, DSPMMU_RAM_L_AP_FA);
	if (status < 0) {
		exmap_clear_armmmu((unsigned long)_vadr, unit);
		goto fail;
	}

	exmap_ent->buf      = buf;
	exmap_ent->vadr     = _vadr;
	exmap_ent->order    = order;
	exmap_ent->valid    = 1;
	exmap_ent->cntnu    = cntnu;
	exmap_ent->type     = type;
	exmap_ent->usecount = 0;

	if ((_size -= unit) == 0) {	/* normal completion */
		up_write(&exmap_sem);
		return size;
	}

	_dspadr += unit;
	_vadr   += unit;
	_padr = padr ? _padr + unit : 0;
	cntnu = 1;
	goto start;

fail:
	up_write(&exmap_sem);
	if (buf)
		dsp_mem_free_pages((unsigned long)buf, order);
	dsp_exunmap(dspadr);
	return status;
}
static unsigned long unmap_free_arm(struct exmap_tbl *ent)
{
	unsigned long size;

	/* clearing ARM MMU */
	size = 1 << (ent->order + PAGE_SHIFT);
	exmap_clear_armmmu((unsigned long)ent->vadr, size);

	/* freeing allocated memory */
	if (ent->type == EXMAP_TYPE_MEM) {
		dsp_mem_free_pages((unsigned long)ent->buf, ent->order);
		printk(KERN_DEBUG
		       "omapdsp: freeing 0x%lx bytes @ adr 0x%8p\n",
		       size, ent->buf);
	}

	return size;
}
static int dsp_exunmap(unsigned long dspadr)
{
	void *vadr;
	unsigned long size;
	int total = 0;
	struct exmap_tbl *ent;
	int idx;

	vadr = dspbyte_to_virt(dspadr);
	down_write(&exmap_sem);
	for (idx = 0; idx < DSPMMU_TLB_LINES; idx++) {
		ent = &exmap_tbl[idx];
		if (!ent->valid)
			continue;
		if (ent->vadr == vadr)
			goto found_map;
	}
	up_write(&exmap_sem);
	printk(KERN_WARNING
	       "omapdsp: address %06lx not found in exmap_tbl.\n", dspadr);
	return -EINVAL;

found_map:
	if (ent->usecount > 0) {
		printk(KERN_ERR
		       "omapdsp: exmap reference count is not 0.\n"
		       "   idx=%d, vadr=%p, order=%d, usecount=%d\n",
		       idx, ent->vadr, ent->order, ent->usecount);
		up_write(&exmap_sem);
		return -EINVAL;
	}
	/* clearing DSP TLB entry */
	dsp_mmu_clear_tlb(dspadr);

	/* clear ARM MMU and free buffer */
	size = unmap_free_arm(ent);
	ent->valid = 0;
	total += size;

	/* we don't free PTEs */

	flush_tlb_kernel_range((unsigned long)vadr, (unsigned long)vadr + size);

	/* check if next mapping is in same group */
	if (++idx == DSPMMU_TLB_LINES)
		goto up_out;	/* normal completion */
	ent = &exmap_tbl[idx];
	if (!ent->valid || !ent->cntnu)
		goto up_out;	/* normal completion */

	dspadr += size;
	vadr   += size;
	if (ent->vadr == vadr)
		goto found_map;	/* continue */

	printk(KERN_ERR
	       "omapdsp: illegal exmap_tbl grouping!\n"
	       "expected vadr = %p, exmap_tbl[%d].vadr = %p\n",
	       vadr, idx, ent->vadr);
	up_write(&exmap_sem);
	return -EINVAL;

up_out:
	up_write(&exmap_sem);
	return total;
}
static void exmap_flush(void)
{
	struct exmap_tbl *ent;
	int i;

	down_write(&exmap_sem);

	/* clearing DSP TLB entry */
	dsp_mmu_gflush();

	/* exmap_tbl[0] should be preserved */
	for (i = 1; i < DSPMMU_TLB_LINES; i++) {
		ent = &exmap_tbl[i];
		if (ent->valid) {
			unmap_free_arm(ent);
			ent->valid = 0;
		}
	}

	/* flush TLB */
	flush_tlb_kernel_range(dspmem_base + dspmem_size,
			       dspmem_base + DSPSPACE_SIZE);
	up_write(&exmap_sem);
}
#ifdef CONFIG_OMAP_DSP_FBEXPORT
#ifndef CONFIG_FB
#error You configured OMAP_DSP_FBEXPORT, but FB was not configured!
#endif /* CONFIG_FB */
static int dsp_fbexport(unsigned long *dspadr)
{
	unsigned long dspadr_actual;
	unsigned long padr_sys, padr, fbsz_sys, fbsz;
	int cnt;

	printk(KERN_DEBUG "omapdsp: frame buffer export\n");

	if (num_registered_fb == 0) {
		printk(KERN_INFO "omapdsp: frame buffer not registered.\n");
		return -EINVAL;
	}
	if (num_registered_fb != 1) {
		printk(KERN_INFO
		       "omapdsp: %d frame buffers found. using the first one.\n",
		       num_registered_fb);
	}
	padr_sys = registered_fb[0]->fix.smem_start;
	fbsz_sys = registered_fb[0]->fix.smem_len;
	if (fbsz_sys == 0) {
		printk(KERN_ERR
		       "omapdsp: framebuffer doesn't seem to be configured "
		       "correctly! (size=0)\n");
		return -EINVAL;
	}

	/*
	 * align padr and fbsz to a 4kB boundary
	 * (should be noted to the user afterwards!)
	 */
	padr = padr_sys & ~(SZ_4KB-1);
	fbsz = (fbsz_sys + padr_sys - padr + SZ_4KB-1) & ~(SZ_4KB-1);
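	/*
	 * e.g. (values assumed for illustration): padr_sys = 0x10170020,
	 * fbsz_sys = 0x25800 gives padr = 0x10170000 and
	 * fbsz = (0x25800 + 0x20 + 0xfff) & ~0xfff = 0x26000,
	 * so the exported window fully covers the unaligned frame buffer.
	 */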
	/* line up dspadr offset with padr */
	dspadr_actual =
		(fbsz > SZ_1MB)  ? lineup_offset(*dspadr, padr, SZ_1MB-1) :
		(fbsz > SZ_64KB) ? lineup_offset(*dspadr, padr, SZ_64KB-1) :
		/* (fbsz > SZ_4KB) ? */ *dspadr;
	if (dspadr_actual != *dspadr)
		printk(KERN_DEBUG
		       "omapdsp: actual dspadr for FBEXPORT = %08lx\n",
		       dspadr_actual);
	*dspadr = dspadr_actual;

	cnt = dsp_exmap(dspadr_actual, padr, fbsz, EXMAP_TYPE_FB);
	if (cnt < 0) {
		printk(KERN_ERR "omapdsp: exmap failure.\n");
		return cnt;
	}

	if ((padr != padr_sys) || (fbsz != fbsz_sys)) {
		printk(KERN_WARNING
" !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
" !!  screen base address or size is not aligned in 4kB:          !!\n"
" !!    actual screen  adr = %08lx, size = %08lx                  !!\n"
" !!    exporting      adr = %08lx, size = %08lx                  !!\n"
" !!  Make sure that the framebuffer is allocated with 4kB-order! !!\n"
" !!  Otherwise DSP can corrupt the kernel memory.                !!\n"
" !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n",
		       padr_sys, fbsz_sys, padr, fbsz);
	}

	/* increase the DMA priority */
	set_emiff_dma_prio(15);

	return 0;
}
#else /* CONFIG_OMAP_DSP_FBEXPORT */

static int dsp_fbexport(unsigned long *dspadr)
{
	printk(KERN_ERR "omapdsp: FBEXPORT function is not enabled.\n");
	return -EINVAL;
}

#endif /* CONFIG_OMAP_DSP_FBEXPORT */
static int dsp_mmu_itack(void)
{
	unsigned long dspadr;

	printk(KERN_INFO "omapdsp: sending DSP MMU interrupt ack.\n");
	if (!dsp_err_mmu_isset()) {
		printk(KERN_ERR "omapdsp: DSP MMU error has not been set.\n");
		return -EINVAL;
	}
	dspadr = dsp_fault_adr & ~(SZ_4KB-1);
	dsp_exmap(dspadr, 0, SZ_4KB, EXMAP_TYPE_MEM);	/* FIXME: reserve TLB entry for this */
	printk(KERN_INFO "omapdsp: falling into recovery runlevel...\n");
	dsp_runlevel(OMAP_DSP_MBCMD_RUNLEVEL_RECOVERY);
	__dsp_mmu_itack();
	udelay(100);
	dsp_exunmap(dspadr);
	dsp_err_mmu_clear();

	return 0;
}
static void dsp_mmu_init(void)
{
	unsigned long phys;
	void *virt;

	clk_use(dsp_ck_handle);
	down_write(&exmap_sem);

	dsp_mmu_disable();	/* clear all */
	udelay(100);
	dsp_mmu_enable();

	/* mapping for ARM MMU */
	phys = __pa(dspvect_page);
	virt = dspbyte_to_virt(DSP_INIT_PAGE);	/* 0xe0fff000 */
	exmap_set_armmmu((unsigned long)virt, phys, PAGE_SIZE);
	exmap_tbl[0].buf      = dspvect_page;
	exmap_tbl[0].vadr     = virt;
	exmap_tbl[0].usecount = 0;
	exmap_tbl[0].order    = 0;
	exmap_tbl[0].valid    = 1;
	exmap_tbl[0].cntnu    = 0;

	/* DSP TLB initialization */
	set_tlb_lock(0, 0);
	/* preserved, full access */
	dsp_mmu_load_tlb(DSP_INIT_PAGE, phys, DSPMMU_CAM_L_SLST_4KB,
			 DSPMMU_CAM_L_P, DSPMMU_RAM_L_AP_FA);
	up_write(&exmap_sem);
	clk_unuse(dsp_ck_handle);
}
static void dsp_mmu_shutdown(void)
{
	exmap_flush();
	dsp_mmu_disable();	/* clear all */
}
/*
 * intmem_enable() / intmem_disable():
 * if the address is in DSP internal memories,
 * we send PM mailbox commands so that the DSP DMA domain won't go
 * idle while the ARM is accessing those memories.
 */
static int intmem_enable(void)
{
	int ret = 0;

	if (dsp_is_ready())
		ret = dsp_mbsend(MBCMD(PM), OMAP_DSP_MBCMD_PM_ENABLE,
				 DSPREG_ICR_DMA_IDLE_DOMAIN);

	return ret;
}

static void intmem_disable(void)
{
	if (dsp_is_ready())
		dsp_mbsend(MBCMD(PM), OMAP_DSP_MBCMD_PM_DISABLE,
			   DSPREG_ICR_DMA_IDLE_DOMAIN);
}
/*
 * dsp_mem_enable() / dsp_mem_disable()
 */
int intmem_usecount;

int dsp_mem_enable(void *adr)
{
	int ret = 0;

	if (is_dsp_internal_mem(adr)) {
		if (intmem_usecount++ == 0)
			ret = omap_dsp_request_mem();
	} else
		down_read(&exmap_sem);

	return ret;
}

void dsp_mem_disable(void *adr)
{
	if (is_dsp_internal_mem(adr)) {
		if (--intmem_usecount == 0)
			omap_dsp_release_mem();
	} else
		up_read(&exmap_sem);
}
void dsp_mem_usecount_clear(void)
{
	if (intmem_usecount != 0) {
		printk(KERN_WARNING
		       "omapdsp: unbalanced memory request/release detected.\n"
		       "         intmem_usecount is not zero where "
		       "it should be! ... fixed to be zero.\n");
		intmem_usecount = 0;
		omap_dsp_release_mem();
	}
}
/*
 * dsp_mem file operations
 */
static loff_t dsp_mem_lseek(struct file *file, loff_t offset, int orig)
{
	loff_t ret;

	down(&file->f_dentry->d_inode->i_sem);
	switch (orig) {
	case 0:
		file->f_pos = offset;
		ret = file->f_pos;
		break;
	case 1:
		file->f_pos += offset;
		ret = file->f_pos;
		break;
	default:
		ret = -EINVAL;
	}
	up(&file->f_dentry->d_inode->i_sem);
	return ret;
}
static ssize_t intmem_read(struct file *file, char *buf, size_t count,
			   loff_t *ppos)
{
	unsigned long p = *ppos;
	void *vadr = dspbyte_to_virt(p);
	ssize_t size = dspmem_size;
	ssize_t read;

	if (p >= size)
		return 0;
	clk_use(api_ck_handle);
	read = count;
	if (count > size - p)
		read = size - p;
	if (copy_to_user(buf, vadr, read)) {
		read = -EFAULT;
		goto out;
	}
	*ppos += read;
out:
	clk_unuse(api_ck_handle);
	return read;
}
static ssize_t exmem_read(struct file *file, char *buf, size_t count,
			  loff_t *ppos)
{
	unsigned long p = *ppos;
	void *vadr = dspbyte_to_virt(p);

	if (!exmap_valid(vadr, count)) {
		printk(KERN_ERR
		       "omapdsp: DSP address %08lx / size %08x "
		       "is not valid!\n", p, count);
		return -EFAULT;
	}
	if (count > DSPSPACE_SIZE - p)
		count = DSPSPACE_SIZE - p;
	if (copy_to_user(buf, vadr, count))
		return -EFAULT;
	*ppos += count;

	return count;
}
static ssize_t dsp_mem_read(struct file *file, char *buf, size_t count,
			    loff_t *ppos)
{
	int ret;
	void *vadr = dspbyte_to_virt(*(unsigned long *)ppos);

	if (dsp_mem_enable(vadr) < 0)
		return -EBUSY;
	if (is_dspbyte_internal_mem(*ppos))
		ret = intmem_read(file, buf, count, ppos);
	else
		ret = exmem_read(file, buf, count, ppos);
	dsp_mem_disable(vadr);

	return ret;
}
static ssize_t intmem_write(struct file *file, const char *buf, size_t count,
			    loff_t *ppos)
{
	unsigned long p = *ppos;
	void *vadr = dspbyte_to_virt(p);
	ssize_t size = dspmem_size;
	ssize_t written;

	if (p >= size)
		return 0;
	clk_use(api_ck_handle);
	written = count;
	if (count > size - p)
		written = size - p;
	if (copy_from_user(vadr, buf, written)) {
		written = -EFAULT;
		goto out;
	}
	*ppos += written;
out:
	clk_unuse(api_ck_handle);
	return written;
}
static ssize_t exmem_write(struct file *file, const char *buf, size_t count,
			   loff_t *ppos)
{
	unsigned long p = *ppos;
	void *vadr = dspbyte_to_virt(p);

	if (!exmap_valid(vadr, count)) {
		printk(KERN_ERR
		       "omapdsp: DSP address %08lx / size %08x "
		       "is not valid!\n", p, count);
		return -EFAULT;
	}
	if (count > DSPSPACE_SIZE - p)
		count = DSPSPACE_SIZE - p;
	if (copy_from_user(vadr, buf, count))
		return -EFAULT;
	*ppos += count;

	return count;
}
static ssize_t dsp_mem_write(struct file *file, const char *buf, size_t count,
			     loff_t *ppos)
{
	int ret;
	void *vadr = dspbyte_to_virt(*(unsigned long *)ppos);

	if (dsp_mem_enable(vadr) < 0)
		return -EBUSY;
	if (is_dspbyte_internal_mem(*ppos))
		ret = intmem_write(file, buf, count, ppos);
	else
		ret = exmem_write(file, buf, count, ppos);
	dsp_mem_disable(vadr);

	return ret;
}
static int dsp_mem_ioctl(struct inode *inode, struct file *file,
			 unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case OMAP_DSP_MEM_IOCTL_MMUINIT:
		dsp_mmu_init();
		return 0;

	case OMAP_DSP_MEM_IOCTL_EXMAP:
		{
			struct omap_dsp_mapinfo mapinfo;
			if (copy_from_user(&mapinfo, (void *)arg,
					   sizeof(mapinfo)))
				return -EFAULT;
			return dsp_exmap(mapinfo.dspadr, 0, mapinfo.size,
					 EXMAP_TYPE_MEM);
		}

	case OMAP_DSP_MEM_IOCTL_EXUNMAP:
		return dsp_exunmap((unsigned long)arg);

	case OMAP_DSP_MEM_IOCTL_EXMAP_FLUSH:
		exmap_flush();
		return 0;

	case OMAP_DSP_MEM_IOCTL_FBEXPORT:
		{
			unsigned long dspadr;
			int ret;
			if (copy_from_user(&dspadr, (void *)arg, sizeof(long)))
				return -EFAULT;
			ret = dsp_fbexport(&dspadr);
			if (copy_to_user((void *)arg, &dspadr, sizeof(long)))
				return -EFAULT;
			return ret;
		}

	case OMAP_DSP_MEM_IOCTL_MMUITACK:
		return dsp_mmu_itack();

	case OMAP_DSP_MEM_IOCTL_KMEM_RESERVE:
		{
			unsigned long size;
			if (copy_from_user(&size, (void *)arg, sizeof(long)))
				return -EFAULT;
			return dsp_kmem_reserve(size);
		}

	case OMAP_DSP_MEM_IOCTL_KMEM_RELEASE:
		dsp_kmem_release();
		return 0;

	default:
		return -ENOIOCTLCMD;
	}
}
static int dsp_mem_mmap(struct file *file, struct vm_area_struct *vma)
{
	/*
	 * FIXME: not supported yet.
	 */
	return -ENOSYS;
}
static int dsp_mem_open(struct inode *inode, struct file *file)
{
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	return 0;
}

static int dsp_mem_release(struct inode *inode, struct file *file)
{
	return 0;
}
/*
 * sysfs files
 */
static ssize_t mmu_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	int len;
	int lbase, victim;
	int i;

	clk_use(dsp_ck_handle);
	down_read(&exmap_sem);

	get_tlb_lock(&lbase, &victim);

	len = sprintf(buf, "p: preserved, v: valid\n"
			   "ety      cam_va     ram_pa   sz ap\n");
			/* 00: p v 0x300000 0x10171800 64KB FA */
	for (i = 0; i < 32; i++) {
		unsigned short cam_h, cam_l, ram_h, ram_l;
		unsigned short cam_l_va_mask, prsvd, cam_vld, slst;
		unsigned long cam_va;
		unsigned short ram_l_ap;
		unsigned long ram_pa;
		char *pgsz_str, *ap_str;

		/* read a TLB entry */
		__read_tlb(lbase, i, &cam_h, &cam_l, &ram_h, &ram_l);

		slst = cam_l & DSPMMU_CAM_L_SLST_MASK;
		cam_l_va_mask = get_cam_l_va_mask(slst);
		pgsz_str = (slst == DSPMMU_CAM_L_SLST_1MB)  ? " 1MB":
			   (slst == DSPMMU_CAM_L_SLST_64KB) ? "64KB":
			   (slst == DSPMMU_CAM_L_SLST_4KB)  ? " 4KB":
							      " 1KB";
		prsvd    = cam_l & DSPMMU_CAM_L_P;
		cam_vld  = cam_l & DSPMMU_CAM_L_V;
		ram_l_ap = ram_l & DSPMMU_RAM_L_AP_MASK;
		ap_str = (ram_l_ap == DSPMMU_RAM_L_AP_RO) ? "RO":
			 (ram_l_ap == DSPMMU_RAM_L_AP_FA) ? "FA":
							    "NA";
		cam_va = (unsigned long)(cam_h & DSPMMU_CAM_H_VA_TAG_H_MASK) << 22 |
			 (unsigned long)(cam_l & cam_l_va_mask) << 6;
		ram_pa = (unsigned long)ram_h << 16 |
			 (ram_l & DSPMMU_RAM_L_RAM_LSB_MASK);

		if (i == lbase)
			len += sprintf(buf + len, "lock base = %d\n", lbase);
		if (i == victim)
			len += sprintf(buf + len, "victim    = %d\n", victim);
		/* 00: p v 0x300000 0x10171800 64KB FA */
		len += sprintf(buf + len,
			       "%02d: %c %c 0x%06lx 0x%08lx %s %s\n",
			       i,
			       prsvd   ? 'p' : ' ',
			       cam_vld ? 'v' : ' ',
			       cam_va, ram_pa, pgsz_str, ap_str);
	}

	/* restore victim entry */
	set_tlb_lock(lbase, victim);

	up_read(&exmap_sem);
	clk_unuse(dsp_ck_handle);

	return len;
}

static struct device_attribute dev_attr_mmu = __ATTR_RO(mmu);
static ssize_t exmap_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	int len;
	int i;

	down_read(&exmap_sem);
	len = sprintf(buf, "v: valid, c: cntnu\n"
			   "ety        vadr        buf od uc\n");
		     /* 00: v c 0xe0300000 0xc0171800  0  0 */
	for (i = 0; i < DSPMMU_TLB_LINES; i++) {
		struct exmap_tbl *ent = &exmap_tbl[i];
		/* 00: v c 0xe0300000 0xc0171800  0  0 */
		len += sprintf(buf + len, "%02d: %c %c 0x%8p 0x%8p %2d %2d\n",
			       i,
			       ent->valid ? 'v' : ' ',
			       ent->cntnu ? 'c' : ' ',
			       ent->vadr, ent->buf, ent->order, ent->usecount);
	}
	up_read(&exmap_sem);

	return len;
}

static struct device_attribute dev_attr_exmap = __ATTR_RO(exmap);
static ssize_t kmem_pool_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	int count_1M, count_64K, total;

	count_1M = kmem_pool_1M.count;
	count_64K = kmem_pool_64K.count;
	total = count_1M * SZ_1MB + count_64K * SZ_64KB;

	return sprintf(buf, "0x%x %d %d\n", total, count_1M, count_64K);
}

static struct device_attribute dev_attr_kmem_pool = __ATTR_RO(kmem_pool);
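/*
 * Reading this attribute gives "<total bytes> <1MB blocks> <64KB blocks>",
 * e.g. (assuming two 1MB and one 64KB blocks reserved):
 *
 *	# cat kmem_pool
 *	0x210000 2 1
 */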
/*
 * DSP MMU interrupt handler
 */

/*
 * We ignore prefetch err.
 */
#define MMUFAULT_MASK \
	(DSPMMU_FAULT_ST_PERM |\
	 DSPMMU_FAULT_ST_TLB_MISS |\
	 DSPMMU_FAULT_ST_TRANS)
irqreturn_t dsp_mmu_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	unsigned short status;
	unsigned short adh, adl;
	unsigned short dp;

	status = omap_readw(DSPMMU_FAULT_ST);
	adh = omap_readw(DSPMMU_FAULT_AD_H);
	adl = omap_readw(DSPMMU_FAULT_AD_L);
	dp = adh & DSPMMU_FAULT_AD_H_DP;
	dsp_fault_adr = MKLONG(adh & DSPMMU_FAULT_AD_H_ADR_MASK, adl);
	/* if the fault is masked, nothing to do */
	if ((status & MMUFAULT_MASK) == 0) {
		printk(KERN_DEBUG "DSP MMU interrupt, but ignoring.\n");
		/*
		 * note: in OMAP1710,
		 * when CACHE + DMA domain gets out of idle in DSP,
		 * MMU interrupt occurs but DSPMMU_FAULT_ST is not set.
		 * in this case, we just ignore the interrupt.
		 */
		if (status) {
			printk(KERN_DEBUG "%s%s%s%s\n",
			       (status & DSPMMU_FAULT_ST_PREF)?
					" (prefetch err)" : "",
			       (status & DSPMMU_FAULT_ST_PERM)?
					" (permission fault)" : "",
			       (status & DSPMMU_FAULT_ST_TLB_MISS)?
					" (TLB miss)" : "",
			       (status & DSPMMU_FAULT_ST_TRANS) ?
					" (translation fault)": "");
			printk(KERN_DEBUG
			       "fault address = %s: 0x%06lx\n",
			       dp ? "DATA" : "PROGRAM",
			       dsp_fault_adr);
		}
		return IRQ_HANDLED;
	}

	printk(KERN_INFO "DSP MMU interrupt!\n");
	printk(KERN_INFO "%s%s%s%s\n",
	       (status & DSPMMU_FAULT_ST_PREF)?
			(MMUFAULT_MASK & DSPMMU_FAULT_ST_PREF)?
				" prefetch err":
				" (prefetch err)":
			"",
	       (status & DSPMMU_FAULT_ST_PERM)?
			(MMUFAULT_MASK & DSPMMU_FAULT_ST_PERM)?
				" permission fault":
				" (permission fault)":
			"",
	       (status & DSPMMU_FAULT_ST_TLB_MISS)?
			(MMUFAULT_MASK & DSPMMU_FAULT_ST_TLB_MISS)?
				" TLB miss":
				" (TLB miss)":
			"",
	       (status & DSPMMU_FAULT_ST_TRANS)?
			(MMUFAULT_MASK & DSPMMU_FAULT_ST_TRANS)?
				" translation fault":
				" (translation fault)":
			"");
	printk(KERN_INFO "fault address = %s: 0x%06lx\n",
	       dp ? "DATA" : "PROGRAM",
	       dsp_fault_adr);

	if (dsp_is_ready()) {
		/*
		 * If we call dsp_exmap() here,
		 * "kernel BUG at slab.c" occurs.
		 */
		/* FIXME */
		dsp_err_mmu_set(dsp_fault_adr);
	} else {
		disable_irq(INT_DSP_MMU);
		__dsp_mmu_itack();
		printk(KERN_INFO "Resetting DSP...\n");
		dsp_cpustat_request(CPUSTAT_RESET);
		enable_irq(INT_DSP_MMU);
		/*
		 * if we enable the following, semaphore locking should
		 * be avoided.
		 */
		printk(KERN_INFO "Flushing DSP MMU...\n");
		exmap_flush();
		dsp_mmu_init();
	}

	return IRQ_HANDLED;
}
struct file_operations dsp_mem_fops = {
	.owner   = THIS_MODULE,
	.llseek  = dsp_mem_lseek,
	.read    = dsp_mem_read,
	.write   = dsp_mem_write,
	.ioctl   = dsp_mem_ioctl,
	.mmap    = dsp_mem_mmap,
	.open    = dsp_mem_open,
	.release = dsp_mem_release,
};
void dsp_mem_start(void)
{
	dsp_register_mem_cb(intmem_enable, intmem_disable);
}

void dsp_mem_stop(void)
{
	memset(&mem_sync, 0, sizeof(struct mem_sync_struct));
	dsp_unregister_mem_cb();
}
int __init dsp_mem_init(void)
{
	int i;

	for (i = 0; i < DSPMMU_TLB_LINES; i++) {
		exmap_tbl[i].valid = 0;
	}

	dspvect_page = (void *)__get_dma_pages(GFP_KERNEL, 0);
	if (dspvect_page == NULL) {
		printk(KERN_ERR
		       "omapdsp: failed to allocate memory "
		       "for dsp vector table\n");
		return -ENOMEM;
	}
	dsp_mmu_init();
	dsp_set_idle_boot_base(IDLEPG_BASE, IDLEPG_SIZE);

	device_create_file(&dsp_device.dev, &dev_attr_mmu);
	device_create_file(&dsp_device.dev, &dev_attr_exmap);
	device_create_file(&dsp_device.dev, &dev_attr_kmem_pool);

	return 0;
}
void dsp_mem_exit(void)
{
	dsp_mmu_shutdown();
	dsp_kmem_release();

	if (dspvect_page != NULL) {
		unsigned long virt;

		down_read(&exmap_sem);

		virt = (unsigned long)dspbyte_to_virt(DSP_INIT_PAGE);
		flush_tlb_kernel_range(virt, virt + PAGE_SIZE);
		free_page((unsigned long)dspvect_page);
		dspvect_page = NULL;

		up_read(&exmap_sem);
	}

	device_remove_file(&dsp_device.dev, &dev_attr_mmu);
	device_remove_file(&dsp_device.dev, &dev_attr_exmap);
	device_remove_file(&dsp_device.dev, &dev_attr_kmem_pool);
}