/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2005, 2006 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file "COPYING" in the main directory of this
 * archive for more details.
 */
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/addrspace.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
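
/*
 * Lowest level of the remapping walk: fill a run of PTEs within one
 * PMD with entries for a physically contiguous range, using the
 * protection bits assembled from the caller's flags.
 */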
static inline void remap_area_pte(pte_t * pte, unsigned long address,
        unsigned long size, unsigned long phys_addr, unsigned long flags)
{
        unsigned long end;
        unsigned long pfn;
        pgprot_t pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW |
                                   _PAGE_DIRTY | _PAGE_ACCESSED |
                                   _PAGE_HW_SHARED | _PAGE_FLAGS_HARD | flags);

        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        pfn = phys_addr >> PAGE_SHIFT;
        do {
                if (!pte_none(*pte)) {
                        printk("remap_area_pte: page already exists\n");
                        BUG();
                }
                set_pte(pte, pfn_pte(pfn, pgprot));
                address += PAGE_SIZE;
                pfn++;
                pte++;
        } while (address && (address < end));
}
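
/*
 * Middle level: for each PMD entry covering the range, allocate a PTE
 * table if necessary and hand the corresponding chunk down to
 * remap_area_pte().
 */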
static inline int remap_area_pmd(pmd_t * pmd, unsigned long address,
        unsigned long size, unsigned long phys_addr, unsigned long flags)
{
        unsigned long end;

        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        phys_addr -= address;
        do {
                pte_t * pte = pte_alloc_kernel(pmd, address);
                if (!pte)
                        return -ENOMEM;
                remap_area_pte(pte, address, end - address, address + phys_addr, flags);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address && (address < end));
        return 0;
}
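
/*
 * Top level: walk the kernel page tables (pgd -> pud -> pmd) for the
 * requested virtual range, allocating intermediate levels as needed,
 * and let remap_area_pmd() populate the leaf entries.
 */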
int remap_area_pages(unsigned long address, unsigned long phys_addr,
                     unsigned long size, unsigned long flags)
{
        int error;
        pgd_t * dir;
        unsigned long end = address + size;

        phys_addr -= address;
        dir = pgd_offset_k(address);
        flush_cache_all();
        do {
                pud_t *pud;
                pmd_t *pmd;

                error = -ENOMEM;
                pud = pud_alloc(&init_mm, dir, address);
                if (!pud)
                        break;
                pmd = pmd_alloc(&init_mm, pud, address);
                if (!pmd)
                        break;
                if (remap_area_pmd(pmd, address, end - address,
                                   phys_addr + address, flags))
                        break;
                error = 0;
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));
        flush_tlb_all();
        return error;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
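/*
 * For illustration only, with hypothetical values (4k pages assumed):
 * a request for phys_addr 0x1f000104 and size 0x40 is turned into
 *
 *      last_addr = 0x1f000104 + 0x40 - 1 = 0x1f000143
 *      offset    = 0x104,  phys_addr = 0x1f000000 (PAGE_MASK applied)
 *      size      = PAGE_ALIGN(0x1f000144) - 0x1f000000 = 0x1000
 *
 * so one whole page is mapped and the returned cookie points 0x104
 * bytes into it.
 */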
void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
                        unsigned long flags)
{
        struct vm_struct * area;
        unsigned long offset, last_addr, addr, orig_addr;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        /*
         * Don't remap the low PCI/ISA area, it's always mapped..
         */
        if (phys_addr >= 0xA0000 && last_addr < 0x100000)
                return (void __iomem *)phys_to_virt(phys_addr);

        /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
        if (phys_addr < virt_to_phys(high_memory))
                return NULL;

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;

        area = get_vm_area(size, VM_IOREMAP);
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
        orig_addr = addr = (unsigned long)area->addr;
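        /*
         * Keep the original VMA base: addr may be advanced past any
         * PMB-covered prefix below, while the failure vunmap() and the
         * final offset fixup must still refer to the start of the area.
         */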

        /*
         * First try to remap through the PMB once a valid VMA has been
         * established. Smaller allocations (or the rest of the size
         * remaining after a PMB mapping due to the size not being
         * perfectly aligned on a PMB size boundary) are then mapped
         * through the UTLB using conventional page tables.
         *
         * PMB entries are all pre-faulted.
         */
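        /*
         * For example (hypothetical figures): a 24MB request of which
         * pmb_remap() manages to cover the first 16MB continues below
         * with addr and phys_addr advanced by 16MB and size reduced to
         * 8MB, and that remainder is mapped through page tables by
         * remap_area_pages().
         */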
        if (unlikely(size >= 0x1000000)) {
                unsigned long mapped = pmb_remap(addr, phys_addr, size, flags);

                if (likely(mapped)) {
                        addr += mapped;
                        phys_addr += mapped;
                        size -= mapped;
                }
        }

        if (likely(size))
                if (remap_area_pages(addr, phys_addr, size, flags)) {
                        vunmap((void *)orig_addr);
                        return NULL;
                }

        return (void __iomem *)(offset + (char *)orig_addr);
}
EXPORT_SYMBOL(__ioremap);

void __iounmap(void __iomem *addr)
{
        unsigned long vaddr = (unsigned long __force)addr;
        struct vm_struct *p;

        if (PXSEG(vaddr) < P3SEG)
                return;

        /*
         * Purge any PMB entries that may have been established for this
         * mapping, then proceed with conventional VMA teardown.
         *
         * XXX: Note that due to the way that remove_vm_area() does
         * matching of the resultant VMA, we aren't able to fast-forward
         * the address past the PMB space until the end of the VMA where
         * the page tables reside. As such, unmap_vm_area() will be
         * forced to linearly scan over the area until it finds the page
         * tables where PTEs that need to be unmapped actually reside,
         * which is far from optimal. Perhaps we need to use a separate
         * VMA for the PMB mappings?
         */
        pmb_unmap(vaddr);

        p = remove_vm_area((void *)(vaddr & PAGE_MASK));
        if (!p) {
                printk(KERN_ERR "%s: bad address %p\n", __FUNCTION__, addr);
                return;
        }

        kfree(p);
}
EXPORT_SYMBOL(__iounmap);