x86: add table_top check for alloc_low_page in 64 bit
[linux-2.6-omap-h63xx.git] / arch/x86/mm/ioremap.c
index d176b23110cc875187be67d3c22f227ec38cbdeb..092b3d72498cb2e85a5be8756cf632dab1797913 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -117,8 +117,8 @@ int ioremap_change_attr(unsigned long vaddr, unsigned long size,
  * have to convert them into an offset in a page-aligned mapping, but the
  * caller shouldn't need to know that small detail.
  */
-static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
-                              unsigned long prot_val)
+static void __iomem *__ioremap_caller(resource_size_t phys_addr,
+               unsigned long size, unsigned long prot_val, void *caller)
 {
        unsigned long pfn, offset, vaddr;
        resource_size_t last_addr;
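
Note: the comment above this hunk points out that callers may pass MMIO addresses that are not page aligned. A minimal user-space sketch of the alignment arithmetic __ioremap_caller() performs later in the function; the constants and the example address are illustrative, not taken from this patch:

	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define PAGE_MASK	(~(PAGE_SIZE - 1))
	#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

	int main(void)
	{
		unsigned long phys_addr = 0xfed00123;	/* hypothetical MMIO address */
		unsigned long size      = 0x80;
		unsigned long last_addr = phys_addr + size - 1;

		unsigned long offset = phys_addr & ~PAGE_MASK;		/* 0x123 */
		unsigned long base   = phys_addr & PAGE_MASK;		/* 0xfed00000 */
		unsigned long map_sz = PAGE_ALIGN(last_addr + 1) - base;	/* one page */

		/* the mapping covers whole pages; the offset is added back
		 * to the returned virtual address */
		printf("base=%#lx offset=%#lx map_sz=%#lx\n", base, offset, map_sz);
		return 0;
	}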
@@ -142,14 +142,15 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
        /*
         * Don't remap the low PCI/ISA area, it's always mapped..
         */
-       if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
+       if (is_ISA_range(phys_addr, last_addr))
                return (__force void __iomem *)phys_to_virt(phys_addr);
 
        /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
        for (pfn = phys_addr >> PAGE_SHIFT;
-                               (pfn << PAGE_SHIFT) < last_addr; pfn++) {
+                               (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
+                               pfn++) {
 
                int is_ram = page_is_ram(pfn);
 
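
Note: is_ISA_range() folds the open-coded comparison from the removed line into a helper from the x86 headers. A hedged user-space sketch of the same check; the constants are the traditional 640 KiB..1 MiB VGA/ISA hole, but the authoritative definition is the kernel macro, not this snippet:

	#include <stdio.h>
	#include <stdbool.h>

	#define ISA_START_ADDRESS	0xa0000UL
	#define ISA_END_ADDRESS		0x100000UL

	/* sketch of the check is_ISA_range() expresses */
	static bool is_isa_range(unsigned long start, unsigned long last)
	{
		return start >= ISA_START_ADDRESS && last < ISA_END_ADDRESS;
	}

	int main(void)
	{
		printf("%d\n", is_isa_range(0xb8000, 0xb8fff));		/* 1: legacy VGA text */
		printf("%d\n", is_isa_range(0xfee00000, 0xfee00fff));	/* 0: not in the ISA hole */
		return 0;
	}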
@@ -176,11 +177,11 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
                /*
                 * Do not fallback to certain memory types with certain
                 * requested type:
-                * - request is uncached, return cannot be write-back
-                * - request is uncached, return cannot be write-combine
+                * - request is uc-, return cannot be write-back
+                * - request is uc-, return cannot be write-combine
                 * - request is write-combine, return cannot be write-back
                 */
-               if ((prot_val == _PAGE_CACHE_UC &&
+               if ((prot_val == _PAGE_CACHE_UC_MINUS &&
                     (new_prot_val == _PAGE_CACHE_WB ||
                      new_prot_val == _PAGE_CACHE_WC)) ||
                    (prot_val == _PAGE_CACHE_WC &&
@@ -201,6 +202,9 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
        default:
                prot = PAGE_KERNEL_NOCACHE;
                break;
+       case _PAGE_CACHE_UC_MINUS:
+               prot = PAGE_KERNEL_UC_MINUS;
+               break;
        case _PAGE_CACHE_WC:
                prot = PAGE_KERNEL_WC;
                break;
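
Note: the two hunks above tighten the memory-type handling: a UC- request may not be satisfied by a WB or WC mapping, a WC request may not be satisfied by WB, and the new UC- case selects PAGE_KERNEL_UC_MINUS. A compact user-space restatement of the fallback rule; the enum values are placeholders, not the real _PAGE_CACHE_* bits:

	#include <stdio.h>
	#include <stdbool.h>

	enum cache_type { WB, WC, UC_MINUS, UC };	/* placeholder values */

	/* may reserve_memtype() hand back 'ret' for request 'req'?
	 * a more-cached return type than requested is rejected */
	static bool return_type_ok(enum cache_type req, enum cache_type ret)
	{
		if (req == UC_MINUS && (ret == WB || ret == WC))
			return false;
		if (req == WC && ret == WB)
			return false;
		return true;
	}

	int main(void)
	{
		printf("UC- -> WB  allowed? %d\n", return_type_ok(UC_MINUS, WB));	/* 0 */
		printf("WC  -> UC- allowed? %d\n", return_type_ok(WC, UC_MINUS));	/* 1 */
		return 0;
	}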
@@ -212,7 +216,7 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
        /*
         * Ok, go for it..
         */
-       area = get_vm_area(size, VM_IOREMAP);
+       area = get_vm_area_caller(size, VM_IOREMAP, caller);
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
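
Note: get_vm_area_caller() records the address that the ioremap wrappers obtain from __builtin_return_address(0), so the vmalloc area can be attributed to the driver that created it (e.g. in /proc/vmallocinfo). A small user-space demonstration of the compiler builtin itself; the helper name here is made up:

	#include <stdio.h>

	/* noinline so the builtin really sees main() as the call site */
	static __attribute__((noinline)) void *who_called_me(void)
	{
		return __builtin_return_address(0);	/* address inside the caller */
	}

	int main(void)
	{
		printf("call site is near %p (main begins at %p)\n",
		       who_called_me(), (void *)main);
		return 0;
	}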
@@ -255,7 +259,17 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
  */
 void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
 {
-       return __ioremap(phys_addr, size, _PAGE_CACHE_UC);
+       /*
+        * Ideally, this should be:
+        *      pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
+        *
+        * Till we fix all X drivers to use ioremap_wc(), we will use
+        * UC MINUS.
+        */
+       unsigned long val = _PAGE_CACHE_UC_MINUS;
+
+       return __ioremap_caller(phys_addr, size, val,
+                               __builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_nocache);
 
@@ -271,8 +285,9 @@ EXPORT_SYMBOL(ioremap_nocache);
  */
 void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
 {
-       if (pat_wc_enabled)
-               return __ioremap(phys_addr, size, _PAGE_CACHE_WC);
+       if (pat_enabled)
+               return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
+                                       __builtin_return_address(0));
        else
                return ioremap_nocache(phys_addr, size);
 }
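
Note: with PAT disabled, ioremap_wc() now degrades to ioremap_nocache(), which requests UC- rather than strong UC; UC- can still be overridden to write-combining by an MTRR, which is why it is the safer default while X drivers are being converted (see the comment in the ioremap_nocache() hunk above). A hypothetical driver fragment showing the usual call pattern; the function name and BAR number are invented, only ioremap_wc() and the pci_resource_*() helpers are real APIs:

	#include <linux/pci.h>
	#include <linux/io.h>

	/* map a framebuffer BAR write-combined; on a kernel without PAT the
	 * same call transparently returns an uncached-minus mapping instead */
	static void __iomem *map_fb(struct pci_dev *pdev)
	{
		resource_size_t base = pci_resource_start(pdev, 0);	/* hypothetical BAR 0 */
		resource_size_t len  = pci_resource_len(pdev, 0);

		return ioremap_wc(base, len);
	}

The returned mapping is released with iounmap() as usual.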
@@ -280,7 +295,8 @@ EXPORT_SYMBOL(ioremap_wc);
 
 void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
 {
-       return __ioremap(phys_addr, size, _PAGE_CACHE_WB);
+       return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
+                               __builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_cache);
 
@@ -302,8 +318,8 @@ void iounmap(volatile void __iomem *addr)
         * vm_area and by simply returning an address into the kernel mapping
         * of ISA space.   So handle that here.
         */
-       if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
-           addr < phys_to_virt(ISA_END_ADDRESS))
+       if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
+           (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
                return;
 
        addr = (volatile void __iomem *)
@@ -316,7 +332,7 @@ void iounmap(volatile void __iomem *addr)
           cpa takes care of the direct mappings. */
        read_lock(&vmlist_lock);
        for (p = vmlist; p; p = p->next) {
-               if (p->addr == addr)
+               if (p->addr == (void __force *)addr)
                        break;
        }
        read_unlock(&vmlist_lock);
@@ -330,7 +346,7 @@ void iounmap(volatile void __iomem *addr)
        free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));
 
        /* Finally remove it */
-       o = remove_vm_area((void *)addr);
+       o = remove_vm_area((void __force *)addr);
        BUG_ON(p != o || o == NULL);
        kfree(p);
 }
@@ -349,7 +365,7 @@ void *xlate_dev_mem_ptr(unsigned long phys)
        if (page_is_ram(start >> PAGE_SHIFT))
                return __va(phys);
 
-       addr = (void *)ioremap(start, PAGE_SIZE);
+       addr = (void __force *)ioremap(start, PAGE_SIZE);
        if (addr)
                addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));
 
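
Note: the OR at the end of the hunk above relies on ioremap() of a page-aligned start address returning a page-aligned virtual address, so OR-ing in the sub-page offset is equivalent to adding it. A user-space illustration with made-up addresses:

	#include <stdio.h>

	#define PAGE_SIZE	4096UL
	#define PAGE_MASK	(~(PAGE_SIZE - 1))

	int main(void)
	{
		unsigned long phys  = 0xfed00123;	/* hypothetical /dev/mem target */
		unsigned long vaddr = 0xc0010000UL;	/* stand-in for the page-aligned ioremap() result */

		unsigned long or_addr  = vaddr | (phys & ~PAGE_MASK);
		unsigned long add_addr = vaddr + (phys & ~PAGE_MASK);

		printf("OR and ADD agree: %d\n", or_addr == add_addr);	/* prints 1 */
		return 0;
	}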
@@ -365,8 +381,6 @@ void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
        return;
 }
 
-#ifdef CONFIG_X86_32
-
 int __initdata early_ioremap_debug;
 
 static int __init early_ioremap_debug_setup(char *str)
@@ -378,8 +392,7 @@ static int __init early_ioremap_debug_setup(char *str)
 early_param("early_ioremap_debug", early_ioremap_debug_setup);
 
 static __initdata int after_paging_init;
-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)]
-               __section(.bss.page_aligned);
+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
 
 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
 {
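
Note: __page_aligned_bss replaces the open-coded section annotation; as far as I recall it expands to __section(.bss.page_aligned) plus __aligned(PAGE_SIZE), so the table is both placed in the page-aligned BSS section and explicitly aligned. A user-space analogue of the alignment part, with PAGE_SIZE hard-coded for illustration:

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SIZE 4096

	/* the aligned attribute guarantees the array starts on a page boundary;
	 * the kernel macro additionally chooses the section */
	static uint64_t table[PAGE_SIZE / sizeof(uint64_t)]
		__attribute__((aligned(PAGE_SIZE)));

	int main(void)
	{
		printf("page aligned: %d\n", (int)(((uintptr_t)table % PAGE_SIZE) == 0));	/* 1 */
		return 0;
	}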
@@ -468,6 +481,7 @@ static void __init __early_set_fixmap(enum fixed_addresses idx,
                return;
        }
        pte = early_ioremap_pte(addr);
+
        if (pgprot_val(flags))
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        else
@@ -577,10 +591,11 @@ void __init early_iounmap(void *addr, unsigned long size)
        unsigned long offset;
        unsigned int nrpages;
        enum fixed_addresses idx;
-       unsigned int nesting;
+       int nesting;
 
        nesting = --early_ioremap_nested;
-       WARN_ON(nesting < 0);
+       if (WARN_ON(nesting < 0))
+               return;
 
        if (early_ioremap_debug) {
                printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
@@ -608,5 +623,3 @@ void __this_fixmap_does_not_exist(void)
 {
        WARN_ON(1);
 }
-
-#endif /* CONFIG_X86_32 */
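
Note on the signedness change in the early_iounmap() hunk above: with the old unsigned nesting counter, the WARN_ON(nesting < 0) underflow check could never fire, because decrementing an unsigned zero wraps to UINT_MAX instead of going negative. A short user-space demonstration:

	#include <stdio.h>

	int main(void)
	{
		unsigned int u = 0;
		int s = 0;

		--u;	/* wraps around to UINT_MAX */
		--s;	/* becomes -1 */

		printf("unsigned: u = %u, (u < 0) is %d\n", u, u < 0);	/* 4294967295, 0 */
		printf("signed:   s = %d, (s < 0) is %d\n", s, s < 0);	/* -1, 1 */
		return 0;
	}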