Merge branch 'linus' into tracing/mmiotrace-mergefixups
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index d176b23110cc875187be67d3c22f227ec38cbdeb..e92aa461f4d6ddd62a77ea1cf5382a2ad2a0e0c7 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -12,6 +12,7 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
+#include <linux/mmiotrace.h>
 
 #include <asm/cacheflush.h>
 #include <asm/e820.h>
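The only new dependency is the mmiotrace header, which declares the two hooks used later in this diff. When the tracer is configured out, these are expected to compile away to empty inlines so ioremap()/iounmap() pay nothing; a minimal sketch of that pattern (the exact Kconfig guard name is an assumption, not taken from this diff):

    /* Sketch only: how the hooks can vanish when tracing is off. */
    #ifdef CONFIG_MMIOTRACE
    extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
                                  void __iomem *addr);
    extern void mmiotrace_iounmap(volatile void __iomem *addr);
    #else
    static inline void mmiotrace_ioremap(resource_size_t offset,
                                         unsigned long size,
                                         void __iomem *addr) { }
    static inline void mmiotrace_iounmap(volatile void __iomem *addr) { }
    #endif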
@@ -117,15 +118,18 @@ int ioremap_change_attr(unsigned long vaddr, unsigned long size,
  * have to convert them into an offset in a page-aligned mapping, but the
  * caller shouldn't need to know that small detail.
  */
-static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
-                              unsigned long prot_val)
+static void __iomem *__ioremap_caller(resource_size_t phys_addr,
+               unsigned long size, unsigned long prot_val, void *caller)
 {
        unsigned long pfn, offset, vaddr;
        resource_size_t last_addr;
+       const resource_size_t unaligned_phys_addr = phys_addr;
+       const unsigned long unaligned_size = size;
        struct vm_struct *area;
        unsigned long new_prot_val;
        pgprot_t prot;
        int retval;
+       void __iomem *ret_addr;
 
        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
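Two things happen in this hunk. First, __ioremap() becomes __ioremap_caller() and gains a caller pointer, so the exported wrappers further down can pass __builtin_return_address(0) and each mapping gets attributed to the driver that asked for it rather than to the common helper. Second, the exact request is captured before the helper page-aligns it; later in the function (outside the shown context) the arguments are adjusted roughly like this (paraphrased from the kernel, not part of this hunk):

    offset = phys_addr & ~PAGE_MASK;    /* sub-page offset of the request */
    phys_addr &= PAGE_MASK;             /* map from the page boundary     */
    size = PAGE_ALIGN(last_addr + 1) - phys_addr;

mmiotrace, however, wants to report the region the driver actually asked for, hence the const unaligned_phys_addr/unaligned_size copies taken up front.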
@@ -149,7 +153,8 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
         * Don't allow anybody to remap normal RAM that we're using..
         */
        for (pfn = phys_addr >> PAGE_SHIFT;
-                               (pfn << PAGE_SHIFT) < last_addr; pfn++) {
+                               (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
+                               pfn++) {
 
                int is_ram = page_is_ram(pfn);
 
@@ -176,11 +181,11 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
                /*
                 * Do not fallback to certain memory types with certain
                 * requested type:
-                * - request is uncached, return cannot be write-back
-                * - request is uncached, return cannot be write-combine
+                * - request is uc-, return cannot be write-back
+                * - request is uc-, return cannot be write-combine
                 * - request is write-combine, return cannot be write-back
                 */
-               if ((prot_val == _PAGE_CACHE_UC &&
+               if ((prot_val == _PAGE_CACHE_UC_MINUS &&
                     (new_prot_val == _PAGE_CACHE_WB ||
                      new_prot_val == _PAGE_CACHE_WC)) ||
                    (prot_val == _PAGE_CACHE_WC &&
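With PAT, reserve_memtype() may hand back a different cache type (new_prot_val) than was requested, and the code only accepts fallbacks that are at least as strict as the request. The callers in this file now request UC-, WC, or WB rather than plain UC, so the guard keys on UC-. Restated as a predicate, with the helper name being illustrative only:

    /* Sketch: which request/result pairs the check rejects. */
    static bool fallback_is_acceptable(unsigned long req, unsigned long got)
    {
            if (req == _PAGE_CACHE_UC_MINUS &&
                (got == _PAGE_CACHE_WB || got == _PAGE_CACHE_WC))
                    return false;   /* uc- must not weaken to wb/wc */
            if (req == _PAGE_CACHE_WC && got == _PAGE_CACHE_WB)
                    return false;   /* wc must not weaken to wb */
            return true;
    }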
@@ -201,6 +206,9 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
        default:
                prot = PAGE_KERNEL_NOCACHE;
                break;
+       case _PAGE_CACHE_UC_MINUS:
+               prot = PAGE_KERNEL_UC_MINUS;
+               break;
        case _PAGE_CACHE_WC:
                prot = PAGE_KERNEL_WC;
                break;
@@ -212,7 +220,7 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
        /*
         * Ok, go for it..
         */
-       area = get_vm_area(size, VM_IOREMAP);
+       area = get_vm_area_caller(size, VM_IOREMAP, caller);
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
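get_vm_area_caller() stores the passed pointer in the vm_struct, which is what lets /proc/vmallocinfo name the actual ioremap caller for each mapping instead of showing the internal helper for all of them. Plain get_vm_area() is conceptually just the same call with its own return address; a sketch (not the literal kernel definition):

    struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
    {
            return get_vm_area_caller(size, flags,
                                      __builtin_return_address(0));
    }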
@@ -229,7 +237,10 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
                return NULL;
        }
 
-       return (void __iomem *) (vaddr + offset);
+       ret_addr = (void __iomem *) (vaddr + offset);
+       mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);
+
+       return ret_addr;
 }
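The hook runs only after the mapping has fully succeeded, and it is handed the original request (unaligned_phys_addr, unaligned_size) plus the cookie returned to the driver, so the tracer can later match faulting virtual addresses back to the exact MMIO window. A sketch of the receiving side; is_enabled() and ioremap_trace_core() are assumed names here, not taken from this file:

    void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
                           void __iomem *addr)
    {
            if (!is_enabled())      /* tracer not armed: stay out of the way */
                    return;
            ioremap_trace_core(offset, size, addr);
    }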
 
 /**
@@ -255,7 +266,17 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
  */
 void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
 {
-       return __ioremap(phys_addr, size, _PAGE_CACHE_UC);
+       /*
+        * Ideally, this should be:
+        *      pat_wc_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
+        *
+        * Till we fix all X drivers to use ioremap_wc(), we will use
+        * UC MINUS.
+        */
+       unsigned long val = _PAGE_CACHE_UC_MINUS;
+
+       return __ioremap_caller(phys_addr, size, val,
+                               __builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_nocache);
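The practical difference: a full-UC mapping ignores MTRRs, while UC- can still be promoted to write-combining by an overlapping WC MTRR. X servers traditionally rely on a WC MTRR over the framebuffer while calling ioremap_nocache(), so requesting strong UC here would silently destroy their write-combining; UC- preserves the pre-PAT behaviour until those drivers call ioremap_wc() themselves. Typical driver-side use, with the device, BAR index, and register offset being assumptions:

    #include <linux/pci.h>
    #include <linux/io.h>

    static int example_probe(struct pci_dev *pdev)
    {
            void __iomem *regs;

            /* Map BAR 0 uncached (UC-) for register access. */
            regs = ioremap_nocache(pci_resource_start(pdev, 0),
                                   pci_resource_len(pdev, 0));
            if (!regs)
                    return -ENOMEM;

            writel(1, regs + 0x04);  /* hypothetical control register */
            iounmap(regs);
            return 0;
    }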
 
@@ -272,7 +293,8 @@ EXPORT_SYMBOL(ioremap_nocache);
 void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
 {
        if (pat_wc_enabled)
-               return __ioremap(phys_addr, size, _PAGE_CACHE_WC);
+               return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
+                                       __builtin_return_address(0));
        else
                return ioremap_nocache(phys_addr, size);
 }
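When PAT is unavailable the WC request quietly degrades to the UC- path above, which is correct (stricter) but slower; drivers wanting real write-combining on such systems would have to set up an MTRR themselves. Illustrative framebuffer use, with fb_base and fb_len as placeholders:

    /* Map a framebuffer aperture for streaming pixel writes. */
    void __iomem *fb = ioremap_wc(fb_base, fb_len);
    if (fb) {
            memset_io(fb, 0, fb_len);  /* clear screen; writes may combine */
            iounmap(fb);
    }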
@@ -280,7 +302,8 @@ EXPORT_SYMBOL(ioremap_wc);
 
 void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
 {
-       return __ioremap(phys_addr, size, _PAGE_CACHE_WB);
+       return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
+                               __builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_cache);
 
@@ -309,6 +332,8 @@ void iounmap(volatile void __iomem *addr)
        addr = (volatile void __iomem *)
                (PAGE_MASK & (unsigned long __force)addr);
 
+       mmiotrace_iounmap(addr);
+
        /* Use the vm area unlocked, assuming the caller
           ensures there isn't another iounmap for the same address
           in parallel. Reuse of the virtual address is prevented by
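Note the ordering: the hook fires after addr has been masked down to its page boundary but before the vm area is torn down, so the tracer can release whatever it still holds on the mapping while the virtual range is guaranteed not to be reused yet. Since mmiotrace_ioremap() saw the unaligned cookie, a tracer matching the two has to compare page-masked addresses; a sketch, where trace_list and struct remap_trace are illustrative names only:

    static struct remap_trace *find_trace(volatile void __iomem *addr)
    {
            struct remap_trace *t;
            unsigned long page = (unsigned long)addr & PAGE_MASK;

            list_for_each_entry(t, &trace_list, list)
                    if (((unsigned long)t->virt & PAGE_MASK) == page)
                            return t;
            return NULL;
    }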
@@ -577,10 +602,11 @@ void __init early_iounmap(void *addr, unsigned long size)
        unsigned long offset;
        unsigned int nrpages;
        enum fixed_addresses idx;
-       unsigned int nesting;
+       int nesting;
 
        nesting = --early_ioremap_nested;
-       WARN_ON(nesting < 0);
+       if (WARN_ON(nesting < 0))
+               return;
 
        if (early_ioremap_debug) {
                printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
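The type change is the real fix in this last hunk: nesting was unsigned, so the nesting < 0 underflow test was dead code that could never fire. With a signed local the check works, and returning early keeps an unbalanced early_iounmap() from tearing down fixmap slots it never owned. The failure mode, in miniature:

    /* With the old declaration the warning can never trigger: */
    unsigned int nesting = 0;
    nesting--;                      /* wraps to UINT_MAX, not -1   */
    WARN_ON(nesting < 0);           /* unsigned < 0: always false  */

    /* With the fix: */
    int snesting = 0;
    snesting--;                     /* genuinely -1                */
    if (WARN_ON(snesting < 0))      /* fires, and we bail out      */
            return;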