Merge branch 'devel' of master.kernel.org:/home/rmk/linux-2.6-arm

diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index bad49331bbf94a22ded76f9222a15da063bd7846..d18f2397ee2dd7b0903514317466251990a78d7f 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -15,8 +15,8 @@
  */
 #include <linux/init.h>
 #include <linux/mm.h>
+#include <linux/highmem.h>
 
-#include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
@@ -35,7 +35,7 @@
 static DEFINE_SPINLOCK(minicache_lock);
 
 /*
- * XScale mini-dcache optimised copy_user_page
+ * XScale mini-dcache optimised copy_user_highpage
  *
  * We flush the destination cache lines just before we write the data into the
  * corresponding address.  Since the Dcache is read-allocate, this removes the
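
For context: the hunk below moves this minicache copier from the old copy_user_page() interface, which took kernel virtual addresses, to copy_user_highpage(), which takes struct page pointers and kmaps them on demand. The generic fallback in include/linux/highmem.h, used by architectures that do not define __HAVE_ARCH_COPY_USER_HIGHPAGE, looked roughly like the following in same-era kernels; this is reconstructed from memory, so treat it as a sketch rather than an exact quote:

	static inline void copy_user_highpage(struct page *to, struct page *from,
		unsigned long vaddr, struct vm_area_struct *vma)
	{
		char *vfrom, *vto;

		/* map both pages into kernel space, copy, then unmap */
		vfrom = kmap_atomic(from, KM_USER0);
		vto = kmap_atomic(to, KM_USER1);
		copy_user_page(vto, vfrom, vaddr, to);
		kunmap_atomic(vfrom, KM_USER0);
		kunmap_atomic(vto, KM_USER1);
	}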
@@ -90,48 +90,53 @@ mc_copy_user_page(void *from, void *to)
        : "r" (from), "r" (to), "I" (PAGE_SIZE / 64 - 1));
 }
 
-void xscale_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
+void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
+       unsigned long vaddr)
 {
-       struct page *page = virt_to_page(kfrom);
+       void *kto = kmap_atomic(to, KM_USER1);
 
-       if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
-               __flush_dcache_page(page_mapping(page), page);
+       if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
+               __flush_dcache_page(page_mapping(from), from);
 
        spin_lock(&minicache_lock);
 
-       set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot), 0);
+       set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
        flush_tlb_kernel_page(COPYPAGE_MINICACHE);
 
        mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
 
        spin_unlock(&minicache_lock);
+
+       kunmap_atomic(kto, KM_USER1);
 }
 
 /*
  * XScale optimised clear_user_page
  */
-void __attribute__((naked))
-xscale_mc_clear_user_page(void *kaddr, unsigned long vaddr)
+void
+xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
+       void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
        asm volatile(
-       "mov    r1, %0                          \n\
+       "mov    r1, %2                          \n\
        mov     r2, #0                          \n\
        mov     r3, #0                          \n\
-1:     mov     ip, r0                          \n\
-       strd    r2, [r0], #8                    \n\
-       strd    r2, [r0], #8                    \n\
-       strd    r2, [r0], #8                    \n\
-       strd    r2, [r0], #8                    \n\
+1:     mov     ip, %0                          \n\
+       strd    r2, [%0], #8                    \n\
+       strd    r2, [%0], #8                    \n\
+       strd    r2, [%0], #8                    \n\
+       strd    r2, [%0], #8                    \n\
        mcr     p15, 0, ip, c7, c10, 1          @ clean D line\n\
        subs    r1, r1, #1                      \n\
        mcr     p15, 0, ip, c7, c6, 1           @ invalidate D line\n\
-       bne     1b                              \n\
-       mov     pc, lr"
-       :
-       : "I" (PAGE_SIZE / 32));
+       bne     1b"
+       : "=r" (ptr)
+       : "0" (kaddr), "I" (PAGE_SIZE / 32)
+       : "r1", "r2", "r3", "ip");
+       kunmap_atomic(kaddr, KM_USER0);
 }
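
Note the change above: dropping __attribute__((naked)) means the function now gets an ordinary compiler-generated prologue and epilogue, so the asm can no longer hardcode r0 or return via "mov pc, lr". Instead the pointer goes through a matching constraint: "=r" (ptr) declares a register output and "0" (kaddr) makes that same register start out holding kaddr, so GCC knows the register is advanced by the post-indexed strd stores. A minimal standalone illustration of this tied-operand pattern (a hypothetical example, not kernel code; assumes ARM mode and an 8-byte-aligned pointer, as strd requires):

	/* Zero 32 bytes; %0 is both input and output via a matching constraint. */
	static inline void *zero32(void *p)
	{
		void *end;
		asm volatile(
		"mov	r2, #0			\n\
		mov	r3, #0			\n\
		strd	r2, [%0], #8		\n\
		strd	r2, [%0], #8		\n\
		strd	r2, [%0], #8		\n\
		strd	r2, [%0], #8"
		: "=r" (end)	/* output: the final pointer value */
		: "0" (p)	/* input: starts in the same register as %0 */
		: "r2", "r3", "memory");
		return end;	/* p + 32 */
	}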
 
 struct cpu_user_fns xscale_mc_user_fns __initdata = {
-       .cpu_clear_user_page    = xscale_mc_clear_user_page, 
-       .cpu_copy_user_page     = xscale_mc_copy_user_page,
+       .cpu_clear_user_highpage = xscale_mc_clear_user_highpage,
+       .cpu_copy_user_highpage = xscale_mc_copy_user_highpage,
 };
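
How these hooks are reached: cpu_user_fns is the per-CPU vector that the ARM page-copy entry points dispatch through, selected at boot from the processor info; this patch series renames its members from *_user_page to *_user_highpage. The wiring in arch/arm/include/asm/page.h looked approximately like this (quoted from memory of the same-era header, so a sketch rather than an authoritative copy):

	struct cpu_user_fns {
		void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
		void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
				unsigned long vaddr);
	};

	extern struct cpu_user_fns cpu_user;

	#define __cpu_clear_user_highpage	cpu_user.cpu_clear_user_highpage
	#define __cpu_copy_user_highpage	cpu_user.cpu_copy_user_highpage

	#define clear_user_highpage(page,vaddr)			\
		__cpu_clear_user_highpage(page, vaddr)

	#define __HAVE_ARCH_COPY_USER_HIGHPAGE
	#define copy_user_highpage(to,from,vaddr,vma)		\
		__cpu_copy_user_highpage(to, from, vaddr)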