* arch/sh/mm/pg-sh4.c
*
* Copyright (C) 1999, 2000, 2002 Niibe Yutaka
- * Copyright (C) 2002 - 2005 Paul Mundt
+ * Copyright (C) 2002 - 2007 Paul Mundt
*
* Released under the terms of the GNU GPL v2.0.
*/
#include <linux/mm.h>
+#include <linux/init.h>
#include <linux/mutex.h>
+#include <linux/fs.h>
+#include <linux/highmem.h>
+#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
-extern struct mutex p3map_mutex[];
-
#define CACHE_ALIAS (current_cpu_data.dcache.alias_mask)
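+
+/* Walk the kernel page tables down to the pte that backs a fixmap slot. */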
+#define kmap_get_fixmap_pte(vaddr) \
+ pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))
+
+static pte_t *kmap_coherent_pte;
+
+void __init kmap_coherent_init(void)
+{
+ unsigned long vaddr;
+
+ /* cache the first coherent kmap pte */
+ vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
+ kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
+}
+
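+/*
+ * The SH-4 D-cache is virtually indexed, so two mappings of the same
+ * physical page only hit the same cache lines when their addresses agree
+ * in the alias (colour) bits.  kmap_coherent() therefore maps @page at
+ * the fixmap slot whose colour matches the user address @addr, letting
+ * the kernel read or write the page without creating an alias.
+ */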
+static inline void *kmap_coherent(struct page *page, unsigned long addr)
+{
+ enum fixed_addresses idx;
+ unsigned long vaddr, flags;
+ pte_t pte;
+
+ inc_preempt_count();
+
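+	/* Derive the cache colour of the user address. */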
+	idx = (addr & CACHE_ALIAS) >> PAGE_SHIFT;
+ vaddr = __fix_to_virt(FIX_CMAP_END - idx);
+ pte = mk_pte(page, PAGE_KERNEL);
+
+	/* Publish the pte before touching the TLB so a miss sees it. */
+	set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);
+
+	/* Evict any stale translation left by the slot's previous user. */
+	local_irq_save(flags);
+	flush_tlb_one(get_asid(), vaddr);
+	local_irq_restore(flags);
+
+	/* Preload the new translation so the first access doesn't fault. */
+	update_mmu_cache(NULL, vaddr, pte);
+
+ return (void *)vaddr;
+}
+
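+/*
+ * No teardown is needed: the fixmap slot is simply reused, and the next
+ * kmap_coherent() of the same colour rewrites the pte and flushes the
+ * stale TLB entry.
+ */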
+static inline void kunmap_coherent(void *kvaddr)
+{
+ dec_preempt_count();
+ preempt_check_resched();
+}
+
/*
* clear_user_page
 * @to: P1 address
 * @address: U0 address to be mapped
 * @page: page (virt_to_page(to))
 */
void clear_user_page(void *to, unsigned long address, struct page *page)
{
__set_bit(PG_mapped, &page->flags);
- if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
- clear_page(to);
- else {
- unsigned long phys_addr = PHYSADDR(to);
- unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
- pgd_t *pgd = pgd_offset_k(p3_addr);
- pud_t *pud = pud_offset(pgd, p3_addr);
- pmd_t *pmd = pmd_offset(pud, p3_addr);
- pte_t *pte = pte_offset_kernel(pmd, p3_addr);
- pte_t entry;
- unsigned long flags;
-
- entry = pfn_pte(phys_addr >> PAGE_SHIFT, PAGE_KERNEL);
- mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
- set_pte(pte, entry);
- local_irq_save(flags);
- flush_tlb_one(get_asid(), p3_addr);
- local_irq_restore(flags);
- update_mmu_cache(NULL, p3_addr, entry);
- __clear_user_page((void *)p3_addr, to);
- pte_clear(&init_mm, p3_addr, pte);
- mutex_unlock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
- }
+
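+	/*
+	 * Clear through the kernel (P1) mapping; if it is a different
+	 * cache colour than the user address, write the lines back so
+	 * the user mapping sees the zeroed data.
+	 */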
+ clear_page(to);
+	if (((address & PAGE_MASK) ^ (unsigned long)to) & CACHE_ALIAS)
+ __flush_wback_region(to, PAGE_SIZE);
}
-/*
- * copy_user_page
- * @to: P1 address
- * @from: P1 address
- * @address: U0 address to be mapped
- * @page: page (virt_to_page(to))
- */
-void copy_user_page(void *to, void *from, unsigned long address,
- struct page *page)
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long vaddr, void *dst, const void *src,
+ unsigned long len)
{
+ void *vto;
+
__set_bit(PG_mapped, &page->flags);
- if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
- copy_page(to, from);
- else {
- unsigned long phys_addr = PHYSADDR(to);
- unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
- pgd_t *pgd = pgd_offset_k(p3_addr);
- pud_t *pud = pud_offset(pgd, p3_addr);
- pmd_t *pmd = pmd_offset(pud, p3_addr);
- pte_t *pte = pte_offset_kernel(pmd, p3_addr);
- pte_t entry;
- unsigned long flags;
-
- entry = pfn_pte(phys_addr >> PAGE_SHIFT, PAGE_KERNEL);
- mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
- set_pte(pte, entry);
- local_irq_save(flags);
- flush_tlb_one(get_asid(), p3_addr);
- local_irq_restore(flags);
- update_mmu_cache(NULL, p3_addr, entry);
- __copy_user_page((void *)p3_addr, from, to);
- pte_clear(&init_mm, p3_addr, pte);
- mutex_unlock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
- }
+
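+	/*
+	 * Write through a colour-matched kernel mapping rather than @dst;
+	 * this keeps the D-cache coherent, so only the I-cache needs
+	 * attention (below) for executable mappings.
+	 */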
+ vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
+ memcpy(vto, src, len);
+ kunmap_coherent(vto);
+
+ if (vma->vm_flags & VM_EXEC)
+ flush_cache_page(vma, vaddr, page_to_pfn(page));
+}
+
+void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long vaddr, void *dst, const void *src,
+ unsigned long len)
+{
+ void *vfrom;
+
+ __set_bit(PG_mapped, &page->flags);
+
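+	/* Read through a colour-matched mapping to see the user's data. */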
+ vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
+ memcpy(dst, vfrom, len);
+ kunmap_coherent(vfrom);
+}
+
+void copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma)
+{
+ void *vfrom, *vto;
+
+ __set_bit(PG_mapped, &to->flags);
+
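+	/*
+	 * Read the source through a colour-matched mapping; the
+	 * destination uses a plain kmap_atomic() slot and is written
+	 * back afterwards if its colour differs from @vaddr.
+	 */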
+ vto = kmap_atomic(to, KM_USER1);
+ vfrom = kmap_coherent(from, vaddr);
+ copy_page(vto, vfrom);
+ kunmap_coherent(vfrom);
+
+	if ((vaddr ^ (unsigned long)vto) & CACHE_ALIAS)
+ __flush_wback_region(vto, PAGE_SIZE);
+
+ kunmap_atomic(vto, KM_USER1);
+	/* Make sure the copied data is visible to other CPUs before use. */
+ smp_wmb();
}
+EXPORT_SYMBOL(copy_user_highpage);
+
/*
* For SH-4, we have our own implementation for ptep_get_and_clear
*/
-inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
pte_t pte = *ptep;