diff --git a/mm/memory.c b/mm/memory.c
index e7066e71dfa3b36daeb4830e3032091e38f0bfc1..f64cbf9baa3633fc1755f675ec2b46c920c4ecef 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -481,7 +481,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        page = vm_normal_page(vma, addr, pte);
        if (page) {
                get_page(page);
-               page_dup_rmap(page);
+               page_dup_rmap(page, vma, addr);
                rss[!!PageAnon(page)]++;
        }
 
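For context, the matching rmap change (assumed from the same series; include/linux/rmap.h is not part of this diff) widens page_dup_rmap() to take the vma and faulting address so CONFIG_DEBUG_VM builds can cross-check the reverse mapping; the non-debug variant stays a plain mapcount increment:

/* Sketch of the assumed non-DEBUG_VM fallback in include/linux/rmap.h. */
static inline void page_dup_rmap(struct page *page,
				 struct vm_area_struct *vma,
				 unsigned long address)
{
	atomic_inc(&page->_mapcount);	/* one more pte maps this page */
}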
@@ -1448,6 +1448,100 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 }
 EXPORT_SYMBOL(remap_pfn_range);
 
+static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
+                                    unsigned long addr, unsigned long end,
+                                    pte_fn_t fn, void *data)
+{
+       pte_t *pte;
+       int err;
+       struct page *pmd_page;
+       spinlock_t *uninitialized_var(ptl);
+
+       pte = (mm == &init_mm) ?
+               pte_alloc_kernel(pmd, addr) :
+               pte_alloc_map_lock(mm, pmd, addr, &ptl);
+       if (!pte)
+               return -ENOMEM;
+
+       BUG_ON(pmd_huge(*pmd));
+
+       pmd_page = pmd_page(*pmd);
+
+       do {
+               err = fn(pte, pmd_page, addr, data);
+               if (err)
+                       break;
+       } while (pte++, addr += PAGE_SIZE, addr != end);
+
+       if (mm != &init_mm)
+               pte_unmap_unlock(pte-1, ptl);
+       return err;
+}
+
+static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
+                                    unsigned long addr, unsigned long end,
+                                    pte_fn_t fn, void *data)
+{
+       pmd_t *pmd;
+       unsigned long next;
+       int err;
+
+       pmd = pmd_alloc(mm, pud, addr);
+       if (!pmd)
+               return -ENOMEM;
+       do {
+               next = pmd_addr_end(addr, end);
+               err = apply_to_pte_range(mm, pmd, addr, next, fn, data);
+               if (err)
+                       break;
+       } while (pmd++, addr = next, addr != end);
+       return err;
+}
+
+static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
+                                    unsigned long addr, unsigned long end,
+                                    pte_fn_t fn, void *data)
+{
+       pud_t *pud;
+       unsigned long next;
+       int err;
+
+       pud = pud_alloc(mm, pgd, addr);
+       if (!pud)
+               return -ENOMEM;
+       do {
+               next = pud_addr_end(addr, end);
+               err = apply_to_pmd_range(mm, pud, addr, next, fn, data);
+               if (err)
+                       break;
+       } while (pud++, addr = next, addr != end);
+       return err;
+}
+
+/*
+ * Scan a region of virtual memory, filling in page tables as necessary
+ * and calling a provided function on each leaf page table.
+ */
+int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
+                       unsigned long size, pte_fn_t fn, void *data)
+{
+       pgd_t *pgd;
+       unsigned long next;
+       unsigned long end = addr + size;
+       int err;
+
+       BUG_ON(addr >= end);
+       pgd = pgd_offset(mm, addr);
+       do {
+               next = pgd_addr_end(addr, end);
+               err = apply_to_pud_range(mm, pgd, addr, next, fn, data);
+               if (err)
+                       break;
+       } while (pgd++, addr = next, addr != end);
+       return err;
+}
+EXPORT_SYMBOL_GPL(apply_to_page_range);
+
 /*
  * handle_pte_fault chooses page fault handler according to an entry
  * which was read non-atomically.  Before making any commitment, on
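As a usage sketch (hypothetical helper, not part of this patch): apply_to_page_range() allocates any missing intermediate tables for the range, then invokes the pte_fn_t once per pte slot with the pte page and the virtual address it maps, aborting on the first nonzero return.

static int count_ptes(pte_t *pte, struct page *pmd_page,
		      unsigned long addr, void *data)
{
	unsigned long *count = data;

	if (!pte_none(*pte))
		(*count)++;
	return 0;			/* nonzero stops the walk */
}

static unsigned long count_present_ptes(struct mm_struct *mm,
					unsigned long start,
					unsigned long len)
{
	unsigned long count = 0;

	/* Note: the walk itself populates missing page tables. */
	apply_to_page_range(mm, start, len, count_ptes, &count);
	return count;
}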
@@ -1597,9 +1691,10 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                flush_cache_page(vma, address, pte_pfn(orig_pte));
                entry = pte_mkyoung(orig_pte);
                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-               ptep_set_access_flags(vma, address, page_table, entry, 1);
-               update_mmu_cache(vma, address, entry);
-               lazy_mmu_prot_update(entry);
+               if (ptep_set_access_flags(vma, address, page_table, entry, 1)) {
+                       update_mmu_cache(vma, address, entry);
+                       lazy_mmu_prot_update(entry);
+               }
                ret |= VM_FAULT_WRITE;
                goto unlock;
        }
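The new guard relies on ptep_set_access_flags() being reworked (elsewhere in this series) to return whether the pte actually changed, so the MMU-cache update and lazy-MMU hook run only when there is something to propagate. The asm-generic fallback of that era looks roughly like this (a sketch; architectures override it):

#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
({									\
	int __changed = !pte_same(*(__ptep), __entry);			\
	if (__changed) {						\
		set_pte_at((__vma)->vm_mm, __address, __ptep, __entry);	\
		flush_tlb_page(__vma, __address);			\
	}								\
	__changed;	/* caller skips MMU-cache update when 0 */	\
})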
@@ -2431,10 +2526,9 @@ static inline int handle_pte_fault(struct mm_struct *mm,
                pte_t *pte, pmd_t *pmd, int write_access)
 {
        pte_t entry;
-       pte_t old_entry;
        spinlock_t *ptl;
 
-       old_entry = entry = *pte;
+       entry = *pte;
        if (!pte_present(entry)) {
                if (pte_none(entry)) {
                        if (vma->vm_ops) {
@@ -2467,8 +2561,7 @@ static inline int handle_pte_fault(struct mm_struct *mm,
                entry = pte_mkdirty(entry);
        }
        entry = pte_mkyoung(entry);
-       if (!pte_same(old_entry, entry)) {
-               ptep_set_access_flags(vma, address, pte, entry, write_access);
+       if (ptep_set_access_flags(vma, address, pte, entry, write_access)) {
                update_mmu_cache(vma, address, entry);
                lazy_mmu_prot_update(entry);
        } else {
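For reference, the else branch (context beyond this hunk, recalled from the file of that era, so treat it as an assumption): when the entry did not change, a write fault still flushes the TLB, since it may be a spurious protection fault caused by a stale entry.

	} else {
		/*
		 * Needed only for protection faults, but the arch code
		 * does not yet say whether this is such a fault; flushing
		 * here avoids livelocking on a stale TLB entry while
		 * staying cheap for .text faults with threads.
		 */
		if (write_access)
			flush_tlb_page(vma, address);
	}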
@@ -2539,12 +2632,6 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
        spin_unlock(&mm->page_table_lock);
        return 0;
 }
-#else
-/* Workaround for gcc 2.96 */
-int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
-{
-       return 0;
-}
 #endif /* __PAGETABLE_PUD_FOLDED */
 
 #ifndef __PAGETABLE_PMD_FOLDED
@@ -2573,12 +2660,6 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
        spin_unlock(&mm->page_table_lock);
        return 0;
 }
-#else
-/* Workaround for gcc 2.96 */
-int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
-{
-       return 0;
-}
 #endif /* __PAGETABLE_PMD_FOLDED */
 
 int make_pages_present(unsigned long addr, unsigned long end)
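On the removed stubs: they existed only because gcc 2.96 could emit references to __pud_alloc()/__pmd_alloc() from the inline wrappers even in folded configurations where the call is statically unreachable; with that compiler no longer supported, the dead code goes. The inline caller in include/linux/mm.h of that era looks roughly like this (a sketch, quoted from memory):

static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd,
			       unsigned long address)
{
	/*
	 * With __PAGETABLE_PUD_FOLDED, pgd_none() is constant 0, so any
	 * current compiler drops the __pud_alloc() reference entirely.
	 */
	return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address)) ?
		NULL : pud_offset(pgd, address);
}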