#include <asm/pgtable.h>
#include <asm/tlbflush.h>
-/*
- * Called with mm->page_table_lock held to protect against other
- * threads/the swapper from ripping pte's out from under us.
- */
-
static void msync_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, unsigned long end)
{
- struct mm_struct *mm = vma->vm_mm;
pte_t *pte;
+ spinlock_t *ptl;
int progress = 0;
again:
- pte = pte_offset_map(pmd, addr);
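+	/* map the pte page and take its page-table lock in one step */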
+ pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
do {
unsigned long pfn;
struct page *page;
if (progress >= 64) {
progress = 0;
- if (need_resched() ||
- need_lockbreak(&mm->page_table_lock))
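+			/* back off if a reschedule is due or the ptl is contended */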
+ if (need_resched() || need_lockbreak(ptl))
break;
}
		progress++;
		if (!pte_present(*pte))
			continue;
		if (!pte_maybe_dirty(*pte))
			continue;
		pfn = pte_pfn(*pte);
		if (unlikely(!pfn_valid(pfn))) {
			print_bad_pte(vma, *pte, addr);
			continue;
		}
		page = pfn_to_page(pfn);

		if (ptep_clear_flush_dirty(vma, addr, pte) ||
		    page_test_and_clear_dirty(page))
			set_page_dirty(page);
progress += 3;
} while (pte++, addr += PAGE_SIZE, addr != end);
- pte_unmap(pte - 1);
- cond_resched_lock(&mm->page_table_lock);
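+	/* unlock and unmap together; reschedule with no lock held */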
+ pte_unmap_unlock(pte - 1, ptl);
+ cond_resched();
if (addr != end)
goto again;
}

static void msync_page_range(struct vm_area_struct *vma,
unsigned long addr, unsigned long end)
{
- struct mm_struct *mm = vma->vm_mm;
pgd_t *pgd;
unsigned long next;
	/* For hugepages we can't go walking the page table normally,
	 * but that's ok, hugetlbfs is memory based, so we don't need
	 * to do anything more on an msync(). */
	if (vma->vm_flags & VM_HUGETLB)
		return;

BUG_ON(addr >= end);
- pgd = pgd_offset(mm, addr);
+ pgd = pgd_offset(vma->vm_mm, addr);
flush_cache_range(vma, addr, end);
- spin_lock(&mm->page_table_lock);
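+	/* locking is now done per page table inside msync_pte_range */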
do {
next = pgd_addr_end(addr, end);
if (pgd_none_or_clear_bad(pgd))
continue;
msync_pud_range(vma, pgd, addr, next);
} while (pgd++, addr = next, addr != end);
- spin_unlock(&mm->page_table_lock);
}
/*