* mapping->tree_lock (widely used, in set_page_dirty,
* in arch-dependent flush_dcache_mmap_lock,
* within inode_lock in __sync_single_inode)
+ * zone->lock (within radix tree node alloc)
*/
#include <linux/mm.h>
struct kmem_cache *anon_vma_cachep;
-static inline void validate_anon_vma(struct vm_area_struct *find_vma)
-{
-#ifdef CONFIG_DEBUG_VM
- struct anon_vma *anon_vma = find_vma->anon_vma;
- struct vm_area_struct *vma;
- unsigned int mapcount = 0;
- int found = 0;
-
- list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
- mapcount++;
- BUG_ON(mapcount > 100000);
- if (vma == find_vma)
- found = 1;
- }
- BUG_ON(!found);
-#endif
-}
-
/* This must be called under the mmap_sem. */
int anon_vma_prepare(struct vm_area_struct *vma)
{
void __anon_vma_link(struct vm_area_struct *vma)
{
struct anon_vma *anon_vma = vma->anon_vma;
- if (anon_vma) {
+ if (anon_vma)
list_add_tail(&vma->anon_vma_node, &anon_vma->head);
- validate_anon_vma(vma);
- }
}
void anon_vma_link(struct vm_area_struct *vma)
if (anon_vma) {
spin_lock(&anon_vma->lock);
list_add_tail(&vma->anon_vma_node, &anon_vma->head);
- validate_anon_vma(vma);
spin_unlock(&anon_vma->lock);
}
}
return;
spin_lock(&anon_vma->lock);
- validate_anon_vma(vma);
list_del(&vma->anon_vma_node);
/* We must garbage collect the anon_vma if it's empty */
anon_vma_free(anon_vma);
}
-static void anon_vma_ctor(void *data, struct kmem_cache *cachep,
- unsigned long flags)
+static void anon_vma_ctor(struct kmem_cache *cachep, void *data)
{
- if (flags & SLAB_CTOR_CONSTRUCTOR) {
- struct anon_vma *anon_vma = data;
+ struct anon_vma *anon_vma = data;
- spin_lock_init(&anon_vma->lock);
- INIT_LIST_HEAD(&anon_vma->head);
- }
+ spin_lock_init(&anon_vma->lock);
+ INIT_LIST_HEAD(&anon_vma->head);
}
void __init anon_vma_init(void)
{
anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
- 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor, NULL);
+ 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
}
/*
entry = pte_wrprotect(entry);
entry = pte_mkclean(entry);
set_pte_at(mm, address, pte, entry);
- lazy_mmu_prot_update(entry);
ret = 1;
}
__inc_zone_page_state(page, NR_ANON_PAGES);
}
+/**
+ * __page_check_anon_rmap - sanity check anonymous rmap addition
+ * @page: the page to add the mapping to
+ * @vma: the vm area in which the mapping is added
+ * @address: the user virtual address mapped
+ */
+static void __page_check_anon_rmap(struct page *page,
+ struct vm_area_struct *vma, unsigned long address)
+{
+#ifdef CONFIG_DEBUG_VM
+ /*
+ * The page's anon-rmap details (mapping and index) are guaranteed to
+ * be set up correctly at this point.
+ *
+ * We have exclusion against page_add_anon_rmap because the caller
+ * always holds the page locked, except if called from page_dup_rmap,
+ * in which case the page is already known to be set up.
+ *
+ * We have exclusion against page_add_new_anon_rmap because those pages
+ * are initially only visible via the pagetables, and the pte is locked
+ * over the call to page_add_new_anon_rmap.
+ */
+ struct anon_vma *anon_vma = vma->anon_vma;
+ anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
+ BUG_ON(page->mapping != (struct address_space *)anon_vma);
+ BUG_ON(page->index != linear_page_index(vma, address));
+#endif
+}
+
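A minimal standalone illustration of the pointer-tagging convention the two BUG_ON()s above rely on; it is not part of the patch, and the stand-in types and helper names (set_anon, page_is_anon) are invented for illustration. Because an anon_vma comes from a slab cache and is at least word-aligned, bit 0 of page->mapping is free, so an anonymous page stores its anon_vma there with the PAGE_MAPPING_ANON bit set, and __page_check_anon_rmap() simply recomputes that tagged value and compares.

#include <assert.h>
#include <stdint.h>

#define PAGE_MAPPING_ANON 1	/* low bit is free: anon_vma is word-aligned */

struct anon_vma { long dummy; };	/* stand-in */
struct page { void *mapping; };		/* stand-in */

/* roughly what __page_set_anon_rmap() does to page->mapping */
static void set_anon(struct page *page, struct anon_vma *anon_vma)
{
	page->mapping = (void *)((uintptr_t)anon_vma + PAGE_MAPPING_ANON);
}

/* roughly what PageAnon() tests */
static int page_is_anon(const struct page *page)
{
	return ((uintptr_t)page->mapping & PAGE_MAPPING_ANON) != 0;
}

int main(void)
{
	struct anon_vma av;
	struct page pg;

	set_anon(&pg, &av);
	assert(page_is_anon(&pg));
	/* the sanity check recomputes the same tagged pointer and compares */
	assert(pg.mapping == (void *)((uintptr_t)&av + PAGE_MAPPING_ANON));
	return 0;
}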
/**
* page_add_anon_rmap - add pte mapping to an anonymous page
* @page: the page to add the mapping to
* @vma: the vm area in which the mapping is added
* @address: the user virtual address mapped
*
- * The caller needs to hold the pte lock.
+ * The caller needs to hold the pte lock and the page must be locked.
*/
void page_add_anon_rmap(struct page *page,
struct vm_area_struct *vma, unsigned long address)
{
+ VM_BUG_ON(!PageLocked(page));
+ VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
if (atomic_inc_and_test(&page->_mapcount))
__page_set_anon_rmap(page, vma, address);
- /* else checking page index and mapping is racy */
+ else
+ __page_check_anon_rmap(page, vma, address);
}
/*
*
* Same as page_add_anon_rmap but must only be called on *new* pages.
* This means the inc-and-test can be bypassed.
+ * Page does not have to be locked.
*/
void page_add_new_anon_rmap(struct page *page,
struct vm_area_struct *vma, unsigned long address)
{
+ BUG_ON(address < vma->vm_start || address >= vma->vm_end);
atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
__page_set_anon_rmap(page, vma, address);
}
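For readers not used to the _mapcount convention behind the inc-and-test branch in page_add_anon_rmap() and the shortcut in page_add_new_anon_rmap(), here is a minimal standalone sketch; it is not part of the patch, and fake_page and inc_and_test are stand-ins, not kernel code. _mapcount starts at -1, so the increment that reaches 0 marks the first pte mapping the page, which is the only case that sets up the anon rmap; a brand-new page, visible only through the locked pte, can instead have its count set straight to 0.

#include <assert.h>
#include <stdatomic.h>

/* stand-in for page->_mapcount: -1 means "no ptes map this page" */
struct fake_page { atomic_int _mapcount; };

/* same semantics as the kernel's atomic_inc_and_test(): true when the
 * increment makes the counter reach zero, i.e. this was the first mapping */
static int inc_and_test(atomic_int *v)
{
	return atomic_fetch_add(v, 1) + 1 == 0;
}

int main(void)
{
	struct fake_page pg = { ._mapcount = -1 };

	assert(inc_and_test(&pg._mapcount));	/* first pte: set up anon rmap */
	assert(!inc_and_test(&pg._mapcount));	/* later ptes: only sanity-check */
	return 0;
}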
__inc_zone_page_state(page, NR_FILE_MAPPED);
}
+#ifdef CONFIG_DEBUG_VM
+/**
+ * page_dup_rmap - duplicate pte mapping to a page
+ * @page: the page to add the mapping to
+ *
+ * For copy_page_range only: minimal extract from page_add_file_rmap /
+ * page_add_anon_rmap, avoiding unnecessary tests (already checked) so it's
+ * quicker.
+ *
+ * The caller needs to hold the pte lock.
+ */
+void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
+{
+ BUG_ON(page_mapcount(page) == 0);
+ if (PageAnon(page))
+ __page_check_anon_rmap(page, vma, address);
+ atomic_inc(&page->_mapcount);
+}
+#endif
+
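As a rough standalone model of the fork-time bookkeeping page_dup_rmap() exists for; it is not part of the patch, and the fake_* types and helpers are invented for illustration. The parent already maps the page when copy_page_range() runs, so duplicating a pte only has to bump the count; the inherited page->mapping and page->index are sanity-checked, never rewritten.

#include <assert.h>

struct fake_page { int _mapcount; };	/* starts at -1, like page->_mapcount */

static int fake_page_mapcount(struct fake_page *page)
{
	return page->_mapcount + 1;
}

/* mirrors page_dup_rmap(): the page must already be mapped by the parent */
static void fake_dup_rmap(struct fake_page *page)
{
	assert(fake_page_mapcount(page) > 0);	/* cf. BUG_ON(page_mapcount(page) == 0) */
	page->_mapcount++;			/* the only bookkeeping fork needs */
}

int main(void)
{
	struct fake_page pg = { ._mapcount = -1 };

	pg._mapcount++;				/* parent faults the page in: one mapping */
	assert(fake_page_mapcount(&pg) == 1);

	fake_dup_rmap(&pg);			/* fork copies the pte: two mappings */
	assert(fake_page_mapcount(&pg) == 2);
	return 0;
}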
/**
* page_remove_rmap - take down pte mapping from a page
* @page: page to remove mapping from
printk (KERN_EMERG " page->count = %x\n", page_count(page));
printk (KERN_EMERG " page->mapping = %p\n", page->mapping);
print_symbol (KERN_EMERG " vma->vm_ops = %s\n", (unsigned long)vma->vm_ops);
- if (vma->vm_ops)
+ if (vma->vm_ops) {
print_symbol (KERN_EMERG " vma->vm_ops->nopage = %s\n", (unsigned long)vma->vm_ops->nopage);
+ print_symbol (KERN_EMERG " vma->vm_ops->fault = %s\n", (unsigned long)vma->vm_ops->fault);
+ }
if (vma->vm_file && vma->vm_file->f_op)
print_symbol (KERN_EMERG " vma->vm_file->f_op->mmap = %s\n", (unsigned long)vma->vm_file->f_op->mmap);
BUG();