diff --git a/mm/memory.c b/mm/memory.c
index d22f78c8a38..9abc6008544 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -82,6 +82,16 @@ EXPORT_SYMBOL(num_physpages);
 EXPORT_SYMBOL(high_memory);
 EXPORT_SYMBOL(vmalloc_earlyreserve);
 
+int randomize_va_space __read_mostly = 1;
+
+static int __init disable_randmaps(char *s)
+{
+	randomize_va_space = 0;
+	return 0;
+}
+__setup("norandmaps", disable_randmaps);
+
+
 /*
  * If a p?d_bad entry is found while walking page tables, report
  * the error, before resetting entry to p?d_none. Usually (but
@@ -574,7 +584,7 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	 * readonly mappings. The tradeoff is that copy_page_range is more
 	 * efficient than faulting.
 	 */
-	if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP))) {
+	if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP|VM_INSERTPAGE))) {
 		if (!vma->anon_vma)
 			return 0;
 	}
@@ -1228,6 +1238,7 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *
 		return -EFAULT;
 	if (!page_count(page))
 		return -EINVAL;
+	vma->vm_flags |= VM_INSERTPAGE;
 	return insert_page(vma->vm_mm, addr, page, vma->vm_page_prot);
 }
 EXPORT_SYMBOL(vm_insert_page);
@@ -1497,7 +1508,7 @@ gotten:
 		update_mmu_cache(vma, address, entry);
 		lazy_mmu_prot_update(entry);
 		lru_cache_add_active(new_page);
-		page_add_anon_rmap(new_page, vma, address);
+		page_add_new_anon_rmap(new_page, vma, address);
 
 		/* Free the old page.. */
 		new_page = old_page;
@@ -1769,9 +1780,32 @@ out_big:
 out_busy:
 	return -ETXTBSY;
 }
-
 EXPORT_SYMBOL(vmtruncate);
 
+int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
+{
+	struct address_space *mapping = inode->i_mapping;
+
+	/*
+	 * If the underlying filesystem is not going to provide
+	 * a way to truncate a range of blocks (punch a hole) -
+	 * we should return failure right now.
+	 */
+	if (!inode->i_op || !inode->i_op->truncate_range)
+		return -ENOSYS;
+
+	mutex_lock(&inode->i_mutex);
+	down_write(&inode->i_alloc_sem);
+	unmap_mapping_range(mapping, offset, (end - offset), 1);
+	truncate_inode_pages_range(mapping, offset, end);
+	inode->i_op->truncate_range(inode, offset, end);
+	up_write(&inode->i_alloc_sem);
+	mutex_unlock(&inode->i_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL(vmtruncate_range);
+
 /*
  * Primitive swap readahead code. We simply read an aligned block of
  * (1 << page_cluster) entries in the swap area. This method is chosen
@@ -1847,6 +1881,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto out;
 
 	entry = pte_to_swp_entry(orig_pte);
+again:
 	page = lookup_swap_cache(entry);
 	if (!page) {
 		swapin_readahead(entry, address, vma);
@@ -1870,6 +1905,12 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	mark_page_accessed(page);
 	lock_page(page);
+	if (!PageSwapCache(page)) {
+		/* Page migration has occurred */
+		unlock_page(page);
+		page_cache_release(page);
+		goto again;
+	}
 
 	/*
 	 * Back out if somebody else already faulted in this pte.
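Two notes on the hunks above. First, the norandmaps hunk: __setup() registers an early boot parameter, so booting with norandmaps on the kernel command line clears randomize_va_space before the first userspace mapping is set up; the same series exposes the flag at runtime as /proc/sys/kernel/randomize_va_space. Second, the do_swap_page() hunks: lock_page() can sleep, and while the faulting task is blocked there, page migration can move the page's contents to a new page and delete the old one from the swap cache. The PageSwapCache() recheck validates the page once the lock is actually held; on failure the reference is dropped and the fault restarts at the new again: label, repeating the swap-cache lookup instead of mapping a page that no longer backs this swap entry.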
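The vmtruncate_range() hunk is the generic entry point for punching a hole in a file: it unmaps the range from all address spaces, truncates the affected page-cache pages, and then has the filesystem free the underlying blocks through the new ->truncate_range() inode operation, failing with -ENOSYS when no such hook exists. As a minimal userspace sketch (not part of this patch; it assumes the madvise(MADV_REMOVE) entry path from the same series and a tmpfs file, and the file name and sizes are arbitrary):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t page = (size_t)sysconf(_SC_PAGESIZE);
	size_t len = 16 * page;
	int fd = open("/dev/shm/hole-demo", O_RDWR | O_CREAT | O_TRUNC, 0600);
	char *p;

	if (fd < 0 || ftruncate(fd, len) != 0)
		return 1;
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;
	memset(p, 0xab, len);	/* instantiate backing pages */

	/*
	 * Punch out pages 4..7 of the file.  On a filesystem without
	 * ->truncate_range() this fails (vmtruncate_range() returns
	 * -ENOSYS) instead of freeing blocks.
	 */
	if (madvise(p + 4 * page, 4 * page, MADV_REMOVE) != 0)
		perror("madvise(MADV_REMOVE)");

	munmap(p, len);
	close(fd);
	return 0;
}

Only tmpfs appears to provide ->truncate_range() at this point, so the same call against a disk-backed filesystem reports the -ENOSYS failure instead of freeing anything.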
@@ -1953,8 +1994,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			goto release;
 		inc_mm_counter(mm, anon_rss);
 		lru_cache_add_active(page);
-		SetPageReferenced(page);
-		page_add_anon_rmap(page, vma, address);
+		page_add_new_anon_rmap(page, vma, address);
 	} else {
 		/* Map the ZERO_PAGE - vm_page_prot is readonly */
 		page = ZERO_PAGE(address);
@@ -2085,7 +2125,7 @@ retry:
 		if (anon) {
 			inc_mm_counter(mm, anon_rss);
 			lru_cache_add_active(new_page);
-			page_add_anon_rmap(new_page, vma, address);
+			page_add_new_anon_rmap(new_page, vma, address);
 		} else {
 			inc_mm_counter(mm, file_rss);
 			page_add_file_rmap(new_page);
@@ -2244,6 +2284,8 @@ int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	return handle_pte_fault(mm, vma, address, pte, pmd, write_access);
 }
+EXPORT_SYMBOL_GPL(__handle_mm_fault);
+
 
 #ifndef __PAGETABLE_PUD_FOLDED
 /*
  * Allocate page upper directory.
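On the three page_add_anon_rmap() -> page_add_new_anon_rmap() conversions in this diff: every converted call site hands in a page that was allocated by the fault currently being handled, so it cannot be mapped anywhere else yet. A rough sketch of the distinction, simplified from the rmap code of this era (__page_set_anon_rmap() stands for the shared helper that fills in page->mapping and page->index; treat the details as illustrative, not exact):

/* Sketch only -- simplified; _mapcount starts at -1 (no mappings). */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	/* Page may already be mapped elsewhere: detect -1 -> 0 atomically. */
	if (atomic_inc_and_test(&page->_mapcount))
		__page_set_anon_rmap(page, vma, address);
	/* else the page was already anon; nothing more to set up */
}

void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	/* Caller guarantees a brand-new page: no racing mappers. */
	atomic_set(&page->_mapcount, 0);	/* -1 -> 0, first mapping */
	__page_set_anon_rmap(page, vma, address);
}

The SetPageReferenced() dropped from do_anonymous_page() fits the same picture: lru_cache_add_active() already places the new page on the active list, so additionally marking it referenced gains nothing.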