fuse: save space in struct fuse_req
diff --git a/mm/memory.c b/mm/memory.c
index bc137751da7f39d14c618497d2e88b067553044e..7bb70728bb526f357deca4879f821038a2b9fd62 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -305,7 +305,7 @@ int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
        spin_lock(&mm->page_table_lock);
        if (pmd_present(*pmd)) {        /* Another has populated it */
                pte_lock_deinit(new);
-               pte_free(new);
+               pte_free(mm, new);
        } else {
                mm->nr_ptes++;
                inc_zone_page_state(new, NR_PAGETABLE);
@@ -323,7 +323,7 @@ int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
 
        spin_lock(&init_mm.page_table_lock);
        if (pmd_present(*pmd))          /* Another has populated it */
-               pte_free_kernel(new);
+               pte_free_kernel(&init_mm, new);
        else
                pmd_populate_kernel(&init_mm, pmd, new);
        spin_unlock(&init_mm.page_table_lock);
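The two hunks above thread the mm through pte_free() and pte_free_kernel(); pud_free() and pmd_free() get the same treatment further down. Passing the mm lets an architecture do per-mm page-table accounting or use per-mm quicklists when freeing. A minimal sketch of an arch pgalloc implementation under the new prototypes (the bodies are illustrative assumptions, not this tree's code):

	/*
	 * Hypothetical generic implementation; mm is unused here but
	 * available for arch-specific accounting.
	 */
	static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
	{
		free_page((unsigned long)pte);
	}

	static inline void pte_free(struct mm_struct *mm, struct page *pte)
	{
		__free_page(pte);
	}
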
@@ -1109,7 +1109,8 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 }
 EXPORT_SYMBOL(get_user_pages);
 
-pte_t * fastcall get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl)
+pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
+                       spinlock_t **ptl)
 {
        pgd_t * pgd = pgd_offset(mm, addr);
        pud_t * pud = pud_alloc(mm, pgd, addr);
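The hunk above drops the fastcall annotation from get_locked_pte() and re-wraps the prototype to fit 80 columns. On i386, fastcall expanded to roughly the following (quoted from memory, so treat as approximate):

	#define fastcall	__attribute__((regparm(3)))

Once the kernel was built with -mregparm=3 unconditionally, the annotation no longer changed code generation, so it was removed tree-wide.
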
@@ -1517,10 +1518,8 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
                        memset(kaddr, 0, PAGE_SIZE);
                kunmap_atomic(kaddr, KM_USER0);
                flush_dcache_page(dst);
-               return;
-
-       }
-       copy_user_highpage(dst, src, va, vma);
+       } else
+               copy_user_highpage(dst, src, va, vma);
 }
 
 /*
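The cow_user_page() hunk above is a pure restructure: the early return and its stray closing brace become an if/else, so the zero-fill fallback for a missing source page and the normal copy sit as the two arms of one branch. Abridged reconstruction of the resulting function body (comments paraphrased):

	if (unlikely(!src)) {
		void *kaddr = kmap_atomic(dst, KM_USER0);
		void __user *uaddr = (void __user *)(va & PAGE_MASK);

		/* Best-effort copy from the user address; zero-fill on fault. */
		if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
			memset(kaddr, 0, PAGE_SIZE);
		kunmap_atomic(kaddr, KM_USER0);
		flush_dcache_page(dst);
	} else
		copy_user_highpage(dst, src, va, vma);
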
@@ -1629,6 +1628,7 @@ gotten:
        if (!new_page)
                goto oom;
        cow_user_page(new_page, old_page, address, vma);
+       __SetPageUptodate(new_page);
 
        /*
         * Re-check the pte - we dropped the lock
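__SetPageUptodate() is the non-atomic flavor of SetPageUptodate() and is only legal while no one else can see the page; here the freshly copied COW page is not yet mapped anywhere. Setting PG_uptodate before the page becomes visible gives PageUptodate() readers the ordering they rely on. From memory, and ignoring arch special cases such as s390, the helper looks roughly like:

	static inline void __SetPageUptodate(struct page *page)
	{
		smp_wmb();	/* order page contents before the uptodate bit */
		__set_bit(PG_uptodate, &(page)->flags);
	}

The same call is added in do_anonymous_page() and __do_fault() below, for the same reason.
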
@@ -1909,50 +1909,49 @@ EXPORT_SYMBOL(unmap_mapping_range);
  */
 int vmtruncate(struct inode * inode, loff_t offset)
 {
-       struct address_space *mapping = inode->i_mapping;
-       unsigned long limit;
+       if (inode->i_size < offset) {
+               unsigned long limit;
 
-       if (inode->i_size < offset)
-               goto do_expand;
-       /*
-        * truncation of in-use swapfiles is disallowed - it would cause
-        * subsequent swapout to scribble on the now-freed blocks.
-        */
-       if (IS_SWAPFILE(inode))
-               goto out_busy;
-       i_size_write(inode, offset);
+               limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
+               if (limit != RLIM_INFINITY && offset > limit)
+                       goto out_sig;
+               if (offset > inode->i_sb->s_maxbytes)
+                       goto out_big;
+               i_size_write(inode, offset);
+       } else {
+               struct address_space *mapping = inode->i_mapping;
+
+               /*
+                * truncation of in-use swapfiles is disallowed - it would
+                * cause subsequent swapout to scribble on the now-freed
+                * blocks.
+                */
+               if (IS_SWAPFILE(inode))
+                       return -ETXTBSY;
+               i_size_write(inode, offset);
+
+               /*
+                * unmap_mapping_range is called twice, first simply for
+                * efficiency so that truncate_inode_pages does fewer
+                * single-page unmaps.  However after this first call, and
+                * before truncate_inode_pages finishes, it is possible for
+                * private pages to be COWed, which remain after
+                * truncate_inode_pages finishes, hence the second
+                * unmap_mapping_range call must be made for correctness.
+                */
+               unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
+               truncate_inode_pages(mapping, offset);
+               unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
+       }
 
-       /*
-        * unmap_mapping_range is called twice, first simply for efficiency
-        * so that truncate_inode_pages does fewer single-page unmaps. However
-        * after this first call, and before truncate_inode_pages finishes,
-        * it is possible for private pages to be COWed, which remain after
-        * truncate_inode_pages finishes, hence the second unmap_mapping_range
-        * call must be made for correctness.
-        */
-       unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
-       truncate_inode_pages(mapping, offset);
-       unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
-       goto out_truncate;
-
-do_expand:
-       limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
-       if (limit != RLIM_INFINITY && offset > limit)
-               goto out_sig;
-       if (offset > inode->i_sb->s_maxbytes)
-               goto out_big;
-       i_size_write(inode, offset);
-
-out_truncate:
        if (inode->i_op && inode->i_op->truncate)
                inode->i_op->truncate(inode);
        return 0;
+
 out_sig:
        send_sig(SIGXFSZ, current, 0);
 out_big:
        return -EFBIG;
-out_busy:
-       return -ETXTBSY;
 }
 EXPORT_SYMBOL(vmtruncate);
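
The vmtruncate() rewrite above replaces the do_expand/out_truncate goto ladder with one if/else on whether the file grows or shrinks: growing checks RLIMIT_FSIZE and s_maxbytes before i_size_write(); shrinking refuses in-use swapfiles with -ETXTBSY and then does the unmap/truncate/unmap sequence. Both paths fall through to the filesystem's ->truncate() hook. A hedged caller sketch, with foo_setattr() as a hypothetical filesystem method:

	/* Inside a hypothetical foo_setattr(): delegate size changes. */
	if (attr->ia_valid & ATTR_SIZE) {
		int error = vmtruncate(inode, attr->ia_size);
		if (error)	/* -EFBIG, or -ETXTBSY for a busy swapfile */
			return error;
	}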
 
@@ -2102,6 +2101,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
        page = alloc_zeroed_user_highpage_movable(vma, address);
        if (!page)
                goto oom;
+       __SetPageUptodate(page);
 
        entry = mk_pte(page, vma->vm_page_prot);
        entry = maybe_mkwrite(pte_mkdirty(entry), vma);
@@ -2202,6 +2202,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                                goto out;
                        }
                        copy_user_highpage(page, vmf.page, address, vma);
+                       __SetPageUptodate(page);
                } else {
                        /*
                         * If the page will be shareable, see if the backing
@@ -2502,7 +2503,7 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
 
        spin_lock(&mm->page_table_lock);
        if (pgd_present(*pgd))          /* Another has populated it */
-               pud_free(new);
+               pud_free(mm, new);
        else
                pgd_populate(mm, pgd, new);
        spin_unlock(&mm->page_table_lock);
@@ -2524,12 +2525,12 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
        spin_lock(&mm->page_table_lock);
 #ifndef __ARCH_HAS_4LEVEL_HACK
        if (pud_present(*pud))          /* Another has populated it */
-               pmd_free(new);
+               pmd_free(mm, new);
        else
                pud_populate(mm, pud, new);
 #else
        if (pgd_present(*pud))          /* Another has populated it */
-               pmd_free(new);
+               pmd_free(mm, new);
        else
                pgd_populate(mm, pud, new);
 #endif /* __ARCH_HAS_4LEVEL_HACK */
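
These last hunks finish the mm-threading begun with pte_free(): pud_free() and pmd_free() now take the mm as well, so every table-freeing helper shares a uniform (mm, table) shape. The __ARCH_HAS_4LEVEL_HACK branch only swaps which presence test and populate helper run; the freeing call is the same either way. A matching prototype with an illustrative generic body (an assumption, as above):

	static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
	{
		free_page((unsigned long)pmd);	/* mm available for accounting */
	}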