diff --git a/mm/filemap.c b/mm/filemap.c
index 81fb9bff0d4f9022db02f9d3194ebc5ccd0c350e..df343d1e6345ffb33ef3fa4eacf6c784c0ef8178 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
 #include <linux/backing-dev.h>
 #include <linux/pagevec.h>
 #include <linux/blkdev.h>
-#include <linux/backing-dev.h>
 #include <linux/security.h>
 #include <linux/syscalls.h>
 #include <linux/cpuset.h>
 #include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
+#include <linux/memcontrol.h>
 #include "internal.h"
 
 /*
@@ -118,6 +118,7 @@ void __remove_from_page_cache(struct page *page)
 {
        struct address_space *mapping = page->mapping;
 
+       mem_cgroup_uncharge_page(page);
        radix_tree_delete(&mapping->page_tree, page->index);
        page->mapping = NULL;
        mapping->nrpages--;
@@ -458,8 +459,12 @@ int filemap_write_and_wait_range(struct address_space *mapping,
 int add_to_page_cache(struct page *page, struct address_space *mapping,
                pgoff_t offset, gfp_t gfp_mask)
 {
-       int error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
+       int error = mem_cgroup_cache_charge(page, current->mm,
+                                       gfp_mask & ~__GFP_HIGHMEM);
+       if (error)
+               goto out;
 
+       error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
        if (error == 0) {
                write_lock_irq(&mapping->tree_lock);
                error = radix_tree_insert(&mapping->page_tree, offset, page);
@@ -470,10 +475,14 @@ int add_to_page_cache(struct page *page, struct address_space *mapping,
                        page->index = offset;
                        mapping->nrpages++;
                        __inc_zone_page_state(page, NR_FILE_PAGES);
-               }
+               } else
+                       mem_cgroup_uncharge_page(page);
+
                write_unlock_irq(&mapping->tree_lock);
                radix_tree_preload_end();
-       }
+       } else
+               mem_cgroup_uncharge_page(page);
+out:
        return error;
 }
 EXPORT_SYMBOL(add_to_page_cache);
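The memory-controller hunks above form a pair: add_to_page_cache() now charges the page to the current task's cgroup before touching the radix tree and drops the charge again on either failure path (preload or insert), while __remove_from_page_cache() uncharges when a page leaves the cache. The following standalone sketch (plain userspace C with toy stand-ins for the charge, preload and insert primitives; none of it is kernel code) mirrors only that unwind structure:

#include <stdio.h>

/* Toy stand-ins for the kernel primitives; only the control flow matters. */
static long charged;                                /* pages currently charged */

static int cache_charge(void)     { charged++; return 0; }  /* never fails here */
static void cache_uncharge(void)  { charged--; }
static int tree_preload(int fail) { return fail ? -1 : 0; }
static int tree_insert(int fail)  { return fail ? -1 : 0; }

/* Mirrors the unwind structure of the patched add_to_page_cache(): charge
 * up front, then drop the charge again on every path that fails to insert. */
static int add_to_cache(int fail_preload, int fail_insert)
{
	int error = cache_charge();
	if (error)
		goto out;

	error = tree_preload(fail_preload);
	if (error == 0) {
		error = tree_insert(fail_insert);
		if (error)
			cache_uncharge();       /* insert failed: undo the charge */
	} else
		cache_uncharge();               /* preload failed: undo the charge */
out:
	return error;
}

int main(void)
{
	add_to_cache(0, 0);   /* success: the page stays charged */
	add_to_cache(1, 0);   /* preload failure: charge undone */
	add_to_cache(0, 1);   /* insert failure: charge undone */
	printf("pages still charged: %ld (expect 1)\n", charged);
	return 0;
}

Compiled and run, the sketch leaves exactly one page charged: the one whose insertion succeeded.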
@@ -594,7 +603,7 @@ void __lock_page(struct page *page)
 }
 EXPORT_SYMBOL(__lock_page);
 
-int fastcall __lock_page_killable(struct page *page)
+int __lock_page_killable(struct page *page)
 {
        DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
 
@@ -865,9 +874,7 @@ static void shrink_readahead_size_eio(struct file *filp,
 }
 
 /**
- * do_generic_mapping_read - generic file read routine
- * @mapping:   address_space to be read
- * @ra:                file's readahead state
+ * do_generic_file_read - generic file read routine
  * @filp:      the file to read
  * @ppos:      current file position
  * @desc:      read_descriptor
@@ -878,18 +885,13 @@ static void shrink_readahead_size_eio(struct file *filp,
  *
  * This is really ugly. But the goto's actually try to clarify some
  * of the logic when it comes to error handling etc.
- *
- * Note the struct file* is only passed for the use of readpage.
- * It may be NULL.
  */
-void do_generic_mapping_read(struct address_space *mapping,
-                            struct file_ra_state *ra,
-                            struct file *filp,
-                            loff_t *ppos,
-                            read_descriptor_t *desc,
-                            read_actor_t actor)
+static void do_generic_file_read(struct file *filp, loff_t *ppos,
+               read_descriptor_t *desc, read_actor_t actor)
 {
+       struct address_space *mapping = filp->f_mapping;
        struct inode *inode = mapping->host;
+       struct file_ra_state *ra = &filp->f_ra;
        pgoff_t index;
        pgoff_t last_index;
        pgoff_t prev_index;
@@ -1081,7 +1083,6 @@ out:
        if (filp)
                file_accessed(filp);
 }
-EXPORT_SYMBOL(do_generic_mapping_read);
 
 int file_read_actor(read_descriptor_t *desc, struct page *page,
                        unsigned long offset, unsigned long size)
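Beyond the rename, the calling convention changes: do_generic_mapping_read() took the address_space, the readahead state and a (possibly NULL) file separately, whereas the static do_generic_file_read() derives the mapping and readahead state from the struct file, so a valid filp is now required. A rough standalone illustration of that shape follows; the struct definitions are toy stand-ins, not kernel types.

#include <stdio.h>

/* Toy stand-ins; field names loosely follow struct file (f_mapping, f_ra)
 * but nothing here is real kernel code. */
struct ra_state      { unsigned long start; };
struct address_space { const char *host_name; };
struct file {
	struct address_space *f_mapping;
	struct ra_state       f_ra;
};

/* New shape of the helper: only the file is passed; the mapping and the
 * readahead state are derived from it, as do_generic_file_read() now does. */
static void generic_read(struct file *filp, long long *ppos)
{
	struct address_space *mapping = filp->f_mapping;  /* was a parameter */
	struct ra_state *ra = &filp->f_ra;                /* was a parameter */

	printf("read %s at %lld (ra.start=%lu)\n",
	       mapping->host_name, *ppos, ra->start);
	ra->start = (unsigned long)(*ppos >> 12);
	*ppos += 4096;
}

int main(void)
{
	struct address_space as = { "inode:42" };
	struct file f = { &as, { 0 } };
	long long pos = 0;

	generic_read(&f, &pos);
	generic_read(&f, &pos);
	return 0;
}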
@@ -1322,7 +1323,7 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        struct file_ra_state *ra = &file->f_ra;
        struct inode *inode = mapping->host;
        struct page *page;
-       unsigned long size;
+       pgoff_t size;
        int did_readaround = 0;
        int ret = 0;
 
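In filemap_fault() the local size holds the file length rounded up to whole pages and is compared against the faulting page offset (that comparison sits just outside this hunk), so the patch gives it the kernel's page-index type, pgoff_t, instead of a bare unsigned long. A minimal userspace sketch of that rounding, assuming 4 KiB pages and mimicking the typedef:

#include <stdio.h>

typedef unsigned long pgoff_t;   /* same underlying type as in the kernel */
#define PAGE_SHIFT 12            /* 4 KiB pages assumed for this example */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	long long i_size = 5LL * 1024 * 1024 * 1024 + 123;   /* a 5 GiB file */

	/* File length rounded up to whole pages: a page index, hence pgoff_t
	 * rather than a generic unsigned long in filemap_fault(). */
	pgoff_t size = (pgoff_t)((i_size + PAGE_SIZE - 1) >> PAGE_SHIFT);

	printf("%lld bytes -> %lu pages\n", i_size, size);
	return 0;
}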
@@ -1741,21 +1742,27 @@ size_t iov_iter_copy_from_user(struct page *page,
 }
 EXPORT_SYMBOL(iov_iter_copy_from_user);
 
-static void __iov_iter_advance_iov(struct iov_iter *i, size_t bytes)
+void iov_iter_advance(struct iov_iter *i, size_t bytes)
 {
+       BUG_ON(i->count < bytes);
+
        if (likely(i->nr_segs == 1)) {
                i->iov_offset += bytes;
+               i->count -= bytes;
        } else {
                const struct iovec *iov = i->iov;
                size_t base = i->iov_offset;
 
                /*
                 * The !iov->iov_len check ensures we skip over unlikely
-                * zero-length segments.
+                * zero-length segments (without overruning the iovec).
                 */
-               while (bytes || !iov->iov_len) {
-                       int copy = min(bytes, iov->iov_len - base);
+               while (bytes || unlikely(!iov->iov_len && i->count)) {
+                       int copy;
 
+                       copy = min(bytes, iov->iov_len - base);
+                       BUG_ON(!i->count || i->count < copy);
+                       i->count -= copy;
                        bytes -= copy;
                        base += copy;
                        if (iov->iov_len == base) {
@@ -1767,14 +1774,6 @@ static void __iov_iter_advance_iov(struct iov_iter *i, size_t bytes)
                i->iov_offset = base;
        }
 }
-
-void iov_iter_advance(struct iov_iter *i, size_t bytes)
-{
-       BUG_ON(i->count < bytes);
-
-       __iov_iter_advance_iov(i, bytes);
-       i->count -= bytes;
-}
 EXPORT_SYMBOL(iov_iter_advance);
 
 /*
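The last two hunks fold __iov_iter_advance_iov() into iov_iter_advance(): i->count is now decremented segment by segment inside the loop, which lets the loop condition consult i->count and stop at the end of the iovec even when the trailing segments are zero-length. Below is a self-contained userspace re-creation of that multi-segment walk over struct iovec (from <sys/uio.h>) with a small driver; it follows the patched logic but is only an illustration, not the kernel code.

#include <assert.h>
#include <stdio.h>
#include <sys/uio.h>

/* Cut-down iterator over an iovec array, shaped like the kernel's iov_iter. */
struct iov_iter {
	const struct iovec *iov;
	unsigned long nr_segs;
	size_t iov_offset;   /* offset into the current segment */
	size_t count;        /* bytes remaining in the whole iterator */
};

/* Follows the patched logic: walk the segments, skipping zero-length ones,
 * and decrement count as each segment is consumed. */
static void iov_iter_advance(struct iov_iter *i, size_t bytes)
{
	assert(i->count >= bytes);

	if (i->nr_segs == 1) {
		i->iov_offset += bytes;
		i->count -= bytes;
	} else {
		const struct iovec *iov = i->iov;
		size_t base = i->iov_offset;

		/* The !iov->iov_len test skips zero-length segments; checking
		 * i->count as well keeps the walk from running off the end of
		 * the iovec when the trailing segments are all empty. */
		while (bytes || (!iov->iov_len && i->count)) {
			size_t copy = bytes < iov->iov_len - base
					? bytes : iov->iov_len - base;

			assert(i->count && i->count >= copy);
			i->count -= copy;
			bytes -= copy;
			base += copy;
			if (iov->iov_len == base) {
				iov++;
				base = 0;
			}
		}
		i->iov = iov;
		i->iov_offset = base;
	}
}

int main(void)
{
	char a[4], c[6], unused[1];
	struct iovec v[4] = {
		{ a, sizeof(a) }, { unused, 0 },   /* zero-length middle segment */
		{ c, sizeof(c) }, { unused, 0 },   /* zero-length trailing segment */
	};
	struct iov_iter it = { v, 4, 0, 10 };

	iov_iter_advance(&it, 6);    /* consumes a[] plus two bytes of c[] */
	printf("segment=%ld offset=%zu remaining=%zu\n",
	       (long)(it.iov - v), it.iov_offset, it.count);

	iov_iter_advance(&it, 4);    /* consumes the rest of c[] and stops */
	printf("segment=%ld offset=%zu remaining=%zu\n",
	       (long)(it.iov - v), it.iov_offset, it.count);
	return 0;
}

Running it prints the segment index, the offset within that segment and the bytes still to consume after each advance; note that the second advance stops cleanly even though the final segment is empty.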