X-Git-Url: http://pilppa.org/gitweb/gitweb.cgi?a=blobdiff_plain;f=mm%2Ffilemap.c;h=2dead9adf8b76806e66ac1b0169510773be58e14;hb=7f3d4ee108c184ab215036051087aaaaa8de7661;hp=b7b1be6dbd83840ce287b515e0f1fab422acba7a;hpb=ab1ecbabb1c7b1599b1eb70c291407c557ea4ef3;p=linux-2.6-omap-h63xx.git

diff --git a/mm/filemap.c b/mm/filemap.c
index b7b1be6dbd8..2dead9adf8b 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -28,7 +28,6 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
@@ -344,7 +343,7 @@ int sync_page_range(struct inode *inode, struct address_space *mapping,
 EXPORT_SYMBOL(sync_page_range);
 
 /**
- * sync_page_range_nolock
+ * sync_page_range_nolock - write & wait on all pages in the passed range without locking
  * @inode: target inode
  * @mapping: target address_space
  * @pos: beginning offset in pages to write
@@ -577,10 +576,12 @@ EXPORT_SYMBOL(unlock_page);
  */
 void end_page_writeback(struct page *page)
 {
-	if (!TestClearPageReclaim(page) || rotate_reclaimable_page(page)) {
-		if (!test_clear_page_writeback(page))
-			BUG();
-	}
+	if (TestClearPageReclaim(page))
+		rotate_reclaimable_page(page);
+
+	if (!test_clear_page_writeback(page))
+		BUG();
+
 	smp_mb__after_clear_bit();
 	wake_up_page(page, PG_writeback);
 }
@@ -604,7 +605,7 @@ void __lock_page(struct page *page)
 }
 EXPORT_SYMBOL(__lock_page);
 
-int fastcall __lock_page_killable(struct page *page)
+int __lock_page_killable(struct page *page)
 {
 	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
 
@@ -612,7 +613,10 @@ int fastcall __lock_page_killable(struct page *page)
 			sync_page_killable, TASK_KILLABLE);
 }
 
-/*
+/**
+ * __lock_page_nosync - get a lock on the page, without calling sync_page()
+ * @page: the page to lock
+ *
  * Variant of lock_page that does not require the caller to hold a reference
  * on the page's mapping.
  */
@@ -1539,9 +1543,20 @@ repeat:
 	return page;
 }
 
-/*
+/**
+ * read_cache_page_async - read into page cache, fill it if needed
+ * @mapping: the page's address_space
+ * @index: the page index
+ * @filler: function to perform the read
+ * @data: destination for read data
+ *
  * Same as read_cache_page, but don't wait for page to become unlocked
  * after submitting it to the filler.
+ *
+ * Read into the page cache. If a page already exists, and PageUptodate() is
+ * not set, try to fill the page but don't wait for it to become unlocked.
+ *
+ * If the page does not get brought uptodate, return -EIO.
  */
 struct page *read_cache_page_async(struct address_space *mapping,
 				pgoff_t index,
@@ -1640,7 +1655,7 @@ int should_remove_suid(struct dentry *dentry)
 }
 EXPORT_SYMBOL(should_remove_suid);
 
-int __remove_suid(struct dentry *dentry, int kill)
+static int __remove_suid(struct dentry *dentry, int kill)
 {
 	struct iattr newattrs;
 
@@ -1743,21 +1758,27 @@ size_t iov_iter_copy_from_user(struct page *page,
 }
 EXPORT_SYMBOL(iov_iter_copy_from_user);
 
-static void __iov_iter_advance_iov(struct iov_iter *i, size_t bytes)
+void iov_iter_advance(struct iov_iter *i, size_t bytes)
 {
+	BUG_ON(i->count < bytes);
+
 	if (likely(i->nr_segs == 1)) {
 		i->iov_offset += bytes;
+		i->count -= bytes;
 	} else {
 		const struct iovec *iov = i->iov;
 		size_t base = i->iov_offset;
 
 		/*
 		 * The !iov->iov_len check ensures we skip over unlikely
-		 * zero-length segments.
+		 * zero-length segments (without overrunning the iovec).
 		 */
-		while (bytes || !iov->iov_len) {
-			int copy = min(bytes, iov->iov_len - base);
+		while (bytes || unlikely(!iov->iov_len && i->count)) {
+			int copy;
 
+			copy = min(bytes, iov->iov_len - base);
+			BUG_ON(!i->count || i->count < copy);
+			i->count -= copy;
 			bytes -= copy;
 			base += copy;
 			if (iov->iov_len == base) {
@@ -1769,14 +1790,6 @@ static void __iov_iter_advance_iov(struct iov_iter *i, size_t bytes)
 		i->iov_offset = base;
 	}
 }
-
-void iov_iter_advance(struct iov_iter *i, size_t bytes)
-{
-	BUG_ON(i->count < bytes);
-
-	__iov_iter_advance_iov(i, bytes);
-	i->count -= bytes;
-}
 EXPORT_SYMBOL(iov_iter_advance);
 
 /*
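
Note on the iov_iter_advance() rework above: folding __iov_iter_advance_iov() into iov_iter_advance() lets one pass over the iovec array keep i->count in step with the segment walk, and the added i->count test stops the zero-length-segment skip from running off the end of the array. The following is a minimal userspace sketch of that loop, not the kernel code: struct iter is a cut-down stand-in for the kernel's struct iov_iter, min_sz() stands in for the kernel's min(), and assert() stands in for BUG_ON().

/*
 * Userspace sketch of the multi-segment advance loop. Build: cc -o adv adv.c
 */
#include <assert.h>
#include <stdio.h>
#include <sys/uio.h>

struct iter {
	const struct iovec *iov;	/* current segment */
	unsigned long nr_segs;		/* number of segments */
	size_t iov_offset;		/* offset into current segment */
	size_t count;			/* total bytes remaining */
};

static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

static void advance(struct iter *i, size_t bytes)
{
	assert(i->count >= bytes);		/* kernel: BUG_ON(i->count < bytes) */

	if (i->nr_segs == 1) {			/* common single-segment fast path */
		i->iov_offset += bytes;
		i->count -= bytes;
	} else {
		const struct iovec *iov = i->iov;
		size_t base = i->iov_offset;

		/*
		 * Keep walking while bytes remain; the !iov_len test skips
		 * zero-length segments, and the i->count test keeps the walk
		 * from overrunning the iovec array.
		 */
		while (bytes || (!iov->iov_len && i->count)) {
			size_t copy = min_sz(bytes, iov->iov_len - base);

			assert(i->count >= copy);
			i->count -= copy;
			bytes -= copy;
			base += copy;
			if (iov->iov_len == base) {	/* segment consumed */
				iov++;
				base = 0;
			}
		}
		i->iov = iov;
		i->iov_offset = base;
	}
}

int main(void)
{
	char a[4], b[8];
	/* middle segment is zero-length: the loop must step over it */
	struct iovec v[3] = { { a, sizeof(a) }, { a, 0 }, { b, sizeof(b) } };
	struct iter i = { v, 3, 0, sizeof(a) + sizeof(b) };

	advance(&i, 6);		/* eats all of a[], skips v[1], 2 bytes of b[] */
	printf("segment=%ld offset=%zu remaining=%zu\n",
	       (long)(i.iov - v), i.iov_offset, i.count);
	return 0;
}

Run, the sketch prints segment=2 offset=2 remaining=6: the advance consumed all of a[], stepped over the zero-length middle segment, and stopped two bytes into b[], which is the behaviour the patched kernel loop is meant to guarantee even when zero-length segments sit at the end of the array.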