diff --git a/mm/truncate.c b/mm/truncate.c
index cadc15653ddeeaa311c80f794b6a564ae2f36fc8..6650c1d878b463a114e0e024688ecdc1d0fa6b74 100644
@@ -21,7 +21,7 @@
 
 
 /**
- * do_invalidatepage - invalidate part of all of a page
+ * do_invalidatepage - invalidate part or all of a page
  * @page: the page which is affected
  * @offset: the index of the truncation point
  *
@@ -48,7 +48,7 @@ void do_invalidatepage(struct page *page, unsigned long offset)
 
 static inline void truncate_partial_page(struct page *page, unsigned partial)
 {
-       zero_user_page(page, partial, PAGE_CACHE_SIZE - partial, KM_USER0);
+       zero_user_segment(page, partial, PAGE_CACHE_SIZE);
        if (PagePrivate(page))
                do_invalidatepage(page, partial);
 }
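
Note: zero_user_segment(page, start, end) zeroes the byte range [start, end) of the page, so the replacement call above clears everything from the truncation point to the end of the page, just as the old zero_user_page() call did. A rough, illustration-only sketch of what the helper does (the function name below is made up; the real helper lives in <linux/highmem.h> and its exact kmap/flush ordering is authoritative):

	/* Illustration only -- not part of this patch. */
	static inline void zero_tail_sketch(struct page *page,
					    unsigned int start, unsigned int end)
	{
		void *kaddr = kmap_atomic(page, KM_USER0);	/* map (possibly highmem) page */

		memset(kaddr + start, 0, end - start);		/* clear [start, end) */
		flush_dcache_page(page);			/* keep D-cache coherent */
		kunmap_atomic(kaddr, KM_USER0);
	}
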
@@ -84,7 +84,7 @@ EXPORT_SYMBOL(cancel_dirty_page);
 
 /*
  * If truncate cannot remove the fs-private metadata from the page, the page
- * becomes anonymous.  It will be left on the LRU and may even be mapped into
+ * becomes orphaned.  It will be left on the LRU and may even be mapped into
  * user pagetables if we're racing with filemap_fault().
  *
  * We need to bale out if page->mapping is no longer equal to the original
@@ -98,13 +98,12 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
        if (page->mapping != mapping)
                return;
 
-       cancel_dirty_page(page, PAGE_CACHE_SIZE);
-
        if (PagePrivate(page))
                do_invalidatepage(page, 0);
 
+       cancel_dirty_page(page, PAGE_CACHE_SIZE);
+
        remove_from_page_cache(page);
-       ClearPageUptodate(page);
        ClearPageMappedToDisk(page);
        page_cache_release(page);       /* pagecache ref */
 }
@@ -134,8 +133,7 @@ invalidate_complete_page(struct address_space *mapping, struct page *page)
 }
 
 /**
- * truncate_inode_pages - truncate range of pages specified by start and
- * end byte offsets
+ * truncate_inode_pages - truncate range of pages specified by start & end byte offsets
  * @mapping: mapping to truncate
  * @lstart: offset from which to truncate
  * @lend: offset to which to truncate
@@ -189,7 +187,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
                        if (page_index > next)
                                next = page_index;
                        next++;
-                       if (TestSetPageLocked(page))
+                       if (!trylock_page(page))
                                continue;
                        if (PageWriteback(page)) {
                                unlock_page(page);
@@ -282,7 +280,7 @@ unsigned long __invalidate_mapping_pages(struct address_space *mapping,
                        pgoff_t index;
                        int lock_failed;
 
-                       lock_failed = TestSetPageLocked(page);
+                       lock_failed = !trylock_page(page);
 
                        /*
                         * We really shouldn't be looking at the ->index of an
@@ -350,18 +348,17 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
        if (PagePrivate(page) && !try_to_release_page(page, GFP_KERNEL))
                return 0;
 
-       write_lock_irq(&mapping->tree_lock);
+       spin_lock_irq(&mapping->tree_lock);
        if (PageDirty(page))
                goto failed;
 
        BUG_ON(PagePrivate(page));
        __remove_from_page_cache(page);
-       write_unlock_irq(&mapping->tree_lock);
-       ClearPageUptodate(page);
+       spin_unlock_irq(&mapping->tree_lock);
        page_cache_release(page);       /* pagecache ref */
        return 1;
 failed:
-       write_unlock_irq(&mapping->tree_lock);
+       spin_unlock_irq(&mapping->tree_lock);
        return 0;
 }
 
@@ -383,7 +380,7 @@ static int do_launder_page(struct address_space *mapping, struct page *page)
  * Any pages which are found to be mapped into pagetables are unmapped prior to
  * invalidation.
  *
- * Returns -EIO if any pages could not be invalidated.
+ * Returns -EBUSY if any pages could not be invalidated.
  */
 int invalidate_inode_pages2_range(struct address_space *mapping,
                                  pgoff_t start, pgoff_t end)
@@ -392,6 +389,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
        pgoff_t next;
        int i;
        int ret = 0;
+       int ret2 = 0;
        int did_range_unmap = 0;
        int wrapped = 0;
 
@@ -439,9 +437,13 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
                                }
                        }
                        BUG_ON(page_mapped(page));
-                       ret = do_launder_page(mapping, page);
-                       if (ret == 0 && !invalidate_complete_page2(mapping, page))
-                               ret = -EIO;
+                       ret2 = do_launder_page(mapping, page);
+                       if (ret2 == 0) {
+                               if (!invalidate_complete_page2(mapping, page))
+                                       ret2 = -EBUSY;
+                       }
+                       if (ret2 < 0)
+                               ret = ret2;
                        unlock_page(page);
                }
                pagevec_release(&pvec);
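
With invalidate_inode_pages2_range() now documented as returning -EBUSY (rather than -EIO) when a page cannot be invalidated, a caller can treat the failure as retryable. A hypothetical caller sketch, not part of this patch (the helper name is made up for illustration; it simply writes the mapping back and retries once):

	static int invalidate_range_retry(struct address_space *mapping,
					  pgoff_t start, pgoff_t end)
	{
		int err = invalidate_inode_pages2_range(mapping, start, end);

		if (err == -EBUSY) {
			/* a page was redirtied or repopulated under us;
			 * write the mapping back and retry once */
			filemap_fdatawrite(mapping);
			filemap_fdatawait(mapping);
			err = invalidate_inode_pages2_range(mapping, start, end);
		}
		return err;
	}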