return ret;
 }
 
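+/*
+ * Tag a page as belonging to the extent map code: setting PagePrivate
+ * makes sure our invalidatepage callback is run before the page cache
+ * drops the page, and the reference taken with page_cache_get() keeps
+ * the page pinned while extent state may still reference it.  The
+ * private value is only used as a flag, so it is simply set to 1.
+ */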
+void set_page_extent_mapped(struct page *page)
+{
+       if (!PagePrivate(page)) {
+               SetPagePrivate(page);
+               WARN_ON(!page->mapping->a_ops->invalidatepage);
+               set_page_private(page, 1);
+               page_cache_get(page);
+       }
+}
+
 /*
  * basic readpage implementation.  Locked extent state structs are inserted
  * into the tree that are removed when the IO is done (by the end_io
        size_t iosize;
        size_t blocksize = inode->i_sb->s_blocksize;
 
-       if (!PagePrivate(page)) {
-               SetPagePrivate(page);
-               WARN_ON(!page->mapping->a_ops->invalidatepage);
-               set_page_private(page, 1);
-               page_cache_get(page);
-       }
+       set_page_extent_mapped(page);
 
        end = page_end;
        lock_extent(tree, start, end, GFP_NOFS);
                               PAGE_CACHE_SIZE - offset, KM_USER0);
        }
 
-       if (!PagePrivate(page)) {
-               SetPagePrivate(page);
-               set_page_private(page, 1);
-               WARN_ON(!page->mapping->a_ops->invalidatepage);
-               page_cache_get(page);
-       }
+       set_page_extent_mapped(page);
 
        lock_extent(tree, start, page_end, GFP_NOFS);
        nr_delalloc = find_lock_delalloc_range(tree, start, page_end + 1,
 {
        loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
 
-       if (!PagePrivate(page)) {
-               SetPagePrivate(page);
-               set_page_private(page, 1);
-               WARN_ON(!page->mapping->a_ops->invalidatepage);
-               page_cache_get(page);
-       }
-
+       set_page_extent_mapped(page);
+
        set_page_dirty(page);
 
        if (pos > inode->i_size) {
        int ret = 0;
        int isnew;
 
-       if (!PagePrivate(page)) {
-               SetPagePrivate(page);
-               set_page_private(page, 1);
-               WARN_ON(!page->mapping->a_ops->invalidatepage);
-               page_cache_get(page);
-       }
+       set_page_extent_mapped(page);
+
        block_start = (page_start + from) & ~((u64)blocksize - 1);
        block_end = (page_start + to - 1) | (blocksize - 1);
        orig_block_start = block_start;
 
 int set_range_dirty(struct extent_map_tree *tree, u64 start, u64 end);
 int set_state_private(struct extent_map_tree *tree, u64 start, u64 private);
 int get_state_private(struct extent_map_tree *tree, u64 start, u64 *private);
+void set_page_extent_mapped(struct page *page);
 #endif
 
                }
                cancel_dirty_page(pages[i], PAGE_CACHE_SIZE);
                wait_on_page_writeback(pages[i]);
-               if (!PagePrivate(pages[i])) {
-                       SetPagePrivate(pages[i]);
-                       set_page_private(pages[i], 1);
-                       WARN_ON(!pages[i]->mapping->a_ops->invalidatepage);
-                       page_cache_get(pages[i]);
-               }
+               set_page_extent_mapped(pages[i]);
                WARN_ON(!PageLocked(pages[i]));
        }
        return 0;
 
        u64 page_start = page->index << PAGE_CACHE_SHIFT;
        u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
 
-       if (!PagePrivate(page)) {
-               SetPagePrivate(page);
-               set_page_private(page, 1);
-               WARN_ON(!page->mapping->a_ops->invalidatepage);
-               page_cache_get(page);
-       }
+       set_page_extent_mapped(page);
 
        lock_extent(em_tree, page_start, page_end, GFP_NOFS);
        set_extent_delalloc(&BTRFS_I(inode)->extent_tree, page_start,