diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 98d26c8e05659f1892527cc5a4a12b388560068b..d1db8c17a74e8eb7b5ccd0a0a971808d1960606f 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -54,7 +54,6 @@ xfs_page_trace(
        int             mask)
 {
        xfs_inode_t     *ip;
-       bhv_desc_t      *bdp;
        vnode_t         *vp = LINVFS_GET_VP(inode);
        loff_t          isize = i_size_read(inode);
        loff_t          offset = page_offset(page);
@@ -63,8 +62,7 @@ xfs_page_trace(
        if (page_has_buffers(page))
                xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
 
-       bdp = vn_bhv_lookup(VN_BHV_HEAD(vp), &xfs_vnodeops);
-       ip = XFS_BHVTOI(bdp);
+       ip = xfs_vtoi(vp);
        if (!ip->i_rwtrace)
                return;
 
@@ -414,7 +412,7 @@ STATIC void
 xfs_add_to_ioend(
        struct inode            *inode,
        struct buffer_head      *bh,
-       unsigned int            p_offset,
+       xfs_off_t               offset,
        unsigned int            type,
        xfs_ioend_t             **result,
        int                     need_ioend)
@@ -423,10 +421,7 @@ xfs_add_to_ioend(
 
        if (!ioend || need_ioend || type != ioend->io_type) {
                xfs_ioend_t     *previous = *result;
-               xfs_off_t       offset;
 
-               offset = (xfs_off_t)bh->b_page->index << PAGE_CACHE_SHIFT;
-               offset += p_offset;
                ioend = xfs_alloc_ioend(inode, type);
                ioend->io_offset = offset;
                ioend->io_buffer_head = bh;
@@ -473,13 +468,13 @@ xfs_map_at_offset(
 }
 
 /*
- * Look for a page at index which is unlocked and not mapped
- * yet - clustering for mmap write case.
+ * Look for a page at index that is suitable for clustering.
  */
 STATIC unsigned int
-xfs_probe_unmapped_page(
+xfs_probe_page(
        struct page             *page,
-       unsigned int            pg_offset)
+       unsigned int            pg_offset,
+       int                     mapped)
 {
        int                     ret = 0;
 
@@ -492,25 +487,28 @@ xfs_probe_unmapped_page(
 
                        bh = head = page_buffers(page);
                        do {
-                               if (buffer_mapped(bh) || !buffer_uptodate(bh))
+                               if (!buffer_uptodate(bh))
+                                       break;
+                               if (mapped != buffer_mapped(bh))
                                        break;
                                ret += bh->b_size;
                                if (ret >= pg_offset)
                                        break;
                        } while ((bh = bh->b_this_page) != head);
                } else
-                       ret = PAGE_CACHE_SIZE;
+                       ret = mapped ? 0 : PAGE_CACHE_SIZE;
        }
 
        return ret;
 }
 
 STATIC size_t
-xfs_probe_unmapped_cluster(
+xfs_probe_cluster(
        struct inode            *inode,
        struct page             *startpage,
        struct buffer_head      *bh,
-       struct buffer_head      *head)
+       struct buffer_head      *head,
+       int                     mapped)
 {
        struct pagevec          pvec;
        pgoff_t                 tindex, tlast, tloff;
@@ -519,7 +517,7 @@ xfs_probe_unmapped_cluster(
 
        /* First sum forwards in this page */
        do {
-               if (buffer_mapped(bh))
+               if (mapped != buffer_mapped(bh))
                        return total;
                total += bh->b_size;
        } while ((bh = bh->b_this_page) != head);
@@ -553,7 +551,7 @@ xfs_probe_unmapped_cluster(
                                pg_offset = PAGE_CACHE_SIZE;
 
                        if (page->index == tindex && !TestSetPageLocked(page)) {
-                               len = xfs_probe_unmapped_page(page, pg_offset);
+                               len = xfs_probe_page(page, pg_offset, mapped);
                                unlock_page(page);
                        }
 
@@ -595,6 +593,8 @@ xfs_is_delayed_page(
                                acceptable = (type == IOMAP_UNWRITTEN);
                        else if (buffer_delay(bh))
                                acceptable = (type == IOMAP_DELAY);
+                       else if (buffer_mapped(bh))
+                               acceptable = (type == 0);
                        else
                                break;
                } while ((bh = bh->b_this_page) != head);
@@ -666,7 +666,6 @@ xfs_convert_page(
        p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
        page_dirty = p_offset / len;
 
-       p_offset = 0;
        bh = head = page_buffers(page);
        do {
                if (offset >= end_offset)
@@ -694,7 +693,7 @@ xfs_convert_page(
 
                        xfs_map_at_offset(bh, offset, bbits, mp);
                        if (startio) {
-                               xfs_add_to_ioend(inode, bh, p_offset,
+                               xfs_add_to_ioend(inode, bh, offset,
                                                type, ioendp, done);
                        } else {
                                set_buffer_dirty(bh);
@@ -707,7 +706,7 @@ xfs_convert_page(
                        type = 0;
                        if (buffer_mapped(bh) && all_bh && startio) {
                                lock_buffer(bh);
-                               xfs_add_to_ioend(inode, bh, p_offset,
+                               xfs_add_to_ioend(inode, bh, offset,
                                                type, ioendp, done);
                                count++;
                                page_dirty--;
@@ -715,15 +714,23 @@ xfs_convert_page(
                                done = 1;
                        }
                }
-       } while (offset += len, p_offset += len,
-                (bh = bh->b_this_page) != head);
+       } while (offset += len, (bh = bh->b_this_page) != head);
 
        if (uptodate && bh == head)
                SetPageUptodate(page);
 
        if (startio) {
-               if (count)
-                       wbc->nr_to_write--;
+               if (count) {
+                       struct backing_dev_info *bdi;
+
+                       bdi = inode->i_mapping->backing_dev_info;
+                       if (bdi_write_congested(bdi)) {
+                               wbc->encountered_congestion = 1;
+                               done = 1;
+                       } else if (--wbc->nr_to_write <= 0) {
+                               done = 1;
+                       }
+               }
                xfs_start_page_writeback(page, wbc, !page_dirty, count);
        }
 
@@ -809,9 +816,10 @@ xfs_page_state_convert(
        ssize_t                 size, len;
        int                     flags, err, iomap_valid = 0, uptodate = 1;
        int                     page_dirty, count = 0, trylock_flag = 0;
+       int                     all_bh = unmapped;
 
        /* wait for other IO threads? */
-       if (startio && wbc->sync_mode != WB_SYNC_NONE)
+       if (startio && (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking))
                trylock_flag |= BMAPI_TRYLOCK;
 
        /* Is this page beyond the end of the file? */
@@ -850,6 +858,8 @@ xfs_page_state_convert(
 
        bh = head = page_buffers(page);
        offset = page_offset(page);
+       flags = -1;
+       type = 0;
 
        /* TODO: cleanup count and page_dirty */
 
@@ -883,6 +893,12 @@ xfs_page_state_convert(
                if (buffer_unwritten(bh) || buffer_delay(bh) ||
                    ((buffer_uptodate(bh) || PageUptodate(page)) &&
                     !buffer_mapped(bh) && (unmapped || startio))) {
+                       /*
+                        * Make sure we don't use a read-only iomap
+                        */
+                       if (flags == BMAPI_READ)
+                               iomap_valid = 0;
+
                        if (buffer_unwritten(bh)) {
                                type = IOMAP_UNWRITTEN;
                                flags = BMAPI_WRITE|BMAPI_IGNSTATE;
@@ -892,14 +908,14 @@ xfs_page_state_convert(
                                if (!startio)
                                        flags |= trylock_flag;
                        } else {
-                               type = 0;
+                               type = IOMAP_NEW;
                                flags = BMAPI_WRITE|BMAPI_MMAP;
                        }
 
                        if (!iomap_valid) {
-                               if (type == 0) {
-                                       size = xfs_probe_unmapped_cluster(inode,
-                                                       page, bh, head);
+                               if (type == IOMAP_NEW) {
+                                       size = xfs_probe_cluster(inode,
+                                                       page, bh, head, 0);
                                } else {
                                        size = len;
                                }
@@ -914,7 +930,7 @@ xfs_page_state_convert(
                                xfs_map_at_offset(bh, offset,
                                                inode->i_blkbits, &iomap);
                                if (startio) {
-                                       xfs_add_to_ioend(inode, bh, p_offset,
+                                       xfs_add_to_ioend(inode, bh, offset,
                                                        type, &ioend,
                                                        !iomap_valid);
                                } else {
@@ -926,12 +942,28 @@ xfs_page_state_convert(
                                count++;
                        }
                } else if (buffer_uptodate(bh) && startio) {
-                       type = 0;
+                       /*
+                        * we got here because the buffer is already mapped.
+                        * That means it must already have extents allocated
+                        * underneath it. Map the extent by reading it.
+                        */
+                       if (!iomap_valid || type != 0) {
+                               flags = BMAPI_READ;
+                               size = xfs_probe_cluster(inode, page, bh,
+                                                               head, 1);
+                               err = xfs_map_blocks(inode, offset, size,
+                                               &iomap, flags);
+                               if (err)
+                                       goto error;
+                               iomap_valid = xfs_iomap_valid(&iomap, offset);
+                       }
 
+                       type = 0;
                        if (!test_and_set_bit(BH_Lock, &bh->b_state)) {
                                ASSERT(buffer_mapped(bh));
-                               xfs_add_to_ioend(inode,
-                                               bh, p_offset, type,
+                               if (iomap_valid)
+                                       all_bh = 1;
+                               xfs_add_to_ioend(inode, bh, offset, type,
                                                &ioend, !iomap_valid);
                                page_dirty--;
                                count++;
@@ -959,7 +991,7 @@ xfs_page_state_convert(
                                        PAGE_CACHE_SHIFT;
                tlast = min_t(pgoff_t, offset, last_index);
                xfs_cluster_write(inode, page->index + 1, &iomap, &ioend,
-                                       wbc, startio, unmapped, tlast);
+                                       wbc, startio, all_bh, tlast);
        }
 
        if (iohead)