int mask)
{
xfs_inode_t *ip;
- bhv_desc_t *bdp;
vnode_t *vp = LINVFS_GET_VP(inode);
loff_t isize = i_size_read(inode);
loff_t offset = page_offset(page);
if (page_has_buffers(page))
xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
- bdp = vn_bhv_lookup(VN_BHV_HEAD(vp), &xfs_vnodeops);
- ip = XFS_BHVTOI(bdp);
+ ip = xfs_vtoi(vp);
if (!ip->i_rwtrace)
return;
xfs_add_to_ioend(
struct inode *inode,
struct buffer_head *bh,
- unsigned int p_offset,
+ xfs_off_t offset,
unsigned int type,
xfs_ioend_t **result,
int need_ioend)
if (!ioend || need_ioend || type != ioend->io_type) {
xfs_ioend_t *previous = *result;
- xfs_off_t offset;
- offset = (xfs_off_t)bh->b_page->index << PAGE_CACHE_SHIFT;
- offset += p_offset;
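+ /* start a new ioend at the file offset passed in by the caller */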
ioend = xfs_alloc_ioend(inode, type);
ioend->io_offset = offset;
ioend->io_buffer_head = bh;
}
/*
- * Look for a page at index which is unlocked and not mapped
- * yet - clustering for mmap write case.
+ * Look for a page at index that is suitable for clustering:
+ * dirty, up to date, and with buffers whose mapped state matches
+ * the state being probed for.
*/
STATIC unsigned int
-xfs_probe_unmapped_page(
+xfs_probe_page(
struct page *page,
- unsigned int pg_offset)
+ unsigned int pg_offset,
+ int mapped)
{
int ret = 0;
bh = head = page_buffers(page);
do {
- if (buffer_mapped(bh) || !buffer_uptodate(bh))
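+ /* buffers must be uptodate and match the mapped state being probed for */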
+ if (!buffer_uptodate(bh))
+ break;
+ if (mapped != buffer_mapped(bh))
break;
ret += bh->b_size;
if (ret >= pg_offset)
break;
} while ((bh = bh->b_this_page) != head);
} else
- ret = PAGE_CACHE_SIZE;
+ ret = mapped ? 0 : PAGE_CACHE_SIZE;
}
return ret;
}
STATIC size_t
-xfs_probe_unmapped_cluster(
+xfs_probe_cluster(
struct inode *inode,
struct page *startpage,
struct buffer_head *bh,
- struct buffer_head *head)
+ struct buffer_head *head,
+ int mapped)
{
struct pagevec pvec;
pgoff_t tindex, tlast, tloff;
/* First sum forwards in this page */
do {
- if (buffer_mapped(bh))
+ if (mapped != buffer_mapped(bh))
return total;
total += bh->b_size;
} while ((bh = bh->b_this_page) != head);
pg_offset = PAGE_CACHE_SIZE;
if (page->index == tindex && !TestSetPageLocked(page)) {
- len = xfs_probe_unmapped_page(page, pg_offset);
+ len = xfs_probe_page(page, pg_offset, mapped);
unlock_page(page);
}
if (buffer_unwritten(bh))
acceptable = (type == IOMAP_UNWRITTEN);
else if (buffer_delay(bh))
acceptable = (type == IOMAP_DELAY);
+ else if (buffer_mapped(bh))
+ acceptable = (type == 0);
else
break;
} while ((bh = bh->b_this_page) != head);
p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
page_dirty = p_offset / len;
- p_offset = 0;
bh = head = page_buffers(page);
do {
if (offset >= end_offset)
break;
xfs_map_at_offset(bh, offset, bbits, mp);
if (startio) {
- xfs_add_to_ioend(inode, bh, p_offset,
+ xfs_add_to_ioend(inode, bh, offset,
type, ioendp, done);
} else {
set_buffer_dirty(bh);
type = 0;
if (buffer_mapped(bh) && all_bh && startio) {
lock_buffer(bh);
- xfs_add_to_ioend(inode, bh, p_offset,
+ xfs_add_to_ioend(inode, bh, offset,
type, ioendp, done);
count++;
page_dirty--;
} else {
done = 1;
}
}
- } while (offset += len, p_offset += len,
- (bh = bh->b_this_page) != head);
+ } while (offset += len, (bh = bh->b_this_page) != head);
if (uptodate && bh == head)
SetPageUptodate(page);
if (startio) {
- if (count)
- wbc->nr_to_write--;
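+ /*
+ * Throttle writeback: back off if the backing device is
+ * write-congested, and stop once the nr_to_write quota
+ * for this pass is used up.
+ */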
+ if (count) {
+ struct backing_dev_info *bdi;
+
+ bdi = inode->i_mapping->backing_dev_info;
+ if (bdi_write_congested(bdi)) {
+ wbc->encountered_congestion = 1;
+ done = 1;
+ } else if (--wbc->nr_to_write <= 0) {
+ done = 1;
+ }
+ }
xfs_start_page_writeback(page, wbc, !page_dirty, count);
}
ssize_t size, len;
int flags, err, iomap_valid = 0, uptodate = 1;
int page_dirty, count = 0, trylock_flag = 0;
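+ /* should clustering also pick up already-mapped buffers? */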
+ int all_bh = unmapped;
/* don't block on the extent lock when writeback must not block */
- if (startio && wbc->sync_mode != WB_SYNC_NONE)
+ if (startio && (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking))
trylock_flag |= BMAPI_TRYLOCK;
/* Is this page beyond the end of the file? */
bh = head = page_buffers(page);
offset = page_offset(page);
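+ /* no mapping flags chosen yet; assume plain already-allocated blocks */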
+ flags = -1;
+ type = 0;
/* TODO: cleanup count and page_dirty */
if (buffer_unwritten(bh) || buffer_delay(bh) ||
((buffer_uptodate(bh) || PageUptodate(page)) &&
!buffer_mapped(bh) && (unmapped || startio))) {
+ /*
+ * Make sure we don't use a read-only iomap
+ */
+ if (flags == BMAPI_READ)
+ iomap_valid = 0;
+
if (buffer_unwritten(bh)) {
type = IOMAP_UNWRITTEN;
flags = BMAPI_WRITE|BMAPI_IGNSTATE;
if (!startio)
flags |= trylock_flag;
} else {
- type = 0;
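+ /* newly allocated blocks, e.g. an mmap write into a hole */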
+ type = IOMAP_NEW;
flags = BMAPI_WRITE|BMAPI_MMAP;
}
if (!iomap_valid) {
- if (type == 0) {
- size = xfs_probe_unmapped_cluster(inode,
- page, bh, head);
+ if (type == IOMAP_NEW) {
+ size = xfs_probe_cluster(inode,
+ page, bh, head, 0);
} else {
size = len;
}
xfs_map_at_offset(bh, offset,
inode->i_blkbits, &iomap);
if (startio) {
- xfs_add_to_ioend(inode, bh, p_offset,
+ xfs_add_to_ioend(inode, bh, offset,
type, &ioend,
!iomap_valid);
} else {
count++;
}
} else if (buffer_uptodate(bh) && startio) {
- type = 0;
+ /*
+ * We got here because the buffer is already mapped.
+ * That means it must already have extents allocated
+ * underneath it. Map the extent by reading it.
+ */
+ if (!iomap_valid || type != 0) {
+ flags = BMAPI_READ;
+ size = xfs_probe_cluster(inode, page, bh,
+ head, 1);
+ err = xfs_map_blocks(inode, offset, size,
+ &iomap, flags);
+ if (err)
+ goto error;
+ iomap_valid = xfs_iomap_valid(&iomap, offset);
+ }
+ type = 0;
if (!test_and_set_bit(BH_Lock, &bh->b_state)) {
ASSERT(buffer_mapped(bh));
- xfs_add_to_ioend(inode,
- bh, p_offset, type,
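+ /* a valid mapping lets additional pages cluster mapped buffers too */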
+ if (iomap_valid)
+ all_bh = 1;
+ xfs_add_to_ioend(inode, bh, offset, type,
&ioend, !iomap_valid);
page_dirty--;
count++;
PAGE_CACHE_SHIFT;
tlast = min_t(pgoff_t, offset, last_index);
xfs_cluster_write(inode, page->index + 1, &iomap, &ioend,
- wbc, startio, unmapped, tlast);
+ wbc, startio, all_bh, tlast);
}
if (iohead)