#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_iomap.h"
+#include "xfs_vnodeops.h"
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#define xfs_page_trace(tag, inode, page, pgoff)
#endif
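+/*
+ * Return the block device backing an inode's data: realtime inodes do
+ * their data I/O to the realtime device, all other inodes to the data
+ * device.
+ */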
+STATIC struct block_device *
+xfs_find_bdev_for_inode(
+ struct xfs_inode *ip)
+{
+ struct xfs_mount *mp = ip->i_mount;
+
+ if (XFS_IS_REALTIME_INODE(ip))
+ return mp->m_rtdev_targp->bt_bdev;
+ else
+ return mp->m_ddev_targp->bt_bdev;
+}
+
/*
* Schedule IO completion handling on a xfsdatad if this was
* the final hold on this ioend. If we are asked to wait,
next = bh->b_private;
bh->b_end_io(bh, !ioend->io_error);
}
- if (unlikely(ioend->io_error))
- vn_ioerror(ioend->io_vnode, ioend->io_error, __FILE__,__LINE__);
- vn_iowake(ioend->io_vnode);
+ if (unlikely(ioend->io_error)) {
+ vn_ioerror(XFS_I(ioend->io_inode), ioend->io_error,
+ __FILE__, __LINE__);
+ }
+ vn_iowake(XFS_I(ioend->io_inode));
mempool_free(ioend, xfs_ioend_pool);
}
/*
* Update on-disk file size now that data has been written to disk.
* The current in-memory file size is i_size. If a write is beyond
- * eof io_new_size will be the intended file size until i_size is
+ * eof, i_new_size will be the intended file size until i_size is
* updated. If this write does not extend all the way to the valid
* file size then restrict this update to the end of the write.
*/
xfs_setfilesize(
xfs_ioend_t *ioend)
{
- xfs_inode_t *ip;
+ xfs_inode_t *ip = XFS_I(ioend->io_inode);
xfs_fsize_t isize;
xfs_fsize_t bsize;
- ip = xfs_vtoi(ioend->io_vnode);
- if (!ip)
- return;
-
ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
ASSERT(ioend->io_type != IOMAP_READ);
xfs_ilock(ip, XFS_ILOCK_EXCL);
- isize = MAX(ip->i_size, ip->i_iocore.io_new_size);
+ isize = MAX(ip->i_size, ip->i_new_size);
isize = MIN(isize, bsize);
if (ip->i_d.di_size < isize) {
ip->i_d.di_size = isize;
ip->i_update_core = 1;
ip->i_update_size = 1;
- mark_inode_dirty_sync(vn_to_inode(ioend->io_vnode));
+ mark_inode_dirty_sync(ioend->io_inode);
}
xfs_iunlock(ip, XFS_ILOCK_EXCL);
{
xfs_ioend_t *ioend =
container_of(work, xfs_ioend_t, io_work);
- bhv_vnode_t *vp = ioend->io_vnode;
+ struct xfs_inode *ip = XFS_I(ioend->io_inode);
xfs_off_t offset = ioend->io_offset;
size_t size = ioend->io_size;
if (likely(!ioend->io_error)) {
- bhv_vop_bmap(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL);
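+ /*
+ * Mark the unwritten extents covering this I/O as written,
+ * unless the filesystem has been forcibly shut down.
+ */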
+ if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
+ int error;
+
+ error = xfs_iomap_write_unwritten(ip, offset, size);
+ if (error)
+ ioend->io_error = error;
+ }
xfs_setfilesize(ioend);
}
xfs_destroy_ioend(ioend);
ioend->io_error = 0;
ioend->io_list = NULL;
ioend->io_type = type;
- ioend->io_vnode = vn_from_inode(inode);
+ ioend->io_inode = inode;
ioend->io_buffer_head = NULL;
ioend->io_buffer_tail = NULL;
- atomic_inc(&ioend->io_vnode->v_iocount);
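+ /* dropped again by vn_iowake() when the ioend is torn down */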
+ atomic_inc(&XFS_I(ioend->io_inode)->i_iocount);
ioend->io_offset = 0;
ioend->io_size = 0;
xfs_iomap_t *mapp,
int flags)
{
- bhv_vnode_t *vp = vn_from_inode(inode);
+ xfs_inode_t *ip = XFS_I(inode);
int error, nmaps = 1;
- error = bhv_vop_bmap(vp, offset, count, flags, mapp, &nmaps);
+ error = xfs_iomap(ip, offset, count, flags, mapp, &nmaps);
if (!error && (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)))
- VMODIFY(vp);
+ xfs_iflags_set(ip, XFS_IMODIFIED);
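+ /* core XFS returns positive errnos; the VFS expects them negated */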
return -error;
}
STATIC void
xfs_start_page_writeback(
struct page *page,
- struct writeback_control *wbc,
int clear_dirty,
int buffers)
{
clear_page_dirty_for_io(page);
set_page_writeback(page);
unlock_page(page);
- if (!buffers) {
+ /* If no buffers on the page are to be written, finish it here */
+ if (!buffers)
end_page_writeback(page);
- wbc->pages_skipped++; /* We didn't write this page */
- }
}
static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
unlock_buffer(bh);
} while ((bh = next_bh) != NULL);
- vn_iowake(ioend->io_vnode);
+ vn_iowake(XFS_I(ioend->io_inode));
mempool_free(ioend, xfs_ioend_pool);
} while ((ioend = next) != NULL);
}
done = 1;
}
}
- xfs_start_page_writeback(page, wbc, !page_dirty, count);
+ xfs_start_page_writeback(page, !page_dirty, count);
}
return done;
SetPageUptodate(page);
if (startio)
- xfs_start_page_writeback(page, wbc, 1, count);
+ xfs_start_page_writeback(page, 1, count);
if (ioend && iomap_valid) {
offset = (iomap.iomap_offset + iomap.iomap_bsize - 1) >>
struct address_space *mapping,
struct writeback_control *wbc)
{
- struct bhv_vnode *vp = vn_from_inode(mapping->host);
-
- if (VN_TRUNC(vp))
- VUNTRUNCATE(vp);
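+ /* the XFS_ITRUNCATED inode flag replaces the VN_TRUNC()/VUNTRUNCATE() vnode state */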
+ xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
return generic_writepages(mapping, wbc);
}
int direct,
bmapi_flags_t flags)
{
- bhv_vnode_t *vp = vn_from_inode(inode);
xfs_iomap_t iomap;
xfs_off_t offset;
ssize_t size;
offset = (xfs_off_t)iblock << inode->i_blkbits;
ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
size = bh_result->b_size;
- error = bhv_vop_bmap(vp, offset, size,
+ error = xfs_iomap(XFS_I(inode), offset, size,
create ? flags : BMAPI_READ, &iomap, &niomap);
if (error)
return -error;
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
- bhv_vnode_t *vp = vn_from_inode(inode);
- xfs_iomap_t iomap;
- int maps = 1;
- int error;
+ struct block_device *bdev;
ssize_t ret;
- error = bhv_vop_bmap(vp, offset, 0, BMAPI_DEVICE, &iomap, &maps);
- if (error)
- return -error;
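+ /*
+ * The target device no longer needs a bmap call to find: it
+ * only depends on whether this is a realtime inode.
+ */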
+ bdev = xfs_find_bdev_for_inode(XFS_I(inode));
if (rw == WRITE) {
iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);
ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
- iomap.iomap_target->bt_bdev,
- iov, offset, nr_segs,
+ bdev, iov, offset, nr_segs,
xfs_get_blocks_direct,
xfs_end_io_direct);
} else {
iocb->private = xfs_alloc_ioend(inode, IOMAP_READ);
ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
- iomap.iomap_target->bt_bdev,
- iov, offset, nr_segs,
+ bdev, iov, offset, nr_segs,
xfs_get_blocks_direct,
xfs_end_io_direct);
}
sector_t block)
{
struct inode *inode = (struct inode *)mapping->host;
- bhv_vnode_t *vp = vn_from_inode(inode);
+ struct xfs_inode *ip = XFS_I(inode);
- vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
- bhv_vop_rwlock(vp, VRWLOCK_READ);
- bhv_vop_flush_pages(vp, (xfs_off_t)0, -1, 0, FI_REMAPF);
- bhv_vop_rwunlock(vp, VRWLOCK_READ);
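+ /*
+ * Flush dirty pages first so any delayed allocations are
+ * converted to real extents before block numbers are handed out.
+ */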
+ xfs_itrace_entry(ip);
+ xfs_ilock(ip, XFS_IOLOCK_SHARED);
+ xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF);
+ xfs_iunlock(ip, XFS_IOLOCK_SHARED);
return generic_block_bmap(mapping, block, xfs_get_blocks);
}