#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
-#include "ordered-data.h"
#include "ioctl.h"
#include "print-tree.h"
#include "compat.h"
for (i = 0; i < num_pages; i++) {
if (!pages[i])
break;
+ ClearPageChecked(pages[i]);
unlock_page(pages[i]);
mark_page_accessed(pages[i]);
page_cache_release(pages[i]);
u64 end_of_last_block;
u64 end_pos = pos + write_bytes;
u64 inline_size;
+ int did_inline = 0;
loff_t isize = i_size_read(inode);
start_pos = pos & ~((u64)root->sectorsize - 1);
end_of_last_block = start_pos + num_bytes - 1;
lock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
- mutex_lock(&root->fs_info->fs_mutex);
- trans = btrfs_start_transaction(root, 1);
+ trans = btrfs_join_transaction(root, 1);
if (!trans) {
err = -ENOMEM;
goto out_unlock;
/* FIXME...EIEIO, ENOSPC and more */
/* insert any holes we need to create */
- if (isize < end_pos) {
+ if (isize < start_pos) {
u64 last_pos_in_file;
u64 hole_size;
u64 mask = root->sectorsize - 1;
last_pos_in_file = (isize + mask) & ~mask;
- hole_size = (end_pos - last_pos_in_file + mask) & ~mask;
- if (last_pos_in_file < end_pos) {
+ hole_size = (start_pos - last_pos_in_file + mask) & ~mask;
+ if (hole_size > 0) {
+ btrfs_wait_ordered_range(inode, last_pos_in_file,
+ last_pos_in_file + hole_size);
+ mutex_lock(&BTRFS_I(inode)->extent_mutex);
err = btrfs_drop_extents(trans, root, inode,
last_pos_in_file,
last_pos_in_file + hole_size,
0, 0, hole_size, 0);
btrfs_drop_extent_cache(inode, last_pos_in_file,
last_pos_in_file + hole_size -1);
+ mutex_unlock(&BTRFS_I(inode)->extent_mutex);
btrfs_check_file(root, inode);
}
if (err)
inline_size > root->fs_info->max_inline ||
(inline_size & (root->sectorsize -1)) == 0 ||
inline_size >= BTRFS_MAX_INLINE_DATA_SIZE(root)) {
- u64 last_end;
- u64 existing_delalloc = 0;
-
+ /* check for reserved extents on each page, we don't want
+ * to reset the delalloc bit on things that already have
+ * extents reserved.
+ */
+ set_extent_delalloc(io_tree, start_pos,
+ end_of_last_block, GFP_NOFS);
for (i = 0; i < num_pages; i++) {
struct page *p = pages[i];
SetPageUptodate(p);
+ ClearPageChecked(p);
set_page_dirty(p);
}
- last_end = (u64)(pages[num_pages -1]->index) <<
- PAGE_CACHE_SHIFT;
- last_end += PAGE_CACHE_SIZE - 1;
- if (start_pos < isize) {
- u64 delalloc_start = start_pos;
- existing_delalloc = count_range_bits(io_tree,
- &delalloc_start,
- end_of_last_block, (u64)-1,
- EXTENT_DELALLOC);
- }
- set_extent_delalloc(io_tree, start_pos, end_of_last_block,
- GFP_NOFS);
- btrfs_add_ordered_inode(inode);
} else {
u64 aligned_end;
/* step one, delete the existing extents in this range */
aligned_end = (pos + write_bytes + root->sectorsize - 1) &
~((u64)root->sectorsize - 1);
+ mutex_lock(&BTRFS_I(inode)->extent_mutex);
err = btrfs_drop_extents(trans, root, inode, start_pos,
aligned_end, aligned_end, &hint_byte);
if (err)
inline_size, pages, 0, num_pages);
btrfs_drop_extent_cache(inode, start_pos, aligned_end - 1);
BUG_ON(err);
+ mutex_unlock(&BTRFS_I(inode)->extent_mutex);
+ did_inline = 1;
}
if (end_pos > isize) {
i_size_write(inode, end_pos);
+ if (did_inline)
+ BTRFS_I(inode)->disk_i_size = end_pos;
btrfs_update_inode(trans, root, inode);
}
failed:
err = btrfs_end_transaction(trans, root);
out_unlock:
- mutex_unlock(&root->fs_info->fs_mutex);
unlock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
return err;
}
int ret;
int testend = 1;
+ WARN_ON(end < start);
if (end == (u64)-1) {
len = (u64)-1;
testend = 0;
spin_unlock(&em_tree->lock);
break;
}
+ if (test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
+ printk(KERN_CRIT "inode %lu trying to drop pinned "
+ "extent start %llu end %llu, em [%llu %llu]\n",
+ inode->i_ino,
+ (unsigned long long)start,
+ (unsigned long long)end,
+ (unsigned long long)em->start,
+ (unsigned long long)em->len);
+ }
remove_extent_mapping(em_tree, em);
if (em->block_start < EXTENT_MAP_LAST_BYTE &&
struct inode *inode = fdentry(file)->d_inode;
int err = 0;
u64 start_pos;
+ u64 last_pos;
start_pos = pos & ~((u64)root->sectorsize - 1);
+ last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;
memset(pages, 0, num_pages * sizeof(struct page *));
-
+again:
for (i = 0; i < num_pages; i++) {
pages[i] = grab_cache_page(inode->i_mapping, index + i);
if (!pages[i]) {
err = -ENOMEM;
BUG_ON(1);
}
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
- ClearPageDirty(pages[i]);
-#else
- cancel_dirty_page(pages[i], PAGE_CACHE_SIZE);
-#endif
wait_on_page_writeback(pages[i]);
- set_page_extent_mapped(pages[i]);
- WARN_ON(!PageLocked(pages[i]));
}
if (start_pos < inode->i_size) {
- u64 last_pos;
- last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;
+ struct btrfs_ordered_extent *ordered;
lock_extent(&BTRFS_I(inode)->io_tree,
start_pos, last_pos - 1, GFP_NOFS);
+ ordered = btrfs_lookup_first_ordered_extent(inode, last_pos -1);
+ if (ordered &&
+ ordered->file_offset + ordered->len > start_pos &&
+ ordered->file_offset < last_pos) {
+ btrfs_put_ordered_extent(ordered);
+ unlock_extent(&BTRFS_I(inode)->io_tree,
+ start_pos, last_pos - 1, GFP_NOFS);
+ for (i = 0; i < num_pages; i++) {
+ unlock_page(pages[i]);
+ page_cache_release(pages[i]);
+ }
+ btrfs_wait_ordered_range(inode, start_pos,
+ last_pos - start_pos);
+ goto again;
+ }
+ if (ordered)
+ btrfs_put_ordered_extent(ordered);
+
clear_extent_bits(&BTRFS_I(inode)->io_tree, start_pos,
last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC,
GFP_NOFS);
unlock_extent(&BTRFS_I(inode)->io_tree,
start_pos, last_pos - 1, GFP_NOFS);
}
+ for (i = 0; i < num_pages; i++) {
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
+ ClearPageDirty(pages[i]);
+#else
+ cancel_dirty_page(pages[i], PAGE_CACHE_SIZE);
+#endif
+ set_page_extent_mapped(pages[i]);
+ WARN_ON(!PageLocked(pages[i]));
+ }
return 0;
}
#ifdef REMOVE_SUID_PATH
err = remove_suid(&file->f_path);
#else
+# if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
+ err = file_remove_suid(file);
+# else
err = remove_suid(fdentry(file));
+# endif
#endif
if (err)
goto out_nolock;
WARN_ON(num_pages > nrptrs);
memset(pages, 0, sizeof(pages));
- mutex_lock(&root->fs_info->fs_mutex);
ret = btrfs_check_free_space(root, write_bytes, 0);
- mutex_unlock(&root->fs_info->fs_mutex);
if (ret)
goto out;
(start_pos + num_written - 1) >> PAGE_CACHE_SHIFT);
}
current->backing_dev_info = NULL;
- btrfs_ordered_throttle(root, inode);
return num_written ? num_written : err;
}
+/*
+ * ->release callback for btrfs regular files (wired up as .release in the
+ * file_operations table below).
+ *
+ * If filp->private_data is set — presumably by a userspace-started
+ * transaction ioctl, TODO confirm against the ioctl code — end that
+ * transaction via btrfs_ioctl_trans_end() so it is not leaked when the
+ * last reference to the file is dropped.  Mirrors the same check done in
+ * btrfs_sync_file() before committing.  Always returns 0.
+ */
+int btrfs_release_file(struct inode * inode, struct file * filp)
+{
+	if (filp->private_data)
+		btrfs_ioctl_trans_end(filp);
+	return 0;
+}
+
static int btrfs_sync_file(struct file *file,
struct dentry *dentry, int datasync)
{
* check the transaction that last modified this inode
* and see if its already been committed
*/
- mutex_lock(&root->fs_info->fs_mutex);
if (!BTRFS_I(inode)->last_trans)
goto out;
+
mutex_lock(&root->fs_info->trans_mutex);
if (BTRFS_I(inode)->last_trans <=
root->fs_info->last_trans_committed) {
/*
* ok we haven't committed the transaction yet, lets do a commit
*/
+ if (file->private_data)
+ btrfs_ioctl_trans_end(file);
+
trans = btrfs_start_transaction(root, 1);
if (!trans) {
ret = -ENOMEM;
}
ret = btrfs_commit_transaction(trans, root);
out:
- mutex_unlock(&root->fs_info->fs_mutex);
return ret > 0 ? EIO : ret;
}
.write = btrfs_file_write,
.mmap = btrfs_file_mmap,
.open = generic_file_open,
+ .release = btrfs_release_file,
.fsync = btrfs_sync_file,
.unlocked_ioctl = btrfs_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = btrfs_ioctl,
#endif
};
-