#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
-#include "ordered-data.h"
#include "ioctl.h"
#include "print-tree.h"
+#include "compat.h"
static int btrfs_copy_from_user(loff_t pos, int num_pages, int write_bytes,
for (i = 0; i < num_pages; i++) {
if (!pages[i])
break;
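+ /* clear the checked bit: it tells the writepage fixup hook that
+  * delalloc accounting was already done for this page
+  */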
+ ClearPageChecked(pages[i]);
unlock_page(pages[i]);
mark_page_accessed(pages[i]);
page_cache_release(pages[i]);
u64 end_of_last_block;
u64 end_pos = pos + write_bytes;
u64 inline_size;
+ int did_inline = 0;
loff_t isize = i_size_read(inode);
start_pos = pos & ~((u64)root->sectorsize - 1);
end_of_last_block = start_pos + num_bytes - 1;
lock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
- mutex_lock(&root->fs_info->fs_mutex);
- trans = btrfs_start_transaction(root, 1);
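+ /* join any transaction that is already running instead of starting
+  * a fresh one; this write path may run while a commit is in progress
+  */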
+ trans = btrfs_join_transaction(root, 1);
if (!trans) {
err = -ENOMEM;
goto out_unlock;
/* FIXME...EIO, ENOSPC and more */
/* insert any holes we need to create */
- if (isize < end_pos) {
+ if (isize < start_pos) {
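+ /* the write begins past the old eof; everything between the
+  * sector-aligned eof and start_pos must become an explicit hole
+  */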
u64 last_pos_in_file;
u64 hole_size;
u64 mask = root->sectorsize - 1;
last_pos_in_file = (isize + mask) & ~mask;
- hole_size = (end_pos - last_pos_in_file + mask) & ~mask;
- if (last_pos_in_file < end_pos) {
+ hole_size = (start_pos - last_pos_in_file + mask) & ~mask;
+ if (hole_size > 0) {
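+ /* wait for any ordered io covering the hole so dropping and
+  * re-inserting extents can't race with a pending writeback
+  */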
+ btrfs_wait_ordered_range(inode, last_pos_in_file,
+ last_pos_in_file + hole_size);
+ mutex_lock(&BTRFS_I(inode)->extent_mutex);
err = btrfs_drop_extents(trans, root, inode,
last_pos_in_file,
last_pos_in_file + hole_size,
err = btrfs_insert_file_extent(trans, root,
inode->i_ino,
last_pos_in_file,
- 0, 0, hole_size);
+ 0, 0, hole_size, 0);
btrfs_drop_extent_cache(inode, last_pos_in_file,
last_pos_in_file + hole_size - 1);
+ mutex_unlock(&BTRFS_I(inode)->extent_mutex);
btrfs_check_file(root, inode);
}
if (err)
inline_size > root->fs_info->max_inline ||
(inline_size & (root->sectorsize - 1)) == 0 ||
inline_size >= BTRFS_MAX_INLINE_DATA_SIZE(root)) {
- u64 last_end;
- u64 existing_delalloc = 0;
-
+ /* check for reserved extents on each page; we don't want
+ * to reset the delalloc bit on things that already have
+ * extents reserved.
+ */
+ set_extent_delalloc(io_tree, start_pos,
+ end_of_last_block, GFP_NOFS);
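+ /* dirty the pages so writeback finds the delalloc range and
+  * allocates real extents for it later
+  */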
for (i = 0; i < num_pages; i++) {
struct page *p = pages[i];
SetPageUptodate(p);
+ ClearPageChecked(p);
set_page_dirty(p);
}
- last_end = (u64)(pages[num_pages -1]->index) <<
- PAGE_CACHE_SHIFT;
- last_end += PAGE_CACHE_SIZE - 1;
- if (start_pos < isize) {
- u64 delalloc_start = start_pos;
- existing_delalloc = count_range_bits(io_tree,
- &delalloc_start,
- end_of_last_block, (u64)-1,
- EXTENT_DELALLOC);
- }
- set_extent_delalloc(io_tree, start_pos, end_of_last_block,
- GFP_NOFS);
- btrfs_add_ordered_inode(inode);
} else {
u64 aligned_end;
/* step one, delete the existing extents in this range */
aligned_end = (pos + write_bytes + root->sectorsize - 1) &
~((u64)root->sectorsize - 1);
+ mutex_lock(&BTRFS_I(inode)->extent_mutex);
err = btrfs_drop_extents(trans, root, inode, start_pos,
aligned_end, aligned_end, &hint_byte);
if (err)
inline_size, pages, 0, num_pages);
btrfs_drop_extent_cache(inode, start_pos, aligned_end - 1);
BUG_ON(err);
+ mutex_unlock(&BTRFS_I(inode)->extent_mutex);
+ did_inline = 1;
}
if (end_pos > isize) {
i_size_write(inode, end_pos);
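+ /* inline data was written straight into the btree above, so the
+  * on-disk i_size can move forward now; ordered writes update it
+  * when the io finishes
+  */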
+ if (did_inline)
+ BTRFS_I(inode)->disk_i_size = end_pos;
btrfs_update_inode(trans, root, inode);
}
failed:
err = btrfs_end_transaction(trans, root);
out_unlock:
- mutex_unlock(&root->fs_info->fs_mutex);
unlock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
return err;
}
int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end)
{
struct extent_map *em;
+ struct extent_map *split = NULL;
+ struct extent_map *split2 = NULL;
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
u64 len = end - start + 1;
+ int ret;
+ int testend = 1;
- if (end == (u64)-1)
+ WARN_ON(end < start);
+ if (end == (u64)-1) {
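+ /* an end of -1 means drop everything to eof; start + len would
+  * wrap, so skip the tail split test below
+  */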
len = (u64)-1;
+ testend = 0;
+ }
while(1) {
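+ /* allocate the split maps up front; the tree is protected by a
+  * spinlock and GFP_NOFS allocations may sleep
+  */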
+ if (!split)
+ split = alloc_extent_map(GFP_NOFS);
+ if (!split2)
+ split2 = alloc_extent_map(GFP_NOFS);
+
spin_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, start, len);
if (!em) {
spin_unlock(&em_tree->lock);
break;
}
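+ /* a pinned extent map belongs to ordered io that hasn't finished;
+  * dropping it here means we raced, so complain loudly
+  */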
+ if (test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
+ printk(KERN_CRIT "inode %lu trying to drop pinned "
+ "extent start %llu end %llu, em [%llu %llu]\n",
+ inode->i_ino,
+ (unsigned long long)start,
+ (unsigned long long)end,
+ (unsigned long long)em->start,
+ (unsigned long long)em->len);
+ }
remove_extent_mapping(em_tree, em);
+
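+ /* the dropped range may sit in the middle of this mapping; put back
+  * the piece before start and the piece after start + len
+  */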
+ if (em->block_start < EXTENT_MAP_LAST_BYTE &&
+ em->start < start) {
+ split->start = em->start;
+ split->len = start - em->start;
+ split->block_start = em->block_start;
+ split->bdev = em->bdev;
+ split->flags = em->flags;
+ ret = add_extent_mapping(em_tree, split);
+ BUG_ON(ret);
+ free_extent_map(split);
+ split = split2;
+ split2 = NULL;
+ }
+ if (em->block_start < EXTENT_MAP_LAST_BYTE &&
+ testend && em->start + em->len > start + len) {
+ u64 diff = start + len - em->start;
+
+ split->start = start + len;
+ split->len = em->start + em->len - (start + len);
+ split->bdev = em->bdev;
+ split->flags = em->flags;
+
+ split->block_start = em->block_start + diff;
+
+ ret = add_extent_mapping(em_tree, split);
+ BUG_ON(ret);
+ free_extent_map(split);
+ split = NULL;
+ }
spin_unlock(&em_tree->lock);
/* once for us */
free_extent_map(em);
/* once for the tree */
free_extent_map(em);
}
+ if (split)
+ free_extent_map(split);
+ if (split2)
+ free_extent_map(split2);
return 0;
}
struct inode *inode = fdentry(file)->d_inode;
int err = 0;
u64 start_pos;
+ u64 last_pos;
start_pos = pos & ~((u64)root->sectorsize - 1);
+ last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;
memset(pages, 0, num_pages * sizeof(struct page *));
-
+again:
for (i = 0; i < num_pages; i++) {
pages[i] = grab_cache_page(inode->i_mapping, index + i);
if (!pages[i]) {
err = -ENOMEM;
BUG_ON(1);
}
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
- ClearPageDirty(pages[i]);
-#else
- cancel_dirty_page(pages[i], PAGE_CACHE_SIZE);
-#endif
wait_on_page_writeback(pages[i]);
- set_page_extent_mapped(pages[i]);
- WARN_ON(!PageLocked(pages[i]));
}
if (start_pos < inode->i_size) {
- u64 last_pos;
- last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;
+ struct btrfs_ordered_extent *ordered;
lock_extent(&BTRFS_I(inode)->io_tree,
start_pos, last_pos - 1, GFP_NOFS);
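+ /* an overlapping ordered extent means writeback still owns part of
+  * this range; drop our locks, wait it out and retry
+  */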
+ ordered = btrfs_lookup_first_ordered_extent(inode, last_pos - 1);
+ if (ordered &&
+ ordered->file_offset + ordered->len > start_pos &&
+ ordered->file_offset < last_pos) {
+ btrfs_put_ordered_extent(ordered);
+ unlock_extent(&BTRFS_I(inode)->io_tree,
+ start_pos, last_pos - 1, GFP_NOFS);
+ for (i = 0; i < num_pages; i++) {
+ unlock_page(pages[i]);
+ page_cache_release(pages[i]);
+ }
+ btrfs_wait_ordered_range(inode, start_pos,
+ last_pos - start_pos);
+ goto again;
+ }
+ if (ordered)
+ btrfs_put_ordered_extent(ordered);
+
clear_extent_bits(&BTRFS_I(inode)->io_tree, start_pos,
last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC,
GFP_NOFS);
unlock_extent(&BTRFS_I(inode)->io_tree,
start_pos, last_pos - 1, GFP_NOFS);
}
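+ /* no ordered io overlaps the range anymore, so it is safe to strip
+  * the old dirty state from the pages before they are rewritten
+  */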
+ for (i = 0; i < num_pages; i++) {
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
+ ClearPageDirty(pages[i]);
+#else
+ cancel_dirty_page(pages[i], PAGE_CACHE_SIZE);
+#endif
+ set_page_extent_mapped(pages[i]);
+ WARN_ON(!PageLocked(pages[i]));
+ }
return 0;
}
PAGE_CACHE_SIZE / (sizeof(struct page *)));
pinned[0] = NULL;
pinned[1] = NULL;
- if (file->f_flags & O_DIRECT)
- return -EINVAL;
pos = *ppos;
start_pos = pos;
goto out_nolock;
if (count == 0)
goto out_nolock;
+#ifdef REMOVE_SUID_PATH
+ err = remove_suid(&file->f_path);
+#else
+# if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
+ err = file_remove_suid(file);
+# else
err = remove_suid(fdentry(file));
+# endif
+#endif
if (err)
goto out_nolock;
file_update_time(file);
first_index = pos >> PAGE_CACHE_SHIFT;
last_index = (pos + count) >> PAGE_CACHE_SHIFT;
+ /*
+ * if this is a nodatasum mount, force summing off for the inode
+ * all the time. That way a later mount with summing on won't
+ * get confused
+ */
+ if (btrfs_test_opt(root, NODATASUM))
+ btrfs_set_flag(inode, NODATASUM);
+
/*
* there are lots of better ways to do this, but this code
* makes sure the first and last page in the file range are
WARN_ON(num_pages > nrptrs);
memset(pages, 0, num_pages * sizeof(struct page *));
- mutex_lock(&root->fs_info->fs_mutex);
ret = btrfs_check_free_space(root, write_bytes, 0);
- mutex_unlock(&root->fs_info->fs_mutex);
if (ret)
goto out;
start_pos, num_written);
if (err < 0)
num_written = err;
+ } else if (num_written > 0 && (file->f_flags & O_DIRECT)) {
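+ /* O_DIRECT is emulated: push the buffered data to disk and drop it
+  * from the page cache to approximate direct semantics
+  */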
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
+ do_sync_file_range(file, start_pos,
+ start_pos + num_written - 1,
+ SYNC_FILE_RANGE_WRITE |
+ SYNC_FILE_RANGE_WAIT_AFTER);
+#else
+ do_sync_mapping_range(inode->i_mapping, start_pos,
+ start_pos + num_written - 1,
+ SYNC_FILE_RANGE_WRITE |
+ SYNC_FILE_RANGE_WAIT_AFTER);
+#endif
+ invalidate_mapping_pages(inode->i_mapping,
+ start_pos >> PAGE_CACHE_SHIFT,
+ (start_pos + num_written - 1) >> PAGE_CACHE_SHIFT);
}
current->backing_dev_info = NULL;
return num_written ? num_written : err;
}
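+/*
+ * if userspace started a transaction with the trans ioctl and never
+ * ended it, clean it up on release so it can't outlive the file
+ */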
+int btrfs_release_file(struct inode *inode, struct file *filp)
+{
+ if (filp->private_data)
+ btrfs_ioctl_trans_end(filp);
+ return 0;
+}
+
static int btrfs_sync_file(struct file *file,
struct dentry *dentry, int datasync)
{
* check the transaction that last modified this inode
* and see if its already been committed
*/
- mutex_lock(&root->fs_info->fs_mutex);
if (!BTRFS_I(inode)->last_trans)
goto out;
+
mutex_lock(&root->fs_info->trans_mutex);
if (BTRFS_I(inode)->last_trans <=
root->fs_info->last_trans_committed) {
/*
* ok we haven't committed the transaction yet, let's do a commit
*/
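+ /* a transaction held open through the trans ioctl would stall the
+  * commit below; end it before committing
+  */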
+ if (file->private_data)
+ btrfs_ioctl_trans_end(file);
+
trans = btrfs_start_transaction(root, 1);
if (!trans) {
ret = -ENOMEM;
}
ret = btrfs_commit_transaction(trans, root);
out:
- mutex_unlock(&root->fs_info->fs_mutex);
return ret > 0 ? -EIO : ret;
}
.write = btrfs_file_write,
.mmap = btrfs_file_mmap,
.open = generic_file_open,
+ .release = btrfs_release_file,
.fsync = btrfs_sync_file,
.unlocked_ioctl = btrfs_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = btrfs_ioctl,
#endif
};
-