Btrfs: Throttle tuning
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 5d537f26dc832557bb4df79e3d9cd4e579b2cd5f..d3f2fe0b7c6cef6d8ca1466d8b7e7156f1a40998 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -34,9 +34,9 @@
 #include "disk-io.h"
 #include "transaction.h"
 #include "btrfs_inode.h"
-#include "ordered-data.h"
 #include "ioctl.h"
 #include "print-tree.h"
+#include "compat.h"
 
 
 static int btrfs_copy_from_user(loff_t pos, int num_pages, int write_bytes,
@@ -75,6 +75,7 @@ static void btrfs_drop_pages(struct page **pages, size_t num_pages)
        for (i = 0; i < num_pages; i++) {
                if (!pages[i])
                        break;
+               ClearPageChecked(pages[i]);
                unlock_page(pages[i]);
                mark_page_accessed(pages[i]);
                page_cache_release(pages[i]);
@@ -242,6 +243,7 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
        u64 end_of_last_block;
        u64 end_pos = pos + write_bytes;
        u64 inline_size;
+       int did_inline = 0;
        loff_t isize = i_size_read(inode);
 
        start_pos = pos & ~((u64)root->sectorsize - 1);
@@ -251,8 +253,7 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
        end_of_last_block = start_pos + num_bytes - 1;
 
        lock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
-       mutex_lock(&root->fs_info->fs_mutex);
-       trans = btrfs_start_transaction(root, 1);
+       trans = btrfs_join_transaction(root, 1);
        if (!trans) {
                err = -ENOMEM;
                goto out_unlock;
@@ -267,13 +268,16 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
 
        /* FIXME...EIEIO, ENOSPC and more */
        /* insert any holes we need to create */
-       if (isize < end_pos) {
+       if (isize < start_pos) {
                u64 last_pos_in_file;
                u64 hole_size;
                u64 mask = root->sectorsize - 1;
                last_pos_in_file = (isize + mask) & ~mask;
-               hole_size = (end_pos - last_pos_in_file + mask) & ~mask;
-               if (last_pos_in_file < end_pos) {
+               hole_size = (start_pos - last_pos_in_file + mask) & ~mask;
+               if (hole_size > 0) {
+                       btrfs_wait_ordered_range(inode, last_pos_in_file,
+                                                last_pos_in_file + hole_size);
+                       mutex_lock(&BTRFS_I(inode)->extent_mutex);
                        err = btrfs_drop_extents(trans, root, inode,
                                                 last_pos_in_file,
                                                 last_pos_in_file + hole_size,
@@ -285,9 +289,10 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
                        err = btrfs_insert_file_extent(trans, root,
                                                       inode->i_ino,
                                                       last_pos_in_file,
-                                                      0, 0, hole_size);
+                                                      0, 0, hole_size, 0);
                        btrfs_drop_extent_cache(inode, last_pos_in_file,
                                        last_pos_in_file + hole_size -1);
+                       mutex_unlock(&BTRFS_I(inode)->extent_mutex);
                        btrfs_check_file(root, inode);
                }
                if (err)
@@ -303,32 +308,24 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
            inline_size > root->fs_info->max_inline ||
            (inline_size & (root->sectorsize -1)) == 0 ||
            inline_size >= BTRFS_MAX_INLINE_DATA_SIZE(root)) {
-               u64 last_end;
-               u64 existing_delalloc = 0;
-
+               /* check for reserved extents on each page, we don't want
+                * to reset the delalloc bit on things that already have
+                * extents reserved.
+                */
+               set_extent_delalloc(io_tree, start_pos,
+                                   end_of_last_block, GFP_NOFS);
                for (i = 0; i < num_pages; i++) {
                        struct page *p = pages[i];
                        SetPageUptodate(p);
+                       ClearPageChecked(p);
                        set_page_dirty(p);
                }
-               last_end = (u64)(pages[num_pages -1]->index) <<
-                               PAGE_CACHE_SHIFT;
-               last_end += PAGE_CACHE_SIZE - 1;
-               if (start_pos < isize) {
-                       u64 delalloc_start = start_pos;
-                       existing_delalloc = count_range_bits(io_tree,
-                                            &delalloc_start,
-                                            end_of_last_block, (u64)-1,
-                                            EXTENT_DELALLOC);
-               }
-               set_extent_delalloc(io_tree, start_pos, end_of_last_block,
-                                GFP_NOFS);
-               btrfs_add_ordered_inode(inode);
        } else {
                u64 aligned_end;
                /* step one, delete the existing extents in this range */
                aligned_end = (pos + write_bytes + root->sectorsize - 1) &
                        ~((u64)root->sectorsize - 1);
+               mutex_lock(&BTRFS_I(inode)->extent_mutex);
                err = btrfs_drop_extents(trans, root, inode, start_pos,
                                         aligned_end, aligned_end, &hint_byte);
                if (err)
@@ -340,15 +337,18 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
                                           inline_size, pages, 0, num_pages);
                btrfs_drop_extent_cache(inode, start_pos, aligned_end - 1);
                BUG_ON(err);
+               mutex_unlock(&BTRFS_I(inode)->extent_mutex);
+               did_inline = 1;
        }
        if (end_pos > isize) {
                i_size_write(inode, end_pos);
+               if (did_inline)
+                       BTRFS_I(inode)->disk_i_size = end_pos;
                btrfs_update_inode(trans, root, inode);
        }
 failed:
        err = btrfs_end_transaction(trans, root);
 out_unlock:
-       mutex_unlock(&root->fs_info->fs_mutex);
        unlock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
        return err;
 }
@@ -363,6 +363,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end)
        int ret;
        int testend = 1;
 
+       WARN_ON(end < start);
        if (end == (u64)-1) {
                len = (u64)-1;
                testend = 0;
@@ -379,6 +380,15 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end)
                        spin_unlock(&em_tree->lock);
                        break;
                }
+               if (test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
+                       printk(KERN_CRIT "inode %lu trying to drop pinned "
+                              "extent start %llu end %llu, em [%llu %llu]\n",
+                              inode->i_ino,
+                              (unsigned long long)start,
+                              (unsigned long long)end,
+                              (unsigned long long)em->start,
+                              (unsigned long long)em->len);
+               }
                remove_extent_mapping(em_tree, em);
 
                if (em->block_start < EXTENT_MAP_LAST_BYTE &&
@@ -787,37 +797,58 @@ static int prepare_pages(struct btrfs_root *root, struct file *file,
        struct inode *inode = fdentry(file)->d_inode;
        int err = 0;
        u64 start_pos;
+       u64 last_pos;
 
        start_pos = pos & ~((u64)root->sectorsize - 1);
+       last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;
 
        memset(pages, 0, num_pages * sizeof(struct page *));
-
+again:
        for (i = 0; i < num_pages; i++) {
                pages[i] = grab_cache_page(inode->i_mapping, index + i);
                if (!pages[i]) {
                        err = -ENOMEM;
                        BUG_ON(1);
                }
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
-               ClearPageDirty(pages[i]);
-#else
-               cancel_dirty_page(pages[i], PAGE_CACHE_SIZE);
-#endif
                wait_on_page_writeback(pages[i]);
-               set_page_extent_mapped(pages[i]);
-               WARN_ON(!PageLocked(pages[i]));
        }
        if (start_pos < inode->i_size) {
-               u64 last_pos;
-               last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;
+               struct btrfs_ordered_extent *ordered;
                lock_extent(&BTRFS_I(inode)->io_tree,
                            start_pos, last_pos - 1, GFP_NOFS);
+               ordered = btrfs_lookup_first_ordered_extent(inode, last_pos -1);
+               if (ordered &&
+                   ordered->file_offset + ordered->len > start_pos &&
+                   ordered->file_offset < last_pos) {
+                       btrfs_put_ordered_extent(ordered);
+                       unlock_extent(&BTRFS_I(inode)->io_tree,
+                                     start_pos, last_pos - 1, GFP_NOFS);
+                       for (i = 0; i < num_pages; i++) {
+                               unlock_page(pages[i]);
+                               page_cache_release(pages[i]);
+                       }
+                       btrfs_wait_ordered_range(inode, start_pos,
+                                                last_pos - start_pos);
+                       goto again;
+               }
+               if (ordered)
+                       btrfs_put_ordered_extent(ordered);
+
                clear_extent_bits(&BTRFS_I(inode)->io_tree, start_pos,
                                  last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC,
                                  GFP_NOFS);
                unlock_extent(&BTRFS_I(inode)->io_tree,
                              start_pos, last_pos - 1, GFP_NOFS);
        }
+       for (i = 0; i < num_pages; i++) {
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
+               ClearPageDirty(pages[i]);
+#else
+               cancel_dirty_page(pages[i], PAGE_CACHE_SIZE);
+#endif
+               set_page_extent_mapped(pages[i]);
+               WARN_ON(!PageLocked(pages[i]));
+       }
        return 0;
 }
 
@@ -852,7 +883,15 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
                goto out_nolock;
        if (count == 0)
                goto out_nolock;
+#ifdef REMOVE_SUID_PATH
+       err = remove_suid(&file->f_path);
+#else
+# if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
+       err = file_remove_suid(file);
+# else
        err = remove_suid(fdentry(file));
+# endif
+#endif
        if (err)
                goto out_nolock;
        file_update_time(file);
@@ -908,9 +947,7 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
                WARN_ON(num_pages > nrptrs);
                memset(pages, 0, sizeof(pages));
 
-               mutex_lock(&root->fs_info->fs_mutex);
                ret = btrfs_check_free_space(root, write_bytes, 0);
-               mutex_unlock(&root->fs_info->fs_mutex);
                if (ret)
                        goto out;
 
@@ -961,7 +998,7 @@ out_nolock:
                if (err < 0)
                        num_written = err;
        } else if (num_written > 0 && (file->f_flags & O_DIRECT)) {
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
                do_sync_file_range(file, start_pos,
                                      start_pos + num_written - 1,
                                      SYNC_FILE_RANGE_WRITE |
@@ -980,6 +1017,13 @@ out_nolock:
        return num_written ? num_written : err;
 }
 
+int btrfs_release_file(struct inode * inode, struct file * filp)
+{
+       if (filp->private_data)
+               btrfs_ioctl_trans_end(filp);
+       return 0;
+}
+
 static int btrfs_sync_file(struct file *file,
                           struct dentry *dentry, int datasync)
 {
@@ -992,9 +1036,9 @@ static int btrfs_sync_file(struct file *file,
         * check the transaction that last modified this inode
         * and see if its already been committed
         */
-       mutex_lock(&root->fs_info->fs_mutex);
        if (!BTRFS_I(inode)->last_trans)
                goto out;
+
        mutex_lock(&root->fs_info->trans_mutex);
        if (BTRFS_I(inode)->last_trans <=
            root->fs_info->last_trans_committed) {
@@ -1007,6 +1051,9 @@ static int btrfs_sync_file(struct file *file,
        /*
         * ok we haven't committed the transaction yet, lets do a commit
         */
+       if (file->private_data)
+               btrfs_ioctl_trans_end(file);
+
        trans = btrfs_start_transaction(root, 1);
        if (!trans) {
                ret = -ENOMEM;
@@ -1014,7 +1061,6 @@ static int btrfs_sync_file(struct file *file,
        }
        ret = btrfs_commit_transaction(trans, root);
 out:
-       mutex_unlock(&root->fs_info->fs_mutex);
        return ret > 0 ? EIO : ret;
 }
 
@@ -1046,10 +1092,10 @@ struct file_operations btrfs_file_operations = {
        .write          = btrfs_file_write,
        .mmap           = btrfs_file_mmap,
        .open           = generic_file_open,
+       .release        = btrfs_release_file,
        .fsync          = btrfs_sync_file,
        .unlocked_ioctl = btrfs_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl   = btrfs_ioctl,
 #endif
 };
-