diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 806708dd7e3..3cee77ae03c 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -34,10 +34,10 @@ #include #include #include -#include #include #include #include +#include "compat.h" #include "ctree.h" #include "disk-io.h" #include "transaction.h" @@ -47,10 +47,10 @@ #include "volumes.h" #include "ordered-data.h" #include "xattr.h" -#include "compat.h" #include "tree-log.h" #include "ref-cache.h" #include "compression.h" +#include "locking.h" struct btrfs_iget_args { u64 ino; @@ -86,6 +86,20 @@ static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = { static void btrfs_truncate(struct inode *inode); static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end); +static noinline int cow_file_range(struct inode *inode, + struct page *locked_page, + u64 start, u64 end, int *page_started, + unsigned long *nr_written, int unlock); + +static int btrfs_init_inode_security(struct inode *inode, struct inode *dir) +{ + int err; + + err = btrfs_init_acl(inode, dir); + if (!err) + err = btrfs_xattr_security_init(inode, dir); + return err; +} /* * a very lame attempt at stopping writes when the FS is 85% full. There @@ -97,10 +111,9 @@ int btrfs_check_free_space(struct btrfs_root *root, u64 num_required, u64 total; u64 used; u64 thresh; - unsigned long flags; int ret = 0; - spin_lock_irqsave(&root->fs_info->delalloc_lock, flags); + spin_lock(&root->fs_info->delalloc_lock); total = btrfs_super_total_bytes(&root->fs_info->super_copy); used = btrfs_super_bytes_used(&root->fs_info->super_copy); if (for_del) @@ -112,7 +125,7 @@ int btrfs_check_free_space(struct btrfs_root *root, u64 num_required, if (used + root->fs_info->delalloc_bytes + num_required > thresh) ret = -ENOSPC; - spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags); + spin_unlock(&root->fs_info->delalloc_lock); return ret; } @@ -121,7 +134,7 @@ int btrfs_check_free_space(struct btrfs_root *root, u64 num_required, * the btree.
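The two hunks above drop spin_lock_irqsave() in favour of plain spin_lock() on fs_info->delalloc_lock, which is safe once the lock is never taken from interrupt context. The 85%-full heuristic itself survives, but the hunk elides the computation of thresh. Below is a minimal userspace sketch of that arithmetic; the 85/90 split between writes and deletes is an assumption filled in for illustration, not text from this hunk.

#include <stdint.h>
#include <stdio.h>

/* stand-ins for the superblock counters read under delalloc_lock */
static int check_free_space(uint64_t total, uint64_t used,
                            uint64_t delalloc, uint64_t num_required,
                            int for_del)
{
        /* assumed percentages; the hunk above elides this computation */
        uint64_t thresh = for_del ? total / 100 * 90 : total / 100 * 85;

        if (used + delalloc + num_required > thresh)
                return -28;     /* what -ENOSPC boils down to */
        return 0;
}

int main(void)
{
        uint64_t gib = 1024ULL * 1024 * 1024;

        /* 100 GiB fs, 80 GiB used, 10 GiB delalloc: a 1 GiB write trips it */
        printf("ret = %d\n",
               check_free_space(100 * gib, 80 * gib, 10 * gib, gib, 0));
        return 0;
}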
The caller should have done a btrfs_drop_extents so that * no overlapping inline items exist in the btree */ -static int noinline insert_inline_extent(struct btrfs_trans_handle *trans, +static noinline int insert_inline_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode, u64 start, size_t size, size_t compressed_size, struct page **compressed_pages) @@ -145,7 +158,8 @@ static int noinline insert_inline_extent(struct btrfs_trans_handle *trans, cur_size = compressed_size; } - path = btrfs_alloc_path(); if (!path) + path = btrfs_alloc_path(); + if (!path) return -ENOMEM; btrfs_set_trans_block_group(trans, inode); @@ -153,7 +167,6 @@ static int noinline insert_inline_extent(struct btrfs_trans_handle *trans, key.objectid = inode->i_ino; key.offset = start; btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY); - inode_add_bytes(inode, size); datasize = btrfs_file_extent_calc_inline_size(cur_size); inode_add_bytes(inode, size); @@ -162,7 +175,6 @@ static int noinline insert_inline_extent(struct btrfs_trans_handle *trans, BUG_ON(ret); if (ret) { err = ret; - printk("got bad ret %d\n", ret); goto fail; } leaf = path->nodes[0]; @@ -178,9 +190,9 @@ static int noinline insert_inline_extent(struct btrfs_trans_handle *trans, if (use_compress) { struct page *cpage; int i = 0; - while(compressed_size > 0) { + while (compressed_size > 0) { cpage = compressed_pages[i]; - cur_size = min(compressed_size, + cur_size = min_t(unsigned long, compressed_size, PAGE_CACHE_SIZE); kaddr = kmap(cpage); @@ -262,35 +274,73 @@ static int cow_file_range_inline(struct btrfs_trans_handle *trans, return 0; } +struct async_extent { + u64 start; + u64 ram_size; + u64 compressed_size; + struct page **pages; + unsigned long nr_pages; + struct list_head list; +}; + +struct async_cow { + struct inode *inode; + struct btrfs_root *root; + struct page *locked_page; + u64 start; + u64 end; + struct list_head extents; + struct btrfs_work work; +}; + +static noinline int add_async_extent(struct async_cow *cow, + u64 start, u64 ram_size, + u64 compressed_size, + struct page **pages, + unsigned long nr_pages) +{ + struct async_extent *async_extent; + + async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS); + async_extent->start = start; + async_extent->ram_size = ram_size; + async_extent->compressed_size = compressed_size; + async_extent->pages = pages; + async_extent->nr_pages = nr_pages; + list_add_tail(&async_extent->list, &cow->extents); + return 0; +} + /* - * when extent_io.c finds a delayed allocation range in the file, - * the call backs end up in this code. The basic idea is to - * allocate extents on disk for the range, and create ordered data structs - * in ram to track those extents. + * we create compressed extents in two phases. The first + * phase compresses a range of pages that have already been + * locked (both pages and state bits are locked). * - * locked_page is the page that writepage had locked already. We use - * it to make sure we don't do extra locks or unlocks. + * This is done inside an ordered work queue, and the compression + * is spread across many cpus. The actual IO submission is step + * two, and the ordered work queue takes care of making sure that + * happens in the same order things were put onto the queue by + * writepages and friends. * - * *page_started is set to one if we unlock locked_page and do everything - * required to start IO on it. It may be clean and already done with - * IO when we return. 
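Note the switch from min() to min_t(unsigned long, ...) in the copy loop above: compressed_size is an unsigned long while PAGE_CACHE_SIZE can expand to a different type, and the kernel's type-checking min() rejects mixed operands. A small userspace reconstruction of that macro pair (GNU C statement expressions, as in the kernel headers; kmin/kmin_t are local names for this sketch):

#include <stdio.h>

/* kernel-style min(): the (void)(&_x == &_y) comparison makes the
 * compiler warn whenever the two operands have different types */
#define kmin(x, y) ({                           \
        typeof(x) _x = (x);                     \
        typeof(y) _y = (y);                     \
        (void)(&_x == &_y);                     \
        _x < _y ? _x : _y; })

/* min_t() sidesteps the check by casting both sides up front */
#define kmin_t(type, x, y) kmin((type)(x), (type)(y))

int main(void)
{
        unsigned long compressed_size = 300000;
        unsigned int page_size = 4096;  /* PAGE_CACHE_SIZE stand-in */

        /* kmin(compressed_size, page_size) would trigger the warning;
         * kmin_t compares both as unsigned long, as the hunk now does */
        unsigned long cur_size = kmin_t(unsigned long, compressed_size,
                                        page_size);
        printf("cur_size = %lu\n", cur_size);
        return 0;
}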
+ * If this code finds it can't get good compression, it puts an + * entry onto the work queue to write the uncompressed bytes. This + * makes sure that both compressed inodes and uncompressed inodes + * are written in the same order that pdflush sent them down. */ -static int cow_file_range(struct inode *inode, struct page *locked_page, - u64 start, u64 end, int *page_started) +static noinline int compress_file_range(struct inode *inode, + struct page *locked_page, + u64 start, u64 end, + struct async_cow *async_cow, + int *num_added) { struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_trans_handle *trans; - u64 alloc_hint = 0; u64 num_bytes; - unsigned long ram_size; u64 orig_start; u64 disk_num_bytes; - u64 cur_alloc_size; u64 blocksize = root->sectorsize; u64 actual_end; - struct btrfs_key ins; - struct extent_map *em; - struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; + u64 isize = i_size_read(inode); int ret = 0; struct page **pages = NULL; unsigned long nr_pages; @@ -298,33 +348,42 @@ static int cow_file_range(struct inode *inode, struct page *locked_page, unsigned long total_compressed = 0; unsigned long total_in = 0; unsigned long max_compressed = 128 * 1024; - unsigned long max_uncompressed = 256 * 1024; + unsigned long max_uncompressed = 128 * 1024; int i; - int ordered_type; int will_compress; - trans = btrfs_join_transaction(root, 1); - BUG_ON(!trans); - btrfs_set_trans_block_group(trans, inode); orig_start = start; - /* - * compression made this loop a bit ugly, but the basic idea is to - * compress some pages but keep the total size of the compressed - * extent relatively small. If compression is off, this goto target - * is never used. - */ + actual_end = min_t(u64, isize, end + 1); again: will_compress = 0; nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1; nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE); - actual_end = min_t(u64, i_size_read(inode), end + 1); + /* + * we don't want to send crud past the end of i_size through + * compression, that's just a waste of CPU time. So, if the + * end of the file is before the start of our current + * requested range of bytes, we bail out to the uncompressed + * cleanup code that can deal with all of this. + * + * It isn't really the fastest way to fix things, but this is a + * very uncommon corner. + */ + if (actual_end <= start) + goto cleanup_and_bail_uncompressed; + total_compressed = actual_end - start; /* we want to make sure that amount of ram required to uncompress * an extent is reasonable, so we limit the total size in ram - * of a compressed extent to 256k + * of a compressed extent to 128k. This is a crucial number + * because it also controls how easily we can spread reads across + * cpus for decompression. + * + * We also want to make sure the amount of IO required to do + * a random read is reasonably small, so we limit the size of + * a compressed extent to 128k. */ total_compressed = min(total_compressed, max_uncompressed); num_bytes = (end - start + blocksize) & ~(blocksize - 1); @@ -333,18 +392,16 @@ again: total_in = 0; ret = 0; - /* we do compression for mount -o compress and when the - * inode has not been flagged as nocompress + /* + * we do compression for mount -o compress and when the + * inode has not been flagged as nocompress. This flag can + * change at any time if we discover bad compression ratios. 
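compress_file_range() above caps a compressed extent at 128k on both the compressed and uncompressed side and rounds the delalloc range up to the block size. A standalone sketch of just that arithmetic, assuming 4k pages:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)

int main(void)
{
        uint64_t start = 0, end = 300000 - 1;   /* a delalloc byte range */
        uint64_t blocksize = 4096;

        /* pages covered by the range, capped at 128k worth */
        unsigned long nr_pages = (end >> PAGE_SHIFT) -
                                 (start >> PAGE_SHIFT) + 1;
        if (nr_pages > (128 * 1024UL) / PAGE_SIZE)
                nr_pages = (128 * 1024UL) / PAGE_SIZE;

        /* round the byte count up to a block-size multiple */
        uint64_t num_bytes = (end - start + blocksize) & ~(blocksize - 1);

        printf("nr_pages=%lu num_bytes=%llu\n", nr_pages,
               (unsigned long long)num_bytes);
        return 0;
}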
*/ if (!btrfs_test_flag(inode, NOCOMPRESS) && btrfs_test_opt(root, COMPRESS)) { WARN_ON(pages); pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS); - /* we want to make sure the amount of IO required to satisfy - * a random read is reasonably small, so we limit the size - * of a compressed extent to 128k - */ ret = btrfs_zlib_compress_pages(inode->i_mapping, start, total_compressed, pages, nr_pages, &nr_pages_ret, @@ -371,26 +428,34 @@ again: } } if (start == 0) { + trans = btrfs_join_transaction(root, 1); + BUG_ON(!trans); + btrfs_set_trans_block_group(trans, inode); + /* lets try to make an inline extent */ - if (ret || total_in < (end - start + 1)) { + if (ret || total_in < (actual_end - start)) { /* we didn't compress the entire range, try - * to make an uncompressed inline extent. This - * is almost sure to fail, but maybe inline sizes - * will get bigger later + * to make an uncompressed inline extent. */ ret = cow_file_range_inline(trans, root, inode, start, end, 0, NULL); } else { + /* try making a compressed inline extent */ ret = cow_file_range_inline(trans, root, inode, start, end, total_compressed, pages); } + btrfs_end_transaction(trans, root); if (ret == 0) { + /* + * inline extent creation worked, we don't need + * to create any more async work items. Unlock + * and free up our temp pages. + */ extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree, - start, end, NULL, - 1, 1, 1); - *page_started = 1; + start, end, NULL, 1, 0, + 0, 1, 1, 1); ret = 0; goto free_pages_out; } @@ -435,54 +500,283 @@ again: /* flag the file so we don't compress in the future */ btrfs_set_flag(inode, NOCOMPRESS); } + if (will_compress) { + *num_added += 1; - BUG_ON(disk_num_bytes > - btrfs_super_total_bytes(&root->fs_info->super_copy)); + /* the async work queues will take care of doing actual + * allocation on disk for these compressed pages, + * and will submit them to the elevator. + */ + add_async_extent(async_cow, start, num_bytes, + total_compressed, pages, nr_pages_ret); - btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0); + if (start + num_bytes < end && start + num_bytes < actual_end) { + start += num_bytes; + pages = NULL; + cond_resched(); + goto again; + } + } else { +cleanup_and_bail_uncompressed: + /* + * No compression, but we still need to write the pages in + * the file we've been given so far. redirty the locked + * page if it corresponds to our extent and set things up + * for the async work queue to run cow_file_range to do + * the normal delalloc dance + */ + if (page_offset(locked_page) >= start && + page_offset(locked_page) <= end) { + __set_page_dirty_nobuffers(locked_page); + /* unlocked later on in the async handlers */ + } + add_async_extent(async_cow, start, end - start + 1, 0, NULL, 0); + *num_added += 1; + } + +out: + return 0; + +free_pages_out: + for (i = 0; i < nr_pages_ret; i++) { + WARN_ON(pages[i]->mapping); + page_cache_release(pages[i]); + } + kfree(pages); + + goto out; +} + +/* + * phase two of compressed writeback. This is the ordered portion + * of the code, which only gets called in the order the work was + * queued. We walk all the async extents created by compress_file_range + * and send them down to the disk. 
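The comment above describes the two phases: compression may run on any cpu, but submission consumes the queue strictly in the order writepages produced it. The toy model below keeps that FIFO property with a plain singly linked list; the names add_async_extent and submit_compressed_extents mirror the patch, the bodies are illustrative only.

#include <stdio.h>
#include <stdlib.h>

struct async_extent {
        unsigned long start, len;
        struct async_extent *next;
};

struct async_cow {
        struct async_extent *head, **tail;
};

/* phase one: compression output is appended in file order */
static void add_async_extent(struct async_cow *cow,
                             unsigned long start, unsigned long len)
{
        struct async_extent *ae = malloc(sizeof(*ae));

        ae->start = start;
        ae->len = len;
        ae->next = NULL;
        *cow->tail = ae;
        cow->tail = &ae->next;
}

/* phase two: consume strictly FIFO, like the ordered work queue */
static void submit_compressed_extents(struct async_cow *cow)
{
        while (cow->head) {
                struct async_extent *ae = cow->head;

                cow->head = ae->next;
                printf("submit [%lu, %lu)\n", ae->start, ae->start + ae->len);
                free(ae);
        }
}

int main(void)
{
        struct async_cow cow = { .head = NULL, .tail = &cow.head };

        add_async_extent(&cow, 0, 128 * 1024);
        add_async_extent(&cow, 128 * 1024, 128 * 1024);
        submit_compressed_extents(&cow);
        return 0;
}

Pinning submission order to queue order is what lets compression spread across cpus without reordering writeback relative to what pdflush sent down.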
+ */ +static noinline int submit_compressed_extents(struct inode *inode, + struct async_cow *async_cow) +{ + struct async_extent *async_extent; + u64 alloc_hint = 0; + struct btrfs_trans_handle *trans; + struct btrfs_key ins; + struct extent_map *em; + struct btrfs_root *root = BTRFS_I(inode)->root; + struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; + struct extent_io_tree *io_tree; + int ret; + + if (list_empty(&async_cow->extents)) + return 0; + + trans = btrfs_join_transaction(root, 1); + + while (!list_empty(&async_cow->extents)) { + async_extent = list_entry(async_cow->extents.next, + struct async_extent, list); + list_del(&async_extent->list); + + io_tree = &BTRFS_I(inode)->io_tree; + + /* did the compression code fall back to uncompressed IO? */ + if (!async_extent->pages) { + int page_started = 0; + unsigned long nr_written = 0; + + lock_extent(io_tree, async_extent->start, + async_extent->start + + async_extent->ram_size - 1, GFP_NOFS); + + /* allocate blocks */ + cow_file_range(inode, async_cow->locked_page, + async_extent->start, + async_extent->start + + async_extent->ram_size - 1, + &page_started, &nr_written, 0); + + /* + * if page_started, cow_file_range inserted an + * inline extent and took care of all the unlocking + * and IO for us. Otherwise, we need to submit + * all those pages down to the drive. + */ + if (!page_started) + extent_write_locked_range(io_tree, + inode, async_extent->start, + async_extent->start + + async_extent->ram_size - 1, + btrfs_get_extent, + WB_SYNC_ALL); + kfree(async_extent); + cond_resched(); + continue; + } + + lock_extent(io_tree, async_extent->start, + async_extent->start + async_extent->ram_size - 1, + GFP_NOFS); + /* + * here we're doing allocation and writeback of the + * compressed pages + */ + btrfs_drop_extent_cache(inode, async_extent->start, + async_extent->start + + async_extent->ram_size - 1, 0); + + ret = btrfs_reserve_extent(trans, root, + async_extent->compressed_size, + async_extent->compressed_size, + 0, alloc_hint, + (u64)-1, &ins, 1); + BUG_ON(ret); + em = alloc_extent_map(GFP_NOFS); + em->start = async_extent->start; + em->len = async_extent->ram_size; + em->orig_start = em->start; - while(disk_num_bytes > 0) { - unsigned long min_bytes; + em->block_start = ins.objectid; + em->block_len = ins.offset; + em->bdev = root->fs_info->fs_devices->latest_bdev; + set_bit(EXTENT_FLAG_PINNED, &em->flags); + set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); + + while (1) { + spin_lock(&em_tree->lock); + ret = add_extent_mapping(em_tree, em); + spin_unlock(&em_tree->lock); + if (ret != -EEXIST) { + free_extent_map(em); + break; + } + btrfs_drop_extent_cache(inode, async_extent->start, + async_extent->start + + async_extent->ram_size - 1, 0); + } + + ret = btrfs_add_ordered_extent(inode, async_extent->start, + ins.objectid, + async_extent->ram_size, + ins.offset, + BTRFS_ORDERED_COMPRESSED); + BUG_ON(ret); + + btrfs_end_transaction(trans, root); /* - * the max size of a compressed extent is pretty small, - * make the code a little less complex by forcing - * the allocator to find a whole compressed extent at once + * clear dirty, set writeback and unlock the pages. 
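The while (1) loop above around add_extent_mapping() is the usual btrfs retry dance: insertion keeps failing with -EEXIST while a stale cached mapping overlaps the new one, so the caller drops the overlapping range and tries again. A single-slot userspace model of that loop (the real code works against an rbtree under em_tree->lock):

#include <errno.h>
#include <stdio.h>

struct extent_map {
        unsigned long start, len;
        int valid;
};

static struct extent_map cached;        /* stands in for the whole rbtree */

static int add_extent_mapping(const struct extent_map *em)
{
        if (cached.valid &&
            em->start < cached.start + cached.len &&
            cached.start < em->start + em->len)
                return -EEXIST;         /* overlap: caller must drop it */
        cached = *em;
        cached.valid = 1;
        return 0;
}

static void drop_extent_cache(unsigned long start, unsigned long end)
{
        if (cached.valid && cached.start <= end &&
            start < cached.start + cached.len)
                cached.valid = 0;
}

int main(void)
{
        struct extent_map em = { .start = 4096, .len = 8192, .valid = 1 };

        cached = (struct extent_map){ .start = 0, .len = 16384, .valid = 1 };
        while (1) {
                if (add_extent_mapping(&em) != -EEXIST)
                        break;
                drop_extent_cache(em.start, em.start + em.len - 1);
        }
        printf("mapped [%lu, %lu)\n", em.start, em.start + em.len);
        return 0;
}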
*/ - if (will_compress) - min_bytes = disk_num_bytes; - else - min_bytes = root->sectorsize; + extent_clear_unlock_delalloc(inode, + &BTRFS_I(inode)->io_tree, + async_extent->start, + async_extent->start + + async_extent->ram_size - 1, + NULL, 1, 1, 0, 1, 1, 0); + + ret = btrfs_submit_compressed_write(inode, + async_extent->start, + async_extent->ram_size, + ins.objectid, + ins.offset, async_extent->pages, + async_extent->nr_pages); + BUG_ON(ret); + trans = btrfs_join_transaction(root, 1); + alloc_hint = ins.objectid + ins.offset; + kfree(async_extent); + cond_resched(); + } + + btrfs_end_transaction(trans, root); + return 0; +} + +/* + * when extent_io.c finds a delayed allocation range in the file, + * the call backs end up in this code. The basic idea is to + * allocate extents on disk for the range, and create ordered data structs + * in ram to track those extents. + * + * locked_page is the page that writepage had locked already. We use + * it to make sure we don't do extra locks or unlocks. + * + * *page_started is set to one if we unlock locked_page and do everything + * required to start IO on it. It may be clean and already done with + * IO when we return. + */ +static noinline int cow_file_range(struct inode *inode, + struct page *locked_page, + u64 start, u64 end, int *page_started, + unsigned long *nr_written, + int unlock) +{ + struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_trans_handle *trans; + u64 alloc_hint = 0; + u64 num_bytes; + unsigned long ram_size; + u64 disk_num_bytes; + u64 cur_alloc_size; + u64 blocksize = root->sectorsize; + u64 actual_end; + u64 isize = i_size_read(inode); + struct btrfs_key ins; + struct extent_map *em; + struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; + int ret = 0; + + trans = btrfs_join_transaction(root, 1); + BUG_ON(!trans); + btrfs_set_trans_block_group(trans, inode); + + actual_end = min_t(u64, isize, end + 1); + + num_bytes = (end - start + blocksize) & ~(blocksize - 1); + num_bytes = max(blocksize, num_bytes); + disk_num_bytes = num_bytes; + ret = 0; + + if (start == 0) { + /* lets try to make an inline extent */ + ret = cow_file_range_inline(trans, root, inode, + start, end, 0, NULL); + if (ret == 0) { + extent_clear_unlock_delalloc(inode, + &BTRFS_I(inode)->io_tree, + start, end, NULL, 1, 1, + 1, 1, 1, 1); + *nr_written = *nr_written + + (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE; + *page_started = 1; + ret = 0; + goto out; + } + } + + BUG_ON(disk_num_bytes > + btrfs_super_total_bytes(&root->fs_info->super_copy)); + + btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0); + + while (disk_num_bytes > 0) { cur_alloc_size = min(disk_num_bytes, root->fs_info->max_extent); ret = btrfs_reserve_extent(trans, root, cur_alloc_size, - min_bytes, 0, alloc_hint, + root->sectorsize, 0, alloc_hint, (u64)-1, &ins, 1); - if (ret) { - WARN_ON(1); - goto free_pages_out_fail; - } + BUG_ON(ret); + em = alloc_extent_map(GFP_NOFS); em->start = start; + em->orig_start = em->start; - if (will_compress) { - ram_size = num_bytes; - em->len = num_bytes; - } else { - /* ramsize == disk size */ - ram_size = ins.offset; - em->len = ins.offset; - } + ram_size = ins.offset; + em->len = ins.offset; em->block_start = ins.objectid; em->block_len = ins.offset; em->bdev = root->fs_info->fs_devices->latest_bdev; set_bit(EXTENT_FLAG_PINNED, &em->flags); - if (will_compress) - set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); - - while(1) { + while (1) { spin_lock(&em_tree->lock); ret = add_extent_mapping(em_tree, em); 
spin_unlock(&em_tree->lock); @@ -495,93 +789,171 @@ again: } cur_alloc_size = ins.offset; - ordered_type = will_compress ? BTRFS_ORDERED_COMPRESSED : 0; ret = btrfs_add_ordered_extent(inode, start, ins.objectid, - ram_size, cur_alloc_size, - ordered_type); + ram_size, cur_alloc_size, 0); BUG_ON(ret); - if (disk_num_bytes < cur_alloc_size) { - printk("num_bytes %Lu cur_alloc %Lu\n", disk_num_bytes, - cur_alloc_size); - break; + if (root->root_key.objectid == + BTRFS_DATA_RELOC_TREE_OBJECTID) { + ret = btrfs_reloc_clone_csums(inode, start, + cur_alloc_size); + BUG_ON(ret); } - if (will_compress) { - /* - * we're doing compression, we and we need to - * submit the compressed extents down to the device. - * - * We lock down all the file pages, clearing their - * dirty bits and setting them writeback. Everyone - * that wants to modify the page will wait on the - * ordered extent above. - * - * The writeback bits on the file pages are - * cleared when the compressed pages are on disk - */ - btrfs_end_transaction(trans, root); - - if (start <= page_offset(locked_page) && - page_offset(locked_page) < start + ram_size) { - *page_started = 1; - } - - extent_clear_unlock_delalloc(inode, - &BTRFS_I(inode)->io_tree, - start, - start + ram_size - 1, - NULL, 1, 1, 0); - - ret = btrfs_submit_compressed_write(inode, start, - ram_size, ins.objectid, - cur_alloc_size, pages, - nr_pages_ret); + if (disk_num_bytes < cur_alloc_size) + break; - BUG_ON(ret); - trans = btrfs_join_transaction(root, 1); - if (start + ram_size < end) { - start += ram_size; - alloc_hint = ins.objectid + ins.offset; - /* pages will be freed at end_bio time */ - pages = NULL; - goto again; - } else { - /* we've written everything, time to go */ - break; - } - } /* we're not doing compressed IO, don't unlock the first * page (which the caller expects to stay locked), don't * clear any dirty bits and don't set any writeback bits */ extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree, start, start + ram_size - 1, - locked_page, 0, 0, 0); + locked_page, unlock, 1, + 1, 0, 0, 0); disk_num_bytes -= cur_alloc_size; num_bytes -= cur_alloc_size; alloc_hint = ins.objectid + ins.offset; start += cur_alloc_size; } - - ret = 0; out: + ret = 0; btrfs_end_transaction(trans, root); return ret; +} -free_pages_out_fail: - extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree, - start, end, locked_page, 0, 0, 0); -free_pages_out: - for (i = 0; i < nr_pages_ret; i++) { - WARN_ON(pages[i]->mapping); - page_cache_release(pages[i]); +/* + * work queue call back to started compression on a file and pages + */ +static noinline void async_cow_start(struct btrfs_work *work) +{ + struct async_cow *async_cow; + int num_added = 0; + async_cow = container_of(work, struct async_cow, work); + + compress_file_range(async_cow->inode, async_cow->locked_page, + async_cow->start, async_cow->end, async_cow, + &num_added); + if (num_added == 0) + async_cow->inode = NULL; +} + +/* + * work queue call back to submit previously compressed pages + */ +static noinline void async_cow_submit(struct btrfs_work *work) +{ + struct async_cow *async_cow; + struct btrfs_root *root; + unsigned long nr_pages; + + async_cow = container_of(work, struct async_cow, work); + + root = async_cow->root; + nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >> + PAGE_CACHE_SHIFT; + + atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages); + + if (atomic_read(&root->fs_info->async_delalloc_pages) < + 5 * 1042 * 1024 && + 
waitqueue_active(&root->fs_info->async_submit_wait)) + wake_up(&root->fs_info->async_submit_wait); + + if (async_cow->inode) + submit_compressed_extents(async_cow->inode, async_cow); +} + +static noinline void async_cow_free(struct btrfs_work *work) +{ + struct async_cow *async_cow; + async_cow = container_of(work, struct async_cow, work); + kfree(async_cow); +} + +static int cow_file_range_async(struct inode *inode, struct page *locked_page, + u64 start, u64 end, int *page_started, + unsigned long *nr_written) +{ + struct async_cow *async_cow; + struct btrfs_root *root = BTRFS_I(inode)->root; + unsigned long nr_pages; + u64 cur_end; + int limit = 10 * 1024 * 1042; + + if (!btrfs_test_opt(root, COMPRESS)) { + return cow_file_range(inode, locked_page, start, end, + page_started, nr_written, 1); } - if (pages) - kfree(pages); - goto out; + clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED | + EXTENT_DELALLOC, 1, 0, GFP_NOFS); + while (start < end) { + async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS); + async_cow->inode = inode; + async_cow->root = root; + async_cow->locked_page = locked_page; + async_cow->start = start; + + if (btrfs_test_flag(inode, NOCOMPRESS)) + cur_end = end; + else + cur_end = min(end, start + 512 * 1024 - 1); + + async_cow->end = cur_end; + INIT_LIST_HEAD(&async_cow->extents); + + async_cow->work.func = async_cow_start; + async_cow->work.ordered_func = async_cow_submit; + async_cow->work.ordered_free = async_cow_free; + async_cow->work.flags = 0; + + nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >> + PAGE_CACHE_SHIFT; + atomic_add(nr_pages, &root->fs_info->async_delalloc_pages); + + btrfs_queue_worker(&root->fs_info->delalloc_workers, + &async_cow->work); + + if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) { + wait_event(root->fs_info->async_submit_wait, + (atomic_read(&root->fs_info->async_delalloc_pages) < + limit)); + } + + while (atomic_read(&root->fs_info->async_submit_draining) && + atomic_read(&root->fs_info->async_delalloc_pages)) { + wait_event(root->fs_info->async_submit_wait, + (atomic_read(&root->fs_info->async_delalloc_pages) == + 0)); + } + + *nr_written += nr_pages; + start = cur_end + 1; + } + *page_started = 1; + return 0; +} + +static noinline int csum_exist_in_range(struct btrfs_root *root, + u64 bytenr, u64 num_bytes) +{ + int ret; + struct btrfs_ordered_sum *sums; + LIST_HEAD(list); + + ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr, + bytenr + num_bytes - 1, &list); + if (ret == 0 && list_empty(&list)) + return 0; + + while (!list_empty(&list)) { + sums = list_entry(list.next, struct btrfs_ordered_sum, list); + list_del(&sums->list); + kfree(sums); + } + return 1; } /* @@ -592,7 +964,8 @@ free_pages_out: * blocks on disk */ static int run_delalloc_nocow(struct inode *inode, struct page *locked_page, - u64 start, u64 end, int *page_started, int force) + u64 start, u64 end, int *page_started, int force, + unsigned long *nr_written) { struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_trans_handle *trans; @@ -644,6 +1017,7 @@ next_slot: nocow = 0; disk_bytenr = 0; + num_bytes = 0; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); if (found_key.objectid > inode->i_ino || @@ -662,7 +1036,6 @@ next_slot: if (extent_type == BTRFS_FILE_EXTENT_REG || extent_type == BTRFS_FILE_EXTENT_PREALLOC) { - struct btrfs_block_group_cache *block_group; disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); extent_end = found_key.offset + btrfs_file_extent_num_bytes(leaf, fi); @@ -670,21 +1043,29 @@ 
next_slot: path->slots[0]++; goto next_slot; } + if (disk_bytenr == 0) + goto out_check; if (btrfs_file_extent_compression(leaf, fi) || btrfs_file_extent_encryption(leaf, fi) || btrfs_file_extent_other_encoding(leaf, fi)) goto out_check; - if (disk_bytenr == 0) - goto out_check; if (extent_type == BTRFS_FILE_EXTENT_REG && !force) goto out_check; - if (btrfs_cross_ref_exist(trans, root, disk_bytenr)) + if (btrfs_extent_readonly(root, disk_bytenr)) goto out_check; - block_group = btrfs_lookup_block_group(root->fs_info, - disk_bytenr); - if (!block_group || block_group->ro) + if (btrfs_cross_ref_exist(trans, root, inode->i_ino, + disk_bytenr)) goto out_check; disk_bytenr += btrfs_file_extent_offset(leaf, fi); + disk_bytenr += cur_offset - found_key.offset; + num_bytes = min(end + 1, extent_end) - cur_offset; + /* + * force cow if csum exists in the range. + * this ensure that csum for a given extent are + * either valid or do not exist. + */ + if (csum_exist_in_range(root, disk_bytenr, num_bytes)) + goto out_check; nocow = 1; } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { extent_end = found_key.offset + @@ -711,19 +1092,19 @@ out_check: btrfs_release_path(root, path); if (cow_start != (u64)-1) { ret = cow_file_range(inode, locked_page, cow_start, - found_key.offset - 1, page_started); + found_key.offset - 1, page_started, + nr_written, 1); BUG_ON(ret); cow_start = (u64)-1; } - disk_bytenr += cur_offset - found_key.offset; - num_bytes = min(end + 1, extent_end) - cur_offset; if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) { struct extent_map *em; struct extent_map_tree *em_tree; em_tree = &BTRFS_I(inode)->extent_tree; em = alloc_extent_map(GFP_NOFS); em->start = cur_offset; + em->orig_start = em->start; em->len = num_bytes; em->block_len = num_bytes; em->block_start = disk_bytenr; @@ -748,9 +1129,10 @@ out_check: ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr, num_bytes, num_bytes, type); BUG_ON(ret); + extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree, cur_offset, cur_offset + num_bytes - 1, - locked_page, 0, 0, 0); + locked_page, 1, 1, 1, 0, 0, 0); cur_offset = extent_end; if (cur_offset > end) break; @@ -761,7 +1143,7 @@ out_check: cow_start = cur_offset; if (cow_start != (u64)-1) { ret = cow_file_range(inode, locked_page, cow_start, end, - page_started); + page_started, nr_written, 1); BUG_ON(ret); } @@ -775,21 +1157,20 @@ out_check: * extent_io.c call back to do delayed allocation processing */ static int run_delalloc_range(struct inode *inode, struct page *locked_page, - u64 start, u64 end, int *page_started) + u64 start, u64 end, int *page_started, + unsigned long *nr_written) { - struct btrfs_root *root = BTRFS_I(inode)->root; int ret; - if (btrfs_test_opt(root, NODATACOW) || - btrfs_test_flag(inode, NODATACOW)) + if (btrfs_test_flag(inode, NODATACOW)) ret = run_delalloc_nocow(inode, locked_page, start, end, - page_started, 0); + page_started, 1, nr_written); else if (btrfs_test_flag(inode, PREALLOC)) ret = run_delalloc_nocow(inode, locked_page, start, end, - page_started, 1); + page_started, 0, nr_written); else - ret = cow_file_range(inode, locked_page, start, end, - page_started); + ret = cow_file_range_async(inode, locked_page, start, end, + page_started, nr_written); return ret; } @@ -799,20 +1180,24 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page, * bytes in this file, and to maintain the list of inodes that * have pending delalloc work to be done. 
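csum_exist_in_range(), introduced above, is the guard that keeps nocow honest: if any checksum item covers part of the candidate range, the write must fall back to cow so that checksums for a given extent are all-or-nothing. A toy overlap test against a static table; the real lookup walks the dedicated csum tree via btrfs_lookup_csums_range().

#include <stdio.h>

struct csum_item {
        unsigned long bytenr, len;
};

/* pretend the csum tree holds one entry covering a block at 1 MiB */
static const struct csum_item csums[] = {
        { 1024 * 1024, 4096 },
};

static int csum_exist_in_range(unsigned long bytenr, unsigned long num_bytes)
{
        for (unsigned i = 0; i < sizeof(csums) / sizeof(csums[0]); i++) {
                if (csums[i].bytenr < bytenr + num_bytes &&
                    bytenr < csums[i].bytenr + csums[i].len)
                        return 1;       /* overlap found: force cow */
        }
        return 0;
}

int main(void)
{
        printf("nocow allowed: %s\n",
               csum_exist_in_range(1024 * 1024, 8192) ? "no" : "yes");
        return 0;
}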
*/ -int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end, +static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end, unsigned long old, unsigned long bits) { - unsigned long flags; + /* + * set_bit and clear bit hooks normally require _irqsave/restore + * but in this case, we are only testeing for the DELALLOC + * bit, which is only set or cleared with irqs on + */ if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) { struct btrfs_root *root = BTRFS_I(inode)->root; - spin_lock_irqsave(&root->fs_info->delalloc_lock, flags); + spin_lock(&root->fs_info->delalloc_lock); BTRFS_I(inode)->delalloc_bytes += end - start + 1; root->fs_info->delalloc_bytes += end - start + 1; if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) { list_add_tail(&BTRFS_I(inode)->delalloc_inodes, &root->fs_info->delalloc_inodes); } - spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags); + spin_unlock(&root->fs_info->delalloc_lock); } return 0; } @@ -820,17 +1205,24 @@ int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end, /* * extent_io.c clear_bit_hook, see set_bit_hook for why */ -int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end, +static int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end, unsigned long old, unsigned long bits) { + /* + * set_bit and clear bit hooks normally require _irqsave/restore + * but in this case, we are only testeing for the DELALLOC + * bit, which is only set or cleared with irqs on + */ if ((old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) { struct btrfs_root *root = BTRFS_I(inode)->root; - unsigned long flags; - spin_lock_irqsave(&root->fs_info->delalloc_lock, flags); + spin_lock(&root->fs_info->delalloc_lock); if (end - start + 1 > root->fs_info->delalloc_bytes) { - printk("warning: delalloc account %Lu %Lu\n", - end - start + 1, root->fs_info->delalloc_bytes); + printk(KERN_INFO "btrfs warning: delalloc account " + "%llu %llu\n", + (unsigned long long)end - start + 1, + (unsigned long long) + root->fs_info->delalloc_bytes); root->fs_info->delalloc_bytes = 0; BTRFS_I(inode)->delalloc_bytes = 0; } else { @@ -841,7 +1233,7 @@ int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end, !list_empty(&BTRFS_I(inode)->delalloc_inodes)) { list_del_init(&BTRFS_I(inode)->delalloc_inodes); } - spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags); + spin_unlock(&root->fs_info->delalloc_lock); } return 0; } @@ -861,15 +1253,17 @@ int btrfs_merge_bio_hook(struct page *page, unsigned long offset, u64 map_length; int ret; + if (bio_flags & EXTENT_BIO_COMPRESSED) + return 0; + length = bio->bi_size; map_tree = &root->fs_info->mapping_tree; map_length = length; ret = btrfs_map_block(map_tree, READ, logical, &map_length, NULL, 0); - if (map_length < length + size) { + if (map_length < length + size) return 1; - } return 0; } @@ -881,48 +1275,65 @@ int btrfs_merge_bio_hook(struct page *page, unsigned long offset, * At IO completion time the cums attached on the ordered extent record * are inserted into the btree */ -int __btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, - int mirror_num, unsigned long bio_flags) +static int __btrfs_submit_bio_start(struct inode *inode, int rw, + struct bio *bio, int mirror_num, + unsigned long bio_flags) { struct btrfs_root *root = BTRFS_I(inode)->root; int ret = 0; - ret = btrfs_csum_one_bio(root, inode, bio); + ret = btrfs_csum_one_bio(root, inode, bio, 0, 0); BUG_ON(ret); + return 0; +} +/* + * in order to insert checksums into the metadata in large chunks, + * we 
wait until bio submission time. All the pages in the bio are + * checksummed and sums are attached onto the ordered extent record. + * + * At IO completion time the cums attached on the ordered extent record + * are inserted into the btree + */ +static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio, + int mirror_num, unsigned long bio_flags) +{ + struct btrfs_root *root = BTRFS_I(inode)->root; return btrfs_map_bio(root, rw, bio, mirror_num, 1); } /* - * extent_io.c submission hook. This does the right thing for csum calculation on write, - * or reading the csums from the tree before a read + * extent_io.c submission hook. This does the right thing for csum calculation + * on write, or reading the csums from the tree before a read */ -int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, +static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, int mirror_num, unsigned long bio_flags) { struct btrfs_root *root = BTRFS_I(inode)->root; int ret = 0; int skip_sum; + skip_sum = btrfs_test_flag(inode, NODATASUM); + ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); BUG_ON(ret); - skip_sum = btrfs_test_opt(root, NODATASUM) || - btrfs_test_flag(inode, NODATASUM); - if (!(rw & (1 << BIO_RW))) { - if (!skip_sum) - btrfs_lookup_bio_sums(root, inode, bio); - - if (bio_flags & EXTENT_BIO_COMPRESSED) + if (bio_flags & EXTENT_BIO_COMPRESSED) { return btrfs_submit_compressed_read(inode, bio, mirror_num, bio_flags); + } else if (!skip_sum) + btrfs_lookup_bio_sums(root, inode, bio, NULL); goto mapit; } else if (!skip_sum) { + /* csum items have already been cloned */ + if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID) + goto mapit; /* we're doing a write, do the async checksumming */ return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info, inode, rw, bio, mirror_num, - bio_flags, __btrfs_submit_bio_hook); + bio_flags, __btrfs_submit_bio_start, + __btrfs_submit_bio_done); } mapit: @@ -937,20 +1348,21 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans, struct inode *inode, u64 file_offset, struct list_head *list) { - struct list_head *cur; struct btrfs_ordered_sum *sum; btrfs_set_trans_block_group(trans, inode); - list_for_each(cur, list) { - sum = list_entry(cur, struct btrfs_ordered_sum, list); - btrfs_csum_file_blocks(trans, BTRFS_I(inode)->root, - inode, sum); + + list_for_each_entry(sum, list, list) { + btrfs_csum_file_blocks(trans, + BTRFS_I(inode)->root->fs_info->csum_root, sum); } return 0; } int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end) { + if ((end & (PAGE_CACHE_SIZE - 1)) == 0) + WARN_ON(1); return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); } @@ -961,7 +1373,7 @@ struct btrfs_writepage_fixup { struct btrfs_work work; }; -void btrfs_writepage_fixup_worker(struct btrfs_work *work) +static void btrfs_writepage_fixup_worker(struct btrfs_work *work) { struct btrfs_writepage_fixup *fixup; struct btrfs_ordered_extent *ordered; @@ -1020,7 +1432,7 @@ out_page: * to fix it up. The async helper will wait for ordered extents, set * the delalloc bit and make it safe to write the page. 
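The submission hook is split above into __btrfs_submit_bio_start and __btrfs_submit_bio_done so that checksumming can happen in a helper thread before the bio is finally mapped to the device. A serialized userspace model of the start/done pairing; in the kernel, btrfs_wq_submit_bio() hands the pair to a worker instead of calling them inline.

#include <stdio.h>

typedef int (*bio_hook_t)(void *bio);

static int submit_bio_start(void *bio)
{
        (void)bio;
        printf("csum the pages, attach sums to the ordered extent\n");
        return 0;
}

static int submit_bio_done(void *bio)
{
        (void)bio;
        printf("map the bio and send it to the device\n");
        return 0;
}

/* serialized stand-in for btrfs_wq_submit_bio(): start, then done */
static int wq_submit_bio(void *bio, bio_hook_t start, bio_hook_t done)
{
        int ret = start(bio);

        return ret ? ret : done(bio);
}

int main(void)
{
        return wq_submit_bio(NULL, submit_bio_start, submit_bio_done);
}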
*/ -int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end) +static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end) { struct inode *inode = page->mapping->host; struct btrfs_writepage_fixup *fixup; @@ -1174,7 +1586,7 @@ nocow: return 0; } -int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, +static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, struct extent_state *state, int uptodate) { return btrfs_finish_ordered_io(page->mapping->host, start, end); @@ -1193,10 +1605,11 @@ struct io_failure_record { u64 start; u64 len; u64 logical; + unsigned long bio_flags; int last_mirror; }; -int btrfs_io_failed_hook(struct bio *failed_bio, +static int btrfs_io_failed_hook(struct bio *failed_bio, struct page *page, u64 start, u64 end, struct extent_state *state) { @@ -1211,7 +1624,6 @@ int btrfs_io_failed_hook(struct bio *failed_bio, int ret; int rw; u64 logical; - unsigned long bio_flags = 0; ret = get_state_private(failure_tree, start, &private); if (ret) { @@ -1221,6 +1633,7 @@ int btrfs_io_failed_hook(struct bio *failed_bio, failrec->start = start; failrec->len = end - start + 1; failrec->last_mirror = 0; + failrec->bio_flags = 0; spin_lock(&em_tree->lock); em = lookup_extent_mapping(em_tree, start, failrec->len); @@ -1236,8 +1649,10 @@ int btrfs_io_failed_hook(struct bio *failed_bio, } logical = start - em->start; logical = em->block_start + logical; - if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) - bio_flags = EXTENT_BIO_COMPRESSED; + if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { + logical = em->block_start; + failrec->bio_flags = EXTENT_BIO_COMPRESSED; + } failrec->logical = logical; free_extent_map(em); set_extent_bits(failure_tree, start, end, EXTENT_LOCKED | @@ -1252,13 +1667,13 @@ int btrfs_io_failed_hook(struct bio *failed_bio, failrec->logical, failrec->len); failrec->last_mirror++; if (!state) { - spin_lock_irq(&BTRFS_I(inode)->io_tree.lock); + spin_lock(&BTRFS_I(inode)->io_tree.lock); state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree, failrec->start, EXTENT_LOCKED); if (state && state->start != failrec->start) state = NULL; - spin_unlock_irq(&BTRFS_I(inode)->io_tree.lock); + spin_unlock(&BTRFS_I(inode)->io_tree.lock); } if (!state || failrec->last_mirror > num_copies) { set_state_private(failure_tree, failrec->start, 0); @@ -1274,6 +1689,7 @@ int btrfs_io_failed_hook(struct bio *failed_bio, bio->bi_sector = failrec->logical >> 9; bio->bi_bdev = failed_bio->bi_bdev; bio->bi_size = 0; + bio_add_page(bio, page, failrec->len, start - page_offset(page)); if (failed_bio->bi_rw & (1 << BIO_RW)) rw = WRITE; @@ -1282,7 +1698,7 @@ int btrfs_io_failed_hook(struct bio *failed_bio, BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio, failrec->last_mirror, - bio_flags); + failrec->bio_flags); return 0; } @@ -1290,7 +1706,7 @@ int btrfs_io_failed_hook(struct bio *failed_bio, * each time an IO finishes, we do a fast check in the IO failure tree * to see if we need to process or clean up an io_failure_record */ -int btrfs_clean_io_failures(struct inode *inode, u64 start) +static int btrfs_clean_io_failures(struct inode *inode, u64 start) { u64 private; u64 private_failure; @@ -1323,7 +1739,7 @@ int btrfs_clean_io_failures(struct inode *inode, u64 start) * if there's a match, we allow the bio to finish. 
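btrfs_io_failed_hook() above now stashes bio_flags in the io_failure_record so retries of compressed extents are resubmitted correctly. The retry policy itself is unchanged: advance last_mirror on each failure and give up once every copy has been tried. Its shape as a toy:

#include <stdio.h>

/* pretend only the second copy of the data is readable */
static int read_mirror(int mirror)
{
        return mirror == 2 ? 0 : -1;
}

int main(void)
{
        int num_copies = 2;     /* e.g. two-way mirrored data */
        int last_mirror = 0;    /* what io_failure_record remembers */

        for (;;) {
                last_mirror++;
                if (last_mirror > num_copies) {
                        fprintf(stderr, "all mirrors failed\n");
                        return 1;
                }
                if (read_mirror(last_mirror) == 0)
                        break;
        }
        printf("recovered using mirror %d\n", last_mirror);
        return 0;
}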
If not, we go through * the io_failure_record routines to find good copies */ -int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end, +static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end, struct extent_state *state) { size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT); @@ -1334,30 +1750,38 @@ int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end, int ret; struct btrfs_root *root = BTRFS_I(inode)->root; u32 csum = ~(u32)0; - unsigned long flags; - if (btrfs_test_opt(root, NODATASUM) || - btrfs_test_flag(inode, NODATASUM)) + if (PageChecked(page)) { + ClearPageChecked(page); + goto good; + } + if (btrfs_test_flag(inode, NODATASUM)) + return 0; + + if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID && + test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1)) { + clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM, + GFP_NOFS); return 0; + } + if (state && state->start == start) { private = state->private; ret = 0; } else { ret = get_state_private(io_tree, start, &private); } - local_irq_save(flags); - kaddr = kmap_atomic(page, KM_IRQ0); - if (ret) { + kaddr = kmap_atomic(page, KM_USER0); + if (ret) goto zeroit; - } + csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1); btrfs_csum_final(csum, (char *)&csum); - if (csum != private) { + if (csum != private) goto zeroit; - } - kunmap_atomic(kaddr, KM_IRQ0); - local_irq_restore(flags); + kunmap_atomic(kaddr, KM_USER0); +good: /* if the io failure tree for this inode is non-empty, * check to see if we've recovered from a failed IO */ @@ -1365,13 +1789,13 @@ int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end, return 0; zeroit: - printk("btrfs csum failed ino %lu off %llu csum %u private %Lu\n", - page->mapping->host->i_ino, (unsigned long long)start, csum, - private); + printk(KERN_INFO "btrfs csum failed ino %lu off %llu csum %u " + "private %llu\n", page->mapping->host->i_ino, + (unsigned long long)start, csum, + (unsigned long long)private); memset(kaddr + offset, 1, end - start + 1); flush_dcache_page(page); - kunmap_atomic(kaddr, KM_IRQ0); - local_irq_restore(flags); + kunmap_atomic(kaddr, KM_USER0); if (private == 0) return 0; return -EIO; @@ -1449,10 +1873,6 @@ void btrfs_orphan_cleanup(struct btrfs_root *root) struct inode *inode; int ret = 0, nr_unlink = 0, nr_truncate = 0; - /* don't do orphan cleanup if the fs is readonly. 
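The read-completion path above stops masking interrupts: checksums are verified in process context with kmap_atomic(page, KM_USER0), and PageChecked() short-circuits pages the fixup worker already validated. The verify-or-zero shape, using a trivial stand-in checksum (btrfs really computes crc32c via btrfs_csum_data()):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* stand-in checksum; not crc32c, just enough to show the flow */
static uint32_t csum_data(const unsigned char *buf, size_t len)
{
        uint32_t csum = ~(uint32_t)0;

        while (len--)
                csum = (csum << 1) ^ (csum >> 31) ^ *buf++;
        return csum;
}

int main(void)
{
        unsigned char page[4096] = "some file data";
        uint32_t private = csum_data(page, sizeof(page)); /* saved at write */

        page[0] ^= 1;   /* simulated corruption */
        if (csum_data(page, sizeof(page)) != private) {
                /* the zeroit: path, minus the kmap/flush details */
                memset(page, 1, sizeof(page));
                fprintf(stderr, "csum failed, returning -EIO\n");
                return 1;
        }
        return 0;
}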
*/ - if (root->fs_info->sb->s_flags & MS_RDONLY) - return; - path = btrfs_alloc_path(); if (!path) return; @@ -1607,21 +2027,18 @@ void btrfs_read_locked_inode(struct inode *inode) inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item)); BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item); + BTRFS_I(inode)->sequence = btrfs_inode_sequence(leaf, inode_item); inode->i_generation = BTRFS_I(inode)->generation; inode->i_rdev = 0; rdev = btrfs_inode_rdev(leaf, inode_item); BTRFS_I(inode)->index_cnt = (u64)-1; + BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item); alloc_group_block = btrfs_inode_block_group(leaf, inode_item); - BTRFS_I(inode)->block_group = btrfs_lookup_block_group(root->fs_info, - alloc_group_block); - BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item); - if (!BTRFS_I(inode)->block_group) { - BTRFS_I(inode)->block_group = btrfs_find_block_group(root, - NULL, 0, - BTRFS_BLOCK_GROUP_METADATA, 0); - } + + BTRFS_I(inode)->block_group = btrfs_find_block_group(root, 0, + alloc_group_block, 0); btrfs_free_path(path); inode_item = NULL; @@ -1646,6 +2063,7 @@ void btrfs_read_locked_inode(struct inode *inode) inode->i_mapping->backing_dev_info = &root->fs_info->bdi; break; default: + inode->i_op = &btrfs_special_inode_operations; init_special_inode(inode, inode->i_mode, rdev); break; } @@ -1687,19 +2105,18 @@ static void fill_inode_item(struct btrfs_trans_handle *trans, btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode)); btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation); + btrfs_set_inode_sequence(leaf, item, BTRFS_I(inode)->sequence); btrfs_set_inode_transid(leaf, item, trans->transid); btrfs_set_inode_rdev(leaf, item, inode->i_rdev); btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags); - btrfs_set_inode_block_group(leaf, item, - BTRFS_I(inode)->block_group->key.objectid); + btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group); } /* * copy everything in the in-memory inode into the btree. 
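fill_inode_item() above gains the new sequence field and now stores the block group as a plain u64. The sketch below only illustrates the copy-fields-into-item direction of that routine; the struct layout and field names here are hypothetical, not the real struct btrfs_inode_item.

#include <stdint.h>
#include <stdio.h>

/* hypothetical layouts, only to show the direction of the copy */
struct mem_inode {
        uint64_t i_size, nbytes, generation, sequence;
        uint32_t i_mode;
};

struct disk_inode_item {
        uint64_t size, nbytes, generation, sequence;
        uint32_t mode;
};

static void fill_inode_item(struct disk_inode_item *item,
                            const struct mem_inode *inode)
{
        item->size = inode->i_size;
        item->nbytes = inode->nbytes;
        item->generation = inode->generation;
        item->sequence = inode->sequence;       /* the field this patch adds */
        item->mode = inode->i_mode;
}

int main(void)
{
        struct mem_inode in = { 4096, 4096, 7, 1, 0100644 };
        struct disk_inode_item item;

        fill_inode_item(&item, &in);
        printf("sequence=%llu\n", (unsigned long long)item.sequence);
        return 0;
}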
*/ -int noinline btrfs_update_inode(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct inode *inode) +noinline int btrfs_update_inode(struct btrfs_trans_handle *trans, + struct btrfs_root *root, struct inode *inode) { struct btrfs_inode_item *inode_item; struct btrfs_path *path; @@ -1716,6 +2133,7 @@ int noinline btrfs_update_inode(struct btrfs_trans_handle *trans, goto failed; } + btrfs_unlock_up_safe(path, 1); leaf = path->nodes[0]; inode_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item); @@ -1774,7 +2192,7 @@ int btrfs_unlink_inode(struct btrfs_trans_handle *trans, inode->i_ino, dir->i_ino, &index); if (ret) { - printk("failed to delete reference to %.*s, " + printk(KERN_INFO "btrfs failed to delete reference to %.*s, " "inode %lu parent %lu\n", name_len, name, inode->i_ino, dir->i_ino); goto err; @@ -1857,7 +2275,12 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) struct btrfs_trans_handle *trans; unsigned long nr = 0; - if (inode->i_size > BTRFS_EMPTY_DIR_SIZE) { + /* + * the FIRST_FREE_OBJECTID check makes sure we don't try to rmdir + * the root of a subvolume or snapshot + */ + if (inode->i_size > BTRFS_EMPTY_DIR_SIZE || + inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) { return -ENOTEMPTY; } @@ -1875,9 +2298,8 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) /* now the directory is empty */ err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode, dentry->d_name.name, dentry->d_name.len); - if (!err) { + if (!err) btrfs_i_size_write(inode, 0); - } fail_trans: nr = trans->blocks_used; @@ -1890,6 +2312,7 @@ fail: return err; } +#if 0 /* * when truncating bytes in a file, it is possible to avoid reading * the leaves that contain only checksum items. This can be the @@ -2032,6 +2455,8 @@ next_node: ref->generation = leaf_gen; ref->nritems = 0; + btrfs_sort_leaf_ref(ref); + ret = btrfs_add_leaf_ref(root, ref, 0); WARN_ON(ret); btrfs_free_leaf_ref(root, ref); @@ -2057,6 +2482,8 @@ out: return ret; } +#endif + /* * this can truncate away extent items, csum items and directory items. 
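With checksums moved out to their own tree, the truncate walk above no longer steps through BTRFS_CSUM_ITEM_KEY: the found_type cascade jumps from extent data straight to xattrs. A compressed model of that descent; the key-type values are the real on-disk constants.

#include <stdio.h>

/* on-disk key-type constants, as in ctree.h of this era */
#define BTRFS_INODE_ITEM_KEY      1
#define BTRFS_INODE_REF_KEY      12
#define BTRFS_XATTR_ITEM_KEY     24
#define BTRFS_DIR_ITEM_KEY       84
#define BTRFS_EXTENT_DATA_KEY   108

static int next_type(int found_type)
{
        if (found_type == BTRFS_DIR_ITEM_KEY)
                return BTRFS_INODE_ITEM_KEY;
        if (found_type == BTRFS_EXTENT_DATA_KEY)
                return BTRFS_XATTR_ITEM_KEY;    /* csum link is gone */
        if (found_type == BTRFS_XATTR_ITEM_KEY)
                return BTRFS_INODE_REF_KEY;
        return found_type - 1;
}

int main(void)
{
        int t = BTRFS_EXTENT_DATA_KEY;

        while (t > BTRFS_INODE_ITEM_KEY) {
                printf("truncating items of type %d\n", t);
                t = next_type(t);
        }
        return 0;
}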
* It starts at a high offset and removes keys until it can't find @@ -2077,7 +2504,7 @@ noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, struct btrfs_path *path; struct btrfs_key key; struct btrfs_key found_key; - u32 found_type; + u32 found_type = (u8)-1; struct extent_buffer *leaf; struct btrfs_file_extent_item *fi; u64 extent_start = 0; @@ -2090,6 +2517,7 @@ noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, int pending_del_nr = 0; int pending_del_slot = 0; int extent_type = -1; + int encoding; u64 mask = root->sectorsize - 1; if (root->ref_cows) @@ -2103,16 +2531,11 @@ noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, key.offset = (u64)-1; key.type = (u8)-1; - btrfs_init_path(path); - - ret = drop_csum_leaves(trans, root, path, inode, new_size); - BUG_ON(ret); - search_again: ret = btrfs_search_slot(trans, root, &key, path, -1, 1); - if (ret < 0) { + if (ret < 0) goto error; - } + if (ret > 0) { /* there are no items in the tree for us to truncate, we're * done @@ -2124,11 +2547,12 @@ search_again: path->slots[0]--; } - while(1) { + while (1) { fi = NULL; leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); found_type = btrfs_key_type(&found_key); + encoding = 0; if (found_key.objectid != inode->i_ino) break; @@ -2141,6 +2565,10 @@ search_again: fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); extent_type = btrfs_file_extent_type(leaf, fi); + encoding = btrfs_file_extent_compression(leaf, fi); + encoding |= btrfs_file_extent_encryption(leaf, fi); + encoding |= btrfs_file_extent_other_encoding(leaf, fi); + if (extent_type != BTRFS_FILE_EXTENT_INLINE) { item_end += btrfs_file_extent_num_bytes(leaf, fi); @@ -2150,25 +2578,19 @@ search_again: } item_end--; } - if (found_type == BTRFS_CSUM_ITEM_KEY) { - ret = btrfs_csum_truncate(trans, root, path, - new_size); - BUG_ON(ret); - } if (item_end < new_size) { - if (found_type == BTRFS_DIR_ITEM_KEY) { + if (found_type == BTRFS_DIR_ITEM_KEY) found_type = BTRFS_INODE_ITEM_KEY; - } else if (found_type == BTRFS_EXTENT_ITEM_KEY) { - found_type = BTRFS_CSUM_ITEM_KEY; - } else if (found_type == BTRFS_EXTENT_DATA_KEY) { + else if (found_type == BTRFS_EXTENT_ITEM_KEY) + found_type = BTRFS_EXTENT_DATA_KEY; + else if (found_type == BTRFS_EXTENT_DATA_KEY) found_type = BTRFS_XATTR_ITEM_KEY; - } else if (found_type == BTRFS_XATTR_ITEM_KEY) { + else if (found_type == BTRFS_XATTR_ITEM_KEY) found_type = BTRFS_INODE_REF_KEY; - } else if (found_type) { + else if (found_type) found_type--; - } else { + else break; - } btrfs_set_key_type(&key, found_type); goto next; } @@ -2185,7 +2607,7 @@ search_again: if (extent_type != BTRFS_FILE_EXTENT_INLINE) { u64 num_dec; extent_start = btrfs_file_extent_disk_bytenr(leaf, fi); - if (!del_item) { + if (!del_item && !encoding) { u64 orig_num_bytes = btrfs_file_extent_num_bytes(leaf, fi); extent_num_bytes = new_size - @@ -2250,7 +2672,7 @@ delete: pending_del_nr++; pending_del_slot = path->slots[0]; } else { - printk("bad pending slot %d pending_del_nr %d pending_del_slot %d\n", path->slots[0], pending_del_nr, pending_del_slot); + BUG(); } } else { break; @@ -2267,6 +2689,8 @@ next: if (pending_del_nr) goto del_pending; btrfs_release_path(root, path); + if (found_type == BTRFS_INODE_ITEM_KEY) + break; goto search_again; } @@ -2283,6 +2707,8 @@ del_pending: BUG_ON(ret); pending_del_nr = 0; btrfs_release_path(root, path); + if (found_type == BTRFS_INODE_ITEM_KEY) + break; goto search_again; } } @@ -2421,7 
+2847,14 @@ int btrfs_cont_expand(struct inode *inode, loff_t size) last_byte = min(extent_map_end(em), block_end); last_byte = (last_byte + mask) & ~mask; if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) { + u64 hint_byte = 0; hole_size = last_byte - cur_offset; + err = btrfs_drop_extents(trans, root, inode, + cur_offset, + cur_offset + hole_size, + cur_offset, &hint_byte); + if (err) + break; err = btrfs_insert_file_extent(trans, root, inode->i_ino, cur_offset, 0, 0, hole_size, 0, hole_size, @@ -2478,7 +2911,7 @@ void btrfs_delete_inode(struct inode *inode) btrfs_wait_ordered_range(inode, 0, (u64)-1); btrfs_i_size_write(inode, 0); - trans = btrfs_start_transaction(root, 1); + trans = btrfs_join_transaction(root, 1); btrfs_set_trans_block_group(trans, inode); ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size, 0); @@ -2525,9 +2958,10 @@ static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry, namelen, 0); if (IS_ERR(di)) ret = PTR_ERR(di); - if (!di || IS_ERR(di)) { + + if (!di || IS_ERR(di)) goto out_err; - } + btrfs_dir_item_key_to_cpu(path->nodes[0], di, location); out: btrfs_free_path(path); @@ -2576,6 +3010,7 @@ static noinline void init_btrfs_i(struct inode *inode) bi->i_default_acl = NULL; bi->generation = 0; + bi->sequence = 0; bi->last_trans = 0; bi->logged_trans = 0; bi->delalloc_bytes = 0; @@ -2590,7 +3025,6 @@ static noinline void init_btrfs_i(struct inode *inode) inode->i_mapping, GFP_NOFS); INIT_LIST_HEAD(&BTRFS_I(inode)->delalloc_inodes); btrfs_ordered_inode_tree_init(&BTRFS_I(inode)->ordered_tree); - mutex_init(&BTRFS_I(inode)->csum_mutex); mutex_init(&BTRFS_I(inode)->extent_mutex); mutex_init(&BTRFS_I(inode)->log_mutex); } @@ -2607,8 +3041,8 @@ static int btrfs_init_locked_inode(struct inode *inode, void *p) static int btrfs_find_actor(struct inode *inode, void *opaque) { struct btrfs_iget_args *args = opaque; - return (args->ino == inode->i_ino && - args->root == BTRFS_I(inode)->root); + return args->ino == inode->i_ino && + args->root == BTRFS_I(inode)->root; } struct inode *btrfs_ilookup(struct super_block *s, u64 objectid, @@ -2670,15 +3104,14 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, return inode; } -static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry, - struct nameidata *nd) +struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) { - struct inode * inode; + struct inode *inode; struct btrfs_inode *bi = BTRFS_I(dir); struct btrfs_root *root = bi->root; struct btrfs_root *sub_root = root; struct btrfs_key location; - int ret, new, do_orphan = 0; + int ret, new; if (dentry->d_name.len > BTRFS_NAME_LEN) return ERR_PTR(-ENAMETOOLONG); @@ -2699,17 +3132,21 @@ static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry, inode = btrfs_iget(dir->i_sb, &location, sub_root, &new); if (IS_ERR(inode)) return ERR_CAST(inode); - - /* the inode and parent dir are two different roots */ - if (new && root != sub_root) { - igrab(inode); - sub_root->inode = inode; - do_orphan = 1; - } } + return inode; +} - if (unlikely(do_orphan)) - btrfs_orphan_cleanup(sub_root); +static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry, + struct nameidata *nd) +{ + struct inode *inode; + + if (dentry->d_name.len > BTRFS_NAME_LEN) + return ERR_PTR(-ENAMETOOLONG); + + inode = btrfs_lookup_dentry(dir, dentry); + if (IS_ERR(inode)) + return ERR_CAST(inode); return d_splice_alias(inode, dentry); } @@ -2765,7 +3202,6 @@ static int btrfs_real_readdir(struct file 
*filp, void *dirent, return 0; filp->f_pos = 2; } - path = btrfs_alloc_path(); path->reada = 2; @@ -2795,6 +3231,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, path->slots[0]++; } } + advance = 1; item = btrfs_item_nr(leaf, slot); btrfs_item_key_to_cpu(leaf, &found_key, slot); @@ -2830,16 +3267,25 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)]; btrfs_dir_item_key_to_cpu(leaf, di, &location); + + /* is this a reference to our own snapshot? If so + * skip it + */ + if (location.type == BTRFS_ROOT_ITEM_KEY && + location.objectid == root->root_key.objectid) { + over = 0; + goto skip; + } over = filldir(dirent, name_ptr, name_len, found_key.offset, location.objectid, d_type); +skip: if (name_ptr != tmp_name) kfree(name_ptr); if (over) goto nopos; - di_len = btrfs_dir_name_len(leaf, di) + btrfs_dir_data_len(leaf, di) + sizeof(*di); di_cur += di_len; @@ -2849,7 +3295,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, /* Reached end of directory/root. Bump pos past the last item. */ if (key_type == BTRFS_DIR_INDEX_KEY) - filp->f_pos = INT_LIMIT(typeof(filp->f_pos)); + filp->f_pos = INT_LIMIT(off_t); else filp->f_pos++; nopos: @@ -2865,7 +3311,7 @@ int btrfs_write_inode(struct inode *inode, int wait) struct btrfs_trans_handle *trans; int ret = 0; - if (root->fs_info->closing > 1) + if (root->fs_info->btree_inode == inode) return 0; if (wait) { @@ -2954,16 +3400,14 @@ out: * helper to find a free sequence number in a given directory. This current * code is very simple, later versions will do smarter things in the btree */ -static int btrfs_set_inode_index(struct inode *dir, struct inode *inode, - u64 *index) +int btrfs_set_inode_index(struct inode *dir, u64 *index) { int ret = 0; if (BTRFS_I(dir)->index_cnt == (u64)-1) { ret = btrfs_set_inode_index_count(dir); - if (ret) { + if (ret) return ret; - } } *index = BTRFS_I(dir)->index_cnt; @@ -2976,14 +3420,11 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *dir, const char *name, int name_len, - u64 ref_objectid, - u64 objectid, - struct btrfs_block_group_cache *group, - int mode, u64 *index) + u64 ref_objectid, u64 objectid, + u64 alloc_hint, int mode, u64 *index) { struct inode *inode; struct btrfs_inode_item *inode_item; - struct btrfs_block_group_cache *new_inode_group; struct btrfs_key *location; struct btrfs_path *path; struct btrfs_inode_ref *ref; @@ -3001,7 +3442,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, return ERR_PTR(-ENOMEM); if (dir) { - ret = btrfs_set_inode_index(dir, inode, index); + ret = btrfs_set_inode_index(dir, index); if (ret) return ERR_PTR(ret); } @@ -3019,13 +3460,14 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, owner = 0; else owner = 1; - new_inode_group = btrfs_find_block_group(root, group, 0, - BTRFS_BLOCK_GROUP_METADATA, owner); - if (!new_inode_group) { - printk("find_block group failed\n"); - new_inode_group = group; + BTRFS_I(inode)->block_group = + btrfs_find_block_group(root, 0, alloc_hint, owner); + if ((mode & S_IFREG)) { + if (btrfs_test_opt(root, NODATASUM)) + btrfs_set_flag(inode, NODATASUM); + if (btrfs_test_opt(root, NODATACOW)) + btrfs_set_flag(inode, NODATACOW); } - BTRFS_I(inode)->block_group = new_inode_group; key[0].objectid = objectid; btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY); @@ -3045,8 +3487,15 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle 
@@ -3045,8 +3487,15 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
 	if (objectid > root->highest_inode)
 		root->highest_inode = objectid;
 
-	inode->i_uid = current->fsuid;
-	inode->i_gid = current->fsgid;
+	inode->i_uid = current_fsuid();
+
+	if (dir && (dir->i_mode & S_ISGID)) {
+		inode->i_gid = dir->i_gid;
+		if (S_ISDIR(mode))
+			mode |= S_ISGID;
+	} else
+		inode->i_gid = current_fsgid();
+
 	inode->i_mode = mode;
 	inode->i_ino = objectid;
 	inode_set_bytes(inode, 0);
@@ -3174,7 +3623,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
 	if (IS_ERR(inode))
 		goto out_unlock;
 
-	err = btrfs_init_acl(inode, dir);
+	err = btrfs_init_inode_security(inode, dir);
 	if (err) {
 		drop_inode = 1;
 		goto out_unlock;
@@ -3237,7 +3686,7 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
 	if (IS_ERR(inode))
 		goto out_unlock;
 
-	err = btrfs_init_acl(inode, dir);
+	err = btrfs_init_inode_security(inode, dir);
 	if (err) {
 		drop_inode = 1;
 		goto out_unlock;
@@ -3287,7 +3736,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
 	err = btrfs_check_free_space(root, 1, 0);
 	if (err)
 		goto fail;
-	err = btrfs_set_inode_index(dir, inode, &index);
+	err = btrfs_set_inode_index(dir, &index);
 	if (err)
 		goto fail;
@@ -3360,7 +3809,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
 
 	drop_on_err = 1;
 
-	err = btrfs_init_acl(inode, dir);
+	err = btrfs_init_inode_security(inode, dir);
 	if (err)
 		goto out_fail;
@@ -3440,7 +3889,7 @@ static noinline int uncompress_inline(struct btrfs_path *path,
 
 	read_extent_buffer(leaf, tmp, ptr, inline_size);
 
-	max_size = min(PAGE_CACHE_SIZE, max_size);
+	max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
 	ret = btrfs_zlib_decompress(tmp, page, extent_offset,
 				    inline_size, max_size);
 	if (ret) {
@@ -3457,12 +3906,13 @@ static noinline int uncompress_inline(struct btrfs_path *path,
 
 /*
  * a bit scary, this does extent mapping from logical file offset to the disk.
- * the ugly parts come from merging extents from the disk with the
- * in-ram representation. This gets more complex because of the data=ordered code,
+ * the ugly parts come from merging extents from the disk with the in-ram
+ * representation.  This gets more complex because of the data=ordered code,
 * where the in-ram extents might be locked pending data=ordered completion.
 *
 * This also copies inline extents directly into the page.
 */
+
 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
 				    size_t pg_offset, u64 start, u64 len,
 				    int create)
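On the min_t() conversion just above (and the matching one in insert_inline_extent earlier): the kernel's min() macro deliberately fails when its two arguments have different types, and PAGE_CACHE_SIZE (unsigned long) rarely matches the other operand's type. min_t() casts both sides to one named type first. A self-contained illustration, assuming nothing beyond linux/kernel.h:

#include <linux/kernel.h>       /* min(), min_t() */

/* Illustrative only: clamp a 64-bit length to what one buffer can
 * hold.  min(want, have) would trip min()'s type check whenever
 * u64 and unsigned long differ (e.g. on 32-bit builds); min_t()
 * resolves it by casting both sides explicitly. */
static unsigned long clamp_to_buffer(u64 want, unsigned long have)
{
        return min_t(unsigned long, want, have);
}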
@@ -3507,6 +3957,7 @@ again:
 	}
 	em->bdev = root->fs_info->fs_devices->latest_bdev;
 	em->start = EXTENT_MAP_HOLE;
+	em->orig_start = EXTENT_MAP_HOLE;
 	em->len = (u64)-1;
 	em->block_len = (u64)-1;
@@ -3580,6 +4031,8 @@ again:
 	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
 		em->start = extent_start;
 		em->len = extent_end - extent_start;
+		em->orig_start = extent_start -
+				 btrfs_file_extent_offset(leaf, item);
 		bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
 		if (bytenr == 0) {
 			em->block_start = EXTENT_MAP_HOLE;
@@ -3619,6 +4072,7 @@ again:
 		em->start = extent_start + extent_offset;
 		em->len = (copy_size + root->sectorsize - 1) &
 			~((u64)root->sectorsize - 1);
+		em->orig_start = EXTENT_MAP_INLINE;
 		if (compressed)
 			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
 		ptr = btrfs_file_extent_inline_start(item) + extent_offset;
@@ -3655,7 +4109,7 @@ again:
 			    extent_map_end(em) - 1, GFP_NOFS);
 		goto insert;
 	} else {
-		printk("unkknown found_type %d\n", found_type);
+		printk(KERN_ERR "btrfs unknown found_type %d\n", found_type);
 		WARN_ON(1);
 	}
 not_found:
@@ -3667,7 +4121,11 @@ not_found_em:
 insert:
 	btrfs_release_path(root, path);
 	if (em->start > start || extent_map_end(em) <= start) {
-		printk("bad extent! em: [%Lu %Lu] passed [%Lu %Lu]\n", em->start, em->len, start, len);
+		printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed "
+		       "[%llu %llu]\n", (unsigned long long)em->start,
+		       (unsigned long long)em->len,
+		       (unsigned long long)start,
+		       (unsigned long long)len);
 		err = -EIO;
 		goto out;
 	}
@@ -3704,8 +4162,6 @@ insert:
 			}
 		} else {
 			err = -EIO;
-			printk("failing to insert %Lu %Lu\n",
-			       start, len);
 			free_extent_map(em);
 			em = NULL;
 		}
@@ -3721,9 +4177,8 @@ out:
 	btrfs_free_path(path);
 	if (trans) {
 		ret = btrfs_end_transaction(trans, root);
-		if (!err) {
+		if (!err)
 			err = ret;
-		}
 	}
 	if (err) {
 		free_extent_map(em);
@@ -3740,9 +4195,10 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
 	return -EINVAL;
 }
 
-static sector_t btrfs_bmap(struct address_space *mapping, sector_t iblock)
+static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+		__u64 start, __u64 len)
 {
-	return extent_bmap(mapping, iblock, btrfs_get_extent);
+	return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent);
 }
 
 int btrfs_readpage(struct file *file, struct page *page)
@@ -3770,6 +4226,7 @@ int btrfs_writepages(struct address_space *mapping,
 		     struct writeback_control *wbc)
 {
 	struct extent_io_tree *tree;
+
 	tree = &BTRFS_I(mapping->host)->io_tree;
 	return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
 }
@@ -3804,7 +4261,7 @@ static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
 {
 	if (PageWriteback(page) || PageDirty(page))
 		return 0;
-	return __btrfs_releasepage(page, gfp_flags);
+	return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
 }
 
 static void btrfs_invalidatepage(struct page *page, unsigned long offset)
@@ -3976,49 +4433,23 @@ out:
 	btrfs_btree_balance_dirty(root, nr);
 }
 
-/*
- * Invalidate a single dcache entry at the root of the filesystem.
- * Needed after creation of snapshot or subvolume.
- */
-void btrfs_invalidate_dcache_root(struct btrfs_root *root, char *name,
-				  int namelen)
-{
-	struct dentry *alias, *entry;
-	struct qstr qstr;
-
-	alias = d_find_alias(root->fs_info->sb->s_root->d_inode);
-	if (alias) {
-		qstr.name = name;
-		qstr.len = namelen;
-		/* change me if btrfs ever gets a d_hash operation */
-		qstr.hash = full_name_hash(qstr.name, qstr.len);
-		entry = d_lookup(alias, &qstr);
-		dput(alias);
-		if (entry) {
-			d_invalidate(entry);
-			dput(entry);
-		}
-	}
-}
-
 /*
  * create a new subvolume directory/inode (helper for the ioctl).
 */
-int btrfs_create_subvol_root(struct btrfs_root *new_root, struct dentry *dentry,
-		struct btrfs_trans_handle *trans, u64 new_dirid,
-		struct btrfs_block_group_cache *block_group)
+int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
+			     struct btrfs_root *new_root, struct dentry *dentry,
+			     u64 new_dirid, u64 alloc_hint)
 {
 	struct inode *inode;
 	int error;
 	u64 index = 0;
 
 	inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, new_dirid,
-				new_dirid, block_group, S_IFDIR | 0700, &index);
+				new_dirid, alloc_hint, S_IFDIR | 0700, &index);
 	if (IS_ERR(inode))
 		return PTR_ERR(inode);
 	inode->i_op = &btrfs_dir_inode_operations;
 	inode->i_fop = &btrfs_dir_file_operations;
-	new_root->inode = inode;
 
 	inode->i_nlink = 1;
 	btrfs_i_size_write(inode, 0);
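The printk() conversions in the btrfs_get_extent hunks above follow the portable idiom for printing u64: depending on the architecture, u64 is typedef'd to unsigned long or unsigned long long, so the value is cast to unsigned long long and printed with %llu (the old %Lu form is not a standard printk conversion at all). Condensed illustration, not part of the patch:

/* Illustrative only: the u64-printing idiom the hunks above adopt.
 * Without the casts, %llu would be wrong on architectures where
 * u64 is plain unsigned long. */
static void report_bad_extent(u64 start, u64 len)
{
        printk(KERN_ERR "Btrfs: bad extent [%llu %llu]\n",
               (unsigned long long)start,
               (unsigned long long)len);
}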
@@ -4027,7 +4458,6 @@ int btrfs_create_subvol_root(struct btrfs_root *new_root, struct dentry *dentry,
 	if (error)
 		return error;
 
-	atomic_inc(&inode->i_count);
 	d_instantiate(dentry, inode);
 	return 0;
 }
@@ -4082,13 +4512,15 @@ void btrfs_destroy_inode(struct inode *inode)
 	}
 	spin_unlock(&BTRFS_I(inode)->root->list_lock);
 
-	while(1) {
+	while (1) {
 		ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
 		if (!ordered)
 			break;
 		else {
-			printk("found ordered extent %Lu %Lu\n",
-			       ordered->file_offset, ordered->len);
+			printk(KERN_ERR "btrfs found ordered "
+			       "extent %llu %llu on inode cleanup\n",
+			       (unsigned long long)ordered->file_offset,
+			       (unsigned long long)ordered->len);
 			btrfs_remove_ordered_extent(inode, ordered);
 			btrfs_put_ordered_extent(ordered);
 			btrfs_put_ordered_extent(ordered);
@@ -4165,14 +4597,15 @@ static int btrfs_getattr(struct vfsmount *mnt,
 {
 	struct inode *inode = dentry->d_inode;
 	generic_fillattr(inode, stat);
+	stat->dev = BTRFS_I(inode)->root->anon_super.s_dev;
 	stat->blksize = PAGE_CACHE_SIZE;
 	stat->blocks = (inode_get_bytes(inode) +
 			BTRFS_I(inode)->delalloc_bytes) >> 9;
 	return 0;
 }
 
-static int btrfs_rename(struct inode * old_dir, struct dentry *old_dentry,
-			struct inode * new_dir,struct dentry *new_dentry)
+static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+			struct inode *new_dir, struct dentry *new_dentry)
 {
 	struct btrfs_trans_handle *trans;
 	struct btrfs_root *root = BTRFS_I(old_dir)->root;
@@ -4182,11 +4615,22 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 	u64 index = 0;
 	int ret;
 
+	/* we're not allowed to rename between subvolumes */
+	if (BTRFS_I(old_inode)->root->root_key.objectid !=
+	    BTRFS_I(new_dir)->root->root_key.objectid)
+		return -EXDEV;
+
 	if (S_ISDIR(old_inode->i_mode) && new_inode &&
 	    new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) {
 		return -ENOTEMPTY;
 	}
 
+	/* to rename a snapshot or subvolume, we need to juggle the
+	 * backrefs.  This isn't coded yet
+	 */
+	if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
+		return -EXDEV;
+
 	ret = btrfs_check_free_space(root, 1, 0);
 	if (ret)
 		goto out_unlock;
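The two -EXDEV checks added to btrfs_rename() above surface to userspace exactly like a cross-mount rename, so existing tools already cope: mv and friends fall back to copy-plus-unlink when rename(2) fails with EXDEV. A hedged userspace sketch of that fallback; copy_then_unlink() is a hypothetical helper, not from the patch:

#include <errno.h>
#include <stdio.h>      /* rename() */

/* Hypothetical fallback mirroring what mv(1) does when the new
 * btrfs checks (or any cross-mount rename) return EXDEV. */
static int move_file(const char *src, const char *dst)
{
        if (rename(src, dst) == 0)
                return 0;
        if (errno != EXDEV)
                return -1;                      /* real failure */
        return copy_then_unlink(src, dst);      /* hypothetical helper */
}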
@@ -4221,7 +4665,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 		}
 	}
 
-	ret = btrfs_set_inode_index(new_dir, old_inode, &index);
+	ret = btrfs_set_inode_index(new_dir, &index);
 	if (ret)
 		goto out_fail;
@@ -4246,33 +4690,37 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root)
 	struct list_head *head = &root->fs_info->delalloc_inodes;
 	struct btrfs_inode *binode;
 	struct inode *inode;
-	unsigned long flags;
 
-	spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
-	while(!list_empty(head)) {
+	if (root->fs_info->sb->s_flags & MS_RDONLY)
+		return -EROFS;
+
+	spin_lock(&root->fs_info->delalloc_lock);
+	while (!list_empty(head)) {
 		binode = list_entry(head->next, struct btrfs_inode,
 				    delalloc_inodes);
 		inode = igrab(&binode->vfs_inode);
 		if (!inode)
 			list_del_init(&binode->delalloc_inodes);
-		spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
+		spin_unlock(&root->fs_info->delalloc_lock);
 		if (inode) {
 			filemap_flush(inode->i_mapping);
 			iput(inode);
 		}
 		cond_resched();
-		spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
+		spin_lock(&root->fs_info->delalloc_lock);
 	}
-	spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
+	spin_unlock(&root->fs_info->delalloc_lock);
 
 	/* the filemap_flush will queue IO into the worker threads, but
 	 * we have to make sure the IO is actually started and that
 	 * ordered extents get created before we return
 	 */
 	atomic_inc(&root->fs_info->async_submit_draining);
-	while(atomic_read(&root->fs_info->nr_async_submits)) {
+	while (atomic_read(&root->fs_info->nr_async_submits) ||
+	       atomic_read(&root->fs_info->async_delalloc_pages)) {
 		wait_event(root->fs_info->async_submit_wait,
-		   (atomic_read(&root->fs_info->nr_async_submits) == 0));
+		   (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
+		    atomic_read(&root->fs_info->async_delalloc_pages) == 0));
 	}
 	atomic_dec(&root->fs_info->async_submit_draining);
 	return 0;
@@ -4323,7 +4771,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
 	if (IS_ERR(inode))
 		goto out_unlock;
 
-	err = btrfs_init_acl(inode, dir);
+	err = btrfs_init_inode_security(inode, dir);
 	if (err) {
 		drop_inode = 1;
 		goto out_unlock;
@@ -4532,6 +4980,7 @@ static int btrfs_permission(struct inode *inode, int mask)
 }
 
 static struct inode_operations btrfs_dir_inode_operations = {
+	.getattr	= btrfs_getattr,
 	.lookup		= btrfs_lookup,
 	.create		= btrfs_create,
 	.unlink		= btrfs_unlink,
@@ -4576,13 +5025,24 @@ static struct extent_io_ops btrfs_extent_io_ops = {
 	.clear_bit_hook = btrfs_clear_bit_hook,
 };
 
+/*
+ * btrfs doesn't support the bmap operation because swapfiles
+ * use bmap to make a mapping of extents in the file.  They assume
+ * these extents won't change over the life of the file and they
+ * use the bmap result to do IO directly to the drive.
+ *
+ * the btrfs bmap call would return logical addresses that aren't
+ * suitable for IO and they also will change frequently as COW
+ * operations happen.  So, swapfile + btrfs == corruption.
+ *
+ * For now we're avoiding this by dropping bmap.
+ */
 static struct address_space_operations btrfs_aops = {
 	.readpage	= btrfs_readpage,
 	.writepage	= btrfs_writepage,
 	.writepages	= btrfs_writepages,
 	.readpages	= btrfs_readpages,
 	.sync_page	= block_sync_page,
-	.bmap		= btrfs_bmap,
 	.direct_IO	= btrfs_direct_IO,
 	.invalidatepage = btrfs_invalidatepage,
 	.releasepage	= btrfs_releasepage,
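For context on the comment block above: FIBMAP is the ioctl through which swapfile setup and boot loaders historically obtained a file's block mapping, trusting it to stay valid for the life of the file. A minimal query looks roughly like this; with .bmap gone the call now fails on btrfs instead of handing out addresses that COW would invalidate:

#include <fcntl.h>
#include <linux/fs.h>           /* FIBMAP */
#include <stdio.h>
#include <sys/ioctl.h>

/* Minimal FIBMAP query (needs CAP_SYS_RAWIO). */
int main(int argc, char **argv)
{
        int fd, block = 0;      /* in: file block; out: disk block */

        if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0)
                return 1;
        if (ioctl(fd, FIBMAP, &block) < 0) {
                perror("FIBMAP");       /* fails on btrfs after this patch */
                return 1;
        }
        printf("file block 0 -> disk block %d\n", block);
        return 0;
}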
@@ -4606,6 +5066,7 @@ static struct inode_operations btrfs_file_inode_operations = {
 	.removexattr	= btrfs_removexattr,
 	.permission	= btrfs_permission,
 	.fallocate	= btrfs_fallocate,
+	.fiemap		= btrfs_fiemap,
 };
 static struct inode_operations btrfs_special_inode_operations = {
 	.getattr	= btrfs_getattr,
@@ -4621,4 +5082,8 @@ static struct inode_operations btrfs_symlink_inode_operations = {
 	.follow_link	= page_follow_link_light,
 	.put_link	= page_put_link,
 	.permission	= btrfs_permission,
+	.setxattr	= btrfs_setxattr,
+	.getxattr	= btrfs_getxattr,
+	.listxattr	= btrfs_listxattr,
+	.removexattr	= btrfs_removexattr,
 };
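The final hunk gives symlinks the same xattr operations as regular files. From userspace these handlers are reached through the l*xattr calls, which operate on the link itself rather than its target; note the VFS restricts the user.* namespace to regular files and directories, so an xattr on a symlink needs e.g. trusted.* plus CAP_SYS_ADMIN. A sketch with hypothetical paths and names:

#include <stdio.h>
#include <sys/xattr.h>
#include <unistd.h>

/* Exercising the symlink xattr handlers wired up above.  l*xattr
 * acts on the link itself; trusted.* is used because the VFS
 * rejects user.* on symlinks. */
int main(void)
{
        char value[64];
        ssize_t len;

        symlink("target", "/tmp/lnk");
        if (lsetxattr("/tmp/lnk", "trusted.note", "hi", 2, 0) < 0)
                perror("lsetxattr");    /* needs CAP_SYS_ADMIN */
        len = lgetxattr("/tmp/lnk", "trusted.note", value, sizeof(value));
        if (len > 0)
                printf("trusted.note = %.*s\n", (int)len, value);
        return 0;
}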