2 * Copyright (C) 2007 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
18 #include <linux/sched.h>
19 #include <linux/pagemap.h>
20 #include <linux/writeback.h>
25 #include "print-tree.h"
26 #include "transaction.h"
29 #include "ref-cache.h"
31 #define BLOCK_GROUP_DATA EXTENT_WRITEBACK
32 #define BLOCK_GROUP_METADATA EXTENT_UPTODATE
33 #define BLOCK_GROUP_SYSTEM EXTENT_NEW
35 #define BLOCK_GROUP_DIRTY EXTENT_DIRTY
37 static int finish_current_insert(struct btrfs_trans_handle *trans, struct
38 btrfs_root *extent_root);
39 static int del_pending_extents(struct btrfs_trans_handle *trans, struct
40 btrfs_root *extent_root);
41 static struct btrfs_block_group_cache *
42 __btrfs_find_block_group(struct btrfs_root *root,
43 struct btrfs_block_group_cache *hint,
44 u64 search_start, int data, int owner);
46 void maybe_lock_mutex(struct btrfs_root *root)
48 if (root != root->fs_info->extent_root &&
49 root != root->fs_info->chunk_root &&
50 root != root->fs_info->dev_root) {
51 mutex_lock(&root->fs_info->alloc_mutex);
55 void maybe_unlock_mutex(struct btrfs_root *root)
57 if (root != root->fs_info->extent_root &&
58 root != root->fs_info->chunk_root &&
59 root != root->fs_info->dev_root) {
60 mutex_unlock(&root->fs_info->alloc_mutex);
64 static int cache_block_group(struct btrfs_root *root,
65 struct btrfs_block_group_cache *block_group)
67 struct btrfs_path *path;
70 struct extent_buffer *leaf;
71 struct extent_io_tree *free_space_cache;
81 root = root->fs_info->extent_root;
82 free_space_cache = &root->fs_info->free_space_cache;
84 if (block_group->cached)
87 path = btrfs_alloc_path();
93 * we get into deadlocks with paths held by callers of this function.
94 * since the alloc_mutex is protecting things right now, just
95 * skip the locking here
97 path->skip_locking = 1;
98 first_free = block_group->key.objectid;
99 key.objectid = block_group->key.objectid;
101 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
102 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
105 ret = btrfs_previous_item(root, path, 0, BTRFS_EXTENT_ITEM_KEY);
109 leaf = path->nodes[0];
110 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
111 if (key.objectid + key.offset > first_free)
112 first_free = key.objectid + key.offset;
115 leaf = path->nodes[0];
116 slot = path->slots[0];
117 if (slot >= btrfs_header_nritems(leaf)) {
118 ret = btrfs_next_leaf(root, path);
127 btrfs_item_key_to_cpu(leaf, &key, slot);
128 if (key.objectid < block_group->key.objectid) {
131 if (key.objectid >= block_group->key.objectid +
132 block_group->key.offset) {
136 if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
141 if (key.objectid > last) {
142 hole_size = key.objectid - last;
143 set_extent_dirty(free_space_cache, last,
144 last + hole_size - 1,
147 last = key.objectid + key.offset;
155 if (block_group->key.objectid +
156 block_group->key.offset > last) {
157 hole_size = block_group->key.objectid +
158 block_group->key.offset - last;
159 set_extent_dirty(free_space_cache, last,
160 last + hole_size - 1, GFP_NOFS);
162 block_group->cached = 1;
164 btrfs_free_path(path);
168 struct btrfs_block_group_cache *btrfs_lookup_first_block_group(struct
172 struct extent_io_tree *block_group_cache;
173 struct btrfs_block_group_cache *block_group = NULL;
179 bytenr = max_t(u64, bytenr,
180 BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE);
181 block_group_cache = &info->block_group_cache;
182 ret = find_first_extent_bit(block_group_cache,
183 bytenr, &start, &end,
184 BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA |
189 ret = get_state_private(block_group_cache, start, &ptr);
193 block_group = (struct btrfs_block_group_cache *)(unsigned long)ptr;
197 struct btrfs_block_group_cache *btrfs_lookup_block_group(struct
201 struct extent_io_tree *block_group_cache;
202 struct btrfs_block_group_cache *block_group = NULL;
208 bytenr = max_t(u64, bytenr,
209 BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE);
210 block_group_cache = &info->block_group_cache;
211 ret = find_first_extent_bit(block_group_cache,
212 bytenr, &start, &end,
213 BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA |
218 ret = get_state_private(block_group_cache, start, &ptr);
222 block_group = (struct btrfs_block_group_cache *)(unsigned long)ptr;
223 if (block_group->key.objectid <= bytenr && bytenr <
224 block_group->key.objectid + block_group->key.offset)
229 static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
231 return (cache->flags & bits) == bits;
234 static int noinline find_search_start(struct btrfs_root *root,
235 struct btrfs_block_group_cache **cache_ret,
236 u64 *start_ret, u64 num, int data)
239 struct btrfs_block_group_cache *cache = *cache_ret;
240 struct extent_io_tree *free_space_cache;
241 struct extent_state *state;
246 u64 search_start = *start_ret;
249 WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
250 total_fs_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
251 free_space_cache = &root->fs_info->free_space_cache;
257 ret = cache_block_group(root, cache);
262 last = max(search_start, cache->key.objectid);
263 if (!block_group_bits(cache, data) || cache->ro)
266 spin_lock_irq(&free_space_cache->lock);
267 state = find_first_extent_bit_state(free_space_cache, last, EXTENT_DIRTY);
272 spin_unlock_irq(&free_space_cache->lock);
276 start = max(last, state->start);
277 last = state->end + 1;
278 if (last - start < num) {
280 state = extent_state_next(state);
281 } while(state && !(state->state & EXTENT_DIRTY));
284 spin_unlock_irq(&free_space_cache->lock);
288 if (start + num > cache->key.objectid + cache->key.offset)
290 if (!block_group_bits(cache, data)) {
291 printk("block group bits don't match %Lu %d\n", cache->flags, data);
297 cache = btrfs_lookup_block_group(root->fs_info, search_start);
299 printk("Unable to find block group for %Lu\n", search_start);
305 last = cache->key.objectid + cache->key.offset;
307 cache = btrfs_lookup_first_block_group(root->fs_info, last);
308 if (!cache || cache->key.objectid >= total_fs_bytes) {
317 if (cache_miss && !cache->cached) {
318 cache_block_group(root, cache);
320 cache = btrfs_lookup_first_block_group(root->fs_info, last);
323 cache = btrfs_find_block_group(root, cache, last, data, 0);
330 static u64 div_factor(u64 num, int factor)
339 static int block_group_state_bits(u64 flags)
342 if (flags & BTRFS_BLOCK_GROUP_DATA)
343 bits |= BLOCK_GROUP_DATA;
344 if (flags & BTRFS_BLOCK_GROUP_METADATA)
345 bits |= BLOCK_GROUP_METADATA;
346 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
347 bits |= BLOCK_GROUP_SYSTEM;
351 static struct btrfs_block_group_cache *
352 __btrfs_find_block_group(struct btrfs_root *root,
353 struct btrfs_block_group_cache *hint,
354 u64 search_start, int data, int owner)
356 struct btrfs_block_group_cache *cache;
357 struct extent_io_tree *block_group_cache;
358 struct btrfs_block_group_cache *found_group = NULL;
359 struct btrfs_fs_info *info = root->fs_info;
372 block_group_cache = &info->block_group_cache;
374 if (data & BTRFS_BLOCK_GROUP_METADATA)
377 bit = block_group_state_bits(data);
380 struct btrfs_block_group_cache *shint;
381 shint = btrfs_lookup_first_block_group(info, search_start);
382 if (shint && block_group_bits(shint, data) && !shint->ro) {
383 spin_lock(&shint->lock);
384 used = btrfs_block_group_used(&shint->item);
385 if (used + shint->pinned <
386 div_factor(shint->key.offset, factor)) {
387 spin_unlock(&shint->lock);
390 spin_unlock(&shint->lock);
393 if (hint && !hint->ro && block_group_bits(hint, data)) {
394 spin_lock(&hint->lock);
395 used = btrfs_block_group_used(&hint->item);
396 if (used + hint->pinned <
397 div_factor(hint->key.offset, factor)) {
398 spin_unlock(&hint->lock);
401 spin_unlock(&hint->lock);
402 last = hint->key.objectid + hint->key.offset;
405 last = max(hint->key.objectid, search_start);
411 ret = find_first_extent_bit(block_group_cache, last,
416 ret = get_state_private(block_group_cache, start, &ptr);
422 cache = (struct btrfs_block_group_cache *)(unsigned long)ptr;
423 spin_lock(&cache->lock);
424 last = cache->key.objectid + cache->key.offset;
425 used = btrfs_block_group_used(&cache->item);
427 if (!cache->ro && block_group_bits(cache, data)) {
428 free_check = div_factor(cache->key.offset, factor);
429 if (used + cache->pinned < free_check) {
431 spin_unlock(&cache->lock);
435 spin_unlock(&cache->lock);
443 if (!full_search && factor < 10) {
453 struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root,
454 struct btrfs_block_group_cache
455 *hint, u64 search_start,
459 struct btrfs_block_group_cache *ret;
460 ret = __btrfs_find_block_group(root, hint, search_start, data, owner);
463 static u64 hash_extent_ref(u64 root_objectid, u64 ref_generation,
464 u64 owner, u64 owner_offset)
466 u32 high_crc = ~(u32)0;
467 u32 low_crc = ~(u32)0;
469 lenum = cpu_to_le64(root_objectid);
470 high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
471 lenum = cpu_to_le64(ref_generation);
472 low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
473 if (owner >= BTRFS_FIRST_FREE_OBJECTID) {
474 lenum = cpu_to_le64(owner);
475 low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
476 lenum = cpu_to_le64(owner_offset);
477 low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
479 return ((u64)high_crc << 32) | (u64)low_crc;
482 static int match_extent_ref(struct extent_buffer *leaf,
483 struct btrfs_extent_ref *disk_ref,
484 struct btrfs_extent_ref *cpu_ref)
489 if (cpu_ref->objectid)
490 len = sizeof(*cpu_ref);
492 len = 2 * sizeof(u64);
493 ret = memcmp_extent_buffer(leaf, cpu_ref, (unsigned long)disk_ref,
498 static int noinline lookup_extent_backref(struct btrfs_trans_handle *trans,
499 struct btrfs_root *root,
500 struct btrfs_path *path, u64 bytenr,
502 u64 ref_generation, u64 owner,
503 u64 owner_offset, int del)
506 struct btrfs_key key;
507 struct btrfs_key found_key;
508 struct btrfs_extent_ref ref;
509 struct extent_buffer *leaf;
510 struct btrfs_extent_ref *disk_ref;
514 btrfs_set_stack_ref_root(&ref, root_objectid);
515 btrfs_set_stack_ref_generation(&ref, ref_generation);
516 btrfs_set_stack_ref_objectid(&ref, owner);
517 btrfs_set_stack_ref_offset(&ref, owner_offset);
519 hash = hash_extent_ref(root_objectid, ref_generation, owner,
522 key.objectid = bytenr;
523 key.type = BTRFS_EXTENT_REF_KEY;
526 ret = btrfs_search_slot(trans, root, &key, path,
530 leaf = path->nodes[0];
532 u32 nritems = btrfs_header_nritems(leaf);
533 if (path->slots[0] >= nritems) {
534 ret2 = btrfs_next_leaf(root, path);
537 leaf = path->nodes[0];
539 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
540 if (found_key.objectid != bytenr ||
541 found_key.type != BTRFS_EXTENT_REF_KEY)
543 key.offset = found_key.offset;
545 btrfs_release_path(root, path);
549 disk_ref = btrfs_item_ptr(path->nodes[0],
551 struct btrfs_extent_ref);
552 if (match_extent_ref(path->nodes[0], disk_ref, &ref)) {
556 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
557 key.offset = found_key.offset + 1;
558 btrfs_release_path(root, path);
565 * Back reference rules. Back refs have three main goals:
567 * 1) differentiate between all holders of references to an extent so that
568 * when a reference is dropped we can make sure it was a valid reference
569 * before freeing the extent.
571 * 2) Provide enough information to quickly find the holders of an extent
572 * if we notice a given block is corrupted or bad.
574 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
575 * maintenance. This is actually the same as #2, but with a slightly
576 * different use case.
578 * File extents can be referenced by:
580 * - multiple snapshots, subvolumes, or different generations in one subvol
581 * - different files inside a single subvolume (in theory, not implemented yet)
582 * - different offsets inside a file (bookend extents in file.c)
584 * The extent ref structure has fields for:
586 * - Objectid of the subvolume root
587 * - Generation number of the tree holding the reference
588 * - objectid of the file holding the reference
589 * - offset in the file corresponding to the key holding the reference
591 * When a file extent is allocated the fields are filled in:
592 * (root_key.objectid, trans->transid, inode objectid, offset in file)
594 * When a leaf is cow'd new references are added for every file extent found
595 * in the leaf. It looks the same as the create case, but trans->transid
596 * will be different when the block is cow'd.
598 * (root_key.objectid, trans->transid, inode objectid, offset in file)
600 * When a file extent is removed either during snapshot deletion or file
601 * truncation, the corresponding back reference is found
604 * (btrfs_header_owner(leaf), btrfs_header_generation(leaf),
605 * inode objectid, offset in file)
607 * Btree extents can be referenced by:
609 * - Different subvolumes
610 * - Different generations of the same subvolume
612 * Storing sufficient information for a full reverse mapping of a btree
613 * block would require storing the lowest key of the block in the backref,
614 * and it would require updating that lowest key either before write out or
615 * every time it changed. Instead, the objectid of the lowest key is stored
616 * along with the level of the tree block. This provides a hint
617 * about where in the btree the block can be found. Searches through the
618 * btree only need to look for a pointer to that block, so they stop one
619 * level higher than the level recorded in the backref.
621 * Some btrees do not do reference counting on their extents. These
622 * include the extent tree and the tree of tree roots. Backrefs for these
623 * trees always have a generation of zero.
625 * When a tree block is created, back references are inserted:
627 * (root->root_key.objectid, trans->transid or zero, level, lowest_key_objectid)
629 * When a tree block is cow'd in a reference counted root,
630 * new back references are added for all the blocks it points to.
631 * These are of the form (trans->transid will have increased since creation):
633 * (root->root_key.objectid, trans->transid, level, lowest_key_objectid)
635 * Because the lowest_key_objectid and the level are just hints
636 * they are not used when backrefs are deleted. When a backref is deleted:
638 * if backref was for a tree root:
639 * root_objectid = root->root_key.objectid
641 * root_objectid = btrfs_header_owner(parent)
643 * (root_objectid, btrfs_header_generation(parent) or zero, 0, 0)
645 * Back Reference Key hashing:
647 * Back references have four fields, each 64 bits long. Unfortunately,
648 * This is hashed into a single 64 bit number and placed into the key offset.
649 * The key objectid corresponds to the first byte in the extent, and the
650 * key type is set to BTRFS_EXTENT_REF_KEY
652 int btrfs_insert_extent_backref(struct btrfs_trans_handle *trans,
653 struct btrfs_root *root,
654 struct btrfs_path *path, u64 bytenr,
655 u64 root_objectid, u64 ref_generation,
656 u64 owner, u64 owner_offset)
659 struct btrfs_key key;
660 struct btrfs_extent_ref ref;
661 struct btrfs_extent_ref *disk_ref;
664 btrfs_set_stack_ref_root(&ref, root_objectid);
665 btrfs_set_stack_ref_generation(&ref, ref_generation);
666 btrfs_set_stack_ref_objectid(&ref, owner);
667 btrfs_set_stack_ref_offset(&ref, owner_offset);
669 hash = hash_extent_ref(root_objectid, ref_generation, owner,
672 key.objectid = bytenr;
673 key.type = BTRFS_EXTENT_REF_KEY;
675 ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(ref));
676 while (ret == -EEXIST) {
677 disk_ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
678 struct btrfs_extent_ref);
679 if (match_extent_ref(path->nodes[0], disk_ref, &ref))
682 btrfs_release_path(root, path);
683 ret = btrfs_insert_empty_item(trans, root, path, &key,
688 disk_ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
689 struct btrfs_extent_ref);
690 write_extent_buffer(path->nodes[0], &ref, (unsigned long)disk_ref,
692 btrfs_mark_buffer_dirty(path->nodes[0]);
694 btrfs_release_path(root, path);
698 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
699 struct btrfs_root *root,
700 u64 bytenr, u64 num_bytes,
701 u64 root_objectid, u64 ref_generation,
702 u64 owner, u64 owner_offset)
704 struct btrfs_path *path;
706 struct btrfs_key key;
707 struct extent_buffer *l;
708 struct btrfs_extent_item *item;
711 WARN_ON(num_bytes < root->sectorsize);
712 path = btrfs_alloc_path();
717 key.objectid = bytenr;
718 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
719 key.offset = num_bytes;
720 ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
729 item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
730 refs = btrfs_extent_refs(l, item);
731 btrfs_set_extent_refs(l, item, refs + 1);
732 btrfs_mark_buffer_dirty(path->nodes[0]);
734 btrfs_release_path(root->fs_info->extent_root, path);
737 ret = btrfs_insert_extent_backref(trans, root->fs_info->extent_root,
738 path, bytenr, root_objectid,
739 ref_generation, owner, owner_offset);
741 finish_current_insert(trans, root->fs_info->extent_root);
742 del_pending_extents(trans, root->fs_info->extent_root);
744 btrfs_free_path(path);
748 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
749 struct btrfs_root *root,
750 u64 bytenr, u64 num_bytes,
751 u64 root_objectid, u64 ref_generation,
752 u64 owner, u64 owner_offset)
756 mutex_lock(&root->fs_info->alloc_mutex);
757 ret = __btrfs_inc_extent_ref(trans, root, bytenr, num_bytes,
758 root_objectid, ref_generation,
759 owner, owner_offset);
760 mutex_unlock(&root->fs_info->alloc_mutex);
764 int btrfs_extent_post_op(struct btrfs_trans_handle *trans,
765 struct btrfs_root *root)
767 finish_current_insert(trans, root->fs_info->extent_root);
768 del_pending_extents(trans, root->fs_info->extent_root);
772 static int lookup_extent_ref(struct btrfs_trans_handle *trans,
773 struct btrfs_root *root, u64 bytenr,
774 u64 num_bytes, u32 *refs)
776 struct btrfs_path *path;
778 struct btrfs_key key;
779 struct extent_buffer *l;
780 struct btrfs_extent_item *item;
782 WARN_ON(num_bytes < root->sectorsize);
783 path = btrfs_alloc_path();
785 key.objectid = bytenr;
786 key.offset = num_bytes;
787 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
788 ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
793 btrfs_print_leaf(root, path->nodes[0]);
794 printk("failed to find block number %Lu\n", bytenr);
798 item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
799 *refs = btrfs_extent_refs(l, item);
801 btrfs_free_path(path);
805 u32 btrfs_count_snapshots_in_path(struct btrfs_root *root,
806 struct btrfs_path *count_path,
810 struct btrfs_root *extent_root = root->fs_info->extent_root;
811 struct btrfs_path *path;
815 u64 root_objectid = root->root_key.objectid;
821 struct btrfs_key key;
822 struct btrfs_key found_key;
823 struct extent_buffer *l;
824 struct btrfs_extent_item *item;
825 struct btrfs_extent_ref *ref_item;
828 /* FIXME, needs locking */
831 mutex_lock(&root->fs_info->alloc_mutex);
832 path = btrfs_alloc_path();
835 bytenr = first_extent;
837 bytenr = count_path->nodes[level]->start;
840 key.objectid = bytenr;
843 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
844 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
850 btrfs_item_key_to_cpu(l, &found_key, path->slots[0]);
852 if (found_key.objectid != bytenr ||
853 found_key.type != BTRFS_EXTENT_ITEM_KEY) {
857 item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
858 extent_refs = btrfs_extent_refs(l, item);
861 nritems = btrfs_header_nritems(l);
862 if (path->slots[0] >= nritems) {
863 ret = btrfs_next_leaf(extent_root, path);
868 btrfs_item_key_to_cpu(l, &found_key, path->slots[0]);
869 if (found_key.objectid != bytenr)
872 if (found_key.type != BTRFS_EXTENT_REF_KEY) {
878 ref_item = btrfs_item_ptr(l, path->slots[0],
879 struct btrfs_extent_ref);
880 found_objectid = btrfs_ref_root(l, ref_item);
882 if (found_objectid != root_objectid) {
887 found_owner = btrfs_ref_objectid(l, ref_item);
888 if (found_owner != expected_owner) {
893 * nasty. we don't count a reference held by
894 * the running transaction. This allows nodatacow
895 * to avoid cow most of the time
897 if (found_owner >= BTRFS_FIRST_FREE_OBJECTID &&
898 btrfs_ref_generation(l, ref_item) ==
899 root->fs_info->generation) {
907 * if there is more than one reference against a data extent,
908 * we have to assume the other ref is another snapshot
910 if (level == -1 && extent_refs > 1) {
914 if (cur_count == 0) {
918 if (level >= 0 && root->node == count_path->nodes[level])
921 btrfs_release_path(root, path);
925 btrfs_free_path(path);
926 mutex_unlock(&root->fs_info->alloc_mutex);
930 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
931 struct extent_buffer *buf, int cache_ref)
935 struct btrfs_key key;
936 struct btrfs_file_extent_item *fi;
941 int nr_file_extents = 0;
946 level = btrfs_header_level(buf);
947 nritems = btrfs_header_nritems(buf);
948 for (i = 0; i < nritems; i++) {
952 btrfs_item_key_to_cpu(buf, &key, i);
953 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
955 fi = btrfs_item_ptr(buf, i,
956 struct btrfs_file_extent_item);
957 if (btrfs_file_extent_type(buf, fi) ==
958 BTRFS_FILE_EXTENT_INLINE)
960 disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
961 if (disk_bytenr == 0)
964 if (buf != root->commit_root)
967 mutex_lock(&root->fs_info->alloc_mutex);
968 ret = __btrfs_inc_extent_ref(trans, root, disk_bytenr,
969 btrfs_file_extent_disk_num_bytes(buf, fi),
970 root->root_key.objectid, trans->transid,
971 key.objectid, key.offset);
972 mutex_unlock(&root->fs_info->alloc_mutex);
979 bytenr = btrfs_node_blockptr(buf, i);
980 btrfs_node_key_to_cpu(buf, &key, i);
982 mutex_lock(&root->fs_info->alloc_mutex);
983 ret = __btrfs_inc_extent_ref(trans, root, bytenr,
984 btrfs_level_size(root, level - 1),
985 root->root_key.objectid,
987 level - 1, key.objectid);
988 mutex_unlock(&root->fs_info->alloc_mutex);
996 /* cache orignal leaf block's references */
997 if (level == 0 && cache_ref && buf != root->commit_root) {
998 struct btrfs_leaf_ref *ref;
999 struct btrfs_extent_info *info;
1001 ref = btrfs_alloc_leaf_ref(nr_file_extents);
1007 ref->bytenr = buf->start;
1008 ref->owner = btrfs_header_owner(buf);
1009 ref->generation = btrfs_header_generation(buf);
1010 ref->nritems = nr_file_extents;
1011 info = ref->extents;
1013 for (i = 0; nr_file_extents > 0 && i < nritems; i++) {
1015 btrfs_item_key_to_cpu(buf, &key, i);
1016 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
1018 fi = btrfs_item_ptr(buf, i,
1019 struct btrfs_file_extent_item);
1020 if (btrfs_file_extent_type(buf, fi) ==
1021 BTRFS_FILE_EXTENT_INLINE)
1023 disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
1024 if (disk_bytenr == 0)
1027 info->bytenr = disk_bytenr;
1029 btrfs_file_extent_disk_num_bytes(buf, fi);
1030 info->objectid = key.objectid;
1031 info->offset = key.offset;
1035 BUG_ON(!root->ref_tree);
1036 ret = btrfs_add_leaf_ref(root, ref);
1038 btrfs_free_leaf_ref(ref);
1045 for (i =0; i < faili; i++) {
1048 btrfs_item_key_to_cpu(buf, &key, i);
1049 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
1051 fi = btrfs_item_ptr(buf, i,
1052 struct btrfs_file_extent_item);
1053 if (btrfs_file_extent_type(buf, fi) ==
1054 BTRFS_FILE_EXTENT_INLINE)
1056 disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
1057 if (disk_bytenr == 0)
1059 err = btrfs_free_extent(trans, root, disk_bytenr,
1060 btrfs_file_extent_disk_num_bytes(buf,
1064 bytenr = btrfs_node_blockptr(buf, i);
1065 err = btrfs_free_extent(trans, root, bytenr,
1066 btrfs_level_size(root, level - 1), 0);
1074 static int write_one_cache_group(struct btrfs_trans_handle *trans,
1075 struct btrfs_root *root,
1076 struct btrfs_path *path,
1077 struct btrfs_block_group_cache *cache)
1081 struct btrfs_root *extent_root = root->fs_info->extent_root;
1083 struct extent_buffer *leaf;
1085 ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
1090 leaf = path->nodes[0];
1091 bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
1092 write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
1093 btrfs_mark_buffer_dirty(leaf);
1094 btrfs_release_path(extent_root, path);
1096 finish_current_insert(trans, extent_root);
1097 pending_ret = del_pending_extents(trans, extent_root);
1106 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
1107 struct btrfs_root *root)
1109 struct extent_io_tree *block_group_cache;
1110 struct btrfs_block_group_cache *cache;
1114 struct btrfs_path *path;
1120 block_group_cache = &root->fs_info->block_group_cache;
1121 path = btrfs_alloc_path();
1125 mutex_lock(&root->fs_info->alloc_mutex);
1127 ret = find_first_extent_bit(block_group_cache, last,
1128 &start, &end, BLOCK_GROUP_DIRTY);
1133 ret = get_state_private(block_group_cache, start, &ptr);
1136 cache = (struct btrfs_block_group_cache *)(unsigned long)ptr;
1137 err = write_one_cache_group(trans, root,
1140 * if we fail to write the cache group, we want
1141 * to keep it marked dirty in hopes that a later
1148 clear_extent_bits(block_group_cache, start, end,
1149 BLOCK_GROUP_DIRTY, GFP_NOFS);
1151 btrfs_free_path(path);
1152 mutex_unlock(&root->fs_info->alloc_mutex);
1156 static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
1159 struct list_head *head = &info->space_info;
1160 struct list_head *cur;
1161 struct btrfs_space_info *found;
1162 list_for_each(cur, head) {
1163 found = list_entry(cur, struct btrfs_space_info, list);
1164 if (found->flags == flags)
1171 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
1172 u64 total_bytes, u64 bytes_used,
1173 struct btrfs_space_info **space_info)
1175 struct btrfs_space_info *found;
1177 found = __find_space_info(info, flags);
1179 found->total_bytes += total_bytes;
1180 found->bytes_used += bytes_used;
1182 WARN_ON(found->total_bytes < found->bytes_used);
1183 *space_info = found;
1186 found = kmalloc(sizeof(*found), GFP_NOFS);
1190 list_add(&found->list, &info->space_info);
1191 found->flags = flags;
1192 found->total_bytes = total_bytes;
1193 found->bytes_used = bytes_used;
1194 found->bytes_pinned = 0;
1196 found->force_alloc = 0;
1197 *space_info = found;
1201 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
1203 u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
1204 BTRFS_BLOCK_GROUP_RAID1 |
1205 BTRFS_BLOCK_GROUP_RAID10 |
1206 BTRFS_BLOCK_GROUP_DUP);
1208 if (flags & BTRFS_BLOCK_GROUP_DATA)
1209 fs_info->avail_data_alloc_bits |= extra_flags;
1210 if (flags & BTRFS_BLOCK_GROUP_METADATA)
1211 fs_info->avail_metadata_alloc_bits |= extra_flags;
1212 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
1213 fs_info->avail_system_alloc_bits |= extra_flags;
1217 static u64 reduce_alloc_profile(struct btrfs_root *root, u64 flags)
1219 u64 num_devices = root->fs_info->fs_devices->num_devices;
1221 if (num_devices == 1)
1222 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
1223 if (num_devices < 4)
1224 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
1226 if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
1227 (flags & (BTRFS_BLOCK_GROUP_RAID1 |
1228 BTRFS_BLOCK_GROUP_RAID10))) {
1229 flags &= ~BTRFS_BLOCK_GROUP_DUP;
1232 if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
1233 (flags & BTRFS_BLOCK_GROUP_RAID10)) {
1234 flags &= ~BTRFS_BLOCK_GROUP_RAID1;
1237 if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
1238 ((flags & BTRFS_BLOCK_GROUP_RAID1) |
1239 (flags & BTRFS_BLOCK_GROUP_RAID10) |
1240 (flags & BTRFS_BLOCK_GROUP_DUP)))
1241 flags &= ~BTRFS_BLOCK_GROUP_RAID0;
1245 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
1246 struct btrfs_root *extent_root, u64 alloc_bytes,
1247 u64 flags, int force)
1249 struct btrfs_space_info *space_info;
1255 flags = reduce_alloc_profile(extent_root, flags);
1257 space_info = __find_space_info(extent_root->fs_info, flags);
1259 ret = update_space_info(extent_root->fs_info, flags,
1263 BUG_ON(!space_info);
1265 if (space_info->force_alloc) {
1267 space_info->force_alloc = 0;
1269 if (space_info->full)
1272 thresh = div_factor(space_info->total_bytes, 6);
1274 (space_info->bytes_used + space_info->bytes_pinned + alloc_bytes) <
1278 mutex_lock(&extent_root->fs_info->chunk_mutex);
1279 ret = btrfs_alloc_chunk(trans, extent_root, &start, &num_bytes, flags);
1280 if (ret == -ENOSPC) {
1281 printk("space info full %Lu\n", flags);
1282 space_info->full = 1;
1287 ret = btrfs_make_block_group(trans, extent_root, 0, flags,
1288 BTRFS_FIRST_CHUNK_TREE_OBJECTID, start, num_bytes);
1291 mutex_unlock(&extent_root->fs_info->chunk_mutex);
1296 static int update_block_group(struct btrfs_trans_handle *trans,
1297 struct btrfs_root *root,
1298 u64 bytenr, u64 num_bytes, int alloc,
1301 struct btrfs_block_group_cache *cache;
1302 struct btrfs_fs_info *info = root->fs_info;
1303 u64 total = num_bytes;
1309 WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
1311 cache = btrfs_lookup_block_group(info, bytenr);
1315 byte_in_group = bytenr - cache->key.objectid;
1316 WARN_ON(byte_in_group > cache->key.offset);
1317 start = cache->key.objectid;
1318 end = start + cache->key.offset - 1;
1319 set_extent_bits(&info->block_group_cache, start, end,
1320 BLOCK_GROUP_DIRTY, GFP_NOFS);
1322 spin_lock(&cache->lock);
1323 old_val = btrfs_block_group_used(&cache->item);
1324 num_bytes = min(total, cache->key.offset - byte_in_group);
1326 old_val += num_bytes;
1327 cache->space_info->bytes_used += num_bytes;
1328 btrfs_set_block_group_used(&cache->item, old_val);
1329 spin_unlock(&cache->lock);
1331 old_val -= num_bytes;
1332 cache->space_info->bytes_used -= num_bytes;
1333 btrfs_set_block_group_used(&cache->item, old_val);
1334 spin_unlock(&cache->lock);
1336 set_extent_dirty(&info->free_space_cache,
1337 bytenr, bytenr + num_bytes - 1,
1342 bytenr += num_bytes;
1347 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
1352 ret = find_first_extent_bit(&root->fs_info->block_group_cache,
1353 search_start, &start, &end,
1354 BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA |
1355 BLOCK_GROUP_SYSTEM);
1362 static int update_pinned_extents(struct btrfs_root *root,
1363 u64 bytenr, u64 num, int pin)
1366 struct btrfs_block_group_cache *cache;
1367 struct btrfs_fs_info *fs_info = root->fs_info;
1369 WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
1371 set_extent_dirty(&fs_info->pinned_extents,
1372 bytenr, bytenr + num - 1, GFP_NOFS);
1374 clear_extent_dirty(&fs_info->pinned_extents,
1375 bytenr, bytenr + num - 1, GFP_NOFS);
1378 cache = btrfs_lookup_block_group(fs_info, bytenr);
1380 u64 first = first_logical_byte(root, bytenr);
1381 WARN_ON(first < bytenr);
1382 len = min(first - bytenr, num);
1384 len = min(num, cache->key.offset -
1385 (bytenr - cache->key.objectid));
1389 spin_lock(&cache->lock);
1390 cache->pinned += len;
1391 cache->space_info->bytes_pinned += len;
1392 spin_unlock(&cache->lock);
1394 fs_info->total_pinned += len;
1397 spin_lock(&cache->lock);
1398 cache->pinned -= len;
1399 cache->space_info->bytes_pinned -= len;
1400 spin_unlock(&cache->lock);
1402 fs_info->total_pinned -= len;
1410 int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy)
1415 struct extent_io_tree *pinned_extents = &root->fs_info->pinned_extents;
1419 ret = find_first_extent_bit(pinned_extents, last,
1420 &start, &end, EXTENT_DIRTY);
1423 set_extent_dirty(copy, start, end, GFP_NOFS);
1429 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
1430 struct btrfs_root *root,
1431 struct extent_io_tree *unpin)
1436 struct extent_io_tree *free_space_cache;
1437 free_space_cache = &root->fs_info->free_space_cache;
1439 mutex_lock(&root->fs_info->alloc_mutex);
1441 ret = find_first_extent_bit(unpin, 0, &start, &end,
1445 update_pinned_extents(root, start, end + 1 - start, 0);
1446 clear_extent_dirty(unpin, start, end, GFP_NOFS);
1447 set_extent_dirty(free_space_cache, start, end, GFP_NOFS);
1448 if (need_resched()) {
1449 mutex_unlock(&root->fs_info->alloc_mutex);
1451 mutex_lock(&root->fs_info->alloc_mutex);
1454 mutex_unlock(&root->fs_info->alloc_mutex);
1458 static int finish_current_insert(struct btrfs_trans_handle *trans,
1459 struct btrfs_root *extent_root)
1463 struct btrfs_fs_info *info = extent_root->fs_info;
1464 struct extent_buffer *eb;
1465 struct btrfs_path *path;
1466 struct btrfs_key ins;
1467 struct btrfs_disk_key first;
1468 struct btrfs_extent_item extent_item;
1473 WARN_ON(!mutex_is_locked(&extent_root->fs_info->alloc_mutex));
1474 btrfs_set_stack_extent_refs(&extent_item, 1);
1475 btrfs_set_key_type(&ins, BTRFS_EXTENT_ITEM_KEY);
1476 path = btrfs_alloc_path();
1479 ret = find_first_extent_bit(&info->extent_ins, 0, &start,
1480 &end, EXTENT_LOCKED);
1484 ins.objectid = start;
1485 ins.offset = end + 1 - start;
1486 err = btrfs_insert_item(trans, extent_root, &ins,
1487 &extent_item, sizeof(extent_item));
1488 clear_extent_bits(&info->extent_ins, start, end, EXTENT_LOCKED,
1491 eb = btrfs_find_tree_block(extent_root, ins.objectid,
1494 if (!btrfs_buffer_uptodate(eb, trans->transid)) {
1495 mutex_unlock(&extent_root->fs_info->alloc_mutex);
1496 btrfs_read_buffer(eb, trans->transid);
1497 mutex_lock(&extent_root->fs_info->alloc_mutex);
1500 btrfs_tree_lock(eb);
1501 level = btrfs_header_level(eb);
1503 btrfs_item_key(eb, &first, 0);
1505 btrfs_node_key(eb, &first, 0);
1507 btrfs_tree_unlock(eb);
1508 free_extent_buffer(eb);
1510 * the first key is just a hint, so the race we've created
1511 * against reading it is fine
1513 err = btrfs_insert_extent_backref(trans, extent_root, path,
1514 start, extent_root->root_key.objectid,
1516 btrfs_disk_key_objectid(&first));
1518 if (need_resched()) {
1519 mutex_unlock(&extent_root->fs_info->alloc_mutex);
1521 mutex_lock(&extent_root->fs_info->alloc_mutex);
1524 btrfs_free_path(path);
1528 static int pin_down_bytes(struct btrfs_root *root, u64 bytenr, u32 num_bytes,
1533 WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
1535 struct extent_buffer *buf;
1536 buf = btrfs_find_tree_block(root, bytenr, num_bytes);
1538 if (btrfs_buffer_uptodate(buf, 0) &&
1539 btrfs_try_tree_lock(buf)) {
1541 root->fs_info->running_transaction->transid;
1542 u64 header_transid =
1543 btrfs_header_generation(buf);
1544 if (header_transid == transid &&
1545 !btrfs_header_flag(buf,
1546 BTRFS_HEADER_FLAG_WRITTEN)) {
1547 clean_tree_block(NULL, root, buf);
1548 btrfs_tree_unlock(buf);
1549 free_extent_buffer(buf);
1552 btrfs_tree_unlock(buf);
1554 free_extent_buffer(buf);
1556 update_pinned_extents(root, bytenr, num_bytes, 1);
1558 set_extent_bits(&root->fs_info->pending_del,
1559 bytenr, bytenr + num_bytes - 1,
1560 EXTENT_LOCKED, GFP_NOFS);
1567 * remove an extent from the root, returns 0 on success
1569 static int __free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
1570 *root, u64 bytenr, u64 num_bytes,
1571 u64 root_objectid, u64 ref_generation,
1572 u64 owner_objectid, u64 owner_offset, int pin,
1575 struct btrfs_path *path;
1576 struct btrfs_key key;
1577 struct btrfs_fs_info *info = root->fs_info;
1578 struct btrfs_root *extent_root = info->extent_root;
1579 struct extent_buffer *leaf;
1581 int extent_slot = 0;
1582 int found_extent = 0;
1584 struct btrfs_extent_item *ei;
1587 WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
1588 key.objectid = bytenr;
1589 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
1590 key.offset = num_bytes;
1591 path = btrfs_alloc_path();
1596 ret = lookup_extent_backref(trans, extent_root, path,
1597 bytenr, root_objectid,
1599 owner_objectid, owner_offset, 1);
1601 struct btrfs_key found_key;
1602 extent_slot = path->slots[0];
1603 while(extent_slot > 0) {
1605 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1607 if (found_key.objectid != bytenr)
1609 if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
1610 found_key.offset == num_bytes) {
1614 if (path->slots[0] - extent_slot > 5)
1618 ret = btrfs_del_item(trans, extent_root, path);
1620 btrfs_print_leaf(extent_root, path->nodes[0]);
1622 printk("Unable to find ref byte nr %Lu root %Lu "
1623 " gen %Lu owner %Lu offset %Lu\n", bytenr,
1624 root_objectid, ref_generation, owner_objectid,
1627 if (!found_extent) {
1628 btrfs_release_path(extent_root, path);
1629 ret = btrfs_search_slot(trans, extent_root, &key, path, -1, 1);
1633 extent_slot = path->slots[0];
1636 leaf = path->nodes[0];
1637 ei = btrfs_item_ptr(leaf, extent_slot,
1638 struct btrfs_extent_item);
1639 refs = btrfs_extent_refs(leaf, ei);
1642 btrfs_set_extent_refs(leaf, ei, refs);
1644 btrfs_mark_buffer_dirty(leaf);
1646 if (refs == 0 && found_extent && path->slots[0] == extent_slot + 1) {
1647 /* if the back ref and the extent are next to each other
1648 * they get deleted below in one shot
1650 path->slots[0] = extent_slot;
1652 } else if (found_extent) {
1653 /* otherwise delete the extent back ref */
1654 ret = btrfs_del_item(trans, extent_root, path);
1656 /* if refs are 0, we need to setup the path for deletion */
1658 btrfs_release_path(extent_root, path);
1659 ret = btrfs_search_slot(trans, extent_root, &key, path,
1672 ret = pin_down_bytes(root, bytenr, num_bytes, 0);
1678 /* block accounting for super block */
1679 spin_lock_irq(&info->delalloc_lock);
1680 super_used = btrfs_super_bytes_used(&info->super_copy);
1681 btrfs_set_super_bytes_used(&info->super_copy,
1682 super_used - num_bytes);
1683 spin_unlock_irq(&info->delalloc_lock);
1685 /* block accounting for root item */
1686 root_used = btrfs_root_used(&root->root_item);
1687 btrfs_set_root_used(&root->root_item,
1688 root_used - num_bytes);
1689 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
1694 ret = update_block_group(trans, root, bytenr, num_bytes, 0,
1698 btrfs_free_path(path);
1699 finish_current_insert(trans, extent_root);
1704 * find all the blocks marked as pending in the radix tree and remove
1705 * them from the extent map
1707 static int del_pending_extents(struct btrfs_trans_handle *trans, struct
1708 btrfs_root *extent_root)
1714 struct extent_io_tree *pending_del;
1715 struct extent_io_tree *pinned_extents;
1717 WARN_ON(!mutex_is_locked(&extent_root->fs_info->alloc_mutex));
1718 pending_del = &extent_root->fs_info->pending_del;
1719 pinned_extents = &extent_root->fs_info->pinned_extents;
1722 ret = find_first_extent_bit(pending_del, 0, &start, &end,
1726 clear_extent_bits(pending_del, start, end, EXTENT_LOCKED,
1728 if (!test_range_bit(&extent_root->fs_info->extent_ins,
1729 start, end, EXTENT_LOCKED, 0)) {
1730 update_pinned_extents(extent_root, start,
1731 end + 1 - start, 1);
1732 ret = __free_extent(trans, extent_root,
1733 start, end + 1 - start,
1734 extent_root->root_key.objectid,
1737 clear_extent_bits(&extent_root->fs_info->extent_ins,
1738 start, end, EXTENT_LOCKED, GFP_NOFS);
1743 if (need_resched()) {
1744 mutex_unlock(&extent_root->fs_info->alloc_mutex);
1746 mutex_lock(&extent_root->fs_info->alloc_mutex);
1753 * remove an extent from the root, returns 0 on success
1755 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
1756 struct btrfs_root *root, u64 bytenr,
1757 u64 num_bytes, u64 root_objectid,
1758 u64 ref_generation, u64 owner_objectid,
1759 u64 owner_offset, int pin)
1761 struct btrfs_root *extent_root = root->fs_info->extent_root;
1765 WARN_ON(num_bytes < root->sectorsize);
1766 if (!root->ref_cows)
1769 if (root == extent_root) {
1770 pin_down_bytes(root, bytenr, num_bytes, 1);
1773 ret = __free_extent(trans, root, bytenr, num_bytes, root_objectid,
1774 ref_generation, owner_objectid, owner_offset,
1777 finish_current_insert(trans, root->fs_info->extent_root);
1778 pending_ret = del_pending_extents(trans, root->fs_info->extent_root);
1779 return ret ? ret : pending_ret;
1782 int btrfs_free_extent(struct btrfs_trans_handle *trans,
1783 struct btrfs_root *root, u64 bytenr,
1784 u64 num_bytes, u64 root_objectid,
1785 u64 ref_generation, u64 owner_objectid,
1786 u64 owner_offset, int pin)
1790 maybe_lock_mutex(root);
1791 ret = __btrfs_free_extent(trans, root, bytenr, num_bytes,
1792 root_objectid, ref_generation,
1793 owner_objectid, owner_offset, pin);
1794 maybe_unlock_mutex(root);
1798 static u64 stripe_align(struct btrfs_root *root, u64 val)
1800 u64 mask = ((u64)root->stripesize - 1);
1801 u64 ret = (val + mask) & ~mask;
1806 * walks the btree of allocated extents and find a hole of a given size.
1807 * The key ins is changed to record the hole:
1808 * ins->objectid == block start
1809 * ins->flags = BTRFS_EXTENT_ITEM_KEY
1810 * ins->offset == number of blocks
1811 * Any available blocks before search_start are skipped.
1813 static int noinline find_free_extent(struct btrfs_trans_handle *trans,
1814 struct btrfs_root *orig_root,
1815 u64 num_bytes, u64 empty_size,
1816 u64 search_start, u64 search_end,
1817 u64 hint_byte, struct btrfs_key *ins,
1818 u64 exclude_start, u64 exclude_nr,
1822 u64 orig_search_start;
1823 struct btrfs_root * root = orig_root->fs_info->extent_root;
1824 struct btrfs_fs_info *info = root->fs_info;
1825 u64 total_needed = num_bytes;
1826 u64 *last_ptr = NULL;
1827 struct btrfs_block_group_cache *block_group;
1830 int chunk_alloc_done = 0;
1831 int empty_cluster = 2 * 1024 * 1024;
1832 int allowed_chunk_alloc = 0;
1834 WARN_ON(num_bytes < root->sectorsize);
1835 btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
1837 if (orig_root->ref_cows || empty_size)
1838 allowed_chunk_alloc = 1;
1840 if (data & BTRFS_BLOCK_GROUP_METADATA) {
1841 last_ptr = &root->fs_info->last_alloc;
1842 empty_cluster = 256 * 1024;
1845 if ((data & BTRFS_BLOCK_GROUP_DATA) && btrfs_test_opt(root, SSD)) {
1846 last_ptr = &root->fs_info->last_data_alloc;
1851 hint_byte = *last_ptr;
1853 empty_size += empty_cluster;
1857 search_start = max(search_start, first_logical_byte(root, 0));
1858 orig_search_start = search_start;
1860 if (search_end == (u64)-1)
1861 search_end = btrfs_super_total_bytes(&info->super_copy);
1864 block_group = btrfs_lookup_first_block_group(info, hint_byte);
1866 hint_byte = search_start;
1867 block_group = btrfs_find_block_group(root, block_group,
1868 hint_byte, data, 1);
1869 if (last_ptr && *last_ptr == 0 && block_group)
1870 hint_byte = block_group->key.objectid;
1872 block_group = btrfs_find_block_group(root,
1874 search_start, data, 1);
1876 search_start = max(search_start, hint_byte);
1878 total_needed += empty_size;
1882 block_group = btrfs_lookup_first_block_group(info,
1885 block_group = btrfs_lookup_first_block_group(info,
1888 if (full_scan && !chunk_alloc_done) {
1889 if (allowed_chunk_alloc) {
1890 do_chunk_alloc(trans, root,
1891 num_bytes + 2 * 1024 * 1024, data, 1);
1892 allowed_chunk_alloc = 0;
1893 } else if (block_group && block_group_bits(block_group, data)) {
1894 block_group->space_info->force_alloc = 1;
1896 chunk_alloc_done = 1;
1898 ret = find_search_start(root, &block_group, &search_start,
1899 total_needed, data);
1900 if (ret == -ENOSPC && last_ptr && *last_ptr) {
1902 block_group = btrfs_lookup_first_block_group(info,
1904 search_start = orig_search_start;
1905 ret = find_search_start(root, &block_group, &search_start,
1906 total_needed, data);
1913 if (last_ptr && *last_ptr && search_start != *last_ptr) {
1916 empty_size += empty_cluster;
1917 total_needed += empty_size;
1919 block_group = btrfs_lookup_first_block_group(info,
1921 search_start = orig_search_start;
1922 ret = find_search_start(root, &block_group,
1923 &search_start, total_needed, data);
1930 search_start = stripe_align(root, search_start);
1931 ins->objectid = search_start;
1932 ins->offset = num_bytes;
1934 if (ins->objectid + num_bytes >= search_end)
1937 if (ins->objectid + num_bytes >
1938 block_group->key.objectid + block_group->key.offset) {
1939 search_start = block_group->key.objectid +
1940 block_group->key.offset;
1944 if (test_range_bit(&info->extent_ins, ins->objectid,
1945 ins->objectid + num_bytes -1, EXTENT_LOCKED, 0)) {
1946 search_start = ins->objectid + num_bytes;
1950 if (test_range_bit(&info->pinned_extents, ins->objectid,
1951 ins->objectid + num_bytes -1, EXTENT_DIRTY, 0)) {
1952 search_start = ins->objectid + num_bytes;
1956 if (exclude_nr > 0 && (ins->objectid + num_bytes > exclude_start &&
1957 ins->objectid < exclude_start + exclude_nr)) {
1958 search_start = exclude_start + exclude_nr;
1962 if (!(data & BTRFS_BLOCK_GROUP_DATA)) {
1963 block_group = btrfs_lookup_block_group(info, ins->objectid);
1965 trans->block_group = block_group;
1967 ins->offset = num_bytes;
1969 *last_ptr = ins->objectid + ins->offset;
1971 btrfs_super_total_bytes(&root->fs_info->super_copy)) {
1978 if (search_start + num_bytes >= search_end) {
1980 search_start = orig_search_start;
1987 total_needed -= empty_size;
1992 block_group = btrfs_lookup_first_block_group(info, search_start);
1994 block_group = btrfs_find_block_group(root, block_group,
1995 search_start, data, 0);
2002 static int __btrfs_reserve_extent(struct btrfs_trans_handle *trans,
2003 struct btrfs_root *root,
2004 u64 num_bytes, u64 min_alloc_size,
2005 u64 empty_size, u64 hint_byte,
2006 u64 search_end, struct btrfs_key *ins,
2010 u64 search_start = 0;
2012 struct btrfs_fs_info *info = root->fs_info;
2015 alloc_profile = info->avail_data_alloc_bits &
2016 info->data_alloc_profile;
2017 data = BTRFS_BLOCK_GROUP_DATA | alloc_profile;
2018 } else if (root == root->fs_info->chunk_root) {
2019 alloc_profile = info->avail_system_alloc_bits &
2020 info->system_alloc_profile;
2021 data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile;
2023 alloc_profile = info->avail_metadata_alloc_bits &
2024 info->metadata_alloc_profile;
2025 data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;
2028 data = reduce_alloc_profile(root, data);
2030 * the only place that sets empty_size is btrfs_realloc_node, which
2031 * is not called recursively on allocations
2033 if (empty_size || root->ref_cows) {
2034 if (!(data & BTRFS_BLOCK_GROUP_METADATA)) {
2035 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
2037 BTRFS_BLOCK_GROUP_METADATA |
2038 (info->metadata_alloc_profile &
2039 info->avail_metadata_alloc_bits), 0);
2042 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
2043 num_bytes + 2 * 1024 * 1024, data, 0);
2047 WARN_ON(num_bytes < root->sectorsize);
2048 ret = find_free_extent(trans, root, num_bytes, empty_size,
2049 search_start, search_end, hint_byte, ins,
2050 trans->alloc_exclude_start,
2051 trans->alloc_exclude_nr, data);
2053 if (ret == -ENOSPC && num_bytes > min_alloc_size) {
2054 num_bytes = num_bytes >> 1;
2055 num_bytes = max(num_bytes, min_alloc_size);
2056 do_chunk_alloc(trans, root->fs_info->extent_root,
2057 num_bytes, data, 1);
2061 printk("allocation failed flags %Lu\n", data);
2064 clear_extent_dirty(&root->fs_info->free_space_cache,
2065 ins->objectid, ins->objectid + ins->offset - 1,
2070 int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
2071 struct btrfs_root *root,
2072 u64 num_bytes, u64 min_alloc_size,
2073 u64 empty_size, u64 hint_byte,
2074 u64 search_end, struct btrfs_key *ins,
2078 maybe_lock_mutex(root);
2079 ret = __btrfs_reserve_extent(trans, root, num_bytes, min_alloc_size,
2080 empty_size, hint_byte, search_end, ins,
2082 maybe_unlock_mutex(root);
2086 static int __btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
2087 struct btrfs_root *root,
2088 u64 root_objectid, u64 ref_generation,
2089 u64 owner, u64 owner_offset,
2090 struct btrfs_key *ins)
2096 u64 num_bytes = ins->offset;
2098 struct btrfs_fs_info *info = root->fs_info;
2099 struct btrfs_root *extent_root = info->extent_root;
2100 struct btrfs_extent_item *extent_item;
2101 struct btrfs_extent_ref *ref;
2102 struct btrfs_path *path;
2103 struct btrfs_key keys[2];
2105 /* block accounting for super block */
2106 spin_lock_irq(&info->delalloc_lock);
2107 super_used = btrfs_super_bytes_used(&info->super_copy);
2108 btrfs_set_super_bytes_used(&info->super_copy, super_used + num_bytes);
2109 spin_unlock_irq(&info->delalloc_lock);
2111 /* block accounting for root item */
2112 root_used = btrfs_root_used(&root->root_item);
2113 btrfs_set_root_used(&root->root_item, root_used + num_bytes);
2115 if (root == extent_root) {
2116 set_extent_bits(&root->fs_info->extent_ins, ins->objectid,
2117 ins->objectid + ins->offset - 1,
2118 EXTENT_LOCKED, GFP_NOFS);
2122 memcpy(&keys[0], ins, sizeof(*ins));
2123 keys[1].offset = hash_extent_ref(root_objectid, ref_generation,
2124 owner, owner_offset);
2125 keys[1].objectid = ins->objectid;
2126 keys[1].type = BTRFS_EXTENT_REF_KEY;
2127 sizes[0] = sizeof(*extent_item);
2128 sizes[1] = sizeof(*ref);
2130 path = btrfs_alloc_path();
2133 ret = btrfs_insert_empty_items(trans, extent_root, path, keys,
2137 extent_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2138 struct btrfs_extent_item);
2139 btrfs_set_extent_refs(path->nodes[0], extent_item, 1);
2140 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
2141 struct btrfs_extent_ref);
2143 btrfs_set_ref_root(path->nodes[0], ref, root_objectid);
2144 btrfs_set_ref_generation(path->nodes[0], ref, ref_generation);
2145 btrfs_set_ref_objectid(path->nodes[0], ref, owner);
2146 btrfs_set_ref_offset(path->nodes[0], ref, owner_offset);
2148 btrfs_mark_buffer_dirty(path->nodes[0]);
2150 trans->alloc_exclude_start = 0;
2151 trans->alloc_exclude_nr = 0;
2152 btrfs_free_path(path);
2153 finish_current_insert(trans, extent_root);
2154 pending_ret = del_pending_extents(trans, extent_root);
2164 ret = update_block_group(trans, root, ins->objectid, ins->offset, 1, 0);
2166 printk("update block group failed for %Lu %Lu\n",
2167 ins->objectid, ins->offset);
2174 int btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
2175 struct btrfs_root *root,
2176 u64 root_objectid, u64 ref_generation,
2177 u64 owner, u64 owner_offset,
2178 struct btrfs_key *ins)
2181 maybe_lock_mutex(root);
2182 ret = __btrfs_alloc_reserved_extent(trans, root, root_objectid,
2183 ref_generation, owner,
2185 maybe_unlock_mutex(root);
2189 * finds a free extent and does all the dirty work required for allocation
2190 * returns the key for the extent through ins, and a tree buffer for
2191 * the first block of the extent through buf.
2193 * returns 0 if everything worked, non-zero otherwise.
2195 int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
2196 struct btrfs_root *root,
2197 u64 num_bytes, u64 min_alloc_size,
2198 u64 root_objectid, u64 ref_generation,
2199 u64 owner, u64 owner_offset,
2200 u64 empty_size, u64 hint_byte,
2201 u64 search_end, struct btrfs_key *ins, u64 data)
2205 maybe_lock_mutex(root);
2207 ret = __btrfs_reserve_extent(trans, root, num_bytes,
2208 min_alloc_size, empty_size, hint_byte,
2209 search_end, ins, data);
2211 ret = __btrfs_alloc_reserved_extent(trans, root, root_objectid,
2212 ref_generation, owner,
2216 maybe_unlock_mutex(root);
2220 * helper function to allocate a block for a given tree
2221 * returns the tree buffer or NULL.
2223 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
2224 struct btrfs_root *root,
2233 struct btrfs_key ins;
2235 struct extent_buffer *buf;
2237 ret = btrfs_alloc_extent(trans, root, blocksize, blocksize,
2238 root_objectid, ref_generation,
2239 level, first_objectid, empty_size, hint,
2243 return ERR_PTR(ret);
2245 buf = btrfs_find_create_tree_block(root, ins.objectid, blocksize);
2247 btrfs_free_extent(trans, root, ins.objectid, blocksize,
2248 root->root_key.objectid, ref_generation,
2250 return ERR_PTR(-ENOMEM);
2252 btrfs_set_header_generation(buf, trans->transid);
2253 btrfs_tree_lock(buf);
2254 clean_tree_block(trans, root, buf);
2255 btrfs_set_buffer_uptodate(buf);
2257 if (PageDirty(buf->first_page)) {
2258 printk("page %lu dirty\n", buf->first_page->index);
2262 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
2263 buf->start + buf->len - 1, GFP_NOFS);
2264 trans->blocks_used++;
2268 static int noinline drop_leaf_ref_no_cache(struct btrfs_trans_handle *trans,
2269 struct btrfs_root *root,
2270 struct extent_buffer *leaf)
2273 u64 leaf_generation;
2274 struct btrfs_key key;
2275 struct btrfs_file_extent_item *fi;
2280 BUG_ON(!btrfs_is_leaf(leaf));
2281 nritems = btrfs_header_nritems(leaf);
2282 leaf_owner = btrfs_header_owner(leaf);
2283 leaf_generation = btrfs_header_generation(leaf);
2285 mutex_unlock(&root->fs_info->alloc_mutex);
2287 for (i = 0; i < nritems; i++) {
2291 btrfs_item_key_to_cpu(leaf, &key, i);
2292 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2294 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
2295 if (btrfs_file_extent_type(leaf, fi) ==
2296 BTRFS_FILE_EXTENT_INLINE)
2299 * FIXME make sure to insert a trans record that
2300 * repeats the snapshot del on crash
2302 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
2303 if (disk_bytenr == 0)
2306 mutex_lock(&root->fs_info->alloc_mutex);
2307 ret = __btrfs_free_extent(trans, root, disk_bytenr,
2308 btrfs_file_extent_disk_num_bytes(leaf, fi),
2309 leaf_owner, leaf_generation,
2310 key.objectid, key.offset, 0);
2311 mutex_unlock(&root->fs_info->alloc_mutex);
2315 mutex_lock(&root->fs_info->alloc_mutex);
2319 static int noinline drop_leaf_ref(struct btrfs_trans_handle *trans,
2320 struct btrfs_root *root,
2321 struct btrfs_leaf_ref *ref)
2325 struct btrfs_extent_info *info = ref->extents;
2327 mutex_unlock(&root->fs_info->alloc_mutex);
2328 for (i = 0; i < ref->nritems; i++) {
2329 mutex_lock(&root->fs_info->alloc_mutex);
2330 ret = __btrfs_free_extent(trans, root,
2331 info->bytenr, info->num_bytes,
2332 ref->owner, ref->generation,
2333 info->objectid, info->offset, 0);
2334 mutex_unlock(&root->fs_info->alloc_mutex);
2338 mutex_lock(&root->fs_info->alloc_mutex);
2343 static void noinline reada_walk_down(struct btrfs_root *root,
2344 struct extent_buffer *node,
2357 nritems = btrfs_header_nritems(node);
2358 level = btrfs_header_level(node);
2362 for (i = slot; i < nritems && skipped < 32; i++) {
2363 bytenr = btrfs_node_blockptr(node, i);
2364 if (last && ((bytenr > last && bytenr - last > 32 * 1024) ||
2365 (last > bytenr && last - bytenr > 32 * 1024))) {
2369 blocksize = btrfs_level_size(root, level - 1);
2371 ret = lookup_extent_ref(NULL, root, bytenr,
2379 ret = readahead_tree_block(root, bytenr, blocksize,
2380 btrfs_node_ptr_generation(node, i));
2381 last = bytenr + blocksize;
2388 int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start, u64 len,
2392 mutex_unlock(&root->fs_info->alloc_mutex);
2393 ret = lookup_extent_ref(NULL, root, start, len, refs);
2395 mutex_lock(&root->fs_info->alloc_mutex);
2400 * helper function for drop_snapshot, this walks down the tree dropping ref
2401 * counts as it goes.
2403 static int noinline walk_down_tree(struct btrfs_trans_handle *trans,
2404 struct btrfs_root *root,
2405 struct btrfs_path *path, int *level)
2411 struct extent_buffer *next;
2412 struct extent_buffer *cur;
2413 struct extent_buffer *parent;
2414 struct btrfs_leaf_ref *ref;
2419 mutex_lock(&root->fs_info->alloc_mutex);
2421 WARN_ON(*level < 0);
2422 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2423 ret = drop_snap_lookup_refcount(root, path->nodes[*level]->start,
2424 path->nodes[*level]->len, &refs);
2430 * walk down to the last node level and free all the leaves
2432 while(*level >= 0) {
2433 WARN_ON(*level < 0);
2434 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2435 cur = path->nodes[*level];
2437 if (btrfs_header_level(cur) != *level)
2440 if (path->slots[*level] >=
2441 btrfs_header_nritems(cur))
2444 ret = drop_leaf_ref_no_cache(trans, root, cur);
2448 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
2449 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
2450 blocksize = btrfs_level_size(root, *level - 1);
2452 ret = drop_snap_lookup_refcount(root, bytenr, blocksize, &refs);
2455 parent = path->nodes[*level];
2456 root_owner = btrfs_header_owner(parent);
2457 root_gen = btrfs_header_generation(parent);
2458 path->slots[*level]++;
2459 ret = __btrfs_free_extent(trans, root, bytenr,
2460 blocksize, root_owner,
2467 struct btrfs_key key;
2468 btrfs_node_key_to_cpu(cur, &key, path->slots[*level]);
2469 ref = btrfs_lookup_leaf_ref(root, bytenr);
2471 ret = drop_leaf_ref(trans, root, ref);
2473 btrfs_remove_leaf_ref(root, ref);
2474 btrfs_free_leaf_ref(ref);
2479 next = btrfs_find_tree_block(root, bytenr, blocksize);
2480 if (!next || !btrfs_buffer_uptodate(next, ptr_gen)) {
2481 free_extent_buffer(next);
2482 mutex_unlock(&root->fs_info->alloc_mutex);
2484 if (path->slots[*level] == 0)
2485 reada_walk_down(root, cur, path->slots[*level]);
2486 next = read_tree_block(root, bytenr, blocksize,
2489 mutex_lock(&root->fs_info->alloc_mutex);
2491 /* we've dropped the lock, double check */
2492 ret = lookup_extent_ref(NULL, root, bytenr, blocksize,
2496 parent = path->nodes[*level];
2497 root_owner = btrfs_header_owner(parent);
2498 root_gen = btrfs_header_generation(parent);
2500 path->slots[*level]++;
2501 free_extent_buffer(next);
2502 ret = __btrfs_free_extent(trans, root, bytenr,
2510 WARN_ON(*level <= 0);
2511 if (path->nodes[*level-1])
2512 free_extent_buffer(path->nodes[*level-1]);
2513 path->nodes[*level-1] = next;
2514 *level = btrfs_header_level(next);
2515 path->slots[*level] = 0;
2518 WARN_ON(*level < 0);
2519 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2521 if (path->nodes[*level] == root->node) {
2522 parent = path->nodes[*level];
2523 bytenr = path->nodes[*level]->start;
2525 parent = path->nodes[*level + 1];
2526 bytenr = btrfs_node_blockptr(parent, path->slots[*level + 1]);
2529 blocksize = btrfs_level_size(root, *level);
2530 root_owner = btrfs_header_owner(parent);
2531 root_gen = btrfs_header_generation(parent);
2533 ret = __btrfs_free_extent(trans, root, bytenr, blocksize,
2534 root_owner, root_gen, 0, 0, 1);
2535 free_extent_buffer(path->nodes[*level]);
2536 path->nodes[*level] = NULL;
2539 mutex_unlock(&root->fs_info->alloc_mutex);
2545 * helper for dropping snapshots. This walks back up the tree in the path
2546 * to find the first node higher up where we haven't yet gone through
 * all the slots
2549 static int noinline walk_up_tree(struct btrfs_trans_handle *trans,
2550 struct btrfs_root *root,
2551 struct btrfs_path *path, int *level)
2555 struct btrfs_root_item *root_item = &root->root_item;
2560 for(i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
2561 slot = path->slots[i];
2562 if (slot < btrfs_header_nritems(path->nodes[i]) - 1) {
2563 struct extent_buffer *node;
2564 struct btrfs_disk_key disk_key;
2565 node = path->nodes[i];
2568 WARN_ON(*level == 0);
2569 btrfs_node_key(node, &disk_key, path->slots[i]);
2570 memcpy(&root_item->drop_progress,
2571 &disk_key, sizeof(disk_key));
2572 root_item->drop_level = i;
2575 if (path->nodes[*level] == root->node) {
2576 root_owner = root->root_key.objectid;
2578 btrfs_header_generation(path->nodes[*level]);
2580 struct extent_buffer *node;
2581 node = path->nodes[*level + 1];
2582 root_owner = btrfs_header_owner(node);
2583 root_gen = btrfs_header_generation(node);
2585 ret = btrfs_free_extent(trans, root,
2586 path->nodes[*level]->start,
2587 path->nodes[*level]->len,
2588 root_owner, root_gen, 0, 0, 1);
2590 free_extent_buffer(path->nodes[*level]);
2591 path->nodes[*level] = NULL;
2599 * drop the reference count on the tree rooted at 'snap'. This traverses
2600 * the tree freeing any blocks that have a ref count of zero after being
 * decremented.
2603 int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
2609 struct btrfs_path *path;
2612 struct btrfs_root_item *root_item = &root->root_item;
2614 WARN_ON(!mutex_is_locked(&root->fs_info->drop_mutex));
2615 path = btrfs_alloc_path();
2618 level = btrfs_header_level(root->node);
2620 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
2621 path->nodes[level] = root->node;
2622 extent_buffer_get(root->node);
2623 path->slots[level] = 0;
2625 struct btrfs_key key;
2626 struct btrfs_disk_key found_key;
2627 struct extent_buffer *node;
2629 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
2630 level = root_item->drop_level;
2631 path->lowest_level = level;
2632 wret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2637 node = path->nodes[level];
2638 btrfs_node_key(node, &found_key, path->slots[level]);
2639 WARN_ON(memcmp(&found_key, &root_item->drop_progress,
2640 sizeof(found_key)));
2642 * unlock our path, this is safe because only this
2643 * function is allowed to delete this snapshot
2645 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
2646 if (path->nodes[i] && path->locks[i]) {
2648 btrfs_tree_unlock(path->nodes[i]);
2653 atomic_inc(&root->fs_info->throttle_gen);
2654 wret = walk_down_tree(trans, root, path, &level);
2660 wret = walk_up_tree(trans, root, path, &level);
2665 if (trans->transaction->in_commit) {
2669 wake_up(&root->fs_info->transaction_throttle);
2671 for (i = 0; i <= orig_level; i++) {
2672 if (path->nodes[i]) {
2673 free_extent_buffer(path->nodes[i]);
2674 path->nodes[i] = NULL;
2678 btrfs_free_path(path);
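/*
 * free all the in-memory block group descriptors tracked in
 * block_group_cache and clear the dirty ranges in the free space cache.
 */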
2682 int btrfs_free_block_groups(struct btrfs_fs_info *info)
2689 mutex_lock(&info->alloc_mutex);
2691 ret = find_first_extent_bit(&info->block_group_cache, 0,
2692 &start, &end, (unsigned int)-1);
2695 ret = get_state_private(&info->block_group_cache, start, &ptr);
2697 kfree((void *)(unsigned long)ptr);
2698 clear_extent_bits(&info->block_group_cache, start,
2699 end, (unsigned int)-1, GFP_NOFS);
2702 ret = find_first_extent_bit(&info->free_space_cache, 0,
2703 &start, &end, EXTENT_DIRTY);
2706 clear_extent_dirty(&info->free_space_cache, start,
2709 mutex_unlock(&info->alloc_mutex);
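/* last page index to read ahead, capped so we never run past 'last' */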
2713 static unsigned long calc_ra(unsigned long start, unsigned long last,
2716 return min(last, start + nr - 1);
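/*
 * pull every page in [start, start + len) into the page cache, wait out
 * any ordered extents covering them and mark the range delalloc + dirty.
 * writeback is then started so the data is rewritten into freshly
 * allocated extents.
 */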
2719 static int noinline relocate_inode_pages(struct inode *inode, u64 start,
2724 unsigned long last_index;
2727 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2728 struct file_ra_state *ra;
2729 unsigned long total_read = 0;
2730 unsigned long ra_pages;
2731 struct btrfs_ordered_extent *ordered;
2732 struct btrfs_trans_handle *trans;
2734 ra = kzalloc(sizeof(*ra), GFP_NOFS);
2736 mutex_lock(&inode->i_mutex);
2737 i = start >> PAGE_CACHE_SHIFT;
2738 last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;
2740 ra_pages = BTRFS_I(inode)->root->fs_info->bdi.ra_pages;
2742 file_ra_state_init(ra, inode->i_mapping);
2744 for (; i <= last_index; i++) {
2745 if (total_read % ra_pages == 0) {
2746 btrfs_force_ra(inode->i_mapping, ra, NULL, i,
2747 calc_ra(i, last_index, ra_pages));
2751 if (((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode))
2752 goto truncate_racing;
2753 page = grab_cache_page(inode->i_mapping, i);
2757 if (!PageUptodate(page)) {
2758 btrfs_readpage(NULL, page);
2760 if (!PageUptodate(page)) {
2762 page_cache_release(page);
2766 wait_on_page_writeback(page);
2768 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2769 page_end = page_start + PAGE_CACHE_SIZE - 1;
2770 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
2772 ordered = btrfs_lookup_ordered_extent(inode, page_start);
2774 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2776 page_cache_release(page);
2777 btrfs_start_ordered_extent(inode, ordered, 1);
2778 btrfs_put_ordered_extent(ordered);
2781 set_page_extent_mapped(page);
2784 set_extent_delalloc(io_tree, page_start,
2785 page_end, GFP_NOFS);
2786 set_page_dirty(page);
2788 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2790 page_cache_release(page);
2794 /* we have to start the IO in order to get the ordered extents
2795 * instantiated. This allows the relocation code to wait
2796 * for all the ordered extents to hit the disk.
2798 * Otherwise, it would constantly loop over the same extents
2799 * because the old ones don't get deleted until the IO is done.
2802 btrfs_fdatawrite_range(inode->i_mapping, start, start + len - 1,
2805 trans = btrfs_start_transaction(BTRFS_I(inode)->root, 1);
2807 btrfs_end_transaction(trans, BTRFS_I(inode)->root);
2808 mark_inode_dirty(inode);
2810 mutex_unlock(&inode->i_mutex);
2814 vmtruncate(inode, inode->i_size);
2815 balance_dirty_pages_ratelimited_nr(inode->i_mapping,
2821 * The back references tell us which tree holds a ref on a block,
2822 * but it is possible for the tree root field in the reference to
2823 * reflect the original root before a snapshot was made. In this
2824 * case we should search through all the children of a given root
2825 * to find potential holders of references on a block.
2827 * Instead, we do something a little less fancy and just search
2828 * all the roots for a given key/block combination.
2830 static int find_root_for_ref(struct btrfs_root *root,
2831 struct btrfs_path *path,
2832 struct btrfs_key *key0,
2835 struct btrfs_root **found_root,
2838 struct btrfs_key root_location;
2839 struct btrfs_root *cur_root = *found_root;
2840 struct btrfs_file_extent_item *file_extent;
2841 u64 root_search_start = BTRFS_FS_TREE_OBJECTID;
2845 root_location.offset = (u64)-1;
2846 root_location.type = BTRFS_ROOT_ITEM_KEY;
2847 path->lowest_level = level;
2850 ret = btrfs_search_slot(NULL, cur_root, key0, path, 0, 0);
2852 if (ret == 0 && file_key) {
2853 struct extent_buffer *leaf = path->nodes[0];
2854 file_extent = btrfs_item_ptr(leaf, path->slots[0],
2855 struct btrfs_file_extent_item);
2856 if (btrfs_file_extent_type(leaf, file_extent) ==
2857 BTRFS_FILE_EXTENT_REG) {
2859 btrfs_file_extent_disk_bytenr(leaf,
2862 } else if (!file_key) {
2863 if (path->nodes[level])
2864 found_bytenr = path->nodes[level]->start;
2867 btrfs_release_path(cur_root, path);
2869 if (found_bytenr == bytenr) {
2870 *found_root = cur_root;
2874 ret = btrfs_search_root(root->fs_info->tree_root,
2875 root_search_start, &root_search_start);
2879 root_location.objectid = root_search_start;
2880 cur_root = btrfs_read_fs_root_no_name(root->fs_info,
2888 path->lowest_level = 0;
2893 * relocate a single backref: for file data this rewrites the referencing
 * pages, for tree blocks it COWs the path down to the block so it gets
 * reallocated.  note, this releases the path passed in
2895 static int noinline relocate_one_reference(struct btrfs_root *extent_root,
2896 struct btrfs_path *path,
2897 struct btrfs_key *extent_key,
2898 u64 *last_file_objectid,
2899 u64 *last_file_offset,
2900 u64 *last_file_root,
2903 struct inode *inode;
2904 struct btrfs_root *found_root;
2905 struct btrfs_key root_location;
2906 struct btrfs_key found_key;
2907 struct btrfs_extent_ref *ref;
2915 WARN_ON(!mutex_is_locked(&extent_root->fs_info->alloc_mutex));
2917 ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
2918 struct btrfs_extent_ref);
2919 ref_root = btrfs_ref_root(path->nodes[0], ref);
2920 ref_gen = btrfs_ref_generation(path->nodes[0], ref);
2921 ref_objectid = btrfs_ref_objectid(path->nodes[0], ref);
2922 ref_offset = btrfs_ref_offset(path->nodes[0], ref);
2923 btrfs_release_path(extent_root, path);
2925 root_location.objectid = ref_root;
2927 root_location.offset = 0;
2929 root_location.offset = (u64)-1;
2930 root_location.type = BTRFS_ROOT_ITEM_KEY;
2932 found_root = btrfs_read_fs_root_no_name(extent_root->fs_info,
2934 BUG_ON(!found_root);
2935 mutex_unlock(&extent_root->fs_info->alloc_mutex);
2937 if (ref_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
2938 found_key.objectid = ref_objectid;
2939 found_key.type = BTRFS_EXTENT_DATA_KEY;
2940 found_key.offset = ref_offset;
2943 if (last_extent == extent_key->objectid &&
2944 *last_file_objectid == ref_objectid &&
2945 *last_file_offset == ref_offset &&
2946 *last_file_root == ref_root)
2949 ret = find_root_for_ref(extent_root, path, &found_key,
2950 level, 1, &found_root,
2951 extent_key->objectid);
2956 if (last_extent == extent_key->objectid &&
2957 *last_file_objectid == ref_objectid &&
2958 *last_file_offset == ref_offset &&
2959 *last_file_root == ref_root)
2962 inode = btrfs_iget_locked(extent_root->fs_info->sb,
2963 ref_objectid, found_root);
2964 if (inode->i_state & I_NEW) {
2965 /* the inode and parent dir are two different roots */
2966 BTRFS_I(inode)->root = found_root;
2967 BTRFS_I(inode)->location.objectid = ref_objectid;
2968 BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
2969 BTRFS_I(inode)->location.offset = 0;
2970 btrfs_read_locked_inode(inode);
2971 unlock_new_inode(inode);
2974 /* this can happen if the reference is not against
2975 * the latest version of the tree root
2977 if (is_bad_inode(inode))
2980 *last_file_objectid = inode->i_ino;
2981 *last_file_root = found_root->root_key.objectid;
2982 *last_file_offset = ref_offset;
2984 relocate_inode_pages(inode, ref_offset, extent_key->offset);
2987 struct btrfs_trans_handle *trans;
2988 struct extent_buffer *eb;
2991 eb = read_tree_block(found_root, extent_key->objectid,
2992 extent_key->offset, 0);
2993 btrfs_tree_lock(eb);
2994 level = btrfs_header_level(eb);
2997 btrfs_item_key_to_cpu(eb, &found_key, 0);
2999 btrfs_node_key_to_cpu(eb, &found_key, 0);
3001 btrfs_tree_unlock(eb);
3002 free_extent_buffer(eb);
3004 ret = find_root_for_ref(extent_root, path, &found_key,
3005 level, 0, &found_root,
3006 extent_key->objectid);
3012 * right here almost anything could happen to our key,
3013 * but that's ok. The cow below will either relocate it
3014 * or someone else will have relocated it. Either way,
3015 * it is in a different spot than it was before and
 * we're happy with it.
3019 trans = btrfs_start_transaction(found_root, 1);
3021 if (found_root == extent_root->fs_info->extent_root ||
3022 found_root == extent_root->fs_info->chunk_root ||
3023 found_root == extent_root->fs_info->dev_root) {
3025 mutex_lock(&extent_root->fs_info->alloc_mutex);
3028 path->lowest_level = level;
3030 ret = btrfs_search_slot(trans, found_root, &found_key, path,
3032 path->lowest_level = 0;
3033 btrfs_release_path(found_root, path);
3035 if (found_root == found_root->fs_info->extent_root)
3036 btrfs_extent_post_op(trans, found_root);
3038 mutex_unlock(&extent_root->fs_info->alloc_mutex);
3040 btrfs_end_transaction(trans, found_root);
3044 mutex_lock(&extent_root->fs_info->alloc_mutex);
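/*
 * remove a stray extent record with objectid zero directly from the
 * extent tree; relocate_one_extent calls this instead of trying to
 * relocate it.
 */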
3048 static int noinline del_extent_zero(struct btrfs_root *extent_root,
3049 struct btrfs_path *path,
3050 struct btrfs_key *extent_key)
3053 struct btrfs_trans_handle *trans;
3055 trans = btrfs_start_transaction(extent_root, 1);
3056 ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);
3063 ret = btrfs_del_item(trans, extent_root, path);
3065 btrfs_end_transaction(trans, extent_root);
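/*
 * relocate one extent by iterating over every backref item recorded for
 * it and handing each reference to relocate_one_reference.
 */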
3069 static int noinline relocate_one_extent(struct btrfs_root *extent_root,
3070 struct btrfs_path *path,
3071 struct btrfs_key *extent_key)
3073 struct btrfs_key key;
3074 struct btrfs_key found_key;
3075 struct extent_buffer *leaf;
3076 u64 last_file_objectid = 0;
3077 u64 last_file_root = 0;
3078 u64 last_file_offset = (u64)-1;
3079 u64 last_extent = 0;
3084 if (extent_key->objectid == 0) {
3085 ret = del_extent_zero(extent_root, path, extent_key);
3088 key.objectid = extent_key->objectid;
3089 key.type = BTRFS_EXTENT_REF_KEY;
3093 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
3099 leaf = path->nodes[0];
3100 nritems = btrfs_header_nritems(leaf);
3101 if (path->slots[0] == nritems) {
3102 ret = btrfs_next_leaf(extent_root, path);
3109 leaf = path->nodes[0];
3112 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3113 if (found_key.objectid != extent_key->objectid) {
3117 if (found_key.type != BTRFS_EXTENT_REF_KEY) {
3121 key.offset = found_key.offset + 1;
3122 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3124 ret = relocate_one_reference(extent_root, path, extent_key,
3125 &last_file_objectid,
3127 &last_file_root, last_extent);
3130 last_extent = extent_key->objectid;
3134 btrfs_release_path(extent_root, path);
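/*
 * decide which raid profile the replacement chunks should use, based on
 * the number of devices currently in the filesystem.
 */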
3138 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
3141 u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
3142 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
3144 num_devices = root->fs_info->fs_devices->num_devices;
3145 if (num_devices == 1) {
3146 stripped |= BTRFS_BLOCK_GROUP_DUP;
3147 stripped = flags & ~stripped;
3149 /* turn raid0 into single device chunks */
3150 if (flags & BTRFS_BLOCK_GROUP_RAID0)
3153 /* turn mirroring into duplication */
3154 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
3155 BTRFS_BLOCK_GROUP_RAID10))
3156 return stripped | BTRFS_BLOCK_GROUP_DUP;
3159 /* they already had raid on here, just return */
3160 if (flags & stripped)
3163 stripped |= BTRFS_BLOCK_GROUP_DUP;
3164 stripped = flags & ~stripped;
3166 /* switch duplicated blocks with raid1 */
3167 if (flags & BTRFS_BLOCK_GROUP_DUP)
3168 return stripped | BTRFS_BLOCK_GROUP_RAID1;
3170 /* turn single device chunks into raid0 */
3171 return stripped | BTRFS_BLOCK_GROUP_RAID0;
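/*
 * if the shrinking block group still has data in it, make sure a chunk
 * with a suitable profile exists to receive it.  The alloc_mutex is
 * dropped around starting and ending the transaction.
 */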
3176 int __alloc_chunk_for_shrink(struct btrfs_root *root,
3177 struct btrfs_block_group_cache *shrink_block_group,
3180 struct btrfs_trans_handle *trans;
3181 u64 new_alloc_flags;
3184 spin_lock(&shrink_block_group->lock);
3185 if (btrfs_block_group_used(&shrink_block_group->item) > 0) {
3186 spin_unlock(&shrink_block_group->lock);
3187 mutex_unlock(&root->fs_info->alloc_mutex);
3189 trans = btrfs_start_transaction(root, 1);
3190 mutex_lock(&root->fs_info->alloc_mutex);
3191 spin_lock(&shrink_block_group->lock);
3193 new_alloc_flags = update_block_group_flags(root,
3194 shrink_block_group->flags);
3195 if (new_alloc_flags != shrink_block_group->flags) {
3197 btrfs_block_group_used(&shrink_block_group->item);
3199 calc = shrink_block_group->key.offset;
3201 spin_unlock(&shrink_block_group->lock);
3203 do_chunk_alloc(trans, root->fs_info->extent_root,
3204 calc + 2 * 1024 * 1024, new_alloc_flags, force);
3206 mutex_unlock(&root->fs_info->alloc_mutex);
3207 btrfs_end_transaction(trans, root);
3208 mutex_lock(&root->fs_info->alloc_mutex);
3210 spin_unlock(&shrink_block_group->lock);
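/*
 * relocate every extent that falls inside the block group starting at
 * shrink_start, commit the resulting work, and finally remove the block
 * group item and its cache entries.
 */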
3214 int btrfs_shrink_extent_tree(struct btrfs_root *root, u64 shrink_start)
3216 struct btrfs_trans_handle *trans;
3217 struct btrfs_root *tree_root = root->fs_info->tree_root;
3218 struct btrfs_path *path;
3221 u64 shrink_last_byte;
3222 struct btrfs_block_group_cache *shrink_block_group;
3223 struct btrfs_fs_info *info = root->fs_info;
3224 struct btrfs_key key;
3225 struct btrfs_key found_key;
3226 struct extent_buffer *leaf;
3231 mutex_lock(&root->fs_info->alloc_mutex);
3232 shrink_block_group = btrfs_lookup_block_group(root->fs_info,
3234 BUG_ON(!shrink_block_group);
3236 shrink_last_byte = shrink_block_group->key.objectid +
3237 shrink_block_group->key.offset;
3239 shrink_block_group->space_info->total_bytes -=
3240 shrink_block_group->key.offset;
3241 path = btrfs_alloc_path();
3242 root = root->fs_info->extent_root;
3245 printk("btrfs relocating block group %llu flags %llu\n",
3246 (unsigned long long)shrink_start,
3247 (unsigned long long)shrink_block_group->flags);
3249 __alloc_chunk_for_shrink(root, shrink_block_group, 1);
3253 shrink_block_group->ro = 1;
3257 key.objectid = shrink_start;
3260 cur_byte = key.objectid;
3262 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3266 ret = btrfs_previous_item(root, path, 0, BTRFS_EXTENT_ITEM_KEY);
3271 leaf = path->nodes[0];
3272 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3273 if (found_key.objectid + found_key.offset > shrink_start &&
3274 found_key.objectid < shrink_last_byte) {
3275 cur_byte = found_key.objectid;
3276 key.objectid = cur_byte;
3279 btrfs_release_path(root, path);
3282 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3287 leaf = path->nodes[0];
3288 nritems = btrfs_header_nritems(leaf);
3289 if (path->slots[0] >= nritems) {
3290 ret = btrfs_next_leaf(root, path);
3297 leaf = path->nodes[0];
3298 nritems = btrfs_header_nritems(leaf);
3301 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3303 if (found_key.objectid >= shrink_last_byte)
3306 if (progress && need_resched()) {
3307 memcpy(&key, &found_key, sizeof(key));
3309 btrfs_release_path(root, path);
3310 btrfs_search_slot(NULL, root, &key, path, 0, 0);
3316 if (btrfs_key_type(&found_key) != BTRFS_EXTENT_ITEM_KEY ||
3317 found_key.objectid + found_key.offset <= cur_byte) {
3318 memcpy(&key, &found_key, sizeof(key));
3325 cur_byte = found_key.objectid + found_key.offset;
3326 key.objectid = cur_byte;
3327 btrfs_release_path(root, path);
3328 ret = relocate_one_extent(root, path, &found_key);
3329 __alloc_chunk_for_shrink(root, shrink_block_group, 0);
3332 btrfs_release_path(root, path);
3334 if (total_found > 0) {
3335 printk("btrfs relocate found %llu last extent was %llu\n",
3336 (unsigned long long)total_found,
3337 (unsigned long long)found_key.objectid);
3338 mutex_unlock(&root->fs_info->alloc_mutex);
3339 trans = btrfs_start_transaction(tree_root, 1);
3340 btrfs_commit_transaction(trans, tree_root);
3342 btrfs_clean_old_snapshots(tree_root);
3344 btrfs_wait_ordered_extents(tree_root);
3346 trans = btrfs_start_transaction(tree_root, 1);
3347 btrfs_commit_transaction(trans, tree_root);
3348 mutex_lock(&root->fs_info->alloc_mutex);
3353 * we've freed all the extents, now remove the block
3354 * group item from the tree
3356 mutex_unlock(&root->fs_info->alloc_mutex);
3358 trans = btrfs_start_transaction(root, 1);
3360 mutex_lock(&root->fs_info->alloc_mutex);
3361 memcpy(&key, &shrink_block_group->key, sizeof(key));
3363 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3367 btrfs_end_transaction(trans, root);
3371 clear_extent_bits(&info->block_group_cache, key.objectid,
3372 key.objectid + key.offset - 1,
3373 (unsigned int)-1, GFP_NOFS);
3376 clear_extent_bits(&info->free_space_cache,
3377 key.objectid, key.objectid + key.offset - 1,
3378 (unsigned int)-1, GFP_NOFS);
3380 memset(shrink_block_group, 0, sizeof(*shrink_block_group));
3381 kfree(shrink_block_group);
3383 btrfs_del_item(trans, root, path);
3384 btrfs_release_path(root, path);
3385 mutex_unlock(&root->fs_info->alloc_mutex);
3386 btrfs_commit_transaction(trans, root);
3388 mutex_lock(&root->fs_info->alloc_mutex);
3390 /* the code to unpin extents might set a few bits in the free
3391 * space cache for this range again
3393 clear_extent_bits(&info->free_space_cache,
3394 key.objectid, key.objectid + key.offset - 1,
3395 (unsigned int)-1, GFP_NOFS);
3397 btrfs_free_path(path);
3398 mutex_unlock(&root->fs_info->alloc_mutex);
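/*
 * walk the extent tree looking for the first block group item whose
 * objectid is at or past the objectid in 'key'.
 */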
3402 int find_first_block_group(struct btrfs_root *root, struct btrfs_path *path,
3403 struct btrfs_key *key)
3406 struct btrfs_key found_key;
3407 struct extent_buffer *leaf;
3410 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
3415 slot = path->slots[0];
3416 leaf = path->nodes[0];
3417 if (slot >= btrfs_header_nritems(leaf)) {
3418 ret = btrfs_next_leaf(root, path);
3425 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3427 if (found_key.objectid >= key->objectid &&
3428 found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
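/*
 * read every block group item out of the extent tree (normally done at
 * mount time) and build the in-memory block group and space info caches.
 */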
3439 int btrfs_read_block_groups(struct btrfs_root *root)
3441 struct btrfs_path *path;
3444 struct btrfs_block_group_cache *cache;
3445 struct btrfs_fs_info *info = root->fs_info;
3446 struct btrfs_space_info *space_info;
3447 struct extent_io_tree *block_group_cache;
3448 struct btrfs_key key;
3449 struct btrfs_key found_key;
3450 struct extent_buffer *leaf;
3452 block_group_cache = &info->block_group_cache;
3453 root = info->extent_root;
3456 btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
3457 path = btrfs_alloc_path();
3461 mutex_lock(&root->fs_info->alloc_mutex);
3463 ret = find_first_block_group(root, path, &key);
3471 leaf = path->nodes[0];
3472 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3473 cache = kzalloc(sizeof(*cache), GFP_NOFS);
3479 spin_lock_init(&cache->lock);
3480 read_extent_buffer(leaf, &cache->item,
3481 btrfs_item_ptr_offset(leaf, path->slots[0]),
3482 sizeof(cache->item));
3483 memcpy(&cache->key, &found_key, sizeof(found_key));
3485 key.objectid = found_key.objectid + found_key.offset;
3486 btrfs_release_path(root, path);
3487 cache->flags = btrfs_block_group_flags(&cache->item);
3489 if (cache->flags & BTRFS_BLOCK_GROUP_DATA) {
3490 bit = BLOCK_GROUP_DATA;
3491 } else if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
3492 bit = BLOCK_GROUP_SYSTEM;
3493 } else if (cache->flags & BTRFS_BLOCK_GROUP_METADATA) {
3494 bit = BLOCK_GROUP_METADATA;
3496 set_avail_alloc_bits(info, cache->flags);
3498 ret = update_space_info(info, cache->flags, found_key.offset,
3499 btrfs_block_group_used(&cache->item),
3502 cache->space_info = space_info;
3504 /* use EXTENT_LOCKED to prevent merging */
3505 set_extent_bits(block_group_cache, found_key.objectid,
3506 found_key.objectid + found_key.offset - 1,
3507 EXTENT_LOCKED, GFP_NOFS);
3508 set_state_private(block_group_cache, found_key.objectid,
3509 (unsigned long)cache);
3510 set_extent_bits(block_group_cache, found_key.objectid,
3511 found_key.objectid + found_key.offset - 1,
3512 bit | EXTENT_LOCKED, GFP_NOFS);
3514 btrfs_super_total_bytes(&info->super_copy))
3519 btrfs_free_path(path);
3520 mutex_unlock(&root->fs_info->alloc_mutex);
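/*
 * create a brand new block group: set up the cache entry and space info,
 * record it in the block_group_cache extent io tree and insert the block
 * group item into the extent tree.
 */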
3524 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
3525 struct btrfs_root *root, u64 bytes_used,
3526 u64 type, u64 chunk_objectid, u64 chunk_offset,
3531 struct btrfs_root *extent_root;
3532 struct btrfs_block_group_cache *cache;
3533 struct extent_io_tree *block_group_cache;
3535 WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
3536 extent_root = root->fs_info->extent_root;
3537 block_group_cache = &root->fs_info->block_group_cache;
3539 cache = kzalloc(sizeof(*cache), GFP_NOFS);
3541 cache->key.objectid = chunk_offset;
3542 cache->key.offset = size;
3543 spin_lock_init(&cache->lock);
3544 btrfs_set_key_type(&cache->key, BTRFS_BLOCK_GROUP_ITEM_KEY);
3546 btrfs_set_block_group_used(&cache->item, bytes_used);
3547 btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
3548 cache->flags = type;
3549 btrfs_set_block_group_flags(&cache->item, type);
3551 ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
3552 &cache->space_info);
3555 bit = block_group_state_bits(type);
3556 set_extent_bits(block_group_cache, chunk_offset,
3557 chunk_offset + size - 1,
3558 EXTENT_LOCKED, GFP_NOFS);
3559 set_state_private(block_group_cache, chunk_offset,
3560 (unsigned long)cache);
3561 set_extent_bits(block_group_cache, chunk_offset,
3562 chunk_offset + size - 1,
3563 bit | EXTENT_LOCKED, GFP_NOFS);
3565 ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
3566 sizeof(cache->item));
3569 finish_current_insert(trans, extent_root);
3570 ret = del_pending_extents(trans, extent_root);
3572 set_avail_alloc_bits(extent_root->fs_info, type);