pilppa.org Git - linux-2.6-omap-h63xx.git/blobdiff - fs/btrfs/free-space-cache.c
Merge git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable
[linux-2.6-omap-h63xx.git] / fs / btrfs / free-space-cache.c
index 96241f01fa0a88e26129d38e299e219253245311..d1e5f0e84c58c8733e90ad15d09453a31e114470 100644 (file)
@@ -184,8 +184,8 @@ static int link_free_space(struct btrfs_block_group_cache *block_group,
        return ret;
 }
 
-int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
-                        u64 offset, u64 bytes)
+static int __btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
+                                 u64 offset, u64 bytes)
 {
        struct btrfs_free_space *right_info;
        struct btrfs_free_space *left_info;
@@ -202,8 +202,6 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
         * are adding, if there is remove that struct and add a new one to
         * cover the entire range
         */
-       spin_lock(&block_group->lock);
-
        right_info = tree_search_offset(&block_group->free_space_offset,
                                        offset+bytes, 0, 1);
        left_info = tree_search_offset(&block_group->free_space_offset,
@@ -215,10 +213,13 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
                info->offset = offset;
                info->bytes += bytes;
        } else if (right_info && right_info->offset != offset+bytes) {
-               printk(KERN_ERR "adding space in the middle of an existing "
-                      "free space area. existing: offset=%Lu, bytes=%Lu. "
-                      "new: offset=%Lu, bytes=%Lu\n", right_info->offset,
-                      right_info->bytes, offset, bytes);
+               printk(KERN_ERR "btrfs adding space in the middle of an "
+                      "existing free space area. existing: "
+                      "offset=%llu, bytes=%llu. new: offset=%llu, "
+                      "bytes=%llu\n", (unsigned long long)right_info->offset,
+                      (unsigned long long)right_info->bytes,
+                      (unsigned long long)offset,
+                      (unsigned long long)bytes);
                BUG();
        }
 
@@ -227,11 +228,14 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
 
                if (unlikely((left_info->offset + left_info->bytes) !=
                             offset)) {
-                       printk(KERN_ERR "free space to the left of new free "
-                              "space isn't quite right. existing: offset=%Lu,"
-                              " bytes=%Lu. new: offset=%Lu, bytes=%Lu\n",
-                              left_info->offset, left_info->bytes, offset,
-                              bytes);
+                       printk(KERN_ERR "btrfs free space to the left "
+                              "of new free space isn't "
+                              "quite right. existing: offset=%llu, "
+                              "bytes=%llu. new: offset=%llu, bytes=%llu\n",
+                              (unsigned long long)left_info->offset,
+                              (unsigned long long)left_info->bytes,
+                              (unsigned long long)offset,
+                              (unsigned long long)bytes);
                        BUG();
                }
 
@@ -261,39 +265,38 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
        if (ret)
                kfree(info);
 out:
-       spin_unlock(&block_group->lock);
        if (ret) {
                printk(KERN_ERR "btrfs: unable to add free space :%d\n", ret);
                if (ret == -EEXIST)
                        BUG();
        }
 
-       if (alloc_info)
-               kfree(alloc_info);
+       kfree(alloc_info);
 
        return ret;
 }
 
-int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
-                           u64 offset, u64 bytes)
+static int
+__btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
+                         u64 offset, u64 bytes)
 {
        struct btrfs_free_space *info;
        int ret = 0;
 
-       spin_lock(&block_group->lock);
        info = tree_search_offset(&block_group->free_space_offset, offset, 0,
                                  1);
 
        if (info && info->offset == offset) {
                if (info->bytes < bytes) {
-                       printk(KERN_ERR "Found free space at %Lu, size %Lu,"
-                              "trying to use %Lu\n",
-                              info->offset, info->bytes, bytes);
+                       printk(KERN_ERR "Found free space at %llu, size %llu,"
+                              "trying to use %llu\n",
+                              (unsigned long long)info->offset,
+                              (unsigned long long)info->bytes,
+                              (unsigned long long)bytes);
                        WARN_ON(1);
                        ret = -EINVAL;
                        goto out;
                }
-
                unlink_free_space(block_group, info);
 
                if (info->bytes == bytes) {
@@ -334,17 +337,63 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
                /* step two, insert a new info struct to cover anything
                 * before the hole
                 */
-               spin_unlock(&block_group->lock);
-               ret = btrfs_add_free_space(block_group, old_start,
-                                          offset - old_start);
+               ret = __btrfs_add_free_space(block_group, old_start,
+                                            offset - old_start);
                BUG_ON(ret);
-               goto out_nolock;
        } else {
                WARN_ON(1);
        }
 out:
-       spin_unlock(&block_group->lock);
-out_nolock:
+       return ret;
+}
+
+int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
+                        u64 offset, u64 bytes)
+{
+       int ret;
+       struct btrfs_free_space *sp;
+
+       mutex_lock(&block_group->alloc_mutex);
+       ret = __btrfs_add_free_space(block_group, offset, bytes);
+       sp = tree_search_offset(&block_group->free_space_offset, offset, 0, 1);
+       BUG_ON(!sp);
+       mutex_unlock(&block_group->alloc_mutex);
+
+       return ret;
+}
+
+int btrfs_add_free_space_lock(struct btrfs_block_group_cache *block_group,
+                             u64 offset, u64 bytes)
+{
+       int ret;
+       struct btrfs_free_space *sp;
+
+       ret = __btrfs_add_free_space(block_group, offset, bytes);
+       sp = tree_search_offset(&block_group->free_space_offset, offset, 0, 1);
+       BUG_ON(!sp);
+
+       return ret;
+}
+
+int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
+                           u64 offset, u64 bytes)
+{
+       int ret = 0;
+
+       mutex_lock(&block_group->alloc_mutex);
+       ret = __btrfs_remove_free_space(block_group, offset, bytes);
+       mutex_unlock(&block_group->alloc_mutex);
+
+       return ret;
+}
+
+int btrfs_remove_free_space_lock(struct btrfs_block_group_cache *block_group,
+                                u64 offset, u64 bytes)
+{
+       int ret;
+
+       ret = __btrfs_remove_free_space(block_group, offset, bytes);
+
        return ret;
 }
 
@@ -359,8 +408,6 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
                info = rb_entry(n, struct btrfs_free_space, offset_index);
                if (info->bytes >= bytes)
                        count++;
-               //printk(KERN_INFO "offset=%Lu, bytes=%Lu\n", info->offset,
-               //       info->bytes);
        }
        printk(KERN_INFO "%d blocks of free space at or bigger than bytes is"
               "\n", count);
@@ -386,64 +433,63 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
        struct btrfs_free_space *info;
        struct rb_node *node;
 
-       spin_lock(&block_group->lock);
+       mutex_lock(&block_group->alloc_mutex);
        while ((node = rb_last(&block_group->free_space_bytes)) != NULL) {
                info = rb_entry(node, struct btrfs_free_space, bytes_index);
                unlink_free_space(block_group, info);
                kfree(info);
                if (need_resched()) {
-                       spin_unlock(&block_group->lock);
+                       mutex_unlock(&block_group->alloc_mutex);
                        cond_resched();
-                       spin_lock(&block_group->lock);
+                       mutex_lock(&block_group->alloc_mutex);
                }
        }
-       spin_unlock(&block_group->lock);
+       mutex_unlock(&block_group->alloc_mutex);
 }
 
-struct btrfs_free_space *btrfs_find_free_space_offset(struct
+#if 0
+static struct btrfs_free_space *btrfs_find_free_space_offset(struct
                                                      btrfs_block_group_cache
                                                      *block_group, u64 offset,
                                                      u64 bytes)
 {
        struct btrfs_free_space *ret;
 
-       spin_lock(&block_group->lock);
+       mutex_lock(&block_group->alloc_mutex);
        ret = tree_search_offset(&block_group->free_space_offset, offset,
                                 bytes, 0);
-       spin_unlock(&block_group->lock);
+       mutex_unlock(&block_group->alloc_mutex);
 
        return ret;
 }
 
-struct btrfs_free_space *btrfs_find_free_space_bytes(struct
+static struct btrfs_free_space *btrfs_find_free_space_bytes(struct
                                                     btrfs_block_group_cache
                                                     *block_group, u64 offset,
                                                     u64 bytes)
 {
        struct btrfs_free_space *ret;
 
-       spin_lock(&block_group->lock);
+       mutex_lock(&block_group->alloc_mutex);
 
        ret = tree_search_bytes(&block_group->free_space_bytes, offset, bytes);
-       spin_unlock(&block_group->lock);
+       mutex_unlock(&block_group->alloc_mutex);
 
        return ret;
 }
+#endif
 
 struct btrfs_free_space *btrfs_find_free_space(struct btrfs_block_group_cache
                                               *block_group, u64 offset,
                                               u64 bytes)
 {
-       struct btrfs_free_space *ret;
+       struct btrfs_free_space *ret = NULL;
 
-       spin_lock(&block_group->lock);
        ret = tree_search_offset(&block_group->free_space_offset, offset,
                                 bytes, 0);
        if (!ret)
                ret = tree_search_bytes(&block_group->free_space_bytes,
                                        offset, bytes);
 
-       spin_unlock(&block_group->lock);
-
        return ret;
 }