#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include "extent_map.h"

/* temporary define until extent_map moves out of btrfs */
struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
				      unsigned long extra_flags,
				      void (*ctor)(void *, struct kmem_cache *,
						   unsigned long));

static struct kmem_cache *extent_map_cache;
static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;

static LIST_HEAD(buffers);
static LIST_HEAD(states);

static DEFINE_SPINLOCK(state_lock);

#define BUFFER_LRU_MAX 64

/*
 * fields shared by extent_map and extent_state so both can be indexed
 * by the same rb tree code
 */
struct tree_entry {
	u64 start;
	u64 end;
	int in_tree;
	struct rb_node rb_node;
};

void __init extent_map_init(void)
{
	extent_map_cache = btrfs_cache_create("extent_map",
					      sizeof(struct extent_map), 0,
					      NULL);
	extent_state_cache = btrfs_cache_create("extent_state",
						sizeof(struct extent_state), 0,
						NULL);
	extent_buffer_cache = btrfs_cache_create("extent_buffers",
						 sizeof(struct extent_buffer),
						 0, NULL);
}

void __exit extent_map_exit(void)
{
	struct extent_state *state;

	while (!list_empty(&states)) {
		state = list_entry(states.next, struct extent_state, list);
		printk("state leak: start %Lu end %Lu state %lu in tree %d refs %d\n",
		       state->start, state->end, state->state, state->in_tree,
		       atomic_read(&state->refs));
		list_del(&state->list);
		kmem_cache_free(extent_state_cache, state);
	}

	if (extent_map_cache)
		kmem_cache_destroy(extent_map_cache);
	if (extent_state_cache)
		kmem_cache_destroy(extent_state_cache);
	if (extent_buffer_cache)
		kmem_cache_destroy(extent_buffer_cache);
}

void extent_map_tree_init(struct extent_map_tree *tree,
			  struct address_space *mapping, gfp_t mask)
{
	tree->map.rb_node = NULL;
	tree->state.rb_node = NULL;
	tree->ops = NULL;
	rwlock_init(&tree->lock);
	spin_lock_init(&tree->lru_lock);
	tree->mapping = mapping;
	INIT_LIST_HEAD(&tree->buffer_lru);
	tree->lru_size = 0;
}
EXPORT_SYMBOL(extent_map_tree_init);

void extent_map_tree_cleanup(struct extent_map_tree *tree)
{
	struct extent_buffer *eb;

	while (!list_empty(&tree->buffer_lru)) {
		eb = list_entry(tree->buffer_lru.next, struct extent_buffer,
				lru);
		list_del(&eb->lru);
		free_extent_buffer(eb);
	}
}
EXPORT_SYMBOL(extent_map_tree_cleanup);

struct extent_map *alloc_extent_map(gfp_t mask)
{
	struct extent_map *em;

	em = kmem_cache_alloc(extent_map_cache, mask);
	if (!em || IS_ERR(em))
		return em;
	em->in_tree = 0;
	atomic_set(&em->refs, 1);
	return em;
}
EXPORT_SYMBOL(alloc_extent_map);

void free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	if (atomic_dec_and_test(&em->refs)) {
		WARN_ON(em->in_tree);
		kmem_cache_free(extent_map_cache, em);
	}
}
EXPORT_SYMBOL(free_extent_map);

struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;
	unsigned long flags;

	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state || IS_ERR(state))
		return state;
	state->state = 0;
	state->in_tree = 0;
	state->private = 0;

	spin_lock_irqsave(&state_lock, flags);
	list_add(&state->list, &states);
	spin_unlock_irqrestore(&state_lock, flags);

	atomic_set(&state->refs, 1);
	init_waitqueue_head(&state->wq);
	return state;
}
EXPORT_SYMBOL(alloc_extent_state);

void free_extent_state(struct extent_state *state)
{
	unsigned long flags;

	if (!state)
		return;
	if (atomic_dec_and_test(&state->refs)) {
		WARN_ON(state->in_tree);
		spin_lock_irqsave(&state_lock, flags);
		list_del(&state->list);
		spin_unlock_irqrestore(&state_lock, flags);
		kmem_cache_free(extent_state_cache, state);
	}
}
EXPORT_SYMBOL(free_extent_state);

/*
 * insert 'node' into 'root', indexed by the range that ends at 'offset'.
 * Returns the existing node if one already covers 'offset', or NULL on
 * success.
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset > entry->end)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	entry = rb_entry(node, struct tree_entry, rb_node);
	entry->in_tree = 1;
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * search for the entry containing 'offset'.  If there is no exact match,
 * *prev_ret is set to the first entry that ends at or after 'offset'.
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct tree_entry *entry;
	struct tree_entry *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct tree_entry, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset > entry->end)
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;
	while (prev && offset > prev_entry->end) {
		prev = rb_next(prev);
		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
	}
	*prev_ret = prev;
	return NULL;
}

static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
{
	struct rb_node *prev;
	struct rb_node *ret;

	ret = __tree_search(root, offset, &prev);
	if (!ret)
		return prev;
	return ret;
}

static int tree_delete(struct rb_root *root, u64 offset)
{
	struct rb_node *node;
	struct tree_entry *entry;

	node = __tree_search(root, offset, NULL);
	if (!node)
		return -ENOENT;
	entry = rb_entry(node, struct tree_entry, rb_node);
	entry->in_tree = 0;
	rb_erase(node, root);
	return 0;
}

/*
 * add_extent_mapping tries a simple backward merge with existing
 * mappings.  The extent_map struct passed in will be inserted into
 * the tree directly (no copies made, just a reference taken).
 */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em)
{
	int ret = 0;
	struct extent_map *prev = NULL;
	struct rb_node *rb;

	write_lock_irq(&tree->lock);
	rb = tree_insert(&tree->map, em->end, &em->rb_node);
	if (rb) {
		prev = rb_entry(rb, struct extent_map, rb_node);
		printk("found extent map %Lu %Lu on insert of %Lu %Lu\n",
		       prev->start, prev->end, em->start, em->end);
		ret = -EEXIST;
		goto out;
	}
	atomic_inc(&em->refs);
	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			prev = rb_entry(rb, struct extent_map, rb_node);
		if (prev && prev->end + 1 == em->start &&
		    ((em->block_start == EXTENT_MAP_HOLE &&
		      prev->block_start == EXTENT_MAP_HOLE) ||
		     (em->block_start == prev->block_end + 1))) {
			em->start = prev->start;
			em->block_start = prev->block_start;
			rb_erase(&prev->rb_node, &tree->map);
			prev->in_tree = 0;
			free_extent_map(prev);
		}
	}
out:
	write_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(add_extent_mapping);

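/*
 * Illustrative sketch, not part of the original file: a typical caller
 * fills in an extent_map and inserts it.  add_extent_mapping() takes its
 * own reference on success, so the caller always drops the allocation
 * reference afterwards.  The byte ranges and the 'tree' and 'bdev'
 * variables below are assumptions made up for the example.
 *
 *	struct extent_map *em;
 *
 *	em = alloc_extent_map(GFP_NOFS);
 *	if (!em || IS_ERR(em))
 *		return -ENOMEM;
 *	em->start = 0;
 *	em->end = 4095;			(one 4k page of file data)
 *	em->block_start = 8192;
 *	em->block_end = 12287;
 *	em->bdev = bdev;
 *	ret = add_extent_mapping(tree, em);
 *	free_extent_map(em);		(drop the allocation reference)
 */
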
/*
 * lookup_extent_mapping returns the first extent_map struct in the
 * tree that intersects the [start, end] (inclusive) range.  There may
 * be additional objects in the tree that intersect, so check the object
 * returned carefully to make sure you don't need additional lookups.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 end)
{
	struct extent_map *em;
	struct rb_node *rb_node;

	read_lock_irq(&tree->lock);
	rb_node = tree_search(&tree->map, start);
	if (!rb_node) {
		em = NULL;
		goto out;
	}
	if (IS_ERR(rb_node)) {
		em = ERR_PTR(PTR_ERR(rb_node));
		goto out;
	}
	em = rb_entry(rb_node, struct extent_map, rb_node);
	if (em->end < start || em->start > end) {
		em = NULL;
		goto out;
	}
	atomic_inc(&em->refs);
out:
	read_unlock_irq(&tree->lock);
	return em;
}
EXPORT_SYMBOL(lookup_extent_mapping);

/*
 * removes an extent_map struct from the tree.  No reference counts are
 * dropped, and no checks are done to see if the range is in use.
 */
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	int ret;

	write_lock_irq(&tree->lock);
	ret = tree_delete(&tree->map, em->end);
	write_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(remove_extent_mapping);

/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_IOBITS set in their state
 * field are not merged because the end_io handlers need to be able to do
 * operations on them without sleeping (or doing allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static int merge_state(struct extent_map_tree *tree,
		       struct extent_state *state)
{
	struct extent_state *other;
	struct rb_node *other_node;

	if (state->state & EXTENT_IOBITS)
		return 0;

	other_node = rb_prev(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			state->start = other->start;
			other->in_tree = 0;
			rb_erase(&other->rb_node, &tree->state);
			free_extent_state(other);
		}
	}
	other_node = rb_next(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			other->start = state->start;
			state->in_tree = 0;
			rb_erase(&state->rb_node, &tree->state);
			free_extent_state(state);
		}
	}
	return 0;
}

/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_map_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			int bits)
{
	struct rb_node *node;

	if (end < start) {
		printk("end < start %Lu %Lu\n", end, start);
		WARN_ON(1);
	}
	state->state |= bits;
	state->start = start;
	state->end = end;
	node = tree_insert(&tree->state, end, &state->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk("found node %Lu %Lu on insert of %Lu %Lu\n",
		       found->start, found->end, start, end);
		free_extent_state(state);
		return -EEXIST;
	}
	merge_state(tree, state);
	return 0;
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half.  'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling,
 * the tree has 'orig' at [orig->start, orig->end].  After calling, there
 * are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [split, orig->end]
 *
 * The tree locks are not taken by this function.  They need to be held
 * by the caller.
 */
static int split_state(struct extent_map_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	struct rb_node *node;

	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	orig->start = split;

	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk("found node %Lu %Lu on insert of %Lu %Lu\n",
		       found->start, found->end, prealloc->start,
		       prealloc->end);
		free_extent_state(prealloc);
		return -EEXIST;
	}
	return 0;
}

/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up any one waiting on this state (wake == 1), or
 * forcibly remove the state from the tree (delete == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree.
 */
static int clear_state_bit(struct extent_map_tree *tree,
			   struct extent_state *state, int bits, int wake,
			   int delete)
{
	int ret = state->state & bits;

	state->state &= ~bits;
	if (wake)
		wake_up(&state->wq);
	if (delete || state->state == 0) {
		if (state->in_tree) {
			rb_erase(&state->rb_node, &tree->state);
			state->in_tree = 0;
			free_extent_state(state);
		} else {
			WARN_ON(1);
		}
	} else {
		merge_state(tree, state);
	}
	return ret;
}

/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (i.e. for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
 * bits were already set, or zero if none of the bits were already set.
 */
int clear_extent_bit(struct extent_map_tree *tree, u64 start, u64 end,
		     int bits, int wake, int delete, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	unsigned long flags;
	int err;
	int set = 0;

again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	write_lock_irqsave(&tree->lock, flags);
	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = tree_search(&tree->state, start);
	if (!node)
		goto out;
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start > end)
		goto out;
	WARN_ON(state->end < start);

	/*
	 *     | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again.  It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */

	if (state->start < start) {
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			start = state->end + 1;
			set |= clear_state_bit(tree, state, bits,
					       wake, delete);
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		if (wake)
			wake_up(&state->wq);
		set |= clear_state_bit(tree, prealloc, bits,
				       wake, delete);
		prealloc = NULL;
		goto out;
	}

	start = state->end + 1;
	set |= clear_state_bit(tree, state, bits, wake, delete);
	goto search_again;

out:
	write_unlock_irqrestore(&tree->lock, flags);
	if (prealloc)
		free_extent_state(prealloc);

	return set;

search_again:
	if (start > end)
		goto out;
	write_unlock_irqrestore(&tree->lock, flags);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
EXPORT_SYMBOL(clear_extent_bit);

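/*
 * Illustrative sketch, not part of the original file: truncate style
 * callers remove every state record in a range regardless of which bits
 * are set by passing wake == 1 and delete == 1.  The 'tree' variable and
 * the range below are assumptions made up for the example.
 *
 *	clear_extent_bit(tree, new_size, (u64)-1,
 *			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
 *			 1, 1, GFP_NOFS);
 *
 * (extent_invalidatepage() below uses the same pattern per page)
 */
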
static int wait_on_state(struct extent_map_tree *tree,
			 struct extent_state *state)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
	read_unlock_irq(&tree->lock);
	schedule();
	read_lock_irq(&tree->lock);
	finish_wait(&state->wq, &wait);
	return 0;
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function.
 */
int wait_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits)
{
	struct extent_state *state;
	struct rb_node *node;

	read_lock_irq(&tree->lock);
again:
	while (1) {
		/*
		 * this search will find all the extents that end after
		 * our range starts
		 */
		node = tree_search(&tree->state, start);
		if (!node)
			break;

		state = rb_entry(node, struct extent_state, rb_node);

		if (state->start > end)
			goto out;

		if (state->state & bits) {
			start = state->start;
			atomic_inc(&state->refs);
			wait_on_state(tree, state);
			free_extent_state(state);
			goto again;
		}
		start = state->end + 1;

		if (start > end)
			break;

		if (need_resched()) {
			read_unlock_irq(&tree->lock);
			cond_resched();
			read_lock_irq(&tree->lock);
		}
	}
out:
	read_unlock_irq(&tree->lock);
	return 0;
}
EXPORT_SYMBOL(wait_extent_bit);

/*
 * set some bits on a range in the tree.  This may require allocations
 * or sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
 * range already has the desired bits set.  The start of the existing
 * range is returned in failed_start in this case.
 *
 * [start, end] is inclusive.
 * This takes the tree lock.
 */
int set_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits,
		   int exclusive, u64 *failed_start, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	unsigned long flags;
	int err = 0;
	int set;
	u64 last_start;
	u64 last_end;

again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	write_lock_irqsave(&tree->lock, flags);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node) {
		err = insert_state(tree, prealloc, start, end, bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		goto out;
	}

	state = rb_entry(node, struct extent_state, rb_node);
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		set = state->state & bits;
		if (set && exclusive) {
			*failed_start = state->start;
			err = -EEXIST;
			goto out;
		}
		state->state |= bits;
		start = state->end + 1;
		merge_state(tree, state);
		goto search_again;
	}

	/*
	 *     | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		set = state->state & bits;
		if (exclusive && set) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			state->state |= bits;
			start = state->end + 1;
			merge_state(tree, state);
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;
		err = insert_state(tree, prealloc, start, this_end,
				   bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		if (err)
			goto out;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		set = state->state & bits;
		if (exclusive && set) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		prealloc->state |= bits;
		merge_state(tree, prealloc);
		prealloc = NULL;
		goto out;
	}

	goto search_again;

out:
	write_unlock_irqrestore(&tree->lock, flags);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

search_again:
	if (start > end)
		goto out;
	write_unlock_irqrestore(&tree->lock, flags);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
EXPORT_SYMBOL(set_extent_bit);

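/*
 * Illustrative sketch, not part of the original file: with exclusive == 1
 * the call behaves like a try-lock.  If any part of the range already has
 * the bit set, -EEXIST comes back and failed_start tells the caller where
 * the collision began.  'tree', 'start' and 'end' are assumptions made up
 * for the example.
 *
 *	u64 failed_start;
 *	int err;
 *
 *	err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
 *			     &failed_start, GFP_NOFS);
 *	if (err == -EEXIST)
 *		wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
 *
 * (this is exactly the loop lock_extent() runs below)
 */
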
/* wrappers around set/clear extent bit */
int set_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_dirty);

int set_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
		    int bits, gfp_t mask)
{
	return set_extent_bit(tree, start, end, bits, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_bits);

int clear_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
		      int bits, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_bits);

int set_extent_delalloc(struct extent_map_tree *tree, u64 start, u64 end,
			gfp_t mask)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_DIRTY, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_delalloc);

int clear_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
		       gfp_t mask)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_dirty);

int set_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
		   gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_new);

int clear_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_new);

int set_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
			gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_uptodate);

int clear_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
			  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_uptodate);

int set_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
			 gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
			      0, NULL, mask);
}
EXPORT_SYMBOL(set_extent_writeback);

int clear_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
			   gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
}
EXPORT_SYMBOL(clear_extent_writeback);

int wait_on_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end)
{
	return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
}
EXPORT_SYMBOL(wait_on_extent_writeback);

/*
 * locks a range in ascending order, waiting for any locked regions
 * it hits on the way.  [start, end] are inclusive, and this will sleep.
 */
int lock_extent(struct extent_map_tree *tree, u64 start, u64 end, gfp_t mask)
{
	int err;
	u64 failed_start;

	while (1) {
		err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
				     &failed_start, mask);
		if (err == -EEXIST && (mask & __GFP_WAIT)) {
			wait_extent_bit(tree, failed_start, end,
					EXTENT_LOCKED);
			start = failed_start;
		} else {
			break;
		}
		WARN_ON(start > end);
	}
	return err;
}
EXPORT_SYMBOL(lock_extent);

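/*
 * Illustrative sketch, not part of the original file: bracketing work on
 * a byte range with lock_extent()/unlock_extent().  'tree', 'start' and
 * 'len' are assumptions made up for the example.
 *
 *	lock_extent(tree, start, start + len - 1, GFP_NOFS);
 *	... read or update pages covering [start, start + len - 1] ...
 *	unlock_extent(tree, start, start + len - 1, GFP_NOFS);
 */
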
int unlock_extent(struct extent_map_tree *tree, u64 start, u64 end,
		  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
}
EXPORT_SYMBOL(unlock_extent);

/*
 * helper function to set pages and extents in the tree dirty
 */
int set_range_dirty(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		__set_page_dirty_nobuffers(page);
		page_cache_release(page);
		index++;
	}
	set_extent_dirty(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(set_range_dirty);

/*
 * helper function to set both pages and extents in the tree writeback
 */
int set_range_writeback(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		set_page_writeback(page);
		page_cache_release(page);
		index++;
	}
	set_extent_writeback(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(set_range_writeback);

int find_first_extent_bit(struct extent_map_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, int bits)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 1;

	read_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node || IS_ERR(node)) {
		goto out;
	}

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->end >= start && (state->state & bits)) {
			*start_ret = state->start;
			*end_ret = state->end;
			ret = 0;
			break;
		}
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	read_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(find_first_extent_bit);

u64 find_lock_delalloc_range(struct extent_map_tree *tree,
			     u64 start, u64 lock_start, u64 *end,
			     u64 max_bytes)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = start;
	u64 found = 0;
	u64 total_bytes = 0;

	write_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
search_again:
	node = tree_search(&tree->state, cur_start);
	if (!node || IS_ERR(node)) {
		goto out;
	}

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->start != cur_start) {
			goto out;
		}
		if (!(state->state & EXTENT_DELALLOC)) {
			goto out;
		}
		if (state->start >= lock_start) {
			if (state->state & EXTENT_LOCKED) {
				DEFINE_WAIT(wait);
				atomic_inc(&state->refs);
				prepare_to_wait(&state->wq, &wait,
						TASK_UNINTERRUPTIBLE);
				write_unlock_irq(&tree->lock);
				schedule();
				write_lock_irq(&tree->lock);
				finish_wait(&state->wq, &wait);
				free_extent_state(state);
				goto search_again;
			}
			state->state |= EXTENT_LOCKED;
		}
		found++;
		*end = state->end;
		cur_start = state->end + 1;
		node = rb_next(node);
		if (!node)
			break;
		total_bytes += state->end - state->start + 1;
		if (total_bytes >= max_bytes)
			break;
	}
out:
	write_unlock_irq(&tree->lock);
	return found;
}

/*
 * helper function to lock both pages and extents in the tree.
 * pages must be locked first.
 */
int lock_range(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;
	int err;

	while (index <= end_index) {
		page = grab_cache_page(tree->mapping, index);
		if (!page) {
			err = -ENOMEM;
			goto failed;
		}
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto failed;
		}
		index++;
	}
	lock_extent(tree, start, end, GFP_NOFS);
	return 0;

failed:
	/*
	 * we failed above in getting the page at 'index', so we undo here
	 * up to but not including the page at 'index'
	 */
	end_index = index;
	index = start >> PAGE_CACHE_SHIFT;
	while (index < end_index) {
		page = find_get_page(tree->mapping, index);
		unlock_page(page);
		page_cache_release(page);
		index++;
	}
	return err;
}
EXPORT_SYMBOL(lock_range);

/*
 * helper function to unlock both pages and extents in the tree.
 */
int unlock_range(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		unlock_page(page);
		page_cache_release(page);
		index++;
	}
	unlock_extent(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(unlock_range);

int set_state_private(struct extent_map_tree *tree, u64 start, u64 private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	write_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node || IS_ERR(node)) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	state->private = private;
out:
	write_unlock_irq(&tree->lock);
	return ret;
}

int get_state_private(struct extent_map_tree *tree, u64 start, u64 *private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	read_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node || IS_ERR(node)) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	*private = state->private;
out:
	read_unlock_irq(&tree->lock);
	return ret;
}

/*
 * searches a range in the state tree for a given mask.
 * If 'filled' == 1, this returns 1 only if every extent in the range
 * has the bits set.  Otherwise, 1 is returned if any bit in the
 * range is found set.
 */
int test_range_bit(struct extent_map_tree *tree, u64 start, u64 end,
		   int bits, int filled)
{
	struct extent_state *state = NULL;
	struct rb_node *node;
	int bitset = 0;

	read_lock_irq(&tree->lock);
	node = tree_search(&tree->state, start);
	while (node && start <= end) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->start > end)
			break;

		if (filled && state->start > start) {
			bitset = 0;
			break;
		}
		if (state->state & bits) {
			bitset = 1;
			if (!filled)
				break;
		} else if (filled) {
			bitset = 0;
			break;
		}
		start = state->end + 1;
		if (start > end)
			break;
		node = rb_next(node);
	}
	read_unlock_irq(&tree->lock);
	return bitset;
}
EXPORT_SYMBOL(test_range_bit);

/*
 * helper function to set a given page up to date if all the
 * extents in the tree for that page are up to date
 */
static int check_page_uptodate(struct extent_map_tree *tree,
			       struct page *page)
{
	/* cast before shifting so large indexes don't overflow on 32 bit */
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;

	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
		SetPageUptodate(page);
	return 0;
}

/*
 * helper function to unlock a page if all the extents in the tree
 * for that page are unlocked
 */
static int check_page_locked(struct extent_map_tree *tree,
			     struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;

	if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
		unlock_page(page);
	return 0;
}

/*
 * helper function to end page writeback if all the extents
 * in the tree for that page are done with writeback
 */
static int check_page_writeback(struct extent_map_tree *tree,
				struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;

	if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
		end_page_writeback(page);
	return 0;
}

/* lots and lots of room for performance fixes in the end_bio funcs */

/*
 * after a writepage IO is done, we need to:
 * clear the uptodate bits on error
 * clear the writeback bits in the extent tree for this IO
 * end_page_writeback if the page has no more pending IO
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
static int end_bio_extent_writepage(struct bio *bio,
				    unsigned int bytes_done, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_map_tree *tree = bio->bi_private;
	u64 start;
	u64 end;
	int whole_page;

	if (bio->bi_size)
		return 1;

	do {
		struct page *page = bvec->bv_page;
		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
			whole_page = 1;
		else
			whole_page = 0;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (!uptodate) {
			clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
			ClearPageUptodate(page);
			SetPageError(page);
		}
		clear_extent_writeback(tree, start, end, GFP_ATOMIC);

		if (whole_page)
			end_page_writeback(page);
		else
			check_page_writeback(tree, page);
		if (tree->ops && tree->ops->writepage_end_io_hook)
			tree->ops->writepage_end_io_hook(page, start, end);
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
	return 0;
}

/*
 * after a readpage IO is done, we need to:
 * clear the uptodate bits on error
 * set the uptodate bits if things worked
 * set the page up to date if all extents in the tree are uptodate
 * clear the lock bit in the extent tree
 * unlock the page if there are no other extents locked for it
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
static int end_bio_extent_readpage(struct bio *bio,
				   unsigned int bytes_done, int err)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_map_tree *tree = bio->bi_private;
	u64 start;
	u64 end;
	int whole_page;
	int ret;

	if (bio->bi_size)
		return 1;

	do {
		struct page *page = bvec->bv_page;
		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
			whole_page = 1;
		else
			whole_page = 0;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
			ret = tree->ops->readpage_end_io_hook(page, start,
							      end);
			if (ret)
				uptodate = 0;
		}
		if (uptodate) {
			set_extent_uptodate(tree, start, end, GFP_ATOMIC);
			if (whole_page)
				SetPageUptodate(page);
			else
				check_page_uptodate(tree, page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}

		unlock_extent(tree, start, end, GFP_ATOMIC);

		if (whole_page)
			unlock_page(page);
		else
			check_page_locked(tree, page);
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
	return 0;
}

/*
 * IO done from prepare_write is pretty simple, we just unlock
 * the structs in the extent tree when done, and set the uptodate bits
 * as appropriate.
 */
static int end_bio_extent_preparewrite(struct bio *bio,
				       unsigned int bytes_done, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_map_tree *tree = bio->bi_private;
	u64 start;
	u64 end;

	if (bio->bi_size)
		return 1;

	do {
		struct page *page = bvec->bv_page;
		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			set_extent_uptodate(tree, start, end, GFP_ATOMIC);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}

		unlock_extent(tree, start, end, GFP_ATOMIC);

	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
	return 0;
}

static int submit_extent_page(int rw, struct extent_map_tree *tree,
			      struct page *page, sector_t sector,
			      size_t size, unsigned long offset,
			      struct block_device *bdev,
			      bio_end_io_t end_io_func)
{
	struct bio *bio;
	int ret = 0;

	bio = bio_alloc(GFP_NOIO, 1);

	bio->bi_sector = sector;
	bio->bi_bdev = bdev;
	bio->bi_io_vec[0].bv_page = page;
	bio->bi_io_vec[0].bv_len = size;
	bio->bi_io_vec[0].bv_offset = offset;

	bio->bi_vcnt = 1;
	bio->bi_idx = 0;
	bio->bi_size = size;

	bio->bi_end_io = end_io_func;
	bio->bi_private = tree;

	bio_get(bio);
	submit_bio(rw, bio);

	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;

	bio_put(bio);
	return ret;
}

void set_page_extent_mapped(struct page *page)
{
	if (!PagePrivate(page)) {
		SetPagePrivate(page);
		WARN_ON(!page->mapping->a_ops->invalidatepage);
		set_page_private(page, 1);
		page_cache_get(page);
	}
}

/*
 * basic readpage implementation.  Locked extent state structs are inserted
 * into the tree that are removed when the IO is done (by the end_io
 * handlers)
 */
int extent_read_full_page(struct extent_map_tree *tree, struct page *page,
			  get_extent_t *get_extent)
{
	struct inode *inode = page->mapping->host;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 page_end = start + PAGE_CACHE_SIZE - 1;
	u64 end;
	u64 cur = start;
	u64 extent_offset;
	u64 last_byte = i_size_read(inode);
	u64 block_start;
	u64 cur_end;
	sector_t sector;
	struct extent_map *em;
	struct block_device *bdev;
	int ret;
	int nr = 0;
	size_t page_offset = 0;
	size_t iosize;
	size_t blocksize = inode->i_sb->s_blocksize;

	set_page_extent_mapped(page);

	end = page_end;
	lock_extent(tree, start, end, GFP_NOFS);

	while (cur <= end) {
		if (cur >= last_byte) {
			iosize = PAGE_CACHE_SIZE - page_offset;
			zero_user_page(page, page_offset, iosize, KM_USER0);
			set_extent_uptodate(tree, cur, cur + iosize - 1,
					    GFP_NOFS);
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			break;
		}
		em = get_extent(inode, page, page_offset, cur, end, 0);
		if (IS_ERR(em) || !em) {
			SetPageError(page);
			unlock_extent(tree, cur, end, GFP_NOFS);
			break;
		}

		extent_offset = cur - em->start;
		BUG_ON(em->end < cur);
		BUG_ON(end < cur);

		iosize = min(em->end - cur, end - cur) + 1;
		cur_end = min(em->end, end);
		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
		sector = (em->block_start + extent_offset) >> 9;
		bdev = em->bdev;
		block_start = em->block_start;
		free_extent_map(em);
		em = NULL;

		/* we've found a hole, just zero and go on */
		if (block_start == EXTENT_MAP_HOLE) {
			zero_user_page(page, page_offset, iosize, KM_USER0);
			set_extent_uptodate(tree, cur, cur + iosize - 1,
					    GFP_NOFS);
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}
		/* the get_extent function already copied into the page */
		if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}

		ret = 0;
		if (tree->ops && tree->ops->readpage_io_hook) {
			ret = tree->ops->readpage_io_hook(page, cur,
							  cur + iosize - 1);
		}
		if (!ret) {
			ret = submit_extent_page(READ, tree, page,
						 sector, iosize, page_offset,
						 bdev,
						 end_bio_extent_readpage);
		}
		if (ret)
			SetPageError(page);
		cur = cur + iosize;
		page_offset += iosize;
		nr++;
	}
	if (!nr) {
		if (!PageError(page))
			SetPageUptodate(page);
		unlock_page(page);
	}
	return 0;
}
EXPORT_SYMBOL(extent_read_full_page);

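/*
 * Illustrative sketch, not part of the original file: a filesystem wires
 * this up from its address_space readpage callback, supplying its own
 * get_extent callback.  The BTRFS_I() accessor, the extent_tree field and
 * 'btrfs_get_extent' are assumptions made up for the example.
 *
 *	static int example_readpage(struct file *file, struct page *page)
 *	{
 *		struct extent_map_tree *tree;
 *
 *		tree = &BTRFS_I(page->mapping->host)->extent_tree;
 *		return extent_read_full_page(tree, page, btrfs_get_extent);
 *	}
 */
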
/*
 * the writepage semantics are similar to regular writepage.  extent
 * records are inserted to lock ranges in the tree, and as dirty areas
 * are found, they are marked writeback.  Then the lock bits are removed
 * and the end_io handler clears the writeback ranges
 */
int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
			   get_extent_t *get_extent,
			   struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 page_end = start + PAGE_CACHE_SIZE - 1;
	u64 end;
	u64 cur = start;
	u64 extent_offset;
	u64 last_byte = i_size_read(inode);
	u64 block_start;
	sector_t sector;
	struct extent_map *em;
	struct block_device *bdev;
	int ret;
	int nr = 0;
	size_t page_offset = 0;
	size_t iosize;
	size_t blocksize;
	loff_t i_size = i_size_read(inode);
	unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
	u64 nr_delalloc;
	u64 delalloc_end;

	WARN_ON(!PageLocked(page));
	if (page->index > end_index) {
		clear_extent_dirty(tree, start, page_end, GFP_NOFS);
		unlock_page(page);
		return 0;
	}

	if (page->index == end_index) {
		size_t offset = i_size & (PAGE_CACHE_SIZE - 1);

		zero_user_page(page, offset,
			       PAGE_CACHE_SIZE - offset, KM_USER0);
	}

	set_page_extent_mapped(page);

	lock_extent(tree, start, page_end, GFP_NOFS);
	nr_delalloc = find_lock_delalloc_range(tree, start, page_end + 1,
					       &delalloc_end,
					       128 * 1024 * 1024);
	if (nr_delalloc) {
		tree->ops->fill_delalloc(inode, start, delalloc_end);
		if (delalloc_end >= page_end + 1) {
			clear_extent_bit(tree, page_end + 1, delalloc_end,
					 EXTENT_LOCKED | EXTENT_DELALLOC,
					 1, 0, GFP_NOFS);
		}
		clear_extent_bit(tree, start, page_end, EXTENT_DELALLOC,
				 0, 0, GFP_NOFS);
		if (test_range_bit(tree, start, page_end,
				   EXTENT_DELALLOC, 0)) {
			printk("found delalloc bits after clear extent_bit\n");
		}
	} else if (test_range_bit(tree, start, page_end,
				  EXTENT_DELALLOC, 0)) {
		printk("found delalloc bits after find_delalloc_range returns 0\n");
	}

	end = page_end;
	if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
		printk("found delalloc bits after lock_extent\n");
	}

	if (last_byte <= start) {
		clear_extent_dirty(tree, start, page_end, GFP_NOFS);
		goto done;
	}

	set_extent_uptodate(tree, start, page_end, GFP_NOFS);
	blocksize = inode->i_sb->s_blocksize;

	while (cur <= end) {
		if (cur >= last_byte) {
			clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
			break;
		}
		em = get_extent(inode, page, page_offset, cur, end, 0);
		if (IS_ERR(em) || !em) {
			SetPageError(page);
			break;
		}

		extent_offset = cur - em->start;
		BUG_ON(em->end < cur);
		BUG_ON(end < cur);
		iosize = min(em->end - cur, end - cur) + 1;
		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
		sector = (em->block_start + extent_offset) >> 9;
		bdev = em->bdev;
		block_start = em->block_start;
		free_extent_map(em);
		em = NULL;

		if (block_start == EXTENT_MAP_HOLE ||
		    block_start == EXTENT_MAP_INLINE) {
			clear_extent_dirty(tree, cur,
					   cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}

		/* leave this out until we have a page_mkwrite call */
		if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
					 EXTENT_DIRTY, 0)) {
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}
		clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
		if (tree->ops && tree->ops->writepage_io_hook) {
			ret = tree->ops->writepage_io_hook(page, cur,
							   cur + iosize - 1);
		} else {
			ret = 0;
		}
		if (ret)
			SetPageError(page);
		else {
			set_range_writeback(tree, cur, cur + iosize - 1);
			ret = submit_extent_page(WRITE, tree, page, sector,
						 iosize, page_offset, bdev,
						 end_bio_extent_writepage);
			if (ret)
				SetPageError(page);
		}
		cur = cur + iosize;
		page_offset += iosize;
		nr++;
	}
done:
	unlock_extent(tree, start, page_end, GFP_NOFS);
	unlock_page(page);
	return 0;
}
EXPORT_SYMBOL(extent_write_full_page);

/*
 * basic invalidatepage code, this waits on any locked or writeback
 * ranges corresponding to the page, and then deletes any extent state
 * records from the tree
 */
int extent_invalidatepage(struct extent_map_tree *tree,
			  struct page *page, unsigned long offset)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	size_t blocksize = page->mapping->host->i_sb->s_blocksize;

	start += (offset + blocksize - 1) & ~(blocksize - 1);
	if (start > end)
		return 0;

	lock_extent(tree, start, end, GFP_NOFS);
	wait_on_extent_writeback(tree, start, end);
	clear_extent_bit(tree, start, end,
			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
			 1, 1, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(extent_invalidatepage);

/*
 * simple commit_write call, set_range_dirty is used to mark both
 * the pages and the extent records as dirty
 */
int extent_commit_write(struct extent_map_tree *tree,
			struct inode *inode, struct page *page,
			unsigned from, unsigned to)
{
	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;

	set_page_extent_mapped(page);
	set_page_dirty(page);

	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}
	return 0;
}
EXPORT_SYMBOL(extent_commit_write);

int extent_prepare_write(struct extent_map_tree *tree,
			 struct inode *inode, struct page *page,
			 unsigned from, unsigned to, get_extent_t *get_extent)
{
	u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
	u64 block_start;
	u64 orig_block_start;
	u64 block_end;
	u64 cur_end;
	struct extent_map *em;
	unsigned blocksize = 1 << inode->i_blkbits;
	size_t page_offset = 0;
	size_t block_off_start;
	size_t block_off_end;
	int err = 0;
	int iocount = 0;
	int ret = 0;
	int isnew;

	set_page_extent_mapped(page);

	block_start = (page_start + from) & ~((u64)blocksize - 1);
	block_end = (page_start + to - 1) | (blocksize - 1);
	orig_block_start = block_start;

	lock_extent(tree, page_start, page_end, GFP_NOFS);
	while (block_start <= block_end) {
		em = get_extent(inode, page, page_offset, block_start,
				block_end, 1);
		if (IS_ERR(em) || !em) {
			goto err;
		}
		cur_end = min(block_end, em->end);
		block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
		block_off_end = block_off_start + blocksize;
		isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);

		if (!PageUptodate(page) && isnew &&
		    (block_off_end > to || block_off_start < from)) {
			void *kaddr;

			kaddr = kmap_atomic(page, KM_USER0);
			if (block_off_end > to)
				memset(kaddr + to, 0, block_off_end - to);
			if (block_off_start < from)
				memset(kaddr + block_off_start, 0,
				       from - block_off_start);
			flush_dcache_page(page);
			kunmap_atomic(kaddr, KM_USER0);
		}
		if (!isnew && !PageUptodate(page) &&
		    (block_off_end > to || block_off_start < from) &&
		    !test_range_bit(tree, block_start, cur_end,
				    EXTENT_UPTODATE, 1)) {
			u64 sector;
			u64 extent_offset = block_start - em->start;
			size_t iosize;
			sector = (em->block_start + extent_offset) >> 9;
			iosize = (cur_end - block_start + blocksize - 1) &
				~((u64)blocksize - 1);
			/*
			 * we've already got the extent locked, but we
			 * need to split the state such that our end_bio
			 * handler can clear the lock.
			 */
			set_extent_bit(tree, block_start,
				       block_start + iosize - 1,
				       EXTENT_LOCKED, 0, NULL, GFP_NOFS);
			ret = submit_extent_page(READ, tree, page,
						 sector, iosize, page_offset,
						 em->bdev,
						 end_bio_extent_preparewrite);
			iocount++;
			block_start = block_start + iosize;
		} else {
			set_extent_uptodate(tree, block_start, cur_end,
					    GFP_NOFS);
			unlock_extent(tree, block_start, cur_end, GFP_NOFS);
			block_start = cur_end + 1;
		}
		page_offset = block_start & (PAGE_CACHE_SIZE - 1);
		free_extent_map(em);
	}
	if (iocount) {
		wait_extent_bit(tree, orig_block_start,
				block_end, EXTENT_LOCKED);
	}
	check_page_uptodate(tree, page);
err:
	/* FIXME, zero out newly allocated blocks on error */
	return err;
}
EXPORT_SYMBOL(extent_prepare_write);

/*
 * a helper for releasepage.  As long as there are no locked extents
 * in the range corresponding to the page, both state records and extent
 * map records are removed
 */
int try_release_extent_mapping(struct extent_map_tree *tree, struct page *page)
{
	struct extent_map *em;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	u64 orig_start = start;
	int ret = 1;

	while (start <= end) {
		em = lookup_extent_mapping(tree, start, end);
		if (!em || IS_ERR(em))
			break;
		if (!test_range_bit(tree, em->start, em->end,
				    EXTENT_LOCKED, 0)) {
			remove_extent_mapping(tree, em);
			/* once for the rb tree */
			free_extent_map(em);
		}
		start = em->end + 1;
		/* once for us */
		free_extent_map(em);
	}
	if (test_range_bit(tree, orig_start, end, EXTENT_LOCKED, 0))
		ret = 0;
	else
		clear_extent_bit(tree, orig_start, end, EXTENT_UPTODATE,
				 1, 1, GFP_NOFS);
	return ret;
}
EXPORT_SYMBOL(try_release_extent_mapping);

sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
		     get_extent_t *get_extent)
{
	struct inode *inode = mapping->host;
	u64 start = (u64)iblock << inode->i_blkbits;
	u64 end = start + (1 << inode->i_blkbits) - 1;
	sector_t sector = 0;
	struct extent_map *em;

	em = get_extent(inode, NULL, 0, start, end, 0);
	if (!em || IS_ERR(em))
		return 0;

	if (em->block_start == EXTENT_MAP_INLINE ||
	    em->block_start == EXTENT_MAP_HOLE)
		goto out;

	sector = (em->block_start + start - em->start) >> inode->i_blkbits;
out:
	/* drop the reference get_extent gave us */
	free_extent_map(em);
	return sector;
}

static int add_lru(struct extent_map_tree *tree, struct extent_buffer *eb)
{
	if (list_empty(&eb->lru)) {
		extent_buffer_get(eb);
		list_add(&eb->lru, &tree->buffer_lru);
		tree->lru_size++;
		if (tree->lru_size >= BUFFER_LRU_MAX) {
			struct extent_buffer *rm;
			rm = list_entry(tree->buffer_lru.prev,
					struct extent_buffer, lru);
			tree->lru_size--;
			list_del(&rm->lru);
			free_extent_buffer(rm);
		}
	} else
		list_move(&eb->lru, &tree->buffer_lru);
	return 0;
}

static struct extent_buffer *find_lru(struct extent_map_tree *tree,
				      u64 start, unsigned long len)
{
	struct list_head *lru = &tree->buffer_lru;
	struct list_head *cur = lru->next;
	struct extent_buffer *eb;

	if (list_empty(lru))
		return NULL;

	do {
		eb = list_entry(cur, struct extent_buffer, lru);
		if (eb->start == start && eb->len == len) {
			extent_buffer_get(eb);
			return eb;
		}
		cur = cur->next;
	} while (cur != lru);
	return NULL;
}

static inline unsigned long num_extent_pages(u64 start, u64 len)
{
	return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
		(start >> PAGE_CACHE_SHIFT);
}

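/*
 * Worked example, added for illustration: with 4k pages, a buffer at
 * start 3072 with len 6144 covers bytes [3072, 9215], so this returns
 * ((3072 + 6144 + 4095) >> 12) - (3072 >> 12) = 3 - 0 = 3 pages.
 */
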
static inline struct page *extent_buffer_page(struct extent_buffer *eb,
					      unsigned long i)
{
	struct page *p;

	if (i == 0)
		return eb->first_page;
	i += eb->start >> PAGE_CACHE_SHIFT;
	p = find_get_page(eb->first_page->mapping, i);
	/* the extent buffer already holds a reference on the page */
	page_cache_release(p);
	return p;
}

static struct extent_buffer *__alloc_extent_buffer(struct extent_map_tree *tree,
						   u64 start,
						   unsigned long len,
						   gfp_t mask)
{
	struct extent_buffer *eb = NULL;

	spin_lock(&tree->lru_lock);
	eb = find_lru(tree, start, len);
	if (eb)
		goto lru_add;
	spin_unlock(&tree->lru_lock);

	if (eb) {
		memset(eb, 0, sizeof(*eb));
	} else {
		eb = kmem_cache_zalloc(extent_buffer_cache, mask);
	}
	INIT_LIST_HEAD(&eb->lru);
	eb->start = start;
	eb->len = len;
	atomic_set(&eb->refs, 1);

	spin_lock(&tree->lru_lock);
lru_add:
	add_lru(tree, eb);
	spin_unlock(&tree->lru_lock);
	return eb;
}

static void __free_extent_buffer(struct extent_buffer *eb)
{
	kmem_cache_free(extent_buffer_cache, eb);
}

struct extent_buffer *alloc_extent_buffer(struct extent_map_tree *tree,
					  u64 start, unsigned long len,
					  gfp_t mask)
{
	unsigned long num_pages = num_extent_pages(start, len);
	unsigned long i;
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	struct extent_buffer *eb;
	struct page *p;
	struct address_space *mapping = tree->mapping;
	int uptodate = 1;

	eb = __alloc_extent_buffer(tree, start, len, mask);
	if (!eb || IS_ERR(eb))
		return NULL;

	if (eb->flags & EXTENT_BUFFER_FILLED)
		return eb;

	for (i = 0; i < num_pages; i++, index++) {
		p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
		if (!p) {
			WARN_ON(1);
			/* make sure the free only frees the pages we've
			 * grabbed a reference on
			 */
			eb->len = i << PAGE_CACHE_SHIFT;
			eb->start &= ~((u64)PAGE_CACHE_SIZE - 1);
			goto fail;
		}
		set_page_extent_mapped(p);
		if (i == 0)
			eb->first_page = p;
		if (!PageUptodate(p))
			uptodate = 0;
		unlock_page(p);
	}
	if (uptodate)
		eb->flags |= EXTENT_UPTODATE;
	eb->flags |= EXTENT_BUFFER_FILLED;
	return eb;
fail:
	free_extent_buffer(eb);
	return NULL;
}
EXPORT_SYMBOL(alloc_extent_buffer);

struct extent_buffer *find_extent_buffer(struct extent_map_tree *tree,
					 u64 start, unsigned long len,
					 gfp_t mask)
{
	unsigned long num_pages = num_extent_pages(start, len);
	unsigned long i;
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	struct extent_buffer *eb;
	struct page *p;
	struct address_space *mapping = tree->mapping;
	int uptodate = 1;

	eb = __alloc_extent_buffer(tree, start, len, mask);
	if (!eb || IS_ERR(eb))
		return NULL;

	if (eb->flags & EXTENT_BUFFER_FILLED)
		return eb;

	for (i = 0; i < num_pages; i++, index++) {
		p = find_lock_page(mapping, index);
		if (!p) {
			/* make sure the free only frees the pages we've
			 * grabbed a reference on
			 */
			eb->len = i << PAGE_CACHE_SHIFT;
			eb->start &= ~((u64)PAGE_CACHE_SIZE - 1);
			goto fail;
		}
		set_page_extent_mapped(p);
		if (i == 0)
			eb->first_page = p;
		if (!PageUptodate(p))
			uptodate = 0;
		unlock_page(p);
	}
	if (uptodate)
		eb->flags |= EXTENT_UPTODATE;
	eb->flags |= EXTENT_BUFFER_FILLED;
	return eb;
fail:
	free_extent_buffer(eb);
	return NULL;
}
EXPORT_SYMBOL(find_extent_buffer);

void free_extent_buffer(struct extent_buffer *eb)
{
	unsigned long i;
	unsigned long num_pages;

	if (!eb)
		return;

	if (!atomic_dec_and_test(&eb->refs))
		return;

	num_pages = num_extent_pages(eb->start, eb->len);

	for (i = 0; i < num_pages; i++) {
		page_cache_release(extent_buffer_page(eb, i));
	}
	__free_extent_buffer(eb);
}
EXPORT_SYMBOL(free_extent_buffer);

int clear_extent_buffer_dirty(struct extent_map_tree *tree,
			      struct extent_buffer *eb)
{
	int set;
	unsigned long i;
	unsigned long num_pages;
	struct page *page;

	u64 start = eb->start;
	u64 end = start + eb->len - 1;

	set = clear_extent_dirty(tree, start, end, GFP_NOFS);
	num_pages = num_extent_pages(eb->start, eb->len);

	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		lock_page(page);
		/*
		 * if we're on the last page or the first page and the
		 * block isn't aligned on a page boundary, do extra checks
		 * to make sure we don't clean page that is partially dirty
		 */
		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
		    ((i == num_pages - 1) &&
		     ((eb->start + eb->len - 1) & (PAGE_CACHE_SIZE - 1)))) {
			start = (u64)page->index << PAGE_CACHE_SHIFT;
			end = start + PAGE_CACHE_SIZE - 1;
			if (test_range_bit(tree, start, end,
					   EXTENT_DIRTY, 0)) {
				unlock_page(page);
				continue;
			}
		}
		clear_page_dirty_for_io(page);
		unlock_page(page);
	}
	return 0;
}
EXPORT_SYMBOL(clear_extent_buffer_dirty);

int wait_on_extent_buffer_writeback(struct extent_map_tree *tree,
				    struct extent_buffer *eb)
{
	return wait_on_extent_writeback(tree, eb->start,
					eb->start + eb->len - 1);
}
EXPORT_SYMBOL(wait_on_extent_buffer_writeback);

int set_extent_buffer_dirty(struct extent_map_tree *tree,
			    struct extent_buffer *eb)
{
	unsigned long i;
	unsigned long num_pages;

	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = 0; i < num_pages; i++) {
		__set_page_dirty_nobuffers(extent_buffer_page(eb, i));
	}
	return set_extent_dirty(tree, eb->start,
				eb->start + eb->len - 1, GFP_NOFS);
}
EXPORT_SYMBOL(set_extent_buffer_dirty);

int set_extent_buffer_uptodate(struct extent_map_tree *tree,
			       struct extent_buffer *eb)
{
	unsigned long i;
	struct page *page;
	unsigned long num_pages;

	num_pages = num_extent_pages(eb->start, eb->len);

	set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
			    GFP_NOFS);
	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
		    ((i == num_pages - 1) &&
		     ((eb->start + eb->len - 1) & (PAGE_CACHE_SIZE - 1)))) {
			check_page_uptodate(tree, page);
			continue;
		}
		SetPageUptodate(page);
	}
	return 0;
}
EXPORT_SYMBOL(set_extent_buffer_uptodate);

int extent_buffer_uptodate(struct extent_map_tree *tree,
			   struct extent_buffer *eb)
{
	if (eb->flags & EXTENT_UPTODATE)
		return 1;
	return test_range_bit(tree, eb->start, eb->start + eb->len - 1,
			      EXTENT_UPTODATE, 1);
}
EXPORT_SYMBOL(extent_buffer_uptodate);

int read_extent_buffer_pages(struct extent_map_tree *tree,
			     struct extent_buffer *eb, int wait)
{
	unsigned long i;
	struct page *page;
	int err;
	int ret = 0;
	unsigned long num_pages;

	if (eb->flags & EXTENT_UPTODATE)
		return 0;

	if (0 && test_range_bit(tree, eb->start, eb->start + eb->len - 1,
				EXTENT_UPTODATE, 1)) {
		return 0;
	}

	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if (PageUptodate(page)) {
			continue;
		}
		if (!wait) {
			if (TestSetPageLocked(page)) {
				continue;
			}
		} else {
			lock_page(page);
		}
		if (!PageUptodate(page)) {
			err = page->mapping->a_ops->readpage(NULL, page);
			if (err) {
				ret = err;
			}
		} else {
			unlock_page(page);
		}
	}

	if (ret || !wait) {
		return ret;
	}

	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		wait_on_page_locked(page);
		if (!PageUptodate(page)) {
			ret = -EIO;
		}
	}
	eb->flags |= EXTENT_UPTODATE;
	return ret;
}
EXPORT_SYMBOL(read_extent_buffer_pages);

void read_extent_buffer(struct extent_buffer *eb, void *dstv,
			unsigned long start,
			unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *dst = (char *)dstv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	unsigned long num_pages = num_extent_pages(eb->start, eb->len);

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = start & ((unsigned long)PAGE_CACHE_SIZE - 1);
	if (i == 0)
		offset += start_offset;

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		if (!PageUptodate(page)) {
			printk("page %lu not up to date i %lu, total %lu, len %lu\n",
			       page->index, i, num_pages, eb->len);
			WARN_ON(1);
		}
		WARN_ON(!PageUptodate(page));

		cur = min(len, (PAGE_CACHE_SIZE - offset));
		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(dst, kaddr + offset, cur);
		kunmap_atomic(kaddr, KM_USER0);

		dst += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(read_extent_buffer);

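/*
 * Illustrative sketch, not part of the original file: copying a fixed
 * size header out of the front of a buffer into a stack structure.
 * 'struct example_header' is an assumption made up for the example.
 *
 *	struct example_header h;
 *
 *	read_extent_buffer(eb, &h, 0, sizeof(h));
 */
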
static int __map_extent_buffer(struct extent_buffer *eb, unsigned long start,
			       unsigned long min_len, char **token, char **map,
			       unsigned long *map_start,
			       unsigned long *map_len, int km)
{
	size_t offset = start & (PAGE_CACHE_SIZE - 1);
	char *kaddr;
	struct page *p;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	unsigned long end_i = (start_offset + start + min_len) >>
		PAGE_CACHE_SHIFT;

	if (i != end_i)
		return -EINVAL;

	if (i == 0) {
		offset = start_offset;
		*map_start = 0;
	} else {
		offset = 0;
		*map_start = (i << PAGE_CACHE_SHIFT) - start_offset;
	}

	p = extent_buffer_page(eb, i);
	WARN_ON(!PageUptodate(p));
	kaddr = kmap_atomic(p, km);
	*token = kaddr;
	*map = kaddr + offset;
	*map_len = PAGE_CACHE_SIZE - offset;
	return 0;
}

int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
		      unsigned long min_len,
		      char **token, char **map,
		      unsigned long *map_start,
		      unsigned long *map_len, int km)
{
	int err;
	int save = 0;

	if (eb->map_token) {
		unmap_extent_buffer(eb, eb->map_token, km);
		eb->map_token = NULL;
		save = 1;
	}
	err = __map_extent_buffer(eb, start, min_len, token, map,
				  map_start, map_len, km);
	if (!err && save) {
		eb->map_token = *token;
		eb->kaddr = *map;
		eb->map_start = *map_start;
		eb->map_len = *map_len;
	}
	return err;
}
EXPORT_SYMBOL(map_extent_buffer);

void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
{
	kunmap_atomic(token, km);
}
EXPORT_SYMBOL(unmap_extent_buffer);

int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
			 unsigned long start,
			 unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *ptr = (char *)ptrv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	int ret = 0;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = start & ((unsigned long)PAGE_CACHE_SIZE - 1);
	if (i == 0)
		offset += start_offset;

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, (PAGE_CACHE_SIZE - offset));

		kaddr = kmap_atomic(page, KM_USER0);
		ret = memcmp(ptr, kaddr + offset, cur);
		kunmap_atomic(kaddr, KM_USER0);
		if (ret)
			break;

		ptr += cur;
		len -= cur;
		offset = 0;
		i++;
	}
	return ret;
}
EXPORT_SYMBOL(memcmp_extent_buffer);

void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
			 unsigned long start, unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *src = (char *)srcv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = start & ((unsigned long)PAGE_CACHE_SIZE - 1);
	if (i == 0)
		offset += start_offset;

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, PAGE_CACHE_SIZE - offset);
		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(kaddr + offset, src, cur);
		kunmap_atomic(kaddr, KM_USER0);

		src += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(write_extent_buffer);

void memset_extent_buffer(struct extent_buffer *eb, char c,
			  unsigned long start, unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = start & ((unsigned long)PAGE_CACHE_SIZE - 1);
	if (i == 0)
		offset += start_offset;

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, PAGE_CACHE_SIZE - offset);
		kaddr = kmap_atomic(page, KM_USER0);
		memset(kaddr + offset, c, cur);
		kunmap_atomic(kaddr, KM_USER0);

		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(memset_extent_buffer);

void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len)
{
	u64 dst_len = dst->len;
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;

	WARN_ON(src->len != dst_len);

	offset = dst_offset & ((unsigned long)PAGE_CACHE_SIZE - 1);
	if (i == 0)
		offset += start_offset;

	while (len > 0) {
		page = extent_buffer_page(dst, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));

		kaddr = kmap_atomic(page, KM_USER1);
		read_extent_buffer(src, kaddr + offset, src_offset, cur);
		kunmap_atomic(kaddr, KM_USER1);

		src_offset += cur;
		dst_offset += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(copy_extent_buffer);

static void move_pages(struct page *dst_page, struct page *src_page,
		       unsigned long dst_off, unsigned long src_off,
		       unsigned long len)
{
	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);

	if (dst_page == src_page) {
		memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
	} else {
		char *src_kaddr = kmap_atomic(src_page, KM_USER1);
		char *p = dst_kaddr + dst_off + len;
		char *s = src_kaddr + src_off + len;

		/* copy backwards so overlapping ranges are safe */
		while (len--)
			*--p = *--s;

		kunmap_atomic(src_kaddr, KM_USER1);
	}
	kunmap_atomic(dst_kaddr, KM_USER0);
}

static void copy_pages(struct page *dst_page, struct page *src_page,
		       unsigned long dst_off, unsigned long src_off,
		       unsigned long len)
{
	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
	char *src_kaddr;

	if (dst_page != src_page)
		src_kaddr = kmap_atomic(src_page, KM_USER1);
	else
		src_kaddr = dst_kaddr;

	memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
	kunmap_atomic(dst_kaddr, KM_USER0);
	if (dst_page != src_page)
		kunmap_atomic(src_kaddr, KM_USER1);
}

void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			  unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		printk("memcpy bogus src_offset %lu move len %lu len %lu\n",
		       src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		printk("memcpy bogus dst_offset %lu move len %lu len %lu\n",
		       dst_offset, len, dst->len);
		BUG_ON(1);
	}

	while (len > 0) {
		dst_off_in_page = dst_offset &
			((unsigned long)PAGE_CACHE_SIZE - 1);
		src_off_in_page = src_offset &
			((unsigned long)PAGE_CACHE_SIZE - 1);

		dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;

		if (src_i == 0)
			src_off_in_page += start_offset;
		if (dst_i == 0)
			dst_off_in_page += start_offset;

		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
					       src_off_in_page));
		cur = min(cur, (unsigned long)(PAGE_CACHE_SIZE -
					       dst_off_in_page));

		copy_pages(extent_buffer_page(dst, dst_i),
			   extent_buffer_page(dst, src_i),
			   dst_off_in_page, src_off_in_page, cur);

		src_offset += cur;
		dst_offset += cur;
		len -= cur;
	}
}
EXPORT_SYMBOL(memcpy_extent_buffer);

void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	unsigned long dst_end = dst_offset + len - 1;
	unsigned long src_end = src_offset + len - 1;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		printk("memmove bogus src_offset %lu move len %lu len %lu\n",
		       src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
		       dst_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset < src_offset) {
		/* non-overlapping direction, the forward copy is safe */
		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
		return;
	}
	while (len > 0) {
		dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;

		dst_off_in_page = dst_end &
			((unsigned long)PAGE_CACHE_SIZE - 1);
		src_off_in_page = src_end &
			((unsigned long)PAGE_CACHE_SIZE - 1);

		if (src_i == 0)
			src_off_in_page += start_offset;
		if (dst_i == 0)
			dst_off_in_page += start_offset;

		cur = min(len, src_off_in_page + 1);
		cur = min(cur, dst_off_in_page + 1);
		move_pages(extent_buffer_page(dst, dst_i),
			   extent_buffer_page(dst, src_i),
			   dst_off_in_page - cur + 1,
			   src_off_in_page - cur + 1, cur);

		dst_end -= cur;
		src_end -= cur;
		len -= cur;
	}
}
EXPORT_SYMBOL(memmove_extent_buffer);