1 #include <linux/bitops.h>
2 #include <linux/slab.h>
6 #include <linux/pagemap.h>
7 #include <linux/page-flags.h>
8 #include <linux/module.h>
9 #include <linux/spinlock.h>
10 #include <linux/blkdev.h>
11 #include <linux/swap.h>
12 #include <linux/version.h>
13 #include <linux/writeback.h>
14 #include <linux/pagevec.h>
15 #include "extent_io.h"
16 #include "extent_map.h"
18 /* temporary define until extent_map moves out of btrfs */
19 struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
20 unsigned long extra_flags,
21 void (*ctor)(void *, struct kmem_cache *,
24 static struct kmem_cache *extent_state_cache;
25 static struct kmem_cache *extent_buffer_cache;
27 static LIST_HEAD(buffers);
28 static LIST_HEAD(states);
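/* cap on the number of extent_buffers kept on a tree's private LRU (see add_lru) */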
30 #define BUFFER_LRU_MAX 64
35 struct rb_node rb_node;
38 struct extent_page_data {
40 struct extent_io_tree *tree;
41 get_extent_t *get_extent;
44 int __init extent_io_init(void)
46 extent_state_cache = btrfs_cache_create("extent_state",
47 sizeof(struct extent_state), 0,
49 if (!extent_state_cache)
52 extent_buffer_cache = btrfs_cache_create("extent_buffers",
53 sizeof(struct extent_buffer), 0,
55 if (!extent_buffer_cache)
56 goto free_state_cache;
60 kmem_cache_destroy(extent_state_cache);
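/* tear down the slab caches, reporting any extent_state structs that leaked */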
64 void extent_io_exit(void)
66 struct extent_state *state;
68 while (!list_empty(&states)) {
69 state = list_entry(states.next, struct extent_state, list);
70 printk("state leak: start %Lu end %Lu state %lu in tree %p refs %d\n", state->start, state->end, state->state, state->tree, atomic_read(&state->refs));
71 list_del(&state->list);
72 kmem_cache_free(extent_state_cache, state);
76 if (extent_state_cache)
77 kmem_cache_destroy(extent_state_cache);
78 if (extent_buffer_cache)
79 kmem_cache_destroy(extent_buffer_cache);
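/* set up an empty io tree tied to the given address space */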
82 void extent_io_tree_init(struct extent_io_tree *tree,
83 struct address_space *mapping, gfp_t mask)
85 tree->state.rb_node = NULL;
87 tree->dirty_bytes = 0;
88 spin_lock_init(&tree->lock);
89 spin_lock_init(&tree->lru_lock);
90 tree->mapping = mapping;
91 INIT_LIST_HEAD(&tree->buffer_lru);
95 EXPORT_SYMBOL(extent_io_tree_init);
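/* drop every extent_buffer held on the tree's LRU */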
97 void extent_io_tree_empty_lru(struct extent_io_tree *tree)
99 struct extent_buffer *eb;
100 while(!list_empty(&tree->buffer_lru)) {
101 eb = list_entry(tree->buffer_lru.next, struct extent_buffer,
103 list_del_init(&eb->lru);
104 free_extent_buffer(eb);
107 EXPORT_SYMBOL(extent_io_tree_empty_lru);
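/* allocate a new extent_state with a single reference and an initialized wait queue */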
109 struct extent_state *alloc_extent_state(gfp_t mask)
111 struct extent_state *state;
113 state = kmem_cache_alloc(extent_state_cache, mask);
114 if (!state || IS_ERR(state))
120 atomic_set(&state->refs, 1);
121 init_waitqueue_head(&state->wq);
124 EXPORT_SYMBOL(alloc_extent_state);
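/* drop a reference on an extent_state, freeing it once the last ref is gone */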
126 void free_extent_state(struct extent_state *state)
130 if (atomic_dec_and_test(&state->refs)) {
131 WARN_ON(state->tree);
132 kmem_cache_free(extent_state_cache, state);
135 EXPORT_SYMBOL(free_extent_state);
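/*
 * link 'node' into the rb tree keyed on 'offset'. If an existing entry
 * already covers 'offset', that entry is returned instead of inserting.
 */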
137 static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
138 struct rb_node *node)
140 struct rb_node ** p = &root->rb_node;
141 struct rb_node * parent = NULL;
142 struct tree_entry *entry;
146 entry = rb_entry(parent, struct tree_entry, rb_node);
148 if (offset < entry->start)
150 else if (offset > entry->end)
156 entry = rb_entry(node, struct tree_entry, rb_node);
157 rb_link_node(node, parent, p);
158 rb_insert_color(node, root);
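/*
 * find the tree node whose range contains 'offset'. When there is no exact
 * match, prev_ret and next_ret are filled with the neighbors on either side
 * of the gap. tree->last caches the most recently used state to avoid a walk.
 */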
162 static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
163 struct rb_node **prev_ret,
164 struct rb_node **next_ret)
166 struct rb_root *root = &tree->state;
167 struct rb_node * n = root->rb_node;
168 struct rb_node *prev = NULL;
169 struct rb_node *orig_prev = NULL;
170 struct tree_entry *entry;
171 struct tree_entry *prev_entry = NULL;
174 struct extent_state *state;
176 if (state->start <= offset && offset <= state->end)
177 return &tree->last->rb_node;
180 entry = rb_entry(n, struct tree_entry, rb_node);
184 if (offset < entry->start)
186 else if (offset > entry->end)
189 tree->last = rb_entry(n, struct extent_state, rb_node);
196 while(prev && offset > prev_entry->end) {
197 prev = rb_next(prev);
198 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
205 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
206 while(prev && offset < prev_entry->start) {
207 prev = rb_prev(prev);
208 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
215 static inline struct rb_node *tree_search(struct extent_io_tree *tree,
218 struct rb_node *prev = NULL;
221 ret = __etree_search(tree, offset, &prev, NULL);
224 tree->last = rb_entry(prev, struct extent_state,
233 * utility function to look for merge candidates inside a given range.
234 * Any extents with matching state are merged together into a single
235 * extent in the tree. Extents with EXTENT_IOBITS in their state field
236 * are not merged because the end_io handlers need to be able to do
237 * operations on them without sleeping (or doing allocations/splits).
239 * This should be called with the tree lock held.
241 static int merge_state(struct extent_io_tree *tree,
242 struct extent_state *state)
244 struct extent_state *other;
245 struct rb_node *other_node;
247 if (state->state & EXTENT_IOBITS)
250 other_node = rb_prev(&state->rb_node);
252 other = rb_entry(other_node, struct extent_state, rb_node);
253 if (other->end == state->start - 1 &&
254 other->state == state->state) {
255 state->start = other->start;
257 if (tree->last == other)
259 rb_erase(&other->rb_node, &tree->state);
260 free_extent_state(other);
263 other_node = rb_next(&state->rb_node);
265 other = rb_entry(other_node, struct extent_state, rb_node);
266 if (other->start == state->end + 1 &&
267 other->state == state->state) {
268 other->start = state->start;
270 if (tree->last == state)
272 rb_erase(&state->rb_node, &tree->state);
273 free_extent_state(state);
279 static void set_state_cb(struct extent_io_tree *tree,
280 struct extent_state *state,
283 if (tree->ops && tree->ops->set_bit_hook) {
284 tree->ops->set_bit_hook(tree->mapping->host, state->start,
285 state->end, state->state, bits);
289 static void clear_state_cb(struct extent_io_tree *tree,
290 struct extent_state *state,
293 if (tree->ops && tree->ops->clear_bit_hook) {
294 tree->ops->clear_bit_hook(tree->mapping->host, state->start,
295 state->end, state->state, bits);
300 * insert an extent_state struct into the tree. 'bits' are set on the
301 * struct before it is inserted.
303 * This may return -EEXIST if the extent is already there, in which case the
304 * state struct is freed.
306 * The tree lock is not taken internally. This is a utility function and
307 * probably isn't what you want to call (see set/clear_extent_bit).
309 static int insert_state(struct extent_io_tree *tree,
310 struct extent_state *state, u64 start, u64 end,
313 struct rb_node *node;
316 printk("end < start %Lu %Lu\n", end, start);
319 if (bits & EXTENT_DIRTY)
320 tree->dirty_bytes += end - start + 1;
321 set_state_cb(tree, state, bits);
322 state->state |= bits;
323 state->start = start;
325 node = tree_insert(&tree->state, end, &state->rb_node);
327 struct extent_state *found;
328 found = rb_entry(node, struct extent_state, rb_node);
329 printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, start, end);
330 free_extent_state(state);
335 merge_state(tree, state);
340 * split a given extent state struct in two, inserting the preallocated
341 * struct 'prealloc' as the newly created second half. 'split' indicates an
342 * offset inside 'orig' where it should be split.
345 * the tree has 'orig' at [orig->start, orig->end]. After calling, there
346 * are two extent state structs in the tree:
347 * prealloc: [orig->start, split - 1]
348 * orig: [ split, orig->end ]
350 * The tree locks are not taken by this function. They need to be held
353 static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
354 struct extent_state *prealloc, u64 split)
356 struct rb_node *node;
357 prealloc->start = orig->start;
358 prealloc->end = split - 1;
359 prealloc->state = orig->state;
362 node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
364 struct extent_state *found;
365 found = rb_entry(node, struct extent_state, rb_node);
366 printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, prealloc->start, prealloc->end);
367 free_extent_state(prealloc);
370 prealloc->tree = tree;
375 * utility function to clear some bits in an extent state struct.
376 * it will optionally wake up anyone waiting on this state (wake == 1), or
377 * forcibly remove the state from the tree (delete == 1).
379 * If no bits are set on the state struct after clearing things, the
380 * struct is freed and removed from the tree
382 static int clear_state_bit(struct extent_io_tree *tree,
383 struct extent_state *state, int bits, int wake,
386 int ret = state->state & bits;
388 if ((bits & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
389 u64 range = state->end - state->start + 1;
390 WARN_ON(range > tree->dirty_bytes);
391 tree->dirty_bytes -= range;
393 clear_state_cb(tree, state, bits);
394 state->state &= ~bits;
397 if (delete || state->state == 0) {
399 clear_state_cb(tree, state, state->state);
400 if (tree->last == state) {
401 tree->last = extent_state_next(state);
403 rb_erase(&state->rb_node, &tree->state);
405 free_extent_state(state);
410 merge_state(tree, state);
416 * clear some bits on a range in the tree. This may require splitting
417 * or inserting elements in the tree, so the gfp mask is used to
418 * indicate which allocations or sleeping are allowed.
420 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
421 * the given range from the tree regardless of state (ie for truncate).
423 * the range [start, end] is inclusive.
425 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
426 * bits were already set, or zero if none of the bits were already set.
428 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
429 int bits, int wake, int delete, gfp_t mask)
431 struct extent_state *state;
432 struct extent_state *prealloc = NULL;
433 struct rb_node *node;
439 if (!prealloc && (mask & __GFP_WAIT)) {
440 prealloc = alloc_extent_state(mask);
445 spin_lock_irqsave(&tree->lock, flags);
447 * this search will find the extents that end after
450 node = tree_search(tree, start);
453 state = rb_entry(node, struct extent_state, rb_node);
454 if (state->start > end)
456 WARN_ON(state->end < start);
459 * | ---- desired range ---- |
461 * | ------------- state -------------- |
463 * We need to split the extent we found, and may flip
464 * bits on second half.
466 * If the extent we found extends past our range, we
467 * just split and search again. It'll get split again
468 * the next time though.
470 * If the extent we found is inside our range, we clear
471 * the desired bit on it.
474 if (state->start < start) {
476 prealloc = alloc_extent_state(GFP_ATOMIC);
477 err = split_state(tree, state, prealloc, start);
478 BUG_ON(err == -EEXIST);
482 if (state->end <= end) {
483 start = state->end + 1;
484 set |= clear_state_bit(tree, state, bits,
487 start = state->start;
492 * | ---- desired range ---- |
494 * We need to split the extent, and clear the bit
497 if (state->start <= end && state->end > end) {
499 prealloc = alloc_extent_state(GFP_ATOMIC);
500 err = split_state(tree, state, prealloc, end + 1);
501 BUG_ON(err == -EEXIST);
505 set |= clear_state_bit(tree, prealloc, bits,
511 start = state->end + 1;
512 set |= clear_state_bit(tree, state, bits, wake, delete);
516 spin_unlock_irqrestore(&tree->lock, flags);
518 free_extent_state(prealloc);
525 spin_unlock_irqrestore(&tree->lock, flags);
526 if (mask & __GFP_WAIT)
530 EXPORT_SYMBOL(clear_extent_bit);
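/*
 * sleep until someone wakes this state's waitqueue. The tree lock is dropped
 * while we sleep and retaken before returning.
 */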
532 static int wait_on_state(struct extent_io_tree *tree,
533 struct extent_state *state)
536 prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
537 spin_unlock_irq(&tree->lock);
539 spin_lock_irq(&tree->lock);
540 finish_wait(&state->wq, &wait);
545 * waits for one or more bits to clear on a range in the state tree.
546 * The range [start, end] is inclusive.
547 * The tree lock is taken by this function
549 int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
551 struct extent_state *state;
552 struct rb_node *node;
554 spin_lock_irq(&tree->lock);
558 * this search will find all the extents that end after
561 node = tree_search(tree, start);
565 state = rb_entry(node, struct extent_state, rb_node);
567 if (state->start > end)
570 if (state->state & bits) {
571 start = state->start;
572 atomic_inc(&state->refs);
573 wait_on_state(tree, state);
574 free_extent_state(state);
577 start = state->end + 1;
582 if (need_resched()) {
583 spin_unlock_irq(&tree->lock);
585 spin_lock_irq(&tree->lock);
589 spin_unlock_irq(&tree->lock);
592 EXPORT_SYMBOL(wait_extent_bit);
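/*
 * set bits on a single extent_state, keeping the dirty_bytes counter up to
 * date and calling the set_bit hook if one is registered.
 */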
594 static void set_state_bits(struct extent_io_tree *tree,
595 struct extent_state *state,
598 if ((bits & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
599 u64 range = state->end - state->start + 1;
600 tree->dirty_bytes += range;
602 set_state_cb(tree, state, bits);
603 state->state |= bits;
607 * set some bits on a range in the tree. This may require allocations
608 * or sleeping, so the gfp mask is used to indicate what is allowed.
610 * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
611 * range already has the desired bits set. The start of the existing
612 * range is returned in failed_start in this case.
614 * [start, end] is inclusive
615 * This takes the tree lock.
617 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits,
618 int exclusive, u64 *failed_start, gfp_t mask)
620 struct extent_state *state;
621 struct extent_state *prealloc = NULL;
622 struct rb_node *node;
629 if (!prealloc && (mask & __GFP_WAIT)) {
630 prealloc = alloc_extent_state(mask);
635 spin_lock_irqsave(&tree->lock, flags);
637 * this search will find all the extents that end after
640 node = tree_search(tree, start);
642 err = insert_state(tree, prealloc, start, end, bits);
644 BUG_ON(err == -EEXIST);
648 state = rb_entry(node, struct extent_state, rb_node);
649 last_start = state->start;
650 last_end = state->end;
653 * | ---- desired range ---- |
656 * Just lock what we found and keep going
658 if (state->start == start && state->end <= end) {
659 set = state->state & bits;
660 if (set && exclusive) {
661 *failed_start = state->start;
665 set_state_bits(tree, state, bits);
666 start = state->end + 1;
667 merge_state(tree, state);
672 * | ---- desired range ---- |
675 * | ------------- state -------------- |
677 * We need to split the extent we found, and may flip bits on
680 * If the extent we found extends past our
681 * range, we just split and search again. It'll get split
682 * again the next time though.
684 * If the extent we found is inside our range, we set the
687 if (state->start < start) {
688 set = state->state & bits;
689 if (exclusive && set) {
690 *failed_start = start;
694 err = split_state(tree, state, prealloc, start);
695 BUG_ON(err == -EEXIST);
699 if (state->end <= end) {
700 set_state_bits(tree, state, bits);
701 start = state->end + 1;
702 merge_state(tree, state);
704 start = state->start;
709 * | ---- desired range ---- |
710 * | state | or | state |
712 * There's a hole, we need to insert something in it and
713 * ignore the extent we found.
715 if (state->start > start) {
717 if (end < last_start)
720 this_end = last_start - 1;
721 err = insert_state(tree, prealloc, start, this_end,
724 BUG_ON(err == -EEXIST);
727 start = this_end + 1;
731 * | ---- desired range ---- |
733 * We need to split the extent, and set the bit
736 if (state->start <= end && state->end > end) {
737 set = state->state & bits;
738 if (exclusive && set) {
739 *failed_start = start;
743 err = split_state(tree, state, prealloc, end + 1);
744 BUG_ON(err == -EEXIST);
746 set_state_bits(tree, prealloc, bits);
747 merge_state(tree, prealloc);
755 spin_unlock_irqrestore(&tree->lock, flags);
757 free_extent_state(prealloc);
764 spin_unlock_irqrestore(&tree->lock, flags);
765 if (mask & __GFP_WAIT)
769 EXPORT_SYMBOL(set_extent_bit);
771 /* wrappers around set/clear extent bit */
772 int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
775 return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
778 EXPORT_SYMBOL(set_extent_dirty);
780 int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
781 int bits, gfp_t mask)
783 return set_extent_bit(tree, start, end, bits, 0, NULL,
786 EXPORT_SYMBOL(set_extent_bits);
788 int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
789 int bits, gfp_t mask)
791 return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
793 EXPORT_SYMBOL(clear_extent_bits);
795 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
798 return set_extent_bit(tree, start, end,
799 EXTENT_DELALLOC | EXTENT_DIRTY, 0, NULL,
802 EXPORT_SYMBOL(set_extent_delalloc);
804 int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
807 return clear_extent_bit(tree, start, end,
808 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
810 EXPORT_SYMBOL(clear_extent_dirty);
812 int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
815 return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
818 EXPORT_SYMBOL(set_extent_new);
820 int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
823 return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
825 EXPORT_SYMBOL(clear_extent_new);
827 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
830 return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
833 EXPORT_SYMBOL(set_extent_uptodate);
835 int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
838 return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
840 EXPORT_SYMBOL(clear_extent_uptodate);
842 int set_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
845 return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
848 EXPORT_SYMBOL(set_extent_writeback);
850 int clear_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
853 return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
855 EXPORT_SYMBOL(clear_extent_writeback);
857 int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
859 return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
861 EXPORT_SYMBOL(wait_on_extent_writeback);
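/*
 * set EXTENT_LOCKED on the range. If part of it is already locked, wait for
 * that lock to clear and retry from the conflicting offset.
 */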
863 int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
868 err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
869 &failed_start, mask);
870 if (err == -EEXIST && (mask & __GFP_WAIT)) {
871 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
872 start = failed_start;
876 WARN_ON(start > end);
880 EXPORT_SYMBOL(lock_extent);
882 int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
885 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
887 EXPORT_SYMBOL(unlock_extent);
890 * helper function to set both the pages and the extents in the tree dirty
892 int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end)
894 unsigned long index = start >> PAGE_CACHE_SHIFT;
895 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
898 while (index <= end_index) {
899 page = find_get_page(tree->mapping, index);
901 __set_page_dirty_nobuffers(page);
902 page_cache_release(page);
905 set_extent_dirty(tree, start, end, GFP_NOFS);
908 EXPORT_SYMBOL(set_range_dirty);
911 * helper function to set both pages and extents in the tree writeback
913 int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
915 unsigned long index = start >> PAGE_CACHE_SHIFT;
916 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
919 while (index <= end_index) {
920 page = find_get_page(tree->mapping, index);
922 set_page_writeback(page);
923 page_cache_release(page);
926 set_extent_writeback(tree, start, end, GFP_NOFS);
929 EXPORT_SYMBOL(set_range_writeback);
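/*
 * find the first extent at or after 'start' with any of 'bits' set and
 * report its range through start_ret and end_ret.
 */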
931 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
932 u64 *start_ret, u64 *end_ret, int bits)
934 struct rb_node *node;
935 struct extent_state *state;
938 spin_lock_irq(&tree->lock);
940 * this search will find all the extents that end after
943 node = tree_search(tree, start);
944 if (!node || IS_ERR(node)) {
949 state = rb_entry(node, struct extent_state, rb_node);
950 if (state->end >= start && (state->state & bits)) {
951 *start_ret = state->start;
952 *end_ret = state->end;
956 node = rb_next(node);
961 spin_unlock_irq(&tree->lock);
964 EXPORT_SYMBOL(find_first_extent_bit);
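/*
 * like find_first_extent_bit, but returns the extent_state struct itself.
 * Note that the tree lock is not taken here; the caller is responsible for it.
 */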
966 struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
969 struct rb_node *node;
970 struct extent_state *state;
973 * this search will find all the extents that end after
976 node = tree_search(tree, start);
977 if (!node || IS_ERR(node)) {
982 state = rb_entry(node, struct extent_state, rb_node);
983 if (state->end >= start && (state->state & bits)) {
986 node = rb_next(node);
993 EXPORT_SYMBOL(find_first_extent_bit_state);
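/*
 * starting at *start, find and lock a contiguous run of delalloc extents.
 * The locked range comes back in *start and *end, and the return value is
 * the number of bytes found, capped at max_bytes.
 */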
995 u64 find_lock_delalloc_range(struct extent_io_tree *tree,
996 u64 *start, u64 *end, u64 max_bytes)
998 struct rb_node *node;
999 struct extent_state *state;
1000 u64 cur_start = *start;
1002 u64 total_bytes = 0;
1004 spin_lock_irq(&tree->lock);
1006 * this search will find all the extents that end after
1010 node = tree_search(tree, cur_start);
1011 if (!node || IS_ERR(node)) {
1017 state = rb_entry(node, struct extent_state, rb_node);
1018 if (found && state->start != cur_start) {
1021 if (!(state->state & EXTENT_DELALLOC)) {
1027 struct extent_state *prev_state;
1028 struct rb_node *prev_node = node;
1030 prev_node = rb_prev(prev_node);
1033 prev_state = rb_entry(prev_node,
1034 struct extent_state,
1036 if (!(prev_state->state & EXTENT_DELALLOC))
1042 if (state->state & EXTENT_LOCKED) {
1044 atomic_inc(&state->refs);
1045 prepare_to_wait(&state->wq, &wait,
1046 TASK_UNINTERRUPTIBLE);
1047 spin_unlock_irq(&tree->lock);
1049 spin_lock_irq(&tree->lock);
1050 finish_wait(&state->wq, &wait);
1051 free_extent_state(state);
1054 set_state_cb(tree, state, EXTENT_LOCKED);
1055 state->state |= EXTENT_LOCKED;
1057 *start = state->start;
1060 cur_start = state->end + 1;
1061 node = rb_next(node);
1064 total_bytes += state->end - state->start + 1;
1065 if (total_bytes >= max_bytes)
1069 spin_unlock_irq(&tree->lock);
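/*
 * count how many bytes between *start and search_end have any of 'bits' set,
 * stopping once max_bytes have been counted. *start is moved up to the first
 * extent that matched.
 */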
1073 u64 count_range_bits(struct extent_io_tree *tree,
1074 u64 *start, u64 search_end, u64 max_bytes,
1077 struct rb_node *node;
1078 struct extent_state *state;
1079 u64 cur_start = *start;
1080 u64 total_bytes = 0;
1083 if (search_end <= cur_start) {
1084 printk("search_end %Lu start %Lu\n", search_end, cur_start);
1089 spin_lock_irq(&tree->lock);
1090 if (cur_start == 0 && bits == EXTENT_DIRTY) {
1091 total_bytes = tree->dirty_bytes;
1095 * this search will find all the extents that end after
1098 node = tree_search(tree, cur_start);
1099 if (!node || IS_ERR(node)) {
1104 state = rb_entry(node, struct extent_state, rb_node);
1105 if (state->start > search_end)
1107 if (state->end >= cur_start && (state->state & bits)) {
1108 total_bytes += min(search_end, state->end) + 1 -
1109 max(cur_start, state->start);
1110 if (total_bytes >= max_bytes)
1113 *start = state->start;
1117 node = rb_next(node);
1122 spin_unlock_irq(&tree->lock);
1126 * helper function to lock both pages and extents in the tree.
1127 * pages must be locked first.
1129 int lock_range(struct extent_io_tree *tree, u64 start, u64 end)
1131 unsigned long index = start >> PAGE_CACHE_SHIFT;
1132 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1136 while (index <= end_index) {
1137 page = grab_cache_page(tree->mapping, index);
1143 err = PTR_ERR(page);
1148 lock_extent(tree, start, end, GFP_NOFS);
1153 * we failed above in getting the page at 'index', so we undo here
1154 * up to but not including the page at 'index'
1157 index = start >> PAGE_CACHE_SHIFT;
1158 while (index < end_index) {
1159 page = find_get_page(tree->mapping, index);
1161 page_cache_release(page);
1166 EXPORT_SYMBOL(lock_range);
1169 * helper function to unlock both pages and extents in the tree.
1171 int unlock_range(struct extent_io_tree *tree, u64 start, u64 end)
1173 unsigned long index = start >> PAGE_CACHE_SHIFT;
1174 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1177 while (index <= end_index) {
1178 page = find_get_page(tree->mapping, index);
1180 page_cache_release(page);
1183 unlock_extent(tree, start, end, GFP_NOFS);
1186 EXPORT_SYMBOL(unlock_range);
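/*
 * stash a u64 of private data on the extent_state that begins exactly at
 * 'start'; fails if no such state exists.
 */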
1188 int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
1190 struct rb_node *node;
1191 struct extent_state *state;
1194 spin_lock_irq(&tree->lock);
1196 * this search will find all the extents that end after
1199 node = tree_search(tree, start);
1200 if (!node || IS_ERR(node)) {
1204 state = rb_entry(node, struct extent_state, rb_node);
1205 if (state->start != start) {
1209 state->private = private;
1211 spin_unlock_irq(&tree->lock);
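/* read back the private data stored by set_state_private for the state at 'start' */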
1215 int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1217 struct rb_node *node;
1218 struct extent_state *state;
1221 spin_lock_irq(&tree->lock);
1223 * this search will find all the extents that end after
1226 node = tree_search(tree, start);
1227 if (!node || IS_ERR(node)) {
1231 state = rb_entry(node, struct extent_state, rb_node);
1232 if (state->start != start) {
1236 *private = state->private;
1238 spin_unlock_irq(&tree->lock);
1243 * searches a range in the state tree for a given mask.
1244 * If 'filled' == 1, this returns 1 only if every extent in the range
1245 * has the bits set. Otherwise, 1 is returned if any bit in the
1246 * range is found set.
1248 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1249 int bits, int filled)
1251 struct extent_state *state = NULL;
1252 struct rb_node *node;
1254 unsigned long flags;
1256 spin_lock_irqsave(&tree->lock, flags);
1257 node = tree_search(tree, start);
1258 while (node && start <= end) {
1259 state = rb_entry(node, struct extent_state, rb_node);
1261 if (filled && state->start > start) {
1266 if (state->start > end)
1269 if (state->state & bits) {
1273 } else if (filled) {
1277 start = state->end + 1;
1280 node = rb_next(node);
1287 spin_unlock_irqrestore(&tree->lock, flags);
1290 EXPORT_SYMBOL(test_range_bit);
1293 * helper function to set a given page up to date if all the
1294 * extents in the tree for that page are up to date
1296 static int check_page_uptodate(struct extent_io_tree *tree,
1299 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1300 u64 end = start + PAGE_CACHE_SIZE - 1;
1301 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
1302 SetPageUptodate(page);
1307 * helper function to unlock a page if all the extents in the tree
1308 * for that page are unlocked
1310 static int check_page_locked(struct extent_io_tree *tree,
1313 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1314 u64 end = start + PAGE_CACHE_SIZE - 1;
1315 if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
1321 * helper function to end page writeback if all the extents
1322 * in the tree for that page are done with writeback
1324 static int check_page_writeback(struct extent_io_tree *tree,
1327 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1328 u64 end = start + PAGE_CACHE_SIZE - 1;
1329 if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
1330 end_page_writeback(page);
1334 /* lots and lots of room for performance fixes in the end_bio funcs */
1337 * after a writepage IO is done, we need to:
1338 * clear the uptodate bits on error
1339 * clear the writeback bits in the extent tree for this IO
1340 * end_page_writeback if the page has no more pending IO
1342 * Scheduling is not allowed, so the extent state tree is expected
1343 * to have one and only one object corresponding to this IO.
1345 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1346 static void end_bio_extent_writepage(struct bio *bio, int err)
1348 static int end_bio_extent_writepage(struct bio *bio,
1349 unsigned int bytes_done, int err)
1352 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1353 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1354 struct extent_state *state = bio->bi_private;
1355 struct extent_io_tree *tree = state->tree;
1356 struct rb_node *node;
1361 unsigned long flags;
1363 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1368 struct page *page = bvec->bv_page;
1369 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1371 end = start + bvec->bv_len - 1;
1373 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1378 if (--bvec >= bio->bi_io_vec)
1379 prefetchw(&bvec->bv_page->flags);
1382 clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
1383 ClearPageUptodate(page);
1387 if (tree->ops && tree->ops->writepage_end_io_hook) {
1388 tree->ops->writepage_end_io_hook(page, start, end,
1393 * bios can get merged in funny ways, and so we need to
1394 * be careful with the state variable. We know the
1395 * state won't be merged with others because it has
1396 * WRITEBACK set, but we can't be sure each biovec is
1397 * sequential in the file. So, if our cached state
1398 * doesn't match the expected end, search the tree
1399 * for the correct one.
1402 spin_lock_irqsave(&tree->lock, flags);
1403 if (!state || state->end != end) {
1405 node = __etree_search(tree, start, NULL, NULL);
1407 state = rb_entry(node, struct extent_state,
1409 if (state->end != end ||
1410 !(state->state & EXTENT_WRITEBACK))
1414 spin_unlock_irqrestore(&tree->lock, flags);
1415 clear_extent_writeback(tree, start,
1422 struct extent_state *clear = state;
1424 node = rb_prev(&state->rb_node);
1426 state = rb_entry(node,
1427 struct extent_state,
1433 clear_state_bit(tree, clear, EXTENT_WRITEBACK,
1444 /* before releasing the lock, make sure the next state
1445 * variable has the expected bits set and corresponds
1446 * to the correct offsets in the file
1448 if (state && (state->end + 1 != start ||
1449 !(state->state & EXTENT_WRITEBACK))) {
1452 spin_unlock_irqrestore(&tree->lock, flags);
1456 end_page_writeback(page);
1458 check_page_writeback(tree, page);
1459 } while (bvec >= bio->bi_io_vec);
1461 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1467 * after a readpage IO is done, we need to:
1468 * clear the uptodate bits on error
1469 * set the uptodate bits if things worked
1470 * set the page up to date if all extents in the tree are uptodate
1471 * clear the lock bit in the extent tree
1472 * unlock the page if there are no other extents locked for it
1474 * Scheduling is not allowed, so the extent state tree is expected
1475 * to have one and only one object corresponding to this IO.
1477 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1478 static void end_bio_extent_readpage(struct bio *bio, int err)
1480 static int end_bio_extent_readpage(struct bio *bio,
1481 unsigned int bytes_done, int err)
1484 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1485 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1486 struct extent_state *state = bio->bi_private;
1487 struct extent_io_tree *tree = state->tree;
1488 struct rb_node *node;
1492 unsigned long flags;
1496 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1502 struct page *page = bvec->bv_page;
1503 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1505 end = start + bvec->bv_len - 1;
1507 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1512 if (--bvec >= bio->bi_io_vec)
1513 prefetchw(&bvec->bv_page->flags);
1515 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
1516 ret = tree->ops->readpage_end_io_hook(page, start, end,
1522 spin_lock_irqsave(&tree->lock, flags);
1523 if (!state || state->end != end) {
1525 node = __etree_search(tree, start, NULL, NULL);
1527 state = rb_entry(node, struct extent_state,
1529 if (state->end != end ||
1530 !(state->state & EXTENT_LOCKED))
1534 spin_unlock_irqrestore(&tree->lock, flags);
1535 set_extent_uptodate(tree, start, end,
1537 unlock_extent(tree, start, end, GFP_ATOMIC);
1544 struct extent_state *clear = state;
1546 node = rb_prev(&state->rb_node);
1548 state = rb_entry(node,
1549 struct extent_state,
1554 set_state_cb(tree, clear, EXTENT_UPTODATE);
1555 clear->state |= EXTENT_UPTODATE;
1556 clear_state_bit(tree, clear, EXTENT_LOCKED,
1567 /* before releasing the lock, make sure the next state
1568 * variable has the expected bits set and corresponds
1569 * to the correct offsets in the file
1571 if (state && (state->end + 1 != start ||
1572 !(state->state & EXTENT_LOCKED))) {
1575 spin_unlock_irqrestore(&tree->lock, flags);
1579 SetPageUptodate(page);
1581 ClearPageUptodate(page);
1587 check_page_uptodate(tree, page);
1589 ClearPageUptodate(page);
1592 check_page_locked(tree, page);
1594 } while (bvec >= bio->bi_io_vec);
1597 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1603 * IO done from prepare_write is pretty simple, we just unlock
1604 * the structs in the extent tree when done, and set the uptodate bits
1607 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1608 static void end_bio_extent_preparewrite(struct bio *bio, int err)
1610 static int end_bio_extent_preparewrite(struct bio *bio,
1611 unsigned int bytes_done, int err)
1614 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1615 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1616 struct extent_state *state = bio->bi_private;
1617 struct extent_io_tree *tree = state->tree;
1621 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1627 struct page *page = bvec->bv_page;
1628 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1630 end = start + bvec->bv_len - 1;
1632 if (--bvec >= bio->bi_io_vec)
1633 prefetchw(&bvec->bv_page->flags);
1636 set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1638 ClearPageUptodate(page);
1642 unlock_extent(tree, start, end, GFP_ATOMIC);
1644 } while (bvec >= bio->bi_io_vec);
1647 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1653 extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
1658 bio = bio_alloc(gfp_flags, nr_vecs);
1660 if (bio == NULL && (current->flags & PF_MEMALLOC)) {
1661 while (!bio && (nr_vecs /= 2))
1662 bio = bio_alloc(gfp_flags, nr_vecs);
1666 bio->bi_bdev = bdev;
1667 bio->bi_sector = first_sector;
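/*
 * find the extent_state that covers the last page in the bio and hang it off
 * bi_private so the end_io handler can find it, then hand the bio to the
 * submit hook if there is one, or straight to the block layer.
 */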
1672 static int submit_one_bio(int rw, struct bio *bio)
1676 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1677 struct page *page = bvec->bv_page;
1678 struct extent_io_tree *tree = bio->bi_private;
1679 struct rb_node *node;
1680 struct extent_state *state;
1684 start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
1685 end = start + bvec->bv_len - 1;
1687 spin_lock_irq(&tree->lock);
1688 node = __etree_search(tree, start, NULL, NULL);
1690 state = rb_entry(node, struct extent_state, rb_node);
1691 while(state->end < end) {
1692 node = rb_next(node);
1693 state = rb_entry(node, struct extent_state, rb_node);
1695 BUG_ON(state->end != end);
1696 spin_unlock_irq(&tree->lock);
1698 bio->bi_private = state;
1702 maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
1703 if (maxsector < bio->bi_sector) {
1704 printk("sector too large max %Lu got %llu\n", maxsector,
1705 (unsigned long long)bio->bi_sector);
1708 if (tree->ops && tree->ops->submit_bio_hook)
1709 tree->ops->submit_bio_hook(page->mapping->host, rw, bio);
1711 submit_bio(rw, bio);
1712 if (bio_flagged(bio, BIO_EOPNOTSUPP))
1718 static int submit_extent_page(int rw, struct extent_io_tree *tree,
1719 struct page *page, sector_t sector,
1720 size_t size, unsigned long offset,
1721 struct block_device *bdev,
1722 struct bio **bio_ret,
1723 unsigned long max_pages,
1724 bio_end_io_t end_io_func)
1730 if (bio_ret && *bio_ret) {
1732 if (bio->bi_sector + (bio->bi_size >> 9) != sector ||
1733 (tree->ops && tree->ops->merge_bio_hook &&
1734 tree->ops->merge_bio_hook(page, offset, size, bio)) ||
1735 bio_add_page(bio, page, size, offset) < size) {
1736 ret = submit_one_bio(rw, bio);
1742 nr = bio_get_nr_vecs(bdev);
1743 bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
1745 printk("failed to allocate bio nr %d\n", nr);
1749 bio_add_page(bio, page, size, offset);
1750 bio->bi_end_io = end_io_func;
1751 bio->bi_private = tree;
1756 ret = submit_one_bio(rw, bio);
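/*
 * take a reference on the page and tag page->private with EXTENT_PAGE_PRIVATE
 * so the rest of the extent code knows this page is managed here.
 */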
1762 void set_page_extent_mapped(struct page *page)
1764 if (!PagePrivate(page)) {
1765 SetPagePrivate(page);
1766 WARN_ON(!page->mapping->a_ops->invalidatepage);
1767 set_page_private(page, EXTENT_PAGE_PRIVATE);
1768 page_cache_get(page);
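/* mark a page as the head page of an extent_buffer, encoding the buffer length in page->private */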
1772 void set_page_extent_head(struct page *page, unsigned long len)
1774 set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
1778 * basic readpage implementation. Locked extent state structs are inserted
1779 * into the tree and removed again when the IO is done (by the end_io
1782 static int __extent_read_full_page(struct extent_io_tree *tree,
1784 get_extent_t *get_extent,
1787 struct inode *inode = page->mapping->host;
1788 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1789 u64 page_end = start + PAGE_CACHE_SIZE - 1;
1793 u64 last_byte = i_size_read(inode);
1797 struct extent_map *em;
1798 struct block_device *bdev;
1801 size_t page_offset = 0;
1803 size_t blocksize = inode->i_sb->s_blocksize;
1805 set_page_extent_mapped(page);
1808 lock_extent(tree, start, end, GFP_NOFS);
1810 while (cur <= end) {
1811 if (cur >= last_byte) {
1813 iosize = PAGE_CACHE_SIZE - page_offset;
1814 userpage = kmap_atomic(page, KM_USER0);
1815 memset(userpage + page_offset, 0, iosize);
1816 flush_dcache_page(page);
1817 kunmap_atomic(userpage, KM_USER0);
1818 set_extent_uptodate(tree, cur, cur + iosize - 1,
1820 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1823 em = get_extent(inode, page, page_offset, cur,
1825 if (IS_ERR(em) || !em) {
1827 unlock_extent(tree, cur, end, GFP_NOFS);
1831 extent_offset = cur - em->start;
1832 BUG_ON(extent_map_end(em) <= cur);
1835 iosize = min(extent_map_end(em) - cur, end - cur + 1);
1836 cur_end = min(extent_map_end(em) - 1, end);
1837 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1838 sector = (em->block_start + extent_offset) >> 9;
1840 block_start = em->block_start;
1841 free_extent_map(em);
1844 /* we've found a hole, just zero and go on */
1845 if (block_start == EXTENT_MAP_HOLE) {
1847 userpage = kmap_atomic(page, KM_USER0);
1848 memset(userpage + page_offset, 0, iosize);
1849 flush_dcache_page(page);
1850 kunmap_atomic(userpage, KM_USER0);
1852 set_extent_uptodate(tree, cur, cur + iosize - 1,
1854 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1856 page_offset += iosize;
1859 /* the get_extent function already copied into the page */
1860 if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
1861 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1863 page_offset += iosize;
1866 /* we have an inline extent but it didn't get marked up
1867 * to date. Error out
1869 if (block_start == EXTENT_MAP_INLINE) {
1871 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1873 page_offset += iosize;
1878 if (tree->ops && tree->ops->readpage_io_hook) {
1879 ret = tree->ops->readpage_io_hook(page, cur,
1883 unsigned long nr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
1885 ret = submit_extent_page(READ, tree, page,
1886 sector, iosize, page_offset,
1888 end_bio_extent_readpage);
1893 page_offset += iosize;
1897 if (!PageError(page))
1898 SetPageUptodate(page);
1904 int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
1905 get_extent_t *get_extent)
1907 struct bio *bio = NULL;
1910 ret = __extent_read_full_page(tree, page, get_extent, &bio);
1912 submit_one_bio(READ, bio);
1915 EXPORT_SYMBOL(extent_read_full_page);
1918 * the writepage semantics are similar to regular writepage. extent
1919 * records are inserted to lock ranges in the tree, and as dirty areas
1920 * are found, they are marked writeback. Then the lock bits are removed
1921 * and the end_io handler clears the writeback ranges
1923 static int __extent_writepage(struct page *page, struct writeback_control *wbc,
1926 struct inode *inode = page->mapping->host;
1927 struct extent_page_data *epd = data;
1928 struct extent_io_tree *tree = epd->tree;
1929 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1931 u64 page_end = start + PAGE_CACHE_SIZE - 1;
1935 u64 last_byte = i_size_read(inode);
1939 struct extent_map *em;
1940 struct block_device *bdev;
1943 size_t page_offset = 0;
1945 loff_t i_size = i_size_read(inode);
1946 unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
1950 WARN_ON(!PageLocked(page));
1951 if (page->index > end_index) {
1952 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
1957 if (page->index == end_index) {
1960 size_t offset = i_size & (PAGE_CACHE_SIZE - 1);
1962 userpage = kmap_atomic(page, KM_USER0);
1963 memset(userpage + offset, 0, PAGE_CACHE_SIZE - offset);
1964 flush_dcache_page(page);
1965 kunmap_atomic(userpage, KM_USER0);
1968 set_page_extent_mapped(page);
1970 delalloc_start = start;
1972 while(delalloc_end < page_end) {
1973 nr_delalloc = find_lock_delalloc_range(tree, &delalloc_start,
1976 if (nr_delalloc == 0) {
1977 delalloc_start = delalloc_end + 1;
1980 tree->ops->fill_delalloc(inode, delalloc_start,
1982 clear_extent_bit(tree, delalloc_start,
1984 EXTENT_LOCKED | EXTENT_DELALLOC,
1986 delalloc_start = delalloc_end + 1;
1988 lock_extent(tree, start, page_end, GFP_NOFS);
1991 if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
1992 printk("found delalloc bits after lock_extent\n");
1995 if (last_byte <= start) {
1996 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
2000 set_extent_uptodate(tree, start, page_end, GFP_NOFS);
2001 blocksize = inode->i_sb->s_blocksize;
2003 while (cur <= end) {
2004 if (cur >= last_byte) {
2005 clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
2008 em = epd->get_extent(inode, page, page_offset, cur,
2010 if (IS_ERR(em) || !em) {
2015 extent_offset = cur - em->start;
2016 BUG_ON(extent_map_end(em) <= cur);
2018 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2019 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2020 sector = (em->block_start + extent_offset) >> 9;
2022 block_start = em->block_start;
2023 free_extent_map(em);
2026 if (block_start == EXTENT_MAP_HOLE ||
2027 block_start == EXTENT_MAP_INLINE) {
2028 clear_extent_dirty(tree, cur,
2029 cur + iosize - 1, GFP_NOFS);
2031 page_offset += iosize;
2035 /* leave this out until we have a page_mkwrite call */
2036 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
2039 page_offset += iosize;
2042 clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
2043 if (tree->ops && tree->ops->writepage_io_hook) {
2044 ret = tree->ops->writepage_io_hook(page, cur,
2052 unsigned long max_nr = end_index + 1;
2053 set_range_writeback(tree, cur, cur + iosize - 1);
2054 if (!PageWriteback(page)) {
2055 printk("warning page %lu not writeback, "
2056 "cur %llu end %llu\n", page->index,
2057 (unsigned long long)cur,
2058 (unsigned long long)end);
2061 ret = submit_extent_page(WRITE, tree, page, sector,
2062 iosize, page_offset, bdev,
2064 end_bio_extent_writepage);
2069 page_offset += iosize;
2074 /* make sure the mapping tag for page dirty gets cleared */
2075 set_page_writeback(page);
2076 end_page_writeback(page);
2078 unlock_extent(tree, start, page_end, GFP_NOFS);
2083 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
2085 /* Taken directly from 2.6.23 for 2.6.18 back port */
2086 typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
2090 * write_cache_pages - walk the list of dirty pages of the given address space
2091 * and write all of them.
2092 * @mapping: address space structure to write
2093 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2094 * @writepage: function called for each page
2095 * @data: data passed to writepage function
2097 * If a page is already under I/O, write_cache_pages() skips it, even
2098 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
2099 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
2100 * and msync() need to guarantee that all the data which was dirty at the time
2101 * the call was made get new I/O started against them. If wbc->sync_mode is
2102 * WB_SYNC_ALL then we were called for data integrity and we must wait for
2103 * existing IO to complete.
2105 static int write_cache_pages(struct address_space *mapping,
2106 struct writeback_control *wbc, writepage_t writepage,
2109 struct backing_dev_info *bdi = mapping->backing_dev_info;
2112 struct pagevec pvec;
2115 pgoff_t end; /* Inclusive */
2117 int range_whole = 0;
2119 if (wbc->nonblocking && bdi_write_congested(bdi)) {
2120 wbc->encountered_congestion = 1;
2124 pagevec_init(&pvec, 0);
2125 if (wbc->range_cyclic) {
2126 index = mapping->writeback_index; /* Start from prev offset */
2129 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2130 end = wbc->range_end >> PAGE_CACHE_SHIFT;
2131 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2136 while (!done && (index <= end) &&
2137 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
2138 PAGECACHE_TAG_DIRTY,
2139 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
2143 for (i = 0; i < nr_pages; i++) {
2144 struct page *page = pvec.pages[i];
2147 * At this point we hold neither mapping->tree_lock nor
2148 * lock on the page itself: the page may be truncated or
2149 * invalidated (changing page->mapping to NULL), or even
2150 * swizzled back from swapper_space to tmpfs file
2155 if (unlikely(page->mapping != mapping)) {
2160 if (!wbc->range_cyclic && page->index > end) {
2166 if (wbc->sync_mode != WB_SYNC_NONE)
2167 wait_on_page_writeback(page);
2169 if (PageWriteback(page) ||
2170 !clear_page_dirty_for_io(page)) {
2175 ret = (*writepage)(page, wbc, data);
2177 if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
2181 if (ret || (--(wbc->nr_to_write) <= 0))
2183 if (wbc->nonblocking && bdi_write_congested(bdi)) {
2184 wbc->encountered_congestion = 1;
2188 pagevec_release(&pvec);
2191 if (!scanned && !done) {
2193 * We hit the last page and there is more work to be done: wrap
2194 * back to the start of the file
2200 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2201 mapping->writeback_index = index;
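/*
 * write out the given page now, then use write_cache_pages to start IO on
 * the dirty pages that follow it in the file.
 */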
2206 int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
2207 get_extent_t *get_extent,
2208 struct writeback_control *wbc)
2211 struct address_space *mapping = page->mapping;
2212 struct extent_page_data epd = {
2215 .get_extent = get_extent,
2217 struct writeback_control wbc_writepages = {
2219 .sync_mode = WB_SYNC_NONE,
2220 .older_than_this = NULL,
2222 .range_start = page_offset(page) + PAGE_CACHE_SIZE,
2223 .range_end = (loff_t)-1,
2227 ret = __extent_writepage(page, wbc, &epd);
2229 write_cache_pages(mapping, &wbc_writepages, __extent_writepage, &epd);
2231 submit_one_bio(WRITE, epd.bio);
2235 EXPORT_SYMBOL(extent_write_full_page);
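/*
 * writepages: push every dirty page in the mapping through __extent_writepage
 * and submit whatever bio is still being built at the end.
 */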
2238 int extent_writepages(struct extent_io_tree *tree,
2239 struct address_space *mapping,
2240 get_extent_t *get_extent,
2241 struct writeback_control *wbc)
2244 struct extent_page_data epd = {
2247 .get_extent = get_extent,
2250 ret = write_cache_pages(mapping, wbc, __extent_writepage, &epd);
2252 submit_one_bio(WRITE, epd.bio);
2256 EXPORT_SYMBOL(extent_writepages);
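/*
 * readpages: add each page to the page cache and LRU by hand (since
 * add_to_page_cache_lru isn't exported) and read it through
 * __extent_read_full_page, sharing a single bio across pages where possible.
 */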
2258 int extent_readpages(struct extent_io_tree *tree,
2259 struct address_space *mapping,
2260 struct list_head *pages, unsigned nr_pages,
2261 get_extent_t get_extent)
2263 struct bio *bio = NULL;
2265 struct pagevec pvec;
2267 pagevec_init(&pvec, 0);
2268 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
2269 struct page *page = list_entry(pages->prev, struct page, lru);
2271 prefetchw(&page->flags);
2272 list_del(&page->lru);
2274 * what we want to do here is call add_to_page_cache_lru,
2275 * but that isn't exported, so we reproduce it here
2277 if (!add_to_page_cache(page, mapping,
2278 page->index, GFP_KERNEL)) {
2280 /* open coding of lru_cache_add, also not exported */
2281 page_cache_get(page);
2282 if (!pagevec_add(&pvec, page))
2283 __pagevec_lru_add(&pvec);
2284 __extent_read_full_page(tree, page, get_extent, &bio);
2286 page_cache_release(page);
2288 if (pagevec_count(&pvec))
2289 __pagevec_lru_add(&pvec);
2290 BUG_ON(!list_empty(pages));
2292 submit_one_bio(READ, bio);
2295 EXPORT_SYMBOL(extent_readpages);
2298 * basic invalidatepage code, this waits on any locked or writeback
2299 * ranges corresponding to the page, and then deletes any extent state
2300 * records from the tree
2302 int extent_invalidatepage(struct extent_io_tree *tree,
2303 struct page *page, unsigned long offset)
2305 u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
2306 u64 end = start + PAGE_CACHE_SIZE - 1;
2307 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
2309 start += (offset + blocksize - 1) & ~(blocksize - 1);
2313 lock_extent(tree, start, end, GFP_NOFS);
2314 wait_on_extent_writeback(tree, start, end);
2315 clear_extent_bit(tree, start, end,
2316 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
2320 EXPORT_SYMBOL(extent_invalidatepage);
2323 * simple commit_write call, the page is marked dirty with set_page_dirty
2324 * and i_size is updated when the write extends the file
2326 int extent_commit_write(struct extent_io_tree *tree,
2327 struct inode *inode, struct page *page,
2328 unsigned from, unsigned to)
2330 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2332 set_page_extent_mapped(page);
2333 set_page_dirty(page);
2335 if (pos > inode->i_size) {
2336 i_size_write(inode, pos);
2337 mark_inode_dirty(inode);
2341 EXPORT_SYMBOL(extent_commit_write);
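/*
 * prepare_write: walk the blocks covered by [from, to), zero the parts of
 * newly allocated blocks that fall outside the write, and read in any
 * existing blocks the copy will only partially overwrite.
 */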
2343 int extent_prepare_write(struct extent_io_tree *tree,
2344 struct inode *inode, struct page *page,
2345 unsigned from, unsigned to, get_extent_t *get_extent)
2347 u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2348 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
2350 u64 orig_block_start;
2353 struct extent_map *em;
2354 unsigned blocksize = 1 << inode->i_blkbits;
2355 size_t page_offset = 0;
2356 size_t block_off_start;
2357 size_t block_off_end;
2363 set_page_extent_mapped(page);
2365 block_start = (page_start + from) & ~((u64)blocksize - 1);
2366 block_end = (page_start + to - 1) | (blocksize - 1);
2367 orig_block_start = block_start;
2369 lock_extent(tree, page_start, page_end, GFP_NOFS);
2370 while(block_start <= block_end) {
2371 em = get_extent(inode, page, page_offset, block_start,
2372 block_end - block_start + 1, 1);
2373 if (IS_ERR(em) || !em) {
2376 cur_end = min(block_end, extent_map_end(em) - 1);
2377 block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
2378 block_off_end = block_off_start + blocksize;
2379 isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
2381 if (!PageUptodate(page) && isnew &&
2382 (block_off_end > to || block_off_start < from)) {
2385 kaddr = kmap_atomic(page, KM_USER0);
2386 if (block_off_end > to)
2387 memset(kaddr + to, 0, block_off_end - to);
2388 if (block_off_start < from)
2389 memset(kaddr + block_off_start, 0,
2390 from - block_off_start);
2391 flush_dcache_page(page);
2392 kunmap_atomic(kaddr, KM_USER0);
2394 if ((em->block_start != EXTENT_MAP_HOLE &&
2395 em->block_start != EXTENT_MAP_INLINE) &&
2396 !isnew && !PageUptodate(page) &&
2397 (block_off_end > to || block_off_start < from) &&
2398 !test_range_bit(tree, block_start, cur_end,
2399 EXTENT_UPTODATE, 1)) {
2401 u64 extent_offset = block_start - em->start;
2403 sector = (em->block_start + extent_offset) >> 9;
2404 iosize = (cur_end - block_start + blocksize) &
2405 ~((u64)blocksize - 1);
2407 * we've already got the extent locked, but we
2408 * need to split the state such that our end_bio
2409 * handler can clear the lock.
2411 set_extent_bit(tree, block_start,
2412 block_start + iosize - 1,
2413 EXTENT_LOCKED, 0, NULL, GFP_NOFS);
2414 ret = submit_extent_page(READ, tree, page,
2415 sector, iosize, page_offset, em->bdev,
2417 end_bio_extent_preparewrite);
2419 block_start = block_start + iosize;
2421 set_extent_uptodate(tree, block_start, cur_end,
2423 unlock_extent(tree, block_start, cur_end, GFP_NOFS);
2424 block_start = cur_end + 1;
2426 page_offset = block_start & (PAGE_CACHE_SIZE - 1);
2427 free_extent_map(em);
2430 wait_extent_bit(tree, orig_block_start,
2431 block_end, EXTENT_LOCKED);
2433 check_page_uptodate(tree, page);
2435 /* FIXME, zero out newly allocated blocks on error */
2438 EXPORT_SYMBOL(extent_prepare_write);
2441 * a helper for releasepage. As long as there are no locked extents
2442 * in the range corresponding to the page, both state records and extent
2443 * map records are removed
2445 int try_release_extent_mapping(struct extent_map_tree *map,
2446 struct extent_io_tree *tree, struct page *page,
2449 struct extent_map *em;
2450 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2451 u64 end = start + PAGE_CACHE_SIZE - 1;
2452 u64 orig_start = start;
2454 if ((mask & __GFP_WAIT) &&
2455 page->mapping->host->i_size > 16 * 1024 * 1024) {
2457 while (start <= end) {
2458 len = end - start + 1;
2459 spin_lock(&map->lock);
2460 em = lookup_extent_mapping(map, start, len);
2461 if (!em || IS_ERR(em)) {
2462 spin_unlock(&map->lock);
2465 if (em->start != start) {
2466 spin_unlock(&map->lock);
2467 free_extent_map(em);
2470 if (!test_range_bit(tree, em->start,
2471 extent_map_end(em) - 1,
2472 EXTENT_LOCKED, 0)) {
2473 remove_extent_mapping(map, em);
2474 /* once for the rb tree */
2475 free_extent_map(em);
2477 start = extent_map_end(em);
2478 spin_unlock(&map->lock);
2481 free_extent_map(em);
2484 if (test_range_bit(tree, orig_start, end, EXTENT_IOBITS, 0))
2487 if ((mask & GFP_NOFS) == GFP_NOFS)
2489 clear_extent_bit(tree, orig_start, end, EXTENT_UPTODATE,
2494 EXPORT_SYMBOL(try_release_extent_mapping);
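/*
 * bmap: use get_extent to translate a logical file block into a disk sector.
 * Holes and inline extents map to sector 0.
 */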
2496 sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
2497 get_extent_t *get_extent)
2499 struct inode *inode = mapping->host;
2500 u64 start = iblock << inode->i_blkbits;
2501 sector_t sector = 0;
2502 struct extent_map *em;
2504 em = get_extent(inode, NULL, 0, start, (1 << inode->i_blkbits), 0);
2505 if (!em || IS_ERR(em))
2508 if (em->block_start == EXTENT_MAP_INLINE ||
2509 em->block_start == EXTENT_MAP_HOLE)
2512 sector = (em->block_start + start - em->start) >> inode->i_blkbits;
2514 free_extent_map(em);
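/*
 * put a buffer at the head of the tree's LRU (or move it there if it is
 * already cached), dropping the oldest entry once more than BUFFER_LRU_MAX
 * buffers are held. Callers hold tree->lru_lock.
 */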
2518 static int add_lru(struct extent_io_tree *tree, struct extent_buffer *eb)
2520 if (list_empty(&eb->lru)) {
2521 extent_buffer_get(eb);
2522 list_add(&eb->lru, &tree->buffer_lru);
2524 if (tree->lru_size >= BUFFER_LRU_MAX) {
2525 struct extent_buffer *rm;
2526 rm = list_entry(tree->buffer_lru.prev,
2527 struct extent_buffer, lru);
2529 list_del_init(&rm->lru);
2530 free_extent_buffer(rm);
2533 list_move(&eb->lru, &tree->buffer_lru);
2536 static struct extent_buffer *find_lru(struct extent_io_tree *tree,
2537 u64 start, unsigned long len)
2539 struct list_head *lru = &tree->buffer_lru;
2540 struct list_head *cur = lru->next;
2541 struct extent_buffer *eb;
2543 if (list_empty(lru))
2547 eb = list_entry(cur, struct extent_buffer, lru);
2548 if (eb->start == start && eb->len == len) {
2549 extent_buffer_get(eb);
2553 } while (cur != lru);
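/* number of pages needed to hold 'len' bytes starting at 'start' */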
2557 static inline unsigned long num_extent_pages(u64 start, u64 len)
2559 return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
2560 (start >> PAGE_CACHE_SHIFT);
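/*
 * return the i'th page backing the buffer. Page zero is cached in
 * eb->first_page, everything else is looked up in the page cache radix tree.
 */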
2563 static inline struct page *extent_buffer_page(struct extent_buffer *eb,
2567 struct address_space *mapping;
2570 return eb->first_page;
2571 i += eb->start >> PAGE_CACHE_SHIFT;
2572 mapping = eb->first_page->mapping;
2573 read_lock_irq(&mapping->tree_lock);
2574 p = radix_tree_lookup(&mapping->page_tree, i);
2575 read_unlock_irq(&mapping->tree_lock);
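/*
 * get an extent_buffer struct for the range, reusing a matching one from the
 * LRU when possible, otherwise allocating a fresh one with a single reference.
 */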
2579 static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
2584 struct extent_buffer *eb = NULL;
2586 spin_lock(&tree->lru_lock);
2587 eb = find_lru(tree, start, len);
2588 spin_unlock(&tree->lru_lock);
2593 eb = kmem_cache_zalloc(extent_buffer_cache, mask);
2594 INIT_LIST_HEAD(&eb->lru);
2597 atomic_set(&eb->refs, 1);
2602 static void __free_extent_buffer(struct extent_buffer *eb)
2604 kmem_cache_free(extent_buffer_cache, eb);
2607 struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
2608 u64 start, unsigned long len,
2612 unsigned long num_pages = num_extent_pages(start, len);
2614 unsigned long index = start >> PAGE_CACHE_SHIFT;
2615 struct extent_buffer *eb;
2617 struct address_space *mapping = tree->mapping;
2620 eb = __alloc_extent_buffer(tree, start, len, mask);
2621 if (!eb || IS_ERR(eb))
2624 if (eb->flags & EXTENT_BUFFER_FILLED)
2628 eb->first_page = page0;
2631 page_cache_get(page0);
2632 mark_page_accessed(page0);
2633 set_page_extent_mapped(page0);
2634 WARN_ON(!PageUptodate(page0));
2635 set_page_extent_head(page0, len);
2639 for (; i < num_pages; i++, index++) {
2640 p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
2645 set_page_extent_mapped(p);
2646 mark_page_accessed(p);
2649 set_page_extent_head(p, len);
2651 set_page_private(p, EXTENT_PAGE_PRIVATE);
2653 if (!PageUptodate(p))
2658 eb->flags |= EXTENT_UPTODATE;
2659 eb->flags |= EXTENT_BUFFER_FILLED;
2662 spin_lock(&tree->lru_lock);
2664 spin_unlock(&tree->lru_lock);
2668 spin_lock(&tree->lru_lock);
2669 list_del_init(&eb->lru);
2670 spin_unlock(&tree->lru_lock);
2671 if (!atomic_dec_and_test(&eb->refs))
2673 for (index = 1; index < i; index++) {
2674 page_cache_release(extent_buffer_page(eb, index));
2677 page_cache_release(extent_buffer_page(eb, 0));
2678 __free_extent_buffer(eb);
2681 EXPORT_SYMBOL(alloc_extent_buffer);
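/*
 * like alloc_extent_buffer, but only uses pages that are already in the
 * page cache (via find_lock_page) instead of allocating new ones
 */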
2683 struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
2684 u64 start, unsigned long len,
2687 unsigned long num_pages = num_extent_pages(start, len);
2689 unsigned long index = start >> PAGE_CACHE_SHIFT;
2690 struct extent_buffer *eb;
2692 struct address_space *mapping = tree->mapping;
2695 eb = __alloc_extent_buffer(tree, start, len, mask);
2696 if (!eb || IS_ERR(eb))
2699 if (eb->flags & EXTENT_BUFFER_FILLED)
2702 for (i = 0; i < num_pages; i++, index++) {
2703 p = find_lock_page(mapping, index);
2707 set_page_extent_mapped(p);
2708 mark_page_accessed(p);
2712 set_page_extent_head(p, len);
2714 set_page_private(p, EXTENT_PAGE_PRIVATE);
2717 if (!PageUptodate(p))
2722 eb->flags |= EXTENT_UPTODATE;
2723 eb->flags |= EXTENT_BUFFER_FILLED;
2726 spin_lock(&tree->lru_lock);
2728 spin_unlock(&tree->lru_lock);
2731 spin_lock(&tree->lru_lock);
2732 list_del_init(&eb->lru);
2733 spin_unlock(&tree->lru_lock);
2734 if (!atomic_dec_and_test(&eb->refs))
2736 for (index = 1; index < i; index++) {
2737 page_cache_release(extent_buffer_page(eb, index));
2740 page_cache_release(extent_buffer_page(eb, 0));
2741 __free_extent_buffer(eb);
2744 EXPORT_SYMBOL(find_extent_buffer);
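/*
 * drop a reference on an extent buffer and release its pages once the
 * last reference is gone
 */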
2746 void free_extent_buffer(struct extent_buffer *eb)
2749 unsigned long num_pages;
2754 if (!atomic_dec_and_test(&eb->refs))
2757 WARN_ON(!list_empty(&eb->lru));
2758 num_pages = num_extent_pages(eb->start, eb->len);
2760 for (i = 1; i < num_pages; i++) {
2761 page_cache_release(extent_buffer_page(eb, i));
2763 page_cache_release(extent_buffer_page(eb, 0));
2764 __free_extent_buffer(eb);
2766 EXPORT_SYMBOL(free_extent_buffer);
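/*
 * clear the dirty bits for an extent buffer in the extent tree and on
 * each of its pages, taking care with pages it may share with
 * neighbouring buffers
 */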
2768 int clear_extent_buffer_dirty(struct extent_io_tree *tree,
2769 struct extent_buffer *eb)
2773 unsigned long num_pages;
2776 u64 start = eb->start;
2777 u64 end = start + eb->len - 1;
2779 set = clear_extent_dirty(tree, start, end, GFP_NOFS);
2780 num_pages = num_extent_pages(eb->start, eb->len);
2782 for (i = 0; i < num_pages; i++) {
2783 page = extent_buffer_page(eb, i);
2786 set_page_extent_head(page, eb->len);
2788 set_page_private(page, EXTENT_PAGE_PRIVATE);
		/*
		 * if we're on the last page or the first page and the
		 * block isn't aligned on a page boundary, do extra checks
		 * to make sure we don't clean a page that is partially dirty
		 */
2795 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2796 ((i == num_pages - 1) &&
2797 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2798 start = (u64)page->index << PAGE_CACHE_SHIFT;
2799 end = start + PAGE_CACHE_SIZE - 1;
2800 if (test_range_bit(tree, start, end,
2806 clear_page_dirty_for_io(page);
		/* clearing a radix tree tag modifies the tree, so the
		 * mapping's tree_lock has to be held for writing here
		 */
		write_lock_irq(&page->mapping->tree_lock);
		if (!PageDirty(page)) {
			radix_tree_tag_clear(&page->mapping->page_tree,
					     page_index(page),
					     PAGECACHE_TAG_DIRTY);
		}
		write_unlock_irq(&page->mapping->tree_lock);
2818 EXPORT_SYMBOL(clear_extent_buffer_dirty);
2820 int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
2821 struct extent_buffer *eb)
2823 return wait_on_extent_writeback(tree, eb->start,
2824 eb->start + eb->len - 1);
2826 EXPORT_SYMBOL(wait_on_extent_buffer_writeback);
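/*
 * mark every page of an extent buffer dirty and set the dirty bits in
 * the extent tree for the range it covers
 */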
2828 int set_extent_buffer_dirty(struct extent_io_tree *tree,
2829 struct extent_buffer *eb)
2832 unsigned long num_pages;
2834 num_pages = num_extent_pages(eb->start, eb->len);
2835 for (i = 0; i < num_pages; i++) {
2836 struct page *page = extent_buffer_page(eb, i);
		/*
		 * writepage may need to do something special for the
		 * first page, so we have to make sure page->private is
		 * properly set.  releasepage may drop page->private
		 * on us if the page isn't already dirty.
		 */
2844 set_page_extent_head(page, eb->len);
2845 } else if (PagePrivate(page) &&
2846 page->private != EXTENT_PAGE_PRIVATE) {
2848 set_page_extent_mapped(page);
2851 __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
2855 return set_extent_dirty(tree, eb->start,
2856 eb->start + eb->len - 1, GFP_NOFS);
2858 EXPORT_SYMBOL(set_extent_buffer_dirty);
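/*
 * mark the range covered by an extent buffer uptodate in the extent tree
 * and set PageUptodate on each of its fully covered pages
 */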
2860 int set_extent_buffer_uptodate(struct extent_io_tree *tree,
2861 struct extent_buffer *eb)
2865 unsigned long num_pages;
2867 num_pages = num_extent_pages(eb->start, eb->len);
	set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
			    GFP_NOFS);
2871 for (i = 0; i < num_pages; i++) {
2872 page = extent_buffer_page(eb, i);
2873 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2874 ((i == num_pages - 1) &&
2875 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2876 check_page_uptodate(tree, page);
2879 SetPageUptodate(page);
2883 EXPORT_SYMBOL(set_extent_buffer_uptodate);
2885 int extent_buffer_uptodate(struct extent_io_tree *tree,
2886 struct extent_buffer *eb)
2888 if (eb->flags & EXTENT_UPTODATE)
2890 return test_range_bit(tree, eb->start, eb->start + eb->len - 1,
2891 EXTENT_UPTODATE, 1);
2893 EXPORT_SYMBOL(extent_buffer_uptodate);
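/*
 * read any pages of an extent buffer that aren't uptodate yet,
 * optionally waiting for the IO to complete before returning
 */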
2895 int read_extent_buffer_pages(struct extent_io_tree *tree,
2896 struct extent_buffer *eb,
2897 u64 start, int wait,
2898 get_extent_t *get_extent)
2901 unsigned long start_i;
2905 unsigned long num_pages;
2906 struct bio *bio = NULL;
2909 if (eb->flags & EXTENT_UPTODATE)
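	/* the tree-based uptodate check below is disabled via the 0 &&;
	 * only the EXTENT_UPTODATE flag on the buffer itself is trusted
	 */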
2912 if (0 && test_range_bit(tree, eb->start, eb->start + eb->len - 1,
2913 EXTENT_UPTODATE, 1)) {
2918 WARN_ON(start < eb->start);
2919 start_i = (start >> PAGE_CACHE_SHIFT) -
2920 (eb->start >> PAGE_CACHE_SHIFT);
2925 num_pages = num_extent_pages(eb->start, eb->len);
2926 for (i = start_i; i < num_pages; i++) {
2927 page = extent_buffer_page(eb, i);
2928 if (PageUptodate(page)) {
2932 if (TestSetPageLocked(page)) {
2938 if (!PageUptodate(page)) {
			err = __extent_read_full_page(tree, page,
						      get_extent, &bio);
2950 submit_one_bio(READ, bio);
2955 for (i = start_i; i < num_pages; i++) {
2956 page = extent_buffer_page(eb, i);
2957 wait_on_page_locked(page);
2958 if (!PageUptodate(page)) {
2963 eb->flags |= EXTENT_UPTODATE;
2966 EXPORT_SYMBOL(read_extent_buffer_pages);
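/*
 * copy len bytes starting at offset start out of the extent buffer
 * into dstv
 */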
2968 void read_extent_buffer(struct extent_buffer *eb, void *dstv,
2969 unsigned long start,
2976 char *dst = (char *)dstv;
2977 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2978 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2979 unsigned long num_pages = num_extent_pages(eb->start, eb->len);
	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->len);
2984 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2987 page = extent_buffer_page(eb, i);
2988 if (!PageUptodate(page)) {
2989 printk("page %lu not up to date i %lu, total %lu, len %lu\n", page->index, i, num_pages, eb->len);
2992 WARN_ON(!PageUptodate(page));
2994 cur = min(len, (PAGE_CACHE_SIZE - offset));
2995 kaddr = kmap_atomic(page, KM_USER1);
2996 memcpy(dst, kaddr + offset, cur);
2997 kunmap_atomic(kaddr, KM_USER1);
3005 EXPORT_SYMBOL(read_extent_buffer);
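/*
 * kmap the page holding [start, start + min_len) in an extent buffer and
 * return a pointer into it; the requested range must not cross a page
 * boundary
 */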
3007 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
3008 unsigned long min_len, char **token, char **map,
3009 unsigned long *map_start,
3010 unsigned long *map_len, int km)
3012 size_t offset = start & (PAGE_CACHE_SIZE - 1);
3015 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3016 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	unsigned long end_i = (start_offset + start + min_len - 1) >>
		PAGE_CACHE_SHIFT;
3024 offset = start_offset;
3028 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
3030 if (start + min_len > eb->len) {
3031 printk("bad mapping eb start %Lu len %lu, wanted %lu %lu\n", eb->start, eb->len, start, min_len);
3035 p = extent_buffer_page(eb, i);
3036 WARN_ON(!PageUptodate(p));
3037 kaddr = kmap_atomic(p, km);
3039 *map = kaddr + offset;
3040 *map_len = PAGE_CACHE_SIZE - offset;
3043 EXPORT_SYMBOL(map_private_extent_buffer);
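/*
 * like map_private_extent_buffer, but also caches the mapping in the
 * extent buffer, dropping any previously cached mapping first
 */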
3045 int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
3046 unsigned long min_len,
3047 char **token, char **map,
3048 unsigned long *map_start,
3049 unsigned long *map_len, int km)
3053 if (eb->map_token) {
3054 unmap_extent_buffer(eb, eb->map_token, km);
3055 eb->map_token = NULL;
3058 err = map_private_extent_buffer(eb, start, min_len, token, map,
3059 map_start, map_len, km);
3061 eb->map_token = *token;
3063 eb->map_start = *map_start;
3064 eb->map_len = *map_len;
3068 EXPORT_SYMBOL(map_extent_buffer);
3070 void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
3072 kunmap_atomic(token, km);
3074 EXPORT_SYMBOL(unmap_extent_buffer);
3076 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
3077 unsigned long start,
3084 char *ptr = (char *)ptrv;
3085 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3086 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->len);
3092 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3095 page = extent_buffer_page(eb, i);
3096 WARN_ON(!PageUptodate(page));
3098 cur = min(len, (PAGE_CACHE_SIZE - offset));
3100 kaddr = kmap_atomic(page, KM_USER0);
3101 ret = memcmp(ptr, kaddr + offset, cur);
3102 kunmap_atomic(kaddr, KM_USER0);
3113 EXPORT_SYMBOL(memcmp_extent_buffer);
3115 void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
3116 unsigned long start, unsigned long len)
3122 char *src = (char *)srcv;
3123 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3124 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->len);
3129 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3132 page = extent_buffer_page(eb, i);
3133 WARN_ON(!PageUptodate(page));
3135 cur = min(len, PAGE_CACHE_SIZE - offset);
3136 kaddr = kmap_atomic(page, KM_USER1);
3137 memcpy(kaddr + offset, src, cur);
3138 kunmap_atomic(kaddr, KM_USER1);
3146 EXPORT_SYMBOL(write_extent_buffer);
3148 void memset_extent_buffer(struct extent_buffer *eb, char c,
3149 unsigned long start, unsigned long len)
3155 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3156 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->len);
3161 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3164 page = extent_buffer_page(eb, i);
3165 WARN_ON(!PageUptodate(page));
3167 cur = min(len, PAGE_CACHE_SIZE - offset);
3168 kaddr = kmap_atomic(page, KM_USER0);
3169 memset(kaddr + offset, c, cur);
3170 kunmap_atomic(kaddr, KM_USER0);
3177 EXPORT_SYMBOL(memset_extent_buffer);
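/*
 * copy len bytes from offset src_offset of src into offset dst_offset
 * of dst
 */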
3179 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
3180 unsigned long dst_offset, unsigned long src_offset,
3183 u64 dst_len = dst->len;
3188 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3189 unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3191 WARN_ON(src->len != dst_len);
3193 offset = (start_offset + dst_offset) &
3194 ((unsigned long)PAGE_CACHE_SIZE - 1);
3197 page = extent_buffer_page(dst, i);
3198 WARN_ON(!PageUptodate(page));
3200 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
3202 kaddr = kmap_atomic(page, KM_USER0);
3203 read_extent_buffer(src, kaddr + offset, src_offset, cur);
3204 kunmap_atomic(kaddr, KM_USER0);
3212 EXPORT_SYMBOL(copy_extent_buffer);
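/*
 * memmove-style helper for a single destination page: copies backwards
 * so overlapping ranges within the same buffer are handled safely
 */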
3214 static void move_pages(struct page *dst_page, struct page *src_page,
3215 unsigned long dst_off, unsigned long src_off,
3218 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3219 if (dst_page == src_page) {
3220 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
3222 char *src_kaddr = kmap_atomic(src_page, KM_USER1);
3223 char *p = dst_kaddr + dst_off + len;
3224 char *s = src_kaddr + src_off + len;
3229 kunmap_atomic(src_kaddr, KM_USER1);
3231 kunmap_atomic(dst_kaddr, KM_USER0);
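/* memcpy-style helper between two (possibly identical) mapped pages */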
3234 static void copy_pages(struct page *dst_page, struct page *src_page,
3235 unsigned long dst_off, unsigned long src_off,
	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
	char *src_kaddr;

	if (dst_page != src_page)
		src_kaddr = kmap_atomic(src_page, KM_USER1);
	else
		src_kaddr = dst_kaddr;
3246 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
3247 kunmap_atomic(dst_kaddr, KM_USER0);
3248 if (dst_page != src_page)
3249 kunmap_atomic(src_kaddr, KM_USER1);
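/*
 * copy a range inside a single extent buffer, page by page; overlapping
 * ranges should go through memmove_extent_buffer instead
 */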
3252 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3253 unsigned long src_offset, unsigned long len)
3256 size_t dst_off_in_page;
3257 size_t src_off_in_page;
3258 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3259 unsigned long dst_i;
3260 unsigned long src_i;
3262 if (src_offset + len > dst->len) {
3263 printk("memmove bogus src_offset %lu move len %lu len %lu\n",
3264 src_offset, len, dst->len);
3267 if (dst_offset + len > dst->len) {
3268 printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
3269 dst_offset, len, dst->len);
3274 dst_off_in_page = (start_offset + dst_offset) &
3275 ((unsigned long)PAGE_CACHE_SIZE - 1);
3276 src_off_in_page = (start_offset + src_offset) &
3277 ((unsigned long)PAGE_CACHE_SIZE - 1);
3279 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3280 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
					       src_off_in_page));
3284 cur = min_t(unsigned long, cur,
3285 (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
3287 copy_pages(extent_buffer_page(dst, dst_i),
3288 extent_buffer_page(dst, src_i),
3289 dst_off_in_page, src_off_in_page, cur);
3296 EXPORT_SYMBOL(memcpy_extent_buffer);
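/*
 * move a possibly overlapping range inside a single extent buffer,
 * copying from the end backwards when the destination sits above the
 * source
 */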
3298 void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3299 unsigned long src_offset, unsigned long len)
3302 size_t dst_off_in_page;
3303 size_t src_off_in_page;
3304 unsigned long dst_end = dst_offset + len - 1;
3305 unsigned long src_end = src_offset + len - 1;
3306 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3307 unsigned long dst_i;
3308 unsigned long src_i;
3310 if (src_offset + len > dst->len) {
3311 printk("memmove bogus src_offset %lu move len %lu len %lu\n",
3312 src_offset, len, dst->len);
3315 if (dst_offset + len > dst->len) {
3316 printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
3317 dst_offset, len, dst->len);
3320 if (dst_offset < src_offset) {
3321 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
3325 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
3326 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
3328 dst_off_in_page = (start_offset + dst_end) &
3329 ((unsigned long)PAGE_CACHE_SIZE - 1);
3330 src_off_in_page = (start_offset + src_end) &
3331 ((unsigned long)PAGE_CACHE_SIZE - 1);
3333 cur = min_t(unsigned long, len, src_off_in_page + 1);
3334 cur = min(cur, dst_off_in_page + 1);
3335 move_pages(extent_buffer_page(dst, dst_i),
3336 extent_buffer_page(dst, src_i),
3337 dst_off_in_page - cur + 1,
3338 src_off_in_page - cur + 1, cur);
3345 EXPORT_SYMBOL(memmove_extent_buffer);